| repo_name | path | language | license | size | score | prefix | middle | suffix |
|---|---|---|---|---|---|---|---|---|
| string (5-100 chars) | string (4-231 chars) | string (1 class: Python) | string (15 classes) | int64 (6-947k) | float64 (0-0.34) | string (0-8.16k chars) | string (3-512 chars) | string (0-8.17k chars) |
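
Each row is a fill-in-the-middle sample: a source file split at an arbitrary point into `prefix`, a short `middle` span, and `suffix`, so concatenating the three fields reproduces the (possibly truncated) file. A minimal sketch of reassembling one record, assuming records are plain dicts keyed by the column names above:

    # Reassemble the original file text from one fill-in-the-middle record.
    def reassemble(record):
        # prefix and suffix are capped near 8k characters, so the result may
        # be a truncated window of the real file rather than the whole file.
        return record["prefix"] + record["middle"] + record["suffix"]

    sample = {"prefix": "from django.conf.ur",
              "middle": "ls import patterns, url\n",
              "suffix": "from proxy import views\n"}
    print(reassemble(sample))
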
stuart-warren/django-s3-proxy | proxy/urls.py | Python | apache-2.0 | 214 | 0.004673

from django.conf.urls import patterns, url

from proxy import views

urlpatterns = patterns('',
    url(r'^$', views.search, name='search'),
    url(r'^(?P<bucket_name>\S+?)(?P<key>/\S*)', views.get, name='get')
)

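This module (and the `open-o/nfvo` urls.py sample further down) uses the old `patterns('', ...)` helper, which was deprecated in Django 1.8 and removed in Django 1.10. On modern Django the equivalent is a plain list; a sketch using the view names from the sample above:

    # Equivalent urlconf without the removed patterns() helper.
    from django.conf.urls import url

    from proxy import views

    urlpatterns = [
        url(r'^$', views.search, name='search'),
        url(r'^(?P<bucket_name>\S+?)(?P<key>/\S*)', views.get, name='get'),
    ]
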
Manexware/medical | oemedical/oemedical_medicament_category/oemedical_medicament_category.py | Python | gpl-2.0 | 589 | 0.011885

from openerp import models, fields


class OeMedicalMedicamentCategory(models.Model):
    _name = 'oemedical.medicament.category'

    childs = fields.One2many('oemedical.medicament.category',
                             'parent_id', string='Children', )
    name = fields.Char(size=256, string='Name', required=True)
    parent_id = fields.Many2one('oemedical.medicament.category',
                                string='Parent', select=True)

    _constraints = [
        (models.Model._check_recursion,
         'Error ! You cannot create recursive \n'
         'Category.', ['parent_id']),
    ]

andyneff/voxel-globe | voxel_globe/task/views.py | Python | mit | 1,192 | 0.026846

from django.shortcuts import render, HttpResponse
import os

# Create your views here.

def status(request, task_id):
    from celery.result import AsyncResult
    task = AsyncResult(task_id)
    task.traceback_html = tracebackToHtml(task.traceback)
    return render(request, 'task/html/task_status.html',
                  {'task': task,
                   'celery_url': '%s:%s' % (os.environ['VIP_FLOWER_HOST'],
                                            os.environ['VIP_FLOWER_PORT'])})

def tracebackToHtml(txt):
    html = str(txt).replace(' ' * 2, ' ' * 4)
    html = html.split('\n')
    html = map(lambda x: '<div style="text-indent: -4em; padding-left: 4em">' +
               x + '</div>', html)
    html = '\n'.join(html)
    return html

def listQueues(request):
    def safe_int(i):
        try:
            return int(i)
        except ValueError:
            return None

    import pyrabbit
    # These values need to be unhardcoded...
    client = pyrabbit.api.Client('localhost:15672', 'guest', 'guest')
    names = [x['name'] for x in client.get_queues()]
    tasks = [x for x in map(safe_int, names) if x is not None]
    return render(request, 'task/html/task_list.html',
                  {'tasks': tasks})

devilry/trix2 | trix/trix_core/tests/test_trix_markdown.py | Python | bsd-3-clause | 421 | 0

from django.test import TestCase

from trix.trix_core import trix_markdown


class TestTrixMarkdown(TestCase):
    def test_simple(self):
        self.assertEqual(
            trix_markdown.assignment_markdown('# Hello world\n'),
            '<h1>Hello world</h1>')

    def test_nl2br(self):
        self.assertEqual(
            trix_markdown.assignment_markdown('Hello\nworld'),
            '<p>Hello<br>\nworld</p>')

samyoyo/3vilTwinAttacker | 3vilTwin-Attacker.py | Python | mit | 1,843 | 0.01465

#!/usr/bin/env python2.7
#The MIT License (MIT)
#Copyright (c) 2015-2016 mh4x0f P0cL4bs Team
#Permission is hereby granted, free of charge, to any person obtaining a copy of
#this software and associated documentation files (the "Software"), to deal in
#the Software without restriction, including without limitation the rights to
#use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
#the Software, and to permit persons to whom the Software is furnished to do so,
#subject to the following conditions:
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
#FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
#COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
#IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
#CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

from sys import argv, exit
from os import getuid
from PyQt4.QtGui import QApplication, QIcon
from Core.Privilege import frm_privelege
from Core.Main import Initialize
from Core.check import check_dependencies
from Modules.utils import Refactor


def ExecRootApp():
    check_dependencies()
    root = QApplication(argv)
    app = Initialize()
    app.setWindowIcon(QIcon('rsc/icon.ico'))
    app.center(), app.show()
    exit(root.exec_())

if __name__ == '__main__':
    if not getuid() == 0:
        app2 = QApplication(argv)
        priv = frm_privelege()
        priv.setWindowIcon(QIcon('rsc/icon.ico'))
        priv.show(), app2.exec_()
        exit(Refactor.threadRoot(priv.Editpassword.text()))
    ExecRootApp()

SINGROUP/pycp2k | pycp2k/classes/_implicit_psolver1.py | Python | lgpl-3.0 | 709 | 0.002821

from pycp2k.inputsection import InputSection
from ._dielectric_cube1 import _dielectric_cube1
from ._dirichlet_bc_cube1 import _dirichlet_bc_cube1
from ._dirichlet_cstr_charge_cube1 import _dirichlet_cstr_charge_cube1


class _implicit_psolver1(InputSection):
    def __init__(self):
        InputSection.__init__(self)
        self.DIELECTRIC_CUBE = _dielectric_cube1()
        self.DIRICHLET_BC_CUBE = _dirichlet_bc_cube1()
        self.DIRICHLET_CSTR_CHARGE_CUBE = _dirichlet_cstr_charge_cube1()
        self._name = "IMPLICIT_PSOLVER"
        self._subsections = {'DIRICHLET_BC_CUBE': 'DIRICHLET_BC_CUBE',
                             'DIRICHLET_CSTR_CHARGE_CUBE': 'DIRICHLET_CSTR_CHARGE_CUBE',
                             'DIELECTRIC_CUBE': 'DIELECTRIC_CUBE'}

rain1024/underthesea | underthesea/classification/model_fasttext.py | Python | gpl-3.0 | 2,185 | 0

import random
from os.path import join, dirname

import numpy as np
from sklearn.base import ClassifierMixin, BaseEstimator
import fasttext as ft

from underthesea.util.file_io import write
import os
from underthesea.util.singleton import Singleton


class FastTextClassifier(ClassifierMixin, BaseEstimator):
    def __init__(self):
        self.estimator = None

    def fit(self, X, y, model_filename=None):
        """Fit FastText according to X, y

        Parameters
        ----------
        X : list of text
            each item is a text
        y : list
            each item is either a label (in multi class problem) or list of
            labels (in multi label problem)
        """
        train_file = "temp.train"
        X = [x.replace("\n", " ") for x in X]
        y = [item[0] for item in y]
        y = [_.replace(" ", "-") for _ in y]
        lines = ["__label__{} , {}".format(j, i) for i, j in zip(X, y)]
        content = "\n".join(lines)
        write(train_file, content)
        if model_filename:
            self.estimator = ft.supervised(train_file, model_filename)
        else:
            self.estimator = ft.supervised(train_file)
        os.remove(train_file)

    def predict(self, X):
        # Not implemented; only predict_proba is supported.
        return

    def predict_proba(self, X):
        output_ = self.estimator.predict_proba(X)

        def transform_item(item):
            label, score = item[0]
            label = label.replace("__label__", "")
            label = int(label)
            if label == 0:
                label = 1
                score = 1 - score
            return [label, score]

        output_ = [transform_item(item) for item in output_]
        output1 = np.array(output_)
        return output1


@Singleton
class FastTextPredictor:
    def __init__(self):
        filepath = join(dirname(__file__), "fasttext.model")
        self.estimator = ft.load_model(filepath)

    def transform_output(self, y):
        y = y[0].replace("__label__", "")
        y = y.replace("-", " ")
        return y

    def predict(self, X):
        X = [X]
        y_pred = self.estimator.predict(X)
        y_pred = [self.transform_output(item) for item in y_pred]
        return y_pred

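`fit` above serializes each training pair into fastText's supervised text format, one example per line, with spaces inside labels replaced by hyphens. A quick illustration of the line it generates (values invented):

    label = "rat tien_ich".replace(" ", "-")
    text = "san pham tot"
    print("__label__{} , {}".format(label, text))
    # __label__rat-tien_ich , san pham tot
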
open-o/nfvo | lcm/lcm/ns/vls/urls.py | Python | apache-2.0 | 1,036 | 0.000965

# Copyright 2016 ZTE Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from django.conf.urls import patterns, url
from rest_framework.urlpatterns import format_suffix_patterns

from lcm.ns.vls.views import VlView, VlDetailView

urlpatterns = patterns('',
    url(r'^openoapi/nslcm/v1/ns/vls$', VlView.as_view()),
    url(r'^openoapi/nslcm/v1/ns/vls/(?P<vl_inst_id>[0-9a-zA-Z_-]+)$', VlDetailView.as_view()),
)

urlpatterns = format_suffix_patterns(urlpatterns)

GbalsaC/bitnamiP | venv/lib/python2.7/site-packages/markdown/extensions/nl2br.py | Python | agpl-3.0 | 765 | 0.002614

"""
NL2BR Extension
===============

A Python-Markdown extension to treat newlines as hard breaks; like
GitHub-flavored Markdown does.

Usage:

    >>> import markdown
    >>> print markdown.markdown('line 1\\nline 2', extensions=['nl2br'])
    <p>line 1<br />
    line 2</p>

Copyright 2011 [Brian Neal](http://deathofagremmie.com/)

Dependencies:

* [Python 2.4+](http://python.org)
* [Markdown 2.1+](http://packages.python.org/Markdown/)

"""

import markdown

BR_RE = r'\n'


class Nl2BrExtension(markdown.Extension):

    def extendMarkdown(self, md, md_globals):
        br_tag = markdown.inlinepatterns.SubstituteTagPattern(BR_RE, 'br')
        md.inlinePatterns.add('nl', br_tag, '_end')


def makeExtension(configs=None):
    return Nl2BrExtension(configs)

feuervogel/django-taggit-templatetags | taggit_templatetags/tests/models.py | Python | bsd-3-clause | 379 | 0.015831

from django.db import models

from taggit.managers import TaggableManager


class BaseModel(models.Model):
    name = models.CharField(max_length=50, unique=True)
    tags = TaggableManager()

    def __unicode__(self):
        return self.name

    class Meta(object):
        abstract = True


class AlphaModel(BaseModel):
    pass


class BetaModel(BaseModel):
    pass

micumatei/learning-goals | Probleme/Solutii/Find_Cost_of_Tile_to_Cover_WxH_Floor/mmicu/python/main.py | Python | mit | 1,283 | 0.000779

#!/usr/bin/env python3
"""
Calculate the total cost of the tile it would take to cover a floor
plan of a given width and height, using a cost entered by the user.
"""
from __future__ import print_function

import argparse
import sys


class App(object):
    """Application."""

    def __init__(self, args):
        self._raw_args = args
        self._args = None
        self._argparse = argparse.ArgumentParser(
            description="Calculate the cost of tiling a WxH floor.")
        self.prepare_parser()

    def prepare_parser(self):
        """Prepare Argument Parser."""
        self._argparse.add_argument(
            "w", type=int, help="Width")
        self._argparse.add_argument(
            "h", type=int, help="Height")
        self._argparse.add_argument(
            "c", type=float, help="Cost of Tile assuming that a tile is 1x1")

    def run(self):
        """Run the application."""
        self._args = self._argparse.parse_args(self._raw_args)
        rez = App.get_cost(self._args.w, self._args.h, self._args.c)
        output = "The cost is : {}".format(rez)
        print(output)

    @staticmethod
    def get_cost(width, height, cost):
        """Compute the cost."""
        return (width * height) * float(cost)


if __name__ == "__main__":
    App(sys.argv[1:]).run()

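The cost is simply width times height times the per-tile price. An invented invocation, reusing the `App` class above:

    # A 4x5 floor at 2.5 per 1x1 tile costs 4 * 5 * 2.5 = 50.0.
    App(["4", "5", "2.5"]).run()  # prints: The cost is : 50.0
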
csherwood-usgs/landlab | landlab/ca/examples/diffusion_in_gravity.py | Python | mit | 5,014 | 0.01077

#!/usr/env/python
"""
diffusion_in_gravity.py

Example of a continuous-time, stochastic, pair-based cellular automaton model,
which simulates diffusion by random particle motion in a gravitational field.
The purpose of the example is to demonstrate the use of an OrientedRasterLCA.

GT, September 2014
"""
from __future__ import print_function

_DEBUG = False

import time
from numpy import where, bitwise_and
from landlab import RasterModelGrid
from landlab.ca.celllab_cts import Transition, CAPlotter
from landlab.ca.oriented_raster_cts import OrientedRasterCTS


def setup_transition_list():
    """
    Creates and returns a list of Transition() objects to represent state
    transitions for a biased random walk, in which the rate of downward
    motion is greater than the rate in the other three directions.

    Parameters
    ----------
    (none)

    Returns
    -------
    xn_list : list of Transition objects
        List of objects that encode information about the link-state transitions.

    Notes
    -----
    State 0 represents fluid and state 1 represents a particle (such as a
    sediment grain or dissolved heavy particle).

    The states and transitions are as follows:

    Pair state      Transition to       Process         Rate
    ==========      =============       =======         ====
    0 (0-0)         (none)              -               -
    1 (0-1)         2 (1-0)             left motion     1.0
    2 (1-0)         1 (0-1)             right motion    1.0
    3 (1-1)         (none)              -               -
    4 (0/0)         (none)              -               -
    5 (0/1)         6 (1/0)             down motion     1.1
    6 (1/0)         5 (0/1)             up motion       0.9
    7 (1/1)         (none)              -               -
    """
    xn_list = []

    xn_list.append(Transition((0, 1, 0), (1, 0, 0), 1., 'left motion'))
    xn_list.append(Transition((1, 0, 0), (0, 1, 0), 1., 'right motion'))
    xn_list.append(Transition((0, 1, 1), (1, 0, 1), 1.1, 'down motion'))
    xn_list.append(Transition((1, 0, 1), (0, 1, 1), 0.9, 'up motion'))

    if _DEBUG:
        print()
        print('setup_transition_list(): list has', len(xn_list), 'transitions:')
        for t in xn_list:
            print('  From state', t.from_state, 'to state', t.to_state,
                  'at rate', t.rate, 'called', t.name)

    return xn_list


def main():
    # INITIALIZE

    # User-defined parameters
    nr = 80
    nc = 80
    plot_interval = 2
    run_duration = 200
    report_interval = 5.0  # report interval, in real-time seconds

    # Remember the clock time, and calculate when we next want to report
    # progress.
    current_real_time = time.time()
    next_report = current_real_time + report_interval

    # Create grid
    mg = RasterModelGrid(nr, nc, 1.0)

    # Make the boundaries be walls
    mg.set_closed_boundaries_at_grid_edges(True, True, True, True)

    # Set up the states and pair transitions.
    ns_dict = {0: 'fluid', 1: 'particle'}
    xn_list = setup_transition_list()

    # Create the node-state array and attach it to the grid
    node_state_grid = mg.add_zeros('node', 'node_state_map', dtype=int)

    # Initialize the node-state array
    middle_rows = where(bitwise_and(mg.node_y > 0.45 * nr, mg.node_y < 0.55 * nr))[0]
    node_state_grid[middle_rows] = 1

    # Create the CA model
    ca = OrientedRasterCTS(mg, ns_dict, xn_list, node_state_grid)

    # Debug output if needed
    if _DEBUG:
        n = ca.grid.number_of_nodes
        for r in range(ca.grid.number_of_node_rows):
            for c in range(ca.grid.number_of_node_columns):
                n -= 1
                print('{0:.0f}'.format(ca.node_state[n]), end=' ')
            print()

    # Create a CAPlotter object for handling screen display
    ca_plotter = CAPlotter(ca)

    # Plot the initial grid
    ca_plotter.update_plot()

    # RUN
    current_time = 0.0
    while current_time < run_duration:

        # Once in a while, print out simulation and real time to let the user
        # know that the sim is running ok
        current_real_time = time.time()
        if current_real_time >= next_report:
            print('Current sim time', current_time,
                  '(', 100 * current_time / run_duration, '%)')
            next_report = current_real_time + report_interval

        # Run the model forward in time until the next output step
        ca.run(current_time + plot_interval, ca.node_state,
               plot_each_transition=False)  # , plotter=ca_plotter)
        current_time += plot_interval

        # Plot the current grid
        ca_plotter.update_plot()

        # for debugging
        if _DEBUG:
            n = ca.grid.number_of_nodes
            for r in range(ca.grid.number_of_node_rows):
                for c in range(ca.grid.number_of_node_columns):
                    n -= 1
                    print('{0:.0f}'.format(ca.node_state[n]), end=' ')
                print()

    # FINALIZE

    # Plot
    ca_plotter.finalize()


if __name__ == "__main__":
    main()

mmnelemane/nova | nova/tests/functional/test_extensions.py | Python | apache-2.0 | 1,588 | 0

# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_config import cfg
from oslo_log import log as logging

# Import extensions to pull in osapi_compute_extension CONF option used below.
from nova.tests.functional import integrated_helpers

CONF = cfg.CONF

LOG = logging.getLogger(__name__)


class ExtensionsTest(integrated_helpers._IntegratedTestBase):
    _api_version = 'v2'

    def _get_flags(self):
        f = super(ExtensionsTest, self)._get_flags()
        f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
        f['osapi_compute_extension'].append(
            'nova.tests.unit.api.openstack.compute.legacy_v2.extensions.'
            'foxinsocks.Foxinsocks')
        return f

    def test_get_foxnsocks(self):
        # Simple check that fox-n-socks works.
        response = self.api.api_request('/foxnsocks')
        foxnsocks = response.content
        LOG.debug("foxnsocks: %s" % foxnsocks)
        self.assertEqual('Try to say this Mr. Knox, sir...', foxnsocks)

alexmorozov/templated-docs | example/invoices/views.py | Python | mit | 643 | 0.001555

#--coding: utf8--
from django.shortcuts import render

from templated_docs import fill_template
from templated_docs.http import FileResponse

from invoices.forms import InvoiceForm


def invoice_view(request):
    form = InvoiceForm(request.POST or None)
    if form.is_valid():
        doctype = form.cleaned_data['format']
        filename = fill_template(
            'invoices/invoice.odt', form.cleaned_data,
            output_format=doctype)
        visible_filename = 'invoice.{}'.format(doctype)
        return FileResponse(filename, visible_filename)
    else:
        return render(request, 'invoices/form.html', {'form': form})

stephenjelfs/aws-iot-gddev2016 | controlUnit.py | Python | mit | 3,371 | 0.00445

#!/usr/bin/env python

import argparse
import json
import time
import logging
from AWSIoTPythonSDK.MQTTLib import AWSIoTMQTTShadowClient
import RPi.GPIO as GPIO

parser = argparse.ArgumentParser(description='Lightbulb control unit.')
parser.add_argument('-e', '--endpoint', required=True, help='The AWS Iot endpoint.')
parser.add_argument('-r', '--rootCA', required=True, help='Root CA file path.')
parser.add_argument('-c', '--cert', required=True, help='Certificate file path.')
parser.add_argument('-k', '--key', required=True, help='Private key file path.')
args = parser.parse_args()

def lightbulbShadowCallback_Update(payload, responseStatus, token):
    if responseStatus == "timeout":
        print("Update request " + token + " time out!")
    if responseStatus == "accepted":
        payloadDict = json.loads(payload)
        print("~~~~~~~~~~~~~~~~~~~~~~~")
        print("Update request with token: " + token + " accepted!")
        print("property: " + str(payloadDict["state"]["desired"]["color"]))
        print("~~~~~~~~~~~~~~~~~~~~~~~\n\n")
    if responseStatus == "rejected":
        print("Update request " + token + " rejected!")

def lightBulbShadowCallback_Delete(payload, responseStatus, token):
    if responseStatus == "timeout":
        print("Delete request " + token + " time out!")
    if responseStatus == "accepted":
        print("~~~~~~~~~~~~~~~~~~~~~~~")
        print("Delete request with token: " + token + " accepted!")
        print("~~~~~~~~~~~~~~~~~~~~~~~\n\n")
    if responseStatus == "rejected":
        print("Delete request " + token + " rejected!")

# Configure logging
logger = logging.getLogger("AWSIoTPythonSDK.core")
logger.setLevel(logging.DEBUG)
streamHandler = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
streamHandler.setFormatter(formatter)
logger.addHandler(streamHandler)

# Init AWSIoTMQTTShadowClient
lightBulbShadowClient = AWSIoTMQTTShadowClient("controlUnitClient")
lightBulbShadowClient.configureEndpoint(args.endpoint, 8883)
lightBulbShadowClient.configureCredentials(args.rootCA, args.key, args.cert)

# AWSIoTMQTTShadowClient configuration
lightBulbShadowClient.configureAutoReconnectBackoffTime(1, 32, 20)
lightBulbShadowClient.configureConnectDisconnectTimeout(10)  # 10 sec
lightBulbShadowClient.configureMQTTOperationTimeout(5)  # 5 sec

# Connect to AWS IoT
lightBulbShadowClient.connect()

# Create a deviceShadow with persistent subscription
ControlUnit = lightBulbShadowClient.createShadowHandlerWithName("rpi-sense-hat", True)

# Delete shadow JSON doc
ControlUnit.shadowDelete(lightBulbShadowCallback_Delete, 5)

# Update shadow
def updateShadow(color):
    JSONPayload = '{"state":{"desired":{"color":"' + color + '"}}}'
    ControlUnit.shadowUpdate(JSONPayload, lightbulbShadowCallback_Update, 5)

RED = 9
GREEN = 10
BLUE = 11

GPIO.setmode(GPIO.BCM)
GPIO.setup(RED, GPIO.IN)
GPIO.setup(GREEN, GPIO.IN)
GPIO.setup(BLUE, GPIO.IN)

lastButton = None

while True:
    if (lastButton != RED and GPIO.input(RED) == False):
        lastButton = RED
        updateShadow("red")
    if (lastButton != GREEN and GPIO.input(GREEN) == False):
        lastButton = GREEN
        updateShadow("green")
    if (lastButton != BLUE and GPIO.input(BLUE) == False):
        lastButton = BLUE
        updateShadow("blue")
    time.sleep(0.05)

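`updateShadow` above builds the shadow document by string concatenation. A safer sketch of the same payload using `json.dumps`, which escapes any special characters in `color` (the helper name is invented, not part of the sample):

    import json

    def build_shadow_payload(color):
        # Serialize the desired-state document instead of concatenating
        # strings, so quotes or backslashes in `color` cannot break the JSON.
        return json.dumps({"state": {"desired": {"color": color}}})

    print(build_shadow_payload("red"))  # {"state": {"desired": {"color": "red"}}}
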
laurentb/weboob | modules/lyricsdotcom/module.py | Python | lgpl-3.0 | 1,693 | 0.000591

# -*- coding: utf-8 -*-

# Copyright(C) 2016 Julien Veyssier
#
# This file is part of a weboob module.
#
# This weboob module is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This weboob module is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this weboob module. If not, see <http://www.gnu.org/licenses/>.

from weboob.capabilities.lyrics import CapLyrics, SongLyrics
from weboob.tools.backend import Module
from weboob.tools.compat import quote_plus

from .browser import LyricsdotcomBrowser

__all__ = ['LyricsdotcomModule']


class LyricsdotcomModule(Module, CapLyrics):
    NAME = 'lyricsdotcom'
    MAINTAINER = u'Julien Veyssier'
    EMAIL = '[email protected]'
    VERSION = '2.1'
    DESCRIPTION = 'Lyrics.com lyrics website'
    LICENSE = 'AGPLv3+'
    BROWSER = LyricsdotcomBrowser

    def get_lyrics(self, id):
        return self.browser.get_lyrics(id)

    def iter_lyrics(self, criteria, pattern):
        return self.browser.iter_lyrics(criteria, quote_plus(pattern.encode('utf-8')))

    def fill_songlyrics(self, songlyrics, fields):
        if 'content' in fields:
            songlyrics = self.get_lyrics(songlyrics.id)
        return songlyrics

    OBJECTS = {
        SongLyrics: fill_songlyrics
    }

bmazin/SDR | Projects/FirmwareTests/darkDebug/phaseStreamTest.py | Python | gpl-2.0 | 17,347 | 0.022655

"""
File:      phaseStreamTest.py
Author:    Matt Strader
Date:      Feb 18, 2016
Firmware:  pgbe0_2016_Feb_19_2018.fpg

This script inserts a phase pulse in the qdr dds table and sets up the fake
adc lut. It checks snap blocks for each stage of the channelization process.
In the end the phase pulse should be recovered in the phase timestream of the
chosen channel.
"""
import matplotlib, time, struct
import numpy as np
import matplotlib.pyplot as plt
import casperfpga
import corr
import logging
from myQdr import Qdr as myQdr
import types
import sys
import functools
from loadWavePulseLut import loadWaveToMem, loadDdsToMem
from loadWaveLut import writeBram
from Utils.binTools import castBin


def snapDdc(fpga, bSnapAll=False, bPlot=False, selBinIndex=0, selChanIndex=0,
            selChanStream=0, ddsAddrTrig=0):
    """trigger and read snapshots of aligned input and data values in the firmware

    INPUTS:
        bSnapAll: If True, snapshot will record values for all channels, not just one
        bPlot: If True, will popup a plot of snapped values
        selBinIndex: the fft bin to be inspected
        selChanIndex: the channel within a stream (after channel selection) to be inspected
        selChanStream: which of the four simultaneous streams of channels to inspect
        ddsAddrTrig: trigger when the address for the DDS look up table reaches this value (out of 2**20)

    OUTPUT:
        dict with keys:
        'bin': complex values seen in a chosen fft bin
        'chan': complex values in a chosen channel
        'dds': complex values coming from the QDR look-up table
        'mix': complex values after the dds mixer but before the low pass filter
        'ddcOut': complex values after the DDC low pass filter and downsampling
        'chanCtr': the channel numbers associated with values in 'chan','dds','mix','ddcOut'.
            If bSnapAll=False, these should all equal selChanIndex
        'expectedMix': the values of 'chan' multiplied by 'dds'. Hopefully this matches
            the values in 'mix'.
    """
    # set up the snapshots to record the selected bin/channel
    fpga.write_int('sel_bin', selBinIndex)
    fpga.write_int('sel_bch', selChanIndex)
    fpga.write_int('sel_stream', selChanStream)
    fpga.write_int('sel_ctr', ddsAddrTrig)

    snapshotNames = ['snp2_bin_ss', 'snp2_ch_ss', 'snp2_dds_ss', 'snp2_mix_ss',
                     'snp2_ctr_ss', 'snp3_ddc_ss', 'snp3_cap_ss']
    for name in snapshotNames:
        fpga.snapshots[name].arm(man_valid=bSnapAll)

    time.sleep(.1)
    fpga.write_int('trig_buf', 1)  # trigger snapshots
    time.sleep(.1)  # wait for other trigger conditions to be met
    fpga.write_int('trig_buf', 0)  # release trigger

    # in most of the snapshots, we get two IQ values per cycle (I[t=0],Q[t=0]) and (I[t=1],Q[t=1])
    # Retrieve them separately and then interleave them
    binData = fpga.snapshots['snp2_bin_ss'].read(timeout=5, arm=False, man_valid=bSnapAll)['data']
    i0 = np.array(binData['i0'])
    i1 = np.array(binData['i1'])
    q0 = np.array(binData['q0'])
    q1 = np.array(binData['q1'])
    # interleave values from alternating cycles (I0,Q0) and (I1,Q1)
    bi = np.vstack((i0, i1)).flatten('F')
    bq = np.vstack((q0, q1)).flatten('F')

    chanData = fpga.snapshots['snp2_ch_ss'].read(timeout=5, arm=False, man_valid=bSnapAll)['data']
    ci0 = np.array(chanData['i0'])
    ci1 = np.array(chanData['i1'])
    cq0 = np.array(chanData['q0'])
    cq1 = np.array(chanData['q1'])
    ci = np.vstack((ci0, ci1)).flatten('F')
    cq = np.vstack((cq0, cq1)).flatten('F')

    ddsData = fpga.snapshots['snp2_dds_ss'].read(timeout=5, arm=False, man_valid=bSnapAll)['data']
    di0 = np.array(ddsData['i0'])
    di1 = np.array(ddsData['i1'])
    dq0 = np.array(ddsData['q0'])
    dq1 = np.array(ddsData['q1'])
    # interleave i0 and i1 values
    di = np.vstack((di0, di1)).flatten('F')
    dq = np.vstack((dq0, dq1)).flatten('F')

    expectedMix = (ci + 1.j*cq) * (di - 1.j*dq)

    mixerData = fpga.snapshots['snp2_mix_ss'].read(timeout=5, arm=False, man_valid=bSnapAll)['data']
    mi0 = np.array(mixerData['i0'])
    mi1 = np.array(mixerData['i1'])
    mq0 = np.array(mixerData['q0'])
    mq1 = np.array(mixerData['q1'])
    # interleave i0 and i1 values
    mi = np.vstack((mi0, mi1)).flatten('F')
    mq = np.vstack((mq0, mq1)).flatten('F')

    # The low-pass filter in the DDC stage downsamples by 2, so we only get one sample per cycle here
    ddcData = fpga.snapshots['snp3_ddc_ss'].read(timeout=5, arm=False, man_valid=bSnapAll)['data']
    li = np.array(ddcData['i0'])
    lq = np.array(ddcData['q0'])
    rawPhase = np.array(ddcData['raw_phase'])

    phaseData = fpga.snapshots['snp3_cap_ss'].read(timeout=5, arm=False, man_valid=bSnapAll)['data']
    filtPhase = np.array(phaseData['phase'])
    basePhase = np.array(phaseData['base'])
    trig = np.array(phaseData['trig'], dtype=np.bool)
    trig2 = np.array(phaseData['trig_raw'], dtype=np.bool)

    ctrData = fpga.snapshots['snp2_ctr_ss'].read(timeout=5, arm=False, man_valid=bSnapAll)['data']
    ctr = np.array(ctrData['ctr'])  # the channel counter (0-256)
    dctr = np.array(ctrData['dctr'])  # the dds lut address counter (0-2**20)

    if bPlot:
        # we have the same number of samples from the lpf/downsample as everything else, but each one
        # corresponds to every other timesample in the others. So leave off the second half of lpf
        # samples so the samples we have correspond to the same time period as the others, at least
        # when plotting.
        liSample = li[0:len(mi)/2]
        fig, ax = plt.subplots(1, 1)
        ax.plot(di, 'r.-', label='dds')
        ax.plot(bi, 'bv-', label='bin')
        ax.plot(ci, 'g.-', label='channel')
        ax.plot(mi, 'mo-', label='mix')
        ddcTimes = 2.*np.arange(0, len(liSample))
        ax.plot(ddcTimes, liSample, 'k.-', label='ddcOut')
        ax.set_title('I')
        ax.legend(loc='best')

    return {'bin': (bi + 1.j*bq), 'chan': (ci + 1.j*cq), 'dds': (di + 1.j*dq),
            'mix': (mi + 1.j*mq), 'ddcOut': (li + 1.j*lq), 'chanCtr': ctr,
            'ddsCtr': dctr, 'expectedMix': expectedMix, 'rawPhase': rawPhase,
            'filtPhase': filtPhase, 'trig': trig, 'trig2': trig2,
            'basePhase': basePhase}


def setSingleChanSelection(fpga, selBinNums=[0, 0, 0, 0], chanNum=0):
    """assigns bin numbers to a single channel (in each stream), to configure chan_sel block

    INPUTS:
        selBinNums: 4 bin numbers (for 4 streams) to be assigned to chanNum
        chanNum: the channel number to be assigned
    """
    nStreams = 4
    if len(selBinNums) != nStreams:
        raise TypeError, 'selBinNums must have number of elements matching number of streams in firmware'
    fpga.write_int('chan_sel_load', 0)  # set to zero so nothing loads while we set other registers.

    # assign the bin number to be loaded to each stream
    fpga.write_int('chan_sel_ch_bin0', selBinNums[0])
    fpga.write_int('chan_sel_ch_bin1', selBinNums[1])
    fpga.write_int('chan_sel_ch_bin2', selBinNums[2])
    fpga.write_int('chan_sel_ch_bin3', selBinNums[3])
    time.sleep(.1)

    # in the register chan_sel_load, the lsb initiates the loading of the above bin numbers into memory
    # the 8 bits above the lsb indicate which channel is being loaded (for all streams)
    loadVal = (chanNum << 1) + 1
    fpga.write_int('chan_sel_load', loadVal)
    time.sleep(.1)  # give it a chance to load

    fpga.write_int('chan_sel_load', 0)  # stop loading


def startStream(fpga, selChanIndex=0):
    """initiates streaming of phase timestream (after prog_fir) to the 1Gbit ethernet

    INPUTS:
        selChanIndex: which channel to stream
    """
    dest_ip = 167772210  # corresponds to IP 10.0.0.50
    fabric_port = 50000
    pktsPerFrame = 100  # how many 8byte words to accumulate before sending a frame

    # configure the gbe core,
    print 'restarting'
    fpga.write_int('stream_phase_gbe64_dest_ip', dest_ip)
    fpga.write_int('stream_phase_gbe64_dest_port', fabric_port)
    fpga.write_int('stream_phase_gbe64_words_per_frame', pktsPerFrame)
    # reset the core to make sure it's in a clean state
    fpga.write_int('stream_phase_gbe64_rst_core', 1)
    time.sleep(.1)
    fpga.write_int('stream_phase_gbe64_rst_core', 0)
    # choose what channel to stream
    fpga.write_int('stream_phase_ch_we', selChanIndex)

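The snapshot readers above repeatedly use the same interleaving idiom: two samples arrive per clock cycle in separate `i0`/`i1` arrays, and `np.vstack((i0, i1)).flatten('F')` re-interleaves them into time order. A standalone illustration:

    import numpy as np

    # Even-cycle and odd-cycle samples captured in parallel by a snapshot block.
    i0 = np.array([0, 2, 4])
    i1 = np.array([1, 3, 5])

    # Column-major ('F') flattening alternates elements from the two rows,
    # restoring the original sample order.
    print(np.vstack((i0, i1)).flatten('F'))  # [0 1 2 3 4 5]
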
OCA/stock-logistics-warehouse | stock_move_auto_assign/models/__init__.py | Python | agpl-3.0 | 55 | 0

from . import stock_move
from . import product_product

trabucayre/gnuradio | gr-blocks/python/blocks/qa_max.py | Python | gpl-3.0 | 5,142 | 0.00739

#!/usr/bin/env python
#
# Copyright 2007,2010,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#

from gnuradio import gr, gr_unittest, blocks

import math


class test_max(gr_unittest.TestCase):

    def setUp(self):
        self.tb = gr.top_block()

    def tearDown(self):
        self.tb = None

    def test_001(self):
        src_data = [0, 0.2, -0.3, 0, 12, 0]
        expected_result = [float(max(src_data)), ]

        src = blocks.vector_source_f(src_data)
        s2v = blocks.stream_to_vector(gr.sizeof_float, len(src_data))
        op = blocks.max_ff(len(src_data))
        dst = blocks.vector_sink_f()

        self.tb.connect(src, s2v, op, dst)
        self.tb.run()
        result_data = dst.data()
        self.assertEqual(expected_result, result_data)

    # The methods below are prefixed 'stest_' rather than 'test_', so the
    # unittest runner skips them.
    def stest_002(self):
        src_data = [-100, -99, -98, -97, -96, -1]
        expected_result = [float(max(src_data)), ]

        src = blocks.vector_source_f(src_data)
        s2v = blocks.stream_to_vector(gr.sizeof_float, len(src_data))
        op = blocks.max_ff(len(src_data))
        dst = blocks.vector_sink_f()

        self.tb.connect(src, s2v, op, dst)
        self.tb.run()
        result_data = dst.data()
        self.assertEqual(expected_result, result_data)

    def stest_003(self):
        src_data0 = [0, 2, -3, 0, 12, 0]
        src_data1 = [1, 1, 1, 1, 1, 1]
        expected_result = [float(max(x, y)) for x, y in zip(src_data0, src_data1)]

        src0 = blocks.vector_source_f(src_data0)
        src1 = blocks.vector_source_f(src_data1)
        op = blocks.max_ff(1)
        dst = blocks.vector_sink_f()

        self.tb.connect(src0, (op, 0))
        self.tb.connect(src1, (op, 1))
        self.tb.connect(op, dst)
        self.tb.run()
        result_data = dst.data()
        self.assertEqual(expected_result, result_data)

    def stest_004(self):
        dim = 2
        src_data0 = [0, 2, -3, 0, 12, 0]
        src_data1 = [1, 1, 1, 1, 1, 1]

        expected_result = []
        tmp = [float(max(x, y)) for x, y in zip(src_data0, src_data1)]
        for i in range(len(tmp) // dim):
            expected_result.append(float(max(tmp[i * dim:(i + 1) * dim])))

        src0 = blocks.vector_source_f(src_data0)
        s2v0 = blocks.stream_to_vector(gr.sizeof_float, dim)
        src1 = blocks.vector_source_f(src_data1)
        s2v1 = blocks.stream_to_vector(gr.sizeof_float, dim)
        op = blocks.max_ff(dim)
        dst = blocks.vector_sink_f()

        self.tb.connect(src0, s2v0, (op, 0))
        self.tb.connect(src1, s2v1, (op, 1))
        self.tb.connect(op, dst)
        self.tb.run()
        result_data = dst.data()
        self.assertEqual(expected_result, result_data)

    def stest_s001(self):
        src_data = [0, 2, -3, 0, 12, 0]
        expected_result = [max(src_data), ]

        src = blocks.vector_source_s(src_data)
        s2v = blocks.stream_to_vector(gr.sizeof_short, len(src_data))
        op = blocks.max_ss(len(src_data))
        dst = blocks.vector_sink_s()

        self.tb.connect(src, s2v, op, dst)
        self.tb.run()
        result_data = dst.data()
        self.assertEqual(expected_result, result_data)

    def stest_s002(self):
        src_data = [-100, -99, -98, -97, -96, -1]
        expected_result = [max(src_data), ]

        src = blocks.vector_source_s(src_data)
        s2v = blocks.stream_to_vector(gr.sizeof_short, len(src_data))
        op = blocks.max_ss(len(src_data))
        dst = blocks.vector_sink_s()

        self.tb.connect(src, s2v, op, dst)
        self.tb.run()
        result_data = dst.data()
        self.assertEqual(expected_result, result_data)

    def stest_s003(self):
        src_data0 = [0, 2, -3, 0, 12, 0]
        src_data1 = [1, 1, 1, 1, 1, 1]
        expected_result = [max(x, y) for x, y in zip(src_data0, src_data1)]

        src0 = blocks.vector_source_s(src_data0)
        src1 = blocks.vector_source_s(src_data1)
        op = blocks.max_ss(1)
        dst = blocks.vector_sink_s()

        self.tb.connect(src0, (op, 0))
        self.tb.connect(src1, (op, 1))
        self.tb.connect(op, dst)
        self.tb.run()
        result_data = dst.data()
        self.assertEqual(expected_result, result_data)

    def stest_s004(self):
        dim = 2
        src_data0 = [0, 2, -3, 0, 12, 0]
        src_data1 = [1, 1, 1, 1, 1, 1]

        expected_result = []
        tmp = [max(x, y) for x, y in zip(src_data0, src_data1)]
        for i in range(len(tmp) // dim):
            expected_result.append(max(tmp[i * dim:(i + 1) * dim]))

        src0 = blocks.vector_source_s(src_data0)
        s2v0 = blocks.stream_to_vector(gr.sizeof_short, dim)
        src1 = blocks.vector_source_s(src_data1)
        s2v1 = blocks.stream_to_vector(gr.sizeof_short, dim)
        op = blocks.max_ss(dim)
        dst = blocks.vector_sink_s()

        self.tb.connect(src0, s2v0, (op, 0))
        self.tb.connect(src1, s2v1, (op, 1))
        self.tb.connect(op, dst)
        self.tb.run()
        result_data = dst.data()
        self.assertEqual(expected_result, result_data)


if __name__ == '__main__':
    gr_unittest.run(test_max, "test_max.xml")

guandalf/projecteuler | pe0001.py | Python | mit | 155 | 0.012903

def pe0001(upto):
    total = 0
    for i in range(upto):
        if i % 3 == 0 or i % 5 == 0:
            total += i
    return total

print(pe0001(1000))

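For reference, the same sum can be computed in O(1) with inclusion-exclusion over arithmetic series; a sketch, not part of the original solution:

    def sum_multiples(k, upto):
        # Sum of k, 2k, ... strictly below `upto`, via the series formula.
        n = (upto - 1) // k
        return k * n * (n + 1) // 2

    # Multiples of 3 or 5 below 1000: add both, subtract double-counted 15s.
    print(sum_multiples(3, 1000) + sum_multiples(5, 1000)
          - sum_multiples(15, 1000))  # 233168, matching pe0001(1000)
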
mozilla/caseconductor-ui | ccui/environments/views.py | Python | gpl-3.0 | 1,892 | 0.002643

# Case Conductor is a Test Case Management system.
# Copyright (C) 2011 uTest Inc.
#
# This file is part of Case Conductor.
#
# Case Conductor is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Case Conductor is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Case Conductor. If not, see <http://www.gnu.org/licenses/>.
from django.shortcuts import redirect
from django.template.response import TemplateResponse

from ..core.util import get_object_or_404
from ..users.decorators import login_redirect
from ..testexecution.models import TestRunList

from .forms import EnvironmentSelectionForm


@login_redirect
def set_environment(request, testrun_id):
    """
    Given a test run ID, allow the user to choose a valid environment-group
    from among those valid for that test run, set that environment-group ID in
    the user's session, and redirect to that test run.
    """
    run = get_object_or_404(TestRunList, testrun_id, auth=request.auth)

    form = EnvironmentSelectionForm(
        request.POST or None,
        groups=run.environmentgroups_prefetch,
        current=request.session.get("environments", None))

    if request.method == "POST" and form.is_valid():
        request.session["environments"] = form.save()
        return redirect("runtests_run", testrun_id=testrun_id)

    return TemplateResponse(
        request,
        "runtests/environment.html",
        {"form": form,
         "testrun": run,
         })

SteveViss/readthedocs.org | readthedocs/core/middleware.py | Python | mit | 6,904 | 0.001593

import logging

from django.utils.translation import ugettext_lazy as _
from django.conf import settings
from django.core.cache import cache
from django.core.exceptions import ObjectDoesNotExist, MultipleObjectsReturned
from django.http import Http404

from readthedocs.projects.models import Project, Domain

log = logging.getLogger(__name__)

LOG_TEMPLATE = u"(Middleware) {msg} [{host}{path}]"


class SubdomainMiddleware(object):

    def process_request(self, request):
        host = request.get_host().lower()
        path = request.get_full_path()
        log_kwargs = dict(host=host, path=path)
        if settings.DEBUG:
            log.debug(LOG_TEMPLATE.format(msg='DEBUG on, not processing middleware', **log_kwargs))
            return None
        if ':' in host:
            host = host.split(':')[0]
        domain_parts = host.split('.')

        # Serve subdomains - but don't depend on the production domain only having 2 parts
        if len(domain_parts) == len(settings.PRODUCTION_DOMAIN.split('.')) + 1:
            subdomain = domain_parts[0]
            is_www = subdomain.lower() == 'www'
            is_ssl = subdomain.lower() == 'ssl'
            if not is_www and not is_ssl and settings.PRODUCTION_DOMAIN in host:
                request.subdomain = True
                request.slug = subdomain
                request.urlconf = 'readthedocs.core.subdomain_urls'
                return None

        # Serve CNAMEs
        if settings.PRODUCTION_DOMAIN not in host and \
           'localhost' not in host and \
           'testserver' not in host:
            request.cname = True
            domains = Domain.objects.filter(domain=host)
            if domains.count():
                for domain in domains:
                    if domain.domain == host:
                        request.slug = domain.project.slug
                        request.urlconf = 'core.subdomain_urls'
                        request.domain_object = True
                        domain.count = domain.count + 1
                        domain.save()
                        log.debug(LOG_TEMPLATE.format(
                            msg='Domain Object Detected: %s' % domain.domain, **log_kwargs))
                        break
            if not hasattr(request, 'domain_object') and 'HTTP_X_RTD_SLUG' in request.META:
                request.slug = request.META['HTTP_X_RTD_SLUG'].lower()
                request.urlconf = 'readthedocs.core.subdomain_urls'
                request.rtdheader = True
                log.debug(LOG_TEMPLATE.format(
                    msg='X-RTD-Slug header detected: %s' % request.slug, **log_kwargs))
            # Try header first, then DNS
            elif not hasattr(request, 'domain_object'):
                try:
                    slug = cache.get(host)
                    if not slug:
                        from dns import resolver
                        answer = [ans for ans in resolver.query(host, 'CNAME')][0]
                        domain = answer.target.to_unicode().lower()
                        slug = domain.split('.')[0]
                        cache.set(host, slug, 60 * 60)
                        # Cache the slug -> host mapping permanently.
                        log.debug(LOG_TEMPLATE.format(
                            msg='CNAME cached: %s->%s' % (slug, host),
                            **log_kwargs))
                    request.slug = slug
                    request.urlconf = 'readthedocs.core.subdomain_urls'
                    log.debug(LOG_TEMPLATE.format(
                        msg='CNAME detected: %s' % request.slug,
                        **log_kwargs))
                    try:
                        proj = Project.objects.get(slug=slug)
                        domain, created = Domain.objects.get_or_create(
                            project=proj,
                            domain=host,
                        )
                        if created:
                            domain.machine = True
                            domain.cname = True
                        domain.count = domain.count + 1
                        domain.save()
                    except (ObjectDoesNotExist, MultipleObjectsReturned):
                        log.debug(LOG_TEMPLATE.format(
                            msg='Project CNAME does not exist: %s' % slug,
                            **log_kwargs))
                except:
                    # Some crazy person is CNAMEing to us. 404.
                    log.exception(LOG_TEMPLATE.format(msg='CNAME 404', **log_kwargs))
                    raise Http404(_('Invalid hostname'))
        # Google was finding crazy www.blah.readthedocs.org domains.
        # Block these explicitly after trying CNAME logic.
        if len(domain_parts) > 3:
            # Stop www.fooo.readthedocs.org
            if domain_parts[0] == 'www':
                log.debug(LOG_TEMPLATE.format(msg='404ing long domain', **log_kwargs))
                raise Http404(_('Invalid hostname'))
            log.debug(LOG_TEMPLATE.format(msg='Allowing long domain name', **log_kwargs))
            # raise Http404(_('Invalid hostname'))
        # Normal request.
        return None


class SingleVersionMiddleware(object):

    """Reset urlconf for requests for 'single_version' docs.

    In settings.MIDDLEWARE_CLASSES, SingleVersionMiddleware must follow
    after SubdomainMiddleware.
    """

    def _get_slug(self, request):
        """Get slug from URLs requesting docs.

        If URL is like '/docs/<project_name>/', we split path
        and pull out slug.

        If URL is subdomain or CNAME, we simply read request.slug, which is
        set by SubdomainMiddleware.
        """
        slug = None
        if hasattr(request, 'slug'):
            # Handle subdomains and CNAMEs.
            slug = request.slug.lower()
        else:
            # Handle '/docs/<project>/' URLs
            path = request.get_full_path()
            path_parts = path.split('/')
            if len(path_parts) > 2 and path_parts[1] == 'docs':
                slug = path_parts[2].lower()
        return slug

    def process_request(self, request):
        slug = self._get_slug(request)
        if slug:
            try:
                proj = Project.objects.get(slug=slug)
            except (ObjectDoesNotExist, MultipleObjectsReturned):
                # Let 404 be handled further up stack.
                return None

            if getattr(proj, 'single_version', False):
                request.urlconf = 'readthedocs.core.single_version_urls'
                # Logging
                host = request.get_host()
                path = request.get_full_path()
                log_kwargs = dict(host=host, path=path)
                log.debug(LOG_TEMPLATE.format(
                    msg='Handling single_version request', **log_kwargs)
                )

        return None

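The subdomain branch above hinges on comparing part counts between the request host and the production domain. A tiny standalone illustration of that parsing (the host and domain values are invented):

    PRODUCTION_DOMAIN = 'readthedocs.org'  # stands in for settings.PRODUCTION_DOMAIN

    host = 'pip.readthedocs.org'
    domain_parts = host.split('.')
    # Exactly one more part than the production domain means a project subdomain.
    if len(domain_parts) == len(PRODUCTION_DOMAIN.split('.')) + 1:
        print('project slug:', domain_parts[0])  # -> pip
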
illicium/ccss2edr | ccss2edr/dumpedr.py | Python | mit | 1,581 | 0

#!/usr/bin/env python3
import argparse
import dataclasses

from array import array

from .edr import (EDRHeader, EDRDisplayDataHeader, EDRSpectralDataHeader)


def parse_args():
    parser = argparse.ArgumentParser(description='Print .edr file')

    parser.add_argument('edr',
                        type=argparse.FileType('rb'),
                        help='.edr input filename')

    return parser.parse_args()


def main():
    args = parse_args()

    print('EDR Header:')
    edr_header = EDRHeader.unpack_from(args.edr.read(EDRHeader.struct.size))
    print_dataclass(edr_header, indent=1)

    for set_num in range(1, edr_header.num_sets + 1):
        print('Set {}'.format(set_num))

        print('\tDisplay Data Header:')
        edr_header = EDRDisplayDataHeader.unpack_from(
            args.edr.read(EDRDisplayDataHeader.struct.size))
        print_dataclass(edr_header, indent=2)

        print('\tSpectral Data Header:')
        spec_header = EDRSpectralDataHeader.unpack_from(
            args.edr.read(EDRSpectralDataHeader.struct.size))
        print_dataclass(spec_header, indent=2)

        spec_data = args.edr.read(8 * spec_header.num_samples)  # array of doubles
        spec_data_arr = array('d', spec_data)
        print('\tSpectral Data: {!s}'.format(spec_data_arr.tolist()))


def print_dataclass(obj, indent=0):
    for field in dataclasses.fields(obj):
        print('{}{}: {!r}'.format('\t' * indent, field.name,
                                  getattr(obj, field.name)))


if __name__ == '__main__':
    main()

Cheaterman/kivy | kivy/input/provider.py | Python | mit | 1,082 | 0

'''
Motion Event Provider
=====================

Abstract class for the implementation of a
:class:`~kivy.input.motionevent.MotionEvent`
provider. The implementation must support the
:meth:`~MotionEventProvider.start`, :meth:`~MotionEventProvider.stop` and
:meth:`~MotionEventProvider.update` methods.
'''

__all__ = ('MotionEventProvider', )


class MotionEventProvider(object):
    '''Base class for a provider.
    '''

    def __init__(self, device, args):
        self.device = device
        if self.__class__ == MotionEventProvider:
            raise NotImplementedError('class MotionEventProvider is abstract')

    def start(self):
        '''Start the provider. This method is automatically called when the
        application is started and if the configuration uses the current
        provider.
        '''
        pass

    def stop(self):
        '''Stop the provider.
        '''
        pass

    def update(self, dispatch_fn):
        '''Update the provider and dispatch all the new touch events through
        the `dispatch_fn` argument.
        '''
        pass

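A minimal concrete provider under the contract above; `DummyMotionEventProvider` is illustrative only and not part of kivy:

    class DummyMotionEventProvider(MotionEventProvider):
        '''Skeleton showing the three methods a concrete provider overrides.'''

        def start(self):
            # Open the input device, spawn reader threads, etc.
            print('started provider for device %r' % self.device)

        def stop(self):
            # Release whatever start() acquired.
            print('stopped provider')

        def update(self, dispatch_fn):
            # A real provider pops queued events here and hands each one to
            # dispatch_fn, e.g. dispatch_fn('begin', motion_event).
            pass
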
samitnuk/online_shop | apps/orders/migrations/0002_auto_20170317_2119.py | Python | mit | 1,976 | 0.002614

# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-03-17 19:19
from __future__ import unicode_literals

from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('orders', '0001_initial'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='order',
            options={'ordering': ['-created'], 'verbose_name': 'Замовлення', 'verbose_name_plural': 'Замовлення'},
        ),
        migrations.RemoveField(
            model_name='order',
            name='address',
        ),
        migrations.RemoveField(
            model_name='order',
            name='email',
        ),
        migrations.RemoveField(
            model_name='order',
            name='postal_code',
        ),
        migrations.AddField(
            model_name='order',
            name='carrier',
            field=models.CharField(default='Нова пошта', max_length=250, verbose_name='Перевізник'),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='order',
            name='phone_num',
            field=models.CharField(default='(050) 123-45-67', max_length=20, verbose_name='Номер телефону'),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='order',
            name='user',
            field=models.ForeignKey(default='1', on_delete=django.db.models.deletion.CASCADE, related_name='orders', to=settings.AUTH_USER_MODEL),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='order',
            name='warehouse_num',
            field=models.PositiveIntegerField(default=1, verbose_name='Номер складу'),
            preserve_default=False,
        ),
    ]

OmegaDroid/quokka | utils/templatetags/utils.py | Python | mit | 767 | 0.005215

from django import template
from django.utils.safestring import mark_safe

register = template.Library()


@register.filter
def img_tag(obj, cls=""):
    if hasattr(obj, "img"):
        if obj.img:
            return mark_safe("<img class='" + cls + "' src='" + obj.img.url + "'/>")
    return mark_safe("<span class='glyphicon glyphicon-picture " + cls + "'></span>")


@register.filter
def concat(obj, other):
    try:
        return str(obj) + str(other)
    except:
        return ""


@register.filter
def object_link(obj):
    try:
        return ("/" + type(obj).__name__ + "/" + str(obj.id) + "/").lower()
    except:
        return ""


@register.filter
def object_anchor(obj):
    return mark_safe("<a href='" + object_link(obj) + "'>" + str(obj) + "</a>")

xapharius/mrEnsemble | Engine/src/jobs/validation_job.py | Python | mit | 746 | 0.00134

'''
Created on Mar 19, 2014

@author: Simon
'''
from engine.engine_job import EngineJob


class ValidationJob(EngineJob):
    '''
    M/R job for validating a trained model.
    '''

    def mapper(self, key, values):
        data_processor = self.get_data_processor()
        data_processor.set_data(values)
        data_processor.normalize_data(self.data_handler.get_statistics())
        data_set = data_processor.get_data_set()
        alg = self.get_trained_alg()
        validator = self.get_validator()
        yield 'validation', validator.validate(alg, data_set)

    def reducer(self, key, values):
        vals = list(values)
        yield key, self.get_validator().aggregate(vals)


if __name__ == '__main__':
    ValidationJob.run()

t3dev/odoo | odoo/addons/test_testing_utilities/models.py | Python | gpl-3.0 | 6,753 | 0.002962

# -*- coding: utf-8 -*-
from __future__ import division

from odoo import api, fields, models


class A(models.Model):
    _name = 'test_testing_utilities.a'
    _description = 'Testing Utilities A'

    f1 = fields.Char(required=True)
    f2 = fields.Integer(default=42)
    f3 = fields.Integer()
    f4 = fields.Integer(compute='_compute_f4')
    f5 = fields.Integer()
    f6 = fields.Integer()

    @api.onchange('f2')
    def _on_change_f2(self):
        self.f3 = int(self.f2 / 2)
        self.f5 = self.f2
        self.f6 = self.f2

    @api.depends('f1', 'f2')
    def _compute_f4(self):
        for r in self:
            r.f4 = r.f2 / (int(r.f1) or 1)


class B(models.Model):
    _name = 'test_testing_utilities.readonly'
    _description = 'Testing Utilities Readonly'

    f1 = fields.Integer(default=1, readonly=True)
    f2 = fields.Integer(compute='_compute_f2')

    @api.depends('f1')
    def _compute_f2(self):
        for r in self:
            r.f2 = 2 * r.f1


class C(models.Model):
    _name = 'test_testing_utilities.c'
    _description = 'Testing Utilities C'

    name = fields.Char("name", required=True)
    f2 = fields.Many2one('test_testing_utilities.m2o')

    @api.onchange('f2')
    def _on_change_f2(self):
        self.name = self.f2.name


class M2O(models.Model):
    _name = 'test_testing_utilities.m2o'
    _description = 'Testing Utilities Many To One'

    name = fields.Char(required=True)


class M2Onchange(models.Model):
    _name = 'test_testing_utilities.d'
    _description = 'Testing Utilities D'

    # used to check that defaults & onchange to m2o work
    f = fields.Many2one(
        'test_testing_utilities.m2o',
        required=True,
        default=lambda self: self.env['test_testing_utilities.m2o'].search(
            [], limit=1
        )
    )
    f2 = fields.Char()

    @api.onchange('f2')
    def _on_change_f2(self):
        self.f = self.env['test_testing_utilities.m2o'].search([
            ('name', 'ilike', self.f2),
        ], limit=1) if self.f2 else False


class M2MChange(models.Model):
    _name = 'test_testing_utilities.e'
    _description = 'Testing Utilities E'

    m2m = fields.Many2many('test_testing_utilities.sub2')
    count = fields.Integer(compute='_m2m_count')

    @api.depends('m2m')
    def _m2m_count(self):
        for r in self:
            r.count = len(r.m2m)


class M2MSub(models.Model):
    _name = 'test_testing_utilities.sub2'
    _description = 'Testing Utilities Subtraction 2'

    name = fields.Char()


class M2MChange2(models.Model):
    _name = 'test_testing_utilities.f'
    _description = 'Testing Utilities F'

    def _get_some(self):
        r = self.env['test_testing_utilities.sub2'].search([], limit=2)
        return r

    m2m = fields.Many2many(
        'test_testing_utilities.sub2',
        default=_get_some,
    )
    m2o = fields.Many2one('test_testing_utilities.sub2')

    @api.onchange('m2o')
    def _on_change_m2o(self):
        self.m2m = self.m2m | self.m2o


class M2MReadonly(models.Model):
    _name = 'test_testing_utilities.g'
    _description = 'Testing Utilities G'

    m2m = fields.Many2many('test_testing_utilities.sub3', readonly=True)


class M2MSub3(models.Model):
    _name = 'test_testing_utilities.sub3'
    _description = 'Testing Utilities Subtraction 3'

    name = fields.Char()


class O2MChange(models.Model):
    _name = 'test_testing_utilities.parent'
    _description = 'Testing Utilities Parent'

    value = fields.Integer(default=1)
    v = fields.Integer()
    subs = fields.One2many('test_testing_utilities.sub', 'parent_id')

    @api.onchange('value', 'subs')
    def _onchange_values(self):
        self.v = self.value + sum(s.value for s in self.subs)


class O2MSub(models.Model):
    _name = 'test_testing_utilities.sub'
    _description = 'Testing Utilities Subtraction'

    name = fields.Char(compute='_compute_name')
    value = fields.Integer(default=2)
    v = fields.Integer()
    parent_id = fields.Many2one('test_testing_utilities.parent')
    has_parent = fields.Boolean()

    @api.onchange('value')
    def _onchange_value(self):
        self.v = self.value

    @api.depends('v')
    def _compute_name(self):
        for r in self:
            r.name = str(r.v)

    @api.onchange('has_parent')
    def _onchange_has_parent(self):
        if self.has_parent:
            self.value = self.parent_id.value


class O2MDefault(models.Model):
    _name = 'test_testing_utilities.default'
    _description = 'Testing Utilities Default'

    def _default_subs(self):
        return [
            (0, 0, {'v': 5})
        ]
    value = fields.Integer(default=1)
    v = fields.Integer()
    subs = fields.One2many('test_testing_utilities.sub3', 'parent_id',
                           default=_default_subs)


class O2MSub3(models.Model):
    _name = 'test_testing_utilities.sub3'
    _description = 'Testing Utilities Subtraction 3'

    name = fields.Char(compute='_compute_name')
    value = fields.Integer(default=2)
    v = fields.Integer(default=6)
    parent_id = fields.Many2one('test_testing_utilities.default')

    @api.onchange('value')
    def _onchange_value(self):
        self.v = self.value

    @api.depends('v')
    def _compute_name(self):
        for r in self:
            r.name = str(r.v)


class O2MOnchangeParent(models.Model):
    _name = 'test_testing_utilities.onchange_parent'
    _description = 'Testing Utilities Onchange Parent'

    line_ids = fields.One2many('test_testing_utilities.onchange_line', 'parent')

    @api.onchange('line_ids')
    def _onchange_line_ids(self):
        for line in self.line_ids.filtered(lambda l: l.flag):
            self.env['test_testing_utilities.onchange_line'].new({'parent': self.id})


class M2OOnchangeLine(models.Model):
    _name = 'test_testing_utilities.onchange_line'
    _description = 'Testing Utilities Onchange Line'

    parent = fields.Many2one('test_testing_utilities.onchange_parent')
    dummy = fields.Float()
    flag = fields.Boolean(store=False)

    @api.onchange('dummy')
    def _onchange_flag(self):
        self.flag = True


class O2MChangeCount(models.Model):
    _name = 'test_testing_utilities.onchange_count'
    _description = _name

    count = fields.Integer()
    line_ids = fields.One2many('test_testing_utilities.onchange_count_sub', 'parent')

    @api.onchange('count')
    def _onchange_count(self):
        Sub = self.env['test_testing_utilities.onchange_count_sub']
        recs = Sub
        for i in range(self.count):
            recs |= Sub.new({'name': str(i)})
        self.line_ids = recs


class O2MChangeSub(models.Model):
    _name = 'test_testing_utilities.onchange_count_sub'
    _description = _name

    parent = fields.Many2one('test_testing_utilities.onchange_count')
    name = fields.Char()

dcrankshaw/clipper | integration-tests/deploy_xgboost_models.py | Python | apache-2.0 | 4,355 | 0.000459

import os
import sys

if sys.version_info >= (3, 0):
    sys.exit(0)

import requests
import json
import numpy as np
import time
import logging

import xgboost as xgb

cur_dir = os.path.dirname(os.path.abspath(__file__))

from test_utils import (create_docker_connection, BenchmarkException, headers,
                        log_clipper_state)

cur_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, os.path.abspath("%s/../clipper_admin" % cur_dir))

from clipper_admin.deployers.python import deploy_python_closure

logging.basicConfig(
    format='%(asctime)s %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s',
    datefmt='%y-%m-%d:%H:%M:%S',
    level=logging.INFO)

logger = logging.getLogger(__name__)

app_name = "xgboost-test"
model_name = "xgboost-model"


def deploy_and_test_model(clipper_conn,
                          model,
                          version,
                          predict_fn,
                          link_model=False):
    deploy_python_closure(
        clipper_conn,
        model_name,
        version,
        "integers",
        predict_fn,
        pkgs_to_install=['xgboost'])

    time.sleep(5)

    if link_model:
        clipper_conn.link_model_to_app(app_name, model_name)
        time.sleep(5)

    test_model(clipper_conn, app_name, version)


def test_model(clipper_conn, app, version):
    time.sleep(25)
    num_preds = 25
    num_defaults = 0
    addr = clipper_conn.get_query_addr()
    for i in range(num_preds):
        response = requests.post(
            "http://%s/%s/predict" % (addr, app),
            headers=headers,
            data=json.dumps({
                'input': get_test_point()
            }))
        result = response.json()
        if response.status_code == requests.codes.ok and result["default"]:
            num_defaults += 1
        elif response.status_code != requests.codes.ok:
            print(result)
            raise BenchmarkException(response.text)

    if num_defaults > 0:
        print("Error: %d/%d predictions were default" % (num_defaults,
                                                         num_preds))
    if num_defaults > num_preds / 2:
        raise BenchmarkException("Error querying APP %s, MODEL %s:%d" %
                                 (app, model_name, version))


def get_test_point():
    return [np.random.randint(255) for _ in range(784)]


if __name__ == "__main__":
    pos_label = 3
    try:
        clipper_conn = create_docker_connection(
            cleanup=True, start_clipper=True)
        try:
            clipper_conn.register_application(app_name, "integers",
                                              "default_pred", 100000)
            time.sleep(1)
            addr = clipper_conn.get_query_addr()
            response = requests.post(
                "http://%s/%s/predict" % (addr, app_name),
                headers=headers,
                data=json.dumps({
                    'input': get_test_point()
                }))
            result = response.json()
            if response.status_code != requests.codes.ok:
                print("Error: %s" % response.text)
                raise BenchmarkException("Error creating app %s" % app_name)

            version = 1
            dtrain = xgb.DMatrix(get_test_point(), label=[0])
            param = {
                'max_depth': 2,
                'eta': 1,
                'silent': 1,
                'objective': 'binary:logistic'
            }
            watchlist = [(dtrain, 'train')]
            num_round = 2
            bst = xgb.train(param, dtrain, num_round, watchlist)

            def predict(xs):
                return [str(bst.predict(xgb.DMatrix(xs)))]

            deploy_and_test_model(
                clipper_conn, bst, version, predict, link_model=True)
        except BenchmarkException as e:
            log_clipper_state(clipper_conn)
            logger.exception("BenchmarkException")
            clipper_conn = create_docker_connection(
                cleanup=True, start_clipper=False)
            sys.exit(1)
        else:
            clipper_conn = create_docker_connection(
                cleanup=True, start_clipper=False)
    except Exception as e:
        logger.exception("Exception")
        clipper_conn = create_docker_connection(
            cleanup=True, start_clipper=False)
        sys.exit(1)

piMoll/SEILAPLAN
|
lib/reportlab/graphics/charts/doughnut.py
|
Python
|
gpl-2.0
| 19,476
| 0.011861
|
#Copyright ReportLab Europe Ltd. 2000-2017
#see license.txt for license details
#history https://hg.reportlab.com/hg-public/reportlab/log/tip/src/reportlab/graphics/charts/doughnut.py
# doughnut chart
__version__='3.3.0'
__doc__="""Doughnut chart
Produces a circular chart like the doughnut charts produced by Excel.
Can handle multiple series (which produce concentric 'rings' in the chart).
"""
import copy
from math import sin, cos, pi
from reportlab.lib import colors
from reportlab.lib.validators import isColor, isNumber, isListOfNumbersOrNone,\
isListOfNumbers, isColorOrNone, isString,\
isListOfStringsOrNone, OneOf, SequenceOf,\
isBoolean, isListOfColors,\
isNoneOrListOfNoneOrStrings,\
isNoneOrListOfNoneOrNumbers,\
isNumberOrNone, isListOfNoneOrNumber,\
isListOfListOfNoneOrNumber, EitherOr
from reportlab.lib.attrmap import *
from reportlab.pdfgen.canvas import Canvas
from reportlab.graphics.shapes import Group, Drawing, Line, Rect, Polygon, Ellipse, \
Wedge, String, SolidShape, UserNode, STATE_DEFAULTS
from reportlab.graphics.widgetbase import Widget, TypedPropertyCollection, PropHolder
from reportlab.graphics.charts.piecharts import AbstractPieChart, WedgeProperties, _addWedgeLabel, fixLabelOverlaps
from reportlab.graphics.charts.textlabels import Label
from reportlab.graphics.widgets.markers import Marker
from functools import reduce
from reportlab import xrange
class SectorProperties(WedgeProperties):
"""This holds descriptive information about the sectors in a doughnut chart.
It is not to be confused with the 'sector itself'; this just holds
a recipe for how to format one, and does not allow you to hack the
angles. It can format a genuine Sector object for you with its
format method.
"""
_attrMap = AttrMap(BASE=WedgeProperties,
)
class Doughnut(AbstractPieChart):
_attrMap = AttrMap(
x = AttrMapValue(isNumber, desc='X position of the chart within its container.'),
y = AttrMapValue(isNumber, desc='Y position of the chart within its container.'),
width = AttrMapValue(isNumber, desc='width of doughnut bounding box. Need not be same as width.'),
height = AttrMapValue(isNumber, desc='height of doughnut bounding box. Need not be same as height.'),
data = AttrMapValue(EitherOr((isListOfNoneOrNumber,isListOfListOfNoneOrNumber)), desc='list of numbers defining sector sizes; need not sum to 1'),
labels = AttrMapValue(isListOfStringsOrNone, desc="optional list of labels to use for each data point"),
startAngle = AttrMapValue(isNumber, desc="angle of first slice; like the compass, 0 is due North"),
        direction = AttrMapValue(OneOf('clockwise', 'anticlockwise'), desc="'clockwise' or 'anticlockwise'"),
slices = AttrMapValue(None, desc="collection of sector descriptor objects"),
simpleLabels = AttrMapValue(isBoolean, desc="If true(default) use String not super duper WedgeLabel"),
# advanced usage
        checkLabelOverlap = AttrMapValue(isBoolean, desc="If true check and attempt to fix\n standard label overlaps(default off)",advancedUsage=1),
sideLabels = AttrMapValue(isBoolean, desc="If true attempt to make chart with labels along side and pointers", advancedUsage=1),
innerRadiusFraction = AttrMapValue(isNumberOrNone,
desc='None or the fraction of the radius to be used as the inner hole.\nIf not a suitable default will be used.'),
)
def __init__(self):
self.x = 0
self.y = 0
self.width = 100
self.height = 100
self.data = [1,1]
self.labels = None # or list of strings
self.startAngle = 90
self.direction = "clockwise"
self.simpleLabels = 1
self.checkLabelOverlap = 0
self.sideLabels = 0
self.innerRadiusFraction = None
self.slices = TypedPropertyCollection(SectorProperties)
self.slices[0].fillColor = colors.darkcyan
self.slices[1].fillColor = colors.blueviolet
self.slices[2].fillColor = colors.blue
self.slices[3].fillColor = colors.cyan
self.slices[4].fillColor = colors.pink
self.slices[5].fillColor = colors.magenta
self.slices[6].fillColor = colors.yellow
def demo(self):
d = Drawing(200, 100)
dn = Doughnut()
dn.x = 50
dn.y = 10
dn.width = 100
dn.height = 80
dn.data = [10,20,30,40,50,60]
dn.labels = ['a','b','c','d','e','f']
dn.slices.strokeWidth=0.5
dn.slices[3].popout = 10
dn.slices[3].strokeWidth = 2
dn.slices[3].strokeDashArray = [2,2]
dn.slices[3].labelRadius = 1.75
dn.slices[3].fontColor = colors.red
dn.slices[0].fillColor = colors.darkcyan
dn.slices[1].fillColor = colors.blueviolet
dn.slices[2].fillColor = colors.blue
dn.slices[3].fillColor = colors.cyan
dn.slices[4].fillColor = colors.aquamarine
dn.slices[5].fillColor = colors.cadetblue
dn.slices[6].fillColor = colors.lightcoral
d.add(dn)
return d
def normalizeData(self, data=None):
from operator import add
sum = float(reduce(add,data,0))
return abs(sum)>=1e-8 and list(map(lambda x,f=360./sum: f*x, data)) or len(data)*[0]
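    # Worked example (illustrative): normalizeData([1, 1, 2]) sums to 4, so
    # each value is scaled by 360/4 = 90, giving [90.0, 90.0, 180.0] degrees.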
def makeSectors(self):
# normalize slice data
data = self.data
multi = isListOfListOfNoneOrNumber(data)
if multi:
#it's a nested list, more than one sequence
normData = []
n = []
for l in data:
t = self.normalizeData(l)
normData.append(t)
n.append(len(t))
self._seriesCount = max(n)
else:
normData = self.normalizeData(data)
n = len(normData)
self._seriesCount = n
#labels
checkLabelOverlap = self.checkLabelOverlap
L = []
L_add = L.append
labels = self.labels
if labels is None:
labels = []
if not multi:
labels = [''] * n
else:
for m in n:
labels = list(labels) + [''] * m
else:
#there's no point in raising errors for less than enough labels if
#we silently create all for the extreme case of no labels.
if not multi:
i = n-len(labels)
if i>0:
labels = list(labels) + [''] * i
else:
tlab = 0
for m in n:
tlab += m
i = tlab-len(labels)
if i>0:
labels = list(labels) + [''] * i
self.labels = labels
xradius = self.width/2.0
yradius = self.height/2.0
centerx = self.x + xradius
centery = self.y + yradius
if self.direction == "anticlockwise":
whichWay = 1
else:
whichWay = -1
g = Group()
startAngle = self.startAngle #% 360
styleCount = len(self.slices)
irf = self.innerRadiusFraction
if multi:
#multi-series doughnut
ndata = len(data)
if irf is None:
yir = (yradius/2.5)/ndata
xir = (xradius/2.5)/ndata
else:
yir = yradius*irf
xir = xradius*irf
ydr = (yradius-yir)/ndata
xdr = (xradius-xir)/ndata
for sn,series in enumerate(normData):
for i,angle in enumerate(series):
endAngle = (startAngle + (angle * whichWay)) #% 360
aa = abs(startAngle-endAngle)
if aa<1e-5:
startAngle = endAngle
continue
if startAng
|
r-alex-hall/fontDevTools
|
scripts/imgAndVideo/color_growth.py
|
Python
|
gpl-3.0
| 45,584
| 0.005484
|
# DESCRIPTION
# Renders a PNG image like bacteria that mutate color as they spread. TRY IT. The output is awesome.
# DEPENDENCIES
# python 3 with numpy, queue, and pyimage modules installed (and others--see the import statements).
# USAGE
# Run this script through a Python interpreter without any parameters, and it will use a default set of parameters:
# python /path/to_this_script/color_growth.py
# To see available parameters, run this script with the --help switch:
#    python /path/to_this_script/ --help
# NOTES
# - GitHub user `scribblemaniac` sped up this script (with a submitted pull request) by orders of magnitude vs. an earlier version of the script. An image that took seven minutes to render took 5 seconds after speedup.
# - Output file names are based on the date and time and random characters. Inspired and drastically evolved from `color_fibers.py`, which was horked and adapted from:
#
# https://scipython.com/blog/computer-generated-contemporary-art
#
# KNOWN ISSUES
# See help for `--RANDOM_SEED`.
# CODE
# TO DO
# - figure out whether I broke RND continuity? It would seem the same presets are no longer producing the same results?
# - isolate what situation didn't create a new preset / anim folder when I expected it to, and fix that (or document in help).
# - make naming convention of variables consistent? I think I'm all over the place with this . . . :p
# - possibly things in the color_growth_v1.py's TO DO list.
# - determine whether any code in the fast fork (now this script) is leftover from color_growth_v1.py, and delete them?
# - make it properly use negative or > 8 growth-clip values again? Since the color_growth_fast.py fork it isn't.
# VERSION HISTORY
# v2.8.7:
# Edit speedup credit comment.
# START IMPORTS AND GLOBALS
ColorGrowthPyVersionString = 'v2.8.7'
import datetime
import random
import argparse
import ast
import os.path
import sys
import re
import queue
from more_itertools import unique_everseen
import platform
# I'm also using another pseudorandom number generator built into numpy as np:
import numpy as np
from PIL import Image
# Defaults which will be overridden if arguments of the same name are provided to the script:
WIDTH = 600
HEIGHT = 300
RSHIFT = 8
STOP_AT_PERCENT = 1
SAVE_EVERY_N = 0
RAMP_UP_SAVE_EVERY_N = False
START_COORDS_RANGE = (1,3)
GROWTH_CLIP = (0,5)
SAVE_PRESET = True
animationFrameCounter = 0
renderedFrameCounter = 0
saveNextFrameNumber = 0
imageFrameFileName = ''
padFileNameNumbersDigitsWidth = 0
# SOME BACKGROUND COLOR options;
# any of these (uncomment only one) are made into a list later by ast.literal_eval(BG_COLOR) :
# BG_COLOR = "[157,140,157]" # Medium purplish gray
BG_COLOR = "[252,251,201]" # Buttery light yellow
# BG_COLOR = "[255,63,52]" # Scarlet-scarlet-orange
RECLAIM_ORPHANS = True
BORDER_BLEND = True
TILEABLE = False
SCRIPT_ARGS_STR = ''
# END GLOBALS
# START OPTIONS (which affect globals)
# allows me to have a version string parser option that prints
# and exits; re: https://stackoverflow.com/a/41575802/1397555
class versionStringPrintAction(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
print('color_growth.py', ColorGrowthPyVersionString)
parser.exit()
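# Note: registering the action with nargs=0 (below) makes -v/--VERSION consume
# no arguments, so __call__ fires immediately and the parser exits after
# printing the version string.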
PARSER = argparse.ArgumentParser(description=
'Renders a PNG image like bacteria that produce random color mutations \
as they grow over a surface. Output file names are named after the date \
and time. Inspired by and drastically evolved from colorFibers.py, which \
was horked and adapted from \
https://scipython.com/blog/computer-generated-contemporary-art/ \
NOTE: CLI options have had breaking changes over time. If reusing settings \
from a previous version, check those settings first if you get errors. \
NOTE: by default the --RAMP_UP_SAVE_EVERY_N switch has a False value, but \
you probably want it True if you save animation frames (--SAVE_EVERY_N).'
)
PARSER.register('action', 'versionStringPrint', versionStringPrintAction)
PARSER.add_argument('-v', '--VERSION', nargs=0, action='versionStringPrint', help='Print version number and exit.')
PARSER.add_argument('--WIDTH', type=int, help=
'WIDTH of output image(s). Default ' + str(WIDTH) + '.')
PARSER.add_argument('--HEIGHT', type=int, help=
'HEIGHT of output image(s). Default ' + str(HEIGHT) + '.')
PARSER.add_argument('-r', '--RSHIFT', type=int, help=
'Vary R, G and B channel values randomly in the range negative this \
value or positive this value. Note that this means the range is RSHIFT \
times two. Defaut ' + str(RSHIFT) + '.'
)
PARSER.add_argument('-b', '--BG_COLOR', type=str, help=
'Canvas color. Expressed as a python list or single number that will be \
assigned to every value in an RGB triplet. If a list, give the RGB \
values in the format \'[255,70,70]\' (if you add spaces after the \
commas, you must surround the parameter in single or double quotes). \
This example would produce a deep red, as Red = 255, Green = 70, Blue = \
70). A single number example like just 150 will result in a medium-light \
gray of [150,150,150] (Red = 150, Green = 150, Blue = 150). All values \
must be between 0 and 255. Default ' + str(BG_COLOR) + '.'
)
PARSER.add_argument('-c', '--COLOR_MUTATION_BASE', type=str, help=
'Base initialization color for pixels, which randomly mutates as \
painting proceeds. If omitted, defaults to whatever BG_COLOR is. If \
included, may differ from BG_COLOR. This option must be given in the \
same format as BG_COLOR. You may make the base initialization color of \
each origin random by specifying "--COLOR_MUTATION_BASE random".'
)
PARSER.add_argument('--BORDER_BLEND', type=str, help=
'If this is enabled, the hard edges between different colonies will be \
blended together. Enabled by default. To disable pass \
--BORDER_BLEND False or --BORDER_BLEND 0.'
)
PARSER.add_argument('--TILEABLE', type=str, help=
'Make the generated image seamlessly tile. Colonies will wrap around \
the edge when they encounter it. Disabled by default. Enable with \
--TILEABLE True or --TILEABLE 1.'
)
PARSER.add_argument('--STOP_AT_PERCENT', type=float, help=
'What percent canvas fill to stop painting at. To paint until the canvas \
is filled (which can take extremely long for higher resolutions), pass 1 \
(for 100 percent). If not 1, value should be a percent expressed as a \
decimal (float) between 0 and 1 (e.g. 0.4 for 40 percent). Default ' + \
str(STOP_AT_PERCENT) + '. For high --failedMutationsThreshold or random \
walk (neither of which is implemented at this writing), 0.475 (around 48 \
percent) is recommended. Stop percent is adhered to approximately (it \
could be much less efficient to make it exact).'
)
PARSER.add_argument('-a', '--SAVE_EVERY_N', type=int, help=
'Every N successful coordinate and color mutations, save an animation \
frame into a subfolder named after the intended final art file. To save \
every frame, set this to 1, or to save every 3rd frame set it to 3, etc. \
Saves zero-padded numbered frames to a subfolder which may be strung \
together into an animation of the entire painting process (for example \
via ffmpegAnim.sh). May substantially slow down render, and can also \
create many, many gigabytes of data, depending. ' + str(SAVE_EVERY_N) + \
' by default. To disable, set it to 0 with: -a 0 OR: --SAVE_EVERY_N 0. \
NOTE: If this is nonzero and you do not set --RAMP_UP_SAVE_EVERY_N to \
either True or False (see), the default --RAMP_UP_SAVE_EVERY_N False \
will override to True, as it is strongly suggested you want that if \
you render an animation. If that is not what you want, manually set \
--RAMP_UP_SAVE_EVERY_N False.'
)
PARSER.add_argument('--RAMP_UP_SAVE_EVERY_N', type=str, help=
'Increase the value of --SAVE_EVERY_N over time. Without this, the \
animation may seem to slow toward the middle and end, because the \
interval --SAVE_EVERY_N is constant; the same number of new mutated \
coordinates is spread over a wider area every save frame. \
--RAMP_UP_SAVE_EVERY_N causes the value of --SAVE_EVERY_N to increase \
over time, like dragging the corner of a selection rectangle to increase \
rendered area over the whole canvas. The result is an app
|
OpenCMISS-Dependencies/slepc
|
config/packages/petsc.py
|
Python
|
lgpl-3.0
| 6,234
| 0.022137
|
#
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
-
# SLEPc - Scalable Library for Eigenvalue Problem Computations
#  Copyright (c) 2002-2015, Universitat Politecnica de Valencia, Spain
#
# This file is part of SLEPc.
#
# SLEPc is free software: you can redistribute it and/or modify it under the
# terms of version 3 of the GNU Lesser General Public License as published by
# the Free Software Foundation.
#
# SLEPc is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
# more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with SLEPc. If not, see <http://www.gnu.org/licenses/>.
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
#
import package, os, sys, commands
class PETSc(package.Package):
def __init__(self,argdb,log):
self.packagename = 'petsc'
self.downloadable = False
self.log = log
def Check(self):
self.havepackage = self.Link([],[],[])
def InitDir(self):
if 'PETSC_DIR' in os.environ:
self.dir = os.environ['PETSC_DIR']
if not os.path.exists(self.dir):
        sys.exit('ERROR: PETSC_DIR environment variable is not valid')
else:
if slepc.prefixdir:
self.dir = slepc.prefixdir
os.environ['PETSC_DIR'] = self.dir
else:
        sys.exit('ERROR: PETSC_DIR environment variable is not set')
def LoadVersion(self):
try:
f = open(os.path.join(self.dir,'include','petscversion.h'))
for l in f.readlines():
l = l.split()
if len(l) == 3:
if l[1] == 'PETSC_VERSION_RELEASE':
self.release = l[2]
if l[1] == 'PETSC_VERSION_MAJOR':
major = l[2]
elif l[1] == 'PETSC_VERSION_MINOR':
minor = l[2]
elif l[1] == 'PETSC_VERSION_SUBMINOR':
subminor = l[2]
elif l[1] == 'PETSC_VERSION_PATCH':
patchlevel = l[2]
f.close()
self.version = major + '.' + minor
self.lversion = major + '.' + minor + '.' + subminor
except:
self.log.Exit('ERROR: File error while reading PETSc version')
# Check whether this is a working copy of the repository
self.isrepo = False
if os.path.exists(os.path.join(self.dir,'.git')):
(status, output) = commands.getstatusoutput('cd '+self.dir+';git rev-parse')
if not status:
self.isrepo = True
(status, self.gitrev) = commands.getstatusoutput('cd '+self.dir+';git log -1 --pretty=format:%H')
(status, self.gitdate) = commands.getstatusoutput('cd '+self.dir+';git log -1 --pretty=format:%ci')
(status, self.branch) = commands.getstatusoutput('cd '+self.dir+';git describe --contains --all HEAD')
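  # Illustrative petscversion.h lines matched by the len(l)==3 parse above:
  #   #define PETSC_VERSION_MAJOR      3
  #   #define PETSC_VERSION_MINOR      6
  # which would yield self.version == '3.6'.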
def LoadConf(self):
if 'PETSC_ARCH' in os.environ and os.environ['PETSC_ARCH']:
self.isinstall = False
self.arch = os.environ['PETSC_ARCH']
petscvariables = os.path.join(self.dir,self.arch,'lib','petsc','conf','petscvariables')
petscconf_h = os.path.join(self.dir,self.arch,'include','petscconf.h')
else:
self.isinstall = True
petscvariables = os.path.join(self.dir,'lib','petsc','conf','petscvariables')
petscconf_h = os.path.join(self.dir,'include','petscconf.h')
self.build_using_cmake = 0
self.make_is_gnumake = 0
self.language = 'c'
self.bfort = 'nobfortinpetsc'
try:
f = open(petscvariables)
for l in f.readlines():
r = l.split('=',1)
if len(r)!=2: continue
k = r[0].strip()
v = r[1].strip()
if k == 'PETSC_SCALAR':
self.scalar = v
elif k == 'PETSC_PRECISION':
self.precision = v
elif k == 'MAKE':
self.make = v
elif k == 'DESTDIR':
self.destdir = v
elif k == 'BFORT':
self.bfort = v
elif k == 'TEST_RUNS':
self.test_runs = v
elif k == 'CC':
self.cc = v
elif k == 'CC_FLAGS':
self.cc_flags = v
elif k == 'FC' and not v=='':
self.fc = v
elif k == 'AR':
self.ar = v
elif k == 'AR_FLAGS':
self.ar_flags = v
elif k == 'AR_LIB_SUFFIX':
self.ar_lib_suffix = v
elif k == 'CC_LINKER_SLFLAG':
self.slflag = v
elif k == 'RANLIB':
self.ranlib = v
elif k == 'PETSC_BUILD_USING_CMAKE':
self.build_using_cmake = v
elif k == 'MAKE_IS_GNUMAKE':
self.make_is_gnumake = v
elif k == 'PETSC_LANGUAGE' and v=='CXXONLY':
self.language = 'c++'
f.close()
except:
self.log.Exit('ERROR: cannot process file ' + petscvariables)
self.ind64 = False
self.mpiuni = False
self.debug = False
self.singlelib = False
try:
f = open(petscconf_h)
for l in f.readlines():
l = l.split()
if len(l)==3 and l[0]=='#define' and l[1]=='PETSC_USE_64BIT_INDICES' and l[2]=='1':
self.ind64 = True
elif len(l)==3 and l[0]=='#define' and l[1]=='PETSC_HAVE_MPIUNI' and l[2]=='1':
self.mpiuni = True
elif len(l)==3 and l[0]=='#define' and l[1]=='PETSC_USE_DEBUG' and l[2]=='1':
self.debug = True
elif len(l)==3 and l[0]=='#define' and l[1]=='PETSC_USE_SINGLE_LIBRARY' and l[2]=='1':
self.singlelib = True
elif self.isinstall and len(l)==3 and l[0]=='#define' and l[1]=='PETSC_ARCH':
self.arch = l[2].strip('"')
f.close()
except:
if self.isinstall:
self.log.Exit('ERROR: cannot process file ' + petscconf_h + ', maybe you forgot to set PETSC_ARCH')
else:
self.log.Exit('ERROR: cannot process file ' + petscconf_h)
# empty PETSC_ARCH, guess an arch name
if self.isinstall and not self.arch:
self.arch = 'arch-' + sys.platform.replace('cygwin','mswin')+ '-' + self.language
if self.debug:
self.arch += '-debug'
else:
self.arch += '-opt'
if not 'real' in self.scalar:
self.arch += '-' + self.scalar
|
ghwatson/SpanishAcquisitionIQC
|
spacq/gui/display/table/generic.py
|
Python
|
bsd-2-clause
| 4,604
| 0.03258
|
from numpy import array, compress, zeros
import wx
from wx.lib.mixins.listctrl import ListCtrlAutoWidthMixin
from spacq.interface.list_columns import ListParser
"""
Embeddable, generic, virtual, tabular display.
"""
class VirtualListCtrl(wx.ListCtrl, ListCtrlAutoWidthMixin):
"""
A generic virtual list.
"""
max_value_len = 250 # Characters.
@staticmethod
def find_type(value):
"""
Determine the type of a column based on a single value.
The type is one of: scalar, list, string.
"""
try:
float(value)
except ValueError:
pass
else:
return 'scalar'
try:
ListParser()(value)
except ValueError:
pass
else:
return 'list'
return 'string'
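	# Illustrative results (given the checks above): find_type('3.14')
	# returns 'scalar', find_type('hello') returns 'string', and a string
	# that ListParser accepts returns 'list'.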
def __init__(self, parent, *args, **kwargs):
wx.ListCtrl.__init__(self, parent,
style=wx.LC_REPORT|wx.LC_VIRTUAL|wx.LC_HRULES|wx.LC_VRULES,
*args, **kwargs)
ListCtrlAutoWidthMixin.__init__(self)
self.reset()
def reset(self):
self.headings = []
self.data = array([])
self.filtered_data = None
self.display_data = array([])
self.types = []
def refresh_with_values(self, data):
self.ItemCount = len(data)
if self.ItemCount > 0:
self.display_data = zeros(data.shape, dtype='|S{0}'.format(self.max_value_len))
for i, _ in enumerate(self.headings):
# Truncate for display.
				self.display_data[:,i] = [str(x)[:self.max_value_len] for x in data[:,i]]
self.Refresh()
def apply_filter(self, f, afresh=False):
"""
		Set the data to be the old data, along with the application of a filter.
f is a function of two parameters: the index of the row and the row itself.
f must return True if the row is to be kept and False otherwise.
If afresh is True, all old filtered data is discarded.
Otherwise, a new filter can be quickly applied.
"""
if afresh:
self.filtered_data = None
if self.filtered_data is not None:
original_set = self.filtered_data
else:
original_set = self.data
self.filtered_data = compress([f(i, x) for i, x in enumerate(original_set)], original_set, axis=0)
self.refresh_with_values(self.filtered_data)
def GetValue(self, types=None):
# Get all types by default.
if types is None:
types = set(self.types)
else:
types = set(types)
# Find column indices of the correct type.
idxs = [i for i, t in enumerate(self.types) if t in types]
if self.filtered_data is not None:
data = self.filtered_data
else:
data = self.data
return ([self.headings[i] for i in idxs], data[:,idxs], [self.types[i] for i in idxs])
def SetValue(self, headings, data):
"""
headings: A list of strings.
data: A 2D NumPy array.
"""
self.ClearAll()
self.reset()
self.headings = headings
self.data = data
self.refresh_with_values(self.data)
if self.ItemCount > 0:
width, height = self.GetSize()
# Give some room for the scrollbar.
col_width = (width - 50) / len(self.headings)
for i, heading in enumerate(self.headings):
self.InsertColumn(i, heading, width=col_width)
type = self.find_type(data[0,i])
self.types.append(type)
def OnGetItemText(self, item, col):
"""
Return cell value for LC_VIRTUAL.
"""
return self.display_data[item,col]
class TabularDisplayPanel(wx.Panel):
"""
A panel to display arbitrary tabular data.
"""
def __init__(self, parent, *args, **kwargs):
wx.Panel.__init__(self, parent, *args, **kwargs)
# Panel.
panel_box = wx.BoxSizer(wx.VERTICAL)
## Table.
self.table = VirtualListCtrl(self)
panel_box.Add(self.table, proportion=1, flag=wx.EXPAND)
self.SetSizer(panel_box)
def __len__(self):
return self.table.ItemCount
def from_csv_data(self, has_header, values):
"""
Import the given CSV data into the table.
If has_header is True, the first row is treated specially.
"""
if has_header:
headers, rows = values[0], array(values[1:])
else:
headers, rows = [''] * len(values[0]), array(values)
# Ensure that all columns have a header.
for i, header in enumerate(headers):
if not header:
headers[i] = 'Column {0}'.format(i + 1)
self.SetValue(headers, rows)
def GetValue(self, *args, **kwargs):
return self.table.GetValue(*args, **kwargs)
def SetValue(self, headings, values):
self.table.SetValue(headings, values)
class TabularDisplayFrame(wx.Frame):
def __init__(self, parent, *args, **kwargs):
wx.Frame.__init__(self, parent, *args, **kwargs)
# Frame.
frame_box = wx.BoxSizer(wx.VERTICAL)
## Display panel.
self.display_panel = TabularDisplayPanel(self)
frame_box.Add(self.display_panel, proportion=1, flag=wx.EXPAND)
self.SetSizer(frame_box)
|
daboross/screeps-warreport
|
warreport/constants.py
|
Python
|
mit
| 280
| 0
|
ranged_attacker = "ranged attacker"
melee_attacker = "melee attacker"
healer = 'healer'
dismantling_attacker = 'dismantler'
general_attacker = 'general attacker'
tough_attacker = 'tough guy'
work_and_carry_attacker = 'multi-purpose attacker'
civilian = 'civilian'
scout = 'scout'
|
Raag079/self-driving-car
|
Term01-Computer-Vision-and-Deep-Learning/Labs/05-CarND-Alexnet-Feature-Extraction/feature_extraction.py
|
Python
|
mit
| 1,499
| 0.002668
|
import time
import tensorflow as tf
import numpy as np
import pandas as pd
from scipy.misc import imread
from alexnet import AlexNet
sign_names = pd.read_csv('signnames.csv')
nb_classes = 43
x = tf.placeholder(tf.float32, (None, 32, 32, 3))
resized = tf.image.resize_images(x, (227, 227))
# NOTE: By setting `feature_extract` to `True` we return
# the second to last layer.
fc7 = AlexNet(resized, feature_extract=True)
# TODO: Define a new fully connected layer followed by a softmax activation to classify
# the traffic signs. Assign the result of the softmax activation to `probs` below.
shape = (fc7.get_shape().as_list()[-1], nb_classes)  # use this shape for the weight matrix
fc8W = tf.Variable(tf.truncated_normal(shape, stddev=1e-2))
fc8b = tf.Variable(tf.zeros(nb_classes))
logits = tf.nn.xw_plus_b(fc7, fc8W, fc8b)
probs = tf.nn.softmax(logits)
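# A sketch of the classifier training objective one might add next
# (illustrative; `labels` and the optimizer choice are assumptions, not part
# of the original lab code):
#   labels = tf.placeholder(tf.int64, (None,))
#   loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
#       logits=logits, labels=labels))
#   train_op = tf.train.AdamOptimizer().minimize(loss, var_list=[fc8W, fc8b])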
init = tf.initialize_all_variables()
sess = tf.Session()
sess.run(init)
# Read Images
im1 = imread("construction.jpg").astype(np.float32)
im1 = im1 - np.mean(im1)
im2 = imread("stop.jpg").astype(np.float32)
im2 = im2 - np.mean(im2)
# Run Inference
t = time.time()
output = sess.run(probs, feed_dict={x: [im1, im2]})
# Print Output
for input_im_ind in range(output.shape[0]):
inds = np.argsort(output)[input_im_ind, :]
print("Image", input_im_ind)
for i in range(5):
print("%s: %.3f" % (sign_names.ix[inds[-1 - i]][1], output[input_im_ind, inds[-1 - i]]))
print()
print("Time: %.3f seconds" % (time.time() - t))
|
svinota/pyrouted
|
pyrouted/api.py
|
Python
|
gpl-2.0
| 2,803
| 0
|
import json
import bottle
from pyrouted.util import make_spec
def route(method, path):
def decorator(f):
f.http_route = path
f.http_method = method
return f
return decorator
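# Illustrative usage: the decorator only tags the handler function; a
# dispatcher elsewhere is assumed to read these attributes when wiring up
# bottle routes:
#   @route('GET', '/ping')
#   def ping(self): ...
#   # ping.http_method == 'GET', ping.http_route == '/ping'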
class APIv1(object):
prefix = '/v1'
def __init__(self, ndb, config):
self.ndb = ndb
self.config = config
@route('GET', '/sources')
def sources_list(self, mode='short'):
ret = {}
mode = bottle.request.query.mode or mode
for name, spec in self.ndb.sources.items():
ret[name] = {'class': spec.nl.__class__.__name__,
'status': spec.status}
if mode == 'full':
ret[name]['config'] = spec.nl_kwarg
return bottle.template('{{!ret}}', ret=json.dumps(ret))
@route('PUT', '/sources')
def sources_restart(self):
node = bottle.request.body.getvalue().decode('utf-8')
self.ndb.sources[node].start()
@route('POST', '/sources')
def sources_add(self):
data = bottle.request.body.getvalue().decode('utf-8')
node, spec = make_spec(data, self.config)
self.config['sources'].append(node)
self.ndb.connect_source(node, spec)
@route('DELETE', '/sources')
def sources_del(self):
node = bottle.request.body.getvalue().decode('utf-8')
self.config['sources'].remove(node)
self.ndb.disconnect_source(node)
@route('GET', '/config')
def config_get(self):
return bottle.template('{{!ret}}',
ret=json.dumps(self.config))
@route('PUT', '/config')
def config_dump(self):
path = bottle.request.body.getvalue().decode('utf-8')
self.config.dump(path)
@route('GET', '/<name:re:(%s|%s|%s|%s|%s|%s)>' % ('interfaces',
                                                      'addresses',
                                                      'routes',
                                                      'neighbours',
'vlans',
'bridges'))
def view(self, name):
ret = []
obj = getattr(self.ndb, name)
for line in obj.dump():
ret.append(line)
return bottle.template('{{!ret}}', ret=json.dumps(ret))
@route('GET', '/query/<name:re:(%s|%s|%s|%s)>' % ('nodes',
'p2p_edges',
'l2_edges',
'l3_edges'))
def query(self, name):
ret = []
obj = getattr(self.ndb.query, name)
for line in obj():
ret.append(line)
return bottle.template('{{!ret}}', ret=json.dumps(ret))
|
Statoil/libecl
|
python/tests/legacy_tests/test_test.py
|
Python
|
gpl-3.0
| 358
| 0.002793
|
from ert.test import TestRun
from ert.test import path_exists
from ert.test import SourceEnumerator
from ert.test import TestArea , TestAreaContext
from ert.test import ErtTestRunner
from ert.test import PathContext
from ert.test import LintTestCase
from ert.test import ImportTestCase
from tests import EclTest
class ErtLegacyTestTest(EclTest):
pass
|
vpodzime/lvm-dubstep
|
lvmdbus/automatedproperties.py
|
Python
|
gpl-3.0
| 6,035
| 0
|
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright 2015, Tony Asleson <[email protected]>
import dbus
import cfg
from utils import get_properties, add_properties, get_object_property_diff
from state import State
# noinspection PyPep8Naming
class AutomatedProperties(dbus.service.Object):
"""
This class implements the needed interfaces for:
org.freedesktop.DBus.Properties
Other classes inherit from it to get the same behavior
"""
DBUS_INTERFACE = ''
def __init__(self, object_path, interface, search_method=None):
dbus.service.Object.__init__(self, cfg.bus, object_path)
self._ap_interface = interface
self._ap_o_path = object_path
self._ap_search_method = search_method
self.state = None
def dbus_object_path(self):
return self._ap_o_path
def emit_data(self):
props = {}
for i in self.interface():
props[i] = self.GetAll(i)
return self._ap_o_path, props
def interface(self, all_interfaces=False):
return [self._ap_interface]
# Properties
# noinspection PyUnusedLocal
@dbus.service.method(dbus_interface=dbus.PROPERTIES_IFACE,
in_signature='ss', out_signature='v')
def Get(self, interface_name, property_name):
value = getattr(self, property_name)
# Note: If we get an exception in this handler we won't know about it,
# only the side effect of no returned value!
print 'Get (%s), type (%s), value(%s)' % \
(property_name, str(type(value)), str(value))
return value
@dbus.service.method(dbus_interface=dbus.PROPERTIES_IFACE,
in_signature='s', out_signature='a{sv}')
def GetAll(self, interface_name):
if interface_name in self.interface():
# Using introspection, lets build this dynamically
return get_properties(self, interface_name)[1]
raise dbus.exceptions.DBusException(
self._ap_interface,
'The object %s does not implement the %s interface'
% (self.__class__, interface_name))
@dbus.service.method(dbus_interface=dbus.PROPERTIES_IFACE,
in_signature='ssv')
def Set(self, interface_name, property_name, new_value):
setattr(self, property_name, new_value)
self.PropertiesChanged(interface_name,
{property_name: new_value}, [])
# As dbus-python does not support introspection for properties we will
# get the autogenerated xml and then add our wanted properties to it.
@dbus.service.method(dbus_interface=dbus.INTROSPECTABLE_IFACE,
out_signature='s')
def Introspect(self):
r = dbus.service.Object.Introspect(self, self._ap_o_path, cfg.bus)
# Look at the properties in the class
return add_properties(r, self._ap_interface, get_properties(self)[0])
@dbus.service.signal(dbus_interface=dbus.PROPERTIES_IFACE,
signature='sa{sv}as')
def PropertiesChanged(self, interface_name, changed_properties,
invalidated_properties):
print('SIGNAL: PropertiesChanged(%s, %s, %s, %s)' %
(str(self._ap_o_path), str(interface_name),
str(changed_properties), str(invalidated_properties)))
def refresh(self, search_key=None, object_state=None):
"""
Take the values (properties) of an object and update them with what
lvm currently has. You can either fetch the new ones or supply the
new state to be updated with
:param search_key: The value to use to search for
:param object_state: Use this as the new object state
"""
num_changed = 0
# If we can't do a lookup, bail now, this happens if we blindly walk
# through all dbus objects as some don't have a search method, like
# 'Manager' object.
if not self._ap_search_method:
return
search = self.lvm_id
if search_key:
search = search_key
# Either we have the new object state or we need to go fetch it
if object_state:
new_state = object_state
else:
new_state = self._ap_search_method([search])[0]
assert isinstance(new_state, State)
assert new_state
# When we refresh an object the object identifiers might have changed
# because LVM allows the user to change them (name & uuid), thus if
# they have changed we need to update the object manager so that
# look-ups will happen correctly
old_id = self.state.identifiers()
new_id = new_state.identifiers()
if old_id[0] != new_id[0] or old_id[1] != new_id[1]:
cfg.om.lookup_update(self)
# Grab the properties values, then replace the state of the object
# and retrieve the new values
# TODO: We need to add locking to prevent concurrent access to the
# properties so that a client is not accessing while we are
# replacing.
o_prop = get_properties(self)[1]
self.state = new_state
n_prop = get_properties(self)[1]
changed = get_object_property_diff(o_prop, n_prop)
if changed:
self.PropertiesChanged(self._ap_interface, changed, [])
num_changed += 1
return num_changed
|
asydorchuk/robotics
|
python/robotics/robots/factory.py
|
Python
|
mit
| 1,253
| 0.000798
|
from RPi import GPIO as gpio
from robotics.actors.redbot_motor_actor import RedbotMotorActor
from robotics.interfaces.spi.mcp3008_spi_interface import MCP3008SpiInterface
from robotics.robots.aizek_robot import AizekRobot
from robotics.sensors.redbot_wheel_encoder_sensor import RedbotWheelEncoderSensor
from robotics.sensors.sharp_ir_distance_sensor import SharpIrDistanceSensor
class RobotFactory(object):
@staticmethod
def createAizekRobot():
gpio.setmode(gpio.BOARD)
lmotor = RedbotMotorActor(gpio, 8, 10, 12)
rmotor = RedbotMotorActor(gpio, 11, 13, 15)
spi = MCP3008SpiInterface(0)
wencoder = RedbotWheelEncoderSensor(spi)
lsensor = SharpIrDistanceSensor(spi, 5)
        fsensor = SharpIrDistanceSensor(spi, 4)
        rsensor = SharpIrDistanceSensor(spi, 3)
wheel_radius = 0.032
wheel_distance = 0.1
robot = AizekRobot(
left_motor=lmotor,
right_motor=rmotor,
            wheel_encoder=wencoder,
left_distance_sensor=lsensor,
front_distance_sensor=fsensor,
right_distance_sensor=rsensor,
wheel_radius=wheel_radius,
wheel_distance=wheel_distance,
)
return robot
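# Illustrative usage (requires a Raspberry Pi wired as assumed above):
#   robot = RobotFactory.createAizekRobot()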
|
tynn/numpy
|
numpy/polynomial/tests/test_chebyshev.py
|
Python
|
bsd-3-clause
| 20,348
| 0.000934
|
"""Tests for chebyshev module.
"""
from __future__ import division, absolute_import, print_function
import numpy as np
import numpy.polynomial.chebyshev as cheb
from numpy.polynomial.polynomial import polyval
from numpy.testing import (
assert_almost_equal, assert_raises, assert_equal, assert_,
)
def trim(x):
return cheb.chebtrim(x, tol=1e-6)
T0 = [1]
T1 = [0, 1]
T2 = [-1, 0, 2]
T3 = [0, -3, 0, 4]
T4 = [1, 0, -8, 0, 8]
T5 = [0, 5, 0, -20, 0, 16]
T6 = [-1, 0, 18, 0, -48, 0, 32]
T7 = [0, -7, 0, 56, 0, -112, 0, 64]
T8 = [1, 0, -32, 0, 160, 0, -256, 0, 128]
T9 = [0, 9, 0, -120, 0, 432, 0, -576, 0, 256]
Tlist = [T0, T1, T2, T3, T4, T5, T6, T7, T8, T9]
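# T0..T9 above are the Chebyshev polynomials of the first kind written in the
# power basis, lowest degree first; e.g. T2 = [-1, 0, 2] encodes 2*x**2 - 1.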
class TestPrivate(object):
def test__cseries_to_zseries(self):
for i in range(5):
inp = np.array([2] + [1]*i, np.double)
tgt = np.array([.5]*i + [2] + [.5]*i, np.double)
res = cheb._cseries_to_zseries(inp)
assert_equal(res, tgt)
def test__zseries_to_cseries(self):
for i in range(5):
inp = np.array([.5]*i + [2] + [.5]*i, np.double)
tgt = np.array([2] + [1]*i, np.double)
res = cheb._zseries_to_cseries(inp)
assert_equal(res, tgt)
class TestConstants(object):
def test_chebdomain(self):
assert_equal(cheb.chebdomain, [-1, 1])
def test_chebzero(self):
assert_equal(cheb.chebzero, [0])
def test_chebone(self):
assert_equal(cheb.chebone, [1])
def test_chebx(self):
assert_equal(cheb.chebx, [0, 1])
class TestArithmetic(object):
def test_chebadd(self):
for i in range(5):
for j in range(5):
msg = "At i=%d, j=%d" % (i, j)
tgt = np.zeros(max(i, j) + 1)
tgt[i] += 1
tgt[j] += 1
res = cheb.chebadd([0]*i + [1], [0]*j + [1])
assert_equal(trim(res), trim(tgt), err_msg=msg)
def test_chebsub(self):
for i in range(5):
for j in range(5):
msg = "At i=%d, j=%d" % (i, j)
tgt = np.zeros(max(i, j) + 1)
tgt[i] += 1
tgt[j] -= 1
res = cheb.chebsub([0]*i + [1], [0]*j + [1])
assert_equal(trim(res), trim(tgt), err_msg=msg)
def test_chebmulx(self):
assert_equal(cheb.chebmulx([0]), [0])
assert_equal(cheb.chebmulx([1]), [0, 1])
for i in range(1, 5):
ser = [0]*i + [1]
tgt = [0]*(i - 1) + [.5, 0, .5]
assert_equal(cheb.chebmulx(ser), tgt)
def test_chebmul(self):
for i in range(5):
for j in range(5):
msg = "At i=%d, j=%d" % (i, j)
tgt = np.zeros(i + j + 1)
tgt[i + j] += .5
tgt[abs(i - j)] += .5
res = cheb.chebmul([0]*i + [1], [0]*j + [1])
assert_equal(trim(res), trim(tgt), err_msg=msg)
def test_chebdiv(self):
for i in range(5):
for j in range(5):
msg = "At i=%d, j=%d" % (i, j)
ci = [0]*i + [1]
cj = [0]*j + [1]
tgt = cheb.chebadd(ci, cj)
quo, rem = cheb.chebdiv(tgt, ci)
res = cheb.chebadd(cheb.chebmul(quo, ci), rem)
assert_equal(trim(res), trim(tgt), err_msg=msg)
class TestEvaluation(object):
# coefficients of 1 + 2*x + 3*x**2
c1d = np.array([2.5, 2., 1.5])
c2d = np.einsum('i,j->ij', c1d, c1d)
c3d = np.einsum('i,j,k->ijk', c1d, c1d, c1d)
# some random values in [-1, 1)
x = np.random.random((3, 5))*2 - 1
y = polyval(x, [1., 2., 3.])
def test_chebval(self):
#check empty input
assert_equal(cheb.chebval([], [1]).size, 0)
#check normal input)
x = np.linspace(-1, 1)
y = [polyval(x, c) for c in Tlist]
for i in range(10):
msg = "At i=%d" % i
tgt = y[i]
res = cheb.chebval(x, [0]*i + [1])
assert_almost_equal(res, tgt, err_msg=msg)
#check that shape is preserved
for i in range(3):
dims = [2]*i
x = np.zeros(dims)
            assert_equal(cheb.chebval(x, [1]).shape, dims)
            assert_equal(cheb.chebval(x, [1, 0]).shape, dims)
assert_equal(cheb.chebval(x, [1, 0, 0]).shape, dims)
def test_chebval2d(self):
x1, x2, x3 = self.x
y1, y2, y3 = self.y
#test exceptions
assert_raises(ValueError, cheb.chebval2d, x1, x2[:2], self.c2d)
#test values
tgt = y1*y2
        res = cheb.chebval2d(x1, x2, self.c2d)
assert_almost_equal(res, tgt)
#test shape
z = np.ones((2, 3))
res = cheb.chebval2d(z, z, self.c2d)
assert_(res.shape == (2, 3))
def test_chebval3d(self):
x1, x2, x3 = self.x
y1, y2, y3 = self.y
#test exceptions
assert_raises(ValueError, cheb.chebval3d, x1, x2, x3[:2], self.c3d)
#test values
tgt = y1*y2*y3
res = cheb.chebval3d(x1, x2, x3, self.c3d)
assert_almost_equal(res, tgt)
#test shape
z = np.ones((2, 3))
res = cheb.chebval3d(z, z, z, self.c3d)
assert_(res.shape == (2, 3))
def test_chebgrid2d(self):
x1, x2, x3 = self.x
y1, y2, y3 = self.y
#test values
tgt = np.einsum('i,j->ij', y1, y2)
res = cheb.chebgrid2d(x1, x2, self.c2d)
assert_almost_equal(res, tgt)
#test shape
z = np.ones((2, 3))
res = cheb.chebgrid2d(z, z, self.c2d)
assert_(res.shape == (2, 3)*2)
def test_chebgrid3d(self):
x1, x2, x3 = self.x
y1, y2, y3 = self.y
#test values
tgt = np.einsum('i,j,k->ijk', y1, y2, y3)
res = cheb.chebgrid3d(x1, x2, x3, self.c3d)
assert_almost_equal(res, tgt)
#test shape
z = np.ones((2, 3))
res = cheb.chebgrid3d(z, z, z, self.c3d)
assert_(res.shape == (2, 3)*3)
class TestIntegral(object):
def test_chebint(self):
# check exceptions
assert_raises(ValueError, cheb.chebint, [0], .5)
assert_raises(ValueError, cheb.chebint, [0], -1)
assert_raises(ValueError, cheb.chebint, [0], 1, [0, 0])
assert_raises(ValueError, cheb.chebint, [0], lbnd=[0])
assert_raises(ValueError, cheb.chebint, [0], scl=[0])
assert_raises(ValueError, cheb.chebint, [0], axis=.5)
# test integration of zero polynomial
for i in range(2, 5):
k = [0]*(i - 2) + [1]
res = cheb.chebint([0], m=i, k=k)
assert_almost_equal(res, [0, 1])
# check single integration with integration constant
for i in range(5):
scl = i + 1
pol = [0]*i + [1]
tgt = [i] + [0]*i + [1/scl]
chebpol = cheb.poly2cheb(pol)
chebint = cheb.chebint(chebpol, m=1, k=[i])
res = cheb.cheb2poly(chebint)
assert_almost_equal(trim(res), trim(tgt))
# check single integration with integration constant and lbnd
for i in range(5):
scl = i + 1
pol = [0]*i + [1]
chebpol = cheb.poly2cheb(pol)
chebint = cheb.chebint(chebpol, m=1, k=[i], lbnd=-1)
assert_almost_equal(cheb.chebval(-1, chebint), i)
# check single integration with integration constant and scaling
for i in range(5):
scl = i + 1
pol = [0]*i + [1]
tgt = [i] + [0]*i + [2/scl]
chebpol = cheb.poly2cheb(pol)
chebint = cheb.chebint(chebpol, m=1, k=[i], scl=2)
res = cheb.cheb2poly(chebint)
assert_almost_equal(trim(res), trim(tgt))
# check multiple integrations with default k
for i in range(5):
for j in range(2, 5):
pol = [0]*i + [1]
tgt = pol[:]
for k in range(j):
tgt = cheb.chebint(tgt, m=1)
res = cheb.chebint(pol, m=j)
assert_almost_equal(trim(res), trim(tgt))
# check multiple i
|
uw-it-aca/course-dashboards
|
coursedashboards/migrations/0006_auto_20170918_1954.py
|
Python
|
apache-2.0
| 2,064
| 0.001938
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-09-18 19:54
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('coursedashboards', '0005_auto_20170915_2036'),
]
operations = [
migrations.CreateModel(
name='CourseOfferingMajor',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('count', models.IntegerField()),
],
options={
'db_table': 'CourseOfferingMajor',
},
),
migrations.AlterUniqueTogether(
name='coursemajor',
unique_together=set([]),
),
migrations.RemoveField(
model_name='coursemajor',
name='course',
),
migrations.RemoveField(
model_name='coursemajor',
name='major',
),
migrations.AlterField(
model_name='course',
name='curriculum',
field=models.CharField(max_length=20),
),
migrations.DeleteModel(
name='CourseMajor',
),
migrations.AddField(
model_name='courseofferingmajor',
name='course',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='coursedashboards.Course'),
),
migrations.AddField(
model_name='courseofferingmajor',
name='major',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='coursedashboards.Major'),
),
migrations.AddField(
model_name='courseofferingmajor',
name='term',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='coursedashboards.Term'),
),
        migrations.AlterUniqueTogether(
            name='courseofferingmajor',
            unique_together=set([('major', 'term', 'course')]),
),
]
|
juanpex/django-model-deploy
|
test_project/wsgi.py
|
Python
|
bsd-3-clause
| 1,153
| 0.000867
|
"""
WSGI config for django_model_deploy project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "test_project.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
|
sony/nnabla
|
python/test/function/test_div2.py
|
Python
|
apache-2.0
| 1,404
| 0
|
# Copyright 2019,2020,2021 Sony Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions
|
and
# limitations under the License.
import pytest
import numpy as np
import nnabla.functions as F
from nbla_test_utils import list_context
ctxs = list_context('Div2')
@pytest.mark.parametrize("ctx, func_name", ctxs)
@pytest.mark.parametrize("seed", [313])
@pytest.mark.parametrize("inplace", [False, True])
def test_div2_double_backward(inplace, seed, ctx, func_name):
from nbla_test_utils import backward_function_tester
rng = np.random.RandomState(seed)
inputs = [rng.randn(2, 3).astype(np.float32),
rng.randn(2, 3).astype(np.float32) * 2]
backward_function_tester(rng, F.div2,
inputs=inputs,
func_args=[inplace], func_kwargs={},
atol_accum=1e-1,
dstep=1e-4,
ctx=ctx)
|
mbayon/TFG-MachineLearning
|
venv/lib/python3.6/site-packages/scipy/_lib/_version.py
|
Python
|
mit
| 4,793
| 0.000209
|
"""Utility to compare (Numpy) version strings.
The NumpyVersion class allows properly comparing numpy version strings.
The LooseVersion and StrictVersion classes that distutils provides don't
work; they don't recognize anything like alpha/beta/rc/dev versions.
"""
import re
from scipy._lib.six import string_types
__all__ = ['NumpyVersion']
class NumpyVersion():
"""Parse and compare numpy version strings.
Numpy has the following versioning scheme (numbers given are examples; they
    can be >9 in principle):
- Released version: '1.8.0', '1.8.1', etc.
- Alpha: '1.8.0a1', '1.8.0a2', etc.
- Beta: '1.8.0b1', '1.8.0b2', etc.
    - Release candidates: '1.8.0rc1', '1.8.0rc2', etc.
- Development versions: '1.8.0.dev-f1234afa' (git commit hash appended)
- Development versions after a1: '1.8.0a1.dev-f1234afa',
'1.8.0b2.dev-f1234afa',
'1.8.1rc1.dev-f1234afa', etc.
- Development versions (no git hash available): '1.8.0.dev-Unknown'
Comparing needs to be done against a valid version string or other
`NumpyVersion` instance.
Parameters
----------
vstring : str
Numpy version string (``np.__version__``).
Notes
-----
All dev versions of the same (pre-)release compare equal.
Examples
--------
>>> from scipy._lib._version import NumpyVersion
>>> if NumpyVersion(np.__version__) < '1.7.0':
... print('skip')
skip
>>> NumpyVersion('1.7') # raises ValueError, add ".0"
"""
def __init__(self, vstring):
self.vstring = vstring
ver_main = re.match(r'\d[.]\d+[.]\d+', vstring)
if not ver_main:
raise ValueError("Not a valid numpy version string")
self.version = ver_main.group()
self.major, self.minor, self.bugfix = [int(x) for x in
self.version.split('.')]
if len(vstring) == ver_main.end():
self.pre_release = 'final'
else:
alpha = re.match(r'a\d', vstring[ver_main.end():])
beta = re.match(r'b\d', vstring[ver_main.end():])
rc = re.match(r'rc\d', vstring[ver_main.end():])
pre_rel = [m for m in [alpha, beta, rc] if m is not None]
if pre_rel:
self.pre_release = pre_rel[0].group()
else:
self.pre_release = ''
self.is_devversion = bool(re.search(r'.dev', vstring))
def _compare_version(self, other):
"""Compare major.minor.bugfix"""
if self.major == other.major:
if self.minor == other.minor:
if self.bugfix == other.bugfix:
vercmp = 0
elif self.bugfix > other.bugfix:
vercmp = 1
else:
vercmp = -1
elif self.minor > other.minor:
vercmp = 1
else:
vercmp = -1
elif self.major > other.major:
vercmp = 1
else:
vercmp = -1
return vercmp
def _compare_pre_release(self, other):
"""Compare alpha/beta/rc/final."""
if self.pre_release == other.pre_release:
vercmp = 0
elif self.pre_release == 'final':
vercmp = 1
elif other.pre_release == 'final':
vercmp = -1
elif self.pre_release > other.pre_release:
vercmp = 1
else:
vercmp = -1
return vercmp
def _compare(self, other):
if not isinstance(other, (string_types, NumpyVersion)):
raise ValueError("Invalid object to compare with NumpyVersion.")
if isinstance(other, string_types):
other = NumpyVersion(other)
vercmp = self._compare_version(other)
if vercmp == 0:
# Same x.y.z version, check for alpha/beta/rc
vercmp = self._compare_pre_release(other)
if vercmp == 0:
# Same version and same pre-release, check if dev version
if self.is_devversion is other.is_devversion:
vercmp = 0
elif self.is_devversion:
vercmp = -1
else:
vercmp = 1
return vercmp
def __lt__(self, other):
return self._compare(other) < 0
def __le__(self, other):
return self._compare(other) <= 0
def __eq__(self, other):
return self._compare(other) == 0
def __ne__(self, other):
return self._compare(other) != 0
def __gt__(self, other):
return self._compare(other) > 0
def __ge__(self, other):
return self._compare(other) >= 0
def __repr__(self):
return "NumpyVersion(%s)" % self.vstring
|
acasadoquijada/bares
|
practica4/settings.py
|
Python
|
gpl-3.0
| 4,257
| 0.003993
|
"""
Django settings for practica4 project.
Generated by 'django-admin startproject' using Django 1.9.1.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os,django
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '-vsi*yr21o3)d%-u%ho28+tdci&afj5-lk4sqo#c%-9(itd!v@'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
ALLOWED_HOSTS = ['*']
TEMPLATE_PATH = os.path.join(BASE_DIR, 'templates')
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'bares',
'easy_maps',
'bootstrap_toolkit',
]
if django.VERSION < (1, 7):
INSTALLED_APPS += (
'south',
)
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'practica4.urls'
TEMPLATES = [
{
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [TEMPLATE_PATH],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'practica4.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
STATIC_PATH = os.path.join(BASE_DIR,'static')
STATIC_ROOT = 'staticfiles'
STATICFILES_DIRS = (
STATIC_PATH,
)
STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'
REGISTRATION_OPEN = True # If True, users can register
ACCOUNT_ACTIVATION_DAYS = 7 # One-week activation window; you may, of course, use a different value.
REGISTRATION_AUTO_LOGIN = True # If True, the user will be automatically logged in.
LOGIN_REDIRECT_URL = '/bares/' # The page you want users to arrive at after they successful log in
LOGIN_URL = '/accounts/login/' # The page users are directed to if they are not logged in,
# and are trying to access pages requiring authentication
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media') # Absolute path to the media directory
|
loafbaker/django_launch_with_code
|
joins/forms.py
|
Python
|
mit
| 211
| 0.018957
|
from django import forms
from .models import Join
class EmailForm(forms.Form):
email = forms.EmailField()
class JoinForm(forms.ModelForm):
class Meta:
model = Join
        fields = ["email",]
|
npuichigo/ttsflow
|
third_party/tensorflow/tensorflow/python/kernel_tests/string_to_hash_bucket_op_test.py
|
Python
|
apache-2.0
| 4,034
| 0.010659
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for StringToHashBucket op from string_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.platform import test
class StringToHashBucketOpTest(test.TestCase):
def testStringToOneHashBucketFast(self):
with self.test_session():
input_string = array_ops.placeholder(dtypes.string)
output = string_ops.string_to_hash_bucket_fast(input_string, 1)
result = output.eval(feed_dict={input_string: ['a', 'b', 'c']})
self.assertAllEqual([0, 0, 0], result)
def testStringToHashBucketsFast(self):
with self.test_session():
input_string = array_ops.placeholder(dtypes.string)
output = string_ops.string_to_hash_bucket_fast(input_string, 10)
result = output.eval(feed_dict={input_string: ['a', 'b', 'c', 'd']})
# Fingerprint64('a') -> 12917804110809363939 -> mod 10 -> 9
# Fingerprint64('b') -> 11795596070477164822 -> mod 10 -> 2
      # Fingerprint64('c') -> 11430444447143000872 -> mod 10 -> 2
# Fingerprint64('d') -> 4470636696479570465 -> mod 10 -> 5
self.assertAllEqual([9, 2, 2, 5], result)
def testStringToOneHashBucketLegacyHash(self):
with self.test_session():
input_string = array_ops.placeholder(dtypes.string)
output = string_ops.string_to_hash_bucket(input_string, 1)
result = output.eval(feed_dict={input_string: ['a', 'b', 'c']})
self.assertAllEqual([0, 0, 0], result)
  def testStringToHashBucketsLegacyHash(self):
with self.test_session():
input_string = array_ops.placeholder(dtypes.string)
output = string_ops.string_to_hash_bucket(input_string, 10)
result = output.eval(feed_dict={input_string: ['a', 'b', 'c']})
# Hash64('a') -> 2996632905371535868 -> mod 10 -> 8
# Hash64('b') -> 5795986006276551370 -> mod 10 -> 0
# Hash64('c') -> 14899841994519054197 -> mod 10 -> 7
self.assertAllEqual([8, 0, 7], result)
def testStringToOneHashBucketStrongOneHashBucket(self):
with self.test_session():
input_string = constant_op.constant(['a', 'b', 'c'])
output = string_ops.string_to_hash_bucket_strong(
input_string, 1, key=[123, 345])
self.assertAllEqual([0, 0, 0], output.eval())
def testStringToHashBucketsStrong(self):
with self.test_session():
input_string = constant_op.constant(['a', 'b', 'c'])
output = string_ops.string_to_hash_bucket_strong(
input_string, 10, key=[98765, 132])
# key = [98765, 132]
# StrongKeyedHash(key, 'a') -> 7157389809176466784 -> mod 10 -> 4
# StrongKeyedHash(key, 'b') -> 15805638358933211562 -> mod 10 -> 2
# StrongKeyedHash(key, 'c') -> 18100027895074076528 -> mod 10 -> 8
self.assertAllEqual([4, 2, 8], output.eval())
def testStringToHashBucketsStrongInvalidKey(self):
with self.test_session():
input_string = constant_op.constant(['a', 'b', 'c'])
with self.assertRaisesOpError('Key must have 2 elements'):
string_ops.string_to_hash_bucket_strong(
input_string, 10, key=[98765]).eval()
if __name__ == '__main__':
test.main()
|
m3wolf/orgwolf
|
setup.py
|
Python
|
gpl-3.0
| 1,198
| 0
|
import os
from setuptools import find_packages, setup
with open(os.path.join(os.path.dirname(__file__), 'README.md')) as readme:
README = readme.read()
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
name='orgwolf',
version='0.1',
packages=find_packages(),
include_package_data=True,
license='BSD License', # example license
description='A simple Django app to conduct Web-based polls.',
long_description=README,
url='https://www.example.com/',
author='Your Name',
author_email='[email protected]',
classifiers=[
'Environment :: Web Environment',
'Framework :: Django',
'Framework :: Django :: X.Y', # replace "X.Y" as appropriate
        'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License', # example license
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Internet :: WWW/HTTP',
        'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
],
)
|
aleksandarmilicevic/pygments-red
|
setup.py
|
Python
|
mit
| 1,246
| 0
|
#!/usr/bin/env python
from setuptools import setup, find_packages
setup(
name='pygments-red',
version='0.2',
description='Pygments lexer for Ruby + Red.',
keywords='pygments ruby red lexer',
license='MIT',
author='Aleksandar Milicevic',
author_email='[email protected]',
url='https://github.com/aleksandarmilicevic/pygments-red',
packages=find_packages(),
    install_requires=['pygments >= 1.4'],
entry_points='''[pygments.lexers]
ruby193=pygments_red:Ruby193Lexer
arby=pygments_red:ARbyLexer
red=pygments_red:RedLexer
sunny=pygments_red:SunnyLexer
handlebars=pygments_red:HandlebarsLexer
html+handlebars=pygments_red:HandlebarsHtmlLexer
slang=pygments_red:SlangLexer
errb=pygments_red:ErrbLexer
ered=pygments_red:EredLexer
redhtml=pygments_red:RedHtmlLexer
[pygments.styles]
redstyle=pygments_red:RedStyle
github=pygments_red:GithubStyle
githubcustom=pygments_red:GithubCustom1Style''',
classifiers=[
],
)
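# Hedged usage sketch: once this package is installed, Pygments discovers the
# lexers and styles above through its entry-point plugin system, so they can
# be resolved by name like any built-in (assuming each lexer class declares a
# matching alias; the filename below is illustrative):
#
#   from pygments.lexers import get_lexer_by_name
#   lexer = get_lexer_by_name('red')   # -> pygments_red:RedLexer
#   # or on the command line: pygmentize -l red -O style=redstyle file.red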
|
Julian/cardboard
|
cardboard/cards/sets/stronghold.py
|
Python
|
mit
| 26,294
| 0.000038
|
from cardboard import types
from cardboard.ability import (
AbilityNotImplemented, spell, activated, triggered, static
)
from cardboard.cards import card, common, keywords, match
@card("Crovax the Cursed")
def crovax_the_cursed(card, abilities):
def crovax_the_cursed():
return AbilityNotImplemented
def crovax_the_cursed():
return AbilityNotImplemented
def crovax_the_cursed():
return AbilityNotImplemented
return crovax_the_cursed, crovax_the_cursed, crovax_the_cursed,
@card("Intruder Alarm")
def intruder_alarm(card, abilities):
def intruder_alarm():
return AbilityNotImplemented
def intruder_alarm():
return AbilityNotImplemented
return intruder_alarm, intruder_alarm,
@card("Cannibalize")
def cannibalize(card, abilities):
def cannibalize():
return AbilityNotImplemented
return cannibalize,
@card("Spike Worker")
def spike_worker(card, abilities):
def spike_worker():
return AbilityNotImplemented
def spike_worker():
return AbilityNotImplemented
return spike_worker, spike_worker,
@card("Contemplation")
def contemplation(card, abilities):
def contemplation():
return AbilityNotImplemented
return contemplation,
@card("Megrim")
def megrim(card, abilities):
def megrim():
return AbilityNotImplemented
return megrim,
@card("Shifting Wall")
def shifting_wall(card, abilities):
def shifting_wall():
return AbilityNotImplemented
def shifting_wall():
return AbilityNotImplemented
return shifting_wall, shifting_wall,
@card("Dauthi Trapper")
def dauthi_trapper(card, abilities):
def dauthi_trapper():
return AbilityNotImplemented
return dauthi_trapper,
@card("Rabid Rats")
def rabid_rats(card, abilities):
def rabid_rats():
return AbilityNotImplemented
return rabid_rats,
@card("Morgue Thrull")
def morgue_thrull(card, abilities):
def morgue_thrull():
return AbilityNotImplemented
return morgue_thrull,
@card("Shard Phoenix")
def shard_phoenix(card, abilities):
    def shard_phoenix():
return AbilityNotImplemented
def shard_phoenix():
return AbilityNotImplemented
def shard_phoenix():
return AbilityNotImplemented
return shard_phoenix, shard_phoenix, shard_phoenix,
@card("Skyshroud Archer")
def skyshroud_archer(card, abilities):
def skyshroud_archer():
return AbilityNotImplemented
return skyshroud_archer,
@card("Mask of the
|
Mimic")
def mask_of_the_mimic(card, abilities):
def mask_of_the_mimic():
return AbilityNotImplemented
def mask_of_the_mimic():
return AbilityNotImplemented
return mask_of_the_mimic, mask_of_the_mimic,
@card("Provoke")
def provoke(card, abilities):
def provoke():
return AbilityNotImplemented
def provoke():
return AbilityNotImplemented
return provoke, provoke,
@card("Duct Crawler")
def duct_crawler(card, abilities):
def duct_crawler():
return AbilityNotImplemented
return duct_crawler,
@card("Nomads en-Kor")
def nomads_enkor(card, abilities):
def nomads_enkor():
return AbilityNotImplemented
return nomads_enkor,
@card("Change of Heart")
def change_of_heart(card, abilities):
def change_of_heart():
return AbilityNotImplemented
def change_of_heart():
return AbilityNotImplemented
return change_of_heart, change_of_heart,
@card("Overgrowth")
def overgrowth(card, abilities):
def overgrowth():
return AbilityNotImplemented
def overgrowth():
return AbilityNotImplemented
return overgrowth, overgrowth,
@card("Pursuit of Knowledge")
def pursuit_of_knowledge(card, abilities):
def pursuit_of_knowledge():
return AbilityNotImplemented
def pursuit_of_knowledge():
return AbilityNotImplemented
return pursuit_of_knowledge, pursuit_of_knowledge,
@card("Sift")
def sift(card, abilities):
def sift():
return AbilityNotImplemented
return sift,
@card("Portcullis")
def portcullis(card, abilities):
def portcullis():
return AbilityNotImplemented
return portcullis,
@card("Dream Halls")
def dream_halls(card, abilities):
def dream_halls():
return AbilityNotImplemented
return dream_halls,
@card("Ruination")
def ruination(card, abilities):
def ruination():
return AbilityNotImplemented
return ruination,
@card("Horn of Greed")
def horn_of_greed(card, abilities):
def horn_of_greed():
return AbilityNotImplemented
return horn_of_greed,
@card("Hermit Druid")
def hermit_druid(card, abilities):
def hermit_druid():
return AbilityNotImplemented
return hermit_druid,
@card("Spined Sliver")
def spined_sliver(card, abilities):
def spined_sliver():
return AbilityNotImplemented
return spined_sliver,
@card("Dream Prowler")
def dream_prowler(card, abilities):
def dream_prowler():
return AbilityNotImplemented
return dream_prowler,
@card("Spike Soldier")
def spike_soldier(card, abilities):
def spike_soldier():
return AbilityNotImplemented
def spike_soldier():
return AbilityNotImplemented
def spike_soldier():
return AbilityNotImplemented
return spike_soldier, spike_soldier, spike_soldier,
@card("Tortured Existence")
def tortured_existence(card, abilities):
def tortured_existence():
return AbilityNotImplemented
return tortured_existence,
@card("Mana Leak")
def mana_leak(card, abilities):
def mana_leak():
return AbilityNotImplemented
return mana_leak,
@card("Mob Justice")
def mob_justice(card, abilities):
def mob_justice():
return AbilityNotImplemented
return mob_justice,
@card("Primal Rage")
def primal_rage(card, abilities):
def primal_rage():
return AbilityNotImplemented
return primal_rage,
@card("Constant Mists")
def constant_mists(card, abilities):
def constant_mists():
return AbilityNotImplemented
def constant_mists():
return AbilityNotImplemented
return constant_mists, constant_mists,
@card("Crystalline Sliver")
def crystalline_sliver(card, abilities):
def crystalline_sliver():
return AbilityNotImplemented
return crystalline_sliver,
@card("Conviction")
def conviction(card, abilities):
def conviction():
return AbilityNotImplemented
def conviction():
return AbilityNotImplemented
def conviction():
return AbilityNotImplemented
return conviction, conviction, conviction,
@card("Death Stroke")
def death_stroke(card, abilities):
def death_stroke():
return AbilityNotImplemented
return death_stroke,
@card("Mindwarper")
def mindwarper(card, abilities):
def mindwarper():
return AbilityNotImplemented
def mindwarper():
return AbilityNotImplemented
return mindwarper, mindwarper,
@card("Silver Wyvern")
def silver_wyvern(card, abilities):
def silver_wyvern():
return AbilityNotImplemented
def silver_wyvern():
return AbilityNotImplemented
return silver_wyvern, silver_wyvern,
@card("Mind Peel")
def mind_peel(card, abilities):
def mind_peel():
return AbilityNotImplemented
def mind_peel():
return AbilityNotImplemented
return mind_peel, mind_peel,
@card("Scapegoat")
def scapegoat(card, abilities):
def scapegoat():
return AbilityNotImplemented
def scapegoat():
return AbilityNotImplemented
return scapegoat, scapegoat,
@card("Mind Games")
def mind_games(card, abilities):
def mind_games():
return AbilityNotImplemented
def mind_games():
return AbilityNotImplemented
return mind_games, mind_games,
@card("Flame Wave")
def flame_wave(card, abilities):
def flame_wave():
return AbilityNotImplemented
return flame_wave,
@card("Dungeon Shade")
def dungeon_shade(card, abilities):
def dungeon_shade():
        return AbilityNotImplemented
|
wateraccounting/wa
|
Collect/JRC/__init__.py
|
Python
|
apache-2.0
| 647
| 0.003091
|
# -*- coding: utf-8 -*-
"""
Authors: Tim Hessels
UNESCO-IHE 2017
Contact: [email protected]
Repository: https://github.com/wateraccounting/wa
Module: Collect/JRC
Description:
This module downloads JRC water occurrence data from http://storage.googleapis.com/global-surface-water/downloads/.
Use the JRC.Occurrence function to
download and create a water occurrence image in Gtiff format.
The data represents the period 1984-2015.

Examples:
from wa.Collect import JRC
JRC.Occurrence(Dir='C:/Temp3/', latlim=[41, 45], lonlim=[-8, -5])
"""
from .Occurrence import main as Occurrence
__all__ = ['Occurrence']
__version__ = '0.1'
|
the-zebulan/CodeWars
|
katas/kyu_7/supernatural.py
|
Python
|
mit
| 1,003
| 0
|
drunkenDoodling = {
'ghost': "Salt and iron, and don't forget to burn the corpse",
    'wendigo': 'Burn it to death',
'phoenix': 'Use the colt',
'angel': 'Use the angelic blade',
'werewolf': 'Silver knife or bullet to the heart',
    'shapeshifter': 'Silver knife or bullet to the heart',
'rugaru': 'Burn it alive',
'reaper': "If it's nasty, you should gank who controls it",
'demon': "Use Ruby's knife, or some Jesus-juice",
'vampire': 'Behead it with a machete',
'dragon': 'You have to find the excalibur for that',
'leviathan': 'Use some Borax, then kill Dick',
'witch': 'They are humans',
'djinn': "Stab it with silver knife dipped in a lamb's blood",
'pagan god': 'It depends on which one it is',
'skinwalker': 'A silver bullet will do it',
'jefferson starship': 'Behead it with a silver blade',
'ghoul': 'Behead it'
}
def bob(what):
return '{}, idjits!'.format(drunkenDoodling.get(
what, 'I have friggin no idea yet'))
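# Hedged examples of bob()'s behavior, derived from the table above:
#   bob('ghost')   # -> "Salt and iron, and don't forget to burn the corpse, idjits!"
#   bob('zombie')  # -> 'I have friggin no idea yet, idjits!'  (unknown keys fall back)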
|
Oksisane/RSS-Bot
|
Trolly-master/trolly/trelloobject.py
|
Python
|
gpl-3.0
| 3,944
| 0.000761
|
"""
Created on 9 Nov 2012
@author: plish
"""
class TrelloObject(object):
"""
This class is a base object that should be used by all trello objects;
Board, List, Card, etc. It contains methods needed and used by all those
objects and masks the client calls as methods belonging to the object.
"""
def __init__(self, trello_client):
"""
A Trello client, Oauth of HTTP client is required for each object.
"""
super(TrelloObject, self).__init__()
self.client = trello_client
def fetch_json(self, uri_path, http_method='GET', query_params=None, body=None, headers=None):
return self.client.fetch_json(
uri_path=uri_path,
http_method=http_method,
query_params=query_params or {},
body=body,
headers=headers or {}
)
def get_organisations_json(self, base_uri):
return self.fetch_json(base_uri + '/organization')
def get_boards_json(self, base_uri):
return self.fetch_json(base_uri + '/boards')
def get_board_json(self, base_uri):
return self.fetch_json(base_uri + '/board')
def get_lists_json(self, base_uri):
return self.fetch_json(base_uri + '/lists')
def get_list_json(self, base_uri):
return self.fetch_json(base_uri + '/list')
def get_cards_json(self, base_uri):
return self.fetch_json(base_uri + '/cards')
def get_checklist_json(self, base_uri):
return self.fetch_json(base_uri + '/checklists')
def get_members_json(self, base_uri):
return self.fetch_json(base_uri + '/members')
def create_organisation(self, organisation_json, **kwargs):
return self.client.create_organisation(organisation_json, **kwargs)
def create_board(self, board_json, **kwargs):
return self.client.create_board(board_json, **kwargs)
def create_list(self, list_json, **kwargs):
return self.client.create_list(list_json, **kwargs)
def create_card(self, card_json, **kwargs):
return self.client.create_card(card_json, **kwargs)
def create_checklist(self, checklist_json, **kwargs):
return self.client.create_checklist(checklist_json, **kwargs)
def create_member(self, member_json, **kwargs):
return self.client.create_member(member_json, **kwargs)
# Deprecated method names
def fetchJson(self, uri_path, http_method='GET', query_params=None, body=None, headers=None):
return self.fetch_json(uri_path, http_method, query_params or {}, body, headers or {})
def getOrganisationsJson(self, base_uri):
return self.get_organisations_json(base_uri)
def getBoardsJson(self, base_uri):
        return self.get_boards_json(base_uri)
def getBoardJson(self, base_uri):
return self.get_board_json(base_uri)
def getListsJson(self, base_uri):
return self.get_lists_json(base_uri)
def getListJson(self, base_uri):
return self.get_list_json(base_uri)
def getCardsJson(self, base_uri):
return self.get_cards_json(base_uri)
def getChecklistsJson(self, base_uri):
        return self.get_checklist_json(base_uri)
def getMembersJson(self, base_uri):
return self.get_members_json(base_uri)
def createOrganisation(self, organisation_json, **kwargs):
return self.create_organisation(organisation_json, **kwargs)
def createBoard(self, board_json, **kwargs):
return self.create_board(board_json, **kwargs)
def createList(self, list_json, **kwargs):
return self.create_list(list_json, **kwargs)
def createCard(self, card_json, **kwargs):
return self.create_card(card_json, **kwargs)
def createChecklist(self, checklist_json, **kwargs):
return self.create_checklist(checklist_json, **kwargs)
def createMember(self, member_json, **kwargs):
return self.create_member(member_json, **kwargs)
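# Hedged usage sketch: a concrete entity would subclass TrelloObject and lean
# on the helpers above so each HTTP call reads like a method of the object.
# `_ExampleBoard` and `board_id` are illustrative, not part of this module.
class _ExampleBoard(TrelloObject):
    def __init__(self, trello_client, board_id):
        super(_ExampleBoard, self).__init__(trello_client)
        self.base_uri = '/boards/%s' % board_id

    def lists(self):
        # delegates to the underlying client via the masked helper
        return self.get_lists_json(self.base_uri)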
|
phasis/phasis
|
phasis/base/srctblcf.py
|
Python
|
gpl-2.0
| 5,891
| 0.016466
|
# -*- coding: iso-8859-1 -*-
#
# Copyright (C) 2001 - 2020 Massimo Gerardi all rights reserved.
#
# Author: Massimo Gerardi [email protected]
#
# Copyright (c) 2020 Qsistemi.com. All rights reserved.
#
# Viale Giorgio Ribotta, 11 (Roma)
# 00144 Roma (RM) - Italy
# Phone: (+39) 06.87.163
#
#
# See the COPYING file for the software licensing terms.
#
# www.qsistemi.com - [email protected]
import wx
from cfg import *
import cfg
ttl=_("Ricerca Anagrafica")
def create(parent,cnt):
return SrcTblCF(parent,cnt)
#---------------------------------------------------------------------------
class SrcTblCF(wx.MiniFrame):
def __init__(self, prnt, cnt):
wx.MiniFrame.__init__(self, id=wx.NewId(), name='',
parent=prnt, pos=wx.Point(0, 0),
style=wx.DEFAULT_FRAME_STYLE, title=ttl)
self.SetIcon(wx.Icon(cfg.path_img+"/null.ico", wx.BITMAP_TYPE_ICO))
#self.SetFont(wx.Font(12, wx.SWISS, wx.NORMAL, wx.NORMAL, False))
self.cnt = cnt
self.tcpart = cnt[0]
Nid = wx.NewId()
self.CnAz = prnt.CnAz
#self.font = self.GetFont()
self.__FRM__ = prnt.__FRM__
self.__MDI__ = wx.GetApp().GetPhasisMdi()
self.font=self.__MDI__.font
self.SetFont(self.font)
self.pnl = wx.Panel(id = wx.NewId(), name = 'panel',
parent = self, pos = wx.Point(0, 0))
self.pnl.SetFont(self.font)
self.lc = wx.ListCtrl(self.pnl , Nid,
wx.DLG_PNT(self, 5,5), wx.DLG_SZE(self.pnl , 335,110),
wx.LC_REPORT|wx.LC_HRULES|wx.LC_VRULES)
self.lc.InsertColumn(0, _("Codice"))
self.lc.InsertColumn(1, _("Rag. Soc.1 Cognome"))
self.lc.InsertColumn(2, _("Rag. Soc.2 Nome"))
self.lc.InsertColumn(3, _("Indirizzo"))
self.lc.InsertColumn(4, _("Telefono"))
self.lc.InsertColumn(5, _("Ufficio"))
self.lc.InsertColumn(6, _("Fax"))
self.lc.SetColumnWidth(0, wx.DLG_SZE(self, 30,-1).width)
self.lc.SetColumnWidth(1, wx.DLG_SZE(self, 70,-1).width)
self.lc.SetColumnWidth(2, wx.DLG_SZE(self, 70,-1).width)
self.lc.SetColumnWidth(3, wx.DLG_SZE(self, 100,-1).width)
self.lc.SetColumnWidth(4, wx.DLG_SZE(self, 60,-1).width)
self.lc.SetColumnWidth(5, wx.DLG_SZE(self, 60,-1).width)
self.lc.SetColumnWidth(6, wx.DLG_SZE(self, 60,-1).width)
#self.lc.SetFont(self.font)
rowlc=0
codcf = self.cnt[4].GetValue()
val=self.cnt[2].GetValue().upper()
sql = """ SELECT * FROM tblcf WHERE rag_soc1 like '%s'
AND t_cpart = '%s' AND CODCF = '%s'
ORDER BY rag_soc1 DESC """
valueSql = '%'+val.title()+'%', self.tcpart, int(codcf)
try:
cr = self.CnAz.cursor ()
cr.execute(sql % valueSql)
rows = cr.fetchall()
            for row in rows:
for rowlc in range(1):
row_lc = self.lc.GetItemCount()
t_cpart = str(row[0])
cod = str(row[1])
ragsoc1 = str(row[3]).title()
                    ragsoc2 = str(row[4]).title()
indiriz = str(row[6]).title()
tel_abi = str(row[12])
tel_uff = str(row[13])
fax = str(row[14])
self.lc.InsertStringItem(rowlc, cod)
self.lc.SetStringItem(rowlc, 1, ragsoc1)
self.lc.SetStringItem(rowlc, 2, ragsoc2)
self.lc.SetStringItem(rowlc, 3, indiriz)
self.lc.SetStringItem(rowlc, 4, tel_abi)
self.lc.SetStringItem(rowlc, 5, tel_uff)
self.lc.SetStringItem(rowlc, 6, fax)
self.lc.SetItemData(1,0)
except StandardError, msg:
self.__FRM__.MsgErr("scrtblcf"," Cerca tblcf Error %s" % (msg))
self.CnAz.commit()
self.currentItem = 0
wx.StaticLine(self.pnl , -1, wx.DLG_PNT(self.pnl , 5,115),
wx.DLG_SZE(self.pnl , 300,-1))
self.ok = wx.Button(self.pnl , Nid, cfg.vcconf,
wx.DLG_PNT(self.pnl , 195,120),
wx.DLG_SZE(self.pnl,cfg.btnSzeLH,cfg.btnSzeV))
self.canc = wx.Button(self.pnl , Nid, cfg.vccanc,
wx.DLG_PNT(self.pnl , 255,120),
wx.DLG_SZE(self.pnl,cfg.btnSzeLH,cfg.btnSzeV))
for x in self.pnl.GetChildren(): x.SetFont(self.font)
self.canc.SetFocus()
#self.SetFont(self.font)
box_sizer = wx.BoxSizer(wx.VERTICAL)
box_sizer.Add(self.pnl, 0, wx.EXPAND|wx.ALL,0)
self.SetAutoLayout(1)
self.SetSizer(box_sizer)
box_sizer.Fit(self)
self.canc.Bind(wx.EVT_BUTTON, self.Close)
self.ok.Bind(wx.EVT_BUTTON, self.Ok)
self.lc.Bind(wx.EVT_LEFT_DCLICK, self.DblClick)
self.lc.Bind(wx.EVT_LIST_ITEM_ACTIVATED, self.LstAct)
self.lc.Bind(wx.EVT_LIST_ITEM_SELECTED, self.LstSlct)
self.Bind(wx.EVT_CLOSE, self.Close)
def Ok(self, event):
self.DblClick(self.currentItem)
def Close(self, event):
#wx.GetApp().GetPhasisMdi().CloseTabObj(self)
self.Destroy()
def getColTxt(self, index, col):
item = self.lc.GetItem(index, col)
return item.GetText()
def DblClick(self, event):
self.cnt[1].SetValue(self.lc.GetItemText(self.currentItem))
self.cnt[2].SetValue(self.getColTxt(self.currentItem, 1))
self.cnt[3](self)
self.Destroy()
def LstSlct(self, event):
self.currentItem = event.m_itemIndex
def LstAct(self, event):
self.currentItem = event.m_itemIndex
self.DblClick(self)
|
sysadminmatmoz/ingadhoc
|
multi_store/__openerp__.py
|
Python
|
agpl-3.0
| 2,617
| 0.003821
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2015 ADHOC SA (http://www.adhoc.com.ar)
# All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
    'name': 'Multi Store',
'version': '8.0.1.0.0',
'category': 'Accounting',
'sequence': 14,
'summary': '',
'description': """
Multi Store
===========
The main purpose of this module is to restrict journals access for users on different stores.
This module add a new concept "stores" in some point similar to multicompany.
Similar to multicompany:
* User can have multiple stores available (store_ids)
* User can be active only in one store (store_id) which can be set up in his own preferences
* There is a group "multi store" that gives users the availability to see multi store fields
This module also adds a store_id field on journal:
* If store_id = False then journal can be seen by everyone
* If store_id is set, then journal can be seen by users on that store and parent stores
It also restricts editing, creation and deletion of account.move, account.invoice and account.voucher,
using the same logic as for journals. We do not restrict "read" access on these models because users may still need those documents, for example to see a partner's amounts due.
""",
'author': 'ADHOC SA',
'website': 'www.adhoc.com.ar',
'license': 'AGPL-3',
'images': [
],
'depends': [
'account_voucher',
],
'data': [
'views/res_store_view.xml',
'views/res_users_view.xml',
'views/account_view.xml',
'security/multi_store_security.xml',
'security/ir.model.access.csv',
],
'demo': [
],
'test': [
],
'installable': True,
'auto_install': False,
'application': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
janglapuk/xiongmai-cam-api
|
example.py
|
Python
|
mit
| 660
| 0
|
from xmcam import *
from xmconst import *
from time import sleep
CAM_IP = '192.168.1.10'
CAM_PORT = 34567

if __name__ == '__main__':
xm = XMCam(CAM_IP, CAM_PORT, 'admin', 'admin')
login = xm.cmd_login()
print(login)
print(xm.cmd_system_function())
print(xm.cmd_system_info())
print(xm.cmd_channel_title())
print(xm.cmd_OEM_info())
print(xm.cmd_storage_info())
print(xm.cmd_users())
    print(xm.cmd_ptz_control(PTZ_LEFT))
sleep(1)
print(xm.cmd_ptz_control(PTZ_LEFT, True))
cfg = xm.cmd_config_export('export.cfg')
print('Config ==>', cfg)
snap = xm.cmd_snap('test.jpg')
print('SNAP ==>', snap)
|
ahmadiga/min_edx
|
common/djangoapps/status/models.py
|
Python
|
agpl-3.0
| 2,090
| 0.002871
|
"""
Store status messages in the database.
"""
from django.db import models
from django.contrib import admin
from django.core.cache import cache
from xmodule_django.models import CourseKeyField
from config_models.models import ConfigurationModel
from config_models.admin import ConfigurationModelAdmin
class GlobalStatusMessage(ConfigurationModel):
"""
Model that represents the current status message.
"""
message = models.TextField(blank=True, null=True)
def full_message(self, course_key):
""" Returns the full status message, including any course-specific status messages.
|
"""
cache_key = "status_message.{course_id}".format(course_id=unicode(course_key))
if cache.get(cache_key):
return cache.get(cache_key)
msg = self.message
if course_key:
try:
course_message = self.coursemessage_set.get(course_key=course_key)
# Don't add the message if course_message is blank.
if course_message:
msg = u"{} <br /> {}".format(msg, course_message.message)
|
except CourseMessage.DoesNotExist:
# We don't have a course-specific message, so pass.
pass
cache.set(cache_key, msg)
return msg
def __unicode__(self):
return "{} - {} - {}".format(self.change_date, self.enabled, self.message)
class CourseMessage(models.Model):
"""
Model that allows the user to specify messages for individual courses.
    This is not a ConfigurationModel because ConfigurationModel is not designed to support multiple configurations at once,
which would be problematic if separate courses need separate error messages.
"""
global_message = models.ForeignKey(GlobalStatusMessage)
course_key = CourseKeyField(max_length=255, blank=True, db_index=True)
message = models.TextField(blank=True, null=True)
def __unicode__(self):
return unicode(self.course_key)
admin.site.register(GlobalStatusMessage, ConfigurationModelAdmin)
admin.site.register(CourseMessage)
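# Hedged usage sketch: resolving the banner text for a course page, assuming
# the usual ConfigurationModel.current() accessor from config_models and an
# opaque `course_key` (a CourseKey instance); results are cached per course
# under "status_message.<course_id>".
#
#   banner = GlobalStatusMessage.current().full_message(course_key)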
|
kevin-intel/scikit-learn
|
benchmarks/bench_20newsgroups.py
|
Python
|
bsd-3-clause
| 3,292
| 0
|
from time import time
import argparse
import numpy as np
from sklearn.dummy import DummyClassifier
from sklearn.datasets import fetch_20newsgroups_vectorized
from sklearn.metrics import accuracy_score
from sklearn.utils.validation import check_array
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import MultinomialNB
ESTIMATORS = {
"dummy": DummyClassifier(),
"random_forest": RandomForestClassifier(max_features="sqrt",
min_samples_split=10),
"extra_trees": ExtraTreesClassifier(max_features="sqrt",
min_samples_split=10),
"logistic_regression": LogisticRegression(),
"naive_bayes": MultinomialNB(),
"adaboost": AdaBoostClassifier(n_estimators=10),
}
###############################################################################
# Data
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-e', '--estimators', nargs="+", required=True,
choices=ESTIMATORS)
args = vars(parser.parse_args())
data_train = fetch_20newsgroups_vectorized(subset="train")
data_test = fetch_20newsgroups_vectorized(subset="test")
X_train = check_array(data_train.data, dtype=np.float32,
accept_sparse="csc")
X_test = check_array(data_test.data, dtype=np.float32, accept_sparse="csr")
y_train = data_train.target
y_test = data_test.target
print("20 newsgroups")
print("=============")
print(f"X_train.shape = {X_train.shape}")
print(f"X_train.format = {X_train.format}")
print(f"X_train.dtype = {X_train.dtype}")
print(f"X_train density = {X_train.nnz / np.product(X_train.shape)}")
print(f"y_train {y_train.shape}")
print(f"X_test {X_test.shape}")
print(f"X_test.format = {X_test.format}")
print(f"X_test.dtype = {X_test.dtype}")
print(f"y_test {y_test.shape}")
print()
print("Classifier Training")
print("===================")
accuracy, train_time, test_time = {}, {}, {}
for name in sorted(args["estimators"]):
clf = ESTIMATORS[name]
try:
clf.set_params(random_state=0)
except (TypeError, ValueError):
pass
print("Training %s ... " % name, end="")
t0 = time()
clf.fit(X_train, y_train)
train_time[name] = time() - t0
t0 = time()
y_pred = clf.predict(X_test)
        test_time[name] = time() - t0
accuracy[name] = accuracy_score(y_test, y_pred)
print("done")
print()
print("Classification performance:")
print("===========================")
print()
print("%s %s %s %s" % ("Classifier ", "train-time", "test-time",
|
"Accuracy"))
print("-" * 44)
for name in sorted(accuracy, key=accuracy.get):
print("%s %s %s %s" % (name.ljust(16),
("%.4fs" % train_time[name]).center(10),
("%.4fs" % test_time[name]).center(10),
("%.4f" % accuracy[name]).center(10)))
print()
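# Hedged invocation sketch (estimator picks are illustrative):
#   python bench_20newsgroups.py -e logistic_regression naive_bayes
# The script downloads/caches the vectorized 20 newsgroups data on first run,
# then prints per-estimator train time, test time and accuracy.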
|
clarkerubber/irwin
|
modules/queue/IrwinQueue.py
|
Python
|
agpl-3.0
| 1,421
| 0.004222
|
"""Queue item for deep analysis by irwin"""
from default_imports import *
from modules.queue.Origin import Origin
from modules.game.Game import PlayerID
from datetime import datetime
import pymongo
from pymongo.collection import Collection
IrwinQueue = NamedTuple('IrwinQueue', [
    ('id', PlayerID),
('origin', Origin)
])
class IrwinQueueBSONHandler:
@staticmethod
def reads(bson: Dict) -> IrwinQueue:
return IrwinQueue(
id=bson['_id'],
origin=bson['origin'])
@staticmethod
    def writes(irwinQueue: IrwinQueue) -> Dict:
return {
'_id': irwinQueue.id,
'origin': irwinQueue.origin,
'date': datetime.now()
}
class IrwinQueueDB(NamedTuple('IrwinQueueDB', [
('irwinQueueColl', Collection)
])):
def write(self, irwinQueue: IrwinQueue):
self.irwinQueueColl.update_one(
{'_id': irwinQueue.id},
{'$set': IrwinQueueBSONHandler.writes(irwinQueue)},
upsert=True)
def removePlayerId(self, playerId: PlayerID):
self.irwinQueueColl.remove({'_id': playerId})
def nextUnprocessed(self) -> Opt[IrwinQueue]:
irwinQueueBSON = self.irwinQueueColl.find_one_and_delete(
filter={},
sort=[("date", pymongo.ASCENDING)])
return None if irwinQueueBSON is None else IrwinQueueBSONHandler.reads(irwinQueueBSON)
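# Hedged usage sketch: wiring IrwinQueueDB to a pymongo collection and
# draining it oldest-first. The connection string and names are illustrative
# assumptions, not part of this module.
#
#   from pymongo import MongoClient
#   coll = MongoClient('mongodb://localhost:27017')['irwin']['irwinQueue']
#   db = IrwinQueueDB(coll)
#   db.write(IrwinQueue(id='some-player-id', origin='report'))
#   item = db.nextUnprocessed()   # pops and returns the oldest entry, or None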
|
nlapalu/SDDetector
|
tests/test_GffGeneParser.py
|
Python
|
gpl-3.0
| 1,549
| 0.020013
|
#!/usr/bin/env python
import unittest
from SDDetector.Entities.Gene import Gene
from SDDetector.Entities.Transcript import Transcript
from SDDetector.Entities.CDS import CDS
from SDDetector.Parser.Gff.GffGeneParser import GffGeneParser
class TestGffGeneParser(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_getAllGenes(self):
"""Test getAllGenes method"""
iGffGeneParser = GffGeneParser("test-data/gene.gff3")
lGenes = [Gene('G00001','Chr1',23988,24919,-1,[Transcript('G00001.1','Chr1',23988,24919,-1,'G00001',[CDS('G00001.1_cds_1','Chr1',23988,24083, -1, 'G00001.1'),CDS('G00001.1_cds_1','Chr1',24274,24427,-1,'G00001.1'),CDS('G00001.1_cds_1','Chr1',24489,24919,-1,'G00001.1')])])]
self.assertEqual(iGffGeneParser.getAllGenes()[0],lGenes[0])
def test_getAllGenes(self):
"""Test getAllGenes method"""
# iGffGeneParser = GffGeneParser("/home/nlapalu/Workspace/Github/SDDetector/data/arabidopsis/TAIR10.new.gff3")
pass
        # lGenes = [Gene('G00001','Chr1',23988,24919,-1,[Transcript('G00001.1','Chr1',23988,24919,-1,'G00001',[CDS('G00001.1_cds_1','Chr1',23988,24083, -1, 'G00001.1'),CDS('G00001.1_cds_1','Chr1',24274,24427,-1,'G00001.1'),CDS('G00001.1_cds_1','Chr1',24489,24919,-1,'G00001.1')])])]
        # self.assertEqual(iGffGeneParser.getAllGenes()[0],lGenes[0])
if __name__ == "__main__":
suite = unittest.TestLoader().loadTestsFromTestCase(TestGffGeneParser)
unittest.TextTestRunner(verbosity=2).run(suite)
|
cupy/cupy
|
cupy/_padding/pad.py
|
Python
|
mit
| 29,422
| 0
|
import numbers
import numpy
import cupy
###############################################################################
# Private utility functions.
def _round_if_needed(arr, dtype):
"""Rounds arr inplace if the destination dtype is an integer.
"""
if cupy.issubdtype(dtype, cupy.integer):
arr.round(out=arr) # bug in round so use rint (cupy/cupy#2330)
def _slice_at_axis(sl, axis):
"""Constructs a tuple of slices to slice an array in the given dimension.
Args:
sl(slice): The slice for the given dimension.
axis(int): The axis to which `sl` is applied. All other dimensions are
left "unsliced".
Returns:
tuple of slices: A tuple with slices matching `shape` in length.
"""
return (slice(None),) * axis + (sl,) + (Ellipsis,)
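# Worked example (hedged, illustrative values): for axis=1,
#   _slice_at_axis(slice(1, 3), 1) == (slice(None), slice(1, 3), Ellipsis)
# so indexing an array with it is equivalent to arr[:, 1:3, ...].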
def _view_roi(array, original_area_slice, axis):
"""Gets a view of the current region of interest during iterative padding.
When padding multiple dimensions iteratively corner values are
unnecessarily overwritten multiple times. This function reduces the
working area for the first dimensions so that corners are excluded.
Args:
array(cupy.ndarray): The array with the region of interest.
original_area_slice(tuple of slices): Denotes the area with original
values of the unpadded array.
axis(int): The currently padded dimension assuming that `axis` is padded
before `axis` + 1.
Returns:
"""
axis += 1
sl = (slice(None),) * axis + original_area_slice[axis:]
return array[sl]
def _pad_simple(array, pad_width, fill_value=None):
"""Pads an array on all sides with either a constant or u
|
ndefined values.
Args:
array(cupy.ndarray): Array to grow.
pad_width(sequence of tuple[int, int]): Pad width on both sides for each
dimension in `arr`.
        fill_value(scalar, optional): If provided the padded area is
            filled with this value, otherwise the pad area is left undefined.
(Default value = None)
"""
# Allocate grown array
new_shape = tuple(
left + size + right
for size, (left, right) in zip(array.shape, pad_width)
)
order = 'F' if array.flags.fnc else 'C' # Fortran and not also C-order
padded = cupy.empty(new_shape, dtype=array.dtype, order=order)
if fill_value is not None:
padded.fill(fill_value)
# Copy old array into correct space
original_area_slice = tuple(
slice(left, left + size)
for size, (left, right) in zip(array.shape, pad_width)
)
padded[original_area_slice] = array
return padded, original_area_slice
def _set_pad_area(padded, axis, width_pair, value_pair):
"""Set an empty-padded area in given dimension.
"""
left_slice = _slice_at_axis(slice(None, width_pair[0]), axis)
padded[left_slice] = value_pair[0]
right_slice = _slice_at_axis(
slice(padded.shape[axis] - width_pair[1], None), axis
)
padded[right_slice] = value_pair[1]
def _get_edges(padded, axis, width_pair):
"""Retrieves edge values from an empty-padded array along a given axis.
Args:
padded(cupy.ndarray): Empty-padded array.
axis(int): Dimension in which the edges are considered.
width_pair((int, int)): Pair of widths that mark the pad area on both
sides in the given dimension.
"""
left_index = width_pair[0]
left_slice = _slice_at_axis(slice(left_index, left_index + 1), axis)
left_edge = padded[left_slice]
right_index = padded.shape[axis] - width_pair[1]
right_slice = _slice_at_axis(slice(right_index - 1, right_index), axis)
right_edge = padded[right_slice]
return left_edge, right_edge
def _get_linear_ramps(padded, axis, width_pair, end_value_pair):
"""Constructs linear ramps for an empty-padded array along a given axis.
Args:
padded(cupy.ndarray): Empty-padded array.
axis(int): Dimension in which the ramps are constructed.
width_pair((int, int)): Pair of widths that mark the pad area on both
sides in the given dimension.
end_value_pair((scalar, scalar)): End values for the linear ramps which
form the edge of the fully padded array. These values are included in
the linear ramps.
"""
edge_pair = _get_edges(padded, axis, width_pair)
left_ramp = cupy.linspace(
start=end_value_pair[0],
# squeeze axis replaced by linspace
stop=edge_pair[0].squeeze(axis),
num=width_pair[0],
endpoint=False,
dtype=padded.dtype,
axis=axis,
)
right_ramp = cupy.linspace(
start=end_value_pair[1],
# squeeze axis replaced by linspace
stop=edge_pair[1].squeeze(axis),
num=width_pair[1],
endpoint=False,
dtype=padded.dtype,
axis=axis,
)
# Reverse linear space in appropriate dimension
right_ramp = right_ramp[_slice_at_axis(slice(None, None, -1), axis)]
return left_ramp, right_ramp
def _get_stats(padded, axis, width_pair, length_pair, stat_func):
"""Calculates a statistic for an empty-padded array along a given axis.
Args:
padded(cupy.ndarray): Empty-padded array.
axis(int): Dimension in which the statistic is calculated.
width_pair((int, int)): Pair of widths that mark the pad area on both
sides in the given dimension.
length_pair(2-element sequence of None or int): Gives the number of
values in valid area from each side that is taken into account when
calculating the statistic. If None the entire valid area in `padded`
is considered.
stat_func(function): Function to compute statistic. The expected
signature is
``stat_func(x: ndarray, axis: int, keepdims: bool) -> ndarray``.
"""
# Calculate indices of the edges of the area with original values
left_index = width_pair[0]
right_index = padded.shape[axis] - width_pair[1]
# as well as its length
max_length = right_index - left_index
# Limit stat_lengths to max_length
left_length, right_length = length_pair
if left_length is None or max_length < left_length:
left_length = max_length
if right_length is None or max_length < right_length:
right_length = max_length
# Calculate statistic for the left side
left_slice = _slice_at_axis(
slice(left_index, left_index + left_length), axis
)
left_chunk = padded[left_slice]
left_stat = stat_func(left_chunk, axis=axis, keepdims=True)
_round_if_needed(left_stat, padded.dtype)
if left_length == right_length == max_length:
# return early as right_stat must be identical to left_stat
return left_stat, left_stat
# Calculate statistic for the right side
right_slice = _slice_at_axis(
slice(right_index - right_length, right_index), axis
)
right_chunk = padded[right_slice]
right_stat = stat_func(right_chunk, axis=axis, keepdims=True)
_round_if_needed(right_stat, padded.dtype)
return left_stat, right_stat
def _set_reflect_both(padded, axis, width_pair, method, include_edge=False):
"""Pads an `axis` of `arr` using reflection.
Args:
padded(cupy.ndarray): Input array of arbitrary shape.
axis(int): Axis along which to pad `arr`.
width_pair((int, int)): Pair of widths that mark the pad area on both
sides in the given dimension.
method(str): Controls method of reflection; options are 'even' or 'odd'.
include_edge(bool, optional): If true, edge value is included in
reflection, otherwise the edge value forms the symmetric axis to the
reflection. (Default value = False)
"""
left_pad, right_pad = width_pair
old_length = padded.shape[axis] - right_pad - left_pad
if include_edge:
# Edge is included, we need to offset the pad amount by 1
edge_offset = 1
else:
edge_offset = 0 # Edge is not included, no need to offset pad amount
old_length -= 1 # but must be omitted from the chunk
if left_pad > 0:
# Pad with reflected values on left side:
|
dimagi/commcare-hq
|
corehq/ex-submodules/pillowtop/management/commands/update_es_settings.py
|
Python
|
bsd-3-clause
| 2,244
| 0.003119
|
from django.core.management.base import BaseCommand, CommandError
from corehq.elastic import get_es_new
from corehq.pillows.utils import get_all_expected_es_indices
class Command(BaseCommand):
help = "Update dynamic settings for existing elasticsearch indices."
def add_arguments(self, parser):
parser.add_argument(
'--noinput',
action='store_true',
dest='noinput',
default=False,
help='Skip important confirmation warnings.'
)
def handle(self, **options):
noinput = options.pop('noinput')
es_indices = list(get_all_expected_es_indices())
to_update = []
es = get_es_new()
for index_info in es_indices:
old_settings = es.indices.get_settings(index=index_info.index)
old_number_of_replicas = int(
old_settings[index_info.index]['settings']['index']['number_of_replicas']
)
new_number_of_replicas = index_info.meta['settings']['number_of_replicas']
if old_number_of_replicas != new_number_of_replicas:
print("{} [{}]:\n Number of replicas changing from {!r} to {!r}".format(
index_info.alias, index_info.index, old_number_of_replicas, new_number_of_replicas))
to_update.append((index_info, {
'number_of_replicas': new_number_of_replicas,
}))
if not to_update:
print("There is nothing to update.")
return
if (noinput or _confirm(
"Confirm that you want to update all the settings above?")):
            for index_info, settings in to_update:
                mapping_res = es.indices.put_settings(index=index_info.index, body=settings)
if mapping_res.get('acknowledged', False):
print("{} [{}]:\n Index settings successfully updated".format(
index_info.alias, index_info.index))
else:
                    print(mapping_res)
def _confirm(message):
if input(
'{} [y/n]'.format(message)
).lower() == 'y':
return True
else:
raise CommandError('abort')
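# Hedged invocation sketch (illustrative):
#   python manage.py update_es_settings            # prints pending changes, then prompts
#   python manage.py update_es_settings --noinput  # applies without confirmation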
|
deepmind/dm_control
|
dm_control/entities/manipulators/kinova/__init__.py
|
Python
|
apache-2.0
| 848
| 0
|
# Copyright 2019 The dm_control Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Composer models of Kinova robots."""
from dm_control.entities.manipulators.kinova.jaco_arm import JacoArm
from dm_control.entities.manipulators.kinova.jaco_hand import JacoHand
|
dlazz/ansible
|
lib/ansible/modules/cloud/google/gcp_compute_target_vpn_gateway.py
|
Python
|
gpl-3.0
| 11,232
| 0.003205
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Google
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
# ----------------------------------------------------------------------------
#
# *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
#
# ----------------------------------------------------------------------------
#
# This file is automatically generated by Magic Modules and manual
# changes will be clobbered when the file is regenerated.
#
# Please read more about how to change this file at
# https://www.github.com/GoogleCloudPlatform/magic-modules
#
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
__metaclass__ = type
################################################################################
# Documentation
################################################################################
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ["preview"],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gcp_compute_target_vpn_gateway
description:
- Represents a VPN gateway running in GCP. This virtual device is managed by Google,
but used only by you.
short_description: Creates a GCP TargetVpnGateway
version_added: 2.7
author: Google Inc. (@googlecloudplatform)
requirements:
- python >= 2.6
- requests >= 2.18.4
- google-auth >= 1.3.0
options:
state:
description:
- Whether the given object should exist in GCP
choices:
- present
- absent
default: present
description:
description:
- An optional description of this resource.
required: false
name:
description:
- Name of the resource. Provided by the client when the resource is created. The
name must be 1-63 characters long, and comply with RFC1035. Specifically, the
name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`
which means the first character must be a lowercase letter, and all following
characters must be a dash, lowercase letter, or digit, except the last character,
which cannot be a dash.
required: true
network:
description:
- The network this VPN gateway is accepting traffic for.
- 'This field represents a link to a Network resource in GCP. It can be specified
in two ways. You can add `register: name-of-resource` to a gcp_compute_network
task and then set this network field to "{{ name-of-resource }}" Alternatively,
you can set this network to a dictionary with the selfLink key where the value
is the selfLink of your Network'
required: true
region:
description:
- The region this gateway should sit in.
required: true
extends_documentation_fragment: gcp
notes:
- 'API Reference: U(https://cloud.google.com/compute/docs/reference/rest/v1/targetVpnGateways)'
'''
EXAMPLES = '''
- name: create a address
gcp_compute_address:
name: "address-vpngateway"
region: us-west1
project: "{{ gcp_project }}"
auth_kind: "{{ gcp_cred_kind }}"
service_account_file: "{{ gcp_cred_file }}"
state: present
register: address
- name: create a network
gcp_compute_network:
name: "network-vpngateway"
project: "{{ gcp_project }}"
auth_kind: "{{ gcp_cred_kind }}"
service_account_file: "{{ gcp_cred_file }}"
state: present
register: network
- name: create a target vpn gateway
gcp_compute_target_vpn_gateway:
name: "test_object"
region: us-west1
network: "{{ network }}"
project: "test_project"
auth_kind: "serviceaccount"
service_account_file: "/tmp/auth.pem"
state: present
'''
RETURN = '''
creationTimestamp:
description:
- Creation timestamp in RFC3339 text format.
returned: success
type: str
description:
description:
- An optional description of this resource.
returned: success
type: str
name:
description:
- Name of the resource. Provided by the client when the resource is created. The
name must be 1-63 characters long, and comply with RFC1035. Specifically, the
name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`
which means the first character must be a lowercase letter, and all following
characters must be a dash, lowercase letter, or digit, except the last character,
which cannot be a dash.
returned: success
type: str
id:
description:
- The unique identifier for the resource.
returned: success
type: int
network:
description:
- The network this VPN gateway is accepting traffic for.
returned: success
type: dict
tunnels:
description:
- A list of references to VpnTunnel resources associated to this VPN gateway.
returned: success
type: list
forwardingRules:
description:
- A list of references to the ForwardingRule resources associated to this VPN gateway.
returned: success
type: list
region:
description:
- The region this gateway should sit in.
returned: success
type: str
'''
################################################################################
# Imports
################################################################################
from ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest, replace_resource_dict
import json
import time
################################################################################
# Main
################################################################################
def main():
"""Main function"""
module = GcpModule(
argument_spec=dict(
state=dict(default='present', choices=['present', 'absent'], type='str'),
description=dict(type='str'),
name=dict(required=True, type='str'),
network=dict(required=True, type='dict'),
region=dict(required=True, type='str')
)
)
if not module.params['scopes']:
module.params['scopes'] = ['https://www.googleapis.com/auth/compute']
state = module.params['state']
kind = 'compute#targetVpnGateway'
fetch = fetch_resource(module, self_link(module), kind)
changed = False
if fetch:
if state == 'present':
if is_different(module, fetch):
update(module, self_link(module), kind)
fetch = fetch_resource(module, self_link(module), kind)
changed = True
else:
delete(module, self_link(module), kind)
fetch = {}
changed = True
else:
if state == 'present':
fetch = create(module, collection(module), kind)
changed = True
else:
fetch = {}
fetch.update({'changed': changed})
module.exit_json(**fetch)
def create(module, link, kind):
auth = GcpSession(module, 'compute')
return wait_for_operation(module, auth.post(link, resource_to_request(module)))
def update(module, link, kind):
module.fail_json(msg="TargetVpnGateway cannot be edited")
def delete(module, link, kind):
auth = GcpSession(module, 'compute')
return wait_for_operation(module, auth.delete(link))
def resource_to_request(module):
request = {
u'kind': 'compute#targetVpnGateway',
u'description': module.params.get('description'),
u'name': module.params.get('name'),
u'network': replace_resource_dict(module.params.get(u'network', {}), 'selfLink')
}
return_vals = {}
for k, v in request.items():
if v or v is False:
return_vals[k] = v
return return_vals
def fetch_resource(module, link, kind, allow_not_found=True):
auth = GcpSession(module, 'compute')
return return_if_object(module, auth.get(link), kind, allow_not_found)
def self_link(module):
return "https://www.googleapis.com/compute/v1/projects/{project}/regions/{region}/targetVpnGateways/{name}".format(**module.params)
def collection(module):
return "https://www.googleapis.com/compute/v1/projects/{project}/regions/{regio
|
yencarnacion/jaikuengine
|
.google_appengine/google/appengine/api/modules/modules_stub.py
|
Python
|
apache-2.0
| 5,933
| 0.007079
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Stub implementation of the modules service."""
from google.appengine.api import apiproxy_stub
from google.appengine.api import request_info
from google.appengine.api.modules import modules_service_pb
from google.appengine.runtime import apiproxy_errors
class ModulesServiceStub(apiproxy_stub.APIProxyStub):
_ACCEPTS_REQUEST_ID = True
THREADSAFE = True
def __init__(self, request_data):
super(ModulesServiceStub, self).__init__('modules',
request_data=request_data)
def _GetModuleFromRequest(self, request, request_id):
dispatcher = self.request_data.get_dispatcher()
if request.has_module():
module = request.module()
else:
module = self.request_data.get_module(request_id)
return module, dispatcher
def _GetModuleAndVersionFromRequest(self, request, request_id):
module, dispatcher = self._GetModuleFromRequest(request, request_id)
if request.has_version():
version = request.version()
else:
version = self.request_data.get_version(request_id)
if version not in dispatcher.get_versions(module):
version = dispatcher.get_default_version(module)
return module, version, dispatcher
def _Dynamic_GetModules(self, request, response, request_id):
dispatcher = self.request_data.get_dispatcher()
for module in dispatcher.get_module_names():
response.add_module(module)
def _Dynamic_GetVersions(self, request, response, request_id):
module, dispatcher = self._GetModuleFromRequest(request, request_id)
try:
for version in dispatcher.get_versions(module):
response.add_version(version)
except request_info.ModuleDoesNotExistError:
raise apiproxy_errors.ApplicationError(
modules_service_pb.ModulesServiceError.INVALID_MODULE)
def _Dynamic_GetDefaultVersion(self, request, response, request_id):
module, dispatcher = self._GetModuleFromRequest(request, request_id)
try:
response.set_version(dispatcher.get_default_version(module))
except request_info.ModuleDoesNotExistError:
raise apiproxy_errors.ApplicationError(
modules_service_pb.ModulesServiceError.INVALID_MODULE)
def _Dynamic_GetNumInstances(self, request, response, request_id):
try:
module, version, dispatcher = self._GetModuleAndVersionFromRequest(
request, request_id)
response.set_instances(dispatcher.get_num_instances(module, version))
except (request_info.ModuleDoesNotExistError,
request_info.VersionDoesNotExistError,
request_info.NotSupportedWithAutoScalingError):
raise apiproxy_errors.ApplicationError(
modules_service_pb.ModulesServiceError.INVALID_VERSION)
def _Dynamic_SetNumInstances(self, request, response, request_id):
try:
module, version, dispatcher = self._GetModuleAndVersionFromRequest(
request, request_id)
dispatcher.set_num_instances(module, version, request.instances())
except (request_info.ModuleDoesNotExistError,
request_info.VersionDoesNotExistError,
request_info.NotSupportedWithAutoScalingError):
raise apiproxy_errors.ApplicationError(
modules_service_pb.ModulesServiceError.INVALID_VERSION)
def _Dynamic_StartModule(self, request, response, request_id):
module = request.module()
version = request.version()
dispatcher = self.request_data.get_dispatcher()
try:
dispatcher.start_module(module, version)
except (request_info.ModuleDoesNotExistError,
request_info.VersionDoesNotExistError,
request_info.NotSupportedWithAutoScalingError):
raise apiproxy_errors.ApplicationError(
modules_service_pb.ModulesServiceError.INVALID_VERSION)
except request_info.ModuleAlreadyStartedError:
raise apiproxy_errors.ApplicationError(
modules_service_pb.ModulesServiceError.UNEXPECTED_STATE)
def _Dynamic_StopModule(self, request, response, request_id):
try:
module, version, dispatcher = self._GetModuleAndVersionFromRequest(
request, request_id)
dispatcher.stop_module(module, version)
except (request_info.ModuleDoesNotExistError,
request_info.VersionDoesNotExistError,
request_info.NotSupportedWithAutoScalingError):
raise apiproxy_errors.ApplicationError(
modules_service_pb.ModulesServiceError.INVALID_VERSION)
except request_info.ModuleAlreadyStoppedError:
raise apiproxy_errors.ApplicationError(
modules_service_pb.ModulesServiceError.UNEXPECTED_STATE)
def _Dynamic_GetHostname(self, request, response, request_id):
if request.has_instance():
instance = request.instance()
else:
instance = None
try:
module, version, dispatcher = self._GetModuleAndVersionFromRequest(
request, request_id)
response.set_hostname(dispatcher.get_hostname(module, version, instance))
except (request_info.ModuleDoesNotExistError,
request_info.VersionDoesNotExistError):
raise apiproxy_errors.ApplicationError(
modules_service_pb.ModulesServiceError.INVALID_MODULE)
except request_info.InvalidInstanceIdError:
raise apiproxy_errors.ApplicationError(
modules_service_pb.ModulesServiceError.INVALID_INSTANCES)
|
msghens/pyADAP
|
adlib.py
|
Python
|
mit
| 8,216
| 0.041504
|
# -*- coding: utf-8 -*-
#
# adlib.py
#
# A lot of help from:
# http://marcitland.blogspot.com/2011/02/python-active-directory-linux.html
# import sys is my friend!
import sys
import logging
import ldap
from person import Person
#import netrc
import base64,zlib
import ldap.modlist as modlist
from secure import ADurl, adusername, adpassword
import time
#~ Create an AD connection with cleanup. Must be used with a
#~ 'with' statement
#~ usage: with ADconnection as adc
# setting module logger
logger = logging.getLogger('pyADAP.adlib')
class ADconnection(object):
def __enter__(self):
#LDAP Connection
try:
# Fix MS Issues
ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_NEVER)
ldap.set_option(ldap.OPT_REFERRALS,0)
self.ldap_connection = ldap.initialize(ADurl)
self.ldap_connection.simple_bind_s(adusername, adpassword)
except ldap.LDAPError, e:
sys.stderr.write('Error connecting to LDAP server: ' + str(e) + '\n')
# Needs to fail gracefully such as just dump to bit bucket
#sys.exit(1)
logger.info('Error connecting to LDAP server')
raise RuntimeError('Error connecting to LDAP server')
logger.debug("Connected to AD")
return self.ldap_connection
def __init__(self):
return None
def __exit__(self, type, value, traceback):
self.close()
def close(self):
logger.debug("Disconnecting from AD")
self.ldap_connection.unbind_s()
class adlib(object):
def __init__(self,imsperson):
        self.perrec = imsperson
#Base dn. Outside config????
self.base_dn = 'dc=sbcc,dc=local'
self.dn = None
self.inADFlag = None
def inAD(self,cn=None):
if cn is None:
cn=self.perrec.userid
        #instantiate class. Why? Who knows...
ad = ADconnection()
with ad as ldapconn:
try:
searchfilter = ('(&(objectCategory=person)(&(objectClass=user)(sAMAccountName=%s)))' % cn)
user_results = ldapconn.search_s(self.base_dn, ldap.SCOPE_SUBTREE,searchfilter)
                dn = user_results[0][0]
if dn is None:
return False
except ldap.LDAPError, error_message:
#print "error finding username: %S" % error_message
self.inADFlag = False
return False
except:
self.inADFlag = False
return False
self.inADFlag = True
return True
def chgPwd(self,cn=None):
if cn is None:
cn=self.perrec.userid
        #instantiate class. Why? Who knows...
ad = ADconnection()
with ad as ldapconn:
try:
searchfilter = ('(&(objectCategory=person)(&(objectClass=user)(sAMAccountName=%s)))' % cn)
logger.debug(searchfilter)
user_results=ldapconn.search_s(self.base_dn,ldap.SCOPE_SUBTREE,searchfilter)
logger.debug(user_results)
dn = user_results[0][0]
#~ print dn
if dn <> None:
#placeholder for logging
#print 'updating ' + user['username'],time.ctime()
adpass = ('"%s"' % self.perrec.password).encode("utf-16-le")
#adpass = base64.b64encode(adpass)
# Update Password
mod_attrs = [( ldap.MOD_REPLACE, 'unicodePwd', adpass ),( ldap.MOD_REPLACE, 'unicodePwd', adpass)]
# Update Role
mod_attrs.append( (ldap.MOD_REPLACE, 'employeeType', str(self.perrec.primaryRole)) )
#Update Knumber
mod_attrs.append( (ldap.MOD_REPLACE, 'employeeID', str(self.perrec.knumber)) )
mod_attrs.append( (ldap.MOD_REPLACE, 'employeeNumber', str(self.perrec.knumber).strip('K')) )
mod_attrs.append( (ldap.MOD_REPLACE, 'mail', str(self.perrec.userid) + '@pipeline.sbcc.edu') )
#Reenable user
#print user_results[0][1]['userAccountControl'][0]
UC = int(user_results[0][1]['userAccountControl'][0])
if UC & (1<<1):
UC = UC & ~(1 << 1)
UCattrib = (ldap.MOD_REPLACE, 'userAccountControl', str(UC))
#mod_attrs.append(UCattrib)
#print mod_attrs
ldapconn.modify_s( dn, mod_attrs )
logger.info('Updated password for %s', str(cn))
#work on logging
except ldap.LDAPError, error_message:
#~ print "error finding username: %s" % error_message
return False
def enableUser(self,cn=None):
if cn is None:
cn=self.perrec.userid
        #instantiate class. Why? Who knows...
ad = ADconnection()
with ad as ldapconn:
try:
searchfilter = ('(&(objectCategory=person)(&(objectClass=user)(sAMAccountName=%s)))' % cn)
logger.debug(searchfilter)
user_results=ldapconn.search_s(self.base_dn,ldap.SCOPE_SUBTREE,searchfilter)
logger.debug(user_results)
dn = user_results[0][0]
#~ print dn
if dn <> None:
#placeholder for logging
#print 'updating ' + user['username'],time.ctime()
#print user_results[0][1]['userAccountControl'][0]
UC = int(user_results[0][1]['userAccountControl'][0])
if UC & (1<<1):
UC = UC & ~(1 << 1)
UCattrib = (ldap.MOD_REPLACE, 'userAccountControl', str(UC))
#mod_attrs.append(UCattrib)
mod_attrs = [(UCattrib)]
#print mod_attrs
ldapconn.modify_s( dn, mod_attrs )
logger.info('Enabled: %s', str(cn))
#work on logging
except ldap.LDAPError, error_message:
#~ print "error finding username: %s" % error_message
return False
def addUser(self):
# Build User
if self.perrec.ADContainer == 'noOU':
logger.debug("User does not have container: %s" % self.perrec.userid)
logger.error("AD Account not created for: %s" % self.perrec.userid)
#raise ValueError('User not created')
return False
user_dn = 'cn=' + self.perrec.userid + ',' + self.perrec.ADContainer
logger.info('User DN for new user: %s', user_dn)
user_attrs = {}
user_attrs['objectClass'] = \
['top', 'person', 'organizationalPerson', 'user']
user_attrs['cn'] = str(self.perrec.userid)
user_attrs['userPrincipalName'] = str(self.perrec.userid) + '@' + 'sbcc.local'
user_attrs['sAMAccountName'] = str(self.perrec.userid)
user_attrs['givenName'] = str(self.perrec.fname.encode("utf-8"))
user_attrs['sn'] = str(self.perrec.lname.encode("utf-8"))
user_attrs['displayName'] = str(self.perrec.displayName.encode("utf-8"))
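# 514 = NORMAL_ACCOUNT (512) + ACCOUNTDISABLE (2): the account is created
# disabled here and only enabled after the password has been set below.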
user_attrs['userAccountControl'] = '514'
user_attrs['mail'] = str(self.perrec.userid) + '@pipeline.sbcc.edu'
user_attrs['employeeID'] = str(self.perrec.knumber)
user_attrs['employeeNumber'] = str(self.perrec.knumber).strip('K')
user_ldif = modlist.addModlist(user_attrs)
ad = ADconnection()
with ad as ldapconn:
logger.info('Adding users: %s', user_dn)
ldapconn.add_s(user_dn,user_ldif)
time.sleep(1)
logger.info('Adding membership: %s', user_dn)
add_member = [(ldap.MOD_ADD, 'member', str(user_dn))]
ldapconn.modify_s(self.perrec.ADMemberOf,add_member)
time.sleep(1)
adpass = ('"%s"' % self.perrec.password).encode("utf-16-le")
#adpass = base64.b64encode(adpass)
# Update Password
mod_attrs = [( ldap.MOD_REPLACE, 'unicodePwd', adpass ),( ldap.MOD_REPLACE, 'unicodePwd', adpass)]
logger.info('Setting pass: %s', user_dn)
ldapconn.modify_s(user_dn,mod_attrs)
time.sleep(1)
# 512 will set user account to enabled
mod_acct = [(ldap.MOD_REPLACE, 'userAccountControl', '512')]
logger.info('Trying to enable user: %s', user_dn)
logger.info('userAccountControl: %s', mod_acct)
ldapconn.modify_s(user_dn,mod_acct)
#Enable Account
#self.chgPwd()
#self.enableUser()
logger.info('User added: %s', user_dn)
|
django-stars/dash2011
|
presence/apps/vote/views.py
|
Python
|
bsd-3-clause
| 684
| 0
|
from django.http import HttpResponseRedirect
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.core.urlresolvers import reverse
from django.contrib import messages
from models import UserVote
from forms import UserVoteForm
def vote(request):
if request.method == "POST":
form = UserVoteForm(request.POST)
if form.is_valid():
vote = form.save(commit=False)
vote = UserVote.objects.vote(request.user, vote.vote)
messages.info(request, "Your mood is %s" % vote.get_vote_display())
else:
form = UserVoteForm()
return HttpResponseRedirect(reverse('dashboard'))
|
PyFilesystem/pyfilesystem2
|
fs/opener/memoryfs.py
|
Python
|
mit
| 808
| 0
|
# coding: utf-8
"""`MemoryFS` opener definit
|
ion.
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import typing
from .base import Opener
from .registry import registry
if typing.TYPE_CHECKING:
from typing import Text
from .parse import ParseResult
from ..memoryfs import MemoryFS # noqa: F401
@registry.install
class MemOpener(Opener):
"""`MemoryFS` opener."""
protocols = ["mem"]
def open_fs(
self,
fs_url, # type: Text
parse_result, # type: ParseResult
writeable, # type: bool
create, # type: bool
cwd, # type: Text
):
# type: (...) -> MemoryFS
from ..memoryfs import MemoryFS
mem_fs = MemoryFS()
return mem_fs
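# Illustrative usage sketch (not part of the original module): with this opener
# registered, a "mem://" URL can be opened through the generic entry point:
#
#     from fs import open_fs
#     mem_fs = open_fs("mem://")
#     mem_fs.writetext("/hello.txt", "hi")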
|
jayinton/FlaskDemo
|
simple/index.py
|
Python
|
mit
| 269
| 0
|
#!/usr/bin/env python
# coding=utf-8
__author__ = 'Jayin Ton'
from flask import Flask
app = Flask(__name__)
host = '127.0.0.1'
port = 8000
@app.route('/')
def index():
return 'welcome'
if __name__ == '__main__':
app.run(host=host, port=port, debug=True)
|
BaroboRobotics/libbarobo
|
PyMobot/setup_win32.py
|
Python
|
gpl-3.0
| 855
| 0.016374
|
#!/usr/bin/env python
from distutils.core import setup,Extension
from distutils.command.build_py import build_py
dist = setup(name='PyMobot',
version='0.1',
description='Mobot Control Python Library',
author='David Ko',
author_email='[email protected]',
url='http://www.barobo.com',
packages=['barobo'],
ext_modules=[Extension('barobo._mobot',
['barobo/mobot.i'],
swig_opts=['-c++', '-I../'],
include_dirs=['../', '../BaroboConfigFile', '../BaroboConfigFile/mxml-2.7'],
define_macros=[('NONRELEASE','1')],
extra_compile_args=['-fpermissive'],
library_dirs=['../', '../BaroboConfigFile', '../BaroboConfigFile/mxml-2.7'],
libraries=['baroboStatic', 'baroboconfigfile', 'mxml', 'pthread', 'ws2_32'],
)],
)
build_py = build_py(dist)
build_py.ensure_finalized()
build_py.run()
|
nozuono/calibre-webserver
|
src/tinycss/media3.py
|
Python
|
gpl-3.0
| 4,645
| 0.003229
|
#!/usr/bin/env python
# vim:fileencoding=utf-8
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2014, Kovid Goyal <kovid at kovidgoyal.net>'
from tinycss.css21 import CSS21Parser
from tinycss.parsing import remove_whitespace, split_on_comma, ParseError
class MediaQuery(object):
__slots__ = 'media_type', 'expressions', 'negated'
def __init__(self, media_type='all', expressions=(), negated=False):
self.media_type = media_type
self.expressions = expressions
self.negated = negated
def __repr__(self):
return '<MediaQuery type=%s negated=%s expressions=%s>' % (
self.media_type, self.negated, self.expressions)
def __eq__(self, other):
return self.media_type == getattr(other, 'media_type', None) and \
self.negated == getattr(other, 'negated', None) and \
self.expressions == getattr(other, 'expressions', None)
class MalformedExpression(Exception):
def __init__(self, tok, msg):
Exception.__init__(self, msg)
self.tok = tok
class CSSMedia3Parser(CSS21Parser):
''' Parse media queries as defined by the CSS 3 media module '''
def parse_media(self, tokens, errors):
if not tokens:
return [MediaQuery('all')]
queries = []
for part in split_on_comma(remove_whitespace(tokens)):
negated = False
media_type = None
expressions = []
try:
for i, tok in enumerate(part):
if i == 0 and tok.type == 'IDENT':
val = tok.value.lower()
if val == 'only':
continue # ignore leading ONLY
if val == 'not':
negated = True
continue
if media_type is None and tok.type == 'IDENT':
media_type = tok.value
continue
elif media_type is None:
media_type = 'all'
if tok.type == 'IDENT' and tok.value.lower() == 'and':
continue
if not tok.is_container:
raise MalformedExpression(tok, 'expected a media expression not a %s' % tok.type)
if tok.type != '(':
raise MalformedExpression(tok, 'media expressions must be in parentheses not %s' % tok.type)
content = remove_whitespace(tok.content)
if len(content) == 0:
raise MalformedExpression(tok, 'media expressions cannot be empty')
if content[0].type != 'IDENT':
raise MalformedExpression(content[0], 'expected a media feature not a %s' % tok.type)
media_feature, expr = content[0].value, None
if len(content) > 1:
if len(content) < 3:
raise MalformedExpression(content[1], 'malformed media feature definition')
if content[1].type != ':':
raise MalformedExpression(content[1], 'expected a :')
expr = content[2:]
if len(expr) == 1:
expr = expr[0]
elif len(expr) == 3 and (expr[0].type, expr[1].type, expr[1].value, expr[2].type) == (
'INTEGER', 'DELIM', '/', 'INTEGER'):
# This should really be moved into token_data, but
# since RATIO is not part of CSS 2.1 and does not
# occur anywhere else, we special case it here.
r = expr[0]
r.value = (expr[0].value, expr[2].value)
r.type = 'RATIO'
r._as_css = expr[0]._as_css + expr[1]._as_css + expr[2]._as_css
expr = r
else:
raise MalformedExpression(expr[0], 'malformed media feature definition')
expressions.append((media_feature, expr))
except MalformedExpression as err:
errors.append(ParseError(err.tok, err.message))
media_type, negated, expressions = 'all', True, ()
queries.append(MediaQuery(media_type or 'all', expressions=tuple(expressions), negated=negated))
return queries
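# Illustrative sketch: for tokens representing "not screen and (min-width: 500px)",
# parse_media() returns roughly
#     [MediaQuery(media_type='screen', negated=True,
#                 expressions=(('min-width', <DIMENSION token 500px>),))]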
|
fmaschler/networkit
|
networkit/scd.py
|
Python
|
mit
| 62
| 0.016129
|
# extension imports
from _NetworKit import PageRankNibble, GCE
|
MalloyPower/parsing-python
|
front-end/testsuite-python-lib/Python-2.4/Lib/test/test_copy.py
|
Python
|
mit
| 17,174
| 0.002678
|
"""Unit tests for the copy module."""
import sys
import copy
import copy_reg
import unittest
from test import test_support
class TestCopy(unittest.TestCase):
# Attempt full line coverage of copy.py from top to bottom
def test_exceptions(self):
self.assert_(copy.Error is copy.error)
self.assert_(issubclass(copy.Error, Exception))
# The copy() method
def test_copy_basic(self):
x = 42
y = copy.copy(x)
self.assertEqual(x, y)
def test_copy_copy(self):
class C(object):
def __init__(self, foo):
self.foo = foo
def __copy__(self):
return C(self.foo)
x = C(42)
y = copy.copy(x)
self.assertEqual(y.__class__, x.__class__)
self.assertEqual(y.foo, x.foo)
def test_copy_registry(self):
class C(object):
def __new__(cls, foo):
obj = object.__new__(cls)
obj.foo = foo
return obj
def pickle_C(obj):
return (C, (obj.foo,))
x = C(42)
self.assertRaises(TypeError, copy.copy, x)
copy_reg.pickle(C, pickle_C, C)
y = copy.copy(x)
def test_copy_reduce_ex(self):
class C(object):
def __reduce_ex__(self, proto):
return ""
def __reduce__(self):
raise test_support.TestFailed, "shouldn't call this"
x = C()
y = copy.copy(x)
self.assert_(y is x)
def test_copy_reduce(self):
class C(object):
def __reduce__(self):
return ""
x = C()
y = copy.copy(x)
self.assert_(y is x)
def test_copy_cant(self):
class C(object):
def __getattribute__(self, name):
if name.startswith("__reduce"):
raise AttributeError, name
return object.__getattribute__(self, name)
x = C()
self.assertRaises(copy.Error, copy.copy, x)
# Type-specific _copy_xxx() methods
def test_copy_atomic(self):
class Classic:
pass
class NewStyle(object):
pass
def f():
pass
tests = [None, 42, 2L**100, 3.14, True, False, 1j,
"hello", u"hello\u1234", f.func_code,
NewStyle, xrange(10), Classic, max]
for x in tests:
self.assert_(copy.copy(x) is x, repr(x))
def test_copy_list(self):
x = [1, 2, 3]
self.assertEqual(copy.copy(x), x)
def test_copy_tuple(self):
x = (1, 2, 3)
self.assertEqual(copy.copy(x), x)
def test_copy_dict(self):
x = {"foo": 1, "bar": 2}
self.assertEqual(copy.copy(x), x)
def test_copy_inst_vanilla(self):
class C:
def __init__(self, foo):
self.foo = foo
def __cmp__(self, other):
return cmp(self.foo, other.foo)
x = C(42)
self.assertEqual(copy.copy(x), x)
def test_copy_inst_copy(self):
class C:
def __init__(self, foo):
self.foo = foo
def __copy__(self):
return C(self.foo)
def __cmp__(self, other):
return cmp(self.foo, other.foo)
x = C(42)
self.assertEqual(copy.copy(x), x)
def test_copy_inst_getinitargs(self):
class C:
def __init__(self, foo):
self.foo = foo
def __getinitargs__(self):
return (self.foo,)
def __cmp__(self, other):
return cmp(self.foo, other.foo)
x = C(42)
self.assertEqual(copy.copy(x), x)
def test_copy_inst_getstate(self):
class C:
def __init__(self, foo):
self.foo = foo
def __getstate__(self):
return {"foo": self.foo}
def __cmp__(self, other):
return cmp(self.foo, other.foo)
x = C(42)
self.assertEqual(copy.copy(x), x)
def test_copy_inst_setstate(self):
class C:
def __init__(self, foo):
self.foo = foo
def __setstate__(self, state):
self.foo = state["foo"]
def __cmp__(self, other):
return cmp(self.foo, other.foo)
x = C(42)
self.assertEqual(copy.copy(x), x)
def test_copy_inst_getstate_setstate(self):
class C:
def __init__(self, foo):
self.foo = foo
def __getstate__(self):
return self.foo
def __setstate__(self, state):
self.foo = state
def __cmp__(self, other):
return cmp(self.foo, other.foo)
x = C(42)
self.assertEqual(copy.copy(x), x)
# The deepcopy() method
def test_deepcopy_basic(self):
x = 42
y = copy.deepcopy(x)
self.assertEqual(y, x)
def test_deepcopy_memo(self):
# Tests of reflexive objects are under type-specific sections below.
# This tests only repetitions of objects.
x = []
x = [x, x]
y = copy.deepcopy(x)
self.assertEqual(y, x)
self.assert_(y is not x)
self.assert_(y[0] is not x[0])
self.assert_(y[0] is y[1])
def test_deepcopy_issubclass(self):
# XXX Note: there's no way to test the TypeError coming out of
# issubclass() -- this can only happen when an extension
# module defines a "type" that doesn't formally inherit from
# type.
class Meta(type):
pass
class C:
__metaclass__ = Meta
self.assertEqual(copy.deepcopy(C), C)
def test_deepcopy_deepcopy(self):
class C(object):
def __init__(self, foo):
self.foo = foo
def __deepcopy__(self, memo=None):
return C(self.foo)
x = C(42)
y = copy.deepcopy(x)
self.assertEqual(y.__class__, x.__class__)
self.assertEqual(y.foo, x.foo)
def test_deepcopy_registry(self):
class C(object):
def __new__(cls, foo):
obj = object.__new__(cls)
obj.foo = foo
return obj
def pickle_C(obj):
return (C, (obj.foo,))
x = C(42)
self.assertRaises(TypeError, copy.deepcopy, x)
copy_reg.pickle(C, pickle_C, C)
y = copy.deepcopy(x)
def test_deepcopy_reduce_ex(self):
class C(object):
def __reduce_ex__(self, proto):
return ""
def __reduce__(self):
raise test_support.TestFailed, "shouldn't call this"
x = C()
y = copy.deepcopy(x)
self.assert_(y is x)
def test_deepcopy_reduce(self):
class C(object):
def __reduce__(self):
return ""
x = C()
y = copy.deepcopy(x)
self.assert_(y is x)
def test_deepcopy_cant(self):
class C(object):
def __getattribute__(self, name):
if name.startswith("__reduce"):
raise AttributeError, name
return object.__getattribute__(self, name)
x = C()
self.assertRaises(copy.Error, copy.deepcopy, x)
# Type-specific _deepcopy_xxx() methods
def test_deepcopy_atomic(self):
class Classic:
pass
class NewStyle(object):
pass
def f():
pass
tests = [None, 42, 2L**100, 3.14, True, False, 1j,
"hello", u"hello\u1234", f.func_code,
NewStyle, xrange(10), Classic, max]
for x in tests:
self.assert_(copy.deepcopy(x) is x, repr(x))
def test_deepcopy_list(self):
x = [[1, 2], 3]
y = copy.deepcopy(x)
self.assertEqual(y, x)
self.assert_(x is not y)
self.assert_(x[0] is not y[0])
def test_deepcopy_reflexive_list(self):
x = []
x.append(x)
y = copy.deepcopy(x)
self.assertRaises(RuntimeError, cmp, y, x)
self.assert_(y is not x)
self.assert_(y[0] is y)
|
peppelinux/pyDBsync
|
src/main.py
|
Python
|
bsd-3-clause
| 3,261
| 0.029745
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import argparse
#import imp
from validator import *
from settings import *
from utils import *
parser = argparse.ArgumentParser(description='Sync two Databases', epilog="e.g.: python main.py -run test --db-master=mysql://root:[email protected]/dbname --db-slave=mysql://root:passwdx@localhost/dbname")
parser.add_argument('-run', dest='run', action='store', type=str, choices=['test','execute'], required=True,
help='Test Produce only a simulation. Execute does the job !')
parser.add_argument('--no-schemacheck', required=False, action='store_true', help='disable schema check. Use it only if you are sure that the DBs\' schemas are identical and you want to bypass the diff evaluation of the DB_maps files.')
#parser.add_argument('--verbose', required=False, action='store_true', help='view a lot of useful/useless output')
parser.add_argument('--db-master', dest='master', action='store',
required=True, help='e.g.: mysql://user:password@hostname[:3306]/dbname where the data is taken from',
type=RegexValidator(DB_CONNECTOR_PATTERN))
parser.add_argument('--db-slave', dest='slave', action='store',
type=RegexValidator(DB_CONNECTOR_PATTERN),
required=True, help='e.g.: mysql://user:password@hostname[:3306]/dbname where we need to store and sync the data taken from master')
parser.add_argument('--tables', required=True, action='store', help='tables names separated by a comma and no space, like this: --tables users,groups,tb_matchs')
parser.add_argument('--version', action='version', version='pySyncDB 0.1a')
if __name__ == '__main__':
# fetch arguments from sys.argv with a little help from the argparse module :)
args = parser.parse_args()
# check that the folder where the tables_maps will be stored exists
Verify_DBMAPS_Folder()
# DO backup
# TODO: a procedure to do a backups with creational statements and insert queries from sqlalchemy
# Backup(db_name)
# producing tables_maps with sqlautocode helps a lot :)
SQLAutoCodeMap('master', args)
SQLAutoCodeMap('slave', args)
# if there's not the --no-schemacheck this will start the Schema Comparison to control
# that the two files are identical
if not args.no_schemacheck: SchemaComparator()
# use imp to import the tables_schemes. This make all the things more simple !
# deprecated: I abandoned it because of ugly warnings like this:
# RuntimeWarning: Parent module 'master_schema' not found while handling absolute import...
#master_schema = imp.load_source('master_schema.py', DB_MAP_FOLDER+'/master_schema.py')
#slave_schema = imp.load_source('slave_schema.py', DB_MAP_FOLDER+'/slave_schema.py')
# now I use simply this :)
sys.path.append(DB_MAP_FOLDER)
from pydbsync import *
master = SessionManager('master', args.master)
slave = SessionManager('slave', args.slave)
for table in args.tables.split(','):
if args.run == 'test': args.run = None
g = pyTableSyncManager(master, slave, table, args.run)
g.InspectTable()
if g.ProposedUpdates:
g.CommitUpdates()
if g.ProposedInsertions:
g.CommitInsertions()
if g.ProposedDeletions:
g.CommitDeletions()
# purge it!
del(g)
|
jm66/vsphere_inventory_report
|
vsphere_inventory_report.py
|
Python
|
gpl-2.0
| 17,553
| 0.014357
|
#!/usr/bin/python
import logging, sys, re, getpass, argparse, pprint, csv, time
from pysphere import MORTypes, VIServer, VITask, VIProperty, VIMor, VIException
from pysphere.vi_virtual_machine import VIVirtualMachine
from pysphere.resources import VimService_services as VI
def sizeof_fmt(num):
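# Divides by 1024 once per listed unit, i.e. converts a raw byte count to GB.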
for x in ['bytes','KB','MB']:
num /= 1024.0
return "%3.5f" % (num)
def get_vm_permissions(auth_manager, vm_mor, request):
vm_mor_type = "VirtualMachine"
_this = request.new__this(auth_manager)
_this.set_attribute_type(auth_manager.get_attribute_type())
request.set_element__this(_this)
entity = request.new_entity(vm_mor)
entity.set_attribute_type(vm_mor_type)
request.set_element_entity(entity)
request.set_element_inherited(True)
response = server._proxy.RetrieveEntityPermissions(request)
permissions = response._returnval
perm_array = [(p.Principal, p.RoleId) for p in permissions]
return perm_array
def write_report(vms_info, csvfile, dirname, c):
for val in vms_info.values():
c.writerow([val['Folder'], val['vm'], val['numCPU'], val['MBmemory'], val['storageUsed'], val['storageCommitted'], val['ESXihost'], val['datastores'],
val['vmConfig'], val['networks'], val['netids'], val['vmOS'], val['vmTools'], val['vmPower'], val['vmDNS'], val['Note'],
val['cpuReservationMhz'], val['cpuLimitMhz'], val['memReservationMB'], val['memLimitMB'], val['HardDisks'],
val['CDdrive'], val['snapshots'], val['Permissions'] ])
def create_vm_dict():
vm = {'vmId': None, 'vm': None, 'numCPU': None, 'MBmemory': None, 'vmConfig': None, 'Note': None, 'vmOS': None, 'vmDNS': None,
'vmPower': None, 'vmTools': None, 'cpuReservationMhz': None, 'cpuLimitMhz': None, 'memReservationMB': None, 'memLimitMB': None,
'networks': None, 'datastores': None, 'netids': None, 'snapshots': None, 'CDdrive': None,
'ESXihost': None, 'HardDisks': None, 'storageUsed': None, 'storageCommitted': None, 'Folder': None, 'Permissions': None}
return vm
def create_csv_header():
csv_header = ["Folder", "vmName", "numCPU", "MBmemory", "GBstorage", "GBcommitted", "ESXhost", "datastores", "vmConfig", "NICs",
"NetIDs", "vmOS", "vmTools", "vmPower", "vmDNS", "Note",
"cpuReservationMhz", "cpuLimitMhz", "memReservationMB", "memLimitMB",
"HardDisks", "CDdrive", "Snapshots", "vmPermissions"]
return csv_header
def create_vm_props():
properties = ['name','config.hardware.device', 'config.hardware.numCPU',
'config.hardware.memoryMB', 'config.files.vmPathName',
'runtime.host', 'config.version', 'summary.runtime.powerState',
'config.annotation', 'config.guestFullName', 'guest.hostName',
'guest.toolsVersion', 'guest.disk', 'guest.net',
'resourceConfig.cpuAllocation.reservation',
'resourceConfig.cpuAllocation.limit',
'resourceConfig.memoryAllocation.reservation',
'resourceConfig.memoryAllocation.limit',
'datastore', 'snapshot', 'layoutEx.file', 'storage.perDatastoreUsage']
return properties
def create_me_props():
return ['name', 'parent']
def get_dvp_dict(datacenters, datacentername, server):
dvpgs = {}
# GET INITIAL PROPERTIES AND OBJECTS
dcmor = [k for k,v in datacenters if v==datacentername][0]
dcprops = VIProperty(server, dcmor)
# networkFolder managed object reference
nfmor = dcprops.networkFolder._obj
dvpg_mors = server._retrieve_properties_traversal(property_names=['name','key'], from_node=nfmor, obj_type='DistributedVirtualPortgroup')
# building dictionary with the DVS
for dvpg in dvpg_mors:
mor = dvpg.Obj
entity = {}
for p in dvpg.PropSet:
entity[p.Name]=p.Val
dvpgs[mor] = entity
return dvpgs
def get_path(entity, entities_info):
parent = entity.get('parent')
display_name = "%s" % (entity['name'])
if parent and parent in entities_info:
return get_path(entities_info[parent], entities_info) + " > " + display_name
return display_name
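# e.g. get_path() yields a breadcrumb such as "Datacenters > DC1 > vm > MyVM",
# built by walking parent links from the entity up to the root.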
def get_paths_dict(server, properties2):
entities_info = {}
paths = {}
# getting managed entities
props2 = server._retrieve_properties_traversal(property_names=properties2, obj_type='ManagedEntity')
# building a dictionary with the Managed Entities info
for prop in props2:
mor = prop.Obj
entity = {'id':mor, 'name':None, 'parent':None,'type':mor.get_attribute_type()}
for p in prop.PropSet:
entity[p.Name] = p.Val
entities_info[mor] = entity
# building dictionary with VMs vs path
for entity in entities_info.itervalues():
if entity['type'] == "VirtualMachine":
paths[entity['id']] = {'id': entity['id'], 'path':get_path(entity, entities_info)}
return paths
def set_dir(directory):
if directory:
return directory
else:
logger.info('Using default directory /tmp')
return '/tmp'
def getDateSuffix():
return '_'+time.strftime("%Y-%m-%d")
def set_filename(filename):
if filename:
return filename + getDateSuffix()
else:
logger.info('Using default filename vsphere-inventory')
return 'vsphere-inventory' + getDateSuffix()
def get_args():
# Creating the argument parser
parser = argparse.ArgumentParser(description="Report full vShere inventory to a CSV file")
parser.add_argument('-s', '--server', nargs=1, required=True, help='The vCenter or ESXi server to connect to', dest='server', type=str)
parser.add_argument('-u', '--user', nargs=1, required=True, help='The username with which to connect to the server', dest='username', type=str)
parser.add_argument('-p', '--password', nargs=1, required=False, help='The password with which to connect to the host. If not specified, the user is prompted at runtime for a password', dest='password', type=str)
parser.add_argument('-c', '--dc', nargs=1, required=True, help='The datacenter name you wish to report', dest='dcname', type=str)
parser.add_argument('-D', '--dir', required=False, help='Write CSV to a specific directory. Default /tmp', dest='directory', type=str)
parser.add_argument('-f', '--filename', required=False, help='File name. Default vsphere-inventory.csv', dest='filename', type=str)
parser.add_argument('-v', '--verbose', required=False, help='Enable verbose output', dest='verbose', action='store_true')
parser.add_argument('-d', '--debug', required=False, help='Enable debug output', dest='debug', action='store_true')
parser.add_argument('-l', '--log-file', nargs=1, required=False, help='File to log to (default = stdout)', dest='logfile', type=str)
parser.add_argument('-V', '--version', action='version', version="%(prog)s (version 0.4)")
args = parser.parse_args()
return args
def get_vms_dict(server, properties, paths, hosts_dict, datastores_dict, dvpgs):
vms_info = {}
# getting VMs info
props = server._retrieve_properties_traversal(property_names=properties, obj_type='VirtualMachine')
#build a dictionary with the VMs info
for prop in props:
mor = prop.Obj
vm = create_vm_dict()
for p in prop.PropSet:
vm['vmId'] = mor
if p.Name == "name":
vm['vm'] = p.Val
elif p.Name == "config.hardware.numCPU":
vm['numCPU'] = p.Val
elif p.Name == "config.hardware.memoryMB":
vm['MBmemory'] = p.Val
elif p.Name == "config.files.vmPathName":
vm['vmConfig'] = p.Val
elif p.Name == "config.annotation":
annotation = str(p.Val)
annotation = annotation.replace('\n',' ')
vm['Note']= annotation
elif p.Name == "config.guestFullName":
vm['vmOS'] = p.Val
elif p.Name == "guest.hostName":
vm['vmDNS'] = p.Val
elif p.Name == "guest.toolsVersion":
vm['vmTools'] = p.Val
|
anjesh/pdf-processor
|
run.py
|
Python
|
mit
| 2,284
| 0.007005
|
from PdfProcessor import *
import argparse
from datetime import datetime
import ConfigParser
import ProcessLogger
import traceback
from urllib2 import HTTPError, URLError
parser = argparse.ArgumentParser(description='Processes the pdf and extracts the text')
parser.add_argument('-l','--language', help='Language of input pdf file for transcription (english, french, spanish).', required=False, default="english")
parser.add_argument('-i','--infile', help='File path of the input pdf file.', required=True)
parser.add_argument('-o','--outdir', help='File name of the output csv file.', required=True)
results = parser.parse_args()
allowed_languages = ["english", "french", "spanish", "portuguese", "arabic"]
pdfProcessor = ""
try:
logger = ProcessLogger.getLogger('run')
logger.info("Processing started at %s ", str(datetime.now()))
logger.info("input: %s", results.infile)
logger.info("outdir: %s", results.outdir)
if results.language.lower() not in allowed_languages:
raise Exception("language should be one of english, french, spanish, portuguese or arabic")
if results.language.lower() == "portuguese":
results.language = "portuguesestandard"
configParser = ConfigParser.RawConfigParser()
configParser.read(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'settings.config'))
pdfProcessor = PDFProcessor(results.infile, results.outdir, results.language.lower())
pdfProcessor.setConfigParser(configParser)
pdfProcessor.writeStats()
if pdfProcessor.isStructured():
pdfProcessor.extractTextFromStructuredDoc()
else:
pdfProcessor.extractTextFromScannedDoc()
except URLError as e:
logger.error("URLError: %s", e.reason);
logger.debug(traceback.format_exception(*sys.exc_info()))
except HTTPError as e:
logger.error("HTTPError: [%s] %s", e.code, e.reason);
logger.debug(traceback.format_exception(*sys.exc_info()))
except OSError as e:
logger.error("OSError: %s [%s] in %s", e.strerror, e.errno, e.filename);
logger.debug(traceback.format_exception(*sys.exc_info()))
except Exception as e:
logger.error("Exception: %s ", e);
logger.debug(traceback.format_exception(*sys.exc_info()))
finally:
logger.info("Processing ended at %s ", str(datetime.now()));
|
5610110083/Safety-in-residential-project
|
cgi-bin/any/setCookies.py
|
Python
|
apache-2.0
| 447
| 0.029083
|
#!/usr/bin/python
import requests
import time, Cookie
# Instantiate a SimpleCookie object
cookie = Cookie.SimpleCookie()
# The SimpleCookie instance is a mapping
cookie['lastvisit'] = str(time.time())
s = requests.session()
s.cookies.clear()
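# In CGI, every header (including the Set-Cookie line emitted by `print cookie`)
# must be printed before the blank line that terminates the header block.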
# Output the HTTP message containing the cookie
print cookie
print 'Content-Type: text/html\n'
print '<html><body>'
print 'Server time is', time.asctime(time.localtime())
print '</body></html>'
|
DevicePilot/synth
|
synth/devices/commswave.py
|
Python
|
mit
| 3,252
| 0.006458
|
"""
commswave
=========
Takes device communications up and down according to a timefunction.
Comms will be working whenever the timefunction returns non-zero.
Configurable parameters::
{
"timefunction" : A timefunction definition
"threshold" : (optional) Comms will only work when the timefunction is returning >= threshold. If missing then any non-zero value will make comms work.
"gate_properties" : (optional) ["list", "of", "properties"] If this is defined, then instead of taking whole comms up and down, only these specific properties are gated
}
Device properties created::
{
}
"""
from .device import Device
from common import importer
import logging
class Commswave(Device):
def __init__(self, instance_name, time, engine, update_callback, context, params):
"""Take Comms up and down according to some time function"""
tf = params["commswave"]["timefunction"]
self.comms_timefunction = importer.get_class("timefunction", list(tf.keys())[0])(engine, self, tf[list(tf.keys())[0]])
self.comms_tf_threshold = params["commswave"].get("threshold", None)
self.comms_gate_properties = params["commswave"].get("gate_properties", None)
self.messages_sent = 0
self.messages_attempted = 0
super(Commswave,self).__init__(instance_name, time, engine, update_callback, context, params)
def timefunction_says_communicate(self):
thresh = 0.0
if self.comms_tf_threshold is not None:
thresh = self.comms_tf_threshold
return self.comms_timefunction.state() > thresh
def comms_ok(self):
if self.comms_gate_properties is not None: # If we're gating individual properties, then don't gate overall comms
return super(Commswave, self).comms_ok()
else:
self.messages_attempted += 1
is_ok = super(Commswave, self).comms_ok()
is_ok = is_ok and self.timefunction_says_communicate()
if is_ok:
self.messages_sent += 1
return is_ok
def transmit(self, the_id, ts, properties, force_comms):
if self.comms_gate_properties is not None: # We're gating properties
if not self.timefunction_says_communicate():
for p in self.comms_gate_properties:
properties.pop(p, None) # Remove the property, if it's there
super(Commswave, self).transmit(the_id, ts, properties, force_comms)
def external_event(self, event_name, arg):
super(Commswave, self).external_event(event_name, arg)
def close(self):
super(Commswave,self).close()
logging.info("Comms report for " + str(self.properties["$id"]) + " " +
str(self.messages_sent) + " sent (" + str(100 * self.messages_sent / max(1, self.messages_attempted)) + "%) from " +
str(self.messages_attempted) + " total")
# Private methods
## (we don't actually need to tick, as we can instantaneously look up timefunction state whenever we need to)
## def tick_commswave(self, _):
## self.ok_commswave = self.comms_timefunction.state()
## self.engine.register_event_at(self.comms_timefunction.next_change(), self.tick_commswave, self, self)
|
aluminiumgeek/goodbye-mihome
|
apps/sound_when_door_is_open.py
|
Python
|
bsd-2-clause
| 608
| 0
|
import json
from plugins import gateway_speaker
from plugins.magnet import MAGNET_STORE_KEY
DOOR_SENSOR_SID = '158d0001837ec2'
def run(store, conn, cursor):
"""Play sound on the Gateway when somebody opens the door"""
p = store.pubsub(ignore_subscribe_messages=True)
p.subscribe(MAGNET_STORE_KEY)
for message in p.listen():
if message.get('type') != 'message':
continue
data = json.loads(message.get('data').decode())
if data.get('sid') == DOOR_SENSOR_SID and data.get('status') == 'open':
gateway_speaker.play(3) # Standard alarm sound
|
CloudVLab/professional-services
|
tools/ml-auto-eda/ml_eda/reporting/recommendation.py
|
Python
|
apache-2.0
| 4,289
| 0.007694
|
# Copyright 2019 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for provide recommendation based on analysis results"""
from decimal import Decimal
from typing import Union
from ml_eda.metadata import run_metadata_pb2
from ml_eda.reporting import template
# Thresholds
MISSING_THRESHOLD = 0.1
CARDINALITY_THRESHOLD = 100
CORRELATION_COEFFICIENT_THRESHOLD = 0.3
P_VALUE_THRESHOLD = 0.05
def check_missing(attribute_name: str,
analysis: run_metadata_pb2.Analysis
) -> Union[None, str]:
"""Check whether % of missing exc
|
eed threshold
Args:
attribute_name: (string),
analysis: (run_metadata_pb2.Analysis), analysis that contain the result
of number of missing values
Returns:
Union[None, string]
"""
metrics = analysis.smetrics
total = 0
missing = 0
for item in metrics:
if item.name == run_metadata_pb2.ScalarMetric.TOTAL_COUNT:
total = item.value
elif item.name == run_metadata_pb2.ScalarMetric.MISSING:
missing = item.value
if total == 0:
raise ValueError('The dataset is empty')
missing_rate = missing / total
if missing_rate > MISSING_THRESHOLD:
return template.HIGH_MISSING.format(
name=attribute_name,
value=missing_rate
)
return None
def check_cardinality(attribute_name: str,
analysis: run_metadata_pb2.Analysis
) -> Union[None, str]:
"""Check whether the cardinality exceeds the predefined threshold
Args:
attribute_name: (string),
analysis: (run_metadata_pb2.Analysis), analysis that contain the result
of cardinality
Returns:
Union[None, string]
"""
metrics = analysis.smetrics
cardinality = 0
for item in metrics:
if item.name == run_metadata_pb2.ScalarMetric.CARDINALITY:
cardinality = item.value
if cardinality > CARDINALITY_THRESHOLD:
return template.HIGH_CARDINALITY.format(
name=attribute_name,
value=cardinality
)
return None
def check_pearson_correlation(analysis: run_metadata_pb2.Analysis
) -> Union[None, str]:
"""Check whether the correlation coefficients exceed the predefined threshold
Args:
analysis: (run_metadata_pb2.Analysis), analysis that contain the result
of pearson correlation
Returns:
Union[None, string]
"""
metrics = analysis.smetrics
name_list = [att.name for att in analysis.features]
coefficient = 0
for item in metrics:
if item.name == run_metadata_pb2.ScalarMetric.CORRELATION_COEFFICIENT:
coefficient = item.value
if abs(coefficient) > CORRELATION_COEFFICIENT_THRESHOLD:
return template.HIGH_CORRELATION.format(
name_one=name_list[0],
name_two=name_list[1],
metric='correlation coefficient',
value="{0:.2f}".format(coefficient)
)
return None
def check_p_value(analysis: run_metadata_pb2.Analysis
) -> Union[None, str]:
"""Check whether the p-value of statistical tests
exceed the predefined threshold
Args:
analysis: (run_metadata_pb2.Analysis), analysis that contain the result
of statistical test
Returns:
Union[None, string]
"""
metric = analysis.smetrics[0]
analysis_name = run_metadata_pb2.Analysis.Name.Name(analysis.name)
name_list = [att.name for att in analysis.features]
p_value = metric.value
if p_value < P_VALUE_THRESHOLD:
return template.LOW_P_VALUE.format(
name_one=name_list[0],
name_two=name_list[1],
metric='p-value',
value="{:.2E}".format(Decimal(str(p_value))),
test_name=analysis_name
)
return None
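# Illustrative sketch (the surrounding report builder is not shown in this file;
# att_name is a hypothetical attribute name):
#
#     warnings = [w for w in (check_missing(att_name, analysis),
#                             check_cardinality(att_name, analysis))
#                 if w is not None]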
|
odoousers2014/odoo-development
|
modules/development_tools/wizard/development_tools_config_settings.py
|
Python
|
agpl-3.0
| 12,322
| 0.000081
|
# -*- coding: utf-8 -*-
###############################################################################
# License, author and contributors information in: #
# __openerp__.py file at the root folder of this module. #
###############################################################################
from openerp import models, fields, api
from openerp.tools.translate import _
from openerp.tools.safe_eval import safe_eval
from logging import getLogger
_logger = getLogger(__name__)
class DevelopmentToolsConfigSettings(models.TransientModel):
""" Module config settings
Fields:
email_to (Char): Address will be used to send captured email messages
email_capture (Boolean): Check it to capture outgoing email messages
developing_modules_enabled (Boolean): Sets the filter as default filter
in Local modules views
developing_module_ids (Many2many): Select items you want to display by
default Local modules views
search_default_app (Boolean): Enable search_default_app filter in the
Local modules view
"""
_name = 'development_tools.config.settings'
_description = u'Development tools config settings'
_inherit = ['res.config.settings']
_rec_name = 'id'
_order = 'id ASC'
# ---------------------------- ENTITY FIELDS ------------------------------
email_to = fields.Char(
string='Email to',
required=False,
readonly=False,
index=False,
help='Address will be used to send captured email messages',
size=50,
translate=False,
default='[email protected]',
)
email_capture = fields.Boolean(
string='Capture emails',
required=False,
readonly=False,
index=False,
default=True,
help='Check it to capture outgoing email messages',
)
developing_modules_enabled = fields.Boolean(
string='Set as default filter',
required=False,
readonly=False,
index=False,
default=False, # filter_model_name_whithout_module_development_modules
help='Sets the filter as default filter in Local modules views'
)
developing_module_ids = fields.Many2many(
string='Modules shown',
required=False,
readonly=False,
index=False,
default=None,
help='Select items you want to display by default Local modules views',
comodel_name='ir.module.module',
domain=[],
context={},
limit=None,
manual=True,
compute=lambda self: self._compute_developing_module_ids(),
inverse=lambda self: self._inverse_developing_module_ids()
)
search_default_app = fields.Boolean(
string='Search default app filter',
required=False,
readonly=False,
index=False,
default=False,
help='Enable search_default_app filter in the Local modules view'
)
development_mode = fields.Boolean(
string='Development mode as default',
required=False,
readonly=False,
index=False,
default=True,
help='Set development mode by default'
)
# ----------------------- COMPUTED FIELD METHODS --------------------------
def _compute_developing_module_ids(self):
for record in self:
record.developing_module_ids = record.get_developing_module_ids()
def _inverse_developing_module_ids(self):
try:
ids = [module.id for module in self.developing_module_ids]
name = 'filter_model_name_whithout_module_development_modules'
filter_set = self.env.ref('{}.{}'.format(self._module, name))
filter_set.domain = unicode([('id', 'in', ids or [-1])])
except Exception as ex:
_logger.error('_inverse_developing_module_ids: %s' % ex)
# --------------------- RES.CONFIG.SETTINGS METHODS -----------------------
@api.model
def get_default_values(self, values):
return dict(
email_to=self.get_email_to(),
email_capture=self.get_email_capture(),
developing_modules_enabled=self.get_developing_modules_enabled(),
developing_module_ids=self.get_developing_module_ids(),
search_default_app=self.get_search_default_app(),
development_mode=self.get_debug_mode(),
)
@api.one
def set_default_values(self):
self._set_email_to()
self._set_email_capture()
self._set_developing_modules_enabled()
self._set_developing_module_ids()
self._set_search_default_app()
self._set_debug_mode()
# ------------------------- GETTERS AND SETTERS ---------------------------
def get_email_to(self):
param = self._get_parameter('email_to')
return param.value if param else self._defaults['email_to']
def _set_email_to(self):
param = self._get_parameter('email_to', force=True)
param.value = self.email_to
def get_email_capture(self):
param = self._get_parameter('email_capture')
if param:
value = self._safe_eval(param.value, bool)
else:
value = self._defaults['email_capture']
return value
def _set_email_capture(self):
param = self._get_parameter('email_capture', force=True)
param.value = unicode(self.email_capture)
def get_developing_modules_enabled(self):
value = False
try:
name = 'filter_model_name_whithout_module_development_modules'
filter_set = self.env.ref('{}.{}'.format(self._module, name))
value = filter_set.is_default
except Exception as ex:
msg = self._not_retrieved.format('developing_modules_enabled', ex)
_logger.error(msg)
return value
def _set_developing_modules_enabled(self):
try:
name = 'filter_model_name_whithout_module_development_modules'
filter_set = self.env.ref('{}.{}'.format(self._module, name))
filter_set.is_default = self.developing_modules_enabled
except Exception as ex:
msg = self._not_set('developing_modules_enabled', ex)
_logger.error(msg)
def get_developing_module_ids(self):
value = None
try:
name = 'filter_model_name_whithout_module_development_modules'
filter_set = self.env.ref('{}.{}'.format(self._module, name))
domain = self._safe_eval(filter_set.domain, list)
value = filter(lambda x: x > 0, domain[0][2])
except Exception as ex:
msg = self._not_retrieved.format('developing_module_ids', ex)
_logger.error(msg)
return value
def _set_developing_module_ids(self):
try:
ids = [module.id for module in self.developing_module_ids]
name = 'filter_model_name_whithout_module_development_modules'
filter_set = self.env.ref('{}.{}'.format(self._module, name))
filter_set.domain = unicode([('id', 'in', ids or [-1])])
except Exception as ex:
msg = self._not_set('developing_module_ids', ex)
_logger.error(msg)
def get_search_default_app(self):
value = None
try:
action_set = self.env.ref('base.open_module_tree')
context = self._safe_eval(action_set.context, dict)
if 'search_default_app' in context:
value = context['search_default_app'] in [1, True]
else:
value = False
except Exception as ex:
msg = self._not_retrieved.format('search_default_app', ex)
_logger.error(msg)
return value
def _set_search_default_app(self):
try:
action_set = self.env.ref('base.open_module_tree')
context = self._safe_eval(action_set.context, dict)
value = 1 if self.search_default_app else 0
context.update({'search_default_app': value})
action_set.context = unicode(context)
except Exception as ex:
msg = self._not_set('search_default_app', ex)
_logger.error(msg)
|
feelobot/compose
|
compose/cli/main.py
|
Python
|
apache-2.0
| 19,689
| 0.001727
|
from __future__ import print_function
from __future__ import unicode_literals
from inspect import getdoc
from operator import attrgetter
import logging
import re
import signal
import sys
from docker.errors import APIError
import dockerpty
from .. import __version__
from .. import legacy
from ..const import DEFAULT_TIMEOUT
from ..project import NoSuchService, ConfigurationError
from ..service import BuildError, NeedsBuildError
from ..config import parse_environment
from ..progress_stream import StreamOutputError
from .command import Command
from .docopt_command import NoSuchCommand
from .errors import UserError
from .formatter import Formatter
from .log_printer import LogPrinter
from .utils import yesno, get_version_info
log = logging.getLogger(__name__)
def main():
setup_logging()
try:
command = TopLevelCommand()
command.sys_dispatch()
except KeyboardInterrupt:
log.error("\nAborting.")
sys.exit(1)
except (UserError, NoSuchService, ConfigurationError, legacy.LegacyError) as e:
log.error(e.msg)
sys.exit(1)
except NoSuchCommand as e:
log.error("No such command: %s", e.command)
log.error("")
log.error("\n".join(parse_doc_section("commands:", getdoc(e.supercommand))))
sys.exit(1)
except APIError as e:
log.error(e.explanation)
sys.exit(1)
except BuildError as e:
log.error("Service '%s' failed to build: %s" % (e.service.name, e.reason))
sys.exit(1)
except StreamOutputError as e:
log.error(e)
sys.exit(1)
except NeedsBuildError as e:
log.error("Service '%s' needs to be built, but --no-build was passed." % e.service.name)
sys.exit(1)
def setup_logging():
console_handler = logging.StreamHandler(sys.stderr)
console_handler.setFormatter(logging.Formatter())
console_handler.setLevel(logging.INFO)
root_logger = logging.getLogger()
root_logger.addHandler(console_handler)
root_logger.setLevel(logging.DEBUG)
# Disable requests logging
logging.getLogger("requests").propagate = False
# stolen from docopt master
def parse_doc_section(name, source):
pattern = re.compile('^([^\n]*' + name + '[^\n]*\n?(?:[ \t].*?(?:\n|$))*)',
re.IGNORECASE | re.MULTILINE)
return [s.strip() for s in pattern.findall(source)]
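# e.g. parse_doc_section("commands:", getdoc(TopLevelCommand)) returns the
# "Commands:" section of the docstring below as a list of matched blocks
# (one stripped multi-line string per match).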
class TopLevelCommand(Command):
"""Define and run multi-container applications with Docker.
Usage:
docker-compose [options] [COMMAND] [ARGS...]
docker-compose -h|--help
Options:
-f, --file FILE Specify an alternate compose file (default: docker-compose.yml)
-p, --project-name NAME Specify an alternate project name (default: directory name)
--verbose Show more output
-v, --version Print version and exit
Commands:
build Build or rebuild services
help Get help on a command
kill Kill containers
logs               View output from containers
port Print the public port for a port binding
ps List containers
pull Pulls service images
restart Restart services
rm Remove stopped containers
run Run a one-off command
scale              Set number of containers for a service
start Start services
stop Stop services
up Create and start containers
migrate-to-labels Recreate containers to add labels
version Show the Docker-Compose version information
"""
def docopt_options(self):
options = super(TopLevelCommand, self).docopt_options()
options['version'] = get_version_info('compose')
return options
def build(self, project, options):
"""
Build or rebuild services.
Services are built once and then tagged as `project_service`,
e.g. `composetest_db`. If you change a service's `Dockerfile` or the
contents of its build directory, you can run `docker-compose build` to rebuild it.
Usage: build [options] [SERVICE...]
Options:
--no-cache Do not use cache when building the image.
"""
no_cache = bool(options.get('--no-cache', False))
project.build(service_names=options['SERVICE'], no_cache=no_cache)
def help(self, project, options):
"""
Get help on a command.
Usage: help COMMAND
"""
handler = self.get_handler(options['COMMAND'])
raise SystemExit(getdoc(handler))
def kill(self, project, options):
"""
Force stop service containers.
Usage: kill [options] [SERVICE...]
Options:
-s SIGNAL SIGNAL to send to the container.
Default signal is SIGKILL.
"""
signal = options.get('-s', 'SIGKILL')
project.kill(service_names=options['SERVICE'], signal=signal)
def logs(self, project, options):
"""
View output from containers.
Usage: logs [options] [SERVICE...]
Options:
--no-color Produce monochrome output.
"""
containers = project.containers(service_names=options['SERVICE'], stopped=True)
monochrome = options['--no-color']
print("Attaching to", list_containers(containers))
LogPrinter(containers, attach_params={'logs': True}, monochrome=monochrome).run()
def port(self, project, options):
"""
Print the public port for a port binding.
Usage: port [options] SERVICE PRIVATE_PORT
Options:
--protocol=proto tcp or udp [default: tcp]
--index=index index of the container if there are multiple
instances of a service [default: 1]
"""
index = int(options.get('--index'))
service = project.get_service(options['SERVICE'])
try:
container = service.get_container(number=index)
except ValueError as e:
raise UserError(str(e))
print(container.get_local_port(
options['PRIVATE_PORT'],
protocol=options.get('--protocol') or 'tcp') or '')
def ps(self, project, options):
"""
List containers.
Usage: ps [options] [SERVICE...]
Options:
-q Only display IDs
"""
containers = sorted(
project.containers(service_names=options['SERVICE'], stopped=True) +
project.containers(service_names=options['SERVICE'], one_off=True),
key=attrgetter('name'))
if options['-q']:
for container in containers:
print(container.id)
else:
headers = [
'Name',
'Command',
'State',
'Ports',
]
rows = []
for container in containers:
command = container.human_readable_command
if len(command) > 30:
command = '%s ...' % command[:26]
rows.append([
container.name,
command,
container.human_readable_state,
container.human_readable_ports,
])
print(Formatter().table(headers, rows))
def pull(self, project, options):
"""
Pulls images for services.
Usage: pull [options] [SERVICE...]
Options:
--allow-insecure-ssl Allow insecure connections to the docker
registry
"""
insecure_registry = options['--allow-insecure-ssl']
project.pull(
service_names=options['SERVICE'],
insecure_registry=insecure_registry
)
def rm(self, project, options):
"""
Remove stopped service containers.
Usage: rm [options] [SERVICE...]
Options:
-f, --force   Don't ask to confirm removal
|
OCA/l10n-italy
|
l10n_it_vat_statement_split_payment/models/account_config.py
|
Python
|
agpl-3.0
| 520
| 0
|
# Copyright 2018 Silvio Gregorini ([email protected])
# Copyright (c) 2018 Openforce Srls Unipersonale (www.openforce.it)
# Copyright (c) 2019 Matteo Bilotta
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
from odoo import fields, models
class ResConfigSettings(models.TransientModel):
_inherit = "res.config.settings"
sp_description = fields.Char(
related="company_id.sp_description",
string="Description for period end stateme
|
nts",
readonly=False,
)
|
garyd203/flying-circus
|
src/flyingcircus/service/applicationautoscaling.py
|
Python
|
lgpl-3.0
| 424
| 0.002358
|
"""General-use classes to interact with the ApplicationAutoScaling service through CloudFor
|
mation.
See Also:
`AWS developer guide for ApplicationAutoScaling
<https://docs.aws.amazon.com/autoscaling/application/APIReference/Welcome.html>`_
"""
# noinspection PyUnresolvedReferences
from .._raw import applicationautoscaling as _raw
# noinspection PyUnresolvedReferences
from .._raw.applicationautoscaling import *
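# Illustrative usage sketch (the resource name is assumed to mirror the raw
# CloudFormation ApplicationAutoScaling resources re-exported above):
#
#     from flyingcircus.service.applicationautoscaling import ScalableTarget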
|
fokusov/moneyguru
|
qt/controller/transaction/filter_bar.py
|
Python
|
gpl-3.0
| 843
| 0.005931
|
# Created By: Virgil Dupras
# Created On: 2009-11-27
# Copyright 2015 Hardcoded Software (http://www.hardcoded.net)
#
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html
from hscommon.trans import trget
from core.document import FilterType
from ..filter_bar import FilterBar
tr = trget('ui')
class TransactionFilterBar(FilterBar):
BUTTONS = [
(tr("All"), None),
(tr("Income"), FilterType.Income),
(tr("Expenses"), FilterType.Expense),
(tr("Transfers"),
|
FilterType.Transfer),
(tr("Unassigned"), FilterType.Unassigned),
(tr("Reconciled"), FilterType.Reconciled),
(tr("Not Reconciled"), FilterType.NotReconciled),
]
|
kawashiro/dewyatochka2
|
src/dewyatochka/core/plugin/subsystem/message/py_entry.py
|
Python
|
gpl-3.0
| 2,198
| 0.002275
|
# -*- coding: UTF-8
""" Entry decorators for python plugins
Functions
=========
chat_message -- Decorator for chat message plugin
chat_command -- Decorator for chat command plugin
chat_accost -- Decorator for chat accost plugin
"""
from dewyatochka.core.plugin.loader.internal import entry_point
from dewyatochka.core.plugin.exceptions import PluginRegistrationError
from .matcher import PLUGIN_TYPE_COMMAND, PLUGIN_TYPE_MESSAGE, PLUGIN_TYPE_ACCOST
__all__ = ['chat_command', 'chat_message', 'chat_accost']
# Commands already in use
_reserved_commands = set()
def chat_message(fn=None, *, services=None, regular=False, system=False, own=False) -> callable:
""" Decorator to mark function as message handler entry point
:param callable fn: Function if decorator is invoked directly
:param list services: Dependent services list
:param bool regular: Register this handler for regular messages
:param bool system: Register this handler for system messages
:param bool own: Register this handler for own messages
:return callable:
"""
return entry_point(PLUGIN_TYPE_MESSAGE, services=services, regular=True, system=False, own=False)(fn) \
if fn is not None else \
entry_point(PLUGIN_TYPE_MESSAGE, services=services, regular=regular, system=system, own=own)
def chat_command(command, *, services=None) -> callable:
""" Register handler for chat command
:param list services: Dependent services list
:param str command: Command name without prefix
:return callable:
"""
if command in _reserved_commands:
raise PluginRegistrationError('Chat command %s is already in use' % command)
_reserved_commands.add(command)
return entry_point(PLUGIN_TYPE_COMMAND, services=services, command=command)
def chat_accost(fn=None, *, services=None) -> callable:
""" Register handler for a chat personal accost
:param callable fn: Function if decorator is invoked directly
:param list services: Dependent services list
:return callable:
"""
entry_point_fn = entry_point(PLUGIN_TYPE_ACCOST, services=services)
return entry_point_fn(fn) if fn is not None else entry_point_fn
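# Illustrative usage sketch (the handler signature is assumed, not taken from this file):
#
#     @chat_command('ping')
#     def ping_handler(**kwargs):
#         ...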
|
TheWardoctor/Wardoctors-repo
|
plugin.video.uncoded/uncoded.py
|
Python
|
apache-2.0
| 12,280
| 0.003746
|
# -*- coding: utf-8 -*-
'''
Uncoded Add-on
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import urlparse,sys,urllib
params = dict(urlparse.parse_qsl(sys.argv[2].replace('?','')))
action = params.get('action')
name = params.get('name')
title = params.get('title')
year = params.get('year')
imdb = params.get('imdb')
tvdb = params.get('tvdb')
tmdb = params.get('tmdb')
season = params.get('season')
episode = params.get('episode')
tvshowtitle = params.get('tvshowtitle')
premiered = params.get('premiered')
url = params.get('url')
image = params.get('image')
meta = params.get('meta')
select = params.get('select')
query = params.get('query')
source = params.get('source')
content = params.get('content')
windowedtrailer = params.get('windowedtrailer')
windowedtrailer = int(windowedtrailer) if windowedtrailer in ("0","1") else 0
if action == None:
from resources.lib.indexers import navigator
from resources.lib.modules import cache
cache.cache_version_check()
navigator.navigator().root()
elif action == 'movieNavigator':
from resources.lib.indexers import navigator
navigator.navigator().movies()
elif action == 'movieliteNavigator':
from resources.lib.indexers import navigator
navigator.navigator().movies(lite=True)
elif action == 'mymovieNavigator':
from resources.lib.indexers import navigator
navigator.navigator().mymovies()
elif action == 'mymovieliteNavigator':
from resources.lib.indexers import navigator
navigator.navigator().mymovies(lite=True)
elif action == 'tvNavigator':
from resources.lib.indexers import navigator
navigator.navigator().tvshows()
elif action == 'tvliteNavigator':
from resources.lib.indexers import navigator
navigator.navigator().tvshows(lite=True)
elif action == 'mytvNavigator':
from resources.lib.indexers import navigator
navigator.navigator().mytvshows()
elif action == 'mytvliteNavigator':
from resources.lib.indexers import navigator
navigator.navigator().mytvshows(lite=True)
elif action == 'downloadNavigator':
from resources.lib.indexers import navigator
navigator.navigator().downloads()
elif action == 'libraryNavigator':
from resources.lib.indexers import navigator
navigator.navigator().library()
elif action == 'toolNavigator':
from resources.lib.indexers import navigator
navigator.navigator().tools()
elif action == 'searchNavigator':
from resources.lib.indexers import navigator
navigator.navigator().search()
elif action == 'viewsNavigator':
from resources.lib.indexers import navigator
navigator.navigator().views()
elif action == 'clearCache':
from resources.lib.indexers import navigator
navigator.navigator().clearCache()
elif action == 'clearCacheSearch':
from resources.lib.indexers import navigator
navigator.navigator().clearCacheSearch()
elif action == 'infoCheck':
from resources.lib.indexers import navigator
navigator.navigator().infoCheck('')
elif action == 'movies':
from resources.lib.indexers import movies
movies.movies().get(url)
elif action == 'moviePage':
from resources.lib.indexers import movies
movies.movies().get(url)
elif action == 'movieWidget':
from resources.lib.indexers import movies
movies.movies().widget()
elif action == 'movieSearch':
from resources.lib.indexers import movies
movies.movies().search()
elif action == 'movieSearchnew':
from resources.lib.indexers import movies
movies.movies().search_new()
elif action == 'movieSearchterm':
from resources.lib.indexers import movies
movies.movies().search_term(name)
elif action == 'moviePerson':
from resources.lib.indexers import movies
movies.movies().person()
elif action == 'movieGenres':
from resources.lib.indexers import movies
movies.movies().genres()
elif action == 'movieLanguages':
from resources.lib.indexers import movies
movies.movies().languages()
elif action == 'movieCertificates':
from resources.lib.indexers import movies
movies.movies().certifications()
elif action == 'movieYears':
from resources.lib.indexers import movies
movies.movies().years()
elif action == 'moviePersons':
from resources.lib.indexers import movies
movies.movies().persons(url)
elif action == 'movieUserlists':
from resources.lib.indexers import movies
movies.movies().userlists()
elif action == 'channels':
from resources.lib.indexers import channels
channels.channels().get()
elif action == 'tvshows':
from resources.lib.indexers import tvshows
tvshows.tvshows().get(url)
elif action == 'tvshowPage':
from resources.lib.indexers import tvshows
tvshows.tvshows().get(url)
elif action == 'tvSearch':
from resources.lib.indexers import tvshows
tvshows.tvshows().search()
elif action == 'tvSearchnew':
from resources.lib.indexers import tvshows
tvshows.tvshows().search_new()
elif action == 'tvSearchterm':
from resources.lib.indexers import tvshows
tvshows.tvshows().search_term(name)
elif action == 'tvPerson':
from resources.lib.indexers import tvshows
tvshows.tvshows().person()
elif action == 'tvGenres':
from resources.lib.indexers import tvshows
tvshows.tvshows().genres()
elif action == 'tvNetworks':
from resources.lib.indexers import tvshows
tvshows.tvshows().networks()
elif action == 'tvLanguages':
from resources.lib.indexers import tvshows
tvshows.tvshows().languages()
elif action == 'tvCertificates':
from resources.lib.indexers import tvshows
tvshows.tvshows().certifications()
elif action == 'tvPersons':
from resources.lib.indexers import tvshows
tvshows.tvshows().persons(url)
elif action == 'tvUserlists':
from resources.lib.indexers import tvshows
tvshows.tvshows().userlists()
elif action == 'seasons':
from resources.lib.indexers import episodes
episodes.seasons().get(tvshowtitle, year, imdb, tvdb)
elif action == 'episodes':
from resources.lib.indexers import episodes
episodes.episodes().get(tvshowtitle, year, imdb, tvdb, season, episode)
elif action == 'calendar':
from resources.lib.indexers import episodes
episodes.episodes().calendar(url)
elif action == 'tvWidget':
from resources.lib.indexers import episodes
episodes.episodes().widget()
elif action == 'calendars':
from resources.lib.indexers import episodes
episodes.episodes().calendars()
elif action == 'episodeUserlists':
from resources.lib.indexers import episodes
episodes.episodes().userlists()
elif action == 'refresh':
from resources.lib.modules import control
control.refresh()
elif action == 'queueItem':
from resources.lib.modules import control
control.queueItem()
elif action == 'openSettings':
    from resources.lib.modules import control
    control.openSettings(query)
elif action == 'artwork':
from resources.lib.modules import control
control.artwork()
elif action == 'addView':
from resources.lib.modules import views
views.addView(content)
elif action == 'moviePlaycount':
from resources.lib.modules import playcount
playcount.movies(imdb, query)
elif action == 'episodePlaycount':
from resources.lib.modules import playcount
playcount.episodes(imdb, tvdb, season, episode, query)
elif action == 'tvPlaycount':
from resources.lib.modules import playcount
playcount.tvshows(name, imdb, tvdb, season, query)
elif action == 'trailer':
from resources.lib.modules import trailer
|
jonashaag/gpyconf
|
gpyconf/backends/_xml/__init__.py
|
Python
|
lgpl-2.1
| 1,161
| 0.002584
|
# %FILEHEADER%
from ..filebased import FileBasedBackend
from .. import NONE, MissingOption
from xmlserialize import serialize_to_file, unserialize_file
from lxml.etree import XMLSyntaxError
class XMLBackend(dict, FileBasedBackend):
ROOT_ELEMENT = 'configuration'
initial_file_content = '<{0}></{0}>'.format(ROOT_ELEMENT)
def __init__(self, backref, extension='xml', filename=None):
dict.__init__(self)
        FileBasedBackend.__init__(self, backref, extension, filename)
def read(self):
try:
return unserialize_file(self.file)
except XMLSyntaxError, err:
self.log('Could not parse XML configuration file: %s' % err,
level='error')
def save(self):
serialize_to_file(self, self.file, root_tag=self.ROOT_ELEMENT)
def get_option(self, item):
try:
return self.__getitem__(item)
except KeyError:
raise MissingOption(item)
set_option = dict.__setitem__
options = property(lambda self:self.keys())
tree = property(lambda self:self)
def reset_all(self):
self._create_file()
self.clear()
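# A minimal usage sketch for XMLBackend. The backref argument is assumed to
# be the owning gpyconf configuration object, and the option name and value
# below are made up for illustration.
#
#     backend = XMLBackend(config, filename='settings.xml')
#     backend.set_option('last_window_size', (800, 600))  # plain dict write
#     backend.save()                 # serialized under the <configuration> root
#     backend.get_option('missing')  # raises MissingOption instead of KeyError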
|
TheWardoctor/Wardoctors-repo
|
plugin.video.metalliq/resources/lib/meta/play/live.py
|
Python
|
apache-2.0
| 3,977
| 0.005029
|
import re
import urllib
from xbmcswift2 import xbmc
from meta import plugin, LANG
from meta.gui import dialogs
from meta.utils.text import to_unicode
from meta.library.live import get_player_plugin_from_library
from meta.navigation.base import get_icon_path, get_background_path
from meta.play.players import get_needed_langs, ADDON_SELECTOR
from meta.play.channelers import get_needed_langs, ADDON_PICKER
from meta.play.base import active_players, active_channelers, action_cancel, action_play, on_play_video
from settings import SETTING_USE_SIMPLE_SELECTOR, SETTING_LIVE_DEFAULT_PLAYER_FROM_CONTEXT, SETTING_LIVE_DEFAULT_PLAYER_FROM_LIBRARY, SETTING_LIVE_DEFAULT_PLAYER, SETTING_LIVE_LIBRARY_FOLDER, SETTING_LIVE_DEFAULT_CHANNELER
from language import get_string as _
def play_channel(channel, program, language, mode):
# Get players to use
if mode == 'select':
play_plugin = ADDON_SELECTOR.id
elif mode == 'context':
play_plugin = plugin.get_setting(SETTING_LIVE_DEFAULT_PLAYER_FROM_CONTEXT, unicode)
elif mode == 'library':
play_plugin = plugin.get_setting(SETTING_LIVE_DEFAULT_PLAYER_FROM_LIBRARY, unicode)
elif mode == 'default':
play_plugin = plugin.get_setting(SETTING_LIVE_DEFAULT_PLAYER, unicode)
else:
play_plugin = mode
players = active_players("live")
players = [p for p in players if p.id == play_plugin] or players
if not players:
dialogs.notify(msg="{0} {1} {2}".format(_("No cache").replace(_("Cache").lower(),_("TV")), _("Player").lower(), _("Enabled").lower()), title=_("Error"), delay=5000, image=get_icon_path("live"))
action_cancel()
return
# Get parameters
params = {}
for lang in get_needed_langs(players):
params[lang] = get_channel_parameters(channel, program, language)
params[lang] = to_unicode(params[lang])
# Go for it
link = on_play_video(mode, players, params)
if link:
action_play({
'label': channel,
'path': link,
'is_playable': True,
'info_type': 'video',
})
def play_channel_from_guide(channel, program, language, mode):
# Get channelers to use
if mode == 'select':
play_plugin = ADDON_PICKER.id
elif mode == 'default':
play_plugin = plugin.get_setting(SETTING_LIVE_DEFAULT_CHANNELER, unicode)
else:
play_plugin = mode
channelers = active_channelers("live")
channelers = [p for p in channelers if p.id == play_plugin] or channelers
if not channelers:
dialogs.notify(msg="{0} {1} {2}".format(_("No cache").replace(_("Cache").lower(),_("TV")), _("Player").lower(), _("Enabled").lower()), title=_("Error"), delay=5000, image=get_icon_path("live"))
action_cancel()
return
# Get parameters
params = {}
for lang in get_needed_langs(channelers):
params[lang] = get_channel_parameters(channel, program, language)
params[lang] = to_unicode(params[lang])
# Go for it
link = on_play_video(mode, channelers, params)
    if link:
action_play({
'label': channel,
'path': link,
'is_playable': True,
'info_type': 'video',
})
def get_channel_parameters(channel, program, language):
channel_regex = re.compile("(.+?)\s*(\d+|one|two|three|four|five|six|seven|eight|nine|ten)\s*.*?(\d*)$",
re.IGNORECASE|re.UNICODE)
parameters = {}
parameters['name'] = channel
parameters['urlname'] = urllib.quote(parameters['name'])
parameters['shortname'] = parameters['name'][1:-1]
parameters['basename'] = re.sub(channel_regex, r"\1",channel)
parameters['shortbasename'] = parameters['basename'][1:-1]
parameters['extension'] = re.sub(channel_regex, r"\2",channel)
parameters['delay'] = re.sub(channel_regex, r"\3", channel)
parameters['program'] = program
parameters['language'] = language
return parameters
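# Illustrative call (channel and program names are made up): for "BBC One HD"
# the channel_regex above yields basename "BBC" and extension "One", leaves
# delay empty, and urlname becomes the quoted form "BBC%20One%20HD".
#
#     get_channel_parameters("BBC One HD", "News at Six", "en")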
|
suutari/shoop
|
shuup/notify/models/notification.py
|
Python
|
agpl-3.0
| 4,222
| 0.002605
|
# -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2016, Shoop Commerce Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import unicode_literals
from django.conf import settings
from django.core.urlresolvers import NoReverseMatch, reverse
from django.db import models
from django.db.models import Q
from django.utils.timezone import now
from django.utils.translation import ugettext_lazy as _
from enumfields import EnumIntegerField
from jsonfield.fields import JSONField
from shuup.core.fields import InternalIdentifierField
from shuup.notify.enums import Priority, RecipientType
class NotificationManager(models.Manager):
def for_user(self, user):
"""
        :type user: django.contrib.auth.models.AbstractUser
        """
        if not user or user.is_anonymous():
            return self.none()
        q = (Q(recipient_type=RecipientType.SPECIFIC_USER) & Q(recipient=user))
if getattr(user, 'is_superuser', False):
q |= Q(recipient_type=RecipientType.ADMINS)
return self.filter(q)
def unread_for_user(self, user):
return self.for_user(user).exclude(marked_read=True)
class Notification(models.Model):
"""
A model for persistent notifications to be shown in the admin, etc.
"""
recipient_type = EnumIntegerField(RecipientType, default=RecipientType.ADMINS, verbose_name=_('recipient type'))
recipient = models.ForeignKey(
settings.AUTH_USER_MODEL, blank=True, null=True, related_name="+", on_delete=models.SET_NULL,
verbose_name=_('recipient')
)
created_on = models.DateTimeField(auto_now_add=True, editable=False, verbose_name=_('created on'))
message = models.CharField(max_length=140, editable=False, default="", verbose_name=_('message'))
identifier = InternalIdentifierField(unique=False)
priority = EnumIntegerField(Priority, default=Priority.NORMAL, db_index=True, verbose_name=_('priority'))
_data = JSONField(blank=True, null=True, editable=False, db_column="data")
marked_read = models.BooleanField(db_index=True, editable=False, default=False, verbose_name=_('marked read'))
marked_read_by = models.ForeignKey(
settings.AUTH_USER_MODEL, blank=True, null=True, editable=False, related_name="+", on_delete=models.SET_NULL,
verbose_name=_('marked read by')
)
marked_read_on = models.DateTimeField(null=True, blank=True, verbose_name=_('marked read on'))
objects = NotificationManager()
def __init__(self, *args, **kwargs):
url = kwargs.pop("url", None)
super(Notification, self).__init__(*args, **kwargs)
if url:
self.url = url
def save(self, *args, **kwargs):
if self.recipient_type == RecipientType.SPECIFIC_USER and not self.recipient_id:
raise ValueError("With RecipientType.SPECIFIC_USER, recipient is required")
super(Notification, self).save(*args, **kwargs)
def mark_read(self, user):
if self.marked_read:
return False
self.marked_read = True
self.marked_read_by = user
self.marked_read_on = now()
self.save(update_fields=('marked_read', 'marked_read_by', 'marked_read_on'))
return True
@property
def is_read(self):
return self.marked_read
@property
def data(self):
if not self._data:
self._data = {}
return self._data
@property
def url(self):
url = self.data.get("_url")
if isinstance(url, dict):
return reverse(**url)
return url
@url.setter
def url(self, value):
if self.pk:
raise ValueError("URL can't be set on a saved notification")
self.data["_url"] = value
def set_reverse_url(self, **reverse_kwargs):
if self.pk:
raise ValueError("URL can't be set on a saved notification")
try:
reverse(**reverse_kwargs)
except NoReverseMatch: # pragma: no cover
raise ValueError("Invalid reverse URL parameters")
self.data["_url"] = reverse_kwargs
|
EdDev/vdsm
|
lib/vdsm/storage/check.py
|
Python
|
gpl-2.0
| 12,176
| 0
|
#
# Copyright 2016-2017 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Refer to the README and COPYING files for full details of the license
#
"""
This module provides event loop based infrastructure for scalable storage
health monitoring:
CheckService entry point for starting and stopping path checkers.
DirectioChecker checker using dd process for file or block based
volumes.
CheckResult result object provided to user callback on each check.
"""
from __future__ import absolute_import
import logging
import re
import subprocess
import threading
from vdsm import cmdutils
from vdsm import concurrent
from vdsm import constants
from vdsm.common.compat import CPopen
from vdsm.storage import asyncevent
from vdsm.storage import asyncutils
from vdsm.storage import exception
EXEC_ERROR = 127
_log = logging.getLogger("storage.check")
class CheckService(object):
"""
Provides path checking service.
This object is a simple thread safe entry point for starting and stopping
path checkers, keeping the internals decoupled from client code.
Usage:
# Start the service
service = CheckService()
service.start()
# Start checking path
service.start_checking(path, complete)
# Stop checking path, waiting up to 30 seconds
service.stop_checking(path, timeout=30)
# Stop the service
service.stop()
"""
def __init__(self):
self._lock = threading.Lock()
self._loop = asyncevent.EventLoop()
self._thread = concurrent.thread(self._loop.run_forever,
name="check/loop")
self._checkers = {}
def start(self):
"""
Start the service thread.
"""
_log.info("Starting check service")
self._thread.start()
def stop(self):
"""
Stop all checkers and the service thread.
Do not wait for running check processes since the application is
shutting down. To wait for all processes, stop all checkers and wait
        for them before stopping.
"""
if not self._thread.is_alive():
return
_log.info("Stopping check service")
with self._lock:
for checker in self._checkers.values():
self._loop.call_soon_threadsafe(checker.stop)
self._checkers.clear()
self._loop.call_soon_threadsafe(self._loop.stop)
self._thread.join()
self._loop.close()
def start_checking(self, path, complete, interval=10.0):
"""
        Start checking path every interval seconds. On check, invoke the
complete callback with a CheckResult instance.
Note that the complete callback is invoked in the check thread, and
must not block, as it will block all other checkers.
"""
_log.info("Start checking %r", path)
with self._lock:
if path in self._checkers:
raise RuntimeError("Already checking path %r" % path)
checker = DirectioChecker(self._loop, path, complete,
interval=interval)
self._checkers[path] = checker
self._loop.call_soon_threadsafe(checker.start)
def stop_checking(self, path, timeout=None):
"""
Stop checking path. If timeout is set, wait until the checker has
stopped, or the timeout has expired.
"""
_log.info("Stop checking %r", path)
with self._lock:
checker = self._checkers.pop(path)
self._loop.call_soon_threadsafe(checker.stop)
if timeout:
return checker.wait(timeout)
def is_checking(self, path):
return path in self._checkers
# Checker state
IDLE = "idle"
RUNNING = "running"
STOPPING = "stopping"
class DirectioChecker(object):
"""
Check path availability using direct I/O.
DirectioChecker is created with a complete callback. Each time a check
cycle is completed, the complete callback will be invoked with a
CheckResult instance.
CheckResult provides a delay() method returning the read delay in
seconds. If the check failed, the delay() method will raise the
appropriate exception that can be reported to engine.
Note that the complete callback must not block as it will block the entire
event loop thread.
The checker runs exactly every interval seconds. If a check did not
complete before the next check is scheduled, the next check will be delayed
to the next interval.
Checker is not thread safe. Use EventLoop.call_soon_threadsafe() to start
or stop a checker. The only thread safe method is wait().
Usage::
# Start the event loop thread
loop = asyncevent.EventLoop()
concurrent.thread(loop.run_forever).start()
# The complete callback
def complete(result):
try:
check_delay = result.delay()
except Exception as e:
check_error = e
check_time = time.time()
# Start a checker on the event loop thread
checker = DirectioChecker(loop, path, complete)
loop.call_soon_threadsafe(checker.start)
...
# Stop a checker from another thread
loop.call_soon_threadsafe(checker.stop)
# If needed, wait until a checker actually stopped.
checker.wait(30)
"""
log = logging.getLogger("storage.directiochecker")
def __init__(self, loop, path, complete, interval=10.0):
self._loop = loop
self._path = path
self._complete = complete
self._interval = interval
self._looper = asyncutils.LoopingCall(loop, self._check)
self._check_time = None
self._proc = None
self._reader = None
self._reaper = None
self._err = None
self._state = IDLE
self._stopped = threading.Event()
def start(self):
"""
Start the checker.
Raises RuntimeError if the checker is running.
"""
if self._state is not IDLE:
raise RuntimeError("Checker is %s", self._state)
self._state = RUNNING
_log.debug("Checker %r started", self._path)
self._stopped.clear()
self._looper.start(self._interval)
def stop(self):
"""
Stop the checker.
If the checker is waiting for the next check, the next check will be
cancelled. If the checker is in the middle of a check, it will stop
when the check completes.
If the checker is not running, the call is ignored silently.
"""
if self._state is not RUNNING:
return
_log.debug("Checker %r stopping", self._path)
self._state = STOPPING
self._looper.stop()
if self._proc is None:
self._stop_completed()
def wait(self, timeout=None):
"""
Wait until a checker has stopped.
Returns True if checker has stopped, False if timeout expired.
"""
return self._stopped.wait(timeout)
def is_running(self):
return self._state is not IDLE
def _stop_completed(self):
self._state = IDLE
_log.debug("Checker %r stopped", self._path)
self._stopped.set()
def _check(self):
"""
Called when starting the checker, and then every interval seconds until
the checker is stopped.
"
|
eklochkov/gitinspector
|
gitinspector/extensions.py
|
Python
|
gpl-3.0
| 1,635
| 0.015912
|
# coding: utf-8
#
# Copyright © 2012-2015 Ejwa Software. All rights reserved.
#
# This file is part of gitinspector.
#
# gitinspector is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# gitinspector is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with gitinspector. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
DEFAULT_EXTENSIONS = {"java":"java", "c":"c", "cc":"c", "cpp":"cpp", "h":"cpp", "hh":"cpp", "hpp":"cpp", "py":"python",
"glsl":"opengl", "rb":"ruby", "js":"javascript", "sql":"sql", "fltar":"ansible","pkb":"sql",
"pks":"sq
|
l","txt":"text", "drt":"drools", "drl":"drools", "bpmn":"processes", "kt":"kotlin"}
__extensions__ = DEFAULT_EXTENSIONS.keys()
__extensions_dict__ = DEFAULT_EXTENSIONS
__located_extensions__ = set()
def get():
return __extensions__
def get_dict():
return __extensions_dict__
def define(string):
global __extensions__
__extensions__ = string.split(",")
def add_located(string):
if len(string) == 0:
__located_extensions__.add("*")
else:
__located_extensions__.add(string)
def get_located():
return __located_extensions__
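# A usage sketch: gitinspector's command-line front end normally drives these
# calls, and the extension names below are illustrative only.
#
#     define("py,java")    # restrict inspection to these suffixes
#     add_located("md")    # record a suffix encountered during the scan
#     get()                # -> ['py', 'java']
#     get_located()        # -> {'md'}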
|
os2webscanner/os2webscanner
|
scrapy-webscanner/scanners/processors/xml.py
|
Python
|
mpl-2.0
| 2,214
| 0.002258
|
# The contents of this file are subject to the Mozilla Public License
# Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
# http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS IS"basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
#
# OS2Webscanner was developed by Magenta in collaboration with OS2 the
# Danish community of open source municipalities (http://www.os2web.dk/).
#
# The code is currently governed by OS2 the Danish community of open
# source municipalities ( http://www.os2web.dk/ )
"""HTML Processors."""
from .processor import Processor
from .text import TextProcessor
import logging
import os
import xmltodict
import json
from xml.parsers.expat import ExpatError
from .html import HTMLProcessor
class XmlProcessor(HTMLProcessor):
"""Processor for XMLdocuments.
When processing, converts document to json one line including all attributes
Immediately processes with TextProcessor after processing.
"""
item_type = "xml"
text_processor = TextProcessor()
def handle_spider_item(self, data, url_object):
"""Immediately process the spider item."""
return self.process(data, url_object)
def handle_queue_item(self, item):
"""Immediately process the queue item."""
result = self.process_file(item.file_path, item.url)
if os.path.exists(item.file_path):
os.remove(item.file_path)
return result
def process(self, data, url_object):
"""Process XML data.
        Converts the document to JSON before processing with TextProcessor.
        If the XML is not well formed, it is treated as HTML instead.
"""
logging.info("Process XML %s" % url_object.url)
try:
data = json.dumps(xmltodict.parse(data))
return self.text_processor.process(data, url_object)
except ExpatError:
            return super(XmlProcessor, self).process(data, url_object)
Processor.register_processor(XmlProcessor.item_type, XmlProcessor)
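# Behavior sketch: xmltodict maps attributes to '@'-prefixed keys and mixed
# text content to '#text', so a document like '<a href="x">t</a>' becomes the
# single scannable line '{"a": {"@href": "x", "#text": "t"}}'.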
|
GoogleCloudPlatform/jupyter-extensions
|
jupyterlab_gitsync/setup.py
|
Python
|
apache-2.0
| 1,836
| 0.003268
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from setuptools import find_packages, setup
with open("README.md") as f:
long_description = f.read()
version = None
with open(os.path.join(os.getcwd(), "jupyter
|
lab_gitsync", "version.py")) as f:
for l in f:
if l.startswith("VERSION"):
version = l.rstrip().split(" = ")[1].replace("'", "")
if not version:
raise RuntimeError("Unable to determine version")
npm_package = "jupyterlab_gitsync-{}.tgz".format(version)
if not os.path.exists(os.path.join(os.getcwd(), npm_package)):
raise FileNotFoundError("Cannot find NPM package. Did you run
|
`npm pack`?")
data_files = [
("share/jupyter/lab/extensions", (npm_package,)),
("etc/jupyter/jupyter_notebook_config.d",
("jupyter-config/jupyter_notebook_config.d/jupyterlab_gitsync.json",)),
]
setup(
name="jupyterlab_gitsync",
version=version,
description="JupyterLab Git Sync",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/GoogleCloudPlatform/jupyter-extensions",
data_files=data_files,
license="Apache License 2.0",
packages=find_packages(),
python_requires=">=3.6",
install_requires=[
"jupyterlab~=1.2.0",
"gcp_jupyterlab_shared>=1.0.11",
],
)
|
loult-elte-fwere/termiloult
|
tests/test_multiple_play.py
|
Python
|
mit
| 559
| 0.001789
|
import logging
from time import sleep
from tools.audiosink import AudioSink
demo = logging.getLogger('Demo')
logging.basicConfig(level=logging.DEBUG)
p = AudioSink()
with open("sample.wav", 'rb') as f:
a = f.read()
demo.info("add the first track")
p.add(a, "a")
sleep(2)
with open("sample.wav", 'rb') as f:
b = f.read()
demo.info("add a second trac
|
k")
p.add(b,"b")
sleep(5)
demo.info("remove the first track")
p.remove("a")
sleep(5)
demo.info("lower the volume to 40%")
p.volume = 40
sleep(15)
demo.info("close the AudioSink")
p.close()
|
nfrechette/acl
|
tools/graph_generation/gen_bit_rate_stats.py
|
Python
|
mit
| 2,309
| 0.020788
|
import numpy
import os
import sys
# This script depends on a SJSON parsing package:
# https://pypi.python.org/pypi/SJSON/1.1.0
# https://shelter13.net/projects/SJSON/
# https://bitbucket.org/Anteru/sjson/src
import sjson
if __name__ == "__main__":
if sys.version_info < (3, 4):
print('Python 3.4 or higher needed to run this script')
sys.exit(1)
if len(sys.argv) != 2:
print('Usage: python gen_bit_rate_stats.py <path/to/input_file.sjson>')
sys.exit(1)
input_sjson_file = sys.argv[1]
if not input_sjson_file.endswith('.sjson'):
print('Expected SJSON input file, found: {}'.format(input_sjson_file))
sys.exit(1)
if not os.path.exists(input_sjson_file):
print('Input file not found: {}'.format(input_sjson_file))
sys.exit(1)
with open(input_sjson_file, 'r') as file:
input_sjson_data = sjson.loads(file.read())
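	# For reference, a minimal input file for the loop below might look like
	# this in SJSON form (header, file and filter values are illustrative;
	# 'filter' is optional):
	#
	#     inputs = [ { header = "ACL 1.1" file = "stats.csv" filter = "uniform" } ]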
input_data_type_def = {
'names': ('algorithm_names', '0', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '32'),
'formats': ('S128', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4')
}
columns_to_extract = (0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19)
output_csv_file_path = 'D:\\acl-dev\\tools\\graph_generation\\bit_rates.csv'
output_csv_data = []
output_csv_headers = ['Bit Rate']
output_csv_data.append(['0', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '32'])
for entry in input_sjson_data['inputs']:
print('Parsing {} ...'.format(entry['header']))
		csv_data = numpy.loadtxt(entry['file'], delimiter=',', dtype=input_data_type_def, skiprows=1, usecols=columns_to_extract)
		filter = entry.get('filter', None)
		if filter is not None:
			best_variable_data_mask = csv_data['algorithm_names'] == bytes(entry['filter'], encoding='utf-8')
			csv_data = csv_data[best_variable_data_mask]
# Strip algorithm name
output_csv_data.append(csv_data[0].tolist()[1:])
output_csv_headers.append(entry['header'])
output_csv_data = numpy.column_stack(output_csv_data)
with open(output_csv_file_path, 'wb') as f:
header = bytes('{}\n'.format(','.join(output_csv_headers)), 'utf-8')
f.write(header)
numpy.savetxt(f, output_csv_data, delimiter=',', fmt=('%s'))
|
blstream/myHoard_Python
|
myhoard/settings/dev.py
|
Python
|
apache-2.0
| 1,018
| 0.001965
|
from common import *
DEBUG = True
MONGODB_SETTINGS = {
'HOST': '127.0.0.1',
'PORT': 27017,
'DB': 'myhoard_dev',
'USERNAME': 'myhoard',
'PASSWORD': 'myh0@rd',
}
# Logging
LOGGING = {
'version': 1,
    'disable_existing_loggers': False,
'root': {
'level': 'NOTSET',
'handlers': ['console', 'file'],
},
'handlers': {
'console': {
'class': 'logging.StreamHandler',
'level': 'INFO',
'formatter': 'standard',
'stream': 'ext://sys.stdout',
},
        'file': {
'class': 'logging.handlers.RotatingFileHandler',
'level': 'INFO',
'formatter': 'standard',
'filename': '/home/pat/logs/dev/myhoard.log',
'mode': 'a',
'maxBytes': 2 * 1024 * 1024, # 2MiB
'backupCount': 64,
},
},
'formatters': {
'standard': {
'format': '%(asctime)s [%(levelname)s] %(name)s: %(message)s',
},
},
}
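# A minimal sketch of how this dict is typically consumed, assuming the
# standard-library logging machinery (the import is not part of this module):
#
#     import logging.config
#     logging.config.dictConfig(LOGGING)
#     logging.getLogger(__name__).info("dev settings loaded")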
|
Mlieou/leetcode_python
|
leetcode/python/ex_689.py
|
Python
|
mit
| 1,077
| 0.007428
|
class Solution(object):
def maxSumOfThreeSubarrays(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: List[int]
"""
window = []
c_sum = 0
for i, v in enumerate(nums):
c_sum += v
if i >= k: c_sum -= nums[i-k]
if i >= k-1: window.append(c_sum)
left = [0] * len(window)
best = 0
for i in range(len(window)):
if window[i] > window[best]:
best = i
left[i] = best
right = [0] * len(window)
        best = len(window) - 1
for i in range(len(window)-1, -1, -1):
if window[i] > window[best]:
best = i
right[i] = best
ans = None
        for b in range(k, len(window) - k):
a, c = left[b-k], right[b+k]
if ans is None or (window[a] + window[b] + window[c] >
window[ans[0]] + window[ans[1]] + window[ans[2]]):
ans = a, b, c
return ans
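# Quick sanity check (driver added for illustration; LeetCode calls the
# method directly). The classic example below expects the lexicographically
# smallest start indices, returned here as a tuple.
if __name__ == '__main__':
    print(Solution().maxSumOfThreeSubarrays([1, 2, 1, 2, 6, 7, 5, 1], 2))
    # -> (0, 3, 5): windows [1,2], [2,6], [7,5] sum to 3 + 8 + 12 = 23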
|
rbuffat/pyidf
|
tests/test_exteriorfuelequipment.py
|
Python
|
apache-2.0
| 1,733
| 0.003462
|
import os
import tempfile
import unittest
import logging
from pyidf import ValidationLevel
import pyidf
from pyidf.idf import IDF
from pyidf.exterior_equipment import ExteriorFuelEquipment
log = logging.getLogger(__name__)
class TestExteriorFuelEquipment(unittest.TestCase):
def setUp(self):
self.fd, self.path = tempfile.mkstemp()
def tearDown(self):
os.remove(self.path)
def test_create_exteriorfuelequipment(self):
pyidf.validation_level = ValidationLevel.error
obj = ExteriorFuelEquipment()
# alpha
var_name = "Na
|
me"
obj.name = var_name
# alpha
var_fuel_use_type = "Electricity"
obj.fuel_use_type = var_fuel_use_type
# object-list
var_schedule_name = "object-list|Schedule Name"
obj.schedule_name = var_schedule_name
# real
var_design_level = 0.0
obj.design_level = var_design_level
# alpha
        var_enduse_subcategory = "End-Use Subcategory"
obj.enduse_subcategory = var_enduse_subcategory
idf = IDF()
idf.add(obj)
idf.save(self.path, check=False)
with open(self.path, mode='r') as f:
for line in f:
log.debug(line.strip())
idf2 = IDF(self.path)
self.assertEqual(idf2.exteriorfuelequipments[0].name, var_name)
self.assertEqual(idf2.exteriorfuelequipments[0].fuel_use_type, var_fuel_use_type)
self.assertEqual(idf2.exteriorfuelequipments[0].schedule_name, var_schedule_name)
self.assertAlmostEqual(idf2.exteriorfuelequipments[0].design_level, var_design_level)
self.assertEqual(idf2.exteriorfuelequipments[0].enduse_subcategory, var_enduse_subcategory)
|
abhijo89/sphinxit
|
sphinxit/core/constants.py
|
Python
|
bsd-3-clause
| 1,436
| 0
|
"""
sphinxit.core.constants
~~~~~~~~~~~~~~~~~~~~~~~
Defines some Sphinx-specific constants.
:copyright: (c) 2013 by Roman Semirook.
:license: BSD, see LICENSE for more details.
"""
from collections import namedtuple
RESERVED_KEYWORDS = (
'AND',
'AS',
'ASC',
'AVG',
'BEGIN',
'BETWEEN',
'BY',
'CALL',
'COLLATION',
'COMMIT',
'COUNT',
'DELETE',
'DESC',
'DESCRIBE',
'DISTINCT',
'FALSE',
'FROM',
'GLOBAL',
'GROUP',
'IN',
'INSERT',
'INTO',
'LIMIT',
'MATCH',
'MAX',
'META',
'MIN',
'NOT',
'NULL',
'OPTION',
'OR',
'ORDER',
'REPLACE',
    'ROLLBACK',
'SELECT',
'SET',
'SHOW',
'START',
'STATUS',
'SUM',
'TABLES',
'TRANSACTION',
'TRUE',
'UPDATE',
'VALUES',
'VARIABLES',
'WARNINGS',
'WEIGHT',
'WHERE',
'WITHIN'
)
ESCAPED_CHARS = namedtuple('EscapedChars', ['single_escape', 'double_escape'])(
single_escape=("'", '+', '[', ']', '=', '*'),
double_escape=('@', '!', '^', '(', ')', '~', '-', '|', '/', '<<', '$', '"')
)
NODES_ORDER = namedtuple('NodesOrder', ['select', 'update'])(
select=(
'SelectFrom',
'Where',
'GroupBy',
'OrderBy',
'WithinGroupOrderBy',
'Limit',
'Options'
),
update=(
'UpdateSet',
'Where',
'Options'
)
)
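# A minimal sketch of how the two escape groups might be applied to a raw
# query string; escape_query is a hypothetical helper, not part of
# sphinxit's public API.
def escape_query(raw):
    for char in ESCAPED_CHARS.single_escape:
        raw = raw.replace(char, '\\' + char)    # prefix with one backslash
    for char in ESCAPED_CHARS.double_escape:
        raw = raw.replace(char, '\\\\' + char)  # prefix with two backslashes
    return raw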
|