| repo_name (stringlengths 5-100) | path (stringlengths 4-231) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int64, 6-947k) | score (float64, 0-0.34) | prefix (stringlengths 0-8.16k) | middle (stringlengths 3-512) | suffix (stringlengths 0-8.17k) |
|---|---|---|---|---|---|---|---|---|
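Each row below is one source file together with its metadata, with the file split across the `prefix`, `middle`, and `suffix` cells (a fill-in-the-middle-style layout); concatenating the three cells in order recovers the original file, which is why several cells begin mid-identifier. A minimal sketch of that reassembly, assuming the rows have already been parsed into dicts keyed by the column names above (`reassemble`, `dump_rows`, and the output naming scheme are illustrative helpers, not part of any dataset tooling):

```python
# Minimal sketch: rebuild each source file from its prefix/middle/suffix cells.
# Assumes rows were already parsed into dicts keyed by the columns shown above;
# the helpers and output naming below are hypothetical, not dataset tooling.
import pathlib

def reassemble(row: dict) -> str:
    # The original file is simply the three cells concatenated in order.
    return row["prefix"] + row["middle"] + row["suffix"]

def dump_rows(rows, out_dir="reassembled"):
    out = pathlib.Path(out_dir)
    out.mkdir(parents=True, exist_ok=True)
    for row in rows:
        # Flatten "repo/name" and "dir/file.py" into a single safe file name.
        name = "{}__{}".format(row["repo_name"], row["path"]).replace("/", "__")
        (out / name).write_text(reassemble(row), encoding="utf-8")
```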
ChuanleiGuo/AlgorithmsPlayground
|
LeetCodeSolutions/python/211_Add_and_Search_Word_Data_structure_design.py
|
Python
|
mit
| 1,004
| 0.000996
|
class WordDictionary(object):
def __init__(self):
"""
initialize your data structure here.
"""
self.root = {}
def addWord(self, word):
"""
Adds a word into the data structure.
:type word: str
:rtype: void
"""
node = self.roo
|
t
for c in word:
if c not in node:
node[c] = {}
node = node[c]
node['#'] = '#'
def search(self, word):
"""
Returns if the word is in the data structure. A word could
contain the dot character '.' to represent any one letter.
:type word: str
:rtype: bool
"""
def find(word, node):
if not word:
return '#' in node
c, w
|
ord = word[0], word[1:]
if c != '.':
return c in node and find(word, node[c])
return any(find(word, d) for d in node.values() if d != '#')
return find(word, self.root)
|
tiangolo/fastapi
|
tests/test_tutorial/test_metadata/test_tutorial001.py
|
Python
|
mit
| 1,611
| 0.000622
|
from fastapi.testclient import TestClient
from docs_src.metadata.tutorial001 import app
client = TestClient(app)
openapi_schema = {
"openapi": "3.0.2",
"info": {
"title": "ChimichangApp",
"description": "\nChimichangApp API helps you do awesome stuff. 🚀\n\n## Items\n\nYou can **read items**.\n\n## Users\n\nYou will be able to:\n\n* **Create users** (_not implemented_).\n* **Read users** (_not implemented_).\n",
"termsOfService": "http://example.com/terms/",
"contact": {
"name": "Deadpoolio the Amazing",
"url": "http://x-force.example.com/contact/",
"email": "[email protected]",
},
"license": {
"na
|
me": "Apache 2.0",
"url": "https://www.apache.org/licenses/LICENSE-2.0.html",
},
"version": "0.0.1",
},
"paths": {
"/items/": {
"get": {
"summary": "Read Items",
"operatio
|
nId": "read_items_items__get",
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
}
},
}
}
},
}
def test_openapi_schema():
response = client.get("/openapi.json")
assert response.status_code == 200, response.text
assert response.json() == openapi_schema
def test_items():
response = client.get("/items/")
assert response.status_code == 200, response.text
assert response.json() == [{"name": "Katana"}]
|
nareshshah139/CocktailPartyAlgorithm1
|
CocktailPartyAlgorithm.py
|
Python
|
mit
| 2,277
| 0.039087
|
import sys
from numpy import *
from scipy import signal
import scipy.io.wavfile
from matplotlib import pyplot
import sklearn.decomposition
def main():
# First load the audio data, the audio data on this example is obtained from http://www.ism.ac.jp/~shiro/research/blindsep.html
rate, source = scipy.io.wavfile.read('/Users/nareshshah/blind_source_data/X_rsm2.wav')
# The 2 sources are stored in left and r
|
ight channels of the audio
source_1, source_2 = sour
|
ce[:, 0], source[:, 1]
data = c_[source_1, source_2]
# Normalize the audio from int16 range to [-1, 1]
data = data / 2.0 ** 15
# Perform Fast ICA on the data to obtained separated sources
fast_ica = sklearn.decomposition.FastICA( n_components=2 )
separated = fast_ica.fit_transform( data )
# Check, data = separated X mixing_matrix + mean
assert allclose( data, separated.dot( fast_ica.mixing_.T ) + fast_ica.mean_ )
# Map the separated result into [-1, 1] range
max_source, min_source = 1.0, -1.0
max_result, min_result = max(separated.flatten()), min(separated.flatten())
separated = map( lambda x: (2.0 * (x - min_result))/(max_result - min_result) + -1.0, separated.flatten() )
separated = reshape( separated, (shape(separated)[0] / 2, 2) )
# Store the separated audio, listen to them later
scipy.io.wavfile.write( '/Users/nareshshah/blind_source_data/separated_1.wav', rate, separated[:, 0] )
scipy.io.wavfile.write( '/Users/nareshshah/blind_source_data/separated_2.wav', rate, separated[:, 1] )
# Plot the original and separated audio data
fig = pyplot.figure( figsize=(10, 8) )
fig.canvas.set_window_title( 'Blind Source Separation' )
ax = fig.add_subplot(221)
ax.set_title('Source #1')
ax.set_ylim([-1, 1])
ax.get_xaxis().set_visible( False )
pyplot.plot( data[:, 0], color='r' )
ax = fig.add_subplot(223)
ax.set_ylim([-1, 1])
ax.set_title('Source #2')
ax.get_xaxis().set_visible( False )
pyplot.plot( data[:, 1], color='r' )
ax = fig.add_subplot(222)
ax.set_ylim([-1, 1])
ax.set_title('Separated #1')
ax.get_xaxis().set_visible( False )
pyplot.plot( separated[:, 0], color='g' )
ax = fig.add_subplot(224)
ax.set_ylim([-1, 1])
ax.set_title('Separated #2')
ax.get_xaxis().set_visible( False )
pyplot.plot( separated[:, 1], color='g' )
pyplot.show()
|
davislidaqing/Mcoderadius
|
toughradius/radiusd/plugins/acct_stop_process.py
|
Python
|
agpl-3.0
| 1,514
| 0.018494
|
#!/usr/bin/env python
#coding=utf-8
from twisted.python import log
from toughradius.radiusd.settings import *
import logging
import datetime
def process(req=None,user=None,radiusd=None,**
|
kwargs):
if not req.get_acct_status_type() == STATUS_TYPE_STOP:
return
runstat=radiusd.runstat
|
store = radiusd.store
runstat.acct_stop += 1
ticket = req.get_ticket()
if not ticket.nas_addr:
ticket.nas_addr = req.source[0]
_datetime = datetime.datetime.now()
online = store.get_online(ticket.nas_addr,ticket.acct_session_id)
if not online:
session_time = ticket.acct_session_time
stop_time = _datetime.strftime( "%Y-%m-%d %H:%M:%S")
start_time = (_datetime - datetime.timedelta(seconds=int(session_time))).strftime( "%Y-%m-%d %H:%M:%S")
ticket.acct_start_time = start_time
ticket.acct_stop_time = stop_time
ticket.start_source= STATUS_TYPE_STOP
ticket.stop_source = STATUS_TYPE_STOP
store.add_ticket(ticket)
else:
store.del_online(ticket.nas_addr,ticket.acct_session_id)
ticket.acct_start_time = online['acct_start_time']
ticket.acct_stop_time= _datetime.strftime( "%Y-%m-%d %H:%M:%S")
ticket.start_source = online['start_source']
ticket.stop_source = STATUS_TYPE_STOP
store.add_ticket(ticket)
radiusd.syslog.info('[username:%s] Accounting stop request, remove online'%req.get_user_name(),level=logging.INFO)
|
EmanueleCannizzaro/scons
|
test/duplicate-sources.py
|
Python
|
mit
| 2,021
| 0.000495
|
#!/usr/bin/env python
#
# Copyright (c) 2001 - 2016 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTH
|
ERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "test/duplicate-sources.py rel_2.5.1:3735:9dc6cee5c168 2016/11/03 14:02:02 bdbaddog"
"""
Verify that specifying a source file more than once
|
works correctly
and does not cause a rebuild.
"""
import TestSCons
test = TestSCons.TestSCons()
test.write('SConstruct', """\
def cat(target, source, env):
t = open(str(target[0]), 'wb')
for s in source:
t.write(open(str(s), 'rb').read())
t.close()
env = Environment(BUILDERS = {'Cat' : Builder(action = cat)})
env.Cat('out.txt', ['f1.in', 'f2.in', 'f1.in'])
""")
test.write('f1.in', "f1.in\n")
test.write('f2.in', "f2.in\n")
test.run(arguments='--debug=explain .')
test.must_match('out.txt', "f1.in\nf2.in\nf1.in\n")
test.up_to_date(options='--debug=explain', arguments='.')
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
wevote/WebAppPublic
|
candidate/controllers.py
|
Python
|
bsd-3-clause
| 29,893
| 0.003646
|
# candidate/controllers.py
# Brought to you by We Vote. Be good.
# -*- coding: UTF-8 -*-
from .models import CandidateCampaignListManager, CandidateCampaignManager
from ballot.models import CANDIDATE
from config.base import get_environment_variable
from django.contrib import messages
from django.http import HttpResponse
from exception.models import handle_exception
from import_export_vote_smart.controllers import retrieve_and_match_candidate_from_vote_smart, \
retrieve_candidate_photo_from_vote_smart
import json
from office.models import ContestOfficeManager
from politician.models import PoliticianManager
import requests
import wevote_functions.admin
from wevote_functions.functions import posit
|
ive_value_exists
logger = wevote_functions.admin.get_logger(__name__)
WE_VOTE_API_KEY = get_environment_variable("WE_VOTE_API_KEY")
CANDIDATES_SYNC_URL = get_environment_variable("CANDIDATES_SYNC_URL")
def candidates_import
|
_from_sample_file():
"""
Get the json data, and either create new entries or update existing
:return:
"""
# Load saved json from local file
logger.info("Loading CandidateCampaigns from local file")
with open("candidate/import_data/candidate_campaigns_sample.json") as json_data:
structured_json = json.load(json_data)
return candidates_import_from_structured_json(structured_json)
def candidates_import_from_master_server(request, google_civic_election_id=''):
"""
Get the json data, and either create new entries or update existing
:return:
"""
messages.add_message(request, messages.INFO, "Loading Candidates from We Vote Master servers")
logger.info("Loading Candidates from We Vote Master servers")
# Request json file from We Vote servers
request = requests.get(CANDIDATES_SYNC_URL, params={
"key": WE_VOTE_API_KEY, # This comes from an environment variable
"format": 'json',
"google_civic_election_id": google_civic_election_id,
})
structured_json = json.loads(request.text)
results = filter_candidates_structured_json_for_local_duplicates(structured_json)
filtered_structured_json = results['structured_json']
duplicates_removed = results['duplicates_removed']
import_results = candidates_import_from_structured_json(filtered_structured_json)
import_results['duplicates_removed'] = duplicates_removed
return import_results
def filter_candidates_structured_json_for_local_duplicates(structured_json):
"""
With this function, we remove candidates that seem to be duplicates, but have different we_vote_id's.
We do not check to see if we have a matching office in this routine -- that is done elsewhere.
:param structured_json:
:return:
"""
duplicates_removed = 0
filtered_structured_json = []
candidate_list_manager = CandidateCampaignListManager()
for one_candidate in structured_json:
candidate_name = one_candidate['candidate_name'] if 'candidate_name' in one_candidate else ''
google_civic_candidate_name = one_candidate['google_civic_candidate_name'] \
if 'google_civic_candidate_name' in one_candidate else ''
we_vote_id = one_candidate['we_vote_id'] if 'we_vote_id' in one_candidate else ''
google_civic_election_id = \
one_candidate['google_civic_election_id'] if 'google_civic_election_id' in one_candidate else ''
contest_office_we_vote_id = \
one_candidate['contest_office_we_vote_id'] if 'contest_office_we_vote_id' in one_candidate else ''
politician_we_vote_id = one_candidate['politician_we_vote_id'] \
if 'politician_we_vote_id' in one_candidate else ''
candidate_twitter_handle = one_candidate['candidate_twitter_handle'] \
if 'candidate_twitter_handle' in one_candidate else ''
vote_smart_id = one_candidate['vote_smart_id'] if 'vote_smart_id' in one_candidate else ''
maplight_id = one_candidate['maplight_id'] if 'maplight_id' in one_candidate else ''
# Check to see if there is an entry that matches in all critical ways, minus the we_vote_id
we_vote_id_from_master = we_vote_id
results = candidate_list_manager.retrieve_possible_duplicate_candidates(
candidate_name, google_civic_candidate_name, google_civic_election_id, contest_office_we_vote_id,
politician_we_vote_id, candidate_twitter_handle, vote_smart_id, maplight_id,
we_vote_id_from_master)
if results['candidate_list_found']:
# There seems to be a duplicate already in this database using a different we_vote_id
duplicates_removed += 1
else:
filtered_structured_json.append(one_candidate)
candidates_results = {
'success': True,
'status': "FILTER_CANDIDATES_FOR_DUPLICATES_PROCESS_COMPLETE",
'duplicates_removed': duplicates_removed,
'structured_json': filtered_structured_json,
}
return candidates_results
def candidates_import_from_structured_json(structured_json):
candidate_campaign_manager = CandidateCampaignManager()
candidates_saved = 0
candidates_updated = 0
candidates_not_processed = 0
for one_candidate in structured_json:
candidate_name = one_candidate['candidate_name'] if 'candidate_name' in one_candidate else ''
we_vote_id = one_candidate['we_vote_id'] if 'we_vote_id' in one_candidate else ''
google_civic_election_id = \
one_candidate['google_civic_election_id'] if 'google_civic_election_id' in one_candidate else ''
ocd_division_id = one_candidate['ocd_division_id'] if 'ocd_division_id' in one_candidate else ''
contest_office_we_vote_id = \
one_candidate['contest_office_we_vote_id'] if 'contest_office_we_vote_id' in one_candidate else ''
# This routine imports from another We Vote server, so a contest_office_id doesn't come from import
# Look up contest_office in this local database.
# If we don't find a contest_office by we_vote_id, then we know the contest_office hasn't been imported
# from another server yet, so we fail out.
contest_office_manager = ContestOfficeManager()
contest_office_id = contest_office_manager.fetch_contest_office_id_from_we_vote_id(
contest_office_we_vote_id)
if positive_value_exists(candidate_name) and positive_value_exists(google_civic_election_id) \
and positive_value_exists(we_vote_id) and positive_value_exists(contest_office_id):
proceed_to_update_or_create = True
else:
proceed_to_update_or_create = False
if proceed_to_update_or_create:
updated_candidate_campaign_values = {
# Values we search against
'google_civic_election_id': google_civic_election_id,
'ocd_division_id': ocd_division_id,
'contest_office_we_vote_id': contest_office_we_vote_id,
'candidate_name': candidate_name,
# The rest of the values
'we_vote_id': we_vote_id,
'maplight_id': one_candidate['maplight_id'] if 'maplight_id' in one_candidate else None,
'vote_smart_id': one_candidate['vote_smart_id'] if 'vote_smart_id' in one_candidate else None,
'contest_office_id': contest_office_id, # Retrieved from above
'politician_we_vote_id':
one_candidate['politician_we_vote_id'] if 'politician_we_vote_id' in one_candidate else '',
'state_code': one_candidate['state_code'] if 'state_code' in one_candidate else '',
'party': one_candidate['party'] if 'party' in one_candidate else '',
'order_on_ballot': one_candidate['order_on_ballot'] if 'order_on_ballot' in one_candidate else 0,
'candidate_url': one_candidate['candidate_url'] if 'candidate_url' in one_candidate else '',
'photo_url': one_candidate['photo_url'] if 'photo_url' in one_candidate else '',
'photo_url_from_maplight':
|
Aerilius/eog_panorama
|
eog_panorama/eog_panorama.py
|
Python
|
gpl-3.0
| 15,642
| 0.007672
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Approach:
# - Listen for image load event and check the XMP tag GPano:UsePanoramaViewer
# https://developers.google.com/streetview/spherical-metadata
# - GExiv2 (in default Ubuntu install, but may not be robust enough to inconsistent/duplicate XMP tags)
# - ExifTool (not in default install)
# - If it is a panorama, replace 2D image display by 360° display
# Create a sphere and project the photo according to XMP GPano tags.
# - OpenGL: python-gtklext (not maintained and not in repos),
# python-opengl (too low-level), shortcrust
# - GTK scene graph kit: not yet completed and included in common distributions
# - JavaScript/WebGL: PhotoSphereViewer.js
# - Interactivity (drag to rotate around z-axis and tilt; scroll to zoom)
import gi, os, urllib.parse
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk, GObject, Gio, Eog
# EXIF/XMP metadata
gi.require_version('GExiv2', '0.10')
from gi.repository import GExiv2
# Webview for WebGL panorama viewer
gi.require_version('WebKit2', '4.0')
from gi.repository import WebKit2
# Encoding image in data uris.
import base64
class PanoramaPlugin(GObject.Object, Eog.WindowActivatable):
# Override EogWindowActivatable's window property
# This is the EogWindow this plugin instance has been activated for
window = GObject.property(type=Eog.Window)
def __init__(self):
GObject.Object.__init__(self)
self.panorama_viewer_loaded = False
self.panorama_viewer_active = False
self.container = None
self.image_view = None
self.panorama_view = None
self.thumb_view = None
self.selection_change_handler = None
# Eye-of-Gnome API methods
def do_activate(self):
"""The plugin has been activated (on app start or through checkbox in preferences), set it up."""
# For tracking selected image.
self.thumb_view = self.window.get_thumb_view()
self.selection_change_handler = self.thumb_view.connect('selection-changed', self.on_selection_changed)
# Initialization of panorama viewer:
# Since it takes significant amount of memory, we load it only
# once we encounter a panorama image (see on_selection_changed).
#self.load_panorama_viewer()
def do_deactivate(self):
"""The plugin has been deactivated, clean everything up."""
# Remove all modifications and added widgets from the UI scene graph.
# (In this implementation same as when hiding the panorama.)
self.hide_panorama()
# Unregister event handlers.
self.thumb_view.disconnect(self.selection_change_handler)
self.selection_change_handler = None
# Release resources.
sel
|
f.panorama_view = None
self.panorama_viewer_active = False
self.panorama_viewer_loaded = False
def on_selection_changed(self, thumb_view):
"""An image has been selected."""
# Use the reference of thumb_view passed as parameter, not self
|
.thumb_view (did cause errors).
current_image = thumb_view.get_first_selected_image() # may be None
if current_image:
# Get file path
uri = current_image.get_uri_for_display()
filepath = urllib.parse.urlparse(uri).path
# If it is a panorama, switch to panorama viewer.
if self.use_panorama_viewer(filepath):
# Read panorama metadata
try:
metadata = self.get_pano_xmp(filepath)
# I tried passing just the image file path, but cross-site-scripting
# restrictions do not allow local file:// access.
# Solutions: simple server or data uri.
image = self.image_to_base64(filepath)
# Lazy loading: Create panorama_viewer only when a panorama is encountered.
# TODO: maybe unload it again after a certain amount of non-panorama images.
if not self.panorama_viewer_loaded:
# 1. Load the panorama viewer.
self.load_panorama_viewer(lambda: self.panorama_view.load_image(image, metadata, self.show_panorama) )
else:
# 2. Load the image into the panorama viewer.
# 3. When finished, make it visible.
self.panorama_view.load_image(image, metadata, self.show_panorama)
except Exception as error:
print(error)
# Fallback to display as normal image.
self.hide_panorama()
else:
# It is a normal image.
self.hide_panorama()
# Release resources in the panorama viewer by loading an empty/none image
if self.panorama_viewer_loaded:
empty_image = 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAAC0lEQVQI12NgAAIAAAUAAeImBZsAAAAASUVORK5CYII='
self.panorama_view.load_image(empty_image, {})
# Helper methods
def use_panorama_viewer(self, filepath):
metadata = GExiv2.Metadata(filepath)
return metadata.get_tag_string('Xmp.GPano.ProjectionType') == 'equirectangular' \
and metadata.get_tag_string('Xmp.GPano.UsePanoramaViewer') != 'False'
def get_pano_xmp(self, filepath):
"""Read XMP panorama metadata of an image file.
Args:
filepath: an image file to read
Returns:
a dict containing XMP keys with their values
"""
metadata = GExiv2.Metadata(filepath)
# For tags see: http://www.exiv2.org/tags.html
# and http://exiv2.org/tags-xmp-GPano.html
tags_required = {
'Xmp.GPano.FullPanoWidthPixels': 'full_width',
'Xmp.GPano.FullPanoHeightPixels': 'full_height',
'Xmp.GPano.CroppedAreaImageWidthPixels': 'cropped_width',
'Xmp.GPano.CroppedAreaImageHeightPixels': 'cropped_height',
'Xmp.GPano.CroppedAreaLeftPixels': 'cropped_x',
'Xmp.GPano.CroppedAreaTopPixels': 'cropped_y'
}
tags_optional = {
'Xmp.GPano.PoseHeadingDegrees': 'pose_heading',
'Xmp.GPano.InitialHorizontalFOVDegrees': 'initial_h_fov',
'Xmp.GPano.InitialViewHeadingDegrees': 'initial_heading',
'Xmp.GPano.InitialViewPitchDegrees': 'initial_pitch',
'Xmp.GPano.InitialViewRollDegrees': 'initial_roll'
}
result = {}
for (tag, key) in tags_required.items():
if metadata.has_tag(tag):
result[key] = float(metadata.get_tag_string(tag))
else:
raise Exception("Required tag %s is missing, cannot use panorama viewer."%tag)
for (tag, key) in tags_optional.items():
if metadata.has_tag(tag):
result[key] = float(metadata.get_tag_string(tag))
return result
def load_panorama_viewer(self, on_loaded_cb = None):
"""Initialize the panorama viewer widget.
Args:
on_loaded_cb: an optional callback function/lambda that is called
after loading of the panorama widget completes.
Note:
Instantiation of the WebView is synchronous, but loading of html is asynchronous.
For subsequently interacting with the document, pass a callback.
"""
if not self.panorama_viewer_loaded:
self.image_view = self.window.get_view() # EogScrollView
self.container = self.image_view.get_parent() # its parent, GtkOverlay
# Create the panorama widget.
self.panorama_view = PanoramaViewer(on_loaded_cb)
self.panorama_view.show()
self.panorama_viewer_loaded = True
def image_to_base64(self, filepath):
"""Read an image file and returm its content as base6
|
OdatNurd/OverrideAudit
|
src/commands/package_report.py
|
Python
|
mit
| 2,593
| 0.0027
|
import sublime
import sublime_plugin
from ..core import oa_syntax, decorate_pkg_name
from ..core import ReportGenerationThread
from ...lib.packages import PackageList
###----------------------------------------------------------------------------
class PackageReportThread(ReportGenerationThread):
"""
Generate a tabular report of all installed packages and their state.
"""
def _process(self):
pkg_list = PackageList()
pkg_counts = pkg_list.package_counts()
title = "{} Total Packages".format(len(pkg_list))
t_sep = "=" * len(title)
fmt = '{{:>{}}}'.format(len(str(max(pkg_counts))))
stats = ("{0} [S]hipped with Sublime\n"
"{0} [I]nstalled (user) sublime-package files\n"
"{0} [U]npacked in Packages\\ directory\n"
"{0} Currently in ignored_packages\n"
"{0} Installed Dependencies\n").format(fmt).format(*pkg_counts)
row = "| {:<40} | {:3} | {:3} | {:<3} |".format("", "", "", "")
r_sep = "+------------------------------------------+-----+-----+-----+"
packages = {}
result = [title, t_sep, "", self._generation_time(), stats, r_sep]
for pkg_name, pkg_info in pkg_list:
packages[pkg_name] = pkg_info.status(detailed=False)
result.append(
"| {:<40} | [{:1}] | [{:1}] | [{:1}] |".format(
decorate_pkg_name(pkg_info, name_only=True),
"S" if pkg_info.shipped_path is not None else " ",
"I" if pkg_info.installed_path is not None else " ",
"U" if pkg_info.unpacked_path is not None else " "))
result.extend([r_sep, ""])
self._set_content("OverrideAudit: Package Report", result, ":packages",
oa_syntax("OA-PkgReport"), {
"override_audit_report_packages": packages,
"context_menu": "OverrideAuditReport.sublime-menu"
})
###----------------------------------------------------------------------------
class OverrideAuditPackageReportComm
|
and(sublime_plugin.WindowCommand):
"""
Generate a tabular report of all installed packages and their state.
"""
def run(self, force_reuse=False):
PackageReportThread(self.window, "Generating Package Report",
self.window.active_view(),
force_reuse=fo
|
rce_reuse).start()
###----------------------------------------------------------------------------
#
|
inuitwallet/nuberrypi
|
PKGBLD/nuberrypi-info/nuberrypi-info.py
|
Python
|
gpl-3.0
| 7,347
| 0.028039
|
#!/usr/bin/python2
# -*- coding: utf-8 -*-
#
# Copyright 2014 Peerchemist
#
# This file is part of NuBerryPi project.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
__author__ = "Peerchemist"
__license__ = "GPL"
__version__ = "0.23"
import os, sys
import sh
import argparse
import json
import urllib
import platform
from datetime import timedelta
from datetime import datetime as dt
from colored import fore, back, style
## Class that pulls and parses data
class pbinfo:
def system(self):
def uptime():
with open('/proc/uptime', 'r') as f:
uptime_seconds = float(f.readline().split()[0])
uptime_str = str(timedelta(seconds = uptime_seconds))
return(uptime_str)
def distr():
with open('/etc/os-release', 'r') as lsb:
for line in lsb:
if line.startswith('VERSION_ID'):
return(line.split('=')[1].replace('"','').strip())
def temp():
with open('/sys/class/thermal/thermal_zone0/temp', 'r') as temp:
return(float(temp.readline().strip())/1000)
mm = {
'nuberrypi': distr(),
'kernel release': platform.release(),
'uptime': uptime(),
'average load': os.getloadavg(),
'system_temperature': temp()
}
return(mm)
def hardware(self):
mm = {}
with open('/proc/cpuinfo') as cpuinfo:
for line in cpuinfo:
if line.startswith('Hardware'):
hardware = line.split(':')[1].strip()
if hardware == "BCM2708":
mm['hardware'] = "Raspberry Pi"
if line.startswith('Serial'):
ser = line.split(':')[1].strip()
mm['serial'] = ser
with open('/proc/cmdline', 'r') as cmdline:
for i in cmdline.readline().split():
if i.startswith('smsc95xx.macaddr'):
mm['maccaddr'] = str(i.split('=')[1])
if i.startswith('bcm2708.boardrev'):
mm['board_rev'] = str(i.split('=')[1])
return(mm)
def nud(self, argv):
get = sh.nud("getinfo", _ok_code=[0,3,5,87]).stdout
pos_diff = sh.nud("getdifficulty", _ok_code=[0,3,5,87]).stdout
try:
getinfo = json.loads(get)
pos = json.loads(pos_diff)['proof-of-stake']
getinfo["difficulty proof-of-stake"] = pos
except:
return("nud inactive")
## When posting in public, hide IP and balance.
if argv == "private":
del getinfo['balance']
del getinfo['ip']
return(getinfo)
else:
return(getinfo)
## Class that will do all the pretty printing
class box:
def default(self): ## printed when no arguments
box = {}
box['nuberrypi version'] = "v" + pbinfo.system()['nuberrypi']
box['uptime'] = pbinfo.system()['uptime']
box['nud'] = pbinfo.nud(self)
box['serial'] = pbinfo.hardware()['serial']
box['raspi_board_rev'] = pbinfo.hardware()['board_rev']
print(fore.GREEN + style.UNDERLINED + "NuBerryPi:" + style.RESET)
print(json.dumps(box, sort_keys=True, indent=4))
if box['nud'] == "nud inactive":
print(fore.RED + style.BOLD + "WARNING: nud is not running!" + style.RESET)
def public(self): ## When privacy is needed
box = {}
box['NuBerryPi:'] = "v" + pbinfo.system()['nuberrypi']
box['serial'] = pbinfo.hardware()['serial']
box['uptime'] = pbinfo.system()['uptime']
box['nud'] = pbinfo.nud('private')
print(fore.GREEN + style.UNDERLINED + "NuBerryPi:" + style.RESET)
print(json.dumps(box, sort_keys=True, indent=4))
def system(self):
box = pbinfo.system()
print(fore.GREEN + style.UNDERLINED + "NuBerryPi system info:" + style.RESET)
print(json.dumps(box, sort_keys=True, indent=4))
if box['system_temperature'] > 76:
print(fore.RED + style.BOLD + "WARNING: system temperature too high!" + style.RESET)
def all(self): ## Switch to show all
box = {}
box['system'] = pbinfo.system()
box['system'].update(pbinfo.hardware())
box['nud'] = pbinfo.nud(self)
print(json.dumps(box, sort_keys=True, indent=4))
def health(self):
report = health.check()
print "Checking if we are on the right chain..."
print "Using" + " " + style.UNDERLINED + "www.peerchain.co" + style.RESET + " as reference."
print
for k,v in report.items():
if v == True:
print(k + ":" + fore.GREEN + style.BOLD + "True" + style.RESET)
else:
print(k + ":" + fore.RED + style.BOLD + "False" + style.RESET)
print
## Checking health of blockchain
class health:
def pull(self):
url = "https://peerchain.co/api/v1/blockLatest/"
response = urllib.urlopen(url)
return(json.loads(response.read()))
def local(self):
local = {}
local["heightInt"] = int(sh.nud("getblockcount", _ok_code=[0,3,5,87]).stdout)
local["hash"] = sh.nud("getblockhash", local["heightInt"],
_ok_code=[0,3,5,87]).stdout.strip()
block_info = json.loads(sh.nud("getblock", local["hash"],
_ok_code=[0,3,5,87]).stdout)
local["prevHash"] = block_info["previousblockhash"]
local["mrkRoot"] = block_info["merkleroot"]
#timestring = block_info["time"].replace("UTC", "").strip()
#local["timeStampUnix"] = dt.strptime(timestring
# , "%Y-%m-%d %H:%M:%S").strftime("%s")
return local
def check(self):
local = self.local()
remote = self.pull()
report = {}
if remote["heightInt"] == local["heightInt"]:
report["block_count_matches"] = True
else:
report["block_count_matches"] = False
if remote["hash"] == local["hash"]:
report["last_block_hash_matches"] = True
else:
report["last_block_hash_matches"] = False
if remote["prevHash"] == local["prevHash"]:
report["previous_block_hash_matches"] = True
else:
report["previous_block_hash_matches"] = False
if remote["mrkRoot"] == local["mrkRoot"]:
report["merkle_root_matches"] = True
else:
report["merkle_root_matches"] = False
return report
pbinfo = pbinfo()
box = box()
health = health()
######################### args
parser = argparse.ArgumentParser(description='Show information on NuBerryPi')
parser.add_argument
|
('-a', '--all', help='show everything', action='store_true')
parser.add_argument('-s','--system', help='show system information', action='store_true')
parser.add_argument('-p', '--nu', help='equal to "ppcoid getinfo"', action='store_true
|
')
parser.add_argument('--public', help='hide private data [ip, balance, serial]', action='store_true')
parser.add_argument('-o', '--output', help='dump data to stdout, use to pipe to some other program',
action='store_true')
parser.add_argument('--health', help='compare local blockchain data with peerchain.co as reference',
action='store_true')
args = parser.parse_args()
## Default, if no arguments
if not any(vars(args).values()):
box.default()
if args.all:
box.all()
if args.system:
box.system()
if args.nu:
print(json.dumps(pbinfo.nud("self"), indent=4, sort_keys=True))
if args.public:
box.public()
if args.output:
sys.stdout.write(box.all())
if args.health:
box.health()
|
chienlieu2017/it_management
|
odoo/odoo/addons/base/res/res_font.py
|
Python
|
gpl-3.0
| 6,052
| 0.003305
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import logging
from reportlab.pdfbase import ttfonts
from odoo import api, fields, models
from odoo.report.render.rml2pdf import customfonts
"""This module allows the mapping of some system-available TTF fonts to
the reportlab engine.
This file could be customized per distro (although most Linux/Unix ones
should have the same filenames; only the code below may need changes).
Due to an awful configuration that ships with reportlab at many Linux
and Ubuntu distros, we have to override the search path, too.
"""
_logger = logging.getLogger(__name__)
# Alternatives for the [broken] builtin PDF fonts. Default order chosen to match
# the pre-v8 mapping from odoo.report.render.rml2pdf.customfonts.CustomTTFonts.
# Format: [ (BuiltinFontFamily, mode, [AlternativeFontName, ...]), ...]
BUILTIN_ALTERNATIVES = [
('Helvetica', "normal", ["DejaVuSans", "LiberationSans"]),
('Helvetica', "bold", ["DejaVuSans-Bold", "LiberationSans-Bold"]),
('Helvetica', 'italic', ["DejaVuSans-Oblique", "LiberationSans-Italic"]),
('Helvetica', 'bolditalic', ["DejaVuSans-BoldOblique", "LiberationSans-BoldItalic"]),
('Times', 'normal', ["LiberationSerif", "DejaVuSerif"]),
('Times', 'bold', ["LiberationSerif-Bold", "DejaVuSerif-Bold"]),
('Times', 'italic', ["LiberationSerif-Italic", "DejaVuSerif-Italic"]),
('Times', 'bolditalic', ["LiberationSerif-BoldItalic", "DejaVuSerif-BoldItalic"]),
('Courier', 'normal', ["FreeMono", "DejaVuSansMono"]),
('Courier', 'bold', ["FreeMonoBold", "DejaVuSansMono-Bold"]),
('Courier', 'italic', ["FreeMonoOblique", "DejaVuSansMono-Oblique"]),
('Courier', 'bolditalic', ["FreeMonoBoldOblique", "DejaVuSansMono-BoldOblique"]),
]
class ResFont(models.Model):
_name = "res.font"
_description = 'Fonts available'
_order = 'family,name,id'
_rec_name = 'family'
family = fields.Char(string="Font family", required=True)
name = fields.Char(string="Font Name", required=True)
path = fields.Char(required=True)
mode = fields.Char(required=True)
_sql_constraints = [
('name_font_uniq', 'unique(family, name)', 'You can not register two fonts with the same name'),
]
@api.model
def font_scan(self, lazy=False):
"""Action of loading fonts
In lazy mode, scan the filesystem only if there are no fonts in the database, and sync if there are no fonts in CustomTTFonts
In non-lazy mode, force a filesystem scan and sync
"""
if lazy:
# lazy loading, scan only if no fonts in db
fonts = self.search([('path', '!=', '/dev/null')])
if not fonts:
# no scan yet or no font found on the system, scan the filesystem
self._scan_disk()
elif len(customfonts.CustomTTFonts) == 0:
# CustomTTFonts list is empty
self._sync()
else:
self._scan_disk()
return True
def _scan_disk(self):
"""Scan the file system and register the result in database"""
found_fonts = []
for font_path in customfonts.list_all_sysfonts():
try:
font = ttfonts.TTFontFile(font_path)
_logger.debug("Found font %s at %s", font.name, font_path)
found_fonts.append((font.familyName, font.name, font_path, font.styleName))
except Exception, ex:
_logger.warning("Could not register Font %s: %s", font_path, ex)
for family, name, path, mode in found_fonts:
if not self.search([('family', '=', family), ('name', '=', name)]):
self.create({'family': family, 'name': name, 'path': path, 'mode': mode})
# remove fonts not present on the disk anymore
existing_font_names = [name for (family, name, path, mode) in found_fonts]
# Remove inexistent fonts
self.search([('name', 'not in', existing_font_names), ('path', '!=', '/dev/null')]).unlink()
self.pool.signal_caches_change()
return self._sync()
def _sync(self):
"""Set the customfonts.CustomTTFonts list to the content of the database"""
customfonts.CustomTTFonts = []
local_family_modes = set()
local_font_paths = {}
for font in self.search([('path', '!=', '/dev/null')]):
local_family_modes.add((font.family, font.mode))
local_font_paths[font.name] = font.path
customfonts.CustomTTFonts.append((font.family, font.name, font.path, font.mode))
# Attempt to remap the bui
|
ltin fonts (Helvetica, Times, Courier) to better alternatives
# if available, because they only support a very small subset of unicode
# (missing 'č' for example)
for builtin_font_family, mode, alts in BUILTIN_ALTERNATIVES:
if (builtin_font_family, mode) not in local_family_modes:
# No local font exists with that name, try alternatives
for altern_font in alts:
if local_
|
font_paths.get(altern_font):
altern_def = (builtin_font_family, altern_font,
local_font_paths[altern_font], mode)
customfonts.CustomTTFonts.append(altern_def)
_logger.debug("Builtin remapping %r", altern_def)
break
else:
_logger.warning("No local alternative found for builtin font `%s` (%s mode)."
"Consider installing the DejaVu fonts if you have problems "
"with unicode characters in RML reports",
builtin_font_family, mode)
return True
@classmethod
def clear_caches(cls):
"""Force worker to resync at next report loading by setting an empty font list"""
customfonts.CustomTTFonts = []
return super(ResFont, cls).clear_caches()
|
JFDesigner/FBAlbumDownloader
|
browser_cookie/setup.py
|
Python
|
gpl-2.0
| 722
| 0.006925
|
import sys
import os
from distutils.core import setup
if sys.version_info.major >= 3:
print 'Sorry, currently only supports Python 2. Patches welcome!'
sys.exit(1)
setup(
name='browser-cookie',
version='0.6',
packages=['browser_cookie'],
package_dir={'browser_cookie' : '.'}, # look for package contents in current directory
author='Richard Penman',
author_email='[email protected]',
description='Loads cookies from your browser into a
|
cookiejar object so can download with urllib and other libraries the same content you see in the web browser.',
url='https://bitbucket.org/richardpenman/browser_cookie',
install_requires=['pycrypto', 'keyring'],
license='lgpl'
)
| |
geoaxis/ask-sweden
|
ask_sweden/lambda_function.py
|
Python
|
mit
| 3,656
| 0.001094
|
import logging
from ask import alexa
import car_accidents
import expected_population
logger = logging.getLogger()
logger.setLevel(logging.INFO)
def lambda_handler(request_obj, context=None):
return alexa.route_request(request_obj)
@alexa.default
def default_handler(request):
logger.info('default_handler')
return alexa.respond("Sorry, I don't understand.", end_session=True)
@alexa.request("LaunchRequest")
def launch_request_handler(request):
logger.info('launch_request_handler')
return alexa.respond('Ask me about any public data about Sweden.', end_session=True)
@alexa.request("SessionEndedRequest")
def session_ended_request_handler(request):
logger.info('session_ended_request_handler')
return alexa.respond('Goodbye.', end_session=True)
@alexa.intent('AMAZON.CancelIntent')
def cancel_intent_handler(request):
logger.info('cancel_intent_handler')
return alexa.respond('Okay.', end_session=True)
@alexa.intent('AMAZON.HelpIntent')
def help_intent_handler(request):
logger.info('help_intent_handler')
return alexa.respond('You can ask me about car accidents.', end_session=True)
@alexa.intent('AMAZON.StopIntent')
def stop_intent_handler(request):
logger.info('stop_intent_handler')
return alexa.respond('Okay.', end_session=True)
@alexa.intent('CarAccidents')
def car_accidents_intent_handler(request):
logger.info('car_accidents_intent_handler')
logger.info(request.get_slot_map())
city = request.get_slot_value('city')
year = request.get_slot_value('year')
if not city:
return alexa.respond('Sorry, which city?')
num_card_acc = car_accidents.get_num_accidents(year=int(year), city=city)
logger.info('%s accidents in %s in %s', num_card_acc, city, year)
return alexa.respond(
'''
<speak>
There were
<say-as interpret-as="cardinal">%s</say-as>
car accidents in %s in
<say-as interpret-as="date" format="y">%s</say-as>,
</speak>
''' % (num_card_acc, city, year),
end_session=True, is_ssml=True)
@alexa.intent('PopulationSweden')
def population_intent_handler(request):
logger.info('population_sweden_intent_handler')
logger.info(request.get_slot_map())
year = request.get_slot_value('year')
return alexa.respond(
'''
<speak>
in
<say-as interpret-as="date" format="y">%s</say-as>,
The expected population of Sweden is going to be
<say-as interpret-as="cardinal">%s</say-as>
</speak>
''' % (year, expected_popul
|
ation.get_expected_population(year)),
end_session=True, is_ssml=True)
@alexa.intent('WaterUsage')
def water_usage_stockholm(request):
year = request.get_slot_value('year')
logger.info('water_usage_stockholm')
logger.info(
|
request.get_slot_map())
return alexa.respond(
'''
<speak>
the water consumption in Stockholm in <say-as interpret-as="date" format="y">%s</say-as>,
is <say-as interpret-as="cardinal">%s</say-as>
</speak>
''' % (year, car_accidents.get_water_usage_stockholm(year)),
end_session=True, is_ssml=True)
@alexa.intent('Apartments')
def housing_numbers(request):
year = request.get_slot_value('year')
logger.info('apartments')
logger.info(request.get_slot_map())
return alexa.respond(
'''
<speak>
the number of apartments built during that year in Stockholm, is <say-as interpret-as="cardinal">%s</say-as>
</speak>
''' % (car_accidents.get_num_apartments_stockholm(year)),
)
|
Barmaley-exe/scikit-learn
|
sklearn/ensemble/tests/test_weight_boosting.py
|
Python
|
bsd-3-clause
| 15,811
| 0
|
"""Testing for the boost module (sklearn.ensemble.boost)."""
import numpy as np
from sklearn.utils.testing import assert_array_equal, assert_array_less
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises, assert_raises_regexp
from sklearn.cross_validation import train_test_split
from sklearn.grid_search import GridSearchCV
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import AdaBoostRegressor
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import coo_matrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
from sklearn.svm import SVC, SVR
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.utils import shuffle
from sklearn import datasets
# Common random state
rng = np.random.RandomState(0)
# Toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y_class = ["foo", "foo", "foo", 1, 1, 1] # test string class labels
y_regr = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
y_t_class = ["foo", 1, 1]
y_t_regr = [-1, 1, 1]
# Load the iris dataset and randomly permute it
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data, iris.target = shuffle(iris.data, iris.target, random_state=rng)
# Load the boston dataset and randomly permute it
boston = datasets.load_boston()
boston.data, boston.target = shuffle(boston.data, boston.target,
random_state=rng)
def test_classification_toy():
"""Check classification on a toy dataset."""
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg, random_state=0)
clf.fit(X, y_class)
assert_array_equal(clf.predict(T), y_t_class)
assert_array_equal(np.unique(np.asarray(y_t_class)), clf.classes_)
assert_equal(clf.predict_proba(T).shape, (len(T), 2))
assert_equal(clf.decision_function(T).shape, (len(T),))
def test_regression_toy():
"""Check classification on a toy dataset."""
clf = AdaBoostRegressor(random_state=0)
clf.fit(X, y_regr)
assert_array_equal(clf.predict(T), y_t_regr)
def test_iris():
"""Check consistency on dataset iris."""
classes = np.unique(iris.target)
clf_samme = prob_samme = None
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg)
clf.fit(iris.data, iris.target)
assert_array_equal(classes, clf.classes_)
proba = clf.predict_proba(iris.data)
if alg == "SAMME":
clf_samme = clf
prob_samme = proba
assert_equal(proba.shape[1], len(classes))
assert_equal(clf.decision_function(iris.data).shape[1], len(classes))
score = clf.score(iris.data, iris.target)
assert score > 0.9, "Failed with algorithm %s and score = %f" % \
(alg, score)
# Somewhat hacky regression test: prior to
# ae7adc880d624615a34bafdb1d75ef67051b8200,
# predict_proba returned SAMME.R values for SAMME.
clf_samme.algorithm = "SAMME.R"
assert_array_less(0,
np.abs(clf_samme.predict_proba(iris.data) - prob_samme))
def test_boston():
"""Check consistency on dataset boston house prices."""
clf = AdaBoostRegressor(random_state=0)
clf.fit(boston.data, boston.target)
score = clf.score(boston.data, boston.target)
assert score > 0.85
def test_staged_predict():
"""Check staged predictions."""
rng = np.random.RandomState(0)
iris_weights = rng.randint(10, size=iris.target.shape)
boston_weights = rng.randint(10, size=boston.target.shape)
# AdaBoost classification
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg, n_estimators=10)
clf.fit(iris.data, iris.target, sample_weight=iris_weights)
predictions = clf.predict(iris.data)
staged_predictions = [p for p in clf.staged_predict(iris.data)]
proba = clf.predict_proba(iris.data)
staged_probas = [p for p in clf.staged_predict_proba(iris.data)]
score = clf.score(iris.data, iris.target, sample_weight=iris_weights)
staged_scores = [
s for s in clf.staged_score(
iris.data, iris.target, sample_weight=iris_weights)]
assert_equal(len(staged_predictions), 10)
assert_array_almost_equal(predictions, staged_predictions[-1])
assert_equal(len(staged_probas), 10)
assert_array_almost_equal(proba, staged_probas[-1])
assert_equal(len(staged_scores), 10)
assert_array_almost_equal(score, staged_scores[-1])
# AdaBoost regression
clf = AdaBoostRegressor(n_estimators=10, random_state=0)
clf.fit(boston.data, boston.target, sample_weight=boston_weights)
predictions = clf.predict(boston.data)
staged_predictions = [p for p in clf.staged_predict(boston.data)]
score = clf.score(boston.data, boston.target, sample_weight=boston_weights)
staged_scores = [
s for s in clf.staged_score(
boston.data, boston.target, sample_weight=boston_weights)]
assert_equal(len(staged_predictions), 10)
assert_array_almost_equal(predictions, staged_predictions[-1])
assert_equal(len(staged_scores), 10)
assert_array_almost_equal(score, staged_scores[-1])
def test_gridsearch():
"""Check that base trees can be grid-searched."""
# AdaBoost classification
boost = AdaBoostClassifier(base_estimator=DecisionTreeClassifier())
parameters = {'n_estimators': (1, 2),
'base_estimator__max_depth': (1, 2),
'algorithm': ('SAMME', 'SAMME.R')}
clf = GridSearchCV(boost, parameters)
clf.fit(iris.data, iris.target)
# AdaBoost regression
boost = AdaBoostRegressor(base_estimator=DecisionTreeRegressor(),
random_state=0)
parameters = {'n_estimators': (1, 2),
'base_estimator__max_depth': (1, 2)}
clf = GridSearchCV(boost, parameters)
clf.fit(boston.data, boston.target)
def test_pickle():
"""Check pickability."""
import pickle
# Adaboost classifier
for alg in ['SAMME', 'SAMME.R']:
obj = AdaBoostClassifier(algorithm=alg)
obj.fit(iris.data, iris.target)
score = obj.score(iris.data, iris.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(iris.data, iris.target)
assert_equal(score, score2)
# Adaboost regressor
obj = AdaBoostRegressor(random_state=0)
obj.fit(boston.data, boston.target)
score = obj.score(boston.data, boston.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(boston.data, boston.target)
assert_equal(score, score2)
def test_importances():
"""Check variable importances."""
X, y = datasets.make_classification(n_samples=2000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
|
shuffle=False,
random_state=1)
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algo
|
rithm=alg)
clf.fit(X, y)
importances = clf.feature_importances_
assert_equal(importances.shape[0], 10)
assert_equal((importances[:3, np.newaxis] >= importances[3:]).all(),
True)
def test_error():
"""Test that it gives proper exception on deficient input."""
assert_raises(ValueError,
AdaBoostClassifier(learning_rate=-1).fit,
X, y_class)
assert_raises(ValueError,
AdaBoostClassifier(algorithm="foo").fit,
X, y_class)
assert_raises(ValueError,
AdaBoostClassifier().fit,
X, y_class, sample_weight=np.asarray([-1]))
def test_base_estimator():
|
xunxunzgq/open-hackathon-bak_01
|
open-hackathon-server/src/hackathon/storage/local_storage.py
|
Python
|
mit
| 7,616
| 0.003808
|
# -*- coding: utf-8 -*-
"""
Copyright (c) Microsoft Open Technologies (Shanghai) Co. Ltd. All rights reserved.
The MIT License (MIT)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import sys
sys.path.append("..")
import os
from os.path import realpath, dirname, isfile, abspath
import json
import time
import uuid
from werkzeug.datastructures import FileStorage
from hackathon.constants import FILE_TYPE, HEALTH_STATUS, HEALTH
from storage import Storage
__all__ = ["LocalStorage"]
class LocalStorage(Storage):
"""Hackathon file storage that saves all templates on local disk
template files will be saved at "<src_dir>/open-hackathon-server/src/hackathon/resources"
uploaded images will be saved at "<src_dir>/open-hackathon-server/src/hackathon/resources"
"""
def save(self, context):
"""Save a file to storage
:type context: Context
:param context: the execution context of file saving
:rtype context
:return the updated context which should including the full path of saved file
"""
context = self.__generate_paths(context)
self.__save_file(context.content, context.physical_path)
self.log.debug("file saved at:" + context.physical_path)
return context
def load(self, context):
"""Load file from storage
:type context: Context
:param context: the execution context of file loading
:rtype dict
:return the file content
"""
path = context.physical_path
file_type = context.file_type
if file_type == FILE_TYPE.TEMPLATE:
with open(path) as template_file:
return json.load(template_file)
else:
return None
def delete(self, context):
"""Delete file from storage
:type context: Context
:param context: the execution context of file deleting
:rtype bool
:return True if successfully deleted else False
"""
path = context.physical_path
if isfile(path):
os.remove(path)
return True
else:
self.log.warn("try to remove dir or non-existed file")
return False
def report_health(self):
"""The status of local storage should be always True"""
return {
HEALTH.STATUS: HEALTH_STATUS.OK
}
def __init__(self):
self.base_dir = self.__get_storage_base_dir()
def __ensure_dir(self, file_path):
"""Make sure the directory of target file exists"""
path = dirname(file_path)
if path and not (os.path.exists(path)):
os.makedirs(path)
return path
def __save_file(self, content, path):
"""Dump file to disk
An existing file with the same name will be erased
:type content: file | dict | FileStorage
:param content: the content of file to be saved. Can be a file object or a dict
:type path: str | unicode
:param path: the file path
"""
self.__ensure_dir(path)
with open(path, 'w') as f:
if isinstance(content, dict):
json.dump(content, f)
elif isinstance(content, file):
f.write(content.read())
elif isinstance(content, FileStorage):
content.save(path)
def __get_storage_base_dir(self):
"""Get the base directory of storage"""
return "%s/.." % dirname(realpath(__file__))
def __generate_paths(self, context):
"""Generate file new name ,physical path and uri
:type context: Context
:param context: execution context
:return updated context
"""
hackathon_name = context.hackathon_name if "hackathon_name" in context else None
# replace file_name with new random name
context.file_name = self.__generate_file_name(context.file_name, hackathon_name)
context.physical_path = self.__generate_physical_path(context.file_name, context.file_type)
context.url = self.__generate_url(context.physical_path, context.file_ty
|
pe)
return context
|
def __generate_url(self, physical_path, file_type):
"""Return the http URI of file
It's for local storage only and the uploaded images must be in dir /static
:type physical_path: str|unicode
:param physical_path: the absolute physical path of the file
:type file_type: str | unicode
:param file_type: type of file which decides the directories where file is saved.
:rtype str
:return publicly accessible URI
"""
# only uploaded images need a URI.
# example: http://localhost:15000/static/pic/upload/win10-201456-1234.jpg
if file_type == FILE_TYPE.HACK_IMAGE:
i = physical_path.index("static")
path = physical_path[i:]
return self.util.get_config("endpoint") + "/" + path
return ""
def __generate_physical_path(self, file_name, file_type, hackathon_name=None):
"""Return the physical path of file including directory and file name
:type file_name: str|unicode
:param file_name: the original file name
:type file_type: str | unicode
:param file_type: type of file which decides the directories where file is saved.
:rtype str
:return physical path of the file to be saved
"""
if file_type == FILE_TYPE.HACK_IMAGE:
path = "%s/static/pic/upload%s/%s/%s" % (
self.__get_storage_base_dir(),
"/" + hackathon_name if hackathon_name else "",
time.strftime("%Y%m%d"),
file_name)
return abspath(path)
return abspath("%s/resources/lib/%s" % (
self.__get_storage_base_dir(),
file_name))
def __generate_file_name(self, origin_name, hackathon_name=None):
"""Generate a random file name
:type origin_name: str | unicode
:param origin_name the origin name of file
:type hackathon_name: str | unicode
:param hackathon_name: name of hackathon related to this file
:rtype str
:return a random file name which includes hackathon_name and time as parts
"""
if not hackathon_name:
hackathon_name = ""
extension = os.path.splitext(origin_name)[1]
new_name = "%s-%s-%s%s" % (
hackathon_name,
time.strftime("%Y%m%d"),
str(uuid.uuid1())[0:8],
extension
)
return new_name.strip('-')
|
7404N/deepnet
|
deepnet/tanh_layer.py
|
Python
|
bsd-3-clause
| 1,606
| 0.010585
|
from layer import *
class TanhLayer(Layer):
def __init__(self, *args, **kwargs):
super(TanhLayer, self).__init__(*args, **kwargs)
@classmethod
def IsLayerType(cls, proto):
return proto.hyperparams.activation == deepnet_pb2.Hyperparams.TANH
def ApplyActivation(self):
cm.tanh(self.state)
def Sample(self):
self.state.sample_bernoulli_tanh(target=self.sample)
def ComputeDeriv(self):
"""Compute derivative w.r.t input given derivative w.r.t output."""
self.deriv.apply_tanh_deriv(self.state)
if self.hyperparams.dropout:
self.deriv.mult(self
|
.mask)
def GetLoss(self, get_deriv=False, **kwargs):
"""Compu
|
tes loss.
Computes the loss function. Assumes target is in self.data and predictions
are in self.state.
Args:
get_deriv: If True, computes the derivative of the loss function w.r.t the
inputs to this layer and puts the result in self.deriv.
"""
perf = deepnet_pb2.Metrics()
perf.MergeFrom(self.proto.performance_stats)
perf.count = self.batchsize
if self.loss_function == deepnet_pb2.Layer.SQUARED_LOSS:
self.state.subtract(self.data, target=self.deriv)
error = self.deriv.euclid_norm()**2
perf.error = error
if get_deriv:
self.ComputeDeriv()
else:
raise Exception('Unknown loss function for tanh units.')
return perf
def GetSparsityDivisor(self):
self.means_temp2.assign(1)
self.means_temp2.subtract(self.means, target=self.means_temp)
self.means_temp2.add(self.means)
self.means_temp2.mult(self.means_temp)
return self.means_temp2
|
SUSE/kiwi
|
test/unit/solver/repository/rpm_md_test.py
|
Python
|
gpl-3.0
| 1,668
| 0
|
from mock import patch, call
import mock
from lxml import etree
from kiwi.solver.repository.rpm_md import SolverRepositoryRpmMd
from kiwi.solver.repository.base import SolverRepositoryBase
class TestSolverRepositoryRpmMd:
def setup(self):
self.xml_data = etree.parse('../data/repomd.xml')
self.uri = mock.Mock()
self.solver = SolverRepositoryRpmMd(self.uri)
@patch.object(SolverRepositoryBase, 'download_from_repository')
@patch.object(SolverRepositoryBase, '_create_solvables')
@patch.object(SolverRepositoryBase, '_create_temporary_metadata_dir')
|
@patch.object(SolverRepositoryBase, '_get_repomd_xml')
def test__setup_repository_metadata(
self, mock_xml, mock_mkdtemp, mock_create_solvables,
mock_download_from_reposi
|
tory
):
mock_mkdtemp.return_value = 'metadata_dir.XX'
mock_xml.return_value = self.xml_data
self.solver._setup_repository_metadata()
assert mock_download_from_repository.call_args_list == [
call(
'repodata/55f95a93-primary.xml.gz',
'metadata_dir.XX/55f95a93-primary.xml.gz'
),
call(
'repodata/0815-other.xml.gz',
'metadata_dir.XX/0815-other.xml.gz'
)
]
assert mock_create_solvables.call_args_list == [
call('metadata_dir.XX', 'rpmmd2solv'),
call('metadata_dir.XX', 'comps2solv')
]
@patch.object(SolverRepositoryBase, '_get_repomd_xml')
def test_timestamp(self, mock_xml):
mock_xml.return_value = self.xml_data
assert self.solver.timestamp() == '1478352191'
|
skosukhin/spack
|
var/spack/repos/builtin/packages/numdiff/package.py
|
Python
|
lgpl-2.1
| 2,791
| 0.000717
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Numdiff(AutotoolsPackage):
"""Numdiff is a little program that can be used to compare putatively
similar files line by line and field by field, ignoring small numeric
differences or/and different numeric formats."""
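# Typical command-line use (hypothetical invocation, not part of this recipe):
#   numdiff -a 1e-6 reference.dat output.dat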
homepage = 'https://www.nongnu.org/numdiff'
url = 'http://nongnu.askapache.com/numdiff/numdiff-5.8.1.tar.gz'
maintainers = ['davydden']
version('5.9.0', '794461a7285d8b9b1f2c4a8149889ea6')
version('5.8.1', 'a295eb391f6cb1578209fc6b4f9d994e')
variant('nls', default=False,
description="Enable Natural Language Support")
variant('gmp', default=False,
description="Use GNU Multiple Precision Arithmetic Library")
depends_on('gettext', when='+nls')
depends_on('gmp', when='+gmp')
def configure_args(self):
spec = self.spec
args = []
if '+nls' in spec:
args.append('--enable-nls')
else:
args.append('--disable-nls')
if '+gmp' in spec:
# compile with -O0 as per upstream known issue with optimization
# and GMP; https://launchpad.net/ubuntu/+source/numdiff/+changelog
# http://www.nongnu.org/numdiff/#issues
# keep this variant off by default as one may still encounter
# GNU MP: Cannot allocate memory (size=2305843009206983184)
args.extend([
'--enable-gmp',
'CFLAGS=-O0'
])
else:
args.append('--disable-gmp')
return args
|
chitianhao/trafficserver
|
tests/gold_tests/pluginTest/sslheaders/sslheaders.test.py
|
Python
|
apache-2.0
| 3,767
| 0.004247
|
'''
Test the sslheaders plugin.
'''
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
Test.Summary = '''
Test sslheaders plugin.
'''
Test.SkipUnless(
Condition.HasCurlFeature('http2'),
)
Test.Disk.File('sslheaders.log').Content = 'sslheaders.gold'
server = Test.MakeOriginServer("server", options={'--load': Test.TestDirectory + '/observer.py'})
request_header = {
"headers": "GET / HTTP/1.1\r\nHost: doesnotmatter\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
response_header = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
server.addResponse("sessionlog.json", request_header, response_header)
ts = Test.MakeATSProcess("ts", select_ports=False)
ts.addSSLfile("ssl/server.pem")
ts.addSSLfile("ssl/server.key")
# ts.addSSLfile("ssl/signer.pem")
ts.Variables.ssl_port = 4443
ts.Disk.records_config.update({
'proxy.config.diags.debug.enabled': 0,
'proxy.config.diags.debug.tags': 'http',
'proxy.config.http.cache.http': 0, # Make sure each request is forwarded to the origin server.
'proxy.config.proxy_name': 'Poxy_Proxy', # This will be the server name.
'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir),
'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir),
'proxy.config.http.server_ports': (
'ipv4:{0} ipv4:{1}:proto=http2;http:ssl ipv6:{0} ipv6:{1}:proto=http2;http:ssl'
.format(ts.Variables.port, ts.Variables.ssl_port)),
# 'proxy.config.ssl.client.verify.server': 0,
# 'proxy.config.ssl.server.cipher_suite': 'ECDHE-RSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-SHA256:ECDHE-RSA-AES256-SHA384:AES128-GCM-SHA256:AES256-GCM-SHA384:ECDHE-RSA-RC4-SHA:ECDHE-RSA-AES128-SHA:ECDHE-RSA-AES256-SHA:RC4-SHA:RC4-MD5:AES128-SHA:AES256-SHA:DES-CBC3-SHA!SRP:!DSS:!PSK:!aNULL:!eNULL:!SSLv2',
# 'proxy.config.url_remap.pristine_host_hdr' : 1,
# 'proxy.config.ssl.client.certification_level': 2,
# 'proxy.config.ssl.CA.cert.filename': '{0}/signer.pem'.format(ts.Variables.SSLDir),
# 'proxy.config.ssl.TLSv1_3': 0
})
ts.Disk.ssl_multicert_config.AddLine(
'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key'
)
ts.Disk.remap_config.AddLine(
'map http://bar.com http://127.0.0.1:{0}'.format(server.Variables.Port)
)
ts.Disk.remap_config.AddLine(
'map https://bar.com http://127.0.0.1:{0}'.format(server.Variables.Port)
)
ts.Disk.ssl_server_name_yaml.AddLines([
'- fqdn: "*bar.com"',
' verify_client: STRICT',
])
ts.Disk.plugin_config.AddLine(
'sslheaders.so SSL-Client-ID=client.subject'
)
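# The request below deliberately supplies a bogus SSL-Client-ID header; the plugin
# is expected to overwrite it with the client certificate subject, and the result
# is checked against sslheaders.gold via the log comparison above.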
tr = Test.AddTestRun()
tr.Processes.Default.StartBefore(server)
tr.Processes.Default.StartBefore(Test.Processes.ts, ready=When.PortOpen(ts.Variables.ssl_port))
tr.Processes.Default.Command = (
'curl -H "SSL-Client-ID: My Fake Client ID" --verbose --ipv4 --insecure --header "Host: bar.com"' +
' https://localhost:{}'.format(ts.Variables.ssl_port)
)
tr.Processes.Default.ReturnCode = 0
|
kaplun/ops
|
modules/bibdocfile/lib/bibdocfile_webinterface.py
|
Python
|
gpl-2.0
| 26,790
| 0.005524
|
## This file is part of Invenio.
## Copyright (C) 2012, 2013 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
import cgi
import os
import time
import shutil
from invenio.config import (CFG_ACCESS_CONTROL_LEVEL_SITE,
CFG_SITE_LANG,
CFG_TMPSHAREDDIR,
CFG_SITE_URL,
CFG_SITE_SECURE_URL,
CFG_WEBSUBMIT_STORAGEDIR,
CFG_SITE_RECORD,
CFG_INSPIRE_SITE,
CFG_BIBDOCFILE_DOCUMENT_FILE_MANAGER_ICON_DOCTYPES,
CFG_BIBDOCFILE_DOCUMENT_FILE_MANAGER_ICON_SIZE)
from invenio.bibdocfile_config import CFG_BIBDOCFILE_DOCUMENT_FILE_MANAGER_DOCTYPES, \
CFG_BIBDOCFILE_DOCUMENT_FILE_MANAGER_MISC, \
CFG_BIBDOCFILE_DOCUMENT_FILE_MANAGER_RESTRICTIONS, \
CFG_BIBDOCFILE_ICON_SUBFORMAT_RE
from invenio import webinterface_handler_config as apache
from invenio.access_control_config import VIEWRESTRCOLL
from invenio.access_control_mailcookie import mail_cookie_create_authorize_action
from invenio.access_control_engine import acc_authorize_action
from invenio.access_control_admin import acc_is_role
from invenio.webpage import page, pageheaderonly, \
pagefooteronly, warning_page, write_warning
from invenio.webuser import getUid, page_not_authorized, collect_user_info, isUserSuperAdmin, \
isGuestUser
from invenio import webjournal_utils
from invenio.webinterface_handler import wash_urlargd, WebInterfaceDirectory
from invenio.urlutils import make_canonical_urlargd, redirect_to_url
from invenio.messages import gettext_set_language
from invenio.search_engine import \
guess_primary_collection_of_a_record, get_colID, record_exists, \
create_navtrail_links, check_user_can_view_record, record_empty, \
is_user_owner_of_record
from invenio.bibdocfile import BibRecDocs, normalize_format, file_strip_ext, \
stream_restricted_icon, BibDoc, InvenioBibDocFileError, \
get_subformat_from_format
from invenio.errorlib import register_exception
from invenio.websearchadminlib import get_detailed_page_tabs, get_detailed_page_tabs_counts
import invenio.template
bibdocfile_templates = invenio.template.load('bibdocfile')
webstyle_templates = invenio.template.load('webstyle')
websubmit_templates = invenio.template.load('websubmit')
websearch_templates = invenio.template.load('websearch')
from invenio.bibdocfile_managedocfiles import \
create_file_upload_interface, \
get_upload_file_interface_javascript, \
get_upload_file_interface_css, \
move_uploaded_files_to_storage
bibdocfile_templates = invenio.template.load('bibdocfile')
class WebInterfaceFilesPages(WebInterfaceDirectory):
def __init__(self, recid):
self.recid = recid
def _lookup(self, component, path):
# after /<CFG_SITE_RECORD>/<recid>/files/ every part is used as the file
# name
filename = component
def getfile(req, form):
args = wash_urlargd(form, bibdocfile_templates.files_default_urlargd)
ln = args['ln']
_ = gettext_set_language(ln)
uid = getUid(req)
user_info = collect_user_info(req)
verbose = args['verbose']
if verbose >= 1 and not isUserSuperAdmin(user_info):
# Only SuperUser can see all the details!
verbose = 0
if uid == -1 or CFG_ACCESS_CONTROL_LEVEL_SITE > 1:
return page_not_authorized(req, "/%s/%s" % (CFG_SITE_RECORD, self.recid),
navmenuid='submit')
if record_exists(self.recid) < 1:
msg = "<p>%s</p>" % _("Requested record does not seem to exist.")
return warning_page(msg, req, ln)
if record_empty(self.recid):
msg = "<p>%s</p>" % _("Requested record does not seem to have been integrated.")
return warning_page(msg, req, ln)
(auth_code, auth_message) = check_user_can_view_record(user_info, self.recid)
if auth_code and user_info['email'] == 'guest':
if webjournal_utils.is_recid_in_released_issue(self.recid):
# We can serve the file
pass
else:
cookie = mail_cookie_create_authorize_action(VIEWRESTRCOLL, {'collection' : guess_primary_collection_of_a_record(self.recid)})
target = CFG_SITE_SECURE_URL + '/youraccount/login' + \
make_canonical_urlargd({'action': cookie, 'ln' : ln, 'referer' : \
CFG_SITE_SECURE_URL + user_info['uri']}, {})
return redirect_to_url(req, target, norobot=True)
elif auth_code:
if webjournal_utils.is_recid_in_released_issue(self.recid):
# We can serve the file
pass
else:
return page_not_authorized(req, "../", \
text = auth_message)
readonly = CFG_ACCESS_CONTROL_LEVEL_SITE == 1
# From now on: either the user provided a specific file
# name (and a possible version), or we return a list of
# all the available files. In no case are the docids
# visible.
try:
bibarchive = BibRecDocs(self.recid)
except InvenioBibDocFileError:
register_exception(req=req, alert_admin=True)
msg = "<p>%s</p><p>%s</p>" % (
_("The system has encountered an error in retrieving the list of files for this document."),
_("The error has been logged and will be taken in consideration as soon as possible."))
return warning_page(msg, req, ln)
if bibarchive.deleted_p():
req.status = apache.HTTP_GONE
return warning_page(_("Requested record does not seem to exist."), req, ln)
docname = ''
docformat = ''
version = ''
warn = ''
if filename:
# We know the complete file name, guess which docid it
# refers to
## TODO: Change the extension system according to ext.py from setlink
## and have a uniform extension mechanism...
docname = file_strip_ext(filename)
docformat = filename[len(docname):]
if docformat and docformat[0] != '.':
docformat = '.' + docformat
if args['subformat']:
docformat += ';%s' % args['subformat']
else:
docname = args['docname']
if not docformat:
docformat = args['format']
if args['subformat']:
docformat += ';%s' % args['subformat']
if not version:
version = args['version']
## Download as attachment
is_download = False
if args['download']:
is_download = True
# version could be either empty, or all or an integer
try:
int(version)
except ValueError:
if version != 'all':
version = ''
display_hidden = isUser
|
Nevtep/omniwallet
|
api/send.py
|
Python
|
agpl-3.0
| 10,105
| 0.017912
|
import urlparse
import os, sys
tools_dir = os.environ.get('TOOLSDIR')
lib_path = os.path.abspath(tools_dir)
sys.path.append(lib_path)
from msc_utils_parsing import *
from blockchain_utils import *
from msc_apps import *
import random
def send_form_response(response_dict):
expected_fields=['from_address', 'to_address', 'amount', 'currency', 'fee']
# if marker is True, send dust to marker (for payments of sells)
for field in expected_fields:
if not response_dict.has_key(field):
info('No field '+field+' in response dict '+str(response_dict))
return (None, 'No field '+field+' in response dict '+str(response_dict))
if len(response_dict[field]) != 1:
info('Multiple values for field '+field)
return (None, 'Multiple values for field '+field)
if response_dict.has_key( 'pubKey' ) and is_pubkey_valid( response_dict['pubKey'][0]):
pubkey = response_dict['pubKey'][0]
response_status='OK'
else:
response_status='invalid pubkey'
pubkey=None
from_addr=response_dict['from_address'][0]
if not is_valid_bitcoin_address_or_pubkey(from_addr):
return (None, 'From address is neither bitcoin address nor pubkey')
to_addr=response_dict['to_address'][0]
if not is_valid_bitcoin_address(to_addr):
return (None, 'To address is not a bitcoin address')
amount=response_dict['amount'][0]
if float(amount)<0 or float( from_satoshi(amount))>max_currency_value:
return (None, 'Invalid amount: ' + str( from_satoshi( amount )) + ', max: ' + str( max_currency_value ))
btc_fee=response_dict['fee'][0]
if float(btc_fee)<0 or float( from_satoshi(btc_fee))>max_currency_value:
return (None, 'Invalid fee: ' + str( from_satoshi( amount )) + ', max: ' + str( max_currency_value ))
currency=response_dict['currency'][0]
if currency=='OMNI':
currency_id=1
else:
if currency=='T-OMNI':
currency_id=2
else:
if currency=='BTC':
currency_id=0
else:
if currency[:2] == 'SP':
currency_id=int(currency[2:])
else:
return (None, 'Invalid currency')
marker_addr=None
try:
marker=response_dict['marker'][0]
if marker.lower()=='true':
marker_addr=exodus_address
except KeyError:
# if no marker, marker_addr stays None
pass
if pubkey == None:
tx_to_sign_dict={'transaction':'','sourceScript':''}
l=len(from_addr)
if l == 66 or l == 130: # probably pubkey
if is_pubkey_valid(from_addr):
pubkey=from_addr
response_status='OK'
else:
response_status='invalid pubkey'
else:
if not is_valid_bitcoin_address(from_addr):
response_status='invalid address'
else:
from_pubkey=bc_getpubkey(from_addr)
if not is_pubkey_valid(from_pubkey):
response_status='missing pubkey'
else:
pubkey=from_pubkey
response_status='OK'
try:
if pubkey != None:
tx_to_sign_dict=prepare_send_tx_for_signing( pubkey, to_addr, marker_addr, currency_id, amount, btc_fee)
else:
# hack to show error on page
tx_to_sign_dict['sourceScript']=response_status
response='{"status":"'+response_status+'", "transaction":"'+tx_to_sign_dict['transaction']+'", "sourceScript":"'+tx_to_sign_dict['sourceScript']+'"}'
print "Sending unsigned tx to user for signing", response
return (response, None)
except Exception as e:
print "error creating unsigned tx", e
return (None, str(e))
# simple send and bitcoin send (with or without marker)
def prepare_send_tx_for_signing(from_address, to_address, marker_address, currency_id, amount, btc_fee=500000):
print '*** send tx for signing, amount: ' + amount
print ' btc_fee: ' + btc_fee
# consider a more general func that covers also sell offer and sell accept
# check if address or pubkey was given as from address
if from_address.startswith('0'): # a pubkey was given
from_address_pub=from_address
from_address=get_addr_from_key(from_address)
else: # address was given
from_address_pub=addrPub=bc_getpubkey(from_address)
from_address_pub=from_address_pub.strip()
# set change address to from address
change_address_pub=from_address_pub
changeAddress=from_address
satoshi_amount=int( amount )
fee=int( btc_fee )
# differ bitcoin send and other currencies
if currency_id == 0: # bitcoin
# normal bitcoin send
required_value=satoshi_amount
# if marker is needed, allocate dust for the marker
if marker_address != None:
required_value+=1*dust_limit
else:
tx_type=0 # only simple send is supported
required_value=4*dust_limit
#------------------------------------------- New utxo calls
fee_total_satoshi=required_value+fee
dirty_txes = bc_getutxo( from_address, fee_total_satoshi )
if (dirty_txes['error'][:3]=='Con'):
raise Exception({ "status": "NOT OK", "error": "Couldn't get list of unspent tx's. Response Code: " + dirty_txes['code'] })
if (dirty_txes['error'][:3]=='Low'):
raise Exception({ "status": "NOT OK", "error": "Not enough funds, try again. Needed: " + str(fee_total_satoshi) + " but Have: " + dirty_txes['avail'] })
inputs_total_value = dirty_txes['avail']
inputs = dirty_txes['utxos']
#------------------------------------------- Old utxo calls
# get utxo required for the tx
#utxo_all=get_utxo(from_address, required_value+fee)
#utxo_split=utxo_all.split()
#inputs_number=len(utxo_split)/12
#inputs=[]
#inputs_total_value=0
#if inputs_number < 1:
# info('Error not enough BTC to generate tx - no inputs')
# raise Exception('This address must have enough BTC for protocol transaction fees and miner fees')
#for i in range(inputs_number):
# inputs.append(utxo_split[i*12+3])
# try:
# inputs_total_value += int(utxo_split[i*12+7])
# except ValueError:
# info('Error parsing utxo, '+ str(utxo_split) )
# raise Exception('Error: parsing inputs was invalid, do you have enough BTC?')
#inputs_outputs='/dev/stdout'
#for i in inputs:
# inputs_outputs+=' -i '+i
#---------------------------------------------- End Old utxo calls
inputs_outputs='/dev/stdout'
for i in inputs:
inputs_outputs+=' -i '+str(i[0])+':'+str(i[1])
# calculate change
change_value=inputs_total_value-required_value-fee
if change_value < 0:
info('Error not enough BTC to generate tx - negative change')
raise Exception('This address must have enough BTC for miner fees and protocol transaction fees')
if currency_id == 0: # bitcoin
# create a normal bitcoin transaction (not mastercoin)
# dust to marker if required
# amount to to_address
# change to change
if marker_address != None:
inputs_outputs+=' -o '+marker_address+':'+str(dust_limit)
inputs_outputs+=' -o '+to_address+':'+str(satoshi_amount)
else:
# create multisig tx
# simple send - multisig
# dust to exodus
# dust to to_address
# double dust to rawscript "1 [ change_address_pub ] [ dataHex_obfuscated ] 2 checkmultisig"
# change to change
dataSequenceNum=1
dataHex = '{:02x}'.format(0) + '{:02x}'.format(dataSequenceNum) + \
'{:08x}'.format(tx_type) + '{:08x}'.format(currency_id) + \
'{:016x}'.format(satoshi_amount) + '{:06x}'.format(0)
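# The packed payload above appears to follow the simple-send layout implied by the
# format widths: 1 byte version (0), 1 byte sequence number, 4 bytes transaction type,
# 4 bytes currency id, 8 bytes amount in satoshi, and 3 bytes of zero padding.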
dataBytes = dataHex.decode('hex_codec')
dataAddress = hash_160_to_bc_address(dataBytes[1:21])
# create the BIP11 magic
change_address_compressed_pub=get_compressed_pubkey_format( change_address
|
xiruibing/hae
|
src/trayicon.py
|
Python
|
mit
| 1,695
| 0.040583
|
from PyQt5.QtCore import QObject, pyqtSlot, pyqtSignal
from PyQt5.Qt import QSystemTrayIcon, QIcon
class TrayIcon(QSystemTrayIcon):
ActivationReason = ['Unknown', 'Context', 'DoubleClick', 'Trigger', 'MiddleClick']
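# The list indices mirror Qt's QSystemTrayIcon.ActivationReason enum values (0-4).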
onactivate = pyqtSignal(int, str)
onmessageclick = pyqtSignal()
def __init__(self, parent, toolTip = '', icon = ''):
super(TrayIcon, self).__init__(parent)
self.setObjectName('trayIcon')
self.setIcon(icon)
self.setToolTip(toolTip)
self.activated.connect(self.activateHandler)
self.messageClicked.connect(self.onmessageclick)
# Slots
# Set the tooltip
@pyqtSlot(str)
def setToolTip(self, toolTip):
super(TrayIcon, self).setToolTip(toolTip)
# Set the icon
@pyqtSlot(str)
def setIcon(self, icon):
if icon:
icon = QIcon(icon)
else:
icon = self.parent().windowIcon()
super(TrayIcon, self).setIcon(QIcon(icon))
# Set the context (right-click) menu
@pyqtSlot(QObject)
def setContextMenu(self, menu):
super(TrayIcon, self).setContextMenu(menu)
# Return whether the tray icon is visible
@pyqtSlot(result = bool)
def isVisible(self):
return super(TrayIcon, self).isVisible()
# Return whether balloon messages are supported
@pyqtSlot(result = bool)
def supportsMessages(self):
return super(TrayIcon, self).supportsMessages()
# Return whether a system tray is available
@pyqtSlot(result = bool)
def isSystemTrayAvailable(self):
return super(TrayIcon, self).isSystemTrayAvailable()
# Show a tray message
# showMessage
# Set visibility
# setVisible
# Show
# show
# Hide
# hide
# Signals
def activateHandler(self, reason):
self.onactivate.emit(reason, TrayIcon.ActivationReason[reason])
|
JensGrabner/mpmath
|
mpmath/function_docs.py
|
Python
|
bsd-3-clause
| 280,518
| 0.000125
|
"""
Extended docstrings for functions.py
"""
pi = r"""
`\pi`, roughly equal to 3.141592654, represents the area of the unit
circle, the half-period of trigonometric functions, and many other
things in mathematics.
Mpmath can evaluate `\pi` to arbitrary precision::
>>> from mpmath import *
>>> mp.dps = 50; mp.pretty = True
>>> +pi
3.1415926535897932384626433832795028841971693993751
This shows digits 99991-100000 of `\pi` (the last digit is actually
a 4 when the decimal expansion is truncated, but here the nearest
rounding is used)::
>>> mp.dps = 100000
>>> str(pi)[-10:]
'5549362465'
**Possible issues**
:data:`pi` always rounds to the nearest floating-point
number when used. This means that exact mathematical identities
involving `\pi` will generally not be preserved in floating-point
arithmetic. In particular, multiples of :data:`pi` (except for
the trivial case ``0*pi``) are *not* the exact roots of
:func:`~mpmath.sin`, but differ roughly by the current epsilon::
>>> mp.dps = 15
>>> sin(pi)
1.22464679914735e-16
One solution is to use the :func:`~mpmath.sinpi` function instead::
>>> sinpi(1)
0.0
See the documentation of trigonometric functions for additional
details.
"""
degree = r"""
Represents one degree of angle, `1^{\circ} = \pi/180`, or
about 0.01745329. This constant may be evaluated to arbitrary
precision::
>>> from mpmath import *
>>> mp.dps = 50; mp.pretty = True
>>> +degree
0.017453292519943295769236907684886127134428718885417
The :data:`degree` object is convenient for conversion
to radians::
>>> sin(30 * degree)
0.5
>>> asin(0.5) / degree
30.0
"""
e = r"""
The transcendental number `e` = 2.718281828... is the base of the
natural logarithm (:func:`~mpmath.ln`) and of the exponential function
(:func:`~mpmath.exp`).
Mpmath can evaluate `e` to arbitrary precision::
>>> from mpmath import *
>>> mp.dps = 50; mp.pretty = True
>>> +e
2.7182818284590452353602874713526624977572470937
This shows digits 99991-100000 of `e` (the last digit is actually
a 5 when the decimal expansion is truncated, but here the nearest
rounding is used)::
>>> mp.dps = 100000
>>> str(e)[-10:]
'2100427166'
**Possible issues**
:data:`e` always rounds to the nearest floating-point number
when used, and mathematical identities involving `e` may not
hold in floating-point arithmetic. For example, ``ln(e)``
might not evaluate exactly to 1.
In particular, don't use ``e**x`` to compute the exponential
function. Use ``exp(x)`` instead; this is both faster and more
accurate.
"""
phi = r"""
Represents the golden ratio `\phi = (1+\sqrt 5)/2`,
approximately equal to 1.6180339887. To high precision,
its value is::
>>> from mpmath import *
>>> mp.dps = 50; mp.pretty = True
>>> +phi
1.6180339887498948482045868343656381177203091798058
Formulas for the golden ratio include the following::
>>> (1+sqrt(5))/2
1.6180339887498948482045868343656381177203091798058
>>> findroot(lambda x: x**2-x-1, 1)
1.6180339887498948482045868343656381177203091798058
>>> limit(lambda n: fib(n+1)/fib(n), inf)
1.6180339887498948482045868343656381177203091798058
"""
euler = r"""
Euler's constant or the Euler-Mascheroni constant `\gamma`
= 0.57721566... is a number of central importance to
number theory and special functions. It is defined as the limit
.. math ::
\gamma = \lim_{n\to\infty} H_n - \log n
where `H_n = 1 + \frac{1}{2} + \ldots + \frac{1}{n}` is a harmonic
number (see :func:`~mpmath.harmonic`).
Evaluation of `\gamma` is supported at arbitrary precision::
>>> from mpmath import *
>>> mp.dps = 50; mp.pretty = True
>>> +euler
0.57721566490153286060651209008240243104215933593992
We can also compute `\gamma` directly from the definition,
although this is less efficient::
>>> limit(lambda n: harmonic(n)-log(n), inf)
0.57721566490153286060651209008240243104215933593992
This shows digits 9991-10000 of `\gamma` (the last digit is actually
a 5 when the decimal expansion is truncated, but here the nearest
rounding is used)::
>>> mp.dps = 10000
>>> str(euler)[-10:]
'4679858166'
Integrals, series, and representations for `\gamma` in terms of
special functions include the following (there are many others)::
>>> mp.dps = 25
>>> -quad(lambda x: exp(-x)*log(x), [0,inf])
0.5772156649015328606065121
>>> quad(lambda x,y: (x-1)/(1-x*y)/log(x*y), [0,1], [0,1])
0.5772156649015328606065121
>>> nsum(lambda k: 1/k-log(1+1/k), [1,inf])
0.5772156649015328606065121
>>> nsum(lambda k: (-1)**k*zeta(k)/k, [2,inf])
0.5772156649015328606065121
>>> -diff(gamma, 1)
0.5772156649015328606065121
>>> limit(lambda x: 1/x-gamma(x), 0)
0.5772156649015328606065121
>>> limit(lambda x: zeta(x)-1/(x-1), 1)
0.5772156649015328606065121
>>> (log(2*pi*nprod(lambda n:
... exp(-2+2/n)*(1+2/n)**n, [1,inf]))-3)/2
0.5772156649015328606065121
For generalizations of the identities `\gamma = -\Gamma'(1)`
and `\gamma = \lim_{x\to1} \zeta(x)-1/(x-1)`, see
:func:`~mpmath.psi` and :func:`~mpmath.stieltjes` respectively.
"""
catalan = r"""
Catalan's constant `K` = 0.91596559... is given by the infinite
series
.. math ::
K = \sum_{k=0}^{\infty} \frac{(-1)^k}{(2k+1)^2}.
Mpmath can evaluate it to arbitrary precision::
>>> from mpmath import *
>>> mp.dps = 50; mp.pretty = True
>>> +catalan
0.91596559417721901505460351493238411077414937428167
One can also compute `K` directly from the definition, although
this is significantly less efficient::
>>> nsum(lambda k: (-1)**k/(2*k+1)**2, [0, inf])
0.91596559417721901505460351493238411077414937428167
This shows digits 9991-10000 of `K` (the last digit is actually
a 3 when the decimal expansion is truncated, but here the nearest
rounding is used)::
>>> mp.dps = 10000
>>> str(catalan)[-10:]
'9537871504'
Catalan's constant has numerous integral representations::
>>> mp.dps = 50
>>> quad(lambda x: -log(x)/(1+x**2), [0, 1])
0.91596559417721901505460351493238411077414937428167
>>> quad(lambda x: atan(x)/x, [0, 1])
0.91596559417721901505460351493238411077414937428167
>>> quad(lambda x: ellipk(x**2)/2, [0, 1])
0.91596559417721901505460351493238411077414937428167
>>> quad(lambda x,y: 1/(1+(x*y)**2), [0, 1], [0, 1])
0.91596559417721901505460351493238411077414937428167
As well as series representations::
>>> pi*log(sqrt(3)+2)/8 + 3*nsum(lambda n:
... (fac(n)/(2*n+1))**2/fac(2*n), [0, inf])/8
0.91596559417721901505460351493238411077414937428167
>>> 1-nsum(lambda n: n*zeta(2*n+1)/16**n, [1,inf])
0.91596559417721901505460351493238411077414937428167
"""
khinchin = r"""
Khinchin's constant `K` = 2.68542... is a number that
appears in the theory of continued fractions. Mpmath can evaluate
it to arbitrary precision::
>>> from mpmath import *
>>> mp.dps = 50; mp.pretty = True
>>> +khinchin
2.6854520010653064453097148354817956938203822939945
An integral representation is::
>>> I = quad(lambda x: log((1-x**2)/sincpi(x))/x/(1+x), [0, 1])
>>> 2*exp(1/log(2)*I)
2.6854520010653064453097148354817956938203822939945
The computation of ``khinchin`` is based on an efficient
implementation of the following series::
>>> f = lambda n: (zeta(2*n)-1)/n*sum((-1)**(k+1)/mpf(k)
... for k in range(1,2*int(n)))
>>> exp(nsum(f, [1,inf])/log(2))
2.6854520010653064453097148354817956938203822939945
"""
glaisher = r"""
Glaisher's constant `A`, also known as the Glaisher-Kinkelin
constant, is a number approximately equal to 1.282427129 that
sometimes appears in formulas related to gamma and zeta functions.
It is also related to the Barnes G-function (see :func:`~mpmath.barnesg`).
The constant is defined as `A = \exp(1/12-\zeta'(-1))` where
`\zeta'(s)` denotes the derivative of the Riemann zeta function
(see :func:`~mpmath.zeta`).
Mpmath can evaluate Glaisher's constant to arbitrary precision:
>>> from mpmath import *
>>> mp.dps = 50; mp.pretty = True
>>> +glaisher
1.
|
chunchih/article-matching
|
experiment/find_key_relation.py
|
Python
|
bsd-3-clause
| 2,408
| 0.023671
|
# -*- coding: utf-8 -*-
from gensim.models import word2vec
from gensim import models
import jieba
import codecs
import io
from collections import Counter
import operator
import numpy
f = codecs.open("target_article.txt",'r','utf8')
content = f.readlines()
article = []
jieba.set_dictionary('jieba_dict/dict.txt.big')
model = models.Word2Vec.load_word2vec_format('med250.model.bin',binary=True)
# import stopword
stopwordset = set()
with io.open('jieba_dict/stopwords.txt','r',encoding='utf-8') as sw:
for line in sw:
stopwordset.add(line.strip('\n'))
# Cut The Words , Output: short words in article
for line in content:
seg_list = jieba.cut(line)
for gg in seg_list:
if gg not in stopwordset:
article.append(gg)
# Count frequency
raw_data = Counter(article)
raw_data = { key:raw_data[key] for key in raw_data if key in model.vocab}
low_level = 0
for key in raw_data:
low_level += raw_data[key]
low_level = int(round(low_level*0.01))
# Initial Accumalation
words = []
acc_data = dict()
map_words = []
related_word = dict()
for keys in raw_data:
words.append(keys)
# acc_data[keys] = 0
# Pick up the Friends
for word_1 in words:
cand_words = []
for word_2 in words:
if model.similarity(word_1, word_2) >= 0.6:
cand_words.append(word_2)
map_words.append(cand_words)
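# Score each word by the mean pairwise similarity among its "friends" (words with
# similarity >= 0.6); self-pairs are skipped in the sum, but the leng*leng
# denominator still counts them, which slightly deflates the score.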
for i in range(len(map_words)):
friend_list = map_words[i]
value = 0.0
for friend_1 in friend_list:
for friend_2 in friend_list:
if friend_1 == friend_2:
continue
value += model.similarity(friend_1, friend_2)
leng = len(friend_list)
related_word[words[i]] = value/float(leng*leng)
s_imp_words = sorted(related_word.items(), key=operator.itemgetter(1), reverse=True)
for i in s_imp_words[:20]:
print i[0]
print "-----------------------"
#print s_imp_words
# for value in output:
# if value[1] == 0.0:
# continue
# print value[0], value[1]
# print "-----------------------"
keywords = []
fg = numpy.zeros(len(s_imp_words))
for i in range(len(s_imp_words)):
if fg[i] == 1:
continue
for j in range(i+1,len(s_imp_words)):
if fg[j] != 1:
if model.similarity(s_imp_words[i][0], s_imp_words[j][0]) >= 0.7:
fg[j] = 1
keywords.append(s_imp_words[i])
#print s_imp_words[i][0]
for i in keywords[:10]:
print i[0]
# with io.open("target_keywords.txt",'w',encoding='utf-8') as output:
# for text in keywords:
# output.write(text + '\n')
|
thuck/ppam
|
ui/tabs.py
|
Python
|
gpl-2.0
| 13,310
| 0.003681
|
###############################################################################
#PPAM is a pulseaudio interface.
#Copyright (C) 2013 Denis Doria (Thuck)
#
#This program is free software; you can redistribute it and/or
#modify it under the terms of the GNU General Public License
#as published by the Free Software Foundation; version 2
#of the License.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with this program; if not, write to the Free Software
#Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
###############################################################################
import curses
from curses import KEY_UP, KEY_DOWN
from ui.basic import draw_info_window
from pulse import pulseaudio as pa
from pulse import components as co
class GenericStream(object):
def __init__(self, win, stream_type, name):
self.win = win
self.height, self.width = self.win.getmaxyx()
self.name = name
self.help = [_("+/- to Increase and decrease volume"),
_("./, to Increase and decrease right volume"),
_("</> to Increase and decrease left volume"),
_("m to Mute"),
_("K to kill the steram")]
self.selected_item = 0
self.max_item = 0
self.playback = getattr(co, stream_type)()
self.streams = []
self.type_of_info = None
self.info_window_data = None
def resize_window(self, win):
self.win = win
self.height, self.width = self.win.getmaxyx()
def _update_info_window(self, pid):
if self.type_of_info == 'p':
self.info_window_data = self.playback.properties(pid)
elif self.type_of_info == 'i':
self.info_window_data = self.playback.info(pid)
elif self.type_of_info == 'H':
self.info_window_data = self.help
def update(self, char):
if self.selected_item > self.max_item:
self.selected_item = self.max_item
if char in (ord('H'), ):
self.type_of_info = 'H'
self.info_window_data = self.help
elif char in (ord('c'), ):
self.type_of_info = None
self.info_window_data = None
elif self.streams:
pid = self.streams[self.selected_item][1]
self._update_info_window(pid)
if char in (ord('+'), ):
self.playback.increase_volume(pid)
elif char in (ord('-'), ):
self.playback.decrease_volume(pid)
elif char in (ord('m'),):
self.playback.mute(pid)
elif char in (ord('>'), ):
self.playback.increase_left_volume(pid)
elif char in (ord('.'), ):
self.playback.increase_right_volume(pid)
elif char in (ord('<'), ):
self.playback.decrease_left_volume(pid)
elif char in (ord(','), ):
self.playback.decrease_right_volume(pid)
elif char in (ord('p'), ):
self.type_of_info = 'p'
self.info_window_data = self.playback.properties(pid)
elif char in (ord('i'), ):
self.type_of_info = 'i'
self.info_window_data = self.playback.info(pid)
elif char in (ord('K'), ):
self.playback.kill(pid)
elif char in (KEY_UP, ord('k')) and self.selected_item > 0:
self.selected_item -= 1
elif (char in (KEY_DOWN, ord('j')) and
self.selected_item < self.max_item):
self.selected_item += 1
def draw(self):
self.streams = self.playback.playing()
line_number = 0
self.win.erase()
self.win.box()
for line_number, stream in enumerate(self.streams):
if len(stream) == 5:
(app_name,
app_pid,
volume_left,
volume_right,
mute) = stream
line = '[%s] L:%i%% R:%i%% (%s)' % (app_name, volume_left,
volume_right, app_pid)
else:
(app_name,
app_pid,
volume_left,
mute) = stream
line = '[%s] M:%i%% (%s)' % (app_name, volume_left, app_pid)
if mute:
line = '%s [M]' % (line)
if self.selected_item == line_number:
self.win.addstr(line_number + 1, 1, line, curses.color_pair(1))
else:
self.win.addstr(line_number + 1, 1, line)
self.max_item = line_number
if self.info_window_data:
draw_info_window(self.win, self.info_window_data)
self.win.refresh()
class TabPlayback(GenericStream):
def __init__(self, win):
GenericStream.__init__(self, win, 'Playback', _('Playback'))
class TabRecord(GenericStream):
def __init__(self, win):
GenericStream.__init__(self, win, 'Record', _('Record'))
class GenericDevice(object):
def __init__(self, win, device_type, name):
self.win = win
self.height, self.width = self.win.getmaxyx()
self.name = name
self.help = [_("+/- to Increase and decrease volume"),
_("./, to Increase and decrease right volume"),
_("</> to Increase and decrease left volume"),
_("m to Mute")]
self.selected_item = 0
self.max_item = 0
self.device = getattr(co, device_type)()
self.devices = []
self.type_of_info = None
self.info_window_data = None
def resize_window(self, win):
self.win = win
self.height, self.width = self.win.getmaxyx()
def _update_info_window(self, info):
if self.type_of_info == 'p':
self.info_window_data = self.device.properties(info)
elif self.type_of_info == 'i':
self.info_window_data = self.device.info(info)
elif self.type_of_info == 'H':
self.info_window_data = self.help
def update(self, char):
if self.selected_item > self.max_item:
self.selected_item = self.max_item
if char in (ord('H'), ):
self.type_of_info = 'H'
self.info_window_data = self.help
elif char in (ord('c'), ):
self.type_of_info = None
self.info_window_data = None
elif self.devices:
name = self.devices[self.selected_item][0]
self._update_info_window(name)
if char in (ord('+'), ):
self.device.increase_volume(name)
elif char in (ord('-'), ):
self.device.decrease_volume(name)
elif char in (ord('m'),):
self.device.mute(name)
elif char in (ord('>'), ):
self.device.increase_left_volume(name)
elif char in (ord('.'), ):
self.device.increase_right_volume(name)
elif char in (ord('<'), ):
self.device.decrease_left_volume(name)
elif char in (ord(','), ):
self.device.decrease_right_volume(name)
elif char in (ord('p'), ):
self.type_of_info = 'p'
self.info_window_data = self.device.properties(name)
elif char in (ord('i'), ):
self.type_of_info = 'i'
self.info_window_data = self.device.info(name)
elif char in (ord('n'), ):
self.device.change_port_next(name)
elif char in (ord('p'), ):
self.device.change_port_previous(name)
elif char in (KEY_UP, ord('k')) and self.selected_item > 0:
self.selected_item -= 1
elif (char in (KEY_DOWN, ord('j')) and
|
aperigault/ansible
|
docs/docsite/rst/conf.py
|
Python
|
gpl-3.0
| 8,952
| 0.00067
|
# -*- coding: utf-8 -*-
#
# documentation build configuration file, created by
# sphinx-quickstart on Sat Sep 27 13:23:22 2008-2009.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# The contents of this file are pickled, so don't put values in the namespace
# that aren't pickleable (module imports are okay, they're removed
# automatically).
#
# All configuration values have a default value; values that are commented out
# serve to show the default value.
import sys
import os
# pip install sphinx_rtd_theme
# import sphinx_rtd_theme
# html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# If your extensions are in another directory, add it here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
# sys.path.append(os.path.abspath('some/directory'))
#
sys.path.insert(0, os.path.join('ansible', 'lib'))
sys.path.append(os.path.abspath(os.path.join('..', '_extensions')))
# We want sphinx to document the ansible modules contained in this repository,
# not those that may happen to be installed in the version
# of Python used to run sphinx. When sphinx loads in order to document,
# the repository version needs to be the one that is loaded:
sys.path.insert(0, os.path.abspath(os.path.join('..', '..', '..', 'lib')))
VERSION = 'devel'
AUTHOR = 'Ansible, Inc'
# General configuration
# ---------------------
# Add any Sphinx extension module names here, as strings.
# They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
# TEST: 'sphinxcontrib.fulltoc'
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'pygments_lexer', 'notfound.extension']
# Later on, add 'sphinx.ext.viewcode' to the list if you want to have
# colorized code generated too for references.
# Add any paths that contain templates here, relative to this directory.
templates_path = ['.templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General substitutions.
project = 'Ansible Documentation'
copyright = "2013-2018 Ansible, Inc"
# The default replacements for |version| and |release|, also used in various
# other places throughout the built documents.
#
# The short X.Y version.
version = VERSION
# The full version, including alpha/beta/rc tags.
release = VERSION
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
# unused_docs = []
# List of directories, relative to source directories, that shouldn't be
# searched for source files.
# exclude_dirs = []
# A list of glob-style patterns that should be excluded when looking
# for source files.
# OBSOLETE - removing this - dharmabumstead 2018-02-06
# exclude_patterns = ['modules']
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
highlight_language = 'YAML+Jinja'
# Substitutions, variables, entities, & shortcuts for text which do not need to link to anything.
# For titles which should be a link, use the intersphinx anchors set at the index, chapter, and section levels, such as qi_start_:
# |br| is useful for formatting fields inside of tables
# |_| is a nonbreaking space; similarly useful inside of tables
rst_epilog = """
.. |br| raw:: html
<br>
.. |_| unicode:: 0xA0
:trim:
"""
# Options for HTML output
# -----------------------
html_theme_path = ['../_themes']
html_theme = 'sphinx_rtd_theme'
html_short_title = 'Ansible Documentation'
html_theme_options = {
'canonical_url': "https://docs.ansible.com/ansible/latest/",
'collapse_navigation': "True",
'vcs_pageview_mode': 'edit'
}
html_context = {
'display_github': 'True',
'github_user': 'ansible',
'github_repo': 'ansible',
'github_version': 'devel/docs/docsite/rst/',
'github_module_version': 'devel/lib/ansible/modules/',
'current_version': version,
'latest_version': '2.8',
# list specifically out of order to make latest work
'available_versions': ('latest', '2.7', '2.6', 'devel'),
'css_files': ('_static/ansible.css', # overrides to the standard theme
),
}
# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
# html_style = 'solar.css'
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = 'Ansible Documentation'
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (within the static path) to place at the top of
# the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = 'favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['../_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_use_modindex = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, the reST sources are included in the HTML build as _sources/<name>.
html_copy_source = False
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
html_use_opensearch = 'https://docs.ansible.com/ansible/latest'
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'Poseidodoc'
# Configuration for sphinx-notfound-pages
# with no 'notfound_template' and no 'notfound_context' set,
# the extension builds 404.rst into a location-agnostic 404 page
#
# default is `en` - using this for the sub-site:
notfound_default_language = "ansible"
# default is `latest`:
# setting explicitly - docsite serves up /ansible/latest/404.html
# so keep this set to `latest` even on the `devel` branch
# then no maintenance is needed when we branch a new stable_x.x
notfound_default_version = "latest"
# makes default setting explicit:
notfound_no_urls_prefix = False
# Options for LaTeX output
# ------------------------
# The paper size ('letter' or 'a4').
# latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
# latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class
# [howto/manual]).
latex_documents = [
('index', 'ansible.tex', 'Ansible 2.2 Documentation', AUTHOR, 'manual'),
]
# Th
|
pytroll/satpy
|
satpy/readers/li_l2.py
|
Python
|
gpl-3.0
| 5,368
| 0.000373
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2016 Satpy developers
#
# This file is part of satpy.
#
# satpy is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# satpy is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# satpy. If not, see <http://www.gnu.org/licenses/>.
# type: ignore
"""Interface to MTG-LI L2 product NetCDF files
The reader is based on preliminary test data provided by EUMETSAT.
The data format is described in the
"LI L2 Product User Guide [LIL2PUG] Draft version" documentation.
"""
import logging
from datetime import datetime
import h5netcdf
import numpy as np
from pyresample import geometry
# FIXME: This is not xarray/dask compatible
# TODO: Once migrated to xarray/dask, remove ignored path in setup.cfg
from satpy.dataset import Dataset
from satpy.readers.file_handlers import BaseFileHandler
logger = logging.getLogger(__name__)
class LIFileHandler(BaseFileHandler):
"""MTG LI File Reader."""
def __init__(self, filename, filename_info, filetype_info):
super(LIFileHandler, self).__init__(filename, filename_info, filetype_info)
self.nc = h5netcdf.File(self.filename, 'r')
# Get grid dimensions from file
refdim = self.nc['grid_position'][:]
# Get number of lines and columns
self.nlines = int(refdim[2])
self.ncols = int(refdim[3])
self.cache = {}
logger.debug('Dimension : {}'.format(refdim))
logger.debug('Row/Cols: {} / {}'.format(self.nlines, self.ncols))
logger.debug('Reading: {}'.format(self.filename))
logger.debug('Start: {}'.format(self.start_time))
logger.debug('End: {}'.format(self.end_time))
@property
def start_time(self):
return datetime.strptime(self.nc.attrs['sensing_start'], '%Y%m%d%H%M%S')
@property
def end_time(self):
return datetime.strptime(self.nc.attrs['end_time'], '%Y%m%d%H%M%S')
def get_dataset(self, key, info=None, out=None):
"""Load a dataset
"""
if key in self.cache:
return self.cache[key]
# Type dictionary
typedict = {"af": "flash_accumulation",
"afa": "accumulated_flash_area",
"afr": "flash_radiance",
"lgr": "radiance",
"lef": "radiance",
"lfl": "radiance"}
# Get lightning data out of NetCDF container
logger.debug("Key: {}".format(key['name']))
# Create reference grid
grid = np.full((self.nlines, self.ncols), np.NaN)
# Get product values
values = self.nc[typedict[key['name']]]
rows = self.nc['row']
cols = self.nc['column']
logger.debug('[ Number of values ] : {}'.format((len(values))))
logger.debug('[Min/Max] : <{}> / <{}>'.format(np.min(values),
np.max(values)))
# Convert xy coordinates to flatten indices
ids = np.ravel_multi_index([rows, cols], grid.shape)
# Replace NaN values with data
np.put(grid, ids, values)
# Correct for bottom left origin in LI row/column indices.
rotgrid = np.flipud(grid)
# Rotate the grid by 90 degree clockwise
rotgrid = np.rot90(rotgrid, 3)
logger.warning("LI data has been rotated to fit to reference grid. \
Works only for test dataset")
# Mask invalid values
ds = np.ma.masked_where(np.isnan(rotgrid), rotgrid)
# Create dataset object
out.data[:] = np.ma.getdata(ds)
out.mask[:] = np.ma.getmask(ds)
out.info.update(key.to_dict())
return out
def get_area_def(self, key, info=None):
"""Create AreaDefinition for specified product.
Projection information are hard coded for 0 degree geos projection
Test dataset doesn't provide the values in the file container.
Only fill values are inserted.
"""
# TODO Get projection information from input file
a = 6378169.
h = 35785831.
b = 6356583.8
lon_0 = 0.
# area_extent = (-5432229.9317116784, -5429229.5285458621,
# 5429229.5285458621, 5432229.9317116784)
area_extent = (-5570248.4773392612, -5567248.074173444,
5567248.074173444, 5570248.4773392612)
proj_dict = {'a': float(a),
'b': float(b),
'lon_0': float(lon_0),
'h': float(h),
'proj': 'geos',
'units': 'm'}
area = geometry.AreaDefinition(
'LI_area_name',
"LI area",
'geosli',
proj_dict,
self.ncols,
self.nlines,
area_extent)
self.area = area
logger.debug("Dataset area definition: \n {}".format(area))
return area
|
kernsuite-debian/lofar
|
LCS/PyCommon/test/t_dbcredentials.py
|
Python
|
gpl-3.0
| 2,789
| 0.008964
|
#!/usr/bin/env python3
import unittest
import tempfile
from lofar.common.dbcredentials import *
def setUpModule():
pass
def tearDownModule():
pass
class TestCredentials(unittest.TestCase):
def test_default_values(self):
c = Credentials()
self.assertEqual(c.type, "postgres")
self.assertEqual(c.host, "localhost")
self.assertEqual(c.port, 0)
#self.assertEqual(c.user, "")
self.assertEqual(c.password, "")
self.assertEqual(c.database, "")
def test_pg_connect_options(self):
c = Credentials()
self.assertEqual(
c.pg_connect_options(),
{ "host": "localhost",
"port": -1,
"user": c.user,
"passwd": "",
"dbname": "",
})
class TestDBCredentials(unittest.TestCase):
def test_set_get(self):
dbc = DBCredentials(filepatterns=[])
c_in = Credentials()
c_in.host = "example.com"
c_in.port = 1234
c_in.user = "root"
c_in.password = "secret"
c_in.database = "mydb"
dbc.set("DATABASE", c_in)
c_out = dbc.get("DATABASE")
self.assertEqual(str(c_out), str(c_in))
def test_get_non_existing(self):
dbc = DBCredentials(filepatterns=[])
with self.assertRaises(DBCredentials.NoSectionError):
dbc.get("UNKNOWN")
def test_list(self):
dbc = DBCredentials(filepatterns=[])
c = Credentials()
c.host = "foo"
dbc.set("FOO", c)
c = Credentials()
c.host = "bar"
dbc.set("BAR", c)
self.assertEqual(sorted(dbc.list()), ["BAR", "FOO"])
def test_config(self):
f = tempfile.NamedTemporaryFile()
f.write(b"""
[database:DATABASE]
type = postgres
host = example.com
port = 1234
user = root
password = secret
database = mydb
""")
f.flush() # don't close since that will delete the TemporaryFile
# test if DATABASE is there
dbc = DBCredentials(filepatterns=[f.name])
self.assertEqual(dbc.list(), ["DATABASE"])
# test if credentials match with what we've written
c_in = Credentials()
c_in.host = "example.com"
c_in.port = 1234
c_in.user = "root"
c_in.password = "secret"
c_in.database = "mydb"
c_out = dbc.get("DATABASE")
self.assertEqual(str(c_out), str(c_in))
def test_freeform_config_option(self):
f = tempfile.NamedTemporaryFile()
f.write(b"""
[database:DATABASE]
foo = bar
test = word word
""")
f.flush() # don't close since that will delete the TemporaryFile
# extract our config
dbc = DBCredentials(filepatterns=[f.name])
c_out = dbc.get("DATABASE")
# test if the free-form config options got through
self.assertEqual(c_out.config["foo"], "bar")
self.assertEqual(c_out.config["test"], "word word")
def main(argv):
unittest.main()
if __name__ == "__main__":
# run all tests
import sys
main(sys.argv[1:])
|
asedunov/intellij-community
|
python/testData/refactoring/introduceConstant/py1840.py
|
Python
|
apache-2.0
| 35
| 0.028571
|
exec(open("t
|
mp<caret>.t
|
xt").read())
|
openstack/cinder
|
cinder/common/constants.py
|
Python
|
apache-2.0
| 1,167
| 0
|
# Copyright 2016 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# The maximum value a signed INT type may have
DB_MAX_INT = 0x7FFFFFFF
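# (0x7FFFFFFF == 2**31 - 1 == 2147483647)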
# The cinder services binaries and topics' names
API_BINARY = "cinder-api"
SCHEDULER_BINARY = "cinder-scheduler"
VOLUME_BINARY = "cinder-volume"
BACKUP_BINARY = "cinder-backup"
SCHEDULER_TOPIC = SCHEDULER_BINARY
VOLUME_TOPIC = VOLUME_BINARY
BACKUP_TOPIC = BACKUP_BINARY
LOG_BINARIES = (SCHEDULER_BINARY, VOLUME_BINARY, BACKUP_BINARY, API_BINARY)
# The encryption key ID used by the legacy fixed-key ConfKeyMgr
FIXED_KEY_ID = '00000000-0000-0000-0000-000000000000'
|
facebookexperimental/eden
|
eden/scm/tests/test-fb-hgext-diff-since-last-submit-t.py
|
Python
|
gpl-2.0
| 6,148
| 0.00244
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
from __future__ import absolute_import
from testutil.dott import feature, sh, testtmp # noqa: F401
# Load extensions
(
sh % "cat"
<< r"""
[extensions]
arcconfig=$TESTDIR/../edenscm/hgext/extlib/phabricator/arcconfig.py
arcdiff=
"""
>> "$HGRCPATH"
)
# Diff with no revision
sh % "hg init repo"
sh % "cd repo"
sh % "touch foo"
sh % "hg add foo"
sh % "hg ci -qm 'No rev'"
sh % "hg diff --since-last-submit" == r"""
abort: local changeset is not associated with a differential revision
[255]"""
sh % "hg log -r 'lastsubmitted(.)' -T '{node} {desc}\\n'" == r"""
abort: local changeset is not associated with a differential revision
[255]"""
# Fake a diff
sh % "echo bleet" > "foo"
sh % "hg ci -qm 'Differential Revision: https://phabricator.fb.com/D1'"
sh % "hg diff --since-last-submit" == r"""
abort: no .arcconfig found
[255]"""
sh % "hg log -r 'lastsubmitted(.)' -T '{node} {desc}\\n'" == r"""
abort: no .arcconfig found
[255]"""
# Prep configuration
sh % "echo '{}'" > ".arcrc"
sh % 'echo \'{"config" : {"default" : "https://a.com/api"}, "hosts" : {"https://a.com/api/" : { "user" : "testuser", "oauth" : "garbage_cert"}}}\'' > ".arcconfig"
# Now progressively test the response handling for variations of missing data
sh % "cat" << r"""
[{}]
""" > "$TESTTMP/mockduit"
sh % "'HG_ARC_CONDUIT_MOCK=$TESTTMP/mockduit' hg diff --since-last-submit" == r"""
Error calling graphql: Unexpected graphql response format
abort: unable to determine previous changeset hash
[255]"""
sh % "'HG_ARC_CONDUIT_MOCK=$TESTTMP/mockduit' hg log -r 'lastsubmitted(.)' -T '{node} {desc}\\n'" == r"""
Error calling graphql: Unexpected graphql response format
abort: unable to determine previous changeset hash
[255]"""
sh % "cat" << r"""
[{"data": {"query": [{"results": {"nodes": [{
"number": 1,
"diff_status_name": "Needs Revi
|
ew",
"differential_diffs": {"count": 3},
"is_landing": false,
"land_job_status": "NO_LAND_RUNNING",
"needs_final_review_status": "NOT_NEEDED",
"created_time": 123,
"updated_time": 222
}]}}]}}]
""" > "$TESTTMP/mockduit"
sh % "'HG_ARC_CONDUIT_MOCK=$TESTTMP/mockduit' hg diff --since-last-submit" == r"""
abort: unable to determine previous changeset hash
[255]"""
sh % "'HG_ARC_CONDUIT_MOCK=$TESTTMP/mockduit' hg log -r 'lastsubmitted(.)' -T '{node} {desc}\\n'" == r"""
abort: unable to determine previous changeset hash
[255]"""
sh % "cat" << r"""
[{"data": {"query": [{"results": {"nodes": [{
"number": 1,
"diff_status_name": "Needs Review",
"is_landing": false,
"land_job_status": "NO_LAND_RUNNING",
"needs_final_review_status": "NOT_NEEDED",
"created_time": 123,
"updated_time": 222
}]}}]}}]
""" > "$TESTTMP/mockduit"
sh % "'HG_ARC_CONDUIT_MOCK=$TESTTMP/mockduit' hg diff --since-last-submit" == r"""
abort: unable to determine previous changeset hash
[255]"""
sh % "'HG_ARC_CONDUIT_MOCK=$TESTTMP/mockduit' hg log -r 'lastsubmitted(.)' -T '{node} {desc}\\n'" == r"""
abort: unable to determine previous changeset hash
[255]"""
# This is the case when the diff is up to date with the current commit;
# there is no diff since what was landed.
sh % "cat" << r"""
[{"data": {"query": [{"results": {"nodes": [{
"number": 1,
"diff_status_name": "Needs Review",
"latest_active_diff": {
"local_commit_info": {
"nodes": [
{"property_value": "{\"lolwut\": {\"time\": 0, \"commit\": \"2e6531b7dada2a3e5638e136de05f51e94a427f4\"}}"}
]
}
},
"differential_diffs": {"count": 1},
"is_landing": false,
"land_job_status": "NO_LAND_RUNNING",
"needs_final_review_status": "NOT_NEEDED",
"created_time": 123,
"updated_time": 222
}]}}]}}]
""" > "$TESTTMP/mockduit"
sh % "'HG_ARC_CONDUIT_MOCK=$TESTTMP/mockduit' hg diff --since-last-submit"
sh % "'HG_ARC_CONDUIT_MOCK=$TESTTMP/mockduit' hg log -r 'lastsubmitted(.)' -T '{node} {desc}\\n'" == "2e6531b7dada2a3e5638e136de05f51e94a427f4 Differential Revision: https://phabricator.fb.com/D1"
# This is the case when the diff points at our parent commit, we expect to
# see the bleet text show up. There's a fake hash that I've injected into
# the commit list returned from our mocked phabricator; it is present to
# assert that we order the commits consistently based on the time field.
sh % "cat" << r"""
[{"data": {"query": [{"results": {"nodes": [{
"number": 1,
"diff_status_name": "Needs Review",
"latest_active_diff": {
"local_commit_info": {
"nodes": [
{"property_value": "{\"lolwut\": {\"time\": 0, \"commit\": \"88dd5a13bf28b99853a24bddfc93d4c44e07c6bd\"}}"}
]
}
},
"differential_diffs": {"count": 1},
"is_landing": false,
"land_job_status": "NO_LAND_RUNNING",
"needs_final_review_status": "NOT_NEEDED",
"created_time": 123,
"updated_time": 222
}]}}]}}]
""" > "$TESTTMP/mockduit"
sh % "'HG_ARC_CONDUIT_MOCK=$TESTTMP/mockduit' hg diff --since-last-submit --nodates" == r"""
diff -r 88dd5a13bf28 -r 2e6531b7dada foo
--- a/foo
+++ b/foo
@@ -0,0 +1,1 @@
+bleet"""
sh % "'HG_ARC_CONDUIT_MOCK=$TESTTMP/mockduit' hg log -r 'lastsubmitted(.)' -T '{node} {desc}\\n'" == "88dd5a13bf28b99853a24bddfc93d4c44e07c6bd No rev"
sh % "'HG_ARC_CONDUIT_MOCK=$TESTTMP/mockduit' hg diff --since-last-submit-2o" == r"""
Phabricator rev: 88dd5a13bf28b99853a24bddfc93d4c44e07c6bd
Local rev: 2e6531b7dada2a3e5638e136de05f51e94a427f4 (.)
Changed: foo
| ...
| +bleet"""
# Make a new commit on top, and then use -r to look at the previous commit
sh % "echo other" > "foo"
sh % "hg commit -m 'Other commmit'"
sh % "'HG_ARC_CONDUIT_MOCK=$TESTTMP/mockduit' hg diff --since-last-submit --nodates -r 2e6531b" == r"""
diff -r 88dd5a13bf28 -r 2e6531b7dada foo
--- a/foo
+++ b/foo
@@ -0,0 +1,1 @@
+bleet"""
sh % "'HG_ARC_CONDUIT_MOCK=$TESTTMP/mockduit' hg log -r 'lastsubmitted(2e6531b)' -T '{node} {desc}\\n'" == "88dd5a13bf28b99853a24bddfc93d4c44e07c6bd No rev"
|
Architektor/PySnip
|
venv/lib/python2.7/site-packages/twisted/protocols/wire.py
|
Python
|
gpl-3.0
| 2,659
| 0.003009
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""Implement standard (and unused) TCP protocols.
These protocols are either provided by inetd, or are not provided at all.
"""
from __future__ import absolute_import, division
import time
import struct
from zope.interface import implementer
from twisted.internet import protocol, interfaces
from twisted.python.compat import _PY3
class Echo(protocol.Protocol):
"""As soon as any data is received, write it back (RFC 862)"""
def dataReceived(self, data):
self.transport.write(data)
class Discard(protocol.Protocol):
"""Discard any received data (RFC 863)"""
def dataReceived(self, data):
# I'm ignoring you, nyah-nyah
pass
@implementer(interfaces.IProducer)
class Chargen(protocol.Protocol):
"""Generate repeating noise (RFC 864)"""
noise = r'@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~ !"#$%&?'
def connectionMade(self):
self.transport.registerProducer(self, 0)
def resumeProducing(self):
self.transport.write(self.noise)
def pauseProducing(self):
pass
def stopProducing(self):
pass
class QOTD(protocol.Protocol):
"""Return a quote of the day (RFC 865)"""
def connectionMade(self):
self.transport.write(self.getQuote())
self.transport.loseConnection()
def getQuote(self):
"""Return a quote. May be overridden in subclasses."""
return "An apple a day keeps the doctor away.\r\n"
class Who(protocol.Protocol):
"""Return list of active users (RFC 866)"""
def connectionMade(self):
self.transport.write(self.getUsers())
self.transport.loseConnection()
def getUsers(self):
"""Return active users. Override in subclasses."""
return "root\r\n"
class Daytime(protocol.Protocol):
"""Send back the daytime in ASCII form (RFC 867)"""
def connectionMade(self):
self.transport.write(time.asctime(time.gmtime(time.time())) + '\r\n')
self.transport.loseConnection()
class Time(protocol.Protocol):
"""Send back the time in machine readable form (RFC 868)"""
def connectionMade(self):
# is this correct only for 32-bit machines?
result = struct.pack("!i", int(time.time()))
self.transport.write(result)
self.transport.loseConnection()
__all__ = ["Echo", "Discard", "Chargen", "QOTD", "Who", "Daytime", "Time"]
if _PY3:
__all3__ = ["Echo"]
for name in __all__[:]:
if name not in __all3__:
__all__.remove(name)
del globals()[name]
del name, __all3__
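# Illustrative usage sketch (editor's addition, not part of the original
# module): any of these protocols can be served with a plain Twisted server
# factory, e.g.
#
# from twisted.internet import protocol, reactor
# factory = protocol.ServerFactory()
# factory.protocol = Echo
# reactor.listenTCP(8007, factory)  # port number is an arbitrary example
# reactor.run()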
|
kamyu104/LeetCode
|
Python/kth-largest-element-in-an-array.py
|
Python
|
mit
| 1,142
| 0.002627
|
# Time: O(n) ~ O(n^2)
# Space: O(1)
from random import randint
class Solution:
# @param {integer[]} nums
# @param {integer} k
# @return {integer}
def findKthLargest(self, nums, k):
left, right = 0, len(nums) - 1
while left <= right:
pivot_idx = randint(left, right)
new_pivot_idx = self.PartitionAroundPivot(left, right, pivot_idx, nums)
if new_pivot_idx == k - 1:
return nums[new_pivot_idx]
elif new_pivot_idx > k - 1:
right = new_pivot_idx - 1
else: # new_pivot_idx < k - 1.
left = new_pivot_idx + 1
def PartitionAroundPivot(self, left, right, pivot_idx, nums):
pivot_value = nums[pivot_idx]
new_pivot_idx = left
nums[pivot_idx], nums[right] = nums[right], nums[pivot_idx]
for i in xrange(left, right):
if nums[i] > pivot_value:
nums[i], nums[new_pivot_idx] = nums[new_pivot_idx], nums[i]
new_pivot_idx += 1
nums[right], nums[new_pivot_idx] = nums[new_pivot_idx], nums[right]
return new_pivot_idx
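# Illustrative usage (editor's addition, not part of the original solution):
# quickselect returns the k-th largest element in expected O(n) time without
# sorting the whole array.
if __name__ == "__main__":
    print Solution().findKthLargest([3, 2, 1, 5, 6, 4], 2)  # expected: 5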
|
robocomp/robocomp-ursus-rockin
|
components/comprehension/comprehension.py
|
Python
|
gpl-3.0
| 4,066
| 0.029759
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys, traceback, Ice, threading, time, os
import IceStorm
# Ctrl+c handling
import signal
signal.signal(signal.SIGINT, signal.SIG_DFL)
# Qt interface
from PySide.QtCore import *
from PySide.QtGui import *
from PySide.QtSvg import *
# Check that RoboComp has been correctly detected
ROBOCOMP = ''
try:
ROBOCOMP = os.environ['ROBOCOMP']
except:
pass
if len(ROBOCOMP)<1:
print 'ROBOCOMP environment variable not set! Exiting.'
sys.exit()
Ice.loadSlice("-I"+ROBOCOMP+"/interfaces/ --all "+ROBOCOMP+"/interfaces/ASRPublish.ice")
import RoboCompASRPublish
Ice.loadSlice("-I"+ROBOCOMP+"/interfaces/ --all "+ROBOCOMP+"/interfaces/ASRCommand.ice")
import RoboCompASRCommand
Ice.loadSlice("-I"+ROBOCOMP+"/interfaces/ --all "+ROBOCOMP+"/interfaces/ASRComprehension.ice")
import RoboCompASRComprehension
class MainClass(object):
def __init__(self, commandTopic):
print 'This class could be the main class of the program'
self.commandTopic = commandTopic
def newText(self, text, current=None):
print 'We received', text
command = RoboCompASRCommand.Command()
partes = text.split()
if len(partes) > 0:
command.action = partes[0]
if len(partes) > 1:
command.complements = partes[1:]
print 'Action', command.action, '(', command.complements,')'
else:
print 'Action', command.action
self.commandTopic.newCommand(command)
else:
print 'Empty command?'
def mode(self, text):
print 'Received via the ASRComprehension interface', text
class ASRPublishTopicI (RoboCompASRPublish.ASRPublish):
def __init__(self, _handler):
self.handler = _handler
def newText(self, text, current=None):
self.handler.newText(text)
class ASRComprehensionI (RoboCompASRComprehension.ASRComprehension):
def __init__(self, _handler):
self.handler = _handler
def mode(self, text, current=None):
self.handler.mode(text)
class Server (Ice.Application):
def run (self, argv):
status = 0
try:
# Proxy to publish ASRCommand
proxy = self.communicator().getProperties().getProperty("IceStormProxy")
obj = self.communicator().stringToProxy(proxy)
topicManager = IceStorm.TopicManagerPrx.checkedCast(obj)
try:
topic = False
topic = topicManager.retrieve("ASRCommand")
except:
pass
while not topic:
try:
topic = topicManager.retrieve("ASRCommand")
except IceStorm.NoSuchTopic:
try:
topic = topicManager.create("ASRCommand")
except:
print 'Another client created the ASRCommand topic... ok'
pub = topic.getPublisher().ice_oneway()
commandTopic = RoboCompASRCommand.ASRCommandPrx.uncheckedCast(pub)
mainObject = MainClass(commandTopic)
# Subscribe to ASRPublishTopic
proxy = self.communicator().getProperties().getProperty( "IceStormProxy")
topicManager = IceStorm.TopicManagerPrx.checkedCast(self.communicator().stringToProxy(proxy))
adapterT = self.communicator().createObjectAdapter("ASRPublishTopic")
asrTopic = ASRPublishTopicI(mainObject)
proxyT = adapterT.addWithUUID(asrTopic).ice_oneway()
ASRPublishTopic_subscription = False
while not ASRPublishTopic_subscription:
try:
topic = topicManager.retrieve("ASRPublishTopic")
qos = {}
topic.subscribeAndGetPublisher(qos, proxyT)
adapterT.activate()
ASRPublishTopic_subscription = True
except IceStorm.NoSuchTopic:
print "Error! No topic found! Sleeping for a while..."
time.sleep(1)
print 'ASRPublishTopic subscription ok'
# Implement ASRComprehension
asrcomprehensionI = ASRComprehensionI(mainObject)
adapterASRComprehension = self.communicator().createObjectAdapter('ASRComprehension')
adapterASRComprehension.add(asrcomprehensionI, self.communicator().stringToIdentity('asrcomprehension'))
adapterASRComprehension.activate()
self.communicator().waitForShutdown()
except:
traceback.print_exc()
status = 1
if self.communicator():
try:
self.communicator().destroy()
except:
traceback.print_exc()
status = 1
Server( ).main(sys.argv)
|
anand1712/cloudpulse
|
cloudpulse/openstack/api/cinder_api.py
|
Python
|
apache-2.0
| 1,489
| 0
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinderclient.client import Client
class CinderHealth(object):
def __init__(self, creds):
self.cinderclient = Client(**creds)
def cinder_list(self):
try:
cinder_list = self.cinderclient.volumes.list()
except Exception as e:
return (404, e.message, [])
return (200, "success", cinder_list)
def cinder_volume_create(self, volume_name, volume_size):
try:
cinder_ret = self.cinderclient.volumes.create(volume_size,
name=volume_name)
except Exception as e:
return (404, e.message, [])
return (200, "success", cinder_ret)
def cinder_volume_delete(self, volume_id):
try:
cinder_ret = self.cinderclient.volumes.delete(volume_id)
except Exception as e:
return (404, e.message, [])
return (200, "success", cinder_ret)
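# Illustrative usage sketch (editor's addition, not part of the original file).
# `creds` stands for whatever keyword arguments the installed cinderclient
# Client accepts for authentication (an assumption about the deployment);
# each wrapper method returns a (status_code, message, payload) tuple:
#
# health = CinderHealth(creds)
# status, message, volumes = health.cinder_list()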
|
mateuszmidor/GumtreeOnMap
|
src/offerfetcher.py
|
Python
|
gpl-2.0
| 848
| 0.008255
|
'''
Created on 30-07-2014
@author: mateusz
'''
from threading import Thread
import gumtreeofferparser as Parser
from injectdependency import Inject, InjectDependency
@InjectDependency('urlfetcher')
class OfferFetcher(Thread):
urlfetcher = Inject
def __init__(self, inQueue, outQueue):
Thread.__init__(self, name="OfferFetcher")
self.inQueue = inQueue
self.outQueue = outQueue
def run(self):
while (True): # this is ok for daemon thread
url = self.inQueue.get()
offer = self.getOffer(url)
self.outQueue.put(offer)
self.inQueue.task_done()
def getOffer(self, url):
html = self.urlfetcher.fetchDocument(url)
offer = Parser.extractOffer(html)
offer["url"] = url
return offer
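# Illustrative usage sketch (editor's addition, not part of the original file):
# the fetcher is a queue-driven worker; URLs go into inQueue and parsed offers
# come out of outQueue. Queue and the example URL below are placeholders.
#
# in_queue, out_queue = Queue(), Queue()
# fetcher = OfferFetcher(in_queue, out_queue)
# fetcher.daemon = True
# fetcher.start()
# in_queue.put("http://example.com/some-offer")
# offer = out_queue.get()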
|
camerongray1515/Prophasis
|
application/prophasis_common/prophasis_common/alert_modules/syslog.py
|
Python
|
bsd-2-clause
| 179
| 0.005587
|
from syslog import syslog
module_name = "Syslog"
config = {
"prefix": "Default Prefix"
}
def handle_alert(message):
syslog("{} - {}".format(config["prefix"], message))
|
Haellsigh/travaux-pratiques
|
TP/TP8-Exo2.py
|
Python
|
mit
| 532
| 0.011278
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 20 20:42:20 2016
@author: haell
"""
def dicho(f, a, b, epsilon):
assert f(a) * f(b) <= 0 and epsilon > 0
g, d = a, b
fg, fd = f(g), f(d)
n = 0
while d - g > 2 * epsilon:
n += 1
m = (g + d) / 2.
fm = f(m)
if fg * fm <= 0:
d, fd = m, fm
else:
g, fg = m, fm
print(d, g, fd, fg)
return (g + d) / 2., n
print(dicho(lambda x : x*x*10**(-8) - 4*x / 5 + 10**(-8), 7*10**7, 9*10**7, 10**-8))
|
NonVolatileComputing/arrow
|
python/pyarrow/tests/test_types.py
|
Python
|
apache-2.0
| 4,240
| 0
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pyarrow as pa
import pyarrow.types as types
def test_is_boolean():
assert types.is_boolean(pa.bool_())
assert not types.is_boolean(pa.int8())
def test_is_integer():
signed_ints = [pa.int8(), pa.int16(), pa.int32(), pa.int64()]
unsigned_ints = [pa.uint8(), pa.uint16(), pa.uint32(), pa.uint64()]
for t in signed_ints + unsigned_ints:
assert types.is_integer(t)
for t in signed_ints:
assert types.is_signed_integer(t)
assert not types.is_unsigned_integer(t)
for t in unsigned_ints:
assert types.is_unsigned_integer(t)
assert not types.is_signed_integer(t)
assert not types.is_integer(pa.float32())
assert not types.is_signed_integer(pa.float32())
def test_is_floating():
for t in [pa.float16(), pa.float32(), pa.float64()]:
assert types.is_floating(t)
assert not types.is_floating(pa.int32())
def test_is_null():
assert types.is_null(pa.null())
assert not types.is_null(pa.list_(pa.int32()))
def test_is_decimal():
assert types.is_decimal(pa.decimal(19, 4))
assert not types.is_decimal(pa.int32())
def test_is_list():
assert types.is_list(pa.list_(pa.int32()))
assert not types.is_list(pa.int32())
def test_is_dictionary():
assert types.is_dictionary(
pa.dictionary(pa.int32(),
pa.array(['a', 'b', 'c'])))
assert not types.is_dictionary(pa.int32())
def test_is_nested_or_struct():
struct_ex = pa.struct([pa.field('a', pa.int32()),
pa.field('b', pa.int8()),
pa.field('c', pa.string())])
assert types.is_struct(struct_ex)
assert not types.is_struct(pa.list_(pa.int32()))
assert types.is_nested(struct_ex)
assert types.is_nested(pa.list_(pa.int32()))
assert not types.is_nested(pa.int32())
# TODO(wesm): Union types not yet implemented in pyarrow
# def test_is_union():
# assert types.is_union(pa.union([pa.field('a', pa.int32()),
# pa.field('b', pa.int8()),
# pa.field('c', pa.string())]))
# assert not types.is_union(pa.list_(pa.int32()))
# TODO(wesm): is_map, once implemented
def test_is_binary_string():
assert types.is_binary(pa.binary())
assert not types.is_binary(pa.string())
assert types.is_string(pa.string())
assert types.is_unicode(pa.string())
assert not types.is_string(pa.binary())
assert types.is_fixed_size_binary(pa.binary(5))
assert not types.is_fixed_size_binary(pa.binary())
def test_is_temporal_date_time_timestamp():
date_types = [pa.date32(), pa.date64()]
time_types = [pa.time32('s'), pa.time64('ns')]
timestamp_types = [pa.timestamp('ms')]
for case in date_types + time_types + timestamp_types:
assert types.is_temporal(case)
for case in date_types:
assert types.is_date(case)
assert not types.is_time(case)
assert not types.is_timestamp(case)
for case in time_types:
assert types.is_time(case)
assert not types.is_date(case)
assert not types.is_timestamp(case)
for case in timestamp_types:
assert types.is_timestamp(case)
assert not types.is_date(case)
assert not types.is_time(case)
assert not types.is_temporal(pa.int32())
def test_timestamp_type():
# See ARROW-1683
assert isinstance(pa.timestamp('ns'), pa.TimestampType)
|
harsimrans/word-treasure
|
word_treasure/test_wordt.py
|
Python
|
gpl-3.0
| 1,648
| 0.005461
|
import unittest
from word_treasure import *
class WordTreasureTestCase(unittest.TestCase):
"""Test for functions in word treasure.
The major aim is to check if there is any
unexpected crash.
Doesnot check the validity of the response"""
def test_definition_call(self):
word1 = "hello"
word2 = "somenonexistantword"
self.assertEqual(display_definitions(word1), True)
self.assertEqual(display_definitions(word2), None)
def test_random_words(self):
limit = 10
self.assertEqual(display_random_words(limit), True)
def test_display_examples(self):
limit = 10
word1 = "hello"
word2 = "somenonexistantword"
self.assertEqual(display_examples(word1, limit), True)
self.assertEqual(display_examples(word2, limit), None)
def test_display_top_examples(self):
word1 = "hello"
word2 = "somenonexistantword"
self.assertEqual(display_top_examples(word1), True)
self.assertEqual(display_top_examples(word2), None)
def test_display_related_words(self):
word1 = "hello"
word2 = "somenonexistantword"
self.assertEqual(display_related_words(word1), True)
self.assertEqual(display_related_words(word2), None)
def test_display_compact(self):
word1 = "hello"
word2 = "somenonexistantword"
self.assertEqual(display_compact(word1), True)
self.assertEqual(display_compact(word2), None)
def test_help_display(self):
self.assertEqual(display_help(), True)
if __name__=='__main__':
unittest.main()
|
wolverineav/neutron
|
neutron/db/migration/alembic_migrations/brocade_init_ops.py
|
Python
|
apache-2.0
| 2,634
| 0
|
# Copyright 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Initial operations for the Mellanox plugin
from alembic import op
import sqlalchemy as sa
def upgrade():
op.create_table(
'brocadenetworks',
sa.Column('id', sa.String(length=36), nullable=False),
sa.Column('vlan', sa.String(length=10), nullable=True),
sa.PrimaryKeyConstraint('id'))
op.create_table(
'brocadeports',
sa.Column('port_id', sa.String(length=36), nullable=False,
server_default=''),
sa.Column('network_id', sa.String(length=36), nullable=False),
sa.Column('admin_state_up', sa.Boolean(), nullable=False),
sa.Column('physical_interface', sa.String(length=36), nullable=True),
sa.Column('vlan_id', sa.String(length=36), nullable=True),
sa.Column('tenant_id', sa.String(length=36), nullable=True),
sa.ForeignKeyConstraint(['network_id'], ['brocadenetworks.id'], ),
sa.PrimaryKeyConstraint('port_id'))
op.create_table(
'ml2_brocadenetworks',
sa.Column('id', sa.String(length=36), nullable=False),
sa.Column('vlan', sa.String(length=10), nullable=True),
sa.Column('segment_id', sa.String(length=36), nullable=True),
sa.Column('network_type', sa.String(length=10), nullable=True),
sa.Column('tenant_id', sa.String(length=255), nullable=True,
index=True),
sa.PrimaryKeyConstraint('id'))
op.create_table(
'ml2_brocadeports',
sa.Column('id', sa.String(length=36), nullable=False),
sa.Column('network_id', sa.String(length=36), nullable=False),
sa.Column('admin_state_up', sa.Boolean(), nullable=False),
sa.Column('physical_interface', sa.String(length=36), nullable=True),
sa.Column('vlan_id', sa.String(length=36), nullable=True),
sa.Column('tenant_id', sa.String(length=255), nullable=True,
index=True),
sa.PrimaryKeyConstraint('id'),
sa.ForeignKeyConstraint(['network_id'], ['ml2_brocadenetworks.id']))
|
dlutxx/memo
|
python/daemon.py
|
Python
|
mit
| 1,497
| 0
|
# -*- encoding: utf8 -*-
# A daemon to keep SSH forwarding connected
from __future__ import print_function, absolute_import
import os
import sys
import time
import socket
import logging
class Daemon(object):
def __init__(self):
self.heartbeat = 50
def run(self):
logging.basicConfig(filename='daemon.log')
logging.error('daemon started')
self.daemonize()
while True:
if not self.check_connection():
self.reconnect()
logging.warn('reconnecting')
time.sleep(self.heartbeat)
def check_connection(self):
c = socket.socket()
try:
c.connect(('localhost', 3366))
c.close()
return True
except socket.error:
return False
def daemonize(self):
pid = os.fork()
if pid:
os.waitpid(pid, os.WNOHANG)
sys.exit(0)
return
def reconnect(self):
pid = os.fork()
if pid == 0: # child
err = os.execlp('/usr/bin/ssh', 'ssh', '-i',
'/home/xu/.ssh/id_rsa', '-L',
'3366:127.0.0.1:3306', '-p', '42022', '[email protected]')
if err:
logging.error("error to execlp")
sys.exit(1)
elif pid > 0:
os.waitpid(pid, 0)
else:
logging.error('error to fork')
sys.exit(2)
if __name__ == '__main__':
Daemon().run()
|
patrick-brian-mooney/python-personal-library
|
run_subfolder_scripts.py
|
Python
|
gpl-3.0
| 1,560
| 0.005128
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Search through the subfolders of the current folder. For each subfolder found,
chdir() to it, then run all executable scripts ending in .SH in that folder.
Does not exhaustively search for subfolders of subfolders, or subfolders of
subfolders of subfolders, etc.; it only does exactly what was described in that
first sentence, without recursion.
Note that this calls scripts in an insecure way:
subprocess.call(script_name, shell=True)
so it should only be called on scripts that are trusted completely.
This script is copyright 2017-20 by Patrick Mooney. It is licensed under the GNU
GPL, either version 3 or (at your option) any later version. See the file
LICENSE.md for details.
"""
import glob, os, subprocess
from pprint import pprint
the_dirs = [ d for d in glob.glob("*") if os.path.isdir(d) ]
for which_dir in the_dirs:
olddir = os.getcwd()
try:
os.chdir(which_dir)
print("changed directory to %s" % os.getcwd())
exec_scripts = [ which_script for which_script in list(set(glob.glob('*SH') + glob.glob('*sh'))) if os.access(which_script, os.X_OK) ]
pprint("exec_scripts are: %s" % exec_scripts)
for which_script in exec_scripts:
print("About to call script: %s" % which_script)
subprocess.call('./' + which_script, shell=True)
subprocess.call('chmod a-x %s' % which_script, shell=True)
except BaseException as e:
print('Something went wrong; the system said %s' % e)
finally:
os.chdir(olddir)
|
eclee25/flu-SDI-exploratory-age
|
scripts/create_fluseverity_figs/F2_incid_time.py
|
Python
|
mit
| 2,579
| 0.01551
|
#!/usr/bin/python
##############################################
###Python template
###Author: Elizabeth Lee
###Date: 4/26/14
###Function: Incidence per 100,000 vs. week number for flu weeks (wks 40-20). Incidence is per 100,000 for the US population in the second calendar year of the flu season.
###Import data: SQL_export/OR_allweeks_outpatient.csv, SQL_export/totalpop.csv
###Command Line: python F2_incid_time.py
##############################################
### notes ###
# Incidence per 100,000 is normalized by total population by second calendar year of the flu season
### packages/modules ###
import csv
import matplotlib.pyplot as plt
## local modules ##
import functions as fxn
### data structures ###
### functions ###
### data files ###
incidin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/SQL_export/OR_allweeks_outpatient.csv','r')
incid = csv.reader(incidin, delimiter=',')
popin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/SQL_export/totalpop_age.csv', 'r')
pop = csv.reader(popin, delimiter=',')
### called/local plotting parameters ###
ps = fxn.pseasons
fw = fxn.gp_fluweeks
sl = fxn.gp_seasonlabels
colvec = fxn.gp_colors
wklab = fxn.gp_weeklabels
fs = 24
fssml = 16
### program ###
# dict_wk[week] = seasonnum, dict_incid[week] = ILI cases per 10,000 in US population in second calendar year of flu season, dict_OR[week] = OR
d_wk, d_incid, d_OR = fxn.week_OR_processing(incid, pop)
d_zOR = fxn.week_zOR_processing(d_wk, d_OR)
# d_incid53ls[seasonnum] = [ILI wk 40 per 100000, ILI wk 41 per 100000,...], d_OR53ls[seasonnum] = [OR wk 40, OR wk 41, ...], d_zOR53ls[seasonnum] = [zOR wk 40, zOR wk 41, ...]
d_incid53ls, d_OR53ls, d_zOR53ls = fxn.week_plotting_dicts(d_wk, d_incid, d_OR, d_zOR)
# plot values
for s in ps:
plt.plot(xrange(fw), d_incid53ls[s][:fw], marker = 'o', color = colvec[s-2], label = sl[s-2], linewidth = 2)
plt.xlim([0, fw-1])
plt.xticks(range(fw)[::5], wklab[:fw:5])
plt.ylim([0, 60])
plt.xlabel('Week Number', fontsize=fs)
plt.ylabel('Incidence per 100,000', fontsize=fs)
plt.legend(loc='upper left')
plt.savefig('/home/elee/Dropbox/Elizabeth_Bansal_Lab/Manuscripts/Age_Severity/fluseverity_figs/F2/incid_time.png', transparent=False, bbox_inches='tight', pad_inches=0)
plt.close()
# plt.show()
# 7/28/14: does 'week' variable in SDI refer to week before or after referenced date? Thanksgiving week does not correspond with correct week number for dip in incidence plot
print [d_incid[wk] for wk in sorted(d_wk) if d_wk[wk]==2]
print [wk for wk in sorted(d_wk) if d_wk[wk]==2]
|
shaddyx/simpleDecorators
|
simpledecorators/Synchronized.py
|
Python
|
mit
| 511
| 0.005871
|
from functools import wraps
from threading import RLock
import traceback
def Synchronized(lock=None):
"""
:param lock: if None - a global lock will be used, unique for each function
:return:
"""
if not lock:
lock=RLock()
def decorator(fn):
@wraps(fn)
def wrapped(*args, **kwargs):
lock.acquire()
try:
return fn(*args, **kwargs)
finally:
lock.release()
return wrapped
return decorator
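# Illustrative usage sketch (editor's addition, not part of the original file):
# without an argument every decorated function gets its own RLock; passing a
# shared lock serializes several functions against each other.
if __name__ == "__main__":
    shared_lock = RLock()

    @Synchronized(shared_lock)
    def deposit(account, amount):
        account["balance"] += amount

    @Synchronized(shared_lock)
    def withdraw(account, amount):
        account["balance"] -= amount

    account = {"balance": 0}
    deposit(account, 10)
    withdraw(account, 3)
    print(account["balance"])  # prints 7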
|
Code4SA/municipal-data
|
municipal_finance/templatetags/jsonify.py
|
Python
|
mit
| 140
| 0
|
import json
from django import template
register = template.Library()
@register.filter
def jsonify(value):
return json.dumps(value)
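# Illustrative usage (editor's addition, not part of the original file): the
# filter simply delegates to json.dumps, so a template can serialize a
# context value, e.g.
#
# {% load jsonify %}
# <script>var data = {{ payload|jsonify }};</script>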
| |
zaibacu/DamnedQuest
|
sprite_creator.py
|
Python
|
mit
| 1,532
| 0.00718
|
from PIL import Image
from math import ceil, floor
def load_img(src):
return Image.open(src)
def create_master(width, height):
return Image.new("RGBA", (width, height))
def closest_power_two(num):
result = 2
while result < num:
result = result * 2
return result
def create_matrix(cols, rows, images):
x, y = images[0].size # We assume that all images are same size
width = closest_power_two(x)
height = closest_power_two(y)
print("Width: {0} Height: {1}".format(width, height))
offset_x = int((width - x) / 2)
offset_y = int((height - y) / 2)
master = create_master(width * cols, height * rows)
for index, img in enumerate(images):
row = floor(index / cols)
col = index % cols
master.paste(img, (width * col + offset_x, height * row - offset_y))
return master
def hero_sprites(name, action, frames):
from functools import reduce
def generator(name, action, position, frames):
if frames > 1:
return [load_img("img/png/1x/{0}/{1}{2} ({3}).png".format(name, action, position, frame)) for frame in range(1, frames + 1)]
else:
return [load_img("img/png/1x/{0}/{1}{2}.png".format(name, action, position))]
imgs = list(reduce(lambda a, b: a + b, [generator(name, action, pos, frames) for pos in ["Back", "Front", "Left", "Right"]], []))
return imgs
if __name__ == "__main__":
matrix = create_matrix(4, 4, hero_sprites("hero1", "Dead", 3))
matrix.save("img/hero1_dead.png", "PNG")
|
apache/incubator-airflow
|
airflow/operators/redshift_to_s3_operator.py
|
Python
|
apache-2.0
| 1,680
| 0.002381
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This module is deprecated.
Please use :mod:`airflow.providers.amazon.aws.transfers.redshift_to_s3`.
"""
import warnings
from airflow.providers.amazon.aws.transfers.redshift_to_s3 import RedshiftToS3Operator
warnings.warn(
"This module is deprecated. Please use `airflow.providers.amazon.aws.transfers.redshift_to_s3`.",
DeprecationWarning,
stacklevel=2,
)
class RedshiftToS3Transfer(RedshiftToS3Operator):
"""
This class is deprecated.
Please use: :class:`airflow.providers.amazon.aws.transfers.redshift_to_s3.RedshiftToS3Operator`.
"""
def __init__(self, **kwargs):
warnings.warn(
"""This class is deprecated.
Please use
`airflow.providers.amazon.aws.transfers.redshift_to_s3.RedshiftToS3Operator`.""",
DeprecationWarning,
stacklevel=3,
)
super().__init__(**kwargs)
|
BetterWorks/django-bleachfields
|
bleachfields/bleachchar.py
|
Python
|
mit
| 409
| 0
|
from django.db import models
from .bleachfield import BleachField
class BleachCharField(BleachField, models.CharField):
def pre_save(self, model_instance, add):
new_value = getattr(model_instance, self.attname)
clean_value = self.clean_text(new_value)
setattr(model_instance, self.attname, clean_value)
return super(BleachCharField, self).pre_save(model_instance, add)
|
|
tylertian/Openstack
|
openstack F/glance/glance/api/v2/image_tags.py
|
Python
|
apache-2.0
| 1,791
| 0
|
# Copyright 2012 OpenStack, LLC
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob.exc
from glance.common import exception
from glance.common import utils
from glance.common import wsgi
import glance.db
class Controller(object):
def __init__(self, db=None):
self.db_api = db or glance.db.get_api()
self.db_api.configure_db()
@utils.mutating
def update(self, req, image_id, tag_value):
context = req.context
if tag_value not in self.db_api.image_tag_get_all(context, image_id):
self.db_api.image_tag_create(context, image_id, tag_value)
@utils.mutating
def delete(self, req, image_id, tag_value):
try:
self.db_api.image_tag_delete(req.context, image_id, tag_value)
except exception.NotFound:
raise webob.exc.HTTPNotFound()
class ResponseSerializer(wsgi.JSONResponseSerializer):
def update(self, response, result):
response.status_int = 204
def delete(self, response, result):
response.status_int = 204
def create_resource():
"""Images resource factory method"""
serializer = ResponseSerializer()
controller = Controller()
return wsgi.Resource(controller, serializer=serializer)
|
yosshy/osclient2
|
osclient2/nova/v2/server.py
|
Python
|
apache-2.0
| 21,602
| 0
|
# Copyright 2014-2017 by Akira Yoshiyama <[email protected]>.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Resource class and its manager for servers in Compute API v2
"""
import time
from osclient2 import base
from osclient2 import exception
from osclient2 import mapper
from osclient2 import utils
from . import volume_attachment
from . import interface_attachment
from osclient2.neutron.v2.network import Resource as Network
from osclient2.neutron.v2.port import Resource as Port
from osclient2.cinder.v2.volume import Resource as Volume
from osclient2.cinder.v2.snapshot import Resource as Snapshot
from osclient2.glance.v2.image import Resource as Image
ATTRIBUTE_MAPPING = [
('id', 'id', mapper.Noop),
('name', 'name', mapper.Noop),
('access_ipv4', 'accessIPv4', mapper.Noop),
('access_ipv6', 'accessIPv6', mapper.Noop),
('addresses', 'addresses', mapper.Noop),
('host', 'OS-EXT-SRV-ATTR:host', mapper.Noop),
('networks', 'networks', mapper.Noop),
('disks', 'block_device_mapping_v2', mapper.Noop),
('user_data', 'user_data', mapper.Base64),
('progress', 'progress', mapper.Noop),
('status', 'status', mapper.Noop),
('task_state', 'OS-EXT-STS:task_state', mapper.Noop),
('created_at', 'created', mapper.DateTime),
('updated_at', 'updated', mapper.DateTime),
('flavor', 'flavorRef', mapper.Resource('nova.flavor')),
('image', 'imageRef', mapper.Resource('image')),
('project', 'tenant_id', mapper.Resource('project')),
('user', 'user_id', mapper.Resource('user')),
('key_pair', 'key_name', mapper.Resource('nova.key_pair')),
('error_reason', 'fault', mapper.Noop),
]
class Resource(base.Resource):
"""Resource class for servers in Compute API v2"""
_sub_manager_list = {
'volume': volume_attachment.Manager,
'interface': interface_attachment.Manager,
}
def wait_for_finished(self, count=10, interval=10):
"""
Wait for task finished
@keyword count: Maximum polling time
@type count: int
@keyword interval: Polling interval in seconds
@type interval: int
@rtype: None
"""
for i in range(count):
time.sleep(interval)
try:
self.reload()
except exception.NotFound:
return
if not self.task_state:
return
def start(self):
"""
Start a server
@rtype: None
"""
self._http.post(self._url_resource_path, self._id, 'action',
data=utils.get_json_body("os-start"))
def stop(self):
"""
Stop a server
@rtype: None
"""
self._http.post(self._url_resource_path, self._id, 'action',
data=utils.get_json_body("os-stop"))
def reboot(self, force=False):
"""
Reboot a server
@keyword force: Whether reboot type is hard or soft. force=True means
hard reboot.
@type type: bool
@rtype: None
"""
if force:
type = "HARD"
else:
type = "SOFT"
self._http.post(self._url_resource_path, self._id, 'action',
data=utils.get_json_body("reboot", type=type))
def pause(self):
"""
Pause a server (save to RAM if server is a VM)
@rtype: None
"""
self._http.post(self._url_resource_path, self._id, 'action',
data=utils.get_json_body("pause"))
def unpause(self):
"""
Unpause a server
@rtype: None
"""
self._http.post(self._url_resource_path, self._id, 'action',
data=utils.get_json_body("unpause"))
def suspend(self):
"""
Suspend a server (save to disk if server is a VM)
@rtype: None
"""
self._http.post(self._url_resource_path, self._id, 'action',
data=utils.get_json_body("suspend"))
def resume(self):
"""
Resume a server
@rtype: None
"""
self._http.post(self._url_resource_path, self._id, 'action',
data=utils.get_json_body("resume"))
def reset_network(self):
"""
Reset networking of a server
@rtype: None
"""
self._http.post(self._url_resource_path, self._id, 'action',
data=utils.get_json_body("resetNetwork"))
def inject_network_info(self):
"""
Inject network information to a server
@rtype: None
"""
self._http.post(self._url_resource_path, self._id, 'action',
data=utils.get_json_body("injectNetworkInfo"))
def lock(self):
"""
Lock a server
@rtype: None
"""
self._http.post(self._url_resource_path, self._id, 'action',
data=utils.get_json_body("lock"))
def unlock(self):
"""
Unlock a server
@rtype: None
"""
self._http.post(self._url_resource_path, self._id, 'action',
data=utils.get_json_body("unlock"))
def force_delete(self):
"""
Force to delete a server
@rtype: None
"""
self._http.post(self._url_resource_path, self._id, 'action',
data=utils.get_json_body("forceDelete"))
def restore(self):
"""
Restore a defered-deleted server if available
@rtype: None
"""
self._http.post(self._url_resource_path, self._id, 'action',
data=utils.get_json_body("restore"))
def rescue(self, password=None):
"""
Create rescue environment for the server
@keyword password: password of the rescue OS
@type password: str
@rtype: None
"""
self._http.post(self._url_resource_path, self._id, 'action',
data=utils.get_json_body("rescue", adminPass=password))
def unrescue(self):
"""
Terminate the rescue environment
@rtype: None
"""
self._http.post(self._url_resource_path, self._id, 'action',
data=utils.get_json_body("unrescue"))
def shelve(self):
"""
Shelve a running server
@rtype: None
"""
self._http.post(self._url_resource_path, self._id, 'action',
data=utils.get_json_body("shelve"))
def unshelve(self):
"""
Restore a shelved server
@rtype: None
"""
self._http.post(self._url_resource_path, self._id, 'action',
data=utils.get_json_body("unshelve"))
def delete_shelve(self):
"""
Delete a shelved server
@rtype: None
"""
self._http.post(self._url_resource_path, self._id, 'action',
data=utils.get_json_body("shelveOffload"))
def create_image(self, name=None, metadata=None):
"""
Create server image
@keyword name: Image name
@type name: str
@keyword metadata: Metadata
@type metadata: dict
@rtype: None
"""
self._http.post(self._url_resource_path, self._id, 'action',
data=utils.get_json_body(
"createImage",
name=name,
metadata=metadata))
def backup(self, name=None, backup_type=None, rotation=None):
"""
Create server backup
|
skitoo/chac
|
chac/sensors/models.py
|
Python
|
gpl-3.0
| 251
| 0
|
from django.db import models
from jsonfield import JSONField
class Sensor(models.Model):
name = models.CharField(max_length=25)
activated = models.BooleanField(default=False)
type = models.CharField(max_length=10)
meta = JSONField()
|
julka2010/games
|
games/tichu/cards.py
|
Python
|
mpl-2.0
| 2,702
| 0.002591
|
import functools
from . import (
constants,
utils,
)
class Card():
def __init__(self, kind=None, strength=None, value=None, verbose=None, **kwargs):
if kind is None:
raise(TypeError("Missing required 'kind' argument."))
self.kind = kind
self.strength = strength
self.value = value
self.verbose = verbose if verbose is not None else kind
super().__init__(**kwargs)
def __valid_comparision(self, arg):
return hasattr(arg, "kind") and hasattr(arg, "strength")
_valid_comparision = __valid_comparision
def __lt__(self, value):
if not self.__valid_comparision(value):
return NotImplemented
if self.strength is not None:
if value.strength is not None:
return self.strength < value.strength
else:
return False
elif value.strength is not None:
return True
return self.kind < value.kind
def __str__(self):
return self.kind
class SimpleCard(Card):
def __init__(self, colour=None, kind=None, strength=None, **kwargs):
if colour is None:
raise(TypeError("Missing required 'colour' argument."))
self.colour = colour
if kind is None:
if strength is not None:
kind = str(strength)
super().__init__(kind=kind, strength=strength, **kwargs)
def __valid_comparision(self, arg):
if super()._valid_comparision(arg):
if hasattr(arg, "colour") and (arg.colour is not None):
if arg.strength is not None:
return True
return False
_valid_comparision = __valid_comparision
def __lt__(self, value):
if not self.__valid_comparision(value):
return super().__lt__(value)
if self.strength < value.strength:
return True
if self.strength == value.strength:
return self.colour < value.colour
return False
def __eq__(self, value):
if not self._valid_comparision(value):
return False
if (self.strength == value.strength) and (self.colour == value.colour):
return True
def __str__(self):
return self.kind + self.colour[0]
class MahJongg(Card):
def __init__(self):
super().__init__(kind='1', strength=1)
class Dragon(Card):
def __init__(self):
super().__init__(kind='R', value=25, verbose="Dragon")
class Pheonix(Card):
def __init__(self):
super().__init__(kind='P', value=-25, verbose="Pheonix")
class Dog(Card):
def __init__(self):
super().__init__(kind="D", verbose="Dog")
|
google-research/falken
|
service/learner/brains/specs.py
|
Python
|
apache-2.0
| 58,084
| 0.005113
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wrapper classes for Falken observation and action specs."""
# pylint: disable=g-bad-import-order
import collections
import common.generate_protos # pylint: disable=unused-import
import action_pb2
import brain_pb2
import observation_pb2
import primitives_pb2
# Optional fields of ObservationData and ObservationSpec protobufs.
OBSERVATION_OPTIONAL_ENTITIES = ['player', 'camera']
# Optional fields of EntityType and Entity protobufs.
ENTITY_OPTIONAL_FIELDS = ['position', 'rotation']
class InvalidSpecError(Exception):
"""Raised when the spec is invalid."""
class TypingError(Exception):
"""Raised when data doesn't match the spec."""
class BrainSpec:
"""A wrapper class for an observation and action spec proto."""
def __init__(self, brain_spec_pb, spec_base_class=None,
action_spec_class=None, observation_spec_class=None):
"""Parse and validate the provided spec proto.
Args:
brain_spec_pb: BrainSpec protobuf to parse and validate.
spec_base_class: SpecBase class to use for brain validation.
action_spec_class: SpecBase class to use for action validation and
conversion.
observation_spec_class: SpecBase class to use for action validation and
conversion.
"""
assert isinstance(brain_spec_pb, brain_pb2.BrainSpec)
spec_base_class = spec_base_class if spec_base_class else SpecBase
action_spec_class = action_spec_class if action_spec_class else ActionSpec
observation_spec_class = (observation_spec_class
if observation_spec_class else ObservationSpec)
_ = spec_base_class(brain_spec_pb)
self.action_spec = action_spec_class(brain_spec_pb.action_spec)
self.observation_spec = observation_spec_class(
brain_spec_pb.observation_spec)
self.validate_joystick_references()
def validate_joystick_references(self):
"""Validate joystick actions reference existing entities.
Raises:
InvalidSpecError: If invalid references are found or referenced entities
do not have positions or rotations.
"""
references_by_joystick_name = collections.defaultdict(set)
for node in self.action_spec.proto_node.children:
action = node.proto
if isinstance(action, action_pb2.JoystickType):
if action.control_frame:
references_by_joystick_name[node.name].add(action.control_frame)
if action.controlled_entity:
references_by_joystick_name[node.name].add(action.controlled_entity)
joystick_names_by_reference = collections.defaultdict(set)
invalid_references_by_joystick = []
for joystick_name, references in sorted(
references_by_joystick_name.items()):
invalid_references = []
for reference in references:
# In future, we may support named entities as well in addition to the
# fixed player and camera entities.
if reference in OBSERVATION_OPTIONAL_ENTITIES:
joystick_names_by_reference[reference].add(joystick_name)
else:
invalid_references.append(reference)
if invalid_references:
invalid_references_by_joystick.append(
f'{joystick_name} --> {sorted(invalid_references)}')
# Report all invalid entity references by joysticks.
if invalid_references_by_joystick:
msg = ', '.join(invalid_references_by_joystick)
raise InvalidSpecError(f'Joystick(s) reference invalid entities: {msg}.')
# Get all entities by name.
observation_node = self.observation_spec.proto_node
entities_by_name = {}
for optional_field in OBSERVATION_OPTIONAL_ENTITIES:
entity_node = observation_node.child_by_proto_field_name(optional_field)
if entity_node:
entities_by_name[entity_node.name] = entity_node
global_entities = observation_node.child_by_proto_field_name(
'global_entities')
if global_entities:
for entity_node in global_entities.children:
entities_by_name[entity_node.name] = entity_node
# Check that all referenced entities exist and have positions and rotations.
for reference, joystick_names in joystick_names_by_reference.items():
joystick_names = sorted(joystick_names)
entity_node = entities_by_name.get(reference)
if not entity_node:
raise InvalidSpecError(f'Missing entity {reference} referenced by '
f'joysticks {joystick_names}.')
if not entity_node.child_by_proto_field_name('position'):
raise InvalidSpecError(f'Entity {reference} referenced by joysticks '
f'{joystick_names} has no position.')
if not entity_node.child_by_proto_field_name('rotation'):
raise InvalidSpecError(f'Entity {reference} referenced by joysticks '
f'{joystick_names} has no rotation.')
class SpecBase:
"""Base class for an action or observation spec."""
def __init__(self, spec):
"""Parse and validate the provided spec proto.
Args:
spec: Spec protobuf to parse and validate.
"""
self._spec_proto = spec
self._spec_proto_node = ProtobufNode.from_spec(spec)
self._node_nest = self._spec_proto_node.as_nest(include_self=False)
super().__init__()
@property
def proto(self):
"""Return the underlying proto buffer."""
return self._spec_proto
@property
def proto_node(self):
"""Get the ProtobufNode referencing the underlying protocol buffer."""
return self._spec_proto_node
def __str__(self):
"""String representation of the proto owned by this object."""
return str(self._spec_proto)
class ObservationSpec(SpecBase):
"""A wrapper class for an ObservationSpec proto.
ObservationSpec proto defines the observation space for an agent. This class
is a helper class used to translate spec information and value to a TF Agents
compatible format.
"""
def __init__(self, spec_proto):
assert isinstance(spec_proto, observation_pb2.ObservationSpec)
super().__init__(spec_proto)
class ActionSpec(SpecBase):
"""A wrapper class for an ActionSpec proto.
An ActionSpec proto defines the action space for an agent. This class
is a helper class used to translate spec information and value to a TF Agents
compatible format.
"""
def __init__(self, spec_proto):
assert isinstance(spec_proto, action_pb2.ActionSpec)
super().__init__(spec_proto)
def _concat_path(prefix, component):
"""Add a component to a path.
Args:
prefix: Prefix of the path. If this is empty, it isn't included in the
returned path.
component: Component to add to the path.
Returns:
Concatenated path string.
"""
return f'{prefix}/{component}' if prefix else component
def _get_optional_fields_from_proto(proto, field_names):
"""Get optional fields from the specified proto.
Args:
proto: Proto to query.
field_names: Names of the fields to find.
Returns:
List of (field_name, field_proto) tuples where field_name is the
name of the field and field_proto is the sub-message proto.
"""
return [(f, getattr(proto, f)) for f in field_names if proto.HasField(f)]
def _label_repeated_field(proto, field_name):
"""Label elements of a repeated proto field.
Args:
proto: Proto to query.
field_name: Repeated field name to enumerate.
Yields:
(index, name, field_proto) tuples where index is the index of the field,
name is `field_name[index]` and field_proto is the proto in the
repeated field at the index index.
"""
repeated_field = getattr(proto, field_name)
for i in range(len(repeated
|
dcsquared13/Diamond
|
src/collectors/network/network.py
|
Python
|
mit
| 4,536
| 0.007496
|
# coding=utf-8
"""
The NetworkCollector class collects metrics on network interface usage
using /proc/net/dev.
#### Dependencies
* /proc/net/dev
"""
import diamond.collector
from diamond.collector import str_to_bool
import diamond.convertor
import os
import re
try:
import psutil
except ImportError:
psutil = None
class NetworkCollector(diamond.collector.Collector):
PROC = '/proc/net/dev'
def get_default_config_help(self):
config_help = super(NetworkCollector, self).get_default_config_help()
config_help.update({
'interfaces': 'List of interface types to collect',
'greedy': 'Greedy match interfaces',
})
return config_help
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(NetworkCollector, self).get_default_config()
config.update({
'path': 'network',
'interfaces': ['eth', 'bond', 'em', 'p1p', 'eno', 'enp', 'ens',
'enx'],
'byte_unit': ['bit', 'byte'],
'greedy': 'true',
})
return config
def collect(self):
"""
Collect network interface stats.
"""
# Initialize results
results = {}
if os.access(self.PROC, os.R_OK):
# Open File
file = open(self.PROC)
# Build Regular Expression
greed = ''
if str_to_bool(self.config['greedy']):
greed = '\S*'
exp = (('^(?:\s*)((?:%s)%s):(?:\s*)' +
'(?P<rx_bytes>\d+)(?:\s*)' +
'(?P<rx_packets>\w+)(?:\s*)' +
'(?P<rx_errors>\d+)(?:\s*)' +
'(?P<rx_drop>\d+)(?:\s*)' +
'(?P<rx_fifo>\d+)(?:\s*)' +
'(?P<rx_frame>\d+)(?:\s*)' +
'(?P<rx_compressed>\d+)(?:\s*)' +
'(?P<rx_multicast>\d+)(?:\s*)' +
'(?P<tx_bytes>\d+)(?:\s*)' +
'(?P<tx_packets>\w+)(?:\s*)' +
'(?P<tx_errors>\d+)(?:\s*)' +
'(?P<tx_drop>\d+)(?:\s*)' +
'(?P<tx_fifo>\d+)(?:\s*)' +
'(?P<tx_colls>\d+)(?:\s*)' +
'(?P<tx_carrier>\d+)(?:\s*)' +
'(?P<tx_compressed>\d+)(?:.*)$') %
(('|'.join(self.config['interfaces'])), greed))
reg = re.compile(exp)
# Match Interfaces
for line in file:
match = reg.match(line)
if match:
device = match.group(1)
results[device] = match.groupdict()
# Close File
file.close()
else:
if not psutil:
self.log.error('Unable to import psutil')
self.log.error('No network metrics retrieved')
return None
network_stats = psutil.network_io_counters(True)
for device in network_stats.keys():
network_stat = network_stats[device]
results[device] = {}
results[device]['rx_bytes'] = network_stat.bytes_recv
results[device]['tx_bytes'] = network_stat.bytes_sent
results[device]['rx_packets'] = network_stat.packets_recv
results[device]['tx_packets'] = network_stat.packets_sent
for device in results:
stats = results[device]
for s, v in stats.items():
# Get Metric Name
metric_name = '.'.join([device, s])
# Get Metric Value
metric_value = self.derivative(metric_name,
long(v),
diamond.collector.MAX_COUNTER)
# Convert rx_bytes and tx_bytes
if s == 'rx_bytes' or s == 'tx_bytes':
convertor = diamond.convertor.binary(value=metric_value,
unit='byte')
for u in self.config['byte_unit']:
# Public Converted Metric
self.publish(metric_name.replace('bytes', u),
convertor.get(unit=u), 2)
else:
# Publish Metric Derivative
self.publish(metric_name, metric_value)
return None
|
brownharryb/erpnext
|
erpnext/manufacturing/doctype/work_order/work_order.py
|
Python
|
gpl-3.0
| 25,289
| 0.025031
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
import json
from frappe import _
from frappe.utils import flt, get_datetime, getdate, date_diff, cint, nowdate
from frappe.model.document import Document
from erpnext.manufacturing.doctype.bom.bom import validate_bom_no, get_bom_items_as_dict
from dateutil.relativedelta import relativedelta
from erpnext.stock.doctype.item.item import validate_end_of_life
from erpnext.manufacturing.doctype.workstation.workstation import WorkstationHolidayError
from erpnext.projects.doctype.timesheet.timesheet import OverlapError
from erpnext.stock.doctype.stock_entry.stock_entry import get_additional_costs
from erpnext.manufacturing.doctype.manufacturing_settings.manufacturing_settings import get_mins_between_operations
from erpnext.stock.stock_balance import get_planned_qty, update_bin_qty
from frappe.utils.csvutils import getlink
from erpnext.stock.utils import get_bin, validate_warehouse_company, get_latest_stock_qty
from erpnext.utilities.transaction_base import validate_uom_is_integer
class OverProductionError(frappe.ValidationError): pass
class StockOverProductionError(frappe.ValidationError): pass
class OperationTooLongError(frappe.ValidationError): pass
class ItemHasVariantError(frappe.ValidationError): pass
from six import string_types
form_grid_templates = {
"operations": "templates/form_grid/work_order_grid.html"
}
class WorkOrder(Document):
def onload(self):
ms = frappe.get_doc("Manufacturing Settings")
self.set_onload("material_consumption", ms.material_consumption)
self.set_onload("backflush_raw_materials_based_on", ms.backflush_raw_materials_based_on)
def validate(self):
self.validate_production_item()
if self.bom_no:
validate_bom_no(self.production_item, self.bom_no)
self.validate_sales_order()
self.set_default_warehouse()
self.validate_warehouse_belongs_to_company()
self.calculate_operating_cost()
self.validate_qty()
self.validate_operation_time()
self.status = self.get_status()
validate_uom_is_integer(self, "stock_uom", ["qty", "produced_qty"])
self.set_required_items(reset_only_qty = len(self.get("required_items")))
def validate_sales_order(self):
if self.sales_order:
self.check_sales_order_on_hold_or_close()
so = frappe.db.sql("""
select so.name, so_item.delivery_date, so.project
from `tabSales Order` so
inner join `tabSales Order Item` so_item on so_item.parent = so.name
left join `tabProduct Bundle Item` pk_item on so_item.item_code = pk_item.parent
where so.name=%s and so.docstatus = 1 and (
so_item.item_code=%s or
pk_item.item_code=%s )
""", (self.sales_order, self.production_item, self.production_item), as_dict=1)
if not so:
so = frappe.db.sql("""
select
so.name, so_item.delivery_date, so.project
from
`tabSales Order` so, `tabSales Order Item` so_item, `tabPacked Item` packed_item
where so.name=%s
and so.name=so_item.parent
and so.name=packed_item.parent
and so_item.item_code = packed_item.parent_item
and so.docstatus = 1 and packed_item.item_code=%s
""", (self.sales_order, self.production_item), as_dict=1)
if len(so):
if not self.expected_delivery_date:
self.expected_delivery_date = so[0].delivery_date
if so[0].project:
self.project = so[0].project
if not self.material_request:
self.validate_work_order_against_so()
else:
frappe.throw(_("Sales Order {0} is not valid").format(self.sales_order))
def check_sales_order_on_hold_or_close(self):
status = frappe.db.get_value("Sales Order", self.sales_order, "status")
if status in ("Closed", "On Hold"):
frappe.throw(_("Sales Order {0} is {1}").format(self.sales_order, status))
def set_default_warehouse(self):
if not self.wip_warehouse:
self.wip_warehouse = frappe.db.get_single_value("Manufacturing Settings", "default_wip_warehouse")
if not self.fg_warehouse:
self.fg_warehouse = frappe.db.get_single_value("Manufacturing Settings", "default_fg_warehouse")
def validate_warehouse_belongs_to_company(self):
warehouses = [self.fg_warehouse, self.wip_warehouse]
for d in self.get("required_items"):
if d.source_warehouse not in warehouses:
warehouses.append(d.source_warehouse)
for wh in warehouses:
validate_warehouse_company(wh, self.company)
def calculate_operating_cost(self):
self.planned_operating_cost, self.actual_operating_cost = 0.0, 0.0
for d in self.get("operations"):
d.planned_operating_cost = flt(d.hour_rate) * (flt(d.time_in_mins) / 60.0)
d.actual_operating_cost = flt(d.hour_rate) * (flt(d.actual_operation_time) / 60.0)
self.planned_operating_cost += flt(d.planned_operating_cost)
self.actual_operating_cost += flt(d.actual_operating_cost)
variable_cost = self.actual_operating_cost if self.actual_operating_cost \
else self.planned_operating_cost
self.total_operating_cost = flt(self.additional_operating_cost) + flt(variable_cost)
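# Illustrative note (added by the editor, hypothetical numbers): with an hour_rate of 60
# and time_in_mins of 90, that operation's planned_operating_cost is 60 * (90 / 60.0) = 90.0;
# total_operating_cost then adds additional_operating_cost on top of the actual cost
# (if any) or the planned cost.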
def validate_work_order_against_so(self):
# already ordered qty
ordered_qty_against_so = frappe.db.sql("""select sum(qty) from `tabWork Order`
where production_item = %s and sales_order = %s and docstatus < 2 and name != %s""",
(self.production_item, self.sales_order, self.name))[0][0]
total_qty = flt(ordered_qty_against_so) + flt(self.qty)
# get qty from Sales Order Item table
so_item_qty = frappe.db.sql("""select sum(stock_qty) from `tabSales Order Item`
where parent = %s and item_code = %s""",
(self.sales_order, self.production_item))[0][0]
# get qty from Packing Item table
dnpi_qty = frappe.db.sql("""select sum(qty) from `tabPacked Item`
where parent = %s and parenttype = 'Sales Order' and item_code = %s""",
(self.sales_order, self.production_item))[0][0]
# total qty in SO
so_qty = flt(so_item_qty) + flt(dnpi_qty)
allowance_percentage = flt(frappe.db.get_single_value("Manufacturing Settings",
"overproduction_percentage_for_sales_order"))
if total_qty > so_qty + (allowance_percentage/100 * so_qty):
frappe.throw(_("Cannot produce more Item {0} than Sales Order quantity {1}")
.format(self.production_item, so_qty), OverProductionError)
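# Illustrative note (added by the editor, hypothetical numbers): with so_qty = 100 and an
# overproduction_percentage_for_sales_order of 10, work orders for this item against the
# sales order may total at most 100 + (10/100 * 100) = 110; anything above that raises
# OverProductionError.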
def update_status(self, status=None):
'''Update status of work order if unknown'''
if status != "Stopped":
status = self.get_status(status)
if status != self.status:
self.db_set("status", status)
self.update_required_items()
return status
def get_status(self, status=None):
'''Return the status based on stock entries against this work order'''
if not status:
status = self.status
if self.docstatus==0:
status = 'Draft'
elif self.docstatus==1:
if status != 'Stopped':
stock_entries = frappe._dict(frappe.db.sql("""select purpose, sum(fg_completed_qty)
from `tabStock Entry` where work_order=%s and docstatus=1
group by purpose""", self.name))
status = "Not Started"
if stock_entries:
status = "In Process"
produced_qty = stock_entries.get("Manufacture")
if flt(produced_qty) >= flt(self.qty):
status = "Completed"
else:
status = 'Cancelled'
return status
def update_work_order_qty(self):
"""Update **Manufactured Qty** and **Material Transferred for Qty** in Work Order
based on Stock Entry"""
allowance_percentage = flt(frappe.db.get_single_value("Manufacturing Settings",
"overproduction_percentage_for_work_order"))
for purpose, fieldname in (("
|
Manufacture", "produced_qty"),
("Material Transfer for Manufacture", "material_transferred_for_manufacturing")):
if (purpose == 'Material Transfer for Manufacture' and
self.ope
|
rations and self.transfer_material_against == 'Job Card'):
continue
qty = flt(frappe.db.sql("""select sum(fg_completed_qty)
from `tabStock Entry` where work_order=%s and docstatus=1
and purpose=%s""", (self.name, purpose))[0][0])
completed_qty = self.qty + (allowance_percentage/100 * self.qty)
if qty > completed_qty:
frappe.throw(_("{0} ({1}) cannot be greater than planned quantity ({2}) in Work
|
rwl/PyCIM
|
CIM14/CPSM/Equipment/Core/GeographicalRegion.py
|
Python
|
mit
| 2,316
| 0.000864
|
# Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE
|
SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND
|
, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM14.CPSM.Equipment.Core.IdentifiedObject import IdentifiedObject
class GeographicalRegion(IdentifiedObject):
"""A geographical region of a power system network model.
"""
def __init__(self, Regions=None, *args, **kw_args):
"""Initialises a new 'GeographicalRegion' instance.
@param Regions: The association is used in the naming hierarchy.
"""
self._Regions = []
self.Regions = [] if Regions is None else Regions
super(GeographicalRegion, self).__init__(*args, **kw_args)
_attrs = []
_attr_types = {}
_defaults = {}
_enums = {}
_refs = ["Regions"]
_many_refs = ["Regions"]
def getRegions(self):
"""The association is used in the naming hierarchy.
"""
return self._Regions
def setRegions(self, value):
for x in self._Regions:
x.Region = None
for y in value:
y._Region = self
self._Regions = value
Regions = property(getRegions, setRegions)
def addRegions(self, *Regions):
for obj in Regions:
obj.Region = self
def removeRegions(self, *Regions):
for obj in Regions:
obj.Region = None
|
Quiphius/pdns-sync
|
pdnssync/main.py
|
Python
|
mit
| 2,853
| 0.001052
|
import argparse
from pdnssync.database import Database
from pdnssync.parse import Parser
from pdnssync.error import get_warn, get_err
parser = Parser()
def validate():
domains = parser.get_domains()
for d in sorted(domains):
domains[d].validate(domains)
def sync(db):
all_db_domains = db.get_domains()
all_domains = parser.get_domains()
list_domains = all_domains.keys()
list_db_domains = all_db_domains.keys()
create_list = list(set(list_domains) - set(list_db_domains))
delete_list = list(set(list_db_domains) - set(list_domains))
db.create_domains(create_list)
db.delete_domains(delete_list)
for i in sorted(list_domains):
d = all_domains[i]
d.sync_domain(db)
def export(db):
all_db_domain = db.get_domains()
for d in all_db_domain:
print('# %s' % d)
records = db.get_records(d)
soa = records[(d, 'SOA')][0].data.split(' ')
print('D %s %s %s' % (d, soa[0], soa[1]))
if (d, 'NS') in records:
ns = records[(d, 'NS')]
ns_names = []
for i in ns:
ns_names.append(i.data)
print('N %s' % ' '.join(ns_names))
if (d, 'MX') in records:
mx = records[(d, 'MX')]
mx_names = []
for i in mx:
mx_names.append("%s %s" % (i.prio, i.data))
print('M %s' % ' '.join(mx_names))
for i in records:
if i[1] == 'A':
for j in records[i]:
print('%s %s' % (j.data, i[0]))
if i[1] == 'AAAA':
|
for j in records[i]:
print('%s %s' % (j.data, i[0]))
if i[1] == 'CNAME':
for j in records[i]:
print('C %s %s' % (i[0],
|
j.data))
if i[1] == 'SRV':
for j in records[i]:
print('S %s %s %s' % (i[0], j.prio, j.data))
if i[1] == 'TXT':
for j in records[i]:
print('X %s %s' % (i[0], j.data))
print()
def do_sync():
aparser = argparse.ArgumentParser()
aparser.add_argument("-v", "--verbose", action="count", default=0, help="increase output verbosity")
aparser.add_argument("-w", "--werror", action="store_true", help="also break on warnings")
aparser.add_argument('files', metavar='file', nargs='+', help='the files to parse')
args = aparser.parse_args()
for fname in args.files:
parser.parse(fname)
parser.assign()
validate()
err = get_err()
warn = get_warn()
print('%d error(s) and %d warning(s)' % (err, warn))
if err == 0 and (not args.werror or warn == 0):
db = Database()
sync(db)
else:
print('Errors found, not syncing')
def do_export():
db = Database()
export(db)
|
almet/whiskerboard
|
settings/epio.py
|
Python
|
mit
| 751
| 0.001332
|
from __future__ import absolute_import
from .base import *
from bundle_config import config
DATABASES = {
'default': {
'ENGINE': 'django.db.b
|
ackends.postgresql_psycopg2',
'NAME': config['postgres']['database'],
'USER': config['postgres']['username'],
'PASSWORD': config['postgres']['password'],
'HOST': config['postgres']['host'],
}
}
CACHES = {
'default': {
'BACKEND': 'redis_cache.RedisCache',
'LOCATION': '{host}:{port}'.format(
host=config['redis']['host'],
port=config['redis']['port']),
'OPTIONS': {
|
'PASSWORD': config['redis']['password'],
},
'VERSION': config['core']['version'],
},
}
DEBUG = False
|
dan-stone/canal
|
canal/tests/test_from_json.py
|
Python
|
mit
| 3,665
| 0
|
import numpy as np
import canal as canal
from .util import NumpyTestCase
class FromJSONTestCase(NumpyTestCase):
class Measurement(canal.Measurement):
int_field = canal.IntegerField()
alternate_db_name = canal.IntegerField(db_name="something_else")
float_field = canal.FloatField()
bool_field = canal.BooleanField()
string_field = canal.StringField()
tag_1 = canal.Tag()
tag_2 = canal.Tag()
def test_from_json_iso_time(self):
test_data = 5*[
[
"2015-01-29T21:55:43.702900257Z",
1,
2,
1.2,
True,
"some content",
"1",
"2"
],
[
"2015-01-29T21:55:43.702900345Z",
2,
3,
2.3,
False,
"some other content",
"1",
"2"
]
]
json_data = dict(
results=[dict(
series=[dict(
name="Measurement",
columns=[
"time",
"int_field",
"something_else",
"float_field",
"bool_field",
"string_field",
"tag_1",
"tag_2"
],
values=test_data
)]
)]
)
test_series = self.Measurement.from_json(json_data)
self.assertndArrayEqual(
test_series.time,
np.array(
5*[
"2015-01-29T21:55:43.702900257Z",
"2015-01-29T21:55:43.702900345Z"
],
dtype='datetime64'
)
)
self.assertndArrayEqual(
test_series.int_field,
np.array(5*[1, 2])
)
self.assertndArrayEqual(
test_series.alternate_db_name,
np.array(5*[2, 3])
)
|
self.assertndArrayEqual(
test_series.float_field,
np.array(5*[1.2, 2.3])
)
self.assertndArrayEqual(
tes
|
t_series.bool_field,
np.array(5*[True, False])
)
self.assertndArrayEqual(
test_series.string_field,
np.array(5*["some content", "some other content"])
)
self.assertndArrayEqual(
test_series.tag_1,
np.array(10*["1"])
)
self.assertndArrayEqual(
test_series.tag_2,
np.array(10*["2"])
)
def test_from_json_bad_input(self):
with self.assertRaises(ValueError):
list(self.Measurement.from_json({"bad": "input"}))
def test_empty_json(self):
content = dict()
with self.assertRaises(ValueError):
self.Measurement.from_json(content)
def test_from_json_wrong_measurement(self):
test_json = dict(
results=[dict(
series=[dict(
name="SomeOtherMeasurement",
columns=[
"time",
"int_field",
"float_field",
"bool_field",
"string_field",
"tag_1",
"tag_2"
],
values=[]
)]
)]
)
with self.assertRaises(ValueError):
self.Measurement.from_json(test_json)
|
blakeohare/crayon
|
Scripts/project-diff.py
|
Python
|
mit
| 5,511
| 0.020323
|
import os
import sys
def main(args):
if len(args) != 2:
print("Usage: python project-diff.py [path-to-project-1] [path-to-project-2]")
return
dir1 = args[0]
dir2 = args[1]
project1 = collect_text_files(dir1)
project2 = collect_text_files(dir2)
files_only_in_1 = []
files_only_in_2 = []
files_in_both = []
perform_venn_analysis(set(project1.keys()), set(project2.keys()), files_only_in_1, files_only_in_2, files_in_both)
if len(files_only_in_1) > 0:
print("The following files are only in Project 1:")
for file in files_only_in_1:
print(" " + file)
print("")
if len(files_only_in_2) > 0:
print("The following files are only in Project 2:")
for file in files_only_in_2:
print(" " + file)
print("")
print(str(len(files_in_both)) + " files in both projects.")
print("")
files_in_both.sort()
files_with_diffs = []
for file in files_in_both:
text_1 = project1[file]
text_2 = project2[file]
diff = perform_diff(text_1, text_2)
if len(diff) > 0:
files_with_diffs.append(file)
print("There's a difference in " + file)
print("\n".join(diff))
print("")
if len(files_with_diffs) == 0:
print("No files with text differences.")
else:
print("Diffs were in the following files:")
print("\n".join(files_with_diffs))
print("")
def perform_venn_analysis(set_a, set_b, only_in_a_out, only_in_b_out, in_both_out):
for item in set_a:
if item not in set_b:
only_in_a_out.append(item)
else:
in_both_out.append(item)
for item in set_b:
if item not in set_a:
only_in_b_out.append(item)
def collect_text_files(root):
output = {}
root = root.replace('\\', '/')
if root.endswith('/'):
root = root[:-1]
collect_text_files_impl(root, '', output)
return output
def get_file_extension(file):
if '.' in file:
return file.split('.')[-1].lower()
return ''
FILE_EXTENSION_IGNORE_LIST = set([
'png', 'jpg',
'xcuserstate',
])
def is_text_file(path):
ext = get_file_extension(path)
return ext not in FILE_EXTENSION_IGNORE_LIST
def collect_text_files_impl(root, current_dir, output):
full_dir = root
if current_dir != '':
full_dir += '/' + current_dir
for file in os.listdir(full_dir.replace('/', os.sep)):
full_file = full_dir + '/' + file
if os.path.isdir(full_file.replace('/', os.sep)):
next_cd = file if current_dir == '' else (current_dir + '/' + file)
collect_text_files_impl(root, next_cd, output)
else:
rel_file = file if current_dir == '' else (current_dir + '/' + file)
if is_text_file(rel_file):
c = open(full_file.replace
|
('/', os.sep), 'rt')
text = c.read()
c.close()
output[rel_file] = text
else:
output[rel_file] = '\n'.join([
"Binary file:",
"size X", # TODO: get file size
"first 20 bytes: ...", # TODO: this
"last 20 bytes: ...", # TODO: do this as well
])
def perform_diff(text_1, text_2):
if text_1 == text_2:
return []
lines_1 = text_1.split('\n')
lines_2 = text_2.split('\n')
trimmed_front = 0
trimmed_back = 0
#
|
Remove identical lines at the beginning and end of the file
while len(lines_1) > trimmed_front and len(lines_2) > trimmed_front and lines_1[trimmed_front] == lines_2[trimmed_front]:
trimmed_front += 1
lines_1 = lines_1[trimmed_front:]
lines_2 = lines_2[trimmed_front:]
while len(lines_1) > trimmed_back and len(lines_2) > trimmed_back and lines_1[-1 - trimmed_back] == lines_2[-1 - trimmed_back]:
trimmed_back += 1
lines_1 = lines_1[:-trimmed_back]
lines_2 = lines_2[:-trimmed_back]
length_1 = len(lines_1)
length_2 = len(lines_2)
grid = []
for x in range(length_2 + 1):
column = []
for y in range(length_1 + 1):
column.append(None)
grid.append(column)
# Perform levenshtein difference
# each grid cell will consist of a tuple: (diff-size, previous-path: up|left|diag)
# Each step to the right indicates taking a line from lines 2
# Each step downwards indicates taking a line from lines 1
# Prepopulate the top row and left column; they represent starting the diff by
# removing all lines from lines_1 and adding all lines from lines_2.
for x in range(length_2 + 1):
grid[x][0] = (x, 'left')
for y in range(length_1 + 1):
grid[0][y] = (y, 'up')
grid[0][0] = (0, 'diag')
# Populate the grid. Figure out the minimum diff to get to each point.
for y in range(1, length_1 + 1):
for x in range(1, length_2 + 1):
if lines_1[y - 1] == lines_2[x - 1]:
grid[x][y] = (grid[x - 1][y - 1][0], 'diag')
elif (grid[x - 1][y][0] <= grid[x][y - 1][0]):
grid[x][y] = (grid[x - 1][y][0] + 1, 'left')
else:
grid[x][y] = (grid[x][y - 1][0] + 1, 'up')
# Start from the bottom right corner and walk backwards to the origin
x = length_2
y = length_1
diff_chain = []
ellipsis_used = False
while x != 0 and y != 0:
node = grid[x][y]
if node[1] == 'diag':
if not ellipsis_used:
diff_chain.append('...')
ellipsis_used = True
x -= 1
y -= 1
elif node[1] == 'left':
diff_chain.append('+ [' + str(trimmed_front + x) + '] ' + lines_2[x - 1])
x -= 1
ellipsis_used = False
else:
diff_chain.append('- [' + str(trimmed_front + y) + '] ' + lines_1[y - 1])
y -= 1
ellipsis_used = False
diff_chain.reverse()
return diff_chain
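# Descriptive note (added by the editor): perform_diff returns a list of strings such as
# '+ [<line no>] <text>' for a line taken from text_2, '- [<line no>] <text>' for a line
# dropped from text_1, and '...' marking a run of unchanged lines; an empty list means the
# two texts are identical.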
main(sys.argv[1:])
|
zstars/weblabdeusto
|
server/src/experiments/ud_demo_xilinx/server.py
|
Python
|
bsd-2-clause
| 2,623
| 0.006484
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2005 onwards University of Deusto
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
# This software consists of contributions made by many individuals,
# listed below:
#
# Author: Pablo Orduña <[email protected]>
#
import os
import json
from weblab.util import data_filename
from voodoo.gen.caller_checker import caller_check
from voodoo.log import logged
from voodoo.override import Override
import experiments.ud_xilinx.server as UdXilinxExperiment
import weblab.data.server_type as ServerType
import weblab.experiment.util as ExperimentUtil
module_directory = os.path.join(*__name__.split('.')[:-1])
class UdDemoXilinxExperiment(UdXilinxExperiment.UdXilinxExperiment):
FILES = {
'PLD' : 'cpld.jed',
'FPGA' : 'fpga.bit',
}
def __init__(self, coord_address, locator, cfg_manager, *args, **kwargs):
super(UdDemoXilinxExperiment,self).__init__(coord_address, locator, cfg_manager, *args, **kwargs)
file_path = data_filename(os.path.join(module_directory, self.FILES[self._xilinx_device]))
self.file_content = ExperimentUtil.serialize(open(file_path, "rb").read())
@Override(UdXilinxExperiment.UdXilinxExperiment)
|
@caller_check(ServerType.Laboratory)
@logged("info")
def do_start_experiment(self, *args, **kwargs):
"""
Handles experiment startup, returning certain initial configuration parameters.
(It therefore uses version 2 of the API.)
"""
super(UdDemoXilinxExperiment, self).do_send_file_to_device(self.file_content, "program")
return json.dumps({ "initial_con
|
figuration" : """{ "webcam" : "%s", "expected_programming_time" : %s }""" % (self.webcam_url, self._programmer_time), "batch" : False })
@Override(UdXilinxExperiment.UdXilinxExperiment)
@caller_check(ServerType.Laboratory)
@logged("info")
def do_dispose(self):
super(UdDemoXilinxExperiment, self).do_dispose()
return "ok"
@Override(UdXilinxExperiment.UdXilinxExperiment)
@caller_check(ServerType.Laboratory)
@logged("info",except_for='file_content')
def do_send_file_to_device(self, file_content, file_info):
return "sending file not possible in demo"
@logged("info")
@Override(UdXilinxExperiment.UdXilinxExperiment)
@caller_check(ServerType.Laboratory)
def do_send_command_to_device(self, command):
return super(UdDemoXilinxExperiment, self).do_send_command_to_device(command)
|
suprotkin/atm
|
atm/card/middleware.py
|
Python
|
gpl-2.0
| 751
| 0.001332
|
__author__ = 'roman'
from django.utils.functional import SimpleLazyObject
from . import get_card as _get_card
def get_card(request):
if
|
not hasattr(request, '_cached_card'):
request._cached_card = _get_card(request)
return request._cached_card
class CardAuthMiddleware(object):
def process_request(self, request):
assert hasattr(request, 'session'), (
|
"The Card authentication middleware requires session middleware "
"to be installed. Edit your MIDDLEWARE_CLASSES setting to insert "
"'django.contrib.sessions.middleware.SessionMiddleware' before "
"'card.middleware.CardAuthMiddleware'."
)
request.card = SimpleLazyObject(lambda: get_card(request))
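# Illustrative settings sketch (added by the editor; only the two middleware paths come from
# the assert message above, the rest is an assumption):
# MIDDLEWARE_CLASSES = (
#     'django.contrib.sessions.middleware.SessionMiddleware',
#     'card.middleware.CardAuthMiddleware',
# )
# SessionMiddleware must be listed before CardAuthMiddleware so that request.session exists
# when process_request runs.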
|
SimonDevon/simple-python-shapes
|
square1.py
|
Python
|
mit
| 236
| 0
|
# Step 1: Make all the "turtle" commands a
|
vailable to us.
import turtle
# Step 2: create a new turtle, we'll call him simon
simon = turtle.Turtle()
|
# Lets draw a square!
for loop in range(4):
simon.forward(200)
simon.left(90)
|
iTALC/documentation
|
build-manuals.py
|
Python
|
gpl-3.0
| 1,538
| 0.046164
|
#!/usr/bin/env python3
import os
import shutil
import subprocess
import gettext
version = '4.4.0'
builds = [
{ 'language': 'de', 'paper': 'a4paper', 'babel': 'ngerman' },
{ 'language': 'en', 'paper': 'letterpaper', 'babel': 'USenglish' },
{ 'language': 'es', 'paper': 'a4paper', 'babel': 'spanish' },
{ 'language': 'fr', 'paper': 'a4paper', 'babel': 'french' },
{ 'language': 'hu', 'paper': 'a4paper', 'babel': 'magyar' },
{ 'language': 'it', 'paper': 'a4paper', 'babel': 'italian' },
{ 'language': 'sl', 'paper': 'a4paper', 'babel': 'slovene' },
{ 'language': 'uk', 'paper': 'a4paper', 'babel': 'ukrainian' },
]
for i in builds:
for manual in [ 'admin', 'user' ]:
language = i['language']
print( 'Building for language "%s"' % ( language ) )
subprocess.Popen( ['msgfmt', 'locale/%s/LC_MESSAGES/%s.po' % ( language, manual ), '-o',
'locale/%s/LC_MESSAGES/%s.mo' % ( language, manual ) ] ).wait()
env = os.environ.copy()
with open('%s/index.rst' % (manual)) as f:
title = f.readline().rstrip()
title = gettext.translation(manual, 'locale', [language], None, True).gettext(title)
env['TITLE'] = title;
env['LANGUAGE'] = language
env['PAPER'] = i['paper']
env['INDEX'] = '%s/index' % ( manual )
env['BABEL'] = i['babel']
env['VERSION'] = version
env['SPHINXOPTS'] = '-j%s' % ( os.cpu_count()+1 )
shutil.rmtree('_build', True)
subprocess.Popen( ['make', 'latexpdf' ], env=env ).wait()
sh
|
util.copyfile('_build/latex/veyon.pdf', 'veyon-%s-manual-%s_%s.pdf' % ( manual, langu
|
age, version ))
|
zstackio/zstack-woodpecker
|
integrationtest/vm/multihosts/vm_snapshots/paths/xc_path8.py
|
Python
|
apache-2.0
| 1,642
| 0.017052
|
import zstackwoodpecker.test_state as ts_header
import os
TestAction = ts_header.TestAction
def path():
return dict(initial_formation="template5", checking_point=8, path_list=[
[TestAction.create_vm, 'vm1', 'flag=ceph'],
[TestAction.create_volume, 'volume1', 'flag=ceph,scsi'],
[TestAction.attach_volume, 'vm1', 'volume1'],
[TestAction.create_volume, 'volume2', 'flag=ceph,scsi'],
[TestAction.attach_volume, 'vm1', 'volume2'],
[TestAction.create_volume, 'volume3', 'flag=ceph,scsi'],
[TestAction.attach_volume, 'vm1', 'volume3'],
[TestAction.create_vm_snapshot, 'vm1', 'vm1-snapshot1'],
[TestAction.clone_vm, 'vm1', 'vm2'],
[TestAction.create_volume_backup, 'volume2', 'volume2-backup1'],
[TestAction.stop_vm, 'vm1'],
[TestAction.use_volume_backup, 'volume2-backup1'],
[
|
TestAction.start_vm, 'vm1'],
[TestAction.create_vm_snapshot, 'vm1', 'vm1-snapshot5'],
[TestAction.delete_vm_snapshot, 'vm1-snapshot1'],
[TestA
|
ction.create_vm_snapshot, 'vm2', 'vm2-snapshot9'],
[TestAction.clone_vm, 'vm1', 'vm3', 'full'],
[TestAction.delete_volume_snapshot, 'vm1-snapshot5'],
[TestAction.stop_vm, 'vm2'],
[TestAction.change_vm_image, 'vm2'],
[TestAction.delete_vm_snapshot, 'vm2-snapshot9'],
])
'''
The final status:
Running:['vm1', 'vm3']
Stopped:['vm2']
Enabled:['volume1-snapshot5', 'volume2-snapshot5', 'volume3-snapshot5', 'volume2-backup1']
attached:['volume1', 'volume2', 'volume3', 'clone@volume1', 'clone@volume2', 'clone@volume3']
Detached:[]
Deleted:['vm1-snapshot1', 'volume1-snapshot1', 'volume2-snapshot1', 'volume3-snapshot1', 'vm1-snapshot5', 'vm2-snapshot9']
Expunged:[]
Ha:[]
Group:
'''
|
MSusik/invenio
|
invenio/testsuite/test_apps/first/views.py
|
Python
|
gpl-2.0
| 951
| 0.01367
|
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2014 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your optio
|
n) any later version.
##
## Invenio is distributed in the
|
hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
from flask import Blueprint
blueprint = Blueprint('first', __name__, url_prefix='/',
template_folder='templates', static_folder='static')
|
meissnert/StarCluster-Plugins
|
STAR_2_4_0g1.py
|
Python
|
mit
| 1,005
| 0.018905
|
from starcluster.clustersetup import ClusterSetup
from starcluster.log
|
ger import log
class STARInstaller(ClusterSetup):
def run(self, nodes, master, user, user_shell, volumes):
for node in nodes:
log.info("Installing STAR 2.4.0g1 on %s" % (node.alias))
node.ssh.execute('wget -c -P /opt/software/star https://github.com/alexdobin/STAR/archive/STAR_2.4.0g1.tar.gz')
node.ssh.execute('tar -xzf /opt/software/star/STAR_2.4.0g1.tar.gz -C /opt/software/star')
node.ssh.execute('make STAR -C /opt/software/star/STAR-STAR_2.4.0g1/source')
node.s
|
sh.execute('mkdir -p /usr/local/Modules/applications/star/;touch /usr/local/Modules/applications/star/2.4.0g1')
node.ssh.execute('echo "#%Module" >> /usr/local/Modules/applications/star/2.4.0g1')
node.ssh.execute('echo "set root /opt/software/star/STAR-STAR_2.4.0g1" >> /usr/local/Modules/applications/star/2.4.0g1')
node.ssh.execute('echo -e "prepend-path\tPATH\t\$root/bin/Linux_x86_64" >> /usr/local/Modules/applications/star/2.4.0g1')
|
t-lou/JSudokuSolver
|
digit_generator/digit_gen.py
|
Python
|
apache-2.0
| 4,207
| 0.013311
|
#! /bin/python2
import numpy
import cv2
import os
import struct
BLACK = (0,)
WHITE = (255,)
DIR_OUT = "./img/"
SIZE_CANVAS = 50
SIZE_FEATURE = 28
SIZE_BLOCK = 32
DIGITS = tuple([chr(ord("0") + i) for i in range(10)] + [""])
FONTS = (cv2.FONT_HERSHEY_SIMPLEX, cv2.FONT_HERSHEY_PLAIN,
cv2.FONT_HERSHEY_DUPLEX, cv2.FONT_HERSHEY_COMPLEX,
cv2.FONT_HERSHEY_TRIPLEX, cv2.FONT_HERSHEY_COMPLEX_SMALL,
cv2.FONT_HERSHEY_SCRIPT_SIMPLEX)
def clear_path():
if not os.path.isdir(DIR_OUT):
os.mkdir(DIR_OUT)
def get_tf(angle, center, offset):
a_radian = numpy.radians(angle)
c = numpy.cos(a_radian)
s = numpy.sin(a_radian)
tl = numpy.matrix([[1.0, 0.0, -center[0]], [0.0, 1.0, -center[1]], [0.0, 0.0, 1.0]])
rot = numpy.matrix([[c, -s, 0.0 ], [s, c, 0.0], [0.0, 0.0, 1.0]])
retl = numpy.matrix([[1.0, 0.0, (center[0] + offset[0])], [0.0, 1.0, (center[1] + offset[1])], [0.0, 0.0, 1.0]])
return retl * rot * tl
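# Descriptive note (added by the editor): get_tf composes translate-to-origin (tl), rotation
# by `angle` degrees (rot) and translate-back-plus-offset (retl), so for example
# get_tf(0.0, (25.0, 25.0), (3, -2)) reduces to a pure shift of the canvas by (3, -2).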
os.system("rm -rf " + DIR_OUT + "*")
def create_dataset(fn_f, fn_l, num_sample):
fl = open(fn_l, "wb")
ff = open(fn_f, "wb")
# headers (MNIST/IDX style: magic 2049 marks a label file, 2051 an image file)
fl.write(struct.pack(">i", 2049))
fl.write(struct.pack(">i", num_sample))
ff.write(struct.pack(">i", 2051))
ff.write(struct.pack(">i", num_sample))
ff.write(struct.pack(">i", SIZE_FEATURE))
ff.write(struct.pack(">i", SIZE_FEATURE))
canvas = numpy.ones((SIZE_CANVAS, SIZE_CANVAS), dtype = numpy.uint8) * 255
# cv2.imwrite(dir_img + "canvas.png", canvas)
for id_img in range(num_sample):
copy = numpy.copy(canvas)
id_digit = numpy.random.randint(0, len(DIGITS))
id_font = numpy.random.randint(0, len(FONTS))
thickness = numpy.random.randint(1, 3)
base_line = cv2.getTextSize(DIGITS[id_digit], FONTS[id_font], 1.0, thickness)[1] + 1
scale_font = float(numpy.random.randint(40, 60)) / 100.0
scale = float(SIZE_BLOCK) * 0.5 * scale_font / float(base_line)
shift = float(SIZE_CANVAS) / 2.0 - float(SIZE_BLOCK) * 0.5 * scale_font
cv2.putText(copy, DIGITS[id_digit], (0, 2 * base_line + 1),
FONTS[id_font], 1.0, BLACK, thickness)
copy = cv2.warpAffine(copy, numpy.matrix([[scale, 0.0, shift], [0.0, scale, shift]]),
copy.shape, borderValue = WHITE)
# draw lines
thickness_line = numpy.random.randint(1, 3)
cv2.line(copy, (0, (SIZE_CANVAS - SIZE_BLOCK) / 2 - thickness_line),
(SIZE_CANVAS - 1, (SIZE_CANVAS - SIZE_BLOCK) / 2 - thickness_line),
BLACK, thickness_line)
cv2.line(copy, (0, (SIZE_CANVAS + SIZE_BLOCK) / 2 + thickness_line),
(SIZE_CANVAS - 1, (SIZE_CANVAS + SIZE_BLOCK) / 2 + thickness_line),
BLACK, thickness_line)
cv2.line(copy, ((SIZE_CANVAS - SIZE_BLOCK) / 2 - thickness_line, 0),
((SIZE_CANVAS - SIZE_BLOCK) / 2 - thickness_line, SIZE_CANVAS - 1),
BLACK, thickness_line)
cv2.line(copy, ((SIZE_CANVAS + SIZE_BLOCK) / 2 + thickness_line, 0),
((SIZE_CANVAS + SIZE_BLOCK) / 2 + thickness_line, SIZE_CANVAS - 1),
BLACK, thickness_line)
# rotation
copy = cv2.warpAffine(copy, get_tf(float(numpy.random.randint(-10,11)), (float(SIZE_CANVAS) / 2.0, float(SIZE_CANVAS) / 2.0),
(numpy.random.randint(-3, 4), numpy.random.randint(-3, 4)))[0:2, :],
copy.shape, borderValue = WHITE)
copy = copy[(SIZE_CANVAS - SIZE_FEATURE) / 2:(SIZE_CANVAS + SIZE_FEATURE) / 2,
(SIZE_CANVAS - SIZE_FEATURE) / 2:(SIZE_CANVAS + SIZE_FEATURE) / 2]
# cv2.imwrite(DIR_OUT + "{}.png".format(id_img), copy)
copy[copy < 19
|
2] = 0
copy[copy >= 192] = 255
copy = copy.astype(numpy.uint8)
ff.write(copy.data)
fl.write(numpy.uint8(id_digit))
if id_img % 1000 == 0:
print id_img, num_sample
fl.close()
ff.close()
create_dataset(DIR_OUT + "printed_feature_train", DIR_OUT + "printed_label_train", 100000)
print "training data complete"
create_dataset(DIR_OUT + "printed_feature
|
_valid", DIR_OUT + "printed_label_valid", 10000)
print "test data complete"
|
jbarciauskas/slack-stats
|
openedxstats/urls.py
|
Python
|
bsd-3-clause
| 319
| 0
|
from django.conf.urls import url, include
from django.contrib import admin
from rest_framework import routers, serializers, viewsets
from .views import HomePageView
urlpatterns = [
url(r'^$', Hom
|
ePageView.as_view(), name='home'),
url(r'^admin/', admin.site.url
|
s),
url(r'^', include('slackdata.urls')),
]
|
DemocracyClub/UK-Polling-Stations
|
polling_stations/apps/bug_reports/apps.py
|
Python
|
bsd-3-clause
| 96
| 0
|
f
|
rom
|
django.apps import AppConfig
class BugReportsConfig(AppConfig):
name = "bug_reports"
|
CLVsol/oehealth
|
oehealth_prescription/oehealth_patient_medication.py
|
Python
|
agpl-3.0
| 2,721
| 0.009555
|
# -*- encoding: utf-8 -*-
################################################################################
# #
# Copyright (C) 2013-Today Carlos Eduardo Vercelino - CLVsol #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU Affero General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU Affero General Public License for more details. #
# #
# You should have received a copy of the GNU Affero General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
################################################################################
from osv import osv
from osv import fields
class oehealth_patient_medication(osv.Model):
_name = 'oehealth.patient.medication'
_columns = {
'patient_id': fields.many2
|
one('oehealth.patient', string='Patient',),
#'doctor': fields.many2one('oehealth.physician', string='Physician',
# help='Physician who prescribed the medicament'),
'adverse_reacti
|
on': fields.text(string='Adverse Reactions',
help='Side effects or adverse reactions that the patient experienced'),
'notes': fields.text(string='Extra Info'),
'is_active': fields.boolean(string='Active',
help='Check if the patient is currently taking the medication'),
'course_completed': fields.boolean(string='Course Completed'),
'template': fields.many2one('oehealth.medication.template',
string='Medication Template', ),
'discontinued_reason': fields.char(size=256,
string='Reason for discontinuation',
help='Short description for discontinuing the treatment'),
'discontinued': fields.boolean(string='Discontinued'),
}
oehealth_patient_medication()
|
r-rathi/error-control-coding
|
perf/perf_plots.py
|
Python
|
mit
| 11,256
| 0.002221
|
import argparse
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
from perf.errsim import *
def plot_x_vs_pmf(params, show=True, fpath=None):
def plot(ax, x, param, **plotargs):
if param['pb'] is None:
param['pb'] = param['pe']
label = 'BSC pe={pe} m={m} n={n}'.format(**param)
else:
label = 'GBMM pe={pe} pb={pb} m={m} n={n}'.format(**param)
pmf = errpmf(**param)
if 'label' not in plotargs:
plotargs['label'] = label
ax.plot(x, pmf[x], **plotargs)
plt.close('all')
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=plt.figaspect(1/2))
t = np.arange(11)
for param in params:
plot(ax, t, param.copy(), lw=1.5)
ax.axhline(1e-15, color='black', linestyle='dashed')
ax.set_ylim(1e-25, 1e-1)
ax.set_ylabel('PMF, $p_X(x)$')
ax.set_yscale('log')
ax.grid(True)
ax.set_xticks(t)
ax.set_xlabel('Number of Symbols, $x$')
ax.set_title('Symbol Error PMF (Prob. of x errors in n digits)')
ax.legend(fontsize=12)
if fpath:
fig.savefig(fpath)
if show:
plt.show()
def plot_x_vs_pndc(params, show=True, fpath=None):
def plot(ax, x, param, **plotargs):
if param['pb'] is None:
param['pb'] = param['pe']
label = 'BSC pe={pe} m={m} n={n}'.format(**param)
else:
label = 'GBMM pe={pe} pb={pb} m={m} n={n}'.format(**param)
pmf = errpmf(**param)
pndc = prob_ndc(pmf)
if 'labe
|
l' not in plotargs:
plotargs['label'] = label
ax.plot(x, pndc[x], **plotargs)
|
plt.close('all')
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=plt.figaspect(1/2))
t = np.arange(11)
for param in params:
plot(ax, t, param.copy(), lw=1.5)
ax.axhline(1e-15, color='black', linestyle='dashed')
ax.set_ylim(1e-25, 1e-1)
ax.set_ylabel('$P_{ndc}(t)$')
ax.set_yscale('log')
ax.grid(True)
ax.set_xticks(t)
ax.set_xlabel('Number of Symbols, $x$')
ax.set_title('Probability of not-decoding-correctly')
ax.legend(fontsize=12)
if fpath:
fig.savefig(fpath)
if show:
plt.show()
def plot_t_vs_ober(params, show=True, fpath=None):
def plot(ax, t, param, **plotargs):
if param['pb'] is None:
param['pb'] = param['pe']
label = 'BSC pe={pe} m={m} n={n}'.format(**param)
else:
label = 'GBMM pe={pe} pb={pb} m={m} n={n}'.format(**param)
pmf = errpmf(**param)
ober = ber_out(param['pe'], param['pb'], pmf)
if 'label' not in plotargs:
plotargs['label'] = label
ax.plot(t, ober[t], **plotargs)
plt.close('all')
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=plt.figaspect(1/2))
t = np.arange(11)
for param in params:
plot(ax, t, param.copy(), lw=1.5)
ax.axhline(1e-15, color='black', linestyle='dashed')
ax.set_ylim(1e-25, 1e-5)
ax.set_ylabel('Output BER, $BER_o$')
ax.set_yscale('log')
ax.grid(True)
ax.set_xticks(t)
ax.set_xlabel('Number of Symbols corrected, $t$')
ax.set_title('Number of Symbols Corrected vs. Output BER')
ax.legend(fontsize=12)
if fpath:
fig.savefig(fpath)
if show:
plt.show()
def plot_r_vs_ober(params, show=True, fpath=None):
def plot(axes, t, param, **plotargs):
if param['pb'] is None:
param['pb'] = param['pe']
label = 'BSC pe={pe} m={m} n={n}'.format(**param)
else:
label = 'GBMM pe={pe} pb={pb} m={m} n={n}'.format(**param)
pmf = errpmf(**param)
ober = ber_out(param['pe'], param['pb'], pmf)
if 'label' not in plotargs:
plotargs['label'] = label
n = param['n']
frac_t = 100 * t / n
k = n - 2 * t
r = k / n
axes[0].plot(frac_t, ober[t], **plotargs)
axes[1].plot(r, ober[t], **plotargs)
plt.close('all')
fig, axes = plt.subplots(nrows=1, ncols=2, figsize=plt.figaspect(1/2))
t = np.arange(16)
for param in params:
plot(axes, t, param.copy(), lw=1.5)
for ax in axes:
ax.axhline(1e-15, color='black', linestyle='dashed')
ax.set_ylim(1e-25, 1e-5)
ax.set_ylabel('Output BER, $BER_o$')
ax.set_yscale('log')
ax.grid(True)
axes[0].set_xlim(0, 10)
axes[0].set_xlabel('Fraction of Symbols corrected, $t/n$ [%]')
axes[0].set_title('Fraction of Symbols corrected vs. Output BER')
axes[0].legend(loc='upper right', fontsize=12)
axes[1].set_xlim(0.8, 1.0)
axes[1].set_xlabel('Coding Rate, $R = k/n = (n - 2t)/n$')
axes[1].set_title('Coding Rate vs. Output BER')
axes[1].legend(loc='upper left', fontsize=12)
plt.tight_layout()
if fpath:
fig.savefig(fpath)
if show:
plt.show()
def plot_pe_vs_ober(params, show=True, fpath=None):
def plot(ax, pe, param, **plotargs):
if param['pb'] is None:
label = 'BSC m={m} n={n} t={t}'.format(**param)
else:
label = 'GBMM pb={pb} m={m} n={n} t={t}'.format(**param)
ober = pe_vs_ober(pe, **param)
if 'label' not in plotargs:
plotargs['label'] = label
ax.plot(pe, ober, **plotargs)
plt.close('all')
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=plt.figaspect(1/2))
pe = 10.0 ** np.arange(-15, -0.5, 0.5)
for param in params:
plot(ax, pe, param.copy(), lw=1.5)
ax.axhline(1e-15, color='black', linestyle='dashed')
ax.set_xscale('log')
ax.set_yscale('log')
ax.set_xlim(pe[0], pe[-1])
ax.set_ylim(1e-25, 1e-1)
ax.set_xlabel('Input BER, $BER_i$')
ax.set_ylabel('Output BER, $BER_o$')
ax.set_title('Input vs. Output BER')
ax.legend(loc='upper left', fontsize=12)
ax.grid(True)
if fpath:
fig.savefig(fpath)
if show:
plt.show()
def plot_ebn0_vs_ober(params, show=True, fpath=None):
def plot(ax, ebn0, param, **plotargs):
if param['pb'] is None:
label = 'BSC m={m} n={n} t={t}'.format(**param)
else:
label = 'GBMM pb={pb} m={m} n={n} t={t}'.format(**param)
n = param['n']
t = param['t']
R = (n - 2 * t)/n
esn0 = ebn0 + dB(R)
pe = esn02pe(esn0)
ober = pe_vs_ober(pe, **param)
if 'label' not in plotargs:
plotargs['label'] = label
ax.plot(ebn0, ober, **plotargs)
plt.close('all')
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=plt.figaspect(1/2))
ebn0 = np.arange(5, 20.5, 0.5)
# Uncoded (FEC input) for reference
pe = esn02pe(ebn0)
iber = ber_in(pe=pe, pb=0.5)
ax.plot(ebn0, pe, lw=1.5, color='black', label='Uncoded BSC')
ax.plot(ebn0, iber, lw=1.5, color='black', linestyle='dashed',
label='Uncoded GBMM(pb=0.5)')
for param in params:
plot(ax, ebn0, param.copy(), lw=1.5)
ax.axhline(1e-15, color='black', linestyle='dashed')
ax.set_yscale('log')
ax.set_xlim(ebn0[0], ebn0[-1])
ax.set_xticks(ebn0[::2])
ax.set_ylim(1e-25, 1e-1)
ax.set_xlabel('$E_b/N_0 [dB]$')
ax.set_ylabel('Output BER, $BER_o$')
ax.set_title('Eb/N0 vs. Output BER')
ax.legend(fontsize=10)
ax.grid(True)
if fpath:
fig.savefig(fpath)
if show:
plt.show()
if __name__ == '__main__':
argp = argparse.ArgumentParser(description='Create code performance plots.')
argp.add_argument('dir', metavar='DIR', help='plots directory')
argp.add_argument('--no-show', dest='show', action='store_false',
help='Don\'t show, just save to file.')
argns = argp.parse_args()
dirpath = os.path.abspath(argns.dir)
os.makedirs(dirpath, exist_ok=True)
# pe vs ober
params = [
# GBMM
dict(pb=0.5, m=8, n=124, t=4),
dict(pb=0.5, m=8, n=124, t=6),
dict(pb=0.5, m=8, n=124, t=8),
dict(pb=0.5, m=8, n=248, t=4),
dict(pb=0.5, m=8, n=248, t=6),
dict(pb=0.5, m=8, n=248, t=8),
dict(pb=0.5, m=10, n=528, t=7),
# BSC
dict(p
|
rajrakeshdr/pychess
|
lib/pychess/widgets/ImageMenu.py
|
Python
|
gpl-3.0
| 4,643
| 0.0112
|
from __future__ import print_function
from gi.repository import Gtk
from gi.repository import Gdk
from gi.repository import GObject
class ImageMenu(Gtk.EventBox):
def __init__ (self, image, child):
GObject.GObject.__init__(self)
self.add(image)
self.subwindow = Gtk.Window()
self.subwindow.set_decorated(False)
self.subwindow.set_resizable(False)
self.subwindow.set_type_hint(Gdk.WindowTypeHint.DIALOG)
self.subwindow.add(child)
self.subwindow.connect_after("draw", self.__sub_onExpose)
self.subwindow.connect("button_press_event", self.__sub_onPress)
self.subwindow.connect("motion_notify_event", self.__sub_onMotion)
self.subwindow.connect("leave_notify_event", self.__sub_onMotion)
self.subwindow.connect("delete-event", self.__sub_onDelete)
self.subwindow.connect("focus-out-event", self.__sub_onFocusOut)
child.show_all()
self.setOpen(False)
self.connect("button_press_event", self.__onPress)
def setOpen (self, isopen):
self.isopen = isopen
if isopen:
topwindow = self.get_parent()
while not isinstance(topwindow, Gtk.Window):
topwindow = topwindow.get_parent()
x, y = topwindow.get_window().get_position()
x += self.get_allocation().x + self.get_allocation().width
y += self.get_allocation().y
self.subwindow.move(x, y)
self.subwindow.props.visible = isopen
self.set_state(self.isopen and Gtk.StateType.SELECTED or Gtk.StateType.NORMAL)
def __onPress (self, self_, event):
if event.button == 1 and event.type == Gdk.EventType.BUTTON_PRESS:
self.setOpen(not self.isopen)
def __sub_setGrabbed (self, grabbed):
if grabbed and not Gdk.pointer_is_grabbed():
Gdk.pointer_grab(self.subwindow.get_window(), True,
Gdk.EventMask.LEAVE_NOTIFY_MASK|
Gdk.EventMask.POINTER_MOTION_MASK|
Gdk.EventMask.BUTTON_PRESS_MASK,
None, None, Gdk.CURRENT_TIME)
Gdk.keyboard_grab(self.subwindow.get_window(), True, Gdk.CURRENT_TIME)
elif Gdk.pointer_is_grabbed():
Gdk.pointer_ungrab(Gdk.CURRENT_TIME)
Gdk.keyboard_ungrab(Gdk.CURRENT_TIME)
def __sub_onMotion (self, subwindow, event):
a = subwindow.get_allocation()
self.__sub_setGrabbed(not (0 <= event.x < a.width and 0 <= event.y < a.height))
def __sub_onPress (self, subwindow, event):
a = subwindow.get_allocation()
if not (0 <= event.x < a.width and 0 <= event.y < a.height):
Gdk.pointer_ungrab(event.time)
self.setOpen(False)
def __sub_onExpose (self, subwindow, ctx):
a = subwindow.get_allocation()
context = subwindow.get_window().cairo_create()
context.set_line_width(2)
context.rectangle (a.x, a.y, a.width, a.height)
sc = self.get_style_context()
found, color = sc.lookup_color("p_dark_color")
context.set_source_rgba(*color)
context.stroke()
self.__sub_setGrabbed(self.isopen)
def __sub_onDelete (self, subwindow, event):
self.setOpen(False)
return True
def __sub_onFocusOut (self, subwindow, event):
self.setOpen(False)
def switchWithImage (image, dial
|
og):
parent = image.get_parent()
parent.remove(image)
imageMenu = ImageMenu(image, dialog)
parent.add(imageMenu)
imageMenu.show()
if __name__ == "__main__":
win = Gtk.Window()
vbox = Gtk.VBox()
vbox.add(Gtk.Label(label="Her er d
|
er en kat"))
image = Gtk.Image.new_from_icon_name("gtk-properties", Gtk.IconSize.BUTTON)
vbox.add(image)
vbox.add(Gtk.Label(label="Her er der ikke en kat"))
win.add(vbox)
table = Gtk.Table(2, 2)
table.attach(Gtk.Label(label="Minutes:"), 0, 1, 0, 1)
spin1 = Gtk.SpinButton(Gtk.Adjustment(0,0,100,1))
table.attach(spin1, 1, 2, 0, 1)
table.attach(Gtk.Label(label="Gain:"), 0, 1, 1, 2)
spin2 = Gtk.SpinButton(Gtk.Adjustment(0,0,100,1))
table.attach(spin2, 1, 2, 1, 2)
table.set_border_width(6)
switchWithImage(image, table)
def onValueChanged (spin):
print(spin.get_value())
spin1.connect("value-changed", onValueChanged)
spin2.connect("value-changed", onValueChanged)
win.show_all()
win.connect("delete-event", Gtk.main_quit)
Gtk.main()
|
chrisjsewell/ipypublish
|
ipypublish/sphinx/tests/test_bibgloss.py
|
Python
|
bsd-3-clause
| 3,453
| 0.002317
|
# -*- coding: utf-8 -*-
"""
test_sphinx
~~~~~~~~~~~
General Sphinx test and check output.
"""
import sys
import pytest
import sphinx
from ipypublish.sphinx.tests import get_test_source_dir
from ipypublish.tests.utils import HTML2JSONParser
@pytest.mark.sphinx(buildername="html", srcdir=get_test_source_dir("bibgloss_basic"))
def test_basic(app, status, warning, get_sphinx_app_output, data_regression):
app.build()
assert "build succeeded" in status.getvalue() # Build succeeded
warnings = warning.getvalue().strip()
assert warnings == ""
output = get_sphin
|
x_app_output(app, buildername="html")
parser = HTML2JSONParser()
parser.feed(output)
if sphinx.version_info >= (2,):
data_regression.check(parser.parsed, basename="test_basic_v2")
else:
data_regression.check(parser.parsed, basename="test_basic_v1")
@pyt
|
est.mark.sphinx(buildername="html", srcdir=get_test_source_dir("bibgloss_sortkeys"))
def test_sortkeys(app, status, warning, get_sphinx_app_output, data_regression):
app.build()
assert "build succeeded" in status.getvalue() # Build succeeded
warnings = warning.getvalue().strip()
assert warnings == ""
output = get_sphinx_app_output(app, buildername="html")
parser = HTML2JSONParser()
parser.feed(output)
if sphinx.version_info >= (2,):
data_regression.check(parser.parsed, basename="test_sortkeys_v2")
else:
data_regression.check(parser.parsed, basename="test_sortkeys_v1")
@pytest.mark.sphinx(buildername="html", srcdir=get_test_source_dir("bibgloss_unsorted"))
def test_unsorted(app, status, warning, get_sphinx_app_output, data_regression):
app.build()
assert "build succeeded" in status.getvalue() # Build succeeded
warnings = warning.getvalue().strip()
assert warnings == ""
output = get_sphinx_app_output(app, buildername="html")
parser = HTML2JSONParser()
parser.feed(output)
if sphinx.version_info >= (2,):
data_regression.check(parser.parsed, basename="test_unsorted_v2")
else:
data_regression.check(parser.parsed, basename="test_unsorted_v1")
@pytest.mark.sphinx(
buildername="html", srcdir=get_test_source_dir("bibgloss_missingref")
)
def test_missingref(app, status, warning, get_sphinx_app_output):
app.build()
assert "build succeeded" in status.getvalue() # Build succeeded
warnings = warning.getvalue().strip()
if (
"could not relabel bibglossary reference [missingkey]" not in warnings
and "WARNING: citation not found: missingkey" not in warnings # sphinx < 2
): # sphinx >= 2
raise AssertionError(
"should raise warning for missing citation `missingkey`: {}".format(
warnings
)
)
@pytest.mark.sphinx(
buildername="html", srcdir=get_test_source_dir("bibgloss_duplicatekey")
)
def test_duplicatekey(app, status, warning, get_sphinx_app_output):
with pytest.raises(KeyError):
app.build()
@pytest.mark.skipif(
sys.version_info < (3, 0),
reason="SyntaxError on import of texsoup/data.py line 135",
)
@pytest.mark.sphinx(buildername="html", srcdir=get_test_source_dir("bibgloss_tex"))
def test_load_tex(app, status, warning, get_sphinx_app_output):
app.build()
assert "build succeeded" in status.getvalue() # Build succeeded
warnings = warning.getvalue().strip()
assert warnings == ""
|
labase/eica
|
tests/testwebfunctionaldb.py
|
Python
|
gpl-2.0
| 5,489
| 0.00293
|
#! /usr/bin/env python
# -*- coding: UTF8 -*-
# This file is part of the Carinhas program
# Copyright 2013-2014 Carlo Oliveira <[email protected]>,
# `Labase <http://labase.selfip.org/>`__; `GPL <http://is.gd/3Udt>`__.
#
# Carinhas is free software; you can redistribute it and/or
# modify it within the ter
|
ms of the GNU General Public Licen
|
se as
# published by the Free Software Foundation (FSF); under version 2 of the
# License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation (FSF) Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
############################################################
SuperPython - Web Functionality Test
############################################################
Checks the functionality of the web server.
"""
__author__ = 'carlo'
import unittest
import sys
import bottle
import os
import sys
import os
project_server = os.path.dirname(os.path.abspath(__file__))
project_server = os.path.join(project_server, '../src/')
# print(project_server)
sys.path.insert(0, project_server)
# make sure the default templates directory is known to Bottle
templates_dir = os.path.join(project_server, 'server/views/')
# print(templates_dir)
if templates_dir not in bottle.TEMPLATE_PATH:
bottle.TEMPLATE_PATH.insert(0, templates_dir)
if sys.version_info[0] == 2:
from mock import MagicMock, patch
else:
from unittest.mock import MagicMock, patch, ANY
from webtest import TestApp
from server.control import application as appbottle
import server.modelo_redis as cs
import server.control as ct
class FunctionalWebTest(unittest.TestCase):
def setUp(self):
cs.DBF = '/tmp/redis_test.db'
pass
def test_default_page(self):
""" test_default_page """
app = TestApp(appbottle)
response = app.get('/static/index.html')
self.assertEqual('200 OK', response.status)
self.assertTrue('<title>Jogo Eica - Cadastro</title>' in response.text, response.text[:1000])
def test_default_redirect(self):
"""test_default_redirect """
app = TestApp(appbottle)
response = app.get('/')
self.assertEqual('302 Found', response.status)
def test_register(self):
"""test_register """
# app = TestApp(appbottle)
# response = app.get('/static/register?doc_id="10000001"&module=projeto2222')
rec_id, response = self._get_id('3333')
self.assertEqual('200 OK', response.status)
self.assertTrue(rec_id in response, str(response))
# rec_id = str(response).split('ver = main("')[1].split('e0cb4e39e071")')[0] + 'e0cb4e39e071'
expected_record = "{'module': 'projeto2222', 'user': 'projeto2222-lastcodename', 'idade': '00015',"
received_record = cs.DRECORD.get(rec_id)
assert expected_record in str(received_record),\
"{}: {}".format(rec_id, received_record)
def _get_id(self, ref_id='e0cb4e39e071', url='/static/register?doc_id="10000001"&module=projeto2222'):
"""test_store """
app = TestApp(appbottle)
user, idade, ano, sexo = 'projeto2222-lastcodename', '00015', '0009', 'outro'
user_data = dict(doc_id=ref_id, user=user, idade=idade, ano=ano, sexo=sexo)
response = app.get(url, params=user_data)
return str(response).split('ver = main("')[1].split('")')[0], response
def test_store(self):
"""test_store """
app = TestApp(appbottle)
# response = app.get('/static/register?doc_id="10000001"&module=projeto2222')
# rec_id = str(response).split('ver = main("')[1].split('e0cb4e39e071")')[0] + 'e0cb4e39e071'
rec_id, _ = self._get_id()
response = app.post('/record/store', self._pontua(rec_id))
self.assertEqual('200 OK', response.status)
self.assertTrue('", "tempo": "20' in response, str(response))
# self.assertTrue('{"module": "projeto2222", "jogada": [{"carta": "2222",' in str(response), str(response))
expected_record = "{'module': 'projeto2222', 'user': 'projeto2222-lastcodename', 'idade': '00015',"
received_record = str(response)
assert expected_record.replace("'", '"') in received_record,\
"{}: {}".format(rec_id, received_record)
def _pontua(self, ref_id):
ct.LAST = ref_id
jogada = {"doc_id": ref_id,
"carta": 2222,
"casa": 2222,
"move": 2222,
"ponto": 2222,
"tempo": 2222,
"valor": 2222}
return jogada
def test_pontos(self):
rec_id, response = self._get_id()
app = TestApp(appbottle)
app.post('/record/store', self._pontua(rec_id))
ct.LAST = rec_id
response = app.get('/pontos')
self.assertEqual('200 OK', response.status)
self.assertTrue('projeto2222-lastcodename' in response, str(response))
self.assertTrue('<h3>Idade: 10 Genero: outro Ano Escolar: 9</h3>' in response, str(response))
self.assertTrue('<td><span>2222<span></td>' in response, str(response))
if __name__ == '__main__':
unittest.main()
|
leogregianin/pychess
|
lib/pychess/Utils/lutils/perft.py
|
Python
|
gpl-3.0
| 857
| 0
|
from time import time
from pychess.Utils.lutils
|
.lmovegen import genAllMoves
|
from pychess.Utils.lutils.lmove import toLAN
def do_perft(board, depth, root):
nodes = 0
if depth == 0:
return 1
for move in genAllMoves(board):
board.applyMove(move)
if board.opIsChecked():
board.popMove()
continue
count = do_perft(board, depth - 1, root - 1)
nodes += count
board.popMove()
if root > 0:
print("%8s %10d %10d" % (toLAN(board, move), count, nodes))
return nodes
def perft(board, depth, root):
for i in range(depth):
start_time = time()
nodes = do_perft(board, i + 1, root)
ttime = time() - start_time
print("%2d %10d %5.2f %12.2fnps" %
(i + 1, nodes, ttime, nodes / ttime if ttime > 0 else nodes))
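# Illustrative usage (added by the editor; the board class is an assumption, e.g. an LBoard
# from pychess.Utils.lutils set to the starting position): perft(board, 3, 0) prints node
# counts and timing for depths 1 through 3, while a root value > 0 additionally makes
# do_perft print per-move node counts at the top level.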
|
endlisnis/weather-records
|
testHourly.py
|
Python
|
gpl-3.0
| 159
| 0.018868
|
#!/usr/bin/python
from __future__ import print_function
import weather, time
a = time.time(); weather.hourly.load("ottawa
|
"); print time.time() - a
r
|
aw_input()
|
wkschwartz/django
|
tests/model_indexes/tests.py
|
Python
|
bsd-3-clause
| 13,530
| 0.0017
|
from unittest import mock
from django.conf import settings
from django.db import connection, models
from django.db.models.functions import Lower, Upper
from django.test import SimpleTestCase, TestCase, skipUnlessDBFeature
from django.test.utils import isolate_apps
from .models import Book, ChildModel1, ChildModel2
class SimpleIndexesTests(SimpleTestCase):
def test_suffix(self):
self.assertEqual(models.Index.suffix, 'idx')
def test_repr(self):
index = models.Index(fields=['title'])
multi_col_index = models.Index(fields=['title', 'author'])
partial_index = models.Index(fields=['title'], name='long_books_idx', condition=models.Q(pages__gt=400))
covering_index = models.Index(
fields=['title'],
name='include_idx',
include=['author', 'pages'],
)
opclasses_index = models.Index(
fields=['headline', 'body'],
name='opclasses_idx',
opclasses=['varchar_pattern_ops', 'text_pattern_ops'],
)
func_index = models.Index(Lower('title'), name='book_func_idx')
self.assertEqual(repr(index), "<Index: fields='title'>")
self.assertEqual(repr(multi_col_index), "<Index: fields='title, author'>")
self.assertEqual(repr(partial_index), "<Index: fields='title' condition=(AND: ('pages__gt', 400))>")
self.assertEqual(
repr(covering_index),
"<Index: fields='title' include='author, pages'>",
)
self.assertEqual(
repr(opclasses_index),
"<Index: fields='headline, body' "
"opclasses='varchar_pattern_ops, text_pattern_ops'>",
)
self.assertEqual(repr(func_index), "<Index: expressions='Lower(F(title))'>")
def test_eq(self):
index = models.Index(fields=['title'])
same_index = models.Index(fields=['title'])
another_index = models.Index(fields=['title', 'author'])
index.model = Book
same_index.model = Book
another_index.model = Book
self.assertEqual(index, same_index)
self.assertEqual(index, mock.ANY)
self.assertNotEqual(index, another_index)
def test_eq_func(self):
index = models.Index(Lower('title'), models.F('author'), name='book_func_idx')
same_index = models.Index(Lower('title'), 'author', name='book_func_idx')
another_index = models.Index(Lower('title'), name='book_func_idx')
self.assertEqual(index, same_index)
self.assertEqual(index, mock.ANY)
self.assertNotEqual(index, another_index)
def test_index_fields_type(self):
with self.assertRaisesMessage(ValueError, 'Index.fields must be a list or tuple.'):
models.Index(fields='title')
def test_index_fields_strings(self):
msg = 'Index.fields must contain only strings with field names.'
with self.assertRaisesMessage(ValueError, msg):
models.Index(fields=[models.F('title')])
def test_fields_tuple(self):
self.assertEqual(models.Index(fields=('title',)).fields, ['title'])
def test_requires_field_or_expression(self):
msg = 'At least one field or expression is required to define an index.'
with self.assertRaisesMessage(ValueError, msg):
models.Index()
def test_expressions_and_fields_mutually_exclusive(self):
msg = "Index.fields and expressions are mutually exclusive."
with self.assertRaisesMessage(ValueError, msg):
models.Index(Upper('foo'), fields=['field'])
def test_opclasses_requires_index_name(self):
with self.assertRaisesMessage(ValueError, 'An index must be named to use opclasses.'):
models.Index(opclasses=['jsonb_path_ops'])
def test_opclasses_requires_list_or_tuple(self):
with self.assertRaisesMessage(ValueError, 'Index.opclasses must be a list or tuple.'):
models.Index(name='test_opclass', fields=['field'], opclasses='jsonb_path_ops')
def test_opclasses_and_fields_same_length(self):
msg = 'Index.fields and Index.opclasses must have the same number of elements.'
with self.assertRaisesMessage(ValueError, msg):
models.Index(name='test_opclass', fields=['field', 'other'], opclasses=['jsonb_path_ops'])
def test_condition_requires_index_name(self):
with self.assertRaisesMessage(ValueError, 'An index must be named to use condition.'):
models.Index(condition=models.Q(pages__gt=400))
def test_expressions_requires_index_name(self):
msg = 'An index must be named to use expressions.'
with self.assertRaisesMessage(ValueError, msg):
models.Index(Lower('field'))
def test_expressions_with_opclasses(self):
msg = (
'Index.opclasses cannot be used with expressions. Use '
'django.contrib.postgres.indexes.OpClass() instead.'
)
with self.assertRaisesMessage(ValueError, msg):
models.Index(
Lower('field'),
name='test_func_opclass',
opclasses=['jsonb_path_ops'],
)
def test_condition_must_be_q(self):
with self.assertRaisesMessage(ValueError, 'Index.condition must be a Q instance.'):
models.Index(condition='invalid', name='long_book_idx')
def test_include_requires_list_or_tuple(self):
msg = 'Index.include must be a list or tuple.'
with self.assertRaisesMessage(ValueError, msg):
models.Index(name='test_include', fields=['field'], include='other')
def test_include_requires_index_name(self):
msg = 'A covering index must be named.'
with self.assertRaisesMessage(ValueError, msg):
models.Index(fields=['field'], include=['other'])
def test_name_auto_generation(self):
index = models.Index(fields=['author'])
index.set_name_with_model(Book)
self.assertEqual(index.name, 'model_index_author_0f5565_idx')
# '-' for DESC columns should be accounted for in the index name.
index = models.Index(fields=['-author'])
index.set_name_with_model(Book)
self.assertEqual(index.name, 'model_index_author_708765_idx')
# fields may be truncated in the name. db_column is used for naming.
long_field_index = models.Index(fields=['pages'])
long_field_index.set_name_with_model(Book)
self.assertEqual(long_field_index.name, 'model_index_page_co_69235a_idx')
# suffix can't be longer than 3 characters.
long_field_index.suffix = 'suff'
msg = 'Index too long for multiple database support. Is self.suffix longer than 3 characters?'
with self.assertRaisesMessage(AssertionError, msg):
long_field_index.set_name_with_model(
|
Book)
@isolate_apps('model_indexes')
def test_name_auto_generation_with_quoted_db_table(self):
class QuotedDbTable(models.Model):
name = models.CharField(max_length=50)
class Meta:
db_table = '"t_quoted"'
index = models.Index(fields=['name'])
index.set_name_with_model(QuotedDbTable)
self.assertEqual(index.name, 't_quoted_name_e4ed1b_idx')
    def test_deconstruction(self):
index = models.Index(fields=['title'], db_tablespace='idx_tbls')
index.set_name_with_model(Book)
path, args, kwargs = index.deconstruct()
self.assertEqual(path, 'django.db.models.Index')
self.assertEqual(args, ())
self.assertEqual(
kwargs,
{'fields': ['title'], 'name': 'model_index_title_196f42_idx', 'db_tablespace': 'idx_tbls'}
)
def test_deconstruct_with_condition(self):
index = models.Index(
name='big_book_index',
fields=['title'],
condition=models.Q(pages__gt=400),
)
index.set_name_with_model(Book)
path, args, kwargs = index.deconstruct()
self.assertEqual(path, 'django.db.models.Index')
self.assertEqual(args, ())
self.assertEqual(
kwargs,
{
'fields': ['title'],
|
stinebuu/nest-simulator
|
doc/userdoc/contribute/templates_styleguides/pynest_api_template.py
|
Python
|
gpl-2.0
| 4,742
| 0.001266
|
# -*- coding: utf-8 -*-
#
# pynest_api_template.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""[[ This template demonstrates how to create a docstring for the PyNEST API.
If you have modified an API, please ensure you update the docstring!
The format is based on `NumPy style docstring
<https://numpydoc.readthedocs.io/en/latest/format.html>`_ and uses
reStructuredText markup. Please review the syntax rules if you are
unfamiliar with either reStructuredText or NumPy style docstrings.
Copy this file and replace the sample text with a description of the API.
The double bracketed sections [[ ]], which provide explanations, should be
completely removed from your final version - Including this entire
docstring!
]]
"""
def GetConnections(source=None, target=None, synapse_model=None, synapse_label=None):
"""Return a `SynapseCollection` representing the connection identifiers.
[[ In a single 'summary line', state what the function does ]]
[[ All functions should have a docstring with at least a summary line ]]
[[ Below summary line (separated by new line), there should be an extended
summary section that should be used to clarify functionality.]]
Any combination of `source`, `target`, `synapse_model` and
`synapse_label` parameters is permitted.
[[ Deprecation warnings should appear directly after the extended summary.
It should state in what version the object was deprecated, when it will
be removed and what recommend way obtains the same functionality]]
.. deprecated:: 1.6.0
`ndobj_old` will be removed in NumPy 2.0.0, it is replaced by
`ndobj_new` because the latter works also with array subclasses.
[[ For all headings ensure the underline --- is at least the length of the
heading ]]
Parameters
----------
source : NodeCollection, optional
Source node IDs, only connections from these
pre-synaptic neurons are returned
target : NodeCollection, optional
Target node IDs, only connections to these
postsynaptic neurons are returned
synapse_model : str, optional
Only connections with this synapse type are returned
synapse_label : int, optional
(non-negative) only connections with this synapse label are returned
Returns
-------
SynapseCollection:
Object representing the source-node_id, target-node_id, target-thread, synapse-id, port of connections, see
:py:class:`.SynapseCollection` for more.
Raises
-------
TypeError
Notes
-------
Details on the connectivity. [[ Here details regarding the code or further
explanations can be included. This section may include mathematical
equations, written in LaTeX format. You can include references to relevant
papers using the reStructuredText syntax. Do not include model formulas ]]
The discrete-time Fourier time-convolution [1]_ property states that
.. math::
x(n) * y(n) \Leftrightarrow X(e^{j\omega } )Y(e^{j\omega } )
The value of :math:`\omega` is larger than 5.
[[ The See Also section should include 2 or 3 related functions. ]]
See Also
---------
func_a : Function a with its description.
func_b, func_c
References
-----------
[[ Note the format of the reference. No bold nor italics is used. Last name
of author(s) followed by year, title in sentence case and full name of
journal followed by volume and page range. Include the doi if
applicable.]]
.. [1] Bonewald LF. (2011). The amazing osteocyte. Journal of Bone and
Mineral Research 26(2):229–238. DOI: 10.1002/jbmr.320.
"""
# [[ in line comments should be used to explain why this code is here]]
# This code was included because of bug Y when running X
# Temporary, I HOPE HOPE HOPE
if model is not None and syn_spec is not None:
raise kernel.NESTerror(
"'model' is an alias for 'syn_spec' and cannot"
" be used together with 'syn_spec'.")
|
openstack/vitrage
|
vitrage/evaluator/template_validation/content/v2/get_param_validator.py
|
Python
|
apache-2.0
| 1,505
| 0
|
# Copyright 2019 - Nokia
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from vitrage.evaluator.template_functions import function_resolver
from vitrage.evaluator.template_functions import GET_PARAM
from vitrage.evaluator.template_functions.v2.functions import get_param
from vitrage.evaluator.template_validation.base import get_custom_fault_result
from vitrage.evaluator.template_validation.base import ValidationError
from vitrage.evaluator.template_validation.content.base import \
get_content_correct_result
class GetParamValidator(object):
@classmethod
def validate(cls, template, actual_params):
try:
function_resolver.validate_function(
function_resolver.FuncInfo(
name=GET_PARAM, func=get_param, error_code=0),
template,
actual_params=actual_params)
except ValidationError as e:
return get_custom_fault_result(e.code, e.details)
return get_content_correct_result()
|
mahabs/nitro
|
nssrc/com/citrix/netscaler/nitro/resource/config/lldp/lldpparam.py
|
Python
|
apache-2.0
| 5,812
| 0.031142
|
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class lldpparam(base_resource) :
""" Configuration for lldp params resource. """
def __init__(self) :
self._holdtimetxmult = 0
self._timer = 0
self._mode = ""
@property
def holdtimetxmult(self) :
"""A multiplier for calculating the duration for which the receiving device stores the LLDP information in its database before discarding or removing it. The duration is calculated as the holdtimeTxMult (Holdtime Multiplier) parameter value multiplied by the timer (Timer) parameter value.<br/>Default value: 4<br/>Minimum length = 1<br/>Maximum length = 20.
"""
try :
return self._holdtimetxmult
except Exception as e:
raise e
@holdtimetxmult.setter
def holdtimetxmult(self, holdtimetxmult) :
"""A multiplier for calculating the duration for which the receiving device stores the LLDP information in its database before discarding or removing it. The duration is calculated as the holdtimeTxMult (Holdtime Multiplier) parameter value multiplied by the timer (Timer) parameter value.<br/>Default value: 4<br/>Minimum length = 1<br/>Maximum length = 20
"""
try :
self._holdtimetxmult = holdtimetxmult
except Exception as e:
raise e
@property
def timer(self) :
"""Interval, in seconds, between LLDP packet data units (LLDPDUs). that the NetScaler ADC sends to a directly connected device.<br/>Default value: 30<br/>Minimum length = 1<br/>Maximum length = 3000.
"""
try :
return self._timer
except Exception as e:
raise e
@timer.setter
def timer(self, timer) :
"""Interval, in seconds, between LLDP packet data units (LLDPDUs). that the NetScaler ADC sends to a directly connected device.<br/>Default value: 30<br/>Minimum length = 1<br/>Maximum length = 3000
"""
try :
self._timer = timer
except Exception as e:
raise e
@property
def mode(self) :
"""Global mode of Link Layer Discovery Protocol (LLDP) on the NetScaler ADC. The resultant LLDP mode of an interface depends on the LLDP mode configured at the global and the interface levels.<br/>Possible values = NONE, TRANSMITTER, RECEIVER, TRANSCEIVER.
"""
		try :
return self._mode
except Exception as e:
raise e
@mode.setter
def mode(self, mode) :
"""Global mode of Link Layer Discovery Protocol (LLDP) on the NetScaler ADC. The resultant LLDP mode of an interface depends on the LLDP mode configured at the global and the interface levels.<br/>Possible values = NONE, T
|
RANSMITTER, RECEIVER, TRANSCEIVER
"""
try :
self._mode = mode
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(lldpparam_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.lldpparam
except Exception as e :
raise e
def _get_object_name(self) :
""" Returns the value of object identifier argument
"""
try :
return None
except Exception as e :
raise e
@classmethod
def update(cls, client, resource) :
""" Use this API to update lldpparam.
"""
try :
if type(resource) is not list :
updateresource = lldpparam()
updateresource.holdtimetxmult = resource.holdtimetxmult
updateresource.timer = resource.timer
updateresource.mode = resource.mode
return updateresource.update_resource(client)
except Exception as e :
raise e
@classmethod
def unset(cls, client, resource, args) :
""" Use this API to unset the properties of lldpparam resource.
Properties that need to be unset are specified in args array.
"""
try :
if type(resource) is not list :
unsetresource = lldpparam()
return unsetresource.unset_resource(client, args)
except Exception as e :
raise e
@classmethod
def get(cls, client, name="", option_="") :
""" Use this API to fetch all the lldpparam resources that are configured on netscaler.
"""
try :
if not name :
obj = lldpparam()
response = obj.get_resources(client, option_)
return response
except Exception as e :
raise e
class Mode:
NONE = "NONE"
TRANSMITTER = "TRANSMITTER"
RECEIVER = "RECEIVER"
TRANSCEIVER = "TRANSCEIVER"
class lldpparam_response(base_response) :
def __init__(self, length=1) :
self.lldpparam = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.lldpparam = [lldpparam() for _ in range(length)]
|
jkaberg/GakkGakk
|
manage.py
|
Python
|
mit
| 1,062
| 0
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
from flask_script import Manager, Shell, Server
from flask_migrate import MigrateCommand
from gakkgakk.app import create_app
from gakkgakk.models import User
from gakkgakk.settings import DevConfig, ProdConfig
from gakkgakk.database import db
reload(sys)
sys.setdefaultencoding('utf-8')
app = create_app(ProdConfig)
HERE = os.path.abspath(os.path.dirname(__file__))
TEST_PATH = os.path.join(HERE, 'tests')
manager = Manager(app)
def _make_context():
"""Return context dict for a shell session so you can access
app, db, and the User model by default.
"""
return {'app': app, 'db': db, 'User': User}
@manager.command
def test():
"""Run the tests."""
    import pytest
exit_code = pytest.main([TEST_PATH, '--verbose'])
    return exit_code
manager.add_command('server', Server(host='0.0.0.0', threaded=True))
manager.add_command('shell', Shell(make_context=_make_context))
manager.add_command('db', MigrateCommand)
if __name__ == '__main__':
manager.run()
|
cghr/cghr-chef-repository
|
cookbooks/trac/files/default/plugins-stock/public_wiki_policy.py
|
Python
|
apache-2.0
| 2,244
| 0.004902
|
from fnmatch import fnmatchcase
from trac.config import Option
from trac.core import *
from trac.perm import IPermissionPolicy
revision = "$Rev: 11490 $"
url = "$URL: https://svn.edgewall.org/repos/trac/tags/trac-1.0.1/sample-plugins/permissions/public_wiki_policy.py $"
class PublicWikiPolicy(Component):
"""Allow public access to some wiki pages.
This is a sample permission policy plugin illustrating how to check
permission on realms.
Don't forget to integrate that plugin in the appropriate place in the
list of permission policies:
{{{
[trac]
permission_policies = PublicWikiPolicy, DefaultPermissionPolicy
}}}
Then you can configure which pages you want to make public:
{{{
[public_wiki]
view = Public*
modify = PublicSandbox/*
}}}
"""
implements(IPermissionPolicy)
view = Option('public_wiki', 'view', 'Public*',
"""Case-sensitive glob pattern used for granting view permission on
all Wiki pages matching it.""")
modify = Option('public_wiki', 'modify', 'Public*',
"""Case-sensitive glob pattern used for granting modify permissions
on all Wiki pages matching it.""")
def check_permission(self, action, username, resource, perm):
if resource: # fine-grained permission check
            if resource.realm == 'wiki': # wiki realm or resource
if resource.id: # ... it's a resource
if action == 'WIKI_VIEW': # (think 'VIEW' here)
pattern = self.view
else:
pattern = self.modify
                    if fnmatchcase(resource.id, pattern):
return True
else: # ... it's a realm
return True
# this policy ''may'' grant permissions on some wiki pages
else: # coarse-grained permission check
#
# support for the legacy permission checks: no resource specified
# and realm information in the action name itself.
#
if action.startswith('WIKI_'):
return True
# this policy ''may'' grant permissions on some wiki pages
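# Illustrative walk-through (hypothetical page names, not part of the upstream
# sample plugin): with view = 'Public*' and modify = 'PublicSandbox/*',
#   fnmatchcase('PublicFaq', 'Public*')                  -> True, WIKI_VIEW granted
#   fnmatchcase('PublicSandbox/Test', 'PublicSandbox/*') -> True, modify granted
#   fnmatchcase('PrivateNotes', 'Public*')               -> False, the policy abstains
#     and the decision falls through to DefaultPermissionPolicy.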
|
SteveAbb/Vestigo
|
Vestigo/vestigo.py
|
Python
|
mit
| 405
| 0.081481
|
#!/usr/bin/env python
from settings import Settings
from scan import Scanner
from logger import Logger
def main():
try:
#Read config
|
file
settings=Settings()
#Set up logger
logger=Logger(settings)
		#Create scanner
scanner=Scanner(settings,logger)
#Begin scanning
scanner.StartScanning()
except KeyboardInterrupt:
scanner.StopScanning()
if __name__ == "__main__":
main()
|
andycasey/snob
|
snob/nips_search.py
|
Python
|
mit
| 67,954
| 0.004709
|
"""
An estimator for modelling data from a mixture of Gaussians,
using an objective function based on minimum message length.
"""
__all__ = [
"GaussianMixture",
"kullback_leibler_for_multivariate_normals",
"responsibility_matrix",
"split_component", "merge_component", "delete_component",
]
import logging
import numpy as np
import scipy
import scipy.stats as stats
import scipy.optimize as op
from collections import defaultdict
from sklearn.cluster import k_means_ as kmeans
logger = logging.getLogger(__name__)
def _total_parameters(K, D, covariance_type):
r"""
Return the total number of model parameters :math:`Q`, if a full
covariance matrix structure is assumed.
.. math:
Q = \frac{K}{2}\left[D(D+3) + 2\right] - 1
:param K:
The number of Gaussian mixtures.
:param D:
The dimensionality of the data.
:returns:
The total number of model parameters, :math:`Q`.
"""
return (0.5 * D * (D + 3) * K) + (K - 1)
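# Quick numeric check of the formula above (illustrative only, not part of the
# original module): for K = 3 mixtures in D = 2 dimensions,
#   Q = 0.5 * 2 * (2 + 3) * 3 + (3 - 1) = 15 + 2 = 17
# which agrees with (K/2) * [D(D + 3) + 2] - 1 = 1.5 * 12 - 1 = 17,
# so _total_parameters(3, 2, "full") returns 17.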
def _responsibility_matrix(y, mean, covariance, weight, covariance_type):
r"""
Return the responsibility matrix,
.. math::
r_{ij} = \frac{w_{j}f\left(x_i;\theta_j\right)}{\sum_{k=1}^{K}{w_k}f\left(x_i;\theta_k\right)}
where :math:`r_{ij}` denotes the conditional probability of a datum
:math:`x_i` belonging to the :math:`j`-th component. The effective
membership associated with each component is then given by
.. math::
n_j = \sum_{i=1}^{N}r_{ij}
\textrm{and}
\sum_{j=1}^{M}n_{j} = N
where something.
:param y:
The data values, :math:`y`.
:param mu:
The mean values of the :math:`K` multivariate normal distributions.
:param cov:
The covariance matrices of the :math:`K` multivariate normal
distributions. The shape of this array will depend on the
``covariance_type``.
:param weight:
The current estimates of the relative mixing weight.
:param full_output: [optional]
If ``True``, return the responsibility matrix, and the log likelihood,
which is evaluated for free (default: ``False``).
:returns:
The responsibility matrix. If ``full_output=True``, then the
log likelihood (per observation) will also be returned.
"""
precision = _compute_precision_cholesky(covariance, covariance_type)
weighted_log_prob = np.log(weight) + \
_estimate_log_gaussian_prob(y, mean, precision, covariance_type)
log_likelihood = scipy.misc.logsumexp(weighted_log_prob, axis=1)
with np.errstate(under="ignore"):
log_responsibility = weighted_log_prob - log_likelihood[:, np.newaxis]
responsibility = np.exp(log_responsibility).T
return (responsibility, log_likelihood)
class BaseGaussianMixture(object):
r"""
Model data from (potentially) many multivariate Gaussian distributions,
using minimum message length (MML) as the objective function.
:param covariance_type: [optional]
The structure of the covariance matrix for individual components.
The available options are: `full` for a free covariance matrix, or
`diag` for a diagonal covariance matrix (default: ``diag``).
:param covariance_regularization: [optional]
Regularization strength to add to the diagonal of covariance matrices
(default: ``0``).
:param threshold: [optional]
The relative improvement in message length required before stopping an
expectation-maximization step (default: ``1e-5``).
:param max_em_iterations: [optional]
The maximum number of iterations to run per expectation-maximization
loop (default: ``10000``).
"""
parameter_names = ("mean", "covariance", "weight")
def __init__(self, covariance_type="full", covariance_regularization=0,
mixture_probability=1e-3, percent_scatter=1, predict_mixtures=3,
threshold=1e-3, max_em_iterations=10000, **kwargs):
available = ("full", "diag", )
covariance_type = covariance_type.strip().lower()
if covariance_type not in available:
raise ValueError("covariance type '{}' is invalid. "\
"Must be one of: {}".format(
covariance_type, ", ".join(available)))
if 0 > covariance_regularization:
raise ValueError(
"covariance_regu
|
larization must be a non-negative float")
if 0 >= threshold:
raise ValueError("threshold must be a positive value")
if 1 > max_em_iterations:
raise ValueError("max_em_iterations must b
|
e a positive integer")
self._threshold = threshold
self._mixture_probability = mixture_probability
self._percent_scatter = percent_scatter
self._predict_mixtures = predict_mixtures
self._max_em_iterations = max_em_iterations
self._covariance_type = covariance_type
self._covariance_regularization = covariance_regularization
return None
@property
def mean(self):
r""" Return the multivariate means of the Gaussian mixtures. """
return self._mean
@property
def covariance(self):
r""" Return the covariance matrices of the Gaussian mixtures. """
return self._covariance
@property
def weight(self):
r""" Return the relative weights of the Gaussian mixtures. """
return self._weight
@property
def covariance_type(self):
r""" Return the type of covariance stucture assumed. """
return self._covariance_type
@property
def covariance_regularization(self):
r"""
Return the regularization applied to diagonals of covariance matrices.
"""
return self._covariance_regularization
@property
def threshold(self):
r""" Return the threshold improvement required in message length. """
return self._threshold
@property
def max_em_iterations(self):
r""" Return the maximum number of expectation-maximization steps. """
return self._max_em_iterations
def _expectation(self, y, **kwargs):
r"""
Perform the expectation step of the expectation-maximization algorithm.
:param y:
The data values, :math:`y`.
:returns:
A three-length tuple containing the responsibility matrix,
the log likelihood, and the change in message length.
"""
responsibility, log_likelihood = _responsibility_matrix(
y, self.mean, self.covariance, self.weight, self.covariance_type)
ll = np.sum(log_likelihood)
I = _message_length(y, self.mean, self.covariance, self.weight,
responsibility, -ll, self.covariance_type,
**kwargs)
return (responsibility, log_likelihood, I)
def _maximization(self, y, responsibility, parent_responsibility=1,
**kwargs):
r"""
Perform the maximization step of the expectation-maximization
algorithm.
:param y:
The data values, :math:`y`.
# TODO
:param responsibility:
The responsibility matrix for all :math:`N` observations being
partially assigned to each :math:`K` component.
# TODO
"""
K = self.weight.size
N, D = y.shape
# Update the weights.
effective_membership = np.sum(responsibility, axis=1)
weight = (effective_membership + 0.5)/(N + K/2.0)
w_responsibility = parent_responsibility * responsibility
w_effective_membership = np.sum(w_responsibility, axis=1)
mean = np.empty(self.mean.shape)
for k, (R, Nk) in enumerate(zip(w_responsibility, w_effective_membership)):
mean[k] = np.sum(R * y.T, axis=1) / Nk
# TODO: Use parent responsibility when initializing?
covariance = _estimate_covariance_matrix(y, responsibility, mean,
self.covariance_type, self.covariance_regularization)
# TODO: callback?
return self.set_parameter
|
AntoDev96/GuidaSky
|
handlers/callback.py
|
Python
|
gpl-3.0
| 1,351
| 0.005181
|
from constants import constants, callback_name_list
from controller import plan_controller, navigable_list_controller, navigable_inline_keyboard_controller, settings_controller
from telepot.namedtuple import ReplyKeyboardRemove
from bot import bot
from decorators.callback import callback_dict as callback_list
"""
callback_list = {
callback_name_list["setting"]: settings_
|
controller.set_settings,
}
"""
def handle_callback_data(msg, action_prefix):
callback_data = msg['data']
message = msg['message']['text'] if 'text' in msg['message'] else msg['message']['caption']
chat_id = msg['message']['chat']['id']
callback_query_id = msg['id']
inline_message_id = msg['message']["from"]["id"]
message_id = msg['message']['message_id']
for callback in callback_list:
if callback_data.startswith(callback):
            answer = callback_list[callback](callback_query_id, callback_data, chat_id=chat_id, message_id=message_id, inline_message_id=inline_message_id)
if answer == None:
action_prefix[chat_id] = " "
else:
action_prefix[chat_id] = answer
break
else:
bot.sendMessage(chat_id, constants["callbackNotFound"], reply_markup=ReplyKeyboardRemove())
action_prefix[chat_id] = " "
bot.answerCallbackQuery(callback_query_id)
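# Dispatch sketch with hypothetical data (not taken from the bot itself): if
# callback_list maps the prefix "setting" to settings_controller.set_settings,
# an incoming callback_data of "setting:lang" matches via startswith("setting"),
# that handler runs, and its return value (or " " when it returns None) becomes
# the next action_prefix for the chat before the callback query is answered.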
|
psychopy/versions
|
psychopy/app/coder/codeEditorBase.py
|
Python
|
gpl-3.0
| 19,323
| 0.001139
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Provides class BaseCodeEditor; base class for
CodeEditor class in Coder
and CodeBox class in dlgCode (code component)
"""
# Part of the PsychoPy library
# Copyright (C) 2002-2018 Jonathan Peirce (C) 2019-2020 Open Science Tools Ltd.
# Distributed under the terms of the GNU General Public License (GPL).
import wx
import wx.stc
import sys
from pkg_resources import parse_version
from psychopy.constants import PY3
from psychopy import logging
from psychopy import prefs
from ..themes import ThemeMixin
from psychopy.localization import _translate
class BaseCodeEditor(wx.stc.StyledTextCtrl, ThemeMixin):
"""Provides base class for code editors
See the wxPython demo styledTextCtrl 2.
"""
def __init__(self, parent, ID, pos, size, style):
wx.stc.StyledTextCtrl.__init__(self, parent, ID, pos, size, style)
self.notebook = parent
self.UNSAVED = False
self.filename = ""
self.fileModTime = None # was file modified outside of CodeEditor
self.AUTOCOMPLETE = True
self.autoCompleteDict = {}
self._commentType = {'Py': '#', 'JS': '//', 'Both': '//' or '#'}
# doesn't pause strangely
self.locals = None # will contain the local environment of the script
self.prevWord = None
# remove some annoying stc key commands
CTRL = wx.stc.STC_SCMOD_CTRL
self.CmdKeyClear(ord('['), CTRL)
self.CmdKeyClear(ord(']'), CTRL)
self.CmdKeyClear(ord('/'), CTRL)
self.CmdKeyClear(ord('/'), CTRL | wx.stc.STC_SCMOD_SHIFT)
# 4 means 'tabs are bad'; 1 means 'flag inconsistency'
self.SetMargins(0, 0)
self.SetUseTabs(False)
self.SetTabWidth(4)
self.SetIndent(4)
self.SetBufferedDraw(False)
self.SetEOLMode(wx.stc.STC_EOL_LF)
# setup margins for line numbers
self.SetMarginType(0, wx.stc.STC_MARGIN_NUMBER)
self.SetMarginWidth(0, 40)
# Setup a margin to hold fold markers
self.SetMarginType(1, wx.stc.STC_MARGIN_SYMBOL)
self.SetMarginMask(1, wx.stc.STC_MASK_FOLDERS)
self.SetMarginSensitive(1, True)
self.SetMarginWidth(1, 12)
self.MarkerDefine(wx.stc.STC_MARKNUM_FOLDEROPEN,
wx.stc.STC_MARK_BOXMINUS, "white", "#808080")
self.MarkerDefine(wx.stc.STC_MARKNUM_FOLDER,
wx.stc.STC_MARK_BOXPLUS, "white", "#808080")
self.MarkerDefine(wx.stc.STC_MARKNUM_FOLDERSUB,
wx.stc.STC_MARK_VLINE, "white", "#808080")
self.MarkerDefine(wx.stc.STC_MARKNUM_FOLDERTAIL,
wx.stc.STC_MARK_LCORNER, "white", "#808080")
self.MarkerDefine(wx.stc.STC_MARKNUM_FOLDEREND,
wx.stc.STC_MARK_BOXPLUSCONNECTED, "white", "#808080")
self.MarkerDefine(wx.stc.STC_MARKNUM_FOLDEROPENMID,
wx.stc.STC_MARK_BOXMINUSCONNECTED, "white", "#808080")
self.MarkerDefine(wx.stc.STC_MARKNUM_FOLDERMIDTAIL,
wx.stc.STC_MARK_TCORNER, "white", "#808080")
# Set what kind of events will trigger a modified event
self.SetModEventMask(wx.stc.STC_MOD_DELETETEXT |
wx.stc.STC_MOD_INSERTTEXT)
# Bind context menu
self.Bind(wx.EVT_CONTEXT_MENU, self.OnContextMenu)
def OnContextMenu(self, event):
"""Sets the context menu for components using code editor base class"""
if not hasattr(self, "UndoID"):
# Create a new ID for all items
self.UndoID = wx.NewId()
self.RedoID = wx.NewId()
self.CutID = wx.NewId()
self.CopyID = wx.NewId()
self.PasteID = wx.NewId()
self.DeleteID = wx.NewId()
self.SelectAllID = wx.NewId()
# Bind items to relevant method
self.Bind(wx.EVT_MENU, self.onUndo, id=self.UndoID)
self.Bind(wx.EVT_MENU, self.onRedo, id=self.RedoID)
self.Bind(wx.EVT_MENU, self.onCut, id=self.CutID)
self.Bind(wx.EVT_MENU, self.onCopy, id=self.CopyID)
self.Bind(wx.EVT_MENU, self.onPaste, id=self.PasteID)
self.Bind(wx.EVT_MENU, self.onDelete, id=self.DeleteID)
self.Bind(wx.EVT_MENU, self.onSelectAll, id=self.SelectAllID)
# Create menu and menu items
menu = wx.Menu()
undoItem = wx.MenuItem(menu, self.UndoID, _translate("Undo"))
redoItem = wx.MenuItem(menu, self.RedoID, _translate("Redo"))
cutItem = wx.MenuItem(menu, self.CutID, _translate("Cut"))
copyItem = wx.MenuItem(menu, self.CopyID, _translate("Copy"))
pasteItem = wx.MenuItem(menu, self.PasteID, _translate("Paste"))
deleteItem = wx.MenuItem(menu, self.DeleteID, _translate("Delete"))
selectItem = wx.MenuItem(menu, self.SelectAllID, _translate("Select All"))
# Check whether items should be enabled
undoItem.Enable(self.CanUndo())
            redoItem.Enable(self.CanRedo())
cutItem.Enable(self.CanCut())
copyItem.Enable(self.CanCopy())
pasteItem.Enable(self.CanPaste())
deleteItem.Enable(self.CanCopy())
# Append items to menu
menu.Append(undoItem)
menu.Append(redoItem)
menu.AppendSeparator()
menu.Append(cutItem)
            menu.Append(copyItem)
menu.Append(pasteItem)
menu.AppendSeparator()
menu.Append(deleteItem)
menu.Append(selectItem)
self.PopupMenu(menu)
menu.Destroy()
def onUndo(self, event):
"""For context menu Undo"""
foc = self.FindFocus()
if hasattr(foc, 'Undo'):
foc.Undo()
def onRedo(self, event):
"""For context menu Redo"""
foc = self.FindFocus()
if hasattr(foc, 'Redo'):
foc.Redo()
def onCut(self, event):
"""For context menu Cut"""
foc = self.FindFocus()
if hasattr(foc, 'Cut'):
foc.Cut()
def onCopy(self, event):
"""For context menu Copy"""
foc = self.FindFocus()
if hasattr(foc, 'Copy'):
foc.Copy()
def onPaste(self, event):
"""For context menu Paste"""
foc = self.FindFocus()
if hasattr(foc, 'Paste'):
foc.Paste()
def onSelectAll(self, event):
"""For context menu Select All"""
foc = self.FindFocus()
if hasattr(foc, 'SelectAll'):
foc.SelectAll()
def onDelete(self, event):
"""For context menu Delete"""
foc = self.FindFocus()
if hasattr(foc, 'DeleteBack'):
foc.DeleteBack()
def OnKeyPressed(self, event):
pass
def HashtagCounter(self, text, nTags=0):
# Hashtag counter - counts lines beginning with hashtags in selected text
for lines in text.splitlines():
if lines.startswith('#'):
nTags += 1
elif lines.startswith('//'):
nTags += 2
return nTags
def toggleCommentLines(self):
codeType = "Py"
if hasattr(self, "codeType"):
codeType = self.codeType
startText, endText = self._GetPositionsBoundingSelectedLines()
nLines = len(self._GetSelectedLineNumbers())
nHashtags = self.HashtagCounter(self.GetTextRange(startText, endText))
passDec = False # pass decision - only pass if line is blank
        # Test decision criteria, and catch division errors
# when caret starts at line with no text, or at beginning of line...
try:
devCrit, decVal = .6, nHashtags / nLines # Decision criteria and value
except ZeroDivisionError:
if self.LineLength(self.GetCurrentLine()) == 1:
self._ReplaceSelectedLines(self._commentType[codeType])
devCrit, decVal, passDec = 1, 0, True
else:
self.CharRightExtend() # Move caret so line is counted
devCrit, decVal = .6, nHashtags / len(self._GetSelectedLineNumbers())
newText = ''
# Add or remove hashtags/JS comments from sel
|
iZehan/spatial-pbs
|
examples/simplerun.py
|
Python
|
bsd-3-clause
| 7,287
| 0.004117
|
"""
Created on 05/12/13
@author: zw606
simple example
assumes images and labels files are named the same but in different folders
(one folder for images, one folder for labels)
"""
import glob
from os.path import join, basename
from spatch.image import spatialcontext
from spatch.image.mask import get_boundary_mask
from spatch.segmentation.patchbased import SAPS
from spatch.utilities.io import open_image, get_affine, save_3d_labels_data
from spatch.image.spatialcontext import COORDINATES, GDT
INITIAL_SPATIAL_INFO = COORDINATES
REFINEMENT_SPATIAL_INFO = GDT
def get_subject_id(fileName):
nameParts = fileName.split('.')[0].split('_')
return nameParts[0]
def initial_saps_segment(trainingSet, targetFile, imagesPath, labelsPath, patchSize, k, spatialWeight,
spatialInfoType=INITIAL_SPATIAL_INFO, maskData=None, numProcessors=21):
targetImage = open_image(join(imagesPath, targetFile))
# Ensure target subject is not included in atlases
targetId = get_subject_id(targetFile)
trainingSet = [x for x in trainingSet if get_subject_id(x) != targetId]
# initialise the spatial-pbs object
saps = SAPS(imagesPath, labelsPath, patchSize, boundaryDilation=None,
spatialWeight=spatialWeight, minValue=None, maxValue=None,
spatialInfoType=spatialInfoType)
# get results
results = saps.label_image(targetImage, k, trainingSet, queryMaskDict=maskData, numProcessors=numProcessors)
return results
def refinement_saps_segment(trainingSet, targetFile, imagesPath, labelsPath, patchSize, k, spatialWeight,
prevResultsPath, dtLabels, boundaryRefinementSize=2, preDtErosion=None,
spatialInfoType=REFINEMENT_SPATIAL_INFO, numProcessors=21):
targetImage = open_image(join(imagesPath, targetFile))
# Ensure target subject is not included in atlases
targetId = get_subject_id(targetFile)
trainingSet = [x for x in trainingSet if get_subject_id(x) != targetId]
# initialise the spatial-pbs object
saps = SAPS(imagesPath, labelsPath, patchSize, boundaryDilation=boundaryRefinementSize,
                spatialWeight=spatialWeight, minValue=None, maxValue=None,
spatialInfoType=spatialInfoType)
prevResults = open_image(join(prevResultsPath, targetFile))
refinementMask = get_boundary_mask(prevResults, boundaryRefinementSize)
queryMaskDict = {1: refinementMask}
# erosion of labels before calculating spatial context
if preDtErosion is None:
preDtErosion = boundaryRefinementSize
    # get spatial context to use from previous results
spatialInfo = spatialcontext.get_dt_spatial_context_dict(prevResults, spatialInfoType=spatialInfoType,
spatialLabels=dtLabels, labelErosion=preDtErosion,
imageData=targetImage).values()
# get results
results = saps.label_image(targetImage, k, trainingSet, queryMaskDict=queryMaskDict, spatialInfo=spatialInfo,
dtLabels=dtLabels, preDtErosion=preDtErosion, numProcessors=numProcessors)
return results
def run_leave_one_out(imagesPath, labelsPath, savePath, patchSize=7, k=15, spatialWeight=400,
prevResultsPath=None, dtLabels=None, preDtErosion=None, refinementSize=2,
numProcessors=8, fileName="*.nii.gz"):
"""
Assumes images are in common template space,
otherwise registration (not performed here) will be required for each target image
"""
files = glob.glob(join(imagesPath, fileName))
print "Number of files found:", len(files)
dataset = [basename(x) for x in files]
if prevResultsPath is not None:
# do refinement
for targetFile in dataset:
trainingSet = [x for x in dataset if x != targetFile]
results = refinement_saps_segment(trainingSet, targetFile, imagesPath, labelsPath,
patchSize, k, spatialWeight,
prevResultsPath, dtLabels, preDtErosion=preDtErosion,
boundaryRefinementSize=refinementSize,
numProcessors=numProcessors)
save_3d_labels_data(results, get_affine(join(imagesPath, targetFile)),
join(savePath, targetFile))
else:
# do initial segmentation
for targetFile in dataset:
trainingSet = [x for x in dataset if x != targetFile]
results = initial_saps_segment(trainingSet, targetFile, imagesPath, labelsPath,
patchSize, k, spatialWeight, numProcessors=numProcessors)
save_3d_labels_data(results, get_affine(join(imagesPath, targetFile)),
join(savePath, targetFile))
if __name__ == '__main__':
from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument("--imagesPath", default=None,
help="Set path to images (specify folder)")
parser.add_argument("--labelsPath", default=None,
help="Set path to labels (specify folder) ")
parser.add_argument("--savePath", default=None,
help="Set path to save results (specify folder)")
parser.add_argument("--prevResultsPath", default=None,
help="Set path to initial results for refinement (specify folder)")
parser.add_argument("--fileName", default="*.nii.gz",
help="Specify which files to work on (takes regex)")
parser.add_argument("--patchSize", type=int, default=7, nargs="+",
help="Set the patch size to use")
parser.add_argument("-k", type=int, default=15,
help="Set number of nearest neighbours to use")
parser.add_argument("--spatialWeight", type=float, default=10,
help="Set path to initial results")
parser.add_argument("--dtLabels", type=int, default=None, nargs="+",
help="Set the labels (structures) to use to provide adaptive spatial context")
parser.add_argument("--preDtErosion", type=int, default=None,
help="Set the erosion of labels data to apply prior to any distance transforms")
parser.add_argument("--refinementSize", type=int, default=2,
help="Set boundary size for refinement (number of dilations-erosions used)")
parser.add_argument("--numProcessors", type=int, default=10,
help="Set number of processors to use")
options = parser.parse_args()
run_leave_one_out(options.imagesPath, options.labelsPath, options.savePath, patchSize=options.patchSize,
k=options.k, prevResultsPath=options.prevResultsPath,
dtLabels=options.dtLabels, preDtErosion=options.preDtErosion,
spatialWeight=options.spatialWeight, numProcessors=options.numProcessors,
fileName=options.fileName, refinementSize=options.refinementSize)
print "Done!"
|
dyn888/youtube-dl
|
youtube_dl/extractor/iprima.py
|
Python
|
unlicense
| 3,713
| 0.00216
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import re
from random import random
from math import floor
from .common import InfoExtractor
from ..utils import (
ExtractorError,
remove_end,
sanitized_Request,
)
class IPrimaIE(InfoExtractor):
_WORKING = False
_VALID_URL = r'https?://play\.iprima\.cz/(?:[^/]+/)*(?P<id>[^?#]+)'
_TESTS = [{
'url': 'http://play.iprima.cz/particka/particka-92',
'info_dict': {
'id': '39152',
'ext': 'flv',
'title': 'Partička (92)',
'description': 'md5:74e9617e51bca67c3ecfb2c6f9766f45',
'thumbnail': 'http://play.iprima.cz/sites/default/files/image_crops/image_620x349/3/491483_particka-92_image_620x349.jpg',
},
'params': {
'skip_download': True, # requires rtmpdump
},
}, {
'url': 'http://play.iprima.cz/particka/tchibo-particka-jarni-moda',
'info_dict': {
'id': '9718337',
'ext': 'flv',
'title': 'Tchibo Partička - Jarní móda',
'thumbnail': 're:^http:.*\.jpg$',
},
'params': {
'skip_download': True, # requires rtmpdump
},
}, {
'url': 'http://play.iprima.cz/zpravy-ftv-prima-2752015',
'only_matching': True,
}]
def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
webpage = self._download_webpage(url, video_id)
if re.search(r'Nemáte oprávnění přistupovat na tuto stránku\.\s*</div>', webpage):
raise ExtractorError(
'%s said: You do not have permission to access this page' % self.IE_NAME, expected=True)
player_url = (
'http://embed.livebox.cz/iprimaplay/player-embed-v2.js?__tok%s__=%s' %
(floor(random() * 1073741824), floor(random() * 1073741824))
)
req = sanitized_Request(player_url)
req.add_header('Referer', url)
playerpage = self._download_webpage(req, video_id)
base_url = ''.join(re.findall(r"embed\['stream'\] = '(.+?)'.+'(\?auth=)'.+'(.+?)';", playerpage)[1])
zoneGEO = self._html_search_regex(r'"zoneGEO":(.+?),', webpage, 'zoneGEO')
if zoneGEO != '0':
base_url = base_url.replace('token', 'token_' + zoneGEO)
formats = []
for format_id in ['lq', 'hq', 'hd']:
filename = self._html_search_regex(
r'"%s_id":(.+?),' % format_id, webpage, 'filename')
if filename == 'null':
continue
real_id = self._search_regex(
r'Prima-(?:[0-9]{10}|WEB)-([0-9]+)[-_]',
filename, 'real video id')
if format_id == 'lq':
quality = 0
elif format_id == 'hq':
quality = 1
elif format_id == 'hd':
quality = 2
filename = 'hq/' + filename
formats.append({
'format_id': format_id,
'url': base_url,
'quality': quality,
'play_path': 'mp4:' + filename.replace('"', '')[:-4],
'rtmp_live': True,
'ext': 'flv',
})
self._sort_formats(formats)
return {
'id': real_id,
'title': remove_end(self._og_search_title(webpage), ' | Prima PLAY'),
'thumbnail': self._og_search_thumbnail(webpage),
'formats': formats,
'description': self._search_regex(
r'<p[^>]+itemprop="description"[^>]*>([^<]+)',
webpage, 'description', default=None),
}
|
smorand/dtol
|
src/core/controllers.py
|
Python
|
gpl-3.0
| 4,697
| 0.031935
|
# -*- coding: utf-8 -*-
#
# This file is covered by the GNU Public Licence v3 licence. See http://www.gnu.org/licenses/gpl.txt
#
'''
List of controllers, with indirections to object loaded by Spring
'''
import springpython.context
from django.http import HttpResponse
from django.template import loader, Context
from os import listdir
from os.path import isdir, isfile, sep
from settings import APPLICATION_CONTEXTS, TEMPLATE_DIRS, DEBUG
import logging
LOGGER = logging.getLogger('app')
class CommonController(object):
def __init__(self):
self.prehandler = None
self.posthandler = None
self.urls = []
def _geturls(self):
raise Exception("No URL defined")
def prehandle(self, request):
tpl = None
if isinstance(self.prehandler, list):
for ph in self.prehandler:
if isinstance(ph, PreHandler):
tpl = ph.handle(request)
if tpl != None:
break
elif isinstance(self.prehandler, PreHandler):
tpl = self.prehandler.handle(request)
return tpl
def posthandle(self, request, tpl):
if isinstance(self.posthandler, list):
for ph in self.posthandler:
if isinstance(ph, PostHandler):
ph.handle(request, tpl)
elif isinstance(self.posthandler, PostHandler):
self.posthandler.handle(request, tpl)
class PreHandler(object):
def handle(self, request):
pass
class PostHandler(object):
def handle(self, request, tpl):
pass
# Templates loading
class TemplatesContainer(object):
def __init__(self, tpldir=TEMPLATE_DIRS, prefix=''):
self.__templates = {}
self.__tpldir = tpldir
self.__prefix = prefix
self.__load()
def after_properties_set(self):
pass
def set_app_context(self, context):
pass
def __load(self):
# Load all templates found. Replace directory by _
for fileent in listdir(self.__tpldir):
if isfile(self.__tpldir + sep + fileent):
self.__templates[fileent.replace('.html', '')] = loader.get_template(self.__prefix + fileent)
elif isdir(self.__tpldir + sep + fileent):
self.__templates[fileent] = TemplatesContainer(self.__tpldir + sep + fileent, self.__prefix + fileent + sep)
def __getattr__(self, name):
if DEBUG:
self.__load()
if name not in self.__templates:
LOGGER.error('Internal error: Template %s is missing' % (name))
raise Exception('Internal error: Template %s is missing' % (name))
return self.__templates[name]
    def render(self, name, context={}):
name_i = name.split('.', 2)
tpl = self
while type(tpl) == TemplatesContainer:
try:
tpl = tpl.__getattr__(name_i.pop(0))
except:
                LOGGER.error('Internal error: Template %s is missing' % (name))
raise Exception('Internal error: Template %s is missing' % (name))
return tpl.render(Context(context))
def content(self, content):
return HttpResponse(content=content, mimetype="text/html", status=200)
def response(self, name, context={}, status=200, mimetype="text/html"):
return HttpResponse(content=self.render(name, context), mimetype=mimetype, status=status)
def redirect(self, url):
return HttpResponse(content='<html><head><meta http-equiv="refresh" content="0; url=%s"/></head></html>' % url, mimetype="text/html", status=200)
def forbidden(self):
return self.response('forbidden')
def empty(self):
return self.content('')
def error(self, msg):
return self.response('message_return', { 'error':msg })
def underConstruction(self):
return self.response('under_construction')
# Controllers are entry point of the application, so this is the good place to load the application (lazy loading)
ApplicationContext = springpython.context.ApplicationContext(APPLICATION_CONTEXTS)
'''
Declare controller. This first layer has two purposes :
1/ Check security
2/ Call the IoC managed controller method
'''
# Controllers
templates = ApplicationContext.get_object('templatesContainer')
controllersmap = {}
def run_controller(request, *kargs, **kwargs):
kwargsremain = {}
for key, val in kwargs.iteritems():
if key == 'controller':
controller = kwargs['controller']
elif key == 'method':
method = kwargs['method']
elif key == 'right':
right = kwargs['right']
else:
kwargsremain[key] = val
if controller not in controllersmap.keys():
controllersmap[controller] = ApplicationContext.get_object(controller)
controllerObj = controllersmap[controller]
try:
if right is not None and request.session.get(right, default=None) is None:
tpl = templates.forbidden()
else:
tpl = controllerObj.prehandle(request)
if tpl is None:
tpl = getattr(controllerObj, method)(request, *kargs, **kwargsremain)
controllerObj.posthandle(request, tpl)
except Exception as exc:
tpl = templates.error(exc)
return tpl
|
lantianlz/qiexing
|
www/sight/urls.py
|
Python
|
gpl-2.0
| 287
| 0
|
# -*- coding: utf-8 -*-
from django.conf.urls import patterns, url
# from django.conf import settings
urlpatterns = patterns('www.sight.views',
url(r'^$', 'sight_map'),
url(r'^(?P<sight_id>\d+)$', 'sight_detail'),
)
|
lino-framework/book
|
lino_book/projects/anna/lib/tickets/models.py
|
Python
|
bsd-2-clause
| 1,862
| 0.001611
|
# -*- coding: UTF-8 -*-
# Copyright 2016-2017 Rumma & Ko Ltd
# License: BSD (see file COPYING for details)
from lino_xl.lib.tickets.models import *
from lino.api import _
Ticket.hide_elements('closed')
# class Ticket(Ticket):
# class Meta(Ticket.Meta):
# app_label = 'tickets'
# verbose_name = _("Plea")
# verbose_name_plural = _("Pleas")
# abstract = dd.is_abstract_model(__name__, 'Ticket')
# ActiveTickets._label = _("Active pleas")
# UnassignedTickets._label = _("Unassigned pleas")
# PublicTickets._label = _("Public pleas")
# TicketsToTriage._label = _("Pleas to triage")
# TicketsToTalk._label = _("Pleas to talk")
# # TicketsToDo._label = _("Pleas to to")
# AllTickets._label = _("All pleas")
dd.update_field(
'tickets.Ticket', 'upgrade_notes', verbose_name=_("Solution"))
# dd.update_field(
# 'tickets.Ticket', 'state', default=TicketStates.todo.as_callable)
class TicketDetail(TicketDetail):
main = "general history_tab more"
general = dd.Panel("""
general1:60 votes.VotesByVotable:20 uploads.UploadsByController
description:30 comments.CommentsByRFC:30 skills.DemandsByDemander #working.SessionsByTicket:20
""", label=_("General"))
    general1 = """
summary:40 id:6 deadline
user:12 end_user:12 #faculty #topic
site workflow_buttons
"""
history_tab = dd.Panel("""
changes.ChangesByMaster:50 #stars.StarsByController:20
""", label=_("History"), required_roles=dd.login_required(Triager))
more = dd.Panel("""
more1:60 #skills.AssignableWorkersByTicket:20
    upgrade_notes LinksByTicket skills.OffersByDemander
""", label=_("More"), required_roles=dd.login_required(Triager))
more1 = """
created modified ticket_type:10
state priority project
# standby feedback closed
"""
Tickets.detail_layout = TicketDetail()
|
Yagniksuchak/CodeParser
|
src/logChunk/dictUtil.py
|
Python
|
bsd-3-clause
| 602
| 0.031561
|
#Key, dictionary[key, int], int --> dictionary[key, int]
#Given a key, dictionary and increment, set the dictionary value at
#key to dictionary[key] + inc. If there is no old value, set to inc
def incrementDict(dictKey, dictionary, inc=1):
if(dictKey in dictionary):
        dictionary[dictKey] += inc
else:
dictionary[dictKey] = inc
return dictionary
#dictionary[key, int] -> boolean
#Given a dictionary of counts return true if at least one is non zero
#and false otherwise
def nonZeroCount(dictionary):
for k,v in dictionary.iteritems():
assert(v >= 0)
if(v > 0):
return True
return False
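#Usage sketch (illustrative only):
#   counts = incrementDict("added", {})        # {'added': 1}
#   counts = incrementDict("added", counts, 2) # {'added': 3}
#   nonZeroCount(counts)                       # True
#   nonZeroCount({"added": 0})                 # False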
|
brython-dev/brython
|
www/src/Lib/test/test_uu.py
|
Python
|
bsd-3-clause
| 8,294
| 0.000844
|
"""
Tests for uu module.
Nick Mathewson
"""
import unittest
from test.support import os_helper
import os
import stat
import sys
import uu
import io
plaintext = b"The symbols on top of your keyboard are !@#$%^&*()_+|~\n"
encodedtext = b"""\
M5&AE('-Y;6)O;',@;VX@=&]P(&]F('EO=7(@:V5Y8F]A<F0@87)E("% (R0E
*7B8J*"E?*WQ^"@ """
# Stolen from io.py
class FakeIO(io.TextIOWrapper):
"""Text I/O implementation using an in-memory buffer.
Can be a used as a drop-in replacement for sys.stdin and sys.stdout.
"""
# XXX This is really slow, but fully functional
def __init__(self, initial_value="", encoding="utf-8",
errors="strict", newline="\n"):
super(FakeIO, self).__init__(io.BytesIO(),
encoding=encoding,
errors=errors,
newline=newline)
self._encoding = encoding
self._errors = errors
if initial_value:
if not isinstance(initial_value, str):
initial_value = str(initial_value)
self.write(initial_value)
self.seek(0)
def getvalue(self):
self.flush()
return self.buffer.getvalue().decode(self._encoding, self._errors)
def encodedtextwrapped(mode, filename, backtick=False):
if backtick:
res = (bytes("begin %03o %s\n" % (mode, filename), "ascii") +
encodedtext.replace(b' ', b'`') + b"\n`\nend\n")
else:
res = (bytes("begin %03o %s\n" % (mode, filename), "ascii") +
encodedtext + b"\n \nend\n")
return res
class UUTest(unittest.TestCase):
def test_encode(self):
inp = io.BytesIO(plaintext)
out = io.BytesIO()
uu.encode(inp, out, "t1")
self.assertEqual(out.getvalue(), encodedtextwrapped(0o666, "t1"))
inp = io.BytesIO(plaintext)
out = io.BytesIO()
uu.encode(inp, out, "t1", 0o644)
self.assertEqual(out.getvalue(), encodedtextwrapped(0o644, "t1"))
inp = io.BytesIO(plaintext)
out = io.BytesIO()
uu.encode(inp, out, "t1", backtick=True)
self.assertEqual(out.getvalue(), encodedtextwrapped(0o666, "t1", True))
with self.assertRaises(TypeError):
uu.encode(inp, out, "t1", 0o644, True)
def test_decode(self):
for backtick in True, False:
inp = io.BytesIO(encodedtextwrapped(0o666, "t1", backtick=backtick))
out = io.BytesIO()
uu.decode(inp, out)
self.assertEqual(out.getvalue(), plaintext)
inp = io.BytesIO(
b"UUencoded files may contain many lines,\n" +
b"even some that have 'begin' in them.\n" +
encodedtextwrapped(0o666, "t1", backtick=backtick)
)
out = io.BytesIO()
uu.decode(inp, out)
self.assertEqual(out.getvalue(), plaintext)
def test_truncatedinput(self):
inp = io.BytesIO(b"begin 644 t1\n" + encodedtext)
out = io.BytesIO()
try:
uu.decode(inp, out)
self.fail("No exception raised")
except uu.Error as e:
self.assertEqual(str(e), "Truncated input file")
def test_missingbegin(self):
inp = io.BytesIO(b"")
out = io.BytesIO()
try:
uu.decode(inp, out)
self.fail("No exception raised")
except uu.Error as e:
self.assertEqual(str(e), "No valid begin line found in input file")
def test_garbage_padding(self):
# Issue #22406
encodedtext1 = (
b"begin 644 file\n"
# length 1; bits 001100 111111 111111 111111
b"\x21\x2C\x5F\x5F\x5F\n"
b"\x20\n"
b"end\n"
)
encodedtext2 = (
b"begin 644 file\n"
# length 1; bits 001100 111111 111111 111111
b"\x21\x2C\x5F\x5F\x5F\n"
b"\x60\n"
b"end\n"
)
plaintext = b"\x33" # 00110011
for encodedtext in encodedtext1, encodedtext2:
with self.subTest("uu.decode()"):
inp = io.BytesIO(encodedtext)
out = io.BytesIO()
uu.decode(inp, out, quiet=True)
self.assertEqual(out.getvalue(), plaintext)
with self.subTest("uu_codec"):
import codecs
decoded = codecs.decode(encodedtext, "uu_codec")
self.assertEqual(decoded, plaintext)
def test_newlines_escaped(self):
# Test newlines are escaped with uu.encode
inp = io.BytesIO(plaintext)
out = io.BytesIO()
filename = "test.txt\n\roverflow.txt"
safefilename = b"test.txt\\n\\roverflow.txt"
uu.encode(inp, out, filename)
self.assertIn(safefilename, out.getvalue())
class UUStdIOTest(unittest.TestCase):
def setUp(self):
self.stdin = sys.stdin
self.stdout = sys.stdout
def tearDown(self):
sys.stdin = self.stdin
sys.stdout = self.stdout
def test_encode(self):
sys.stdin = FakeIO(plaintext.decode("ascii"))
sys.stdout = FakeIO()
uu.encode("-", "-", "t1", 0o666)
self.assertEqual(sys.stdout.getvalue(),
encodedtextwrapped(0o666, "t1").decode("ascii"))
def test_decode(self):
sys.stdin = FakeIO(encodedtextwrapped(0o666, "t1").decode("ascii"))
sys.stdout = FakeIO()
uu.decode("-", "-")
stdout = sys.stdout
sys.stdout = self.stdout
sys.stdin = self.stdin
self.assertEqual(stdout.getvalue(), plaintext.decode("ascii"))
class UUFileTest(unittest.TestCase):
def setUp(self):
# uu.encode() supports only ASCII file names
self.tmpin = os_helper.TESTFN_ASCII + "i"
self.tmpout = os_helper.TESTFN_ASCII + "o"
self.addCleanup(os_helper.unlink, self.tmpin)
self.addCleanup(os_helper.unlink, self.tmpout)
def test_encode(self):
with open(self.tmpin, 'wb') as fin:
fin.write(plaintext)
with open(self.tmpin, 'rb') as fin:
with open(self.tmpout, 'wb') as fout:
uu.encode(fin, fout, self.tmpin, mode=0o644)
with open(self.tmpout, 'rb') as fout:
s = fout.read()
            self.assertEqual(s, encodedtextwrapped(0o644, self.tmpin))
# in_file and out_file as filenames
uu.encode(self.tmpin, self.tmpout, self.tmpin, mode=0o644)
with open(self.tmpout, 'rb') as fout:
s = fout.read()
            self.assertEqual(s, encodedtextwrapped(0o644, self.tmpin))
def test_decode(self):
with open(self.tmpin, 'wb') as f:
f.write(encodedtextwrapped(0o644, self.tmpout))
with open(self.tmpin, 'rb') as f:
uu.decode(f)
with open(self.tmpout, 'rb') as f:
s = f.read()
self.assertEqual(s, plaintext)
# XXX is there an xp way to verify the mode?
def test_decode_filename(self):
with open(self.tmpin, 'wb') as f:
f.write(encodedtextwrapped(0o644, self.tmpout))
uu.decode(self.tmpin)
with open(self.tmpout, 'rb') as f:
s = f.read()
self.assertEqual(s, plaintext)
def test_decodetwice(self):
# Verify that decode() will refuse to overwrite an existing file
with open(self.tmpin, 'wb') as f:
f.write(encodedtextwrapped(0o644, self.tmpout))
with open(self.tmpin, 'rb') as f:
uu.decode(f)
with open(self.tmpin, 'rb') as f:
self.assertRaises(uu.Error, uu.decode, f)
def test_decode_mode(self):
# Verify that decode() will set the given mode for the out_file
expected_mode = 0o444
with open(self.tmpin, 'wb') as f:
f.write(encodedtextwrapped(expected_mode, self.tmpout))
# make file writable again, so it can be removed (Windows only)
self.addCleanup(os.chmod, self.tmpout, expected_mode | stat.S_IWRITE)
with open(self.tmpin, 'rb') as f:
uu.decode(f)
self.assertEqual(
stat.S_IMODE(
|
rwightman/pytorch-image-models
|
timm/models/layers/create_attn.py
|
Python
|
apache-2.0
| 3,526
| 0.002269
|
""" Attention Factory
Hacked together by / Copyright 2021 Ross Wightman
"""
import torch
from functools import partial
from .bottleneck_attn import BottleneckAttn
from .cbam import CbamModule, LightCbamModule
from .eca import EcaModule, CecaModule
from .gather_excite import GatherExcite
from .global_context import GlobalContext
from .halo_attn import HaloAttn
from .lambda_layer import LambdaLayer
from .non_local_attn import NonLocalAttn, BatNonLocalAttn
from .selective_kernel import SelectiveKernel
from .split_attn import SplitAttn
from .squeeze_excite import SEModule, EffectiveSEModule
def get_attn(attn_type):
if isinstance(attn_type, torch.nn.Module):
return attn_type
module_cls = None
if attn_type is not None:
if isinstance(attn_type, str):
attn_type = attn_type.lower()
            # Lightweight attention modules (channel and/or coarse spatial).
            # Typically added to existing network architecture blocks in addition to existing convolutions.
if attn_type == 'se':
module_cls = SEModule
elif attn_type == 'ese':
module_cls = EffectiveSEModule
elif attn_type == 'eca':
module_cls = EcaModule
elif attn_type == 'ecam':
module_cls = partial(EcaModule, use_mlp=True)
elif attn_type == 'ceca':
module_cls = CecaModule
elif attn_type == 'ge':
module_cls = GatherExcite
elif attn_type == 'gc':
module_cls = GlobalContext
elif attn_type == 'gca':
module_cls = partial(GlobalContext, fuse_add=True, fuse_scale=False)
elif attn_type == 'cbam':
module_cls = CbamModule
elif attn_type == 'lcbam':
module_cls = LightCbamModule
# Attention / attention-like modules w/ significant params
# Typically replace some of the existing workhorse convs in a network architecture.
# All of these accept a stride argument and can spatially downsample the input.
elif attn_type == 'sk':
module_cls = SelectiveKernel
elif attn_type == 'splat':
module_cls = SplitAttn
# Self-attention / attention-like modules w/ significant compute and/or params
# Typically replace some of the existing workhorse convs in a network architecture.
# All of these accept a stride argument and can spatially downsample the input.
elif attn_type == 'lambda':
return LambdaLayer
elif attn_type == 'bottleneck':
return BottleneckAttn
elif attn_type == 'halo':
return HaloAttn
elif attn_type == 'nl':
module_cls = NonLocalAttn
elif attn_type == 'bat':
module_cls = BatNonLocalAttn
# Woops!
else:
assert False, "Invalid attn module (%s)" % attn_type
elif isinstance(attn_type, bool):
if attn_type:
module_cls = SEModule
else:
module_cls = attn_type
return module_cls
def create_attn(attn_type, channels, **kwargs):
module_cls = get_attn(attn_type)
if module_cls is not None:
# NOTE: it's expected the first (positional) argument of all attention layers is the # input channels
return module_cls(channels, **kwargs)
return None
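# Usage sketch (channel counts are illustrative): the factory resolves a string
# to a module class and instantiates it with the channel count as the first
# positional argument, e.g.
#   se = create_attn('se', 64)     # -> SEModule(64)
#   eca = create_attn('eca', 128)  # -> EcaModule(128)
#   create_attn(None, 64)          # -> None (no attention requested)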
|
denever/discipline_terra
|
cover/urls.py
|
Python
|
gpl-2.0
| 198
| 0.005051
|
from django.conf.urls import patterns, include, url
from cover.views import CoverView
urlpatterns = patterns('cover.views',
url(r'^$', CoverView.as_view(), name='cover'),
)
|
mitsei/dlkit
|
dlkit/abstract_osid/commenting/searches.py
|
Python
|
mit
| 8,401
| 0.001309
|
"""Implementations of commenting abstract base class searches."""
# pylint: disable=invalid-name
# Method names comply with OSID specification.
# pylint: disable=no-init
# Abstract classes do not define __init__.
# pylint: disable=too-few-public-methods
# Some interfaces are specified as 'markers' and include no methods.
# pylint: disable=too-many-public-methods
# Number of methods are defined in specification
# pylint: disable=too-many-ancestors
# Inheritance defined in specification
# pylint: disable=too-many-arguments
# Argument signature defined in specification.
# pylint: disable=duplicate-code
# All apparent duplicates have been inspected. They aren't.
import abc
class CommentSearch:
"""The search interface for governing comment searches."""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def search_among_comments(self, comment_ids):
"""Execute this search among the given list of comments.
:param comment_ids: list of comments
:type comment_ids: ``osid.id.IdList``
:raise: ``NullArgument`` -- ``comment_ids`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def order_comment_results(self, comment_search_order):
"""Specify an ordering to the search results.
:param comment_search_order: comment search order
:type comment_search_order: ``osid.commenting.CommentSearchOrder``
:raise: ``NullArgument`` -- ``comment_search_order`` is ``null``
:raise: ``Unsupported`` -- ``comment_search_order`` is not of this service
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def get_comment_search_record(self, comment_search_record_type):
"""Gets the comment search record corresponding to the given comment search record ``Type``.
This method is used to retrieve an object implementing the
requested record.
:param comment_search_record_type: a comment search record type
:type comment_search_record_type: ``osid.type.Type``
:return: the comment search record
:rtype: ``osid.commenting.records.CommentSearchRecord``
:raise: ``NullArgument`` -- ``comment_search_record_type`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unsupported`` -- ``has_record_type(comment_search_record_type)`` is ``false``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.commenting.records.CommentSearchRecord
class CommentSearchResults:
"""This interface provides a means to capture results of a search."""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def get_comments(self):
"""Gets the comment list resulting from a search.
:return: the comment list
:rtype: ``osid.commenting.CommentList``
:raise: ``IllegalState`` -- list has already been retrieved
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.commenting.CommentList
comments = property(fget=get_comments)
@abc.abstractmethod
def get_comment_query_inspector(self):
"""Gets the inspector for the query to examine the terns used in the search.
:return: the query inspector
:rtype: ``osid.commenting.CommentQueryInspector``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.commenting.CommentQueryInspector
    comment_query_inspector = property(fget=get_comment_query_inspector)
@abc.abstractmethod
    def get_comment_search_results_record(self, comment_search_record_type):
"""Gets the comment search results record corresponding to the given comment search record ``Type``.
This method is used to retrieve an object implementing the
requested record.
:param comment_search_record_type: a comment search record type
:type comment_search_record_type: ``osid.type.Type``
:return: the comment search results record
:rtype: ``osid.commenting.records.CommentSearchResultsRecord``
:raise: ``NullArgument`` -- ``comment_search_record_type`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unsupported`` -- ``has_record_type(comment_search_record_type)`` is ``false``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.commenting.records.CommentSearchResultsRecord
class BookSearch:
"""The search interface for governing book searches."""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def search_among_books(self, book_ids):
"""Execute this search among the given list of books.
:param book_ids: list of books
:type book_ids: ``osid.id.IdList``
:raise: ``NullArgument`` -- ``book_ids`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def order_book_results(self, book_search_order):
"""Specify an ordering to the search results.
:param book_search_order: book search order
:type book_search_order: ``osid.commenting.BookSearchOrder``
:raise: ``NullArgument`` -- ``book_search_order`` is ``null``
:raise: ``Unsupported`` -- ``book_search_order`` is not of this service
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def get_book_search_record(self, book_search_record_type):
"""Gets the book search record corresponding to the given book search record ``Type``.
This method is used to retrieve an object implementing the
requested record.
:param book_search_record_type: a book search record type
:type book_search_record_type: ``osid.type.Type``
:return: the book search record
:rtype: ``osid.commenting.records.BookSearchRecord``
:raise: ``NullArgument`` -- ``book_search_record_type`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unsupported`` -- ``has_record_type(book_search_record_type)`` is ``false``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.commenting.records.BookSearchRecord
class BookSearchResults:
"""This interface provides a means to capture results of a search."""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def get_books(self):
"""Gets the book list resulting from a search.
:return: the book list
:rtype: ``osid.commenting.BookList``
:raise: ``IllegalState`` -- list has already been retrieved
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.commenting.BookList
books = property(fget=get_books)
@abc.abstractmethod
def get_book_query_inspector(self):
"""Gets the inspector for the query to examine the terns used in the search.
:return: the query inspector
:rtype: ``osid.commenting.BookQueryInspector``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.commenting.BookQueryInspector
book_query_inspector = property(fget=get_book_query_inspector)
@abc.abstractmethod
def get_book_search_results_record(self, book_search_record_type):
"""Gets the book search results record corresponding to the given book search record Type.
This method is used to retrieve an object implementing the
requested record.
:param book_search_record_type: a book search record type
:type book_search_record_type: ``osid.type.Type``
:return: the book search results record
:rtype: ``osid.commenting.records.BookSearchResultsRecord``
:raise: ``NullArgument`` -- ``BookSearchRecordType`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
        :raise: ``Unsupported`` -- ``has_record_type(book_search_record_type)`` is ``false``
        *compliance: mandatory -- This method must be implemented.*
        """
        return # osid.commenting.records.BookSearchResultsRecord
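These interfaces are abstract only; below is a minimal sketch of how a concrete class might satisfy CommentSearch. The in-memory strategy and the errors import path are assumptions for illustration, not part of dlkit itself:
# Hypothetical concrete subclass -- storage strategy and import path are assumed.
from dlkit.abstract_osid.osid.errors import NullArgument

class InMemoryCommentSearch(CommentSearch):
    def __init__(self):
        self._comment_ids = None
        self._order = None
    def search_among_comments(self, comment_ids):
        if comment_ids is None:
            raise NullArgument('comment_ids is null')
        self._comment_ids = list(comment_ids)          # restrict later result sets to these ids
    def order_comment_results(self, comment_search_order):
        if comment_search_order is None:
            raise NullArgument('comment_search_order is null')
        self._order = comment_search_order             # remember the requested ordering
    def get_comment_search_record(self, comment_search_record_type):
        raise NotImplementedError('no search records supported in this sketch')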
|
sanacl/GrimoireELK
|
grimoire/elk/github.py
|
Python
|
gpl-3.0
| 12,563
| 0.002468
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
#
# Github to Elastic class helper
#
# Copyright (C) 2015 Bitergia
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Authors:
# Alvaro del Castillo San Felix <[email protected]>
#
import json
import logging
import re
from datetime import datetime
from dateutil import parser
from .utils import get_time_diff_days
from grimoire.elk.enrich import Enrich, metadata
GITHUB = 'https://github.com/'
class GitHubEnrich(Enrich):
roles = ['assignee_data', 'user_data']
def __init__(self, db_sortinghat=None, db_projects_map=None, json_projects_map=None,
db_user='', db_password='', db_host=''):
super().__init__(db_sortinghat, db_projects_map, json_projects_map,
db_user, db_password, db_host)
self.users = {} # cache users
self.location = {} # cache users location
self.location_not_found = [] # location not found in map api
def set_elastic(self, elastic):
self.elastic = elastic
# Recover cache data from Elastic
self.geolocations = self.geo_locations_from_es()
def get_field_author(self):
return "user_data"
def get_fields_uuid(self):
return ["assignee_uuid", "user_uuid"]
def get_identities(self, item):
""" Return the identities from an item """
identities = []
item = item['data']
for identity in ['user', 'assignee']:
if item[identity]:
# In user_data we have the full user data
user = self.get_sh_identity(item[identity+"_data"])
if user:
identities.append(user)
return identities
def get_sh_identity(self, item, identity_field=None):
identity = {}
user = item # by default a specific user dict is expected
if 'data' in item and type(item) == dict:
user = item['data'][identity_field]
if not user:
return identity
identity['username'] = user['login']
identity['email'] = None
identity['name'] = None
if 'email' in user:
identity['email'] = user['email']
if 'name' in user:
            identity['name'] = user['name']
return identity
def get_geo_point(self, location):
geo_point = geo_code = None
if location is None:
return geo_point
if location in self.geolocations:
geo_location = self.geolocations[location]
            geo_point = {
"lat": geo_location['lat'],
"lon": geo_location['lon']
}
elif location in self.location_not_found:
# Don't call the API.
pass
else:
url = 'https://maps.googleapis.com/maps/api/geocode/json'
params = {'sensor': 'false', 'address': location}
r = self.requests.get(url, params=params)
try:
logging.debug("Using Maps API to find %s" % (location))
r_json = r.json()
geo_code = r_json['results'][0]['geometry']['location']
except:
if location not in self.location_not_found:
logging.debug("Can't find geocode for " + location)
self.location_not_found.append(location)
if geo_code:
geo_point = {
"lat": geo_code['lat'],
"lon": geo_code['lng']
}
self.geolocations[location] = geo_point
return geo_point
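    # Editor's note (hedged usage sketch): get_geo_point() first consults the
    # self.geolocations cache, then the list of locations already known to be
    # unresolvable, and only then calls the Google Maps geocode API. Assuming
    # set_elastic() has been called so the cache is initialised, a call such as
    #   point = enrich.get_geo_point("Madrid, Spain")
    # hits the network at most once per distinct location string and returns
    # either {"lat": ..., "lon": ...} or None. The location value is illustrative.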
def get_github_cache(self, kind, _key):
""" Get cache data for items of _type using _key as the cache dict key """
cache = {}
res_size = 100 # best size?
_from = 0
index_github = "github/" + kind
url = self.elastic.url + "/"+index_github
url += "/_search" + "?" + "size=%i" % res_size
r = self.requests.get(url)
type_items = r.json()
if 'hits' not in type_items:
logging.info("No github %s data in ES" % (kind))
else:
while len(type_items['hits']['hits']) > 0:
for hit in type_items['hits']['hits']:
item = hit['_source']
cache[item[_key]] = item
_from += res_size
r = self.requests.get(url+"&from=%i" % _from)
type_items = r.json()
if 'hits' not in type_items:
break
return cache
def geo_locations_from_es(self):
return self.get_github_cache("geolocations", "location")
def geo_locations_to_es(self):
max_items = self.elastic.max_items_bulk
current = 0
bulk_json = ""
url = self.elastic.url + "/github/geolocations/_bulk"
logging.debug("Adding geoloc to %s (in %i packs)" % (url, max_items))
for loc in self.geolocations:
if current >= max_items:
self.requests.put(url, data=bulk_json)
bulk_json = ""
current = 0
geopoint = self.geolocations[loc]
location = geopoint.copy()
location["location"] = loc
# First upload the raw issue data to ES
data_json = json.dumps(location)
# Don't include in URL non ascii codes
safe_loc = str(loc.encode('ascii', 'ignore'),'ascii')
geo_id = str("%s-%s-%s" % (location["lat"], location["lon"],
safe_loc))
bulk_json += '{"index" : {"_id" : "%s" } }\n' % (geo_id)
bulk_json += data_json +"\n" # Bulk document
current += 1
self.requests.put(url, data = bulk_json)
logging.debug("Adding geoloc to ES Done")
def get_elastic_mappings(self):
""" geopoints type is not created in dynamic mapping """
mapping = """
{
"properties": {
"assignee_geolocation": {
"type": "geo_point"
},
"user_geolocation": {
"type": "geo_point"
},
"title_analyzed": {
"type": "string",
"index":"analyzed"
}
}
}
"""
return {"items":mapping}
def get_field_unique_id(self):
return "ocean-unique-id"
def get_project_repository(self, eitem):
repo = eitem['origin']
return repo
@metadata
def get_rich_item(self, item):
rich_issue = {}
# metadata fields to copy
copy_fields = ["metadata__updated_on","metadata__timestamp","ocean-unique-id","origin"]
for f in copy_fields:
if f in item:
rich_issue[f] = item[f]
else:
rich_issue[f] = None
# The real data
issue = item['data']
rich_issue['time_to_close_days'] = \
get_time_diff_days(issue['created_at'], issue['closed_at'])
if issue['state'] != 'closed':
rich_issue['time_open_days'] = \
get_time_diff_days(issue['created_at'], datetime.utcnow())
else:
rich_issue['time_open_days'] = rich_issue['time_to_close_days']
rich_issue['user_login'] = issue['user']['login']
user = issue['user_data']
if user is not None:
rich_issue['user_name'] = user['name']
rich_issue[
|
datamade/yournextmp-popit
|
candidates/management/commands/candidates_parties_with_multiple_emblems.py
|
Python
|
agpl-3.0
| 834
| 0
|
from django.core.management.base import BaseCommand
from candidates.models import OrganizationExtra
class Command(BaseCommand):
def handle(self, *args, **options):
for party_extra in OrganizationExtra.objects \
.filter(base__classification='Party') \
.select_related('base') \
.prefetch_related('images'):
images = list(party_extra.images.all())
if len(images) < 2:
continue
print "====================================================="
party = party_extra.base
            print len(images), party_extra.slug, party.name.encode('utf-8')
for image in images:
print ' --'
print ' ' + image.source.encode('utf-8')
print ' ' + image.image.url
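For completeness, one way this management command might be invoked programmatically; call_command is standard Django, and the command name follows from the file name of this module (assuming the candidates app is installed in the project):
# Hedged invocation sketch.
from django.core.management import call_command
call_command('candidates_parties_with_multiple_emblems')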
|
lrq3000/pyFileFixity
|
pyFileFixity/lib/gooey/gui/windows/runtime_display_panel.py
|
Python
|
mit
| 1,831
| 0.009285
|
'''
Created on Dec 23, 2013
@author: Chris
'''
import sys
import wx
from gooey.gui.lang import i18n
from gooey.gui.message_event import EVT_MSG
class MessagePump(object):
def __init__(self):
# self.queue = queue
self.stdout = sys.stdout
# Overrides stdout's write method
def write(self, text):
raise NotImplementedError
class RuntimeDisplay(wx.Panel):
def __init__(self, parent, build_spec, **kwargs):
wx.Panel.__init__(self, parent, **kwargs)
self.build_spec = build_spec
self._init_properties()
self._init_components()
self._do_layout()
# self._HookStdout()
def _init_properties(self):
self.SetBackgroundColour('#F0F0F0')
def _init_components(self):
self.text = wx.StaticText(self, label=i18n._("status"))
self.cmd_textbox = wx.TextCtrl(
self, -1, "",
style=wx.TE_MULTILINE | wx.TE_READONLY | wx.TE_RICH)
if self.build_spec.get('monospace_display'):
pointsize = self.cmd_textbox.GetFont().GetPointSize()
font = wx.Font(pointsize, wx.FONTFAMILY_MODERN,
                           wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_BOLD, False)
self.cmd_textbox.SetFont(font)
def _do_layout(self):
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.AddSpacer(10)
sizer.Add(self.text, 0, wx.LEFT, 30)
sizer.AddSpacer(10)
        sizer.Add(self.cmd_textbox, 1, wx.LEFT | wx.RIGHT | wx.BOTTOM | wx.EXPAND, 30)
sizer.AddSpacer(20)
self.SetSizer(sizer)
self.Bind(EVT_MSG, self.OnMsg)
def _HookStdout(self):
_stdout = sys.stdout
_stdout_write = _stdout.write
sys.stdout = MessagePump()
sys.stdout.write = self.WriteToDisplayBox
def AppendText(self, txt):
self.cmd_textbox.AppendText(txt)
def WriteToDisplayBox(self, txt):
        if txt != '':
self.AppendText(txt)
    def OnMsg(self, evt):
pass
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.