Dataset schema: repo_name (string, length 5 to 100), path (string, length 4 to 231), language (string, 1 class), license (string, 15 classes), size (int64, 6 to 947k), score (float64, 0 to 0.34), prefix (string, length 0 to 8.16k), middle (string, length 3 to 512), suffix (string, length 0 to 8.17k).

| repo_name | path | language | license | size | score | prefix | middle | suffix |
|---|---|---|---|---|---|---|---|---|
| rushter/MLAlgorithms | mla/pca.py | Python | mit | 1,758 | 0.000569 |
# coding:utf-8
import logging
import numpy as np
from scipy.linalg import svd
from mla.base import BaseEstimator
np.random.seed(1000)
class PCA(BaseEstimator):
y_required = False
def __init__(self, n_components, solver="svd"):
"""Principal component analysis (PCA) implementation.
Transforms a dataset of possibly correlated values into n linearly
uncorrelated components. The components are ordered such that the first
has the largest possible variance and each following component has the
largest possible variance given the previous components. This causes
the early components to contain most of the variability in the dataset.
Parameters
----------
n_components : int
solver : str, default 'svd'
{'svd', 'eigen'}
"""
self.solver = solver
self.n_components = n_components
self.components = None
self.mean = None
def fit(self, X, y=None):
self.mean = np.mean(X, axis=0)
self._decompose(X)
def _decompose(self, X):
# Mean centering
X = X.copy()
X -= self.mean
if self.solver == "svd":
_, s, Vh = svd(X, full_matrices=True)
        elif self.solver == "eigen":
            s, Vh = np.linalg.eig(np.cov(X.T))
            Vh = Vh.T
        s_squared = s ** 2
        variance_ratio = s_squared / s_squared.sum()
logging.info("Explained variance ratio: %s" % (variance_ratio[0: self.n_components]))
self.components = Vh[0: self.n_components]
def transform(self, X):
X = X.copy()
X -= self.mean
return np.dot(X, self.components.T)
def _predict(self, X=None):
return self.transform(X)
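A minimal usage sketch, added for illustration (the demo data and the `__main__` guard are not part of the original file; only the PCA class above is assumed):

if __name__ == "__main__":
    X_demo = np.random.randn(100, 5)  # hypothetical data: 100 samples, 5 features
    pca = PCA(n_components=2, solver="svd")
    pca.fit(X_demo)
    print(pca.transform(X_demo).shape)  # -> (100, 2)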
| kimgea/simple_twitter_functions | twitter_functions/twitter/whitelisted_users.py | Python | mit | 119 | 0.016807 |
"
|
""
Add user scren name to whitelist if it is not to be unfollowed
"""
whitelist = [
|
]
| didzis/CAMR | graphstate.py | Python | gpl-2.0 | 68,252 | 0.013406 |
#!/usr/bin/python
# parsing state representing a subgraph
# initialized with dependency graph
#
from __future__ import absolute_import
import copy,sys,re
import cPickle
from parser import *
from common.util import *
from constants import *
from common.SpanGraph import SpanGraph
from common.AMRGraph import *
import numpy as np
class ActionError(Exception):
pass
class ActionTable(dict):
'''to do'''
def add_action(self,action_name):
key = len(self.keys())+1
self[key] = action_name
class GraphState(object):
"""
    Starting from the dependency graph, each state represents a subgraph in the parsing process,
    indexed by the current node being handled.
"""
sent = None
#abt_tokens = None
deptree = None
action_table = None
#new_actions = None
sentID = 0
gold_graph = None
model = None
verbose = None
def __init__(self,sigma,A):
self.sigma = sigma
self.idx = self.sigma.top()
self.cidx = None
self.beta = None
#self.beta = Buffer(A.nodes[self.idx].children[:]) if self.idx != -1 else None
#self.cidx = self.beta.top()
#if self.beta:
# self.cidx = self.beta.top()
#else:
# self.cidx = None
self.A = A
self.action_history = []
#self.left_label_set = set([])
#self._init_atomics()
@staticmethod
def init_state(instance,verbose=0):
depGraph = SpanGraph.init_dep_graph(instance,instance.tokens)
#depGraph.pre_merge_netag(instance)
seq = []
#if instance.sentID == 104:
# import pdb
# pdb.set_trace()
for r in sorted(depGraph.multi_roots,reverse=True): seq += depGraph.postorder(root=r)
#seq = uniqify(seq)
seq.append(-1)
sigma = Buffer(seq)
sigma.push(START_ID)
GraphState.text = instance.text
GraphState.sent = instance.tokens
#GraphState.abt_tokens = {}
GraphState.gold_graph = instance.gold_graph
if GraphState.gold_graph: GraphState.gold_graph.abt_node_table = {}
GraphState.deptree = depGraph
GraphState.sentID = instance.comment['id'] if instance.comment and 'id' in instance.comment else GraphState.sentID + 1
GraphState.verbose = verbose
if verbose > 1:
print >> sys.stderr,"Sentence ID:%s, initial sigma:%s" % (GraphState.sentID,sigma)
return GraphState(sigma,copy.deepcopy(depGraph))
@staticmethod
def init_action_table(actions):
actionTable = ActionTable()
for act_type,act_str in actions:
actionTable[act_type] = act_str
#GraphState.new_actions = set()
GraphState.action_table = actionTable
def _init_atomics(self):
"""
atomics features for the initial state
"""
# first parent of current node
sp1 = GraphState.sent[self.A.nodes[self.idx].parents[0]] if self.A.nodes[self.idx].parents else NOT_ASSIGNED
# immediate left sibling, immediate right sibling and second right sibling
if sp1 != NOT_ASSIGNED and len(self.A.nodes[sp1['id']].children) > 1:
children = self.A.nodes[sp1['id']].children
idx_order = sorted(children).index(self.idx)
slsb = GraphState.sent[children[idx_order-1]] if idx_order > 0 else NOT_ASSIGNED
            srsb = GraphState.sent[children[idx_order+1]] if idx_order < len(children)-1 else NOT_ASSIGNED
sr2sb = GraphState.sent[children[idx_order+2]] if idx_order < len(children)-2 else NOT_ASSIGNED
else:
slsb = EMPTY
srsb = EMPTY
sr2sb = EMPTY
'''
# left first parent of current node
        slp1 = GraphState.sent[self.A.nodes[self.idx].parents[0]] if self.A.nodes[self.idx].parents and self.A.nodes[self.idx].parents[0] < self.idx else NOT_ASSIGNED
# right last child of current child
brc1 = GraphState.sent[self.deptree.nodes[self.cidx].children[-1]] if self.cidx and self.A.nodes[self.cidx].children and self.A.nodes[self.cidx].children[-1] > self.cidx else NOT_ASSIGNED
# left first parent of current child
blp1 = GraphState.sent[self.A.nodes[self.cidx].parents[0]] if self.cidx and self.A.nodes[self.cidx].parents and self.A.nodes[self.cidx].parents[0] < self.cidx else NOT_ASSIGNED
'''
self.atomics = [{'id':tok['id'],
'form':tok['form'],
'lemma':tok['lemma'],
'pos':tok['pos'],
'ne':tok['ne'],
'rel':tok['rel'] if 'rel' in tok else EMPTY,
'sp1':sp1,
'slsb':slsb,
'srsb':srsb,
'sr2sb':sr2sb
}
for tok in GraphState.sent] # atomic features for current state
def pcopy(self):
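        # Fast deep copy via a cPickle round-trip (protocol -1 = highest available).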
return cPickle.loads(cPickle.dumps(self,-1))
def is_terminal(self):
"""done traverse the graph"""
return self.idx == -1
def is_permissible(self,action):
#TODO
return True
def is_possible_align(self,currentIdx,goldIdx,ref_graph):
'''
tmp_state = self.pcopy()
oracle = __import__("oracle").DetOracleSC()
next_action,label = oracle.give_ref_action(tmp_state,ref_graph)
while tmp_state.beta:
next_action['edge_label'] = label
tmp_state = tmp_state.apply(next_action)
next_action,label = oracle.give_ref_action(tmp_state,ref_graph)
'''
#ref_children = [ref_graph.abt_node_table[c] if c in ref_graph.abt_node_table else c for c in ref_graph.nodes[goldIdx].children]
#return len(set(self.A.nodes[currentIdx].children) & set(ref_children)) > 1 or self.A.nodes[currentIdx].words[0][0].lower() == goldIdx
if self.A.nodes[currentIdx].words[0].lower() in prep_list:
return False
return True
def get_current_argset(self):
if self.idx == START_ID:
return set([])
currentIdx = self.idx
currentNode = self.get_current_node()
currentGraph = self.A
        # record the core arguments the current node (predicate) has
return set(currentGraph.get_edge_label(currentIdx,c) for c in currentNode.children if currentGraph.get_edge_label(currentIdx,c).startswith('ARG'))
def get_possible_actions(self,train):
if self.idx == START_ID:
return [{'type':NEXT2}]
actions = []
currentIdx = self.idx
currentChildIdx = self.cidx
currentNode = self.get_current_node()
currentChild = self.get_current_child()
currentGraph = self.A
token_label_set = GraphState.model.token_label_set
token_to_concept_table = GraphState.model.token_to_concept_table
tag_codebook = GraphState.model.tag_codebook
if isinstance(currentIdx,int):
current_tok_lemma = ','.join(tok['lemma'] for tok in GraphState.sent if tok['id'] in range(currentNode.start,currentNode.end))
current_tok_form = ','.join(tok['form'] for tok in GraphState.sent if tok['id'] in range(currentNode.start,currentNode.end))
current_tok_ne = GraphState.sent[currentIdx]['ne']
else:
current_tok_form = ABT_TOKEN['form']
current_tok_lemma = ABT_TOKEN['lemma'] #if currentIdx != START_ID else START_TOKEN['lemma']
current_tok_ne = ABT_TOKEN['ne'] #if currentIdx != START_ID else START_TOKEN['ne']
#if self.action_history and self.action_history[-1]['type'] in [REPLACEHEAD,NEXT2,DELETENODE] and currentNode.num_parent_infer_in_chain < 3 and currentNode.num_parent_infer == 0:
#actions.extend([{'type':INFER,'tag':z} for z in tag_codebook['ABTTag'].labels()])
if currentChildIdx: # beta not empty
#all_candidate_edge_labels = GraphState.model.rel_codebook.labels()
#if current_tok_lemma in token_label_set:
# all_candida
| nagyistoce/netzob | src/netzob/Common/Models/L3NetworkMessage.py | Python | gpl-3.0 | 4,102 | 0.008787 |
# -*- coding: utf-8 -*-
#+---------------------------------------------------------------------------+
#| 01001110 01100101 01110100 01111010 01101111 01100010 |
#| |
#| Netzob : Inferring communication protocols |
#+---------------------------------------------------------------------------+
#| Copyright (C) 2011 Georges Bossert and Frédéric Guihéry |
#| This program is free software: you can redistribute it and/or modify |
#| it under the terms of the GNU General Public License as published by |
#| the Free Software Foundation, either version 3 of the License, or |
#| (at your option) any later version. |
#| |
#| This program is distributed in the hope that it will be useful, |
#| but WITHOUT ANY WARRANTY; without even the implied warranty of |
#| MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
#| GNU General Public License for more details. |
#| |
#| You should have received a copy of the GNU General Public License |
#| along with this program. If not, see <http://www.gnu.org/licenses/>. |
#+---------------------------------------------------------------------------+
#| @url : http://www.netzob.org |
#| @contact : [email protected] |
#| @sponsors : Amossys, http://www.amossys.fr |
#| Supélec, http://www.rennes.supelec.fr/ren/rd/cidre/ |
#+---------------------------------------------------------------------------+
#+---------------------------------------------------------------------------+
#| Standard library imports
#+---------------------------------------------------------------------------+
import logging
#+---------------------------------------------------------------------------+
#| Local application imports
#+---------------------------------------------------------------------------+
from netzob.Common.Type.Format import Format
from netzob.Common.Models.L2NetworkMessage import L2NetworkMessage
from netzob.Common.Models.Factories.L3NetworkMessageFactory import L3NetworkMessageFactory
from netzob.Common.Property import Property
## Remarks:
# - It may be less clear to speak of a "Layer 3 source address" than of an "IP address"...
class L3NetworkMessage(L2NetworkMessage):
"""Definition of a layer 3 network message"""
def __init__(self, id, timestamp, data, l2Protocol, l2SourceAddress,
l2DestinationAddress, l3Protocol, l3SourceAddress,
l3DestinationAddress, pattern=[]):
if len(pattern) == 1:
pattern.insert(0, str(l3DestinationAddress))
super(L3NetworkMessage, self).__init__(id, timestamp, data, l2Protocol,
l2SourceAddress, l2DestinationAddress, pattern=[])
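        # Note: super() is given a fresh empty pattern list here rather than the
        # locally adjusted `pattern` argument.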
self.type = "L3Network"
self.l3Protocol = str(l3Protocol)
self.l3SourceAddress = str(l3SourceAddress)
self.l3DestinationAddress = str(l3DestinationAddress)
def getFactory(self):
return L3NetworkMessageFactory
def getL3Protocol(self):
return self.l3Protocol
def getL3SourceAddress(self):
return self.l3SourceAddress
def getL3DestinationAddress(self):
return self.l3DestinationAddress
def getProperties(self):
properties = super(L3NetworkMessage, self).getProperties()
properties.append(Property('Layer 3 Protocol', Format.STRING, self.getL3Protocol()))
properties.append(Property('Layer 3 Source Address', Format.IP, self.getL3SourceAddress()))
properties.append(Property('Layer 3 Destination Address', Format.IP, self.getL3DestinationAddress()))
return properties
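A hypothetical construction, for illustration only (all field values are invented; only the constructor signature above is assumed):

# msg = L3NetworkMessage(1, "1317398400", "aabbcc", "Ethernet",
#                        "00:11:22:33:44:55", "66:77:88:99:aa:bb",
#                        "IP", "192.168.0.1", "192.168.0.2")
# msg.getProperties()  # includes the three 'Layer 3 ...' entries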
| christopherjbly/calendar-indicator | src/calendarindicator.py | Python | gpl-3.0 | 17,542 | 0.039813 |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
#
__author__='atareao'
__date__ ='$25/04/2011'
#
# Remember-me
# An indicator for Google Calendar
#
# Copyright (C) 2011 Lorenzo Carbonell
# [email protected]
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
#
import os
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import GLib
from gi.repository import AppIndicator3 as appindicator
from gi.repository import Gtk
from gi.repository import GdkPixbuf
from gi.repository import Notify
import urllib.request
import time
import dbus
import locale
import gettext
import datetime
import webbrowser
from calendardialog import CalendarDialog
from calendarwindow import CalendarWindow
from addcalendarwindow import AddCalendarWindow
from eventwindow import EventWindow
from googlecalendarapi import GoogleCalendar
#
import comun
from configurator import Configuration
from preferences_dialog import Preferences
#
locale.setlocale(locale.LC_ALL, '')
gettext.bindtextdomain(comun.APP, comun.LANGDIR)
gettext.textdomain(comun.APP)
_ = gettext.gettext
def wait(time_lapse):
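    # Sleep for time_lapse seconds while keeping the Gtk UI responsive
    # by draining pending main-loop events.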
time_start = time.time()
time_end = (time_start + time_lapse)
while time_end > time.time():
while Gtk.events_pending():
Gtk.main_iteration()
def short_msg(msg,length=50):
if len(msg)>length:
return msg[:length]
return msg
def internet_on():
try:
response=urllib.request.urlopen('http://google.com',timeout=1)
return True
except Exception as e:
print(e)
return False
def check_events(event1,event2):
return event1['id'] == event2['id']
def is_event_in_events(an_event,events):
for event in events:
if check_events(an_event,event):
return True
return False
def add2menu(menu, text = None, icon = None, conector_event = None, conector_action = None):
if text != None:
menu_item = Gtk.ImageMenuItem.new_with_label(text)
if icon:
image = Gtk.Image.new_from_stock(icon, Gtk.IconSize.MENU)
menu_item.set_image(image)
menu_item.set_always_show_image(True)
else:
if icon == None:
menu_item = Gtk.SeparatorMenuItem()
else:
menu_item = Gtk.ImageMenuItem.new_from_stock(icon, None)
menu_item.set_always_show_image(True)
if conector_event != None and conector_action != None:
menu_item.connect(conector_event,conector_action)
menu_item.show()
menu.append(menu_item)
return menu_item
class EventMenuItem(Gtk.MenuItem):
def __init__(self,label):
Gtk.MenuItem.__init__(self,label)
self.event = None
def get_event(self):
return self.event
def set_event(self,event):
self.event = event
if 'summary' in event.keys():
self.set_label(event.get_start_date_string()+' - '+short_msg(event['summary']))
else:
self.set_label(event.get_start_date_string())
class CalendarIndicator():
def __init__(self):
if dbus.SessionBus().request_name("es.atareao.calendar-indicator") != dbus.bus.REQUEST_NAME_REPLY_PRIMARY_OWNER:
print("application already running")
exit(0)
self.indicator = appindicator.Indicator.new('Calendar-Indicator', 'Calendar-Indicator', appindicator.IndicatorCategory.APPLICATION_STATUS)
self.notification = Notify.Notification.new('','', None)
self.googlecalendar = GoogleCalendar(token_file = comun.TOKEN_FILE)
error = True
while(error):
if self.googlecalendar.do_refresh_authorization() is None:
p = Preferences()
if p.run() == Gtk.ResponseType.ACCEPT:
p.save_preferences()
p.destroy()
self.googlecalendar = GoogleCalendar(token_file = comun.TOKEN_FILE)
if (not os.path.exists(comun.TOKEN_FILE)) or (self.googlecalendar.do_refresh_authorization() is None):
                    md = Gtk.MessageDialog( parent = None,
                        flags = Gtk.DialogFlags.MODAL | Gtk.DialogFlags.DESTROY_WITH_PARENT,
type = Gtk.MessageType.ERROR,
buttons = Gtk.ButtonsType.OK_CANCEL,
                        message_format = _('You have to authorize Calendar-Indicator to manage your Google Calendar.\n Do you want to authorize?'))
if md.run() == Gtk.ResponseType.CANCEL:
exit(3)
md.destroy()
else:
self.googlecalendar = GoogleCalendar(token_file = comun.TOKEN_FILE)
if self.googlecalendar.do_refresh_authorization() is None:
error = False
else:
error = False
self.load_preferences()
#
self.events = []
self.create_menu()
self.sync()
self.update_menu()
self.actualization_time = time.time()
GLib.timeout_add_seconds(60, self.work)
def sync(self):
self.googlecalendar.read()
def load_preferences(self):
configuration = Configuration()
self.time = configuration.get('time')
self.theme = configuration.get('theme')
self.calendars = configuration.get('calendars')
self.visible_calendars = []
for calendar in self.calendars:
if calendar['visible']:
self.visible_calendars.append(calendar['id'])
def work(self):
self.update_menu(check=True)
if (time.time()-self.actualization_time) > self.time*3600:
if internet_on():
self.sync()
self.actualization_time = time.time()
return True
def create_menu(self):
self.menu = Gtk.Menu()
self.menu_events = []
for i in range(10):
menu_event = EventMenuItem('%s'%i)
menu_event.show()
menu_event.set_visible(False)
menu_event.connect('activate',self.on_menu_event_activate)
self.menu.append(menu_event)
self.menu_events.append(menu_event)
add2menu(self.menu)
self.menu_add_new_calendar = add2menu(self.menu, text = _('Add new calendar'), conector_event = 'activate',conector_action = self.on_menu_add_new_calendar)
self.menu_add_new_event = add2menu(self.menu, text = _('Add new event'), conector_event = 'activate',conector_action = self.on_menu_add_new_event)
add2menu(self.menu)
self.menu_refresh = add2menu(self.menu, text = _('Sync with google calendar'), conector_event = 'activate',conector_action = self.on_menu_refresh)
self.menu_show_calendar = add2menu(self.menu, text = _('Show Calendar'), conector_event = 'activate',conector_action = self.menu_show_calendar_response)
self.menu_preferences = add2menu(self.menu, text = _('Preferences'), conector_event = 'activate',conector_action = self.menu_preferences_response)
add2menu(self.menu)
menu_help = add2menu(self.menu, text =_('Help'))
menu_help.set_submenu(self.get_help_menu())
add2menu(self.menu)
add2menu(self.menu, text = _('Exit'), conector_event = 'activate',conector_action = self.menu_exit_response)
self.menu.show()
self.indicator.set_menu(self.menu)
def set_menu_sensitive(self,sensitive = False):
self.menu_add_new_calendar.set_sensitive(sensitive)
self.menu_add_new_event.set_sensitive(sensitive)
self.menu_refresh.set_sensitive(sensitive)
self.menu_show_calendar.set_sensitive(sensitive)
self.menu_preferences.set_sensitive(sensitive)
self.menu_about.set_sensitive(sensitive)
def update_menu(self,check=False):
#
now = datetime.datetime.now()
normal_icon = os.path.join(comun.ICONDIR,'%s-%s-normal.svg'%(now.day,self.theme))
starred_icon = os.path.join(comun.ICONDIR,'%s-%s-starred.svg'%(now.day,self.theme))
#
self.indicator.set_icon(normal_icon)
self.indicator.set_attention_icon(starred_icon)
#
events2 = self.googlecalendar.getNextTenEvents(self.visible_calendars)
if check and len(self.events)>0:
for event in events2:
if not is_event_in_events(event,self.events):
msg = _('New event:')+'\n'
                    if 'summary' in event.keys():
msg += event.get_start_date_string() + ' - '+ event['summary']
else:
msg += event.get_start_date_string()
self.notification.update('Calendar Indicator',msg,comun.ICON_NEW_EVENT)
self.notification.show()
for event in
| trezor/trezor-crypto | tests/test_wycheproof.py | Python | mit | 21,271 | 0.000658 |
#!/usr/bin/env python
import ctypes
import json
import os
from binascii import hexlify, unhexlify
import pytest
from pyasn1.codec.ber.decoder import decode as ber_decode
from pyasn1.codec.der.decoder import decode as der_decode
from pyasn1.codec.der.encoder import encode as der_encode
from pyasn1.type import namedtype, univ
class EcSignature(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType("r", univ.Integer()),
namedtype.NamedType("s", univ.Integer()),
)
class EcKeyInfo(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType("key_type", univ.ObjectIdentifier()),
namedtype.NamedType("curve_name", univ.ObjectIdentifier()),
)
class EcPublicKey(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType("key_info", EcKeyInfo()),
namedtype.NamedType("public_key", univ.BitString()),
)
class EdKeyInfo(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType("key_type", univ.ObjectIdentifier())
)
class EdPublicKey(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType("key_info", EdKeyInfo()),
namedtype.NamedType("public_key", univ.BitString()),
)
class ParseError(Exception):
pass
class NotSupported(Exception):
pass
class DataError(Exception):
pass
class curve_info(ctypes.Structure):
_fields_ = [("bip32_name", ctypes.c_char_p), ("params", ctypes.c_void_p)]
def keys_in_dict(dictionary, keys):
return keys <= set(dictionary.keys())
def parse_eddsa_signature(signature):
if len(signature) != 64:
raise ParseError("Not a valid EdDSA signature")
return signature
def parse_ecdh256_privkey(private_key):
if private_key < 0 or private_key.bit_length() > 256:
raise ParseError("Not a valid 256 bit ECDH private key")
return private_key.to_bytes(32, byteorder="big")
def parse_signed_hex(string):
if len(string) % 2 == 1:
string = "0" + string
number = int(string, 16)
if int(string[0], 16) & 8:
return -number
else:
return number
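# Illustrative values: parse_signed_hex("7f") == 127; parse_signed_hex("ff") == -255
# (a set high bit in the first nibble marks the value as negative).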
def parse_result(result):
if result == "valid":
return True
elif result == "invalid":
return False
elif result == "acceptable":
return None
else:
raise DataError()
def is_valid_der(data):
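    # DER is canonical: the input is valid DER iff re-encoding the decoded
    # structure reproduces the input byte-for-byte.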
try:
structure, _ = der_decode(data)
return data == der_encode(structure)
except Exception:
return False
def parse_ed_pubkey(public_key):
try:
public_key, _ = ber_decode(public_key, asn1Spec=EdPublicKey())
except Exception:
raise ParseError("Not a BER encoded Edwards curve public key")
if not public_key["key_info"]["key_type"] == univ.ObjectIdentifier("1.3.101.112"):
raise ParseError("Not a BER encoded Edwards curve public key")
public_key = bytes(public_key["public_key"].asOctets())
return public_key
def parse_ec_pubkey(public_key):
try:
public_key, _ = ber_decode(public_key, asn1Spec=EcPublicKey())
except Exception:
raise ParseError("Not a BER encoded named elliptic curve public key")
if not public_key["key_info"]["key_type"] == univ.ObjectIdentifier(
"1.2.840.10045.2.1"
):
raise ParseError("Not a BER encoded named elliptic curve public key")
curve_identifier = public_key["key_info"]["curve_name"]
curve_name = get_curve_name_by_identifier(curve_identifier)
if curve_name is None:
raise NotSupported(
"Unsupported named elliptic curve: {}".format(curve_identifier)
)
try:
public_key = bytes(public_key["public_key"].asOctets())
except Exception:
raise ParseError("Not a BER encoded named elliptic curve public key")
return curve_name, public_key
def parse_ecdsa256_signature(signature):
s = signature
if not is_valid_der(signature):
raise ParseError("Not a valid DER")
try:
signature, _ = der_decode(signature, asn1Spec=EcSignature())
    except Exception:
        raise ParseError("Not a valid DER encoded ECDSA signature")
try:
r = int(signature["r"]).to_bytes(32, byteorder="big")
s = int(signature["s"]).to_bytes(32, byteorder="big")
signature = r + s
except Exception:
raise ParseError("Not a valid DER encoded 256 bit ECDSA signature")
    return signature
def parse_digest(name):
if name == "SHA-256":
return 0
else:
raise NotSupported("Unsupported hash function: {}".format(name))
def get_curve_by_name(name):
lib.get_curve_by_name.restype = ctypes.c_void_p
curve = lib.get_curve_by_name(bytes(name, "ascii"))
if curve is None:
return None
curve = ctypes.cast(curve, ctypes.POINTER(curve_info))
return ctypes.c_void_p(curve.contents.params)
def parse_curve_name(name):
if name == "secp256r1":
return "nist256p1"
elif name == "secp256k1":
return "secp256k1"
elif name == "curve25519":
return "curve25519"
else:
return None
def get_curve_name_by_identifier(identifier):
if identifier == univ.ObjectIdentifier("1.3.132.0.10"):
return "secp256k1"
elif identifier == univ.ObjectIdentifier("1.2.840.10045.3.1.7"):
return "nist256p1"
else:
return None
def chacha_poly_encrypt(key, iv, associated_data, plaintext):
context = bytes(context_structure_length)
tag = bytes(16)
ciphertext = bytes(len(plaintext))
lib.rfc7539_init(context, key, iv)
lib.rfc7539_auth(context, associated_data, len(associated_data))
lib.chacha20poly1305_encrypt(context, plaintext, ciphertext, len(plaintext))
lib.rfc7539_finish(context, len(associated_data), len(plaintext), tag)
return ciphertext, tag
def chacha_poly_decrypt(key, iv, associated_data, ciphertext, tag):
context = bytes(context_structure_length)
computed_tag = bytes(16)
plaintext = bytes(len(ciphertext))
lib.rfc7539_init(context, key, iv)
lib.rfc7539_auth(context, associated_data, len(associated_data))
lib.chacha20poly1305_decrypt(context, ciphertext, plaintext, len(ciphertext))
lib.rfc7539_finish(context, len(associated_data), len(ciphertext), computed_tag)
return plaintext if tag == computed_tag else False
def add_pkcs_padding(data):
padding_length = 16 - len(data) % 16
return data + bytes([padding_length] * padding_length)
def remove_pkcs_padding(data):
padding_length = data[-1]
if not (
0 < padding_length <= 16
and data[-padding_length:] == bytes([padding_length] * padding_length)
):
return False
else:
return data[:-padding_length]
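# Illustrative round trip: add_pkcs_padding(b"abc") == b"abc" + b"\x0d" * 13,
# and remove_pkcs_padding() of that result returns b"abc" again.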
def aes_encrypt_initialise(key, context):
if len(key) == (128 / 8):
lib.aes_encrypt_key128(key, context)
elif len(key) == (192 / 8):
lib.aes_encrypt_key192(key, context)
elif len(key) == (256 / 8):
lib.aes_encrypt_key256(key, context)
else:
raise NotSupported("Unsupported key length: {}".format(len(key) * 8))
def aes_cbc_encrypt(key, iv, plaintext):
plaintext = add_pkcs_padding(plaintext)
context = bytes(context_structure_length)
ciphertext = bytes(len(plaintext))
aes_encrypt_initialise(key, context)
lib.aes_cbc_encrypt(
plaintext, ciphertext, len(plaintext), bytes(bytearray(iv)), context
)
return ciphertext
def aes_decrypt_initialise(key, context):
if len(key) == (128 / 8):
lib.aes_decrypt_key128(key, context)
elif len(key) == (192 / 8):
lib.aes_decrypt_key192(key, context)
elif len(key) == (256 / 8):
lib.aes_decrypt_key256(key, context)
else:
raise NotSupported("Unsupported AES key length: {}".format(len(key) * 8))
def aes_cbc_decrypt(key, iv, ciphertext):
context = bytes(context_structure_length)
plaintext = bytes(len(ciphertext))
aes_decrypt_initialise(key, context)
lib.aes_cbc_decrypt(ciphertext, plaintext, len(ciphertext), iv, context)
return remove_pkcs_padding(plaintext)
def load_json_testvectors(filename):
try:
result = json.
| vlfedotov/django-business-logic | tests/rest/test_reference.py | Python | mit | 4,996 | 0.001401 |
# -*- coding: utf-8 -*-
from .common import *
class ReferenceDescriptorTest(TestCase):
def setUp(self):
self.reference_descriptor = ReferenceDescriptor.objects.create(
content_type=ContentType.objects.get_for_model(Model))
self.client = JSONClient()
def test_reference_descriptor_list(self):
url = reverse('business-logic:rest:reference-descriptor-list')
response = self.client.get(url)
self.assertEqual(200, response.status_code)
_json = response_json(response)
self.assertIsInstance(_json, list)
self.assertEqual(1, len(_json))
descriptor = _json[0]
model = 'test_app.Model'
self.assertEqual(model, descriptor['name'])
self.assertEqual('Test Model', descriptor['verbose_name'])
        self.assertEqual(reverse('business-logic:rest:reference-list', kwargs=dict(model=model)), descriptor['url'])
def test_unregistered_reference_list_not_found(self):
model = 'business_logic.ReferenceDescriptor'
url = reverse('business-logic:rest:reference-list', kwargs=dict(model=model))
response = self.client.get(url)
self.assertEqual(404, response.status_code)
    def test_notexists_model_not_found(self):
for model in ('ooo.XXX', 'password'):
url = reverse('business-logic:rest:reference-list', kwargs=dict(model=model))
response = self.client.get(url)
self.assertEqual(404, response.status_code)
class ReferenceListTest(TestCase):
def setUp(self):
self.reference_descriptor = ReferenceDescriptor.objects.create(
content_type=ContentType.objects.get_for_model(Model))
self.client = JSONClient()
model = 'test_app.Model'
self.url = reverse('business-logic:rest:reference-list', kwargs=dict(model=model))
self.test_models = []
for i in range(11):
self.test_models.append(Model.objects.create(string_value='str_{}'.format(str(i) * 3)))
def test_reference_list(self):
response = self.client.get(self.url)
self.assertEqual(200, response.status_code)
_json = response_json(response)
self.assertIsInstance(_json, dict)
self.assertEqual(11, len(_json['results']))
reference = _json['results'][0]
self.assertEqual(self.test_models[0].id, reference['id'])
self.assertEqual(str(self.test_models[0]), reference['name'])
def test_reference_list_search_not_configured(self):
response = self.client.get(self.url, dict(search='111'))
self.assertEqual(400, response.status_code)
_json = response_json(response)
self.assertEqual(
['ReferenceDescriptor for `test_app.Model` are not configured: incorrect `search_fields` field'], _json)
def test_reference_list_search(self):
self.reference_descriptor.search_fields = 'string_value'
self.reference_descriptor.save()
response = self.client.get(self.url, dict(search='111'))
_json = response_json(response)
self.assertEqual(1, len(_json['results']))
def test_reference_list_search_related_fields(self):
self.reference_descriptor.search_fields = 'foreign_value__string_value'
self.reference_descriptor.save()
test_model = self.test_models[2]
test_related_model = RelatedModel.objects.create(string_value='xxx')
test_model.foreign_value = test_related_model
test_model.save()
response = self.client.get(self.url, dict(search='xxx'))
_json = response_json(response)
self.assertEqual(1, len(_json['results']))
reference = _json['results'][0]
self.assertEqual(test_model.id, reference['id'])
class ReferenceViewTest(TestCase):
def setUp(self):
self.reference_descriptor = ReferenceDescriptor.objects.create(
content_type=ContentType.objects.get_for_model(Model))
self.client = JSONClient()
model = 'test_app.Model'
self.test_model = Model.objects.create(string_value='str_value')
self.url = reverse('business-logic:rest:reference', kwargs=dict(model=model, pk=self.test_model.id))
def test_reference_view(self):
response = self.client.get(self.url)
self.assertEqual(200, response.status_code)
_json = response_json(response)
self.assertIsInstance(_json, dict)
self.assertEqual(self.test_model.id, _json['id'])
self.assertEqual(str(self.test_model), _json['name'])
def test_reference_view_name_field(self):
self.reference_descriptor.name_field = 'string_value'
self.reference_descriptor.save()
response = self.client.get(self.url)
self.assertEqual(200, response.status_code)
_json = response_json(response)
self.assertIsInstance(_json, dict)
self.assertEqual(self.test_model.id, _json['id'])
self.assertEqual(self.test_model.string_value, _json['name'])
| MeeseeksBox/MeeseeksDev | meeseeksdev/commands.py | Python | mit | 6,864 | 0.002477 |
"""
Define a few commands
"""
from .meeseeksbox.utils import Session, fix_issue_body, fix_comment_body
from .meeseeksbox.scopes import admin, write, everyone
from textwrap import dedent
def _format_doc(function, name):
if not function.__doc__:
doc = " "
else:
doc = function.__doc__.splitlines()
first, other = doc[0], "\n".join(doc[1:])
return "`@meeseeksdev {} {}` ({}) \n{} ".format(name, first, function.scope, other)
def help_make(commands):
data = "\n".join([_format_doc(v, k) for k, v in commands.items()])
@everyone
def help(*, session, payload, arguments):
comment_url = payload["issue"]["comments_url"]
session.post_comment(
comment_url,
dedent(
"""The following commands are available:\n\n{}
""".format(
data
)
),
)
return help
@write
def close(*, session, payload, arguments, local_config=None):
session.ghrequest("PATCH", payload["issue"]["url"], json={"state": "closed"})
@write
def open(*, session, payload, arguments, local_config=None):
session.ghrequest("PATCH", payload["issue"]["url"], json={"state": "open"})
@write
def migrate_issue_request(
*, session: Session, payload: dict, arguments: str, local_config=None
):
"""[to] {org}/{repo}
    Need to be admin on the target repo. Replicates all comments on the target repo and closes the current one.
"""
"""Todo:
- Works through pagination of comments
- Works through pagination of labels
Link to non-migrated labels.
"""
if arguments.startswith("to "):
arguments = arguments[3:]
org_repo = arguments
org, repo = arguments.split("/")
target_session = yield org_repo
if not target_session:
session.post_comment(
payload["issue"]["comments_url"],
body="I'm afraid I can't do that. Maybe I need to be installed on target repository ?\n"
"Click [here](https://github.com/integrations/meeseeksdev/installations/new) to do that.".format(
botname="meeseeksdev"
),
)
return
issue_title = payload["issue"]["title"]
issue_body = payload["issue"]["body"]
original_org = payload["organization"]["login"]
original_repo = payload["repository"]["name"]
original_poster = payload["issue"]["user"]["login"]
original_number = payload["issue"]["number"]
migration_requester = payload["comment"]["user"]["login"]
request_id = payload["comment"]["id"]
original_labels = [l["name"] for l in payload["issue"]["labels"]]
if original_labels:
available_labels = target_session.ghrequest(
"GET",
"https://api.github.com/repos/{org}/{r
|
epo}/labels".format(
org=org, repo=repo
),
None,
).json()
available_labels = [l["name"] for l in available_labels]
        migrate_labels = [l for l in original_labels if l in available_labels]
not_set_labels = [l for l in original_labels if l not in available_labels]
new_response = target_session.create_issue(
org,
repo,
issue_title,
fix_issue_body(
issue_body,
original_poster,
original_repo,
original_org,
original_number,
migration_requester,
),
labels=migrate_labels,
)
new_issue = new_response.json()
new_comment_url = new_issue["comments_url"]
original_comments = session.ghrequest(
"GET", payload["issue"]["comments_url"], None
).json()
for comment in original_comments:
if comment["id"] == request_id:
continue
body = comment["body"]
op = comment["user"]["login"]
url = comment["html_url"]
target_session.post_comment(
new_comment_url,
body=fix_comment_body(body, op, url, original_org, original_repo),
)
if not_set_labels:
body = "I was not able to apply the following label(s): %s " % ",".join(
not_set_labels
)
target_session.post_comment(new_comment_url, body=body)
session.post_comment(
payload["issue"]["comments_url"],
body="Done as {}/{}#{}.".format(org, repo, new_issue["number"]),
)
session.ghrequest("PATCH", payload["issue"]["url"], json={"state": "closed"})
from .meeseeksbox.scopes import pr_author, write
from .meeseeksbox.commands import tag, untag
@pr_author
@write
def ready(*, session, payload, arguments, local_config=None):
"""{no arguments}
Remove "waiting for author" tag, adds "need review" tag. Can also be issued
if you are the current PR author even if you are not admin.
"""
tag(session, payload, "need review")
untag(session, payload, "waiting for author")
@write
def merge(*, session, payload, arguments, method="merge", local_config=None):
print("===== merging =====")
if arguments:
if arguments not in {"merge", "squash", "rebase"}:
print("don't know how to merge with methods", arguments)
return
else:
method = arguments
prnumber = payload["issue"]["number"]
org_name = payload["repository"]["owner"]["login"]
repo_name = payload["repository"]["name"]
# collect extended payload on the PR
print("== Collecting data on Pull-request...")
r = session.ghrequest(
"GET",
"https://api.github.com/repos/{}/{}/pulls/{}".format(
org_name, repo_name, prnumber
),
json=None,
)
pr_data = r.json()
head_sha = pr_data["head"]["sha"]
mergeable = pr_data["mergeable"]
repo_name = pr_data["head"]["repo"]["name"]
if mergeable:
resp = session.ghrequest(
"PUT",
"https://api.github.com/repos/{}/{}/pulls/{}/merge".format(
org_name, repo_name, prnumber
),
json={"sha": head_sha, "merge_method": method},
override_accept_header="application/vnd.github.polaris-preview+json",
)
print("------------")
print(resp.json())
print("------------")
resp.raise_for_status()
else:
print("Not mergeable", pr_data["mergeable"])
###
# Lock and Unlock are not yet available for integration.
###
# def _lock_primitive(meth,*, session, payload, arguments):
# number = payload['issue']['number']
# org_name = payload['repository']['owner']['login']
# repo_name = payload['repository']['name']
# session.ghrequest('PUT', 'https://api.github.com/repos/{}/{}/issues/{}/lock'.format(org_name, repo_name, number))
#
# @admin
# def lock(**kwargs):
# _lock_primitive('PUT', **kwargs)
#
# @admin
# def unlock(**kwargs):
# _lock_primitive('DELETE', **kwargs)
| atphalix/eviltoys | tools/myscripts/mdl_export.py | Python | gpl-2.0 | 38,523 | 0.052384 |
#!BPY
"""
Name: 'MDL (.mdl)'
Blender: 244
Group: 'Export'
Tooltip: 'Export to Quake file format (.mdl).'
"""
__author__ = 'Andrew Denner'
__version__ = '0.1.3'
__url__ = ["Andrew's site, http://www.btinternet.com/~chapterhonour/",
"Can also be contacted through http://celephais.net/board", "blender", "elysiun"]
__email__ = ["Andrew Denner, andrew.denner:btinternet*com", "scripts"]
__bpydoc__ = """\
This script exports a Quake 1 file (MDL).
Based wholesale off the MD2 export by Bob Holcomb, with the help of David Henry's MDL format guide
"""
# ***** BEGIN GPL LICENSE BLOCK *****
#
# Script copyright (C): Andrew Denner(portions Bob Holcomb)
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# ***** END GPL LICENCE BLOCK *****
# --------------------------------------------------------------------------
import Blender
from Blender import *
from Blender.Draw import *
from Blender.BGL import *
from Blender.Window import *
import struct, string
from types import *
from math import *
######################################################
# GUI Loader
######################################################
# Export globals
g_filename=Create("default.mdl")
g_frame_filename=Create("default")
g_filename_search=Create("")
g_frame_search=Create("default")
user_frame_list=[]
#Globals
g_scale=Create(1.0)
g_fixuvs=Create(0)
g_flags=Create(0)
# Events
EVENT_NOEVENT=1
EVENT_SAVE_MDL=2
EVENT_CHOOSE_FILENAME=3
EVENT_CHOOSE_FRAME=4
EVENT_EXIT=100
######################################################
# Callbacks for Window functions
######################################################
def filename_callback(input_filename):
global g_filename
g_filename.val=input_filename
def frame_callback(input_frame):
global g_frame_filename
g_frame_filename.val=input_frame
def draw_gui():
global g_scale
global g_fixuvs
global g_flags
global g_filename
global g_frame_filename
global EVENT_NOEVENT,EVENT_SAVE_MDL,EVENT_CHOOSE_FILENAME,EVENT_CHOOSE_FRAME,EVENT_EXIT
########## Titles
glClear(GL_COLOR_BUFFER_BIT)
glRasterPos2d(10, 140)
Text("MDL Export")
######### Parameters GUI Buttons
######### MDL Filename text entry
g_filename = String("MDL file to save: ", EVENT_NOEVENT, 10, 75, 210, 18,
g_filename.val, 255, "MDL file to save")
########## MDL File Search Button
Button("Search",EVENT_CHOOSE_FILENAME,220,75,80,18)
########## MDL Frame List Text entry
g_frame_filename = String("Frame List file to load: ", EVENT_NOEVENT, 10, 55, 210, 18,
g_frame_filename.val, 255, "Frame List to load-overrides MDL defaults")
g_flags = Number("Model Flags", EVENT_NOEVENT, 10, 115, 210, 18,
g_flags.val, 0, 1<<15, "Specify the combination of flags you desire")
########## Frame List Search Button
Button("Search",EVENT_CHOOSE_FRAME,220,55,80,18)
########## Scale slider-default is 1
g_scale = Slider("Scale Factor: ", EVENT_NOEVENT, 10, 95, 210, 18,
g_scale.val, 0.001, 10.0, 1.0, "Scale factor for object Model");
########## Fix UVs options
g_fixuvs = Menu("Fix UV coords %t|Don't Fix UVs%x0|Translate points %x1|Clamp points %x2",
EVENT_NOEVENT, 10, 35, 210, 18, g_fixuvs.val, "Method for handling UV's which are outside skin range")
######### Draw and Exit Buttons
Button("Export",EVENT_SAVE_MDL , 10, 10, 80, 18)
Button("Exit",EVENT_EXIT , 170, 10, 80, 18)
def event(evt, val):
if (evt == QKEY and not val):
Exit()
def bevent(evt):
global g_filename
global g_frame_filename
global EVENT_NOEVENT,EVENT_SAVE_MDL,EVENT_EXIT
######### Manages GUI events
if (evt==EVENT_EXIT):
Blender.Draw.Exit()
elif (evt==EVENT_CHOOSE_FILENAME):
FileSelector(filename_callback, "MDL File Selection")
elif (evt==EVENT_CHOOSE_FRAME):
FileSelector(frame_callback, "Frame Selection")
elif (evt==EVENT_SAVE_MDL):
if (g_filename.val == "model"):
save_mdl("blender.mdl")
Blender.Draw.Exit()
return
else:
save_mdl(g_filename.val)
Blender.Draw.Exit()
return
Register(draw_gui, event, bevent)
######################################################
# MDL Model Constants
######################################################
MDL_MAX_TRIANGLES=2048
MDL_MAX_VERTICES=1024
MDL_MAX_TEXCOORDS=1024
MDL_MAX_FRAMES=256
MDL_MAX_SKINS=16
MDL_MAX_FRAMESIZE=(MDL_MAX_VERTICES * 4 + 128)
MDL_FRAME_NAME_LIST=(("stand",1,10)),
#10 frames
#pretty sure these are the same
MDL_NORMALS=((-0.525731, 0.000000, 0.850651),
(-0.442863, 0.238856, 0.864188),
(-0.295242, 0.000000, 0.955423),
(-0.309017, 0.500000, 0.809017),
(-0.162460, 0.262866, 0.951056),
(0.000000, 0.000000, 1.000000),
(0.000000, 0.850651, 0.525731),
(-0.147621, 0.716567, 0.681718),
(0.147621, 0.716567, 0.681718),
(0.000000, 0.525731, 0.850651),
(0.309017, 0.500000, 0.809017),
(0.525731, 0.000000, 0.850651),
(0.295242, 0.000000, 0.955423),
(0.442863, 0.238856, 0.864188),
(0.162460, 0.262866, 0.951056),
(-0.681718, 0.147621, 0.716567),
(-0.809017, 0.309017, 0.500000),
(-0.587785, 0.425325, 0.688191),
(-0.850651, 0.525731, 0.000000),
(-0.864188, 0.442863, 0.238856),
(-0.716567, 0.681718, 0.147621),
(-0.688191, 0.587785, 0.425325),
(-0.500000, 0.809017, 0.309017),
(-0.238856, 0.864188, 0.442863),
(-0.425325, 0.688191, 0.587785),
(-0.716567, 0.681718, -0.147621),
(-0.500000, 0.809017, -0.309017),
(-0.525731, 0.850651, 0.000000),
(0.000000, 0.850651, -0.525731),
(-0.238856, 0.864188, -0.442863),
(0.000000, 0.955423, -0.295242),
(-0.262866, 0.951056, -0.162460),
(0.000000, 1.000000, 0.000000),
(0.000000, 0.955423, 0.295242),
(-0.262866, 0.951056, 0.162460),
(0.238856, 0.864188, 0.442863),
(0.262866, 0.951056, 0.162460),
(0.500000, 0.809017, 0.309017),
(0.238856, 0.864188, -0.442863),
(0.262866, 0.951056, -0.162460),
(0.500000, 0.809017, -0.309017),
(0.850651, 0.525731, 0.000000),
(0.716567, 0.681718, 0.147621),
(0.716567, 0.681718, -0.147621),
(0.525731, 0.850651, 0.000000),
(0.425325, 0.688191, 0.587785),
(0.864188, 0.442863, 0.238856),
(0.688191, 0.587785, 0.425325),
(0.809017, 0.309017, 0.500000),
(0.681718, 0.147621, 0.716567),
(0.587785, 0.425325, 0.688191),
(0.955423, 0.295242, 0.000000),
(1.000000, 0.000000, 0.000000),
(0.951056, 0.162460, 0.262866),
(0.850651, -0.525731, 0.000000),
(0.955423, -0.295242, 0.000000),
(0.864188, -0.442863, 0.238856),
(0.951056, -0.162460, 0.262866),
(0.809017, -0.309017, 0.500000),
(0.681718, -0.147621, 0.716567),
(0.850651, 0.000000, 0.525731),
(0.864188, 0.442863, -0.238856),
(0.809017, 0.309017, -0.500000),
(0.951056, 0.162460, -0.262866),
(0.525731, 0.000000, -0.850651),
(0.681718, 0.147621, -0.716567),
(0.681718, -0.147621, -0.716567),
(0.850651, 0.000000, -0.525731),
(0.809017, -0.309017, -0.500000),
(0.864188, -0.442863, -0.238856),
(0.951056, -0.162460, -0.262866),
(0.147621, 0.716567, -0.681718),
(0.309017, 0.500000, -0.809017),
(0.425325, 0.688191, -0.587785),
(0.442863, 0.238856, -0.864188),
(0.587785, 0.425325, -0.688191),
(0.688191, 0.587785, -0.425325),
(-0.147621, 0.716567, -0.681718),
(-0.309017, 0.500000, -0.809017),
(0.000000, 0.525731, -0.850651),
(-0.525731, 0.000000, -0.850651),
(-0.442863, 0.238856, -0.864188),
(-0.2952
| Tendrl/node_agent | tendrl/node_agent/node_sync/disk_sync.py | Python | lgpl-2.1 | 18,338 | 0.000327 |
import os
import unicodedata
from tendrl.commons.event import Event
from tendrl.commons.message import ExceptionMessage
from tendrl.commons.utils import cmd_utils
from tendrl.commons.utils import etcd_utils
from tendrl.commons.utils import log_utils as logger
def sync():
try:
_keep_alive_for = int(NS.config.data.get("sync_interval", 10)) + 250
disks = get_node_disks()
disk_map = {}
for disk in disks:
# Creating dict with disk name as key and disk_id as value
# It will help populate block device disk_id attribute
_map = dict(disk_id=disks[disk]['disk_id'], ssd=False)
disk_map[disks[disk]['disk_name']] = _map
        block_devices = get_node_block_devices(disk_map)
for disk in disks:
if disk_map[disks[disk]['disk_name']]:
                disks[disk]['ssd'] = disk_map[disks[disk]['disk_name']]['ssd']
            if "virtio" in disks[disk]["driver"]:
# Virtual disk
NS.tendrl.objects.VirtualDisk(**disks[disk]).save(
ttl=_keep_alive_for
)
else:
# physical disk
NS.tendrl.objects.Disk(**disks[disk]).save(ttl=_keep_alive_for)
for device in block_devices['all']:
NS.tendrl.objects.BlockDevice(**device).save(ttl=_keep_alive_for)
for device_id in block_devices['used']:
etcd_utils.write(
"nodes/%s/LocalStorage/BlockDevices/used/%s" %
(NS.node_context.node_id,
device_id.replace("/", "_").replace("_", "", 1)),
device_id, ttl=_keep_alive_for
)
for device_id in block_devices['free']:
etcd_utils.write(
"nodes/%s/LocalStorage/BlockDevices/free/%s" %
(NS.node_context.node_id,
device_id.replace("/", "_").replace("_", "", 1)),
device_id, ttl=_keep_alive_for
)
raw_reference = get_raw_reference()
etcd_utils.write(
"nodes/%s/LocalStorage/DiskRawReference" %
NS.node_context.node_id,
raw_reference,
ttl=_keep_alive_for,
)
except(Exception, KeyError) as ex:
_msg = "node_sync disks sync failed: " + ex.message
Event(
ExceptionMessage(
priority="error",
publisher=NS.publisher_id,
payload={"message": _msg,
"exception": ex}
)
)
def get_node_disks():
disks, disks_map, err = get_disk_details()
if not err:
cmd = cmd_utils.Command('hwinfo --partition')
out, err, rc = cmd.run()
if not err:
for partitions in out.split('\n\n'):
devlist = {"hardware_id": "",
"parent_hardware_id": "",
"sysfs_id": "",
"hardware_class": "",
"model": "",
"partition_name": "",
"device_files": "",
"config_status": "",
}
for partition in partitions.split('\n'):
key = partition.split(':')[0]
if key.strip() == "Unique ID":
devlist["hardware_id"] = \
partition.split(':')[1].lstrip()
if key.strip() == "Parent ID":
devlist["parent_hardware_id"] = \
partition.split(':')[1].lstrip()
if key.strip() == "SysFS ID":
devlist["sysfs_id"] = \
partition.split(':')[1].lstrip()
if key.strip() == "Hardware Class":
devlist["hardware_class"] = \
partition.split(':')[1].lstrip()
if key.strip() == "Model":
devlist["model"] = \
partition.split(':')[1].lstrip().replace('"', "")
if key.strip() == "Device File":
_name = partition.split(':')[1].lstrip()
devlist["partition_name"] = \
"".join(_name.split(" ")[0])
if key.strip() == "Device Files":
devlist["device_files"] = \
partition.split(':')[1].lstrip()
if key.strip() == "Config Status":
devlist["config_status"] = \
partition.split(':')[1].lstrip()
# checking if partition parent id is in collected
# disk_ids or not
if devlist["parent_hardware_id"] in disks_map:
part_name = devlist["partition_name"]
parent = disks_map[devlist["parent_hardware_id"]]
disks[parent]["partitions"][part_name] = devlist
return disks
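# Illustrative `hwinfo --partition` block for the parser above (abridged;
# the values are hypothetical, the keys are the ones matched in the loop):
#   Unique ID: W1ZB.Ku0AVvbD5w5
#   Parent ID: 3OOL.ZDUMUWBBB36
#   SysFS ID: /class/block/sda/sda1
#   Hardware Class: partition
#   Model: "Partition"
#   Device File: /dev/sda1
#   Config Status: cfg=new, avail=yes, need=no, active=unknown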
def get_disk_details():
disks = {}
disks_map = {}
cmd = cmd_utils.Command('hwinfo --disk')
out, err, rc = cmd.run()
if not err:
out = unicodedata.normalize('NFKD', out).encode('utf8', 'ignore') \
if isinstance(out, unicode) \
else unicode(out, errors="ignore").encode('utf8')
for all_disks in out.split('\n\n'):
devlist = {"disk_id": "",
"hardware_id": "",
"parent_id": "",
"disk_name": "",
"sysfs_id": "",
"sysfs_busid": "",
"sysfs_device_link": "",
"hardware_class": "",
"model": "",
"vendor": "",
"device": "",
"rmversion": "",
"serial_no": "",
"driver_modules": "",
"driver": "",
"device_files": "",
"device_number": "",
"bios_id": "",
"geo_bios_edd": "",
"geo_logical": "",
"size": "",
"size_bios_edd": "",
"geo_bios_legacy": "",
"config_status": "",
"partitions": {}
}
for disk in all_disks.split('\n'):
key = disk.split(':')[0]
if key.strip() == "Unique ID":
devlist["hardware_id"] = \
disk.split(':')[1].lstrip()
elif key.strip() == "Parent ID":
devlist["parent_id"] = \
disk.split(':')[1].lstrip()
elif key.strip() == "SysFS ID":
devlist["sysfs_id"] = \
disk.split(':')[1].lstrip()
elif key.strip() == "SysFS BusID":
devlist["sysfs_busid"] = \
disk.split(':')[1].lstrip()
elif key.strip() == "SysFS Device Link":
devlist["sysfs_device_link"] = \
disk.split(':')[1].lstrip()
elif key.strip() == "Hardware Class":
devlist["hardware_class"] = \
disk.split(':')[1].lstrip()
elif key.strip() == "Model":
devlist["model"] = \
disk.split(':')[1].lstrip().replace('"', "")
elif key.strip() == "Vendor":
devlist["vendor"] = \
disk.split(':')[1].replace(" ", "").replace('"', "")
elif key.strip() == "Device":
devlist["device"] = \
disk.split(':')[1].replace(" ", "").replace('"', "")
elif key.strip() == "Revision":
devlist["rmversion"] = \
disk.split(':')[1].lstrip().replace('"', "")
| sergeeva-olga/decree-server | setup.py | Python | gpl-3.0 | 3,119 | 0.002886 |
# This is your "setup.py" file.
# See the following sites for general guide to Python packaging:
# * `The Hitchhiker's Guide to Packaging <http://guide.python-distribute.org/>`_
# * `Python Project Howto <http://infinitemonkeycorps.net/docs/pph/>`_
from setuptools import setup, find_packages
import sys
import os
#from Cython.Build import cythonize
from setuptools.extension import Extension
here = os.path.abspath(os.path.dirname(__file__))
README = open(os.path.join(here, 'README.md'), "rb").read().decode("utf-8")
NEWS = open(os.path.join(here, 'NEWS.rst')).read()
version = '0.1'
install_requires = [
# List your project dependencies here.
# For more details, see:
# http://packages.python.org/distribute/setuptools.html#declaring-dependencies
# Packages with fixed versions
# "<package1>==0.1",
# "<package2>==0.3.0",
# "nose", "coverage" # Put it here.
]
tests_requires = [
# List your project testing dependencies here.
]
dev_requires = [
# List your project development dependencies here.\
]
dependency_links = [
# Sources for some fixed versions packages
#'https://github.com/<user1>/<package1>/archive/master.zip#egg=<package1>-0.1',
#'https://github.com/<user2>/<package2>/archive/master.zip#egg=<package2>-0.3.0',
]
# Cython extension
# TOP_DIR="/home/eugeneai/Development/codes/NLP/workprog/tmp/link-grammar"
# LG_DIR="link-grammar"
# LG_LIB_DIR=os.path.join(TOP_DIR,LG_DIR,".libs")
# LG_HEADERS=os.path.join(TOP_DIR)
ext_modules = [
# Extension("isu.aquarium.cython_module",
# sources=["src/./isu.aquarium/cython_module.pyx"],
# libraries=["gdal"],
# )
]
setup(
name='isu.aquarium',
version=version,
description="Document organizing WEB-system",
long_description=README + '\n\n' + NEWS,
# Get classifiers from http://pypi.python.org/pypi?%3Aaction=list_classifiers
# classifiers=[c.strip() for c in """
# Development Status :: 4 - Beta
# License :: OSI Approved :: MIT License
# Operating System :: OS Independent
# Programming Language :: Python :: 2.6
# Programming Language :: Python :: 2.7
# Programming Language :: Python :: 3
# Topic :: Software Development :: Libraries :: Python Modules
# """.split('\n') if c.strip()],
# ],
    keywords='WEB Semantics JavaScript',
author='Evgeny Cherkashin',
author_email='[email protected]',
url='https://github.com/sergeeva-olga/decree-server',
license='GPL>=2',
packages=find_packages("src"),
package_dir={'': "src"},
namespace_packages=['isu'],
include_package_data=True,
zip_safe=False,
install_requires=install_requires,
dependency_links=dependency_links,
extras_require={
'tests': tests_requires,
'dev': dev_requires,
},
test_suite='tests',
entry_points="""\
[paste.app_factory]
main=isu.aquarium.server:main
""",
#ext_modules = cythonize(ext_modules),
#test_suite = 'nose.collector',
# setup_requires=['nose>=1.0','Cython','coverage']
)
| saleem-latif/GeoCode | tests/unittest_geocode.py | Python | gpl-2.0 | 1,541 | 0 |
from testscenarios import TestWithScenarios
import unittest
from geocode.geocode import GeoCodeAccessAPI
class GeoCodeTests(TestWithScenarios, unittest.TestCase):
scenarios = [
(
"Scenario - 1: Get latlng from address",
{
'address': "Sydney NSW",
'latlng': (-33.8674869, 151.2069902),
'method': "geocode",
}
),
(
"Scenario - 2: Get address from latlng",
{
'address': "Sydney NSW",
'latlng': (-33.8674869, 151.2069902),
'method': "address",
}
),
]
def setUp(self):
self.api = GeoCodeAccessAPI()
def test_geocode(self):
if self.method == 'geocode':
            expected_address = self.address
expected_lat = self.latlng[0]
expected_lng = self.latlng[1]
geocode = self.api.get_geocode(expected_address)
            self.assertAlmostEqual(geocode.lat, expected_lat, delta=5)
self.assertAlmostEqual(geocode.lng, expected_lng, delta=5)
self.assertIn(expected_address, geocode.address)
else:
expected_address = self.address
expected_lat = self.latlng[0]
expected_lng = self.latlng[1]
address = self.api.get_address(lat=expected_lat, lng=expected_lng)
self.assertIn(expected_address, address)
def tearDown(self):
pass
if __name__ == "__main__":
unittest.main()
| visipedia/tf_classification | tfserving/client.py | Python | mit | 2,561 | 0.0164 |
"""
A simple client to query a TensorFlow Serving instance.
Example:
$ python client.py \
--images IMG_0932_sm.jpg \
--num_results 10 \
--model_name inception \
--host localhost \
--port 9000 \
--timeout 10
Author: Grant Van Horn
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import time
import tfserver
def parse_args():
parser = argparse.ArgumentParser(description='Command line classification client. Sorts and prints the classification results.')
parser.add_argument('--images', dest='image_paths',
help='Path to one or more images to classify (jpeg or png).',
type=str, nargs='+', required=True)
parser.add_argument('--num_results', dest='num_results',
help='The number of results to print. Set to 0 to print all classes.',
required=False, type=int, default=0)
parser.add_argument('--model_name', dest='model_name',
help='The name of the model to query.',
required=False, type=str, default='inception')
parser.add_argument('--host', dest='host',
help='Machine host where the TensorFlow Serving model is.',
required=False, type=str, default='localhost')
parser.add_argument('--port', dest='port',
help='Port that the TensorFlow Server is listening on.',
required=False, type=int, default=9000)
parser.add_argument('--timeout', dest='timeout',
                        help='Amount of time to wait before failing.',
required=False, type=int, default=10)
args = parser.parse_args()
return args
def main():
args = parse_args()
# Read in the image bytes
image_data = []
for fp in args.image_paths:
        with open(fp, 'rb') as f:
data = f.read()
image_data.append(data)
# Get the predictions
t = time.time()
predictions = tfserver.predict(image_data, model_name=args.model_name,
host=args.host, port=args.port, timeout=args.timeout
)
dt = time.time() - t
print("Prediction call took %0.4f seconds" % (dt,))
# Process the results
results = tfserver.process_classification_prediction(predictions, max_classes=args.num_results)
# Print the results
for i, fp in enumerate(args.image_paths):
print("Results for image: %s" % (fp,))
for name, score in results[i]:
print("%s: %0.3f" % (name, score))
print()
if __name__ == '__main__':
main()
| nateprewitt/pipenv | pipenv/vendor/backports/shutil_get_terminal_size/__init__.py | Python | mit | 338 | 0 |
"""A backport of the get_termina
|
l_size function from Python 3.3's shutil."""
__title__ = "bac
|
kports.shutil_get_terminal_size"
__version__ = "1.0.0"
__license__ = "MIT"
__author__ = "Christopher Rosell"
__copyright__ = "Copyright 2014 Christopher Rosell"
__all__ = ["get_terminal_size"]
from .get_terminal_size import get_terminal_size
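# Usage sketch (the signature mirrors Python 3.3's shutil.get_terminal_size):
#
#   from backports.shutil_get_terminal_size import get_terminal_size
#   columns, lines = get_terminal_size(fallback=(80, 24))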
|
welliam/imagersite
|
user_profile/tests.py
|
Python
|
mit
| 2,830
| 0
|
from django.test import TestCase
from django.contrib.auth.models import User
from django.urls import reverse
from .models import UserProfile
from imagersite.tests import AuthenticatedTestCase
# Create your tests here.
class ProfileTestCase(TestCase):
"""TestCase for Profile"""
def setUp(self):
"""Set up User Profile"""
self.user = User(username='Cris', first_name='Cris')
self.user.save()
def test_user_has_profile(self):
"""Test User has a profile."""
self.assertTrue(hasattr(self.user, 'profile'))
def test_profile_username(self):
"""Test Profile has username"""
self.assertEqual(self.user.profile.user.username, 'Cris')
# Learn to paramertize
def test_profile_has_cameratype(self):
"""Test profile has cameria type attr."""
self.assertTrue(hasattr(self.user.profile, 'camera_type'))
def test_profile_repr(self):
"""Test repr function."""
self.assertIn('Cris', repr(self.user.profile))
def test_profile_active(self):
"""Test profile manager."""
self.assertTrue(len(UserProfile.active.all()) > 0)
class UserProfilePageTestCase(AuthenticatedTestCase):
"""Test case for viewing the profile."""
def test_profile_page(self):
self.log_in()
self.assertEqual(self.client.get('/profile/').status_code, 200)
def test_profile_page_has_username(self):
self.log_in()
self.assertIn(
self.username.encode('utf-8'),
self.client.get('/profile/').content
        )
    def test_profile_page_has_photo_count(self):
self.log_in()
self.assertIn(
b'Photos uploaded:',
self.client.get('/profile/').content
)
def test_profile_page_has_album_count(self):
self.log_in()
self.assertIn(b'Albums created:', self.client.get('/profile/').content)
class EditProfileTestCase(TestCase):
"""Edit profile test case."""
def setUp(self):
"""GET the route named edit_profile."""
self.user = User(username='test')
self.user.save()
self.client.force_login(self.user)
self.response = self.client.get(reverse('edit_profile'))
def test_status_code(self):
"""Test the status code for GETing edit_profile is 200."""
self.assertEqual(self.response.status_code, 200)
def test_edit_profile(self):
"""Test editing a album stores the updated value."""
new_camera_type = 'camera'
data = {
'camera_type': new_camera_type,
}
response = self.client.post(reverse('edit_profile'), data)
self.assertEqual(response.status_code, 302)
profile = UserProfile.objects.filter(user=self.user).first()
self.assertEqual(profile.camera_type, new_camera_type)
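# These tests assume roughly the following model shape -- a hedged sketch,
# not the actual user_profile.models source:
#
#   class UserProfile(models.Model):
#       user = models.OneToOneField(User, related_name='profile')
#       camera_type = models.CharField(max_length=255, blank=True)
#       objects = models.Manager()
#       active = ActiveProfileManager()  # hypothetical manager filtering
#                                        # to profiles of active users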
|
alphagov/notifications-api
|
migrations/versions/0146_add_service_callback_api.py
|
Python
|
mit
| 2,779
| 0.010076
|
"""
Revision ID: 0146_add_service_callback_api
Revises: 0145_add_notification_reply_to
Create Date: 2017-11-28 15:13:48.730554
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
revision = '0146_add_service_callback_api'
down_revision = '0145_add_notification_reply_to'
def upgrade():
op.create_table('service_callback_api_history',
sa.Column('id', postgresql.UUID(as_uuid=True), nullable=False),
sa.Column('service_id', postgresql.UUID(as_uuid=True), nullable=False),
sa.Column('url', sa.String(), nullable=False),
sa.Column('bearer_token', sa.String(), nullable=False),
sa.Column('created_at', sa.DateTime(), nullable=False),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('updated_by_id', postgresql.UUID(as_uuid=True), nullable=False),
sa.Column('version', sa.Integer(), autoincrement=False, nullable=False),
sa.PrimaryKeyConstraint('id', 'version')
)
    op.create_index(op.f('ix_service_callback_api_history_service_id'), 'service_callback_api_history',
['service_id'], unique=False)
op.create_index(op.f('ix_service_callback_api_history_updated_by_id'), 'service_callback_api_history',
['updated_by_id'], unique=False)
op.create_table('service_callback_api',
sa.Column('id', postgresql.UUID(as_uuid=True), nullable=False),
sa.Column('service_id', postgresql.UUID(as_uuid=True), nullable=False),
                    sa.Column('url', sa.String(), nullable=False),
sa.Column('bearer_token', sa.String(), nullable=False),
sa.Column('created_at', sa.DateTime(), nullable=False),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('updated_by_id', postgresql.UUID(as_uuid=True), nullable=False),
sa.Column('version', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['service_id'], ['services.id'], ),
sa.ForeignKeyConstraint(['updated_by_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_service_callback_api_service_id'), 'service_callback_api', ['service_id'], unique=True)
op.create_index(op.f('ix_service_callback_api_updated_by_id'), 'service_callback_api', ['updated_by_id'], unique=False)
def downgrade():
op.drop_index(op.f('ix_service_callback_api_updated_by_id'), table_name='service_callback_api')
op.drop_index(op.f('ix_service_callback_api_service_id'), table_name='service_callback_api')
op.drop_table('service_callback_api')
op.drop_index(op.f('ix_service_callback_api_history_updated_by_id'), table_name='service_callback_api_history')
op.drop_index(op.f('ix_service_callback_api_history_service_id'), table_name='service_callback_api_history')
op.drop_table('service_callback_api_history')
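# Standard Alembic invocation for this pair of revisions:
#
#   alembic upgrade 0146_add_service_callback_api
#   alembic downgrade 0145_add_notification_reply_to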
|
kragniz/mypaint
|
brushlib/doc/source/conf.py
|
Python
|
gpl-2.0
| 8,376
| 0.007283
|
# -*- coding: utf-8 -*-
#
# libmypaint documentation build configuration file, created by
# sphinx-quickstart2 on Wed Jun 13 23:40:45 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx', 'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.pngmath', 'sphinx.ext.ifconfig', 'sphinx.ext.viewcode']
# Breathe setup, for integrating doxygen content
extensions.append('breathe')
doxyxml_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)), '../doxygen')
print doxyxml_dir
breathe_projects = {"libmypaint": doxyxml_dir}
breathe_default_project = "libmypaint"
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'libmypaint'
copyright = u'2012, MyPaint Development Team'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'libmypaintdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'libmypaint.tex', u'libmypaint Documentation',
u'MyPaint Development Team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'libmypaint', u'libmypaint Documentation',
[u'MyPaint Development Team'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'libmypaint', u'libmypaint Documentation',
u'MyPaint Development Team', 'libmypaint', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
|
tongpa/pypollmanage
|
pypollmanage/controllers/__init__.py
|
Python
|
apache-2.0
| 119
| 0.008403
|
# -*- coding: utf-8 -*-
"""Controllers for the pypollmanage pluggab
|
le application
|
."""
from .root import RootController
|
Comunitea/CMNT_00098_2017_JIM_addons
|
jim_invoice/models/general_ledger_wizard.py
|
Python
|
agpl-3.0
| 545
| 0.003676
|
# -*- coding: utf-8 -*-
# © 2016 Comunitea
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
from odoo import api, fields, models
class GeneralLedgerReportWizard(models.TransientModel):
_inherit = "general.ledger.report.wizard"
@api.onchange('company_id')
def onchange_company_id(self):
res = super(GeneralLedgerReportWizard, self).onchange_company_id()
if self.company_id:
res['domain']['partner_ids'] = [
('is_company', '=', True)
]
        return res
|
yatinkumbhare/openstack-nova
|
nova/virt/configdrive.py
|
Python
|
apache-2.0
| 9,964
| 0.000401
|
# Copyright 2012 Michael Still and Canonical Inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Config Drive v2 helper."""
import os
import shutil
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import strutils
from oslo_utils import units
from nova import exception
from nova.i18n import _LW
from nova.openstack.common import fileutils
from nova import utils
from nova import version
LOG = logging.getLogger(__name__)
configdrive_opts = [
cfg.StrOpt('config_drive_format',
default='iso9660',
choices=('iso9660', 'vfat'),
help='Config drive format.'),
# force_config_drive is a string option, to allow for future behaviors
# (e.g. use config_drive based on image properties)
cfg.StrOpt('force_config_drive',
choices=('always', 'True', 'False'),
help='Set to "always" to force injection to take place on a '
'config drive. NOTE: The "always" will be deprecated in '
'the Liberty release cycle.'),
cfg.StrOpt('mkisofs_cmd',
default='genisoimage',
help='Name and optionally path of the tool used for '
'ISO image creation')
]
CONF = cfg.CONF
CONF.register_opts(configdrive_opts)
# Config drives are 64mb, if we can't size to the exact size of the data
CONFIGDRIVESIZE_BYTES = 64 * units.Mi
FS_FORMAT_VFAT = 'vfat'
FS_FORMAT_ISO9660 = 'iso9660'
IMAGE_TYPE_RAW = 'raw'
IMAGE_TYPE_PLOOP = 'ploop'
class ConfigDriveBuilder(object):
"""Build config drives, optionally as a context manager."""
def __init__(self, instance_md=None):
if CONF.force_config_drive == 'always':
LOG.warning(_LW('The setting "always" will be deprecated in the '
'Liberty version. Please use "True" instead'))
self.imagefile = None
self.mdfiles = []
if instance_md is not None:
self.add_instance_metadata(instance_md)
def __enter__(self):
return self
def __exit__(self, exctype, excval, exctb):
if exctype is not None:
# NOTE(mikal): this means we're being cleaned up because an
# exception was thrown. All bets are off now, and we should not
# swallow the exception
return False
self.cleanup()
def _add_file(self, basedir, path, data):
filepath = os.path.join(basedir, path)
dirname = os.path.dirname(filepath)
fileutils.ensure_tree(dirname)
with open(filepath, 'wb') as f:
f.write(data)
def add_instance_metadata(self, instance_md):
for (path, data) in instance_md.metadata_for_config_drive():
self.mdfiles.append((path, data))
def _write_md_files(self, basedir):
for data in self.mdfiles:
self._add_file(basedir, data[0], data[1])
def _make_iso9660(self, path, tmpdir):
publisher = "%(product)s %(version)s" % {
'product': version.product_string(),
'version': version.version_string_with_package()
}
utils.execute(CONF.mkisofs_cmd,
'-o', path,
'-ldots',
'-allow-lowercase',
'-allow-multidot',
'-l',
'-publisher',
publisher,
'-quiet',
'-J',
'-r',
'-V', 'config-2',
tmpdir,
attempts=1,
run_as_root=False)
def _make_vfat(self, path, tmpdir):
# NOTE(mikal): This is a little horrible, but I couldn't find an
# equivalent to genisoimage for vfat filesystems.
with open(path, 'wb') as f:
f.truncate(CONFIGDRIVESIZE_BYTES)
utils.mkfs('vfat', path, label='config-2')
with utils.tempdir() as mountdir:
mounted = False
try:
_, err = utils.trycmd(
'mount', '-o', 'loop,uid=%d,gid=%d' % (os.getuid(),
os.getgid()),
path,
mountdir,
run_as_root=True)
if err:
raise exception.ConfigDriveMountFailed(operation='mount',
error=err)
mounted = True
# NOTE(mikal): I can't just use shutils.copytree here,
# because the destination directory already
# exists. This is annoying.
for ent in os.listdir(tmpdir):
shutil.copytree(os.path.join(tmpdir, ent),
os.path.join(mountdir, ent))
finally:
if mounted:
utils.execute('umount', mountdir, run_as_root=True)
def _make_ext4_ploop(self, path, tmpdir):
"""ploop is a disk loopback block device, that is used in
Parallels(OpenVZ) containers. It is similiar to Linux loop
device but prevents double caching of data in memory and
supports snapshots and some other effeciency benefits. Adding
ploop is a natural way to add disk device to VZ containers.
Ploop device has its own image format. It contains specific
partition table with one ext4 partition.
"""
os.mkdir(path)
utils.execute('ploop',
'init',
'-s', CONFIGDRIVESIZE_BYTES,
'-t', 'ext4',
path + '/disk.config.hds',
attempts=1,
run_as_root=True)
with utils.tempdir() as mountdir:
mounted = False
try:
_, err = utils.trycmd(
'ploop', 'mount',
'-m', mountdir,
'-t', 'ext4',
path + '/DiskDescriptor.xml',
run_as_root=True)
if os.path.exists(mountdir):
utils.execute('chown', '-R',
'%(u)d:%(g)d' % {'u': os.getuid(),
'g': os.getgid()},
mountdir,
run_as_root=True)
mounted = True
for ent in os.listdir(tmpdir):
shutil.copytree(os.path.join(tmpdir, ent),
os.path.join(mountdir, ent))
finally:
if mounted:
utils.execute('ploop', 'umount',
path + '/disk.config.hds', run_as_root=True)
def make_drive(self, path, image_type=IMAGE_TYPE_RAW):
"""Make the config drive.
:param path: the path to place the config drive image at
:param image_type: host side image format
:raises ProcessExecuteError if a helper process has failed.
"""
fs_format = CONF.config_drive_format
if fs_format is None:
if image_type == IMAGE_TYPE_RAW:
fs_format = FS_FORMAT_ISO9660
with utils.tempdir() as tmpdir:
self._write_md_files(tmpdir)
if image_type == IMAGE_TYPE_RAW:
if fs_format not in (FS_FORMAT_VFAT, FS_FORMAT_ISO9660):
raise exception.ConfigDriveUnsupportedFormat(
format=fs_format,
|
cytex124/celsius-cloud-backend
|
src/addons/management_user/admin.py
|
Python
|
mit
| 408
| 0
|
from django.contrib.admin.models import LogEntry
from django.contrib.auth.models import User, Group, Permission
from simple_history import register
from celsius.tools import register_for_permission_handling
register(User)
register(Group)
register_for_permission_handling(User)
register_for_permission_handling(Group)
register_for_permission_handling(Permission)
register_for_permission_handling(LogEntry)
|
RobSpectre/garfield
|
garfield/voice/tests/test_models.py
|
Python
|
mit
| 499
| 0
|
from django.test import TestCase
from voice.models import Call
class CallModelTestCase(TestCase):
    def setUp(self):
self.call = Call(sid="CAxxx",
from_number="+15558675309",
to_number="+15556667777")
self.call.save()
def test_string_representation(self):
self.assertEqual(str(self.call),
"{0}: from +1555867530
|
9 to "
"+15556667777".format(self.call.date_created))
|
TAMU-CPT/galaxy-tools
|
tools/phage/phage_annotation_table.py
|
Python
|
gpl-3.0
| 19,472
| 0.004725
|
#!/usr/bin/env python
# vim: set fileencoding=utf-8
import os
import argparse
from gff3 import genes, get_gff3_id, get_rbs_from, feature_test_true, feature_lambda, feature_test_type
from cpt_gffParser import gffParse, gffWrite
from Bio import SeqIO
from jinja2 import Environment, FileSystemLoader
import logging
from math import floor
logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger(name="pat")
# Path to script, required because of Galaxy.
SCRIPT_PATH = os.path.dirname(os.path.realpath(__file__))
# Path to the HTML template for the report
def genes_all(feature_list, feature_type=["gene"], sort=False):
"""
Simple filter to extract gene features from the feature set.
"""
if not sort:
for x in feature_lambda(
feature_list, feature_test_type, {"types": feature_type}, subfeatures=True
):
yield x
else:
data = list(genes_all(feature_list, feature_type, sort=False))
data = sorted(data, key=lambda feature: feature.location.start)
for x in data:
yield x
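# Usage sketch (hypothetical record/feature list): iterate CDS features in
# coordinate order:
#
#   for feat in genes_all(record.features, feature_type=["CDS"], sort=True):
#       print(feat.id, feat.location)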
def checkSubs(feature, qualName):
subFeats = []
res = ""
subFeats = feature.sub_features
while (len(subFeats) > 0):
for feat in subFeats:
for i in feat.qualifiers.keys():
for j in qualName:
if i == j:
if res == "":
res = feat.qualifiers[i][0]
else:
res += "; " + feat.qualifiers[i][0]
if res != "":
return res
tempFeats = []
for feat in subFeats: # Should be breadth-first results
for x in feat.sub_features:
tempFeats.append(x)
subFeats = tempFeats
return res
def annotation_table_report(record, types, wanted_cols, gaf_data, searchSubs):
getTypes = []
for x in [y.strip() for y in types.split(",")]:
getTypes.append(x)
getTypes.append("gene")
sorted_features = list(genes_all(record.features, getTypes, sort=True))
if wanted_cols is None or len(wanted_cols.strip()) == 0:
return [], []
useSubs = searchSubs
def rid(record, feature):
"""Organism ID
"""
return record.id
def id(record, feature):
"""ID
"""
return feature.id
def featureType(record, feature):
"""Type
"""
return feature.type
def name(record, feature):
"""Name
"""
for x in ["Name", "name"]:
for y in feature.qualifiers.keys():
if x == y:
return feature.qualifiers[x][0]
if useSubs:
res = checkSubs(feature, ["Name", "name"])
if res != "":
return res
return "None"
def start(record, feature):
"""Boundary
"""
return str(feature.location.start + 1)
def end(record, feature):
"""Boundary
"""
return str(feature.location.end)
def location(record, feature):
"""Location
"""
return str(feature.location.start + 1) + "..{0.end}".format(feature.location)
def length(record, feature):
"""CDS Length (AA)
"""
if feature.type == "CDS":
cdss = [feature]
else:
cdss = list(genes(feature.sub_features, feature_type="CDS", sort=True))
if cdss == []:
return "None"
res = (sum([len(cds) for cds in cdss]) / 3) - 1
if floor(res) == res:
res = int(res)
return str(res)
def notes(record, feature):
"""User entered Notes"""
for x in ["Note", "note", "Notes", "notes"]:
for y in feature.qualifiers.keys():
if x == y:
return feature.qualifiers[x][0]
if useSubs:
res = checkSubs(feature, ["Note", "note", "Notes", "notes"])
if res != "":
return res
return "None"
def date_created(record, feature):
"""Created"""
return feature.qualifiers.get("date_creation", ["None"])[0]
def date_last_modified(record, feature):
"""Last Modified"""
res = feature.qualifiers.get("date_last_modified", ["None"])[0]
if res != "None":
return res
if useSubs:
res = checkSubs(feature, ["date_last_modified"])
if res != "":
return res
return "None"
def description(record, feature):
"""Description"""
res = feature.qualifiers.get("description", ["None"])[0]
if res != "None":
return res
if useSubs:
res = checkSubs(feature, ["description"])
if res != "":
return res
return "None"
def owner(record, feature):
"""Owner
User who created the feature. In a 464 scenario this may be one of
the TAs."""
for x in ["Owner", "owner"]:
for y in feature.qualifiers.keys():
if x == y:
return feature.qualifiers[x][0]
if useSubs:
res = checkSubs(feature, ["Owner", "owner"])
if res != "":
return res
return "None"
def product(record, feature):
"""Product
User entered product qualifier (collects "Product" and "product"
entries)"""
"""User entered Notes"""
for x in ["product", "Product"]:
for y in feature.qualifiers.keys():
if x == y:
return feature.qualifiers[x][0]
if useSubs:
res = checkSubs(feature, ["product", "Product"])
if res != "":
return res
return "None"
def note(record, feature):
"""Note
User entered Note qualifier(s)"""
return feature.qualifiers.get("Note", [])
def strand(record, feature):
"""Strand
"""
return "+" if feature.location.strand > 0 else "-"
def sd_spacing(record, feature):
"""Shine-Dalgarno spacing
"""
rbss = get_rbs_from(gene)
if len(rbss) == 0:
            return "None"
else:
resp = []
for rbs in rbss:
cdss = list(genes(feature.sub_features, feature_type="CDS", sort=True))
if len(cdss) == 0:
return "No CDS"
if rbs.location.strand > 0:
distance = min(
cdss, key=lambda x: x.location.start - rbs.location.end
)
distance_val = str(distance.location.start - rbs.location.end)
resp.append(distance_val)
else:
distance = min(
cdss, key=lambda x: x.location.end - rbs.location.start
)
distance_val = str(rbs.location.start - distance.location.end)
resp.append(distance_val)
if len(resp) == 1:
return str(resp[0])
return resp
def sd_seq(record, feature):
"""Shine-Dalgarno sequence
"""
rbss = get_rbs_from(gene)
if len(rbss) == 0:
return "None"
else:
resp = []
for rbs in rbss:
resp.append(str(rbs.extract(record).seq))
if len(resp) == 1:
return str(resp[0])
else:
return resp
def start_codon(record, feature):
"""Start Codon
"""
if feature.type == "CDS":
cdss = [feature]
else:
cdss = list(genes(feature.sub_features, feature_type="CDS", sort=True))
data = [x for x in cdss]
if len(data) == 1:
return str(data[0].extract(record).seq[0:3])
else:
return [
"{0} ({1.location.start}..{1.location.end}:{1.location.strand})".format(
x.extract(record).seq[0:3], x
)
for x in data
]
def stop_codon(record, feature):
"""Stop Codon
"""
return str(feature.extract(record).seq[-3:])
def dbxrefs(record, feature):
"""DBxrefs
"""
"""User e
|
ntt-sic/nova
|
nova/tests/api/openstack/compute/plugins/v3/test_scheduler_hints.py
|
Python
|
apache-2.0
| 12,196
| 0.000246
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import uuid
from oslo.config import cfg
from nova.api.openstack import compute
from nova.api.openstack.compute import plugins
from nova.api.openstack.compute.plugins.v3 import servers
from nova.compute import api as compute_api
from nova.compute import flavors
from nova import db
import nova.db.api
from nova.network import manager
from nova.openstack.common import jsonutils
from nova.openstack.common import rpc
from nova import test
from nova.tests.api.openstack import fakes
from nova.tests import fake_instance
from nova.tests.image import fake
CONF = cfg.CONF
FAKE_UUID = fakes.FAKE_UUID
def fake_gen_uuid():
return FAKE_UUID
def return_security_group(context, instance_id, security_group_id):
pass
class SchedulerHintsTestCase(test.TestCase):
def setUp(self):
super(SchedulerHintsTestCase, self).setUp()
self.fake_instance = fakes.stub_instance(1, uuid=FAKE_UUID)
self.app = compute.APIRouterV3(init_only=('servers',
'os-scheduler-hints'))
def test_create_server_without_hints(self):
def fake_create(*args, **kwargs):
self.assertEqual(kwargs['scheduler_hints'], {})
return ([self.fake_instance], '')
self.stubs.Set(nova.compute.api.API, 'create', fake_create)
req = fakes.HTTPRequestV3.blank('/servers')
req.method = 'POST'
req.content_type = 'application/json'
body = {'server': {
'name': 'server_test',
'image_ref': 'cedef40a-ed67-4d10-800e-17455edce175',
'flavor_ref': '1',
}}
req.body = jsonutils.dumps(body)
res = req.get_response(self.app)
self.assertEqual(202, res.status_int)
def test_create_server_with_hints(self):
def fake_create(*args, **kwargs):
self.assertEqual(kwargs['scheduler_hints'], {'a': 'b'})
return ([self.fake_instance], '')
self.stubs.Set(nova.compute.api.API, 'create', fake_create)
req = fakes.HTTPRequestV3.blank('/servers')
req.method = 'POST'
req.content_type = 'application/json'
body = {
'server': {
'name': 'server_test',
'image_ref': 'cedef40a-ed67-4d10-800e-17455edce175',
'flavor_ref': '1',
},
'os-scheduler-hints:scheduler_hints': {'a': 'b'},
}
req.body = jsonutils.dumps(body)
res = req.get_response(self.app)
self.assertEqual(202, res.status_int)
def test_create_server_bad_hints(self):
req = fakes.HTTPRequestV3.blank('/servers')
req.method = 'POST'
req.content_type = 'application/json'
body = {
'server': {
'name': 'server_test',
'image_ref': 'cedef40a-ed67-4d10-800e-17455edce175',
'flavor_ref': '1',
},
'os-scheduler-hints:scheduler_hints': 'here',
}
req.body = jsonutils.dumps(body)
res = req.get_response(self.app)
self.assertEqual(400, res.status_int)
class ServersControllerCreateTest(test.TestCase):
def setUp(self):
"""Shared implementation for tests below that create instance."""
super(ServersControllerCreateTest, self).setUp()
self.flags(verbose=True,
enable_instance_password=True)
self.instance_cache_num = 0
self.instance_cache_by_id = {}
self.instance_cache_by_uuid = {}
ext_info = plugins.LoadedExtensionInfo()
self.controller = servers.ServersController(extension_info=ext_info)
CONF.set_override('extensions_blacklist', 'os-scheduler-hints',
'osapi_v3')
self.no_scheduler_hints_controller = servers.ServersController(
extension_info=ext_info)
def instance_create(context, inst):
inst_type = flavors.get_flavor_by_flavor_id(3)
image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
def_image_ref = 'http://localhost/images/%s' % image_uuid
self.instance_cache_num += 1
instance = fake_instance.fake_db_instance(**{
'id': self.instance_cache_num,
'display_name': inst['display_name'] or 'test',
'uuid': FAKE_UUID,
'instance_type': dict(inst_type),
'access_ip_v4': '1.2.3.4',
'access_ip_v6': 'fead::1234',
'image_ref': inst.get('image_ref', def_image_ref),
'user_id': 'fake',
'project_id': 'fake',
'reservation_id': inst['reservation_id'],
"created_at": datetime.datetime(2010, 10, 10, 12, 0, 0),
"updated_at": datetime.datetime(2010, 11, 11, 11, 0, 0),
"config_drive": None,
"progress": 0,
"fixed_ips": [],
"task_state": "",
"vm_state": "",
"root_device_name": inst.get('root_device_name', 'vda'),
})
self.instance_cache_by_id[instance['id']] = instance
self.instance_cache_by_uuid[instance['uuid']] = instance
return instance
def instance_get(context, instance_id):
"""Stub for compute/api create() pulling in instance after
scheduling
"""
return self.instance_cache_by_id[instance_id]
def instance_update(context, uuid, values):
instance = self.instance_cache_by_uuid[uuid]
instance.update(values)
return instance
def server_update(context, instance_uuid, params):
inst = self.instance_cache_by_uuid[instance_uuid]
inst.update(params)
return (inst, inst)
def fake_method(*args, **kwargs):
pass
def project_get_networks(context, user_id):
return dict(id='1', host='localhost')
def queue_get_for(context, *args):
return 'network_topic'
fakes.stub_out_rate_limiting(self.stubs)
        fakes.stub_out_key_pair_funcs(self.stubs)
fake.stub_out_image_service(self.stubs)
fakes.stub_out_nw_api(self.stubs)
self.stubs.Set(uuid, 'uuid4', fake_gen_uuid)
self.stubs.Set(db, 'instance_add_security_group',
return_security_group)
self.stubs.Set(db, 'project_get_networks',
project_get_networks)
self.stubs.Set(db, 'instance_create', instance_create)
self.stubs.Set(db, 'instance_system_metadata_update',
fake_method)
self.stubs.Set(db, 'instance_get', instance_get)
self.stubs.Set(db, 'instance_update', instance_update)
self.stubs.Set(rpc, 'cast', fake_method)
self.stubs.Set(db, 'instance_update_and_get_original',
server_update)
self.stubs.Set(rpc, 'queue_get_for', queue_get_for)
self.stubs.Set(manager.VlanManager, 'allocate_fixed_ip',
fake_method)
def _test_create_extra(self, params, no_image=False,
override_controller=None):
image_uuid = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
server = dict(name='server_test', image_ref=image_uuid, flavor_ref=2)
if no_image:
server.pop('image_ref', None)
server.update(params)
        body = dict(server=server)
|
guaix-ucm/numina
|
numina/array/combine.py
|
Python
|
gpl-3.0
| 9,851
| 0.000203
|
#
# Copyright 2008-2018 Universidad Complutense de Madrid
#
# This file is part of Numina
#
# SPDX-License-Identifier: GPL-3.0+
# License-Filename: LICENSE.txt
#
"""Different methods for combining lists of arrays."""
import numpy
import numina.array._combine as intl_combine
CombineError = intl_combine.CombineError
mean_method = intl_combine.mean_method
def mean(arrays, masks=None, dtype=None, out=None,
zeros=None, scales=None,
weights=None):
"""Combine arrays using the mean, with masks and offsets.
Arrays and masks are a list of array objects. All input arrays
have the same shape. If present, the masks have the same shape
also.
The function returns an array with one more dimension than the
inputs and with size (3, shape). out[0] contains the mean,
out[1] the variance and out[2] the number of points used.
:param arrays: a list of arrays
:param masks: a list of mask arrays, True values are masked
:param dtype: data type of the output
:param out: optional output, with one more axis than the input arrays
:return: mean, variance of the mean and number of points stored
Example:
>>> import numpy
>>> image = numpy.array([[1., 3.], [1., -1.4]])
>>> inputs = [image, image + 1]
>>> mean(inputs)
array([[[ 1.5, 3.5],
[ 1.5, -0.9]],
<BLANKLINE>
[[ 0.5, 0.5],
[ 0.5, 0.5]],
<BLANKLINE>
[[ 2. , 2. ],
[ 2. , 2. ]]])
"""
return generic_combine(intl_combine.mean_method(), arrays, masks=masks,
dtype=dtype, out=out,
                           zeros=zeros, scales=scales,
weights=weights)
def median(arrays, masks=None, dtype=None, out=None,
zeros=None, scales=None,
weights=None):
"""Combine arrays using the median, with masks.
Arrays and masks are a list of array objects. All input arrays
have the same shape. If present, the masks have the same shape
also.
The function returns an array with one more dimension than the
inputs and with size (3, shape). out[0] contains the mean,
    out[1] the variance and out[2] the number of points used.
:param arrays: a list of arrays
:param masks: a list of mask arrays, True values are masked
:param dtype: data type of the output
:param out: optional output, with one more axis than the input arrays
:return: median, variance of the median and number of points stored
"""
return generic_combine(intl_combine.median_method(), arrays, masks=masks,
dtype=dtype, out=out,
zeros=zeros, scales=scales, weights=weights)
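# Usage sketch, mirroring the mean() example above; out[0] is the median
# plane, out[1] its variance estimate, out[2] the point counts:
#
#   >>> import numpy
#   >>> image = numpy.array([[1., 3.], [1., -1.4]])
#   >>> out = median([image, image + 1])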
def sigmaclip(arrays, masks=None, dtype=None, out=None,
zeros=None, scales=None, weights=None,
low=3., high=3.):
"""Combine arrays using the sigma-clipping, with masks.
Inputs and masks are a list of array objects. All input arrays
have the same shape. If present, the masks have the same shape
also.
The function returns an array with one more dimension than the
inputs and with size (3, shape). out[0] contains the mean,
out[1] the variance and out[2] the number of points used.
:param arrays: a list of arrays
:param masks: a list of mask arrays, True values are masked
:param dtype: data type of the output
:param out: optional output, with one more axis than the input arrays
:param low:
:param high:
:return: mean, variance of the mean and number of points stored
"""
return generic_combine(intl_combine.sigmaclip_method(low, high), arrays,
masks=masks, dtype=dtype, out=out,
zeros=zeros, scales=scales, weights=weights)
def minmax(arrays, masks=None, dtype=None, out=None, zeros=None,
scales=None, weights=None, nmin=1, nmax=1):
"""Combine arrays using mix max rejection, with masks.
Inputs and masks are a list of array objects. All input arrays
have the same shape. If present, the masks have the same shape
also.
The function returns an array with one more dimension than the
inputs and with size (3, shape). out[0] contains the mean,
out[1] the variance and out[2] the number of points used.
:param arrays: a list of arrays
:param masks: a list of mask arrays, True values are masked
:param dtype: data type of the output
:param out: optional output, with one more axis than the input arrays
:param nmin:
:param nmax:
:return: mean, variance of the mean and number of points stored
"""
return generic_combine(intl_combine.minmax_method(nmin, nmax), arrays,
masks=masks, dtype=dtype, out=out,
zeros=zeros, scales=scales, weights=weights)
def quantileclip(arrays, masks=None, dtype=None, out=None,
zeros=None, scales=None, weights=None,
fclip=0.10):
"""Combine arrays using the sigma-clipping, with masks.
Inputs and masks are a list of array objects. All input arrays
have the same shape. If present, the masks have the same shape
also.
The function returns an array with one more dimension than the
inputs and with size (3, shape). out[0] contains the mean,
out[1] the variance and out[2] the number of points used.
:param arrays: a list of arrays
:param masks: a list of mask arrays, True values are masked
:param dtype: data type of the output
:param out: optional output, with one more axis than the input arrays
:param fclip: fraction of points removed on both ends. Maximum is 0.4 (80% of points rejected)
:return: mean, variance of the mean and number of points stored
"""
return generic_combine(intl_combine.quantileclip_method(fclip), arrays,
masks=masks, dtype=dtype, out=out,
zeros=zeros, scales=scales, weights=weights)
def flatcombine(arrays, masks=None, dtype=None, scales=None,
low=3.0, high=3.0, blank=1.0):
"""Combine flat arrays.
:param arrays: a list of arrays
:param masks: a list of mask arrays, True values are masked
:param dtype: data type of the output
:param out: optional output, with one more axis than the input arrays
:param blank: non-positive values are substituted by this on output
:return: mean, variance of the mean and number of points stored
"""
result = sigmaclip(arrays, masks=masks,
dtype=dtype, scales=scales,
low=low, high=high)
# Substitute values <= 0 by blank
mm = result[0] <= 0
result[0, mm] = blank
# Add values to mask
result[1:2, mm] = 0
return result
def zerocombine(arrays, masks, dtype=None, scales=None):
"""Combine zero arrays.
:param arrays: a list of arrays
:param masks: a list of mask arrays, True values are masked
:param dtype: data type of the output
:param scales:
:return: median, variance of the median and number of points stored
"""
result = median(arrays, masks=masks,
dtype=dtype, scales=scales)
return result
def sum(arrays, masks=None, dtype=None, out=None,
zeros=None, scales=None):
"""Combine arrays by addition, with masks and offsets.
Arrays and masks are a list of array objects. All input arrays
have the same shape. If present, the masks have the same shape
also.
The function returns an array with one more dimension than the
inputs and with size (3, shape). out[0] contains the sum,
out[1] the variance and out[2] the number of points used.
:param arrays: a list of arrays
:param masks: a list of mask arrays, True values are masked
:param dtype: data type of the output
:param out: optional output, with one more axis than the input arrays
:return: sum, variance of the sum and number of points stored
Example:
>>> import numpy
>>> image = numpy.array([[1., 3.], [1., -1.4]])
    >>> inputs = [image, image + 1]
|
ychen820/microblog
|
y/google-cloud-sdk/platform/google_appengine/lib/django-1.5/django/db/models/constants.py
|
Python
|
bsd-3-clause
| 118
| 0.008475
|
"""
Constants used across the ORM in general.
"""
# Separator used to split filter strings apart.
LOOKUP_SEP = '__'
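# Example: Entry.objects.filter(author__name__startswith='x') is resolved by
# splitting 'author__name__startswith' on LOOKUP_SEP into
# ['author', 'name', 'startswith'].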
|
douggeiger/gnuradio
|
gr-blocks/python/blocks/qa_multiply_matrix_ff.py
|
Python
|
gpl-3.0
| 4,810
| 0.009356
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2014 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
import time
import numpy
import os
import pmt
from gnuradio import gr, gr_unittest
from gnuradio import blocks
class test_multiply_matrix_ff (gr_unittest.TestCase):
def setUp (self):
self.tb = gr.top_block ()
self.multiplier = None
def tearDown (self):
self.tb = None
self.multiplier = None
def run_once(self, X_in, A, tpp=gr.TPP_DONT, A2=None, tags=None, msg_A=None):
""" Run the test for given input-, output- and matrix values.
Every row from X_in is considered an input signal on a port. """
X_in = numpy.matrix(X_in)
A_matrix = numpy.matrix(A)
(N, M) = A_matrix.shape
self.assertTrue(N == X_in.shape[0])
# Calc expected
Y_out_exp = numpy.matrix(numpy.zeros((M, X_in.shape[1])))
self.multiplier = blocks.multiply_matrix_ff(A, tpp)
if A2 is not None:
self.multiplier.set_A(A2)
A = A2
A_matrix = numpy.matrix(A)
for i in xrange(N):
if tags is None:
these_tags = ()
else:
these_tags = (tags[i],)
self.tb.connect(blocks.vector_source_f(X_in[i].tolist()[0], tags=these_tags), (self.multiplier, i))
sinks = []
        for i in xrange(M):
sinks.append(blocks.vector_sink_f())
self.tb.connect((self.multiplier, i), sinks[i])
# Run and check
self.tb.run()
for i in xrange(X_in.shape[1]):
Y_out_exp[:,i] = A_matrix * X_in[:,i]
Y_out = [list(x.data()) for x in sinks]
if tags is not None:
self.the_tags = []
for i in xrange(M):
self.the_tags.append(sinks[i].tags())
self.assertEqual(list(Y_out), Y_out_exp.tolist())
def test_001_t (self):
""" Simplest possible check: N==M, unit matrix """
X_in = (
(1, 2, 3, 4),
(5, 6, 7, 8),
)
A = (
(1, 0),
(0, 1),
)
self.run_once(X_in, A)
def test_002_t (self):
""" Switch check: N==M, flipped unit matrix """
X_in = (
(1, 2, 3, 4),
(5, 6, 7, 8),
)
A = (
(0, 1),
(1, 0),
)
self.run_once(X_in, A)
def test_003_t (self):
""" Average """
X_in = (
(1, 1, 1, 1),
(2, 2, 2, 2),
)
A = (
(0.5, 0.5),
(0.5, 0.5),
)
self.run_once(X_in, A)
def test_004_t (self):
""" Set """
X_in = (
(1, 2, 3, 4),
(5, 6, 7, 8),
)
A1 = (
(1, 0),
(0, 1),
)
A2 = (
(0, 1),
(1, 0),
)
self.run_once(X_in, A1, A2=A2)
def test_005_t (self):
""" Tags """
X_in = (
(1, 2, 3, 4),
(5, 6, 7, 8),
)
A = (
(0, 1), # Flip them round
(1, 0),
)
tag1 = gr.tag_t()
tag1.offset = 0
tag1.key = pmt.intern("in1")
tag1.value = pmt.PMT_T
tag2 = gr.tag_t()
tag2.offset = 0
tag2.key = pmt.intern("in2")
tag2.value = pmt.PMT_T
self.run_once(X_in, A, tpp=gr.TPP_ONE_TO_ONE, tags=(tag1, tag2))
self.assertTrue(pmt.equal(tag1.key, self.the_tags[0][0].key))
self.assertTrue(pmt.equal(tag2.key, self.the_tags[1][0].key))
#def test_006_t (self):
#""" Message passing """
#X_in = (
#(1, 2, 3, 4),
#(5, 6, 7, 8),
#)
#A1 = (
#(1, 0),
#(0, 1),
#)
#msg_A = (
#(0, 1),
#(1, 0),
#)
#self.run_once(X_in, A1, msg_A=msg_A)
if __name__ == '__main__':
#gr_unittest.run(test_multiply_matrix_ff, "test_multiply_matrix_ff.xml")
gr_unittest.run(test_multiply_matrix_ff)
|
oyajiro/l2bot
|
hf/wl.py
|
Python
|
artistic-2.0
| 2,661
| 0.007516
|
import pyautogui, win32api, win32con, ctypes, autoit
from PIL import ImageOps, Image, ImageGrab
from numpy import *
import os
import time
import cv2
import random
from Bot import *
def main():
bot = Bot()
autoit.win_wait(bot.title, 5)
counter = 0
poitonUse = 0
cycle = True
fullCounter = 0
while cycle:
hpstatus = bot.checkOwnHp()
print 'hp ' + str(hpstatus)
if hpstatus == 0:
autoit.control_send(bot.title, '', '{F9}', 0)
bot.sleep(0.3,0.6)
print 'Dead'
cv2.imwrite('Dead' + str(int(time.time())) + '.png',bot.getScreen(leftCornerx,leftCornery,x2,fullY2))
cycle = False
if hpstatus == 1:
if poitonUse == 0:
autoit.control_send(bot.title, '', '{F10}', 0)
poitonUse += 1
if poitonUse > 5:
poitonUse = 0
else:
poitonUse = 0
res = bot.findHP();
print 'tgs ' + str(res)
if res == 3:
fullCounter += 1
print 'fc ' + str(fullCounter)
autoit.control_send(bot.title, '', '{F1}', 0)
else:
fullCounter = 0
if fullCounter > 4:
autoit.control_send(bot.title, '', '{ESC}', 0)
bot.sleep(0.3,0.6)
autoit.control_send(bot.title, '', '{F3}', 0)
bot.sleep(0.1,0.3)
autoit.control_send(bot.title, '', '{F1}', 0)
# bot.mouseRotate()
fullCounter = 0
if res > 0:
autoit.control_send(bot.title, '', '{F1}', 0)
counter = 0
if res == 1 or res == 3:
bot.sleep(0.3,0.6)
if res > 1 and res < 3:
bot.sleep(1,3)
if res == 1:
autoit.control_send(bot.title, '', '{F3}', 0)
bot.sleep(0.3,0.6)
autoit.control_send(bot.title, '', '{F2}', 0)
bot.sleep(0.3,0.6)
autoit.control_send(bot.title, '', '{F1}', 0)
else:
fullCounter = 0
if counter < 3:
autoit.control_send(bot.title, '', '{F3}', 0)
bot.sleep(0.5,0.8)
autoit.control_send(bot.title, '', '{F1}', 0)
print 'F3'
if counter > 2:
# bot.findTarget()
autoit.control_send(bot.title, '', '{F7}', 0)
# if counter > 3:
#                autoit.control_send(bot.title, '', '{F8}', 0)
# counter = 0
counter += 1
            print 'cnt ' + str(counter)
pass
if __name__ == '__main__':
main()
|
AlienCowEatCake/ImageViewer
|
src/ThirdParty/Exiv2/exiv2-0.27.5-Source/tests/bugfixes/github/test_CVE_2017_17722.py
|
Python
|
gpl-3.0
| 409
| 0
|
# -*- coding: utf-8 -*-
import system_tests
class TestCvePoC(metaclass=system_tests.CaseMeta):
url = "https://github.com/Exiv2/exiv2/issues/208"
filename = "$data_path/2018-01-09-exiv2-crash-001.tiff"
commands = ["$exiv2 " + filename]
retval = [1]
stdout = [""]
stderr = [
"""$exiv2_exception_message """ + filename + """:
$filename: $kerFileContainsUnknownImageType
"""]
|
vFense/vFenseAgent-nix
|
agent/watcher_mac.py
|
Python
|
lgpl-3.0
| 4,731
| 0.001057
|
import subprocess
import sys
import os
import time
from collections import namedtuple
sys.path.append(os.path.join(os.getcwd(), "src"))
from utils import settings
from utils import logger
settings.initialize('watcher')
original_plist = '/opt/TopPatch/agent/daemon/com.toppatch.agent.plist'
osx_plist = '/System/Library/LaunchDaemons/com.toppatch.agent.plist'
daemon_label = 'com.toppatch.agent'
cp_command = ['/bin/cp', original_plist, osx_plist]
list_command = ['/bin/launchctl', 'list']
load_command = ['/bin/launchctl', 'load', '-w', osx_plist]
unload_command = ['/bin/launchctl', 'unload', '-w', osx_plist]
start_command = ['/bin/launchctl', 'start', daemon_label]
stop_command = ['/bin/launchctl', 'stop', daemon_label]
check_in_seconds = 60
def start_agent():
result = False
try:
process = subprocess.Popen(start_command, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
raw_output, error_output = process.communicate()
if raw_output == '' and error_output == '':
logger.log('Agent started.')
result = True
elif 'No such process' in error_output:
logger.log('Agent not found.')
else:
logger.log('Unknown output: "%s"' % error_output)
except Exception as e:
logger.log("Could not start agent.", logger.LogLevel.Error)
logger.log_exception(e)
return result
def restart_agent():
try:
process = subprocess.Popen(stop_command, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
raw_output, error_output = process.communicate()
if raw_output == '' and error_output == '':
logger.log('Agent has restarted.')
elif 'No such process' in error_output:
logger.log('Agent not found. Nothing to restart.')
else:
logger.log('Unknown output: "%s"' % error_output)
except Exception as e:
logger.log("Could not start agent.", logger.LogLevel.Error)
logger.log_exception(e)
def load_agent():
try:
process = subprocess.Popen(load_command, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
raw_output, error_output = process.communicate()
if raw_output == '' and error_output == '':
logger.log('Agent loaded.')
elif 'Already loaded' in error_output:
logger.log('Agent is already loaded.')
else:
logger.log('Unknown output: "%s"' % error_output)
except Exception as e:
logger.log("Could not load agent.", logger.LogLevel.Error)
logger.log_exception(e)
def unload_agent():
try:
process = subprocess.Popen(unload_command, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
raw_output, error_output = process.communicate()
if raw_output == '' and error_output == '':
logger.log('Agent unloaded.')
elif 'Error unloading' in error_output:
logger.log('Agent is not loaded/installed.')
else:
logger.log('Unknown output: "%s"' % error_output)
except Exception as e:
logger.log("Could not load agent.", logger.LogLevel.Error)
logger.log_exception(e)
AgentStatus = namedtuple('AgentStats', ['loaded', 'running'])
def agent_running_stats():
ps_info = []
running = False
loaded = False
    process = subprocess.Popen(list_command, stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE)
raw_output, error_output = process.communicate()
for line in raw_output.splitlines():
pid, run, pname = line.split('\t')
        ps_info.append((pname, run, pid))
for p in ps_info:
if daemon_label == p[0]:
# p[1] can either be:
# : '0' meaning not running.
# : '-' meaning its running.
loaded = True
if p[1] == '-':
running = True
break
elif p[1] == '0':
running = False
status = AgentStatus(loaded, running)
logger.log(str(status), logger.LogLevel.Debug)
return status
if __name__ == '__main__':
logger.log("Starting watcher daemon.")
while True:
time.sleep(check_in_seconds)
agent_status = agent_running_stats()
if agent_status.loaded:
if agent_status.running:
logger.log("Agent is running.", logger.LogLevel.Debug)
continue
else:
if not start_agent():
load_agent()
else:
load_agent()
|
Karthikeyan-kkk/ooni-probe
|
ooni/utils/hacks.py
|
Python
|
bsd-2-clause
| 3,059
| 0.000654
|
# When some software has issues and we need to fix it in a
# hackish way, we put it in here. This one day will be empty.
import copy_reg
from twisted.web.client import SchemeNotSupported
from txsocksx.http import SOCKS5Agent as SOCKS5AgentOriginal
def patched_reduce_ex(self, proto):
"""
This is a hack to overcome a bug in one of pythons core functions. It is
located inside of copy_reg and is called _reduce_ex.
Some background on the issue can be found here:
http://stackoverflow.com/questions/569754/how-to-tell-for-which-object-attribute-pickle
http://stackoverflow.com/questions/2049849/why-cant-i-pickle-this-object
There was also an open bug on the pyyaml trac repo, but it got closed because
they could not reproduce.
http://pyyaml.org/ticket/190
It turned out to be easier to patch the python core library than to monkey
patch yaml.
XXX see if there is a better way. sigh...
"""
_HEAPTYPE = 1 << 9
assert proto < 2
for base in self.__class__.__mro__:
if hasattr(base, '__flags__') and not base.__flags__ & _HEAPTYPE:
break
else:
base = object # not really reachable
if base is object:
state = None
elif base is int:
state = None
else:
if base is self.__class__:
raise TypeError("can't pickle %s objects" % base.__name__)
state = base(self)
args = (self.__class__, base, state)
try:
getstate = self.__getstate__
except AttributeError:
if getattr(self, "__slots__", None):
raise TypeError("a class that defines __slots__ without "
"defining __getstate__ cannot be pickled")
try:
            dict = self.__dict__
except AttributeError:
dict = None
else:
        dict = getstate()
if dict:
return copy_reg._reconstructor, args, dict
else:
return copy_reg._reconstructor, args
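# The patch is presumably installed by rebinding the core helper, e.g.:
#
#   copy_reg._reduce_ex = patched_reduce_ex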
class SOCKS5Agent(SOCKS5AgentOriginal):
"""
This is a quick hack to fix:
https://github.com/habnabit/txsocksx/issues/9
"""
def _getEndpoint(self, scheme_or_uri, host=None, port=None):
if host is not None:
scheme = scheme_or_uri
else:
scheme = scheme_or_uri.scheme
host = scheme_or_uri.host
port = scheme_or_uri.port
if scheme not in ('http', 'https'):
raise SchemeNotSupported('unsupported scheme', scheme)
endpoint = self.endpointFactory(
host, port, self.proxyEndpoint, **self.endpointArgs)
if scheme == 'https':
if hasattr(self, '_wrapContextFactory'):
tlsPolicy = self._wrapContextFactory(host, port)
elif hasattr(self, '_policyForHTTPS'):
tlsPolicy = self._policyForHTTPS.creatorForNetloc(host, port)
else:
raise NotImplementedError("can't figure out how to make a context factory")
endpoint = self._tlsWrapper(tlsPolicy, endpoint)
return endpoint
|
ateska/striga2-pocs
|
greenev/pyge/__init__.py
|
Python
|
unlicense
| 147
| 0.020408
|
try:
    import _pygeapi
except ImportError as e:
e.msg += ' (this module can be imported only from greenev)'
raise
from .evloop import event_loop
|
desaster/uusipuu
|
modules/google.py
|
Python
|
bsd-2-clause
| 1,007
| 0.005958
|
# -*- coding: ISO-8859-15 -*-
from twisted.web import client
from twisted.internet.defer import inlineCallbacks
from core.Uusipuu import UusipuuModule
import urllib, simplejson
class Module(UusipuuModule):
def startup(self):
self.log('google.py loaded')
@inlineCallbacks
def cmd_google(self, user, target, params):
self.log('Querying google for "%s"' % params)
data = yield client.getPage(
'http://ajax.googleapis.com/ajax/services/search/web?v=1.0&%s' %
urllib.urlencode({'q': params.strip()}))
json = simplejson.loads(data)
results = json['responseData']['results']
if not results:
            self.log('No results found matching "%s"' % params)
            self.chanmsg('No results found matching "%s"' % params)
return
self.chanmsg('%s: %s' % \
(results[0]['titleNoFormatting'].encode('utf-8'),
results[0]['url'].encode('utf-8')))
# vim: set et sw=4:
|
googleapis/python-security-private-ca
|
samples/generated_samples/privateca_v1beta1_generated_certificate_authority_service_update_certificate_authority_async.py
|
Python
|
apache-2.0
| 2,081
| 0.002403
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for UpdateCertificateAuthority
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-private-ca
# [START privateca_v1beta1_generated_CertificateAuthorityService_UpdateCertificateAuthority_async]
from google.cloud.security import privateca_v1beta1
async def sample_update_certificate_authority():
# Create a client
client = privateca_v1beta1.CertificateAuthorityServiceAsyncClient()
# Initialize request argument(s)
certificate_authority = privateca_v1beta1.CertificateAuthority()
certificate_authority.type_ = "SUBORDINATE"
certificate_authority.tier = "DEVOPS"
certificate_authority.config.reusable_config.reusable_config = "reusable_config_value"
certificate_authority.key_spec.cloud_kms_key_version = "cloud_kms_key_version_value"
request = privateca_v1beta1.UpdateCertificateAuthorityRequest(
certificate_authority=certificate_authority,
)
# Make the request
operation = client.update_certificate_authority(request=request)
print("Waiting for operation to complete...")
response = await operation.result()
# Handle the response
print(response)
# [END privateca_v1beta1_generated_CertificateAuthorityService_UpdateCertificateAuthority_async]
|
emgirardin/compassion-modules
|
partner_communication/models/email.py
|
Python
|
agpl-3.0
| 908
| 0
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2016 Compassion CH (http://www.compassion.ch)
# Releasing children from poverty in Jesus' name
# @author: Emanuel Cino <[email protected]>
#
# The licence is in the file __openerp__.py
#
##############################################################################
from openerp import models, fields
class MailMessage(models.Model):
""" Add relation to communication configuration to track generated
e-mails.
"""
_inherit = 'mail.mail'
##########################################################################
# FIELDS #
##########################################################################
communication_config_id = fields.Many2one('partner.communication.config')
|
rbrecheisen/pyminer
|
pyminer/pyminer.py
|
Python
|
apache-2.0
| 1,957
| 0.001533
|
__author__ = 'Ralph'
from ui.app import Application
if __name__ == '__main__':
from ui.app import Example
import wx
app = wx.App()
Example(None, title='Example')
app.MainLoop()
# application = Application()
# application.run()
# node1 = ImportARFF()
# node2 = SelectAttributes()
# node3 = SupportVectorMachine()
# node4 = SelectAttributes()
# node5 = ApplyModel()
#
# node1.get_config().set('file_name', '/Users/Ralph/datasets/imagemend/out/prepared/features_prepared.arff')
#
# node2.get_config().set('selector_type', 'subset')
# node2.get_config().set('attributes', ['M', 'F', 'age', 'id'])
#
# node3.get_config().set('kernel_type', 'rbf')
# node3.get_config().set('target', 'diagnosis')
# node3.get_config().set('auto_detect', True)
# node3.get_config().set('performance_measure', 'accuracy')
# node3.get_config().set('n_folds', 2)
# node3.get_config().set('n_grid_folds', 2)
# node3.get_config().set('model_output_dir', '/Users/Ralph/tmp/model')
#
# node4.get_config().set('selector_type', 'single')
# node4.get_config().set('attributes', ['diagnosis'])
#
# Connection(
# # ImportARFF -> SelectAttributes
# node1.get_output_port('output'), node2.get_input_port('input'))
# Connection(
# # SelectAttributes -> SVM
# node2.get_output_port('output'), node3.get_input_port('input'))
# Connection(
# # SelectAttributes -> SelectAttributes
# node2.get_output_port('output'), node4.get_input_port('input'))
# Connection(
# # SelectAttributes -> ApplyModel
# node4.get_output_port('output'), node5.get_input_port('input'))
# Connection(
# # SVM -> ApplyModel
# node3.get_output_port('model'), node5.get_input_port('model'))
#
# node1.execute()
#
# print('predictions: {}'.format(node5.get_output_port('output').get_data()))
|
z0rr0/t34.me
|
configs/api_python.py
|
Python
|
agpl-3.0
| 328
| 0.015244
|
#!/usr/bin/env python
#-*- coding: utf-8 -*-
import sys, urllib2
def main():
if len(sys.argv) < 2:
print("Error, usage: {0} <your url>".format(sys.argv[0]))
return 1
url = sys.argv[1]
print(urllib2.urlopen('http://t34.me/api/?u=' + url).read())
return 0
if __name__ == '__main__':
main()
|
pagekite/PyPagekite
|
pagekite/proto/filters.py
|
Python
|
agpl-3.0
| 8,412
| 0.011531
|
"""
These are filters placed at the end of a tunnel for watching or modifying
the traffic.
"""
##############################################################################
from __future__ import absolute_import
LICENSE = """\
This file is part of pagekite.py.
Copyright 2010-2020, the Beanstalks Project ehf. and Bjarni Runar Einarsson
This program is free software: you can redistribute it and/or modify it under
the terms of the GNU Affero General Public License as published by the Free
Software Foundation, either version 3 of the License, or (at your option) any
later version.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see: <http://www.gnu.org/licenses/>
"""
##############################################################################
import six
import re
import time
import pagekite.logging as logging
from pagekite.compat import *
class TunnelFilter:
"""Base class for watchers/filters for data going in/out of Tunnels."""
FILTERS = ('connected', 'data_in', 'data_out')
IDLE_TIMEOUT = 1800
def __init__(self, ui):
self.sid = {}
self.ui = ui
def clean_idle_sids(self, now=None):
now = now or time.time()
for sid in list(six.iterkeys(self.sid)):
if self.sid[sid]['_ts'] < now - self.IDLE_TIMEOUT:
del self.sid[sid]
def filter_set_sid(self, sid, info):
now = time.time()
if sid not in self.sid:
self.sid[sid] = {}
self.sid[sid].update(info)
self.sid[sid]['_ts'] = now
self.clean_idle_sids(now=now)
def filter_connected(self, tunnel, sid, data):
if sid not in self.sid:
self.sid[sid] = {}
self.sid[sid]['_ts'] = time.time()
return data
def filter_data_in(self, tunnel, sid, data):
if sid not in self.sid:
self.sid[sid] = {}
self.sid[sid]['_ts'] = time.time()
return data
def filter_data_out(self, tunnel, sid, data):
if sid not in self.sid:
self.sid[sid] = {}
self.sid[sid]['_ts'] = time.time()
return data
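# A minimal custom filter sketch (hypothetical, not part of pagekite): subclass
# TunnelFilter, list the hooks you implement in FILTERS, and always return the
# (possibly modified) data so the filter chain keeps flowing:
#
#   class SizeCountingFilter(TunnelFilter):
#     FILTERS = ('data_in',)
#     def filter_data_in(self, tunnel, sid, data):
#       self.filter_set_sid(sid, {'bytes_in': len(data or '')})
#       return TunnelFilter.filter_data_in(self, tunnel, sid, data)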
class TunnelWatcher(TunnelFilter):
"""Base class for watchers/filters for data going in/out of Tunnels."""
FILTERS = ('data_in', 'data_out')
def __init__(self, ui, watch_level=0):
TunnelFilter.__init__(self, ui)
self.watch_level = watch_level
def format_data(self, data, level):
if '\r\n\r\n' in data:
head, tail = data.split('\r\n\r\n', 1)
output = self.format_data(head, level)
output[-1] += '\\r\\n'
output.append('\\r\\n')
if tail:
output.extend(self.format_data(tail, level))
return output
else:
output = data.encode('string_escape').replace('\\n', '\\n\n')
if output.count('\\') > 0.15*len(output):
if level > 2:
output = [['', '']]
count = 0
for d in data:
output[-1][0] += '%2.2x' % ord(d)
output[-1][1] += '%c' % ((ord(d) > 31 and ord(d) < 127) and d or '.')
count += 1
if (count % 2) == 0:
output[-1][0] += ' '
if (count % 20) == 0:
output.append(['', ''])
return ['%-50s %s' % (l[0], l[1]) for l in output]
else:
return ['<< Binary bytes: %d >>' % len(data)]
else:
return output.strip().splitlines()
def now(self):
return ts_to_iso(int(10*time.time())/10.0
).replace('T', ' ').replace('00000', '')
def filter_data_in(self, tunnel, sid, data):
if data and self.watch_level[0] > 0:
self.ui.Notify('===[ INCOMING @ %s / %s ]===' % (self.now(), sid),
color=self.ui.WHITE, prefix=' __')
for line in self.format_data(data, self.watch_level[0]):
self.ui.Notify(line, prefix=' <=', now=-1, color=self.ui.GREEN)
return TunnelFilter.filter_data_in(self, tunnel, sid, data)
def filter_data_out(self, tunnel, sid, data):
if data and self.watch_level[0] > 1:
self.ui.Notify('===[ OUTGOING @ %s / %s ]===' % (self.now(), sid),
color=self.ui.WHITE, prefix=' __')
for line in self.format_data(data, self.watch_level[0]):
self.ui.Notify(line, prefix=' =>', now=-1, color=self.ui.BLUE)
return TunnelFilter.filter_data_out(self, tunnel, sid, data)
class HaproxyProtocolFilter(TunnelFilter):
"""Filter prefixes the HAProxy PROXY protocol info to requests."""
  FILTERS = ('connected',)
ENABLE = 'proxyproto'
def filter_connected(self, tunnel, sid, data):
info = self.sid.get(sid)
if info:
if not info.get(self.ENABLE, False):
pass
elif info[self.ENABLE] in ("1", True):
remote_ip = info['remote_ip']
        if '.' in remote_ip:
          # "::ffff:1.2.3.4" -> "1.2.3.4"; [-1] also tolerates a plain
          # dotted-quad address with no colon at all.
          remote_ip = remote_ip.rsplit(':', 1)[-1]
data = 'PROXY TCP%s %s 0.0.0.0 %s %s\r\n%s' % (
'4' if ('.' in remote_ip) else '6',
remote_ip, info['remote_port'], info['port'], data or '')
else:
logging.LogError(
'FIXME: Unimplemented PROXY protocol v%s\n' % info[self.ENABLE])
return TunnelFilter.filter_connected(self, tunnel, sid, data)
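# For reference, the PROXY protocol v1 line prepended above looks like this
# (hypothetical peer address and ports):
#
#   PROXY TCP4 203.0.113.7 0.0.0.0 51234 443\r\n<original request data>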
class HttpHeaderFilter(TunnelFilter):
"""Filter that adds X-Forwarded-For and X-Forwarded-Proto to requests."""
  FILTERS = ('data_in',)
HTTP_HEADER = re.compile('(?ism)^(([A-Z]+) ([^\n]+) HTTP/\d+\.\d+\s*)$')
DISABLE = 'rawheaders'
def filter_data_in(self, tunnel, sid, data):
info = self.sid.get(sid)
if (info and
info.get('proto') in ('http', 'http2', 'http3', 'websocket') and
not info.get(self.DISABLE, False)):
# FIXME: Check content-length and skip bodies entirely
http_hdr = self.HTTP_HEADER.search(data)
if http_hdr:
data = self.filter_header_data_in(http_hdr, data, info)
return TunnelFilter.filter_data_in(self, tunnel, sid, data)
def filter_header_data_in(self, http_hdr, data, info):
clean_headers = [
r'(?mi)^(X-(PageKite|Forwarded)-(For|Proto|Port):)'
]
add_headers = [
'X-Forwarded-For: %s' % info.get('remote_ip', 'unknown'),
'X-Forwarded-Proto: %s' % (info.get('using_tls') and 'https' or 'http'),
'X-PageKite-Port: %s' % info.get('port', 0)
]
if info.get('rewritehost', False):
add_headers.append('Host: %s' % info.get('rewritehost'))
clean_headers.append(r'(?mi)^(Host:)')
if http_hdr.group(1).upper() in ('POST', 'PUT'):
# FIXME: This is a bit ugly
add_headers.append('Connection: close')
clean_headers.append(r'(?mi)^(Connection|Keep-Alive):')
info['rawheaders'] = True
for hdr_re in clean_headers:
data = re.sub(hdr_re, 'X-Old-\\1', data)
return re.sub(self.HTTP_HEADER,
'\\1\n%s\r' % '\r\n'.join(add_headers),
data)
class HttpSecurityFilter(HttpHeaderFilter):
"""Filter that blocks known-to-be-dangerous requests."""
DISABLE = 'trusted'
HTTP_DANGER = re.compile('(?ism)^((get|post|put|patch|delete) '
# xampp paths, anything starting with /adm*
'((?:/+(?:xampp/|security/|licenses/|webalizer/|server-(?:status|info)|adm)'
'|[^\n]*/'
# WordPress admin pages
'(?:wp-admin/(?!admin-ajax|css/)|wp-config\.php'
# Hackzor tricks
'|system32/|\.\.|\.ht(?:access|pass)'
# phpMyAdmin and similar tools
'|(?:php|sql)?my(?:sql)?(?:adm|manager)'
# Setup pages for common PHP tools
'|(?:adm[^\n]*|install[^\n]*|setup)\.php)'
')[^\n]*)'
' HTTP/\d+\.\d+\s*)$')
REJECT = 'PAGEKITE_REJECT_'
def filter_header_data_in(self, http_hdr, data, info):
danger = self.HTTP_DANGER.search(data)
if danger:
self.ui.Notify('BLOCKED: %s %s' % (danger.group(2), danger.
|
victorlin/pyramid-handy
|
pyramid_handy/tweens/__init__.py
|
Python
|
mit
| 177
| 0
|
from .allow_origin import allow_origin_tween_factory  # noqa
from .api_headers import api_headers_tween_factory # noqa
from .basic_auth import basic_auth_tween_factory  # noqa
|
UManPychron/pychron
|
pychron/git_archive/diff_editor.py
|
Python
|
apache-2.0
| 9,035
| 0.001107
|
# ===============================================================================
# Copyright 2014 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= standard library imports ========================
from itertools import groupby
import six
# ============= enthought library imports =======================
from pyface.qt.QtGui import QTextEdit, QWidget, QHBoxLayout, QTextFormat, QColor, QPainter, QFrame, \
QSizePolicy, QPainterPath
from traits.trait_errors import TraitError
from traitsui.basic_editor_factory import BasicEditorFactory
from traitsui.qt4.editor import Editor
# ============= local library imports ==========================
from pychron.git_archive.diff_util import extract_line_numbers
def get_ranges(data):
return [[gi[0] for gi in g]
for k, g in groupby(enumerate(data),
lambda i_x: i_x[0] - i_x[1])]
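# Example (note: it yields the *indices* of consecutive runs in `data`):
#   get_ranges([3, 4, 5, 9, 10]) -> [[0, 1, 2], [3, 4]]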
class QDiffConnector(QFrame):
_left_y = 0
_right_y = 0
def __init__(self):
super(QDiffConnector, self).__init__()
        self.color = QColor(0, 100, 0, 100)
self.setSizePolicy(QSizePolicy(QSizePolicy.Fixed,
QSizePolicy.Ignored))
self.setFixedWidth(30)
def paintEvent(self, event):
qp = QPainter()
qp.begin(self)
qp.setRenderHint(QPainter.Antialiasing)
qp.setBrush(self.color)
qp.setPen(self.color)
rect = event.rect()
x = rect.x()
w = rect.width()
lineheight = 16
print('-------------------')
print('lefts', self.lefts)
print('rights', self.rights)
print('-------------------')
ly = self._left_y + 5
ry = self._right_y + 5
        rs = self.rights[:]
# offset=1
for i, l in enumerate(self.lefts):
path = QPainterPath()
sl, el = l[0], l[-1]
try:
                r = rs[i]
sr, er = r[0], r[-1]
rs.pop(i)
# offset+=1
except IndexError:
sr, er = l[-1], l[-1]-1
y = ly + lineheight * sl
y2 = ry + lineheight * sr
path.moveTo(x, y)
path.lineTo(x, y + lineheight * (el - sl + 1))
path.lineTo(x + w, y2 + lineheight * (er - sr + 1))
path.lineTo(x + w, y2)
qp.drawPath(path)
for i, r in enumerate(rs):
path = QPainterPath()
sr, er = r[0], r[-1]
# try:
            l = self.lefts[i]
sl, el = r[-1], r[-1]-1
# except IndexError:
# sl, el = l[-1]+2, l[-1]+1
# print sl, el
y = ly + lineheight * (sl)
y2 = ry + lineheight * (sr)
path.moveTo(x, y)
path.lineTo(x, y + lineheight * (el - sl + 1))
path.lineTo(x + w, y2 + lineheight * (er - sr + 1))
path.lineTo(x + w, y2)
qp.drawPath(path)
qp.end()
def set_left_y(self, y):
self._left_y += y
def set_right_y(self, y):
self._right_y += y
class LinkedTextEdit(QTextEdit):
linked_widget = None
connector = None
orientation = 'left'
no_update = False
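    # Mirrors vertical scrolling to `linked_widget`; the `no_update` flag
    # breaks the recursion when the partner scrolls this widget back.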
def scrollContentsBy(self, x, y):
if self.linked_widget and not self.no_update:
sb = self.linked_widget.verticalScrollBar()
v = sb.value() - y
self.linked_widget.no_update = True
sb.setSliderPosition(v)
self.linked_widget.no_update = False
if self.connector:
if self.orientation == 'left':
self.connector.set_left_y(y)
else:
self.connector.set_right_y(y)
self.connector.update()
super(LinkedTextEdit, self).scrollContentsBy(x, y)
class QDiffEdit(QWidget):
def __init__(self, parent, *args, **kw):
super(QDiffEdit, self).__init__(*args, **kw)
self.left = LinkedTextEdit()
self.left.orientation = 'left'
self.left.setReadOnly(True)
self.right = LinkedTextEdit()
self.right.orientation = 'right'
self.right.setReadOnly(True)
self.connector = QDiffConnector()
self.left.linked_widget = self.right
self.right.linked_widget = self.left
self.left.connector = self.connector
self.right.connector = self.connector
layout = QHBoxLayout()
layout.setSpacing(0)
layout.addWidget(self.left)
layout.addWidget(self.connector)
layout.addWidget(self.right)
self.setLayout(layout)
def set_left_text(self, txt):
self.left.setText(txt)
def set_right_text(self, txt):
self.right.setText(txt)
def highlight(self, ctrl, lineno):
selection = QTextEdit.ExtraSelection()
selection.cursor = ctrl.textCursor()
selection.format.setBackground(QColor(100, 200, 100))
selection.format.setProperty(
QTextFormat.FullWidthSelection, True)
doc = ctrl.document()
block = doc.findBlockByLineNumber(lineno)
selection.cursor.setPosition(block.position())
ss = ctrl.extraSelections()
ss.append(selection)
ctrl.setExtraSelections(ss)
selection.cursor.clearSelection()
def _clear_selection(self):
for ctrl in (self.left, self.right):
ctrl.setExtraSelections([])
def set_diff(self):
self._clear_selection()
ls, rs = extract_line_numbers(self.left.toPlainText(),
self.right.toPlainText())
for li in ls:
self.highlight(self.left, li)
for ri in rs:
self.highlight(self.right, ri)
self._set_connectors(ls, rs)
def _set_connectors(self, ls, rs):
self.connector.lefts = get_ranges(ls)
self.connector.rights = get_ranges(rs)
self.connector.update()
class _DiffEditor(Editor):
_no_update = False
def init(self, parent):
self.control = self._create_control(parent)
def _create_control(self, parent):
control = QDiffEdit(parent)
# QtCore.QObject.connect(ctrl.left,
# QtCore.SIGNAL('textChanged()'), self.update_left_object)
# QtCore.QObject.connect(ctrl.right,
# QtCore.SIGNAL('textChanged()'), self.update_right_object)
control.left.textChanged.connect(self.update_left_object)
control.right.textChanged.connect(self.update_right_object)
return control
def update_editor(self):
if self.value:
self.control.set_left_text(self.value.left_text)
self.control.set_right_text(self.value.right_text)
self.control.set_diff()
def update_right_object(self):
""" Handles the user entering input data in the edit control.
"""
self._update_object('right')
def update_left_object(self):
""" Handles the user entering input data in the edit control.
"""
self._update_object('left')
def _get_user_left_value(self):
return self._get_user_value('left')
def _get_user_right_value(self):
        return self._get_user_value('right')
def _update_object(self, attr):
if (not self._no_update) and (self.control is not None):
try:
setattr(self.value, '{}_text'.format(attr),
getattr(self, '_get_user_{}_value'.format(attr))())
self.control.set_diff()
if self._error is not N
|
v-iam/azure-sdk-for-python
|
azure-mgmt-network/azure/mgmt/network/v2017_03_01/models/route_table.py
|
Python
|
mit
| 2,557
| 0.000782
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource import Resource
class RouteTable(Resource):
"""Route table resource.
Variables are only populated by the server, and will be ignored when
sending a request.
:param id: Resource ID.
:type id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param location: Resource location.
:type location: str
:param tags: Resource tags.
:type tags: dict
:param routes: Collection of routes contained within a route table.
:type routes: list of :class:`Route
<azure.mgmt.network.v2017_03_01.models.Route>`
:ivar subnets: A collection of references to subnets.
:vartype subnets: list of :class:`Subnet
<azure.mgmt.network.v2017_03_01.models.Subnet>`
:param provisioning_state: The provisioning state of the resource.
Possible values are: 'Updating', 'Deleting', and 'Failed'.
:type provisioning_state: str
:param etag: Gets a unique read-only string that changes whenever the
resource is updated.
:type etag: str
"""
_validation = {
'name': {'readonly': True},
'type': {'readonly': True},
'subnets': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'routes': {'key': 'properties.routes', 'type': '[Route]'},
'subnets': {'key': 'properties.subnets', 'type': '[Subnet]'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
}
def __init__(self, id=None, location=None, tags=None, routes=None, provisioning_state=None, etag=None):
super(RouteTable, self).__init__(id=id, location=location, tags=tags)
self.routes = routes
self.subnets = None
self.provisioning_state = provisioning_state
self.etag = etag
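# A minimal construction sketch (hypothetical values; Route objects would come
# from the same models package):
#
#   table = RouteTable(location='westus', tags={'env': 'dev'}, routes=[])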
|
RobMackie/robiverse
|
python/echo_client/echo_client.py
|
Python
|
gpl-2.0
| 323
| 0
|
#!/usr/bin/env python3
import socket
HOST = '127.0.0.1' # The server's hostname or IP address
PORT = 65432        # The port used by the server
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.connect((HOST, PORT))
s.sendall(b'Hello, world')
data = s.recv(1024)
print('Received', repr(data))
|
tensorflow/lingvo
|
lingvo/tasks/car/waymo/waymo_decoder.py
|
Python
|
apache-2.0
| 13,038
| 0.004295
|
# Lint as: python3
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base models for point-cloud based detection."""
from lingvo import compat as tf
from lingvo.core import metrics
from lingvo.core import py_utils
from lingvo.tasks.car import base_decoder
from lingvo.tasks.car import detection_3d_metrics
from lingvo.tasks.car import transform_util
from lingvo.tasks.car.waymo import waymo_ap_metric
from lingvo.tasks.car.waymo import waymo_metadata
import numpy as np
class WaymoOpenDatasetDecoder(base_decoder.BaseDecoder):
"""A decoder to use for decoding a detector model on Waymo."""
@classmethod
def Params(cls):
p = super().Params()
p.Define(
'draw_visualizations', False, 'Boolean for whether to draw '
'visualizations. This is independent of laser_sampling_rate.')
p.ap_metric = waymo_ap_metric.WaymoAPMetrics.Params(
waymo_metadata.WaymoMetadata())
p.Define(
'extra_ap_metrics', {},
        'Dictionary of extra AP metrics to run in the decoder. The key '
'is the name of the metric and the value is a sub-class of '
'APMetric')
p.Define(
'save_residuals', False,
'If True, this expects the residuals and ground-truth to be available '
'in the decoder output dictionary, and it will save it to the decoder '
'output file. See decode_include_residuals in PointDetectorBase '
'for details.')
return p
def CreateDecoderMetrics(self):
"""Decoder metrics for WaymoOpenDataset."""
p = self.params
waymo_metric_p = p.ap_metric.Copy().Set(cls=waymo_ap_metric.WaymoAPMetrics)
waymo_metrics = waymo_metric_p.Instantiate()
class_names = waymo_metrics.metadata.ClassNames()
# TODO(bencaine,vrv): There's some code smell with this ap_metrics params
# usage. We create local copies of the params to then instantiate them.
# Failing to do this risks users editing the params after construction of
# the object, making each object method call have the potential for side
# effects.
# Create a new dictionary with copies of the params converted to objects
# so we can then add these to the decoder metrics.
extra_ap_metrics = {}
for k, metric_p in p.extra_ap_metrics.items():
extra_ap_metrics[k] = metric_p.Instantiate()
waymo_metric_bev_p = waymo_metric_p.Copy()
waymo_metric_bev_p.box_type = '2d'
    waymo_metrics_bev = waymo_metric_bev_p.Instantiate()
# Convert the list of class names to a dictionary mapping class_id -> name.
class_id_to_name = dict(enumerate(class_names))
# TODO(vrv): This uses the same top down transform as for KITTI;
# re-visit these settings since detections can happen all around
# the car.
top_down_transform = transform_util.MakeCarToImageTransform(
pixels_per_meter=32.,
image_ref_x=512.,
image_ref_y=1408.,
flip_axes=True)
decoder_metrics = py_utils.NestedMap({
'top_down_visualization':
(detection_3d_metrics.TopDownVisualizationMetric(
top_down_transform,
image_height=1536,
image_width=1024,
class_id_to_name=class_id_to_name)),
'num_samples_in_batch': metrics.AverageMetric(),
'waymo_metrics': waymo_metrics,
'waymo_metrics_bev': waymo_metrics_bev,
})
self._update_metrics_class_keys = ['waymo_metrics_bev', 'waymo_metrics']
for k, metric in extra_ap_metrics.items():
decoder_metrics[k] = metric
self._update_metrics_class_keys.append(k)
decoder_metrics.mesh = detection_3d_metrics.WorldViewer()
return decoder_metrics
def ProcessOutputs(self, input_batch, model_outputs):
"""Produce additional decoder outputs for WaymoOpenDataset.
Args:
input_batch: A .NestedMap of the inputs to the model.
model_outputs: A .NestedMap of the outputs of the model, including::
- per_class_predicted_bboxes: [batch, num_classes, num_boxes, 7] float
Tensor with per class 3D (7 DOF) bounding boxes.
- per_class_predicted_bbox_scores: [batch, num_classes, num_boxes] float
Tensor with per class, per box scores.
- per_class_valid_mask: [batch, num_classes, num_boxes] masking Tensor
indicating which boxes were still kept after NMS for each class.
Returns:
A NestedMap of additional decoder outputs needed for
PostProcessDecodeOut.
"""
del model_outputs
p = self.params
input_labels = input_batch.labels
input_metadata = input_batch.metadata
source_ids = tf.strings.join([
input_metadata.run_segment,
tf.as_string(input_metadata.run_start_offset)
],
separator='_')
ret = py_utils.NestedMap({
'num_points_in_bboxes': input_batch.labels.bboxes_3d_num_points,
# Ground truth.
'bboxes_3d': input_labels.bboxes_3d,
'bboxes_3d_mask': input_labels.bboxes_3d_mask,
'labels': input_labels.labels,
'label_ids': input_labels.label_ids,
'speed': input_labels.speed,
'acceleration': input_labels.acceleration,
# Fill the following in.
'source_ids': source_ids,
'difficulties': input_labels.single_frame_detection_difficulties,
'unfiltered_bboxes_3d_mask': input_labels.unfiltered_bboxes_3d_mask,
'run_segment': input_metadata.run_segment,
'run_start_offset': input_metadata.run_start_offset,
'pose': input_metadata.pose,
})
if p.draw_visualizations:
laser_sample = self._SampleLaserForVisualization(
input_batch.lasers.points_xyz, input_batch.lasers.points_padding)
ret.update(laser_sample)
return ret
def PostProcessDecodeOut(self, dec_out_dict, dec_metrics_dict):
"""Post-processes the decoder outputs."""
p = self.params
# Update num_samples_in_batch.
batch_size, num_classes, num_boxes, _ = (
dec_out_dict.per_class_predicted_bboxes.shape)
dec_metrics_dict.num_samples_in_batch.Update(batch_size)
# Update decoder output by removing z-coordinate, thus reshaping the bboxes
# to [batch, num_bboxes, 5] to be compatible with
# TopDownVisualizationMetric.
# Indices corresponding to the 2D bbox parameters (x, y, dx, dy, phi).
    bbox_2d_idx = np.asarray([1, 1, 0, 1, 1, 0, 1], dtype=bool)
bboxes_2d = dec_out_dict.bboxes_3d[..., bbox_2d_idx]
predicted_bboxes = dec_out_dict.per_class_predicted_bboxes[..., bbox_2d_idx]
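    # e.g. a 7-DOF box [x, y, z, dx, dy, dz, phi] reduces to [x, y, dx, dy, phi].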
if p.draw_visualizations and dec_out_dict.points_sampled:
tf.logging.info('Updating sample for top down visualization')
dec_metrics_dict.mesh.Update(
py_utils.NestedMap({
'points_xyz': dec_out_dict.points_xyz,
'points_padding': dec_out_dict.points_padding,
}))
# Flatten our predictions/scores to match the API of the visualization
# The last dimension of flattened_bboxes is 5 due to the mask
# above using bbox_2d_idx.
flattened_bboxes = np.reshape(predicted_bboxes,
[batch_size, num_classes * num_boxes, 5])
flattened_visualization_weights = np.reshape(
dec_out_dict.visualization_weights,
[batch_size, num_classes * num_boxes])
# Create a label id mask for now to maintain compatibility.
# TODO(bencaine): Refactor visualizations to reflect new structure.
flattened_visualization_labels = np.tile(
np.arange(0, num_classes)[np.newaxis, :, np
|
not-na/fritzctl
|
fritzctl/ooapi/general_hosts.py
|
Python
|
gpl-2.0
| 9,883
| 0.012344
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# general_hosts.py
#
# Copyright 2016-2020 fritzctl Contributors>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
from . import base
class API_general_hosts(base.API_base):
"""
General Host Information TR64 Object-Oriented API.
    Can be instantiated via ``session.getOOAPI("general_hosts")`` or
    ``session.getOOAPI("urn:dslforum-org:service:Hosts:1")``\ .
Same parameters and attributes as :py:class:`fritzctl.ooapi.base.API_base()`\ .
"""
def getHostByIndex(self,index,ext=True):
"""
Returns the Host associated with the given Index.
:param int index: The Index of the Host
:param bool ext: Optional Flag if information from the AVM Extension should be integrated, defaults to True
:return: Host Information Object
:rtype: Host
:raises AssertionError: if the index is invalid, e.g. not an integer or lower than 0
:raises ValueError: if the index is out-of-bounds
"""
assert isinstance(index,int) and index>=0
d = self.dynapi.GetGenericHostEntry(NewIndex=index)
if ext:
d.update(self.dynapi.callAPI("X_AVM-DE_GetGenericHostEntryExt",NewIndex=index))
d["_ext"]=True
else:
d["_ext"]=False
return Host(self,index,d)
def getHostByMAC(self,mac,ext=True):
"""
Returns the Host associated with the given MAC Address.
:param str mac: MAC Address of the Host
:param bool ext: Optional Flag if information from the AVM Extension should be integrated, defaults to True
:return: Host Information Object
:rtype: Host
:raises AssertionError: if the MAC Address is invalid, e.g. not a string
:raises ValueError: if the MAC Address is unknown
"""
assert isinstance(mac,str)
        d = self.dynapi.GetSpecificHostEntry(NewMACAddress=mac)
d["NewMACAddress"]=mac
if ext:
d.update(self.dynapi.callAPI("X_AVM-DE_GetSpecificHostEntryExt",NewMACAddress=mac))
d["_ext"]=True
else:
d["_ext"]=False
return Host(self,-1,d)
def getHostListLength(self):
"""
Returns the length of the List of all known Hosts.
:return: Number of Entries in the host list.
:rtype: int
"""
return int(self.dynapi.GetHostNumberOfEntries()["NewHostNumberOfEntries"])
def getHostList(self,ext=True):
"""
Returns a list of all hosts.
:param bool ext: Optional Flag if information from the AVM Extension should be integrated, defaults to True
:return: List of Hosts
:rtype: List of :py:class:`Host()`
"""
out = []
for i in range(self.getHostListLength()):
out.append(self.getHostByIndex(i,ext=ext))
return out
def getMacByIndex(self,index):
"""
Returns the MAC Address of the device associated with the given index.
:param int index: Index of the Device to return
:return: MAC Address
:rtype: str
"""
return self.dynapi.GetGenericHostEntry(NewIndex=index)["NewMACAddress"]
def getChangeCounter(self):
"""
Returns the current change counter.
:return: The current change counter
:rtype: int
"""
return int(self.dynapi.callAPI("X_AVM-DE_GetChangeCounter")["NewX_AVM-DE_GetChangeCounter"])
def wakeUp(self,mac):
"""
Sends a WakeOnLAN request to the specified Host.
:param str mac: MAC Address to wake up
:raises AssertionError: if the MAC Address is invalid, e.g. not a string
:raises ValueError: if the MAC Address is unknown
"""
assert isinstance(mac,str)
self.dynapi.callAPI("X_AVM-DE_WakeOnLANByMACAddress",NewMACAddress=mac)
class Host(object):
"""
Host Information and Configuration Class.
:param API_avm_homeauto api: API object to use when querying for data
:param int index: Index this device had when requested via ``GetGenericHostEntry()``\ , may be -1 if unknown
:param dict info: Dictionary containing the TR64 Response with all the data about the device; automatically passed to :py:meth:`loadData()`
:ivar API_avm_homeauto api: stores the supplied API object
:ivar int index: stores the supplied index
:ivar dict info: stores the data in a dictionary
:py:attr:`info` stores a flag if extension data is available in the ``_ext`` key.
:ivar str mac: MAC Address of this Host
:ivar str ip: IP Address of this Host
:ivar str address_source: Source of the Address
:ivar int lease_remaining: Time in second until the DHCP Lease expires
:ivar str interface_type: Type of the interface this Host is connected with
:ivar bool active: Flag if this host is active
:ivar str hostname: Property for reading and writing hostname, see :py:attr:`hostname`
Extension Variables:
:ivar int ethport: Which ethernet port the host is connected with, from 1-4 or 0 if not via LAN
:ivar float speed: Current Connection Speed
:ivar bool updateAvailable: Flag if an update is available, where applicable
:ivar bool updateSuccessful: Flag if the last update was successful, where applicable
:ivar str infourl: URL for getting Information
:ivar str model: Model of the Host
:ivar str url: URL of the Host
"""
def __init__(self,api,index,info):
self.api = api
self.index = index
self.info = info
self.loadData(self.info)
def loadData(self,data):
"""
Populates instance variables with the supplied TR64 response.
This method is automatically called upon construction with the supplied info dict.
Note that the ``_ext`` key must be set to a boolean flag indicating if extension information is contained in the response.
"""
self.mac = data["NewMACAddress"]
self.ip = data["NewIPAddress"]
self.address_source = data["NewAddressSource"]
self.lease_remaining = int(data["NewLeaseTimeRemaining"])
self.interface_type = data["NewInterfaceType"]
self.active = data["NewActive"]=="1"
self._hostname = data["NewHostName"]
if data["_ext"]:
self.ethport = int(data["NewX_AVM-DE_Port"])
self.speed = float(data["NewX_AVM-DE_Speed"])
self.updateAvailable = data["NewX_AVM-DE_UpdateAvailable"]=="1"
self.updateSuccessful = data["NewX_AVM-DE_UpdateSuccessful"]=="succeeded"
self.infourl = data["NewX_AVM-DE_InfoURL"]
self.model = data["NewX_AVM-DE_Model"]
self.url = data["NewX_AVM-DE_URL"]
def reloadData(self):
"""
Reloads the data from the server.
Note that this method will only request extension data if the key ``_ext`` is set to ``True``\ .
"""
d = self.api.dynapi.GetSpecificHostEntry(NewMACAddress=self.mac)
if self.info["_ext"]:
d.update(self.api.dynapi.callAPI("X_AVM-DE_GetSpecificHostEntryExt",NewMACAddress=self.mac))
d["_ext"]=self.info["_ext"]
d["NewMACAddress"]=self.mac
self.info = d
def doUpdate(self):
"""
Requests that the host does an update.
Note that this may not work on every
|
gianina-ingenuity/titanium-branch-deep-linking
|
testbed/x/mobilesdk/osx/5.5.1.GA/common/css/ply/lex.py
|
Python
|
mit
| 40,747
| 0.011633
|
# -----------------------------------------------------------------------------
# ply: lex.py
#
# Copyright (C) 2001-2009,
# David M. Beazley (Dabeaz LLC)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the David Beazley or Dabeaz LLC may be used to
# endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# -----------------------------------------------------------------------------
__version__ = "3.3"
__tabversion__ = "3.2" # Version of table file used
import re, sys, types, copy, os
# This tuple contains known string types
try:
# Python 2.6
StringTypes = (types.StringType, types.UnicodeType)
except AttributeError:
# Python 3.0
StringTypes = (str, bytes)
# Extract the code attribute of a function. Different implementations
# are for Python 2/3 compatibility.
if sys.version_info[0] < 3:
def func_code(f):
return f.func_code
else:
def func_code(f):
return f.__code__
# This regular expression is used to match valid token names
_is_identifier = re.compile(r'^[a-zA-Z0-9_]+$')
# Exception thrown when invalid token encountered and no default error
# handler is defined.
class LexError(Exception):
def __init__(self,message,s):
self.args = (message,)
self.text = s
# Token class. This class is used to represent the tokens produced.
class LexToken(object):
def __str__(self):
return "LexToken(%s,%r,%d,%d)" % (self.type,self.value,self.lineno,self.lexpos)
def __repr__(self):
return str(self)
# This object is a stand-in for a logging object created by the
# logging module.
class PlyLogger(object):
def __init__(self,f):
self.f = f
def critical(self,msg,*args,**kwargs):
self.f.write((msg % args) + "\n")
def warning(self,msg,*args,**kwargs):
self.f.write("LEX WARNING: "+ (msg % args) + "\n")
def error(self,msg,*args,**kwargs):
self.f.write("LEX ERROR: " + (msg % args) + "\n")
info = critical
debug = critical
# Null logger is used when no output is generated. Does nothing.
class NullLogger(object):
def __getattribute__(self,name):
return self
def __call__(self,*args,**kwargs):
return self
# -----------------------------------------------------------------------------
# === Lexing Engine ===
#
# The following Lexer class implements the lexer runtime. There are only
# a few public methods and attributes:
#
# input() - Store a new string in the lexer
# token() - Get the next token
# clone() - Clone the lexer
#
# lineno - Current line number
# lexpos - Current position in the input string
# -----------------------------------------------------------------------------
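# A minimal usage sketch (assuming a rules module built with lex.lex(), which
# is defined further down in this file):
#
#   lexer = lex.lex(module=my_rules_module)
#   lexer.input("some input text")
#   for tok in iter(lexer.token, None):
#       print(tok)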
class Lexer:
def __init__(self):
self.lexre = None # Master regular expression. This is a list of
# tuples (re,findex) where re is a compiled
# regular expression and findex is a list
# mapping regex group numbers to rules
self.lexretext = None # Current regular expression strings
self.lexstatere = {} # Dictionary mapping lexer states to master regexs
self.lexstateretext = {} # Dictionary mapping lexer states to regex strings
self.lexstaterenames = {} # Dictionary mapping lexer states to symbol names
self.lexstate = "INITIAL" # Current lexer state
self.lexstatestack = [] # Stack of lexer states
self.lexstateinfo = None # State information
self.lexstateignore = {} # Dictionary of ignored characters for each state
self.lexstateerrorf = {} # Dictionary of error functions for each state
self.lexreflags = 0 # Optional re compile flags
self.lexdata = None # Actual input data (as a string)
self.lexpos = 0 # Current position in input text
self.lexlen = 0 # Length of the input text
self.lexerrorf = None # Error rule (if any)
self.lextokens = None # List of valid tokens
self.lexignore = "" # Ignored characters
self.lexliterals = "" # Literal characters that can be passed through
self.lexmodule = None # Module
self.lineno = 1 # Current line number
self.lexoptimize = 0 # Optimized mode
def clone(self,object=None):
c = copy.copy(self)
# If the object parameter has been supplied, it means we are attaching the
# lexer to a new object. In this case, we have to rebind all methods in
# the lexstatere and lexstateerrorf tables.
if object:
newtab = { }
for key, ritem in self.lexstatere.items():
newre = []
for cre, findex in ritem:
newfindex = []
for f in findex:
if not f or not f[0]:
newfindex.append(f)
continue
newfindex.append((getattr(object,f[0].__name__),f[1]))
newre.append((cre,newfindex))
                newtab[key] = newre
c.lexstatere = newtab
c.lexstateerrorf = { }
for key, ef in self.lexstateerrorf.items():
c.lexstateerrorf[key] = getattr(object,ef.__name__)
c.lexmodule = object
return c
# ------------------------------------------------------------
# writetab() - Write lexer information to a table file
# ------------------------------------------------------------
def writetab(self,tabfile,outputdir=""):
if isinstance(tabfile,types.ModuleType):
return
basetabfilename = tabfile.split(".")[-1]
filename = os.path.join(outputdir,basetabfilename)+".py"
tf = open(filename,"w")
tf.write("# %s.py. This file automatically created by PLY (version %s). Don't edit!\n" % (tabfile,__version__))
tf.write("_tabversion = %s\n" % repr(__version__))
tf.write("_lextokens = %s\n" % repr(self.lextokens))
tf.write("_lexreflags = %s\n" % repr(self.lexreflags))
tf.write("_lexliterals = %s\n" % repr(self.lexliterals))
tf.write("_lexstateinfo = %s\n" % repr(self.lexstateinfo))
tabre = { }
# Collect all functions in the initial state
initial = self.lexstatere["INITIAL"]
initialfuncs = []
for part in initial:
for f in part[1]:
if f and f[0]:
initialfuncs.append(f)
for key, lre in self.lexstatere.items():
titem = []
|
CHBMB/LazyLibrarian
|
lib/fuzzywuzzy/fuzz.py
|
Python
|
gpl-3.0
| 8,419
| 0.00095
|
#!/usr/bin/env python
# encoding: utf-8
"""
fuzz.py
Copyright (c) 2011 Adam Cohen
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from __future__ import unicode_literals
import platform
import warnings
try:
from .StringMatcher import StringMatcher as SequenceMatcher
except ImportError:
#if platform.python_implementation() != "PyPy":
# warnings.warn('Using slow pure-python SequenceMatcher. Install python-Levenshtein to remove this warning')
from difflib import SequenceMatcher
from . import utils
###########################
# Basic Scoring Functions #
###########################
@utils.check_for_none
@utils.check_empty_string
def ratio(s1, s2):
s1, s2 = utils.make_type_consistent(s1, s2)
m = SequenceMatcher(None, s1, s2)
return utils.intr(100 * m.ratio())
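# Example (the classic fuzzywuzzy value; scores are rounded integers):
#   ratio("this is a test", "this is a test!") -> 97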
@utils.check_for_none
@utils.check_empty_string
def partial_ratio(s1, s2):
""""Return the ratio of the most similar substring
as a number between 0 and 100."""
s1, s2 = utils.make_type_consistent(s1, s2)
if len(s1) <= len(s2):
shorter = s1
longer = s2
else:
shorter = s2
longer = s1
m = SequenceMatcher(None, shorter, longer)
blocks = m.get_matching_blocks()
# each block represents a sequence of matching characters in a string
# of the form (idx_1, idx_2, len)
# the best partial match will block align with at least one of those blocks
# e.g. shorter = "abcd", longer = XXXbcdeEEE
# block = (1,3,3)
# best score === ratio("abcd", "Xbcd")
scores = []
for block in blocks:
long_start = block[1] - block[0] if (block[1] - block[0]) > 0 else 0
long_end = long_start + len(shorter)
long_substr = longer[long_start:long_end]
m2 = SequenceMatcher(None, shorter, long_substr)
r = m2.ratio()
if r > .995:
return 100
else:
scores.append(r)
return utils.intr(100 * max(scores))
##############################
# Advanced Scoring Functions #
##############################
def _process_and_sort(s, force_ascii, full_process=True):
"""Return a cleaned string with token sorted."""
# pull tokens
ts = utils.full_process(s, force_ascii=force_ascii) if full_process else s
tokens = ts.split()
# sort tokens and join
sorted_string = u" ".join(sorted(tokens))
return sorted_string.strip()
# Sorted Token
# find all alphanumeric tokens in the string
# sort those tokens and take ratio of resulting joined strings
# controls for unordered string elements
@utils.check_for_none
def _token_sort(s1, s2, partial=True, force_ascii=True, full_process=True):
sorted1 = _process_and_sort(s1, force_ascii, full_process=full_process)
sorted2 = _process_and_sort(s2, force_ascii, full_process=full_process)
if partial:
return partial_ratio(sorted1, sorted2)
else:
return ratio(sorted1, sorted2)
def token_sort_ratio(s1, s2, force_ascii=True, full_process=True):
"""Return a measure of the sequences' similarity between 0 and 100
but sorting the token before comparing.
"""
return _token_sort(s1, s2, partial=False, force_ascii=force_ascii, full_process=full_process)
def partial_token_sort_ratio(s1, s2, force_ascii=True, full_process=True):
"""Return the ratio of the most similar substring as a number between
0 and 100 but sorting the token before comparing.
"""
return _token_sort(s1, s2, partial=True, force_ascii=force_ascii, full_process=full_process)
@utils.check_for_none
def _token_set(s1, s2, partial=True, force_ascii=True, full_process=True):
"""Find all alphanumeric tokens in each string...
- treat them as a set
- construct two strings of the form:
<sorted_intersection><sorted_remainder>
- take ratios of those two strings
- controls for unordered partial matches"""
p1 = utils.full_process(s1, force_ascii=force_ascii) if full_process else s1
p2 = utils.full_process(s2, force_ascii=force_ascii) if full_process else s2
if not utils.validate_string(p1):
return 0
if not utils.validate_string(p2):
return 0
# pull tokens
tokens1 = set(p1.split())
tokens2 = set(p2.split())
intersection = tokens1.intersection(tokens2)
diff1to2 = tokens1.difference(tokens2)
diff2to1 = tokens2.difference(tokens1)
sorted_sect = " ".join(sorted(intersection))
sorted_1to2 = " ".join(sorted(diff1to2))
sorted_2to1 = " ".join(sorted(diff2to1))
combined_1to2 = sorted_sect + " " + sorted_1to2
combined_2to1 = sorted_sect + " " + sorted_2to1
# strip
sorted_sect = sorted_sect.strip()
combined_1to2 = combined_1to2.strip()
combined_2to1 = combined_2to1.strip()
if partial:
ratio_func = partial_ratio
else:
ratio_func = ratio
pairwise = [
ratio_func(sorted_sect, combined_1to2),
ratio_func(sorted_sect, combined_2to1),
ratio_func(combined_1to2, combined_2to1)
]
return max(pairwise)
def token_set_ratio(s1, s2, force_ascii=True, full_process=True):
return _token_set(s1, s2, partial=False, force_ascii=force_ascii, full_process=full_process)
def partial_token_set_ratio(s1, s2, force_ascii=True, full_process=True):
return _token_set(s1, s2, partial=True, force_ascii=force_ascii, full_process=full_process)
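# Examples (token-based scores are order- and duplicate-insensitive):
#   token_sort_ratio("fuzzy wuzzy was a bear", "wuzzy fuzzy was a bear") -> 100
#   token_set_ratio("fuzzy was a bear", "fuzzy fuzzy was a bear") -> 100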
###################
# Combination API #
###################
# q is for quick
def QRatio(s1, s2, force_ascii=True):
p1 = utils.full_process(s1, force_ascii=force_ascii)
p2 = utils.full_process(s2, force_ascii=force_ascii)
if not utils.validate_string(p1):
return 0
if not utils.validate_string(p2):
return 0
return ratio(p1, p2)
def UQRatio(s1, s2):
return QRatio(s1, s2, force_ascii=False)
# w is for weighted
def WRatio(s1, s2, force_ascii=True):
"""Return a measure of the sequences' similarity between 0 and 100,
using different algorithms.
"""
p1 = utils.full_process(s1, force_ascii=force_ascii)
p2 = utils.full_process(s2, force_ascii=force_ascii)
if not utils.validate_string(p1):
return 0
if not utils.validate_string(p2):
return 0
# should we look at partials?
try_partial = True
unbase_scale = .95
partial_scale = .90
base = ratio(p1, p2)
len_ratio = float(max(len(p1), len(p2))) / min(len(p1), len(p2))
# if strings are similar length, don't use partials
if len_ratio < 1.5:
try_partial = False
# if one string is much much shorter than the other
if len_ratio > 8:
partial_scale = .6
if try_partial:
partial = partial_ratio(p1, p2) * partial_scale
ptsor = partial_token_sort_ratio(p1, p2, full_process=False) \
* unbase_scale * partial_scale
ptser = partial_token_set_ratio(p1, p2, full_process=False) \
* unbase_scale * partial_scale
return utils.intr(max(base, partial, ptsor, ptser))
else:
tsor = token_sort_ratio(p1, p2, full_process=False) * unbase_scale
tser = token_set_ratio(p1, p2, full_process=False) * unbase_scale
return utils.intr(max
|
ilastikdev/ilastik
|
tests/test_applets/objectCounting/testObjectCountingMultiImageGui.py
|
Python
|
gpl-3.0
| 31,053
| 0.010756
|
###############################################################################
# ilastik: interactive learning and segmentation toolkit
#
# Copyright (C) 2011-2014, the ilastik developers
# <[email protected]>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# In addition, as a special exception, the copyright holders of
# ilastik give you permission to combine ilastik with applets,
# workflows and plugins which are not covered under the GNU
# General Public License.
#
# See the LICENSE file for details. License information is also available
# on the ilastik web site at:
# http://ilastik.org/license.html
###############################################################################
# FIXME: this test currently fails due to a known, not-yet-fixed issue.
# Checks that the boxes remain in the image when switching images.
# Other things to be tested:
# - interaction with boxes: moving them around, etc.
import os
import numpy
from PyQt4.QtGui import QApplication,QKeyEvent
from PyQt4.QtCore import QEvent,Qt
from ilastik.workflows.counting import CountingWorkflow
from tests.helpers import ShellGuiTestCaseBase
from lazyflow.operators import OpPixelFeaturesPresmoothed
class TestObjectCountingGuiMultiImage(ShellGuiTestCaseBase):
"""
    Run a set of GUI-based tests on the object counting workflow.
Note: These tests are named in order so that simple cases are tried before complex ones.
Additionally, later tests may depend on earlier ones to run properly.
"""
@classmethod
def workflowClass(cls):
return CountingWorkflow
PROJECT_FILE = os.path.split(__file__)[0] + '/test_project.ilp'
SAMPLE_DATA = []
SAMPLE_DATA.append( os.path.split(__file__)[0] + '/1.npy')
SAMPLE_DATA.append( os.path.split(__file__)[0] + '/0.npy')
SAMPLE_DATA.append( os.path.split(__file__)[0] + '/2.npy')
SAMPLE_DATA.append( os.path.split(__file__)[0] + '/3.npy')
SAMPLE_DATA.append( os.path.split(__file__)[0] + '/4.npy')
@classmethod
def setupClass(cls):
# Base class first
super(TestObjectCountingGuiMultiImage, cls).setupClass()
if hasattr(cls, 'SAMPLE_DATA'):
cls.using_random_data = False
else:
cls.using_random_data = True
cls.SAMPLE_DATA = []
cls.SAMPLE_DATA.append(os.path.split(__file__)[0] + '/random_data1.npy')
cls.SAMPLE_DATA.append(os.path.split(__file__)[0] + '/random_data2.npy')
data1 = numpy.random.random((1,200,200,1,1))
data1 *= 256
data2 = numpy.random.random((1,50,100,1,1))
data2 *= 256
numpy.save(cls.SAMPLE_DATA[0], data1.astype(numpy.uint8))
        numpy.save(cls.SAMPLE_DATA[1], data2.astype(numpy.uint8))
@classmethod
def teardownClass(cls):
# Call our base class so the app quits!
super(TestObjectCountingGuiMultiImage, cls).teardownClass()
# Clean up: Delete any test files we generated
removeFiles = [ TestObjectCountingGuiMultiImage.PROJECT_FILE ]
if cls.using_random_data:
removeFiles += TestObjectCountingGuiMultiImage.SAMPLE_DATA
for f in removeFiles:
try:
os.remove(f)
except:
pass
def test_1_NewProject(self):
"""
Create a blank project, manipulate few couple settings, and save it.
"""
def impl():
projFilePath = self.PROJECT_FILE
shell = self.shell
# New project
shell.createAndLoadNewProject(projFilePath, self.workflowClass())
workflow = shell.projectManager.workflow
from ilastik.applets.dataSelection.opDataSelection import DatasetInfo
opDataSelection = workflow.dataSelectionApplet.topLevelOperator
for i, dataFile in enumerate(self.SAMPLE_DATA):
# Add a file
info = DatasetInfo()
info.filePath = dataFile
opDataSelection.DatasetGroup.resize(i+1)
opDataSelection.DatasetGroup[i][0].setValue(info)
# Set some features
opFeatures = workflow.featureSelectionApplet.topLevelOperator
opFeatures.FeatureIds.setValue( OpPixelFeaturesPresmoothed.DefaultFeatureIds )
opFeatures.Scales.setValue( [0.3, 0.7, 1, 1.6, 3.5, 5.0, 10.0] )
# sigma: 0.3 0.7 1.0 1.6 3.5 5.0 10.0
selections = numpy.array( [[True, False, False, False, False, False, False],
[True, False, False, False, False, False, False],
[True, False, False, False, False, False, False],
[False, False, False, False, False, False, False],
[False, False, False, False, False, False, False],
[False, False, False, False, False, False, False]] )
opFeatures.SelectionMatrix.setValue(selections)
# Save and close
shell.projectManager.saveProject()
shell.ensureNoCurrentProject(assertClean=True)
# Run this test from within the shell event loop
self.exec_in_shell(impl)
def test_2_ClosedState(self):
"""
Check the state of various shell and gui members when no project is currently loaded.
"""
def impl():
assert self.shell.projectManager is None
assert self.shell.appletBar.count() == 0
# Run this test from within the shell event loop
self.exec_in_shell(impl)
def test_3_OpenProject(self):
def impl():
self.shell.openProjectFile(self.PROJECT_FILE)
assert self.shell.projectManager.currentProjectFile is not None
# Run this test from within the shell event loop
self.exec_in_shell(impl)
# These points are relative to the CENTER of the view
def test_4_AddDotsAndBackground(self):
"""
Add labels and draw them in the volume editor.
"""
def impl():
imageId = 0
workflow = self.shell.projectManager.workflow
countingClassApplet = workflow.countingApplet
self.shell.imageSelectionCombo.setCurrentIndex(imageId)
gui = countingClassApplet.getMultiLaneGui()
self.waitForViews(gui.currentGui().editor.imageViews)
opPix = countingClassApplet.topLevelOperator
# Select the labeling drawer
self.shell.setSelectedAppletDrawer(3)
            # Turn off the huds so we can capture the raw image
viewMenu = gui.currentGui().menus()[0]
viewMenu.actionToggleAllHuds.trigger()
## Turn off the slicing position lines
## FIXME: This disables the lines without unchecking the position
## box in the VolumeEditorWidget, making the checkbox out-of-sync
#gui.currentGui().editor.navCtrl.indicateSliceIntersection = False
# Do our tests at position 0,0,0
gui.currentGui().editor.posModel.slicingPos = (0,0,0)
assert gui.currentGui()._labelControlUi.liveUpdateButton.isChecked() == False
assert gui.currentGui()._labelControlUi.labelListModel.rowCount() == 2, "Got {} rows".format(gui.currentGui()._labelControlUi.labelListModel.rowCount())
# Select the brush
gui.currentGui()._labelControlUi.paintToolButton.click()
# Let the GUI catch up: Process all events
QApplication.processEvents(
|
WillieMaddox/scipy
|
benchmarks/benchmarks/optimize.py
|
Python
|
bsd-3-clause
| 10,537
| 0.000949
|
from __future__ import division, print_function, absolute_import
import time
from collections import defaultdict
import numpy as np
try:
import scipy.optimize
from scipy.optimize.optimize import rosen, rosen_der, rosen_hess
from scipy.optimize import leastsq
except ImportError:
pass
from . import test_functions as funcs
from .common import Benchmark
from .lsq_problems import extract_lsq_problems
class _BenchOptimizers(Benchmark):
"""a framework for benchmarking the optimizer
Parameters
----------
function_name : string
fun : callable
der : callable
function that returns the derivative (jacobian, gradient) of fun
hess : callable
function that returns the hessian of fun
minimizer_kwargs : kwargs
additional keywords passed to the minimizer. e.g. tol, maxiter
"""
def __init__(self, function_name, fun, der=None, hess=None,
**minimizer_kwargs):
self.function_name = function_name
self.fun = fun
self.der = der
self.hess = hess
self.minimizer_kwargs = minimizer_kwargs
if "tol" not in minimizer_kwargs:
minimizer_kwargs["tol"] = 1e-4
self.results = []
def reset(self):
self.results = []
def add_result(self, result, t, name):
"""add a result to the list"""
result.time = t
result.name = name
if not hasattr(result, "njev"):
result.njev = 0
if not hasattr(result, "nhev"):
result.nhev = 0
self.results.append(result)
def print_results(self):
"""print the current list of results"""
results = self.average_results()
results = sorted(results, key=lambda x: (x.nfail, x.mean_time))
if not results:
return
print("")
print("=========================================================")
print("Optimizer benchmark: %s" % (self.function_name))
print("dimensions: %d, extra kwargs: %s" % (results[0].ndim, str(self.minimizer_kwargs
|
)))
print("averaged over %d starting configurations" % (results[0].ntrials))
print(" Optimizer nfail nfev njev nhev time")
print("---------------------------------------------------------")
for res in results:
print("%11s | %4d | %4d | %4d | %4d | %.6g" %
(res.name, res.nfail, res.mean_nfev, res.mean_njev, res.mean_nhev, res.mean_time))
def average_results(self):
"""group the results by minimizer
|
and average over the runs"""
grouped_results = defaultdict(list)
for res in self.results:
grouped_results[res.name].append(res)
averaged_results = dict()
for name, result_list in grouped_results.items():
newres = scipy.optimize.OptimizeResult()
newres.name = name
newres.mean_nfev = np.mean([r.nfev for r in result_list])
newres.mean_njev = np.mean([r.njev for r in result_list])
newres.mean_nhev = np.mean([r.nhev for r in result_list])
newres.mean_time = np.mean([r.time for r in result_list])
newres.ntrials = len(result_list)
newres.nfail = len([r for r in result_list if not r.success])
try:
newres.ndim = len(result_list[0].x)
except TypeError:
newres.ndim = 1
averaged_results[name] = newres
return averaged_results.values()
def bench_run(self, x0, methods=None, **minimizer_kwargs):
"""do an optimization test starting at x0 for all the optimizers"""
kwargs = self.minimizer_kwargs
if methods is None:
methods = ["COBYLA", 'Powell',
'L-BFGS-B', 'BFGS', 'CG', 'TNC', 'SLSQP',
"Newton-CG", 'dogleg', 'trust-ncg']
fonly_methods = ["COBYLA", 'Powell']
for method in fonly_methods:
if method not in methods:
continue
t0 = time.time()
res = scipy.optimize.minimize(self.fun, x0, method=method,
**kwargs)
t1 = time.time()
self.add_result(res, t1-t0, method)
gradient_methods = ['L-BFGS-B', 'BFGS', 'CG', 'TNC', 'SLSQP']
if self.der is not None:
for method in gradient_methods:
if method not in methods:
continue
t0 = time.time()
res = scipy.optimize.minimize(self.fun, x0, method=method,
jac=self.der, **kwargs)
t1 = time.time()
self.add_result(res, t1-t0, method)
hessian_methods = ["Newton-CG", 'dogleg', 'trust-ncg']
if self.hess is not None:
for method in hessian_methods:
if method not in methods:
continue
t0 = time.time()
res = scipy.optimize.minimize(self.fun, x0, method=method,
jac=self.der, hess=self.hess,
**kwargs)
t1 = time.time()
self.add_result(res, t1-t0, method)
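# A minimal usage sketch (hypothetical objective; not part of this suite):
# benchmark a custom quadratic with two gradient-based solvers and print the
# averaged summary table.
#   b = _BenchOptimizers("quadratic", fun=lambda x: (x ** 2).sum(),
#                        der=lambda x: 2 * x, tol=1e-6)
#   for _ in range(5):
#       b.bench_run(np.random.uniform(-1, 1, 3), methods=['BFGS', 'CG'])
#   b.print_results()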
class BenchSmoothUnbounded(Benchmark):
"""Benchmark the optimizers with smooth, unbounded, functions"""
params = [
['rosenbrock', 'rosenbrock_tight',
'simple_quadratic', 'asymmetric_quadratic',
'sin_1d', 'booth', 'beale', 'LJ'],
["COBYLA", 'Powell',
'L-BFGS-B', 'BFGS', 'CG', 'TNC', 'SLSQP',
"Newton-CG", 'dogleg', 'trust-ncg'],
["mean_nfev", "mean_time"]
]
param_names = ["test function", "solver", "result type"]
def setup(self, func_name, method_name, ret_val):
b = getattr(self, 'run_' + func_name)(methods=[method_name])
results = b.average_results()
result = None
for r in results:
if r.name == method_name:
result = getattr(r, ret_val)
break
if result is None:
raise NotImplementedError()
self.result = result
def track_all(self, func_name, method_name, ret_val):
return self.result
def run_rosenbrock(self, methods=None):
b = _BenchOptimizers("Rosenbrock function",
fun=rosen, der=rosen_der, hess=rosen_hess)
for i in range(10):
b.bench_run(np.random.uniform(-3, 3, 3), methods=methods)
return b
def run_rosenbrock_tight(self, methods=None):
b = _BenchOptimizers("Rosenbrock function",
fun=rosen, der=rosen_der, hess=rosen_hess,
tol=1e-8)
for i in range(10):
b.bench_run(np.random.uniform(-3, 3, 3), methods=methods)
return b
def run_simple_quadratic(self, methods=None):
s = funcs.SimpleQuadratic()
# print "checking gradient", scipy.optimize.check_grad(s.fun, s.der, np.array([1.1, -2.3]))
b = _BenchOptimizers("simple quadratic function",
fun=s.fun, der=s.der, hess=s.hess)
for i in range(10):
b.bench_run(np.random.uniform(-2, 2, 3), methods=methods)
return b
def run_asymmetric_quadratic(self, methods=None):
s = funcs.AsymmetricQuadratic()
# print "checking gradient", scipy.optimize.check_grad(s.fun, s.der, np.array([1.1, -2.3]))
b = _BenchOptimizers("function sum(x**2) + x[0]",
fun=s.fun, der=s.der, hess=s.hess)
for i in range(10):
b.bench_run(np.random.uniform(-2, 2, 3), methods=methods)
return b
def run_sin_1d(self, methods=None):
fun = lambda x: np.sin(x[0])
der = lambda x: np.array([np.cos(x[0])])
b = _BenchOptimizers("1d sin function",
fun=fun, der=der, hess=None)
for i in range(10):
b.bench_run(np.random.uniform(-2, 2, 1), methods=methods)
return b
def run_booth(self, methods=None):
s = funcs.Booth()
# print
|
siosio/intellij-community
|
python/testData/multipleArgumentsCompletion/noExceptionIfMoreArgumentsThanParameters.py
|
Python
|
apache-2.0
| 61
| 0.065574
|
def foo(x):
|
pass
x = 42
|
y = 42
z = 42
foo(x, y, <caret>)
|
ddurieux/alignak
|
test/test_star_in_hostgroups.py
|
Python
|
agpl-3.0
| 3,007
| 0.000665
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015-2015: Alignak team, see AUTHORS.txt file for contributors
#
# Thi
|
s file is part of Alignak.
#
# Alignak is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either ver
|
sion 3 of the License, or
# (at your option) any later version.
#
# Alignak is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Alignak. If not, see <http://www.gnu.org/licenses/>.
#
#
# This file incorporates work covered by the following copyright and
# permission notice:
#
# Copyright (C) 2009-2014:
# Jean Gabes, [email protected]
# Hartmut Goebel, [email protected]
# Grégory Starck, [email protected]
# Sebastien Coavoux, [email protected]
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
# This file is used to test reading and processing of config files
#
from alignak_test import *
class TestStarInGroups(AlignakTest):
def setUp(self):
self.setup_with_file('etc/alignak_star_in_hostgroups.cfg')
# If we reach a good start, we are ok :)
# the bug was that an * hostgroup expand get all host_name != ''
# without looking at register 0 or not
def test_star_in_groups(self):
#
# Config is not correct because of a wrong relative path
# in the main config file
#
print "Get the hosts and services"
now = time.time()
host = self.sched.hosts.find_by_name("test_host_0")
host.checks_in_progress = []
host.act_depend_of = [] # ignore the router
router = self.sched.hosts.find_by_name("test_router_0")
router.checks_in_progress = []
router.act_depend_of = [] # ignore the router
svc = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "TEST")
self.assertIsNot(svc, None)
svc = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "TEST_HNAME_STAR")
self.assertIsNot(svc, None)
if __name__ == '__main__':
unittest.main()
|
esdalmaijer/PyGaze
|
examples/simple_experiment/constants.py
|
Python
|
gpl-3.0
| 4,461
| 0.014795
|
## This file is part of PyGaze - the open-source toolbox for eye tracking
##
## PyGaze is a Python module for easily creating gaze contingent experiments
## or other software (as well as non-gaze contingent experiments/software)
## Copyright (C) 2012-2013 Edwin S. Dalmaijer
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>
#
# version: 0.4 (25-03-2013)
# MAIN
DUMMY
|
MODE = True # False for gaze contingent display, True for dummy mode (using mouse or joystick)
LOGFILENAME = 'default' # logfilename, without path
LOGFILE = LOGFILENAME[:] # .txt; adding path before logfilename is optional; logs responses (NOT eye movements, these are stored in an EDF file!)
TRIALS = 5
# DISPLAY
# used in libscreen, for the *_display functions. The values may be adjusted,
# b
|
ut not the constant's names
SCREENNR = 0 # number of the screen used for displaying experiment
DISPTYPE = 'pygame' # either 'psychopy' or 'pygame'
DISPSIZE = (1920, 1080) # resolution
SCREENSIZE = (34.5, 19.7) # physical display size in cm
MOUSEVISIBLE = False # mouse visibility
BGC = (125,125,125) # backgroundcolour
FGC = (0,0,0) # foregroundcolour
FULLSCREEN = False
# SOUND
# defaults used in libsound. The values may be adjusted, but not the constants'
# names
SOUNDOSCILLATOR = 'sine' # 'sine', 'saw', 'square' or 'whitenoise'
SOUNDFREQUENCY = 440 # Herz
SOUNDLENGTH = 100 # milliseconds (duration)
SOUNDATTACK = 0 # milliseconds (fade-in)
SOUNDDECAY = 5 # milliseconds (fade-out)
SOUNDBUFFERSIZE = 1024 # increase if playback is choppy
SOUNDSAMPLINGFREQUENCY = 48000 # samples per second
SOUNDSAMPLESIZE = -16 # determines bit depth (negative is signed)
SOUNDCHANNELS = 2 # 1 = mono, 2 = stereo
# INPUT
# used in libinput. The values may be adjusted, but not the constant names.
MOUSEBUTTONLIST = None # None for all mouse buttons; list of numbers for buttons of choice (e.g. [1,3] for buttons 1 and 3)
MOUSETIMEOUT = None # None for no timeout, or a value in milliseconds
KEYLIST = None # None for all keys; list of keynames for keys of choice (e.g. ['space','9',':'] for space, 9 and ; keys)
KEYTIMEOUT = 1 # None for no timeout, or a value in milliseconds
JOYBUTTONLIST = None # None for all joystick buttons; list of button numbers (start counting at 0) for buttons of choice (e.g. [0,3] for buttons 0 and 3 - may be referred to as 1 and 4 in other programs)
JOYTIMEOUT = None # None for no timeout, or a value in milliseconds
# EYETRACKER
# general
TRACKERTYPE = 'smi' # either 'smi', 'eyelink' or 'dummy' (NB: if DUMMYMODE is True, trackertype will be set to dummy automatically)
SACCVELTHRESH = 35 # degrees per second, saccade velocity threshold
SACCACCTHRESH = 9500 # degrees per second, saccade acceleration threshold
# EyeLink only
# SMI only
SMIIP = '127.0.0.1'
SMISENDPORT = 4444
SMIRECEIVEPORT = 5555
# FRL
# Used in libgazecon.FRL. The values may be adjusted, but not the constant names.
FRLSIZE = 200 # pixels, FRL-size
FRLDIST = 125 # distance between fixation point and FRL
FRLTYPE = 'gauss' # 'circle', 'gauss', 'ramp' or 'raisedCosine'
FRLPOS = 'center' # 'center', 'top', 'topright', 'right', 'bottomright', 'bottom', 'bottomleft', 'left', or 'topleft'
# CURSOR
# Used in libgazecon.Cursor. The values may be adjusted, but not the constants' names
CURSORTYPE = 'cross' # 'rectangle', 'ellipse', 'plus' (+), 'cross' (X), 'arrow'
CURSORSIZE = 20 # pixels, either an integer value or a tuple for width and height (w,h)
CURSORCOLOUR = 'pink' # colour name (e.g. 'red'), a tuple RGB-triplet (e.g. (255, 255, 255) for white or (0,0,0) for black), or a RGBA-value (e.g. (255,0,0,255) for red)
CURSORFILL = True # True for filled cursor, False for non filled cursor
CURSORPENWIDTH = 3 # cursor edge width in pixels (only if cursor is not filled)
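# Example derived quantity (arithmetic only, not a PyGaze constant): the
# horizontal pixel density implied by the display settings above.
#   PIXPERCM = DISPSIZE[0] / SCREENSIZE[0]   # 1920 / 34.5 ~= 55.7 px/cm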
|
nicorellius/pdxpixel
|
pdxpixel/core/mailgun.py
|
Python
|
mit
| 1,073
| 0.002796
|
import requests
def send_simple_message():
return requests.post(
"https://api.mailgun.net/v3/sandbox049ff464a4d54974bb0143935f9577ef.mailgun.org/messages",
auth=("api", "key-679dc79b890e700f11f001a6bf86f4a1"),
data={"from": "Mailgun Sandbox <[email protected]>",
"to": "nick <[email protected]>",
"subject": "Hello nick",
"text": "Congratulations nick, you just sent an email with Mailgun! You are truly awesome! You can see a recor
|
d of this email in your logs: https://mailgun.com/cp/log . You can send up to 300 emails/day from this sandbox server. Next, you should add your own domain so you can send 10,000 emails/month for free."})
# cURL command to send mail with API key
# curl -s --user 'api:key-679dc79b890e700f11f001a6bf86f4a1' \
# https://api.mailgun.net/v3/mail.pdxpixel.com/messages \
# -F from='Excited User <[email protected]
|
>' \
# -F [email protected] \
# -F subject='Hello' \
# -F text='Testing some Mailgun awesomness!'
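# The send_simple_message() call above is the requests equivalent of this cURL
# command: auth=("api", key) supplies the HTTP basic auth pair and the data
# dict becomes the form fields (from/to/subject/text).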
|
scieloorg/citedby
|
citedby/__init__.py
|
Python
|
bsd-2-clause
| 1,974
| 0.000507
|
import os
from pyramid.config import Configurator
from pyramid.renderers import JSONP
from pyramid.settings import aslist
from citedby import controller
from citedby.controller import cache_region as controller_cache_region
def main(global_config, **settings):
""" This function returns a Pyramid WSGI application.
"""
config = Configurator(settings=settings)
config.add_renderer('jsonp', JSONP(param_name='callback', indent=4))
def add_controller(request):
es = os.environ.get(
'ELASTICSEARCH_HOST',
settings.get('elasticsearch_host', '127.0.0.1:9200')
)
es_index = os.environ.get(
'ELASTICSEARCH_INDEX',
settings.get('elasticsearch_index', 'citations')
)
return controller.controller(
aslist(es),
sniff_on_connection_fail=True,
timeout=600
).set_base_index(es_index)
config.add_route('index', '/')
config.add_route('status', '/_status/')
config.add_route('citedby_pid', '/api/v1/pid/')
config.add_route('citedby_doi', '/api/v1/doi/')
config.add_route('citedby_meta', '/api/v1/meta/')
config.add_request_method(add_controller, 'controller', reify=True)
# Cache Settings Config
memcached_host = os.environ.get(
'MEMCACHED_HOST',
settings.get('memcached_host', None)
)
memcached_expiration_time = os.environ.get(
'MEMCACHED
|
_EXPIRATION_TIME',
settings.get('memcached_expiration_time', 2592000) # a month cache
)
    if memcached_host is not None:
cache_config = {}
cache_config['expiration_time'] = int(memcached_expiration_time)
cache_config['arguments'] = {'url': memcached_host, 'binary': True}
controller_cache_region.config
|
ure('dogpile.cache.pylibmc', **cache_config)
else:
controller_cache_region.configure('dogpile.cache.null')
config.scan()
return config.make_wsgi_app()
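# A minimal serving sketch (assumes the waitress server, which is not a stated
# dependency of this package; the settings values are placeholders):
#   from waitress import serve
#   app = main({}, **{'elasticsearch_host': '127.0.0.1:9200',
#                     'elasticsearch_index': 'citations'})
#   serve(app, listen='127.0.0.1:6543')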
|
Microvellum/Fluid-Designer
|
win64-vc/2.78/python/lib/test/test_startfile.py
|
Python
|
gpl-3.0
| 1,193
| 0.000838
|
# Ridiculously simple test of the os.startfile function for Windows.
#
# empty.vbs is an empty file (except for a comme
|
nt), which does
# nothing when run with cscript or wscript.
#
# A possible improvement would be to have empty.vbs do something that
# we can detect here, to make sure that not only the os.startfile()
# call succeeded, but also the script actually has run.
import unittest
from test import support
import os
import sys
from os import path
startfile = support.get_attribute(os, 'startfile')
class TestCase(unittest.TestC
|
ase):
def test_nonexisting(self):
self.assertRaises(OSError, startfile, "nonexisting.vbs")
def test_empty(self):
# We need to make sure the child process starts in a directory
# we're not about to delete. If we're running under -j, that
# means the test harness provided directory isn't a safe option.
# See http://bugs.python.org/issue15526 for more details
with support.change_cwd(path.dirname(sys.executable)):
empty = path.join(path.dirname(__file__), "empty.vbs")
startfile(empty)
startfile(empty, "open")
if __name__ == "__main__":
unittest.main()
|
highweb-project/highweb-webcl-html5spec
|
tools/perf/page_sets/typical_25.py
|
Python
|
bsd-3-clause
| 4,012
| 0.004487
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import shutil
from profile_creators import profile_generator
from profile_creators import small_profile_extender
from telemetry.page import page as page_module
from telemetry.page import shared_page_state
from telemetry import story
class Typical25ProfileSharedState(shared_page_state.SharedDesktopPageState):
"""Shared state associated with a profile generated from 25 navigations.
Generates a shared profile on initialization.
"
|
""
def __init__(self, test, finder_options, story_set):
super(Typical25ProfileSharedState, self).__init__(
test, finder_options, story_set)
generator = profile_generator.ProfileGenerator(
small_profile_extender.SmallProfileExtender,
'small_profile')
self._out_dir, self._owns_out_dir = generator.Run(finder_options)
if self._out_dir:
finder_options.browser_options.profile_dir = self._out_dir
else:
finder_options.bro
|
wser_options.dont_override_profile = True
def TearDownState(self):
"""Clean up generated profile directory."""
super(Typical25ProfileSharedState, self).TearDownState()
if self._owns_out_dir:
shutil.rmtree(self._out_dir)
class Typical25Page(page_module.Page):
def __init__(self, url, page_set, run_no_page_interactions,
shared_page_state_class=shared_page_state.SharedDesktopPageState):
super(Typical25Page, self).__init__(
url=url, page_set=page_set,
shared_page_state_class=shared_page_state_class)
self._run_no_page_interactions = run_no_page_interactions
def RunPageInteractions(self, action_runner):
if self._run_no_page_interactions:
return
with action_runner.CreateGestureInteraction('ScrollAction'):
action_runner.ScrollPage()
class Typical25PageSet(story.StorySet):
""" Pages designed to represent the median, not highly optimized web """
def __init__(self, run_no_page_interactions=False,
page_class=Typical25Page):
super(Typical25PageSet, self).__init__(
archive_data_file='data/typical_25.json',
cloud_storage_bucket=story.PARTNER_BUCKET)
urls_list = [
# Why: Alexa games #48
'http://www.nick.com/games',
# Why: Alexa sports #45
'http://www.rei.com/',
# Why: Alexa sports #50
'http://www.fifa.com/',
# Why: Alexa shopping #41
'http://www.gamestop.com/ps3',
# Why: Alexa shopping #25
'http://www.barnesandnoble.com/u/books-bestselling-books/379003057/',
# Why: Alexa news #55
('http://www.economist.com/news/science-and-technology/21573529-small-'
'models-cosmic-phenomena-are-shedding-light-real-thing-how-build'),
# Why: Alexa news #67
'http://www.theonion.com',
'http://arstechnica.com/',
# Why: Alexa home #10
'http://allrecipes.com/Recipe/Pull-Apart-Hot-Cross-Buns/Detail.aspx',
'http://www.html5rocks.com/en/',
'http://www.mlb.com/',
# pylint: disable=line-too-long
'http://gawker.com/5939683/based-on-a-true-story-is-a-rotten-lie-i-hope-you-never-believe',
'http://www.imdb.com/title/tt0910970/',
'http://www.flickr.com/search/?q=monkeys&f=hp',
'http://money.cnn.com/',
'http://www.nationalgeographic.com/',
'http://premierleague.com',
'http://www.osubeavers.com/',
'http://walgreens.com',
'http://colorado.edu',
('http://www.ticketmaster.com/JAY-Z-and-Justin-Timberlake-tickets/artist/'
'1837448?brand=none&tm_link=tm_homeA_rc_name2'),
# pylint: disable=line-too-long
'http://www.theverge.com/2013/3/5/4061684/inside-ted-the-smartest-bubble-in-the-world',
'http://www.airbnb.com/',
'http://www.ign.com/',
# Why: Alexa health #25
'http://www.fda.gov',
]
for url in urls_list:
self.AddStory(
page_class(url, self, run_no_page_interactions))
|
cmc333333/regulations-parser
|
tests/commands_clear_tests.py
|
Python
|
cc0-1.0
| 2,301
| 0
|
import os
from unittest import TestCase
from click.testing import CliRunner
from regparser.commands.clear import clear
from regparser.index import entry
class CommandsClearTests(TestCase):
def setUp(self):
self.cli = CliRunner()
def test_no_errors_when_clear(self):
"""Should raise no errors when no cached files are present"""
with self.cli.isolated_filesystem():
self.cli.invoke(clear)
def test_deletes_fr_cache(self):
with self.cli.isolated_filesystem():
open('fr_cache.sqlite', 'w').close()
self.assertTrue(os.path.exists('fr_cache.sqlite'))
# flag must be present
self.cli.invoke(clear)
self.assertTrue(os.path.exists('fr_cache.sqlite'))
self.cli.invoke(clear, ['--http-cache'])
self.assertFalse(os.path.exists('fr_cache.sqlite'))
def test_deletes_index(self):
with self.cli.isolated_filesystem():
entry.Entry('aaa', 'bbb').write('ccc')
entry.Entry('bbb', 'ccc').write('ddd')
self.assertEqual(1, len(entry.Entry("aaa")))
self.assertEqual(1, len(entry.Entry("bbb")))
self.cli.invoke(clear)
self.assertEqual(0, len(entry.Entry("aaa")))
self.assertEqual(0, len(entry.Entry("bbb")))
def test_deletes_can_be_focused(self):
"""If params are provided to delete certain directories, only those
directories should get removed"""
with self.cli.isolated_filesystem():
to_delete = ['delroot/aaa/bbb', 'delroot/aaa/ccc',
'root/delsub/aaa', 'root/delsub/bbb']
to_keep = ['root/othersub/aaa', 'root/aaa',
'top-level-file', 'other-root/aaa']
for path in to_delete + to_keep:
entry.Entry(*path.split('/')).write('')
se
|
lf.cli.invoke(clear, ['delroot', 'root/delsub'])
self.assertItemsEqual(['top-level-file', 'root', 'other-root'],
|
list(entry.Entry()))
self.assertItemsEqual(['othersub', 'aaa'],
list(entry.Entry('root')))
self.assertItemsEqual(['aaa'],
list(entry.Entry('other-root')))
|
nickfrostatx/polyrents-challenge
|
tests/test_phase2.py
|
Python
|
mit
| 4,200
| 0
|
from util import app
import hashlib
import os
phase2_url = '/phase2-%s/' % os.environ.get('PHASE2_TOKEN')
admin_password = u'adminpass'
admin_hash = hashlib.sha1(admin_password.encode('utf-8')).hexdigest()
session_key = 'sessionkey'
admin_session_key = 'adminsessionkey'
def init_data(redis):
redis.set('user:test:password', hashlib.sha1(b'test').hexdigest())
redis.set('user:admin:password', admin_hash)
redis.set('user:test:1', 'Buy groceries')
redis.set('user:test:2', 'Clean the patio')
redis.set('user:test:3', 'Take over the world')
redis.rpush('items:test', 1, 2, 3)
redis.set('session:%s' % session_key, 'test')
redis.set('session:%s' % admin_session_key, 'admin')
return app
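# Key schema seeded above: user:<name>:password holds a sha1 hex digest,
# user:<name>:<n> holds the n-th todo item, items:<name> lists the item ids,
# and session:<key> maps a session cookie back to a username.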
def test_home(app):
rv = app.get(phase2_url)
assert b'Sign In' in rv.data
assert rv.status_code == 200
def test_404(app):
rv = app.get(phase2_url + 'asdf')
assert rv.status_code == 404
def test_get_405(app):
rv = app.get(phase2_url + 'login/')
assert rv.status_code == 405
def test_403s(app):
"""These should return 403 instead of 404."""
for url in ('dashboard/', 'dashboard/test/1/', 'dash
|
board/abc/def/'):
rv = app.get(phase2_url + url)
assert rv.status_code == 403
rv = app.get(phase2_url + url, headers={'Cookie': 'session=asdf'})
assert rv.status_code == 403
def test_post_405(app):
"""Be sure this returns 405, instead of 404 or 403."""
for url in ('', 'dashboard/', 'dashboard/test/1/', 'dashboard/abc/def/'):
rv = app.post(phase2_url + url)
assert rv.status_code == 405
def test_bad_login(app):
url = phase2_u
|
rl + 'login/'
init_data(app.application.redis)
rv = app.post(url)
assert 'dashboard' not in rv.headers.get('Location')
assert rv.status_code == 303
rv = app.post(url, data={'username': 'abcdef', 'password': 'abcdef'})
assert 'dashboard' not in rv.headers.get('Location')
assert rv.status_code == 303
rv = app.post(url, data={'username': 'test'})
assert 'dashboard' not in rv.headers.get('Location')
assert rv.status_code == 303
rv = app.post(url, data={'username': 'test', 'password': 'abcdef'})
assert 'dashboard' not in rv.headers.get('Location')
assert rv.status_code == 303
def test_good_login(app):
url = phase2_url + 'login/'
init_data(app.application.redis)
rv = app.post(url, data={'username': 'test', 'password': 'test'})
assert rv.status_code == 303
assert 'session=' in rv.headers.get('Set-Cookie')
assert 'dashboard' in rv.headers.get('Location')
rv = app.post(url, data={'username': 'admin', 'password': admin_password})
assert rv.status_code == 303
assert 'session=' in rv.headers.get('Set-Cookie')
assert 'dashboard' in rv.headers.get('Location')
def test_dashboard(app):
url = phase2_url + 'dashboard/'
init_data(app.application.redis)
rv = app.get(url, headers={'Cookie': 'session=%s' % session_key})
assert b'Buy groceries' in rv.data
assert b'Take over the world' in rv.data
assert rv.status_code == 200
def test_item_404(app):
url = phase2_url + 'dashboard/'
init_data(app.application.redis)
rv = app.get(url + 'abcdef/0/', headers={
'Cookie': 'session=%s' % session_key})
assert rv.status_code == 404
rv = app.get(url + 'test/0/', headers={
'Cookie': 'session=%s' % session_key})
assert rv.status_code == 404
rv = app.get(url + 'admin/1/', headers={
'Cookie': 'session=%s' % session_key})
assert rv.status_code == 404
def test_solution(app):
url = phase2_url + 'dashboard/admin/password/'
init_data(app.application.redis)
rv = app.get(url, headers={'Cookie': 'session=%s' % session_key})
assert admin_hash.encode('utf-8') in rv.data
assert rv.status_code == 200
def test_admin_dashboard(app):
url = phase2_url + 'dashboard/'
init_data(app.application.redis)
rv = app.get(url, headers={'Cookie': 'session=%s' % admin_session_key})
assert b'Challenge complete!' in rv.data
assert rv.status_code == 200
|
meltmedia/the-ark
|
tests/test_rhino_client.py
|
Python
|
apache-2.0
| 3,445
| 0.00029
|
import unittest
from mock import patch, Mock
from the_ark import rhino_client
__author__ = 'chaley'
rhino_client_obj = None
class UtilsTestCase(unittest.TestCase):
def setUp(self):
self.rhino_client_obj = rhino_client.RhinoClient('test_name',
'url', 'brand',
'branch', 'build_id',
'user',
'rhino_client_url')
def test_set_log(self):
self.rhino_client_obj.set_log("file_path", "link_text")
self.assertEqual('file_path',
self.rhino_client_obj.test_data['result_url'])
self.assertEqual('link_text',
self.rhino_client_obj.test_data['result_text'])
@patch('requests.get')
def test_get(self, requests_get):
r = Mock()
r.json.return_value = {"stuff": "stuff"}
requests_get.return_value = r
response = self.rhino_client_obj.get('test_id')
self.assertEqual({"stuff": "stuff"}, response)
@patch('requests.post')
def test_post(self, requests_post):
request_json = Mock()
request_json.status_code = 201
requests_post.return_value = request_json
self.rhino_client_obj.post()
self.assertEqual(True, self.rhino_client_obj.posted)
@patch('requests.post')
def test_post_fail(self, requests_post):
request_json = Mock()
request_json.status_code = 400
requests_post.return_value = request_json
self.assertRaises(Exception, self.rhino_client_obj.post)
@patch('requests.put')
def test_put(self, requests_put):
self.rhino_client_obj.test_data['test_id'] = 156465465
self.rhino_client_obj.posted = True
request_json = Mock()
request_json.status_code = 201
request_json.json.return_value = {"stuff": "stuff"}
requests_put.return_value = request_json
self.rhino_client_obj.put()
self.assertEqual(True, self.rhino_client_obj.posted)
def test_put_posted_false(self):
self.assertRaises(Exception, self.rhino_client_obj.put)
@patch('requests.put')
def test_put_status_false(self, requests_put):
self.rhino_client_obj.test_data['test_id'] = 156465465
self.rhino_client_obj.posted = True
request_json = Mock()
request_json.status_code = 500
requests_put.return_value = request_json
self.assertRaises(rhino_client.RhinoClientException,
self.rhino_client_obj.put)
@patch('requests.post')
def test_send_test_post(self, requests_post):
request_json = Mock()
request_json.status_code = 201
requests_post.return_value = request_json
self.rhino_client_obj.send_test("status")
self.assertEqua
|
l(True, self.rhino_client_obj.posted)
@patch('requests.put')
def test_send_test_put(self, requests_put):
self.rhino_client_obj.test_data['test_id'] = 156465465
self.rhino_client_obj.posted = True
request_json = Mock()
request_json.status_code = 201
requests_put.return_value = request_json
self.rhino_client_obj.send_test("status")
self.asser
|
tEqual(True, self.rhino_client_obj.posted)
if __name__ == '__main__':
unittest.main()
|
dsimandl/teamsurmandl
|
gallery/forms.py
|
Python
|
mit
| 1,431
| 0.002096
|
import zipfile
import imghdr
from django import forms
from .models import Image, ImageBatchUp
|
load, Album
class AlbumAdminForm(forms.ModelForm):
class Meta:
model = Album
fields = '__all__'
def clean(self):
cleaned_data = self.cleaned_data
|
if cleaned_data.get('authorized_users') is None:
pass
else:
if cleaned_data.get('all_users') and cleaned_data.get('authorized_users').count() != 0:
cleaned_data['all_users'] = False
return cleaned_data
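# Example of the clean() rule above: if an admin checks all_users and also
# selects specific authorized_users, the all_users flag is silently dropped so
# the explicit user list wins.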
class ImageAdminForm(forms.ModelForm):
class Meta:
model = Image
fields = ('public', 'title', 'image', 'albums', 'user')
def clean_image(self):
image = self.cleaned_data['image']
if image is None:
return image
elif not imghdr.what(image):
raise forms.ValidationError(u"The file is not an image file")
else:
return image
class ImageBatchUploadAdminForm(forms.ModelForm):
class Meta:
model = ImageBatchUpload
fields = ('public', 'title', 'zip_file', 'albums', 'user')
def clean_zip_file(self):
image_zip = self.cleaned_data['zip_file']
if image_zip is None:
return image_zip
elif not zipfile.is_zipfile(image_zip):
raise forms.ValidationError(u"The file is not a zip file")
else:
return image_zip
|
Spiderlover/Toontown
|
toontown/catalog/CatalogNametagItem.py
|
Python
|
mit
| 3,528
| 0.001701
|
import CatalogItem
from toontown.toonbase import ToontownGlobals
from toontown.toonbase import TTLocalizer
from otp.otpbase import OTPLocalizer
from direct.interval.IntervalGlobal import *
from direct.gui.DirectGui import *
class CatalogNametagItem(CatalogItem.CatalogItem):
sequenceNumber = 0
def makeNewItem(self, nametagStyle):
self.nametagStyle = nametagStyle
CatalogItem.CatalogItem.makeNewItem(self)
def getPurchaseLimit(self):
return 1
def reachedPurchaseLimit(self, avatar):
if self in avatar.onOrder or self in avatar.mailboxContents or self in avatar.onGiftOrder or self in avatar.awardMailboxContents or self in avatar.onAwardOrder:
return 1
if avatar.nametagStyle == self.nametagStyle:
return 1
return 0
def getAcceptItemErrorText(self, retcode):
if retcode == ToontownGlobals.P_ItemAvailable:
return TTLocalizer.CatalogAcceptNametag
return CatalogItem.CatalogItem.getAcceptItemErrorText(self, retcode)
def saveHistory(self):
return 1
def getTypeName(self):
return TTLocalizer.NametagTypeName
def getName(self):
if self.nametagStyle == 100:
name = TTLocalizer.UnpaidNameTag
else:
name = TTLocalizer.NametagFontNames[self.nametagStyle]
if TTLocalizer.NametagReverse:
name = TTLocalizer.NametagLabel + name
else:
name = name + TTLocalizer.NametagLabel
return name
if self.nametagStyle == 0:
name = TTLocalizer.NametagPaid
elif self.nametagStyle == 1:
name = TTLocalizer.NametagAction
elif self.nametagStyle == 2:
name = TTLocalizer.Nam
|
etagFrilly
def recordPurchase(self, avatar, optional):
if avatar:
avatar.b_setNametagStyle(self.nametagStyle)
return ToontownGlobals.P_ItemAvailable
def getPicture(self, avatar):
frame = self.makeFrame()
if self.nametagStyle == 100:
inFont = ToontownGlobals.getToonFo
|
nt()
else:
inFont = ToontownGlobals.getNametagFont(self.nametagStyle)
nameTagDemo = DirectLabel(parent=frame, relief=None, pos=(0, 0, 0.24), scale=0.5, text=base.localAvatar.getName(), text_fg=(1.0, 1.0, 1.0, 1), text_shadow=(0, 0, 0, 1), text_font=inFont, text_wordwrap=9)
self.hasPicture = True
return (frame, None)
def output(self, store = -1):
return 'CatalogNametagItem(%s%s)' % (self.nametagStyle, self.formatOptionalData(store))
def compareTo(self, other):
return self.nametagStyle - other.nametagStyle
def getHashContents(self):
return self.nametagStyle
    def getBasePrice(self):
        cost = 500
        if self.nametagStyle == 0:
            cost = 600
        elif self.nametagStyle == 1:
            cost = 600
        elif self.nametagStyle == 2:
            cost = 600
        elif self.nametagStyle == 100:
            cost = 50
        return cost
def decodeDatagram(self, di, versionNumber, store):
CatalogItem.CatalogItem.decodeDatagram(self, di, versionNumber, store)
self.nametagStyle = di.getUint16()
def encodeDatagram(self, dg, store):
CatalogItem.CatalogItem.encodeDatagram(self, dg, store)
dg.addUint16(self.nametagStyle)
def isGift(self):
return 0
def getBackSticky(self):
itemType = 1
numSticky = 4
return (itemType, numSticky)
|
paulmartel/voltdb
|
lib/python/voltcli/voltadmin.d/pause.py
|
Python
|
agpl-3.0
| 9,312
| 0.00494
|
# This file is part of VoltDB.
# Copyright (C) 2008-2016 VoltDB Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with VoltDB. If not, see <http://www.gnu.org/licenses/>.
import time
@VOLT.Command(
bundles = VOLT.AdminBundle(),
description = 'Pause the VoltDB cluster and switch it to admin mode.',
options = (
VOLT.BooleanOption('-w', '--wait', 'waiting',
'wait for all DR and Export transactions to be externally processed',
default = False)
)
)
def pause(runner):
# Check the STATUS column. runner.call_proc() detects and aborts on errors.
status = runner.call_proc('@Pause', [], []).table(0).tuple(0).column_integer(0)
if status <> 0:
runner.error('The cluster has failed to pause with status: %d' % status)
return
runner.info('The cluster is paused.')
if runner.opts.waiting:
status = runner.call_proc('@Quiesce', [], []).table(0).tuple(0).column_integer(0)
if status <> 0:
runner.error('The cluster has failed to quiesce with status: %d' % status)
return
runner.info('The cluster is quiesced.')
# check the dr stats
partition_min_host = dict()
partition_min = dict()
partition_max = dict()
check_dr(runner, partition_min_host, partition_min, partition_max)
# check the export stats twice because they are periodic
export_tables_with_data = dict()
check_dr(runner, partition_min_host, partition_min, partition_max)
last_table_stat_time = 0
last_table_stat_time = check_export(runner, export_tables_with_data, last_table_stat_time)
if not partition_min and last_table_stat_time == 1:
# there are no outstanding export or dr transactions
runner.info('All export and DR transactions have been processed.')
return
# after 10 seconds notify admin of what transactions have not drained
notifyInterval = 10
# have to get two samples of table stats because the cached value could be from before Quiesce
while True:
time.sleep(1)
if partition_min:
check_dr(runner, partition_min_host, partition_min, partition_max)
if last_table_stat_time > 1:
curr_table_stat_time = check_export(runner, export_tables_with_data, last_table_stat_time)
if last_table_stat_time == 1 or curr_table_stat_time > last_table_stat_time:
# have a new sample from table stat cache or there are no tables
if not export_tables_with_data and not partition_min:
runner.info('All export and DR transactions have been processed.')
return
notifyInterval -= 1
if notifyInterval == 0:
notifyInterval = 10
if last_table_stat_time > 1 and export_tables_with_data:
print_export_pending(runner, export_tables_with_data)
if partition_min:
print_dr_pending(runner, partition_min_host, partition_min, partition_max)
def get_stats(runner, component):
retry = 5
while True:
response = runner.call_proc('@Statistics', [VOLT.FastSerializer.VOLTTYPE_STRING,
VOLT.FastSerializer.VOLTTYPE_INTEGER], [component, 0])
status = response.status()
if status <> 1 and "timeout" in response.statusString:
if retry == 0:
runner.error('Unable to collect DR or export statistics from the cluster')
else:
                time.sleep(1)
retry -= 1
continue
        if status <> 1:
            runner.error("Unexpected response to @Statistics %s: %s" % (component, response))
        return response
return response
def check_dr(runner, partition_min_host, partition_min, partition_max):
resp = get_stats(runner, 'DRPRODUCER')
partition_data = resp.table(0)
for pid in partition_min:
# reset all min values to find the new min
if pid in partition_max:
partition_min[pid] = partition_max[pid]
for r in partition_data.tuples():
pid = r[3]
hostname = str(r[2])
if str(r[8]) == 'None':
last_queued = -1
else:
last_queued = r[8]
if str(r[9]) == 'None':
last_acked = -1
else:
last_acked = r[9]
# check TOTALBYTES
if r[5] > 0:
# track the highest seen drId for each partition
# use last queued to get the upper bound
if pid in partition_max:
partition_max[pid] = max(last_queued, partition_max[pid])
else:
partition_max[pid] = last_queued
if pid in partition_min:
if last_acked < partition_min[pid]:
# this replica is farther behind
partition_min[pid] = last_acked
else:
partition_min_host[pid] = set()
partition_min[pid] = last_acked
partition_min_host[pid].add(hostname)
else:
# this hostname's partition has an empty InvocationBufferQueue
if pid in partition_min:
# it was not empty on a previous call
partition_min_host[pid].discard(hostname)
if not partition_min_host[pid]:
del partition_min_host[pid]
del partition_min[pid]
if pid in partition_max:
if partition_max[pid] > last_acked:
runner.warning("DR Producer reports no data for partition %i on host %s but last acked drId (%i) does not match other hosts last acked drId (%s)" % (pid, hostname, last_acked, partition_max[pid]))
partition_max[pid] = max(last_acked, partition_max[pid])
else:
partition_max[pid] = last_acked
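# Worked example of the bookkeeping above (hypothetical numbers): if two
# replicas of partition 7 still have TOTALBYTES queued and report last acked
# drIds 90 and 95, partition_min[7] ends up 90 (the replica farthest behind)
# while partition_max[7] tracks the highest last queued drId seen; the pause
# loop keeps polling until every replica of the partition drains and is
# discarded from partition_min_host.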
def print_dr_pending(runner, partition_min_host, partition_min, partition_max):
runner.info('The following partitions have pending DR transactions that the consumer cluster has not processed:')
summaryline = " Partition %i needs acknowledgements for drIds %i to %i on hosts: %s."
for pid in partition_min_host:
runner.info(summaryline % (pid, partition_min[pid]+1, partition_max[pid], ', '.join(partition_min_host[pid])))
def check_export(runner, export_tables_with_data, last_collection_time):
resp = get_stats(runner, 'TABLE')
export_tables = 0
collection_time = 0
if not resp.table_count() > 0:
# this is an empty database and we don't need to wait for export to drain
return 1
else:
tablestats = resp.table(0)
firsttuple = tablestats.tuple(0)
if firsttuple.column(0) == last_collection_time:
# this statistic is the same cached set as the last call
return last_collection_time
else:
collection_time = firsttuple.column(0)
for r in tablestats.tuples():
|
# first look for streaming (export) tables
if str(r[6]) == 'StreamedTable':
pendingData = r[8]
tablename = str(r[5])
pid = r[4]
hostname = str(r[2])
if pendingData > 0:
if not tablename in export_tables_with_data:
export_tables_with_data[tablename] = dict()
tabledata = export_tables_with_data
|
[tablename]
if not hostname in tabledata:
tabledata[hostname] = set()
tabledata[hostname]
|
chergert/libgit2-glib
|
tools/coverage.py
|
Python
|
lgpl-2.1
| 6,733
| 0.003864
|
#!/usr/bin/env python3
import os, sys, glob, pickle, subprocess
sys.path.insert(0, os.path.dirname(__file__))
from clang import cindex
sys.path = sys.path[1:]
def configure_libclang():
llvm_libdirs = ['/usr/lib/llvm-3.2/lib', '/usr/lib64/llvm']
try:
libdir = subprocess.check_output(['llvm-config', '--libdir']).decode('utf-8').strip()
llvm_libdirs.insert(0, libdir)
except OSError:
pass
for d in llvm_libdirs:
if not os.path.exists(d):
continue
files
|
= glob.glob(os.path.join(d, 'libclang.so*'))
if len(files) != 0:
cindex.Config.set_library_file(files[0])
return
class Call:
def __init__(self, cursor, decl):
self.ident = cursor.displayname.decode('utf-8')
self.filename = cursor.location.file.name.decode('utf-8')
ex = cursor.extent
self.start_line = ex.start.line
s
|
elf.start_column = ex.start.column
self.end_line = ex.end.line
self.end_column = ex.end.column
self.decl_filename = decl.location.file.name.decode('utf-8')
class Definition:
def __init__(self, cursor):
self.ident = cursor.spelling.decode('utf-8')
self.display = cursor.displayname.decode('utf-8')
self.filename = cursor.location.file.name.decode('utf-8')
ex = cursor.extent
self.start_line = ex.start.line
self.start_column = ex.start.column
self.end_line = ex.end.line
self.end_column = ex.end.column
def process_diagnostics(tu):
diagnostics = tu.diagnostics
haserr = False
for d in diagnostics:
sys.stderr.write('{0}\n'.format(d.format.decode('utf-8')))
if d.severity > cindex.Diagnostic.Warning:
haserr = True
if haserr:
sys.exit(1)
def walk_cursors(tu, files):
proc = list(tu.cursor.get_children())
while len(proc) > 0:
cursor = proc[0]
proc = proc[1:]
if cursor.location.file is None:
continue
fname = cursor.location.file.name.decode('utf-8')
if fname in files:
yield cursor
proc += list(cursor.get_children())
def newer(a, b):
try:
return os.stat(a).st_mtime > os.stat(b).st_mtime
except:
return True
def scan_libgit2_glib(cflags, files, git2dir):
files = [os.path.abspath(f) for f in files]
dname = os.path.dirname(__file__)
allcalls = {}
l = 0
if not os.getenv('SILENT'):
sys.stderr.write('\n')
i = 0
for f in files:
if not os.getenv('SILENT'):
name = os.path.basename(f)
if len(name) > l:
l = len(name)
perc = int((i / len(files)) * 100)
sys.stderr.write('[{0: >3}%] Processing ... {1}{2}\r'.format(perc, name, ' ' * (l - len(name))))
i += 1
astf = os.path.join(dname, '.' + os.path.basename(f) + '.cache')
if not newer(f, astf):
with open(astf, 'rb') as fo:
calls = pickle.load(fo)
else:
tu = cindex.TranslationUnit.from_source(f, cflags)
process_diagnostics(tu)
calls = {}
for cursor in walk_cursors(tu, files):
if cursor.kind == cindex.CursorKind.CALL_EXPR or \
cursor.kind == cindex.CursorKind.DECL_REF_EXPR:
cdecl = cursor.get_referenced()
if cdecl.kind != cindex.CursorKind.FUNCTION_DECL:
continue
if (not cdecl is None) and (not cdecl.location.file is None):
fdefname = cdecl.location.file.name.decode('utf-8')
if fdefname.startswith(git2dir):
call = Call(cursor, cdecl)
if call.ident in calls:
calls[call.ident].append(call)
else:
calls[call.ident] = [call]
with open(astf, 'wb') as fo:
pickle.dump(calls, fo)
for k in calls:
if k in allcalls:
allcalls[k] += calls[k]
else:
allcalls[k] = list(calls[k])
if not os.getenv('SILENT'):
sys.stderr.write('\r[100%] Processing ... done{0}\n'.format(' ' * (l - 4)))
return allcalls
def scan_libgit2(cflags, git2dir):
tu = cindex.TranslationUnit.from_source(git2dir + '.h', cflags)
process_diagnostics(tu)
headers = glob.glob(os.path.join(git2dir, '*.h'))
defs = {}
objapi = ['lookup', 'lookup_prefix', 'free', 'id', 'owner']
objderiv = ['commit', 'tree', 'tag', 'blob']
ignore = set()
for deriv in objderiv:
for api in objapi:
ignore.add('git_' + deriv + '_' + api)
for cursor in walk_cursors(tu, headers):
if cursor.kind == cindex.CursorKind.FUNCTION_DECL:
deff = Definition(cursor)
if not deff.ident in ignore:
defs[deff.ident] = deff
return defs
configure_libclang()
pos = sys.argv.index('--')
cflags = sys.argv[1:pos]
files = sys.argv[pos+1:]
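# Example invocation (hypothetical paths): compiler flags go before '--', the
# libgit2-glib sources after it, and LIBGIT2_INCLUDE_DIR names the libgit2
# header directory (so that both git2.h and git2/*.h resolve):
#   LIBGIT2_INCLUDE_DIR=/usr/include/git2 \
#   python3 tools/coverage.py -I/usr/include -- libgit2-glib/*.c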
incdir = os.getenv('LIBGIT2_INCLUDE_DIR')
defs = scan_libgit2(cflags, incdir)
calls = scan_libgit2_glib(cflags, files, incdir)
notused = {}
perfile = {}
nperfile = {}
for d in defs:
o = defs[d]
if not d in calls:
notused[d] = defs[d]
if not o.filename in nperfile:
nperfile[o.filename] = [o]
else:
nperfile[o.filename].append(o)
if not o.filename in perfile:
perfile[o.filename] = [o]
else:
perfile[o.filename].append(o)
ss = [notused[f] for f in notused]
ss.sort(key=lambda x: '{0} {1}'.format(os.path.basename(x.filename), x.ident))
lastf = None
keys = list(perfile.keys())
keys.sort()
for filename in keys:
b = os.path.basename(filename)
f = perfile[filename]
n_perfile = len(f)
if filename in nperfile:
n_nperfile = len(nperfile[filename])
else:
n_nperfile = 0
perc = int(((n_perfile - n_nperfile) / n_perfile) * 100)
print('\n File {0}, coverage {1}% ({2} out of {3}):'.format(b, perc, n_perfile - n_nperfile, n_perfile))
cp = list(f)
cp.sort(key=lambda x: "{0} {1}".format(not x.ident in calls, x.ident))
for d in cp:
if d.ident in calls:
print(' \033[32m✓ {0}\033[0m'.format(d.display))
else:
print(' \033[31m✗ {0}\033[0m'.format(d.display))
perc = int(((len(defs) - len(notused)) / len(defs)) * 100)
print('\nTotal coverage: {0}% ({1} functions out of {2} are being called)\n'.format(perc, len(defs) - len(notused), len(defs)))
# vi:ts=4:et
|
blutjens/perc_neuron_ros_ur10
|
pn_ros/bjorn_ws/build/rosserial/rosserial_mbed/catkin_generated/pkg.installspace.context.pc.py
|
Python
|
gpl-3.0
| 511
| 0.001957
|
# generated from c
|
atkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/bjornl/ros/workspaces/bjorn_ws/install/include".split(';') if "/home/bjornl/ros/workspaces/bjorn_ws/install/include" != "" else []
PROJECT_CATKIN_DEPENDS = "message_runtime".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "rosserial_mbed"
PROJEC
|
T_SPACE_DIR = "/home/bjornl/ros/workspaces/bjorn_ws/install"
PROJECT_VERSION = "0.7.6"
|
Protocol-X/script.video.funimationnow
|
resources/lib/modules/menunav.py
|
Python
|
gpl-3.0
| 6,384
| 0.01911
|
# -*- coding: utf-8 -*-
'''
Funimation|Now Add-on
Copyright (C) 2016 Funimation|Now
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import logging;
import re;
import xbmc;
import os;
import xbmcgui;
from resources.lib.modules import utils;
logger = logging.getLogger('funimationnow');
EXIT_CODE = 2;
SUCCESS_CODE = 3;
EXPIRE_CODE = 4;
HOME_SCREEN_CODE = 5;
BACK_CODE = 6;
LOGOUT_CODE = 7;
REST_CODE = 8;
SEARCH_WINDOW = 100100;
HOME_WINDOW = 110101;
QUEUE_WINDOW = 110102;
ALL_WINDOW = 110103;
SIMALCAST_WINDOW = 110104;
GENRE_WINDOW = 110105;
SETTINGS_WINDOW = 110106;
HELP_WINDOW = 110107;
LOGOUT_WINDOW = 110108;
func = dict({
SEARCH_WINDOW: 'search',
HOME_WINDOW: 'home',
QUEUE_WINDOW: 'queue',
ALL_WINDOW: 'all',
SIMALCAST_WINDOW: 'simalcast',
GENRE_WINDOW: 'genres',
SETTINGS_WINDOW: 'settings',
HELP_WINDOW: 'help',
LOGOUT_WINDOW: 'logout',
});
def chooser(landing_page, parent, child, controlID):
result = EXIT_CODE;
logger.debug(controlID);
logger.debug(child);
logger.debug(func.get(controlID));
try:
result = globals()[func.get(controlID)](landing_page, parent, child, controlID);
except Exception as inst:
logger.error(inst);
landing_page.result_code = result;
return result;
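# Dispatch example: a controlID of SEARCH_WINDOW (100100) maps to 'search' in
# the func table, so chooser() calls search(landing_page, parent, child,
# controlID) and stores the returned navigation code on
# landing_page.result_code.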
def home(landing_page, parent, child, controlID):
RESULT_CODE = REST_CODE;
if child == HOME_WINDOW:
RESULT_CODE = REST_CODE;
else:
RESULT_CODE = HOME_SCREEN_CODE;
return RESULT_CODE;
def search(landing_page, parent, child, controlID):
RESULT_CODE = REST_CODE;
if child == SEARCH_WINDOW:
pass;
else:
try:
from resources.lib.gui.searchgui import search;
search(landing_page);
except Exception as inst:
logger.error(inst);
return RESULT_CODE;
def queue(landing_page, parent, child, controlID):
RESULT_CODE = REST_CODE;
if child == QUEUE_WINDOW:
pass;
else:
try:
from resources.lib.gui.watchlistgui import watchlist;
mnavset = dict({
'width': 95,
'title': 'MY QUEUE',
'params': 'id=myqueue&title=My Queue',
'target': 'longlist',
'path': 'longlist/myqueue/'
});
watchlist(landing_page, mnavset);
except Exception as inst:
logger.error(inst);
return RESULT_CODE;
def all(landing_page, parent, child, controlID):
RESULT_CODE = REST_CODE;
if child == ALL_WINDOW:
pass;
else:
try:
from resources.lib.gui.genreselectgui import genreselect;
mnavset = dict({
'width': 140,
'title': 'RECENTLY ADDED',
'params': 'id=shows&title=All Shows&showGenres=true',
'target': 'longlist',
'path': 'longlist/content/',
'offset': 0,
'limit': 144
});
genreselect(landing_page, mnavset);
except Exception as inst:
logger.error(inst);
return RESULT_CODE;
def simalcast(landing_page, parent, child, controlID):
RESULT_CODE = REST_CODE;
if child == SIMALCAST_WINDOW:
pass;
else:
try:
from resources.lib.gui.audioselectgui import audioselect;
mnavset = dict({
'width': 108,
'title': 'SIMULDUBS',
#'params': 'id=simulcasts&title=Simulcasts',
'params': 'id=broadcast-dubs&title=Broadcast Dubs',
'target': 'longlist',
'path': 'longlist/content/'
});
audioselect(landing_page, mnavset);
except Exception as inst:
logger.error(inst);
return RESULT_CODE;
def genres(landing_page, parent, child, controlID):
RESULT_CODE = REST_CODE;
if child == GENRE_WINDOW:
pass;
else:
try:
from resources.lib.gui.genreshowsgui import genreshows;
mnavset = dict({
'width': 140,
'title': 'RECENTLY ADDED',
'params': 'id=genres&title=Genres&role=b',
'target': 'longlist',
'path': 'longlist/genres/',
'offset': 0,
'limit': 144
});
genreshows(landing_page, mnavset);
except Exception as inst:
logger.error(inst);
return RESULT_CODE;
def settings(landing_page, parent, child, controlID):
RESULT_
|
CODE = REST_CODE;
try:
|
#xbmc.executebuiltin('Addon.OpenSettings(%s)' % utils.getAddonInfo('id'));
utils.addon.openSettings();
utils.lock();
utils.sleep(2000);
utils.unlock();
addon_data = xbmc.translatePath(utils.getAddonInfo('profile')).decode('utf-8');
tokens = xbmc.translatePath(os.path.join(addon_data, 'tokens.db'));
if not os.path.exists(tokens):
RESULT_CODE = LOGOUT_CODE;
except Exception as inst:
logger.error(inst);
return RESULT_CODE;
def help(landing_page, parent, child, controlID):
RESULT_CODE = REST_CODE;
try:
from resources.lib.gui.helpmenugui import helpmenu;
helpmenu();
except Exception as inst:
logger.error(inst);
return RESULT_CODE;
def logout(landing_page, parent, child, controlID):
RESULT_CODE = LOGOUT_CODE;
from resources.lib.modules import cleardata;
logger.debug('Running Cleanup Script');
try:
cleardata.cleanup();
except:
pass;
return RESULT_CODE;
|
Spiderlover/Toontown
|
toontown/hood/FishAnimatedProp.py
|
Python
|
mit
| 2,348
| 0.002129
|
import AnimatedProp
from direct.actor import Actor
from direct.interval.IntervalGlobal import *
from toontown.effects.Splash import *
from toontown.effects.Ripples import *
import random
class FishAnimatedProp(AnimatedProp.AnimatedProp):
def __init__(self, node):
AnimatedProp.AnimatedProp.__init__(self, node)
parent = node.getParent()
self.fish = Actor.Actor(node, copy=0)
self.fish.reparentTo(parent)
self.fish.setTransform(node.getTransform())
node.clearMat()
self.fish.loadAnims({'jump': 'phase_4/models/props/SZ_fish-jump',
'swim': 'phase_4/models/props/SZ_fish-swim'})
self.splashSfxList = (loader.loadSfx('phase_4/audio/sfx/TT_splash1.ogg'), loader.loadSfx('phase_4/audio/sfx/TT_splash2.ogg'))
self.node = self.fish
self.geom = self.fish.getGeomNode()
self.exitRipples = Ripples(self.geom)
self.exitRipples.setBin('fixed', 25, 1)
self.exitRipples.setPosHprScale(-0.3,
|
0.0, 1.24, 0.0, 0.0, 0.0, 0.7, 0.7, 0.7)
self.splash = Splash(self.geom, wantParticles=0)
|
self.splash.setPosHprScale(-1, 0.0, 1.23, 0.0, 0.0, 0.0, 0.7, 0.7, 0.7)
randomSplash = random.choice(self.splashSfxList)
self.track = Sequence(FunctionInterval(self.randomizePosition), Func(self.node.unstash), Parallel(self.fish.actorInterval('jump'), Sequence(Wait(0.25), Func(self.exitRipples.play, 0.75)), Sequence(Wait(1.14), Func(self.splash.play), SoundInterval(randomSplash, volume=0.8, node=self.node))), Wait(1), Func(self.node.stash), Wait(4 + 10 * random.random()), name=self.uniqueName('Fish'))
def delete(self):
self.exitRipples.destroy()
del self.exitRipples
self.splash.destroy()
del self.splash
del self.track
self.fish.removeNode()
del self.fish
del self.node
del self.geom
def randomizePosition(self):
x = 5 * (random.random() - 0.5)
y = 5 * (random.random() - 0.5)
h = 360 * random.random()
self.geom.setPos(x, y, 0)
self.geom.setHpr(h, 0, 0)
def enter(self):
AnimatedProp.AnimatedProp.enter(self)
self.track.loop()
def exit(self):
AnimatedProp.AnimatedProp.exit(self)
self.track.finish()
self.splash.stop()
self.exitRipples.stop()
|
dacb/viscount
|
viscount/tasks/models.py
|
Python
|
bsd-2-clause
| 2,077
| 0.01637
|
"""
viscount.task.models
Task models
"""
from ..core import db
from ..utils import JSONSerializer
class TaskInputFile(JSONSerializer, db.Model):
__tablename__ = 'tasks_input_files'
id = db.Column(db.Integer, primary_key=True)
task_id = db.Column(db.Integer, db.ForeignKey('tasks.id'), nullable=False)
file_type_id = db.Column(db.Integer, db.ForeignKey('file_types.id'), nullable=False)
name = db.Column(db.String(255), nullable=False, primary_key=True)
description = db.Column(db.Text, nullable=False)
class TaskOutputFile(JSONSerializer, db.Model):
__tablename__ = 'tasks_output_files'
id = db.Column(db.Integer, primary_key=True)
task_id = db.Column(db.Integer, db.ForeignKey('tasks.id'), nullable=False)
file_type_id = db.Column(db.Integer, db.ForeignKey('file_types.id'), nullable=False)
name = db.Column(db.String(255), nullable=False, primary_key=True)
description = db.Column(db.Text, nullable=False)
class TaskJSONSerializer(JSONSerializer):
__json_modifiers__ = {
'events': lambda events, _: [dict(
|
id=event.id) for event in events],
'inputs': lambda inputs, _: [dict(id=input.id) for input in inputs],
'outputs': lambda outputs, _: [dict(id=output.id) for output in outputs],
'task_instances': lambda task_instances, _: [dict(id=task_instance.id) for task_instance in task_instances],
}
class Task(TaskJSONSerializer, db.Model):
__tablenam
|
e__ = 'tasks'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(32), unique=True)
owner_id = db.Column(db.Integer, db.ForeignKey('users.id'), nullable=False)
description = db.Column(db.Text, index=False, unique=False, nullable=False)
source_file = db.Column(db.Integer, db.ForeignKey('files.id'))
events = db.relationship('Event', backref='task', lazy='dynamic')
inputs = db.relationship('TaskInputFile', backref='task', lazy='dynamic')
outputs = db.relationship('TaskOutputFile', backref='task', lazy='dynamic')
task_instances = db.relationship('WorkflowTaskInstance', backref='task', lazy='dynamic')
def __repr__(self):
return '<Task %r>' % (self.name)
|
neldom/qessera
|
careers/models.py
|
Python
|
mit
| 2,634
| 0.015186
|
from __future__ import unicode_literals
from django.utils.encoding import python_2_unicode_compatible
from django.conf import settings
from django.db import models
from django.core.urlresolvers import reverse
from django.db.models.signals import pre_save
from django.utils import timezone
from django.utils.text import slugify
class CareerManager(models.Manager):
def active(self, *args, **kwargs):
return super(CareerManager, self).filter(draft = False).filter(published_at__lte = timezone.now())
@python_2_unicode_compatible
class Career(models.Model):
FULLTIME = 'Full-time'
PARTTIME = 'Part-time'
INTERNSHIP = 'Internship'
RESEARCH = 'Research'
ROLE_CATEGORY_CHOICES = (
(FULLTIME, 'Full-time'),
(PARTTIME, 'Part-time'),
(INTERNSHIP, 'Internship'),
(RESEARCH, 'Research'),
)
role_category = models.CharField(
max_lengt
|
h=12,
choices=RO
|
LE_CATEGORY_CHOICES,
default=FULLTIME,
)
# Role
role = models.CharField(max_length = 120)
# Location
city = models.CharField(max_length=255)
# Plain text and urlify slug
career_slug = models.SlugField(unique = True)
career_offer_title = models.CharField(max_length=255, default="")
career_offer_description = models.TextField(default="")
career_experience = models.TextField(default="")
career_terms = models.TextField(default="")
# Time and meta staff
draft = models.BooleanField(default = False)
published_at = models.DateField(auto_now = False, auto_now_add = False)
updated = models.DateTimeField(auto_now = True, auto_now_add = False)
timestamp = models.DateTimeField(auto_now = False, auto_now_add = True)
objects = CareerManager()
def __unicode__(self):
return self.role
def __str__(self):
return self.role
def get_absolute_url(self):
return reverse('careers:detail', kwargs = {'slug':self.career_slug})
class Meta:
ordering = ["-timestamp", "-updated"]
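# Build a unique slug from the career's role; on a collision, append the id of
# the newest clashing row and retry until the slug is free.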
def create_slug(instance, new_slug = None):
    career_slug = slugify(instance.role)  # Career has no "title" field; slug from the role
    if new_slug is not None:
        career_slug = new_slug
    qs = Career.objects.filter(career_slug = career_slug).order_by("-id")
    exists = qs.exists()
    if exists:
        new_slug = "%s-%s" %(career_slug, qs.first().id)
        return create_slug(instance, new_slug = new_slug)
    return career_slug
def pre_save_post_receiver(sender, instance, *args, **kwargs):
if not instance.career_slug:
instance.career_slug = create_slug(instance)
pre_save.connect(pre_save_post_receiver, sender = Career)
|
gh0std4ncer/doit
|
doc/tutorial/tutorial_02.py
|
Python
|
mit
| 276
| 0
|
def task_hello():
"""hello py """
def python_hello(times, text, targets):
with open(targets[0], "a") as output:
output.write(times * text)
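    # doit task metadata: run python_hello with its positional args to produce hello.txt.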
    return {'actions': [(python_hello, [3, "py!\n"])],
            'targets': ["hello.txt"],
            }
|
smorad/ast119
|
hw5.py
|
Python
|
gpl-2.0
| 2,857
| 0.00805
|
from numpy import *
import numpy as np  # needed: the force calculations below reference np.linalg.norm
from matplotlib.pyplot import *
import scipy.constants as sc
import copy
import scipy.integrate as integ
# test sun/earth with hw5(1.989e30, 5.972e24, 149.6e9, 0.0167, 1000) (a in metres, consistent with SI sc.G)
def hw5(m1, m2, a, e, tmax, tstep=0.001, tplot=0.025, method='leapfrog'):
if method != 'leapfrog' and method != 'odeint':
print("That's not a method")
return()
# initialize commonly used variables
    period = sqrt((4*(pi**2)*(a**3)) / (sc.G*(m1 + m2)))
dt = period*tstep
# initialize objects at time 0
q = m1 / m2
r0 = (1-e)*a/(1+q)
v0 = (1/(1+q))*sqrt((1+e)/(1-e))*sqrt(sc.G*(m1+m2)/a)
rv = array([r0, 0, 0, v0, -q*r0, 0, 0, -q*v0])
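    # State vector rv = [x1, y1, vx1, vy1, x2, y2, vx2, vy2]; the -q factors place
    # both bodies about the common centre of mass.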
# set up figure
figure(1)
gca().set_aspect('equal')
xlim([-2*a, 2*a])
ylim([-2*a, 2*a])
rv_list = []
if method == 'leapfrog':
timeCounter = 0
        frameCounter = 0
while timeCounter < tmax:
# plot positions if tplot time has passed
if frameCounter >= tplot:
frameCounter = 0
rv_list.append(copy.deepcopy(rv))
# calc positions
rv[0] = rv[0] + rv[2]*dt
rv[1] = rv[1] + rv[3]*dt
rv[4] = rv[4] + rv[6]*dt
rv[5] = rv[5] + rv[7]*dt
# calc acceleration
r = array([rv[0] - rv[4], rv[1] - rv[5]])
force = ((sc.G*m1*m2)/(np.linalg.norm(r)**2))*(r/np.linalg.norm(r))
# calc velocity
rv[2] = rv[2] - (force[0]/m1)*dt
rv[3] = rv[3] - (force[1]/m1)*dt
rv[6] = rv[6] + (force[0]/m2)*dt
rv[7] = rv[7] + (force[1]/m2)*dt
# increment counters
timeCounter += tstep
frameCounter += tstep
# plot final position
rv_list.append(copy.deepcopy(rv))
rv_list_plot = rv_list
else:
# odeint
rv_list = integ.odeint(deriv, rv, arange(0, tmax*period, dt), (m1, m2))
        # computed with tstep, but we only want to plot every tplot, so keep
        # every (tplot/tstep)-th sample; the slice step must be an integer
        t_interval = int(tplot / tstep)
        rv_list_plot = rv_list[::t_interval]
# plot
for i in range(len(rv_list_plot)):
plot(rv_list_plot[i][0],rv_list_plot[i][1],'bo')
plot(rv_list_plot[i][4],rv_list_plot[i][5],'go')
draw()
def deriv(rv, dt, m1, m2):
# calc position deriv
rv_copy = zeros(8)
rv_copy[0] = rv[2]
rv_copy[1] = rv[3]
rv_copy[4] = rv[6]
rv_copy[5] = rv[7]
# calc velocity deriv
r = array([rv[0] - rv[4], rv[1] - rv[5]])
force = ((sc.G*m1*m2)/(np.linalg.norm(r)**2))*(r/np.linalg.norm(r))
rv_copy[2] = - (force[0]/m1)
rv_copy[3] = - (force[1]/m1)
rv_copy[6] = + (force[0]/m2)
rv_copy[7] = + (force[1]/m2)
return rv_copy
|
Spicery/ginger
|
apps/fetchgnx/design/conf.py
|
Python
|
gpl-3.0
| 9,404
| 0.005955
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# FetchGNX design notes documentation build configuration file, created by
# sphinx-quickstart on Fri Oct 9 13:29:00 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
import cloud_sptheme as csp
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.mathjax',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'FetchGNX design notes'
copyright = '2015, Stephen Leach'
author = 'Stephen Leach'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.8.3'
# The full version, including alpha/beta/rc tags.
release = '0.8.3'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'cloud'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {"defaultcollapsed":True}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = [csp.get_theme_dir()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created usi
|
ng Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
#   'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
#   'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'FetchGNXdesignnotesdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'FetchGNXdesignnotes.tex', 'FetchGNX design notes Documentation',
'Stephen Leach', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
#
|
betonreddit/betonreddit
|
betonreddit/player/migrations/0003_auto_20160509_2322.py
|
Python
|
apache-2.0
| 427
| 0
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-05-09 23:22
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('player', '0002_auto_20160505_0350'),
]
operations = [
migrations.RenameField(
model_name='player',
old_name='username',
new_name='email',
),
]
|
tonyczeh/vootstrap
|
vootstrap/__init__.py
|
Python
|
mit
| 5,176
| 0.001546
|
#!/usr/bin/env python
import sys
import textwrap
try:
import virtualenv # @UnresolvedImport
except:
from .lib import virtualenv # @Reimport
from . import snippits
__version__ = "0.9.1"
def file_search_dirs():
dirs = []
for d in virtualenv.file_search_dirs():
if "vootstrap" not in d:
dirs.append(d)
return dirs
def make_parser():
parser = virtualenv.ConfigOptionParser(
usage="usage: %prog [OPTIONS] OUTFILE",
version=__version__,
formatter=virtualenv.UpdatingDefaultsHelpFormatter())
parser.add_option(
"-v", "--verbose",
action="count",
dest="verbose",
default=0,
help="Increase verbosity")
parser.add_option(
"-q", "--quiet",
action="count",
dest="quiet",
default=0,
help="Decrease verbosity")
parser.add_option(
"-p", "--python",
dest="python",
metavar="PYTHON_EXE",
help="The Python interpreter to use, e.g., --python=python2.5 will "
"use the python2.5 interpreter to create the new environment. The "
"default is the interpreter that virtualenv was installed with (%s)"
% sys.executable)
parser.add_option(
"--clear",
dest="clear",
action="store_true",
help="Clear out the non-root install and start from scratch")
parser.add_option(
"--no-site-packages",
dest="no_site_packages",
action="store_true",
help="Don't give access to the global site-packages dir to the "
"virtual environment (default; deprecated)")
parser.add_option(
"--system-site-packages",
dest="system_site_packages",
action="store_true",
help="Give access to the global site-packages dir to the "
"virtual environment")
parser.add_option(
"--unzip-setuptools",
dest="unzip_setuptools",
action="store_true",
help="Unzip Setuptools or Distribute when installing it")
parser.add_option(
"--relocatable",
dest="relocatable",
action="store_true",
help="Make an EXISTING virtualenv environment relocatable. "
"This fixes up scripts and makes all .pth files relative")
parser.add_option(
"--distribute",
"--use-distribute",
dest="use_distribute",
action="store_true",
help="Use Distribute instead of Setuptools. Set environ variable "
"VIRTUALENV_DISTRIBUTE to make it the default ")
parser.add_option(
"--extra-search-dir",
dest="search_dirs",
action="append",
default=['.'],
help="Directory to look for setuptools/distribute/pip distributions "
"in. You can add any number of additional --extra-search-dir paths.")
parser.add_option(
"--never-download",
dest="never_download",
action="store_true",
help="Never download anything from the network. Instead, virtualenv "
"will fail if local distributions of setuptools/distribute/pip are "
"not present.")
parser.add_option(
"--prompt",
dest="prompt",
help="Provides an alternative prompt prefix for this environment")
parser.add_option("--install-requirements",
default=False,
action="store_true",
dest="instal
|
l_requirements",
help="Install requirements.txt after vootstrapping")
parser.add_option(
"--path",
action="append",
dest="path",
help="Directory to add to vootstrapped sys.path. You can add any "
"number of additional --path paths. Relative directories are relative "
"to the vootstrapped di
|
rectory")
return parser
def adjust_options(options):
out_str = "def adjust_options(options, args):\n"
opts = [
"verbose",
"quiet",
"python",
"clear",
"no_site_packages",
"system_site_packages",
"unzip_setuptools",
"relocatable",
"use_distribute",
"search_dirs",
"never_download",
"prompt"
]
for opt in opts:
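    # %r (not %s) so string options such as --python or --prompt are emitted
    # as quoted Python literals in the generated bootstrap script.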
out_str += " options.%s = %s\n" % (opt, getattr(options, opt))
out_str += snippits.ADJUST_OPTIONS_ARGS
return textwrap.dedent(out_str)
def after_install(options):
if not (options.install_requirements or options.path):
return ""
out_str = snippits.AFTER_INSTALL_PREFIX
if options.path:
out_str += snippits.AFTER_INSTALL_PATH(options.path)
if options.install_requirements:
out_str += snippits.AFTER_INSTALL_REQUIREMENTS
return textwrap.dedent(out_str)
def vootify(options):
return virtualenv.create_bootstrap_script(
adjust_options(options) +
after_install(options)
)
def main():
parser = make_parser()
(options, args) = parser.parse_args()
if not len(args):
parser.print_help()
return 1
with open(args[0], "w") as outfile:
outfile.write(vootify(options))
return 0
if __name__ == "__main__":
exit_code = main()
    if exit_code:
sys.exit(exit_code)
|
zerothi/sisl
|
sisl/io/molden.py
|
Python
|
mpl-2.0
| 1,873
| 0.001068
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# f
|
ile, You can obtain one at https://mozilla.org/MPL/2.0/.
# Import sile objects
from .sile import *
from sisl._internal import set_module
from sisl import Geometry
__all__ = ['moldenSile']
@set_module("sisl.io")
class moldenSile(Sile):
""" Molden file object """
@sile_fh_open()
def write_supercell(self, sc):
""" Writes the supercell to the contained file """
# Check that we can
|
write to the file
sile_raise_write(self)
# Write the number of atoms in the geometry
self._write('[Molden Format]\n')
# Sadly, MOLDEN does not read this information...
@sile_fh_open()
def write_geometry(self, geometry, fmt='.8f'):
""" Writes the geometry to the contained file """
# Check that we can write to the file
sile_raise_write(self)
# Be sure to write the supercell
self.write_supercell(geometry.sc)
# Write in ATOM mode
self._write('[Atoms] Angs\n')
# Write out the cell information in the comment field
# This contains the cell vectors in a single vector (3 + 3 + 3)
# quantities, plus the number of supercells (3 ints)
fmt_str = '{{0:2s}} {{1:4d}} {{2:4d}} {{3:{0}}} {{4:{0}}} {{5:{0}}}\n'.format(fmt)
for ia, a, _ in geometry.iter_species():
self._write(fmt_str.format(a.symbol, ia, a.Z, *geometry.xyz[ia, :]))
def ArgumentParser(self, p=None, *args, **kwargs):
""" Returns the arguments that is available for this Sile """
newkw = Geometry._ArgumentParser_args_single()
newkw.update(kwargs)
return self.read_geometry().ArgumentParser(p, *args, **newkw)
add_sile('molf', moldenSile, case=False, gzip=True)
|
meredith-digops/awsops
|
amicreation/amicreation.py
|
Python
|
mit
| 1,866
| 0.001608
|
#!/usr/bin/env python
from __future__ import print_function
import boto3
import time
from botocore.exceptions import ClientError
from datetime import datetime
def get_unix_timestamp():
"""
Generate a Unix timestamp string.
"""
d = datetime.now()
t = time.mktime(d.timetuple())
return str(int(t))
def lambda_handler(event, context):
"""
Create EBS AMI for instances identified by the filter.
"""
if not 'DryRun' in event:
event['DryRun'] = False
if not 'Filters' in event:
event['Filters'] = [{
'Name': 'tag-key',
'Values': ['ops:snapshot']
}]
ec2 = boto3.resource('ec2')
# Iterate through instances identified by the filter.
for instance in ec2.instances.filter(Filters=event['Filters']):
instance_name = instance.instance_id
instance_tags = []
        # If a Name tag is available, use it to identify the instance
        # instead of the instance_id.
for tag in instance.tags:
if tag['Key'] == 'Name' and tag['Value'] != '':
                instance_name = tag['Value']
            else:
                instance_tags.append(tag)
try:
# Create the AMI
image_name = instance_name + '-' + get_unix_timestamp()
image = instance.create_image(
Name=image_name,
NoReboot=True,
DryRun=event['DryRun']
)
print('Started image creation: ' + image_name)
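            # Tag the AMI with a default 30-day retention marker plus the instance's own tags.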
image_tags = [{'Key': 'ops:retention', 'Value': '30'}] + instance_tags
image.create_tags(
Tags=image_tags,
DryRun=event['DryRun']
)
        except ClientError as e:
            # Ignore the expected DryRunOperation error; surface anything else.
            if e.response['Error']['Code'] == 'DryRunOperation':
                pass
            else:
                raise
|
melrief/Hadoop-Log-Tools
|
hadoop/util/stats.py
|
Python
|
apache-2.0
| 1,993
| 0.024586
|
#!/usr/bin/env python
from __future__ import print_function
import argparse
import numpy as N
import os
import sys
def parse_args(args):
p = argparse.ArgumentParser()
p.add_argument('-i', '--input-files', default=[sys.stdin], nargs="+",
type=argparse.FileType('rt'),
help='input file or empty (stdin)')
p.add_argument('-d', '--decorate',default=False,action='store_true'
,help='put the stat name before the value (e.g mean:1)')
g = p.add_mutually_exclusive_group()
    g.add_argument('-A','--all-stats',action='store_true',default=False)
h = p.add_argument_group('stat')
h.add_argument('-a', '--mean', action='store_true', default=False)
h.add_argument('-D', '--median', action='store_true', default=False)
h.add_argument('-s', '--standard_deviation',action='store_true',default=False)
h.add_argument('-v', '--variance', action='store_true', default=False)
h.add_argument('-m', '--min', action='store_true', default=False)
h.add_argument('-M', '--max', action='store_true', default=False)
if not args:
p.print_help()
sys.exit(0)
return p.parse_args(args)
def main():
args = parse_args(sys.argv[1:])
for input_file in args.input_files:
vals = [float(x) for x in input_file.read().split(os.linesep) if x]
a = N.array(vals)
s = []
for (name,value,f) in [('mean', args.mean, N.mean)
, ('median', args.median, N.median)
, ('standard_deviation', args.standard_deviation
, N.std)
, ('variance', args.variance, N.var)
, ('min', args.min, N.amin)
, ('max', args.max, N.amax)]:
if not args.all_stats and not value:
continue
r = f(a)
if args.decorate:
s.append('{}:{}'.format(name,r))
else:
s.append('{}'.format(r))
print(' '.join(s))
if __name__=='__main__':
main()
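# Example (assumed shell usage): printf '1\n2\n3\n' | ./stats.py -a -M -d
# expected output: mean:2.0 max:3.0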
|
materialsvirtuallab/megnet
|
megnet/utils/descriptor.py
|
Python
|
bsd-3-clause
| 6,396
| 0.002502
|
"""
This module implements atom/bond/structure-wise descriptor calculated from
pretrained megnet model
"""
import os
from typing import Dict, Union
import numpy as np
from tensorflow.keras.models import Model
from megnet.models import GraphModel, MEGNetModel
from megnet.utils.typing import StructureOrMolecule
DEFAULT_MODEL = os.path.join(os.path.dirname(__file__), "../../mvl_models/mp-2019.4.1/formation_energy.hdf5")
class MEGNetDescriptor:
"""
MEGNet descriptors. This class takes a trained model and
then compute the intermediate outputs as structure features
"""
    def __init__(self, model_name: Union[str, GraphModel, MEGNetModel] = DEFAULT_MODEL, use_cache: bool = True):
"""
Args:
model_name (str or MEGNetModel): trained model. If it is
str, then only models in mvl_models are used.
            use_cache (bool): whether to use cache for structure
                graph calculations
"""
if isinstance(model_name, str):
model = MEGNetModel.from_file(model_name)
elif isinstance(model_name, GraphModel):
model = model_name
else:
raise ValueError("model_name only support str or GraphModel object")
layers = model.layers
important_prefix = ["meg", "set", "concatenate"]
all_names = [i.name for i in layers if any(i.name.startswith(j) for j in important_prefix)]
if any(i.startswith("megnet") for i in all_names):
self.version = "v2"
else:
self.version = "v1"
valid_outputs = [i.output for i in layers if any(i.name.startswith(j) for j in important_prefix)]
outputs = []
valid_names = []
for i, j in zip(all_names, valid_outputs):
if isinstance(j, list):
for k, l in enumerate(j):
valid_names.append(i + f"_{k}")
outputs.append(l)
else:
valid_names.append(i)
outputs.append(j)
full_model = Model(inputs=model.inputs, outputs=outputs)
model.model = full_model
self.model = model
self.valid_names = valid_names
self._cache: Dict[str, float] = {}
self.use_cache = use_cache
def _predict_structure(self, structure: StructureOrMolecule) -> np.ndarray:
graph = self.model.graph_converter.convert(structure)
inp = self.model.graph_converter.graph_to_input(graph)
return self.model.predict(inp)
def _predict_feature(self, structure: StructureOrMolecule) -> np.ndarray:
if not self.use_cache:
return self._predict_structure(structure)
s = str(structure)
if s in self._cache:
return self._cache[s]
result = self._predict_structure(structure)
self._cache[s] = result
return result
def _get_features(self, structure: StructureOrMolecule, prefix: str, level: int, index: int = None) -> np.ndarray:
name = prefix
if level is not None:
name = f"{prefix}_{level}"
if index is not None:
name += f"_{index}"
if name not in self.valid_names:
raise ValueError(f"{name} not in original megnet model")
ind = self.valid_names.index(name)
out_all = self._predict_feature(structure)
return out_all[ind][0]
def _get_updated_prefix_level(self, prefix: str, level: int):
mapping = {
"meg_net_layer": ["megnet", level - 1],
"set2_set": ["set2set_atom" if level == 1 else "set2set_bond", None],
"concatenate": ["concatenate", None],
}
if self.version == "v2":
return mapping[prefix][0], mapping[prefix][1] # type: ignore
return prefix, level
def get_atom_features(self, structure: StructureOrMolecule, level: int = 3) -> np.ndarray:
"""
Get megnet atom features from structure
Args:
structure: pymatgen structure or molecule
level: int, indicating the block number of megnet, starting
from 1
Returns:
nxm atomic feature matrix
"""
prefix, level = self._get_updated_prefix_level("meg_net_layer", level)
return self._get_features(structure, prefix=prefix, level=level, index=0)
def get_bond_features(self, structure: StructureOrMolecule, level: int = 3) -> np.ndarray:
"""
Get bond features at megnet block level
Args:
structure: pymatgen structure
level: int
Returns:
n_bond x m bond feature matrix
"""
prefix, level = self._get_updated_prefix_level("meg_net_layer", level)
return self._get_features(structure, prefix=prefix, level=level, index=1)
def get_global_features(self, structure: StructureOrMolecule, level: int = 2) -> np.ndarray:
"""
Get state features at megnet block level
Args:
structure: pymatgen structure or molecule
level: int
Returns:
1 x m_g global feature vector
"""
prefix, level = self._get_updated_prefix_level("meg_net_layer", level)
return self._get_features(structure, prefix=prefix, level=level, index=2)
def get_set2set(self, structure: StructureOrMolecule, ftype: str = "atom") -> np.ndarray:
"""
Get set2set output as features
Args:
structure (StructureOrMolecule): pymatgen structure
or molecule
ftype (str): atom or bond
Returns:
feature matrix, each row is a vector for an atom
or bond
"""
mapping = {"atom": 1, "bond": 2}
prefix, level = self._get_updated_prefix_level("set2_set", level=mapping[ftype])
return self._get_features(structure, prefix=prefix, level=level)
def get_structure_features(self, structure: StructureOrMolecule) -> np.ndarray:
"""
Get structure level feature vector
Args:
structure (StructureOrMolecule): pymatgen structure
or molecule
Returns:
one feature vector for the structure
"""
prefix, level = self._get_updated_prefix_level("concatenate", level=1)
return self._get_features(structure, prefix=prefix, level=level)
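# Minimal usage sketch (assumed: the bundled default model is present and `s` is a pymatgen Structure):
#     desc = MEGNetDescriptor()                         # loads the mp-2019.4.1 formation-energy model
#     atom_feats = desc.get_atom_features(s, level=3)   # n_atoms x m feature matrix
#     struct_vec = desc.get_structure_features(s)       # single feature vector for the whole structure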
|
zwarren/morse-car-controller
|
user/map.py
|
Python
|
mit
| 7,092
| 0.006204
|
#!/usr/bin/env python
import sys
import json
import logging
from logging import warning, error, info
from math import pi, degrees
from PyQt4 import Qt, QtCore, QtGui
from connection import Connection
arrow_points = (
Qt.QPoint(-1, -4),
Qt.QPoint(1, -4),
Qt.QPoint(1, 4),
Qt.QPoint(4, 4),
Qt.QPoint(0, 12),
Qt.QPoint(-4, 4),
Qt.QPoint(-1, 4)
)
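# Seven-point polygon for an arrow pointing along +y, in symbol-local coordinates
# (shaft from y=-4 to y=4, head tapering to the tip at (0, 12)).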
class PlotGroup:
def __init__(self, color=Qt.Qt.black, symbol='cross'):
self.color = color
self.symbol = symbol
self.data = []
class XYPlot(Qt.QWidget):
def __init__(self):
Qt.QWidget.__init__(self)
# little dance to make the background white.
p = self.palette()
p.setColor(self.backgroundRole(), Qt.Qt.white)
self.setPalette(p)
self.setAutoFillBackground(True)
# map scale
self._scale = 1.0
self.symbol_size = 5.0
self._symbol_scale = self.symbol_size/self._scale
self._offset_x = 400
self._offset_y = 300
self.messages = []
self.groups = []
def translate(self, x, y):
self._offset_x += x
self._offset_y += y
self.update()
def scale(self, s):
self._scale *= s
self._symbol_scale = self.symbol_size/self._scale
self.update()
def drawArrow(self, qp, x, y, angle):
qp.save()
qp.translate(x, y)
qp.rotate(angle)
qp.scale(self._symbol_scale*0.5, self._symbol_scale*0.5)
qp.drawPolygon(*arrow_points)
qp.restore()
def drawCross(self, qp, x, y):
qp.save()
qp.translate(x, y)
qp.scale(self._symbol_scale, self._symbol_scale)
qp.drawLine(-1, -1, 1, 1)
qp.drawLine(-1, 1, 1, -1)
qp.restore()
def drawPlus(self, qp, x, y):
qp.save()
qp.translate(x, y)
qp.scale(self._symbol_scale, self._symbol_scale)
qp.drawLine(-1, 0, 1, 0)
qp.drawLine(0, -1, 0, 1)
qp.restore()
def drawModel(self, qp, x, y, angle, steer):
# all the units are x10 because there is some rounding(?)
        # issue where lines don't join correctly when using
# the meter units directly.
# there is a scale(0.1,0.1) further down to put things
# back to the correct size.
        Lf = 16 # length of chassis from middle to front axle
Lb = 23 # length of chassis from middle to back axle
Wa = 13 # half axle length
Lw = 10 # wheel length
qp.save()
qp.translate(x,y)
qp.rotate(angle)
#qp.scale(self._symbol_scale, self._symbol_scale)
qp.scale(0.1, 0.1)
qp.drawLine(0, -Lb, 0, Lf) # main body
qp.save() # begin rear end
qp.translate(0.0, -Lb)
qp.drawLine(-Wa, 0.0, Wa, 0.0) # rear axle
qp.drawLine(-Wa,-Lw, -Wa, Lw) #left wheel
qp.drawLine(Wa, -Lw, Wa, Lw) # right wheel
qp.restore()
qp.translate(0.0, Lf) # begin front end
qp.drawLine(-Wa, 0.0, Wa, 0.0) # front axle
qp.save() # begin left wheel
qp.translate(-Wa, 0.0)
qp.rotate(-steer)
qp.drawLine(0.0, -Lw, 0.0, Lw)
qp.restore()
        qp.save() # begin right wheel
qp.translate(Wa, 0.0)
qp.rotate(-steer)
qp.drawLine(0.0, -Lw, 0.0, Lw)
qp.restore()
qp.restore()
def paintGrid(self, qp):
pass
def paintEvent(self, e):
#print self.offset_x, self.offset_y, self.s
qp = QtGui.QPainter()
qp.begin(self)
qp.setRenderHint(QtGui.QPainter.Antialiasing, True)
line_y = 20
for line in self.messages:
qp.drawText(20, line_y, line)
line_y += 20
qp.translate(self._offset_x, self._offset_y)
qp.scale(self._scale, -self._scale)
#qp.translate(200, 200)
qp.setBrush(Qt.Qt.black)
qp.setPen(Qt.Qt.black)
self.drawCross(qp, 0, 0)
for group in self.groups:
if group.symbol == 'arrow':
qp.setBrush(group.color)
qp.setPen(Qt.Qt.NoPen)
for v in group.data:
self.drawArrow(qp, v[0], v[1], v[2])
elif group.symbol == 'cross':
qp.setBrush(Qt.Qt.NoBrush)
qp.setPen(group.color)
for v in group.data:
self.drawCross(qp, v[0], v[1])
elif group.symbol == 'model':
pen = Qt.QPen()
                pen.setWidthF(self._symbol_scale)  # width is fractional; use the float overload
pen.setColor(group.color)
qp.setBrush(group.color)
qp.setPen(pen)
for v in group.data:
#print("Draw model %0.2f %0.2f %0.2f %0.2f" % (v[0:4]))
self.drawModel(qp, v[0], v[1], v[2], v[3])
qp.end()
def add_plot_group(self, g):
self.groups.append(g)
#def update(self):
class MapPlot(XYPlot):
def __init__(self):
XYPlot.__init__(self)
self.current_pos = PlotGroup(color=Qt.Qt.blue, symbol='model')
self.add_plot_group(self.current_pos)
self.waypoint_group = PlotGroup(color=Qt.Qt.black, symbol='cross')
self.add_plot_group(self.waypoint_group)
self.scale(12)
def on_msg(self, msg):
try:
#t = msg[u'state'][u'time']
current = (msg[u'state'][u'x'],
msg[u'state'][u'y'],
degrees(msg[u'state'][u'yaw']),
degrees(msg[u'controls'][u'steer']))
waypoints = msg[u'waypoint_control'][u'points']
except KeyError:
logging.error("Invalid message.")
else:
self.current_pos.data = [current]
self.waypoint_group.data = waypoints
self.update()
class MainWindow(Qt.QWidget):
def __init__(self):
Qt.QWidget.__init__(self)
self.grid = Qt.QGridLayout()
self.setLayout(self.grid)
self.plot = MapPlot()
self.grid.addWidget(self.plot, 0, 0)
self.connection = Connection('localhost', 60212, self.update)
def update(self, msg):
self.plot.on_msg(msg)
def keyPressEvent(self, e):
if e.key() == Qt.Qt.Key_Escape:
self.close()
elif e.key() == Qt.Qt.Key_A:
self.plot.scale(2)
elif e.key() == Qt.Qt.Key_Z:
self.plot.scale(0.5)
elif e.key() == Qt.Qt.Key_Up:
self.plot.translate(0, 10)
elif e.key() == Qt.Qt.Key_Down:
            self.plot.translate(0, -10)
elif e.key() == Qt.Qt.Key_Left:
self.plot.translate(10, 0)
        elif e.key() == Qt.Qt.Key_Right:
self.plot.translate(-10, 0)
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
app = Qt.QApplication([])
demo = MainWindow()
demo.resize(800, 600)
demo.show()
sys.exit(app.exec_())
|
visdesignlab/TulipPaths
|
demos/simpleNodeCompleteness.py
|
Python
|
mit
| 928
| 0.002155
|
""" Example of reasoning about the approximate node completeness. """
from tulip import *
from tulipgui import *
import tulippaths as tp
# Load graph
graphFile = '../data/514_4hops.tlp'
graph = tlp.loadGraph(graphFile)
# Compute completeness for each node label
completeness = tp.utils.getApproximateAnnotationCompleteness(graph)
# Tally completeness
numComplete = 0
numAlmostComplete = 0
numIncomplete = 0
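# Bucket nodes by completeness: above 0.75 counts as complete, 0.25-0.75 as almost
# complete, and anything lower is incomplete (and gets removed from the graph).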
for node in graph.getNodes():
currCompleteness = completeness[node]
    if currCompleteness <= 1.0 and currCompleteness > 0.75:
numComplete += 1
elif currCompleteness <= 0.75 and currCompleteness > 0.25:
numAlmostComplete += 1
else:
graph.delNode(node)
numIncomplete += 1
print('num complete, num almost complete, num incomplete')
print((str(numComplete) + ', ' + str(numAlmostComplete) + ', ' + str(numIncomplete)))
nodeLinkView = tlpgui.createNodeLinkDiagramView(graph)
|
XeCycle/indico
|
indico/MaKaC/webinterface/rh/trackModif.py
|
Python
|
gpl-3.0
| 39,643
| 0.016195
|
# This file is part of Indico.
# Copyright (C) 2002 - 2015 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from BTrees.OOBTree import OOBTree
from cStringIO import StringIO
import MaKaC.webinterface.pages.tracks as tracks
import MaKaC.webinterface.pages.conferences as conferences
import MaKaC.webinterface.urlHandlers as urlHandlers
import MaKaC.webinterface.common.abstractFilters as abstractFilters
import MaKaC.review as review
from MaKaC.webinterface.rh.conferenceBase import RHTrackBase
from MaKaC.webinterface.rh.base import RHModificationBaseProtected
from MaKaC.errors import MaKaCError, FormValuesError
from MaKaC.PDFinterface.conference import TrackManagerAbstractToPDF, TrackManagerAbstractsToPDF
from indico.core.config import Config
import MaKaC.common.filters as filters
import MaKaC.webinterface.common.contribFilters as contribFilters
from MaKaC.webinterface.common.contribStatusWrapper import ContribStatusList
from MaKaC.PDFinterface.conference import ContribsToPDF
from MaKaC.webinterface.mail import GenericMailer, GenericNotification
from MaKaC.i18n import _
from MaKaC.abstractReviewing import ConferenceAbstractReview
from MaKaC.paperReviewing import Answer
from MaKaC.webinterface.common.tools import cleanHTMLHeaderFilename
from MaKaC.webinterface.rh.abstractModif import _AbstractWrapper
from MaKaC.webinterface.common.abstractNotificator import EmailNotificator
from indico.web.flask.util import send_file
class RHTrackModifBase( RHTrackBase, RHModificationBaseProtected ):
def _checkParams( self, params ):
RHTrackBase._checkParams( self, params )
def _checkProtection( self ):
RHModificationBaseProtected._checkProtection( self )
class RHTrackModification( RHTrackModifBase ):
def _process( self ):
p = tracks.WPTrackModification( self, self._track )
return p.display()
class RHTrackDataModification( RHTrackModifBase ):
def _process( self ):
p = tracks.WPTrackDataModification( self, self._track )
return p.display()
class RHTrackPerformDataModification(RHTrackModifBase):
def _checkParams(self,params):
RHTrackModifBase._checkParams(self,params)
self._cancel=params.has_key("cancel")
def _process(self):
if self._cancel:
self._redirect(urlHandlers.UHTrackModification.getURL(self._track))
else:
params=self._getRequestParams()
self._track.setTitle(params["title"])
self._track.setDescription(params["description"])
self._track.setCode(params["code"])
self._redirect(urlHandlers.UHTrackModification.getURL(self._track))
class RHTrackCoordination( RHTrackModifBase ):
def _checkProtection(self):
RHTrackModifBase._checkProtection(self)
if not self._conf.hasEnabledSection("cfa"):
raise MaKaCError( _("You cannot access this option because \"Abstracts\" was disabled"))
def _process( self ):
p = tracks.WPTrackModifCoordination( self, self._track )
return p.display()
class TrackCoordinationError( MaKaCError ):
pass
class RHTrackAbstractsBase( RHTrackModifBase ):
"""Base class for the areas accessible with track coordination privileges.
"""
def _checkProtection( self, checkCFADisabled=True ):
"""
"""
if not self._target.canCoordinate( self.getAW() ):
if self._getUser() == None:
self._checkSessionUser()
else:
raise TrackCoordinationError("You don't have rights to coordinate this track")
if checkCFADisabled and not self._conf.hasEnabledSection("cfa"):
raise MaKaCError( _("You cannot access this option because \"Abstracts\" was disabled"))
class _TrackAbstractFilterField( filters.FilterField ):
def __init__( self, track, values, showNoValue=True ):
self._track = track
filters.FilterField.__init__(self,track.getConference(),values,showNoValue)
class _StatusFilterField( _TrackAbstractFilterField ):
"""
"""
_id = "status"
def satisfies( self, abstract ):
"""
"""
s = tracks.AbstractStatusTrackViewFactory().getStatus( self._track, abstract )
return s.getId() in self.getValues()
class _ContribTypeFilterField( _TrackAbstractFilterField, abstractFilters.ContribTypeFilterField ):
"""
"""
_id = "type"
def __init__( self, track, values, showNoValue=True ):
_TrackAbstractFilterField.__init__( self, track, values, showNoValue )
def satisfies( self, abstract ):
"""
"""
return abstractFilters.ContribTypeFilterField.satisfies(self, abstract)
class _MultipleTrackFilterField(_TrackAbstractFilterField):
_id = "multiple_tracks"
def satisfies( self, abstract ):
return len( abstract.getTrackList() )>1
class _CommentsTrackFilterField(_TrackAbstractFilterField, abstractFilters.CommentFilterField):
_id = "comment"
def __init__( self, track, values, showNoValue=True ):
_TrackAbstractFilterField.__init__( self, track, values, showNoValue )
def satisfies( self, abstract ):
"""
"""
return abstractFilters.CommentFilterField.satisfies(self, abstract)
class _AccContribTypeFilterField(_TrackAbstractFilterField,abstractFilters.AccContribTypeFilterField):
"""
"""
_id = "acc_type"
def __init__(self,track,values,showNoValue=True):
_TrackAbstractFilterField.__init__(self,track,values,showNoValue)
def satisfies(self,abstract):
astv = tracks.AbstractStatusTrackViewFactory().getStatus( self._track, abstract )
if astv.__class__ in [tracks._ASTrackViewAccepted,\
tracks._ASTrackViewPA]:
if astv.getContribType() is None or astv.getContribType()=="":
return self._showNoValue
return astv.getContribType() in self._values
else:
return self._showNoValue
class TrackAbstractsFilterCrit(filters.FilterCriteria):
    _availableFields = { \
        _ContribTypeFilterField.getId(): _ContribTypeFilterField, \
        _StatusFilterField.getId(): _StatusFilterField, \
        _MultipleTrackFilterField.getId(): _MultipleTrackFilterField, \
        _CommentsTrackFilterField.getId(): _CommentsTrackFilterField,
        _AccContribTypeFilterField.getId(): _AccContribTypeFilterField }
def __init__(self,track,crit={}):
self._track = track
filters.FilterCriteria.__init__(self,track.getConference(),crit)
def _createField(self,klass,values ):
return klass(self._track,values)
def satisfies( self, abstract ):
for field in self._fields.values():
if not field.satisfies( abstract ):
return False
return True
class _TrackAbstractsSortingField( filters.SortingField ):
def __init__( self, track ):
self._track = track
filters.SortingField.__init__( self )
class _ContribTypeSF( _TrackAbstractsSortingField, abstractFilters.ContribTypeSortingField ):
_id = "type"
def __init__( self, track ):
_TrackAbstractsSortingField.__init__( self, track )
def compare( self, a1, a2 ):
return abstractFilters.ContribTypeSortingField.compare( self, a1, a2 )
class _StatusSF( _TrackAbstractsSortingField ):
_id = "status"
def compare( self, a1, a2 ):
statusA1 = tracks.AbstractStatusTrackViewFactory().getStatus( self._track, a1 )
statusA2 = tracks.Abs
|
tomjelinek/pcs
|
pcs_test/tier0/lib/test_env.py
|
Python
|
gpl-2.0
| 6,324
| 0
|
import logging
from functools import partial
from unittest import (
TestCase,
mock,
)
from pcs.common import file_type_codes
from pcs.common.reports import ReportItemSeverity as severity
from pcs.common.reports import codes as report_codes
from pcs.lib.env import LibraryEnvironment
from pcs_test.tools.assertions import assert_raise_library_error
from pcs_test.tools.custom_mock import MockLibraryReportProcessor
from pcs_test.tools.misc import create_patcher
patch_env = create_patcher("pcs.lib.env")
patch_env_object = partial(mock.patch.object, LibraryEnvironment)
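# Convenience patchers: patch_env patches names inside pcs.lib.env, while
# patch_env_object patches attributes directly on LibraryEnvironment.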
class LibraryEnvironmentTest(TestCase):
def setUp(self):
self.mock_logger = mock.MagicMock(logging.Logger)
self.mock_reporter = MockLibraryReportProcessor()
def test_logger(self):
env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
self.assertEqual(self.mock_logger, env.logger)
def test_report_processor(self):
env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
self.assertEqual(self.mock_reporter, env.report_processor)
def test_user_set(self):
user = "testuser"
env = LibraryEnvironment(
self.mock_logger, self.mock_reporter, user_login=user
)
self.assertEqual(user, env.user_login)
def test_user_not_set(self):
env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
self.assertEqual(None, env.user_login)
def test_usergroups_set(self):
groups = ["some", "group"]
env = LibraryEnvironment(
self.mock_logger, self.mock_reporter, user_groups=groups
)
self.assertEqual(groups, env.user_groups)
def test_usergroups_not_set(self):
env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
self.assertEqual([], env.user_groups)
class GhostFileCodes(TestCase):
def setUp(self):
self.mock_logger = mock.MagicMock(logging.Logger)
self.mock_reporter = MockLibraryReportProcessor()
    def _fixture_get_env(self, cib_data=None, corosync_conf_data=None):
return LibraryEnvironment(
self.mock_logger,
self.mock_reporter,
            cib_data=cib_data,
corosync_conf_data=corosync_conf_data,
)
def test_nothing(self):
self.assertEqual(self._fixture_get_env().ghost_file_codes, [])
def test_corosync(self):
self.assertEqual(
self._fixture_get_env(corosync_conf_data="x").ghost_file_codes,
[file_type_codes.COROSYNC_CONF],
)
def test_cib(self):
self.assertEqual(
self._fixture_get_env(cib_data="x").ghost_file_codes,
[file_type_codes.CIB],
)
def test_all(self):
self.assertEqual(
self._fixture_get_env(
cib_data="x",
corosync_conf_data="x",
).ghost_file_codes,
sorted([file_type_codes.COROSYNC_CONF, file_type_codes.CIB]),
)
@patch_env("CommandRunner")
class CmdRunner(TestCase):
def setUp(self):
self.mock_logger = mock.MagicMock(logging.Logger)
self.mock_reporter = MockLibraryReportProcessor()
def test_no_options(self, mock_runner):
expected_runner = mock.MagicMock()
mock_runner.return_value = expected_runner
env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
runner = env.cmd_runner()
self.assertEqual(expected_runner, runner)
mock_runner.assert_called_once_with(
self.mock_logger,
self.mock_reporter,
{
"LC_ALL": "C",
},
)
def test_user(self, mock_runner):
expected_runner = mock.MagicMock()
mock_runner.return_value = expected_runner
user = "testuser"
env = LibraryEnvironment(
self.mock_logger, self.mock_reporter, user_login=user
)
runner = env.cmd_runner()
self.assertEqual(expected_runner, runner)
mock_runner.assert_called_once_with(
self.mock_logger,
self.mock_reporter,
{
"CIB_user": user,
"LC_ALL": "C",
},
)
@patch_env("create_tmp_cib")
def test_dump_cib_file(self, mock_tmpfile, mock_runner):
tmp_file_name = "a file"
expected_runner = mock.MagicMock()
mock_runner.return_value = expected_runner
mock_instance = mock.MagicMock()
mock_instance.name = tmp_file_name
mock_tmpfile.return_value = mock_instance
env = LibraryEnvironment(
self.mock_logger, self.mock_reporter, cib_data="<cib />"
)
runner = env.cmd_runner()
self.assertEqual(expected_runner, runner)
mock_runner.assert_called_once_with(
self.mock_logger,
self.mock_reporter,
{
"LC_ALL": "C",
"CIB_file": tmp_file_name,
},
)
mock_tmpfile.assert_called_once_with(self.mock_reporter, "<cib />")
@patch_env_object("cmd_runner", lambda self: "runner")
class EnsureValidWait(TestCase):
def setUp(self):
self.create_env = partial(
LibraryEnvironment,
mock.MagicMock(logging.Logger),
MockLibraryReportProcessor(),
)
@property
def env_live(self):
return self.create_env()
@property
def env_fake(self):
return self.create_env(cib_data="<cib/>")
def test_not_raises_if_waiting_false_no_matter_if_env_is_live(self):
self.env_live.ensure_wait_satisfiable(False)
self.env_fake.ensure_wait_satisfiable(False)
def test_raises_when_is_not_live(self):
env = self.env_fake
assert_raise_library_error(
lambda: env.ensure_wait_satisfiable(10),
(
severity.ERROR,
report_codes.WAIT_FOR_IDLE_NOT_LIVE_CLUSTER,
{},
None,
),
)
@patch_env("get_valid_timeout_seconds")
def test_do_checks(self, get_valid_timeout):
timeout = 10
env = self.env_live
get_valid_timeout.return_value = timeout
env.ensure_wait_satisfiable(timeout)
get_valid_timeout.assert_called_once_with(timeout)
|
btcspry/3d-wallet-generator
|
gen_3dwallet/base.py
|
Python
|
mit
| 24,069
| 0.008226
|
#!/usr/bin/python3
try:
import qr_tools as qrTools # Module for this project
except:
import gen_3dwallet.qr_tools as qrTools
try:
import TextGenerator as textGen # Module for this project
except:
import gen_3dwallet.TextGenerator as textGen
import bitcoin # sudo pip3 install bitcoin
import argparse
import time
import math
import sys
import os
import distutils.spawn
def parse_args():
parser = argparse.ArgumentParser(description='Generate an STL file of a 3D-printable bitcoin, litecoin, dogecoin, or other type of coin.', formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('-ve', '--version', dest='versionByte', type=int, default=0, help='Version Bit of the address (for other altcoins).\nBitcoin: 0 (Default)\n Litecoin: 48\n Dogecoin: 30')
parser.add_argument('-ct', '--coin-title', dest='coinTitle', type=str, default="Bitcoin", help='Title of the coin, used for design purposes \n(Default: Bitcoin)')
    parser.add_argument('-ls', '--layout-style', dest='layoutStyle', type=int, default=1, help="Layout style of the wallet.\n1) Address on the Front, Private Key on the Back (Default)\n2) Private Key Only\n3) Address Only (don't forget to export the Private Keys after)")
parser.add_argument('-wi', '--width', dest='walletWidth', type=float, default=54.0, help='The width of the wallet in mm. The length is calculated automatically. Default option is approximately standard credit card legnth and width. \n(Default: 54.0)')
    parser.add_argument('-he', '--height', dest='walletHeight', type=float, default=8.0, help='The height of the wallet in mm. \n(Default: 8)')
    parser.add_argument('-bo', '--black-offset', dest='blackOffset', type=int, default=-30, help='The percentage of the height that the black part of the QR code, and the text, will be raised or lowered by.\nNegative number for lowered, positive for raised. Option must be greater than -90. \n(Default: -30)')
parser.add_argument('-ec', '--qr-error-correction', dest='errorCorrection', type=str, default="M", help='The percentage of the QR codes that can be destroyed before they are irrecoverable\nL) 7 percent\nM) 15 percent (Default)\nQ) 25 percent\nH) 30 percent')
parser.add_argument('-dc', '--disable-round-corners', dest='roundCorners', action='store_false', help="Round the coners (four short edges) of the wallet. \n(Default: disabled)")
parser.add_argument('-co', '--copies', dest='copies', type=int, default=5, help='The number of wallets to generate. These will all be unique and randomly-generate wallets (not copies). \n(Default: 5)')
parser.add_argument('-sd', '--openscad-exe', dest='scadExe', type=str, default="openscad", help='The location and filename of the command line tools for OpenSCAD (leave as default if it is installed as a command [ie. Linux])\nIn most cases on Windows and Mac, the executable will be found automatically.')
parser.add_argument('-o', '--stl-folder', dest='outputSTLFolder', type=str, default="./WalletsOut/", help='The output folder to export the STL files into\n(Default: ./WalletsOut/)')
parser.add_argument('-oc', '--scad-folder', dest='outputSCADFolder', type=str, default='', help='The output folder to store the SCAD generation files in (optional, only used for debugging)\n(Default: disabled)')
parser.add_argument('-ea', '--export-address-csv', dest='exportAddressCSV', type=str, default='', help='The output CSV file to export the address list to (optional)\n(Default: disabled)')
parser.add_argument('-ep', '--export-privkey-csv', dest='exportPrivkeyCSV', type=str, default='', help='The output CSV file to export the private key list to (optional)\n(Default: disabled)')
parser.add_argument('-eap', '--export-address-privkey-csv', dest='exportAPCSV', type=str, default='', help='The output CSV file to export the address and private key list to, in the format of "address,privkey" (optional)\n(Default: disabled)')
parser.add_argument('-epa', '--export-privkey-address-csv', dest='exportPACSV', type=str, default='', help='The output CSV file to export the address and private key list to, in the format of "privkey,address" (optional)\n(Default: disabled)')
return parser.parse_args()
def main():
args = parse_args()
# Set DEBUG variable for testing purposes (changing styling)
# If true, prints the SCAD to the terminal and then breaks after first generation
DEBUG = False
# Generate the addresses
if args.copies < 1:
print("Please enter a valid number of copies (-co flag), and try again.")
sys.exit()
else: # Use an else statement here just in case we add the option to import a CSV file with the keys (generated somewhere else)
walletDataList = []
for i in range(args.copies):
thisData = {}
# Generate the addresses with keys
thisData["privateKey"] = bitcoin.main.random_key() # Secure: uses random library, time library and proprietary function
thisData["wif"] = bitcoin.encode_privkey(thisData["privateKey"], "wif", args.versionByte)
thisData["address"] = bitcoin.privkey_to_address(thisData["privateKey"], args.versionByte)
# Generate the QR codes
if args.errorCorrection.upper() not in ["L","M","Q","H"]:
print("Please select a valid QR Error Correction value (L, M, Q, or H).")
sys.exit()
thisData["wifQR"] = qrTools.getQRArray(thisData["wif"], args.errorCorrection.upper())
thisData["addressQR"] = qrTools.getQRArray(thisData["address"], args.errorCorrection.upper())
# Reverse them or else they appear backwards (unknown reason)
thisData["wifQR"] = list(reversed(thisData["wifQR"]))
thisData["addressQR"] = list(reversed(thisData["addressQR"]))
# Append ALL the wallet information, just in case we want to do something with it later
walletDataList.append(thisData)
# Validate other args and set some constants
walletWidth = args.walletWidth
walletHeight = args.walletHeight
if args.layoutStyle == 1 or args.layoutStyle == 2 or args.layoutStyle == 3:
walletLength = walletWidth*1.6 # Approximately the same ratio as a credit card
else:
print("Please choose a valid layout style option.")
sys.exit()
if args.blackOffset < -90.0:
print("Please ensure that --black-offset (-bo flag) is set correctly, and is greater than -90.")
sys.exit()
textDepth = (args.blackOffset/100) * walletHeight
# Check the openscad command
scadExe = args.scadExe
if args.scadExe == "openscad" and not distutils.spawn.find_executable("openscad"):
if os.path.isfile("/Applications/OpenSCAD.app/Contents/MacOS/OpenSCAD"):
print("Info: OpenSCAD found in Applications folder on Mac")
scadExe = "/Applications/OpenSCAD.app/Contents/MacOS/OpenSCAD"
elif os.path.isfile("%PROGRAMFILES%\OpenSCAD\openscad.exe"):
print("Info: OpenSCAD found in Program Files on Windows")
scadExe = "%PROGRAMFILES%\OpenSCAD\openscad.exe"
elif os.path.isfile("%PROGRAMFILES(x86)%\OpenSCAD\openscad.exe"):
print("Info: OpenSCAD found in Program Files (x86) on Windows")
scadExe = "%PROGRAMFILES(x86)%\OpenSCAD\openscad.exe"
if not distutils.spawn.find_executable(scadExe):
print("Please install OpenSCAD or specify the location of it with --openscad-exe.")
sys.exit()
# Set the master SCAD variable
masterSCAD = "// SCAD Code Generated By 3DGen.py - 3D Wallet Generator\n\n" # The beginning of the wallet are identical
scadOutputs = [] # Generated from loop for each wallet (different addresses)
# Include some modules at the beginning
masterSCAD += "// Import some modules\n"
masterSCAD += """
$fn=100;
module createMeniscus(h,radius)difference(){translate([radius/2+0.1,radius/2+0.1,0]){cube([radius+0.2,radius+0.1,h+0.2],center=true);}cylinder(h=h+0.2,r=radius,center=true);}
module roundCornersCube(x,y,z)translate([x/2,y/2,z/2]){difference(){r=((x+y)/2)*0.052;cube([x,y,z],cente
|
upiq/plonebuild
|
python/src/test-python.py
|
Python
|
mit
| 873
| 0.004582
|
def test(options, buildout):
from subprocess import Popen, PIPE
import os
    import sys
python = options['python']
if not os.path.exists(python):
raise IOError("There is no file at %s" % python)
if sys.platform == 'darwin':
output = Popen([python, "-c", "import platform; print (platform.mac_ver())"], stdout=PIPE).communicate()[0]
if not output.startswith("('10."):
raise IOError("Your python at %s d
|
oesn't return proper data for platform.mac_ver(), got: %s" % (python, output))
elif sys.platform == 'linux2' and (2, 4) <= sys.version_info < (2, 5):
output = Popen([python, "-c", "import socket; print (hasattr(socket, 'ssl'))"], stdout=PIPE).communicate()[0]
if not output.startswith("True"):
raise IOError("Your python at %s doesn't have ssl support, got: %s" % (python, output))
|
sot/mica
|
mica/archive/cda/services.py
|
Python
|
bsd-3-clause
| 23,345
| 0.001157
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Python interface to the Chandra Data Archive (CDA) web services and an
interface to a local disk copy of the Observation Catalog (Ocat).
"""
from pathlib import Path
import re
import warnings
import time
import requests
import numpy as np
import tables
from astropy.table import Table, MaskedColumn
from astropy.coordinates import SkyCoord
from mica.common import MICA_ARCHIVE
__all__ = ['get_archive_file_list', 'get_proposal_abstract',
'get_ocat_web', 'get_ocat_local']
OCAT_TABLE_PATH = Path(MICA_ARCHIVE) / 'ocat_target_table.h5'
OCAT_TABLE_CACHE = {}
URL_CDA_SERVICES = "https://cda.harvard.edu/srservices"
CDA_SERVICES = {
'prop_abstract': 'propAbstract',
'ocat_summary': 'ocatList',
'ocat_details': 'ocatDetails',
'archive_file_list': 'archiveFileList'}
# Units copied from https://github.com/jzuhone/pycda/blob/
# 5a4261328eab989bab91bed17f426ad17d876988/pycda/obscat.py#L38
OCAT_UNITS = {
"app_exp": "ks",
"count_rate": "s**-1",
"est_cnt_rate": "s**-1",
"evfil_lo": "keV",
"evfil_ra": "keV",
"exp_time": "ks",
"f_time": "s",
"forder_cnt_rate": "s**-1",
"soe_roll": "degree",
"x_sim": "mm",
"y_off": "arcmin",
"z_off": "arcmin",
"z_sim": "mm",
}
RETURN_TYPE_DOCS = """If ``return_type='auto'`` the return type is determined by the rules:
- If ``obsid`` is provided
AND the obsid corresponds to an integer
AND the returned result has a single row
THEN the return type is ``dict``
    ELSE the return type is a ``Table``.
If ``return_type='table'`` then always return a ``Table``."""
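# Illustrative reading of the rules above (assumed calls): get_ocat_web(obsid=8000) returns a dict,
# while a multi-row query such as get_ocat_web(instrument='HRC-I') returns a Table.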
CDA_PARAM_DOCS = """Additional function args for CDA search parameters::
instrument=ACIS,ACIS-I,ACIS-S,HRC,HRC-I,HRC-S
grating=NONE,LETG,HETG
type=ER,TOO,CAL,GO,GTO,DDT
cycle=00,01,02,03,04, ...
category=SOLAR SYSTEM,
NORMAL GALAXIES,
STARS AND WD,
WD BINARIES AND CV,
BH AND NS BINARIES,
NORMAL GALAXIES
CLUSTERS OF GALAXIES,
ACTIVE GALAXIES AND QUASARS,
GALACTIC DIFFUSE EMISSION AND SURVEYS,
EXTRAGALACTIC DIFFUSE EMISSION AND SURVEYS
jointObservation= HST,XMM,Spitzer,NOAO,NRAO,NuSTAR,Suzaku,Swift,RXTE
status= archived,observed,scheduled, unobserved,untriggered,canceled,deleted
expMode= ACIS TE,ACIS CC,HRC Timing
grid = 'is not null' or 'is null'
Input coordinate specifications::
inputCoordFrame=J2000 (other options: b1950, bxxxx, ec1950, ecxxxx, galactic)
inputCoordEquinox=2000 (4 digit year)
These parameters are single text entries::
target: matches any part of target name
piName: matches any part of PI name
observer: matches any part of observer name
propNum: proposal number
propTitle: matches any part of proposal title
These parameters form a cone search; if you use one you should use them all::
lon
lat
radius (arcmin, default=1.0)
These parameters form a box search; one lon & one lat are required.
Open-ended ranges are allowed. (Such as lonMin=15 with no lonMax.)
::
lonMin
lonMax
latMin
latMax
These parameters are range lists, where the range is indicated by a hyphen (-).
Multiple ranges can be entered separated by commas::
obsid (eg. obsid=100,200-300,600-1000,1800)
seqNum
expTime
appExpTime
countRate
These parameters are date range lists, where the range is
indicated by a hyphen (/). Multiple ranges can be entered separated
by commas. Valid dates are in one of the following formats:
YYYY-MM-DD, YYYY-MM-DD hh:mm, or YYYY-MM-DD hh:mm:ss
::
startDate
releaseDate
These specify how the data is displayed and ordered::
outputCoordFrame=J2000 (other options: b1950, bxxxx, ec1950, ecxxxx, galactic)
outputCoordEquinox=2000 (4 digit year)
outputCoordUnits=decimal (other option: sexagesimal)
sortColumn=ra (other options:
dec,seqNum,instrument,grating,
appExpTime,expTime,
target,piName,observer,status,
startDate,releaseDate,
obsid,propNum,category,type,cycle)
sortOrder=ascending (other option: descending)
maxResults=# (the number of results after which to stop displaying)
Special parameters that change the output table contents are available for
full output (``summary=False``):
- ``acisWindows='true'``: return ACIS windows details for a single obsid
- ``rollReqs='true'``: return roll requirements for a single obsid
- ``timeReqs='true'``: return time requirements for a single obsid
"""
COMMON_PARAM_DOCS = """:param target_name: str, optional
Target name, used in SkyCoord.from_name() to define ``ra`` and ``dec``
if ``resolve_name`` is True, otherwise matches a substring of the
table column ``target_name`` (ignoring spaces).
:param resolve_name: bool, optional
If True, use ``target_name`` to resolve ``ra`` and ``dec``.
:param ra: float, optional
Right Ascension in decimal degrees
:param dec: float, optional
Declination in decimal degrees
:param radius: float, optional
Search radius in arcmin (default=1.0)"""
def html_to_text(html):
from bs4 import BeautifulSoup
soup = BeautifulSoup(html, features='lxml')
text = soup.get_text()
text = re.sub(r'\n+', '\n', text)
return text
def clean_text(text):
out = text.encode('ascii', errors='ignore').decode('ascii')
out = out.replace('\n', ' ').replace('\r', '').strip()
return out
def get_archive_file_list(obsid, detector, level, dataset='flight', **params):
"""
Get list of archive files for given ``obsid``, ``detector``, ``level``, and ``dataset``.
Other parameters can be ``subdetector``, ``filetype``, ``filename``, and ``obi``.
Note: this may not be working for level 0 products.
Examples::
>>> get_archive_file_list(obsid=2365, detector='pcad',
... subdetector='aca', level=1, obi=2)
<Table length=27>
Filename Filesize Timestamp
str30 int64 str19
------------------------------ -------- -------------------
pcadf126690624N007_asol1.fits 7300800 2021-04-09 08:04:29
pcadf02365_002N001_asol1.fits 4728960 2021-04-09 08:04:30
... ... ...
pcadf126695890N007_adat61.fits 1293120 2021-04-09 08:04:28
pcadf126695890N007_adat71.fits 1293120 2021-04-09 08:04:28
>>> get_archive_file_list(obsid=400, detector='acis', level=2, filetype='evt2')
<Table length=1>
Filename Filesize Timestamp
str24 int64 str19
------------------------ -------- -------------------
acisf00400N007_evt2.fits 4619520 2011-07-08 13:52:57
:param obsid: int, str
Observation ID
:param detector: str
Detector name (e.g. 'pcad', 'acis')
:param level: int, float, str
Level name (e.g. 0, 0.5, 1, 1.5, 2, 3)
:param dataset: str
Dataset name (default='flight')
:param **params: dict
Additional parameters to filter query (subdetector, filetype, obi, filename)
:return: astropy Table
Table of archive files
"""
params['dataset'] = dataset
params['detector'] = detector
params['level'] = level
params['obsid'] = obsid
text = _get_cda_service_text('archive_file_list', **params)
dat = Table.read(text.splitlines(), format='ascii.basic', delimiter='\t', guess=False)
# Original Filesize has commas for the thousands like 11,233,456
filesize = [int(x.replace(',', '')) for x in dat['Filesize']]
dat['Filesize'] = filesize
return dat
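# Hedged sketch of the service call used above. The real _get_cda_service_text
# is defined elsewhere in this module; the URL shape (the trailing '.do') and
# the timeout handling here are assumptions for illustration only.
def _get_cda_service_text_sketch(service, timeout=60, **params):
    if service not in CDA_SERVICES:
        raise ValueError(f"unknown CDA service {service!r}")
    url = f"{URL_CDA_SERVICES}/{CDA_SERVICES[service]}.do"
    resp = requests.get(url, params=params, timeout=timeout)
    resp.raise_for_status()  # surface HTTP errors instead of parsing bad text
    return resp.text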
def get_proposal_abstract(obsid=None, propnum=None, timeout=60):
"""Get a proposal
anhstudios/swganh | data/scripts/templates/object/mobile/shared_mynock.py | Python | mit | 426 | 0.049296
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Creature()
result.template = "object/mobile/shared_mynock.iff"
result.attribute_template_id = 9
result.stfName("monster_name","mynock")
	#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
InsightSoftwareConsortium/ITKExamples | src/IO/ImageBase/GenerateSlicesFromVolume/Code.py | Python | apache-2.0 | 1,976 | 0.001012
#!/usr/bin/env python
# Copyright NumFOCUS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import itk
itk.auto_progress(2)
if len(sys.argv) < 3:
print("Usage: " + sys.argv[0] + " <InputFileName> <OutputFileName> [Extension]")
sys.exit(1)
inputFileName = sys.argv[1]
outputFileName = sys.argv[2]
if len(sys.argv) > 3:
extension = sys.argv[3]
else:
extension = ".png"
fileNameFormat = outputFileName + "-%d" + extension
Dimension = 3
PixelType = itk.UC
InputImageType = itk.Image[PixelType, Dimension]
ReaderType = itk.ImageFileReader[InputImageType]
reader = ReaderType.New()
reader.SetFileName(inputFileName)
OutputPixelType = itk.UC
RescaleImageType = itk.Image[OutputPixelType, Dimension]
RescaleFilterType = itk.RescaleIntensityImageFilter[InputImageType, RescaleImageType]
rescale = RescaleFilterType.New()
rescale.SetInput(reader.GetOutput())
rescale.SetOutputMinimum(0)
rescale.SetOutputMaximum(255)
rescale.UpdateLargestPossibleRegion()
region = reader.GetOutput().GetLargestPossibleRegion()
size = region.GetSize()
fnames = itk.NumericSeriesFileNames.New()
fnames.SetStartIndex(0)
fnames.SetEndIndex(size[2] - 1)
fnames.SetIncrementIndex(1)
fnames.SetSeriesFormat(fileNameFormat)
OutputImageType = itk.Image[OutputPixelType, 2]
WriterType = itk.ImageSeriesWriter[RescaleImageType, OutputImageType]
writer = WriterType.New()
writer.SetInput(rescale.GetOutput())
writer.SetFileNames(fnames.GetFileNames())
writer.Update()
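# Hedged usage note (file names invented): running
#   python Code.py inputVolume.mha slice .png
# rescales the volume's intensities to 0-255 and writes slice-0.png through
# slice-(N-1).png, one 2D image per index along the third dimension.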
erudit/zenon | eruditorg/erudit/migrations/0087_auto_20180321_0853.py | Python | gpl-3.0 | 481 | 0
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2018-03-21 13:53
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('erudit', '0086_auto_20180321_0717'),
]
operations = [
migrations.RemoveField(
model_name='articleabstract',
name='article',
),
migrations.DeleteModel(
name='ArticleAbstract',
),
]
mycodeday/crm-platform | web_view_editor/__openerp__.py | Python | gpl-3.0 | 326 | 0.006135
{
'name': 'View Editor',
'category': 'Hidden',
'description': """
OpenERP Web to edit views.
==========================
""",
'version': '2.0',
'depends':['web'],
'data' : [
'views/web_view_editor.xml',
],
'qweb': ['static/src/xml/view_editor.xml'],
'auto_install': True,
}
DsixTools/python-smeftrunner | smeftrunner/classes.py | Python | mit | 11,842 | 0.001773
"""Defines the SMEFT class that provides the main API to smeftrunner."""
from . import rge
from . import io
from . import definitions
from . import beta
from . import smpar
import pylha
from collections import OrderedDict
from math import sqrt
import numpy as np
import ckmutil.phases, ckmutil.diag
class SMEFT(object):
"""Parameter point in the Standard Model Effective Field Theory."""
def __init__(self):
"""Initialize the SMEFT instance."""
self.C_in = None
self.scale_in = None
self.scale_high = None
def set_initial(self, C_in, scale_in, scale_high):
r"""Set the initial values for parameters and Wilson coefficients at
the scale `scale_in`, setting the new physics scale $\Lambda$ to
`scale_high`."""
self.C_in = C_in
self.scale_in = scale_in
self.scale_high = scale_high
def load_initial(self, streams):
"""Load the initial values for parameters and Wilson coefficients from
one or several files.
        `streams` should be a tuple of file-like objects or strings."""
d = {}
for stream in streams:
s = io.load(stream)
if 'BLOCK' not in s:
raise ValueError("No BLOCK found")
d.update(s['BLOCK'])
d = {'BLOCK': d}
C = io.wc_lha2dict(d)
sm = io.sm_lha2dict(d)
C.update(sm)
C = definitions.symmetrize(C)
self.C_in = C
def set_initial_wcxf(self, wc, scale_high=None, get_smpar=False):
"""Load the initial values for Wilson coefficients from a
wcxf.WC instance.
Parameters:
- `scale_high`: since Wilson coefficients are dimensionless in
smeftrunner but not in WCxf, the high scale in GeV has to be provided.
If this parameter is None (default), either a previously defined
value will be used, or the scale attribute of the WC instance will
be used.
        - `get_smpar`: boolean, optional, defaults to False. If True, an attempt
is made to determine the SM parameters from the requirement of
reproducing the correct SM masses and mixings at the electroweak
scale. As approximations are involved, the result might or might not
be reliable, depending on the size of the Wilson coefficients
affecting the SM masses and mixings. If False, Standard Model
parameters have to be provided separately and are assumed to be in
the weak basis used for the Warsaw basis as defined in WCxf,
i.e. in the basis where the down-type and charged lepton mass
matrices are diagonal.
"""
import wcxf
if wc.eft != 'SMEFT':
raise ValueError("Wilson coefficients use wrong EFT.")
if wc.basis != 'Warsaw':
raise ValueError("Wilson coefficients use wrong basis.")
if scale_high is not None:
self.scale_high = scale_high
elif self.scale_high is None:
self.scale_high = wc.scale
C = wcxf.translators.smeft.wcxf2arrays(wc.dict)
keys_dim5 = ['llphiphi']
keys_dim6 = list(set(definitions.WC_keys_0f + definitions.WC_keys_2f + definitions.WC_keys_4f) - set(keys_dim5))
self.scale_in = wc.scale
for k in keys_dim5:
if k in C:
C[k] = C[k]*self.scale_high
for k in keys_dim6:
if k in C:
C[k] = C[k]*self.scale_high**2
C = definitions.symmetrize(C)
# fill in zeros for missing WCs
for k, s in definitions.C_keys_shape.items():
if k not in C and k not in definitions.SM_keys:
if s == 1:
C[k] = 0
else:
C[k] = np.zeros(s)
if self.C_in is None:
self.C_in = C
else:
self.C_in.update(C)
if get_smpar:
self.C_in.update(self._get_sm_scale_in())
def load_wcxf(self, stream, get_smpar=True):
"""Load the initial values for Wilson coefficients from
a file-like object or a string in WCxf format.
Note that Standard Model parameters have to be provided separately
and are assumed to be in the weak basis used for the Warsaw basis as
defined in WCxf, i.e. in the basis where the down-type and charged
lepton mass matrices are diagonal."""
import wcxf
wc = wcxf.WC.load(stream)
self.set_initial_wcxf(wc, get_smpar=get_smpar)
def dump(self, C_out, scale_out=None, stream=None, fmt='lha', skip_redundant=True):
"""Return a string representation of the parameters and Wilson
coefficients `C_out` in DSixTools output format. If `stream` is
specified, export it to a file. `fmt` defaults to `lha` (the SLHA-like
DSixTools format), but can also be `json` or `yaml` (see the
pylha documentation)."""
C = OrderedDict()
if scale_out is not None:
C['SCALES'] = {'values': [[1, self.scale_high], [2, scale_out]]}
else:
C['SCALES'] = {'values': [[1, self.scale_high]]}
sm = io.sm_dict2lha(C_out)['BLOCK']
C.update(sm)
wc = io.wc_dict2lha(C_out, skip_redundant=skip_redundant)['BLOCK']
C.update(wc)
return pylha.dump({'BLOCK': C}, fmt=fmt, stream=stream)
def get_wcxf(self, C_out, scale_out):
"""Return the Wilson coefficients `C_out` as a wcxf.WC instance.
Note that the Wilson coefficients are rotated into the Warsaw basis
as defined in WCxf, i.e. to the basis where the down-type and charged
lepton mass matrices are diagonal."""
import wcxf
C = self.rotate_defaultbasis(C_out)
d = wcxf.translators.smeft.arrays2wcxf(C)
basis = wcxf.Basis['SMEFT', 'Warsaw']
d = {k: v for k, v in d.items() if k in basis.all_wcs and v != 0}
keys_dim5 = ['llphiphi']
keys_dim6 = list(set(definitions.WC_keys_0f + definitions.WC_keys_2f
+ definitions.WC_keys_4f) - set(keys_dim5))
for k in d:
if k.split('_')[0] in keys_dim5:
d[k] = d[k] / self.scale_high
for k in d:
if k.split('_')[0] in keys_dim6:
d[k] = d[k] / self.scale_high**2
d = wcxf.WC.dict2values(d)
wc = wcxf.WC('SMEFT', 'Warsaw', scale_out, d)
return wc
def dump_wcxf(self, C_out, scale_out, fmt='yaml', stream=None, **kwargs):
"""Return a string representation of the Wilson coefficients `C_out`
in WCxf format. If `stream` is specified, export it to a file.
`fmt` defaults to `yaml`, but can also be `json`.
Note that the Wilson coefficients are rotated into the Warsaw basis
as defined in WCxf, i.e. to the basis where the down-type and charged
lepton mass matrices are diagonal."""
wc = self.get_wcxf(C_out, scale_out)
return wc.dump(fmt=fmt, stream=stream, **kwargs)
def rgevolve(self, scale_out, **kwargs):
"""Solve the SMEFT RGEs from the initial scale to `scale_out`.
Returns a dictionary with parameters and Wilson coefficients at
`scale_out`. Additional keyword arguments will be passed to
the ODE solver `scipy.integrate.odeint`."""
self._check_initial()
return rge.smeft_evolve(C_in=self.C_in,
scale_high=self.scale_high,
scale_in=self.scale_in,
scale_out=scale_out,
**kwargs)
def rgevolve_leadinglog(self, scale_out):
"""Compute the leading logarithmix approximation to the solution
of the SMEFT RGEs from the initial scale to `scale_out`.
Returns a dictionary with parameters and Wilson coefficients.
        Much faster but less precise than `rgevolve`.
"""
self._check_initial()
return rge.smeft_evolve_leadinglog(C_in=self.C_in,
scale_high=self.scale_high,
scale_in=self.scale_in,
scale_out=scale_out)
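# Hedged usage sketch for the SMEFT class above (the scales in GeV and the
# input file name are illustrative; C_in is normally filled by load_initial()
# or load_wcxf() before evolving):
# smeft = SMEFT()
# smeft.load_wcxf(open('wc.yaml'), get_smpar=True)
# C_mz = smeft.rgevolve(scale_out=91.1876)
# print(smeft.dump_wcxf(C_mz, scale_out=91.1876, fmt='yaml'))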
turon/mantis | src/tools/tenodera/net_view.py | Python | bsd-3-clause | 15,234 | 0.006302
# This file is part of MANTIS OS, Operating System
# See http://mantis.cs.colorado.edu/
#
# Copyright (C) 2003-2005 University of Colorado, Boulder
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the mos license (see file LICENSE)
import wx, thread
import net_model
class node_view:
def __init__(self, model, color = 'BLUE'):
        self.node_radius = 10 # Radius of a node
        self.node_color = 'GREEN'   # TODO not currently used
self.node_outline = 'BLACK' # TODO not currently used
# Setting this flag prevents drawing this node and links while dragging
self.dragging = False
self.model = model
# Now setup the node's bitmap so we can just blit to the screen
# rather than having to re-draw every time.
#self.bmp = wx.EmptyBitmap(2 * self.node_radius + 4, 2 * self.node_radius + 4)
self.bmp = wx.EmptyBitmap(2 * self.node_radius, 3 * self.node_radius)
self.Update()
def HitTest(self, point):
rect = self.GetRect()
return rect.InsideXY(point.x, point.y)
def GetRect(self):
x, y = self.model.GetPosition()
return wx.Rect(x-self.node_radius, y-self.node_radius,
self.bmp.GetWidth(), self.bmp.GetHeight())
def Erase(self, dc):
if self.dragging:
return
dc.SetBrush(wx.Brush("WHITE"))
dc.SetPen(wx.Pen("WHITE"))
x, y = self.model.GetPosition()
#dc.DrawRectangle(x-self.node_radius, y-self.node_radius,
# self.node_radius * 2 + 4, self.node_radius * 2 + 4)
dc.DrawRectangle(x-self.node_radius, y-self.node_radius,
2 * self.node_radius, 3 * self.node_radius)
def Draw(self, dc, op = wx.COPY):
if self.dragging:
return True
if self.bmp.Ok():
memDC = wx.MemoryDC()
memDC.SelectObject(self.bmp)
x, y = self.model.GetPosition()
dc.Blit(x-self.node_radius, y-self.node_radius,
self.bmp.GetWidth(), self.bmp.GetHeight(),
memDC, 0, 0, op, True)
return True
else:
return False
def Update(self):
#self.led = state
# create a DC for drawing in to the bitmap memory
bdc = wx.MemoryDC();
bdc.SelectObject(self.bmp);
# First clear the background
#bdc.SetBrush(wx.Brush("WHITE"))
#bdc.SetPen(wx.Pen("WHITE"))
#bdc.DrawRectangle(0, 0, self.node_radius * 2 + 4, self.node_radius * 2 + 4)
# Now draw our default node
#bdc.SetBrush(wx.Brush(self.node_color))
#if self.model.GetLedState() == 1:
# bdc.SetPen(wx.Pen(self.node_outline, 4))
#else:
# bdc.SetPen(wx.Pen("RED", 4))
#bdc.DrawEllipse(0, 0, self.node_radius * 2, self.node_radius * 2)
bdc.SetBrush(wx.Brush("DARKGREEN"))
bdc.SetPen(wx.Pen("DARKGREEN"))
bdc.DrawRectangle(0, 0, 2 * self.node_radius, 3 * self.node_radius)
# Now draw the led line
if self.model.led & 1:
bdc.SetBrush(wx.Brush("YELLOW"))
bdc.SetPen(wx.Pen("YELLOW"))
bdc.DrawRectangle(0, 16, self.node_radius*3/2, 8)
if self.model.led & 2: # green
bdc.SetBrush(wx.Brush("GREEN"))
bdc.SetPen(wx.Pen("GREEN"))
bdc.DrawRectangle(0, 8, self.node_radius*3/2, 8)
if self.model.led & 4: # red
bdc.SetBrush(wx.Brush("RED"))
bdc.SetPen(wx.Pen("RED"))
bdc.DrawRectangle(0, 0, self.node_radius*3/2, 8)
# must disconnect the bitmap from the dc so we can use it later
bdc.SelectObject(wx.NullBitmap);
# Create a mask so that we only blit the colored part
#if "__WXGTK__" not in wx.PlatformInfo:
#mask = wx.Mask(self.bmp, wx.WHITE)
mask = wx.Mask(self.bmp)
mask.colour = wx.WHITE
self.bmp.SetMask(mask)
def __str__(self):
return 'node_view:'+str(self.model.id)
class link_view:
def __init__(self, src, dst):
self.src = src
self.dst = dst
self.flashcount = 0
def Erase(self, dc):
if self.src.dragging or self.dst.dragging:
return
pen = wx.Pen("WHITE")
pen.SetWidth(4)
dc.SetPen(pen)
dc.DrawLine(self.src.model.pos[0], self.src.model.pos[1], self.dst.model.pos[0], self.dst.model.pos[1])
def Draw(self, dc, op = wx.COPY):
if self.src.dragging or self.dst.dragging:
return
if self.flashcount:
pen = wx.Pen("GOLD")
else:
pen = wx.Pen("BLUE")
pen.SetWidth(4)
dc.SetPen(pen)
dc.DrawLine(self.src.model.pos[0], self.src.model.pos[1], self.dst.model.pos[0], self.dst.model.pos[1])
class event_queue:
"Queue for storing net events and their callbacks. See net_view.DispatchEvent()."
def __init__(self):
self.lock = thread.allocate_lock()
self.list = []
def put(self, obj):
"Add an object to the queue atomically."
self.lock.acquire()
self.list.append(obj)
self.lock.release()
def get(self):
"Return the entire queue as a list and clear the queue atomically."
self.lock.acquire()
list = self.list
self.list = []
self.lock.release()
return list
class net_view(wx.ScrolledWindow):
"This component does the drawing of the network model."
def __init__(self, parent, id, model):
wx.ScrolledWindow.__init__(self, parent, id, style=wx.NO_FULL_REPAINT_ON_RESIZE)
self.model = model
self.node_dict = {}
self.link_dict = {}
self.node_size = 25
self.dragNode = None
self.dragImage = None
self.queue = event_queue()
self.SetBackgroundColour("WHITE")
self.SetCursor(wx.StockCursor(wx.CURSOR_ARROW))
# Mouse buttons and motion
wx.EVT_LEFT_DOWN(self, self.OnLeftDown)
wx.EVT_LEFT_UP(self, self.OnLeftUp)
wx.EVT_MOTION(self, self.OnMotion)
wx.EVT_PAINT(self, self.OnPaint)
wx.EVT_IDLE(self, self.OnIdle)
self.SetMode("Select")
# Register network events callback DispatchEvent.
# See net_view.DispatchEvent() for details.
model.Bind(net_model.ADD_NODE, self.DispatchEvent, self.add_node)
model.Bind(net_model.REMOVE_NODE, self.DispatchEvent, self.del_node)
model.Bind(net_model.ADD_LINK, self.DispatchEvent, self.add_radio_link)
model.Bind(net_model.REMOVE_LINK, self.DispatchEvent, self.del_radio_link)
model.Bind(net_model.NET_CHANGED, self.DispatchEvent, self.new_network)
model.Bind(net_model.FORWARD_PACKET, self.DispatchEvent, self.forward_radio_packet)
def DispatchEvent(self, callback, *args):
""""Queue a net event to be handled on the GUI thread.
Many wxPython functions do not work when invoked from a thread other
than the main GUI thread. This is a problem for network events, because
they occur during the listen thread that was spawned by simClient.py.
The solution is to register a meta-callback, this method, with the
network model. When DispatchEvent is invoked by the network model,
it puts the original GUI callback, along with the arguments,
on self.queue and then calls wx.WakeUpIdle(). This causes OnIdle to be
invoked on the main GUI thread, which in turn invokes every callback
that is on the queue, and these callbacks can invoke wxPython functions
without fear of being on the wrong thread. This greatly simplifies the
implementation of the callbacks (trust me)."""
self.queue.put((callback, args))
# Cause an idle event to occur, which will invoke our idle handler.
wx.WakeUpIdle()
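    # Hedged sketch (the handler itself is not shown in this excerpt): the
    # OnIdle side of the DispatchEvent pattern simply drains the queue on the
    # GUI thread, e.g.:
    # def OnIdle(self, event):
    #     for callback, args in self.queue.get():
    #         callback(*args)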
def FindNode(self, point):
"Return the node that contains the
compas-dev/compas | src/compas_blender/artists/lineartist.py | Python | mit | 2,962 | 0.003038
from typing import Any
from typing import List
from typing import Optional
from typing import Union
import bpy
import compas_blender
from compas.artists import PrimitiveArtist
from compas.geometry import Line
from compas.colors import Color
from compas_blender.artists import BlenderArtist
class LineArtist(BlenderArtist, PrimitiveArtist):
"""Artist fo
|
r drawing lines in Blender.
Parameters
----------
line : :class:`~compas.geometry.Line`
A COMPAS line.
collection : str | :blender:`bpy.types.Collection`
The Blender scene collection the object(s) created by this artist belong to.
**kwargs : dict, optional
Additional keyword arguments.
For more info,
see :class:`~compas_blender.artists.BlenderArtist` and :class:`~compas.artists.PrimitiveArtist`.
Examples
--------
Use the Blender artist explicitly.
.. code-block:: python
from compas.geometry import Line
from compas_blender.artists import LineArtist
line = Line([0, 0, 0], [1, 1, 1])
artist = LineArtist(line)
artist.draw()
Or, use the artist through the plugin mechanism.
.. code-block:: python
from compas.geometry import Line
from compas.artists import Artist
line = Line([0, 0, 0], [1, 1, 1])
artist = Artist(line)
artist.draw()
"""
def __init__(self,
line: Line,
collection: Optional[Union[str, bpy.types.Collection]] = None,
**kwargs: Any
):
super().__init__(primitive=line, collection=collection or line.name, **kwargs)
def draw(self, color: Optional[Color] = None, show_points: bool = False) -> List[bpy.types.Object]:
"""Draw the line.
Parameters
----------
color : tuple[int, int, int] | tuple[float, float, float] | :class:`~compas.colors.Color`, optional
            The RGB color of the line.
The default color is :attr:`compas.artists.PrimitiveArtist.color`.
show_points : bool, optional
If True, show the start and end point in addition to the line.
Returns
-------
list[:blender:`bpy.types.Object`]
"""
color = Color.coerce(color) or self.color
start = self.primitive.start
end = self.primitive.end
objects = []
if show_points:
points = [
{'pos': start, 'name': f"{self.primitive.name}.start", 'color': color, 'radius': 0.01},
{'pos': end, 'name': f"{self.primitive.name}.end", 'color': color, 'radius': 0.01},
]
objects += compas_blender.draw_points(points, collection=self.collection)
lines = [
{'start': start, 'end': end, 'color': color, 'name': f"{self.primitive.name}"},
]
objects += compas_blender.draw_lines(lines, collection=self.collection)
return objects
amol9/hackerearth | hackerrank/practice/cavity_map/solution.py | Python | mit | 500 | 0.006
n = int(input())
grid = [[int(c) for c in input()] for i in range (0, n)]
cavities = []
for i in range(0, n):
if i > 0 and i < n - 1:
for j in range(0, n):
if j > 0 and j < n - 1:
v = grid[i][j]
if grid[i - 1][j] < v and grid[i + 1][j] < v and grid[i][j - 1] < v and grid[i][j + 1] < v:
cavities.append((i, j))
for i, j in cavities:
grid[i][j] = 'X'
print('\n'.join(''.join(str(i) for i in row) for row in grid))
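# Worked example (the standard sample case, assumed here for illustration):
# for n=4 with rows 1112 / 1912 / 1892 / 1234, grid[1][1]=9 and grid[2][2]=9
# are strictly greater than all four neighbours, so the output is
# 1112 / 1X12 / 18X2 / 1234.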
amosnier/python_for_kids | course_code/13_039_animated_ball.py | Python | gpl-3.0 | 562 | 0.003559
import tkinter
tk = tkinter.Tk()
tk.title("Bounce")
tk.resizable(0, 0)
# Keep the window on the top
tk.wm_attributes("-topmost", 1)
canvas = tkinter.Canvas(tk, width=500, height=400)
# Remove border. Apparently no effect on Linux, but good on Mac
canvas.configure(bd=0)
# Make the 0 horizontal and vertical line apparent
canvas.configure(highlightthickness=0)
canvas.pack()
ball = canvas.create_oval(10, 10, 25, 25, fill='red')
def handle_timer_event():
canvas.move(ball, 10, 0)
tk.after(100, handle_timer_event)
handle_timer_event()
tk.mainloop()
Boy-314/winner-winner-bidget-sbinner | examples/playlist.py | Python | mit | 8,569 | 0.002451
import asyncio
import discord
from discord.ext import commands
if not discord.opus.is_loaded():
# the 'opus' library here is opus.dll on windows
# or libopus.so on linux in the current directory
# you should replace this with the location the
# opus library is located in and with the proper filename.
# note that on windows this DLL is automatically provided for you
discord.opus.load_opus('opus')
class VoiceEntry:
def __init__(self, message, player):
self.requester = message.author
self.channel = message.channel
self.player = player
def __str__(self):
fmt = '*{0.title}* uploaded by {0.uploader} and requested by {1.display_name}'
duration = self.player.duration
if duration:
fmt = fmt + ' [length: {0[0]}m {0[1]}s]'.format(divmod(duration, 60))
return fmt.format(self.player, self.requester)
class VoiceState:
def __init__(self, bot):
self.current = None
self.voice = None
self.bot = bot
self.play_next_song = asyncio.Event()
self.songs = asyncio.Queue()
self.skip_votes = set() # a set of user_ids that voted
self.audio_player = self.bot.loop.create_task(self.audio_player_task())
def is_playing(self):
if self.voice is None or self.current is None:
return False
player = self.current.player
return not player.is_done()
@property
def player(self):
return self.current.player
def skip(self):
self.skip_votes.clear()
if self.is_playing():
self.player.stop()
def toggle_next(self):
self.bot.loop.call_soon_threadsafe(self.play_next_song.set)
async def audio_player_task(self):
while True:
self.play_next_song.clear()
self.current = await self.songs.get()
await self.bot.send_message(self.current.channel, 'Now playing ' + str(self.current))
self.current.player.start()
await self.play_next_song.wait()
class Music:
"""Voice related commands.
Works in multiple servers at once.
"""
def __init__(self, bot):
self.bot = bot
self.voice_states = {}
def get_voice_state(self, server):
state = self.voice_states.get(server.id)
if state is None:
state = VoiceState(self.bot)
self.voice_states[server.id] = state
return state
async def create_voice_client(self, channel):
voice = await self.bot.join_voice_channel(channel)
state = self.get_voice_state(channel.server)
state.voice = voice
def __unload(self):
for state in self.voice_states.values():
try:
state.audio_player.cancel()
if state.voice:
self.bot.loop.create_task(state.voice.disconnect())
except:
pass
@commands.command(pass_context=True, no_pm=True)
async def join(self, ctx, *, channel : discord.Channel):
"""Joins a voice channel."""
try:
await self.create_voice_client(channel)
except discord.ClientException:
await self.bot.say('Already in a voice channel...')
except discord.InvalidArgument:
await self.bot.say('This is not a voice channel...')
else:
await self.bot.say('Ready to play audio in ' + channel.name)
@commands.command(pass_context=True, no_pm=True)
async def summon(self, ctx):
"""Summons the bot to join your voice channel."""
summoned_channel = ctx.message.author.voice_channel
if summoned_channel is None:
await self.bot.say('You are not in a voice channel.')
return False
state = self.get_voice_state(ctx.message.server)
if state.voice is None:
state.voice = await self.bot.join_voice_channel(summoned_channel)
else:
await state.voice.move_to(summoned_channel)
return True
@commands.command(pass_context=True, no_pm=True)
async def play(self, ctx, *, song : str):
"""Plays a song.
If there is a song currently in the queue, then it is
queued until the next song is done playing.
This command automatically searches as well from YouTube.
The list of supported sites can be found here:
https://rg3.github.io/youtube-dl/supportedsites.html
"""
state = self.get_voice_state(ctx.message.server)
opts = {
'default_search': 'auto',
'quiet': True,
}
if state.voice is None:
success = await ctx.invoke(self.summon)
if not success:
return
try:
player = await state.voice.create_ytdl_player(song, ytdl_options=opts, after=state.toggle_next)
except Exception as e:
fmt = 'An error occurred while processing this request: ```py\n{}: {}\n```'
await self.bot.send_message(ctx.message.channel, fmt.format(type(e).__name__, e))
else:
player.volume = 0.6
entry = VoiceEntry(ctx.message, player)
await self.bot.say('Enqueued ' + str(entry))
await state.songs.put(entry)
@commands.command(pass_context=True, no_pm=True)
async def volume(self, ctx, value : int):
"""Sets the volume of the currently playing song."""
state = self.get_voice_state(ctx.message.server)
if state.is_playing():
player = state.player
player.volume = value / 100
await self.bot.say('Set the volume to {:.0%}'.format(player.volume))
@commands.command(pass_context=True, no_pm=True)
async def pause(self, ctx):
"""Pauses the currently played song."""
state = self.get_voice_state(ctx.message.server)
if state.is_playing():
player = state.player
player.pause()
@commands.command(pass_context=True, no_pm=True)
async def resume(self, ctx):
"""Resumes the currently played song."""
state = self.get_voice_state(ctx.message.server)
if state.is_playing():
player = state.player
player.resume()
@commands.command(pass_context=True, no_pm=True)
async def stop(self, ctx):
"""Stops playing audio and leaves the voice channel.
This also clears the queue.
"""
server = ctx.message.server
state = self.get_voice_state(server)
if state.is_playing():
player = state.player
player.stop()
try:
state.audio_player.cancel()
del self.voice_states[server.id]
await state.voice.disconnect()
except:
pass
@commands.command(pass_context=True, no_pm=True)
async def skip(self, ctx):
"""Vote to skip a song. The song requester can automatically skip.
3 skip votes are needed for the song to be skipped.
"""
state = self.get_voice_state(ctx.message.server)
if not state.is_playing():
await self.bot.say('Not playing any music right now...')
return
voter = ctx.message.author
if voter == state.current.requester:
await self.bot.say('Requester requested skipping song...')
state.skip()
elif voter.id not in state.skip_votes:
state.skip_votes.add(voter.id)
total_votes = len(state.skip_votes)
if total_votes >= 3:
await self.bot.say('Skip vote passed, skipping song...')
state.skip()
else:
await self.bot.say('Skip vote added, currently at [{}/3]'.format(total_votes))
else:
await self.bot.say('You have already voted to skip this song.')
@commands.command(pass_context=True, no_pm=True)
async def playing(self, ctx):
"""Shows info about the currently played song."""
        state = self.get_voice_state(ctx.message.server)
if state.current is None:
await self.bot.say('Not playing anything.')
else:
valentine20xx/portal | converter/utils.py | Python | gpl-3.0 | 2,852 | 0.001052
import csv
import os
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse_lazy
from django.utils.decorators import method_decorator
from django.db import models
from converter.exceptions import UploadException
from .models import SystemSource, Reference, ReferenceKeyValue
from django.db import transaction
class TimeStampedModel(models.Model):
"""
    An abstract base class model that provides self-updating
    ``created`` and ``modified`` fields.
"""
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
class Meta:
abstract = True
class LoginRequiredMixin:
@method_decorator(login_required(login_url=reverse_lazy("auth:login")))
def dispatch(self, request, *args, **kwargs):
return super(LoginRequiredMixin, self).dispatch(request, *args, **kwargs)
def ss_handle_uploaded_file(f):
filename = f.name
# filepath = os.path.join('/home/niko/' + filename)
filepath = os.path.join('C:/Users/nmorozov/Desktop/1/' + filename)
with open(filepath, 'wb+') as destination:
for chunk in f.chunks():
destination.write(chunk)
with open(filepath, newline='') as csvfile:
iterator = csv.reader(csvfile, delimiter=',', quotechar='|')
with transaction.atomic():
for obj in iterator:
if safe_get(obj, 0) == "system":
ss = SystemSource(code=safe_get(obj, 1),
fullname=safe_get(obj, 2))
ss.save()
elif safe_get(obj, 0) == "reference":
reference = Reference(code=safe_get(obj, 1),
fullname=safe_get(obj, 2),
table_name=safe_get(obj, 3),
table_charset=safe_get(obj, 4),
jdbc_source=safe_get(obj, 5),
replication_sql=safe_get(obj, 6),
master_id=ss)
reference.save()
elif safe_get(obj, 0) == "content":
content = ReferenceKeyValue(key=safe_get(obj, 1),
value=safe_get(obj, 2),
                                            reference_id=reference)
content.save()
else:
raise UploadException("Parse error")
                    # raise ValidationError('Invalid value', code='invalid')
os.remove(filepath)
def safe_get(_list, _index, _default=""):
try:
return _list[_index]
except IndexError:
return _default
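# Hedged illustration (column values invented): ss_handle_uploaded_file()
# expects rows whose first column picks the record type, e.g.
#   system,SRC1,Source system one
#   reference,REF1,Reference one,ref_table,utf8,jdbc_src,REPL_SQL
#   content,key1,value1
# Note the parser relies on order: each 'reference' row must follow its
# 'system' row, and each 'content' row its 'reference' row.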
googleapis/python-datacatalog | samples/generated_samples/datacatalog_generated_datacatalog_v1_policy_tag_manager_serialization_import_taxonomies_sync.py | Python | apache-2.0 | 1,711 | 0.001753
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for ImportTaxonomies
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-datacatalog
# [START datacatalog_generated_datacatalog_v1_PolicyTagManagerSerialization_ImportTaxonomies_sync]
from google.cloud import datacatalog_v1
def sample_import_taxonomies():
# Create a client
client = datacatalog_v1.PolicyTagManagerSerializationClient()
# Initialize request argument(s)
inline_source = datacatalog_v1.InlineSource()
inline_source.taxonomies.display_name = "display_name_value"
request = datacatalog_v1.ImportTaxonomiesRequest(
inline_source=inline_source,
parent="parent_value",
)
# Make the request
response = client.import_taxonomies(request=request)
# Handle the response
print(response)
# [END datacatalog_generated_datacatalog_v1_PolicyTagManagerSerialization_ImportTaxonomies_sync]
elan17/irc-terminal | server/main.py | Python | gpl-3.0 | 927 | 0.002157
import multiprocessing
import Library.interfaz
import Library.config
import Library.Encriptacion  # needed below for key generation (missing in the original)
import handler
import server
try:
config = Library.config.read()
except:
import sys
print("FAILED TO OPEN CONFIG FILE, EXITING")
sys.exit()
man = multiprocessing.Manager()
adios = man.Value(bool, False)
interfaz = Library.interfaz.Interfaz(lang=config["lang"])
hand = handler.Handler(interfaz, adios)
hand.pantalla("INIT", prompt=False)
input("")
key_bits = int(config["key_length"
|
])
hand.pantalla("GENERATING_KEY", args=(key_bits,), prompt=False)
server = server.Server(adios, hand, Library.Encriptacion.genera(key_bits), ip=config["host"], port=int(config["port"]))
g = multiprocessing.Process(target=server.listen)
p = multiprocessing.Process(target=server.server_handler)
p2 = multiprocessing.Process(target=hand.listen, args=(server, ))
p.start()
g.start()
hand.listen(server)
adios.value = True
p.join()
g.join()
server.handler.exit()
LMSlay/wiper | docs/source/conf.py | Python | bsd-3-clause | 7,714 | 0.007519
# -*- coding: utf-8 -*-
#
# Viper documentation build configuration file, created by
# sphinx-quickstart on Mon May 5 18:24:15 2014.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Viper'
copyright = u'2014, Claudio Guarnieri'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.1'
# The full version, including alpha/beta/rc tags.
release = '1.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for re
|
placing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Viperdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Viper.tex', u'Viper Documentation',
u'Claudio Guarnieri', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'viper', u'Viper Documentation',
[u'Claudio Guarnieri'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Viper', u'Viper Documentation',
u'Claudio Guarnieri', 'Viper', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
minlexx/pyevemon | esi_client/models/get_opportunities_tasks_task_id_ok.py | Python | gpl-3.0 | 5,602 | 0.001071
# coding: utf-8
"""
EVE Swagger Interface
An OpenAPI for EVE Online
OpenAPI spec version: 0.4.6
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class GetOpportunitiesTasksTaskIdOk(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, description=None, name=None, notification=None, task_id=None):
"""
GetOpportunitiesTasksTaskIdOk - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'description': 'str',
'name': 'str',
'notification': 'str',
'task_id': 'int'
}
self.attribute_map = {
'description': 'description',
'name': 'name',
'notification': 'notification',
'task_id': 'task_id'
}
self._description = description
self._name = name
self._notification = notification
self._task_id = task_id
@property
def description(self):
"""
Gets the description of this GetOpportunitiesTasksTaskIdOk.
description string
:return: The description of this GetOpportunitiesTasksTaskIdOk.
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""
Sets the description of this GetOpportunitiesTasksTaskIdOk.
description string
:param description: The description of this GetOpportunitiesTasksTaskIdOk.
:type: str
"""
if description is None:
raise ValueError("Invalid value for `description`, must not be `None`")
self._description = description
@property
def name(self):
"""
Gets the name of this GetOpportunitiesTasksTaskIdOk.
name string
:return: The name of this GetOpportunitiesTasksTaskIdOk.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this GetOpportunitiesTasksTaskIdOk.
name string
:param name: The name of this GetOpportunitiesTasksTaskIdOk.
:type: str
"""
if name is None:
raise ValueError("Invalid value for `name`, must not be `None`")
self._name = name
@property
def notification(self):
"""
Gets the notification of this GetOpportunitiesTasksTaskIdOk.
notification string
:return: The notification of this GetOpportunitiesTasksTaskIdOk.
:rtype: str
"""
return self._notification
@notification.setter
def notification(self, notification):
"""
Sets the notification of this GetOpportunitiesTasksTaskIdOk.
notification string
:param notification: The notification of this GetOpportunitiesTasksTaskIdOk.
:type: str
"""
if notification is None:
raise ValueError("Invalid value for `notification`, must not be `None`")
self._notification = notification
@property
def task_id(self):
"""
Gets the task_id of this GetOpportunitiesTasksTaskIdOk.
task_id integer
:return: The task_id of this GetOpportunitiesTasksTaskIdOk.
:rtype: int
"""
return self._task_id
@task_id.setter
def task_id(self, task_id):
"""
Sets the task_id of this GetOpportunitiesTasksTaskIdOk.
task_id integer
:param task_id: The task_id of this GetOpportunitiesTasksTaskIdOk.
:type: int
"""
if task_id is None:
raise ValueError("Invalid value for `task_id`, must not be `None`")
self._task_id = task_id
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
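    # Illustrative example (values invented): to_dict() flattens the model to
    # plain Python types, so json.dumps(obj.to_dict()) works directly, e.g.
    # GetOpportunitiesTasksTaskIdOk('d', 'n', 'note', 1).to_dict()
    # -> {'description': 'd', 'name': 'n', 'notification': 'note', 'task_id': 1}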
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, GetOpportunitiesTasksTaskIdOk):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
VirusTotal/misp-modules | misp_modules/modules/expansion/threatfox.py | Python | agpl-3.0 | 2,146 | 0.001864
# -*- coding: utf-8 -*-
import requests
import json
misperrors = {'error': 'Error'}
mispattributes = {'input': ['md5', 'sha1', 'sha256', 'domain', 'url', 'email-src', 'ip-dst|port', 'ip-src|port'], 'output': ['text']}
moduleinfo = {'version': '0.1', 'author': 'Corsin Camichel', 'description': 'Module to search for an IOC on ThreatFox by abuse.ch.', 'module-type': ['hover', 'expansion']}
moduleconfig = []
API_URL = "https://threatfox-api.abuse.ch/api/v1/"
# copied from
# https://github.com/marjatech/threatfox2misp/blob/main/threatfox2misp.py
def confidence_level_to_tag(level: int) -> str:
confidence_tagging = {
0: 'misp:confidence-level="unconfident"',
10: 'misp:confidence-level="rarely-confident"',
37: 'misp:confidence-level="fairly-confident"',
63: 'misp:confidence-level="usually-confident"',
90: 'misp:confidence-level="completely-confident"',
}
confidence_tag = ""
for tag_minvalue, tag in confidence_tagging.items():
if level >= tag_minvalue:
confidence_tag = tag
return confidence_tag
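# Example (follows the thresholds above): confidence_level_to_tag(75) returns
# 'misp:confidence-level="usually-confident"', since 75 clears the 0/10/37/63
# thresholds but not 90.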
def handler(q=False):
if q is False:
return False
request = json.loads(q)
ret_val = ""
for input_type in mispattributes['input']:
if input_type in request:
to_query = request[input_type]
break
else:
misperrors['error'] = "Unsupported attributes type:"
return misperrors
data = {"query": "search_ioc", "search_term": f"{to_query}"}
response = requests.post(API_URL, data=json.dumps(data))
if response.status_code == 200:
result = json.loads(response.text)
if(result["query_status"] == "ok"):
            confidence_tag = confidence_level_to_tag(result["data"][0]["confidence_level"])
            ret_val = {'results': [{'types': mispattributes['output'], 'values': [result["data"][0]["threat_type_desc"]], 'tags': [result["data"][0]["malware"], result["data"][0]["malware_printable"], confidence_tag]}]}
return ret_val
def introspection():
return mispattributes
def version():
moduleinfo['config'] = moduleconfig
return moduleinfo
codeMarble/codeMarble_Web | codeMarble_Web/codeMarble_py3des.py | Python | gpl-3.0 | 649 | 0.007704
# -*- coding: utf-8 -*-
from py3Des.pyDes import triple_des, ECB, PAD_PKCS5
class TripleDES:
__triple_des = None
@staticmethod
    def init():
TripleDES.__triple_des = triple_des('1234567812345678',
mode=ECB,
IV = '\0\0\0\0\0\0\0\0',
pad=None,
padmode = PAD_PKCS5)
@staticmethod
def encrypt(data):
return TripleDES.__triple_des.encrypt(data)
@staticmethod
def decrypt(data):
        return TripleDES.__triple_des.decrypt(data)
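# Hedged usage sketch (the plaintext is invented; py3Des returns bytes):
# TripleDES.init()
# token = TripleDES.encrypt('secret data')
# assert TripleDES.decrypt(token) == b'secret data'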