Dataset schema (column name, type, and value range as reported by the dataset viewer):

repo_name  stringlengths  5 to 100
path       stringlengths  4 to 231
language   stringclasses  1 value
license    stringclasses  15 values
size       int64          6 to 947k
score      float64        0 to 0.34
prefix     stringlengths  0 to 8.16k
middle     stringlengths  3 to 512
suffix     stringlengths  0 to 8.17k
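The columns above describe a fill-in-the-middle (FIM) style corpus: each row holds one Python file from the named repository, split into `prefix`, `middle`, and `suffix` strings. A minimal sketch of how such a table could be consumed follows, assuming it is published as a Hugging Face dataset; the dataset identifier below is a placeholder, not the real name.

```python
# Minimal sketch, assuming the rows below are available as a Hugging Face dataset.
# "example-org/python-fim-corpus" is a placeholder identifier, not the actual dataset name.
from datasets import load_dataset

ds = load_dataset("example-org/python-fim-corpus", split="train")

row = ds[0]
# Reassemble the original source file from the three FIM fields.
full_source = row["prefix"] + row["middle"] + row["suffix"]
print(row["repo_name"], row["path"], row["license"], row["size"], row["score"])
print(full_source[:200])
```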
ofer43211/unisubs
apps/subtitles/admin.py
Python
agpl-3.0
6,564
0.001219
# -*- coding: utf-8 -*- # Amara, universalsubtitles.org # # Copyright (C) 2013 Participatory Culture Foundation # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see # http://www.gnu.org/licenses/agpl-3.0.html. from django.contrib import admin from django.contrib.admin.views.main import ChangeList from django.core.urlresolvers import reverse from subtitles.models import (get_lineage, SubtitleLanguage, SubtitleVersion) class SubtitleVersionInline(admin.TabularInline): def has_delete_permission(self, request, obj=None): # subtitle versions should be immutable, don't allow deletion return False model = SubtitleVersion fields = ['version_number'] max_num = 0 class SubtitleLanguageAdmin(admin.ModelAdmin): list_display = ['video_title', 'language_code', 'version_count', 'tip', 'unofficial_signoffs', 'official_signoffs', 'pending_collaborators', 'expired_pending_collaborators', 'unexpired_pending_collaborators', 'is_forked'] list_filter = ['created', 'language_code'] inlines = [SubtitleVersionInline] search_fields = ['video__title', 'video__video_id', 'language_code'] raw_id_fields = ['video'] def unofficial_signoffs(self, o): return o.unofficial_signoff_count unofficial_signoffs.admin_order_field = 'unofficial_signoff_count' def official_signoffs(self, o): return o.official_signoff_count official_signoffs.admin_order_field = 'official_signoff_count' def pending_collaborators(self, o): return o.pending_signoff_count pending_collaborators.sh
ort_description = 'pending' pending_collaborators.admin_order_field = 'pending_signoff_count' def expired_pending_collaborators(self, o): return o.pending_signoff_expired_count expired_pending_collaborators.short_description = 'expired pending' expired_pending_collaborators.admin_order_
field = 'pending_signoff_expired_count' def unexpired_pending_collaborators(self, o): return o.pending_signoff_unexpired_count unexpired_pending_collaborators.short_description = 'unexpired pending' unexpired_pending_collaborators.admin_order_field = 'pending_signoff_unexpired_count' def video_title(self, sl): return sl.video.title_display() video_title.short_description = 'video' def version_count(self, sl): return sl.subtitleversion_set.full().count() version_count.short_description = 'number of versions' def tip(self, sl): ver = sl.get_tip(full=True) return ver.version_number if ver else None tip.short_description = 'tip version' class SubtitleVersionChangeList(ChangeList): def get_query_set(self, request): qs = super(SubtitleVersionChangeList, self).get_query_set(request) # for some reason using select_related makes MySQL choose an # absolutely insane way to perform the query. Use prefetch_related() # instead to work around this. return qs.prefetch_related('video', 'subtitle_language') class SubtitleVersionAdmin(admin.ModelAdmin): list_per_page = 20 list_display = ['video_title', 'id', 'language', 'version_num', 'visibility', 'visibility_override', 'subtitle_count', 'created'] list_select_related = False raw_id_fields = ['video', 'subtitle_language', 'parents', 'author'] list_filter = ['created', 'visibility', 'visibility_override', 'language_code'] list_editable = ['visibility', 'visibility_override'] search_fields = ['video__video_id', 'video__title', 'title', 'language_code', 'description', 'note'] # Unfortunately Django uses .all() on related managers instead of # .get_query_set(). We've disabled .all() on SubtitleVersion managers so we # can't let Django do this. This means we can't edit parents in the admin, # but you should never be doing that anyway. exclude = ['parents', 'serialized_subtitles'] readonly_fields = ['parent_versions'] # don't allow deletion actions = [] def get_changelist(self, request, **kwargs): return SubtitleVersionChangeList def has_delete_permission(self, request, obj=None): # subtitle versions should be immutable, don't allow deletion return False def version_num(self, sv): return '#' + str(sv.version_number) version_num.short_description = 'version #' def video_title(self, sv): return sv.video.title video_title.short_description = 'video' def language(self, sv): return sv.subtitle_language.get_language_code_display() def parent_versions(self, sv): links = [] for parent in sv.parents.full(): href = reverse('admin:subtitles_subtitleversion_change', args=(parent.pk,)) links.append('<a href="%s">%s</a>' % (href, parent)) return ', '.join(links) parent_versions.allow_tags = True # Hack to generate lineages properly when modifying versions in the admin # interface. Maybe we should just disallow this entirely once the version # models are hooked up everywhere else? def response_change(self, request, obj): response = super(SubtitleVersionAdmin, self).response_change(request, obj) obj.lineage = get_lineage(obj.parents.full()) obj.save() return response def response_add(self, request, obj, *args, **kwargs): response = super(SubtitleVersionAdmin, self).response_add(request, obj) obj.lineage = get_lineage(obj.parents.full()) obj.save() return response # ----------------------------------------------------------------------------- admin.site.register(SubtitleLanguage, SubtitleLanguageAdmin) admin.site.register(SubtitleVersion, SubtitleVersionAdmin)
irl/gajim
src/common/connection_handlers_events.py
Python
gpl-3.0
94,929
0.00316
# -*- coding:utf-8 -*- ## src/common/connection_handlers_events.py ## ## Copyright (C) 2010-2014 Yann Leboulanger <asterix AT lagaule.org> ## ## This file is part of Gajim. ## ## Gajim is free software; you can redistribute it and/or modify ## it under the terms of the GNU General Public License as published ## by the Free Software Foundation; version 3 only. ## ## Gajim is distributed in the hope that it will be useful, ## but WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ## GNU General Public License for more details. ## ## You should have received a copy of the GNU General Public License ## along with Gajim. If not, see <http://www.gnu.org/licenses/>. ## import datetime import sys import os from time import (localtime, time as time_time) from calendar import timegm import hmac from common import atom from common import nec from common import helpers from common import gajim from common import i18n import nbxmpp from common import dataforms from common import exceptions from common.zeroconf import zeroconf from common.logger import LOG_DB_PATH from common.pep import SUPPORTED_PERSONAL_USER_EVENTS from nbxmpp.protocol import NS_CHATSTATES from common.jingle_transport import JingleTransportSocks5 from common.file_props import FilesProp if gajim.HAVE_PYOPENSSL: import OpenSSL.crypto import logging log = logging.getLogger('gajim.c.connection_handlers_events') CONDITION_TO_CODE = { 'realjid-public': 100, 'affiliation-changed': 101, 'unavailable-shown': 102, 'unavailable-not-shown': 103, 'configuration-changed': 104, 'self-presence': 110, 'logging-enabled': 170, 'logging-disabled': 171, 'non-anonymous': 172, 'semi-anonymous': 173, 'fully-anonymous': 174, 'room-created': 201, 'nick-assigned': 210, 'banned': 301, 'new-nick': 303, 'kicked': 307, 'removed-affiliation': 321, 'removed-membership': 322, 'removed-shutdown': 332, } class HelperEvent: def get_jid_resource(self, check_fake_jid=False): if check_fake_jid and hasattr(self, 'id_') and \ self.id_ in self.conn.groupchat_jids: self.fjid = self.conn.groupchat_jids[self.id_] del self.conn.groupchat_jids[self.id_] else: self.fjid = helpers.get_full_jid_from_iq(self.stanza) self.jid, self.resource = gajim.get_room_and_nick_from_fjid(self.fjid) def get_id(self): self.id_ = self.stanza.getID() def get_gc_control(self): self.gc_control = gajim.interface.msg_win_mgr.get_gc_control(self.jid, self.conn.name) # If gc_control is missing - it may be minimized. Try to get it # from there. If it's not there - then it's missing anyway and # will remain set to None. if not self.gc_control: mi
nimized = gajim.interface.minimized_controls[self.conn.name] self.gc_control = minimized.get(self.jid) def _generate_timestamp(self, tag): tim = helpers.datetime_tuple(tag) self.timestamp = localtime(timegm(tim)) def get_chatstate(self):
""" Extract chatstate from a <message/> stanza Requires self.stanza and self.msgtxt """ self.chatstate = None # chatstates - look for chatstate tags in a message if not delayed delayed = self.stanza.getTag('x', namespace=nbxmpp.NS_DELAY) is not None if not delayed: children = self.stanza.getChildren() for child in children: if child.getNamespace() == NS_CHATSTATES: self.chatstate = child.getName() break class HttpAuthReceivedEvent(nec.NetworkIncomingEvent): name = 'http-auth-received' base_network_events = [] def generate(self): self.opt = gajim.config.get_per('accounts', self.conn.name, 'http_auth') self.iq_id = self.stanza.getTagAttr('confirm', 'id') self.method = self.stanza.getTagAttr('confirm', 'method') self.url = self.stanza.getTagAttr('confirm', 'url') # In case it's a message with a body self.msg = self.stanza.getTagData('body') return True class LastResultReceivedEvent(nec.NetworkIncomingEvent, HelperEvent): name = 'last-result-received' base_network_events = [] def generate(self): self.get_id() self.get_jid_resource(check_fake_jid=True) if self.id_ in self.conn.last_ids: self.conn.last_ids.remove(self.id_) self.status = '' self.seconds = -1 if self.stanza.getType() == 'error': return True qp = self.stanza.getTag('query') if not qp: return sec = qp.getAttr('seconds') self.status = qp.getData() try: self.seconds = int(sec) except Exception: return return True class VersionResultReceivedEvent(nec.NetworkIncomingEvent, HelperEvent): name = 'version-result-received' base_network_events = [] def generate(self): self.get_id() self.get_jid_resource(check_fake_jid=True) if self.id_ in self.conn.version_ids: self.conn.version_ids.remove(self.id_) self.client_info = '' self.os_info = '' if self.stanza.getType() == 'error': return True qp = self.stanza.getTag('query') if qp.getTag('name'): self.client_info += qp.getTag('name').getData() if qp.getTag('version'): self.client_info += ' ' + qp.getTag('version').getData() if qp.getTag('os'): self.os_info += qp.getTag('os').getData() return True class TimeResultReceivedEvent(nec.NetworkIncomingEvent, HelperEvent): name = 'time-result-received' base_network_events = [] def generate(self): self.get_id() self.get_jid_resource(check_fake_jid=True) if self.id_ in self.conn.entity_time_ids: self.conn.entity_time_ids.remove(self.id_) self.time_info = '' if self.stanza.getType() == 'error': return True qp = self.stanza.getTag('time') if not qp: # wrong answer return tzo = qp.getTag('tzo').getData() if tzo.lower() == 'z': tzo = '0:0' tzoh, tzom = tzo.split(':') utc_time = qp.getTag('utc').getData() ZERO = datetime.timedelta(0) class UTC(datetime.tzinfo): def utcoffset(self, dt): return ZERO def tzname(self, dt): return "UTC" def dst(self, dt): return ZERO class contact_tz(datetime.tzinfo): def utcoffset(self, dt): return datetime.timedelta(hours=int(tzoh), minutes=int(tzom)) def tzname(self, dt): return "remote timezone" def dst(self, dt): return ZERO try: t = datetime.datetime.strptime(utc_time, '%Y-%m-%dT%H:%M:%SZ') except ValueError: try: t = datetime.datetime.strptime(utc_time, '%Y-%m-%dT%H:%M:%S.%fZ') except ValueError as e: log.info('Wrong time format: %s' % str(e)) return t = t.replace(tzinfo=UTC()) self.time_info = t.astimezone(contact_tz()).strftime('%c') return True class GMailQueryReceivedEvent(nec.NetworkIncomingEvent): name = 'gmail-notify' base_network_events = [] def generate(self): if not self.stanza.getTag('mailbox'): return mb = self.stanza.getTag('mailbox') if not mb.getAttr('url'): return self.conn.gmail_url = mb.getAttr('url') if 
mb.getNamespace() != nbxmpp.NS_GMAILNOTIFY: return self.newmsgs = mb.getAttr('total-matched') if not self.newmsgs: return if self.newmsgs == '0': return # there are new messages self.gmail_messages_list = [] if mb.getTag('mail-thread-info'):
cnewcome/sos
sos/plugins/xen.py
Python
gpl-2.0
3,945
0
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.

from sos.plugins import Plugin, RedHatPlugin
import os
import re
from stat import *


class Xen(Plugin, RedHatPlugin):
    """Xen virtualization
    """

    plugin_name = 'xen'
    profiles = ('virt',)

    def determine_xen_host(self):
        if os.access("/proc/acpi/dsdt", os.R_OK):
            result = self.call_ext_prog("grep -qi xen /proc/acpi/dsdt")
            if result['status'] == 0:
                return "hvm"

        if os.access("/proc/xen/capabilities", os.R_OK):
            result = self.call_ext_prog(
                "grep -q control_d /proc/xen/capabilities")
            if result['status'] == 0:
                return "dom0"
            else:
                return "domU"
        return "baremetal"

    def check_enabled(self):
        return (self.determine_xen_host() == "baremetal")

    def is_running_xenstored(self):
        xs_pid = self.call_ext_prog("pidof xenstored")['output']
        xs_pidnum = re.split('\n$', xs_pid)[0]
        return xs_pidnum.isdigit()

    def dom_collect_proc(self):
        self.add_copy_spec([
            "/proc/xen/balloon",
            "/proc/xen/capabilities",
            "/proc/xen/xsd_kva",
            "/proc/xen/xsd_port"])
        # determine if CPU has PAE support
        self.add_cmd_output("grep pae /proc/cpuinfo")
        # determine if CPU has Intel-VT or AMD-V support
        self.add_cmd_output("egrep -e 'vmx|svm' /proc/cpuinfo")

    def setup(self):
        host_type = self.determine_xen_host()
        if host_type == "domU":
            # we should collect /proc/xen and /sys/hypervisor
            self.dom_collect_proc()
            # determine if hardware virtualization support is enabled
            # in BIOS: /sys/hypervisor/properties/capabilities
            self.add_copy_spec("/sys/hypervisor")
        elif host_type == "hvm":
            # what do we collect here???
            pass
        elif host_type == "dom0":
            # default of dom0, collect lots of system information
            self.add_copy_spec([
                "/var/log/xen",
                "/etc/xen",
                "/sys/hypervisor/version",
                "/sys/hypervisor/compilation",
                "/sys/hypervisor/properties",
                "/sys/hypervisor/type"])
            self.add_cmd_output([
                "xm dmesg",
                "xm info",
                "xm list",
                "xm list --long",
                "brctl show"
            ])
            self.dom_collect_proc()
            if self.is_running_xenstored():
                self.add_copy_spec("/sys/hypervisor/uuid")
                self.add_cmd_output("xenstore-ls")
            else:
                # we need tdb instead of xenstore-ls if cannot get it.
                self.add_copy_spec("/var/lib/xenstored/tdb")
            # FIXME: we *might* want to collect things in /sys/bus/xen*,
            # /sys/class/xen*, /sys/devices/xen*, /sys/modules/blk*,
            # /sys/modules/net*, but I've never heard of them actually being
            # useful, so I'll leave it out for now
        else:
            # for bare-metal, we don't have to do anything special
            return  # USEFUL

        self.add_custom_text("Xen hostType: " + host_type)

# vim: set et ts=4 sw=4 :
smurfix/HomEvenT
irrigation/rainman/models/env.py
Python
gpl-3.0
3,941
0.041921
# -*- coding: utf-8 -*-
## Copyright © 2012, Matthias Urlichs <[email protected]>
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
## GNU General Public License (included; see the file LICENSE)
## for more details.
##

from __future__ import division, absolute_import
from rainman.models import Model
from rainman.models.site import Site
from django.db import models as m
from django.db.models import Q

# Tables for environmental effects.
# Note that table names are different for Hysterical Raisins.


class EnvGroup(Model):
    class Meta(Model.Meta):
        unique_together = (("site", "name"),)
        db_table = "rainman_paramgroup"

    def __unicode__(self):
        return self.name

    name = m.CharField(max_length=200)
    comment = m.CharField(max_length=200, blank=True)
    site = m.ForeignKey(Site, related_name="envgroups")
    factor = m.FloatField(default=1.0, help_text="Base Factor")
    rain = m.BooleanField(default=True, help_text="stop when it's raining?")

    def __init__(self, *a, **k):
        super(EnvGroup, self).__init__(*a, **k)
        self.env_cache = {}

    def list_valves(self):
        return u"¦".join((d.name for d in self.valves.all()))

    def refresh(self):
        super(EnvGroup, self).refresh()
        self.env_cache = {}

    def env_factor_one(self, tws, h):
        p = 4  # power factor, favoring nearest-neighbor
        qtemp, qwind, qsun = tws
        if qtemp and h.temp is None:
            return None
        if qwind and h.wind is None:
            return None
        if qsun and h.sun is None:
            return None
        q = Q()
        q &= Q(temp__isnull=not qtemp)
        q &= Q(wind__isnull=not qwind)
        q &= Q(sun__isnull=not qsun)
        sum_f = 0
        sum_w = 0
        try:
            ec = self.env_cache[tws]
        except KeyError:
            self.env_cache[tws] = ec = list(self.items.filter(q))
        for ef in ec:
            d = 0
            if qtemp:
                d += (h.temp - ef.temp) ** 2
            if qwind:
                d += (h.wind - ef.wind) ** 2
            if qsun:
                d += (h.sun - ef.sun) ** 2
            d = d ** (p * 0.5)
            if d < 0.001:  # close enough
                return ef.factor
            sum_f += ef.factor / d
            sum_w += 1 / d
        if not sum_w:
            return None
        return sum_f / sum_w

    def env_factor(self, h, logger=None):
        """Calculate a weighted factor for history entry @h, based on the given environmental parameters"""
        ql = (
            (6, (True, True, True)),
            (4, (False, True, True)),
            (4, (True, False, True)),
            (4, (True, True, False)),
            (1, (True, False, False)),
            (1, (False, True, False)),
            (1, (False, False, True)),
        )
        sum_f = 1  # if there are no data, return 1
        sum_w = 1
        n = 1
        for weight, tws in ql:
            f = self.env_factor_one(tws, h)
            if f is not None:
                if logger:
                    logger("Simple factor %s%s%s: %f" % ("T" if tws[0] else "-", "W" if tws[1] else "-", "S" if tws[2] else "-", f))
                sum_f *= f ** weight
                sum_w += weight
                n += 1
        return sum_f ** (n / sum_w)

    @property
    def schedules(self):
        from rainman.models.schedule import Schedule
        return Schedule.objects.filter(valve__envgroup=self)


class EnvItem(Model):
    class Meta(Model.Meta):
        db_table = "rainman_environmenteffect"

    def __unicode__(self):
        return u"@%s %s¦%s¦%s" % (self.group.name, self.temp, self.wind, self.sun)

    group = m.ForeignKey(EnvGroup, db_column="param_group_id", related_name="items")
    factor = m.FloatField(default=1.0, help_text="Factor to use at this data point")
    # these are single- or multi-dimensional data points for finding a reasonable factor
    temp = m.FloatField(blank=True, null=True, help_text="average temperature (°C)")
    wind = m.FloatField(blank=True, null=True, help_text="wind speed (m/s or whatever)")
    sun = m.FloatField(blank=True, null=True, help_text="how much sunshine was there (0-1)")
    # measured value
icarito/arbio-azucar-adoptarbol
loader.py
Python
gpl-3.0
2,243
0.002675
import csv
from dateutil.parser import parse
from adoptarbol.tree.models import Tree


def load(filename):
    with open(filename, encoding='utf-8') as f:
        reader = csv.reader(f)
        header = next(reader)

        def pos_for(field):
            return header.index(field)

        def float_or_none(string):
            try:
                return(float(string))
            except ValueError:
                return None

        for row in reader:
            # codigo = str(row[pos_for('codigo')]),
            print('Procesando ', row)
            tree = {'code': row[pos_for('codigo')],
                    'common_name': row[pos_for('especie')],
                    'scientific_name': row[pos_for('cientifico')],
                    'family': row[pos_for('familia')],
                    'coord_utm_e': float_or_none(row[pos_for('utm_x')].replace(',', '.')),
                    'coord_utm_n': float_or_none(row[pos_for('utm_y')].replace(',', '.')),
                    'coord_utm_zone_letter': row[pos_for('utm_zone')],
                    'coord_utm_zone_n': row[pos_for('utm_south')],
                    'coord_lat': float_or_none(row[pos_for('lat')].replace(',', '.')),
                    'coord_lon': float_or_none(row[pos_for('long')].replace(',', '.')),
                    'photo': row[pos_for('fotos')],
                    'diameter': row[pos_for('dia')],
                    'height': row[pos_for('alt')],
                    'circ': row[pos_for('circ')],
                    'base_area': float_or_none(row[pos_for('areabasal')].replace(',', '.')),
                    'size_class': row[pos_for('clasetamano')],
                    'quality': float_or_none(row[pos_for('calidad')].replace(',', '.')),
                    'relevance': row[pos_for('relevancia')],
                    'notes': row[pos_for('notas')],
                    'phenology': row[pos_for('fenologia')],
                    'observation': row[pos_for('obs')],
                    'surveyed_on': parse(row[pos_for('fechahora')]),
                    }

            t = Tree(**tree)
            t.save()

"""
if __name__ == '__main__':
    app = create_app(CONFIG)
    manager = Manager(app)

    with app.app_context():
        load()
"""
Talvalin/server-client-python
tableauserverclient/server/endpoint/datasources_endpoint.py
Python
mit
6,780
0.003835
from .endpoint import Endpoint from .exceptions import MissingRequiredFieldError from .fileuploads_endpoint import Fileuploads from .. import RequestFactory, DatasourceItem, PaginationItem, ConnectionItem import os import logging import copy import cgi from contextlib import closing # The maximum size of a file that can be published in a single request is 64MB FILESIZE_LIMIT = 1024 * 1024 * 64 # 64MB ALLOWED_FILE_EXTENSIONS = ['tds', 'tdsx', 'tde'] logger = logging.getLogger('tableau.endpoint.datasources') class Datasources(Endpoint): @property def baseurl(self): return "{0}/sites/{1}/datasources".format(self.parent_srv.baseurl, self.parent_srv.site_id) # Get all datasources def get(self, req_options=None): logger.info('Querying all datasources on site') url = self.baseurl server_response = self.get_request(url, req_options) pagination_item = PaginationItem.from_response(server_response.content) all_datasource_items = DatasourceItem.from_response(server_response.content) return all_datasource_items, pagination_item # Get 1 datasource by id def get_by_id(self, datasource_id): if not datasource_id: error = "Datasource ID undefined." raise ValueError(error) logger.info('Querying single datasource (ID: {0})'.format(datasource_id)) url = "{0}/{1}".format(self.baseurl, datasource_id) server_response = self.get_request(url) return DatasourceItem.from_response(server_response.content)[0] # Populate datasource item's connections def populate_connections(self, datasource_item): if not datasource_item.id: error = 'Datasource item missing ID. Datasource must be retrieved from server first.' raise MissingRequiredFieldError(error) url = '{0}/{1}/connections'.format(self.baseurl, datasource_item.id) server_response = self.get_request(url) datasource_item._set_connections(ConnectionItem.from_response(server_response.content)) logger.info('Populated connections for datasource (ID: {0})'.format(datasource_item.id)) # Delete 1 datasource by id def delete(self, datasource_id): if not datasource_id: error = "Datasource ID undefined." raise ValueError(error) url = "{0}/{1}".format(self.baseurl, datasource_id) self.delete_request(url) logger.info('Deleted single datasource (ID: {0})'.format(datasource_id)) # Download 1 datasource by id def download(self, datasource_id,
filepath=None): if not datasource_id: error = "Datasource ID undefined." raise ValueError(error) url = "{0}/{1}/content".format(self.baseurl, datasource_id) with closing(self.get_request(url, parameters={'stream': True})) as server
_response: _, params = cgi.parse_header(server_response.headers['Content-Disposition']) filename = os.path.basename(params['filename']) if filepath is None: filepath = filename elif os.path.isdir(filepath): filepath = os.path.join(filepath, filename) with open(filepath, 'wb') as f: for chunk in server_response.iter_content(1024): # 1KB f.write(chunk) logger.info('Downloaded datasource to {0} (ID: {1})'.format(filepath, datasource_id)) return os.path.abspath(filepath) # Update datasource def update(self, datasource_item): if not datasource_item.id: error = 'Datasource item missing ID. Datasource must be retrieved from server first.' raise MissingRequiredFieldError(error) url = "{0}/{1}".format(self.baseurl, datasource_item.id) update_req = RequestFactory.Datasource.update_req(datasource_item) server_response = self.put_request(url, update_req) logger.info('Updated datasource item (ID: {0})'.format(datasource_item.id)) updated_datasource = copy.copy(datasource_item) return updated_datasource._parse_common_tags(server_response.content) # Publish datasource def publish(self, datasource_item, file_path, mode, connection_credentials=None): if not os.path.isfile(file_path): error = "File path does not lead to an existing file." raise IOError(error) if not mode or not hasattr(self.parent_srv.PublishMode, mode): error = 'Invalid mode defined.' raise ValueError(error) filename = os.path.basename(file_path) file_extension = os.path.splitext(filename)[1][1:] # If name is not defined, grab the name from the file to publish if not datasource_item.name: datasource_item.name = os.path.splitext(filename)[0] if file_extension not in ALLOWED_FILE_EXTENSIONS: error = "Only {} files can be published as datasources.".format(', '.join(ALLOWED_FILE_EXTENSIONS)) raise ValueError(error) # Construct the url with the defined mode url = "{0}?datasourceType={1}".format(self.baseurl, file_extension) if mode == self.parent_srv.PublishMode.Overwrite or mode == self.parent_srv.PublishMode.Append: url += '&{0}=true'.format(mode.lower()) # Determine if chunking is required (64MB is the limit for single upload method) if os.path.getsize(file_path) >= FILESIZE_LIMIT: logger.info('Publishing {0} to server with chunking method (datasource over 64MB)'.format(filename)) upload_session_id = Fileuploads.upload_chunks(self.parent_srv, file_path) url = "{0}&uploadSessionId={1}".format(url, upload_session_id) xml_request, content_type = RequestFactory.Datasource.publish_req_chunked(datasource_item, connection_credentials) else: logger.info('Publishing {0} to server'.format(filename)) with open(file_path, 'rb') as f: file_contents = f.read() xml_request, content_type = RequestFactory.Datasource.publish_req(datasource_item, filename, file_contents, connection_credentials) server_response = self.post_request(url, xml_request, content_type) new_datasource = DatasourceItem.from_response(server_response.content)[0] logger.info('Published {0} (ID: {1})'.format(filename, new_datasource.id)) return new_datasource
PermeAgility/FrameworkBenchmarks
toolset/run-tests.py
Python
bsd-3-clause
11,680
0.009589
#!/usr/bin/env python import argparse import ConfigParser import sys import os import multiprocessing import itertools import copy import subprocess from pprint import pprint from benchmark.benchmarker import Benchmarker from setup.linux.unbuffered import Unbuffered from setup.linux import setup_util from ast import literal_eval # Enable cross-platform colored output from colorama import init init() class StoreSeqAction(argparse.Action): '''Helper class for parsing a sequence from the command line''' def __init__(self, option_strings, dest, nargs=None, **kwargs): super(StoreSeqAction, self).__init__(option_strings, dest, type=str, **kwargs) def __call__(self, parser, namespace, values, option_string=None): setattr(namespace, self.dest, self.parse_seq(values)) def parse_seq(self, argument): result = argument.split(',') sequences = [x for x in result if ":" in x] for sequence in sequences: try: (start,step,end) = sequence.split(':') except ValueError: print " Invalid: %s" % sequence print " Requires start:step:end, e.g. 1:2:10" raise result.remove(sequence) result = result + range(int(start), int(end), int(step)) return [abs(int(item)) for item in result] ################################################################################################### # Main ################################################################################################### def main(argv=None): ''' Runs the program. There are three ways to pass arguments 1) environment variables TFB_* 2) configuration file benchmark.cfg 3) command line flags In terms of precedence, 3 > 2 > 1, so config file trumps environment variables but command line flags have the final say ''' # Do argv default this way, as doing it in the functional declaration sets it at compile time if argv is None: argv = sys.argv # Enable unbuffered output so messages will appear in the proper order with subprocess output. sys.stdout=Unbuffered(sys.stdout) # Update python environment # 1) Ensure the current directory (which should be the benchmark home directory) is in the path so that the tests can be imported. sys.path.append('.') # 2) Ensure toolset/setup/linux is in the path so that the tests can "import setup_util". sys.path.append('toolset/setup/linux') # Update environment for shell scripts fwroot = setup_util.get_fwroot() if not fwroot: fwroot = os.getcwd() setup_util.replace_environ(config='config/benchmark_profile', root=fwroot) print "FWROOT is %s"%setup_util.get_fwroot() conf_parser = argparse.ArgumentParser( description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter, add_help=False) conf_parser.add_argument('--conf_file', default='benchmark.cfg', metavar='FILE', help='Optional configuration file to provide argument defaults. All config options can be overridden using the command line.') args, remaining_argv = conf_parser.parse_known_args() try: with open (args.conf_file): config = ConfigParser.SafeConfigParser() config.read([os.getcwd() + '/' + args.conf_file]) defaults = dict(config.items("Defaults")) # Convert strings into proper python types for k,v in defaults.iteritems(): try: defaults[k] = literal_eval(v) except Exception: pass except IOError: if args.conf_file != 'benchmark.cfg': print 'Configuration file not found!' 
defaults = { "client-host":"localhost"} ########################################################## # Set up default values ########################################################## serverHost = os.environ.get('TFB_SERVER_HOST') clientHost = os.environ.get('TFB_CLIENT_HOST') clientUser = os.environ.get('TFB_CLIENT_USER') clientIden = os.environ.get('TFB_CLIENT_IDENTITY_FILE') runnerUser = os.environ.get('TFB_RUNNER_USER') databaHost = os.getenv('TFB_DATABASE_HOST', clientHost) databaUser = os.getenv('TFB_DATABASE_USER', clientUser) dbIdenFile = os.getenv('TFB_DATABASE_IDENTITY_FILE', clientIden) maxThreads = 8 try: maxThreads = multiprocessing.cpu_count() except Exception: pass ########################################################## # Set up argument parser ########################################################## parser = argparse.ArgumentParser(description="Install or run the Framework Benchmarks test suite.", parents=[conf_parser], formatter_class=argparse.ArgumentDefaultsHelpFormatter, epilog='''If an argument includes (type int-sequence), then it accepts integer lists in multiple forms. Using a single number e.g. 5 will create a list [5]. Using commas will create a list containing those values e.g. 1,3,6 creates [1, 3, 6]. Using three colon-separated numbers of start:step:end will create a list, using the semantics of python's range function, e.g. 1:3:15 creates [1, 4, 7, 10, 13] while 0:1:5 creates [0, 1, 2, 3, 4] ''') # SSH options parser.add_argument('-s', '--server-host', default=serverHost, help='The application server.') parser.add_argument('-c', '--client-host', default=clientHost, help='The client / load generation server.') parser.add_argument('-u', '--client-user', default=clientUser, help='The username to use for SSH to the client instance.') parser.add_argument('-r', '--runner-user', default=runnerUser, help='The user to run each test as.') parser.add_argument('-i', '--client-identity-file', dest='client_identity_file', default=clientIden, help='The key to use for SSH to the client instance.') parser.add_argument('-d', '--database-host', default=databaHost, help='The database server. If not provided, defaults to the value of --client-host.') parser.add_argument('--database-user', default=databaUser, help='The username to use for SSH to the database instance. If not provided, defaults to the value of --client-user.') parser.add_argument('--database-identity-file', default=dbIdenFile, dest='database_identity_file', help='The key to use for SSH to the database instance. If not provided, defaults to the value of --client-identity-file.') parser.add_argument('-p', dest='password_prompt', action='store_true', help='Prompt for password') # Install options parser.add_argument('--install', choices=['client', 'database', 'server', 'all'], default=None, help='Runs installation script(s) before continuing on to execute the tests.') parser.add_argument('--install-error-action', choices=['abort', 'continue'], default='continue', help='action to take in case of error during installation') parser.add_argument('--install-strategy', choices=['unified', 'pertest'], default='unified', help='''Affects : With unified, all server software is installed into a single directory. With pertest each test gets its own installs directory, but installation takes longer''') parser.add_argument('--install-only', action='store_true', default=False, help='Do not run benchmark or verification, just install and exit') par
ser.add_argument('--clean', action='store_true', defau
lt=False, help='Removes the results directory') parser.add_argument('--clean-all', action='store_true', dest='clean_all', default=False, help='Removes the results and installs directories') # Test options parser.add_argument('--test', nargs='+', help='names of tests to run') parser.add_argument('--exclude', nargs='+', help='names of tests to exclude') parser.add_argument('--type', choices=['all', 'json', 'db', 'query', 'fortune', 'update', 'plaintext'], default='all', help='which type of test to run') parser.add_argument('-m', '--mode', choices=['benchmark', 'verify'], default
JulienMcJay/eclock
windows/Python27/Lib/site-packages/Cython/Compiler/Tests/TestVisitor.py
Python
gpl-2.0
2,228
0.002693
from Cython.Compiler.ModuleNode import ModuleNode
from Cython.Compiler.Symtab import ModuleScope
from Cython.TestUtils import TransformTest
from Cython.Compiler.Visitor import MethodDispatcherTransform
from Cython.Compiler.ParseTreeTransforms import (
    NormalizeTree, AnalyseDeclarationsTransform,
    AnalyseExpressionsTransform, InterpretCompilerDirectives)


class TestMethodDispatcherTransform(TransformTest):
    _tree = None

    def _build_tree(self):
        if self._tree is None:
            context = None

            def fake_module(node):
                scope = ModuleScope('test', None, None)
                return ModuleNode(node.pos, doc=None, body=node,
                                  scope=scope, full_module_name='test',
                                  directive_comments={})
            pipeline = [
                fake_module,
                NormalizeTree(context),
                InterpretCompilerDirectives(context, {}),
                AnalyseDeclarationsTransform(context),
                AnalyseExpressionsTransform(context),
            ]
            self._tree = self.run_pipeline(pipeline, u"""
            cdef bytes s = b'asdfg'
            cdef dict d = {1:2}
            x = s * 3
            d.get('test')
            """)
        return self._tree

    def test_builtin_method(self):
        calls = [0]

        class Test(MethodDispatcherTransform):
            def _handle_simple_method_dict_get(self, node, func, args, unbound):
                calls[0] += 1
                return node

        tree = self._build_tree()
        Test(None)(tree)
        self.assertEqual(1, calls[0])

    def test_binop_method(self):
        calls = {'bytes': 0, 'object': 0}

        class Test(MethodDispatcherTransform):
            def _handle_simple_method_bytes___mul__(self, node, func, args, unbound):
                calls['bytes'] += 1
                return node

            def _handle_simple_method_object___mul__(self, node, func, args, unbound):
                calls['object'] += 1
                return node

        tree = self._build_tree()
        Test(None)(tree)
        self.assertEqual(1, calls['bytes'])
        self.assertEqual(0, calls['object'])
OptimusGREEN/repo67beta
OGT Installer/plugin.program.ogtools/downloader.py
Python
gpl-3.0
2,845
0.011951
""" Copyright (C) 2016 ECHO Wizard : Modded by TeamGREEN This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have rec
eived a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. """ import xbmcgui import urllib import time from urllib import FancyURLopener import sys class MyOpener(FancyURLopener): version = "W
hosTheDaddy?" myopener = MyOpener() urlretrieve = MyOpener().retrieve urlopen = MyOpener().open AddonTitle= "[COLORgreen]OptimusGREEN Tools[/COLOR]" dialog = xbmcgui.Dialog() def download(url, dest, dp = None): if not dp: dp = xbmcgui.DialogProgress() # dp.create("[COLORgold]Download In Progress[/COLOR]"' ',' ', ' ') # dp.update(0) start_time=time.time() urlretrieve(url, dest, lambda nb, bs, fs: _pbhook(nb, bs, fs, dp, start_time)) def auto(url, dest, dp = None): dp = xbmcgui.DialogProgress() start_time=time.time() urlretrieve(url, dest, lambda nb, bs, fs: _pbhookauto(nb, bs, fs, dp, start_time)) def _pbhookauto(numblocks, blocksize, filesize, url, dp): none = 0 def _pbhook(numblocks, blocksize, filesize, dp, start_time): try: percent = min(numblocks * blocksize * 100 / filesize, 100) currently_downloaded = float(numblocks) * blocksize / (1024 * 1024) kbps_speed = numblocks * blocksize / (time.time() - start_time) if kbps_speed > 0: eta = (filesize - numblocks * blocksize) / kbps_speed else: eta = 0 kbps_speed = kbps_speed / 1024 mbps_speed = kbps_speed / 1024 total = float(filesize) / (1024 * 1024) mbs = '[COLOR green]%.02f MB[/COLOR] of [COLOR white][B]%.02f MB[/B][/COLOR]' % (currently_downloaded, total) e = '[COLOR white][B]Speed: [/B][/COLOR][COLOR green]%.02f Mb/s ' % mbps_speed + '[/COLOR]' e += '[COLOR white][B]ETA: [/B][/COLOR][COLOR green]%02d:%02d' % divmod(eta, 60) + '[/COLOR]' # dp.update(percent, "",mbs, e) except: percent = 100 # dp.update(percent) # if dp.iscanceled(): # dialog.ok(AddonTitle, 'The download was cancelled.') # dp.close() quit()
abstract-open-solutions/l10n-italy
l10n_it_ricevute_bancarie/models/account_config.py
Python
agpl-3.0
2,038
0
# -*- coding: utf-8 -*-
##############################################################################
#
#    Copyright (C) 2012 Andrea Cometa.
#    Email: [email protected]
#    Web site: http://www.andreacometa.it
#    Copyright (C) 2012 Agile Business Group sagl (<http://www.agilebg.com>)
#    Copyright (C) 2012 Domsense srl (<http://www.domsense.com>)
#    Copyright (C) 2012 Associazione OpenERP Italia
#    (<http://www.odoo-italia.org>).
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as published
#    by the Free Software Foundation, either version 3 of the License, or
#    (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

from openerp import models, fields


class AccountConfigSettings(models.TransientModel):
    _inherit = 'account.config.settings'

    due_cost_service_id = fields.Many2one(
        related='company_id.due_cost_service_id',
        help='Default Service for RiBa Due Cost (collection fees) on invoice',
        domain=[('type', '=', 'service')])

    def default_get(self, cr, uid, fields, context=None):
        res = super(AccountConfigSettings, self).default_get(
            cr, uid, fields, context)
        if res:
            user = self.pool['res.users'].browse(cr, uid, uid, context)
            res['due_cost_service_id'] = user.company_id.due_cost_service_id.id
        return res


class ResCompany(models.Model):
    _inherit = 'res.company'

    due_cost_service_id = fields.Many2one('product.product')
alphagov/notifications-admin
app/extensions.py
Python
mit
340
0
from notifications_utils.clients.antivirus.antivirus_client import (
    AntivirusClient,
)
from notifications_utils.clients.redis.redis_client import RedisClient
from notifications_utils.clients.zendesk.zendesk_client import ZendeskClient

antivirus_client = AntivirusClient()
zendesk_client = ZendeskClient()
redis_client = RedisClient()
taschik/ramcloud-load-manager
scripts/recoverymetrics.py
Python
isc
32,500
0.003692
#!/usr/bin/env python # Copyright (c) 2011 Stanford University # # Permission to use, copy, modify, and distribute this software for any # purpose with or without fee is hereby granted, provided that the above # copyright notice and this permission notice appear in all copies. # # THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR(S) DISCLAIM ALL WARRANTIES # WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF # MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL AUTHORS BE LIABLE FOR # ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES # WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN # ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF # OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. """ This program scans the log files generated by a RAMCloud recovery, extracts performance metrics, and print a summary of interesting data from those metrics. """ from __future__ import division, print_function from glob import glob from optparse import OptionParser from pprint import pprint from functools import partial import math import os import random import re import sys from common import * __all
__ = ['parseRecovery', 'makeReport'] ### Utilities: class AttrDict(dict): """A mapping with string keys that aliases x.y syntax to x['y'] syntax. The attribute syntax is easier to read and type than the item syntax. """ def __getattr__(self, name): if name not in self: self[name] = AttrDict() return self[name] def __setattr__(self, name, value):
self[name] = value def __delattr__(self, name): del self[name] def assign(self, path, value): """ Given a hierarchical path such as 'x.y.z' and a value, perform an assignment as if the statement self.x.y.z had been invoked. """ names = path.split('.') container = self for name in names[0:-1]: if name not in container: container[name] = AttrDict() container = container[name] container[names[-1]] = value def parse(f): """ Scan a log file containing metrics for several servers, and return a list of AttrDicts, one containing the metrics for each server. """ list = [] for line in f: match = re.match('.* Metrics: (.*)$', line) if not match: continue info = match.group(1) start = re.match('begin server (.*)', info) if start: list.append(AttrDict()) # Compute a human-readable name for this server (ideally # just its short host name). short_name = re.search('host=([^,]*)', start.group(1)) if short_name: list[-1].server = short_name.group(1) else: list[-1].server = start.group(1) continue; if len(list) == 0: raise Exception, ('metrics data before "begin server" in %s' % f.name) var, value = info.split(' ') list[-1].assign(var, int(value)) if len(list) == 0: raise Exception, 'no metrics in %s' % f.name return list def maxTuple(tuples): """Return the tuple whose first element is largest.""" maxTuple = None; maxValue = 0.0; for tuple in tuples: if tuple[0] > maxValue: maxValue = tuple[0] maxTuple = tuple return maxTuple def minTuple(tuples): """Return the tuple whose first element is smallest.""" minTuple = None; minValue = 1e100; for tuple in tuples: if tuple[0] < minValue: minValue = tuple[0] minTuple = tuple return minTuple def values(s): """Return a sequence of the second items from a sequence.""" return [p[1] for p in s] def scale(points, scalar): """Try really hard to scale 'points' by 'scalar'. @type points: mixed @param points: Points can either be: - a sequence of pairs, in which case the second item will be scaled, - a list, or - a number @type scalar: number @param scalar: the amount to multiply points by """ if type(points) is list: try: return [(k, p * scalar) for k, p in points] except TypeError: return [p * scalar for p in points] else: return points * scalar def toString(x): """Return a reasonable string conversion for the argument.""" if type(x) is int: return '{0:7d}'.format(x) elif type(x) is float: return '{0:7.2f}'.format(x) else: return '{0:>7s}'.format(x) ### Summary functions # This is a group of functions that can be passed in to Section.line() to # affect how the line is summarized. # Each one takes the following arguments: # - values, which is a list of numbers # - unit, which is a short string specifying the units for values # Each returns a list, possibly empty, of strings to contribute to the summary # text. The summary strings are later displayed with a delimiter or line break # in between. 
def AVG(values, unit): """Returns the average of its values.""" if max(values) > min(values): r = toString(sum(values) / len(values)) if unit: r += ' ' + unit r += ' avg' else: r = toString(values[0]) if unit: r += ' ' + unit return [r] def MIN(values, unit): """Returns the minimum of the values if the range is non-zero.""" if len(values) < 2 or max(values) == min(values): return [] r = toString(min(values)) if unit: r += ' ' + unit r += ' min' return [r] def MAX(values, unit): """Returns the maximum of the values if the range is non-zero.""" if len(values) < 2 or max(values) == min(values): return [] r = toString(max(values)) if unit: r += ' ' + unit r += ' max' return [r] def SUM(values, unit): """Returns the sum of the values if there are any.""" if len(values) == 0: return [] r = toString(sum(values)) if unit: r += ' ' + unit r += ' total' return [r] def FRAC(total): """Returns a function that shows the average percentage of the values from the total given.""" def realFrac(values, unit): r = toString(sum(values) / len(values) / total * 100) r += '%' if max(values) > min(values): r += ' avg' return [r] return realFrac def CUSTOM(s): """Returns a function that returns the string or list of strings given. This is useful when you need custom processing that doesn't fit in any of the other summary functions and is too specific to merit a new summary function. """ def realCustom(values, unit): if type(s) is list: return s else: return [s] return realCustom ### Report structure: class Report(object): """This produces a report which can be uploaded to dumpstr. It is essentially a list of Sections. """ def __init__(self): self.sections = [] def add(self, section): """Add a new Section to the report.""" self.sections.append(section) return section def jsonable(self): """Return a representation of the report that can be JSON-encoded in dumpstr format. """ doc = [section.jsonable() for section in self.sections if section] return doc class Section(object): """A part of a Report consisting of lines with present metrics.""" def __init__(self, key): """ @type key: string @param key: a stable, unique string identifying the section This should not ever be changed, as dumpstr's labels and descriptions are looked up by this key. """ self.key = key self.lines = [] def __len__(self): return len(self.lines) def jsonable(self): """Return a representation of the section that can be JSON-encoded in dumpstr format. """ return {'key': self.key, 'lines':
mjg2203/edx-platform-seas
lms/djangoapps/courseware/roles.py
Python
agpl-3.0
7,324
0.001912
""" Classes used to model the roles used in the courseware. Each role is responsible for checking membership, adding users, removing users, and listing members """ from abc import ABCMeta, abstractmethod from django.contrib.auth.models import User, Group from xmodule.modulestore import Location from xmodule.modulestore.exceptions import InvalidLocationError, ItemNotFoundError from xmodule.modulestore.django import loc_mapper from xmodule.modulestore.locator import CourseLocator, Locator class CourseContextRequired(Exception): """ Raised when a course_context is required to determine permissions """ pass class AccessRole(object): """ Object representing a role with particular access to a resource """ __metaclass__ = ABCMeta @abstractmethod def has_user(self, user): # pylint: disable=unused-argument """ Return whether the supplied django user has access to this role. """ return False @abstractmethod def add_users(self, *users): """ Add the role to the supplied django users. """ pass @abstractmethod def remove_users(self, *users): """ Remove the role from the supplied django users. """ pass @abstractmethod def users_with_role(self): """ Return a django QuerySet for all of the users with this role """ return User.objects.none() class GlobalStaff(AccessRole): """ The global staff role """ def has_user(self, user): return user.is_staff def add_users(self, *users): for user in users: user.is_staff = True user.save() def remove_users(self, *users): for user in users: user.is_staff = False user.save() def users_with_role(self): raise Exception("This operation is un-indexed, and shouldn't be used") class GroupBasedRole(AccessRole): """ A role based on membership to any of a set of groups. """ def __init__(self, group_names): """ Create a GroupBasedRole from a list of group names The first element of `group_names` will be the preferred group to use when adding a user to this Role. If a user is a member of any of the groups in the list, then they wi
ll be consider a member of the Role """ self._group_names = [name.lower() for name in group_names] def has_user(self, user): """
Return whether the supplied django user has access to this role. """ # pylint: disable=protected-access if not user.is_authenticated(): return False if not hasattr(user, '_groups'): user._groups = set(name.lower() for name in user.groups.values_list('name', flat=True)) return len(user._groups.intersection(self._group_names)) > 0 def add_users(self, *users): """ Add the supplied django users to this role. """ group, _ = Group.objects.get_or_create(name=self._group_names[0]) group.user_set.add(*users) for user in users: if hasattr(user, '_groups'): del user._groups def remove_users(self, *users): """ Remove the supplied django users from this role. """ group, _ = Group.objects.get_or_create(name=self._group_names[0]) group.user_set.remove(*users) for user in users: if hasattr(user, '_groups'): del user._groups def users_with_role(self): """ Return a django QuerySet for all of the users with this role """ return User.objects.filter(groups__name__in=self._group_names) class CourseRole(GroupBasedRole): """ A named role in a particular course """ def __init__(self, role, location, course_context=None): """ Location may be either a Location, a string, dict, or tuple which Location will accept in its constructor, or a CourseLocator. Handle all these giving some preference to the preferred naming. """ # TODO: figure out how to make the group name generation lazy so it doesn't force the # loc mapping? location = Locator.to_locator_or_location(location) # direct copy from auth.authz.get_all_course_role_groupnames will refactor to one impl asap groupnames = [] # pylint: disable=no-member if isinstance(location, Location): try: groupnames.append('{0}_{1}'.format(role, location.course_id)) except InvalidLocationError: # will occur on old locations where location is not of category course if course_context is None: raise CourseContextRequired() else: groupnames.append('{0}_{1}'.format(role, course_context)) try: locator = loc_mapper().translate_location(location.course_id, location, False, False) groupnames.append('{0}_{1}'.format(role, locator.package_id)) except (InvalidLocationError, ItemNotFoundError): # if it's never been mapped, the auth won't be via the Locator syntax pass # least preferred legacy role_course format groupnames.append('{0}_{1}'.format(role, location.course)) elif isinstance(location, CourseLocator): groupnames.append('{0}_{1}'.format(role, location.package_id)) # handle old Location syntax old_location = loc_mapper().translate_locator_to_location(location, get_course=True) if old_location: # the slashified version of the course_id (myu/mycourse/myrun) groupnames.append('{0}_{1}'.format(role, old_location.course_id)) # add the least desirable but sometimes occurring format. 
groupnames.append('{0}_{1}'.format(role, old_location.course)) super(CourseRole, self).__init__(groupnames) class OrgRole(GroupBasedRole): """ A named role in a particular org """ def __init__(self, role, location): # pylint: disable=no-member location = Location(location) super(OrgRole, self).__init__(['{}_{}'.format(role, location.org)]) class CourseStaffRole(CourseRole): """A Staff member of a course""" def __init__(self, *args, **kwargs): super(CourseStaffRole, self).__init__('staff', *args, **kwargs) class CourseInstructorRole(CourseRole): """A course Instructor""" def __init__(self, *args, **kwargs): super(CourseInstructorRole, self).__init__('instructor', *args, **kwargs) class CourseBetaTesterRole(CourseRole): """A course Beta Tester""" def __init__(self, *args, **kwargs): super(CourseBetaTesterRole, self).__init__('beta_testers', *args, **kwargs) class OrgStaffRole(OrgRole): """An organization staff member""" def __init__(self, *args, **kwargs): super(OrgStaffRole, self).__init__('staff', *args, **kwargs) class OrgInstructorRole(OrgRole): """An organization instructor""" def __init__(self, *args, **kwargs): super(OrgInstructorRole, self).__init__('instructor', *args, **kwargs)
RincewindWizzard/gruppenkasse-gtk
src/tests/test_model.py
Python
lgpl-3.0
1,143
0.0035
#!/usr/bin/python3
# -*- encoding: utf-8 -*-
import unittest
from model import *
from example_data import expenses, payments, participations, persons, events

kasse = Gruppenkasse.create_new()
kasse.fill_with(expenses, payments, participations)


class TestGruppenkasse(unittest.TestCase):
    def setUp(self):
        ...

    def test_persons(self):
        person_names = list(map(lambda p: p.name, kasse.persons))
        for name in person_names:
            self.assertTrue(name in persons, msg=name)

    def test_events(self):
        print(kasse.person_dict)
        event_names = list(map(lambda p: p.name, kasse.events))
        for name in event_names:
            self.assertTrue(name in events, msg=name)
        for name in events:
            self.assertTrue(name in event_names, msg=name)

    def test_event(self):
        for event in kasse.events:
            ...  # print(event)

    def test_person(self):
        for person in kasse.persons:
            print(person, "\t{:5.2f}".format(person.balance / 100))

    def test_payments(self):
        print(kasse.payments)


if __name__ == '__main__':
    unittest.main()
mfraezz/osf.io
api_tests/nodes/views/test_node_wiki_list.py
Python
apache-2.0
19,731
0.001977
import mock import pytest from rest_framework import exceptions from addons.wiki.models import WikiPage from addons.wiki.tests.factories import WikiFactory, WikiVersionFactory from api.base.settings.defaults import API_BASE from api_tests.wikis.views.test_wiki_detail import WikiCRUDTestCase from framework.auth.core import Auth from osf_tests.factories import ( AuthUserFactory, ProjectFactory, OSFGroupFactory, RegistrationFactory, ) from osf.utils.permissions import WRITE, READ from tests.base import fake @pytest.fixture() def user(): return AuthUserFactory() def create_wiki_payload(name): return { 'data': { 'type': 'wikis', 'attributes': { 'name': name } } } @pytest.mark.django_db class TestNodeWikiList: @pytest.fixture() def add_project_wiki_page(self): def add_page(node, user): with mock.patch('osf.models.AbstractNode.update_search'): wiki_page = WikiFactory(node=node, user=user) WikiVersionFactory(wiki_page=wiki_page) return wiki_page return add_page @pytest.fixture() def non_contrib(self): return AuthUserFactory() @pytest.fixture() def public_project(self, user): return ProjectFactory(is_public=True, creator=user) @pytest.fixture() def public_wiki(self, add_project_wiki_page, user, public_project): return add_project_wiki_page(public_project, user) @pytest.fixture() def public_url(self, public_project, public_wiki): return '/{}nodes/{}/wikis/'.format(API_BASE, public_project._id) @pytest.fixture() def private_project(self, user): return ProjectFactory(creator=user) @pytest.fixture() def private_wiki(self, add_project_wiki_page, user, private_project): return add_project_wiki_page(private_project, user) @pytest.fixture() def private_url(self, private_project, private_wiki): return '/{}nodes/{}/wikis/'.format(API_BASE, private_project._id) @pytest.fixture() def public_registration(self, user, public_project, public_wiki): public_registration = RegistrationFactory( project=public_project, user=user, is_public=True) return public_registration @pytest.fixture() def public_registration_url(self, public_registration): return '/{}registrations/{}/wikis/'.format( API_BASE, public_registration._id) @pytest.fixture() def private_registration(self, user, private_project, private_wiki): private_registration = RegistrationFactory( project=private_project, user=user) return private_registration @pytest.fixture() def private_registration_url(self, private_registration): return '/{}registrations/{}/wikis/'.format( API_BASE, private_registration._id) def test_return_wikis( self, app, user, non_contrib, private_registration, private_project, public_wiki, private_wiki, public_url, private_url, private_registration_url): # test_return_public_node_wikis_logged_out_user res = app.get(public_url) assert res.status_code == 200 wiki_ids = [wiki['id'] for wiki in res.json['data']] assert public_wiki._id in wiki_ids # test_return_public_node_wikis_logged_in_non_contributor res = app.get(public_url, auth=non_contrib.auth) assert res.status_code == 200 wiki_ids = [wiki['id'] for wiki in res.json['data']] assert public_wiki._id in wiki_ids # test_return_public_node_wikis_logged_in_contributor res = app.get(public_url, auth=user.auth) assert res.status_code == 200
wiki_ids = [wiki['id'] for wiki in res.json['data']] assert public_wiki._id in wiki_ids # test_return_private_node_wikis_logged_out_user res = app.get(private_url, expect_errors=True) assert res.status_code == 401 assert res.json['errors'][0]['detail'] == exceptions.NotAuthenticated.default_detai
l # test_return_private_node_wikis_logged_in_osf_group_member group_mem = AuthUserFactory() group = OSFGroupFactory(creator=group_mem) private_project.add_osf_group(group, READ) res = app.get(private_url, auth=group_mem.auth) assert res.status_code == 200 wiki_ids = [wiki['id'] for wiki in res.json['data']] assert private_wiki._id in wiki_ids # test_return_private_node_wikis_logged_in_non_contributor res = app.get(private_url, auth=non_contrib.auth, expect_errors=True) assert res.status_code == 403 assert res.json['errors'][0]['detail'] == exceptions.PermissionDenied.default_detail # test_return_private_node_wikis_logged_in_contributor res = app.get(private_url, auth=user.auth) assert res.status_code == 200 wiki_ids = [wiki['id'] for wiki in res.json['data']] assert private_wiki._id in wiki_ids # test_return_registration_wikis_logged_out_user res = app.get(private_registration_url, expect_errors=True) assert res.status_code == 401 assert res.json['errors'][0]['detail'] == exceptions.NotAuthenticated.default_detail # test_return_registration_wikis_logged_in_non_contributor res = app.get( private_registration_url, auth=non_contrib.auth, expect_errors=True) assert res.status_code == 403 assert res.json['errors'][0]['detail'] == exceptions.PermissionDenied.default_detail # test_return_registration_wikis_logged_in_contributor res = app.get(private_registration_url, auth=user.auth) assert res.status_code == 200 wiki_ids = [wiki['id'] for wiki in res.json['data']] assert WikiPage.objects.get_for_node(private_registration, 'home')._id in wiki_ids def test_wikis_not_returned_for_withdrawn_registration( self, app, user, private_registration, private_registration_url): private_registration.is_public = True withdrawal = private_registration.retract_registration( user=user, save=True) token = list(withdrawal.approval_state.values())[0]['approval_token'] # TODO: Remove mocking when StoredFileNode is implemented with mock.patch('osf.models.AbstractNode.update_search'): withdrawal.approve_retraction(user, token) withdrawal.save() res = app.get( private_registration_url, auth=user.auth, expect_errors=True) assert res.status_code == 403 assert res.json['errors'][0]['detail'] == exceptions.PermissionDenied.default_detail def test_do_not_return_disabled_wiki(self, app, user, public_url, public_project): public_project.delete_addon('wiki', auth=Auth(user)) res = app.get(public_url, expect_errors=True) assert res.status_code == 404 def test_relationship_links( self, app, user, public_project, private_project, public_registration, private_registration, public_url, private_url, public_registration_url, private_registration_url): # test_public_node_wikis_relationship_links res = app.get(public_url) expected_nodes_relationship_url = '{}nodes/{}/'.format( API_BASE, public_project._id) expected_comments_relationship_url = '{}nodes/{}/comments/'.format( API_BASE, public_project._id) assert expected_nodes_relationship_url in res.json['data'][ 0]['relationships']['node']['links']['related']['href'] assert expected_comments_relationship_url in res.json['data'][ 0]['relationships']['comments']['links']['related']['href'] # test_private_node_wikis_relationship_links res = app.get(private_url, auth=user.auth) expected_nodes_relationship_url = '{}nodes/{}/'.format( API_BASE, private_project._id) expected_comments_relationship_url = '{}nodes/{}/comments/'.format( API_BAS
nhquiroz/hacker-rank
python/introduction/mod-div-mod.py
Python
mit
239
0
from __future__ import division

# Read two integers from STDIN
a = int(raw_input())
b = int(raw_input())

# Print integer division, a//b
print(a // b)

# Print modulo, a%b
print(a % b)

# Print divmod of a and b
print(divmod(a, b))
ZenithDK/mopidy-primare
mopidy_primare/primare_serial.py
Python
apache-2.0
17,724
0
"""Primare amplier control. This module allows you to control your Primare I22 and I32 amplifier from the command line using Primare's binary protocol via the RS232 port on the amplifier. """ from __future__ import with_statement import binascii import logging import struct import time # from twisted.logger import Logger # logger = Logger() logger = logging.getLogger(__name__) # Primare documentation on their RS232 protocol writes this: # == Command structure == # Commands are sent to the device using the following format, where each field # is one byte sent to the device: # <STX> <command> <variable> [<value>] <DLE> <ETX> # The <command> can be either 'W' for write or 'R' for read. The variable # table that follows specifies which variables supports which <command> types. # If verbose is active, the device will send replies on the following format # either when a command is received or when the variable in question is # changed on the device. # <STX> <variable> [<value>] <DLE> <ETX> # Note that the <value> field can contain several bytes of data for certain # commands. # == Command special chars == # <STX> = 0x02 # <DLE> = 0x10 # <ETX> = 0x03 # Write = 0x57 (ASCII: W) # Read = 0x52 (ASCII: R) # == Escape sequence == # If any variable or value byte is equal to <DLE>, this byte must be sent # twice to avoid confusing this with end of message. # Protocol settings # Baud rate: 4800 # Bits: 8 # Stop bits: 1 # Parity: None # == Example == # The specific variables and commands will be defined later, here are # examples on what the commands looks like in bytes. # This is an example of a command to toggle verbose setting. # Command is write (0x57), variable is 13 (0x0d) # and value is 0. The footer is x10 x03 # 0x02 0x57 0x0xd 0x00 0x10 0x03 POS_STX = slice(0, 1) POS_DLE_ETX = slice(-2, None) POS_CMD_VAR = slice(2, 3) POS_REPLY_VAR = slice(1, 2) POS_REPLY_DATA = slice(2, -2) BYTE_STX = '\x02' BYTE_WRITE = '\x57' BYTE_READ = '\x52' BYTE_DLE_ETX = '\x10\x03' INDEX_CMD = 0 INDEX_VARIABLE = 1 INDEX_REPLY = 2 INDEX_WAIT = 3 PRIMARE_CMD = { 'power_toggle': ['W', '0100', '01', True], 'power_set': ['W', '81YY', '01YY', False], 'input_set': ['W', '82YY', '02YY', True], 'input_next': ['W', '0201', '02', True], 'input_prev': ['W', '02FF', '02', True], 'volume_set': ['W', '83YY', '03YY', True], 'volume_get': ['W', '0300', '03', True], 'volume_up': ['W', '0301', '03', True], 'volume_down': ['W', '03FF', '03', True], 'balance_adjust': ['W', '04YY', '04', True], 'balance_set': ['W', '84YY', '04YY', True], 'mute_toggle': ['W', '0900', '09', True], 'mute_set': ['W', '89YY', '09YY', True], 'dim_cycle': ['W', '0A00', '0A', True], 'dim_set': ['W', '8AYY', '0AYY', True], 'verbose_toggle': ['W', '0D00', '0D', True], 'verbose_set': ['W', '8DYY', '0DYY', True], 'menu_toggle': ['W', '0E01', '0E', True], 'menu_set': ['W', '8EYY', '0EYY', True], 'remote_cmd': ['W', '0FYY', 'YY', True], 'ir_input_toggle': ['W', '1200', '12', True], 'ir_input_set': ['W', '92YY', '12YY', True], 'recall_factory_settings': ['R', '1300', '', False], 'inputname_current_get': ['R', '1400', '14YY', True], 'inputname_specific_get': ['R', '94YY', '94YY', True], 'manufacturer_get': ['R', '1500', '15', True], 'modelname_get': ['R', '1600', '16', True], 'swversion_get': ['R', '1700', '17', True] } PRIMARE_REPLY = { '01': 'power', '02': 'input', '03': 'volume', '04': 'balance', '09': 'mute', '0a': 'dim', '0d': 'verbose', '0e': 'menu', '12': 'ir_input', '13': 'recall_factory_settings', '14': 'inputname', '15': 'manufacturer', '16': 'modelname', '17': 
'swversion' } # TODO: # FIXING Better reply handling than table? # * Better error handling # After suspend/resume, if volume up/down fails (or similar), # try turning amp on # # LATER # * v2: Implement as module(?), not class, for multiple writers/subscribers # (singleton) # Seems like a factory would be better, so 'import primare_serial' then # primare_serial.initComs() which then creates the single Serial object. # * v2: Add notification callback mechanism to notify users of changes on # amp (dials or other SW) # http://bit.ly/WGRn0g # Better idea: websocket # http://forums.lantronix.com/showthread.php?p=3131 # * ... class PrimareController(): """This class provides methods for controlling a Primare amplifier.""" # Number of volume levels the amplifier supports. # Primare amplifiers have 79 levels VOLUME_LEVELS = 79 def __init__(self, source=None, volume=None, writer=None): """Initialization.""" self._bytes_read = bytearray() self._write_cb = writer self._boot_print = True self._manufacturer = '' self._modelname = '' self._swversion = '' self._inputname = '' self._source = source # Volume in range 0..VOLUME_LEVELS. :class:`None` before calibration. if volume:
self.volume_set(volume) # Setup logging so that is available logging.basicConfig(level=logging.DEBUG) # Private methods def _set_device_to_known_state(self): logger.debug('_set_device_to_known_state') self.verbose_set(True) self.power_on() time.sleep(1) if self._source is not None:
self.input_set(self._source) self.mute_set(False) def _print_device_info(self): self.manufacturer_get() self.modelname_get() self.swversion_get() # We always get inputname last, this represents our initialization self.inputname_current_get() def _primare_reader(self, rawdata): r"""Take raw data and finds the EOL sequence \x10\x03.""" eol = BYTE_DLE_ETX leneol = len(eol) for index, c in enumerate(rawdata): self._bytes_read += c # TODO: Need to do conversion of \x10\x10 before looking for EOL! # Doing it after is actually wrong, move code up here from # _decode_raw_data if self._bytes_read[-leneol:] == eol: logger.debug('_primare_reader - decoded: %s', binascii.hexlify(self._bytes_read)) variable_char, decoded_data = self._decode_raw_data( self._bytes_read) # We found a data sequence, extract remaining data and start # again rawdata = rawdata[index + 1:] self._bytes_read = bytearray() self._parse_and_store(variable_char, decoded_data) else: # logger.debug('_primare_reader - not-eol: %s', # binascii.hexlify(self._bytes_read[-leneol:])) pass def _decode_raw_data(self, rawdata): r"""Decode raw data from the serial port. Replace any '\x10\x10' sequences with '\x10'. Returns the variable char and the data received between the STX and DLE+ETX markers """ variable_char = '' data = '' # logger.debug('Read: "%s"', binascii.hexlify(rawdata)) byte_string = struct.unpack('c' * len(rawdata), rawdata) variable_char = binascii.hexlify(''.join(byte_string[POS_REPLY_VAR])) byte_string = byte_string[POS_REPLY_DATA] # We need to replace double DLE (0x10) with single DLE for byte_pairs in zip(byte_string[0:None:2], byte_string[1:None:2]): # Convert binary tuple to str to ascii str_pairs = binascii.hexlify(''.join(byte_pairs)) if str_pairs == '1010': data += '10' else: data += str_pairs # Very often we have an odd amount of data which not handled by # the zip above, manually append that one byte if len(byte_string) % 2 != 0: data += binascii.hexlify(byte_string[-1]) logger.debug('Read(%s) = %s (%s)', PRIMARE_REPLY[variable_char], da
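The record above documents Primare's RS232 framing (<STX> <command> <variable> [<value>] <DLE> <ETX>, with any 0x10 data byte doubled). A minimal illustrative sketch of that framing, separate from the dataset record; the build_frame helper name is an assumption, not part of the module:

STX, DLE, ETX = b'\x02', b'\x10', b'\x03'

def build_frame(command, payload):
    # Double any DLE (0x10) byte in the variable/value data, then wrap the
    # message in <STX> ... <DLE> <ETX> as described in the protocol notes.
    escaped = payload.replace(DLE, DLE + DLE)
    return STX + command + escaped + DLE + ETX

# Verbose toggle from the docstring example: write (0x57), variable 0x0d, value 0x00.
frame = build_frame(b'W', b'\x0d\x00')
assert frame == b'\x02\x57\x0d\x00\x10\x03'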
TheArbiter/Networks
lab4/lab4exercise2/exp_monitor.py
Python
gpl-3.0
1,212
0.008251
from monitor import monitor_qlen
from subprocess import Popen, PIPE
from time import sleep, time
from multiprocessing import Process
from argparse import ArgumentParser
import sys
import os

parser = ArgumentParser(description="CWND/Queue Monitor")
parser.add_argument('--exp', '-e',
                    dest="exp",
                    action="store",
                    help="Name of the Experiment",
                    required=True)

# Expt parameters
args = parser.parse_args()


def start_tcpprobe():
    "Install tcp_probe module and dump to file"
    os.system("(rmmod tcp_probe >/dev/null 2>&1); modprobe tcp_probe full=1;")
    print "Monitoring TCP CWND ... will save it to ./%s_tcpprobe.txt " % args.exp
    Popen("cat /proc/net/tcpprobe > ./%s_tcpprobe.txt" % args.exp, shell=True)


def qmon():
    monitor = Process(target=monitor_qlen,
                      args=('s0-eth2', 0.01, '%s_sw0-qlen.txt' % args.exp))
    monitor.start()
    print "Monitoring Queue Occupancy ... will save it to %s_sw0-qlen.txt " % args.exp
    raw_input('Press Enter key to stop the monitor--> ')
    monitor.terminate()


if __name__ == '__main__':
    start_tcpprobe()
    qmon()
    Popen("killall -9 cat", shell=True).wait()
t--wagner/pymeasure
instruments/oxford_ilm.py
Python
gpl-3.0
1,931
0.004661
# -*- coding: utf-8 -*-

from pymeasure.instruments.pyvisa_instrument import PyVisaInstrument
from pymeasure.case import ChannelRead
from pymeasure.instruments.oxford import OxfordInstrument
import time


class _QxfordILMChannel(ChannelRead):

    def __init__(self, instrument):
        ChannelRead.__init__(self)
        self._instrument = instrument
        self.unit = 'percent'
        self._config += ['fast']

    @ChannelRead._readmethod
    def read(self):
        while True:
            helium = self._instrument.query('R')
            helium = helium[2:]
            if len(helium) == 4:
                break
        return [float(helium)/10]

    @property
    def fast(self):
        while True:
            status = self._instrument.query('X')
            status = status[5]
            if status == '4' or status == 'C':
                return False
            elif status == '2' or status == '3' or status == 'A':
                return True
            else:
                time.sleep(1)
                pass

    @fast.setter
    def fast(self, boolean):
        if boolean:
            self._instrument.write('T1')
        else:
            self._instrument.write('S1')


class QxfordILM(PyVisaInstrument):

    def __init__(self, address, name='', reset=True, defaults=True, isobus=6, **pyvisa):
        super().__init__(address, name, **pyvisa)
        self._isobus = isobus
        self._instrument = OxfordInstrument(self._instrument, isobus=self._isobus)
        self._instrument.timeout = 200
        self._instrument.read_termination = '\r'
        self._instrument.write_termination = '\r'
        self._instrument.write('C3')

        # Channels
        self.__setitem__('helium', _QxfordILMChannel(self._instrument))

        if defaults is True:
            self.defaults()

    #@property
    #def status(self):
    #    return self._instrument.ask('X')

    def defaults(self):
        pass
arpruss/plucker
plucker_desktop/installer/osx/application_bundle_files/Resources/parser/python/vm/PIL/MicImagePlugin.py
Python
gpl-2.0
2,334
0.002571
# # The Python Imaging Library. # $Id: MicImagePlugin.py,v 1.2 2007/06/17 14:12:15 robertoconnor Exp $ # # Microsoft Image Composer support for PIL # # Notes: # uses TiffImagePlugin.py to read the actual image streams # # History: # 97-01-20 fl Created # # Copyright (c) Secret Labs AB 1997. # Copyright (c) Fredrik Lundh 1997. # # See the README file for information on usage and redistribution. # __version__ = "0.1" import string import Image, TiffImagePlugin from OleFileIO import * # # ---------------------------------------------------------------
----- def _accept(prefix): return prefix[:8] == MAGIC ## # Image plugin for Microsoft'
s Image Composer file format. class MicImageFile(TiffImagePlugin.TiffImageFile): format = "MIC" format_description = "Microsoft Image Composer" def _open(self): # read the OLE directory and see if this is a likely # to be a Microsoft Image Composer file try: self.ole = OleFileIO(self.fp) except IOError: raise SyntaxError, "not an MIC file; invalid OLE file" # find ACI subfiles with Image members (maybe not the # best way to identify MIC files, but what the... ;-) self.images = [] for file in self.ole.listdir(): if file[1:] and file[0][-4:] == ".ACI" and file[1] == "Image": self.images.append(file) # if we didn't find any images, this is probably not # an MIC file. if not self.images: raise SyntaxError, "not an MIC file; no image entries" self.__fp = self.fp self.frame = 0 if len(self.images) > 1: self.category = Image.CONTAINER self.seek(0) def seek(self, frame): try: filename = self.images[frame] except IndexError: raise EOFError, "no such frame" self.fp = self.ole.openstream(filename) TiffImagePlugin.TiffImageFile._open(self) self.frame = frame def tell(self): return self.frame # # -------------------------------------------------------------------- Image.register_open("MIC", MicImageFile, _accept) Image.register_extension("MIC", ".mic")
rwl/openpowersystem
cdpsm/iec61970/wires/disconnector.py
Python
agpl-3.0
1,912
0.004184
#------------------------------------------------------------------------------ # Copyright (C) 2009 Richard Lincoln # # This program is free software; you can redistribute it and/or modify it under # the terms of the GNU Affero General Public License as published by the Free # Software Foundation; version 2 dated June, 1991. # # This software is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the
implied warranty of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. See the GNU General Public License for more # details. # # You should have received a copy of the GN
U Affero General Public License # along with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA #------------------------------------------------------------------------------ """ A manually operated or motor operated mechanical switching device used for changing the connections in a circuit, or for isolating a circuit or equipment from a source of power. It is required to open or close circuits when negligible current is broken or made. """ # <<< imports # @generated from cdpsm.iec61970.wires.switch import Switch from google.appengine.ext import db # >>> imports class Disconnector(Switch): """ A manually operated or motor operated mechanical switching device used for changing the connections in a circuit, or for isolating a circuit or equipment from a source of power. It is required to open or close circuits when negligible current is broken or made. """ # <<< disconnector.attributes # @generated # >>> disconnector.attributes # <<< disconnector.references # @generated # >>> disconnector.references # <<< disconnector.operations # @generated # >>> disconnector.operations # EOF -------------------------------------------------------------------------
hellysmile/django-redis-sessions-fork
redis_sessions_fork/conf.py
Python
bsd-3-clause
932
0
from __future__ import absolute_import, unicode_literals

import os

from appconf import AppConf
from django.conf import settings  # noqa


class SessionRedisConf(AppConf):
    HOST = '127.0.0.1'
    PORT = 6379
    DB = 0
    PREFIX = 'django_sessions'
    PASSWORD = None
    UNIX_DOMAIN_SOCKET_PATH = None
    URL = None
    CONNECTION_POOL = None
    JSON_ENCODING = 'latin-1'

    ENV_URLS = (
        'REDISCLOUD_URL',
        'REDISTOGO_URL',
        'OPENREDIS_URL',
        'REDISGREEN_URL',
        'MYREDIS_URL',
    )

    def configure(self):
        if self.configured_data['URL'] is None:
            for url in self.configured_data['ENV_URLS']:
                redis_env_url = os.environ.get(url)
                if redis_env_url:
                    self.configured_data['URL'] = redis_env_url
                    break
        return self.configured_data

    class Meta:
        prefix = 'session_redis'
pabloalcain/lammps-python
pylammps/Computes/Compute.py
Python
gpl-3.0
1,563
0.007038
""" Compute class """ import numpy as np import matplotlib matplotlib.use('Agg') import pylab as pl #TODO: Take care of the looks of these plots class Compute(object): """ Abstract compute class. It will never be used, but is parent of all the different computes. """ def __init__(self): """ Constructor. Not clear what to do here """ self.value = 0 self.idx = 0 self.header = [] def compute(self, system): """ Compute routine """ pass def tally(self, value): """ Tally new compute with the previous ones. Mostly because not all of the computes have the same structure, so the "average" is not standard. By default we do the usual average. """ self.idx += 1 self.value *= (self.idx - 1)/self.idx self.value += value/self.idx def zero(self): """ Zero out current tallies. """ self.value = 0 self.idx = 0 def log(self, filename): """ Logging routine. By default we just write self.value to filename, with self.header """
np.savetxt(filename, self.value, header='; '.join(self.header)) def plot(self, filename):
""" Plotting routine. By default we plot every column [1:] as a function of column 0, setting labels and axis names with self.header and save it to filename. """ fig, axis = pl.subplots() for i, vec in enumerate(self.value.T[1:]): axis.plot(self.value[:, 0], vec, label=self.header[i]) axis.set_xlabel(self.header[0]) fig.savefig('{0}.pdf'.format(filename)) pl.close()
pcolmant/repanier
repanier/widget/picture.py
Python
gpl-3.0
1,573
0
from django.core.files.storage import default_storage from django.forms import widgets from django.urls import reverse from repanier.const import EMPTY_STRING from repanier.picture.const import SIZE_M from repanier.tools import get_repanier_template_name class RepanierPictureWidget(widgets.TextInput): template
_name = get_repanier_template_name("widgets/picture.html") def __init__(self, *args, **kwargs): self.upload_to = kwargs.pop("upload_to", "pictures") self.size = kwargs.pop("size", SIZE_M) self.bootstrap = kwargs.pop("bootstrap", False) super().__init__(*args, **kwargs) def get_context(self, name, value, attrs): context = super().get_context(name, value, attrs)
context["upload_url"] = reverse( "repanier:ajax_picture", args=(self.upload_to, self.size) ) if value: context["repanier_file_path"] = file_path = str(value) context["repanier_display_picture"] = "inline" context["repanier_display_upload"] = "none" context["repanier_file_url"] = default_storage.url(file_path) else: context["repanier_file_path"] = EMPTY_STRING context["repanier_display_picture"] = "none" context["repanier_display_upload"] = "inline" context["repanier_file_url"] = EMPTY_STRING context["repanier_height"] = context["repanier_width"] = self.size context["bootstrap"] = self.bootstrap return context class Media: js = ("admin/js/jquery.init.js",)
zebde/RobIP
iplookup.py
Python
gpl-3.0
6,811
0.000147
"""This will perform basic enrichment on a given IP.""" import csv import json import mmap import os import socket import urllib import dns.resolver import dns.reversename from geoip import geolite2 from IPy import IP from joblib import Parallel, delayed from netaddr import AddrFormatError, IPSet torcsv = 'Tor_ip_list_ALL.csv' sfile = 'http://torstatus.blutmagie.de/ip_list_all.php/Tor_ip_list_ALL.csv' SUBNET = 0 INPUTDICT = {} SECTOR_CSV = 'sector.csv' OUTFILE = 'IPLookup-output.csv' CSVCOLS = '"ip-address","asn","as-name","isp","abuse-1","abuse-2","abuse-3","domain","reverse-dns","type","country","lat","long","tor-node"' def identify(var): result = "" with open(SECTOR_CSV) as f: root = csv.reader(f) for i in root: if i[0] in var: result = i[1] return result def lookup(value): """Perform a dns request on the given value.""" try: answers = dns.resolver.query(value, 'TXT') for rdata in answers: for txt_string in rdata.strings: value = txt_string.replace(" | ", "|") value = value.replace(" |", "|").split("|") except (dns.resolver.NXDOMAIN, dns.resolver.NoNameservers): value = [] return value def flookup(value, fname, sfile): """Look up a value in a file.""" try: fhandle = open(fname) except IOError: sourceFile = urllib.URLopener() sourceFile.retrieve( sfile, fname) fhandle = open(fname) search = mmap.mmap(fhandle.fileno(), 0, access=mmap.ACCESS_READ) if search.find(value) != -1: return 'true' else: return 'false' def iprange(sample, sub): """Identify if the given ip address is in the previous range.""" if sub is not 0: try: ipset = IPSet([sub]) if sample in ipset: return True except AddrFormatError: return False else: return False def mainlookup(var): """Wrap the main lookup and generated the dictionary.""" global SUBNET global INPUTDICT var = ''.join(var.split()) if IP(var).iptype() != 'PRIVATE' and IP(var).version() == 4: if iprange(var, SUBNET) is True: print elif INPUTDICT.get("ip-address") == var: print else: try: socket.inet_aton(var) except socket.error: var = socket.gethostbyname(var) contactlist = [] rvar = '.'.join(reversed(str(var).split("."))) origin = lookup(rvar + '.origin.asn.shadowserver.org') SUBNET = origin[1] try: contact = lookup(rvar + '.abuse-contacts.abusix.org') contactlist = str(contact[0]).split(",") except IndexError: contactlist = [] contactlist.extend(["-"] * (4 - len(contactlist))) try: addr = dns.reversename.from_address(var) rdns = str(dns.resolver.query(addr, "PTR")[0]) except (dns.resolver.NXDOMAIN, dns.resolver.NoNameservers): rdns = "" match = geolite2.lookup(var) if match is None or match.location is None: country = '' location = ["", ""] else: country = match.country location = match.location tor = flookup(var, torcsv, sfile) category = identify(origin[4]) if category == "": category = identify(contactlist[0]) origin.extend(["-"] * (6 - len(origin))) INPUTDICT = { 'abuse-1': contactlist[0], 'abuse-2': contactlist[1], 'abuse-3': contactlist[2], 'as-name': origin[2], 'asn': origin[0], 'country': country, 'descr': origin[5], 'domain': origin[4], 'ip-address': var, 'lat': location[0], 'long': location[1], 'reverse-dns': rdns, 'tor-node': tor, 'sector': category, } else: INPUTDICT = { 'abuse-1': "", 'abuse-2': "", 'abuse-3': "", 'as-name': "", 'asn': "", 'country': "", 'descr': "", 'domain': "", 'domain-count': "", 'ip-address': var, 'lat': "", 'long': "", 'reverse-dns': "", 'tor-node': "", 'sector': "", } INPUTDICT['ip-address'] = var out = json.dumps( INPUTDICT, indent=4, sort_keys=True, ensure_ascii=False) csvout(INPUTDICT) return out def batch(inputfile): 
"""Handle batch lookups using file based input.""" if os.path.isfile(OUTFILE): os.remove(OUTFILE) fhandle = open(OUTFILE, "a") header = 0 if header == 0: fhandle.write(str(CSVCOLS) + "\n") header = 1 fhandle.close() with open(inputfile) as fhandle: Parallel(n_jobs=100, verbose=51)(delayed(mainlookup)(i.rstrip('\n')) for i in fhandle) def single(lookupvar): """Do a single IP lookup.""" result = mainlookup(lookupvar) return result def csvout(inputdict): """Generate a CSV file from the output inputdict.""" fhandle = open(OUTFILE, "a") # header = 0 # if header == 0: # fhandle.write("Boop") # header = 1 try: writer = csv.writer(fhandle, quoting=csv.QUOTE_ALL) writer.writerow(( inputdict['ip-address'], inputdict['asn'], inputdict['as-name'], inputdict['descr'], inputdict['abuse-1'], inputdict['abuse-2'], inputdict['abuse-3'], inputdict['domain'], inputdict['reverse-dns'], inputdict['sector'],
inputdict['country'], inputdict['lat'], inputdict['long'], inputdict['tor-node'])) finally: fhandle.close() def main(): import argparse PARSER = argparse.ArgumentParser() PARSER.add_argument("-t"
, choices=('single', 'batch'), required="false", metavar="request-type", help="Either single or batch request") PARSER.add_argument("-v", required="false", metavar="value", help="The value of the request") ARGS = PARSER.parse_args() if ARGS.t == "single": print(single(ARGS.v)) elif ARGS.t == "batch": batch(ARGS.v) else: PARSER.print_help() if __name__ == "__main__": main()
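mainlookup() above reverses the dotted-quad octets before querying the shadowserver and abusix TXT zones. A tiny illustrative sketch of that step, separate from the record; the address is a documentation value:

var = '192.0.2.10'
rvar = '.'.join(reversed(var.split('.')))
assert rvar == '10.2.0.192'
# Names then resolved as TXT records via dns.resolver, e.g.:
origin_qname = rvar + '.origin.asn.shadowserver.org'
abuse_qname = rvar + '.abuse-contacts.abusix.org'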
Eloston/ungoogled-chromium
utils/downloads.py
Python
bsd-3-clause
17,917
0.003628
#!/usr/bin/env python3 # -*- coding: UTF-8 -*- # Copyright (c) 2019 The ungoogled-chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """ Module for the downloading, checking, and unpacking of necessary files into the source tree. """ import argparse import configparser import enum import hashlib import shutil import subprocess import sys import urllib.request from pathlib import Path from _common import ENCODING, USE_REGISTRY, ExtractorEnum, get_logger, \ get_chromium_version, add_common_params from _extraction import extract_tar_file, extract_with_7z, extract_with_winrar sys.path.insert(0, str(Path(__file__).parent / 'third_party')) import schema #pylint: disable=wrong-import-position sys.path.pop(0) # Constants class HashesURLEnum(str, enum.Enum): """Enum for supported hash URL schemes""" chromium = 'chromium' class HashMismatchError(BaseException): """Exception for computed hashes not matching expected hashes""" class DownloadInfo: #pylint: disable=too-few-public-methods """Representation of an downloads.ini file for downloading files""" _hashes = ('md5', 'sha1', 'sha256', 'sha512') hash_url_delimiter = '|' _nonempty_keys = ('url', 'download_filename') _optional_keys = ( 'version', 'strip_leading_dirs', ) _passthrough_properties = (*_nonempty_keys, *_optional_keys, 'extractor', 'output_path') _ini_vars = { '_chromium_version': get_chromium_version(), } @staticmethod def _is_hash_url(value): return value.count(DownloadInfo.hash_url_delimiter) == 2 and value.split( DownloadInfo.hash_url_delimiter)[0] in iter(HashesURLEnum) _schema = schema.Schema({ schema.Optional(schema.And(str, len)): { **{x: schema.And(str, len) for x in _nonempty_keys}, 'output_path': (lambda x: str(Path(x).relative_to(''))), **{schema.Optional(x): schema.And(str, len) for x in _optional_keys}, schema.Optional('extractor'): schema.Or(ExtractorEnum.TAR, ExtractorEnum.SEVENZIP, ExtractorEnum.WINRAR), schema.Optional(schema.Or(*_hashes)): schema.And(str, len), schema.Optional('hash_url'): lambda x: DownloadInfo._is_hash_url(x), #pylint: disable=unnecessary-lambda } }) class _DownloadsProperties: #pylint: disable=too-few-public-methods def __init__(self, section_dict, passthrough_properties, hashes): self._section_dict = section_dict self._passthrough_properties = passthrough_properties self._hashes = hashes def has_hash_url(self): """ Returns a boolean indicating whether the current download has a hash URL""" return 'hash_url' in self._section_dict def __getattr__(self, name): if name in self._passthrough_properties: return self._section_dict.get(name, fallback=Non
e) if name == 'hashes': hashes
_dict = {} for hash_name in (*self._hashes, 'hash_url'): value = self._section_dict.get(hash_name, fallback=None) if value: if hash_name == 'hash_url': value = value.split(DownloadInfo.hash_url_delimiter) hashes_dict[hash_name] = value return hashes_dict raise AttributeError('"{}" has no attribute "{}"'.format(type(self).__name__, name)) def _parse_data(self, path): """ Parses an INI file located at path Raises schema.SchemaError if validation fails """ def _section_generator(data): for section in data: if section == configparser.DEFAULTSECT: continue yield section, dict( filter(lambda x: x[0] not in self._ini_vars, data.items(section))) new_data = configparser.ConfigParser(defaults=self._ini_vars) with path.open(encoding=ENCODING) as ini_file: new_data.read_file(ini_file, source=str(path)) try: self._schema.validate(dict(_section_generator(new_data))) except schema.SchemaError as exc: get_logger().error('downloads.ini failed schema validation (located in %s)', path) raise exc return new_data def __init__(self, ini_paths): """Reads an iterable of pathlib.Path to download.ini files""" self._data = configparser.ConfigParser() for path in ini_paths: self._data.read_dict(self._parse_data(path)) def __getitem__(self, section): """ Returns an object with keys as attributes and values already pre-processed strings """ return self._DownloadsProperties(self._data[section], self._passthrough_properties, self._hashes) def __contains__(self, item): """ Returns True if item is a name of a section; False otherwise. """ return self._data.has_section(item) def __iter__(self): """Returns an iterator over the section names""" return iter(self._data.sections()) def properties_iter(self): """Iterator for the download properties sorted by output path""" return sorted( map(lambda x: (x, self[x]), self), key=(lambda x: str(Path(x[1].output_path)))) class _UrlRetrieveReportHook: #pylint: disable=too-few-public-methods """Hook for urllib.request.urlretrieve to log progress information to console""" def __init__(self): self._max_len_printed = 0 self._last_percentage = None def __call__(self, block_count, block_size, total_size): # Use total_blocks to handle case total_size < block_size # total_blocks is ceiling of total_size / block_size # Ceiling division from: https://stackoverflow.com/a/17511341 total_blocks = -(-total_size // block_size) if total_blocks > 0: # Do not needlessly update the console. Since the console is # updated synchronously, we don't want updating the console to # bottleneck downloading. Thus, only refresh the output when the # displayed value should change. 
percentage = round(block_count / total_blocks, ndigits=3) if percentage == self._last_percentage: return self._last_percentage = percentage print('\r' + ' ' * self._max_len_printed, end='') status_line = 'Progress: {:.1%} of {:,d} B'.format(percentage, total_size) else: downloaded_estimate = block_count * block_size status_line = 'Progress: {:,d} B of unknown size'.format(downloaded_estimate) self._max_len_printed = len(status_line) print('\r' + status_line, end='') def _download_via_urllib(url, file_path, show_progress, disable_ssl_verification): reporthook = None if show_progress: reporthook = _UrlRetrieveReportHook() if disable_ssl_verification: import ssl # TODO: Remove this or properly implement disabling SSL certificate verification orig_https_context = ssl._create_default_https_context #pylint: disable=protected-access ssl._create_default_https_context = ssl._create_unverified_context #pylint: disable=protected-access try: urllib.request.urlretrieve(url, str(file_path), reporthook=reporthook) finally: # Try to reduce damage of hack by reverting original HTTPS context ASAP if disable_ssl_verification: ssl._create_default_https_context = orig_https_context #pylint: disable=protected-access if show_progress: print() def _download_if_needed(file_path, url, show_progress, disable_ssl_verification): """ Downloads a file from url to the specified path file_path if necessary. If show_progress is True, download progress is p
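The progress hook in the record above computes total_blocks with -(-total_size // block_size). A small sketch, not part of the record, confirming that this negation trick is integer ceiling division:

import math

for total_size, block_size in [(10, 4), (12, 4), (0, 4), (1, 8192)]:
    assert -(-total_size // block_size) == math.ceil(total_size / block_size)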
googleapis/python-speech
samples/generated_samples/speech_v1p1beta1_generated_speech_recognize_async.py
Python
apache-2.0
1,643
0.000609
# -*- coding: utf-8 -*- # Copyright 2022 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may
obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Generat
ed code. DO NOT EDIT! # # Snippet for Recognize # NOTE: This snippet has been automatically generated for illustrative purposes only. # It may require modifications to work in your environment. # To install the latest published package dependency, execute the following: # python3 -m pip install google-cloud-speech # [START speech_v1p1beta1_generated_Speech_Recognize_async] from google.cloud import speech_v1p1beta1 async def sample_recognize(): # Create a client client = speech_v1p1beta1.SpeechAsyncClient() # Initialize request argument(s) config = speech_v1p1beta1.RecognitionConfig() config.language_code = "language_code_value" audio = speech_v1p1beta1.RecognitionAudio() audio.content = b'content_blob' request = speech_v1p1beta1.RecognizeRequest( config=config, audio=audio, ) # Make the request response = await client.recognize(request=request) # Handle the response print(response) # [END speech_v1p1beta1_generated_Speech_Recognize_async]
niktre/espressopp
testsuite/pi_water/water.py
Python
gpl-3.0
8,192
0.007935
#!/usr/bin/env python2 # # Copyright (C) 2013-2017(H) # Max Planck Institute for Polymer Research # # This file is part of ESPResSo++. # # ESPResSo++ is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # ESPResSo++ is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # # -*- coding: utf-8 -*- ########################################################################### # # # ESPResSo++ Python script for tabulated GROMACS simulation # # # ########################################################################### import sys import time import espressopp import mpi4py.MPI as MPI import logging import copy import math from espressopp import Real3D, Int3D from espressopp.tools import gromacs from espressopp.tools import decomp from espressopp.tools import timers import pathintegral def genTabPotentials(tabfilesnb): potentials = {} for fg in tabfilesnb: fe = fg.split(".")[0]+".tab" # name of espressopp file gromacs.convertTable(fg, fe, sigma, epsilon, c6, c12) pot = espressopp.interaction.Tabulated(itype=3, filename=fe, cutoff=rc) t1, t2 = fg[6], fg[8] # type 1, type 2 potentials.update({t1+"_"+t2: pot}) print "created", t1, t2, fe return potentials # This example reads in a gromacs water system (SPC/Fw) treated with reaction field. See the corresponding gromacs grompp.mdp paramter file. # Output of gromacs energies and esp energies should be the same # simulation parameters (nvt = False is nve) steps = 1 #100 check = 1 #steps/10 rc = 0.9 # Verlet list cutoff skin = 0.14 timestep = 0.0002 # parameters to convert GROMACS tabulated potential file sigma = 1.0 epsilon = 1.0 c6 = 1.0 c12 = 1.0 # GROMACS setup files grofile = "conf.gro" topfile = "topol.top" # this calls the gromacs parser for processing the top file (and included files) and the conf file # The variables at the beginning defaults, types, etc... can be found by calling # gromacs.read(grofile,topfile) without return values. It then prints out the variables to be unpacked defaults, types, atomtypes, masses, charges, atomtypeparameters, bondtypes, bondtypeparams, angletypes, angletypeparams, exclusions, x, y, z, resname, resid, Lx, Ly, Lz= gromacs.read(grofile,topfile) ###########################
########################################### ## IT SHOULD BE UNNECESSARY TO MAKE MODIFICATIONS BELOW THIS LINE ## ###################################################################### #types, bonds, angles, dihedrals, x, y, z, vx, vy, vz,
Lx, Ly, Lz = gromacs.read(grofile,topfile) num_particles = len(x) density = num_particles / (Lx * Ly * Lz) size = (Lx, Ly, Lz) sys.stdout.write('Setting up simulation ...\n') system = espressopp.System() system.rng = espressopp.esutil.RNG() system.bc = espressopp.bc.OrthorhombicBC(system.rng, size) system.skin = skin comm = MPI.COMM_WORLD nodeGrid = decomp.nodeGrid(comm.size,size,rc,skin) cellGrid = decomp.cellGrid(size, nodeGrid, rc, skin) system.storage = espressopp.storage.DomainDecomposition(system, nodeGrid, cellGrid) # setting up GROMACS interaction stuff # create a force capped Lennard-Jones interaction that uses a verlet list verletlist = espressopp.VerletList(system, rc) #interaction = espressopp.interaction.VerletListLennardJonesGromacs(verletlist) # add particles to the system and then decompose props = ['id', 'pos', 'v', 'type', 'mass', 'q'] allParticles = [] for pid in range(num_particles): part = [pid + 1, Real3D(x[pid], y[pid], z[pid]), Real3D(0, 0, 0), types[pid], masses[pid], charges[pid]] allParticles.append(part) system.storage.addParticles(allParticles, *props) #system.storage.decompose() # set up LJ interaction according to the parameters read from the .top file #ljinteraction=gromacs.setLennardJonesInteractions(system, defaults, atomtypeparameters, verletlist,rc) ########## tabulated nb interactions ############ tabfilesnb = ["table_O_O.xvg", "table_H_O.xvg", "table_H_H.xvg"] potentials = genTabPotentials(tabfilesnb) tabulatedinteraction = espressopp.interaction.VerletListTabulated(verletlist) tabulatedinteraction.setPotential(0, 0, potentials["O_O"]) tabulatedinteraction.setPotential(0, 1, potentials["H_O"]) tabulatedinteraction.setPotential(1, 1, potentials["H_H"]) system.addInteraction(tabulatedinteraction) # set up angle interactions according to the parameters read from the .top file angleinteractions=gromacs.setAngleInteractions(system, angletypes, angletypeparams) # set up bonded interactions according to the parameters read from the .top file bondedinteractions=gromacs.setBondedInteractions(system, bondtypes, bondtypeparams) # exlusions, i.e. pairs of atoms not considered for the non-bonded part. Those are defined either by bonds which automatically generate an exclusion. 
Or by the nregxcl variable verletlist.exclude(exclusions) # langevin thermostat langevin = espressopp.integrator.LangevinThermostat(system) langevin.gamma = 10 langevin.temperature = 2.4942 # kT in gromacs units integrator = espressopp.integrator.VelocityVerlet(system) integrator.addExtension(langevin) integrator.dt = timestep print "POT", potentials pathintegral.createPathintegralSystem(allParticles, props, types, system, langevin, potentials, P=16) system.storage.decompose() num_particles = int(espressopp.analysis.NPart(system).compute()) # print simulation parameters print '' print 'number of particles =', num_particles print 'density = %.4f' % (density) print 'rc =', rc print 'dt =', integrator.dt print 'skin =', system.skin print 'steps =', steps print 'NodeGrid = %s' % (nodeGrid,) print 'CellGrid = %s' % (cellGrid,) print '' # analysis configurations = espressopp.analysis.Configurations(system) configurations.gather() temperature = espressopp.analysis.Temperature(system) pressure = espressopp.analysis.Pressure(system) pressureTensor = espressopp.analysis.PressureTensor(system) print "i*timestep,Eb, EAng, ETab, Ek, Etotal T" fmt='%5.5f %15.8g %15.8g %15.8g %15.8g %15.8f %15.8f\n' outfile = open("esp.dat", "w") start_time = time.clock() espressopp.tools.psfwrite("system.psf", system) #espressopp.tools.decomp.tuneSkin(system, integrator) #espressopp.tools.analyse.info(system, integrator) espressopp.tools.fastwritexyz("traj.xyz", system, append=False, scale=10) for i in range(check): T = temperature.compute() P = pressure.compute() Eb = 0 EAng = 0 ETab=0 #for bd in bondedinteractions.values(): Eb+=bd.computeEnergy() #for ang in angleinteractions.values(): EAng+=ang.computeEnergy() #ELj= ljinteraction.computeEnergy() #EQQ= qq_interactions.computeEnergy() ETab= tabulatedinteraction.computeEnergy() T = temperature.compute() Ek = 0.5 * T * (3 * num_particles) Etotal = Ek+Eb+EAng+ETab sys.stdout.write(fmt%(i*timestep,Eb, EAng, ETab, Ek, Etotal, T)) outfile.write(fmt%(i*timestep,Eb, EAng, ETab, Ek, Etotal, T)) #espressopp.tools.pdb.pdbfastwrite("traj.pdb", system, append=True) espressopp.tools.fastwritexyz("traj.xyz", system, append=True, scale=10) integrator.run(steps/check) # print out every steps/check steps #espressopp.tools.vmd.imd_positions(system, sock) # print timings and neighbor list information end_time = time.clock() timers.show(integrator.getTimers(), precision=2) espressopp.tools.analyse.final_info(system, integrator, verletlist, start_time, end_time) sys.stdout.write('Integration steps = %d\n' % integrator.step) sys.stdout.write('CPU time = %.1f\n' % (end_time - start
liam2/liam2
liam2/merge_h5.py
Python
gpl-3.0
4,745
0.000211
# encoding: utf-8 from __future__ import absolute_import, division, print_function import numpy as np import tables from liam2.data import merge_arrays, get_fields, index_table_light, merge_array_records from liam2.utils import timed, loop_wh_progress, merge_items __version__ = "0.4" def get_group_fields(node): if node is None: return {} # noinspection PyProtectedMember return {table._v_name: get_fields(table) for table in node._f_iter_nodes()} def merge_group(parent1, parent2, name, output_file, index_col): print() print(name) print('=' * len(name)) group1 = getattr(parent1, name, None) group2 = getattr(parent2, name, None) if group1 is None and group2 is None: print("node not found in either input files, skipped") return output_group = output_file.create_group("/", name) fields1 = get_group_fields(group1) fields2 = get_group_fields(group2) ent_names1 = set(fields1.keys()) ent_names2 = set(fields2.keys()) for ent_name in sorted(ent_names1 | ent_names2): print() print(ent_name) ent_fields1 = fields1.get(ent_name, []) ent_fields2 = fields2.get(ent_name, []) output_fields = merge_items(ent_fields1, ent_fields2) output_table = output_file.create_table(output_group, ent_name, np.dtype(output_fields)) if ent_name in ent_names1: table1 = getattr(group1, ent_name) # noinspection PyProtectedMember print(" * indexing table from %s ..." % group1._v_file.filename, end=' ') input1_rows = index_table_light(table1, index_col) print("done.") else: table1 = None input1_rows = {} if ent_name in ent_names2: table2 = getattr(group2, ent_name) # noinspection PyProtectedMember print(" * indexing table from %s ..." % group2._v_file.filename, end=' ') input2_rows = index_table_light(table2, index_col) print("done.") else: table2 = None input2_rows = {} print(" * merging: ", end=' ') input1_periods = set(input1_rows.keys()) input2_periods = set(input2_rows.keys()) output_periods = sorted(input1_periods | input2_periods) # noinspection PyUnusedLocal def merge_period(period_idx, period): if ent_name in ent_names1: start, stop = input1_rows.get(period, (0, 0)) input1_array = table1.read(start, stop) else: input1_array = None if ent_name in ent_names2: start, stop = input2_rows.get(period, (0, 0)) input2_array = table2.read(start, stop) else: input2_array = None if ent_name in ent_names1 and ent_name in ent_names2: if 'id' in input1_array.dtype.names: assert 'id' in input2_array.dtype.names output_array, _ = merge_arrays(input1_array, input2_array) else: output_array = merge_array_records(input1_array, input2_array) elif ent_name in ent_names1: output_array = input1_array elif ent_name in ent_names2: output_array = input2_array else: raise Exception("this shouldn't have happened") output_table.append(output_array) output_table.flush() loop_wh_progress(merge_period, output_periods) print(" done.") def merge_h5(input1_path, input2_path, output_path): input1_file = tables.open_file(input1_path) input2_file = tables.open_file(input2_path) output_file = tables.open_file(output_path, mode="w") input1root = input1_file.root input2root = input2_file.root merge_group(input1roo
t, input2root, 'globals', output_file, 'PERIOD') merge_group(input1root, input2root, 'entities', output_file, 'period') input1_file.close() input2_file.close() output_file.close() if __name__ == '__main__': import sys import platform print("LIAM HDF5 merge
%s using Python %s (%s)\n" % (__version__, platform.python_version(), platform.architecture()[0])) args = sys.argv if len(args) < 4: print("Usage: %s inputpath1 inputpath2 outputpath" % args[0]) sys.exit() timed(merge_h5, args[1], args[2], args[3])
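The script above is normally run from the command line (Usage: merge_h5.py inputpath1 inputpath2 outputpath). An equivalent illustrative call from Python, assuming the liam2 package is importable; the file names are hypothetical:

from liam2.merge_h5 import merge_h5

merge_h5('simulation_a.h5', 'simulation_b.h5', 'merged.h5')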
cloudera/hue
desktop/core/ext-py/Babel-2.5.1/tests/messages/test_checkers.py
Python
apache-2.0
12,538
0
# -*- coding: utf-8 -*- # # Copyright (C) 2008-2011 Edgewall Software # All rights reserved. # # This software is licensed as described in the file COPYING, which # you should have received as part of this distribution. The terms # are also available at http://babel.edgewall.org/wiki/License. # # This software consists of voluntary contributions made by many # individuals. For the exact contribution history, see the revision # history and logs, available at http://babel.edgewall.org/log/. from datetime import datetime import time import unittest from babel import __version__ as VERSION from babel.core import Locale, UnknownLocaleError from babel.dates import format_datetime from babel.messages import checkers from babel.messages.plurals import PLURALS from babel.messages.pofile import read_po from babel.util import LOCALTZ from babel._compat import BytesIO class CheckersTestCase(unittest.TestCase): # the last msgstr[idx] is always missing except for
singular plural forms def test_1_num_plurals_checkers(self): for _locale in [p for p in PLURALS if PLURALS[p][0] == 1]: try: locale = Locale.parse(_locale) except UnknownLocaleError: # Just an alias? Not what we're testing here, let's continue continue po_file = (u"""\ # %(english_name)s translations for
TestProject. # Copyright (C) 2007 FooBar, Inc. # This file is distributed under the same license as the TestProject # project. # FIRST AUTHOR <EMAIL@ADDRESS>, 2007. # msgid "" msgstr "" "Project-Id-Version: TestProject 0.1\\n" "Report-Msgid-Bugs-To: [email protected]\\n" "POT-Creation-Date: 2007-04-01 15:30+0200\\n" "PO-Revision-Date: %(date)s\\n" "Last-Translator: FULL NAME <EMAIL@ADDRESS>\\n" "Language-Team: %(locale)s <[email protected]>\n" "Plural-Forms: nplurals=%(num_plurals)s; plural=%(plural_expr)s\\n" "MIME-Version: 1.0\\n" "Content-Type: text/plain; charset=utf-8\\n" "Content-Transfer-Encoding: 8bit\\n" "Generated-By: Babel %(version)s\\n" #. This will be a translator comment, #. that will include several lines #: project/file1.py:8 msgid "bar" msgstr "" #: project/file2.py:9 msgid "foobar" msgid_plural "foobars" msgstr[0] "" """ % dict(locale=_locale, english_name=locale.english_name, version=VERSION, year=time.strftime('%Y'), date=format_datetime(datetime.now(LOCALTZ), 'yyyy-MM-dd HH:mmZ', tzinfo=LOCALTZ, locale=_locale), num_plurals=PLURALS[_locale][0], plural_expr=PLURALS[_locale][0])).encode('utf-8') # This test will fail for revisions <= 406 because so far # catalog.num_plurals was neglected catalog = read_po(BytesIO(po_file), _locale) message = catalog['foobar'] checkers.num_plurals(catalog, message) def test_2_num_plurals_checkers(self): # in this testcase we add an extra msgstr[idx], we should be # disregarding it for _locale in [p for p in PLURALS if PLURALS[p][0] == 2]: if _locale in ['nn', 'no']: _locale = 'nn_NO' num_plurals = PLURALS[_locale.split('_')[0]][0] plural_expr = PLURALS[_locale.split('_')[0]][1] else: num_plurals = PLURALS[_locale][0] plural_expr = PLURALS[_locale][1] try: locale = Locale(_locale) date = format_datetime(datetime.now(LOCALTZ), 'yyyy-MM-dd HH:mmZ', tzinfo=LOCALTZ, locale=_locale) except UnknownLocaleError: # Just an alias? Not what we're testing here, let's continue continue po_file = (u"""\ # %(english_name)s translations for TestProject. # Copyright (C) 2007 FooBar, Inc. # This file is distributed under the same license as the TestProject # project. # FIRST AUTHOR <EMAIL@ADDRESS>, 2007. # msgid "" msgstr "" "Project-Id-Version: TestProject 0.1\\n" "Report-Msgid-Bugs-To: [email protected]\\n" "POT-Creation-Date: 2007-04-01 15:30+0200\\n" "PO-Revision-Date: %(date)s\\n" "Last-Translator: FULL NAME <EMAIL@ADDRESS>\\n" "Language-Team: %(locale)s <[email protected]>\\n" "Plural-Forms: nplurals=%(num_plurals)s; plural=%(plural_expr)s\\n" "MIME-Version: 1.0\\n" "Content-Type: text/plain; charset=utf-8\\n" "Content-Transfer-Encoding: 8bit\\n" "Generated-By: Babel %(version)s\\n" #. This will be a translator comment, #. that will include several lines #: project/file1.py:8 msgid "bar" msgstr "" #: project/file2.py:9 msgid "foobar" msgid_plural "foobars" msgstr[0] "" msgstr[1] "" msgstr[2] "" """ % dict(locale=_locale, english_name=locale.english_name, version=VERSION, year=time.strftime('%Y'), date=date, num_plurals=num_plurals, plural_expr=plural_expr)).encode('utf-8') # we should be adding the missing msgstr[0] # This test will fail for revisions <= 406 because so far # catalog.num_plurals was neglected catalog = read_po(BytesIO(po_file), _locale) message = catalog['foobar'] checkers.num_plurals(catalog, message) def test_3_num_plurals_checkers(self): for _locale in [p for p in PLURALS if PLURALS[p][0] == 3]: po_file = (r"""\ # %(english_name)s translations for TestProject. # Copyright (C) 2007 FooBar, Inc. 
# This file is distributed under the same license as the TestProject # project. # FIRST AUTHOR <EMAIL@ADDRESS>, 2007. # msgid "" msgstr "" "Project-Id-Version: TestProject 0.1\n" "Report-Msgid-Bugs-To: [email protected]\n" "POT-Creation-Date: 2007-04-01 15:30+0200\n" "PO-Revision-Date: %(date)s\n" "Last-Translator: FULL NAME <EMAIL@ADDRESS>\n" "Language-Team: %(locale)s <[email protected]>\n" "Plural-Forms: nplurals=%(num_plurals)s; plural=%(plural_expr)s\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=utf-8\n" "Content-Transfer-Encoding: 8bit\n" "Generated-By: Babel %(version)s\n" #. This will be a translator comment, #. that will include several lines #: project/file1.py:8 msgid "bar" msgstr "" #: project/file2.py:9 msgid "foobar" msgid_plural "foobars" msgstr[0] "" msgstr[1] "" """ % dict(locale=_locale, english_name=Locale.parse(_locale).english_name, version=VERSION, year=time.strftime('%Y'), date=format_datetime(datetime.now(LOCALTZ), 'yyyy-MM-dd HH:mmZ', tzinfo=LOCALTZ, locale=_locale), num_plurals=PLURALS[_locale][0], plural_expr=PLURALS[_locale][0])).encode('utf-8') # This test will fail for revisions <= 406 because so far # catalog.num_plurals was neglected catalog = read_po(BytesIO(po_file), _locale) message = catalog['foobar'] checkers.num_plurals(catalog, message) def test_4_num_plurals_checkers(self): for _locale in [p for p in PLURALS if PLURALS[p][0] == 4]: po_file = (r"""\ # %(english_name)s translations for TestProject. # Copyright (C) 2007 FooBar, Inc. # This file is distributed under the same license as the TestProject # project. # FIRST AUTHOR <EMAIL@ADDRESS>, 2007. # msgid "" msgstr "" "Project-Id-Version: TestProject 0.1\n" "Report-Msgid-Bugs-To: [email protected]\n" "POT-Creation-Date: 2007-04-01 15:30+0200\n" "PO-Revision-Date: %(date)s\n" "Last-Translator: FULL NAME <EMAIL@ADDRESS>\n" "Language-Team: %(locale)s <[email protected]>\n" "Plural-Forms: nplurals=%(num_plurals)s; plural=%(plural_expr)s\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=utf-8\n" "Content-Transfer-Encoding: 8bit\n" "Generated-By: Babel %(version)s\n" #. This will be a translator comment, #. that will include several lines #: project/file1.py:8 msgid "bar" msgstr "" #: project/file2.py:9 msgid "foobar" msgid_plural "foobars" msgstr[0] ""
Mariusz1970/enigma2
lib/python/Screens/InfoBarGenerics.py
Python
gpl-2.0
140,459
0.031426
# -*- coding: utf-8 -*- from Components.ActionMap import ActionMap, HelpableActionMap, NumberActionMap from Components.Harddisk import harddiskmanager, findMountPoint from Components.Input import Input from Components.Label import Label from Components.MovieList import AUDIO_EXTENSIONS from Components.PluginComponent import plugins from Components.ServiceEventTracker import ServiceEventTracker from Components.Sources.Boolean import Boolean from Components.Sources.List import List from Components.config import config, configfile, ConfigBoolean, ConfigClock from Components.SystemInfo import SystemInfo from Components.UsageConfig import preferredInstantRecordPath, defaultMoviePath, preferredTimerPath, ConfigSelection # from Components.Task import Task, Job, job_manager as JobManager from Components.Pixmap import MovingPixmap, MultiPixmap from Components.Sources.StaticText import StaticText from Components.ScrollLabel import ScrollLabel from Plugins.Plugin import PluginDescriptor from Components.Timeshift import InfoBarTimeshift from Screens.Screen import Screen from Screens import ScreenSaver from Screens.ChannelSelection import ChannelSelection, BouquetSelector, SilentBouquetSelector, EpgBouquetSelector from Screens.ChoiceBox import ChoiceBox from Screens.Dish import Dish from Screens.EventView import EventViewEPGSelect, EventViewSimple from Screens.EpgSelection import EPGSelection from Screens.InputBox import InputBox from Screens.MessageBox import MessageBox from Screens.MinuteInput import MinuteInput from Screens.TimerSelection import TimerSelection from Screens.PictureInPicture import PictureInPicture from Screens.PVRState import PVRState, TimeshiftState from Screens.SubtitleDisplay import SubtitleDisplay from Screens.RdsDisplay import RdsInfoDisplay, RassInteractive from Screens.Standby import Standby, TryQuitMainloop from Screens.TimeDateInput import TimeDateInput from Screens.TimerEdit import TimerEditList from Screens.UnhandledKey import UnhandledKey from ServiceReference import ServiceReference, isPlayableForCur from RecordTimer import RecordTimer, RecordTimerEntry, parseEvent, AFTEREVENT, findSafeRecordPath from Screens.TimerEntry import TimerEntry as TimerEntry from Tools import Directories, Notifications from Tools.Directories import pathExists, fileExists, getRecordingFilename, copyfile, moveFiles, resolveFilename, SCOPE_TIMESHIFT, SCOPE_CURRENT_SKIN from Tools.KeyBindings import getKeyDescription from enigma import eTimer, eServiceCenter, eDVBServicePMTHandler, iServiceInformation, iPlayableService, eServiceReference, eEPGCache, eActionMap from boxbranding import getBoxType from time import time, localtime, strftime from bisect import insort from sys import maxint import os, cPickle # hack alert! from Screens.Menu import MainMenu, Menu, mdom from Screens.Setup import Setup import Screens.Standby AUDIO = False if fileExists("/usr/lib/enigma2/python/Plugins/Extensions/CoolTVGuide/plugin.pyo"): COOLTVGUIDE = True else: COOLTVGUIDE = False def isStandardInfoBar(self): return self.__class__.__name__ == "InfoBar" def isMoviePlayerInfoBar(self): return self.__class__.__name__ == "MoviePlayer" def setResumePoint(session): global resumePointCache, resumePointCacheLast service = session.nav.getCurrentService() ref = session.nav.getCurrentlyPlayingServiceOrGroup() if (service is not None) and (ref is not None): # and (ref.type != 1): # ref type 1 has its own memory... 
seek = service.seek() if seek: pos = seek.getPlayPosition() if not pos[0]: key = ref.toString() lru = int(time()) l = seek.getLength() if l: l = l[1] else: l = None resumePointCache[key] = [lru, pos[1], l] for k, v in resumePointCache.items(): if v[0] < lru: candidate = k filepath = os.path.realpath(candidate.split(':')[-1]) mountpoint = findMountPoint(filepath) if os.path.ismount(mountpoint) and not os.path.exists(filepath): del resumePointCache[candidate] saveResumePoints() def delResumePoint(ref): global resumePointCache, resumePointCacheLast try: del resumePointCache[ref.toString()] except KeyError: pass saveResumePoints() def getResumePoint(session): global resumePoin
tCache ref = session.nav.getCurrentlyPlayingServiceOrGroup() if (ref is not None) and (ref.type != 1): try: entry = resumePointCache[ref.toString()] entry[0] = int(time()) # update LRU timestamp return entry[1] except KeyError: return None def saveResumePoints(): global resumePointCache, resumePointCacheLast try: f = open('/etc/enigma2/resumepoints.pkl', 'wb') cPickle.dump(resum
ePointCache, f, cPickle.HIGHEST_PROTOCOL) f.close() except Exception, ex: print "[InfoBar] Failed to write resumepoints:", ex resumePointCacheLast = int(time()) def loadResumePoints(): try: file = open('/etc/enigma2/resumepoints.pkl', 'rb') PickleFile = cPickle.load(file) file.close() return PickleFile except Exception, ex: print "[InfoBar] Failed to load resumepoints:", ex return {} def updateresumePointCache(): global resumePointCache resumePointCache = loadResumePoints() def ToggleVideo(): mode = open("/proc/stb/video/policy").read()[:-1] print mode if mode == "letterbox": f = open("/proc/stb/video/policy", "w") f.write("panscan") f.close() elif mode == "panscan": f = open("/proc/stb/video/policy", "w") f.write("letterbox") f.close() else: # if current policy is not panscan or letterbox, set to panscan f = open("/proc/stb/video/policy", "w") f.write("panscan") f.close() resumePointCache = loadResumePoints() resumePointCacheLast = int(time()) class InfoBarDish: def __init__(self): self.dishDialog = self.session.instantiateDialog(Dish) class InfoBarUnhandledKey: def __init__(self): self.unhandledKeyDialog = self.session.instantiateDialog(UnhandledKey) self.hideUnhandledKeySymbolTimer = eTimer() self.hideUnhandledKeySymbolTimer.callback.append(self.unhandledKeyDialog.hide) self.checkUnusedTimer = eTimer() self.checkUnusedTimer.callback.append(self.checkUnused) self.onLayoutFinish.append(self.unhandledKeyDialog.hide) eActionMap.getInstance().bindAction('', -maxint -1, self.actionA) #highest prio eActionMap.getInstance().bindAction('', maxint, self.actionB) #lowest prio self.flags = (1<<1) self.uflags = 0 #this function is called on every keypress! def actionA(self, key, flag): try: print 'KEY: %s %s' % (key,getKeyDescription(key)[0]) except: print 'KEY: %s' % key self.unhandledKeyDialog.hide() if self.closeSIB(key) and self.secondInfoBarScreen and self.secondInfoBarScreen.shown: self.secondInfoBarScreen.hide() self.secondInfoBarWasShown = False if flag != 4: if self.flags & (1<<1): self.flags = self.uflags = 0 self.flags |= (1<<flag) if flag == 1: # break self.checkUnusedTimer.start(0, True) return 0 def closeSIB(self, key): if key >= 12 and key != 352 and key != 103 and key != 108 and key != 402 and key != 403 and key != 407 and key != 412 : return True else: return False #this function is only called when no other action has handled this key def actionB(self, key, flag): if flag != 4: self.uflags |= (1<<flag) def checkUnused(self): if self.flags == self.uflags: self.unhandledKeyDialog.show() self.hideUnhandledKeySymbolTimer.start(2000, True) class InfoBarScreenSaver: def __init__(self): self.onExecBegin.append(self.__onExecBegin) self.onExecEnd.append(self.__onExecEnd) self.screenSaverTimer = eTimer() self.screenSaverTimer.callback.append(self.screensaverTimeout) self.screensaver = self.session.instantiateDialog(ScreenSaver.Screensaver) self.onLayoutFinish.append(self.__layoutFinished) def __layoutFinished(self): self.screensaver.hide() def __onExecBegin(self): self.ScreenSaverTimerStart() def __onExecEnd(self): if self.screensaver.shown: self.screensaver.hide() eActionMap.getInstance().unbindAction('', self.keypressScreenSaver) self.screenSaverTimer.stop() def ScreenSaverTimerStart(self): time = int(config.usage.screen_saver.value) flag = self.seekstate[0] if not flag: ref = s
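The resume-point handling above boils down to a pickled dict keyed by service reference, holding a last-used timestamp, the play position and the service length. The following is a stripped-down sketch of that idea only; the path and service reference string are placeholders, and none of this calls enigma2 APIs.

import pickle
import time

CACHE_PATH = "/tmp/resumepoints.pkl"   # enigma2 itself writes /etc/enigma2/resumepoints.pkl

def load_resume_points(path=CACHE_PATH):
    try:
        with open(path, "rb") as handle:
            return pickle.load(handle)
    except (IOError, OSError, pickle.PickleError):
        return {}

def set_resume_point(ref, position, length, path=CACHE_PATH):
    cache = load_resume_points(path)
    cache[ref] = [int(time.time()), position, length]   # [lru timestamp, position, length]
    with open(path, "wb") as handle:
        pickle.dump(cache, handle, pickle.HIGHEST_PROTOCOL)

# Positions are typically 90 kHz PTS ticks in enigma2; the values here are invented.
set_resume_point("1:0:19:2B66:3F3:1:C00000:0:0:0:", position=90000 * 600, length=90000 * 3600)
print(load_resume_points())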
ziotom78/stripeline
stripeline/timetools.py
Python
mit
11,654
0.001974
#!/usr/bin/env python3 # -*- encoding: utf-8 -*- from collections import namedtuple from typing import List, Union import numpy as np from astropy.io import fits TimeChunk = namedtuple('TimeChunk', 'start_time num_of_samples') def split_time_range(time_length: float, num_of_chunks: int, sampfreq: float, time0=0.0) -> List[TimeChunk]: '''Split a time interval in a number of chunks. Return a list of objects of kind :class:`stripeline.timetools.TimeChunk`. ''' delta_time = time_length / num_of_chunks result = [] for chunk_idx in range(num_of_chunks): # Determine the time of each sample in this chunk cur_time = chunk_idx * delta_time chunk_time0 = np.ceil(cur_time * sampfreq) / sampfreq - cur_time start_time = time0 + cur_time + chunk_time0 num_of_samples = int(delta_time * sampfreq) result.append(TimeChunk(start_time=start_time, num_of_samples=num_of_samples)) return result DET_NAMES = {'Q1': 0, 'Q2': 1, 'U1': 2, 'U2': 3 } class ToiProvider: '''Load a TOI and split it evenly among MPI processes. .. note:: This is an abstract base class, and it should not be instantiated. Consider using any of its derived classes, like :class:`stripeline.timetools.FitsToiProvider`. In the case of a run split among many MPI processes, this class balances the load of a long TOI. If every MPI process creates a :class:`stripeline.timetools.ToiProvider` object, every object will take responsibility of reading one section of the TOI. The methods :func:`stripeline.timetools.ToiProvider.get_signal`, :func:`stripeline.timetools.ToiProvider.get_pointings`, and :func:`stripeline.timetools.ToiProvider.get_pixel_index` can be used by processes to read the chunk of data which belongs to each. ''' def __init__(self, rank: int, num_of_processes: int): '''Create a new object. Parameters: * "rank" is the rank of the running MPI process * "num_of_processes" is the number of MPI processes ''' self.rank = rank self.num_of_processes = num_of_processes self.total_num_of_samples = 0 def get_time(self): '''Return a vector containing the time of each sample in the TOI. Only the part of the TOI that belongs to the rank of this process is returned.''' return None def get_signal(self, det_idx: Union[int, str]): # Unused del det_idx return None def get_pixel_index(self, nside: int, nest=False, lonlat=False): '''Return a vector containing the pixel index for each sample in the TOI. Only the part of the TOI that belongs to the rank of this process is returned.''' theta, phi, psi = self.get_pointings() return healpy.ang2pix(nside, theta, phi, nest=nest, lonlat=lonlat) def get_pointings(self): '''Return two vectors containing the colatitude and longitude for each sample in the TOI. Only the part of the TOI that belongs to the rank of this process is returned.''' return None, None def get_polarization_angle(self): '''Return a vector containing the polarization angle for each sample in the TOI. Only the part of the TOI that belongs to the rank of this process is returned.''' return None ToiFile = namedtuple('ToiFile', ['file_name', 'num_of_samples']) def read_fits_file_information(file_name: str, hdu=1) -> ToiFile: '''Read the number of rows in the first tabular HDU of a FITS file Return a :class:`stripeline.timetools.ToiFile` object. ''' with fits.open(file_name) as fin: num_of_samples = fin[hdu].header['NAXIS2'] return ToiFile(file_name=file_name, num_of_samples=num_of_samples) def split_into_n(length: int, num_of_segments: int) -> List[int]: '''Split a set of `length` elements into `num_of_segments` subsets. Example:: >>> spl
it_into_n(10, 4) [2 3 2 3] >>> split_into_n(201, 2) [100 101] ''' assert num_of_segments > 0 assert length > num_of_segments start_points = np.array([int(i * length / num_of_segments) for i in range(num_of_segments + 1)]) return start_points[1:] - start_points[:-1] def
assign_toi_files_to_processes(samples_per_processes: List[int], tod_files: List[ToiFile]): '''Determine how to balance the load of TOI files among processes. Given a list of samples to be processed by each MPI process, decide which TOD and samples must be loaded by each process, using the principle that all the processes should read the same number of TODs, when possible. Return a list of :class:`stripeline.timetools.ToiFile` objects. ''' assert (sum(samples_per_processes) == sum([x.num_of_samples for x in tod_files])) result = [] # Type: List[List[ToiFile]] file_idx = 0 element_idx = 0 # Iterate over the MPI processes for samples_in_this_proc in samples_per_processes: # This is the list of FITS segments that the current MPI process is # going to load segments = [] # Type: List[ToiFileSegment] elements_in_this_segment = 0 # Iterate over the files to be read by the current MPI process while elements_in_this_segment < samples_in_this_proc: if elements_in_this_segment + (tod_files[file_idx].num_of_samples - element_idx) <= samples_in_this_proc: # The whole FITS file is going to be read by the current MPI # process num = tod_files[file_idx].num_of_samples - element_idx segments.append(ToiFileSegment(file_name=tod_files[file_idx].file_name, first_element=element_idx, num_of_elements=num)) elements_in_this_segment += num file_idx += 1 element_idx = 0 else: # This is the size of the segment we're going to append to "segments" num = samples_in_this_proc - elements_in_this_segment # Only a subset of this FITS file will be read by the current MPI process segments.append(ToiFileSegment(file_name=tod_files[file_idx].file_name, first_element=element_idx, num_of_elements=num)) elements_in_this_segment += num element_idx += num result.append(segments) return result ToiFileSegment = namedtuple( 'ToiFileSegment', ['file_name', 'first_element', 'num_of_elements']) FitsColumn = namedtuple( 'FitsColumn', ['hdu', 'column'] ) FitsTableLayout = namedtuple( 'FitsTableLayout', ['time_col', 'theta_col', 'phi_col', 'psi_col', 'signal_cols'] ) def _load_array_from_fits(segments: List[ToiFileSegment], cols_to_read: List[FitsColumn]): '''Read a set of columns from a list of FITS files. The chunks to read from each FITS file are specified in the parameter `segments`, while the columns to read are in `cols_to_read`. The function returns a tuple containing all the data from the columns (each in a NumPy array) in the same order as in `cols_to_read`.''' arrays = [np.array([], dtype=np.float64) for i in range(len(cols_to_read))] for cur_segment in segments: start = cur_segment.first_element end = cur_segment.first_element + cur_segment.num_of_elements with fits.open(cur_segment.file_name) as f: # TODO: maybe this is not the most efficient way to load # chunks of data from a FITS column cur_chunk_arr = [f[x.hdu].data.field(x.column)[start:end] for x in cols_to_read] for col_i
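The helpers defined above are self-contained, so a short usage sketch is easy to give. It assumes the stripeline package is importable; the sampling frequency and chunk counts are made up.

from stripeline.timetools import split_into_n, split_time_range

# One hour of data sampled at 50 Hz, split across 4 MPI ranks.
for chunk in split_time_range(time_length=3600.0, num_of_chunks=4, sampfreq=50.0):
    print(chunk.start_time, chunk.num_of_samples)

# Balance 201 samples over 2 processes -> array([100, 101])
print(split_into_n(201, 2))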
asedunov/intellij-community
python/testData/editing/spaceDocStringStubInClass.after.py
Python
apache-2.0
28
0.071429
class
C: ''' <caret
> '''
AutorestCI/azure-sdk-for-python
azure-mgmt-containerinstance/azure/mgmt/containerinstance/models/operation_list_result.py
Python
mit
955
0
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT Licen
se. See License.txt in the project root for # license information. # # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is # regenerated. # -------------------------------------------------------------------------- from msrest.serialization import Model class OperationListResult(Model): """The operation list response that contains all operations for Azure Container Instance service. :param value: The list of op
erations. :type value: list of :class:`Operation <azure.mgmt.containerinstance.models.Operation>` """ _attribute_map = { 'value': {'key': 'value', 'type': '[Operation]'}, } def __init__(self, value=None): self.value = value
HIPS/neural-fingerprint
neuralfingerprint/build_convnet.py
Python
mit
6,508
0.005532
import autograd.numpy as np from autograd.scipy.misc import logsumexp from features import num_atom_features, num_bond_features from util import memoize, WeightsParser from mol_graph import graph_from_smiles_tuple, degrees from build_vanilla_net import build_fingerprint_deep_net, relu, batch_normalize def fast_array_from_list(xs): return np.concatenate([np.expand_dims(x, axis=0) for x in xs], axis=0) def sum_and_stack(features, idxs_list_of_lists): return fast_arr
ay_from_list([np.sum(features[idx_list], axis=0) for idx_list in idxs_list_of_lists]) def softmax(X, axis=0): return np.exp(X - logsumexp(X, axis=axis, keepdims=True)) def matmult_neighbors(array_rep, atom_features, bond_features, get_weights): activations_by_degree = []
for degree in degrees: atom_neighbors_list = array_rep[('atom_neighbors', degree)] bond_neighbors_list = array_rep[('bond_neighbors', degree)] if len(atom_neighbors_list) > 0: neighbor_features = [atom_features[atom_neighbors_list], bond_features[bond_neighbors_list]] # dims of stacked_neighbors are [atoms, neighbors, atom and bond features] stacked_neighbors = np.concatenate(neighbor_features, axis=2) summed_neighbors = np.sum(stacked_neighbors, axis=1) activations = np.dot(summed_neighbors, get_weights(degree)) activations_by_degree.append(activations) # This operation relies on atoms being sorted by degree, # in Node.graph_from_smiles_tuple() return np.concatenate(activations_by_degree, axis=0) def weights_name(layer, degree): return "layer " + str(layer) + " degree " + str(degree) + " filter" def build_convnet_fingerprint_fun(num_hidden_features=[100, 100], fp_length=512, normalize=True, activation_function=relu, return_atom_activations=False): """Sets up functions to compute convnets over all molecules in a minibatch together.""" # Specify weight shapes. parser = WeightsParser() all_layer_sizes = [num_atom_features()] + num_hidden_features for layer in range(len(all_layer_sizes)): parser.add_weights(('layer output weights', layer), (all_layer_sizes[layer], fp_length)) parser.add_weights(('layer output bias', layer), (1, fp_length)) in_and_out_sizes = zip(all_layer_sizes[:-1], all_layer_sizes[1:]) for layer, (N_prev, N_cur) in enumerate(in_and_out_sizes): parser.add_weights(("layer", layer, "biases"), (1, N_cur)) parser.add_weights(("layer", layer, "self filter"), (N_prev, N_cur)) for degree in degrees: parser.add_weights(weights_name(layer, degree), (N_prev + num_bond_features(), N_cur)) def update_layer(weights, layer, atom_features, bond_features, array_rep, normalize=False): def get_weights_func(degree): return parser.get(weights, weights_name(layer, degree)) layer_bias = parser.get(weights, ("layer", layer, "biases")) layer_self_weights = parser.get(weights, ("layer", layer, "self filter")) self_activations = np.dot(atom_features, layer_self_weights) neighbour_activations = matmult_neighbors( array_rep, atom_features, bond_features, get_weights_func) total_activations = neighbour_activations + self_activations + layer_bias if normalize: total_activations = batch_normalize(total_activations) return activation_function(total_activations) def output_layer_fun_and_atom_activations(weights, smiles): """Computes layer-wise convolution, and returns a fixed-size output.""" array_rep = array_rep_from_smiles(tuple(smiles)) atom_features = array_rep['atom_features'] bond_features = array_rep['bond_features'] all_layer_fps = [] atom_activations = [] def write_to_fingerprint(atom_features, layer): cur_out_weights = parser.get(weights, ('layer output weights', layer)) cur_out_bias = parser.get(weights, ('layer output bias', layer)) atom_outputs = softmax(cur_out_bias + np.dot(atom_features, cur_out_weights), axis=1) atom_activations.append(atom_outputs) # Sum over all atoms within a moleclue: layer_output = sum_and_stack(atom_outputs, array_rep['atom_list']) all_layer_fps.append(layer_output) num_layers = len(num_hidden_features) for layer in xrange(num_layers): write_to_fingerprint(atom_features, layer) atom_features = update_layer(weights, layer, atom_features, bond_features, array_rep, normalize=normalize) write_to_fingerprint(atom_features, num_layers) return sum(all_layer_fps), atom_activations, array_rep def output_layer_fun(weights, smiles): output, _, _ = 
output_layer_fun_and_atom_activations(weights, smiles) return output def compute_atom_activations(weights, smiles): _, atom_activations, array_rep = output_layer_fun_and_atom_activations(weights, smiles) return atom_activations, array_rep if return_atom_activations: return output_layer_fun, parser, compute_atom_activations else: return output_layer_fun, parser @memoize def array_rep_from_smiles(smiles): """Precompute everything we need from MolGraph so that we can free the memory asap.""" molgraph = graph_from_smiles_tuple(smiles) arrayrep = {'atom_features' : molgraph.feature_array('atom'), 'bond_features' : molgraph.feature_array('bond'), 'atom_list' : molgraph.neighbor_list('molecule', 'atom'), # List of lists. 'rdkit_ix' : molgraph.rdkit_ix_array()} # For plotting only. for degree in degrees: arrayrep[('atom_neighbors', degree)] = \ np.array(molgraph.neighbor_list(('atom', degree), 'atom'), dtype=int) arrayrep[('bond_neighbors', degree)] = \ np.array(molgraph.neighbor_list(('atom', degree), 'bond'), dtype=int) return arrayrep def build_conv_deep_net(conv_params, net_params, fp_l2_penalty=0.0): """Returns loss_fun(all_weights, smiles, targets), pred_fun, combined_parser.""" conv_fp_func, conv_parser = build_convnet_fingerprint_fun(**conv_params) return build_fingerprint_deep_net(net_params, conv_fp_func, conv_parser, fp_l2_penalty)
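The softmax() helper above is the usual log-sum-exp-stabilised form. A standalone numerical check with plain NumPy/SciPy follows; the original uses autograd's wrappers so gradients can flow through it.

import numpy as np
from scipy.special import logsumexp

def softmax(X, axis=0):
    # same formula as build_convnet.softmax, minus the autograd wrapper
    return np.exp(X - logsumexp(X, axis=axis, keepdims=True))

X = np.array([[1.0, 2.0, 3.0],
              [4.0, 5.0, 6.0]])
probs = softmax(X, axis=1)
print(probs.sum(axis=1))   # [1. 1.] -- each row is normalised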
lizardsystem/lizard-damage
lizard_damage/migrations/0012_auto__add_field_damagescenario_customlandusegeoimage.py
Python
gpl-3.0
9,306
0.007629
# encoding: utf-8 import datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Adding field 'DamageScenario.customlandusegeoimage' db.add_column('lizard_damage_damagescenario', 'customlandusegeoimage', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['lizard_damage.GeoImage'], null=True, blank=True), keep_default=False) def backwards(self, orm): # Deleting field 'DamageScenario.customlandusegeoimage' db.delete_column('lizard_damage_damagescenario', 'customlandusegeoimage_id') models = { 'lizard_damage.benefitscenario': { 'Meta': {'object_name': 'BenefitScenario'}, 'datetime_created': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '128'}), 'expiration_date': ('django.db.models.fields.DateTimeField', [], {}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.
models.fields.CharField', [], {'max_length': '64'}), 'slug': ('django.db.models.fields.SlugField', [], {'db
_index': 'True', 'max_length': '50', 'null': 'True', 'blank': 'True'}), 'status': ('django.db.models.fields.IntegerField', [], {'default': '1'}), 'zip_result': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}), 'zip_risk_a': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}), 'zip_risk_b': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}) }, 'lizard_damage.benefitscenarioresult': { 'Meta': {'object_name': 'BenefitScenarioResult'}, 'benefit_scenario': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_damage.BenefitScenario']"}), 'east': ('django.db.models.fields.FloatField', [], {}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'image': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}), 'north': ('django.db.models.fields.FloatField', [], {}), 'south': ('django.db.models.fields.FloatField', [], {}), 'west': ('django.db.models.fields.FloatField', [], {}) }, 'lizard_damage.damageevent': { 'Meta': {'object_name': 'DamageEvent'}, 'depth_slugs': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'floodmonth': ('django.db.models.fields.IntegerField', [], {'default': '9'}), 'floodtime': ('django.db.models.fields.FloatField', [], {}), 'height_slugs': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'landuse_slugs': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'max_height': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}), 'min_height': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}), 'repairtime_buildings': ('django.db.models.fields.FloatField', [], {'default': '432000'}), 'repairtime_roads': ('django.db.models.fields.FloatField', [], {'default': '432000'}), 'repetition_time': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}), 'result': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}), 'scenario': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_damage.DamageScenario']"}), 'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '50', 'null': 'True', 'blank': 'True'}), 'status': ('django.db.models.fields.IntegerField', [], {'default': '1'}), 'table': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}) }, 'lizard_damage.damageeventresult': { 'Meta': {'object_name': 'DamageEventResult'}, 'damage_event': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_damage.DamageEvent']"}), 'east': ('django.db.models.fields.FloatField', [], {}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'image': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}), 'north': ('django.db.models.fields.FloatField', [], {}), 'south': ('django.db.models.fields.FloatField', [], {}), 'west': ('django.db.models.fields.FloatField', [], {}) }, 'lizard_damage.damageeventwaterlevel': { 'Meta': {'ordering': "(u'index',)", 'object_name': 'DamageEventWaterlevel'}, 'event': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_damage.DamageEvent']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 
'True'}), 'index': ('django.db.models.fields.IntegerField', [], {'default': '100'}), 'waterlevel': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}) }, 'lizard_damage.damagescenario': { 'Meta': {'object_name': 'DamageScenario'}, 'calc_type': ('django.db.models.fields.IntegerField', [], {'default': '2'}), 'customheights': ('django.db.models.fields.FilePathField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}), 'customlanduse': ('django.db.models.fields.FilePathField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}), 'customlandusegeoimage': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_damage.GeoImage']", 'null': 'True', 'blank': 'True'}), 'damagetable': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}), 'datetime_created': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '128'}), 'expiration_date': ('django.db.models.fields.DateTimeField', [], {}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}), 'scenario_type': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '50', 'null': 'True', 'blank': 'True'}), 'status': ('django.db.models.fields.IntegerField', [], {'default': '1'}) }, 'lizard_damage.geoimage': { 'Meta': {'object_name': 'GeoImage'}, 'east': ('django.db.models.fields.FloatField', [], {}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'image': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}), 'north': ('django.db.models.fields.FloatField', [], {}), 'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'}), 'south': ('django.db.models.fields.FloatField', [], {}), 'west': ('django.db.models.fields.FloatField', [], {}) }, 'lizard_damage.riskresult': { 'Meta': {'object_name': 'RiskResult'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'scenario': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_damage.DamageScenario']"}), 'zip_risk': ('django.db.models.fields.files.FileField', [], {'max_length':
evonove/urt-tully
django-tully/manage.py
Python
mit
803
0
#!/usr/bin/env python import os import sys if __name__ == "__main__": os.environ.setdefault("DJANGO_SETTINGS_MODULE", "tully.settings") try: from django.core.management import execute_from_command_line except ImportError: # The above import may fail for some other reason. Ensure that the # issue is really t
hat Django is missing to avoid masking other # exceptions on Python 2. try: import django except ImportError: raise ImportError( "Couldn't import Django. Are you sure it's installed and " "available on your PYTHONPATH environment variable? Did you " "forget to activate a virtual environment?" ) ra
ise execute_from_command_line(sys.argv)
psvnl/podb
ui_mainwindow.py
Python
gpl-3.0
28,313
0.002508
# -*- coding: utf-8 -*- # Form implementation generated from reading ui file 'mainwindow.ui' # # Created: Wed May 25 13:43:28 2016 # by: PyQt4 UI code generator 4.10.4 # # WARNING! All changes made in this file will be lost! from PyQt4 import QtCore, QtGui try: _fromUtf8 = QtCore.QString.fromUtf8 except AttributeError: def _fromUtf8(s): return s try: _encoding = QtGui.QApplication.UnicodeUTF8 def _translate(context, text, disambig): return QtGui.QApplication.translate(context, text, disamb
ig, _encoding) except AttributeError: def _translate(context, text, disambig): return QtGui.QApplication.translate(context, text, disambig) class Ui_MainWindow(object): def setupUi(
self, MainWindow): MainWindow.setObjectName(_fromUtf8("MainWindow")) MainWindow.resize(800, 752) icon = QtGui.QIcon() icon.addPixmap(QtGui.QPixmap(_fromUtf8(":/podbicon.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off) MainWindow.setWindowIcon(icon) self.centralwidget = QtGui.QWidget(MainWindow) self.centralwidget.setObjectName(_fromUtf8("centralwidget")) self.statusBarLabel = QtGui.QLabel(self.centralwidget) self.statusBarLabel.setGeometry(QtCore.QRect(0, 690, 801, 20)) self.statusBarLabel.setFrameShape(QtGui.QFrame.StyledPanel) self.statusBarLabel.setFrameShadow(QtGui.QFrame.Sunken) self.statusBarLabel.setText(_fromUtf8("")) self.statusBarLabel.setObjectName(_fromUtf8("statusBarLabel")) self.frame = QtGui.QFrame(self.centralwidget) self.frame.setGeometry(QtCore.QRect(0, 0, 801, 31)) self.frame.setFrameShape(QtGui.QFrame.StyledPanel) self.frame.setFrameShadow(QtGui.QFrame.Raised) self.frame.setObjectName(_fromUtf8("frame")) self.clearToolButton = QtGui.QToolButton(self.frame) self.clearToolButton.setGeometry(QtCore.QRect(90, 0, 32, 32)) icon1 = QtGui.QIcon() icon1.addPixmap(QtGui.QPixmap(_fromUtf8(":/clear.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off) self.clearToolButton.setIcon(icon1) self.clearToolButton.setIconSize(QtCore.QSize(32, 32)) self.clearToolButton.setObjectName(_fromUtf8("clearToolButton")) self.saveToolButton = QtGui.QToolButton(self.frame) self.saveToolButton.setGeometry(QtCore.QRect(60, 0, 32, 32)) icon2 = QtGui.QIcon() icon2.addPixmap(QtGui.QPixmap(_fromUtf8(":/save.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off) self.saveToolButton.setIcon(icon2) self.saveToolButton.setIconSize(QtCore.QSize(32, 32)) self.saveToolButton.setObjectName(_fromUtf8("saveToolButton")) self.openToolButton = QtGui.QToolButton(self.frame) self.openToolButton.setGeometry(QtCore.QRect(30, 0, 32, 32)) icon3 = QtGui.QIcon() icon3.addPixmap(QtGui.QPixmap(_fromUtf8(":/open.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off) self.openToolButton.setIcon(icon3) self.openToolButton.setIconSize(QtCore.QSize(32, 32)) self.openToolButton.setObjectName(_fromUtf8("openToolButton")) self.newToolButton = QtGui.QToolButton(self.frame) self.newToolButton.setGeometry(QtCore.QRect(0, 0, 32, 32)) icon4 = QtGui.QIcon() icon4.addPixmap(QtGui.QPixmap(_fromUtf8(":/new.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off) self.newToolButton.setIcon(icon4) self.newToolButton.setIconSize(QtCore.QSize(32, 32)) self.newToolButton.setObjectName(_fromUtf8("newToolButton")) self.printToolButton = QtGui.QToolButton(self.frame) self.printToolButton.setGeometry(QtCore.QRect(770, 0, 32, 32)) icon5 = QtGui.QIcon() icon5.addPixmap(QtGui.QPixmap(_fromUtf8(":/print.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off) self.printToolButton.setIcon(icon5) self.printToolButton.setIconSize(QtCore.QSize(32, 32)) self.printToolButton.setObjectName(_fromUtf8("printToolButton")) self.exportToolButton = QtGui.QToolButton(self.frame) self.exportToolButton.setGeometry(QtCore.QRect(740, 0, 32, 32)) icon6 = QtGui.QIcon() icon6.addPixmap(QtGui.QPixmap(_fromUtf8(":/exportpdf.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off) self.exportToolButton.setIcon(icon6) self.exportToolButton.setIconSize(QtCore.QSize(32, 32)) self.exportToolButton.setObjectName(_fromUtf8("exportToolButton")) self.orderDetailsGroupBox = QtGui.QGroupBox(self.centralwidget) self.orderDetailsGroupBox.setGeometry(QtCore.QRect(0, 40, 801, 71)) self.orderDetailsGroupBox.setObjectName(_fromUtf8("orderDetailsGroupBox")) self.layoutWidget = QtGui.QWidget(self.orderDetailsGroupBox) self.layoutWidget.setGeometry(QtCore.QRect(10, 
20, 781, 48)) self.layoutWidget.setObjectName(_fromUtf8("layoutWidget")) self.gridLayout = QtGui.QGridLayout(self.layoutWidget) self.gridLayout.setMargin(0) self.gridLayout.setObjectName(_fromUtf8("gridLayout")) self.label_2 = QtGui.QLabel(self.layoutWidget) font = QtGui.QFont() font.setPointSize(11) font.setBold(True) font.setWeight(75) self.label_2.setFont(font) self.label_2.setObjectName(_fromUtf8("label_2")) self.gridLayout.addWidget(self.label_2, 0, 0, 1, 1) self.orderNumberLabel = QtGui.QLabel(self.layoutWidget) font = QtGui.QFont() font.setPointSize(11) font.setBold(True) font.setWeight(75) self.orderNumberLabel.setFont(font) self.orderNumberLabel.setText(_fromUtf8("")) self.orderNumberLabel.setObjectName(_fromUtf8("orderNumberLabel")) self.gridLayout.addWidget(self.orderNumberLabel, 0, 1, 1, 1) self.label_3 = QtGui.QLabel(self.layoutWidget) self.label_3.setObjectName(_fromUtf8("label_3")) self.gridLayout.addWidget(self.label_3, 0, 2, 1, 1) self.orderDateEdit = QtGui.QDateEdit(self.layoutWidget) self.orderDateEdit.setObjectName(_fromUtf8("orderDateEdit")) self.gridLayout.addWidget(self.orderDateEdit, 0, 3, 1, 1) self.label_5 = QtGui.QLabel(self.layoutWidget) self.label_5.setObjectName(_fromUtf8("label_5")) self.gridLayout.addWidget(self.label_5, 0, 4, 1, 1) self.paymentTermsComboBox = QtGui.QComboBox(self.layoutWidget) self.paymentTermsComboBox.setObjectName(_fromUtf8("paymentTermsComboBox")) self.gridLayout.addWidget(self.paymentTermsComboBox, 0, 5, 1, 1) self.label_18 = QtGui.QLabel(self.layoutWidget) self.label_18.setObjectName(_fromUtf8("label_18")) self.gridLayout.addWidget(self.label_18, 1, 0, 1, 1) self.projectComboBox = QtGui.QComboBox(self.layoutWidget) self.projectComboBox.setObjectName(_fromUtf8("projectComboBox")) self.gridLayout.addWidget(self.projectComboBox, 1, 1, 1, 1) self.label_4 = QtGui.QLabel(self.layoutWidget) self.label_4.setObjectName(_fromUtf8("label_4")) self.gridLayout.addWidget(self.label_4, 1, 2, 1, 1) self.orderStatusComboBox = QtGui.QComboBox(self.layoutWidget) self.orderStatusComboBox.setObjectName(_fromUtf8("orderStatusComboBox")) self.gridLayout.addWidget(self.orderStatusComboBox, 1, 3, 1, 1) self.taxRateLabel = QtGui.QLabel(self.layoutWidget) self.taxRateLabel.setObjectName(_fromUtf8("taxRateLabel")) self.gridLayout.addWidget(self.taxRateLabel, 1, 4, 1, 1) self.taxRateValueLabel = QtGui.QLabel(self.layoutWidget) self.taxRateValueLabel.setText(_fromUtf8("")) self.taxRateValueLabel.setObjectName(_fromUtf8("taxRateValueLabel")) self.gridLayout.addWidget(self.taxRateValueLabel, 1, 5, 1, 1) self.supplierGroupBox = QtGui.QGroupBox(self.centralwidget) self.supplierGroupBox.setGeometry(QtCore.QRect(0, 120, 801, 80)) self.supplierGroupBox.setObjectName(_fromUtf8("supplierGroupBox")) self.layoutWid
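Files generated by pyuic4, like this one, are not edited by hand; they are attached to a widget at runtime. The canonical usage pattern is sketched below; it needs PyQt4, a display, and the compiled resource module for the icons to show up.

import sys
from PyQt4 import QtGui
from ui_mainwindow import Ui_MainWindow

app = QtGui.QApplication(sys.argv)
window = QtGui.QMainWindow()
ui = Ui_MainWindow()
ui.setupUi(window)       # builds all widgets onto the window
window.show()
sys.exit(app.exec_())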
clarko1/Cramd
bigquery/api/getting_started_test.py
Python
apache-2.0
808
0
# Copyright 2015, Google, Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or i
mplied. # See the License for the specific language governing permissions and # limitations under the License. import re
from getting_started import main def test_main(cloud_config, capsys): main(cloud_config.project) out, _ = capsys.readouterr() assert re.search(re.compile( r'Query Results:.hamlet', re.DOTALL), out)
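The assertion above leans on two things worth spelling out: pytest's capsys fixture captures what main() prints, and re.DOTALL lets the '.' in the pattern match a newline between the header and the first row. A tiny standalone illustration of the regex part, with invented output strings:

import re

pattern = re.compile(r'Query Results:.hamlet', re.DOTALL)

# With re.DOTALL the single '.' also matches the newline separating the
# header from the first row of output.
print(bool(pattern.search('Query Results:\nhamlet_by_william_shakespeare')))  # True
print(bool(pattern.search('no rows returned')))                               # False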
StackStorm/st2
st2common/st2common/persistence/rule_enforcement.py
Python
apache-2.0
920
0
# Copyright 2020 The StackStorm Authors. # Copyright 2019 Extreme Networks, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You
may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions an
d # limitations under the License. from __future__ import absolute_import from st2common.models.db.rule_enforcement import rule_enforcement_access from st2common.persistence.base import Access class RuleEnforcement(Access): impl = rule_enforcement_access @classmethod def _get_impl(cls): return cls.impl
umrashrf/scrapy
scrapy/utils/test.py
Python
bsd-3-clause
3,020
0.002649
""" This module contains some assorted functions used in tests """ from __future__ import absolute_import import os from importlib import import_module from twisted.trial.unittest import SkipTest from scrapy.ex
ceptions import NotConfigured from scrapy.utils.boto
import is_botocore def assert_aws_environ(): """Asserts the current environment is suitable for running AWS testsi. Raises SkipTest with the reason if it's not. """ skip_if_no_boto() if 'AWS_ACCESS_KEY_ID' not in os.environ: raise SkipTest("AWS keys not found") def assert_gcs_environ(): if 'GCS_PROJECT_ID' not in os.environ: raise SkipTest("GCS_PROJECT_ID not found") def skip_if_no_boto(): try: is_botocore() except NotConfigured as e: raise SkipTest(e) def get_s3_content_and_delete(bucket, path, with_key=False): """ Get content from s3 key, and delete key afterwards. """ if is_botocore(): import botocore.session session = botocore.session.get_session() client = session.create_client('s3') key = client.get_object(Bucket=bucket, Key=path) content = key['Body'].read() client.delete_object(Bucket=bucket, Key=path) else: import boto # assuming boto=2.2.2 bucket = boto.connect_s3().get_bucket(bucket, validate=False) key = bucket.get_key(path) content = key.get_contents_as_string() bucket.delete_key(path) return (content, key) if with_key else content def get_gcs_content_and_delete(bucket, path): from google.cloud import storage client = storage.Client(project=os.environ.get('GCS_PROJECT_ID')) bucket = client.get_bucket(bucket) blob = bucket.get_blob(path) content = blob.download_as_string() bucket.delete_blob(path) return content, blob def get_crawler(spidercls=None, settings_dict=None): """Return an unconfigured Crawler object. If settings_dict is given, it will be used to populate the crawler settings with a project level priority. """ from scrapy.crawler import CrawlerRunner from scrapy.spiders import Spider runner = CrawlerRunner(settings_dict) return runner.create_crawler(spidercls or Spider) def get_pythonpath(): """Return a PYTHONPATH suitable to use in processes so that they find this installation of Scrapy""" scrapy_path = import_module('scrapy').__path__[0] return os.path.dirname(scrapy_path) + os.pathsep + os.environ.get('PYTHONPATH', '') def get_testenv(): """Return a OS environment dict suitable to fork processes that need to import this installation of Scrapy, instead of a system installed one. """ env = os.environ.copy() env['PYTHONPATH'] = get_pythonpath() return env def assert_samelines(testcase, text1, text2, msg=None): """Asserts text1 and text2 have the same lines, ignoring differences in line endings between platforms """ testcase.assertEqual(text1.splitlines(), text2.splitlines(), msg)
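get_crawler() above is useful outside Scrapy's own test suite as well. A minimal usage sketch; the setting name is just an arbitrary built-in Scrapy setting chosen for illustration.

from scrapy.utils.test import get_crawler

crawler = get_crawler(settings_dict={'ROBOTSTXT_OBEY': False})
print(crawler.settings.getbool('ROBOTSTXT_OBEY'))   # False -- applied at project priority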
csnake-org/CSnake
src/csnStandardModuleProject.py
Python
bsd-3-clause
7,313
0.012033
## @package csnStandardModuleProject # Definition of the methods used for project configuration. # This should be the only CSnake import in a project configuration. import csnUtility import csnProject import csnBuild import os.path import inspect from csnProject import GenericProject class StandardModuleProject(GenericProject): """ GenericProject with applications and modules in specific folders. """ def __init__(self, _name, _type, _sourceRootFolder
= None, _categories = None): if _sourceRootFolder is None: filename = csnProject.FindFilename(1) dirname = os.path.dirname(filename) _sourceRootFolder = csnUtility.NormalizePath(dirname, _correctCase = False) GenericProject.__init__(self, _name=_name, _type=_type, _sourceRootFolder=_sourceRootFolder, _categories=_categories, _context=csnProject.globalCurrentContext)
self.applicationsProject = None def AddLibraryModules(self, _libModules): """ Adds source files (anything matching *.c??) and public include folders to self, using a set of libmodules. It is assumed that the root folder of self has a subfolder called libmodules. The subfolders of libmodules should contain a subfolder called src (e.g. for mymodule, this would be libmodules/mymodule/src). If the src folder has a subfolder called 'stub', it is also added to the source tree. _libModules - a list of subfolders of the libmodules folder that should be 'added' to self. """ # add sources sourceRootFolder = self.GetSourceRootFolder() includeFileExtensions = csnUtility.GetIncludeFileExtensions() sourceFileExtensions = csnUtility.GetSourceFileExtensions() for libModule in _libModules: for stub in ("/stub", ""): srcFolder = "libmodules/%s/src%s" % (libModule, stub) srcFolderAbs = "%s/%s" % (sourceRootFolder, srcFolder) if( os.path.exists(srcFolderAbs) ): self.AddIncludeFolders([srcFolder]) for extension in sourceFileExtensions: self.AddSources(["%s/*.%s" % (srcFolder, extension)], _checkExists = 0) for extension in includeFileExtensions: self.AddSources(["%s/*.%s" % (srcFolder, extension)], _checkExists = 0) for libModule in _libModules: for stub in ("/stub", ""): includeFolder = "libmodules/%s/include%s" % (libModule, stub) includeFolderAbs = "%s/%s" % (sourceRootFolder, includeFolder) if( os.path.exists(includeFolderAbs) ): self.AddIncludeFolders([includeFolder]) for extension in includeFileExtensions: self.AddSources(["%s/*.%s" % (includeFolder, extension)], _checkExists = 0) def AddApplications(self, _modules, _pch="", _applicationDependenciesList=None, _holderName=None, _properties = []): """ Creates extra CSnake projects, each project building one application in the 'Applications' subfolder of the current project. _modules - List of the subfolders within the 'Applications' subfolder that must be scanned for applications. _pch - If not "", this is the include file used to generate a precompiled header for each application. """ dependencies = [self] if not _applicationDependenciesList is None: dependencies.extend(_applicationDependenciesList) if _holderName is None: _holderName = "%sApplications" % self.name csnProject.globalCurrentContext.SetSuperSubCategory("Applications", _holderName) if self.applicationsProject is None: self.applicationsProject = csnBuild.Project(self.name + "Applications", "container", _sourceRootFolder = self.GetSourceRootFolder(), _categories = [_holderName]) #self.applicationsProject.AddSources([csnUtility.GetDummyCppFilename()], _sourceGroup = "CSnakeGeneratedFiles") self.applicationsProject.AddProjects([self]) self.AddProjects([self.applicationsProject], _dependency = 0) # look for an 'applications' or 'Applications' folder _modulesFolder = "%s/applications" % self.GetSourceRootFolder() if not os.path.exists(_modulesFolder): _modulesFolder = "%s/Applications" % self.GetSourceRootFolder() self.__AddApplications(self.applicationsProject, dependencies, _modules, _modulesFolder, _pch, _holderName, _properties) def __AddApplications(self, _holderProject, _applicationDependenciesList, _modules, _modulesFolder, _pch = "", _holderName=None, _properties = []): """ Creates application projects and adds them to _holderProject (using _holderProject.AddProject). The holder project does not depend on these application projects. It is assumed that _modules is a list containing subfolders of _modulesFolder. 
Each subfolder in _modules should contain source files (.cpp, .cxx or .cc), where each source file corresponds to a single application. Hence, each source file is used to create a new application project. For example, assuming that the _modulesFolder is called 'Applications', the file 'Applications/Small/Tiny.cpp' will be used to build the 'Tiny' application. _applicationDependenciesList - List of projects that each new application project is dependent on. _modulesFolder - Folder containing subfolders with applications. _modules = List of subfolders of _modulesFolder that should be processed. _pch - If not "", this is the C++ include file which is used for building a precompiled header file for each application. """ for module in _modules: moduleFolder = "%s/%s" % (_modulesFolder, module) sourceFiles = [] headerFiles = [] for extension in csnUtility.GetSourceFileExtensions(): sourceFiles.extend(_holderProject.Glob("%s/*.%s" % (moduleFolder, extension))) for extension in csnUtility.GetIncludeFileExtensions(): headerFiles.extend(_holderProject.Glob("%s/*.%s" % (moduleFolder, extension))) for sourceFile in sourceFiles: if os.path.isdir(sourceFile): continue name = os.path.splitext( os.path.basename(sourceFile) )[0] name = name.replace(' ', '_') if _holderName is None: _holderName = _holderProject.name app = csnBuild.Project("%s_%s" % (_holderName, name), "executable", _sourceRootFolder = _holderProject.GetSourceRootFolder()) app.AddIncludeFolders([moduleFolder]) app.AddProjects(_applicationDependenciesList) app.AddSources([sourceFile]) app.AddProperties( _properties ) # add header files so that they appear in visual studio app.AddSources(headerFiles) if( _pch != "" ): app.SetPrecompiledHeader(_pch) _holderProject.AddProjects([app])
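Projects that use these helpers are ordinary CSnake configuration files. A hypothetical csnMyLib.py following the folder conventions described in the docstrings above might read as follows; the project, module and folder names are invented, and the snippet only executes inside a CSnake context.

from csnStandardModuleProject import StandardModuleProject

mylib = StandardModuleProject("MyLib", "library")
# Picks up libmodules/core/src, libmodules/io/src, matching include folders and optional stub folders.
mylib.AddLibraryModules(["core", "io"])
# Every source file under applications/demo becomes its own "MyLibApplications_<name>" executable.
mylib.AddApplications(["demo"], _pch="MyLibPCH.h")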
procrastinatio/mapproxy
mapproxy/test/unit/test_collections.py
Python
apache-2.0
3,064
0.001305
# This file is part of the MapProxy project. # Copyright (C) 2010 Omniscale <http://omniscale.de> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from mapproxy.util.collections import LRU, ImmutableDictList from nose.tools import eq_, raises class TestLRU(object): @raises(KeyError) def test_missing_key(self): lru = LRU(10) lru['foo'] def test_contains(self): lru = LRU(10) lru['foo1'] = 1
assert 'foo1' in lru assert 'foo2' not in lru def test_repr(self): lru = LRU(10) lru['foo1'] = 1 assert 'size=10' in repr(lru) assert 'foo1' in repr(lru) def test_g
etitem(self): lru = LRU(10) lru['foo1'] = 1 lru['foo2'] = 2 eq_(lru['foo1'], 1) eq_(lru['foo2'], 2) def test_get(self): lru = LRU(10) lru['foo1'] = 1 eq_(lru.get('foo1'), 1) eq_(lru.get('foo1', 2), 1) def test_get_default(self): lru = LRU(10) lru['foo1'] = 1 eq_(lru.get('foo2'), None) eq_(lru.get('foo2', 2), 2) def test_delitem(self): lru = LRU(10) lru['foo1'] = 1 assert 'foo1' in lru del lru['foo1'] assert 'foo1' not in lru def test_empty(self): lru = LRU(10) assert bool(lru) == False lru['foo1'] = '1' assert bool(lru) == True def test_setitem_overflow(self): lru = LRU(2) lru['foo1'] = 1 lru['foo2'] = 2 lru['foo3'] = 3 assert 'foo1' not in lru assert 'foo2' in lru assert 'foo3' in lru def test_length(self): lru = LRU(2) eq_(len(lru), 0) lru['foo1'] = 1 eq_(len(lru), 1) lru['foo2'] = 2 eq_(len(lru), 2) lru['foo3'] = 3 eq_(len(lru), 2) del lru['foo3'] eq_(len(lru), 1) class TestImmutableDictList(object): def test_named(self): res = ImmutableDictList([('one', 10), ('two', 5), ('three', 3)]) assert res[0] == 10 assert res[2] == 3 assert res['one'] == 10 assert res['three'] == 3 assert len(res) == 3 def test_named_iteritems(self): res = ImmutableDictList([('one', 10), ('two', 5), ('three', 3)]) itr = res.iteritems() eq_(next(itr), ('one', 10)) eq_(next(itr), ('two', 5)) eq_(next(itr), ('three', 3)) try: next(itr) except StopIteration: pass else: assert False, 'StopIteration expected'
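The behaviour pinned down by TestLRU can be summarised in a few lines. This sketch assumes mapproxy is importable and only re-uses what the tests above already demonstrate.

from mapproxy.util.collections import LRU

lru = LRU(2)
lru['a'] = 1
lru['b'] = 2
lru['c'] = 3                     # capacity is 2, so the oldest key 'a' is dropped
print('a' in lru, 'c' in lru)    # False True
print(lru.get('a', 'evicted'))   # 'evicted'
print(len(lru))                  # 2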
avoorhis/vamps-node.js
public/scripts/rdp/rdp_fasta2tax.py
Python
mit
6,320
0.024525
#!/usr/bin/env python ######################################### # # fasta2tax.py # ######################################## import sys,os import argparse import pymysql as MySQLdb import json py_pipeline_path = os.path.expanduser('~/programming/py_mbl_sequencing_pipeline') #my $rdpFile = "$inputfile.rdp"; print "rdp file: start\n"; #my $rdpFile = dirname($inputfile)."/$project--$dataset.fa.rdp"; rdpFile = inputfile+".rdp"; #my $rdpFile = "$project--$dataset.fa.rdp"; print "rdp file: rdpFile\n"; loadFile1 = inputfile+".load1" loadFile2 = inputfile+".load2" outFile = inputfile+".rdpout" logFile = inputfile+".rdplog" # $logFile => /usr/local/www/vamps/tmp/fasta2tax.log if DEBUG: print "DEBUG: Invoked with arguments (post processing):\n" print "DEBUG: user: user\n" print "DEBUG: inputfile: inputfile\n" print "DEBUG: project: project\n" print "DEBUG: dataset: dataset\n" print "DEBUG: path_to_apps: path_to_apps\n" print "DEBUG: database: database\n" print "DEBUG: table1: table1\n" print "DEBUG: table2: table2\n" print "DEBUG: db_user: db_user\n" print "DEBUG: db_password: db_password\n" print "DEBUG: db_hostname: db_hostname\n" ####################################### # # Do sanity checking for presence of # values from argument processing... # ####################################### ####################################### # # Run RDP and rdp_file_creator... # ####################################### def run(project): path_to_rdp = py_pipeline_path+"/bin/rdp" print path_to_rdp rdpCmd = path_to_rdp+' ' +inputfile+' '+rdpFile print "Preparing to execute RDP Command: rdpCmd\n"; rdpCmdOutput = subprocess.check_output(rdpCmd, shell=True) #my $rdpCheckCmd = "$path_to_apps/rdp_checker -q -log $logFile -b 80 -project \"$project\" -dataset \"$dataset\" -f1 $loadFile1 -f2 $loadFile2 $rdpFile"; rdpCheckCmd = py_pipeline_path+"/bin/rdp_file_creator -s database -q -log logFile -b 80 -project \"$project\" -dataset \"$dataset\" -f1 $loadFile1 -f2 $loadFile2 $rdpFile"; rdpCheckOutput = subprocess.check_output(rdpCheckCmd, shell=True) # $DEBUG && print "DEBUG: rdp_file_creator exited with result code: $rdpCheckExitCode<br><br>\n"; # if ($DEBUG) { # my @rdpCheckOutput_lines = split /\n/, $rdpCheckOutput; # foreach my $output_line (@rdpCheckOutput_lines) { # print "DEBUG: $output_line<br>\n"; # } # } # my $rdpCheckExitString; # if ($rdpCheckExitCode == 0) { # $rdpCheckExitString = "0"; # } elsif ($rdpCheckExitCode == 253) { # $rdpCheckExitString = "RDP boot score value is not valid."; # } elsif ($rdpCheckExitCode == 254) { # $rdpCheckExitString = "Taxonomy file is not valid."; # } elsif ($rdpCheckExitCode == 255) { # $rdpCheckExitString = "Internal error: Could not locate taxonomy file."; # } else { # $rdpCheckExitString = "Unknown error."; # } # # if ($rdpCheckExitCode != 0) { # print "Error performing RDP taxonomic checks: $rdpCheckExitString. Data has not been uploaded. Project=\"$project\", Dataset=\"$dataset\", User name=\"$user\"\n"; # exit $rdpCheckExitCode; # } ####################################### # # Load the final taxonomy into the tables specified in the @tables array... # It would be really nice if we could roll this back on failure. # ####################################### # my $dsn = "dbi:mysql:$database:$db_hostname"; # #$DEBUG && print "DEBUG: Connecting to database\n$dsn\n"; # # my $dbh = DBI->connect($dsn, $db_user, $db_password) or die "Unable to connect to $database database\n"; # # if ($use_transactions) { # # Encapsulate the changes to these tables in a transaction... 
# my $query = "START TRANSACTION"; # my $handle = $dbh->prepare($query) or die "Unable to prepare query: $query\n"; # $handle->execute or die "Unable to execute query: $query\n"; # } # # my %load_files = ($table1 => $loadFile1, $table2 => $loadFile2); # foreach (keys %load_files) { # # Get a table... # # Table1 = vamps_data_cube_uploads, Table2 = vamps_junk_data_cube_pipe; # my $table = $_; # # # Clear out the old data and replace with the new # #$DEBUG && print "DEBUG: Removing old project/dataset records from table $dsn.$table...\n"; # my $cleanQuery = "delete from $table where project='" . $project ."' and dataset = '" . $dataset . "'"; # #$DEBUG && print "DEBUG: Preparing query: \"$cleanQuery\"...\n"; # my $clean_h = $dbh->prepare($cleanQuery) or die "Unable to prepare query: $cleanQuery\n"; # $clean_h->execute or die "Unable to execute query: $cleanQuery\n"; # # # Add the new data into the table # #$DEBUG && print "DEBUG: Loading final taxonomy into the table $dsn.$table...\n"; # # # Set up the query to Load the data # my $loadQuery = "load data local infile '" . $load_files{$table} . "' replace into table $table fields terminated by '\t' lines terminated by '\n' # set classifier='RDP'"; # # #$DEBUG && print "DEBUG: Preparing query: \"$loadQuery\"...\n"; # # my $load_h = $dbh->prepare($loadQuery) or die "Unable to prepare query: $loadQuery\n"; # # $load_h->execute or die "Unable to execute query: $loadQuery\n"; # # if ($dbh->err) { # if ($use_transactions) { # # Encapsulate the changes to these tables in a transaction... # my $query = "ROLLBACK"; # my $handle = $dbh->prepare($query) or die "Unable to prepare query: $query\n"; # $handle->execute or die "Unab
le to execute query: $query\n"; # } # print "Application Error: An error has occured while trying to load the data into the MySQL database. The following query was used: \"$loadQuery\".\n"; # print "The database engine reports the error as: \"".$dbh->errstr."\".\n"; # print "This is a fatal error. Exiting.\n"; # exit 1; # } # } # # if ($use_transactions) { #
# commit the transaction... # my $query = "COMMIT"; # my $handle = $dbh->prepare($query) or die "Unable to prepare query: $query\n"; # $handle->execute or die "Unable to execute query: $query\n"; # } #$DEBUG && print "DEBUG: Cleaning out tmp files...\n"; # foreach my $i ($inputfile, $rdpFile, $loadFile1, $loadFile2, $logFile) # { # #my $rmErr = system("rm -f $i"); # } #$DEBUG && print "DEBUG: Execution complete.\n"; #print "Done and clean from fasta2tax.pl<br>\n";
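The commented-out Perl at the end describes the load step this module still has to reimplement: delete the existing project/dataset rows, then bulk-load the two RDP result files. Below is a rough, hypothetical pymysql equivalent of that step only; table names, load files and credentials are placeholders, not the project's actual configuration.

import pymysql

def load_rdp_results(load_files, project, dataset, **connect_kwargs):
    # load_files maps table name -> tab-separated load file, as in the Perl version
    conn = pymysql.connect(local_infile=True, **connect_kwargs)
    try:
        with conn.cursor() as cursor:
            for table, load_file in load_files.items():
                cursor.execute(
                    "DELETE FROM {0} WHERE project = %s AND dataset = %s".format(table),
                    (project, dataset))
                cursor.execute(
                    "LOAD DATA LOCAL INFILE %s REPLACE INTO TABLE {0} "
                    "FIELDS TERMINATED BY '\\t' LINES TERMINATED BY '\\n' "
                    "SET classifier = 'RDP'".format(table),
                    (load_file,))
        conn.commit()
    finally:
        conn.close()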
MindLoad/MonkeyManager
widgets/__init__.py
Python
gpl-3.0
94
0
""" Project additional elements
""" from .menu_button impo
rt * from .new_key_qframe import *
ricco386/broadcaster
RPi.PIR/setup.py
Python
bsd-3-clause
1,382
0
#!/usr/bin/python3 # -*- coding: utf-8 -*- # # This software is licensed as described in the README.rst and LICENSE # files, which you should have received as part of this distribution. import setuptools # noinspection PyPep8Naming from raspi_pir import __version__ as VERSION DEPS = ['RPi.Sensor>=0.5.3'] CLASSIFIERS = [ 'Environment :: Console', 'Intended Audience :: System Administrators', 'Intended Audience :: Developers', 'Intended Audience :: End Users/Desktop', 'Operating System :: Unix', 'Operating System :: POSIX :: Linux', 'License :: OSI Approved :: MIT License', 'Programming Language :: Python', 'Programming Langua
ge :: Python :: 3', 'Development Status :: 4 - Beta', 'Topic :: Utilities', 'Topic :: Home Automation', 'Topic :: System :: Hardware', 'Topic :: Terminals' ] with open("README.rst", "r") as fp: sensor_long_description = fp.read() setuptools.setup( name='RPi.PIR', version=VERSION, author="Richard von Kellner", au
thor_email="richard.kellner [at] gmail.com", url="https://github.com/ricco386/RPi", description='PIR sensor state monitor', long_description=sensor_long_description, license="MIT", packages=setuptools.find_packages(), classifiers=CLASSIFIERS, install_requires=DEPS, scripts=['bin/raspi-pir'], include_package_data=True )
kernt/linuxtools
gnome3-shell/nautilus-scripts/Archiving/PlowShare-Upload.py
Python
gpl-3.0
6,622
0.056025
#!/usr/bin/python #requires the following: #sudo apt-get install curl #curl http://apt.wxwidgets.org/key.asc | apt-key add - #sudo apt-get update #sudo apt-get install python-wxgtk2.8 python-wxtools wx2.8-i18n #sudo apt-get install python-gdata import wx import os import sys def pinger(): f = os.popen('ping -c 1 google.com') y = '' for x in f.readlines(): y += x a = y.find('--- google.com ping statistics ---') return a #a = pinger() """abc = wx.ShowMessageDialog(None,-1,'No Internet connectoin found!. Will now exit','Error') abc.ShowModal() abc.Destroy() self.Destroy() return False""" #uplist = ['115.com','2shared','4shared','Badongo','Data.hu','DepositFiles','divShare','dl.free.fr','Humyo','Mediafire*','Megaupload','Netload.in','Rapidshare*','Sendspace','Uploading.com','Usershare','x7.to','ZShare'] uplist = ['Megaupload','2shared','Mediafire*','ZShare'] uplist2 =['megaupload','2shared','mediafire','zshare'] class FinalFrame(wx.Frame): def __init__(self): pass mupl = '' tshl = '' medl = '' zshl = '' def add(self,typ,string): if typ == 0: self.mupl += string + '\n\n' elif typ == 1: self.tshl += string + '\n\n' elif typ == 2: self.medl += string + '\n\n' elif typ == 3: self.zshl += string + '\n\n' def doit(self): self.display(self.mupl,self.tshl,self.medl,self.zshl) self.Show() def display(self,megaupload_links,tshared_links,mediafire_links,zshare_links): wx.Frame.__init__(self,None,-1,'Upload Complete!',size=(600,550)) self.panel = wx.Panel(self) wx.StaticText(self.panel,-1,'Your Upload has completed :) Here are your links:',pos = (30,30)) wx.StaticText(self.panel,-1,'Megaupload links:',pos=(30,80)) mupld_link_box = wx.TextCtrl(self.panel,-1,megaupload_links,size=(540,80),pos=(30,100),style=wx.TE_MULTILINE | wx.TE_READONLY) wx.StaticText(self.panel,-1,'2shared links:',pos=(30,190)) tshrd_link_box = wx.TextCtrl(self.panel,-1,tshared_links,size=(540,80),pos=(30,210),style=wx.TE_MULTILINE | wx.TE_READONLY) wx.StaticText(self.panel,-1,'Mediafire links:',pos=(30,300)) mfire_link_box = wx.TextCtrl(self.panel,-1,mediafire_links,size=(540,80),pos=(30,320),style=wx.TE_MULTILINE | wx.TE_READONLY) wx.StaticText(self.panel,-1,'ZShare Links:',pos=(30,410)) zshre_link_box = wx.TextCtrl(self.panel,-1,zshare_links,size=(540,80),pos=(30,430),style=wx.TE_MULTILINE | wx.TE_READONLY) class MyFrame(wx.Frame): fframe = FinalFrame() def __init__(self):
self.param = '' self.check=0 self.args = sys.argv[1:] if len(self.args)==0: self.check=1 wx.Frame.__init__(self,None,-1,'Pshare',size=(600,330)) self.panel = wx.Panel(self) wx.StaticText(self.panel,-1,'Welcome to the Plowshare Uploader GUI.\n\nThis app lets you upload any file to any of the supported file-sharing site
s. To proceed, please select one (or more) of the uploading sites:',pos = (30,30), size = (540,70)) wx.StaticText(self.panel,-1,'Available Sites to upload:',pos = (30,160)) self.choice_box = wx.ListBox(self.panel,-1,(30,120),(540,100),uplist, wx.LB_EXTENDED | wx.LB_HSCROLL) wx.StaticText(self.panel,-1,'*Upload to these sites may NOT work at the moment; developers are trying to fix the issues',pos=(30,225),size=(540,50)) if self.check==1: self.button_browse_files = wx.Button(self.panel,-1,'Browse for files',pos=(420,270),size=(150,30)) self.button_upload = wx.Button(self.panel,-1,'Start Upload',pos=(30,270),size=(150,30)) self.button_login_mupload = wx.Button(self.panel,-1,'Login to Megaupload Account',pos=(190,270),size = (220,30)) self.Bind(wx.EVT_BUTTON,self.browsefiles,self.button_browse_files) else: self.button_upload = wx.Button(self.panel,-1,'Start Upload',pos=(30,270),size=(265,30)) self.button_login_mupload = wx.Button(self.panel,-1,'Login to Megaupload Account',pos=(305,270),size = (265,30)) self.Bind(wx.EVT_BUTTON,self.upload,self.button_upload) self.Bind(wx.EVT_BUTTON,self.login_mega,self.button_login_mupload) def upload(self,evt): temp1 = len(self.args) temp2 = len(self.choice_box.GetSelections()) if temp1==0: nofile_dlg = wx.MessageDialog(None,'No files Chosen!\nChoose atleast 1 file','Error',wx.OK | wx.ICON_ERROR) nofile_dlg.ShowModal() nofile_dlg.Destroy() return if temp2==0: nofile_dlg = wx.MessageDialog(None,'No Upload sites Chosen!\nChoose atleast 1 Upload Site','Error',wx.OK | wx.ICON_ERROR) nofile_dlg.ShowModal() nofile_dlg.Destroy() return self.udlg = wx.ProgressDialog('Processing Request','Wait while we upload your file(s)',maximum=60) self.udlg.Update(1) y = 0 temp2 = 30/temp1 val = 'bash ~/.plowshare/src/upload.sh ' for x in self.args: val += '\"' + x + '\" ' y += temp2 self.udlg.Update(y) y = 30 self.linkss = [] #print val temp3 = self.choice_box.GetSelections() #print temp3 for x in temp3: temp4 = val if uplist2[x] == 'megaupload': temp4 += self.param temp4 += uplist2[x] #print temp4 file1=os.popen(temp4) file1_lines = file1.readlines() if len(file1_lines)==0: err_dlg = wx.MessageDialog(None,'Upload Failed! Possible Reasons:\n1. No Internet connection\n2. 
Upload error (choose different upload\nsite in this case)','Error',wx.OK | wx.ICON_ERROR) err_dlg.ShowModal() err_dlg.Destroy() self.udlg.Update(60) self.udlg.Destroy() return; for x2 in file1_lines: ind = x2.find('(http:') if ind != -1: x2 = 'Link\n====================\n' + x2[0:ind] + '\n\nDelete_link\n====================\n' + x2[ind+1:] self.fframe.add(x,x2) y += temp2 self.udlg.Update(y) self.fframe.doit() self.udlg.Update(60) self.udlg.Destroy() ## self.panel.Destroy() self.Destroy() def login_mega(self,evt): self.username = '' self.password = '' ubox = wx.TextEntryDialog(None,"Please Enter Username","UserName",'username') if ubox.ShowModal()==wx.ID_OK: self.username = ubox.GetValue() ubox.Destroy() ubox = wx.TextEntryDialog(None,'Please Enter Password','Password','********',wx.TE_PASSWORD | wx.OK | wx.CANCEL) if ubox.ShowModal()==wx.ID_OK: self.password = ubox.GetValue() self.param = ' -a ' + self.username + ':' + self.password + ' ' #print '\n\n' + self.param + '\n\n' ubox.Destroy() def browsefiles(self,evt): filed = wx.FileDialog(None,"Choose a file",style=wx.FD_MULTIPLE) filed.ShowModal() a = filed.GetPaths() # print a if len(a) > 0: self.args = a # print len(self.args) filed.Destroy() class MyApp(wx.App): def OnInit(self): frame = MyFrame() frame.Show() return True if __name__=='__main__': app = MyApp(redirect=True) app.MainLoop()
geographika/mappyscript
mappyscript/__init__.py
Python
mit
190
0.010526
# for Python3 we need a f
ully qualified name import
from mappyscript._mappyscript import version, version_number, load, loads, dumps, create_request, load_map_from_params
, Layer, convert_sld
Vladkryvoruchko/PSPNet-Keras-tensorflow
tests/test_smoke.py
Python
mit
2,114
0.005676
import os

import pytest
import numpy as np
from imageio import imread


def compare_2_images(validator_path, output_path):
    val_abs_path = os.path.join(os.path.dirname(__file__), validator_path)
    out_abs_path = os.path.join(os.path.dirname(__file__), output_path)
    val_img = imread(val_abs_path, pilmode='RGB')
    out_img = imread(out_abs_path, pilmode='RGB')
    assert np.all(np.equal(val_img, out_img))


def clean_test_results(output_file_no_ext):
    os.remove("tests/" + output_file_no_ext + "
_probs.jpg") os.remove("tests/" + output_file_no_ext + "_seg.jpg") os.remove("tests/" + output_file_no_ext + "_seg_blended.jpg") os.remove("tests/" + output_file_no_ext + "_seg_read.jpg") def test_main_flip_ade20k(cli_args_ade): from pspnet import main main(cli_args_ade) compare_2_images("ade20k_test_probs.jpg", "validators/ade20k_test_probs.jpg") compare_2_images("ade20k_tes
t_seg.jpg", "validators/ade20k_test_seg.jpg") compare_2_images("ade20k_test_seg_read.jpg", "validators/ade20k_test_seg_read.jpg") clean_test_results("ade20k_test") @pytest.mark.skip def test_main_flip_cityscapes(cli_args_cityscapes): """ TODO: Add images :param cli_args_cityscapes: :return: """ from pspnet import main main(cli_args_cityscapes) compare_2_images("cityscapes_test_probs.jpg", "validators/cityscapes_test_probs.jpg") compare_2_images("cityscapes_test_seg.jpg", "validators/cityscapes_test_seg.jpg") compare_2_images("cityscapes_test_seg_read.jpg", "validators/cityscapes_test_seg_read.jpg") clean_test_results("cityscapes_test") @pytest.mark.skip def test_main_flip_voc(cli_args_voc): """ TODO: Add images :param cli_args_voc: :return: """ from pspnet import main main(cli_args_voc) compare_2_images("pascal_voc_test_probs.jpg", "validators/pascal_voc_test_probs.jpg") compare_2_images("pascal_voc_test_seg.jpg", "validators/pascal_voc_test_seg.jpg") compare_2_images("pascal_voc_test_seg_read.jpg", "validators/pascal_voc_test_seg_read.jpg") clean_test_results("pascal_voc_test")
bitsoffreedom/baas
baas/boem/models.py
Python
gpl-2.0
607
0.008237
from django.db import models

# from django.contrib.gis.geoip import GeoIP
#
# g = GeoIP()

# Create your models here.
class TempM
ail(models.Model):
    mailfrom = models.EmailField()
    mailsubj = models.CharField(max_length=20)
    mailrcvd = models.DateTimeField()
    mail
hdrs = models.CharField()


class SavedMail(models.Model):
    mailrcvd = models.DateTimeField()
    mailhdrs = models.CharField()
    organization = models.ForeignKey('Organization')


class Organization(models.Model):
    emailsuffix = models.CharField(max_length=255)


class Follower(models.Model):
    email = models.EmailField()
Habitissimo/vespapp-web
api/admin.py
Python
gpl-3.0
278
0
from djang
o.contrib import admin
from a
pi.models import *

admin.site.register(Question)
admin.site.register(Answer)
admin.site.register(Sighting)
admin.site.register(Picture)
admin.site.register(UserComment)
admin.site.register(ExpertComment)
admin.site.register(SightingFAQ)
Azure/azure-sdk-for-python
sdk/formrecognizer/azure-ai-formrecognizer/tests/test_frc_identity_documents_async.py
Python
mit
8,158
0.003187
# coding=utf-8 # ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. # ------------------------------------ import pytest import functools from io import BytesIO from devtools_testutils.aio import recorded_by_proxy_async from azure.core.exceptions import ServiceRequestError from azure.core.credentials import AzureKeyCredential from azure.ai.formrecognizer._generated.v2_1.models import AnalyzeOperationResult from azure.ai.formrecognizer._response_handlers import prepare_prebuilt_models from azure.ai.formrecognizer.aio import FormRecognizerClient from azure.ai.formrecognizer import FormRecognizerApiVersion from asynctestcase import AsyncFormRecognizerTest from preparers import FormRecognizerPreparer from preparers import GlobalClientPreparer as _GlobalClientPreparer FormRecognizerClientPreparer = functools.partial(_GlobalClientPreparer, FormRecognizerClient) class TestIdDocumentsAsync(AsyncFormRecognizerTest): def teardown(self): self.sleep(4) @pytest.mark.skip() @FormRecognizerPreparer() @recorded_by_proxy_async async def test_identity_document_bad_endpoint(self, formrecognizer_test_endpoint, formrecognizer_test_api_key, **kwargs): with open(self.identity_document_license_jpg, "rb") as fd: my_file = fd.read() with pytest.raises(ServiceRequestError): client = FormRecognizerClient("http://notreal.azure.com", AzureKeyCredential(formrecognizer_test_api_key)) async with client: poller = await client.begin_recognize_identity_documents(my_file) @FormRecognizerPreparer() @FormRecognizerClientPreparer() async def test_damaged_file_bytes_fails_autodetect_content_type(self, **kwargs): client = kwargs.pop("client") damaged_pdf = b"\x50\x44\x46\x55\x55\x55" # doesn't match any magic file numbers with pytest.raises(ValueError): async with client: poller = await client.begin_recognize_identity_documents( damaged_pdf ) @FormRecognizerPreparer() @FormRecognizerClientPreparer() async def test_damaged_file_bytes_io_fails_autodetect(self, **kwargs): client = kwargs.pop("client") damaged_pdf = BytesIO(b"\x50\x44\x46\x55\x55\x55") # doesn't match any magic file numbers with pytest.raises(ValueError): async with client: poller = await client.begin_recognize_identity_documents( damaged_pdf ) @FormRecognizerPreparer() @FormRecognizerClientPreparer() async def test_passing_bad_content_type_param_passed(self, **kwargs): client = kwargs.pop("client") with open(self.identity_document_license_jpg, "rb") as fd: my_file = fd.read() with pytest.raises(ValueError): async with client: poller = await client.begin_recognize_identity_documents( my_file, content_type="application/jpeg" ) @FormRecognizerPreparer() @FormRecognizerClientPreparer() async def test_auto_detect_unsupported_stream_content(self, **kwargs): client = kwargs.pop("client") with open(self.unsupported_content_py, "rb") as fd: my_file = fd.read() with pytest.raises(ValueError): async with client: poller = await client.begin_recognize_identity_documents( my_file ) @FormRecognizerPreparer() @FormRecognizerClientPreparer() @recorded_by_proxy_async async def test_identity_document_stream_transform_jpg(self, client): responses = [] def callback(raw_response, _, headers): analyze_result = client._deserialize(AnalyzeOperationResult, raw_response) extracted_id_document = prepare_prebuilt_models(analyze_result) responses.append(analyze_result) responses.append(extracted_id_document) with open(self.identity_document_license_jpg, "rb") as fd: my_file = fd.read() async with client: poller = await 
client.begin_recognize_identity_documents( identity_document=my_file, include_field_elements=True, cls=callback ) result = await poller.result() raw_response = responses[0] returned_model = responses[1] id_document = returned_model[0] actual = raw_response.analyze_result.document_results[0].fields read_results = raw_response.analyze_result.read_results document_results = raw_response.analyze_result.document_results page_results = raw_response.analyze_result.page_results self.assertFormFieldsTransformCorrect(id_document.fields, actual, read_results) # check page range assert id_document.page_range.first_page_number == document_results[0].page_range[0] assert id_document.page_range.last_page_number == document_results[0].page_range[1] # Check page metadata self.assertFormPagesTransformCorrect(id_document.pages, read_results, page_results) @FormRecognizerPreparer() @FormRecognizerClientPreparer() @recorded_by_proxy_async async def test_identity_document_jpg_include_field_elements(self, client): with open(self.identity_document_license_jpg, "rb") as fd: id_document = fd.read() async with client: poller = await client.begin_recognize_identity_documents(id_document, include_field_elements=True) result = await poller.result() assert len(result) == 1 id_document = result[0] self.assertFormPagesHasValues(id_document.pages) for field in id_document.fields.values(): if field.name == "CountryRegion": assert field.value == "USA" continue elif field.name == "Region": assert field.value == "Washington" else: self.assertFieldElementsHasValues(field.value_data.field_elements, id_document.page_range.first_page_number) @pytest.mark.live_test_only @FormRecognizerPreparer() @FormRecognizerClientPreparer() async def test_identity_document_continuation_token(self, **kwargs): client = kwargs.pop("client") with open(self.identity_document_license_jpg, "rb") as fd: id_document = fd.read() async with client: initial_poller = await client.begin_recognize_identity_documents(id_document) cont_token = initial_poller.continuation_token() poller = await client.begin_recognize_identity_documents(None, continuation_token=cont_token) result = await poller.result() assert result is not None await initial_poller.wait() # necessary so azure-devtools doesn't throw assertion error @FormRecognizerPreparer() @FormRecognizerClientPreparer(client_kwargs={"api_version": FormRecognizerApiVersion.V2_0}) async def test_identity_document_v2(self, **kwargs): client = kwargs.pop("client") with open(self.identity_document_license_jpg, "rb") as fd: id_document = fd.read() with pytest.raises(ValueError) as e: async with client: await client.begin_recognize_identity_documents(id
_document) assert "Method 'begin_recognize_identity_documents' is only available for API version V2_1 and up" in str(e.value) @FormRecognizerPreparer() @FormRecognizerClientPreparer() @recorded_by
_proxy_async async def test_pages_kwarg_specified(self, client): with open(self.identity_document_license_jpg, "rb") as fd: id_document = fd.read() async with client: poller = await client.begin_recognize_identity_documents(id_document, pages=["1"]) assert '1' == poller._polling_method._initial_response.http_response.request.query['pages'] result = await poller.result() assert result
mwiencek/picard
picard/tagger.py
Python
gpl-2.0
23,746
0.002653
# -*- coding: utf-8 -*- # # Picard, the next-generation MusicBrainz tagger # Copyright (C) 2004 Robert Kaye # Copyright (C) 2006 Lukáš Lalinský # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. from PyQt4 import QtGui, QtCore import gettext import locale import getopt import os.path import shutil import signal import sys from collections import deque # Install gettext "noop" function. import __builtin__ __builtin__.__dict__['N_'] = lambda a: a # Py2exe 0.6.6 has broken fake_getline which doesn't work with Python 2.5 if hasattr(sys, "frozen"): import linecache def fake_getline(filename, lineno, module_globals = None): return '' linecache.getline = fake_getline del linecache, fake_getline # A "fix" for http://python.org/sf/1438480 def _patched_shutil_copystat(src, dst): try: _orig_shutil_copystat(src, dst) except OSError: pass _orig_shutil_copystat = shutil.copystat shutil.copystat = _patched_shutil_copystat import picard.resources import picard.plugins from picard import version_string, log, acoustid from picard.album import Album, NatAlbum from picard.browser.browser import BrowserIntegration from picard.browser.filelookup import FileLookup from picard.cluster import Cluster, ClusterList, UnmatchedFiles from picard.config import Config from picard.disc import Disc from picard.file import File from picard.formats import open as open_file from picard.track import Track, NonAlbumTrack from picard.releasegroup import ReleaseGroup from picard.collection import load_user_collections from picard.ui.mainwindow import MainWindow from picard.ui.itemviews import BaseTreeView from picard.plugin import PluginManager from picard.acoustidmanager import AcoustIDManager from picard.util import ( decode_filename, encode_filename, partial, queue, thread, mbid_validate, check_io_encoding ) from picard.webservice import XmlWebService class Tagger(QtGui.QApplication): file_state_changed = QtCore.pyqtSignal(int) listen_port_changed = QtCore.pyqtSignal(int) cluster_added = QtCore.pyqtSignal(Cluster) cluster_removed = QtCore.pyqtSignal(Cluster) album_added = QtCore.pyqtSignal(Album) album_removed = QtCore.pyqtSignal(Album) __instance = None def __init__(self, args, localedir, autoupdate, debug=False): QtGui.QApplication.__init__(self, args) self.__class__.__instance = self self._args = args self._autoupdate = autoupdate self.config = Config() if sys.platform == "win32": userdir = os.environ.get("APPDATA", "~\\Application Data") else: userdir = os.environ.get("XDG_CONFIG_HOME", "~/.config") self.userdir = os.path.join(os.path.expanduser(userdir), "MusicBrainz", "Picard") # Initialize threading and allocate threads self.thread_pool = thread.ThreadPool(self) self.load_queue = queue.Queue() self.save_queue = queue.Queue() self.analyze_queue = queue.Queue() self.other_queue = queue.Queue() threads = self.thread_pool.threads for i in range(4): 
threads.append(thread.Thread(self.thread_pool, self.load_queue)) threads.append(thread.Thread(self.thread_pool, self.save_queue)) threads.append(thread.Thread(self.thread_pool, self.other_queue)) threads.append(thread.Thread(self.thread_pool, self.other_queue)) threads.append(thread.Thread(self.thread_pool, self.analyze_queue)) self.thread_pool.start() self.stopping = False # Setup logging if debug or "PICARD_DEBUG" in os.environ: self.log = log.DebugLog() else: self.log = log.Log() self.log.debug("Starting Picard %s from %r", picard.__version__, os.path.abspath(__file__)) # TODO remove this before the final release if sys.platform == "win32": olduserdir = "~\\Local Settings\\Application Data\\MusicBrainz Picard" else: olduserdir = "~/.picard" olduserdir = os.path.expanduser(olduserdir) if os.path.isdir(olduserdir): self.log.info("Moving %s to %s", olduserdir, self.userdir) try: shutil.move(olduserdir, self.userdir) except: pass QtCore.QObject.tagger = self QtCore.QObject.config = self.config QtCore.QObject.log = self.log check_io_encoding() self.setup_gettext(localedir) self.xmlws = XmlWebService() load_user_collections() # Initialize fingerprinting self._acoustid = acoustid.AcoustIDClient() self._acoustid.init() # Load plugins self.pluginmanager = PluginManager() self.user_plugin_dir = os.path.join(self.userdir, "plugins") if not os.path.exists(self.user_plugin_dir): os.makedirs(self.user_plugin_dir) self.pluginmanager.load_plugindir(self.user_plugin_dir) if hasattr(sys, "frozen"): self.pluginmanager.load_plugindir(os.path.join(os.path.dirname(sys.argv[0]), "plugins")) else: self.pluginmanager.load_plugindir(os.path.join(os.path.dirname(__file__), "plugins")) self.acoustidmanager = AcoustIDManager() self.browser_integration = BrowserIntegration() self.files = {} self.clusters = ClusterList() self.albums = {} self.release_groups = {} self.mbid_redirects = {} self.unmatched_files = UnmatchedFiles() self.nats = None self.window = MainWindow() def remove_va_file_naming_format(merge=True): if merge: self.config.setting["file_naming_format"] = \ "$if($eq(%compilation%,1),\n$noop(Various Artist albums)\n"+\ "%s,\n$noop(Single Artist Albums)\n%s)" %\ (self.config.setting["va_file_naming_format"].toString(),
self.config.setting["file_naming_format"]) self.config.setting.remove("va_file_naming_format") self.config.setting.remove("use_va_format") if "va_file_naming_format" in self.config.setting\ and "use_va_format" in self.config.setting: if self.config.setting["use_
va_format"].toBool(): remove_va_file_naming_format() self.window.show_va_removal_notice() elif self.config.setting["va_file_naming_format"].toString() !=\ r"$if2(%albumartist%,%artist%)/%album%/$if($gt(%totaldiscs%,1),%discnumber%-,)$num(%tracknumber%,2) %artist% - %title%": if self.window.confirm_va_removal(): remove_va_file_naming_format(merge=False) else: remove_va_file_naming_format() else: # default format, disabled remove_va_file_naming_format(merge=False) def setup_gettext(self, localedir): """Setup locales, load translations, install gettext functions.""" ui_language = self.config.setting["ui_language"] if ui_language: os.environ['LANGUAGE'] = '' os.environ['LANG'] = ui_language if sys.platform == "win32": try: locale.setlocale(locale.LC_ALL, os.environ["LANG"]) except KeyError: os.environ["LANG"] = locale.getdefaultlocale()[0] try: lo
levilucio/SyVOLT
UMLRT2Kiltera_MM/Properties/from_thesis/HMM9_if_IsolatedLHS.py
Python
mit
2,478
0.010896
from core.himesis import Himesis, HimesisPreConditionPatternLHS import uuid class HMM9_if_IsolatedLHS(HimesisPreConditionPatternLHS): def __init__(self): """ Creates the himesis graph representing the AToM3 model HMM9_if_IsolatedLHS. """ # Flag this instance as compiled now self.is_compiled = True super(HMM9_if_IsolatedLHS, self).__init__(name='HMM9_if_IsolatedLHS', num_nodes=0, edges=[]) # Add the edges self.add_edges([]) # Set the graph attributes self["mm__"] = ['MT_pre__FamiliesToPersonsMM', 'MoTifRule'] self["MT_constraint__"] = """#=============================================================================== # This code is executed after the nodes in the LHS have been matched. # You can access a matched node labelled n by: PreNode('n'). # To access attribute x of node n, use: PreNode('n')['x']. # The given constraint must evaluate to a boolean expression: # returning True enables the rule to be applied, # returning False forbids the rule from being applied. #=============================================================================== return True """ self["name"] = """""" self["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'MM9_if') # Set the node attributes # Add the attribute equations self["equations"] = [] def constraint(self, PreNode, graph): """ Executable constraint code. @param PreNode: Function taking an integer as parameter and returns the node corresponding to that label. """ #=============================================================================== # This code is executed afte
r the nodes in the LHS have been matched. # You can access a
matched node labelled n by: PreNode('n'). # To access attribute x of node n, use: PreNode('n')['x']. # The given constraint must evaluate to a boolean expression: # returning True enables the rule to be applied, # returning False forbids the rule from being applied. #=============================================================================== return True
williamg/recycle
recycle/recycle.py
Python
mit
6,538
0.00153
#!/usr/bin/env python import argparse import logging import os import shutil import sys import glob # Location of saved templates SAVE_DIR = os.environ.get("RECYCLE_TEMPLATES_DIR", "~/.recycle") try: input = raw_input except NameError: pass def should_overwrite(typeOfThing, path): assert os.path.exists(path) nameOfThing = get_name(path) logging.debug("{} already exists. Asking to overwrite...".format(path)) res = "" while res != "y" and res != "n": prompt = "{0} {1} already exists. Do you want to replace it? " \ "(y/n) ".format(typeOfThing, nameOfThing) res = input(prompt) res = res.lower() if res == "y": logging.debug("Overwrite approved. Deleting {}".format(path)) return True else: logging.debug("Overwrite denied.") return False def copy(contents, dest): if not os.path.isdir(dest): os.makedirs(dest) for obj in contents: name = os.path.basename(os.path.normpath(obj)) destName = os.path.join(dest, name) if os.path.exists(destName): if should_overwrite("File or directory", destName): if os.path.isdir(destName): shutil.rmtree(destName) else: os.remove(destName) else: continue assert not os.path.exists(destName) if os.path.isdir(obj): shutil.copytree(obj, destName) elif os.path.isfile(obj): shutil.copy(obj, dest) else: raise IOError("Source doest not exist!") def get_name(path): return os.path.basename(os.path.normpath(path)) def get_save_path(templateName): global SAVE_DIR return os.path.join(SAVE_DIR, templateName) def setup_logging(): global SAVE_DIR logging.basicConfig(level=logging.DEBUG, format="%(asctime)s %(levelname)-8s %(message)s", datefmt="%m-%d %H:%M", filename=os.path.join(SAVE_DIR, "recycle.log"), filemode="w") console = logging.StreamHandler() # INFO or higher goes to console console.setLevel(logging.INFO) formatter = logging.Formatter("%(levelname)-8s %(message)s") console.setFormatter(formatter) logging.getLogger("").addHandler(console) def init(): global SAVE_DIR SAVE_DIR = os.path.expanduser(SAVE_DIR) SAVE_DIR = os.path.expandvars(SAVE_DIR) SAVE_DIR = os.path.abspath(SAVE_DIR) if not os.path.isdir(SAVE_DIR): os.makedirs(SAVE_DIR) setup_logging() logging.debug("Using Python version {}".format(sys.version)) def handle_new(name, files): save_path = get_save_path(name) fileList = [] for filename in files: fileList += [os.path.abspath(f) for f in glob.glob(filename)] # Remove duplicates fileList = list(set(fileList)) if len(fileList) is 0: logging.error("No files found matching '{}'".format(files)) return if os.path.isdir(save_path): # Boilerplate with that name already exists if should_overwrite("Template", save_path): handle_delete(name) else: return assert not os.path.isdir(save_path) logging.debug("Creating new template '{}' from {}".format(name, files)) try: copy(fileList, save_path) except IOError as e: logging.error(e.strerror) assert os.path.isdir(save_path) logging.debug("Boilerplate created!") def handle_use(name): save_path = get_save_path(name) if os.path.isdir(save_path): logging.debug("Using template '{}'".format(name)) contents = os.listdir(save_path) contentPaths = [os.path.join(save_path, c) for c in contents] try: copy(contentPaths, os.getcwd()) except IOError as e: logging.error("Your recycle directory doesn't seem to exist...") else: logging.error("No template with the name '{}' was found!".format(name)) def handle_list(): global SAVE_DIR assert os.path.isdir(SAVE_DIR) names = next(os.walk(SAVE_DIR))[1] for line in names: if line.startswith(SAVE_DIR): line = line[len(SAVE_DIR):-1] print(line) def handle_delete(name): save_path = get_save_path(name) if 
os.path.isdir(save_path): shutil.rmtree(save_path) else: logging.error("No template with the name '{}' was found!".format(name)) assert not os.path.isdir(save_path) def handle_location(): global SAVE_DIR
print(os
.path.normpath(SAVE_DIR) + os.sep) def parseargs(): parser = argparse.ArgumentParser() subparsers = parser.add_subparsers() new_parser = subparsers.add_parser( "new", help="Create a new template or overwrite an existing one") new_parser.add_argument( "name", type=str, help="The name under which to save this template") new_parser.add_argument( "files", type=str, nargs="+", help="The file or directory to save as the template") new_parser.set_defaults(mode="new") use_parser = subparsers.add_parser( "use", help="Insert existing template in the current directory") use_parser.add_argument( "name", type=str, help="The name of the template to use") use_parser.set_defaults(mode="use") list_parser = subparsers.add_parser( "list", help="List the available template") list_parser.set_defaults(mode="list") delete_parser = subparsers.add_parser( "delete", help="Delete a template") delete_parser.add_argument( "name", type=str, help="The name of the template to delete") delete_parser.set_defaults(mode="delete") location_parser = subparsers.add_parser( "location", help="Print the current location of the templates directory") location_parser.set_defaults(mode="location") return parser.parse_args() def main(): args = parseargs() init() if args.mode is None: logging.error("Mode must be provided. Use --help for more information.") return if args.mode is "new": handle_new(args.name, args.files) elif args.mode is "use": handle_use(args.name) elif args.mode is "list": handle_list() elif args.mode is "delete": handle_delete(args.name) elif args.mode is "location": handle_location() else: logging.error("Invalid mode") if __name__ == "__main__": main()
D4wN/brickv
src/brickv/plugin_system/plugins/ozone/ozone.py
Python
gpl-2.0
3,949
0.002026
# -*- coding: utf-8 -*- """ Ozone Bricklet Plugin Copyright (C) 2015 Olaf Lüke <[email protected]> ozone.py: Ozone Bricklet Plugin Implementation This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. """ from PyQt4.QtCore import Qt from PyQt4.QtGui import QVBoxLayout, QLabel, QHBoxLayout, QSpinBox from brickv.plugin_system.plugin_base import PluginBase from brickv.bindings.bricklet_ozone import BrickletOzone from brickv.plot_widget import PlotWidget from brickv.async_call import async_call from brickv.callback_emulator import CallbackEmulator class OzoneConcent
rationLabel(QLabel): def setText(self, text): text = "Ozone Concentration: " + text + " ppb (parts per billion)" super(OzoneConcentrationLabel, self).setText(text) class Ozone(PluginBase): def __init__(self, *args): PluginBase.__init__(self, BrickletOzone, *args) self.ozone = self.device self.cbe_ozone_concentration = CallbackEmu
lator(self.ozone.get_ozone_concentration, self.cb_ozone_concentration, self.increase_error_count) self.ozone_concentration_label = OzoneConcentrationLabel('Ozone Concentration: ') self.current_value = None plot_list = [['', Qt.red, self.get_current_value]] self.plot_widget = PlotWidget('Ozone Concentration [ppb]', plot_list) layout_h2 = QHBoxLayout() layout_h2.addStretch() layout_h2.addWidget(self.ozone_concentration_label) layout_h2.addStretch() layout = QVBoxLayout(self) layout.addLayout(layout_h2) layout.addWidget(self.plot_widget) self.spin_average = QSpinBox() self.spin_average.setMinimum(1) self.spin_average.setMaximum(50) self.spin_average.setSingleStep(1) self.spin_average.setValue(50) self.spin_average.editingFinished.connect(self.spin_average_finished) layout_h1 = QHBoxLayout() layout_h1.addWidget(QLabel('Length of moving average:')) layout_h1.addWidget(self.spin_average) layout_h1.addStretch() layout.addLayout(layout_h1) def get_moving_average_async(self, average): self.spin_average.setValue(average) def start(self): async_call(self.ozone.get_moving_average, None, self.get_moving_average_async, self.increase_error_count) async_call(self.ozone.get_ozone_concentration, None, self.cb_ozone_concentration, self.increase_error_count) self.cbe_ozone_concentration.set_period(100) self.plot_widget.stop = False def stop(self): self.cbe_ozone_concentration.set_period(0) self.plot_widget.stop = True def destroy(self): pass def get_url_part(self): return 'ozone' @staticmethod def has_device_identifier(device_identifier): return device_identifier == BrickletOzone.DEVICE_IDENTIFIER def get_current_value(self): return self.current_value def cb_ozone_concentration(self, ozone_concentration): self.current_value = ozone_concentration self.ozone_concentration_label.setText(str(ozone_concentration)) def spin_average_finished(self): self.ozone.set_moving_average(self.spin_average.value())
daelmaselli/ovirt-vm-hot-backup
ovirt-vm-rolling-snapshot.py
Python
mit
6,894
0.003046
#!/usr/bin/python import sys import time import datetime import re import ConfigParser import os from operator import attrgetter scriptdir = os.path.abspath(os.path.dirname(sys.argv[0])) conffile = scriptdir + "/ovirt-vm-rolling-snapshot.conf" Config = ConfigParser.ConfigParser() if not os.path.isfile(conffile): print "Config file %s does not exists. Exiting." % conffile sys.exit(1) Config.read(conffile) if len(Config.sections()) < 1: print "Config file is not valid. Exiting." sys.exit(1) basetime = datetime.datetime.now() for vmname in Config.sections(): starttime = time.time() try: etime_to_keep = int(Config.get(vmname, 'etime_to_keep')) hourly_to_keep = int(Config.get(vmname, 'hourly_to_keep')) daily_to_keep = int(Config.get(vmname, 'daily_to_keep')) weekly_to_keep = int(Config.get(vmname, 'weekly_to_keep')) monthly_to_keep = int(Config.get(vmname, 'monthly_to_keep')) time_hours = "%02d" % int(Config.get(vmname, 'time_hours')) time_minutes = "%02d" % int(Config.get(vmname, 'time_minutes')) time_weekday = "%d" % int(Config.get(vmname, 'time_weekday')) time_monthweek = int(Config.get(vmname, 'time_monthweek')) if time_monthweek < 1 or time_monthweek > 5: time_monthweek = 1 if time_weekday == "7": time_weekday = "0" last_to_keep = {"____": etime_to_keep, "H___": hourly_to_keep, "HD__": daily_to_keep, "HDW_": weekly_to_keep, "HDWM": monthly_to_keep} hpos = dpos = wpos = mpos = "_" if basetime.strftime("%M") == time_minutes: # minutes is 00 hpos = "H" if basetime.strftime("%H") == time_hours: # hour is 00 dpos = "D" if basetime.strftime("%w") == time_weekday: # day of week is sunday wpos = "W" if (int(basetime.strftime("%d")) <= (7 * time_monthweek)) and ( int(basetime.strftime("%d")) > (7 * (time_monthweek - 1))): # is the first week of month mpos = "M" snap_time_id = hpos + dpos + wpos + mpos deleteonly = '' if len(sys.argv) > 1: snap_time_id = sys.argv[1] if not last_to_keep[snap_time_id]: last_to_keep[snap_time_id] = 1 if len(sys.argv) > 2: deleteonly = sys.argv[2] if last_to_keep[snap_time_id]: print print "------------------------------------------------------------" print "VM name: " + vmname try: ovirtsdk except: import ovirtsdk.api from ovirtsdk.xml import params api = ovirtsdk.api.API( url=Config.get(vmname, 'server'), username=Config.get(vmname, 'username'), password=Config.get(vmname, 'password'), insecure=True, debug=False ) vm = api.vms.get(vmname) print "Begin backup of VM '%s' at %s" % (vmname, datetime.datetime.now().isoformat(" ")) print "VM status: %s" % str(vm.get_status().state) if deleteonly == 'deleteonly': print "Skipping snapshot creation." else: snap_description = "Rolling snapshot " + snap_time_id + " at " + datetime.datetime.now().isoformat(" ") print "Creating Snapshot '" + snap_description + "'" snapcreation = vm.snapshots.add(params.Snapshot(description=snap_description)) snaptoclone = "" snap_status = "" sys.stdout.write( "Snapshot in progress..." ) sys.stdout.flush() while True: snaptoclone = vm.snapshots.get(id=snapcreation.get_id()) snap_status = snaptoclone.get_snapshot_status()
if snap_status == "locked": time.sleep(5) sys.stdout.write('.') sys.stdout.flush() else: print break for snapi in vm.g
et_snapshots().list(): snapi_id = snapi.get_id() if vm.snapshots.get(id=snapi_id).description == snap_description: snap_status = "ok" break else: snap_status = "error" if snap_status != "ok": print "Snapshot creation ERROR!!!" continue print "Snapshot done" time.sleep(1) snapshots_param = params.Snapshots(snapshot=[params.Snapshot(id=snaptoclone.get_id())]) snaptodel = [] for snapi in vm.get_snapshots().list(): snapi_id = snapi.get_id() snapi_descr = vm.snapshots.get(id=snapi_id).description snapi_time_match = re.match('^Rolling snapshot ' + snap_time_id + ' at', snapi_descr) if snapi_time_match: snaptodel.append(snapi) snaptodel = sorted(snaptodel, key=attrgetter('creation_time')) if last_to_keep[snap_time_id] > 0: del snaptodel[-last_to_keep[snap_time_id]:] for snapitodel in snaptodel: print "Deleting old snapshot '" + snapitodel.description + "'" snapitodel.delete(async=False) oldsndelstatus = sndelstatus = '' while True: try: sndelstatus = vm.snapshots.get(id=snapitodel.get_id()).get_snapshot_status() except Exception, e: break if sndelstatus == oldsndelstatus: sys.stdout.write('.') else: if sndelstatus == 'ok': break sys.stdout.write( "Delete snapshot in progress..." ) oldsndelstatus = sndelstatus sys.stdout.flush() time.sleep(5) print if sndelstatus == 'ok': print "Delete snapshot ERROR!!!" else: print "Delete snapshot done." eltime = time.time() - starttime print "Finished backup of VM '%s' at %s. %d seconds." % (vmname, datetime.datetime.now().isoformat(" "), eltime) print except Exception, e: print e print "Backup ERROR!!!"
pytroll/pytroll-aapp-runner
aapp_runner/tests/test_helper_functions.py
Python
gpl-3.0
5,294
0.002456
#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (c) 2022 Pytroll developers # Author(s): # Adam Dybbroe <Firstname.Lastname @ smhi.se> # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Pu
blic License for more details. # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. """Unittesting the helper functions for the AAPP-runner. """ import logging import unittest from datetime import datetime from unittest.mock import patch
from aapp_runner.helper_functions import check_if_scene_is_unique from aapp_runner.read_aapp_config import AappL1Config, AappRunnerConfig from aapp_runner.tests.test_config import (TEST_YAML_CONTENT_OK, create_config_from_yaml) class TestProcessConfigChecking(unittest.TestCase): """Test various functions checking on the (non-static) config during processing.""" def setUp(self): self.config_complete = create_config_from_yaml(TEST_YAML_CONTENT_OK) @patch('aapp_runner.read_aapp_config.load_config_from_file') def test_check_if_scene_is_unique_return_value(self, config): """Test checking if the current scene is unique or if it has been processed earlier.""" config.return_value = self.config_complete myfilename = "/tmp/mytestfile" aapp_run_config = AappRunnerConfig(myfilename, 'norrkoping', 'xl-band') aapp_config = AappL1Config(aapp_run_config.config, 'xl-band') aapp_config['platform_name'] = 'metop03' aapp_config['collection_area_id'] = 'euron1' aapp_config['starttime'] = datetime(2022, 1, 8, 12, 49, 50) aapp_config['endtime'] = datetime(2022, 1, 8, 13, 0, 26) aapp_config.job_register = {} result = check_if_scene_is_unique(aapp_config) assert result aapp_config.job_register = {'metop03': [(datetime(2022, 1, 8, 12, 49, 50), datetime(2022, 1, 8, 13, 0, 26), 'euron1')]} # An EARS scene (same platform and overlapping time interval and over # the same area of interest) arrives shortly after: aapp_config['platform_name'] = 'metop03' aapp_config['collection_area_id'] = 'euron1' aapp_config['starttime'] = datetime(2022, 1, 8, 12, 50) aapp_config['endtime'] = datetime(2022, 1, 8, 13, 0) result = check_if_scene_is_unique(aapp_config) assert not result @patch('aapp_runner.read_aapp_config.load_config_from_file') def test_check_if_scene_is_unique_logging(self, config): """Test the logging when checking if the current scene is unique.""" config.return_value = self.config_complete myfilename = "/tmp/mytestfile" aapp_run_config = AappRunnerConfig(myfilename, 'norrkoping', 'xl-band') aapp_config = AappL1Config(aapp_run_config.config, 'xl-band') aapp_config.job_register = {'metop03': [(datetime(2022, 1, 8, 12, 49, 50), datetime(2022, 1, 8, 13, 0, 26), 'euron1')]} # An EARS scene (same platform and overlapping time interval and over # the same area of interest) arrives shortly after: aapp_config['platform_name'] = 'metop03' aapp_config['collection_area_id'] = 'euron1' aapp_config['starttime'] = datetime(2022, 1, 8, 12, 50) aapp_config['endtime'] = datetime(2022, 1, 8, 13, 0) expected_logging = ['INFO:aapp_runner.helper_functions:first message', 'INFO:aapp_runner.helper_functions:Processing of scene metop03 2022-01-08 12:49:50 2022-01-08 13:00:26 with overlapping time has been launched previously. 
Skip it!'] with self.assertLogs('aapp_runner.helper_functions', level='INFO') as cm: logging.getLogger('aapp_runner.helper_functions').info('first message') _ = check_if_scene_is_unique(aapp_config) self.assertEqual(cm.output, expected_logging) with self.assertLogs('aapp_runner.helper_functions', level='WARNING') as cm: logging.getLogger('aapp_runner.helper_functions').warning('first message') _ = check_if_scene_is_unique(aapp_config) self.assertEqual(len(cm.output), 1) # Scene is different (different satellite) from previous: aapp_config['platform_name'] = 'metop01' aapp_config['collection_area_id'] = 'euron1' aapp_config['starttime'] = datetime(2022, 1, 8, 12, 50) aapp_config['endtime'] = datetime(2022, 1, 8, 13, 0) with self.assertLogs('aapp_runner.helper_functions', level='INFO') as cm: logging.getLogger('aapp_runner.helper_functions').info('first message') result = check_if_scene_is_unique(aapp_config) assert result self.assertEqual(len(cm.output), 1)
braintree/braintree_python
tests/unit/test_resource.py
Python
mit
3,302
0.000909
from tests.test_helper import * from braintree.resource import Resource class TestResource(unittest.TestCase): def test_verify_keys_allows_wildcard_keys(self): signature = [ {"foo": [{"bar": ["__any_key__"]}]} ] params = { "foo[bar][lower]": "lowercase", "foo[bar][UPPER]": "uppercase", "foo[bar][123]": "numeric", "foo[bar][under_scores]": "underscores", "foo[bar][dash-es]": "dashes", "foo[bar][ABC-abc_123]": "all together" } Resource.verify_keys(params, signature) @raises(KeyError) def test_verify_keys_escapes_brackets_in_signature(self): signature = [ {"customer": [{"custom_fields": ["__an
y_key__"]}]} ] params = { "customer_id": "value", } Resource.verify_keys(params, signature) def test_verify_keys_works_with_array_param(self): signature = [ {"customer": ["one", "two"]} ] params = { "customer": { "one": "foo" } } Resource.verify_keys(params, signature) @raises(KeyError) def test_verify_keys_raises_on_bad_array_param(self):
signature = [ {"customer": ["one", "two"]} ] params = { "customer": { "invalid": "foo" } } Resource.verify_keys(params, signature) def test_verify_keys_works_with_arrays(self): signature = [ {"add_ons": [{"update": ["existing_id", "quantity"]}]} ] params = { "add_ons": { "update": [ { "existing_id": "foo", "quantity": 10 } ] } } Resource.verify_keys(params, signature) @raises(KeyError) def test_verify_keys_raises_with_invalid_param_in_arrays(self): signature = [ {"add_ons": [{"update": ["existing_id", "quantity"]}]} ] params = { "add_ons": { "update": [ { "invalid": "foo", "quantity": 10 } ] } } Resource.verify_keys(params, signature) def test_verify_keys_allows_text(self): text_string = u"text_string" assert isinstance(text_string, TestHelper.text_type) signature = [ {"customer": [{"custom_fields": [text_string]}]} ] params = { "customer": { "custom_fields": { text_string : text_string } } } Resource.verify_keys(params, signature) def test_verify_keys_allows_raw_data(self): raw_string = str.encode("raw_string") assert isinstance(raw_string, TestHelper.raw_type) signature = [ {"customer": [{"custom_fields": [raw_string]}]} ] params = { "customer": { "custom_fields": { raw_string : raw_string } } } Resource.verify_keys(params, signature)
yunify/qingcloud-cli
qingcloud/cli/driver.py
Python
apache-2.0
3,383
0.004138
# ========================================================================= # Copyright 2012-present Yunify, Inc. # ------------------------------------------------------------------------- # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this work except in compliance with the License. # You may obtain a copy of the License in the LICENSE file, or at: # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ========================================================================= import sys import argparse import pkg_resources from difflib import get_close_matches from .iaas_client.actions import ActionManager as IaaSActionManager from .qs_client.actions import ActionManager as QSActionManager SERVICES = ('iaas', 'qs') INDENT = ' ' * 2 NEWLINE = '\n' + INDENT def exit_due_to_invalid_service(suggest_services=None): usage = NEWLINE + '%(prog)s <service> <action> [parameters]\n\n' \ + 'Here are valid services:\n\n' \ + INDENT + NEWLINE.join(SERVICES) if suggest_services: usage += '\n\nInvalid service, maybe you meant:\n ' \ + ','.join(suggest_services) parser = argparse.ArgumentParser( prog = 'qingcloud', usage = usage, ) parser.print_help() sys.exit(-1) def exit_due_to_invalid_action(service, suggest_actions=None): usage = NEWLINE + '%(prog)s <action> [par
ameters]\n\n' \ + 'Here are valid actions:\n\n' \ + INDENT + NEWLINE.join(get_valid_acti
ons(service)) if suggest_actions: usage += '\n\nInvalid action, maybe you meant:\n ' \ + NEWLINE.join(suggest_actions) parser = argparse.ArgumentParser( prog = 'qingcloud %s' % service, usage = usage, ) parser.print_help() sys.exit(-1) def get_valid_actions(service): if service == 'iaas': return IaaSActionManager.get_valid_actions() elif service == 'qs': return QSActionManager.get_valid_actions() def get_action(service, action): if service == 'iaas': return IaaSActionManager.get_action(action) elif service == 'qs': return QSActionManager.get_action(action) def check_argument(args): if len(args) < 2: exit_due_to_invalid_service() if args[1].lower() in ('--version', '-v'): version = pkg_resources.require("qingcloud-cli")[0].version print('qingcloud-cli version %s' % version) sys.exit(0) service = args[1] if service not in SERVICES: suggest_services = get_close_matches(service, SERVICES) exit_due_to_invalid_service(suggest_services) if len(args) < 3: exit_due_to_invalid_action(service) valid_actions = get_valid_actions(service) if args[2] not in valid_actions: suggest_actions = get_close_matches(args[2], valid_actions) exit_due_to_invalid_action(service, suggest_actions) def main(): args = sys.argv check_argument(args) action = get_action(args[1], args[2]) action.main(args[3:])
adam111316/SickGear
sickbeard/providers/grabtheinfo.py
Python
gpl-3.0
6,393
0.002659
# coding=utf-8 # # This file is part of SickGear. # # SickGear is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # SickGear is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with SickGear. If not, see <http://www.gnu.org/licenses/>. import re import datetime import traceback from . import generic from sickbeard import logger, tvcache, helpers from sickbeard.bs4_parser import BS4Parser from lib.unidecode import unidecode class GrabTheInfoProvider(generic.TorrentProvider): def __init__(self): generic.TorrentProvider.__init__(self, 'GrabTheInfo') self.url_base = 'http://grabthe.info/' self.urls = {'config_provider_home_uri': self.url_base, 'login': self.url_base + 'takelogin.php', 'cache': self.url_base + 'browse.php?%s', 'search': '&search=%s', 'get': self.url_base + '%s'} self.categories = 'c56=1&c8=1&c61=1&c10=1&incldead=0&blah=0' self.url = self.urls['config_provider_home_uri'] self.username, self.password, self.minseed, self.minleech = 4 * [None] self.cache = GrabTheInfoCache(self) def _do_login(self): logged_in = lambda: 'uid' in self.session.cookies and 'pass' in self.session.cookies if logged_in(): return True if self._check_auth(): login_params = {'username': self.username, 'password': self.password} response = helpers.getURL(self.urls['login'], post_data=login_params, session=self.session) if response and logged_in(): return True msg = u'Failed to authenticate with %s, abort provider' if response and 'Username or passwor
d incorrect' in response: msg = u'Invalid username or password for %s. Check settings' logger.log(msg % self.name, logger.ERROR) return False def _do_search(self, search_params, search_mode='eponly', epcount=0, age=0): results = [] if not self._do_login(): return results items = {'Season': [], 'Episode': [], 'Cache': []} rc = dict((
k, re.compile('(?i)' + v)) for (k, v) in {'info': 'detail', 'get': 'download'}.items()) for mode in search_params.keys(): for search_string in search_params[mode]: if isinstance(search_string, unicode): search_string = unidecode(search_string) search_url = self.urls['cache'] % self.categories if 'cache' != mode.lower(): search_url += self.urls['search'] % search_string html = self.get_url(search_url) cnt = len(items[mode]) try: if not html or self._has_no_results(html): raise generic.HaltParseException html = html.replace('<?xml version="1.0" encoding="iso-8859-1"?>', '') html = re.sub(r'(</td>)[^<]*</td>', r'\1', html) html = re.sub(r'(<a[^<]*)<a[^<]*?href=details[^<]*', r'\1', html) with BS4Parser(html, 'html.parser') as soup: shows_found = False torrent_rows = soup.find_all('tr') for index, row in enumerate(torrent_rows): if 'type' == row.find_all('td')[0].get_text().strip().lower(): shows_found = index break if not shows_found or 2 > (len(torrent_rows) - shows_found): raise generic.HaltParseException for tr in torrent_rows[1 + shows_found:]: try: info = tr.find('a', href=rc['info']) if None is info: continue title = (('title' in info.attrs.keys() and info['title']) or info.get_text()).strip() download_url = tr.find('a', href=rc['get']) if None is download_url: continue seeders, leechers = [int(tr.find_all('td')[x].get_text().strip()) for x in (-2, -1)] if 'Cache' != mode and (seeders < self.minseed or leechers < self.minleech): continue except (AttributeError, TypeError, KeyError): continue if title: items[mode].append((title, self.urls['get'] % str(download_url['href'].lstrip('/')), seeders)) except generic.HaltParseException: pass except Exception: logger.log(u'Failed to parse. Traceback: %s' % traceback.format_exc(), logger.ERROR) self._log_result(mode, len(items[mode]) - cnt, search_url) # for each search mode sort all the items by seeders 'Cache' != mode and items[mode].sort(key=lambda tup: tup[2], reverse=True) results += items[mode] return results def find_propers(self, search_date=datetime.datetime.today()): return self._find_propers(search_date) def _get_episode_search_strings(self, ep_obj, add_string='', **kwargs): return generic.TorrentProvider._get_episode_search_strings(self, ep_obj, add_string, sep_date='|', use_or=False) class GrabTheInfoCache(tvcache.TVCache): def __init__(self, this_provider): tvcache.TVCache.__init__(self, this_provider) self.minTime = 20 # cache update frequency def _getRSSData(self): return self.provider.get_cache_data() provider = GrabTheInfoProvider()
Alcheri/Plugins
MyPing/config.py
Python
bsd-3-clause
2,709
0.001477
### # Copyright (c) 2004, Jeremiah Fincher # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright notice, # this list of conditions, and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions, and the following disclaimer in the # documentation and/or other materials provided with the distribution. # * Neither the name of the author of this software nor the name of # contributors to this software may be used to endorse or promote products # derived from this software without specific prior written consent. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL
, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (
INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. ### import supybot.conf as conf import supybot.registry as registry try: from supybot.i18n import PluginInternationalization _ = PluginInternationalization('MyPing') except ImportError: # Placeholder that allows to run the plugin on a bot # without the i18n module _ = lambda x:x def configure(advanced): # This will be called by supybot to configure this module. advanced is # a bool that specifies whether the user identified themself as an advanced # user or not. You should effect your configuration by manipulating the # registry as appropriate. from supybot.questions import expect, anything, something, yn conf.registerPlugin('MyPing', True) MyPing = conf.registerPlugin('MyPing') # This is where your configuration variables (if any) should go. For example: # conf.registerGlobalValue(MyPing, 'someConfigVariableName', # registry.Boolean(False, _("""Help for someConfigVariableName."""))) conf.registerChannelValue(MyPing, 'enable', registry.Boolean(False, """Should plugin work in this channel?""")) # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
Linhua-Sun/p4-phylogenetics
p4/NexusToken2.py
Python
gpl-2.0
5,196
0.007313
import pf from Var import var import numpy,string from Glitch import Glitch """A faster version of nextTok(), using memory allocated (once only) using numpy, and using functions written in C. The slow, pure python module is NexusToken.py. This version is about twice as fast. Which one is used is under the control of var.nexus_doFastNextTok. This one does not work for CStrings, so we need to revert to the old way w
henever CStrings are encountered.""" class NexusToken(object): def __init__(self, max): self.max = numpy.array([max], numpy.int32) self.tokLen = numpy.array([0], numpy.int32) self.tok = numpy.array(['x'] * int(self.max), 'c') self.embeddedCommentLen = numpy.array([0], numpy.int32)
self.embeddedComment = numpy.array(['x'] * int(self.max), 'c') self.savedCommentLen = numpy.array([0], numpy.int32) self.filePtr = None self.nexusToken = pf.newNexusToken(var._nexus_writeVisibleComments, var._nexus_getP4CommandComments, var._nexus_getWeightCommandComments, var._nexus_getAllCommandComments, var._nexus_getLineEndingsAsTokens, self.max, self.tokLen, self.tok, self.embeddedCommentLen, self.embeddedComment, self.savedCommentLen) #self.previousTok = None #self.previousEmbeddedComment = None nt = NexusToken(300) def checkLineLengths(flob): global nt #print 'NexusToken2.checkLineLengths here.' flob.seek(0,0) longest = pf.nexusTokenCheckLineLengths(nt.nexusToken, flob) flob.seek(0,0) #print 'The longest line length is %i' % longest if longest > nt.max: nt = NexusToken(longest) def nextTok(flob): #print 'NexusToken2.nextTok() here. nt.nexusToken = %i, max=%s, tokLen=%s, type(tokLen)=%s' % (nt.nexusToken, nt.max, nt.tokLen[0], type(nt.tokLen)) #assert type(nt.tokLen) == type(numpy.array([0], numpy.int32)) #print "NexusToken2.nextTok(). nt.wordIsFinished[0]=%i, nt.tokLen=%i, previousTok=%s, previousComment=%s" % (nt.wordIsFinished[0], nt.tokLen[0], nt.previousTok, nt.previousEmbeddedComment) #if nt.wordIsFinished[0]: # assert nt.tokLen[0] # ret = nt.tok[:int(nt.tokLen[0])].tostring() # nt.tokLen[0] = 0 # nt.wordIsFinished[0] = 0 # #nt.previousTok = ret # return ret #print ' x1 NexusToken2.nextTok() here. savedCommentLen=%i' % nt.savedCommentLen[0] if nt.savedCommentLen[0]: ret = nt.embeddedComment[:int(nt.savedCommentLen[0])].tostring() nt.savedCommentLen[0] = 0 return ret pf.nextToken(nt.nexusToken, flob) #print ' x2 tokLen = %i, embeddedCommentLen[0] = %i' % (nt.tokLen[0], nt.embeddedCommentLen[0]) if nt.embeddedCommentLen[0]: ret = nt.embeddedComment[:int(nt.embeddedCommentLen[0])].tostring() nt.embeddedCommentLen[0] = 0 #nt.previousEmbeddedComment = ret return ret else: if nt.tokLen[0]: ret = nt.tok[:int(nt.tokLen[0])].tostring() nt.tokLen[0] = 0 #nt.previousTok = ret return ret else: return None def safeNextTok(flob, caller=None): t = nextTok(flob) if not t: if caller: gm = ["safeNextTok(), called from %s" % caller] else: gm = ["safeNextTok()"] gm.append("Premature Death.") gm.append("Ran out of understandable things to read in nexus file.") raise Glitch, gm else: return t def nexusSkipPastNextSemiColon(flob): pf.nexusSkipPastNextSemiColon(nt.nexusToken, flob) def nexusSkipPastBlockEnd(flob): """Read up to and including a block 'end' or 'endblock'.""" # This should only ever be issued after a semi-colon complaintHead = '\nNexus: nexusSkipPastBlockEnd()' if hasattr(flob, 'name'): complaintHead += " file: %s" % flob.name while 1: tok = nextTok(flob) if tok: lowTok = string.lower(tok) if lowTok == 'end' or lowTok == 'endblock': tok2 = nextTok(flob) if not tok2 or tok2 != ';': gm = [complaintHead] gm.append(" Expecting a semicolon after %s" % tok) if not tok2: gm.append("Got nothing.") else: gm.append("Got '%s'" % tok2) raise Glitch, gm return elif lowTok == ';': # for pathological cases where the last command is a ';' by itself. continue else: pf.nexusSkipPastNextSemiColon(nt.nexusToken, flob) else: break gm = [complaintHead] gm.append("Failed to find either 'end' or 'endblock'") gm.append("Premature end of file?") raise Glitch, gm
lordi/tickmate
analysis/tmkit/linear_regression.py
Python
gpl-3.0
2,249
0.006225
import sqlite3 from sklearn import linear_model import numpy as np import pandas as pd import datetime import sys conn = sqlite3.connect(sys.argv[1]) c = conn.cursor(); c.execute("select _id, name from tracks") rows = c.fetchall() track_names = pd.DataFrame([{'track_name': row[1]} for row in rows]) track_ids = [int(row[0]) for row in rows] track_cnt = len(track_ids) prin
t "Found {0} tracks.".format(track_cnt) c.execute("select * from ticks") last_tick = c.fetchall(
)[-1] last_day = datetime.date(last_tick[2], last_tick[3], last_tick[4]) def window(day, n=20): "return a matrix of the last `n` days before day `day`" tick_date = "date(year || '-' || substr('0' || month, -2, 2) || " + \ "'-' || substr('0' || day, -2, 2))" max_date = "date('{d.year:04d}-{d.month:02d}-{d.day:02d}')".\ format(d=day) min_date = "date('{d.year:04d}-{d.month:02d}-{d.day:02d}')".\ format(d=day-datetime.timedelta(n)) c.execute("select * from ticks where {d} <= {max_date} and {d} >= {min_date}".\ format(d=tick_date, max_date=max_date, min_date=min_date)) # ticktrix is the matrix containing the ticks ticktrix = np.zeros((n, track_cnt)) for row in c.fetchall(): print row try: row_date = datetime.date(row[2], row[3], row[4]) except ValueError: print "Error constructing date from", row x = -(row_date - day).days y = track_ids.index(int(row[1])) if x < n: ticktrix[x, y] = 1 return ticktrix last_day -= datetime.timedelta(1) print "Fitting for day:", last_day my_window = window(last_day) target_data = my_window[0,:].T training_data = my_window[1:,:].T print "Target:", target_data.shape print target_data print "Training:", training_data.shape print training_data reg = linear_model.LinearRegression() reg.fit(training_data, target_data) print "Coefficents:", reg.coef_.shape print reg.coef_ print "Applied to training data:" print np.dot(training_data, reg.coef_) print "Forecast" #print np.dot(my_window[:19,:].T, reg.coef_) #print track_names df = pd.DataFrame() df['track'] = track_names df['prob'] = pd.Series(np.dot(my_window[:19,:].T, reg.coef_) * 100.0) print df
andela/troupon
troupon/deals/migrations/0003_remove_advertiser_logo.py
Python
mit
349
0
# -*- coding: utf-8 -*- from __
future__ import unicode_literals from django.db import models, migrations class Migration(migrations.Migration): dependencies = [ ('deals', '0002_advertiser_logo'), ] operations = [ migrations.RemoveField( model_name='advertiser', n
ame='logo', ), ]
jordanemedlock/psychtruths
temboo/core/Library/Amazon/IAM/UpdateSigningCertificate.py
Python
apache-2.0
4,842
0.006196
# -*- coding: utf-8 -*- ############################################################################### # # UpdateSigningCertificate # Changes the status of the specified signing certificate from active to disabled, or vice versa. This action can be used to disable a user's signing certificate as part of a certificate rotation workflow. # # Python versions 2.6, 2.7, 3.x # # Copyright 2014, Temboo Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, # either express or implied. See the License for the specific # language governing permissions and limitations under the License. # # ############################################################################### from temboo.core.choreography import Choreography from temboo.core.choreography import InputSet from temboo.core.choreography import ResultSet from temboo.core.choreography import ChoreographyExecution import json class UpdateSigningCertificate(Choreography): def __init__(self, temboo_session): """ Create a new instance of the UpdateSigningCertificate Choreo. A TembooSession object, containing a valid set of Temboo credentials, must be supplied. """ super(UpdateSigningCertificate, self).__init__(temboo_session, '/Library/Amazon/IAM/UpdateSigningCertificate') def new_input_set(self): return UpdateSigningCertificateInputSet() def _make_result_set(self, result, path): return UpdateSigningCertificateResultSet(result, path) def _make_execution(self, session, exec_id, path): return UpdateSigningCertificateChoreographyExecution(session, exec_id, path) class UpdateSigningCertificateInputSet(InputSet): """ An InputSet with methods appropriate for specifying the inputs to the UpdateSigningCertificate Choreo. The InputSet object is used to specify input parameters when executing this Choreo. """ def set_AWSAccessKeyId(self, value): """ Set the value of the AWSAccessKeyId input for this Choreo. ((required, string) The Access Key ID provided by Amazon Web Services.) """ super(UpdateSigningCertificateInputSet, self)._set_input('AWSAccessKeyId', value) def set_AWSSecretKeyId(self, value): """ Set the value of the AWSSecretKeyId input for this Choreo. ((required, string) The Secret Key ID provided by Amazon Web Services.) """ super(UpdateSigningCertificateInputSet, self)._set_input('AWSSecretKeyId', value) def set_CertificateId(self, value): """ Set the value of the CertificateId input for this Choreo. ((required, string) The ID of the signing certificate you want to update.) """ super(UpdateSigningCertificateInputSet, self)._set_input('CertificateId', value) def set_ResponseFormat(self, value): """ Set the value of the ResponseFormat input for this Choreo. ((optional, string) The format that the response should be in. Valid values are "xml" (the default) and "json".) """ super(UpdateSig
ningCertificateInputSet, self)._set_input('ResponseFormat', value) def set_Status(self, value): """ Set the value of the Status input for this Choreo. ((required, string) The status you want to assign to the certificate. Active means the certificate can be used for API calls to AWS, while Inactive means the certificate cannot be used.) """ super(UpdateSigningCertificateInputSet, self)._set_input('Status', value) def set_UserName(self, value): """
Set the value of the UserName input for this Choreo. ((optional, string) Name of the user the signing certificate belongs to.) """ super(UpdateSigningCertificateInputSet, self)._set_input('UserName', value) class UpdateSigningCertificateResultSet(ResultSet): """ A ResultSet with methods tailored to the values returned by the UpdateSigningCertificate Choreo. The ResultSet object is used to retrieve the results of a Choreo execution. """ def getJSONFromString(self, str): return json.loads(str) def get_Response(self): """ Retrieve the value for the "Response" output from this Choreo execution. (The response from Amazon.) """ return self._output.get('Response', None) class UpdateSigningCertificateChoreographyExecution(ChoreographyExecution): def _make_result_set(self, response, path): return UpdateSigningCertificateResultSet(response, path)
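A minimal usage sketch for the Choreo above. The Temboo session credentials, AWS keys and certificate id are placeholders, the import path simply mirrors this repository's layout, and execute_with_results() is assumed from the standard Temboo SDK pattern:

from temboo.core.session import TembooSession
from temboo.core.Library.Amazon.IAM.UpdateSigningCertificate import UpdateSigningCertificate

# Placeholder Temboo account name, app-key name and app-key value.
session = TembooSession('ACCOUNT_NAME', 'APP_KEY_NAME', 'APP_KEY_VALUE')

choreo = UpdateSigningCertificate(session)
inputs = choreo.new_input_set()
inputs.set_AWSAccessKeyId('AKIA-PLACEHOLDER')      # placeholder AWS access key
inputs.set_AWSSecretKeyId('SECRET-PLACEHOLDER')    # placeholder AWS secret key
inputs.set_CertificateId('CERTIFICATE-ID')         # id of the certificate to update
inputs.set_Status('Inactive')                      # disable the signing certificate
inputs.set_ResponseFormat('json')

results = choreo.execute_with_results(inputs)
print(results.get_Response())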
pombredanne/django-rest-framework-braces
drf_braces/forms/serializer_form.py
Python
mit
5,024
0.001194
from __future__ import print_function, unicode_literals import inspect import six from django import forms from django.forms.forms import DeclarativeFieldsMetaclass from rest_framework import serializers from .. import fields from ..utils import ( initialize_class_using_reference_object, reduce_attr_dict_from_base_classes, ) from .fields import ISO8601DateTimeField SERIALIZER_FORM_FIELD_MAPPING = { fields.BooleanField: forms.BooleanField, fields.CharField: forms.CharField, fields.ChoiceField: forms.ChoiceField, fields.DateTimeField: ISO8601DateTimeField, fields.EmailField: forms.EmailField, fields.IntegerField: forms.IntegerField, serializers.BooleanField: forms.BooleanField, serializers.CharField: forms.CharField, serializers.ChoiceField: forms.ChoiceField, serializers.DateTimeField: ISO8601DateTimeField, serializers.EmailField: forms.EmailField, serializers.IntegerField: forms.IntegerField, } class SerializerFormOptions(object): def __init__(self, options=None, name=None): self.serializer = getattr(options, 'serializer', None) self.fields = getattr(options, 'fields', []) self.exclude = getattr(options, 'exclude', []) self.field_mapping = getattr(options, 'field_mapping', {}) assert self.serializer is not None, ( '{}.Meta.serializer must be provided' ''.format(name) ) assert issubclass(self.serializer, serializers.BaseSeriali
zer), ( '{}.Meta.serializer must be a subclass of DRF serializer' ''.format(name) ) class SerializerFormMeta(DeclarativeFieldsMetaclass): def __new__(cls, name, bases, attrs): try: parents = [b for b in bases if issubclass(b, SerializerForm)] except NameError:
# We are defining SerializerForm itself parents = None meta = attrs.pop('Meta', None) if not parents or attrs.pop('_is_base', False): return super(SerializerFormMeta, cls).__new__(cls, name, bases, attrs) attrs['_meta'] = options = SerializerFormOptions(meta, name=name) new_attrs = cls.get_form_fields_from_serializer(bases, options) # attrs should take priority in case a specific field is overwritten new_attrs.update(attrs) return super(SerializerFormMeta, cls).__new__(cls, name, bases, new_attrs) @classmethod def get_field_mapping(cls, bases, options): mapping = reduce_attr_dict_from_base_classes( bases, lambda i: getattr(getattr(i, '_meta', None), 'field_mapping', {}), SERIALIZER_FORM_FIELD_MAPPING ) mapping.update(options.field_mapping) return mapping @classmethod def get_form_fields_from_serializer(cls, bases, options): fields = {} mapping = cls.get_field_mapping(bases, options) for name, field in options.serializer._declared_fields.items(): if field.read_only: continue if name not in options.fields or name in options.exclude: continue form_field_class = mapping.get(type(field)) if not form_field_class: raise TypeError( '{} is not mapped to appropriate form field class. ' 'Please add it to the mapping via `field_mapping` ' 'Meta attribute.' ''.format(type(field)) ) fields[name] = initialize_class_using_reference_object(field, form_field_class) return fields class SerializerFormBase(forms.Form): def __init__(self, *args, **kwargs): super(SerializerFormBase, self).__init__(*args, **kwargs) # instantiated during validation self.serializer = None def get_serializer_context(self): return {} def get_serializer_data(self): data = self.initial.copy() data.update(self.cleaned_data or {}) return data def get_serializer(self): return self._meta.serializer( data=self.get_serializer_data(), context=self.get_serializer_context() ) def _clean_form(self): super(SerializerFormBase, self)._clean_form() self.serializer = self.get_serializer() if not self.serializer.is_valid(): self._errors.update(self.serializer.errors) else: self.cleaned_data = self.serializer.validated_data class SerializerForm(six.with_metaclass(SerializerFormMeta, SerializerFormBase)): _is_base = True def form_from_serializer(serializer, **kwargs): assert inspect.isclass(serializer) and issubclass(serializer, serializers.BaseSerializer), ( 'Can only create forms from DRF Serializers' ) kwargs.update({'serializer': serializer}) meta = type(str('Meta'), (object,), kwargs) return type(str('{}Form'.format(serializer.__name__)), (SerializerForm,), {'Meta': meta})
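A brief sketch of the two ways the module above can turn a DRF serializer into a Django form; SignupSerializer and its fields are invented for illustration, and the snippet assumes it runs inside a configured Django project:

from rest_framework import serializers
from drf_braces.forms.serializer_form import SerializerForm, form_from_serializer

class SignupSerializer(serializers.Serializer):
    # hypothetical serializer, used only for this example
    email = serializers.EmailField()
    age = serializers.IntegerField()

# Declarative style: form fields are generated from the serializer by the metaclass.
class SignupForm(SerializerForm):
    class Meta:
        serializer = SignupSerializer
        fields = ['email', 'age']

# Factory style: an equivalent form class built at runtime.
SignupForm2 = form_from_serializer(SignupSerializer, fields=['email', 'age'])

form = SignupForm(data={'email': 'user@example.com', 'age': '30'})
if form.is_valid():
    # cleaned_data is taken from the serializer's validated_data
    print(form.cleaned_data)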
diofant/diofant
diofant/tests/core/test_operations.py
Python
bsd-3-clause
1,106
0.000904
import pytest from diofant import Integer
, SympifyError from diofant.core.operations import AssocOp, LatticeOp __all__ = () class MyMul(AssocOp): identity = Integer(1) def test_flatten(): assert MyMul(2, MyMul(4, 3)) == MyMul(2, 4, 3) class Join(LatticeOp): """Simplest possible Lattice class.""" zero
= Integer(0) identity = Integer(1) def test_lattice_simple(): assert Join(Join(2, 3), 4) == Join(2, Join(3, 4)) assert Join(2, 3) == Join(3, 2) assert Join(0, 2) == 0 assert Join(1, 2) == 2 assert Join(2, 2) == 2 assert Join(Join(2, 3), 4) == Join(2, 3, 4) assert Join() == 1 assert Join(4) == 4 assert Join(1, 4, 2, 3, 1, 3, 2) == Join(2, 3, 4) def test_lattice_shortcircuit(): pytest.raises(SympifyError, lambda: Join(object)) assert Join(0, object) == 0 def test_lattice_print(): assert str(Join(5, 4, 3, 2)) == 'Join(2, 3, 4, 5)' def test_lattice_make_args(): assert Join.make_args(0) == {0} assert Join.make_args(1) == {1} assert Join.make_args(Join(2, 3, 4)) == {Integer(2), Integer(3), Integer(4)}
aipescience/django-daiquiri
daiquiri/query/__init__.py
Python
apache-2.0
55
0
default_app_config = 'da
iquiri.query.apps.QueryCo
nfig'
trevorlinton/skia
gm/rebaseline_server/results.py
Python
bsd-3-clause
22,208
0.005629
#!/usr/bin/python """ Copyright 2013 Google Inc. Use of this source code is governed by a BSD-style license that can be found in the LICENSE file. Repackage expected/actual GM results as needed by our HTML rebaseline viewer. """ # System-level imports import argparse import fnmatch import json import logging import os import re import sys import time # Imports from within Skia # # We need to add the 'gm' directory, so that we can import gm_json.py within # that directory. That script allows us to parse the actual-results.json file # written out by the GM tool. # Make sure that the 'gm' dir is in the PYTHONPATH, but add it at the *end* # so any dirs that are already in the PYTHONPATH will be preferred. PARENT_DIRECTORY = os.path.dirname(os.path.realpath(__file__)) GM_DIRECTORY = os.path.dirname(PARENT_DIRECTORY) if GM_DIRECTORY not in sys.path: sys.path.append(GM_DIRECTORY) import gm_json import imagediffdb IMAGE_FILENAME_RE = re.compile(gm_json.IMAGE_FILENAME_PATTERN) IMAGE_FILENAME_FORMATTER = '%s_%s.png' # pass in (testname, config) FIELDS_PASSED_THRU_VERBATIM = [ gm_json.JSONKEY_EXPECTEDRESULTS_BUGS, gm_json.JSONKEY_EXPECTEDRESULTS_IGNOREFAILURE, gm_json.JSONKEY_EXPECTEDRESULTS_REVIEWED, ] CATEGORIES_TO_SUMMARIZE = [ 'builder', 'test', 'config', 'resultType', gm_json.JSONKEY_EXPECTEDRESULTS_IGNOREFAILURE, gm_json.JSONKEY_EXPECTEDRESULTS_REVIEWED, ] RESULTS_ALL = 'all' RESULTS_FAILURES = 'failures' class Results(object): """ Loads actual and expected results from all builders, supplying combined reports as requested. Once this object has been constructed, the results (in self._results[]) are immutable. If you want to update the results based on updated JSON file contents, you will need to create a new Results object.""" def __init__(self, actuals_root, expected_root, generated_images_root): """ Args: actuals_root: root directory containing all actual-results.json files expected_root: root directory containing all expected-results.json files generated_images_root: directory within which to create all pixel diffs; if this directory does not yet exist, it will be created """ time_start = int(time.time()) self._image_diff_db = imagediffdb.ImageDiffDB(generated_images_root) self._actuals_root = actuals_root self._expected_root = expected_root self._load_actual_and_expected() self._timestamp = int(time.time()) logging.info('Results complete; took %d seconds.' % (self._timestamp - time_start)) def get_timestamp(self): """Return the time at which this object was created, in seconds past epoch (UTC). """ return self._timestamp def edit_expectations(self, modifications): """Edit the expectations stored within this object and write them back to disk. Note that this will NOT update the results stored in self._results[] ; in order to see those updates, you must instantiate a new Results object based on the (now updated) files on disk. Args: modifications: a list of dictionaries, one for each expectation to update: [ { 'builder': 'Test-Mac10.6-MacMini4.1-GeForce320M-x86-Debug', 'test': 'bigmatrix', 'config': '8888', 'expectedHashType': 'bitmap-64bitMD5', 'expectedHashDigest': '10894408024079689926', 'bugs': [123, 456], 'ignore-failure': false, 'reviewed-by-human': true, }, ... 
] """ expected_builder_dicts = Results._read_dicts_from_root(self._expected_root) for mod in modifications: image_name = IMAGE_FILENAME_FORMATTER % (mod['test'], mod['config']) # TODO(epoger): assumes a single allowed digest per test allowed_digests = [[mod['expectedHashType'], int(mod['expectedHashDigest'])]] new_expectations = { gm_json.JSONKEY_EXPECTEDRESULTS_ALLOWEDDIGESTS: allowed_digests, } for field in FIELDS_PASSED_THRU_VERBATIM: value = mod.get(field) if value is not None: new_expectations[field] = value builder_dict = expected_builder_dicts[mod['builder']] builder_expectations = builder_dict.get(gm_json.JSONKEY_EXPECTEDRESULTS) if not builder_expectations: builder_expectations = {} builder_dict[gm_json.JSONKEY_EXPECTEDRESULTS] = builder_expectations builder_expectations[image_name] = new_expectations Results._write_dicts_to_root(expected_builder_dicts, self._expected_root) def get_results_of_type(self, type): """Return results of some/all tests (depending on 'type' parameter). Args: type: string describing which types of results to include; must be one of the RESULTS_* constants Results are returned as a dictionary in this form: { 'categories': # dictionary of categories listed in # CATEGORIES_TO_SUMMARIZE, with the number of times # each value appears within its category { 'resultType': # category name { 'failed': 29, # category value and total number found of that value 'failure-ignored': 948, 'no-comparison': 4502, 'succeeded': 38609, }, 'builder': { 'Test-Mac10.6-MacMini4.1-GeForce320M-x86-Debug': 1286, 'Test-Mac10.6-MacMini4.1-GeForce320M-x86-Release': 1134, ... }, ... # other categories from CATEGORIES_TO_SUMMARIZE }, # end of 'categories' dictionary 'testData': # list of test results, with a dictionary for each [ { 'resultType': 'failed', 'builder': 'Test-Mac10.6-MacMini4.1-GeForce320M-x86-Debug', 'test': 'bigmatrix', 'config': '8888', 'expectedHashType': 'bitmap-64bitMD5', 'expectedHashDigest': '10894408024079689926', 'actualHashType': 'bitmap-64bitMD5', 'actualHashDigest': '2409857384569', 'bugs': [123, 456], 'ignore-failure': false, 'reviewed-by-human': true, }, ... ], # end of 'testData' list } """ return self._results[type] @staticmethod def _ignore_builder(builder)
: """Returns True if
we should ignore expectations and actuals for a builder. This allows us to ignore builders for which we don't maintain expectations (trybots, Valgrind, ASAN, TSAN), and avoid problems like https://code.google.com/p/skia/issues/detail?id=2036 ('rebaseline_server produces error when trying to add baselines for ASAN/TSAN builders') Args: builder: name of this builder, as a string Returns: True if we should ignore expectations and actuals for this builder. """ return (builder.endswith('-Trybot') or ('Valgrind' in builder) or ('TSAN' in builder) or ('ASAN' in builder)) @staticmethod def _read_dicts_from_root(root, pattern='*.json'): """Read all JSON dictionaries within a directory tree. Args: root: path to root of directory tree pattern: which files to read within root (fnmatch-style pattern) Returns: A meta-dictionary containing all the JSON dictionaries found within the directory tree, keyed by the builder name of each dictionary. Raises: IOError if root does not refer to an existing directory """ if not os.path.isdir(root): raise IOError('no directory found at path %s' % root) meta_dict = {} for dirpath, dirnames, filenames in os.walk(root): for matching_filename in fnmatch.filter(filenames, pattern): builder = os.path.basename(dirpath) if Results._ignore_builder(builder): continue fullpath = os.path.join(dirpath, matching_filename) meta_dict[builder] = gm_json.LoadFromFile(fullpath) return meta_dict @staticmethod def _write_dicts_to_root(meta_dict, root, p
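A short sketch of driving the Results class above, with placeholder directory paths; the modification dictionary follows the format documented in edit_expectations():

# Names (Results, RESULTS_FAILURES) come from the module above.
results = Results(actuals_root='/tmp/gm-actuals',
                  expected_root='/tmp/gm-expected',
                  generated_images_root='/tmp/gm-diffs')

failures = results.get_results_of_type(RESULTS_FAILURES)
print(failures['categories']['resultType'])    # e.g. counts of failed/succeeded tests

results.edit_expectations([{
    'builder': 'Test-Mac10.6-MacMini4.1-GeForce320M-x86-Debug',
    'test': 'bigmatrix',
    'config': '8888',
    'expectedHashType': 'bitmap-64bitMD5',
    'expectedHashDigest': '10894408024079689926',
    'reviewed-by-human': True,
}])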
goddardl/gaffer
python/GafferSceneUI/TransformUI.py
Python
bsd-3-clause
2,347
0.008095
########################################################################## # # Copyright (c) 2013-2014, Image Engine Design Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above # copyright notice, this list of conditions and the following # disclaimer. # # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation a
nd/or other materials provided with # the distribution. # # * Neither the name of John Haddon nor the names of # any other contributors to this software may be used to endorse or # promote products derived from this software without specific prior # written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS # IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, # THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FIT
NESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ########################################################################## import Gaffer import GafferUI import GafferScene Gaffer.Metadata.registerNodeDescription( GafferScene.Transform, """Modifies the transforms of all locations matched by the filter.""", "space", """The space in which the transform is applied.""", "transform", """The transform to be applied.""", ) GafferUI.PlugValueWidget.registerCreator( GafferScene.Transform, "space", GafferUI.EnumPlugValueWidget, labelsAndValues = ( ( "World", GafferScene.Transform.Space.World ), ( "Object", GafferScene.Transform.Space.Object ), ) )
seehuhn/wisent
template.py
Python
gpl-2.0
10,909
0.006508
# Copyright (C) 2008, 2009 Jochen Voss <[email protected]> # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials provided # with the distribution. # # 3. The name of the author may not be used to endorse or promote # products derived from this software without specific prior # written permission. # # This software is provided by the author "as is" and any express or # implied warranties, including, but not limited to, the implied # warranties of merchantability and fitness for a particular purpose # are disclaimed. In no event shall the author be liable for any # direct, indirect, incidental, special, exemplary, or consequential # damages (including, but not limited to, procurement of substitute # goods or services; loss of use, data, or profits; or business # interruption) however caused and on any theory of liability, whether # in contract, strict liability, or tort (including negligence or # otherwise) arising in any way out of the use of this software, even # if advised of the possibility of such damage. def print_tree(tree, terminals, indent=0): """Print a parse tree to stdout.""" prefix = " "*indent if tree[0] in terminals: print prefix + repr(tree) else: print prefix + unicode(tree[0]) for x in tree[1:]: print_tree(x, terminals, indent+1) class Parser(object): """LR(1) parser class template. This class is only used to store source code sniplets for the generated parser. Code is taken out via code inspection and pasted into the output file. """ class ParseErrors(Exception): """Exception class to represent a collection of parse errors. Instances of this class have two attributes, `errors` and `tree`. `errors` is a list of tuples, each describing one error. #@ IF error_stacks Each tuple consists of the first input token which could not be processed, the list of grammar symbols which were allowed at this point, and a list of partial parse trees which represent the input parsed so far. #@ ELSE Each tuple consists of the first input token which could not be processed and the list of grammar symbols which were allowed at this point. #@ ENDIF `tree` is a "repaired" parse tree which might be used for further error checking, or `None` if no repair was possible. """ def __init__(self, errors, tree): msg = "%d parse errors"%len(errors) Exception.__init__(self, msg) self.errors = errors self.tree = tree def __init__(self, max_err=None, errcorr_pre=4, errcorr_post=4): """Create a new parser instance. The constructor arguments are all optional, they control the handling of parse errors: `max_err` can be given to bound the number of errors reported during one run of the parser. `errcorr_pre` controls how many tokens before an invalid token the parser considers when trying to repair the input. `errcorr_post` controls how far beyond an invalid token the parser reads when evaluating the quality of an attempted repair. """ self.max_err = max_err self.m = errcorr_pre self.n = errcorr_post @staticmethod def leaves(tree): """Iterate over the leaves of a parse tree. This function can be used to reconstruct the input from a parse tree. 
""" if tree[0] in Parser.terminals: yield tree else: for x in tree[1:]: for t in Parser.leaves(x): yield t def _parse(self, tokens, stack, state): """Internal function to construct a parse tree. 'Tokens' is the input token stream, 'stack' is the inital stack and 'state' is the inital state of the automaton. Returns a 4-tuple (done, count, state, error). 'done' is a boolean indicationg whether parsing is completed, 'count' is number of successfully shifted tokens, and 'error' is None on success or else the first token which could not be parsed. """ read_
next = True count = 0 while state != self._halting_state: if read_next: try:
lookahead = tokens.next() except StopIteration: return (False,count,state,None) read_next = False token = lookahead[0] #@ IF parser_debugprint debug = [ ] for s in stack: debug.extend([str(s[0]), repr(s[1][0])]) debug.append(str(state)) print " ".join(debug)+" [%s]"%repr(token) #@ ENDIF parser_debugprint if (state,token) in self._shift: #@ IF parser_debugprint print "shift %s"%repr(token) #@ ENDIF stack.append((state,lookahead)) state = self._shift[(state,token)] read_next = True count += 1 elif (state,token) in self._reduce: X,n = self._reduce[(state,token)] if n > 0: state = stack[-n][0] #@ IF transparent_tokens tree = [ X ] for s in stack[-n:]: if s[1][0] in self._transparent: tree.extend(s[1][1:]) else: tree.append(s[1]) tree = tuple(tree) #@ ELSE tree = (X,) + tuple(s[1] for s in stack[-n:]) #@ ENDIF #@ IF parser_debugprint debug = [ s[1][0] for s in stack[-n:] ] #@ ENDIF del stack[-n:] else: tree = (X,) #@ IF parser_debugprint debug = [ ] #@ ENDIF #@ IF parser_debugprint print "reduce %s -> %s"%(repr(debug),repr(X)) #@ ENDIF stack.append((state,tree)) state = self._goto[(state,X)] else: #@ IF parser_debugprint print "parse error" #@ ENDIF return (False,count,state,lookahead) return (True,count,state,None) def _try_parse(self, tokens, stack, state): count = 0 while state != self._halting_state and count < len(tokens): token = tokens[count][0] if (state,token) in self._shift: stack.append(state) state = self._shift[(state,token)] count += 1 elif (state,token) in self._reduce: X,n = self._reduce[(state,token)] if n > 0: state = stack[-n] del stack[-n:] stack.append(state) state = self._goto[(state,X)] else: break return count def parse(self, tokens): """Parse the tokens from `tokens` and construct a parse tree. `tokens` must be an interable over tuples. The first element of each tuple must be a terminal symbol of the grammar which is used for parsing. All other element of the tuple are just copied into the constructed parse tree. If `tokens` is invalid, a ParseErrors exception is raised. Otherwise the function returns the parse tree. """ errors = [] tokens = chain(tokens, [(se
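A minimal sketch of how a parser generated from this template might be driven; the token stream and terminal names are hypothetical, and the error tuples are unpacked in the two-element form documented above (i.e. without error_stacks):

p = Parser(max_err=10, errcorr_pre=4, errcorr_post=4)

# Hypothetical (terminal, value) pairs as produced by some tokenizer.
tokens = [('NUMBER', '1'), ('PLUS', '+'), ('NUMBER', '2')]

try:
    tree = p.parse(tokens)
except Parser.ParseErrors as e:
    for token, expected in e.errors:
        print("unexpected %r, expected one of %s" % (token, expected))
else:
    # print_tree() and Parser.terminals are defined in the generated module.
    print_tree(tree, Parser.terminals)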
HyperloopTeam/FullOpenMDAO
lib/python2.7/site-packages/traits-4.3.0-py2.7-macosx-10.10-x86_64.egg/traits/protocols/__init__.py
Python
gpl-2.0
365
0
"""Trivial Interfaces and Adaptation from PyProtocols. This package is a subset of the files from Phillip J. Eby's PyProtocols package. They are only included here to help remove dependenc
ies on external packages from the Traits package. The code has been reorganized to address circular imports that were discovered when explicit relative imports were added. ""
"
rikpg/django-googlytics
test_settings.py
Python
bsd-3-clause
510
0
# -*- coding: utf
-8 -*- # # Minimum amount of settings to run the googlytics test suite # # googlytics options are often overriden during tests GOOGLE_ANALYTICS_KEY = 'U-TEST-XXX' DEBUG = True DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': 'googlytics_test.sqlite3' } } INSTALLED_APPS = ( 'django.contrib.auth
', 'django.contrib.contenttypes', 'googlytics', ) TEMPLATE_CONTEXT_PROCESSORS = ( 'googlytics.context_processors.googlytics', )
OpringaoDoTurno/airflow
tests/utils/test_log_handlers.py
Python
apache-2.0
6,367
0.001099
# -*- coding: utf-8 -*- # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import logging.config import os import unittest import six from airflow.models import TaskInstance, DAG, DagRun from airflow.config_templates.airflow_local_settings import DEFAULT_LOGGING_CONFIG from airflow.operators.dummy_operator import DummyOperator from airflow.operators.python_operator import PythonOperator from airflow.utils.timezone import datetime from airflow.utils.log.logging_mixin import set_context from airflow.utils.log.file_task_handler import FileTaskHandler from airflow.utils.db import create_session from airflow.utils.state import State DEFAULT_DATE = datetime(2016, 1, 1) TASK_LOGGER = 'airflow.task' FILE_TASK_HANDLER = 'file.task' class TestFileTaskLogHandler(unittest.TestCase): def cleanUp(self): with create_session() as session: session.query(DagRun).delete() session.query(TaskInstance).delete() def setUp(self): super(TestFileTaskLogHandler, self).setUp() logging.config.dictConfig(DEFAULT_LOGGING_CONFIG) logging.root.disabled = False self.cleanUp() # We use file task handler by default. def tearDown(self): self.cleanUp() super(TestFileTaskLogHandler, self).tearDown() def test_default_task_lo
gging_setup(self): # file task handler is used by default. logger = logging.getLogger(TASK_LOGGER) handlers = logger.handlers self.assertEqual(len(handlers), 1) handler = handlers[0]
self.assertEqual(handler.name, FILE_TASK_HANDLER) def test_file_task_handler(self): def task_callable(ti, **kwargs): ti.log.info("test") dag = DAG('dag_for_testing_file_task_handler', start_date=DEFAULT_DATE) task = PythonOperator( task_id='task_for_testing_file_log_handler', dag=dag, python_callable=task_callable, provide_context=True ) ti = TaskInstance(task=task, execution_date=DEFAULT_DATE) logger = ti.log ti.log.disabled = False file_handler = next((handler for handler in logger.handlers if handler.name == FILE_TASK_HANDLER), None) self.assertIsNotNone(file_handler) set_context(logger, ti) self.assertIsNotNone(file_handler.handler) # We expect set_context generates a file locally. log_filename = file_handler.handler.baseFilename self.assertTrue(os.path.isfile(log_filename)) self.assertTrue(log_filename.endswith("1.log"), log_filename) ti.run(ignore_ti_state=True) file_handler.flush() file_handler.close() self.assertTrue(hasattr(file_handler, 'read')) # Return value of read must be a list. logs = file_handler.read(ti) self.assertTrue(isinstance(logs, list)) self.assertEqual(len(logs), 1) target_re = r'\n\[[^\]]+\] {test_log_handlers.py:\d+} INFO - test\n' # We should expect our log line from the callable above to appear in # the logs we read back six.assertRegex( self, logs[0], target_re, "Logs were " + str(logs) ) # Remove the generated tmp log file. os.remove(log_filename) def test_file_task_handler_running(self): def task_callable(ti, **kwargs): ti.log.info("test") dag = DAG('dag_for_testing_file_task_handler', start_date=DEFAULT_DATE) task = PythonOperator( task_id='task_for_testing_file_log_handler', dag=dag, python_callable=task_callable, provide_context=True ) ti = TaskInstance(task=task, execution_date=DEFAULT_DATE) ti.try_number = 2 ti.state = State.RUNNING logger = ti.log ti.log.disabled = False file_handler = next((handler for handler in logger.handlers if handler.name == FILE_TASK_HANDLER), None) self.assertIsNotNone(file_handler) set_context(logger, ti) self.assertIsNotNone(file_handler.handler) # We expect set_context generates a file locally. log_filename = file_handler.handler.baseFilename self.assertTrue(os.path.isfile(log_filename)) self.assertTrue(log_filename.endswith("2.log"), log_filename) logger.info("Test") # Return value of read must be a list. logs = file_handler.read(ti) self.assertTrue(isinstance(logs, list)) # Logs for running tasks should show up too. self.assertEqual(len(logs), 2) # Remove the generated tmp log file. os.remove(log_filename) class TestFilenameRendering(unittest.TestCase): def setUp(self): dag = DAG('dag_for_testing_filename_rendering', start_date=DEFAULT_DATE) task = DummyOperator(task_id='task_for_testing_filename_rendering', dag=dag) self.ti = TaskInstance(task=task, execution_date=DEFAULT_DATE) def test_python_formatting(self): expected_filename = 'dag_for_testing_filename_rendering/task_for_testing_filename_rendering/%s/42.log' % DEFAULT_DATE.isoformat() fth = FileTaskHandler('', '{dag_id}/{task_id}/{execution_date}/{try_number}.log') rendered_filename = fth._render_filename(self.ti, 42) self.assertEqual(expected_filename, rendered_filename) def test_jinja_rendering(self): expected_filename = 'dag_for_testing_filename_rendering/task_for_testing_filename_rendering/%s/42.log' % DEFAULT_DATE.isoformat() fth = FileTaskHandler('', '{{ ti.dag_id }}/{{ ti.task_id }}/{{ ts }}/{{ try_number }}.log') rendered_filename = fth._render_filename(self.ti, 42) self.assertEqual(expected_filename, rendered_filename)
Fbonazzi/Scripts
binary/int_to_byte.py
Python
gpl-3.0
471
0
#!/usr/bin/env pyt
hon # Written by Filippo Bonazzi <[email protected]> 2016 # # Convert an integer from its decimal representation into its hexadecimal # representation. # TODO: add argparse import sys import math s = "".join(sys.argv[1].split()) for c in s: if c not in "1234567890": print("Bad string \"{}\"".format(s)) sys.exit(1) a = 0 for i in range(0, len(s)): a += int(s[len(s) - i - 1]) * int(math.pow(10, i)) print
("{0:#x}".format(a))
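For reference, an invocation sketch of the script above; its digit-by-digit accumulation yields the same value as Python's built-in int parsing:

# python int_to_byte.py 255    ->  0xff
# Equivalent check with the built-in parser:
print("{0:#x}".format(int("255")))    # 0xff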
Unofficial-Extend-Project-Mirror/foam-extend-foam-extend-3.2
ThirdParty/LocalDev/Hydro-Quebec/PyFoam/ChangeGGIBoundary.py
Python
gpl-3.0
7,040
0.012642
""" Application-class that implements pyFoamChangeGGIBoundary.py Modification of GGI and cyclicGGI interface parameters in constant/polymesh/boundary file. Author: Martin Beaudoin, Hydro-Quebec, 2009. All rights reserved """ from PyFoam.Applications.PyFoamApplication import PyFoamApplication from PyFoam.RunDictionary.ParsedParameterFile import ParsedParameterFile from PyFoam.ThirdParty.six import print_ from os import path import sys import re class ChangeGGIBoundary(PyFoamApplication): def __init__(self,args=None): description="""\ Change GGI boundary condition parameters """ PyFoamApplication.__init__(self, args=args, description=description, usage="%prog <caseDirectory> ggiPatchName", interspersed=True, changeVersion=False, nr=2) def addOptions(self): self
.parser.add_option("--shadowPatch", action="store", dest="shadowPatch", default=None, help='Name of the shadowPatch') self.parser.add_option("--shadowName", action="store", dest="shadowName",
default=None, help='Name of the shadowPatch. Deprecated. Use --shadowPatch instead') self.parser.add_option("--zone", action="store", dest="zone", default=None, help='Name of the zone for the GGI patch') self.parser.add_option("--patchZoneName", action="store", dest="patchZoneName", default=None, help='Name of the zone for the GGI patch. Deprecated. Use --zone instead') self.parser.add_option("--bridgeOverlap", action="store", dest="bridgeOverlap", default=None, help='bridgeOverlap flag (on/off)') self.parser.add_option("--bridgeOverlapFlag", action="store", dest="bridgeOverlapFlag", default=None, help='bridgeOverlap flag (on/off). Deprecated. Use --bridgeOverlap instead') self.parser.add_option("--rotationAxis", action="store", dest="rotationAxis", default=None, help='rotation axis for cyclicGgi') self.parser.add_option("--rotationAngle", action="store", dest="rotationAngle", default=None, help='rotation axis angle for cyclicGgi') self.parser.add_option("--separationOffset", action="store", dest="separationOffset", default=None, help='separation offset for cyclicGgi') self.parser.add_option("--test", action="store_true", default=False, dest="test", help="Only print the new boundary file") def run(self): fName=self.parser.getArgs()[0] bName=self.parser.getArgs()[1] boundary=ParsedParameterFile(path.join(".",fName,"constant","polyMesh","boundary"),debug=False,boundaryDict=True) bnd=boundary.content if type(bnd)!=list: self.error("Problem with boundary file (not a list)") found=False for val in bnd: if val==bName: found=True elif found: bcType=val["type"] if re.match("cyclicGgi", bcType)!= None or re.match("ggi", bcType)!= None: if self.parser.getOptions().shadowPatch!=None: shadowPatch=self.parser.getOptions().shadowPatch val["shadowPatch"]=shadowPatch if shadowPatch not in bnd: self.error("\n Option --shadowPatch for patch:",bName,": there is no patch called",shadowPatch,"\n") if self.parser.getOptions().zone!=None: val["zone"]=self.parser.getOptions().zone if self.parser.getOptions().bridgeOverlap!=None: val["bridgeOverlap"]=self.parser.getOptions().bridgeOverlap if val["type"]=="cyclicGgi": if self.parser.getOptions().rotationAxis!=None: val["rotationAxis"]=self.parser.getOptions().rotationAxis if self.parser.getOptions().rotationAngle!=None: val["rotationAngle"]=self.parser.getOptions().rotationAngle if self.parser.getOptions().separationOffset!=None: val["separationOffset"]=self.parser.getOptions().separationOffset # Deprecated if self.parser.getOptions().shadowName!=None: self.warning("\n PatchName:",bName,": Option --shadowName is deprecated. Use --shadowPatch instead\n") shadowName=self.parser.getOptions().shadowName val["shadowPatch"]=shadowName if shadowName not in bnd: self.error("\n Option --shadowName for patch:",bName,": there is no patch called",shadowName,"\n") # Deprecated if self.parser.getOptions().patchZoneName!=None: self.warning("\n PatchName:",bName,": Option --patchZoneName is deprecated. Use --zone instead\n") val["zone"]=self.parser.getOptions().patchZoneName # Deprecated if self.parser.getOptions().bridgeOverlapFlag!=None: self.warning("\n PatchName:",bName,": Option --bridgeOverlapFlag is deprecated. Use --bridgeOverlap instead\n") val["bridgeOverlap"]=self.parser.getOptions().bridgeOverlapFlag else: print_("Unsupported GGI type '",bcType,"' for patch",bName) break if not found: self.error("Boundary",bName,"not found in",bnd[::2]) if self.parser.getOptions().test: print_(boundary) else: boundary.writeFile()
splunk/eventgen
splunk_eventgen/lib/eventgentoken.py
Python
apache-2.0
23,368
0.002225
# TODO: Handle timestamp generation for modinput and set sample.timestamp properly for timestamp replacement import datetime import json import os import pprint import random import re import time import uuid import six.moves.urllib.error import six.moves.urllib.parse import six.moves.urllib.request from splunk_eventgen.lib.logging_config import logger from splunk_eventgen.lib.timeparser import timeDelta2secs class Token(object): """Contains data and methods for replacing a token in a given sample""" token = None replacementType = None replacement = None sample = None mvhash = {} _replaytd = None _lastts = None _tokenfile = None _tokents = None _earliestTime = None _latestTime = None _replacementFile = None _replacementColumn = None _integerMatch = None _floatMatch = None _hexMatch = None _stringMatch = None _listMatch = None _tokenfilecounter = 0 def __init__(self, sample=None): self._earliestTime = (None, None) self._latestTime = (None, None) def __str__(self): """Only used for debugging, outputs a pretty printed representation of this token""" # Eliminate recursive going back to parent temp = dict( [(key, value) for (key, value) in self.__dict__.items() if key != "sample"] ) return pprint.pformat(temp) def __repr__(self): return self.__str__() def _match(self, event): """Executes regular expression match and returns the re.Match object""" return re.match(self.token, event) def _search(self, event): """Executes regular expression search and returns the re.Match object""" return re.search(self.token, event) def _finditer(self, event): """Executes regular expression finditer and returns the re.Match object""" return re.finditer(self.token, event) def _findall(self, event): """Executes regular expression finditer and returns the re.Match object""" return re.findall(self.token, event) def replace(self, event, et=None, lt=None, s=None, pivot_timestamp=None): """Replaces all instances of this token in provided event and returns event""" offset = 0 tokenMatch = list(self._finditer(event)) if len(tokenMatch) > 0: replacement = self._getReplacement( event[tokenMatch[0].start(0) : tokenMatch[0].end(0)], et, lt, s, pivot_timestamp=pivot_timestamp, ) if replacement is not None or self.replacementType == "replaytimestamp": # logger.debug("Replacement: '%s'" % replacement) # Iterate matches for match in tokenMatch: # logger.debug("Match: %s" % (match)) try: matchStart = match.start(1) + offset matchEnd = match.end(1) + offset startEvent = event[:matchStart] endEvent = event[matchEnd:] # In order to not break legacy which might replace the same timestamp # with the same value in multiple matches, here we'll include # ones that need to be replaced for every match if self.replacementType == "replaytimestamp": replacement = lt.strftime(self.replacement) offset += len(replacement) - len(match.group(1)) except: matchStart = match.start(0) + offset matchEnd = match.end(0) + offset startEvent = event[:matchStart] endEvent = event[matchEnd:] # In order to not break legacy which might replace the same timestamp # with the same value in multiple matches, here we'll include # ones that need to be replaced for every match if self.replacementType == "replaytimestamp": replacement = lt.strftime(self.replacement) offset += len(replacement) - len(match.group(0)) # logger.debug("matchStart %d matchEnd %d offset %d" % (matchStart, matchEnd, offset)) event = startEvent + replacement + endEvent # Reset replay internal variables for this token self._replaytd = None self._lastts = None return event def _getReplacement( 
self, old=None, earliestTime=None, latestTime=None, s=None, pivot_timestamp=None ): if self.replacementType == "static": return self.replacement # This logic is done in replay.py elif self.replacementType == "replaytimestamp": pass elif self.replacementType == "timestamp": if s.earliest and s.latest: if earliestTime and latestTime: if latestTime >= earliestTime: if pivot_timestamp: replacementTime = pivot_timestamp elif s.timestamp is None: minDelta = 0 # Compute timeDelta as total_seconds td = latestTime - earliestTime if not type(td) == float: maxDelta = timeDelta2secs(td) else: maxDelta = td # Get random timeDelta randomDelta = datetime.timedelta( seconds=random.randint(minDelta, maxDelta), microseconds=random.randint( 0, latestTime.microsecond if latestTime.microsecond > 0 else 999999, ), ) # Compute replacmentTime replacementTime = latestTime - randomDelta s.timestamp = replacementTime else: replacementTime = s.timestamp replacement = self.replacement.replace( "%s", str(round(time.mktime(replacementTime.timetuple()))) .rstrip("0") .rstrip("."), ) replacementTime = replacementTime.strftime(replacement) # replacementTime == replacement for invalid strptime specifiers if replacementTime != self.replacement.replace("%", ""): return replacementTime else: logger.error( "Invalid strptime specifier '%s' detected; will not replace" % (self.replacement) ) return old # earliestT
ime/latestTime not proper else: logger.error( ( "Earliest specifier '%s', value '%s' is greater than latest specifier '%s'" + "value '%s' for sample '%s'; will not replace" )
% (s.earliest, earliestTime, s.latest, latestTime, s.name) ) return old # earliest/latest not proper else: logger.error( "Earliest or latest specifier were not set; will not replace" ) return old elif self.replacementType in ("random", "rated"): # Validations: if self._i
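A small sketch of the simplest replacement path through the Token class above, assuming the splunk_eventgen package is importable; the token regex and replacement value are made up:

from splunk_eventgen.lib.eventgentoken import Token

t = Token()
t.token = r'@@hostname'          # regex locating the token inside a raw event
t.replacementType = 'static'
t.replacement = 'web-01'

print(t.replace('src=@@hostname action=login'))
# expected: 'src=web-01 action=login'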
edb1rd/BTC
plugins/trustedcoin.py
Python
gpl-3.0
25,459
0.003928
#!/usr/bin/env python # # Electrum - Lightweight Bitcoin Client # Copyright (C) 2015 Thomas Voegtlin # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import threading import socket import os import re import requests import json from hashlib import sha256 from urlparse import urljoin from urllib import quote from PyQt4.QtGui import * from PyQt4.QtCore import * import electrum from electrum import bitcoin from electrum.bitcoin import * from electrum.mnemonic import Mnemonic from electrum import version from electrum.wallet import Wallet_2of3 from electrum.i18n import _ from electrum.plugins import BasePlugin, run_hook, hook from electrum_gui.qt.util import * from electrum_gui.qt.qrcodewidget import QRCodeWidget from electrum_gui.qt.amountedit import AmountEdit from electrum_gui.qt.main_window import StatusBarButton from decimal import Decimal # signing_xpub is hardcoded so that the wallet can be restored from seed, without TrustedCoin's server signing_xpub = "xpub661MyMwAqRbcGnMkaTx2594P9EDuiEqMq25PM2aeG6UmwzaohgA6uDmNsvSUV8ubqwA3Wpste1hg69XHgjUuCD5HLcEp2QPzyV1HMrPppsL" billing_xpub = "xpub6DTBdtBB8qUmH5c77v8qVGVoYk7WjJNpGvutqjLasNG1mbux6KsojaLrYf2sRhXAVU4NaFuHhbD9SvVPRt1MB1MaMooRuhHcAZH1yhQ1qDU" SEED_PREFIX = version.SEED_PREFIX_2FA class TrustedCoinException(Exception): def __init__(self, message, status_code=0): Exception.__init__(self, message) self.status_code = status_code class TrustedCoinCosignerClient(object): def __init__(self, user_agent=None, base_url='https://api.trustedcoin.com/2/', debug=False): self.base_url = base_url self.debug = debug self.user_agent = user_agent def send_request(self, method, relative_url, data=None): kwargs = {'headers': {}} if self.user_agent: kwargs['headers']['user-agent'] = self.user_agent if method == 'get' and data: kwargs['params'] = data elif method == 'post' and data: kwargs['data'] = json.dumps(data) kwargs['headers']['content-type'] = 'application/json' url = urljoin(self.base_url, relative_url) if self.debug: print '%s %s %s' % (method, url, data) response = requests.request(method, url, **kwargs) if self.debug: print response.text print if response.status_code != 200: message = str(response.text) if response.headers.get('content-type') == 'application/json': r = response.json() if 'message' in r: message = r['message'] raise TrustedCoinException(message, response.status_
code) if response.headers.get('content-type') == 'application/json': re
turn response.json() else: return response.text def get_terms_of_service(self, billing_plan='electrum-per-tx-otp'): """ Returns the TOS for the given billing plan as a plain/text unicode string. :param billing_plan: the plan to return the terms for """ payload = {'billing_plan': billing_plan} return self.send_request('get', 'tos', payload) def create(self, xpubkey1, xpubkey2, email, billing_plan='electrum-per-tx-otp'): """ Creates a new cosigner resource. :param xpubkey1: a bip32 extended public key (customarily the hot key) :param xpubkey2: a bip32 extended public key (customarily the cold key) :param email: a contact email :param billing_plan: the billing plan for the cosigner """ payload = { 'email': email, 'xpubkey1': xpubkey1, 'xpubkey2': xpubkey2, 'billing_plan': billing_plan, } return self.send_request('post', 'cosigner', payload) def auth(self, id, otp): """ Attempt to authenticate for a particular cosigner. :param id: the id of the cosigner :param otp: the one time password """ payload = {'otp': otp} return self.send_request('post', 'cosigner/%s/auth' % quote(id), payload) def get(self, id): """ Attempt to authenticate for a particular cosigner. :param id: the id of the cosigner :param otp: the one time password """ return self.send_request('get', 'cosigner/%s' % quote(id)) def sign(self, id, transaction, otp): """ Attempt to authenticate for a particular cosigner. :param id: the id of the cosigner :param transaction: the hex encoded [partially signed] compact transaction to sign :param otp: the one time password """ payload = { 'otp': otp, 'transaction': transaction } return self.send_request('post', 'cosigner/%s/sign' % quote(id), payload) def transfer_credit(self, id, recipient, otp, signature_callback): """ Tranfer a cosigner's credits to another cosigner. 
:param id: the id of the sending cosigner :param recipient: the id of the recipient cosigner :param otp: the one time password (of the sender) :param signature_callback: a callback that signs a text message using xpubkey1/0/0 returning a compact sig """ payload = { 'otp': otp, 'recipient': recipient, 'timestamp': int(time.time()), } relative_url = 'cosigner/%s/transfer' % quote(id) full_url = urljoin(self.base_url, relative_url) headers = { 'x-signature': signature_callback(full_url + '\n' + json.dumps(payload)) } return self.send_request('post', relative_url, payload, headers) server = TrustedCoinCosignerClient(user_agent="Electrum/" + version.ELECTRUM_VERSION) class Wallet_2fa(Wallet_2of3): wallet_type = '2fa' def get_action(self): xpub1 = self.master_public_keys.get("x1/") xpub2 = self.master_public_keys.get("x2/") xpub3 = self.master_public_keys.get("x3/") if xpub2 is None and not self.storage.get('use_trustedcoin'): return 'show_disclaimer' if xpub2 is None: return 'create_extended_seed' if xpub3 is None: return 'create_remote_key' if not self.accounts: return 'create_accounts' def make_seed(self): return Mnemonic('english').make_seed(num_bits=256, prefix=SEED_PREFIX) def estimated_fee(self, tx): fee = Wallet_2of3.estimated_fee(self, tx) x = run_hook('extra_fee', tx) if x: fee += x return fee def get_tx_fee(self, tx): fee = Wallet_2of3.get_tx_fee(self, tx) x = run_hook('extra_fee', tx) if x: fee += x return fee class Plugin(BasePlugin): wallet = None def __init__(self, x, y): BasePlugin.__init__(self, x, y) self.seed_func = lambda x: bitcoin.is_new_seed(x, SEED_PREFIX) self.billing_info = None self.is_billing = False def constructor(self, s): return Wallet_2fa(s) def is_available(self): if not self.wallet: return False if self.wallet.storage.get('wallet_type') == '2fa': return True return False def requires_settings(self): return True def set_enabled(self, enabled): self.wallet.storage.put('use_' + self.name, enabled) def is_enabled(self): if not self.is_available(): return
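A rough sketch of exercising the cosigner client defined above; the xpubs, email, cosigner id, OTP and transaction hex are all placeholders:

client = TrustedCoinCosignerClient(user_agent="Electrum/2.x")

# Plain-text terms of service for the default billing plan.
print(client.get_terms_of_service())

# Register a cosigner from two extended public keys (placeholder values).
client.create('xpub661...hot', 'xpub661...cold', 'user@example.com')

# Later: authenticate with a one-time password and co-sign a transaction.
cosigner_id = 'COSIGNER-ID'    # placeholder for the id of the cosigner created above
raw_tx_hex = '0100...'         # placeholder for a partially signed transaction (hex)
client.auth(cosigner_id, '123456')
signed = client.sign(cosigner_id, raw_tx_hex, '123456')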
Nikola-K/django-template
users/migrations/0001_initial.py
Python
mit
3,082
0.004218
# -*- coding: utf-8 -*- # Generated by Django 1.9 on 2016-01-02 18:22 from __future__ import unicode_literals import django.contrib.auth.models import django.core.validators from django.db import migrations, models import django.utils.timezone class Migration(migrations.Migration): initial = True dependencies = [ ('auth', '0007_alter_validators_add_error_messages'), ] operations = [ migrations.CreateModel( name='Person', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('password', models.CharField(max_length=128, verbose_name='password')), ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')), ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigni
ng them.', verbose_name='superuser status')), ('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 30 characters or fewer
. Letters, digits and @/./+/-/_ only.', max_length=30, unique=True, validators=[django.core.validators.RegexValidator('^[\\w.@+-]+$', 'Enter a valid username. This value may contain only letters, numbers and @/./+/-/_ characters.')], verbose_name='username')), ('first_name', models.CharField(blank=True, max_length=30, verbose_name='first name')), ('last_name', models.CharField(blank=True, max_length=30, verbose_name='last name')), ('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')), ('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')), ('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')), ('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')), ('description', models.TextField(blank=True)), ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')), ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')), ], options={ 'verbose_name_plural': 'users', 'abstract': False, 'verbose_name': 'user', }, managers=[ ('objects', django.contrib.auth.models.UserManager()), ], ), ]
lightbulb-framework/lightbulb-framework
examples/test_custom_api_native_example_1.py
Python
mit
1,286
0.012442
from lightbulb.api.api_nativ
e import LightBulb import base64 lightbulbapp = LightBulb() path = "/test/env/bin/lightbulb" #Path to binary configuration_A = {'TESTS_FILE_TYPE': 'None', 'ALPHABET': '32-57,58-64,65-126', 'SEED_FILE_TYPE': 'FLEX', 'TESTS_FILE': 'None','DFA1_MINUS_DFA2': 'True', 'SAVE': 'False', 'HANDLER': 'None', 'SEED_FILE': '{library}/regex/BROWSER/html_p_attribute.y'} configuration_B = {'
TESTS_FILE_TYPE': 'None', 'ALPHABET': '32-57,58-64,65-126', 'SEED_FILE_TYPE': 'FLEX', 'TESTS_FILE': 'None','DFA1_MINUS_DFA2': 'True', 'SAVE': 'False', 'HANDLER': 'None', 'SEED_FILE': '{library}/regex/BROWSER/html_p_attribute.y'} handlerconfig_A = {'WSPORT': '5000','WBPORT': '5080', 'BROWSERPARSE': 'True', 'DELAY': '50', 'HOST': 'localhost'} handlerconfig_B = {'URL': 'http://127.0.0.1/~fishingspot/securitycheck/index.php', 'BLOCK':'Impact', 'REQUEST_TYPE':'GET','PARAM':'input','BYPASS':'None', 'PROXY_SCHEME': 'None', 'PROXY_HOST': 'None', 'PROXY_PORT': 'None', 'PROXY_USERNAME': 'None', 'PROXY_PASSWORD': 'None','USER_AGENT': "Mozilla/5.0", 'REFERER': "http://google.com"} stats = lightbulbapp.start_sfadiff_algorithm( path, configuration_A, configuration_B, handlerconfig_A, handlerconfig_B, "BrowserHandler", "HTTPHandler") print stats
USGSDenverPychron/pychron
pychron/core/ui/qt/keybinding_editor.py
Python
apache-2.0
4,794
0.001043
# =============================================================================== # Copyright 2014 Jake Ross # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF AN
Y KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ====================================================================
=========== # ============= enthought library imports ======================= from __future__ import absolute_import from PySide import QtGui, QtCore from traits.trait_types import Event from traitsui.api import View, UItem from traitsui.basic_editor_factory import BasicEditorFactory from traitsui.editors.api import TableEditor from traitsui.handler import Controller from traitsui.qt4.editor import Editor from traitsui.qt4.key_event_to_name import key_event_to_name from traitsui.table_column import ObjectColumn # ============= standard library imports ======================== # ============= local library imports ========================== # from traitsui.basic_editor_factory import BasicEditorFactory from pychron.envisage.key_bindings import keybinding_exists class KeyBindingsEditor(Controller): def traits_view(self): cols = [ ObjectColumn(name="binding", editor=KeyBindingEditor()), ObjectColumn(name="description", editable=False, width=400), ] v = View( UItem("bindings", editor=TableEditor(columns=cols)), width=500, height=600, title="Edit Key Bindings", kind="livemodal", buttons=["OK", "Cancel"], resizable=True, ) return v class KeyBindingControl(QtGui.QLabel): def keyPressEvent(self, event): """Handle keyboard keys being pressed.""" # Ignore presses of the control and shift keys. if event.key() not in (QtCore.Qt.Key_Control, QtCore.Qt.Key_Shift): self.editor.key = event class _KeyBindingEditor(Editor): key = Event # clear = Event # refresh_needed = Event # dump_needed = Event def dispose(self): # override Editor.dispose. don't break reference to control if self.ui is None: return name = self.extended_name if name != "None": self.context_object.on_trait_change(self._update_editor, name, remove=True) if self._user_from is not None: for name, handler in self._user_from: self.on_trait_change(handler, name, remove=True) if self._user_to is not None: for object, name, handler in self._user_to: object.on_trait_change(handler, name, remove=True) # self.object = self.ui = self.item = self.factory = self.control = \ # self.label_control = self.old_value = self._context_object = None def init(self, parent): self.control = self._create_control() # self.sync_value(self.factory.refresh_needed, 'refresh_needed', mode='to') # self.sync_value(self.factory.refresh_needed, 'dump_needed', mode='to') def _create_control(self): ctrl = KeyBindingControl() ctrl.editor = self return ctrl def update_editor(self): """Updates the editor when the object trait changes externally to the editor. """ if self.control: self.control.setText(self.value) def _key_changed(self, event): key_name = key_event_to_name(event) key_name = key_name.replace("-", "+") desc = keybinding_exists(key_name) if desc: if ( QtGui.QMessageBox.question( self.control, "Duplicate Key Definition", "'%s' has already been assigned to '%s'.\n" "Do you wish to continue?" % (key_name, desc), QtGui.QMessageBox.Yes | QtGui.QMessageBox.No, QtGui.QMessageBox.No, ) != QtGui.QMessageBox.Yes ): return # else: # clear_keybinding(desc) # self.refresh_needed = True self.value = key_name self.control.setText(key_name) class KeyBindingEditor(BasicEditorFactory): klass = _KeyBindingEditor # refresh_needed = Str # ============= EOF =============================================
shimarin/discourse-ja-translation
yaml2csv.py
Python
gpl-2.0
1,474
0.00882
#!/usr/bin/python2.7 import sys import csv import yaml import codec
s TO_BE_TRANSLATED_MARK = "***TO BE TRANSLATED***" def collect(result, node, prefix=None): for key,value in node.items(): new_prefix = (key if prefix == None else prefix + "." + key) if isinstance(value, dict): collect(result, value, new_prefix) else: result[new_prefix] = value def collect_old_csv(filename): result = {} reader = csv.reader(open(filename)) for row in reader: if TO_BE_TRANSLATED_MARK not in row[1]: resu
lt[row[0]] = row[1].decode("utf-8") return result def flatten(namespace=None,old_csv=None): namespace = "" if namespace == None else namespace + "." en_src = yaml.load(open("%sen.yml" % namespace)) ja_src = yaml.load(open("%sja.yml" % namespace)) en = {} collect(en, en_src["en"]) ja = {} collect(ja, ja_src["ja"]) ja_old = collect_old_csv(old_csv) if old_csv else {} writer = csv.writer(sys.stdout) for key,value in sorted(en.items()): val = TO_BE_TRANSLATED_MARK + value if key in ja: val = ja[key] elif key in ja_old: val = ja_old[key] writer.writerow([key, val.encode("UTF-8")]) if __name__ == '__main__': if len(sys.argv) < 2: print "Usage: yaml2csv.py namespace('server'|'client') [old-translated-csv-file]" sys.exit(1) flatten(sys.argv[1], None if len(sys.argv) < 3 else sys.argv[2])
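The collect helper in the yaml2csv record flattens nested YAML mappings into dotted keys before the rows are written out as CSV. A self-contained illustration of just that flattening step (written for Python 3, whereas the original script targets Python 2.7):

```python
def collect(result, node, prefix=None):
    # Walk a nested dict and record leaf values under "a.b.c"-style keys.
    for key, value in node.items():
        new_prefix = key if prefix is None else prefix + "." + key
        if isinstance(value, dict):
            collect(result, value, new_prefix)
        else:
            result[new_prefix] = value

flat = {}
collect(flat, {"js": {"topic": {"title": "Title", "views": "Views"}}})
print(flat)   # {'js.topic.title': 'Title', 'js.topic.views': 'Views'}
```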
hazelcast/hazelcast-python-client
hazelcast/proxy/pn_counter.py
Python
apache-2.0
12,287
0.002442
import functools import logging import random from hazelcast.future import Future from hazelcast.proxy.base import Proxy from hazelcast.cluster import VectorClock from hazelcast.protocol.codec import ( pn_counter_add_codec, pn_counter_get_codec, pn_counter_get_configured_replica_count_codec, ) from hazelcast.errors import NoDataMemberInClusterError _logger = logging.getLogger(__name__) class PNCounter(Proxy): """PN (Positive-Negative) CRDT counter. The counter supports adding and subtracting values as well as retrieving the current counter value. Each replica of this counter can perform operations locally without coordination with the other replicas, thus increasing availability. The counter guarantees that whenever two nodes have received the same set of updates, possibly in a different order, their state is identical, and any conflicting updates are merged automatically. If no new updates are made to the shared state, all nodes that can communicate will eventually have the same data. When invoking updates from the client, the invocation is remote. This may lead to indeterminate state - the update may be applied but the response has not been received. In this case, the caller will be notified with a TargetDisconnectedError. The read and write methods provide monotonic read and RYW (read-your-write) guarantees. These guarantees are session guarantees which means that if no replica with the previously observed state is reachable, the session guarantees are lost and the method invocation will throw a ConsistencyLostError. This does not mean that an update is lost. All of the updates are part of some replica and will be eventually reflected in the state of all other replicas. This exception just means that you cannot observe your own writes because all replicas that contain your updates are currently unreachable. After you have received a ConsistencyLostError, you can either wait for a sufficiently up-to-date replica to become reachable in which case the session can be continued or you can reset the session by calling the reset() method. If you have called the reset() method, a new session is started with the next invocation to a CRDT replica. Notes: The CRDT state is kept entirely on non-lite (data) members. If there aren't any and the methods here are invoked on a lite member, they will fail with an NoDataMemberInClusterError. """ _EMPTY_ADDRESS_LIST = [] def __init__(self, service_name, name, context): super(PNCounter, self).__init__(service_name, name, context) self._observed_clock = VectorClock() self._max_replica_count = 0 self._current_target_replica_address = None def get(self): """Returns the current value of the counter. Returns: hazelcast.future.Future[int]: The current value of the counter. Raises: NoDataMemberInClusterError: if the cluster does not contain any data members. ConsistencyLostError: if the session guarantees have been lost. """ return self._invoke_internal(pn_counter_get_codec) def get_and_add(self, delta): """Adds the given value to the current value and returns the previous value. Args: delta (int): The value to add. Returns: hazelcast.future.Future[int]: The previous value. Raises: NoDataMemberInClusterError: if the cluster does not contain any data members. ConsistencyLostError: if the session guarantees have been lost. """ return self._invoke_internal(pn_counter_add_codec, delta=delta, get_before_update=True) def add_and_get(self, delta): """Adds the given value to the current value and returns the updated value. Args: delta (int): The value to add. 
Returns: hazelcast.future.Future[int]: The updated value. Raises: NoDataMemberInClusterError: if the cluster does not contain any data members. ConsistencyLostError: if the session guarantees have been lost. """ return self._invoke_internal(pn_counter_add_codec, delta=delta, get_before_update=False) def get_and_subt
ract(self, delta): """Subtracts the given value from the curr
ent value and returns the previous value. Args: delta (int): The value to subtract. Returns: hazelcast.future.Future[int]: The previous value. Raises: NoDataMemberInClusterError: if the cluster does not contain any data members. ConsistencyLostError: if the session guarantees have been lost. """ return self._invoke_internal(pn_counter_add_codec, delta=-1 * delta, get_before_update=True) def subtract_and_get(self, delta): """Subtracts the given value from the current value and returns the updated value. Args: delta (int): The value to subtract. Returns: hazelcast.future.Future[int]: The updated value. Raises: NoDataMemberInClusterError: if the cluster does not contain any data members. ConsistencyLostError: if the session guarantees have been lost. """ return self._invoke_internal( pn_counter_add_codec, delta=-1 * delta, get_before_update=False ) def get_and_decrement(self): """Decrements the counter value by one and returns the previous value. Returns: hazelcast.future.Future[int]: The previous value. Raises: NoDataMemberInClusterError: if the cluster does not contain any data members. ConsistencyLostError: if the session guarantees have been lost. """ return self._invoke_internal(pn_counter_add_codec, delta=-1, get_before_update=True) def decrement_and_get(self): """Decrements the counter value by one and returns the updated value. Returns: hazelcast.future.Future[int]: The updated value. Raises: NoDataMemberInClusterError: if the cluster does not contain any data members. ConsistencyLostError: if the session guarantees have been lost. """ return self._invoke_internal(pn_counter_add_codec, delta=-1, get_before_update=False) def get_and_increment(self): """Increments the counter value by one and returns the previous value. Returns: hazelcast.future.Future[int]: The previous value. Raises: NoDataMemberInClusterError: if the cluster does not contain any data members. ConsistencyLostError: if the session guarantees have been lost. """ return self._invoke_internal(pn_counter_add_codec, delta=1, get_before_update=True) def increment_and_get(self): """Increments the counter value by one and returns the updated value. Returns: hazelcast.future.Future[int]: The updated value. Raises: NoDataMemberInClusterError: if the cluster does not contain any data members. UnsupportedOperationError: if the cluster version is less than 3.10. ConsistencyLostError: if the session guarantees have been lost. """ return self._invoke_internal(pn_counter_add_codec, delta=1, get_before_update=False) def reset(self): """Resets the observed state by this PN counter. This method may be used after a method invocation has thrown a ``ConsistencyLostError`` to reset the proxy and to be able to start a new session. """ self._observed_clock = VectorClock() def _invoke_internal(self, codec, **kwargs): delegated_future = Future() self._set_result_or_error( delegated_future, PNCounter._EMPTY_ADDRESS_LIST, None, codec, **kwargs ) return delegated_future def _set_result_or_error( self, delegated_future, excluded_addresses, last_error, codec, **kwargs ):
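The PNCounter docstring explains why the counter converges: every replica keeps separate increment and decrement tallies that merge automatically once replicas exchange state. Hazelcast does all of this on the server side; the snippet below is only a conceptual, local sketch of the underlying CRDT idea (per-replica tallies merged by element-wise maximum), not the library's implementation or client API.

```python
class LocalPNCounter:
    """Toy state-based PN counter: one increment/decrement tally per replica id."""

    def __init__(self):
        self.p = {}   # increments contributed by each replica
        self.n = {}   # decrements contributed by each replica

    def add(self, replica, delta):
        if delta >= 0:
            self.p[replica] = self.p.get(replica, 0) + delta
        else:
            self.n[replica] = self.n.get(replica, 0) - delta

    def value(self):
        return sum(self.p.values()) - sum(self.n.values())

    def merge(self, other):
        # element-wise max is safe because each replica only grows its own entry
        for src, dst in ((other.p, self.p), (other.n, self.n)):
            for replica, count in src.items():
                dst[replica] = max(dst.get(replica, 0), count)

a, b = LocalPNCounter(), LocalPNCounter()
a.add("A", 5)
b.add("B", -2)
a.merge(b); b.merge(a)
assert a.value() == b.value() == 3   # replicas converge to the same value
```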
cprogrammer1994/ModernGL
tests/test_code_style.py
Python
mit
587
0
import os import unittest import moderngl import pycodestyle c
lass TestCase(unittest.TestCase): def test_style(self): config_file = os.path.join(os.path.dirname(__file__), '..', 'tox.ini') style = pycodestyle.StyleGuide(config_file=config_file, ignore='E402') check = style.check_files([ os.path.join(os.path.dirname(__file__), '../moderngl/__init__.py'), os.path.join(os.path.dirname(__file__), '../moderngl/__main__.py'), ]) self.assertEqual(check.total_errors, 0) i
f __name__ == '__main__': unittest.main()
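The moderngl test drives pycodestyle programmatically rather than through the command line. Outside a test suite, the same kind of check looks roughly like this; the file path is a placeholder and the exact StyleGuide options may vary between pycodestyle versions.

```python
import pycodestyle

# Check one or more files against PEP 8, ignoring module-level import placement (E402),
# mirroring the options used in the test above.
style = pycodestyle.StyleGuide(ignore='E402')
report = style.check_files(['moderngl/__init__.py'])   # placeholder path
print('style errors:', report.total_errors)
```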
blackice5514/QuickAp
menu/showSubMenu.py
Python
gpl-3.0
20,613
0.027119
import time from menu.ncolor import * from menu.showMainMenu import * from command.shell import * from write.dnsmasq_write import * class Sub_Menu(): dns_message = """ you can add a redirect entry in this menu or edit the dnsmasq configuration file located in""" + color.BLEU + """ '/etc/redirect/dnsmasq.host'\n """ + color.ENDC #the user choose a new name. the input of the user will be put in the user #object def nameMenu(ssid): while True: print ("\nthe current name of the access point is " + color.VERT + "'" + ssid + "'" + color.ENDC) print("") print("%49s" % ("current options" + color.ENDC)) print("%58s" % (color.DARKCYAN + "-----------------------" + color.ENDC)) print("%48s" % ("(1) choose a new name.")) print("%41s" % ("(5) main menu.\n")) while True: NameChoice = input(color.BLEU + "name > " + color.ENDC) if NameChoice == "1": print(color.DARKYELLOW + "enter the new name of the ap..." + color.ENDC) ssid = input(color.BLEU + "name > " + color.DARKROUGE + "new name > " + color.ENDC) print (color.VERT + "[+]" + color.ENDC + " changing the name for " + color.VERT + "'" + ssid + "'" + color.ENDC) time.sleep(1) return ssid elif NameChoice == "5": print(color.VERT + "[+]" + color.ENDC + " going back to main menu.") time.sleep(0.3) return ssid else: print(color.ROUGE + "[*]" + color.ENDC + " please enter a valid option!") #taking the crypt variable object to check if an encryption have been chosen. If not #the user is ask to choose an encryption type. The PassHandle function will be called #to verify if the password respect the security exigence def PassordMenu(crypt, password): while True: if crypt != "N/A": print("") print("%48s" % ("current options" + color.ENDC)) print("%56s" % (color.DARKCYAN + "-----------------------" + color.ENDC)) print("%48s" % ("(1) choose new password.")) print("%39s" % ("(5) main menu.\n")) while True: PasswordChoice = input(color.BLEU + "password > " + color.ENDC) if PasswordChoice == "1": print(color.DARKYELLOW + "enter the new password for the ap..." + color.ENDC) error = False while error == False: password = input(color.BLEU + "password > " + color.DARKROUGE + "new password > " + color.ENDC) error = Sub_Menu.PassHandle(crypt, password) print (color.VERT + "[+]" + color.ENDC + " changing the password to " + color.VERT + "'" + password + "'" + color.ENDC) time.sleep(1) return password elif PasswordChoice == "5": print(color.VERT + "[+]" + color.ENDC + " going back to main menu.") time.sleep(0.3) return password else: print(color.ROUGE + "[*]" + color.ENDC + " please enter a valid option!") else: print(color.ROUGE + "[*]" + color.ENDC + " please select a security type if you want to choose a password.") time.sleep(1.5) return password #take the security type and password in parameter. If a new password is chosen the old #password gonna be reset to zero. def securityMenu(crypt, password): while True: security_text = color.BLEU + color.BOLD + """ -WPA2 """ + color.ENDC + """is the most advanced wifi security protocol curently used by most router by default. The passphrase must have a minimum of 8 character.""" + color.BLEU + color.BOLD + """\n -WPA""" + color.ENDC + """ wpa is older and less secure than wpa2. it is using an older encryption (TKIP). Like wpa2 you need to put at least 8 charactere. """ + color.BLEU + color.BOLD + """\n -WEP""" + color.ENDC + """ wep is deprecated and can be very easely cracked. your wep key must be at least 10 charactere and only contain hexadecimal character.""" print(security_text) print ("\n - the current security o
f the access point is " + color.VERT + "'" + crypt + "'" + color.ENDC) print("") print("%53s" % ("current options" + color.ENDC)) print("%61s" % (color.DARKCYAN + "-----------------------" + color.ENDC)) print("%38s" % ("(1) WPA2.
")) print("%44s" % ("(2) WPA (TKIP).")) print("%47s" % ("(3) WEP (64 bits).")) print("%45s" % ("(4) no security.")) print("%44s" % ("(5) main menu.\n")) while True: NameChoice = input(color.BLEU + "security > " + color.ENDC) pwd = "" if NameChoice == "1": Sec = "WPA2" crypt, password = Sub_Menu.AskPassword(Sec, pwd) return crypt, password elif NameChoice == "2": Sec = "WPA" crypt, password = Sub_Menu.AskPassword(Sec, pwd) return crypt, password elif NameChoice == "3": Sec = "WEP" crypt, password = Sub_Menu.AskPassword(Sec, pwd) return crypt, password elif NameChoice == "4": print (color.VERT + "[+]" + color.ENDC + " deleting the " + color.VERT + crypt + color.ENDC + " security.") time.sleep(1) crypt = "N/A" password = "N/A" return crypt, password elif NameChoice == "5": print(color.VERT + "[+]" + color.ENDC + " going back to main menu.") time.sleep(0.3) return crypt, password else: print(color.ROUGE + "[*]" + color.ENDC + " please enter a valid option!") #giving the option to decide if the dhcp server will be on or off. It will also #give the option to change the dhcp pool adresse. def dhcpMenu(dhcp): while True: #putting some information for the dhcp in variable couleur = color.Color_check(dhcp) dhcpPool = "10.0.0.10-250" dhcpLease = "12h" # show the appropriate option in the menu if dhcp == "N/A": dhcpOPTION = "(1) set dhcp server to" + color.VERT + " 'on'" + color.ENDC else: dhcpOPTION = "%47s" % " (1) set dhcp server to" + color.ROUGE + " 'off'" + color.ENDC print ("""\n the dhcp server should always be on. If the dhcp is set to 'N/A' the client will need to have is adresse, gateway and dns set manualy.\n""") print (color.BOLD + " dhcp status: " + color.ENDC + couleur + "'" + dhcp + "'" + color.ENDC) print (color.BOLD + " dhcp pool: " + color.ENDC + color.BLEU + dhcpPool + color.ENDC) print (color.BOLD + " dhcp lease: " + color.ENDC + color.BLEU + dhcpLease + color.ENDC) print("") print("%49s" % ("current options" + color.ENDC)) print("%57s" % (color.DARKCYAN + "-----------------------" + color.ENDC)) print("%61s" % ( dhcpOPTION)) print("%40s" % ("(5) main menu.\n")) while True: DhcpChoice = input(color.BLEU + "dhcp > " + color.ENDC) #check the last dhcp value and take the decision to put it to on or off if DhcpChoice == "1": if dhcp == "N/A": dhcp = "ON" else: dhcp = "N/A" print (color.VERT + "[+]" + color.ENDC + " changing dhcp status to " + color.VERT + "'" + dhcp + "'" + color.ENDC) time.sleep(1) return dhcp
fboers/jumeg
examples/connectivity/plot_shuffle_time_slices.py
Python
bsd-3-clause
2,449
0.008575
""" ==================================================== Shuffle channels' data in the time domain and plot. ==================================================== """ # Author: Eberhard Eich # Praveen Sripad # # License: BSD (3-clause) import numpy as np import os.path as op import mne from jumeg.jumeg_utils import (get_files_from_list, time_shuffle_slices, channel_indices_from_list) from mne.datasets import sample data_path = sample.data_path() raw_fname = str(data_path + '/MEG/sample/sample_audvis_raw.fif') # shuffle all MEG channels that begin with number 11 shflchanlist = ['MEG 11..'] # shuffle the whole length of the data tmin, tmax = 0., None # apply the shuffling # time_shuffle_slices(raw_fname, shufflechans=shflchanlist, tmin=tmin, tmax=tmax) plot_things = True if plot_things: permname = op.join(op.dirname(raw_fname), op.basename(raw_fname).split('-')[0]) + ',tperm-raw.fif' rawraw = mne.io.Raw(raw_fname,preload=True) shflpick = channel_indices_from_list(rawraw.info['ch_names'][:],
shflchanlist) procdperm = mne.io.Raw(permname, preload=True) figraw = rawraw.plot_psd(fmin=0., fmax=300., tmin=0., color=(1,0,0), picks=shflpick) axisraw = figraw.gca() axisraw.set_ylim([-300., -250.]) # procdnr.plot_psd(fmin=0.,fmax=300., color=(0,0,1), picks=shflpick) figshfl = procdperm.plot_psd(fmin=0., fmax=300., tmin=0., color=(1,0,0), picks=shflpick) axisshfl = figshfl.gca() axisshfl.set_ylim([-300., -250.]) megpick = mne.pick_types(rawraw.info, meg=True, ref_meg=False, eeg=False, eog=False, stim=False) figraw1 = rawraw.plot_psd(fmin=0., fmax=300., tmin=0., color=(0,0,1), picks=megpick) axisraw1 = figraw1.gca() axisraw1.set_ylim([-300., -250.]) figshfl1 = procdperm.plot_psd(fmin=0., fmax=300., tmin=0., color=(0,0,1), picks=megpick) axisshfl1 = figshfl1.gca() axisshfl1.set_ylim([-300., -250.]) megnochgpick = np.setdiff1d(megpick, shflpick) figraw2 = rawraw.plot_psd(fmin=0., fmax=300., tmin=0., color=(0,1,0), picks=megnochgpick) axisraw2 = figraw2.gca() axisraw2.set_ylim([-300., -250.]) figshfl2 = procdperm.plot_psd(fmin=0., fmax=300., tmin=0., color=(0,1,0), picks=megnochgpick) axisshfl2 = figshfl2.gca() axisshfl2.set_ylim([-300., -250.])
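The jumeg example shuffles the selected MEG channels in the time domain and then compares power spectra before and after. The numpy-only sketch below illustrates one plausible way such slice shuffling can work; the fixed slice length and in-place permutation are assumptions for illustration, not the exact behavior of jumeg's time_shuffle_slices.

```python
import numpy as np

def shuffle_time_slices(data, picks, slice_len, seed=None):
    """Permute fixed-length time slices of the picked channels.

    data: array of shape (n_channels, n_times); returns a shuffled copy.
    """
    rng = np.random.default_rng(seed)
    out = data.copy()
    n_slices = data.shape[1] // slice_len
    order = rng.permutation(n_slices)
    for ch in picks:
        slices = out[ch, :n_slices * slice_len].reshape(n_slices, slice_len)
        out[ch, :n_slices * slice_len] = slices[order].ravel()
    return out

raw = np.random.randn(4, 1000)                      # 4 channels, 1000 samples
shuffled = shuffle_time_slices(raw, picks=[0, 1], slice_len=100, seed=0)
```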
liuguanyu/evparse
lib/hunantv/entry.py
Python
gpl-3.0
3,213
0.006225
# -*- coding: utf-8 -*- # entry.py, part for evparse : EisF Video Parse, evdh Video Parse. # entry: evparse/lib/hunantv # version 0.1.0.0 test201505151816 # author sceext <[email protected]> 2009EisF2015, 2015.05. # copyright 2015 sceext # # This is FREE SOFTWARE, released under GNU GPLv3+ # please see README.md and LICENSE for more information. # # evparse : EisF Video Parse, evdh Video Parse. # Copyright (C) 2015 sceext <[email protected]> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # b
ut WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Gene
ral Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # # import import re from .. import error from . import get_base_info from . import get_video_info # global vars # version of this extractor THIS_EXTRACTOR_VERSION = 'evparse lib/hunantv version 0.1.0.0 test201505151816' # http://www.hunantv.com/v/2/150668/f/1518250.html# # http://www.hunantv.com/v/2/51717/f/692063.html# # http://www.hunantv.com/v/2/107768/f/1517224.html# RE_SUPPORT_URL = '^http://www\.hunantv\.com/v/2/[0-9]+/f/[0-9]+\.html' RE_VID = 'http://www\.hunantv\.com/v/2/[0-9]+/f/([0-9]+)\.html' # global config obj etc = {} # NOTE should be set etc['flag_debug'] = False etc['hd_min'] = 0 etc['hd_max'] = 0 # functions def set_config(config): # just copy it etc['flag_debug'] = config['flag_debug'] etc['hd_min'] = config['hd_min'] etc['hd_max'] = config['hd_max'] # get vid def get_vid(url_to): vid_info = {} vid_info['url'] = url_to # get vid vids = re.findall(RE_VID, url_to) vid_info['vid'] = vids[0] # done return vid_info def parse(url_to): # this site entry main entry function # frist re-check url, if supported by this if not re.match(RE_SUPPORT_URL, url_to): raise error.NotSupportURLError('not support this url', url_to) # create evinfo evinfo = {} evinfo['info'] = {} evinfo['video'] = [] # add some base info evinfo['info']['url'] = url_to evinfo['info']['site'] = 'hunantv' # get vid vid_info = get_vid(url_to) # DEBUG info if etc['flag_debug']: print('lib.hunantv: DEBUG: got vid \"' + vid_info['vid'] + '\" ') # get base, more info info, more = get_base_info.get_info(vid_info, flag_debug=etc['flag_debug']) # add more info evinfo['info']['title'] = more['title'] evinfo['info']['title_sub'] = more['sub_title'] evinfo['info']['title_short'] = more['short_title'] evinfo['info']['title_no'] = more['no'] # get video info evinfo['video'] = get_video_info.get_info(info, hd_min=etc['hd_min'], hd_max=etc['hd_max'], flag_debug=etc['flag_debug']) # done return evinfo # end entry.py
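The get_vid function in the hunantv entry module pulls the numeric video id out of the page URL with the RE_VID pattern. That extraction step on its own, using one of the example URLs from the module's comments:

```python
import re

RE_VID = r'http://www\.hunantv\.com/v/2/[0-9]+/f/([0-9]+)\.html'

url = 'http://www.hunantv.com/v/2/150668/f/1518250.html'
vids = re.findall(RE_VID, url)
print(vids[0])   # '1518250'
```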
vhavlena/appreal
netbench/pattern_match/bin/library/bdz.py
Python
gpl-2.0
9,513
0.01356
from b_hash import b_hash from b_hash import NoData from jenkins import jenkins from h3_hash import h3_hash from jenkins import jenkins_fast, jenkins_wrapper from graph import * from collections import deque from bitstring import BitArray import math class bdz(b_hash): """Class for perfect hash function generated by the BDZ algorithm. This algorithms uses uniform random hypergraph.""" def __init__(self): b_hash.__init__(self) self.known_keys = False #Keyset is not set self.function_number = 3 #random 3-graph self.iteration_limit = 5 self.ratio = 1.24 #ratio between keyset size and theconsumed memory self.limit = -1 self.m = -1 self.g = None; def get_g(self): """This function return values of the g array. It can not be called before the generate_seed, since it is part of the seed""" return self.g def get_range(self): """This function returns the size of the biggest possible hash value. If the range is not known yet, the -1 is returned""" return self.m def get_ratio(self): """Return ratio c between keyset and the size of the memory""" return self.ratio def set_ratio(self,ratio): """sets the ration and therefore size of the data structure of the PHF""" self.ratio = ratio def set_limit(self, limit): """Sets the size of the memory bank for one hash function. This function can be used instead of the set ratio. BDZ computes three hash functions with nonoverlapping outputs. Outputs of these hash functions are used as a pointers to the memory. If user know amount of the memory, he may set the limit as 1/3 of the available memory. The ration and other parameters are computed when the key set is given. The limit value always take precedents before the ratio. To stop using limit value, limit should be set to the negative value.""" self.limit = limit; def get_iteration_limit(self): """The BDZ algorithm may have fail to create PHF. The iteration_limit is used to limit the number of attempts of PHF creation""" return self.iteration_limit def set_iteration_limit(self,iteration_limit): """The BDZ algorithm may have fail to create PHF. The iteration_limit is used to limit the number of attempts of PHF creation""" self.iteration_limit = iteration_limit def get_order(self): """This function return the number of uniform hash function used to create hypergraph""" return self.function_number def set_order(self,number): """This function sets the number of hash function used for the creation of the hypergraph. It can not be changed after generation of the PHF""" self.function_number = number def set_keys(self, key_set): """This is a perfect hash function. For the construction of the PHF, the set of keys has to be known. This function gives set of keys to the function, so generate_seed can build correct function""" self.key_set = key_set self.known_keys = True if self.limit > 0 : #The limit is set, recompute ratio for the given limit self.ratio = (3.0*self.limit)/len(key_set) def is_key_set(self): """This function return information, if the set of keys is prepared for the generation of the PHF""" return self.known_keys def _found_graph(self): """This is internal function. It generate random hypergraph according to the specification in the bdz class. It returns a queue of the edge and changes internal datastructure of BDZ class. 
Returned edges are ordered in such way, that they can be used for the construction of the PHF""" #First step is to initialize seed self.seed = dict() #Second step is to generate the random hash functions hashes = list() for i in range(0,self.function_number): x = jenkins_wrapper() x.generate_seed() # x = h3_hash() # x.set_bitsize(16) # x.set_input_size(len(self.key_set[0])) # x.generate_seed() hashes.append(x) self.seed["hashes"] = hashes #setting m self.m = int(math.ceil(self.ratio * len(self.key_set))) limit = int(math.ceil(float(self.m) /self.function_number)) self.m = 3*limit #print("XXXXXXXXXXXXXXX",limit, self.m) #Generation of hypergraph hyper = graph() hyper.set_order(self.function_number) hyper.add_vertices(self.m) #Generation of the edges of the hypergraph for x in self.key_set: values = list() for i in self.seed["hashes"]: #print("test",i.hash(x)%limit,limit*len(values)) vertex = (i.hash(x) % limit) + limit*len(values) values.append(vertex) #Add this edge into the hypergraph e = hyper.add_edge(values) # print(e.get_vertices()) #Add edge to the vertices for v in values: hyper.get_vertex(v).add_edge(e) #Generate queue for the edge evaluation queue_list = [] queue = deque() #Boolean vector of the used edges used = [False] * hyper.get_edge_number() #First remove edges that have at least one vertex with degree 1 for i in range(0,hyper.get_edge_number()): vert = hyper.get_edge(i).get_vertices() #print([hyper.get_vertex(x).get_degree() for x in vert]) Deg = [hyper.get_vertex(x).get_degree() == 1 for x in vert] if sum(Deg) > 0 and used[i] == False: #This edge has at least one vertex with degree 1 used[i] = True queue_list.append(i) queue.append(i) #Removing edges that have unique vertex (on the stack) #adding a new edges with unique vertex into stack while(len(queue)>0): edge = queue.popleft() #remove edge from the graph (only from vertex and decrease degree) for v in hyper.get_edge(edge).get_vertices(): hyper.get_vertex(v).get_edges().remove(hyper.get_edge(edge)) deg = hyper.get_vertex(v).get_degree() - 1 #print("KVIK",deg) hyper.get_vertex(v).set_degree(deg) #if degree decrease to 1, the remaining edge should be added #into the queue if(deg == 1): #Found the edge position e1 = hyper.get_vertex(v).get_edges()[0] position = hyper.get_edge_position(e1
) #If it is not in the queue, put it there if used[position] == False: queue.append(position) queue_list.append(position) used[posi
tion] = True self.hyper = hyper return queue_list def _found_g(self,v,ed,vi): """This function computes value of the g array for given vertex. It uses plus operation.""" s = [self.g[s1] for s1 in self.hyper.get_edge(ed).get_vertices()] sum1 = sum(s)-s[vi]; self.g[v] = (vi-sum1)%len(s) return True; def _found_g2(self,v,ed,vi): """This function computes value of the g array for given vertex by the use of the xor function. Assumes two bit representation of the g array""" s = [self.g[s1] for s1 in self.hyper.get_edge(ed).get_vertices()] sum1 = s[0]; for index in range(1,len(self.hyper.get_edge(ed).get_vertices())): sum1 = sum1^s[index] sum1 = sum1^s[vi] self.g[v] = (vi^sum1)&3 #3 is the 11 in binary, therefore it clear all the higher bits to zero return True def generate_seed(self): """This function generates the PHF function according to the BDZ algorithm""" if not self.known_keys: raise NoData("The key set is unknown") size = 0 iteration = 0 while(size != len(self.key_set) and self.iteration_limit > iteration): queue
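The bdz class documents the BDZ construction: hash every key with three functions into disjoint thirds of a table of size roughly c times the number of keys (c = 1.24 by default), treat each key as a 3-edge of a hypergraph, and repeatedly peel edges that still touch a degree-1 vertex. The table-sizing arithmetic from _found_graph, shown by itself:

```python
import math

def bdz_table_size(n_keys, ratio=1.24, order=3):
    """Size of the g array and the per-function range, as computed in bdz._found_graph."""
    m = int(math.ceil(ratio * n_keys))
    limit = int(math.ceil(float(m) / order))   # range of each of the three hash functions
    return order * limit, limit

m, limit = bdz_table_size(1000)
print(m, limit)   # 1242 414 -> three non-overlapping blocks of 414 slots each
```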
FernanOrtega/DAT210x
Module3/notes/histogram_example.py
Python
mit
446
0.008969
#!/usr/bin/env python2 # -*- coding: utf-8 -*- """ Created on Tue Jul 11 20:4
7:53 2017 @author: fernando """ import pandas as pd import matplotlib import matplotlib.pyplot as plt plt.style.use('ggplot') df = pd.read_csv("/home/fernando/CoursePythonDS/DAT210x/Module3/Datasets/wheat.data") print df.describe() df[df.groove>5].asymmetry.plot.hist(alpha=0.3, normed=True) df[df.gr
oove<=5].asymmetry.plot.hist(alpha=0.5, normed=True) plt.show()
frankosan/pypers
pypers/steps/mothur/MothurSummarySeqs.py
Python
gpl-3.0
3,305
0.014221
from pypers.core.step import Step from pypers.steps.mothur import Mothur import os import json import re import glob class MothurSummarySeqs(Mothur): """ Summarizes the quality of sequences in an unaligned or aligned fasta-formatted sequence file. """ spec = { 'name' : 'MothurSummarySeqs', 'version' : '20150512', 'descr' : [ 'Summarizes the quality of sequences in an unaligned or aligned fasta-formatted sequence file' ], 'url' : 'www.mothur.org/wiki/Summary.seqs', 'args' : { 'inputs' : [ { 'name' : 'input_fasta', 'type' : 'file', 'iterable' : True, 'descr' : 'input fasta filename' }, { 'name' : 'input_names', 'type' : 'file', 'iterable' : True, 'required' : False, 'descr' : 'input names filename' }, { 'name' : 'input_counts', 'type' : 'file', 'iterable' : True, 'required' : False, 'descr' : 'input counts filename' } ], 'outputs' : [ { 'name' :
'output_summary', 'type' : 'file', 'value' : '*.summary', 'descr': 'output summary filename' }, {
'name' : 'output_log', 'type' : 'file', 'value' : '*.log.txt', 'descr': 'output summary logfile with tile summary table' } ] }, 'requirements' : { 'cpus' : '8' } } def process(self): """ Create the necessary input file links and run mothur command """ if type(self.input_fasta) != list: self.input_fasta = [self.input_fasta] if type(self.input_names) != list: self.input_names = [self.input_names] if type(self.input_counts) != list: self.input_counts = [self.input_counts] for idx, input_fasta in enumerate(self.input_fasta): self.mk_links([input_fasta],self.output_dir) input_fasta = os.path.join(self.output_dir,os.path.basename(input_fasta)) extra_params={'fasta':input_fasta} if self.input_names[idx]: input_names = os.path.join(self.output_dir,os.path.basename(self.input_names[idx])) self.mk_links([self.input_names[idx]],self.output_dir) extra_params['name'] = input_names if self.input_counts[idx]: input_counts = os.path.join(self.output_dir,os.path.basename(self.input_counts[idx])) self.mk_links([self.input_counts[idx]],self.output_dir) extra_params['count'] = input_counts self.run_cmd('summary.seqs',extra_params)
appium/appium
sample-code/python/test/conftest.py
Python
apache-2.0
1,108
0.002708
import pytest import datetime import os from helpers import ensure_dir def pytest_configure(config): if no
t hasattr(config, 'input'): current_day = '{:%Y_%m_%d_%H_%M_%S}'.format(datetime.datetime.now()) ensure_dir(os.path.join(os.path.dirname(__file
__), 'input', current_day)) result_dir = os.path.join(os.path.dirname(__file__), 'results', current_day) ensure_dir(result_dir) result_dir_test_run = result_dir ensure_dir(os.path.join(result_dir_test_run, 'screenshots')) ensure_dir(os.path.join(result_dir_test_run, 'logcat')) config.screen_shot_dir = os.path.join(result_dir_test_run, 'screenshots') config.logcat_dir = os.path.join(result_dir_test_run, 'logcat') class DeviceLogger: def __init__(self, logcat_dir, screenshot_dir): self.screenshot_dir = screenshot_dir self.logcat_dir = logcat_dir @pytest.fixture(scope='function') def device_logger(request): logcat_dir = request.config.logcat_dir screenshot_dir = request.config.screen_shot_dir return DeviceLogger(logcat_dir, screenshot_dir)
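pytest_configure above stores per-run screenshot and logcat directories on the config object, and the device_logger fixture hands them to tests. A test consuming that fixture might look like the sketch below; only the fixture wiring comes from the conftest, while the driver call is a commented-out placeholder.

```python
import os

def test_home_screen(device_logger):
    # device_logger is provided by the conftest fixture shown above.
    shot = os.path.join(device_logger.screenshot_dir, 'home_screen.png')
    # driver.save_screenshot(shot)   # placeholder: an Appium driver would write the file here
    assert os.path.isdir(device_logger.screenshot_dir)
    assert os.path.isdir(device_logger.logcat_dir)
```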
fs714/concurrency-example
asynchronous/py36/asyncio/async_test.py
Python
apache-2.0
868
0.002304
# import asyncio # # async def compute(x, y): # print("Compute %s + %s ..." % (x, y)) # await asyncio.sleep(1.0) # return x + y # # async def print_sum(x, y): # for i in range(10): # result = await compute(x, y) # print("%s + %s = %s" % (x, y, result)) # # loop = asyncio.get_event_loop() # loop.run_until_complete(print_sum(1,2)) # asyncio.ensure_future(print_sum(1, 2)) # asyncio.ensure_future(print_sum(3, 4)) # asyncio.ensure_future(print_sum(5, 6)) # loop.run_forever() import asyncio async def display_date(who, nu
m): i = 0 while True: if i > num: return print('{}: Before loop {}'.format(who, i)) await asyncio.sleep(1) i += 1 loop = asyncio.get_event_loop() asyncio.ensure_future(display_date('AAA', 4)) asyncio.ensure_future(d
isplay_date('BBB', 6)) loop.run_forever()
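The asyncio example keeps the loop alive with run_forever(), so the process never exits even after both coroutines return. On the same Python 3.6-era API, a variant that waits for both tasks and then stops could look like this:

```python
import asyncio

async def display_date(who, num):
    for i in range(num + 1):
        print('{}: Before loop {}'.format(who, i))
        await asyncio.sleep(1)

loop = asyncio.get_event_loop()
# gather both coroutines and return once the slower one finishes
loop.run_until_complete(asyncio.gather(
    display_date('AAA', 4),
    display_date('BBB', 6),
))
loop.close()
```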
CCI-MOC/GUI-Backend
core/migrations/0012_remove_null_from_many_many.py
Python
apache-2.0
1,573
0.006357
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations import django.core.validators import django.contrib.auth.models class Migration(migrations.Migration): dependencies = [ ('core', '0011_atmosphere_user_manager_updat
e'), ] operations = [ migrations.AlterField( model_name='allocationstrategy', name='refresh_behaviors', field=models.ManyToManyField( to='core.RefreshBehavior', blank=True), ), migrations.AlterField( model_name='allocationstrategy', name='rules_behaviors', field=models.ManyToManyField( to='core.RulesBehavior', blank=True),
), migrations.AlterField( model_name='machinerequest', name='new_machine_licenses', field=models.ManyToManyField( to='core.License', blank=True), ), migrations.AlterField( model_name='project', name='applications', field=models.ManyToManyField( related_name='projects', to='core.Application', blank=True), ), migrations.AlterField( model_name='project', name='instances', field=models.ManyToManyField( related_name='projects', to='core.Instance', blank=True), ), migrations.AlterField( model_name='project', name='volumes', field=models.ManyToManyField( related_name='projects', to='core.Volume', blank=True), ), migrations.AlterField( model_name='providermachine', name='licenses', field=models.ManyToManyField( to='core.License', blank=True), ), ]
nirmeshk/oh-mainline
vendor/packages/webob/setup.py
Python
agpl-3.0
2,150
0.00186
from setuptools import setup version = '1.4' testing_extras = ['nose', 'coverage'] docs_extras = ['Sphinx'] setup( name='WebOb', version=version, description="WSGI request and response object", long_description="""\ WebOb provides wrappers around the WSGI request environment, and an object to help create WSGI responses. The objects map much of the specified behavior of HTTP, including header parsing and accessors for other standard parts of the environment. You may install the `in-development version of WebOb <https://github.com/Pylons/webob/zipball/m
aster#egg=WebOb-dev>`_ with ``pip install WebOb==dev`` (or ``easy_install WebOb==dev``). * `WebOb reference <http://docs.webob.org/en/latest/reference.html>`_ * `Bug tracker <https://github.com/Pylons/webob/issues>`_ * `Browse source code <https://github.com/Pylons/webob>`_ * `Mailing list <http://bit.ly/paste-users>`_ * `Release news <http://docs.webob.org/en/latest/news.html>`_ * `Detailed changelog <https://github.com/Pylons/webob/commits/master>`_ """, classifiers=[ "Development Stat
us :: 6 - Mature", "Intended Audience :: Developers", "License :: OSI Approved :: MIT License", "Topic :: Internet :: WWW/HTTP :: WSGI", "Topic :: Internet :: WWW/HTTP :: WSGI :: Application", "Topic :: Internet :: WWW/HTTP :: WSGI :: Middleware", "Programming Language :: Python :: 2.6", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3.2", "Programming Language :: Python :: 3.3", "Programming Language :: Python :: 3.4", "Programming Language :: Python :: Implementation :: CPython", "Programming Language :: Python :: Implementation :: PyPy", ], keywords='wsgi request web http', author='Ian Bicking', author_email='[email protected]', maintainer='Pylons Project', url='http://webob.org/', license='MIT', packages=['webob'], zip_safe=True, test_suite='nose.collector', tests_require=['nose'], extras_require = { 'testing':testing_extras, 'docs':docs_extras, }, )
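The long_description in the WebOb setup.py says the package wraps the WSGI request environment and provides a response object. The most basic round trip with the two core objects is sketched below, based on WebOb's documented API; output details may differ in versions other than the 1.4 pinned here.

```python
from webob import Request, Response

req = Request.blank('/hello?name=world')   # build a test WSGI environ
print(req.path, req.GET.get('name'))       # /hello world

res = Response(body='hi', content_type='text/plain')
print(res.status, res.headers['Content-Type'])
```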
eddie-dunn/pytest-typehints
setup.py
Python
bsd-3-clause
1,385
0
#!/usr/bin/env python # -*- coding: utf-8 -*- # pylint: disable=missing-docstring import os import codecs from setuptools import setup def read(fname): file_path = os.path.join(os.path.dirname(__file__), fname) return codecs.open(file_path, encoding='utf-8').read() setup( name
='pytest-typehints', version='0.1.0', author='Edward Dunn Ekelund', author_email='[email protected]', maintainer='Edward Dunn Ekelund', maintainer_email='[email protected]', license='BSD-3', url='https://github.com/eddie-dunn/pytest-typehints', description='Pytest plugin that checks for type hinting', long_description=read('README.rst'), py_modules=['pytest_typehints'], install_requires=['pytest>=2.9
.2'], classifiers=[ 'Development Status :: 4 - Beta', 'Framework :: Pytest', 'Intended Audience :: Developers', 'Topic :: Software Development :: Testing', 'Programming Language :: Python', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: Implementation :: CPython', 'Programming Language :: Python :: Implementation :: PyPy', 'Operating System :: OS Independent', 'License :: OSI Approved :: BSD License', ], entry_points={ 'pytest11': [ 'typehints = pytest_typehints', ], }, )
vahidR/test-builder
tests/test_utils.py
Python
gpl-2.0
1,040
0.005769
""" Copyright (C) 2014 Vahid Rafiei (@vahid_r) This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General
Public License along with this program. If not, see <http://www.gnu.org/licenses/>. """ import unittest from testbuilder.utils import get_version class TestUtilsModuleFunctions(unittest.TestCase): """ This is a test skeleton for module-level functions at the utils
 module""" def test_version(self): self.assertEqual("0.9", get_version(), "The current version should be 0.9") if __name__ == "__main__": unittest.main()
devs1991/test_edx_docmode
venv/lib/python2.7/site-packages/amqp/serialization.py
Python
agpl-3.0
16,315
0
""" Convert between bytestreams and higher-level AMQP types. 2007-11-05 Barry Pederson <[email protected]> """ # Copyright (C) 2007 Barry Pederson <[email protected]> # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 from __future__ import absolute_import import calendar import sys from datetime import datetime from decimal import Decimal from io import BytesIO from struct import pack, unpack from .exceptions import FrameSyntaxError from .five import int_types, long_t, string, string_t, items IS_PY3K = sys.version_info[0] >= 3 if IS_PY3K: def byte(n): return bytes([n]) else: byte = chr ILLEGAL_TABLE_TYPE_WITH_KEY = """\ Table type {0!r} for key {1!r} not handled by amqp. [value: {2!r}] """ ILLEGAL_TABLE_TYPE = """\ Table type {0!r} not handled by amqp. [value: {1!r}] """ class AMQPReader(object): """Read higher-level AMQP types from a bytestream.""" def __init__(self, source): """Source should be either a file-like object with a read() method, or a plain (non-unicode) string.""" if isinstance(source, bytes): self.input = BytesIO(source) elif hasattr(source, 'read'): self.input = source else: raise ValueError( 'AMQPReader needs a file-like object or plain string') self.bitcount = self.bits = 0 def close(self): self.input.close() def read(self, n): """Read n bytes.""" self.bitcount = self.bits = 0 return self.input.read(n) def read_bit(self): """Read a single boolean value.""" if not self.bitcount: self.bits = ord(self.input.read(1)) self.bitcount = 8 result = (self.bits & 1) == 1 self.bits >>= 1 self.bitcount -= 1 return result def read_octet(self): """Read one byte, return as an integer""" self.bitcount = self.bits = 0 return unpack('B', self.input.read(1))[0] def read_short(self): """Read an unsigned 16-bit integer""" self.bitcount = self.bits = 0 return unpack('>H', self.input.read(2))[0] def read_long(self): """Read an unsigned 32-bit integer""" self.bitcount = self.bits = 0 return unpack('>I', self.input.read(4))[0] def read_longlong(self): """Read an unsigned 64-bit integer""" self.bitcount = self.bits = 0 return unpack('>Q', self.input.read(8))[0] def read_float(self): """Read float value.""" self.bitcount = self.bits = 0 return unpack('>d', self.input.read(8))[0] def read_shortstr(self): """Read a short string that's stored in up to 255 bytes. The encoding isn't specified in the AMQP spec, so assume it's utf-8 """ self.bitcount = self.bits = 0 slen = unpack('B', self.input.read(1))[0] return self.input.read(slen).decode('utf-8') def read_longstr(self): """Read a string that's up to 2**32 bytes. The encoding isn't specified in the AMQP spec, so assume it's utf-8 """ self.bitcount = self.bits = 0 slen = unpack('>I',
self.input.read(4))[0] return self.input.read(slen).decode('utf-8') def r
ead_table(self): """Read an AMQP table, and return as a Python dictionary.""" self.bitcount = self.bits = 0 tlen = unpack('>I', self.input.read(4))[0] table_data = AMQPReader(self.input.read(tlen)) result = {} while table_data.input.tell() < tlen: name = table_data.read_shortstr() val = table_data.read_item() result[name] = val return result def read_item(self): ftype = ord(self.input.read(1)) # 'S': long string if ftype == 83: val = self.read_longstr() # 's': short string elif ftype == 115: val = self.read_shortstr() # 'b': short-short int elif ftype == 98: val, = unpack('>B', self.input.read(1)) # 'B': short-short unsigned int elif ftype == 66: val, = unpack('>b', self.input.read(1)) # 'U': short int elif ftype == 85: val, = unpack('>h', self.input.read(2)) # 'u': short unsigned int elif ftype == 117: val, = unpack('>H', self.input.read(2)) # 'I': long int elif ftype == 73: val, = unpack('>i', self.input.read(4)) # 'i': long unsigned int elif ftype == 105: # 'l' val, = unpack('>I', self.input.read(4)) # 'L': long long int elif ftype == 76: val, = unpack('>q', self.input.read(8)) # 'l': long long unsigned int elif ftype == 108: val, = unpack('>Q', self.input.read(8)) # 'f': float elif ftype == 102: val, = unpack('>f', self.input.read(4)) # 'd': double elif ftype == 100: val = self.read_float() # 'D': decimal elif ftype == 68: d = self.read_octet() n, = unpack('>i', self.input.read(4)) val = Decimal(n) / Decimal(10 ** d) # 'F': table elif ftype == 70: val = self.read_table() # recurse # 'A': array elif ftype == 65: val = self.read_array() # 't' (bool) elif ftype == 116: val = self.read_bit() # 'T': timestamp elif ftype == 84: val = self.read_timestamp() # 'V': void elif ftype == 86: val = None else: raise FrameSyntaxError( 'Unknown value in table: {0!r} ({1!r})'.format( ftype, type(ftype))) return val def read_array(self): array_length = unpack('>I', self.input.read(4))[0] array_data = AMQPReader(self.input.read(array_length)) result = [] while array_data.input.tell() < array_length: val = array_data.read_item() result.append(val) return result def read_timestamp(self): """Read and AMQP timestamp, which is a 64-bit integer representing seconds since the Unix epoch in 1-second resolution. Return as a Python datetime.datetime object, expressed as localtime. """ return datetime.utcfromtimestamp(self.read_longlong()) class AMQPWriter(object): """Convert higher-level AMQP types to bytestreams.""" def __init__(self, dest=None): """dest may be a file-type object (with a write() method). If None then a BytesIO is created, and the contents can be accessed with this class's getvalue() method.""" self.out = BytesIO() if dest is None else dest self.bits = [] self.bitcount = 0 def _flushbits(self): if self.bits: out = self.out for b in self.bits: out.write(pack('B', b)) self.bits = [] self.bitcount = 0 def close(self): """Pass through if possible to any file-like destinations.""" try: self.out.close() except AttributeError: pass def flush(self): """Pass through if possible to any file-like destinations.""" try: self.out.flush() except AttributeError: pass def getvalue(self): """Get what's been encoded so far if
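The AMQPReader methods document the wire encodings they parse: a short string is a 1-byte length followed by UTF-8 bytes, a long string uses a 4-byte big-endian length, and integers are big-endian. The shortstr case, reproduced with nothing but the standard struct module:

```python
from struct import pack, unpack

def write_shortstr(s):
    # 1-byte length prefix followed by the UTF-8 payload, as in AMQPWriter
    data = s.encode('utf-8')
    assert len(data) <= 255, 'shortstr is limited to 255 bytes'
    return pack('B', len(data)) + data

def read_shortstr(buf):
    # mirror of AMQPReader.read_shortstr on a bytes buffer
    (slen,) = unpack('B', buf[:1])
    return buf[1:1 + slen].decode('utf-8')

wire = write_shortstr('amq.direct')
print(len(wire), read_shortstr(wire))   # 11 amq.direct
```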
miurahr/translate
translate/storage/poxliff.py
Python
gpl-2.0
14,579
0.000549
# # Copyright 2006-2009 Zuza Software Foundation # # This file is part of the Translate Toolkit. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, see <http://www.gnu.org/licenses/>. """XLIFF classes specifically suited for handling the PO representation in XLIFF. This way the API supports plurals as if it was a PO file, for example. """ import re from lxml import etree from translate.misc.multistring import multistring from translate.misc.xml_helpers import setXMLspace from translate.storage import base, lisa, poheader, xliff from translate.storage.placeables import general def hasplurals(thing): if not isinstance(thing, multistring): return False return len(thing.strings) > 1 class PoXliffUnit(xliff.xliffunit): """A class to specifically handle the plural units created from a po file.""" rich_parsers = general.parsers def __init__(self, source=None, empty=False, **kwargs): self._rich_source = None self._rich_target = None self._state_n = 0 self.units = [] if empty: return if not hasplurals(source): super().__init__(source) return self.xmlelement = etree.Element(self.namespaced("group")) self.xmlelement.set("restype", "x-gettext-plurals") self.source = source def __eq__(self, other): if isinstance(other, PoXliffUnit): if len(self.units) != len(other.units): return False if not super().__eq__(other): return False for i in range(len(self.units) - 1): if not self.units[i + 1] == other.units[i + 1]: return False return True if len(self.units) <= 1: if isinstance(other, lisa.LISAunit): return super().__eq__(other) else: return self.source == other.source and self.target == other.target return False # XXX: We don't return language nodes correctly at the moment # def getlanguageNodes(self): # if not self.hasplural(): # return super().getlanguageNodes() # else: # return self.units[0].getlanguageNodes() @property def source(self): if not self.hasplural(): return super().source return multistring([unit.source for unit in self.units]) @source.setter def source(self, source): self.setsource(source, sourcelang="en") def setsource(self, source, sourcelang="en"): # TODO: consider changing from plural to singular, etc. self._rich_source = None if not hasplurals(source
): super().setsource(source, sourcelang) else: target = self.target for unit in self.units:
try: self.xmlelement.remove(unit.xmlelement) except ValueError: pass self.units = [] for s in source.strings: newunit = xliff.xliffunit(s) # newunit.namespace = self.namespace #XXX?necessary? self.units.append(newunit) self.xmlelement.append(newunit.xmlelement) self.target = target # We don't support any rich strings yet multistring_to_rich = base.TranslationUnit.multistring_to_rich rich_to_multistring = base.TranslationUnit.rich_to_multistring rich_source = base.TranslationUnit.rich_source rich_target = base.TranslationUnit.rich_target def gettarget(self, lang=None): if self.hasplural(): strings = [unit.target for unit in self.units] if strings: return multistring(strings) else: return None else: return super().gettarget(lang) def settarget(self, target, lang="xx", append=False): self._rich_target = None if self.target == target: return if not self.hasplural(): super().settarget(target, lang, append) return if not isinstance(target, multistring): target = multistring(target) source = self.source sourcel = len(source.strings) targetl = len(target.strings) if sourcel < targetl: sources = source.strings + [source.strings[-1]] * (targetl - sourcel) targets = target.strings id = self.getid() self.source = multistring(sources) self.setid(id) elif targetl < sourcel: targets = target.strings + [""] * (sourcel - targetl) else: targets = target.strings for i in range(len(self.units)): self.units[i].target = targets[i] def addnote(self, text, origin=None, position="append"): """Add a note specifically in a "note" tag""" note = etree.SubElement(self.xmlelement, self.namespaced("note")) note.text = text if origin: note.set("from", origin) for unit in self.units[1:]: unit.addnote(text, origin) def getnotes(self, origin=None): # NOTE: We support both <context> and <note> tags in xliff files for comments if origin == "translator": notes = super().getnotes("translator") trancomments = self.gettranslatorcomments() if notes == trancomments or trancomments.find(notes) >= 0: notes = "" elif notes.find(trancomments) >= 0: trancomments = notes notes = "" return trancomments + notes elif origin in ["programmer", "developer", "source code"]: devcomments = super().getnotes("developer") autocomments = self.getautomaticcomments() if devcomments == autocomments or autocomments.find(devcomments) >= 0: devcomments = "" elif devcomments.find(autocomments) >= 0: autocomments = devcomments devcomments = "" return autocomments else: return super().getnotes(origin) def markfuzzy(self, value=True): super().markfuzzy(value) for unit in self.units[1:]: unit.markfuzzy(value) def marktranslated(self): super().marktranslated() for unit in self.units[1:]: unit.marktranslated() def setid(self, id): super().setid(id) if len(self.units) > 1: for i in range(len(self.units)): self.units[i].setid("%s[%d]" % (id, i)) def getlocations(self): """Returns all the references (source locations)""" groups = self.getcontextgroups("po-reference") references = [] for group in groups: sourcefile = "" linenumber = "" for (type, text) in group: if type == "sourcefile": sourcefile = text elif type == "linenumber": linenumber = text assert sourcefile if linenumber: sourcefile = sourcefile + ":" + linenumber references.append(sourcefile) return references def getautomaticcomments(self): """Returns the automatic comments (x-po-autocomment), which corresponds to the #. style po comments. """ def hasautocomment(grp): return grp[0] == "x-po-autocomment" groups = self.getcontextgroups("po-entry") comments = [] for group in groups:
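hasplurals, defined at the top of the poxliff module, decides whether a unit needs the plural group treatment by checking for a multistring with more than one form. Using the same multistring class the module imports:

```python
from translate.misc.multistring import multistring

def hasplurals(thing):
    # copied from translate.storage.poxliff above
    if not isinstance(thing, multistring):
        return False
    return len(thing.strings) > 1

print(hasplurals("one file"))                             # False: plain string
print(hasplurals(multistring(["%d file", "%d files"])))   # True: two plural forms
```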
otadmor/Open-Knesset
lobbyists/scrapers/lobbyist.py
Python
bsd-3-clause
5,096
0.002747
# encoding: utf-8 from bs4 import BeautifulSoup from okscraper.base import BaseScraper from okscraper.sources import UrlSource, ScraperSource from okscraper.storages import ListStorage, DictStorage from lobbyists.models import LobbyistHistory, Lobbyist, LobbyistData, LobbyistRepresent, LobbyistRepresentData from persons.models import Person from django.core.exceptions import ObjectDoesNotExist from datetime import datetime from lobbyist_represent import LobbyistRepresentScraper class LobbyistScraperDictStorage(DictStorage): """ This storage first determines if a new Lobbyist object needs to be created: it searches for a Lobbyist object with the same source_id and first / last name if such an object exists - it uses that object, otherwise created a new Lobbyist It then updates the lobbyist.data: it gets the last LobbyistData object for this lobbyist and compares that to the current data if it matches - then that object is used and a new object is not created else - a new LobbyistData object is created and appended to the lobbyist.data This storage returns the lobbyist object """ _commitInterval = -1 def _get_data_keys(self): return ['first_name', 'family_name', 'profession', 'corporation_name', 'corporation_id', 'faction_member', 'faction_name', 'permit_type'] def _get_represents_data(self, source_id): return LobbyistRepresentScraper().scrape(source_id) def _get_latest_lobbyist_data(self, lobbyist): return lobbyist.latest_data def _get_last_lobbyist_data(self, lobbyist, data): try: last_lobbyist_data = self._get_latest_lobbyist_data(lobbyist) except ObjectDoesNotExist: last_lobbyist_data = None if last_lobbyist_data is not None: for key in self._get_data_keys(): if data[key] != getattr(last_lobbyist_data, key): last_lobbyist_data = None break if last_lobbyist_data is not None: represent_ids = sorted(data['represents'], key=lambda represent: represent.id) last_represent_ids = sorted(last_lobbyist_data.represents.al
l(), key=lambda represent: represent.id) if represent_ids != last_represent_ids: last_lobbyist_data = None return last_lobbyist_data def commit(self): super(LobbyistScraperDictStorage, self).commit() data = self._data source_id = data['id'] data['represents'] = se
lf._get_represents_data(source_id) full_name = '%s %s' % (data['first_name'], data['family_name']) q = Lobbyist.objects.filter(source_id=source_id, person__name=full_name) if q.count() > 0: lobbyist = q[0] else: lobbyist = Lobbyist.objects.create(person=Person.objects.create(name=full_name), source_id=source_id) self._data = lobbyist last_lobbyist_data = self._get_last_lobbyist_data(lobbyist, data) if last_lobbyist_data is None: kwargs = {} for key in self._get_data_keys(): kwargs[key] = data[key] kwargs['source_id'] = source_id lobbyist_data = LobbyistData.objects.create(**kwargs) for represent in data['represents']: lobbyist_data.represents.add(represent) lobbyist_data.scrape_time = datetime.now() lobbyist_data.save() lobbyist.data.add(lobbyist_data) else: lobbyist.data.add(last_lobbyist_data) lobbyist.save() class LobbyistScraper(BaseScraper): """ This scraper gets a lobbyist id, it then goes to the knesset api to get the data about the lobbyist """ def __init__(self): super(LobbyistScraper, self).__init__() self.source = UrlSource('http://online.knesset.gov.il/WsinternetSps/KnessetDataService/LobbyistData.svc/View_lobbyist(<<id>>)') self.storage = LobbyistScraperDictStorage() def _storeLobbyistDataFromSoup(self, soup): lobbyist_id = soup.find('d:lobbyist_id').text.strip() self._getLogger().info('got lobbyist id "%s"', lobbyist_id) lobbyist = { 'id': lobbyist_id, 'first_name': soup.find('d:first_name').text.strip(), 'family_name': soup.find('d:family_name').text.strip(), 'profession': soup.find('d:profession').text.strip(), 'corporation_name': soup.find('d:corporation_name').text.strip(), 'corporation_id': soup.find('d:corporation_id').text.strip(), 'faction_member': soup.find('d:faction_member').text.strip(), 'faction_name': soup.find('d:faction_name').text.strip(), 'permit_type': soup.find('d:lobyst_permit_type').text.strip(), } self.storage.storeDict(lobbyist) self._getLogger().debug(lobbyist) def _scrape(self, lobbyist_id): html = self.source.fetch(lobbyist_id) soup = BeautifulSoup(html) return self._storeLobbyistDataFromSoup(soup)
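The LobbyistScraperDictStorage docstring describes a change-detection pattern: reuse the latest stored LobbyistData row when nothing changed, and append a new row only when a field or the represents list differs. Stripped of the Django models, the comparison logic amounts to the sketch below; the dict-based records are illustrative stand-ins for the ORM objects.

```python
DATA_KEYS = ('first_name', 'family_name', 'profession', 'corporation_name',
             'corporation_id', 'faction_member', 'faction_name', 'permit_type')

def needs_new_record(latest, scraped):
    """Return True if a new history row should be created for the scraped data."""
    if latest is None:
        return True
    if any(scraped[k] != latest.get(k) for k in DATA_KEYS):
        return True
    # the represents lists are compared order-insensitively, as in the storage class
    return sorted(scraped['represents']) != sorted(latest.get('represents', []))

old = {'first_name': 'A', 'family_name': 'B', 'profession': '', 'corporation_name': '',
       'corporation_id': '', 'faction_member': '', 'faction_name': '', 'permit_type': '',
       'represents': [1, 2]}
new = dict(old, profession='lawyer')
print(needs_new_record(old, new))   # True: a tracked field changed
```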