Dataset columns:

    repo_name   string   (lengths 5 to 100)
    path        string   (lengths 4 to 231)
    language    string   (1 class)
    license     string   (15 classes)
    size        int64    (6 to 947k)
    score       float64  (0 to 0.34)
    prefix      string   (lengths 0 to 8.16k)
    middle      string   (lengths 3 to 512)
    suffix      string   (lengths 0 to 8.17k)
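The prefix / middle / suffix columns suggest each row is a fill-in-the-middle (FIM) code-completion sample: a model is shown the prefix and suffix of a file and asked to reproduce the middle. The sketch below shows one way rows with this shape could be assembled into FIM prompts. It is a minimal illustration only: it assumes rows are plain Python dicts with the columns listed above, and the sentinel strings and sample row contents are hypothetical placeholders, not values defined by this dataset.

def to_fim_prompt(row,
                  fim_prefix="<fim_prefix>",
                  fim_suffix="<fim_suffix>",
                  fim_middle="<fim_middle>"):
    # Assemble a prefix-suffix-middle prompt; the expected completion is row["middle"].
    prompt = fim_prefix + row["prefix"] + fim_suffix + row["suffix"] + fim_middle
    return prompt, row["middle"]

# Hypothetical row shaped like the schema above (contents are made up).
example_row = {
    "repo_name": "example/repo",
    "path": "pkg/module.py",
    "language": "Python",
    "license": "mit",
    "size": 459,
    "score": 0.002,
    "prefix": "import json\n\ndef load(path):\n    ",
    "middle": "with open(path) as fh:",
    "suffix": "\n        return json.load(fh)\n",
}

prompt, target = to_fim_prompt(example_row)
print(prompt)
print("expected completion:", target)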
plotly/plotly.py
packages/python/plotly/plotly/validators/layout/ternary/baxis/_nticks.py
Python
mit
459
0.002179
import _plotly_utils.basevalidators


class NticksValidator(_plotly_utils.basevalidators.IntegerValidator):
    def __init__(
        self, plotly_name="nticks", parent_name="layout.ternary.baxis", **kwargs
    ):
        super(NticksValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "plot"),
            min=kwargs.pop("min", 1),
            **kwargs
        )
jinser/automate_pydatastream
getcustom.py
Python
mit
3,917
0.038295
from pydatastream import Datastream
import json
import datetime
import sys
import os.path

# hardcoded directories
dir_input = "input/"
dir_output = "output/"

# check that the login credentials and input file location are being passed in
numOfArgs = len(sys.argv) - 1
if numOfArgs != 3:
    print "Please run this python script with username,password and input file location in that order respectively."
    exit()

# Setup login credentials and input file location
username = str(sys.argv[1])
pw = str(sys.argv[2])
input_file_loc = dir_input + str(sys.argv[3])

# Ensure that the input file location exists
if (not os.path.isfile(str(input_file_loc))):
    print "The file " + str(input_file_loc) + " does not exist."
    exit()

# login credentials to datastream
DWE = Datastream(username=username, password=pw)

# other info from datastream
info = DWE.system_info()
subscribed_sources = DWE.sources()

# replace missing data with NaNs
DWE.raise_on_error = False

# get all codes, groups, start dates from input file
with open(input_file_loc, 'r') as input_file:
    symbol_ref = json.load(input_file)

# download timestamp
download_date = {'Custom_Download_Date': datetime.datetime.now().isoformat()}

# calculate time taken for entire process
time_taken = datetime.datetime.now()
time_taken = time_taken - time_taken

for desc, desc_value in symbol_ref.iteritems():
    for group, group_value in desc_value.iteritems():
        # create list for custom fields
        custom_fields = list()
        for code_key, code_value in group_value.iteritems():
            for key, value in code_value.iteritems():
                if (key == 'code'):
                    search_code = value
                    search_symbol = {'Custom_Ticker': value}
                if (key == 'start_date'):
                    start_date = value
                if (key == 'custom_field'):
                    custom_fields[:] = []
                    custom_fields.append(value)

            startTime = datetime.datetime.now()
            # send request to retrieve the data from Datastream
            req = DWE.fetch(str(search_code), custom_fields, date_from=str(start_date), only_data=False)
            time_taken = time_taken + datetime.datetime.now() - startTime

            # format date and convert to json
            raw_json = req[0].to_json(date_format='iso')
            raw_metadata = req[1].to_json()

            # Data cleaning and processing
            # remove the time component including the '.' char from the key values of datetime in the data
            raw_json = raw_json.replace("T00:00:00.000Z", "")
            # replace the metadata's keys from "0" to "default_ws_key"
            raw_metadata = raw_metadata.replace("\"0\"", "\"Custom_WS_Key\"")

            # combine the data and the metadata about the code
            allData_str = json.loads(raw_json)
            metadata_str = json.loads(raw_metadata)
            datastream_combined = {key: value for (key, value) in (allData_str.items() + metadata_str.items())}

            # create symbol json string and append to data
            data_with_symbol = {key: value for (key, value) in (search_symbol.items() + datastream_combined.items())}

            # append group
            group_code = {'Custom_Group': group}
            data_with_group = {key: value for (key, value) in (group_code.items() + data_with_symbol.items())}

            # append category
            category = {'Custom_Description': desc}
            data_with_category = {key: value for (key, value) in (category.items() + data_with_group.items())}

            # append download timestamp
            final_data = {key: value for (key, value) in (download_date.items() + data_with_category.items())}
            final_data_json = json.dumps(final_data)

            # decode to the right format for saving to disk
            json_file = json.JSONDecoder().decode((final_data_json))

            # save to json file on server
            if (len(group_value) > 1):
                filename = dir_output + desc + '_' + group + '_' + code_key + '.json'
            else:
                filename = dir_output + desc + '_' + group + '.json'
            with open(filename, 'w') as outfile:
                json.dump(json_file, outfile, sort_keys=True)

print "time taken for " + str(sys.argv[3]) + " to be retrieved: " + str(time_taken)
AntonKuksov/Weather_analyzer
test_form.py
Python
gpl-3.0
300
0.006667
import unittest

from .Weather_analyzer import is_not_number


class BtcPriceTestCase(unittest.TestCase):

    def test_checking_of_input_in_form(self):
        input = 46
        answer = is_not_number(input)  # The bitcoin returned changes over time!
        self.assertEqual(answer, False)
Kami/sgrstats.com
sgrstats/articles/views.py
Python
apache-2.0
1,447
0.026261
import datetime

from django.shortcuts import render_to_response, get_object_or_404, HttpResponse, HttpResponseRedirect, Http404
from django.template import RequestContext
from django.core.urlresolvers import reverse

from articles.models import Article
from taxonomy.models import TaxonomyMap
from core.views import update_online_users


@update_online_users
def index(request):
    articles = Article.objects.all()[:10]
    return render_to_response('articles/index.html', {'articles': articles},
                              context_instance=RequestContext(request))


@update_online_users
def category(request, category_id):
    article_ids = TaxonomyMap.objects.filter(term__id=category_id, type__type='Category',
                                             content_type__model='article').values_list('object_id', flat=True)
    category_title = TaxonomyMap.objects.filter(term__id=category_id, type__type='Category',
                                                content_type__model='article')[0].term.term
    articles = Article.objects.filter(id__in=article_ids)
    return render_to_response('articles/category.html',
                              {'category_id': category_id, 'category_title': category_title, 'articles': articles},
                              context_instance=RequestContext(request))


@update_online_users
def details(request, title_slug):
    article = get_object_or_404(Article, title_slug=title_slug)
    return render_to_response('articles/details.html', {'article': article},
                              context_instance=RequestContext(request))
guns/weechat
doc/docgen.py
Python
gpl-3.0
29,781
0.000067
# -*- coding: utf-8 -*- # # Copyright (C) 2008-2014 Sébastien Helleu <[email protected]> # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # """ Documentation generator for WeeChat: build include files with commands, options, infos, infolists, hdata and completions for WeeChat core and plugins. Instructions to build config files yourself in WeeChat directories (replace all paths with your path to WeeChat): 1. run WeeChat and load this script, with following command: /python load ~/src/weechat/doc/docgen.py 2. change path to build in your doc/ directory: /set plugins.var.python.docgen.path "~/src/weechat/doc" 3. run docgen command: /docgen Note: it is recommended to load only this script when building doc. Files should be in ~/src/weechat/doc/xx/autogen/ (where xx is language). """ from __future__ import print_function SCRIPT_NAME = 'docgen' SCRIPT_AUTHOR = 'Sébastien Helleu <[email protected]>' SCRIPT_VERSION = '0.1' SCRIPT_LICENSE = 'GPL3' SCRIPT_DESC = 'Documentation generator for WeeChat' SCRIPT_COMMAND = 'docgen' IMPORT_OK = True try: import weechat # pylint: disable=import-error except ImportError: print('This script must be run under WeeChat.') print('Get WeeChat now at: http://weechat.org/') IMPORT_OK = False try: import gettext import hashlib import os import re from collections import defaultdict from operator import itemgetter except ImportError as message: print('Missing package(s) for {0}: {1}'.format(SCRIPT_NAME, message)) IMPORT_OK = False # default path where doc files will be written (should be doc/ in sources # package tree) # path must have subdirectories with languages and autogen directory: # path # |-- en # | |-- autogen # |-- fr # | |-- autogen # ... DEFAULT_PATH = '~/src/weechat/doc' # list of locales for which we want to build doc files to include LOCALE_LIST = ('en_US', 'fr_FR', 'it_IT', 'de_DE', 'ja_JP', 'pl_PL') # all commands/options/.. of following plugins will produce a file # non-listed plugins will be ignored # value: "c" = plugin may have many commands # "o" = write config options for plugin # if plugin is listed without "c", that means plugin has only one command # /name (where "name" is name of plugin) # Note: we consider core is a plugin called "weechat" PLUGIN_LIST = { 'sec': 'o', 'weechat': 'co', 'alias': '', 'aspell': 'o', 'charset': 'o', 'exec': 'o', 'fifo': 'o', 'irc': 'co', 'logger': 'o', 'relay': 'o', 'script': 'o', 'perl': '', 'python': '', 'ruby': '', 'lua': '', 'tcl': '', 'guile': '', 'trigger': 'o', 'xfer': 'co', } # options to ignore IGNORE_OPTIONS = ( r'aspell\.dict\..*', r'aspell\.option\..*', r'charset\.decode\..*', r'charset\.encode\..*', r'irc\.msgbuffer\..*', r'irc\.ctcp\..*', r'irc\.ignore\..*', r'irc\.server\..*', r'jabber\.server\..*', r'logger\.level\..*
', r'logger\.mask\..*', r'relay\.port\..*', r'trigger\.trigger\..*', r'weechat\.palette\..*', r'weechat\.proxy\..*', r'weechat\.bar\..*', r'weechat\.debug\..*', r'weechat\.notify\..*', ) # comp
letions to ignore IGNORE_COMPLETIONS_ITEMS = ( 'docgen.*', 'jabber.*', 'weeget.*', ) def get_commands(): """ Get list of commands in a dict with 3 indexes: plugin, command, xxx. """ commands = defaultdict(lambda: defaultdict(defaultdict)) infolist = weechat.infolist_get('hook', '', 'command') while weechat.infolist_next(infolist): plugin = weechat.infolist_string(infolist, 'plugin_name') or 'weechat' if plugin in PLUGIN_LIST: command = weechat.infolist_string(infolist, 'command') if command == plugin or 'c' in PLUGIN_LIST[plugin]: for key in ('description', 'args', 'args_description', 'completion'): commands[plugin][command][key] = \ weechat.infolist_string(infolist, key) weechat.infolist_free(infolist) return commands def get_options(): """ Get list of config options in a dict with 4 indexes: config, section, option, xxx. """ options = \ defaultdict(lambda: defaultdict(lambda: defaultdict(defaultdict))) infolist = weechat.infolist_get('option', '', '') while weechat.infolist_next(infolist): full_name = weechat.infolist_string(infolist, 'full_name') if not re.search('|'.join(IGNORE_OPTIONS), full_name): config = weechat.infolist_string(infolist, 'config_name') if config in PLUGIN_LIST and 'o' in PLUGIN_LIST[config]: section = weechat.infolist_string(infolist, 'section_name') option = weechat.infolist_string(infolist, 'option_name') for key in ('type', 'string_values', 'default_value', 'description'): options[config][section][option][key] = \ weechat.infolist_string(infolist, key) for key in ('min', 'max', 'null_value_allowed'): options[config][section][option][key] = \ weechat.infolist_integer(infolist, key) weechat.infolist_free(infolist) return options def get_infos(): """ Get list of infos hooked by plugins in a dict with 3 indexes: plugin, name, xxx. """ infos = defaultdict(lambda: defaultdict(defaultdict)) infolist = weechat.infolist_get('hook', '', 'info') while weechat.infolist_next(infolist): info_name = weechat.infolist_string(infolist, 'info_name') plugin = weechat.infolist_string(infolist, 'plugin_name') or 'weechat' for key in ('description', 'args_description'): infos[plugin][info_name][key] = \ weechat.infolist_string(infolist, key) weechat.infolist_free(infolist) return infos def get_infos_hashtable(): """ Get list of infos (hashtable) hooked by plugins in a dict with 3 indexes: plugin, name, xxx. """ infos_hashtable = defaultdict(lambda: defaultdict(defaultdict)) infolist = weechat.infolist_get('hook', '', 'info_hashtable') while weechat.infolist_next(infolist): info_name = weechat.infolist_string(infolist, 'info_name') plugin = weechat.infolist_string(infolist, 'plugin_name') or 'weechat' for key in ('description', 'args_description', 'output_description'): infos_hashtable[plugin][info_name][key] = \ weechat.infolist_string(infolist, key) weechat.infolist_free(infolist) return infos_hashtable def get_infolists(): """ Get list of infolists hooked by plugins in a dict with 3 indexes: plugin, name, xxx. 
""" infolists = defaultdict(lambda: defaultdict(defaultdict)) infolist = weechat.infolist_get('hook', '', 'infolist') while weechat.infolist_next(infolist): infolist_name = weechat.infolist_string(infolist, 'infolist_name') plugin = weechat.infolist_string(infolist, 'plugin_name') or 'weechat' for key in ('description', 'pointer_description', 'args_description'): infolists[plugin][infolist_name][key] = \ weechat.infolist_string(infolist, key) weechat.infolist_free(infolist) return infolists # pylint: disable=too-many-locals def get_hdata(): """ Get list of hdata hooked by plugins in a dict with 3 indexes: plugin, name, xxx.
liamks/pyitunes
libpytunes/Playlist.py
Python
mit
470
0.002128
from six import iteritems


class Playlist:
    is_folder = False
    playlist_persistent_id = None
    parent_persistent_id = None
    distinguished_kind = None
    playlist_id = None

    def __init__(self, playListName=None):
        self.name = playListName
        self.tracks = []

    def __iter__(self):
        for attr, value in iteritems(self.__dict__):
            yield attr, value

    def ToDict(self):
        return {key: value for (key, value) in self}
mohclips/k5-ansible-modules
k5_novnc_console.py
Python
gpl-3.0
5,289
0.008697
#!/usr/bin/python ANSIBLE_METADATA = {'status': ['preview'], 'supported_by': 'community', 'version': '1.0'} DOCUMENTATION = ''' --- module: k5_novnc_console short_description: Display the URL to the NoVNC Console version_added: "1.0" description: - returns a URL to the noVNC console. options: server_name: description: - Name of the server. required: true default: None k5_auth: description: - dict of k5_auth module output. required: true default: None requirements: - "python >= 2.6" ''' EXAMPLES = ''' # Get novnc url - k5_novnc_console: server_name: test01 k5_auth: "{{ k5_auth_facts }}" ''' RETURN = ''' k5_novnc_console_facts description: Dictionary describing the novnc details. returned: On success when the server is found type: dictionary contains: id: description: Router ID. type: string sample: "474acfe5-be34-494c-b339-50f06aa143e4" ''' import requests import os import json from ansible.module_utils.basic import * ############## Common debug ############### k5_debug = False k5_debug_out = [] def k5_debug_get(): """Return our debug list""" return k5_debug_out def k5_debug_clear(): """Clear our debug list""" k5_debug_out = [] def k5_debug_add(s): """Add string to debug list if env K5_DEBUG is defined""" if k5_debug: k5_debug_out.append(s) ############## functions ############# def k5_get_endpoint(e,name): """Pull particular endpoint name from dict""" return e['endpoints'][name] def k5_get_server_facts(module, k5_facts): """Get server facts""" endpoint = k5_facts['endpoints']['compute'] auth_token = k5_facts['auth_token'] session = requests.Session() headers = {'Content-Type': 'application/json', 'Accept': 'application/json', 'X-Auth-Token': auth_token } url = endpoint + '/servers/detail' k5_debug_add('endpoint: {0}'.format(endpoint)) k5_debug_add('REQ: {0}'.format(url)) k5_debug_add('headers: {0}'.format(headers)) try: response = session.request('GET', url, headers=headers) except requests.exceptions.RequestException as e: module.fail_json(msg=e) # we failed to get data if response.status_code not in (200,): module.fail_json(msg="RESP: HTTP Code:" + str(response.status_code) + " " + str(response.content), debug=k5_debug_out) if 'servers' in response.json(): return response.json() else: module.fail_json(msg="Missing servers in response to server details request") def k5_get_novnc_console(module): """Get novnc url""" global k5_debug k5_debug_clear() if 'K5_DEBUG' in os.environ: k5_debug = True if 'auth_spec' in module.params['k5_auth']: k5_facts = module.params['k5_auth'] else: module.fail_json(msg="k5_auth_facts not found, have you run k5_auth?") endpoint = k5_facts['endpoints']['compute'] auth_token = k5_facts['auth_token'] server_name = module.params['server_name'] # we need the server_id not server_name, so grab it server_facts = k5_get_server_facts(module, k5_facts) server_id = '' for s in server_facts['servers']: if s['name'] == server_name: server_id = s['id'] break if server_id == '': if k5_debug: module.exit_json(changed=False, msg="Server " + server_name + " not found", debug=k5_debug_out) else: module.exit_json(changed=False, msg="Server " + server_name + " not found") k5_debug_add('auth_token: {0}'.format(auth_token)) k5_debug_add('server_name: {0}'.format(server_name)) session = requests.Session() headers = {'Content-Type': 'application/json', 'Accept': 'application/json', 'X-Auth-Token': auth_token } url = endpoint + '/servers/' + server_id + '/action' query_json = { 'os-getVNCConsole': {'type': 'novnc' }} k5_debug_add('endpoint: {0}'.format(endpoint)) k5_debug_add('REQ: 
{0}'.format(url)) k5_debug_add('headers: {0}'.format(headers)) k5_debug_add('json: {0}'.format(query_
json)) try: response = session.request('POST', url, headers=headers, json=query_json
) except requests.exceptions.RequestException as e: module.fail_json(msg=e) # we failed to make a change if response.status_code not in (200,): module.fail_json(msg="RESP: HTTP Code:" + str(response.status_code) + " " + str(response.content), debug=k5_debug_out) if k5_debug: module.exit_json(changed=True, msg="Get URL Successful", k5_novnc_console_facts=response.json(), debug=k5_debug_out ) module.exit_json(changed=True, msg="Get URL Successful", k5_novnc_console_facts=response.json() ) ###################################################################################### def main(): module = AnsibleModule( argument_spec=dict( server_name = dict(required=True, default=None, type='str'), k5_auth = dict(required=True, default=None, type='dict') ) ) k5_get_novnc_console(module) ###################################################################################### if __name__ == '__main__': main()
martanoga/yacas
docs/util/yacasdomain.py
Python
lgpl-2.1
13,972
0.001288
# -*- coding: utf-8 -*- """ The Yacas domain. :copyright: Copyright 2014 by the Sphinx team, see AUTHORS. :license: BSD, see LICENSE for details. """ import re from docutils import nodes from docutils.parsers.rst import directives from sphinx import addnodes from sphinx.roles import XRefRole from sphinx.locale import l_, _ from sphinx.domains import Domain, ObjType, Index from sphinx.directives import ObjectDescription from sphinx.util.nodes import make_refnode from sphinx.util.compat import Directive from sphinx.util.docfields import Field, GroupedField, TypedField # REs for Yacas signatures yacas_sig_re = re.compile( r'''^ (prefix|infix|postfix|bodied)? \s* # syntax ([\@a-zA-Z0-9'!*+-/^<>:=]+) \s* # thing name (?: \((.*)\) # optional: arguments )? $ # and nothing more ''', re.VERBOSE) def _pseudo_parse_arglist(signode, arglist): """"Parse" a list of arguments separated by commas. Arguments can have "optional" annotations given by enclosing them in brackets. Currently, this will split at any comma, even if it's inside a string literal (e.g. default argument value). """ paramlist = addnodes.desc_parameterlist() stack = [paramlist] try: for argument in arglist.split(','): argument = argument.strip() ends_open = ends_close = 0 while argument.startswith('['): stack.append(addnodes.desc_optional()) stack[-2] += stack[-1] argument = argument[1:].strip() while argument.startswith(']'): stack.pop() argument = argument[1:].strip() while argument.endswith(']'): ends_close += 1 argument = argument[:-1].strip() while argument.endswith('['): ends_open += 1 argument = argument[:-1].strip() if argument: stack[-1] += addnodes.desc_parameter(argument, argument) while ends_open: stack.append(addnodes.desc_optional()) stack[-2] += stack[-1] ends_open -= 1 while ends_close: stack.pop() ends_close -= 1 if len(stack) != 1: raise IndexError except IndexError: # if there are too few or too many elements on the stack, just give up # and treat the whole argument list as one argument, discarding the # already partially populated paramlist node signode += addnodes.desc_parameterlist() signode[-1] += addnodes.desc_parameter(arglist, arglist) else: signode += paramlist class YacasObject(ObjectDescription): """ Description of a general Yacas object. """ option_spec = { 'noindex': directives.flag, 'module': directives.unchanged, 'annotation': directives.unchanged, } doc_field_types = [ Field('parameter', label=l_('Arguments'), names=('param')), Field('returnvalue', label=l_('Returns'), has_arg=False, names=('returns', 'return')), ] def get_signature_prefix(self, sig): """May return a prefix to put before the object name in the signature. """ return '' def needs_arglist(self): """May return true if an empty argument list is to be generated even if the document contains none. """ return self.objtype == 'function' def handle_signature(self, sig, signode): """Transform a Yacas signature into RST nodes. Return (fully qualified name of the thing, classname if any). 
If inside a class, the current class name is handled intelligently: * it is stripped from the displayed name if present * it is added to the full name (return value) if not present """ m = yacas_sig_re.match(sig) if m is None: raise ValueError syntax, name, arglist = m.groups() add_module = False fullname = name signode['fullname'] = fullname sig_prefix = self.get_signature_prefix(sig) if sig_prefix: signode += addnodes.desc_annotation(sig_prefix, sig_prefix) if add_module and self.env.config.add_module_names: modname = self.options.get( 'module', self.env.temp_data.get('ys:module')) if modname: nodetext = modname + '.' signode += addnodes.desc_addname(nodetext, nodetext) anno = self.options.get('annotation') if syntax == 'prefix': signode += addnodes.desc_name(name, name) signode += addnodes.desc_type(arglist, arglist) return fullname, '' if syntax == 'infix': left, right = arglist.split(',') left = left + ' ' right = ' ' + right signode += addnodes.desc_type(left, left) signode += addnodes.desc_name(name, name) signode += addnodes.desc_type(right, right) return fullname, '' if syntax == 'postfix': signode += addnodes.desc_type(arglist, arglist) signode += addnodes.desc_name(name, name) return fullname, '' signode += addnodes.desc_name(name, name) if not arglist: if self.needs_arglist(): # for callables, add an empty parameter list signode += addnodes.desc_parameterlist() if anno: signode += addnodes.desc_annotation(' ' + anno, ' ' + anno) return fullname, '' if (syntax == 'bodied'): body = arglist.split(',')[0] arglist = str.join(',', arglist.split(',')[1:]) _pseudo_parse_arglist(signode, arglist) if (syntax == 'bodied'): signode += addnodes.desc_type(' ' + body, ' ' + body) if anno: signode += addnodes.desc_annotation(' ' + anno, ' ' + anno)
return fullname, '' def get_index_text(self, modname, name): """Return the text for the index entry of the object.""" if self.objtype == 'function': return _('%s()') % name[0] elif self.objtype == 'data': return _('%s') % name[0] else: return '' def add_target_and_inde
x(self, name_cls, sig, signode): modname = self.options.get( 'module', self.env.temp_data.get('ys:module')) fullname = (modname and modname + '.' or '') + name_cls[0] # note target if fullname not in self.state.document.ids: signode['names'].append(fullname) signode['ids'].append(fullname) signode['first'] = (not self.names) self.state.document.note_explicit_target(signode) objects = self.env.domaindata['ys']['objects'] if fullname in objects: self.state_machine.reporter.warning( 'duplicate object description of %s, ' % fullname + 'other instance in ' + self.env.doc2path(objects[fullname][0]) + ', use :noindex: for one of them', line=self.lineno) objects[fullname] = (self.env.docname, self.objtype) indextext = self.get_index_text(modname, name_cls) if indextext: self.indexnode['entries'].append(('single', indextext, fullname, '', None)) def before_content(self): # needed for automatic qualification of members (reset in subclasses) self.clsname_set = False def after_content(self): if self.clsname_set: self.env.temp_data['yacas:class'] = None class YacasXRefRole(XRefRole): def process_link(self, env, refnode, has_explicit_title, title, target): refnode['ys:module'] = env.temp_data.get('ys:module') refnode['ys:class'] = env.temp_data.get('ys:class') if not
pwillworth/galaxyharvester
test/pyunit/testObjects.py
Python
gpl-3.0
2,899
0.029665
import unittest from datetime import timedelta, datetime import sys import json sys.path.append("../../config") sys.path.append("../../html") import ghObjects import ghObjectRecipe class testObjects(unittest.TestCase): def setUp(self): # nothin yet self.test = "rad" def test_spawnHTML(self): # arrange spawnName = "testspawn" s = ghObjects.resourceSpawn() s.spawnID = 42 s.spawnName = spawnName s.spawnGalaxy = 1 s.resourceType = "wood_deciduous_yavin4" s.resourceTypeName = "Yavinian Deciduous Wood" s.containerType = "flora_structural" s.stats.CR = 0 s.stats.CD = 0 s.stats.DR = 780 s.stats.FL = 0 s.stats.HR = 0 s.stats.MA = 560 s.stats.PE = 0 s.stats.OQ = 656 s.stats.SR = 450 s.stats.UT = 800 s.stats.ER = 0 s.percentStats.CR = None s.percentStats.CD = None s.percentStats.DR = 780.0/800 s.percentStats.FL = None s.percentStats.HR = None s.percentStats.MA = 160.0/400 s.percentStats.PE = None s.percentStats.OQ = 656.0/1000 s.percentStats.SR = 150.0/400 s.percentStats.UT = 800.0/800 s.percentStats.ER = None s.entered = daysago = datetime.now() - timedelta(4) s.enteredBy = "ioscode" s.verified = daysago = datetime.now() - timedelta(3) s.verifiedBy = "tester" s.unavailable = None s.unavailableBy = None s.maxWaypointConc = None # act mobileHTML = s.getMobileHTML("", 0, 0) normalHTML = s.getHTML(0, "", "", 0, 0) rowHTML = s.getRow(False) invHTML = s.getInventoryObject() spawnJSON = s.getJSON() spawnJSON = "{ " + spawnJSON[:-2] + " }" #assert self.assertIn("ioscode", mobileHTML, "Username not in mobile HTML.") self.assertIn("ioscode", normalHTML, "Username not in normal HTML.") self.assertIn(spawnName, rowHTML, "No spawn name in row HTML.") self.assertIn(spawnName, invHTML, "No spawn name in inventory HTML.") try: jsonObject = json.loads(spawnJSON) jsonValid = True except ValueError: jsonValid = False self.assertTrue(jsonValid, "Generated Spawn JSON output not valid.") def test_recipeRender(self): # arrage r = ghObjectRecipe.schematicRecipe() r.recipeID = 1 r.schematicID = "armor_segment_composite_advanced" r.recipeName = "Test Recipe" i1 = ghObjectRecipe.recipeIngredient("steel_kiirium", "17895", "armor_layer_weld_tabs", 8, "0", "Kiirium Steel", 455, "stuff steel") i2 = ghObjectRecipe.recipeIngredient("copper_polysteel", "13455", "segment_mounting_tabs", 5, "0", "Polystee
l Copper", 877, "This is great") r.recipeIngredients.append(i1) r.recipeIngredients.append(i2) # act slotHTML = r.getIngredientSlots() rowHTML = r.getRow() # assert self.assertIn("steel_kiirium", slotHTML, "Resource id not in slot html.") self.assertIn("Test Recipe", rowHTML, "Title not in row html
.") self.assertIn("yellow", slotHTML, "Expected quality color not present in slot HTML.") if __name__ == '__main__': unittest.main()
ImmobilienScout24/aws-deployment-notifier
src/unittest/python/notifier_tests.py
Python
apache-2.0
900
0.003333
import json

import dnot
from mock import patch
import unittest2


class NotifierTest(unittest2.TestCase):

    @patch("dnot.sns.connect_to_region")
    def test_parameters_are_submitted(self, connect_to_region_mock):
        topic = "abc"
        region = "eu-west-2"
        result_topic = "result"
        stack_name = "stack1"
        params = '{"key": "value"}'

        notifier = dnot.Notifier(sns_region=region)
        notifier.publish(sns_topic_arn=topic, stack_name=stack_name,
                         result_topic=result_topic, params=params)

        connect_to_region_mock.assert_called_with(region)
        message = json.loads('{{"stackName": "{0}", "notificationARN": "{1}", "region": "eu-west-1", "params": {2}}}'
                             .format(stack_name, result_topic, params))
        connect_to_region_mock.return_value.publish.assert_called_with(
            topic=topic, message=json.dumps(message))
defivelo/db
apps/user/migrations/0019_auto_20160922_1342.py
Python
agpl-3.0
1,161
0.001726
from __future__ import unicode_literals

from django.db import migrations, models
import multiselectfield.db.fields


class Migration(migrations.Migration):

    dependencies = [
        ('user', '0018_auto_20160922_1258'),
    ]

    operations = [
        migrations.AddField(
            model_name='userprofile',
            name='activity_cantons',
            field=multiselectfield.db.fields.MultiSelectField(
                default='', verbose_name='Défi Vélo mobile',
                choices=[('BS', 'Basel-Stadt'), ('BE', 'Berne'), ('FR', 'Fribourg'), ('GE', 'Geneva'),
                         ('LU', 'Lucerne'), ('NE', 'Neuchatel'), ('SG', 'St. Gallen'), ('VS', 'Valais'),
                         ('VD', 'Vaud'), ('ZH', 'Zurich')],
                max_length=29),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='userprofile',
            name='affiliation_canton',
            field=models.CharField(
                verbose_name="Canton d'affiliation",
                choices=[('', '---------'), ('BS', 'Basel-Stadt'), ('BE', 'Berne'), ('FR', 'Fribourg'),
                         ('GE', 'Geneva'), ('LU', 'Lucerne'), ('NE', 'Neuchatel'), ('SG', 'St. Gallen'),
                         ('VS', 'Valais'), ('VD', 'Vaud'), ('ZH', 'Zurich')],
                max_length=2),
        ),
    ]
Cadene/keras
keras/utils/layer_utils.py
Python
mit
4,856
0.002265
from __future__ import print_function import inspect import numpy as np import theano from ..layers.advanced_activations import LeakyReLU, PReLU from ..layers.core import Dense, Merge, Dropout, Activation, Reshape, Flatten, RepeatVector, Layer from ..layers.core import ActivityRegularization, TimeDistributedDense, AutoEncoder, MaxoutDense from ..layers.embeddings import Embedding, WordContextProduct from ..layers.noise import GaussianNoise, GaussianDropout from ..layers.normalization import BatchNormalization from ..layers.recurrent import SimpleRNN, SimpleDeepRNN, GRU, LSTM, JZS1, JZS2, JZS3 from ..layers import containers from .. import regularizers from .. import constraints def container_from_config(layer_dict): name = layer_dict.get('name') hasParams = False if name == 'Merge': mode = layer_dict.get('mode') layers = layer_dict.get('layers') layer_list = [] for layer in layers: init_layer = container_from_config(layer) layer_list.append(init_layer) merge_layer = Merge(layer_list, mode) return merge_layer elif name == 'Sequential': layers = layer_dict.get('layers') layer_list = [] for layer in layers: init_layer = container_from_config(layer) layer_list.append(init_layer) seq_layer = containers.Sequential(layer_list) return seq_layer elif name == 'Graph': graph_layer = containers.Graph() inputs = layer_dict.get('input_config') for input in inputs: graph_layer.add_input(**input) nodes = layer_dict.get('node_config') for node in nodes: layer = container_from_config(layer_dict['nodes'].get(node['name'])) node['layer'] = layer graph_layer.add_node(**node) outputs = layer_dict.get('output_config') for output in outputs: graph_layer.add_output(**output) return graph_layer else: # The case in which layer_dict represents an "atomic" layer layer_dict.pop('name') if 'parameters' in layer_dict: params = layer_dict.get('parameters') layer_dict.pop('parameters') hasParams = True for k, v in layer_dict.items(): # For now, this can only happen for regularizers and constraints if isinstance(v, dict): vname = v.get('name') v.pop('name') if vname in [x for x, y in inspect.getmembers(constraints, predicate=inspect.isclass)]: layer_dict[k] = constraints.get(vname, v) if vname in [x for x, y in inspect.getmembers(regularizers, predicate=inspect.isclass)]: layer_dict[k] = regularizers.get(vname, v) base_layer = get_layer(name, layer_dict) if hasParams: shaped_params = [] for param in params: data = np.asarray(param.get('data')) shape = tuple(param.get('shape'
)) shaped_params.append(data.reshape(shape)) base_layer.set_weights(shaped_params) return base_layer def print_layer_shapes(model, input_shapes): """ Utility function to print the shape of the output at each layer of a Model Arguments: model: instance of Model / Merge i
nput_shapes: dict (Graph), list of tuples (Merge) or tuple (Sequential) """ if model.__class__.__name__ in ['Sequential', 'Merge']: # in this case input_shapes is a tuple, or a list [shape1, shape2] if not isinstance(input_shapes[0], tuple): input_shapes = [input_shapes] inputs = model.get_input(train=False) if not isinstance(inputs, list): inputs = [inputs] input_dummy = [np.zeros(shape, dtype=np.float32) for shape in input_shapes] layers = model.layers elif model.__class__.__name__ == 'Graph': # in this case input_shapes is a dictionary inputs = [model.inputs[name].input for name in model.input_order] input_dummy = [np.zeros(input_shapes[name], dtype=np.float32) for name in model.input_order] layers = [model.nodes[c['name']] for c in model.node_config] print("input shapes : ", input_shapes) for l in layers: shape_f = theano.function(inputs, l.get_output(train=False).shape, on_unused_input='ignore') out_shape = tuple(shape_f(*input_dummy)) config = l.get_config() print('shape after %s: %s' % (config['name'], out_shape)) from .generic_utils import get_from_module def get_layer(identifier, kwargs=None): return get_from_module(identifier, globals(), 'layer', instantiate=True, kwargs=kwargs)
prman-pixar/RenderManForBlender
rman_ui/rman_ui_txmanager.py
Python
mit
27,755
0.00508
import bpy from bpy.props import StringProperty, IntProperty, CollectionProperty, EnumProperty, BoolProperty, FloatProperty from bpy.types import PropertyGroup, UIList, Operator, Panel from bpy_extras.io_utils import ImportHelper from .rman_ui_base import _RManPanelHeader from ..rfb_utils import texture_utils from ..rfb_utils import shadergraph_utils from ..rfb_utils import scene_utils from ..rfb_utils import object_utils from ..rfb_utils.prefs_utils import get_pref from ..rfb_logger import rfb_log from ..rman_config import __RFB_CONFIG_DICT__ as rfb_config from .. import rman_render from rman_utils.txmanager import txparams from rman_utils import txmanager as txmngr from .. import rfb_icons import os import uuid class TxFileItem(PropertyGroup): """UIList item representing a TxFile""" name: StringProperty( name="Name", description="Image name", default="") tooltip: StringProperty( name="tooltip", description="Tool Tip", default="") nodeID: StringProperty( name="nodeID", description="Node ID (hidden)", default="") state: IntProperty( name="state", description="", default=0 ) enable: BoolProperty( name="enable", description="Enable or disable this TxFileItem", default=True ) def colorspace_names(self, context): items = [] items.append(('0', '', '')) try: mdict = texture_utils.get_txmanager().txmanager.color_manager.colorspace_names() for nm in mdict: items.append((nm, nm, "")) except AttributeError: pass return items ocioconvert: EnumProperty( name="Color Space", description="colorspace", items=colorspace_names ) txsettings = ['texture_type', 's_mode', 't_mode', 'texture_format', 'data_type', 'resize', 'ocioconvert'] items = [] for item in txparams.TX_TYPES: items.append((item, item, '')) texture_type: EnumProperty( name="Texture Type", items=items, description="Texture Type", default=txparams.TX_TYPE_REGULAR) items = [] for item in txparams.TX_WRAP_MODES: items.append((item, item, '')) s_mode: EnumProperty( name="S Wrap", items=items, default=txparams.TX_WRAP_MODE_PERIODIC) t_mode: EnumProperty( name="T Wrap", items=items, default=txparams.TX_WRAP_MODE_PERIODIC) items = [] for item in txparams.TX_FORMATS: items.append((item, item, '')) texture_format: EnumProperty( name="Format", default=txparams.TX_FORMAT_PIXAR, items=items, description="Texture format") items = [] items.append(('default', 'default', '')) for item in txparams.TX_DATATYPES: items.append((item, item, '')) data_type:
EnumProperty( name="Data Type", default=txparams.TX_DATATYPE_FLOAT, items=items, description="The data storage txmake uses") items = [] for item in txparams.TX_RESIZES: items.append((item, item, '')) resize: EnumPrope
rty( name="Resize", default=txparams.TX_RESIZE_UP_DASH, items=items, description="The type of resizing flag to pass to txmake") bumpRough: EnumProperty( name="Bump Rough", default="-1", items=( ("-1", "Off", ""), ("0", "Bump Map", ""), ("1", "Normal Map", "") ) ) bumpRough_factor: FloatProperty( name="Scale", default=2.0 ) bumpRough_invert: BoolProperty( name="Invert", default=False ) bumpRough_invertU: BoolProperty( name="InvertU", default=False ) bumpRough_invertV: BoolProperty( name="InvertV", default=False ) bumpRough_refit: BoolProperty( name="Refit", default=False ) class PRMAN_UL_Renderman_txmanager_list(UIList): """RenderMan TxManager UIList.""" def draw_item(self, context, layout, data, item, icon, active_data, active_propname, index): icons_map = {txmngr.STATE_MISSING: 'ERROR', txmngr.STATE_EXISTS: 'CHECKBOX_HLT', txmngr.STATE_IS_TEX: 'TEXTURE', txmngr.STATE_IN_QUEUE: 'PLUS', txmngr.STATE_PROCESSING: 'TIME', txmngr.STATE_ERROR: 'CANCEL', txmngr.STATE_REPROCESS: 'TIME', txmngr.STATE_UNKNOWN: 'CANCEL', txmngr.STATE_INPUT_MISSING: 'ERROR'} txfile = None if item.nodeID != "": txfile = texture_utils.get_txmanager().txmanager.get_txfile_from_id(item.nodeID) if txfile: custom_icon = icons_map[txfile.state] else: custom_icon = 'CANCEL' if self.layout_type in {'DEFAULT', 'COMPACT'}: layout.label(text=item.name, icon = custom_icon) elif self.layout_type in {'GRID'}: layout.alignment = 'CENTER' layout.label(text="", icon = custom_icon) class PRMAN_OT_Renderman_txmanager_parse_scene(Operator): """Parse scene for textures to add to to the txmanager""" bl_idname = "rman_txmgr_list.parse_scene" bl_label = "Parse Scene" bl_description = "Parse the scene and look for textures that need converting." def execute(self, context): rman_txmgr_list = context.scene.rman_txmgr_list texture_utils.parse_for_textures(context.scene) texture_utils.get_txmanager().txmake_all(blocking=False) bpy.ops.rman_txmgr_list.refresh('EXEC_DEFAULT') return{'FINISHED'} class PRMAN_OT_Renderman_txmanager_reset_state(Operator): """Reset State""" bl_idname = "rman_txmgr_list.reset_state" bl_label = "Reset State" bl_description = "All texture settings will be erased and the scene will be re-parsed. All manual edits will be lost." 
def execute(self, context): rman_txmgr_list = context.scene.rman_txmgr_list rman_txmgr_list.clear() texture_utils.get_txmanager().txmanager.reset() texture_utils.parse_for_textures(context.scene) texture_utils.get_txmanager().txmake_all(blocking=False) texture_utils.get_txmanager().txmanager.reset_state() return{'FINISHED'} class PRMAN_OT_Renderman_txmanager_clear_unused(Operator): """Clear Unused""" bl_idname = "rman_txmgr_list.clear_unused" bl_label = "Clear Unused" bl_description = "Clear unused textures" def execute(self, context): rman_txmgr_list = context.scene.rman_txmgr_list nodeIDs = list() for item in rman_txmgr_list: nodeID = item.nodeID if item.nodeID != "": txfile = texture_utils.get_txmanager().txmanager.get_txfile_from_id(item.nodeID) if not txfile: nodeIDs.append(nodeID) continue tokens = nodeID.split('|') if len(tokens) < 3: continue node_name,param,ob_name = tokens node, ob = scene_utils.find_node_by_name(node_name, ob_name) if not node: continue if getattr(node, param) != item.name: nodeIDs.append(nodeID) for nodeID in nodeIDs: bpy.ops.rman_txmgr_list.remove_texture('EXEC_DEFAULT', nodeID=nodeID) return{'FINISHED'} class PRMAN_OT_Renderman_txmanager_pick_images(Operator, ImportHelper): """Pick images from a directory.""" bl_idname = "rman_txmgr_list.pick_images" bl_label = "Pick Images" bl_description = "Manually choos
rgroten/NetApp-Snapshot-Manager
snapmgr/NaFunctions.py
Python
gpl-2.0
6,288
0.00493
''' Created on Feb 23, 2015 @author: rgroten ''' import ConfigParser import ssl from datetime import datetime from flask.globals import g # Import NetApp API libraries from NaElement import NaElement from NaServer import NaServer # from flask.globals import g def connect(): try: _create_unverified_https_context = ssl._create_unverified_context except AttributeError: # Legacy Python that doesn't verify HTTPS certificates by default pass else: # Handle target environment that doesn't support HTTPS verification ssl._create_default_https_context = _create_unverified_https_context naHost = getConfigOption("NAHost") user = getConfigOption("User") password = getConfigOption("Password") s = NaServer(naHost, 1 , 21) s.set_server_type("FILER") s.set_transport_type("HTTPS") s.set_port(443) s.set_style("LOGIN") s.set_admin_user(user, password) return s def getConfigOption(option, section=None): config = ConfigParser.ConfigParser() config.read("config.ini") # If section is not provided, first check if g.env is set and use that. # Otherwise, set section to GENERAL if not section: try: if g.env: section = g.env except: section = "GENERAL" return config.get(section, option) def executeCmd(cmd): isDebug = getConfigOption("Debug") s= connect() if (isDebug == 'True'): print("Request Object: " + cmd.sprintf()) ret = s.invoke_elem(cmd) if (ret.results_status() == "failed"): print("Error: ") print(ret.sprintf()) # Print object for debugging if (isDebug == 'True'): print( "Response Object: " + ret.sprintf()) return ret def listVolumes(): isDebug = getConfigOption("Debug") # Build command to list volumes cmd = NaElement("volume-get-iter") xi = NaElement("desired-attributes") xi1 = NaElement("volume-attributes") xi1.child_add(NaElement("volume-id-attributes")) xi1.child_add(NaElement("volume-snapshot-attributes")) xi1.child_add(NaElement("volume-space-attributes")) xi2 = NaElement("volume-clone-attributes") xi2.child_add(NaElement("volume-clone-parent-attributes")) xi1.child_add(xi2) xi.child_add(xi1) cmd.child_add(xi) cmd.child_add_string("max-records", "500") ret =
executeCmd(cmd) # Remove volumes from list that contain filterStrings filterString = getConfigOption("VolFilters") filterList = filterString.replace(" ","").split(",") filteredVolumes = NaElement("attributes-list") for vol in ret.child_get("attributes-list").children_get(): volattrs = vol.child_get('volume-id-attributes') if any(x in volattrs.child_get_string('name') for x in filterList): if (isDebug =
= 'True'): print "Skipping filtered vol : %s" % volattrs.child_get_string('name') continue if (isDebug == 'True'): print 'Volume Name : %s' % volattrs.child_get_string('name') filteredVolumes.child_add(vol) filteredRet = NaElement("results") filteredRet.attr_set("status", "passed") filteredRet.child_add(filteredVolumes) if (isDebug == 'True'): print "Number of volumes (after filtering): " + str(ret.child_get("attributes-list").children_get().__len__()) return filteredRet def listSnapshots(volume): cmd = NaElement('snapshot-list-info') cmd.child_add_string('volume', volume) ret = executeCmd(cmd) return ret def createSnapshot(volume, customname=None): if customname: snapshotName = customname else: # Create snapshot format name snapshotName = "snap_" + volume + "_" + datetime.strftime(datetime.now(), "%Y%m%d%H%M%S") cmd = NaElement('snapshot-create') cmd.child_add_string("volume", volume) cmd.child_add_string("snapshot", snapshotName) return executeCmd(cmd) def deleteSnapshot(volume, snapshot): cmd = NaElement('snapshot-delete') cmd.child_add_string("snapshot", snapshot) cmd.child_add_string("volume", volume) return executeCmd(cmd) def restoreSnapshot(volume, snapshot): cmd = NaElement('snapshot-restore-volume') cmd.child_add_string("snapshot", snapshot) cmd.child_add_string("volume", volume) return executeCmd(cmd) def renameSnapshot(volume, snapshot, newName): cmd = NaElement('snapshot-rename') cmd.child_add_string("current-name", snapshot) cmd.child_add_string("volume", volume) cmd.child_add_string("new-name", newName) return executeCmd(cmd) def createClone(parentVolume, volume): cmd = NaElement('volume-clone-create') cmd.child_add_string("parent-volume", parentVolume) cmd.child_add_string("volume", volume) # Feature disabled for now debugret = NaElement("results") debugret.attr_set("status", "failed") debugret.attr_set("reason", "Creating clones not supported...yet!") return debugret def getEnvs(): envs = getConfigOption("Environments", "GENERAL").split(",") envObjs = [] for env in envs: try: envObj = EnvObj(env) envObjs.append(envObj) except Exception as e: print str(e) print "Error: couldn't load options for environment: " + env return envObjs class EnvObj: name = "" rfcRequired = False def __init__(self, envName): self.get_env_properties(envName) def get_env_properties(self, envName): self.name = envName self.rfcRequired = getConfigOption("RFCRequired", envName) return self def get_name(self): return self.__name def get_rfc_required(self): return self.rfcRequired def set_name(self, value): self.__name = value def set_rfc_required(self, value): self.__rfcRequired = value def del_name(self): del self.__name def del_rfc_required(self): del self.__rfcRequired name = property(get_name, set_name, del_name, "name's docstring") rfcRequired = property(get_rfc_required, set_rfc_required, del_rfc_required, "rfcRequired's docstring")
animekita/selvbetjening
selvbetjening/sadmin2/views/user.py
Python
mit
1,851
0.001621
from django.contrib.auth import get_user_model
from django.core.urlresolvers import reverse
from django.shortcuts import get_object_or_404
from django.utils.translation import ugettext as _

from selvbetjening.sadmin2 import menu
from selvbetjening.sadmin2.decorators import sadmin_prerequisites
from selvbetjening.sadmin2.forms import UserForm, PasswordForm
from selvbetjening.sadmin2.views.generic import generic_create_view


@sadmin_prerequisites
def user_change(request, user_pk):
    user = get_object_or_404(get_user_model(), pk=user_pk)

    context = {
        'sadmin2_menu_main_active': 'userportal',
        'sadmin2_breadcrumbs_active': 'user',
        'sadmin2_menu_tab': menu.sadmin2_menu_tab_user,
        'sadmin2_menu_tab_active': 'user',
        'user': user
    }

    return generic_create_view(request, UserForm,
                               reverse('sadmin2:user', kwargs={'user_pk': user.pk}),
                               message_success=_('User updated'),
                               context=context,
                               instance=user)


@sadmin_prerequisites
def user_password(request, user_pk):
    user = get_object_or_404(get_user_model(), pk=user_pk)

    context = {
        'sadmin2_menu_main_active': 'userportal',
        'sadmin2_breadcrumbs_active': 'user_password',
        'sadmin2_menu_tab': menu.sadmin2_menu_tab_user,
        'sadmin2_menu_tab_active': 'password',
        'user': user
    }

    return generic_create_view(request, PasswordForm,
                               redirect_success_url=reverse('sadmin2:user_password', kwargs={'user_pk': user.pk}),
                               message_success=_('Password updated'),
                               context=context,
                               instance=user)
moiseshiraldo/inviMarket
inviMarket/views/del_partner.py
Python
agpl-3.0
1,187
0.001685
# -*- coding: utf-8 -*-
from django.shortcuts import render, get_object_or_404
from django.contrib.auth.decorators import login_required
from django.utils.translation import ugettext as _

from inviMarket.models import User


@login_required
def del_partner(request, partner_id):
    """
    Delete the :model:`auth.User` passed by argument from the partners list.

    **Context**

    ``message``
        A string variable used to inform the user.

    **Template:**

    :template:`inviMarket/addpartner.html`
    """
    user = request.user
    partner = get_object_or_404(User.objects.select_related('profile'), pk=partner_id)
    message = _("The user is not your partner.")
    if partner.profile.partners.filter(pk=user.id).exists():
        partner.profile.partners.remove(user)
        message = _("The partnership proposal has been rejected.")
        user.notification_set.filter(code=20, sender=partner).delete()
    if user.profile.partners.filter(pk=partner_id).exists():
        user.profile.partners.remove(partner)
        message = _("The user is no longer your partner.")
    return render(request, 'message.html', {'message': message})
MeGotsThis/BotGotsThis
pkg/spam/items/channel.py
Python
gpl-3.0
941
0
from typing import Iterable, Mapping, Optional

from lib import data
from ..channel import pyramid
from ..channel import wall


def filterMessage() -> Iterable[data.ChatCommand]:
    return []


def commands() -> Mapping[str, Optional[data.ChatCommand]]:
    if not hasattr(commands, 'commands'):
        setattr(commands, 'commands', {
            '!pyramid': pyramid.commandPyramid,
            '!rpyramid': pyramid.commandRandomPyramid,
            '!wall': wall.commandWall,
        })
    return getattr(commands, 'commands')


def commandsStartWith() -> Mapping[str, Optional[data.ChatCommand]]:
    if not hasattr(commandsStartWith, 'commands'):
        setattr(commandsStartWith, 'commands', {
            '!pyramid-': pyramid.commandPyramidLong,
            '!wall-': wall.commandWallLong,
        })
    return getattr(commandsStartWith, 'commands')


def processNoCommand() -> Iterable[data.ChatCommand]:
    return []
ic-hep/DIRAC
src/DIRAC/ProductionSystem/Client/ProductionStep.py
Python
gpl-3.0
2,408
0.000415
""" Class defining a production step """ from __future__ import absolute_import from __future__ import division from __future__ import print_function __RCSID__ = "$Id$" import json from DIRAC import S_OK, S_ERROR class ProductionStep(object): """Define the Production Step object""" def __init__(self, **kwargs): """Simple constructor""" # Default values for transformation step parameters self.Name = "" self.Description = "description" self.LongDescription = "longDescription" self.Type = "MCSimulation" self.Plugin = "Standard" self.AgentType = "Manual" self.FileMask = "" ######################################### self.ParentStep = None self.Inputquery = None self.Outputquery = None self.GroupSize = 1 self.Body = "body" def getAsDict(self): """It returns the Step description as a dictionary""" prodStepDict = {} prodStepDict["name"] = self.Name prodStepDict["parentStep"] = [] # check the ParentStep format if self.ParentStep: if isinstance(self.ParentStep, list): prodStepDict["parentStep"] = [] for parentStep in self.ParentStep: # pylint: disable=not-an-iterable if not parentStep.Name: return S_ERROR("Parent Step does not exist") prodStepDict["parentStep"].append(parentStep.Name) elif isinstance(self.ParentStep, ProductionStep): if not self.ParentStep.Name: return S_ERROR("Parent Step does not exist") prodStepDict["parentStep"] = [self.ParentStep.Name] else: return S_ERROR("Invalid Parent Step") prodStepDict["description"] = self.Description prodStepDict["longDescription"] = self.LongDescription prodStepDict["stepType"] = self.Type prodStepDict["plugin"] = self.Plugin prodStepDict["agentType"] = self.AgentType prodStepDict["fileMask"] = self.FileMask # O
ptional fields prodStepDict["inputquery"] = json.dumps(self.Inputquery) prodStepDict["outputquery"] = json.dumps(self.Outputquery) prodStepDict["groupsize"] = self.GroupSize prodStepDict["body"] = json.dumps(se
lf.Body) return S_OK(prodStepDict)
wenxinwilliam/docker-django-celery
mydjangoapp/mydjangoapp/celeryconf.py
Python
mit
354
0.002825
# coding=UTF8
from __future__ import absolute_import

import os

from celery import Celery
from django.conf import settings

os.environ.setdefault("DJANGO_SETTINGS_MODULE", "mydjangoapp.settings")

app = Celery('mydjangoapp')

CELERY_TIMEZONE = 'UTC'

app.config_from_object('django.conf:settings')
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
asm-technologies/management
employee/migrations/0006_auto__chg_field_billdetails_end_date__chg_field_billdetails_start_date.py
Python
mit
4,781
0.008576
# -*- coding: utf-8 -*- from south.utils import datetime_utils as datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Changing field 'billdetails.end_date' db.alter_column(u'employee_billdetails', 'end_date', self.gf('django.db.models.fields.DateField')(null=True)) # Changing field 'billdetails.start_date' db.alter_column(u'employee_billdetails', 'start_date', self.gf('django.db.models.fields.DateField')(null=True)) def backwards(self, orm): # User chose to not deal with backwards NULL issues for 'billdetails.end_date' raise RuntimeError("Cannot reverse this migration. 'billdetails.end_date' and its values cannot be restored.") # The following code is provided here to aid in writing a correct migration # Changing field 'billdetails.end_date' db.alter_column(u'employee_billdetails', 'end_date', self.gf('django.db.models.fields.DateField')()) # User chose to not deal with backwards NULL issues for 'billdetails.start_date' raise RuntimeError("Cannot reverse this migration. 'billdetails.start_date' and its values cannot be restored.") # The following code is provided here to aid in writing a correct migration # Changing field 'billdetails.start_date' db.alter_column(u'employee_billdetails', 'start_date', self.gf('django.db.models.fields.DateField')()) models = { u'employee.billdetails': { 'Meta': {'object_name': 'billdetails'}, 'bill_type': ('django.db.models.fields.CharField', [], {'max_length': '10'}), 'emp_name': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['employee.Employee']"}), 'emp_proj': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['employee.Project']"}), 'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}) }, u'employee.employee': { 'Add1': ('django.db.models.fields.CharField', [], {'max_length': '250', 'blank': 'True'}), 'Add2': ('django.db.models.fields.CharField', [], {'max_length': '250', 'blank': 'True'}), 'City': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'Designation': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'Major_Subject': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}), 'Meta': {'object_name': 'Employee'}, 'Qualification': ('django.db.models.fields.CharField', [], {'max_length': '10'}), 'Skill_sets': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'Visa_Status': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'Zip_code': ('django.db.models.fields.CharField', [], {'max_length': '15', 'blank': 'True'}), 'bill': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}), 'dob': ('django.db.models.fields.DateField', [], {}), 'doj': ('django.db.models.fields.DateField', [], {}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '50'}), 'exp': ('django.db.models.fields.DecimalField', [], {'max_digits': '4', 'decimal_places': '2'}), 'id': ('django.db.models.fields.IntegerField', [], {'max_length': '6', 'primary_key': 'True'}), 'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}), 'mobile': ('django.db.models.fields.IntegerField', [], {'max_length': '12'}), 'name': (
'django.db.models.fields.CharField', [], {'max_length': '100'}), 'personal_email': ('django.db.models.fields.EmailField', [], {'max_length': '50', 'blank': 'True'}), 'proj': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['employee.Project']"}), 'start_date': ('django.db.models.fields.DateField', [], {'blank': 'True'}) }, u'employee.project': { 'Meta': {'object_nam
e': 'Project'}, 'description': ('django.db.models.fields.CharField', [], {'max_length': '254'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) } } complete_apps = ['employee']
fxia22/ASM_xf
PythonD/site_python/twisted/cred/checkers.py
Python
gpl-2.0
2,547
0.003141
# -*- test-case-name: twisted.test.test_newcred -*-

from twisted.internet import defer
from twisted.python import components, failure
from twisted.cred import error, credentials


class ICredentialsChecker(components.Interface):
    """I check sub-interfaces of ICredentials.

    @cvar credentialInterfaces: A list of sub-interfaces of ICredentials which
    specifies which I may check.
    """

    def requestAvatarId(self, credentials):
        """
        @param credentials: something which implements one of the interfaces
        in self.credentialInterfaces.

        @return: a Deferred which will fire a string which identifies an
        avatar, an empty tuple to specify an authenticated anonymous user
        (provided as checkers.ANONYMOUS) or fire a Failure(UnauthorizedLogin).

        A note on anonymity - We do not want None as the value for anonymous
        because it is too easy to accidentally return it.  We do not want the
        empty string, because it is too easy to mistype a password file.  For
        example, an .htpasswd file may contain the lines: ['hello:asdf',
        'world:asdf', 'goodbye', ':world'].  This misconfiguration will have
        an ill effect in any case, but accidentally granting anonymous access
        is a worse failure mode than simply granting access to an untypeable
        username.  We do not want an instance of 'object', because that would
        create potential problems with persistence.
        """


ANONYMOUS = ()


class AllowAnonymousAccess:
    __implements__ = ICredentialsChecker
    credentialInterfaces = credentials.IAnonymous,

    def requestAvatarId(self, credentials):
        return defer.succeed(ANONYMOUS)


class InMemoryUsernamePasswordDatabaseDontUse:
    credentialInterfaces = credentials.IUsernamePassword,
    __implements__ = ICredentialsChecker

    def __init__(self):
        self.users = {}

    def addUser(self, username, password):
        self.users[username] = password

    def _cbPasswordMatch(self, matched, username):
        if matched:
            return username
        else:
            return failure.Failure(error.UnauthorizedLogin())

    def requestAvatarId(self, credentials):
        if self.users.has_key(credentials.username):
            return defer.maybeDeferred(
                credentials.checkPassword,
                self.users[credentials.username]).addCallback(
                self._cbPasswordMatch, credentials.username)
        else:
            return defer.fail(error.UnauthorizedLogin())
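A minimal usage sketch (not part of the record above), assuming a standard Twisted installation where these classes are importable from twisted.cred.checkers: it simply shows the Deferred-based contract of requestAvatarId with UsernamePassword credentials.

# Illustrative only: the Deferred fires with the username on a password match
# and errbacks with UnauthorizedLogin otherwise.
from twisted.cred.checkers import InMemoryUsernamePasswordDatabaseDontUse
from twisted.cred.credentials import UsernamePassword

checker = InMemoryUsernamePasswordDatabaseDontUse()
checker.addUser("alice", "secret")

def on_avatar(avatar_id):
    print("authenticated as: %s" % (avatar_id,))

def on_reject(reason):
    print("rejected: %s" % (reason.getErrorMessage(),))

d = checker.requestAvatarId(UsernamePassword("alice", "secret"))
d.addCallbacks(on_avatar, on_reject)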
spilgames/job-runner
job_runner/apps/job_runner/migrations/0013_auto__add_field_worker_ping_response_dts.py
Python
bsd-3-clause
9,629
0.007789
# -*- coding: utf-8 -*- import datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Adding field 'Worker.ping_response_dts' db.add_column('job_runner_worker', 'ping_response_dts', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True), keep_default=False) def backwards(self, orm): # Deleting field 'Worker.ping_response_dts' db.delete_column('job_runner_worker', 'ping_response_dts') models = { 'auth.group': { 'Meta': {'object_name': 'Group'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, 'auth.permission': { 'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, 'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, 'job_runner.job': { 'Meta': {'ordering': "('title',)", 'unique_together': "(('title', 'job_template'),)", 'object_name': 'Job'}, 'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'disable_enqueue_after_fails': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}), 'enqueue_is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}), 'fail_times': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'job_template': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['job_runner.JobTemplate']"}), 'notification_addresses': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['job_runner.Job']"}), 'reschedule_interval': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}), 'reschedule_interval_type': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '6', 'blank': 'True'}), 'reschedule_type': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '18', 'blank': 'True'}), 'script_content': ('django.db.models.fields.TextField', [], {}), 'script_content_partial': ('django.db.models.fields.TextField', [], {}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}) }, 'job_runner.jobtemplate': { 'Meta': {'ordering': "('title',)", 'object_name': 'JobTemplate'}, 'auth_groups': 
('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}), 'body': ('django.db.models.fields.TextField', [], {}), 'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'enqueue_is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}), 'id': ('django.db.mod
els.fields.AutoField', [], {'primary_key': 'True'}), 'notification
_addresses': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'worker': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['job_runner.Worker']"}) }, 'job_runner.killrequest': { 'Meta': {'object_name': 'KillRequest'}, 'enqueue_dts': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}), 'execute_dts': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'run': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['job_runner.Run']"}), 'schedule_dts': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}) }, 'job_runner.project': { 'Meta': {'ordering': "('title',)", 'object_name': 'Project'}, 'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'enqueue_is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'notification_addresses': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}) }, 'job_runner.rescheduleexclude': { 'Meta': {'object_name': 'RescheduleExclude'}, 'end_time': ('django.db.models.fields.TimeField', [], {}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'job': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['job_runner.Job']"}), 'note': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}), 'start_time': ('django.db.models.fields.TimeField', [], {}) }, 'job_runner.run': { 'Meta': {'ordering': "('-return_dts', '-start_dts', '-enqueue_dts', 'schedule_dts')", 'object_name': 'Run'}, 'enqueue_dts': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_manual': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}), 'job': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['job_runner.Job']"}), 'pid': ('django.db.models.fields.PositiveIntegerField', [], {'default': 'None', 'null': 'True'}), 'return_dts': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}), 'return_success': ('django.db.models.fields.NullBooleanField', [], {'default': 'None', 'null': 'True', 'db_index': 'True', 'blank': 'True'}), 'schedule_children': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}), 'schedule_dts': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}), 'start_dts': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}) }, 'job_runner.runlog': { 'Meta': {'ordering': "('-run',)", 'object_name': 'RunLog'},
AlexandreGuinaudeau/ClasseurPhoto
classeur_photo/classeur_photo/album/urls.py
Python
gpl-3.0
519
0.001927
from django.conf.urls import url

from . import views

urlpatterns = [
    # ex: /album/
    url(r'^$', views.index, name='index'),
    # ex: /album/welcome/
    url(r'^welcome/$', views.welcome, name='welcome'),
    # ex: /album/create/
    url(r'^create/$', views.create, name='create'),
    # ex: /album/vietnam_2016/
    url(r'^(?P<album_permalink>[\w_]+)/$', views.detail, name='detail'),
    # ex: /album/vietnam_2016/settings
    url(r'^(?P<album_permalink>[\w_]+)/settings/', views.settings, name='settings'),
]
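A small, assumed illustration of how these named routes might be resolved elsewhere in the project with Django's reverse(); the import path varies by Django version, and the resulting paths assume this urls.py is mounted under /album/.

from django.core.urlresolvers import reverse  # django.urls.reverse on Django >= 2.0

index_url = reverse('index')  # e.g. '/album/'
detail_url = reverse('detail', kwargs={'album_permalink': 'vietnam_2016'})  # e.g. '/album/vietnam_2016/'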
egabancho/invenio
invenio/legacy/wsgi/__init__.py
Python
gpl-2.0
25,601
0.004336
# -*- coding: utf-8 -*- ## This file is part of Invenio. ## Copyright (C) 2009, 2010, 2011, 2012, 2013, 2014 CERN. ## ## Invenio is free software; you can redistribute it and/or ## modify it under the terms of the GNU General Public License as ## published by the Free Software Foundation; either version 2 of the ## License, or (at your option) any later version. ## ## Invenio is distributed in the hope that it will be useful, but ## WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ## General Public License for more details. ## ## You should have received a copy of the GNU General Public License ## along with Invenio; if not, write to the Free Software Foundation, Inc., ## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. """mod_python->WSGI Framework""" import sys import os import re import cgi import gc import inspect import socket from fnmatch import fnmatch from six.moves.urllib.parse import urlparse, urlunparse from six import iteritems from wsgiref.util import FileWrapper from invenio.legacy.wsgi.utils import table from invenio.utils.apache import \ HTTP_STATUS_MAP, SERVER_RETURN, OK, DONE, \ HTTP_NOT_FOUND, HTTP_INTERNAL_SERVER_ERROR from invenio.config import CFG_WEBDIR, CFG_SITE_LANG, \ CFG_WEBSTYLE_HTTP_STATUS_ALERT_LIST, CFG_DEVEL_SITE, CFG_SITE_URL, \ CFG_SITE_SECURE_URL, CFG_WEBSTYLE_REVERSE_PROXY_IPS from invenio.ext.logging import register_exception from invenio.utils.datastructures import flatten_multidict ## TODO for future reimplementation of stream_file #from invenio.legacy.bibdocfile.api import StreamFileException from flask import request, after_this_request ## Magic regexp to search for usage of CFG_SITE_URL within src/href or ## any src usage of an external website _RE_HTTPS_REPLACES = re.compile(r"\b((?:src\s*=|url\s*\()\s*[\"']?)http\://", re.I) ## Regexp to verify that the IP starts with a number (filter cases where 'unknown') ## It is faster to verify only the start (585 ns) compared with verifying ## the whole ip address - re.compile('^\d+\.\d+\.\d+\.\d+$') (1.01 µs) _RE_IPADDRESS_START = re.compile("^\d+\.") def _http_replace_func(match): ## src external_site -> CFG_SITE_SECURE_URL/sslredirect/external_site return match.group(1) + CFG_SITE_SECURE_URL + '/sslredirect/' _ESCAPED_CFG_SITE_URL = cgi.escape(CFG_SITE_URL, True) _ESCAPED_CFG_SITE_SECURE_URL = cgi.escape(CFG_SITE_SECURE_URL, True) def https_replace(html): html = html.decode('utf-8').replace(_ESCAPED_CFG_SITE_URL, _ESCAPED_CFG_SITE_SECURE_URL) return _RE_HTTPS_REPLACES.sub(_http_replace_func, html) class InputProcessed(object): """ Auxiliary class used when reading input. @see: <http://www.wsgi.org/wsgi/Specifications/handling_post_forms>. """ def read(self, *args): raise EOFError('The wsgi.input stream has already been consumed') readline = readlines = __iter__ = read from werkzeug import (BaseResponse, ResponseStreamMixin, CommonResponseDescriptorsMixin) class Response(BaseResponse, ResponseStreamMixin, CommonResponseDescriptorsMixin): """ Full featured response object implementing :class:`ResponseStreamMixin` to add support for the `stream` property. """ class SimulatedModPythonRequest(object): """ mod_python like request object. Minimum and cleaned implementation to make moving out of mod_python easy. 
@see: <http://www.modpython.org/live/current/doc-html/pyapi-mprequest.html> """ def __init__(self, environ, start_response): self.response = Response() self.__environ = environ self.__start_response = start_response self.__response_sent_p = False self.__content_type_set_p = False self.__buffer = '' self.__low_level_headers = [] self.__filename = None self.__disposition_type = None self.__bytes_sent = 0 self.__allowed_methods = [] self.__cleanups = [] self.headers_out = {'Cache-Control': None} #self.headers_out.update(dict(request.headers)) ## See: <http://www.python.org/dev/peps/pep-0333/#the-write-callable> self.__write = None self.__write_error = False self.__errors = environ['wsgi.errors'] self.__headers_in = table([]) self.__tainted = False self.__is_https = self.__environ.get('wsgi.url_scheme') == 'https' self.__replace_https = False self.track_writings = False self.__what_was_written = "" self.__cookies_out = {} self.g = {} ## global dictionary in case it's needed for key, value in iteritems(environ): if key.startswith('HTTP_'): self.__headers_in[key[len('HTTP_'):].replace('_', '-')] = value if environ.get('CONTENT_LENGTH'): self.__headers_in['content-length'] = environ['CONTENT_LENGTH'] if environ.get('CONTENT_TYPE'): self.__headers_in['content-type'] = environ['CONTENT_TYPE'] def get_wsgi_environ(self): return self.__environ def get_post_form(self): """ Returns only POST form. """ self.__tainted = True form = flatten_multidict(request.values) if request.files: for name, file_ in iteritems(request.files): setattr(file_, 'file', file_.stream) form[name] = file_ return form def get_response_sent_p(self): return self.__response_sent_p def get_low_level_headers(self): return self.__low_level_headers def get_buffer(self): return self.__buffer def write(self, string, flush=1): if isinstance(string, unicode): self.__buffer += string.encode('utf8') else: self.__buffer += string if flush: self.flush() def flush(self): self.send_http_header() if self.__buffer: self.__bytes_sent += len(self.__buffer) try: if not self.__write_error: if self.__replace_https: self.__write(https_replace(self.__buffer)) else: if self.__buffer: self.__write(self.__buffer) if self.track_writings: if self.__replace_https: self.__what_was_written += https_replace(self.__buffer) else: self.__what_was_written += self.__buffer except IOError as err: if "failed to write data" in str(err) or "client connection closed" in str(err): ## Let's just log this exception without alerting the admin: register_exception(req=self) self.__write_error = True ## This flag is there just ## to not report later other errors to the admin.
else: raise self.__buffer = '' def set_content_type(self, content_type): self.__content_type_set_p = True self.response.content_type = content_type if self.__is_https: if content_type.startswith("text/html") or content_type.startswith("application/rss+xml"):
self.__replace_https = True def get_content_type(self): return self.response.content_type def send_http_header(self): for (k, v) in self.__low_level_headers: self.response.headers[k] = v for k, v in iteritems(self.headers_out): self.response.headers[k] = v self.__write = self.response.stream.write def get_unparsed_uri(self): return '?'.join([self.__environ['PATH_INFO'], self.__environ['QUERY_STRING']]) def get_uri(self): return request.environ['PATH_INFO'] def get_full_uri(self): if self.is_https(): return CFG_SITE_SECURE_URL + self.get_unparsed_uri() else: return CFG_SITE_URL + self.get_unpars
m4nolo/steering-all
src/interact/evtInteract.py
Python
mit
196
0.015306
import interact


class EvtInteract(interact.Interact):

    def __init__(self):
        self.events = []

    def checkEventInteraction(self, events):
        self.events = events
        self.checkInteraction()
BunsenMcDubbs/cs4400-project
app/models/__init__.py
Python
mit
386
0
from . import (
    Application,
    Category,
    Course,
    Designation,
    Major,
    Project,
    Requirement,
    User,
    Year,
)

Application = Application.Application
Category = Category.Category
Course = Course.Course
Designation = Designation.Designation
Major = Major.Major
Project = Project.Project
Requirement = Requirement.Requirement
User = User.User
Year = Year.Year
NewAcropolis/api
migrations/versions/0047.py
Python
mit
1,247
0.005613
"""empty message Revision ID: 0047 add smtp Revises: 0046 remove long description Create Date: 2020-11-08 01:28:28.386704 """ # revision identifiers, used by Alembic. revision = '0047 add smtp' down_revision = '0046 remove long description' from alembic import op import sqlalchemy as sa def upgrade(): # ### commands auto generated by Alembic - please adjust! ### op.add_column('email_providers', sa.Column('smtp_password', sa.String(), nullable=True)) op.add_column('email_providers', sa.Column('smtp_server', sa.String(), nullable=True)) op.add_column('email_providers', sa.Column('smtp_user', sa.String(), nullable=True)) op.add_column('email_providers', sa.Column('available', sa.Boolean(), nullable=True)) op.add_column('email_providers'
, sa.Column('created_at', sa.DateTime(), nullable=True)) # ### end Alembic commands ### def downgrade(): # ### commands auto generated by A
lembic - please adjust! ### op.drop_column('email_providers', 'smtp_user') op.drop_column('email_providers', 'smtp_server') op.drop_column('email_providers', 'smtp_password') op.drop_column('email_providers', 'available') op.drop_column('email_providers', 'created_at') # ### end Alembic commands ###
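For context, a hedged sketch of applying a revision like the one above through Alembic's command API rather than the CLI; the alembic.ini path is an assumption about the project layout.

from alembic import command
from alembic.config import Config

cfg = Config("alembic.ini")  # assumed location of the project's Alembic config
command.upgrade(cfg, "head")  # runs upgrade() above (and any later revisions)
# command.downgrade(cfg, "0046 remove long description")  # would run downgrade() above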
regisf/yablog
blog/templatetags/__init__.py
Python
bsd-3-clause
1,525
0.001311
# -*- coding: UTF-8 -*-
# YaBlog
# (c) Regis FLORET
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#     * Redistributions of source code must retain the above copyright
#       notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above copyright
#       notice, this list of conditions and the following disclaimer in the
#       documentation and/or other materials provided with the distribution.
#     * Neither the name of the <organization> nor the
#       names of its contributors may be used to endorse or promote products
#       derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL Regis FLORET BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
DSSG-paratransit/main_repo
Access_Analysis_Project/Scripts/dwellTimeAnalysis.py
Python
agpl-3.0
4,203
0.025696
import numpy as np import os import pandas as pd import statsmodels.formula.api as smf import sys # @params: takes mobaid codes string # @returns: list of mobaid strings def splitCode(x): if type(x) is str: codes = x.split(',') return codes else: return [] # @returns binary T/F if string code is in string/list x def containsCode(code, x): if code in x: return 1 else: return 0 # @param: takes char to be repeated c and number of repeats n # @returns: a string with c repeated n times def characterString(c, n): r = '' for i in range(n): r = r + c return r # to debug lambda functions def test(x): print(x) # combines boardings at the same stop def combineRows(data): # temp = data # debug # print(temp.columns.values) # debug # temp.drop('MobAids', 1 ,inplace=True) # debug data = data.groupby(['ServiceDate','Run','ETA','DwellTime','Activity']).sum() # 55-60 removes colums that h
ave all 0 data bool_column_df = data.apply(lambda x: (min(x) == 0) and (max(x) == 0)) bool_column_df.columns = ['values'] print(bool_column_df.values) # debug columns = bool_column_df[bool_column_df.values].index.values print(columns) # debug data.drop(columns,1,inplace=True) data.reset_index(inplace=True) # print(data.columns.values) # debug # print(data.equals(temp)) # debug return(data) # get data file from 1st argument da
ta = None try: data_path = os.path.join(os.pardir,'data',sys.argv[1]) data = pd.read_csv(data_path) except IOError: print('\n\tError: No file at ../data/' + sys.argv[1] + ' from ' + os.getcwd() + '\n') quit() except IndexError: print('\n\tdwellTimeAnalysis.py takes a csv file from\n\n\t\tmain_repo\data\n\n\tassuming that the file is run in the Python_Scripts folder\n') quit() # gathers needed data data.Activity = data.Activity.apply(lambda x: int(x)) # data = data.iloc(np.where((data.Activity == 0) | (data.Activity == 1))) data = data[['ServiceDate','Run','ETA','DwellTime','Activity', 'MobAids']].loc[(data.Activity == 0) | (data.Activity == 1)] allCodes = ['A','AM','AP','AR','BB','CA','CB','CI','CS','CT','H','H1','H2','HD','LI', 'MO','N','NR','OR','OX','PEL','PK','SA','SC','ST','SVC','U','V','V1','V2', 'WA','WG','WH','WK','WT','WX','0T'] data.MobAids = data.MobAids.apply(lambda x: splitCode(x)) # creates a column with binary values for each code for code in allCodes: data[code] = data.MobAids.apply(lambda x: containsCode(code, x)) # print(data) # debug # Attempt to fix an error caused in the regression by this 0T data.rename(columns={'0T' : 'OT'}, inplace=True) # splits data into boading and deboarding boardings = combineRows(data[data.Activity == 0]) # print(boardings) # debug deboardings = combineRows(data[data.Activity == 1]) # for debugging boardings.to_csv('../data/single_day_boardings.csv') deboardings.to_csv('../data/single_day_deboardings.csv') ################################################################### # Need to check with Matthew # # ----------------------------- # # is total dwell time for a stop is included for each client row? # # or is total dwell time sum is divided among client rows? # ################################################################### # regression for boarding dwell times x = ' + '.join(boardings.columns.values[6:]) y = 'DwellTime' reg_formula = y + ' ~ ' + x # print reg_formula # debug # boarding regression lmb = smf.ols(formula=reg_formula, data=boardings).fit() # deboarding regression lmd = smf.ols(formula=reg_formula, data=deboardings).fit() # writes data to file orig_stdout = sys.stdout output = open("../data/dwell_time_mobaid_regression.txt", 'w') sys.stdout = output top = characterString('#', 78) + '\n' bottom = characterString('-', 78) print top + characterString(' ', 34) + 'Boardings\n' + bottom print lmb.summary() print '\n\n' + top + characterString(' ', 33) + 'Deboardings\n' + bottom print lmd.summary() sys.stdout = orig_stdout output.close() #prints (debug purposes) print top + characterString(' ', 34) + 'Boardings\n' + bottom print lmb.summary() print '\n\n' + top + characterString(' ', 33) + 'Deboardings\n' + bottom print lmd.summary()
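A self-contained sketch of the formula-driven OLS pattern the script above relies on, using synthetic data rather than the paratransit export; the column names are illustrative only.

import pandas as pd
import statsmodels.formula.api as smf

# Toy frame: dwell time in seconds plus two dummy-coded mobility-aid flags.
toy = pd.DataFrame({
    'DwellTime': [60, 95, 40, 120, 75, 55],
    'WH':        [1,  1,  0,  1,   0,  0],
    'WK':        [0,  1,  0,  1,   1,  0],
})

# Build the "y ~ x1 + x2" formula string the same way the script joins columns.
formula = 'DwellTime ~ ' + ' + '.join(['WH', 'WK'])
fit = smf.ols(formula=formula, data=toy).fit()
print(fit.summary())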
SpaceGroupUCL/qgisSpaceSyntaxToolkit
esstoolkit/external/pyqtgraph/console/template_pyside2.py
Python
gpl-3.0
6,517
0.002302
# -*- coding: utf-8 -*- # Form implementation generated from reading ui file 'template.ui' # # Created: Sun Sep 18 19:19:10 2016 # by: pyside2-uic running on PySide2 2.0.0~alpha0 # # WARNING! All changes made in this file will be lost! from PySide2 import QtCore, QtGui, QtWidgets class Ui_Form(object): def setupUi(self, Form): Form.setObjectName("Form") Form.resize(694, 497) self.gridLayout = QtWidgets.QGridLayout(Form) self.gridLayout.setContentsMargins(0, 0, 0, 0) self.gridLayout.setSpacing(0) self.gridLayout.setObjectName("gridLayout") self.splitter = QtWidgets.QSplitter(Form) self.splitter.setOrientation(QtCore.Qt.Vertical) self.splitter.setObjectName("splitter") self.layoutWidget = QtWidgets.QWidget(self.splitter) self.layoutWidget.setObjectName("layoutWidget") self.verticalLayout = QtWidgets.QVBoxLayout(self.layoutWidget) self.verticalLayout.setContentsMargins(0, 0, 0, 0) self.verticalLayout.setObjectName("verticalLayout") self.output = QtWidgets.QPlainTextEdit(self.layoutWidget) font = QtGui.QFont() font.setFamily("Monospace") self.output.setFont(font) self.output.setReadOnly(True) self.output.setObjectName("output") self.verticalLayout.addWidget(self.output) self.horizontalLayout = QtWidgets.QHBoxLayout() self.horizontalLayout.setObjectName("horizontalLayout") self.input = CmdInput(self.layoutWidget) self.input.setObjectName("input") self.horizontalLayout.addWidget(self.input) self.historyBtn = QtWidgets.QPushButton(self.layoutWidget) self.historyBtn.setCheckable(True) self.historyBtn.setObjectName("historyBtn") self.horizontalLayout.addWidget(self.historyBtn) self.exceptionBtn = QtWidgets.QPushButton(self.layoutWidget) self.exceptionBtn.setCheckable(True) self.exceptionBtn.setObjectName("exceptionBtn") self.horizontalLayout.addWidget(self.exceptionBtn) self.verticalLayout.addLayout(self.horizontalLayout) self.historyList = QtWidgets.QListWidget(self.splitter) font = QtGui.QFont() font.setFamily("Monospace") self.historyList.setFont(font) self.historyList.setObjectName("historyList") self.exceptionGroup = QtWidgets.QGroupBox(self.splitter) self.exceptionGroup.setObjectName("exceptionGroup") self.gridLayout_2 = QtWidgets.QGridLayout(self.exceptionGroup) self.gridLayout_2.setSpacing(0) self.gridLayout_2.setContentsMargins(-1, 0, -1, 0) self.gridLayout_2.setObjectName("gridLayout_2") self.clearExceptionBtn = QtWidgets.QPushButton(self.exceptionGroup) self.clearExceptionBtn.setEnabled(False) self.clearExceptionBtn.setObjectName("clearExceptionBtn") self.gridLayout_2.addWidget(self.clearExceptionBtn, 0, 6, 1, 1) self.catchAllExceptionsBtn = QtWidgets.QPushButton(self.exceptionGroup) self.catchAllExceptionsBtn.setCheckable(True) self.catchAllExceptionsBtn.setObjectName("catchAllExceptionsBtn") self.gridLayout_2.addWidget(self.catchAllExceptionsBtn, 0, 1, 1, 1) self.catchNextExceptionBtn = QtWidgets.QPushButton(self.exceptionGroup) self.catchNextExceptionBtn.setCheckable(True) self.catchNextExceptionBtn.setObjectName("catchNextExceptionBtn") self.gridLayout_2.addWidget(self.catchNextExceptionBtn, 0, 0, 1, 1) self.onlyUncaughtCheck = QtWidgets.QCheckBox(self.exceptionGroup) self.onlyUncaughtCheck.setChecked(True) self.onlyUncaughtCheck.setObjectName("onlyUncaughtCheck") self.gridLayout_2.addWidget(self.onlyUncaughtCheck, 0, 4, 1, 1) self.exceptionStackList = QtWidgets.QListWidget(self.exceptionGroup) self.exceptionStackList.setAlternatingRowColors(True) self.exceptionStackList.setObjectName("exceptionStackList") self.gridLayout_2.addWidget(self.exceptionStackList, 2, 0, 1, 7) 
self.runSelectedFrameCheck = QtWidgets.QCheckBox(self.exceptionGroup) self.runSelectedFrameCheck.setChecked(True) self.runSelectedFrameCheck.setObjectName("runSelectedFrameCheck") self.gridLayout_2.addWidget(self.runSelectedFrameCheck, 3, 0, 1, 7) self.exceptionInfoLabel = QtWidgets.QLabel(self.exceptionGroup) self.exceptionInfoLabel.setObjectName("exceptionInfoLabel") self.gridLayout_2.addWidget(self.exceptionInfoLabel, 1, 0, 1, 7) spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum) self.gridLayout_2.addItem(spacerItem, 0, 5, 1, 1) self.label = QtWidgets.QLabel(self.exceptionGroup) self.label.setObjectName("label") self.gridLayout_2.addWidget(self.label, 0, 2, 1, 1) self.filterText = QtWidgets.QLineEdit(self.exceptionGroup) self.filterText.setObjectName("filterText") self.gridLayout_2.addWidget(self.filterText, 0, 3, 1, 1) self.gridLayout.addWidget(self.splitter, 0, 0, 1, 1) self.retranslateUi(Form) QtCore.QMetaObject.connectSlotsByName(Form) def retranslat
eUi(self, Form): Form.setWindowTitle(QtWidgets.QApplication.translate("Form", "Console", None, -1)) self.historyBtn.setText(QtWidgets.QApplication.translate("Form", "History..", None, -1)) self.exceptionBtn.setText(QtWidgets.QApplication.translate("Form", "Exceptions..", None, -1)) self.exceptionGroup.setTitle(QtWidgets.QApplication.translate("Form", "Exception Handling", None, -1)) self.clearExceptionBtn.setText(QtWidgets.QApplication.translate("Form", "Clea
r Exception", None, -1)) self.catchAllExceptionsBtn.setText(QtWidgets.QApplication.translate("Form", "Show All Exceptions", None, -1)) self.catchNextExceptionBtn.setText(QtWidgets.QApplication.translate("Form", "Show Next Exception", None, -1)) self.onlyUncaughtCheck.setText(QtWidgets.QApplication.translate("Form", "Only Uncaught Exceptions", None, -1)) self.runSelectedFrameCheck.setText(QtWidgets.QApplication.translate("Form", "Run commands in selected stack frame", None, -1)) self.exceptionInfoLabel.setText(QtWidgets.QApplication.translate("Form", "Exception Info", None, -1)) self.label.setText(QtWidgets.QApplication.translate("Form", "Filter (regex):", None, -1)) from .CmdInput import CmdInput
RRostami/Spiderpy
spiderpy/core.py
Python
gpl-3.0
2,986
0.045211
# # # import requests from bs4 import BeautifulSoup import re import os def all_links(URL,abs=False,session=None): '''Generator function for all links in a page. ARGS: URL -> url of the page abs -> (True) returns actual 'href's of each <a> tag (False) process each 'href' to generate the full link (WARNING: on false, skips the javascript links in page) RETS yields every link''' if(session): response=session.get(URL) else: response=requests.get(URL) mysoup=BeautifulSoup(response.text) for link in mysoup.find_all('a'): ret=link.get('href') if(abs): yield ret else: if(ret[0:10]=="javascript"): continue if(ret[0]=='/'): mat=re.match("(.+?\..+?\..{2,5})/",URL) print(mat.group(1)) ret = mat.group(1) + ret elif(ret[0] =='#'): ret = URL + ret elif(not re.match(".+?:.+",ret)): ret = re.sub("/[^/]+$", "/"+ret , URL) yield ret def save_file(URL,session=None,dir="",replace=False,max_size=None,altname=None,chunksize=2048): '''Saves a file from web to disk. ARGS: URL -> URL of the file to be downloaded session -> requests session if the file is only available in a session (typically login/auth/etc) dir -> directory of the saved file can be either reletive to the script or absoloute path. example: "archive/" saves files in a folder named archive replace -> if the file exists (True) replace it / (False) skip max_size -> max size of the file in Bytes , if the size exceeds this, download will be aborted altname -> name of the saved file ( if None: will attemp to retrive name from server, if fail: will attemp to pars the last part of URL into a file name , if fail: will name the file 'undefined' chunksize -> size of each chunk for writing to disk in Bytes (A.K.A buffer size) default is 2KB RETS: True -> File already Exists Number -> Bytes Written to disk False -> Download Failed (max_size exceeded) ''' if(altname==None): if(session): dlh = session.head(URL) else: dlh= requests.head(URL) if (dlh.status_code != 200): raise Exception(dlh.status_code) try: fileheader=dlh.headers['Content-Disposition'] mat=re.search('filename="(.*)"',fileheader) filename=mat.group(1) except: mat2=re.search("/([^/]+?)$",URL) if(mat2): filename=mat2.group(1) els
e: filename='undefined' else: filename=altname if (dir!="" and not os.path.exists(dir)): os.makedirs(dir) path=dir+filename if(replace==False and os.path.exists(path)) : return True else: if(session): dl = session.get(URL, stream=True) else: dl =
requests.get(URL, stream=True) if (dl.status_code != 200): raise Exception(dl.status_code) with open(path, 'wb') as f: for i,chunk in enumerate(dl.iter_content(chunksize)): f.write(chunk) if(max_size and f.tell()>max_size): dl.close() break; else: return f.tell() return False
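A hedged usage sketch for the two helpers above; the spiderpy.core import path mirrors the record's file layout, and the URLs are placeholders.

from spiderpy.core import all_links, save_file

# Walk every (absolutised) link on a page.
for link in all_links("https://example.com/"):
    print(link)

# Fetch a file into ./archive/, skipping it if it already exists and
# aborting once more than ~5 MB has been written.
result = save_file("https://example.com/report.pdf",
                   dir="archive/",
                   max_size=5 * 1024 * 1024)
if result is True:
    print("already on disk")
elif result is False:
    print("aborted: max_size exceeded")
else:
    print("wrote {} bytes".format(result))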
vipul-tm/DAG
dags-ttpl/subdags/utilization_kpi_subdag.py
Python
bsd-3-clause
17,641
0.03832
from airflow.models import DAG from airflow.operators.dummy_operator import DummyOperator from datetime import datetime, timedelta from airflow.operators import PythonOperator from airflow.hooks import RedisHook from airflow.models import Variable from airflow.hooks import MemcacheHook from etl_tasks_functions import get_time from etl_tasks_functions import subtract_time from subdags.utilization_utility import calculate_wimax_utilization from subdags.utilization_utility import calculate_cambium_ss_utilization from subdags.utilization_utility import calculate_radwin5k_ss_utilization from subdags.utilization_utility import calculate_radwin5k_bs_utilization from subdags.utilization_utility import calculate_radwin5kjet_ss_utilization from subdags.utilization_utility import calculate_radwin5kjet_bs_utilization from subdags.utilization_utility import calculate_radwin5k_bs_and_ss_dyn_tl_kpi from subdags.utilization_utility import calculate_backhaul_utilization from subdags.utilization_utility import calculate_ptp_utilization from subdags.utilization_utility import calculate_mrotek_utilization from subdags.utilization_utility import backtrack_x_min from subdags.utilization_utility import get_severity_values from subdags.utilization_utility import calculate_age from subdags.utilization_utility import calculate_severity from airflow.operators import MySqlLoaderOperator import logging import itertools import socket import random import traceback import time from pprint import pprint default_args = { 'owner': 'wireless', 'depends_on_past': False, 'start_date': datetime.now() - timedelta(minutes=2), 'email': ['[email protected]'], 'email_on_failure': False, 'email_on_retry': False, 'retries': 1, 'retry_delay': timedelta(minutes=1), 'provide_context': True, 'catchup': False, # 'queue': 'bash_queue', # 'pool': 'backfill', # 'priority_weight': 10, # 'end_date': datetime(2016, 1, 1), } redis_hook_util_10 = RedisHook(redis_conn_id="redis_hook_util_10") memc_con_cluster = MemcacheHook(memc_cnx_id = 'memc_cnx') vrfprv_memc_con = MemcacheHook(memc_cnx_id = 'vrfprv_memc_cnx') pub_memc_con = MemcacheHook(memc_cnx_id = 'pub_memc_cnx') redis_hook_static_5 = RedisHook(redis_conn_id="redis_hook_5") INSERT_HEADER = "INSERT INTO %s.performance_utilization" INSERT_TAIL = """ (machine_name,current_value,service_name,avg_value,max_value,age,min_value,site_name,data_source,critical_threshold,device_name,severity,sys_timestamp,ip_address,warning_threshold,check_timestamp,refer ) values (%(machine_name)s,%(current_value)s,%(service_name)s,%(avg_value)s,%(max_value)s,%(age)s,%(min_value)s,%(site_name)s,%(data_source)s,%(critical_threshold)s,%(device_name)s,%(severity)s,%(sys_timestamp)s,%(ip_address)s,%(warning_threshold)s,%(check_timestamp)s,%(refer)s) """ UPDATE_HEADER = "INSERT INTO %s.performance_utilizationstatus" UPDATE_TAIL = """ (machine_name,current_value,service_name,avg_value,max_value,age,min_value,site_name,data_source,critical_threshold,device_name,severity,sys_timestamp,ip_address,warning_threshold,check_timestamp,refer ) values (%(machine_name)s,%(current_value)s,%(service_name)s,%(avg_value)s,%(max_value)s,%(age)s,%(min_value)s,%(site_name)s,%(data_source)s,%(critical_threshold)s,%(device_name)s,%(severity)s,%(sys_timestamp)s,%(ip_address)s,%(warning_threshold)s,%(check_timestamp)s,%(refer)s) ON DUPLICATE KEY UPDATE machine_name = VALUES(machine_name),current_value = 
VALUES(current_value),age=VALUES(age),site_name=VALUES(site_name),critical_threshold=VALUES(critical_threshold),severity=VALUES(severity),sys_timestamp=VALUES(sys_timestamp),ip_address=VALUES(ip_address),warning_threshold=VALUES(warning_threshold),check_timestamp=VALUES(check_timestamp),refer=VALUES(refer) ""
" ERROR_DICT ={404:'Device not found yet',405:'No SS Connected to BS-BS is not s
kipped'} ERROR_FOR_DEVICE_OMITTED = [404] kpi_rules = eval(Variable.get("kpi_rules")) DEBUG = False sv_to_ds_mapping = {} #O7_CALC_Q = "calculation_q" O7_CALC_Q = "poller_queue" down_and_unresponsive_devices = eval(redis_hook_static_5.get("current_down_devices_all")) def process_utilization_kpi( parent_dag_name, child_dag_name, start_date, schedule_interval, celery_queue, ss_tech_sites, hostnames_ss_per_site, ss_name, utilization_attributes, config_sites): #here config site is list of all sites in system_config var utilization_kpi_subdag_dag = DAG( dag_id="%s.%s"%(parent_dag_name, child_dag_name), schedule_interval=schedule_interval, start_date=start_date, ) for service in utilization_attributes: sv_to_ds_mapping [service.get("service_name")] ={"data_source":service.get("data_source"),"sector_type":service.get("sector_type")} def get_calculated_ss_data(): ss_data = redis_hook_util_10.rget("calculated_ss_utilization_kpi") combined_site_data = {} for site_data in ss_data: site_data = eval(site_data) combined_site_data.update(site_data) return combined_site_data #To create SS dict def format_data(**kwargs): device_type = kwargs.get("params").get("technology") utilization_attributes = kwargs.get("params").get("attributes") machine_name = kwargs.get("params").get("machine_name") ss_kpi_dict = { 'site_name': 'unknown' , 'device_name': 'unknown', 'service_name': 'unknown', 'ip_address': 'unknown', 'severity': 'unknown', 'age': 'unknown', 'data_source': 'unknown', 'current_value': 'unknown', 'warning_threshold': 'unknown', 'critical_threshold': 'unknown', 'check_timestamp': 'unknown', 'sys_timestamp': 'unknown' , 'refer':'unknown', 'min_value':'unknown', 'max_value':'unknown', 'avg_value':'unknown', 'machine_name':'unknown' } ss_data =redis_hook_util_10.rget("calculated_utilization_%s_%s"%(device_type,machine_name)) cur_processing_time = backtrack_x_min(time.time(),300) + 120 # this is used to rewind the time to previous multiple of 5 value so that kpi can be shown accordingly ss_devices_list = [] for ss_device in ss_data: ss_device = eval(ss_device) hostname = ss_device.get('hostname') for service in ss_device.get('services'): data_source = sv_to_ds_mapping.get(service).get("data_source") pmp_type = sv_to_ds_mapping.get(service).get("sector_type") thresholds = get_severity_values(service) ss_kpi_dict['critical_threshold']=thresholds[0] ss_kpi_dict['data_source']=data_source ss_kpi_dict['site_name']=ss_device.get('site') #TODO: ok and unknown are only 2 sev for ss we can incluudethis in rules later ss_kpi_dict['service_name']= service ss_kpi_dict['machine_name']= machine_name ss_kpi_dict['check_timestamp']=cur_processing_time ss_kpi_dict['device_name']=ss_device.get('hostname') ss_kpi_dict['sys_timestamp']=cur_processing_time ss_kpi_dict['refer']=ss_device.get("%s_sector"%(pmp_type)) ss_kpi_dict['ip_address']=ss_device.get('ipaddress') ss_kpi_dict['warning_threshold']= thresholds[1] if not isinstance(ss_device.get(service),dict): #handling cur_value if it is greater than 100 cur_value=ss_device.get(service) if ss_device.get(service) and ss_device.get(service) != None: cur_value=ss_device.get(service) try: if isinstance(curr_value,float) and cur_value and cur_value > 100.00: cur_value = 100 except Exception: logging.error("Exception while handling above 100 entries") ss_kpi_dict['severity']= calculate_severity(service,ss_device.get(service)) ss_kpi_dict['age']= calculate_age(hostname,ss_kpi_dict['severity'],ss_device.get('device_type'),cur_processing_time,service) 
ss_kpi_dict['current_value']=cur_value ss_kpi_dict['avg_value']=cur_value ss_kpi_dict['min_value']=cur_value ss_kpi_dict['max_value']=cur_value if ss_kpi_dict['current_value'] != None: ss_devices_list.append(ss_kpi_dict.copy()) else: for data_source in ss_device.get(service): ds_values = ss_device.get(service) curr_value= ss_device.get(service).get(data_source) if isinstance(curr_value,str): try: curr_value=float(curr_v
BBN-Q/QGL
QGL/GSTTools.py
Python
apache-2.0
7,229
0.004288
''' Various tools to interface with pyGSTi for running GST experiments. Created on May 16, 2018 Original Author: Guilhem Ribeill Copyright 2018 Raytheon BBN Technologies Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ''' from .PulsePrimitives import * from .Cliffords import * from .BasicSequences.helpers import create_cal_seqs from .Compiler import compile_to_hardware from itertools import chain from random import choices PYGSTI_PRESENT = False try: from pygsti.objects.circuit import Circuit PYGSTI_PRESENT = True except: pass #Default mapping from pyGSTi naming convention to QGL gates. gst_gate_map = {"Gx": X90, "Gy": Y90, "Gi": Id} def gst_map_1Q(gst_list, qubit, qgl_map=gst_gate_map, append_meas=True): """ Helper function that takes an arbitrarily nested list of pygsti gatestrings and converts them into QGL sequences, keeping the same nesting of lists. Inputs: gst_list: GateString to convert, or possibly nested list of pyGSTi GateStrings. qubit: QGL qubit to apply the sequence to qgl_map: Dictionary that maps between pyGSTi "Gx" string to QGL pulse append_meas: Append a measurement to each sequence. Returns: QGL sequences, preserving the input list nesting (as a generator) """ if isinstance(gst_list, Circuit): gst_list = [gst_list] for item in gst_list: if isinstance(item, Circuit): mapped = map(lambda x: qgl_map[str(x)](qubit), item.tup) if append_meas: yield list(chain(mapped, [MEAS(qubit)])) else: yield list(mapped) elif isinstance(item, list): yield list(gst_map_1Q(item, qubit, qgl_map=qgl_map, append_meas=append_meas)) def gst_map_2Q(gst_list, qubits, qgl_map=None, append_meas=False): """ Helper function that takes an arbitrarily nested list of pygsti gatestrings and converts them into QGL sequences, keeping the same nesting of lists. Inputs: gst_list: GateString to convert, or possibly nested list of pyGSTi GateStrings. qubit: QGL qubit to apply the sequence to qgl_map: Dictionary that maps between pyGSTi "Gx" string to QGL pulse append_meas: Append a measurement to each sequence. Returns: QGL sequences, preserving the input list nesting (as a generator) """ if isinstance(gst_list, GateString): gst_list = [gst_list] for item in gst_list: if isinstance(item, GateString): mapped = map(lambda x: qgl_map[x], item.tup) if append_meas: yield list(chain(mapped, [reduce(lambda x,y: x*y, map(MEAS, qubits))])) else: yield list(mapped) elif isinstance(item, list): yield list(gst_map_2Q(item, qubit, qgl_map=qgl_map, append_meas=append_meas)) def create_gst_sequence_from_pygsti(gst_list, qubit, gate_map=gst_gate_map): """ Returns list of QGL sequences from a pyGSTi GateString list. See gst_map_1Q. The return value is a list of sequences that can be complied by QGL. 
""" return list(gst_map_1Q(gst_list, qubit, qgl_map=gate_map, append_meas=True)) def pygsti_to_cliffords(gst_seq): #Map from GST convention to cliffords cliff_map = {"{}": 0, "Gi": 1, "Gx": 2, "Gy": 5} #convert to dictionary of lambdas for compatibility with gst_map_1Q lambda_map = {k: lambda x, v=v: v for k, v in cliff_map.items()} return list(gst_map_1Q(gst_seq, None, qgl_map=lambda_map, append_meas=False)) def pauli_rand_clifford_circuit(gst_seq): def seqreduce(s): if not s: return 0 else: return reduce(lambda x,y: clifford_multiply(x,y), s) def inv_cliff(c): return inverse_clifford(clifford_mat(c, 1)) c_ps = [0, 2, 5, 8] c_seqs = pygsti_to_cliffords(gst_seq) r_seqs = [] for seq in c_seqs: if not seq: r_seqs.append([]) else: rand_pauli = choices(c_ps, k=len(seq)) inter = 0 bare = 0 rseq = [] for j in range(len(seq)): inter = clifford_multiply(clifford_multiply(inter, rand_pauli[j]), seq[j]) bare = clifford_multiply(bare, seq[j]) rseq.append(clifford_multiply(rand_pauli[j], seq[j])) recovery = clifford_multiply(inv_cliff(inter), bare) rseq[-1] = clifford_multiply(rseq[-1], recovery) r_seqs.append(rseq) all_ok = all((r == i for r, i in zip(map(seqreduce, r_seqs), map(seqreduce, c_seqs)))) assert all_ok, "Something went wrong when Pauli-frame randomizing!" return r_seqs def SingleQubitCliffordGST(qubit, pygsti_seq, pulse_library="Standard", randomized=False, num_cals=1
00, diac_compiled=True): pulse_library = pulse_library.upper() # QGL pulse libraries handle the Id pulse differently. In the standard # case, the Id is of finite length equal to all the other one-pulse # elements of the library. In the Atomic and DiAtomic cases, the ID is # of length 0 by default. In GST, we need access to both types of t
he ID # gate with the first experiment in any GST experiment equal to {} = # Id(length = 0). All other Id gates in the sequence should be of finite # length. So we'll modify the Clifford indexing here to make Id(length=0) # the first element in the library and Id(length=length) the second. if pulse_library == "STANDARD": #clifford_pulse = lambda x: clifford_seq(x, qubit) clifford_pulse = [clifford_seq(i, qubit) for i in range(24)] clifford_pulse.insert(0, Id(qubit, length=0.0)) elif pulse_library == "DIAC": #clifford_pulse = lambda x: DiAC(qubit, x, diac_compiled) clifford_pulse = [AC(qubit, i, diac_compiled) for i in range(24)] clifford_pulse.insert(1, Id(qubit)) elif pulse_library == "AC": #clifford_pulse = lambda x: AC(qubit, x) clifford_pulse = [AC(qubit, i) for i in range(24)] clifford_pulse.insert(1, Id(qubit)) raise ValueError("Pulse library must be one of 'standard', 'diac', or 'ac'. Got {} instead".format(pulse_library)) if randomized: seqs = pauli_rand_clifford_circuit(pygsti_seq) else: seqs = pygsti_to_cliffords(pygsti_seq) qgl_seqs = [] for seq in seqs: qgl_seqs.append([clifford_pulse[c] for c in seq]) qgl_seqs[-1].append(MEAS(qubit)) if num_cals != 0: qgl_seqs += create_cal_seqs((qubit, ), abs(num_cals)) metafile = compile_to_hardware(qgl_seqs, 'GST/GST') return metafile
Alwnikrotikz/marinemap
lingcod/common/uaparser/clientos.py
Python
bsd-3-clause
1,804
0.007206
""" Based on http://vaig.be/2009/03/getting-client-os-in-django.html """ import re def client_os(user_agent): ''' Context pr
ocessor for Django that provides operating system information base on HTTP user agent. A user agent looks like (line break added): "Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.6) \ Gecko/2009020409 Iceweasel/3.0.6 (Debian-3.0.6-1)" ''' # Mozilla/5.0 regex = '(?P<application_name>\w+)/(?P<application_version>[\d\.]+)' regex += ' \(' # X11 regex += '(?P<compatibility_flag>\w+)' regex += '; ' # U if "U;" in user_agent or
"MSIE" in user_agent: # some UA strings leave out the U; regex += '(?P<version_token>[\w .]+)' regex += '; ' # Linux i686 regex += '(?P<platform_token>[\w ._]+)' # anything else regex += '; .*' result = re.match(regex, user_agent) if result: result_dict = result.groupdict() full_platform = result_dict['platform_token'] platform_values = full_platform.split(' ') if platform_values[0] in ('Windows', 'Linux', 'Mac'): platform = platform_values[0] elif platform_values[1] in ('Mac',): # Mac is given as "PPC Mac" or "Intel Mac" platform = platform_values[1] else: platform = None else: # Total hack to avoid dealing with regex nightmares if 'mac' in user_agent.lower(): full_platform = "Intel Mac 10.6" platform = 'Mac' elif 'windows' in user_agent.lower(): full_platform = "Windows" platform = 'Windows' else: full_platform = None platform = None return { 'full_platform': full_platform, 'platform': platform, }
jzimbel/artist-expander
config.py
Python
mit
1,176
0.001701
import os


class Config(object):
    SPOTIPY_REDIRECT_URI = os.environ['SPOTIPY_REDIRECT_URI']
    SPOTIPY_CLIENT_ID = os.environ['SPOTIPY_CLIENT_ID']
    SPOTIPY_CLIENT_SECRET = os.environ['SPOTIPY_CLIENT_SECRET']
    SPOTIFY_ACCESS_SCOPE = 'playlist-modify-public playlist-modify-private playlist-read-private user-library-read'

    ###########
    # Options #
    ###########

    # TRACKS_PER_ARTIST
    #
    # Number of tracks per artist to add to the playlist.
    # I recommend 5 or less. Max is 10.
    TRACKS_PER_ARTIST = 3

    # COLLATE
    #
    # By default, the playlist will be ordered like:
    # - ARTIST A TRACK 1
    # ...
    # - ARTIST B TRACK 2
    # - ARTIST B TRACK 3
    # ...
    # if COLLATE is set to True, it will instead be ordered like so:
    # - ARTIST A TRACK 1
    # - ARTIST B TRACK 1
    # - ARTIST C TRACK 1
    # ...
    # - ARTIST Z TRACK 1
    # - ARTIST A TRACK 2
    # - ARTIST B TRACK 2
    # ...
    COLLATE = False

    # PUBLIC
    #
    # Default False. Set True to make your generated playlist public.
    PUBLIC = False
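Not part of the app above, but a tiny standalone illustration of what the COLLATE flag implies: interleaving per-artist track lists round-robin instead of appending them artist by artist.

from itertools import chain

tracks_by_artist = [
    ['A1', 'A2', 'A3'],
    ['B1', 'B2', 'B3'],
    ['C1', 'C2', 'C3'],
]

grouped = list(chain.from_iterable(tracks_by_artist))          # COLLATE = False order
collated = list(chain.from_iterable(zip(*tracks_by_artist)))   # COLLATE = True order

print(grouped)   # ['A1', 'A2', 'A3', 'B1', 'B2', 'B3', 'C1', 'C2', 'C3']
print(collated)  # ['A1', 'B1', 'C1', 'A2', 'B2', 'C2', 'A3', 'B3', 'C3']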
mikel-egana-aranguren/SADI-Galaxy-Docker
galaxy-dist/lib/galaxy/model/migrate/versions/0029_user_actions.py
Python
gpl-3.0
1,371
0.030635
""" This migration script adds a user actions table to Galaxy.
""" from sqlalchemy import * from migrate import * import datetime now = datetime.datetime.utcnow import logging log = logging.getLogger( __name__ ) metadata = MetaData() def display_migration_details(): print "" print "This migration script adds a user actio
ns table to Galaxy." print "" # New table to store user actions. UserAction_table = Table( "user_action", metadata, Column( "id", Integer, primary_key=True ), Column( "create_time", DateTime, default=now ), Column( "user_id", Integer, ForeignKey( "galaxy_user.id" ), index=True ), Column( "session_id", Integer, ForeignKey( "galaxy_session.id" ), index=True ), Column( "action", Unicode( 255 ) ), Column( "context", Unicode( 512 ) ), Column( "params", Unicode( 1024 ) ) ) def upgrade(migrate_engine): metadata.bind = migrate_engine display_migration_details() metadata.reflect() try: UserAction_table.create() except Exception, e: print str(e) log.debug( "Creating user_action table failed: %s" % str( e ) ) def downgrade(migrate_engine): metadata.bind = migrate_engine metadata.reflect() try: UserAction_table.drop() except Exception, e: print str(e) log.debug( "Dropping user_action table failed: %s" % str( e ) )
swoiow/iabe-tool
openshift.py
Python
mit
370
0.005405
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import os

import tornado.ioloop

try:
    import WebApp
except (ImportError, ImportWarning):
    import entire as WebApp

if __name__ == "__main__":
    ip = os.environ['OPENSHIFT_DIY_IP']
    port = int(os.environ['OPENSHIFT_DIY_PORT'])

    WebApp.application.listen(port, ip)
    tornado.ioloop.IOLoop.instance().start()
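The launcher above only works if WebApp (or the entire fallback module) exposes an application attribute that is a Tornado app. A minimal, assumed shape for such a module could look like this; the handler and route are placeholders.

import tornado.web

class MainHandler(tornado.web.RequestHandler):
    def get(self):
        self.write("hello from OpenShift")

# The launcher calls WebApp.application.listen(port, ip) on this object.
application = tornado.web.Application([
    (r"/", MainHandler),
])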
yuzie007/ph_analysis
ph_analysis/phonon_calculator.py
Python
mit
6,728
0.000297
#!/usr/bin/env python # -*- coding: utf-8 -*- from __future__ import print_function import os import shutil import time import subprocess import numpy as np from .phonopy_conf_creator import PhonopyConfCreator from vasp.poscar import Poscar from autotools import symlink_force class PhononCalculator(object): def __init__(self, directory_data="./", poscar_filename="POSCAR", poscar_average_filename=None, is_average_mass=False, dim_sqs=None, is_primitive=False, is_band=True, is_partial_dos=False, is_tetrahedron=False, is_tprop=False, mesh=None, nac=None): if dim_sqs is None: dim_sqs = np.array([1, 1, 1]) if mesh is None: mesh = np.array([1, 1, 1]) self._variables = None self._home = os.path.expanduser("~") self._phonopy = subprocess.check_output(["which", "phonopy"]).strip() print("phonopy_path:", self._phonopy) self._directory_data = directory_data self._poscar_filename = poscar_filename self._poscar_average_filename = poscar_average_filename self._is_average_mass = is_average_mass self.set_dim_sqs(dim_sqs) self._is_band = is_band self.set_is_tetrahedron(is_tetrahedron) self.set_is_partial_dos(is_partial_dos) self.set_is_tprop(is_tprop) self._is_primitive = is_primitive self._mesh = np.array(mesh) self._nac = nac def set_dim_sqs(self, dim_sqs): self._dim_sqs = dim_sqs def set_is_tetrahedron(self, is_tetrahedron): self._is_tetrahedron = is_tetrahedron def set_is_partial_dos(self, is_partial_dos): self._is_partial_dos = is_partial_dos def set_is_tprop(self, is_tprop): self._is_tprop = is_tprop def set_mesh(self, mesh): self._mesh = mesh def set_variables(self, variables): self._variables = variables def run(self): self.copy_files() self.create_phonopy_conf() conf_files = self.gather_conf_files() for conf_file in conf_files: self.run_phonopy(conf_file) def copy_files(self): dir_data = self._directory_data symlink_force(os.path.join(dir_data, 'writefc.conf'), 'writefc.conf') symlink_force(os.path.join(dir_data, 'POSCAR'), 'POSCAR') symlink_force(os.path.join(dir_data, 'POSCAR_ideal'), 'POSCAR_ideal') symlink_force(os.path.join(dir_data, 'FORCE_CONSTANTS'), 'FORCE_CONSTANTS') def create_phonopy_conf(self): directory_data = self._directory_data dim_sqs = self._dim_sqs variables = self._variables mesh = self._mesh.copy() print("directory_data:", directory_data) print("mesh:", mesh) spg_number = self.create_spg_number() # Get band path for the specific space group phonopy_conf_creator = PhonopyConfCreator( spg_num
ber, mesh=mesh, tmax=3000, dim_sqs=dim_sqs, is_average_mass=self._is_average_mass, is_primitive=self._is_primitive, band_points=101, poscar_name="POSCAR", # For getting the chemical symbols magmom_line=None, variables=variables, nac=self._nac, ) phonopy_conf_creator.run() def create_spg_number(self): """
spg_number is used to determine the primitive axis and band paths. """ if self._poscar_average_filename is not None: poscar_filename = self._poscar_average_filename else: poscar_filename = self._poscar_filename print('SPG number is searched from {}'.format(poscar_filename)) spg_number = Poscar(poscar_filename).get_symmetry_dataset()["number"] print("spg_number:", spg_number) return spg_number def gather_conf_files(self): conf_files = [ "dos_smearing.conf", ] if self._is_band: conf_files.append("band.conf") if self._is_tetrahedron: conf_files.append("dos_tetrahedron.conf") if self._is_partial_dos: conf_files.append("partial_dos_smearing.conf") if self._is_tetrahedron and self._is_partial_dos: conf_files.append("partial_dos_tetrahedron.conf") if self._is_tprop: conf_files.append("tprop.conf") return conf_files def run_phonopy(self, conf_file): root = os.getcwd() home = self._home phonopy = self._phonopy print("=" * 80) print(conf_file) print("=" * 80) dir_name = conf_file.replace(".conf", "_calc") log_file = conf_file.replace(".conf", ".log") if os.path.exists(dir_name): shutil.rmtree(dir_name) os.mkdir(dir_name) os.chdir(dir_name) for fn in [conf_file, "POSCAR", "FORCE_CONSTANTS", "BORN"]: if os.path.exists(os.path.join("..", fn)): os.symlink("../" + fn, fn) if os.path.exists(log_file): os.remove(log_file) time1 = time.time() with open(log_file, "w") as f: subprocess.call( [phonopy, conf_file, "-v"], stdout=f, ) time2 = time.time() dtime = time2 - time1 print("Time for calc.: {:12.6f} s".format(dtime)) if conf_file == "tprop.conf": subprocess.call( ["python", home + "/script/python/phonopy_tprop_arranger.py"] ) os.chdir(root) def main(): import argparse parser = argparse.ArgumentParser() parser.add_argument("-d", "--datadir", default="..", type=str, help="Data directory") parser.add_argument("--tetrahedron", action="store_true", help="Calculate using tetrahedron method.") parser.add_argument("--partial_dos", action="store_true", help="Calculate partial DOS.") parser.add_argument("--tprop", action="store_true", help="Calculate thermal properties.") args = parser.parse_args() phonon_analyzer = PhononCalculator( directory_data=args.datadir, is_tetrahedron=args.tetrahedron, is_partial_dos=args.partial_dos, is_tprop=args.tprop, ) phonon_analyzer.run() if __name__ == "__main__": main()
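A hedged sketch of driving the calculator above programmatically instead of through its argparse-based main(); the ../ data layout (POSCAR, POSCAR_ideal, FORCE_CONSTANTS, writefc.conf) is an assumption.

calc = PhononCalculator(
    directory_data="..",
    is_tetrahedron=True,   # also run dos_tetrahedron.conf
    is_partial_dos=True,   # also run partial_dos_smearing.conf
    is_tprop=False,
)
calc.run()  # symlinks inputs, writes phonopy .conf files, then runs phonopy per conf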
dimitri-justeau/niamoto-core
niamoto/data_providers/sql_provider/sql_occurrence_provider.py
Python
gpl-3.0
2,139
0
# coding: utf-8

import sqlalchemy as sa
import pandas as pd

from niamoto.data_providers.base_occurrence_provider import \
    BaseOccurrenceProvider
from niamoto.exceptions import MalformedDataSourceError


class SQLOccurrenceProvider(BaseOccurrenceProvider):
    """
    SQL occurrence provider. Instantiated with a sql query, that must return
    AT LEAST the following columns:
        id -> The provider's identifier for the occurrence.
        taxon_id -> The provider's taxon id for the occurrence.
        x -> The longitude of the occurrence (WGS84).
        y -> The latitude of the occurrence (WGS84).
    All the remaining column will be stored as properties.
    """

    REQUIRED_COLUMNS = set(['id', 'taxon_id', 'x', 'y'])

    def __init__(self, data_provider, occurrence_sql):
        super(SQLOccurrenceProvider, self).__init__(data_provider)
        self.occurrence_sql = occurrence_sql

    def get_provider_occurrence_dataframe(self):
        connection = sa.create_engine(self.data_provider.db_url).connect()
        df = pd.read_sql(self.occurrence_sql, connection, index_col='id')
        cols = set(list(df.columns) + ['id', ])
        inter = cols.intersection(self.REQUIRED_COLUMNS)
        if not inter == self.REQUIRED_COLUMNS:
            m = "The queried data does not contains the required columns " \
                "('id', 'taxon_id', 'x', 'y'), " \
                "queried data has: {}".format(cols)
            raise MalformedDataSourceError(m)
        if len(df) == 0:
            return df
        property_cols = cols.difference(self.REQUIRED_COLUMNS)
        if len(property_cols) > 0:
            properties = df[list(property_cols)].apply(
                lambda x: x.to_json(),
                axis=1
            )
        else:
            properties = '{}'
        df.drop(property_cols, axis=1, inplace=True)
        df['properties'] = properties
        location = df[['x', 'y']].apply(
            lambda x: "SRID=4326;POINT({} {})".format(x['x'], x['y']),
            axis=1
        )
        df['location'] = location
        df.drop(['x', 'y'], axis=1, inplace=True)
        return df
dolfandringa/AquaponicsModeler
AquaponicsModeler/model.py
Python
gpl-3.0
14,950
0.000401
# -*- coding: utf-8 -*- """ The :mod:`AquaponicsModeler.model` module contains all components to be used in models. All model components are classes that should inherit from :class:`BaseModelClass`. So far there are two groups of component types: containers and pumps. :class:`Containers <Container>` are compents that contain water and have water flowing in or out. They need to have another component before them in the model, so water can flow from one container to the other. As :class:`Containers <Container>` always need a source of water, the first component in the model is a :class:`Pump`. There are several types of pumps, but they all assume an infinite water source that they can pump from, and they pump into a :class:`Container`. """ import logging import collections import copy from PyElectronics.timers import AStable555 log = logging.getLogger("aquaponics.model") class _PARAM_TYPES: """Constant holding the different parameter types.""" MODEL = 'Model Component Parameter' INTEGER = 'Integer Parameter' FLOAT = 'Float Parameter' TEXT = 'Text Parameter' class BaseModelClass(object): """ A base class for the model that other objects inherit from. The BaseModelClass doesn't implement much except for general methods to get the parameters for a component and to manage the state while stepping through the model. The state is the main variable manipulated by the model. For :class:`Pump` it contains the on/off state, while for :class:`Containers <Container>` it contains the water volume of the container. """ _PARAMS = collections.OrderedDict() def __init__(self):
self.state = None def __str__(self): return self.__class__.__name__ def get_state(self): """ Get the current contents of this container. Returns: float: current state value """ return self.state @classmethod def getParameter
s(cls): """ Return the model parameters. Returns: collections.OrderedDict: The parameters for this class. """ log.debug('Getting parameters for class %s: %s' % (cls, cls._PARAMS)) return cls._PARAMS def step(self): """Step into the next iteration of the model.""" raise NotImplementedError("Please implement a step instance method") class SimpleContainer(BaseModelClass): """ A container in the aquaponics loop. Each container is a container/tank/basin/growbed/etc containing a volume of water, with possibly water flowing out into the next component and flowing into it from the previous container in the loop. The inflow speed of each container is determined by the outflow speed of the previous container. The outflow of each container only starts when in the treshold has been reached, and only if the contents of the container > 0 liters. """ _PARAMS = { 'previous': (_PARAM_TYPES.MODEL, 'previous'), 'outflow': (_PARAM_TYPES.FLOAT, 'outflow (l/min)'), 'start_content': (_PARAM_TYPES.INTEGER, 'start content (l)') } def __init__(self, previous, outflow, start_content=0): """ Args: previous (Container): The previous Container in the chain. outflow (float): The outflow speed of this container. threshold (int): The threshold contents after which the container outflow speed starts. start_content (int): The starting contents of the container. """ self.previous = previous self.outflow = outflow self.state = self.start_content = start_content def get_current_outflow_speed(self): """ Determine the current flow speed of water from this container. Returns: float: The current outflow speed. """ return self.outflow def get_current_inflow_speed(self): """ Determine the current speed of water flowing into this container. This is determined by the outflow speed of the previous container. Returns: float: The current inflow speed. """ return self.previous.get_current_outflow_speed() def step(self, time=10): """ Go through the next step of the simulation of this container. Args: time (int): The length of the next step in seconds. """ inflow = self.get_current_inflow_speed() outflow = self.get_current_outflow_speed() self.state += time / 60 * inflow - time / 60 * outflow class Container(SimpleContainer): _PARAMS = copy.deepcopy(SimpleContainer._PARAMS) _PARAMS['threshold']= (_PARAM_TYPES.INTEGER, 'dump threshold (l)') def __init__(self, previous, outflow, threshold, start_content=0): """ Args: previous (Container): The previous Container in the chain. outflow (float): The outflow speed of this container. threshold (int): The threshold contents after which the container outflow speed starts. start_content (int): The starting contents of the container. """ self.previous = previous self.outflow = outflow self.threshold = threshold self.state = self.start_content = start_content def get_current_outflow_speed(self): """ Determine the current flow speed of water from this container. Returns: float: The current outflow speed. """ if self.state >= self.threshold: return self.outflow else: return 0 class FloodDrainContainer(Container): """ This :class:`Container` will drain fully when the threshold has been reached. In other respects it works like other :class:`Containers <Container>` but for the way it drains. A container with a U-siphon or bell siphon at the end will only start draining when the waterlevel has reached a maximum. When that happens, suction makes sure that all water is drained from the container at the speed specified in outflow. 
""" def __init__(self, *args, **kwargs): super(FloodDrainContainer, self).__init__(*args, **kwargs) self.flooding = False def get_current_outflow_speed(self): """ Return the current outlflow speed. Outflow starts when self.threshold has been reached and will continue at self.outflow speed until the container is empty. Returns: float: The outflow speed of this :class:`Container` """ if (self.flooding is True and self.state > 0)\ or self.state >= self.threshold: self.flooding = True return self.outflow else: self.flooding = False return 0 class Pump(BaseModelClass): """ A general Pump object. It pumps water into the system (from an unlimited source) and has a constant outflow speed. It doesn't have contents (unlike containers for instance). The state attribute contains the on (1) or off (0) state of the pump, which is also what is plotted in the resulting graphs. """ _PARAMS = { 'outflow': (_PARAM_TYPES.FLOAT, 'outflow (l/min)'), } def __init__(self, outflow): """ Args: outflow (float): The speed at which the pump pumps. """ self.outflow = outflow self.state = 1 def get_current_outflow_speed(self): """ Return the pump speed of this pump. Returns: float: The outflow speed of this pump in L/min. """ return self.outflow def step(self, time=10): """ Go through the next step of the pump state and return that state. Args: time (int): The time in seconds for which the pump state should be returned. Returns: int: The state of the pump. 1=on 0=off. """ return self.state class WaterSource(BaseModelClass): """ A general Water Sou
SunWalter/Hard
ex11.py
Python
apache-2.0
231
0.025974
print ("How old a
re you?",) age = input() print ("How tall are you?",) height = input() pr
int ("How much do you weigh?",) weight = input() print ("So, you are %r years old, %r tall and %r heavy." %(age, height, weight))
olehermanse/sim_game
tests/path_fix.py
Python
mit
331
0.003021
#!/usr/bin/env python3 #
-*- coding: utf-8 -*- '''Hacky way to make sure imports work''' from os.path import abspath, dirname, realpath, join import sys # This allows imports to work, even if sim_game is not in python path: package_location = abspath(join(dirname(realpath(__file__)) , "..")) sys.path.insert(0, package_lo
cation)
achauvinhameau/netProbe
py-net-probe/database/__init__.py
Python
gpl-3.0
1,035
0
# -*- Mode: Python; python-indent-offset: 4 -*- # # Time-stamp: <2017-06-03 11:36:32 alex> # # -------------------------------------------------------------------- # PiProbe # Copyright (C) 2016-2017 Alexandre Chauvin Hameau <[email protected]> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gn
u.org/licenses/>. # -------------------------------------------------------------------- """ database package, redis and test modules """ from . import dbRedis fr
om . import dbTest
gangadharkadam/saloon_frappe
frappe/utils/response.py
Python
mit
4,943
0.025086
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors # MIT License. See license.txt from __future__ import unicode_literals import json import datetime import mimetypes import os import frappe from frappe import _ import frappe.model.document import frappe.utils import frappe.sessions import werkzeug.utils from werkzeug.local import LocalProxy from werkzeug.wsgi import wrap_file from werkzeug.wrappers import Response from werkzeug.exceptions import NotFound, Forbidden def report_error(status_code): if (status_code!=404 or frappe.conf.logging) and not frappe.local.flags.disable_traceback: frappe.errprint(frappe.utils.get_traceback()) response = build_response("json") response.status_code = status_code return response def build_response(response_type=None): if "docs" in frappe.local.response and not frappe.local.response.docs: del frappe.local.response["docs"] response_type_map = { 'csv': as_csv, 'download': as_raw, 'json': as_json, 'page': as_page, 'redirect': redirect } return response_type_map[frappe.response.get('type') or response_type]() def as_csv(): response = Response() response.headers[b"Content-Type"] = b"text/csv; charset: utf-8" response.headers[b"Content-Disposition"] = ("attachment; filename=\"%s.csv\"" % frappe.response['doctype'].replace(' ', '_')).encode("utf-8") response.data = frappe.response['result'] return response def as_raw(): response = Response() response.headers[b"Content-Type"] = frappe.response.get("content_type") or mimetypes.guess_type(frappe.response['filename'])[0] or b"application/unknown" response.headers[b"Content-Disposition"] = ("filename=\"%s\"" % frappe.response['filename'].replace(' ', '_')).encode("utf-8") response.data = frappe.response['filecontent'] return response def as_json(): make_logs() response = Response() if frappe.local.response.http_status_code: response.status_code = frap
pe.local.response['http_status_code'] del frappe.local.response['http_status_code'] response.headers[b"Content-Type"] = b"application/json; charset: utf-8" response.data = json.dumps(frappe.local.response, default=json_handler, separators=(',',':')) return response def make_logs(response = None): """make strings for msgprint
and errprint""" if not response: response = frappe.local.response if frappe.error_log: # frappe.response['exc'] = json.dumps("\n".join([cstr(d) for d in frappe.error_log])) response['exc'] = json.dumps([frappe.utils.cstr(d) for d in frappe.local.error_log]) if frappe.local.message_log: response['_server_messages'] = json.dumps([frappe.utils.cstr(d) for d in frappe.local.message_log]) if frappe.debug_log and frappe.conf.get("logging") or False: response['_debug_messages'] = json.dumps(frappe.local.debug_log) def json_handler(obj): """serialize non-serializable data for json""" # serialize date if isinstance(obj, (datetime.date, datetime.timedelta, datetime.datetime)): return unicode(obj) elif isinstance(obj, LocalProxy): return unicode(obj) elif isinstance(obj, frappe.model.document.BaseDocument): doc = obj.as_dict(no_nulls=True) return doc else: raise TypeError, """Object of type %s with value of %s is not JSON serializable""" % \ (type(obj), repr(obj)) def as_page(): """print web page""" from frappe.website.render import render return render(frappe.response['page_name'], http_status_code=frappe.response.get("http_status_code")) def redirect(): return werkzeug.utils.redirect(frappe.response.location) def download_backup(path): try: frappe.only_for(("System Manager", "Administrator")) except frappe.PermissionError: raise Forbidden(_("You need to be logged in and have System Manager Role to be able to access backups.")) return send_private_file(path) def send_private_file(path): path = os.path.join(frappe.local.conf.get('private_path', 'private'), path.strip("/")) if frappe.local.request.headers.get('X-Use-X-Accel-Redirect'): path = '/' + path response = Response() response.headers[b'X-Accel-Redirect'] = path else: filename = os.path.basename(path) filepath = frappe.utils.get_site_path(path) try: f = open(filepath, 'rb') except IOError: raise NotFound response = Response(wrap_file(frappe.local.request.environ, f)) response.headers.add(b'Content-Disposition', 'attachment', filename=filename.encode("utf-8")) response.headers[b'Content-Type'] = mimetypes.guess_type(filename)[0] or b'application/octet-stream' return response def handle_session_stopped(): response = Response("""<html> <body style="background-color: #EEE;"> <h3 style="width: 900px; background-color: #FFF; border: 2px solid #AAA; padding: 20px; font-family: Arial; margin: 20px auto"> Updating. We will be back in a few moments... </h3> </body> </html>""") response.status_code = 503 response.content_type = 'text/html' return response
indianajohn/ycmd
ycmd/tests/python/testdata/goto_file1.py
Python
gpl-3.0
31
0
from got
o_file2 import foo fo
o
westurner/pyleset
test/test_pyleset.py
Python
mit
267
0.003745
""
" Tests for `pyleset` module. """ import pytest from pyleset import pyleset class TestPyleset(object): @classmethod def setup_class(cls): pass def test_something
(self): pass @classmethod def teardown_class(cls): pass
JT5D/Alfred-Popclip-Sublime
Sublime Text 2/JsFormat/libs/jsbeautifier/tests/testjsbeautifier.py
Python
gpl-2.0
64,176
0.013246
#!/usr/bin/env python # -*- coding: utf-8 -*- import re import unittest import jsbeautifier class TestJSBeautifier(unittest.TestCase): def test_unescape(self): # Test cases contributed by <chrisjshull on GitHub.com> test_fragment = self.decodesto bt = self.bt bt('"\\\\s"'); # == "\\s" in the js source bt("'\\\\s'"); # == '\\s' in the js source bt("'\\\\\\s'"); # == '\\\s' in the js source bt("'\\s'"); # == '\s' in the js source bt('"•"'); bt('"—"'); bt('"\\x41\\x42\\x43\\x01"', '"\\x41\\x42\\x43\\x01"'); bt('"\\u2022"', '"\\u2022"'); bt('a = /\s+/') #bt('a = /\\x41/','a = /A/') bt('"\\u2022";a = /\s+/;"\\x41\\x42\\x43\\x01".match(/\\x41/);','"\\u2022";\na = /\s+/;\n"\\x41\\x42\\x43\\x01".match(/\\x41/);') bt('"\\x22\\x27",\'\\x22\\x27
\',"\\x5c",\'\\x5c\',"\\xff and \\xzz","unicode \\u0000 \
\u0022 \\u0027 \\u005c \\uffff \\uzzzz"', '"\\x22\\x27", \'\\x22\\x27\', "\\x5c", \'\\x5c\', "\\xff and \\xzz", "unicode \\u0000 \\u0022 \\u0027 \\u005c \\uffff \\uzzzz"'); self.options.unescape_strings = True bt('"\\x41\\x42\\x43\\x01"', '"ABC\\x01"'); bt('"\\u2022"', '"\\u2022"'); bt('a = /\s+/') bt('"\\u2022";a = /\s+/;"\\x41\\x42\\x43\\x01".match(/\\x41/);','"\\u2022";\na = /\s+/;\n"ABC\\x01".match(/\\x41/);') bt('"\\x22\\x27",\'\\x22\\x27\',"\\x5c",\'\\x5c\',"\\xff and \\xzz","unicode \\u0000 \\u0022 \\u0027 \\u005c \\uffff \\uzzzz"', '"\\"\'", \'"\\\'\', "\\\\", \'\\\\\', "\\xff and \\xzz", "unicode \\u0000 \\" \' \\\\ \\uffff \\uzzzz"'); self.options.unescape_strings = False def test_beautifier(self): test_fragment = self.decodesto bt = self.bt bt(''); bt('return .5'); test_fragment(' return .5'); bt('a = 1', 'a = 1'); bt('a=1', 'a = 1'); bt("a();\n\nb();", "a();\n\nb();"); bt('var a = 1 var b = 2', "var a = 1\nvar b = 2"); bt('var a=1, b=c[d], e=6;', 'var a = 1,\n b = c[d],\n e = 6;'); bt('a = " 12345 "'); bt("a = ' 12345 '"); bt('if (a == 1) b = 2;', "if (a == 1) b = 2;"); bt('if(1){2}else{3}', "if (1) {\n 2\n} else {\n 3\n}"); bt('if(1||2);', 'if (1 || 2);'); bt('(a==1)||(b==2)', '(a == 1) || (b == 2)'); bt('var a = 1 if (2) 3;', "var a = 1\nif (2) 3;"); bt('a = a + 1'); bt('a = a == 1'); bt('/12345[^678]*9+/.match(a)'); bt('a /= 5'); bt('a = 0.5 * 3'); bt('a *= 10.55'); bt('a < .5'); bt('a <= .5'); bt('a<.5', 'a < .5'); bt('a<=.5', 'a <= .5'); bt('a = 0xff;'); bt('a=0xff+4', 'a = 0xff + 4'); bt('a = [1, 2, 3, 4]'); bt('F*(g/=f)*g+b', 'F * (g /= f) * g + b'); bt('a.b({c:d})', "a.b({\n c: d\n})"); bt('a.b\n(\n{\nc:\nd\n}\n)', "a.b({\n c: d\n})"); bt('a=!b', 'a = !b'); bt('a?b:c', 'a ? b : c'); bt('a?1:2', 'a ? 1 : 2'); bt('a?(b):c', 'a ? (b) : c'); bt('x={a:1,b:w=="foo"?x:y,c:z}', 'x = {\n a: 1,\n b: w == "foo" ? x : y,\n c: z\n}'); bt('x=a?b?c?d:e:f:g;', 'x = a ? b ? c ? d : e : f : g;'); bt('x=a?b?c?d:{e1:1,e2:2}:f:g;', 'x = a ? b ? c ? 
d : {\n e1: 1,\n e2: 2\n} : f : g;'); bt('function void(void) {}'); bt('if(!a)foo();', 'if (!a) foo();'); bt('a=~a', 'a = ~a'); bt('a;/*comment*/b;', "a; /*comment*/\nb;"); bt('a;/* comment */b;', "a; /* comment */\nb;"); test_fragment('a;/*\ncomment\n*/b;', "a;\n/*\ncomment\n*/\nb;"); # simple comments don't get touched at all bt('a;/**\n* javadoc\n*/b;', "a;\n/**\n * javadoc\n */\nb;"); test_fragment('a;/**\n\nno javadoc\n*/b;', "a;\n/**\n\nno javadoc\n*/\nb;"); bt('a;/*\n* javadoc\n*/b;', "a;\n/*\n * javadoc\n */\nb;"); # comment blocks detected and reindented even w/o javadoc starter bt('if(a)break;', "if (a) break;"); bt('if(a){break}', "if (a) {\n break\n}"); bt('if((a))foo();', 'if ((a)) foo();'); bt('for(var i=0;;) a', 'for (var i = 0;;) a'); bt('for(var i=0;;)\na', 'for (var i = 0;;)\n a'); bt('a++;', 'a++;'); bt('for(;;i++)a()', 'for (;; i++) a()'); bt('for(;;i++)\na()', 'for (;; i++)\n a()'); bt('for(;;++i)a', 'for (;; ++i) a'); bt('return(1)', 'return (1)'); bt('try{a();}catch(b){c();}finally{d();}', "try {\n a();\n} catch (b) {\n c();\n} finally {\n d();\n}"); bt('(xx)()'); # magic function call bt('a[1]()'); # another magic function call bt('if(a){b();}else if(c) foo();', "if (a) {\n b();\n} else if (c) foo();"); bt('switch(x) {case 0: case 1: a(); break; default: break}', "switch (x) {\n case 0:\n case 1:\n a();\n break;\n default:\n break\n}"); bt('switch(x){case -1:break;case !y:break;}', 'switch (x) {\n case -1:\n break;\n case !y:\n break;\n}'); bt('a !== b'); bt('if (a) b(); else c();', "if (a) b();\nelse c();"); bt("// comment\n(function something() {})"); # typical greasemonkey start bt("{\n\n x();\n\n}"); # was: duplicating newlines bt('if (a in b) foo();'); bt('var a, b;'); # bt('var a, b'); bt('{a:1, b:2}', "{\n a: 1,\n b: 2\n}"); bt('a={1:[-1],2:[+1]}', 'a = {\n 1: [-1],\n 2: [+1]\n}'); bt('var l = {\'a\':\'1\', \'b\':\'2\'}', "var l = {\n 'a': '1',\n 'b': '2'\n}"); bt('if (template.user[n] in bk) foo();'); bt('{{}/z/}', "{\n {}\n /z/\n}"); bt('return 45', "return 45"); bt('If[1]', "If[1]"); bt('Then[1]', "Then[1]"); bt('a = 1e10', "a = 1e10"); bt('a = 1.3e10', "a = 1.3e10"); bt('a = 1.3e-10', "a = 1.3e-10"); bt('a = -1.3e-10', "a = -1.3e-10"); bt('a = 1e-10', "a = 1e-10"); bt('a = e - 10', "a = e - 10"); bt('a = 11-10', "a = 11 - 10"); bt("a = 1;// comment", "a = 1; // comment"); bt("a = 1; // comment", "a = 1; // comment"); bt("a = 1;\n // comment", "a = 1;\n// comment"); bt('a = [-1, -1, -1]'); # The exact formatting these should have is open for discussion, but they are at least reasonable bt('a = [ // comment\n -1, -1, -1\n]'); bt('var a = [ // comment\n -1, -1, -1\n]'); bt('a = [ // comment\n -1, // comment\n -1, -1\n]'); bt('var a = [ // comment\n -1, // comment\n -1, -1\n]'); bt('o = [{a:b},{c:d}]', 'o = [{\n a: b\n}, {\n c: d\n}]'); bt("if (a) {\n do();\n}"); # was: extra space appended bt("if (a) {\n// comment\n}else{\n// comment\n}", "if (a) {\n // comment\n} else {\n // comment\n}"); # if/else statement with empty body bt("if (a) {\n// comment\n// comment\n}", "if (a) {\n // comment\n // comment\n}"); # multiple comments indentation bt("if (a) b() else c();", "if (a) b()\nelse c();"); bt("if (a) b() else if c() d();", "if (a) b()\nelse if c() d();"); bt("{}"); bt("{\n\n}"); bt("do { a(); } while ( 1 );", "do {\n a();\n} while (1);"); bt("do {} while (1);"); bt("do {\n} while (1);", "do {} while (1);"); bt("do {\n\n} while (1);"); bt("var a = x(a, b, c)"); bt("delete x if (a) b();", "delete x\nif (a) b();"); bt("delete x[x] if (a) b();", "delete 
x[x]\nif (a) b();"); bt("for(var a=1,b=2)d", "for (var a = 1, b = 2) d"); bt("for(var a=1,b=2,c=3) d", "for (var a = 1, b = 2, c = 3) d"); bt("for(var a=1,b=2,c=3;d<3;d++)\ne", "for (var a = 1, b = 2, c = 3; d < 3; d++)\n e"); bt("function x(){(a||b).c()}", "function x() {\n (a || b).c()\n}"); bt("function x(){return - 1}", "function x() {\n return -1\n}"); bt("function x(){return ! a}", "function x() {\n return !a\n}"); # a common snippet in jQuery plugins bt("settings = $.extend({},defaul
masahir0y/buildroot-yamada
support/testing/tests/package/test_python_subprocess32.py
Python
gpl-2.0
348
0
from tests.package.test_python import TestPythonPackageBase class TestPythonPy2Subprocess32(TestPythonPackageBase): __test__ = True
config = TestPythonPackageBase.config + \ """ BR2_PACKAGE_PYTHON=y BR2_PACKAGE_PYTHON_SUBPROCESS32=y """ sample_scripts = ["tests/package/sample_python_subprocess32.py"]
tmerrick1/spack
lib/spack/spack/test/svn_fetch.py
Python
lgpl-2.1
3,303
0
############################################################################## # Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC. # Produced at the Lawrence Livermore National Laboratory. # # This file is part of Spack. # Created by Todd Gamblin, [email protected], All rights reserved. # LLNL-CODE-647188 # # For details, see https://github.com/spack/spack # Please also see the NOTICE and LICENSE files for our notice and the LGPL. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License (as # published by the Free Software Foundation) version 2.1, February 1999. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and # conditions of the GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ############################################################################## import os import pytest from llnl.util.filesystem import touch, working_dir import spack.repo import spack.config from spack.spec import Spec from spack.version import ver from spack.util.executable import which pytestmark = pytest.mark.skipif( not which('svn'), reason='requires subversion to be installed') @pytest.mark.parametrize("type_of_test", ['default', 'rev0']) @pytest.mark.parametrize("secure", [True, False]) def test_fetch( type_of_test, secure, mock_svn_repository, config, mutable_mock_packages ): """Tries to: 1. Fetch the repo using a fetch strategy constructed with supplied args (they depend on type_of_test). 2. Check if the test_file is in the checked out repository. 3. Assert that the repository is at the revision supplied. 4. Add and remove some files, then reset the repo, and ensure it's all there again. """ # Retrieve the right test parameters t = mock_svn_r
epository.checks[type_of_test]
h = mock_svn_repository.hash # Construct the package under test spec = Spec('svn-test') spec.concretize() pkg = spack.repo.get(spec) pkg.versions[ver('svn')] = t.args # Enter the stage directory and check some properties with pkg.stage: with spack.config.override('config:verify_ssl', secure): pkg.do_stage() with working_dir(pkg.stage.source_path): assert h() == t.revision file_path = os.path.join(pkg.stage.source_path, t.file) assert os.path.isdir(pkg.stage.source_path) assert os.path.isfile(file_path) os.unlink(file_path) assert not os.path.isfile(file_path) untracked_file = 'foobarbaz' touch(untracked_file) assert os.path.isfile(untracked_file) pkg.do_restage() assert not os.path.isfile(untracked_file) assert os.path.isdir(pkg.stage.source_path) assert os.path.isfile(file_path) assert h() == t.revision
KawashiroNitori/Anubis
anubis/util/domainjob.py
Python
gpl-3.0
765
0
import logging from anubis.model import builtin from anub
is.model import domain from anubis.util import argmethod _logger = logging.getLogger(__name__) def wrap(m
ethod): async def run(): _logger.info('Built in domains') for ddoc in builtin.DOMAINS: _logger.info('Domain: {0}'.format(ddoc['_id'])) await method(ddoc['_id']) _logger.info('User domains') ddocs = domain.get_multi(fields={'_id': 1}) async for ddoc in ddocs: _logger.info('Domain: {0}'.format(ddoc['_id'])) await method(ddoc['_id']) if method.__module__ == '__main__': argmethod._methods[method.__name__] = method argmethod._methods[method.__name__ + '_all'] = run() return method
ugoertz/django-familio
genealogio/migrations/0024_auto_20160316_2039.py
Python
bsd-3-clause
3,111
0.003214
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import migrations, models import partialdate.fields class Migration(migrations.Migration): dependencies = [ ('genealogio', '0023_auto_20160303_2105'), ] operations = [ migrations.AlterField( model_name='event', name='date', field=partialdate.fields.PartialDateField(default='', help_text='Datum im Format JJJJ-MM-TT (Teilangaben m\xf6glich)', verbose_name='Datum', blank=True), ), migrations.AlterField( model_name='family', name='end_date', field=partialdate.fields.PartialDateField(default='', help_text='Datum im Format JJJJ-MM-TT (Teilangaben m\xf6glich)', verbose_name='Enddatum', blank=True), ), migrations.AlterField( model_name='family', name='start_date', field=partialdate.fields.PartialDateField(default='', help_text='Datum im Format JJJJ-MM-TT (Teilangaben m\xf6glich)', verbose_name='Anfangsdatum', blank=True), ), migrations.AlterField( model_name='person', name='datebirth', field=partialdate.fields.PartialDateField(default='', help_text='Datum im Format JJJJ-MM-TT (Teilangaben m\xf6glich)', verbose_name='Geburtsdatum', blank=True), ), migrations.AlterField( model_name='person', name='datedeath', field=partialdate.fields.PartialDateField(default='', help_text='Datum im Format JJJJ-MM-TT (Teilangaben m\xf6glich)', verbose_name='Todesdatum', blank=True), ), migrations.AlterField( model_name='personplace', name='end', field=partialdate.fields.PartialDateField(default='', help_text='Datum im Format JJJJ-MM-TT (Teilangaben m\xf6glich)', verbose_name='Ende', blank=True), ), migrations.AlterField( model_name='personplace', name='start', field=partialdate.fields.PartialDateField(default='', help_text='Datum im Format JJJJ-MM-TT (Teilangaben m\xf6glich)', verbose_name='Beginn', blank=True), ), migrations.AlterField( model_name='timelineitem', name='description', field=models.TextField(default='', help_text='Wird beim pdf-Export verwendet, kann als ReST formattiert werden, mit Links auf Objekte der Datenbank
(siehe Dokumentation).', verbose_name='Beschreibung', blank=True), ), migrations.AlterField( model_name='timelineitem', name='end_date', field=partialdate.fields.PartialDateField(default='', hel
p_text='Datum im Format JJJJ-MM-TT (Teilangaben m\xf6glich); kann freibleiben', verbose_name='Enddatum', blank=True), ), migrations.AlterField( model_name='timelineitem', name='start_date', field=partialdate.fields.PartialDateField(default='', help_text='Datum im Format JJJJ-MM-TT (Teilangaben m\xf6glich)', verbose_name='Startdatum', blank=True), ), ]
akranga/mafia-serverless
game/day.py
Python
apache-2.0
979
0.015322
import os, sys # to read depend
encies from ./lib directory script_dir = os.path.dirname( os.path.realpath(__
file__) ) sys.path.insert(0, script_dir + os.sep + "lib") import logging, boto3, json, random # for dynamodb filter queries from boto3.dynamodb.conditions import Key, Attr # setup log level to DEBUG log = logging.getLogger() log.setLevel(logging.DEBUG) # initialize DynamoDB client dynamodb = boto3.resource('dynamodb') table = dynamodb.Table(os.environ['DYNAMO_TABLE']) # During the day players are pointing to each other to blame for murder in the night def handler(event, context): return response( {"Message": "Welcome to the Serverless Workshop fully powered by AWS Lambda elastic cloud computing service"}, event) def response(body, event, code=200): if 'resource' in event and 'httpMethod' in event: return { 'statusCode': code, 'headers': {}, 'body': json.dumps(body, indent=4, separators=(',', ':')) } return body
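A hedged sketch of the API Gateway-style response shape that the response() helper above builds when the event carries 'resource' and 'httpMethod'; build_api_gateway_response is a hypothetical standalone name.

import json


def build_api_gateway_response(body, code=200):
    """Shape expected by an API Gateway Lambda proxy integration."""
    return {
        "statusCode": code,
        "headers": {},
        "body": json.dumps(body, indent=4, separators=(",", ":")),
    }


if __name__ == "__main__":
    print(build_api_gateway_response({"Message": "hello"}))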
fredericlepied/os-net-config
os_net_config/openstack/common/importutils.py
Python
apache-2.0
2,368
0
# Copyright 2011 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Import related utilities and helper functions. """ import sys import traceback def import_class(import_str): """Returns a class from a string including module and class.""" mod_str, _sep, class_str = import_str.rpartition('.') __import__(mod_str) try: return getattr(sys.modules[mod_str], class_str) except AttributeError: raise ImportError('Class %s cannot be found (%s)' % (class_str, traceback.format_exception(*sys.exc_info()))) def import_object(import_str, *args, **kwargs): """Import a class and return an instance of it.""" return import_class(import_str)(*args, **kwargs) def import_object_ns(name_space, import_str, *args, **kwargs): """Tries to import object
from default namespace. Imports a class and returns an instance of it, first by trying to find the class in a default namespace, then falling back to a full path if not found in the default namespace. """ import_value = "%s.%s" % (name_space, import_str) try: return import_class(import_value)(*args, **kwargs) except ImportError: return import_class(import_str)(*args, **kwargs) def import_module(import_str): """Im
port a module.""" __import__(import_str) return sys.modules[import_str] def import_versioned_module(version, submodule=None): module = 'os_net_config.v%s' % version if submodule: module = '.'.join((module, submodule)) return import_module(module) def try_import(import_str, default=None): """Try to import a module and if it fails return default.""" try: return import_module(import_str) except ImportError: return default
fbr1/textmining-eac
main.py
Python
mit
3,609
0.019119
import numpy as np import scipy.cluster.hierarchy as hr import scipy.spatial as spa import clustering import matplotlib.pyplot as plt from sklearn.cluster import AgglomerativeClustering import filter class textMiningEac: def __init__(self,k,N,low,high=0): self.k = k # Read data from a file [Temporary] #data = np.genfromtxt('iris.data',delimiter=',') #temp= spa.distance.pdist(data,'euclidean') #self.D = spa.distance.squareform(temp) self.D,self.tweets,self.words,self.freq = filter.filtertweets() # Compute the co-association matrix self.loadEAC(N,low,high) def loadEAC(self,N,low,high=0): """ Rebuilds the co-association matrix """ m,n = self.D.shape coasocMatrix = clustering.EAC(self.D,N,low,high) print(coasocMatrix) self.EAC_D = np.ones(n) - coasocMatrix def startPAM(self): """ Runs PAM on the EAC distance matrix """ (a,b,self.labels) = clustering.PAM(self.EAC_D, self.k,True) return self.labels def startHierarchical(self): """ Runs hierarchical clustering on the EAC distance matrix """ z = AgglomerativeClustering(n_clusters=self.k, linkage='ward').fit(self.EAC_D) self.labels = z.labels_ return self.labels def getClustersTweets(self): """ Assigns clusters based on the frequency of word occurrences """ labelsTweets = np.zeros(len(self.tweets),dtype=np.int) for i in range(len(self.tweets)): acum = np.zeros(2) for j in range(len(self.labels)): # If the word appears in the tweet if(self.words[j] in self.tweets[i]): # Accumulate the value in the accumulator for that cluster index acum[self.labels[j]] += self.freq[j] # Assign the cluster with the highest accumulated value labelsTweets[i] = np.argmax(acum) lista = labelsTweets.tolist() try: saveFile = open('clustered.csv','w') for i in range(len(self.tweets)): saveFile.write(str(lista[i])+': '+' '.join(self.tweets[i])+'\n') saveFile.close() except Exception as e: print("error: {0}".format(e)) return labelsTweets def getPrecisionIris(self): """ Test method. Computes a rough accuracy estimate. Not reliable. """ # Read the original clusters originalClusters = np.genfromtxt('orCL.data',delimiter=',',dtype=None) results ={} j=0 for i in range(50,151,50): # Find the most frequent cluster unique, counts = np.unique(self.labels[i-50:i], return_counts=True) print(unique) print(counts) maxvalue = np.amax(counts) result
s[j]=maxvalue/50 j=j+1 print("Setosa= " + '%.2f' % results[0] + "\nVersicolor= " + '%.2f' % results[1] + "\nVirginica= " + '%.2f' % results[2]) def getSilhouette(self): """ Plots the silhouet
te """ clustering.Silhouette(self.D,self.labels,self.k)
RicterZ/pyprint
pyprint/views/background.py
Python
mit
3,116
0.001284
import tornado.web from datetime import date from sqlalchemy.orm.exc import NoResultFound from pyprint.handler import BaseHandler from pyprint.models import User, Link, Post class SignInHandler(BaseHandler): def get(self): return self.background_render('login.html') def post(self): username = self.get_argument('username', None) password = self.get_argument('password', None) if username and password: try: user = self.orm.query(User).filter(User.username == username).one() except NoResultFound: return self.redirect('/login') if user.check(password): self.set_secure_cookie('username', user.username) self.redirect('/kamisama/posts') return self.redirect('/login') class ManagePostHandler(BaseHandler): @tornado.web.authenticated def get(self):
posts = self.orm.query(Post.title, Post.id).order_by(Post.id.desc()).all() self.background_render('posts.html', posts=posts) @tornado.web.authenticated def post(self): action = self.get_argument('action', None) if action == 'del': post_id = self.get_argument('id', 0)
if post_id: post = self.orm.query(Post).filter(Post.id == post_id).one() self.orm.delete(post) self.orm.commit() class AddPostHandler(BaseHandler): @tornado.web.authenticated def get(self): self.background_render('add_post.html', post=None) @tornado.web.authenticated def post(self): title = self.get_argument('title', None) content = self.get_argument('content', None) tags = self.get_argument('tags', '').strip().split(',') if not title or not content: return self.redirect('/kamisama/posts/add') post = self.orm.query(Post.title).filter(Post.title == title).all() if post: return self.write('<script>alert("Title has already existed");window.history.go(-1);</script>') self.orm.add(Post(title=title, content=content, created_time=date.today())) self.orm.commit() return self.redirect('/kamisama/posts') class AddLinkHandler(BaseHandler): @tornado.web.authenticated def get(self): links = self.orm.query(Link).all() self.background_render('links.html', links=links) @tornado.web.authenticated def post(self): action = self.get_argument('action', None) if action == 'add': name = self.get_argument('name', '') url = self.get_argument('url', '') if not name or not url: return self.redirect('/kamisama/links') self.orm.add(Link(name=name, url=url)) self.orm.commit() return self.redirect('/kamisama/links') elif action == 'del': link_id = self.get_argument('id', 0) if link_id: link = self.orm.query(Link).filter(Link.id == link_id).one() self.orm.delete(link) self.orm.commit()
ArthurStart/arthurstart.github.io
GenerateUpdateLines.py
Python
mit
889
0.013498
import datetime def suffix(d): return 'th' if 11<=d<=13 else {1:'st',2:'nd',3:'rd'}.get(d%10, 'th') def custom_strftime(format, t): return t.strftime(format).replace('{S}', str(t.day) + suffix(t.day)) print "Welcome to GenerateUpdateLines, the nation's favourite automatic update line generator." start = int(raw_input("Enter initial day number: ")) stop = int(raw_input("Enter final day number: ")) t0 = datetime.date(2018, 3, 24) for d in range(start, stop+1): date = t0 + datetime.timedelta(d-1) print "| "+str(d)+" | "+custom_strftime("%a {S} %B", date)+" | | |" # from datetime import datetime as dt # # def suffix(d): # return 'th' if 11<=d<=13
else {1:'st',2:'nd',3:'rd'}.get(d%10, 'th') # # def custom_strftime(format, t): # return t.
strftime(format).replace('{S}', str(t.day) + suffix(t.day)) # # print custom_strftime('%B {S}, %Y', dt.now())
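A short worked example of the ordinal-suffix rule and custom_strftime above, on a fixed date so the output is reproducible.

import datetime


def suffix(d):
    return 'th' if 11 <= d <= 13 else {1: 'st', 2: 'nd', 3: 'rd'}.get(d % 10, 'th')


def custom_strftime(fmt, t):
    return t.strftime(fmt).replace('{S}', str(t.day) + suffix(t.day))


print([suffix(d) for d in (1, 2, 3, 11, 12, 13, 21, 22, 23)])
# -> ['st', 'nd', 'rd', 'th', 'th', 'th', 'st', 'nd', 'rd']
print(custom_strftime('%a {S} %B', datetime.date(2018, 3, 24)))
# -> Sat 24th March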
jmwright/cadquery-x
gui/libs/pyqode/core/widgets/tabs.py
Python
lgpl-3.0
16,288
0
# -*- coding: utf-8 -*- """ This module contains the implementation of a tab widget specialised to show code editor tabs. """ import logging import os from pyqode.core.dialogs.unsaved_files import DlgUnsavedFiles from pyqode.core.modes.filewatcher import FileWatcherMode from pyqode.core.widgets.tab_bar import TabBar from pyqode.qt import QtCore, QtWidgets from pyqode.qt.QtWidgets import QTabBar, QTabWidget def _logger(): return logging.getLogger(__name__) class TabWidget(QTabWidget): """ QTabWidget specialised to hold CodeEdit instances (or any other object that has the same interace). It ensures that there is only one open editor tab for a specific file path, it adds a few utility methods to quickly manipulate the current editor widget. It will automatically rename tabs that share the same base filename to include their distinctive parent directory. It handles tab close requests automatically and show a dialog box when a dirty tab widget is being closed. It also adds a convenience QTabBar with a "close", "close others" and "close all" menu. (You can add custom actions by using the addAction and addSeparator methods). It exposes a variety of signal and slots for a better integration with your applications( dirty_changed, save_current, save_all, close_all, close_current, close_others). .. deprecated: starting from version 2.4, this widget is considered as deprecated. You should use :class:`pyqode.core.widgets.SplittableTabWidget` instead. It will be removed in version 2.6. """ #: Signal emitted when a tab dirty flag changed dirty_changed = QtCore.Signal(bool) #: Signal emitted when the last tab has been closed last_tab_closed = QtCore.Signal() #: Signal emitted when a tab has been closed tab_closed = QtCore.Signal(QtWidgets.QWidget) @property def active_editor(self): """ Returns the current editor widget or None if the current tab widget is not a subclass of CodeEdit or if there is no open tab. """ return self._current def __init__(self, parent): QtWidgets.QTabWidget.__init__(self, parent) self._current = None self.currentChanged.connect(self._on_current_changed) self.tabCloseRequested.connect(self._on_tab_close_requested) tab_bar = TabBar(self) tab_bar.setContextMenuPolicy(QtCore.Qt.CustomContextMenu) tab_bar.customContextMenuRequested.connect(self._show_tab_context_menu) self.setTabBar(tab_bar) self.tab_bar = tab_bar self._context_mnu = QtWidgets.QMenu() for name, slot in [('Close', self.close), ('Close others', self.close_others), ('Close all', self.close_all)]: qaction = QtWidgets.QAction(name, self) qaction.triggered.connect(slot) self._context_mnu.addAction(qaction) self.addAction(qaction) # keep a list of widgets (to avoid PyQt bug where # the C++ class loose the wrapped obj type). self._widgets = [] @QtCore.Slot() def close(self): """ Closes the active editor """ self.tabCloseRequested.emit(self.currentIndex()) @QtCore.Slot() def close_others(self): """ Closes every editors tabs except the current one. 
""" current_widget = self.currentWidget() self._try_close_dirty_tabs(exept=current_widget) i = 0 while self.count() > 1: widget = self.widget(i) if widget != current_widget: self.removeTab(i) else: i = 1 @QtCore.Slot() def close_all(self): """ Closes all editors """ if self._try_close_dirty_tabs(): while self.count(): widget = self.widget(0) self.removeTab(0) self.tab_closed.emit(widget) return True return False def _ensure_unique_name(self, code_edit, name): if name is not None: code_edit._tab_name = name else: code_edit._tab_name = code_edit.file.name file_name = code_edit.file.name if self._name_exists(file_name): file_name = self._rename_duplicate_tabs( code_edit, code_edit.file.name, code_edit.file.path) code_edit._tab_name = file_name @QtCore.Slot() def save_current(self, path=None): """ Save current editor content. Leave file to None to erase the previous file content. If the current editor's file_path is None and path is None, the function will call ``QtWidgets.QFileDialog.getSaveFileName`` to get a valid save filename. :param path: path of the file to save, leave it None to overwrite existing file. """ try: if not path and not self._current.file.path: path, filter = QtWidgets.QFileDialog.getSaveFileName( self, 'Choose destination path') if not path: return False old_path = self._current.file.path code_edit = self._current self._save_editor(code_edit, path) path = code_edit.file.path # path (and icon) may have changed if path and old_path != path: self._ensure_unique_name(code_edit, code_edit.file.name) self.setTabText(self.currentIndex(), code_edit._tab_name) ext = os.path.splitext(path)[1] old_ext = os.path.splitext(old_path)[1] if ext != old_ext or not old_path: icon = QtWidgets.QFileIconProvider().icon( QtCore.QFileInfo(code_edit.file.path)) self.setTabIcon(self.currentIndex(), icon) return True except AttributeError: # not an editor widget pass return False @QtCore.Slot() def save_all(self): """ Save all editors. """ initial_index = self.currentIndex() for i in range(self.count()): try: self.setCurrentIndex(i) self.save_current() e
xcept AttributeError: pass self.setCurrentIndex(initial_index) def addAction(self, action): """ Adds an action to the TabBar context menu :param action: QAction to append """ self._context_mnu.addAction(action) def add_separator(self): """ Adds a separato
r to the TabBar context menu. :returns The separator action. """ return self._context_mnu.addSeparator() def index_from_filename(self, path): """ Checks if the path is already open in an editor tab. :param path: path to check :returns: The tab index if found or -1 """ if path: for i in range(self.count()): widget = self.widget(i) try: if widget.file.path == path: return i except AttributeError: pass # not an editor widget return -1 @staticmethod def _del_code_edit(code_edit): try: code_edit.close() code_edit.delete() except AttributeError: pass del code_edit def add_code_edit(self, code_edit, name=None): """ Adds a code edit tab, sets its text as the editor.file.name and sets it as the active tab. The widget is only added if there is no other editor tab open with the same filename, else the already open tab is set as current. If the widget file path is empty, i.e. this is a new document that has not been saved to disk, you may provided a formatted string such as 'New document %d.txt' for the document name. The int format will be automatically replaced by the number of new documents
HybridF5/jacket
jacket/storage/backup/rpcapi.py
Python
apache-2.0
4,652
0
# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Client side of the volume backup RPC API. """ from oslo_config import cfg from oslo_log import log as logging from jacket import rpc CONF = cfg.CONF LOG = logging.getLogger(__name__) class BackupAPI(rpc.RPCAPI): """Client side of the volume rpc API. API version history: 1.0 - Initial version. 1.1 - Changed methods to accept backup objects instead of IDs. 1.2 - A version that got in by mistake (without breaking anything). 1.3 - Dummy version bump to mark start of having storage-backup service decoupled from storage-volume. ... Mitaka supports messaging 1.3. Any changes to existing methods in 1.x after this point should be done so that they can handle version cap set to 1.3. 2.0 - Remove 1.x compatibility """ RPC_API_VERSION = '1.3' TOPIC = CONF.backup_topic BINARY = 'storage-backup' def _compat_ver(self, current, legacy): if self.client.ca
n_send_version(current): return current else: return legacy def create_backup(self, ctxt, backup): LOG.debug("create_backup in rpcapi backup_id %s", backup.id) version = self._compat_ver('2.0', '1.1') cctxt = self.client.prepare(server=backup.host, version=version) cctxt.cast(ctxt, 'create_backup', backup=backup) def restore_backup(self, ctxt, volume_host, backup, volume_id):
LOG.debug("restore_backup in rpcapi backup_id %s", backup.id) version = self._compat_ver('2.0', '1.1') cctxt = self.client.prepare(server=volume_host, version=version) cctxt.cast(ctxt, 'restore_backup', backup=backup, volume_id=volume_id) def delete_backup(self, ctxt, backup): LOG.debug("delete_backup rpcapi backup_id %s", backup.id) version = self._compat_ver('2.0', '1.1') cctxt = self.client.prepare(server=backup.host, version=version) cctxt.cast(ctxt, 'delete_backup', backup=backup) def export_record(self, ctxt, backup): LOG.debug("export_record in rpcapi backup_id %(id)s " "on host %(host)s.", {'id': backup.id, 'host': backup.host}) version = self._compat_ver('2.0', '1.1') cctxt = self.client.prepare(server=backup.host, version=version) return cctxt.call(ctxt, 'export_record', backup=backup) def import_record(self, ctxt, host, backup, backup_service, backup_url, backup_hosts): LOG.debug("import_record rpcapi backup id %(id)s " "on host %(host)s for backup_url %(url)s.", {'id': backup.id, 'host': host, 'url': backup_url}) version = self._compat_ver('2.0', '1.1') cctxt = self.client.prepare(server=host, version=version) cctxt.cast(ctxt, 'import_record', backup=backup, backup_service=backup_service, backup_url=backup_url, backup_hosts=backup_hosts) def reset_status(self, ctxt, backup, status): LOG.debug("reset_status in rpcapi backup_id %(id)s " "on host %(host)s.", {'id': backup.id, 'host': backup.host}) version = self._compat_ver('2.0', '1.1') cctxt = self.client.prepare(server=backup.host, version=version) return cctxt.cast(ctxt, 'reset_status', backup=backup, status=status) def check_support_to_force_delete(self, ctxt, host): LOG.debug("Check if backup driver supports force delete " "on host %(host)s.", {'host': host}) version = self._compat_ver('2.0', '1.1') cctxt = self.client.prepare(server=host, version=version) return cctxt.call(ctxt, 'check_support_to_force_delete')
napalm-automation/napalm-yang
napalm_yang/models/openconfig/network_instances/network_instance/mpls/lsps/constrained_path/tunnels/tunnel/p2p_tunnel_attributes/p2p_primary_path/__init__.py
Python
apache-2.0
14,694
0.001021
# -*- coding: utf-8 -*- from operator import attrgetter from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType from pyangbind.lib.yangtypes import RestrictedClassType from pyangbind.lib.yangtypes import TypedListType from pyangbind.lib.yangtypes import YANGBool from pyangbind.lib.yangtypes import YANGListType from pyangbind.lib.yangtypes import YANGDynClass from pyangbind.lib.yangtypes import ReferenceType from pyangbind.lib.base import PybindBase from collections import OrderedDict from decimal import Decimal from bitarray import bitarray import six # PY3 support of some PY2 keywords (needs improved) if six.PY3: import builtins as __builtin__ long = int elif six.PY2: import __builtin__ from . import p2p_primary_path_ class p2p_primary_path(PybindBase): """ This class was auto-generated by the PythonClass plugin for PYANG from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/mpls/lsps/constrained-path/tunnels/tunnel/p2p-tunnel-attributes/p2p-primary-path. Each member element of the container is represented as a class variable - with a specific YANG type. YANG Description: Primary paths associated with the LSP """ __slots__ = ("_path_helper", "_extmethods", "__p2p_primary_path") _yang_name = "p2p-primary-path" _pybind_generated_by = "container" def __init__(self, *args, **kwargs): self._path_helper = False self._extmethods = False self.__p2p_primary_path = YANGDynClass( base=YANGListType( "name", p2p_primary_path_.p2p_primary_path, yang_name="p2p-primary-path", parent=self, is_container="list", user_ordered=False, path_helper=self._path_helper, yang_keys="name", extensions=None, ), is_container="list", yang_name="p2p-primary-path", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="list", is_config=True, ) load = kwargs.pop("load", None) if args: if len(args) > 1: raise TypeError("cannot create a YANG container with >1 argument") all_attr = True for e in self._pyangbind_elements: if not hasattr(args[0], e): all_attr = False break if not all_attr: raise ValueError("Supplied object did not have the correct attributes") for e in self._pyangbind_elements: nobj = getattr(args[0], e) if nobj._changed() is False: continue setmethod = getattr(self, "_set_%s" % e) if load is None: setmethod(getattr(args[0], e)) else: setmethod(getattr(args[0], e), load=load) def _path(self): if hasattr(self, "_parent"): return self._parent._path() + [self._yang_name] else: return [ "network-instances", "network-instance", "mpls", "lsps", "constrained-path", "tunnels", "tunnel", "p2p-tunnel-attributes", "p2p-primary-path", ] def _get_p2p_primary_path(self): """ Getter method for p2p_primary_path, mapped from YANG variable /network_instances/network_instance/mpls/lsps/constrained_path/tunnels/tunnel/p2p_tunnel_attributes/p2p_primary_path/p2p_primary_path (list) YANG Description: List of p2p primary paths for a tunnel """ return self.__p2p_primary_path def _set_p2p_primary_path(self, v, load=False): """ Setter method for p2p_primary_path, mapped from YANG variable /network_instances/network_instance/mpls/lsps/constrained_path/tunnels/tunnel/p2p_tunnel_attributes/p2p_primary_path/p2p_primary_path (list) If this variable is read-only (config: false) in the source YANG file, then _set_p2p_primary_path is considered as a private method. 
Backends looking to populate this variable should do so via calling thisObj._set_p2p_primary_path() directly. YANG Description: List of p2p primary paths for a tunnel """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass( v, base=YANGListType( "name", p2p_primary_path_.p2p_primary_path, yang_name="p2p-primary-path", parent=self, is_container="list", user_ordered=False, path_helper=self._path_helper, yang_keys="name", extensions=None, ), is_container="list", yang_name="p2p-primary-path", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="list", is_config=True, ) except (TypeError, ValueError): raise ValueError( { "error-string": """p2p_primary_path must be of a type compatible with list""", "defined-type": "list", "generated-type": """YANGDynClass(base=YANGListType("name",p2p_primary_path_.p2p_primary_path, yang_name="p2p-primary-path", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='name', extensions=None), is_container='list', yang_name="p2p-primary-path", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='list', is_config=True)""", } ) self.__p2p_primary_path = t if hasattr(self, "_set"): self._set() def _unset_p2p_primary_path(self): self.__p2p_primary_path = YANGDynClass( base=YANGListType( "name", p2p_primary_path_.p2p_primary_path, yang_name="p2p-primary-path", parent=self, is_container="list", user_ordered=False, path_helper=self._path_helper, yang_keys="name", extensions=None, ), is_container="list", yang_name="p2p-primary-path", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance", yang_type="list", is_config=True, ) p2p_primary_path = __builtin__.property( _get_p2p_primary_path, _set_p2p_primary_path )
_pyangbind_elements = OrderedDict([("p2p_primary_path", p2p_primary_path)]) from . import p2p_primary_path_ class p2p_primary_path(PybindBase): """ This class was auto-generated by the PythonClass plugin for PYANG from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/mpls/lsps/constrained-path/tunnels/tunnel/p2p-tunnel-attributes/p2p-primary-path. Each member element of the container is represented as a class variable - with a specific YANG type. YANG Description: Primary path
jalibras/coop
coop/coop/urls.py
Python
apache-2.0
2,258
0.0124
"""coop URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: https://docs.djangoproject.com/en/1.10/topics/http/urls/ Examples: Function views 1. Add an import: from my_app import views 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home') Class-based views 1. Add an import: from other_app.views import Home 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home') Including another URLconf 1. Import the include() function: from django.conf.urls import url, include 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls')) """ from django.conf.urls import url, include from django.contrib import admin from django.conf import settings from django.conf.urls.static import static from rest_framework import routers from guide.views import area, ArtificialProblemViewSet,NaturalProblemViewSet,ProblemImageViewSet, AreaViewSet, SectorViewSet from members.views import UserViewSet,MemberViewSet admin.site.site_header='Galway Climbing Co-op admin' admin.site.site_title='Galway Climbing Co-op admin' #admin.site.index_title='Galway Climbing Co-op admin' # django rest framework url routers for viewsets router = routers.DefaultRouter() router.register(r'artificialproblems',ArtificialProblemViewSet) router.register(r'naturalproblems',NaturalProblemViewSet) router.register(r'problemimages',ProblemImageViewSet) router.register(r'users',UserViewSet) router.register(r'members',MemberViewSet) router.register(r'areas',AreaViewSet) router.register(r'sectors',SectorViewSet) from guide.views import area_map urlpatterns = [ url(r'api/', include(router.urls)), url(r'api-aut
h/',include('rest_framework.urls',namespace='rest_framework')), url(r'^admin/', admin.site.urls), url(r'^$',area_map,{'area_id':1}), url(r'^guide/', include('guide.urls',namespace="guide")), url(r'^home/', include('homepage.urls',namespace="homepage")), url(r'^members/auth/', include('members.urls')), # note that the (customised) templates for the auth views are in [BASE_DIR]/templates/registration url(r'^members/
', include('members.urls',namespace="members")), ] + static(settings.MEDIA_URL,document_root=settings.MEDIA_ROOT)
opengeogroep/inasafe
realtime/shake_data.py
Python
gpl-3.0
15,213
0.000394
# coding=utf-8 """ InaSAFE Disaster risk assessment tool developed by AusAid and World Bank - **Functionality related to shake data files.** Contact : [email protected] .. note:: This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. """ __author__ = '[email protected]' __version__ = '0.5.0' __date__ = '30/07/2012' __copyright__ = ('Copyright 2012, Australia Indonesia Facility for ' 'Disaster Reduction') import os import shutil from datetime import datetime from zipfile import ZipFile # The logger is intiailsed in utils.py by init import logging LOGGER = logging.getLogger('InaSAFE') from rt_exceptions import ( EventUndefinedError, EventIdError, NetworkError, EventValidationError, InvalidInputZipError, ExtractionError) from ftp_client import FtpClient from utils import shakemap_zip_dir, shakemap_extract_dir class ShakeData: """A class for retrieving, reading, converting and extracting data from shakefiles. Shake files are provided on an ftp server. There are two files for every event: * an 'inp' file * an 'out' file These files are provided on the ftp serve
r as zip files. For example: * `ftp://118.97.83.243/20110413170148.inp.zip`_ * `ftp://118.97.83.243/20110413170148.out.zip`_ There are numerous files provided within these two zip files, but there is only really one that we are interested in:
* grid.xml - which contains all the metadata pertaining to the event The remaining files are fetched for completeness and possibly use in the future. This class provides a high level interface for retrieving this data and then extracting various by products from it. """ def __init__(self, event=None, host='118.97.83.243'): """Constructor for the ShakeData class :param event: (Optional) a string representing the event id that this raster is associated with. e.g. 20110413170148. **If no event id is supplied, a query will be made to the ftp server, and the latest event id assigned.** :param host: (Optional) a string representing the ip address or host name of the server from which the data should be retrieved. It assumes that the data is in the root directory. Defaults to 118.97.83.243 :returns: None :raises: None """ self.event_id = event self.host = host # private Shake event instance associated with this shake dataset self._shakeEvent = None if self.event_id is None: try: self.get_latest_event_id() except NetworkError: raise else: # If we fetched it above using get_latest_event_id we assume it is # already validated. try: self.validate_event() except EventValidationError: raise # If event_id is still None after all the above, moan.... if self.event_id is None: message = ('No id was passed to the constructor and the ' 'latest id could not be retrieved from the server.') LOGGER.exception('ShakeData initialisation failed') raise EventIdError(message) def get_latest_event_id(self): """Query the ftp server and determine the latest event id. :return: A string containing a valid event id. :raises: NetworkError """ ftp_client = FtpClient() try: ftp_client_list = ftp_client.get_listing() ftp_client_list.sort(key=lambda x: x.lower()) except NetworkError: raise now = datetime.now() now = int( '%04d%02d%02d%02d%02d%02d' % ( now.year, now.month, now.day, now.hour, now.minute, now.second )) event_id = now + 1 while int(event_id) > now: if len(ftp_client_list) < 1: raise EventIdError('Latest Event Id could not be obtained') event_id = ftp_client_list.pop().split('/')[-1].split('.')[0] if event_id is None: raise EventIdError('Latest Event Id could not be obtained') self.event_id = event_id def is_on_server(self): """Check the event associated with this instance exists on the server. :return: True if valid, False if not :raises: NetworkError """ input_file_name, output_file_name = self.file_names() file_list = [input_file_name, output_file_name] ftp_client = FtpClient() return ftp_client.has_files(file_list) def file_names(self): """Return file names for the inp and out files based on the event id. e.g. 20120726022003.inp.zip, 20120726022003.out.zip :return: Tuple Consisting of inp and out local cache paths. :rtype: tuple (str, str) :raises: None """ input_file_name = '%s.inp.zip' % self.event_id output_file_name = '%s.out.zip' % self.event_id return input_file_name, output_file_name def cache_paths(self): """Return the paths to the inp and out files as expected locally. :return: Tuple consisting of inp and out local cache paths. :rtype: tuple (str, str) :raises: None """ input_file_name, output_file_name = self.file_names() input_file_path = os.path.join(shakemap_zip_dir(), input_file_name) output_file_path = os.path.join(shakemap_zip_dir(), output_file_name) return input_file_path, output_file_path def is_cached(self): """Check the event associated with this instance exists in cache. 
:return: True if locally cached, False if not :raises: None """ input_file_path, output_file_path = self.cache_paths() if os.path.exists(input_file_path) and \ os.path.exists(output_file_path): # TODO: we should actually try to unpack them for deeper validation return True else: LOGGER.debug('%s is not cached' % input_file_path) LOGGER.debug('%s is not cached' % output_file_path) return False def validate_event(self): """Check that the event associated with this instance exists either in the local event cache, or on the remote ftp site. :return: True if valid, False if not :raises: NetworkError """ # First check local cache if self.is_cached(): return True else: return self.is_on_server() #noinspection PyMethodMayBeStatic def _fetch_file(self, event_file, retries=3): """Private helper to fetch a file from the ftp site. e.g. for event 20110413170148 this file would be fetched:: ftp://118.97.83.243/20110413170148.inp.zip and this local file created:: /tmp/realtime/20110413170148.inp.zip .. note:: If a cached copy of the file exits, the path to the cache copy will simply be returned without invoking any network requests. :param event_file: Filename on server e.g.20110413170148.inp.zip :type event_file: str :param retries: Number of reattempts that should be made in in case of network error etc. :type retries: int :return: A string for the dataset path on the local storage system. :rtype: str :raises: EventUndefinedError, NetworkError """ # Return the cache copy if it exists local_path = os.path.join(shakemap_zip_dir(), event_file) if os.path.exists(local_path): return local_path #Otherwise try to fetch it using ftp for counter in range(retries): last_error = None try:
bvacaliuc/pyrasdr
plugins/pyLMS7002M/pyLMS7002M/LMS7002_DCCAL.py
Python
gpl-3.0
27,053
0.007467
#*************************************************************** #* Name: LMS7002_DCCAL.py #* Purpose: Class implementing LMS7002 DCCAL functions #* Author: Lime Microsystems () #* Created: 2017-02-10 #* Copyright: Lime Microsystems (limemicro.com) #* License: #************************************************************** from LMS7002_base import * class LMS7002_DCCAL(LMS7002_base): __slots__ = [] # Used to generate error on typos def __init__(self, chip): self.chip = chip self.channel = None self.prefix = "DCCAL_" # # DCCAL_CFG (0x05C0) # # DCMODE @property def DCMODE(self): """ Get the value of DCMODE """ if self.chip.chipID == self.chip.chipIDMR3: return self._readReg('CFG', 'DCMODE') else: raise ValueError("Bitfield DCMODE is not supported on chip version "+str(self.chip.chipID)) @DCMODE.setter def DCMODE(self, value): """ Set the value of DCMODE """ if self.chip.chipID == self.chip.chipIDMR3: if value not in [0, 1, 'MANUAL', 'AUTO']: raise ValueError("Value must be [0,1,'MANUAL','AUTO']") if value==0 or value=='MANUAL': val = 0 else: val = 1 self._writeReg('CFG', 'DCMODE', val) else: raise ValueError("Bitfield DCMODE is not supported on chip version "+str(self.chip.chipID)) # PD_DCDAC_RXB @property def PD_DCDAC_RXB(self): """ Get the value of PD_DCDAC_RXB """ if self.chip.chipID == self.chip.chipIDMR3: return self._readReg('CFG', 'PD_DCDAC_RXB') else: raise ValueError("Bitfield PD_DCDAC_RXB is not supported on chip version "+str(self.chip.chipID)) @PD_DCDAC_RXB.setter def PD_DCDAC_RXB(self, value): """ Set the value of PD_DCDAC_RXB """ if self.chip.chipID == self.chip.chipIDMR3: if value not in [0, 1]: raise ValueError("Value must be [0,1]") self._writeReg('CFG', 'PD_DCDAC_RXB', value) else: raise ValueError("Bitfield PD_DCDAC_RXB is not supported on chip version "+str(self.chip.chipID)) # PD_DCDAC_RXA @property def PD_DCDAC_RXA(self): """ Get the value of PD_DCDAC_RXA """ if self.chip.chipID == self.chip.chipIDMR3: return self._readReg('CFG', 'PD_DCDAC_RXA') else: raise ValueError("Bitfield PD_DCDAC_RXA is not supported on chip version "+str(self.chip.chipID)) @PD_DCDAC_RXA.setter def PD_DCDAC_RXA(self, value): """ Set the value of PD_DCDAC_RXA """ if self.chip.chipID == self.chip.chipIDMR3: if value not in [0, 1]: raise ValueError("Value must be [0,1]") self._writeReg('CFG', 'PD_DCDAC_RXA', value) else: raise ValueError("Bitfield PD_DCDAC_RXA is not supported on chip version "+str(self.chip.chipID)) # PD_DCDAC_TXB @property def PD_DCDAC_TXB(self): """ Get the value of PD_DCDAC_TXB """ if self.chip.chipID == self.chip.chipIDMR3: return self._readReg('CFG', 'PD_DCDAC_TXB') else: raise ValueError("Bitfield PD_DCDAC_TXB is not supported on chip version "+str(self.chip.chipID)) @PD_DCDAC_TXB.setter def PD_DCDAC_TXB(self, value): """ Set the
value of PD_DCDAC_TXB """ if self.chip.chipID == self.chip.chipIDMR3: if value not in [0, 1]: raise ValueError("Value must be [0,1]") self._writeReg('CFG', 'PD_DCDAC_TXB', value) else: raise ValueError("Bitfield PD_DCDAC_TXB is not supported on chip version "+str(self.chip.chipID)) # PD_DCDAC_TXA @property def PD_DCDAC_TXA(self): """ Get the value o
f PD_DCDAC_TXA """ if self.chip.chipID == self.chip.chipIDMR3: return self._readReg('CFG', 'PD_DCDAC_TXA') else: raise ValueError("Bitfield PD_DCDAC_TXA is not supported on chip version "+str(self.chip.chipID)) @PD_DCDAC_TXA.setter def PD_DCDAC_TXA(self, value): """ Set the value of PD_DCDAC_TXA """ if self.chip.chipID == self.chip.chipIDMR3: if value not in [0, 1]: raise ValueError("Value must be [0,1]") self._writeReg('CFG', 'PD_DCDAC_TXA', value) else: raise ValueError("Bitfield PD_DCDAC_TXA is not supported on chip version "+str(self.chip.chipID)) # PD_DCCMP_RXB @property def PD_DCCMP_RXB(self): """ Get the value of PD_DCCMP_RXB """ if self.chip.chipID == self.chip.chipIDMR3: return self._readReg('CFG', 'PD_DCCMP_RXB') else: raise ValueError("Bitfield PD_DCCMP_RXB is not supported on chip version "+str(self.chip.chipID)) @PD_DCCMP_RXB.setter def PD_DCCMP_RXB(self, value): """ Set the value of PD_DCCMP_RXB """ if self.chip.chipID == self.chip.chipIDMR3: if value not in [0, 1]: raise ValueError("Value must be [0,1]") self._writeReg('CFG', 'PD_DCCMP_RXB', value) else: raise ValueError("Bitfield PD_DCCMP_RXB is not supported on chip version "+str(self.chip.chipID)) # PD_DCCMP_RXA @property def PD_DCCMP_RXA(self): """ Get the value of PD_DCCMP_RXA """ if self.chip.chipID == self.chip.chipIDMR3: return self._readReg('CFG', 'PD_DCCMP_RXA') else: raise ValueError("Bitfield PD_DCCMP_RXA is not supported on chip version "+str(self.chip.chipID)) @PD_DCCMP_RXA.setter def PD_DCCMP_RXA(self, value): """ Set the value of PD_DCCMP_RXA """ if self.chip.chipID == self.chip.chipIDMR3: if value not in [0, 1]: raise ValueError("Value must be [0,1]") self._writeReg('CFG', 'PD_DCCMP_RXA', value) else: raise ValueError("Bitfield PD_DCCMP_RXA is not supported on chip version "+str(self.chip.chipID)) # PD_DCCMP_TXB @property def PD_DCCMP_TXB(self): """ Get the value of PD_DCCMP_TXB """ if self.chip.chipID == self.chip.chipIDMR3: return self._readReg('CFG', 'PD_DCCMP_TXB') else: raise ValueError("Bitfield PD_DCCMP_TXB is not supported on chip version "+str(self.chip.chipID)) @PD_DCCMP_TXB.setter def PD_DCCMP_TXB(self, value): """ Set the value of PD_DCCMP_TXB """ if self.chip.chipID == self.chip.chipIDMR3: if value not in [0, 1]: raise ValueError("Value must be [0,1]") self._writeReg('CFG', 'PD_DCCMP_TXB', value) else: raise ValueError("Bitfield PD_DCCMP_TXB is not supported on chip version "+str(self.chip.chipID)) # PD_DCCMP_TXA @property def PD_DCCMP_TXA(self): """ Get the value of PD_DCCMP_TXA """ if self.chip.chipID == self.chip.chipIDMR3: return self._readReg('CFG', 'PD_DCCMP_TXA') else: raise ValueError("Bitfield PD_DCCMP_TXA is not supported on chip version "+str(self.chip.chipID)) @PD_DCCMP_TXA.setter def PD_DCCMP_TXA(self, value): """ Set the value of PD_DCCMP_TXA """ if self.chip.chipID == self.chip.chipIDMR3: if value not in [0, 1]: raise ValueError("Value must be [0,1]") self._writeReg('CFG', 'PD_DCCMP_TXA', value) else: raise ValueError("Bitfield PD_DCCMP_TXA is not supported on chip version "+str(self.chip.chipID)) # # DCCAL_STAT (0x05C1) # # DCCAL_CALSTATUS<7:0> @property def DCCAL_CALSTATUS(self): """ Get the value of DCCAL_CALSTATUS<7:0> """ if self.chip.chipID == self.chip.chip
zegami/omero-idr-fetch
fetch_omero_data.py
Python
mit
3,129
0.003196
""" Grab screen data from OMERO based on Screen ID """ import csv import multiprocessing import progressbar import signal import sys import time import requests import json from argparse import ArgumentParser import omeroidr.connect as connect from omeroidr.data import Data parser = ArgumentParser(prog='OMERO screen data downloader') parser.add_argument('-i', '--id', help='Id of the screen') parser.add_argument('-o', '--output', required=False, default='omero.tab', help='Path to the tab separated output file') parser.add_argument('-s', '--server', required=False, default='http://idr-demo.openmicroscopy.org', help='Base url for OMERO server') parser.add_argument('-u', '--user', required=False, help='OMERO Username') parser.add_argument('-w', '--password', required=False, help='OMERO Password') pargs = parser.parse_args() # list of well metadata wells_data = [] # initialize the progress bar widgets = [progressbar.Percentage(), " ", progressbar.Bar(), " ", progressbar.ETA()] pbar = progressbar.ProgressBar(widgets=widgets) def init_worker(): """ Initialise multiprocess
ing pool """ signal.signal(signal.SIGI
NT, signal.SIG_IGN) def well_details_callback(well): """ Callback from apply_async. Used to update progress bar :param well: Well metadata object """ pbar.update(pbar.previous_value + 1) # append well the wells data list wells_data.append(well) def main(): # login session = connect.connect_to_omero(pargs.server, pargs.user, pargs.password) # init data omero_data = Data(session, pargs.server) # get wells for screen print('loading plates...') wells = omero_data.get_wells(pargs.id) print('Retrieving annotations...') # get all annotations # using a pool of processes p = multiprocessing.Pool(multiprocessing.cpu_count(), init_worker) pbar.max_value = len(wells) pbar.start() for well in wells: p.apply_async(omero_data.get_well_details, args=(well,), callback=well_details_callback) try: # wait 10 seconds, this allows for the capture of the KeyboardInterrupt exception time.sleep(10) except KeyboardInterrupt: p.terminate() p.join() disconnect(session, pargs.server) print('exiting...') sys.exit(0) finally: p.close() p.join() pbar.finish() # sort results by id wells_sorted = sorted(wells_data, key=lambda k: k['id']) print('Writing flat file...') # build a dict of all keys which will form the header row of the flat file # this is necessary as the metadata key-value pairs might not be uniform across the dataet columns = set() for well in wells_sorted: columns |= set(well.keys()) # write to a tab delimited file with open(pargs.output, 'w') as output: w = csv.DictWriter(output, columns, delimiter='\t', lineterminator='\n') w.writeheader() w.writerows(wells_sorted) output.close() connect.disconnect(session, pargs.server) print('Metadata fetch complete') if __name__ == '__main__': main()
hecchi777/S3-SlaacSecuritySolution
impacket-0.9.11/impacket/dcerpc/v5/lsad.py
Python
apache-2.0
57,687
0.0121
# Copyright (c) 2003-2014 CORE Security Technologies # # This software is provided under under a slightly modified version # of the Apache Software License. See the accompanying LICENSE file # for more information. # # $Id: lsad.py 1106 2014-01-19 14:17:01Z [email protected] $ # # Author: Alberto Solino # # Description: # [MS-LSAD] Interface implementation # # Best way to learn how to use these calls is to grab the protocol standard # so you understand what the call does, and then read the test case located # at https://code.google.com/p/impacket/source/browse/#svn%2Ftrunk%2Fimpacket%2Ftestcases%2FSMB-RPC # # Some calls have helper functions, which makes it even easier to use. # They are located at the end of this file. # Helper functions start with "h"<name of the call>. # There are test cases for them too. # from impacket.dcerpc.v5 import ndr from impacket.dcerpc.v5.ndr import NDRCALL, NDR, NDRSTRUCT, NDRENUM, NDRUNION, NDRPOINTER, NDRUniConformantArray, NDRUniConformantVaryingArray from impacket.dcerpc.v5.dtypes import * from impacket import nt_errors from impacket.uuid import uuidtup_to_bin from impacket.dcerpc.v5.enum import Enum MSRPC_UUID_LSAD = uuidtup_to_bin(('12345778-1234-ABCD-EF00-0123456789AB','0.0')) class DCERPCSessionError(Exception): def __init__( self, packet = None, error_code = None): Exception.__init__(self) self.packet = packet if packet is not None: self.error_code = packet['ErrorCode'] else: self.error_code = error_code def get_error_code( self ): return self.error_code def get_packet( self ): return self.packet def __str__( self ): key = self.error_code if (nt_errors.ERROR_MESSAGES.has_key(key)): error_msg_short = nt_errors.ERROR_MESSAGES[key][0] error_msg_verbose = nt_errors.ERROR_MESSAGES[key][1] return 'LSAD SessionError: code: 0x%x - %s - %s' % (self.error_code, error_msg_short, error_msg_verbose) else: return 'LSAD SessionError: unknown error code: 0x%x' % (self.error_code) ################################################################################ # CONSTANTS ################################################################################ # 2.2.1.1.2 ACCESS_MASK for Policy Objects POLICY_VIEW_LOCAL_INFORMATION = 0x00000001 POLICY_VIEW_AUDIT_INFORMATION = 0x00000002 POLICY_GET_PRIVATE_INFORMATION = 0x00000004 POLICY_TRUST_ADMIN = 0x00000008 POLICY_CREATE_ACCOUNT = 0x00000010 POLICY_CREATE_SECRET = 0x00000020 POLICY_CREATE_PRIVILEGE = 0x00000040 POLICY_SET_DEFAULT_QUOTA_LIMITS = 0x00000080 POLICY_SET_AUDIT_REQUIREMENTS = 0x00000100 POLICY_AUDIT_LOG_ADMIN = 0x00000200 POLICY_SERVER_ADMIN = 0x00000400 POLICY_LOOKUP_NAMES = 0x00000800 POLICY_NOTIFICATION = 0x00001000 # 2.2.1.1.3 ACCESS_MASK for Account Objects ACCOUNT_VIEW = 0x00000001 ACCOUNT_ADJUST_PRIVILEGES = 0x00000002 ACCOUNT_ADJUST_QUOTAS = 0x00000004 ACCOUNT_ADJUST_SYSTEM_ACCESS = 0x00000008 # 2.2.1.1.4 ACCESS_MASK for Secret Objects SECRET_SET_VALUE = 0x00000001 SECRET_QUERY_VALUE = 0x00000002 # 2.2.1.1.5 ACCESS_MASK for Trusted Domain Objects TRUSTED_QUERY_DOMAIN_NAME = 0x00000001 TRUSTED_QUERY_CONTROLLERS = 0x00000002 TRUSTED_SET_CONTROLLERS = 0x00000004 TRUSTED_QUERY_POSIX
= 0x00000008 TRUSTED_SET_POSIX = 0x00000010 TRUSTED_SET_AUTH = 0x00000020 TRUSTED_QUERY_AUTH = 0x00000040 # 2.2.1.2 POLICY_SYSTEM_ACCESS_MODE POLICY_MODE_INTERACTIVE = 0x00000001 POLICY_MODE_NETWORK = 0x00000002 POLICY_MODE_BATCH = 0x00000004 POLICY_MOD
E_SERVICE = 0x00000010 POLICY_MODE_DENY_INTERACTIVE = 0x00000040 POLICY_MODE_DENY_NETWORK = 0x00000080 POLICY_MODE_DENY_BATCH = 0x00000100 POLICY_MODE_DENY_SERVICE = 0x00000200 POLICY_MODE_REMOTE_INTERACTIVE = 0x00000400 POLICY_MODE_DENY_REMOTE_INTERACTIVE = 0x00000800 POLICY_MODE_ALL = 0x00000FF7 POLICY_MODE_ALL_NT4 = 0x00000037 # 2.2.4.4 LSAPR_POLICY_AUDIT_EVENTS_INFO # EventAuditingOptions POLICY_AUDIT_EVENT_UNCHANGED = 0x00000000 POLICY_AUDIT_EVENT_NONE = 0x00000004 POLICY_AUDIT_EVENT_SUCCESS = 0x00000001 POLICY_AUDIT_EVENT_FAILURE = 0x00000002 # 2.2.4.19 POLICY_DOMAIN_KERBEROS_TICKET_INFO # AuthenticationOptions POLICY_KERBEROS_VALIDATE_CLIENT = 0x00000080 # 2.2.7.21 LSA_FOREST_TRUST_RECORD # Flags LSA_TLN_DISABLED_NEW = 0x00000001 LSA_TLN_DISABLED_ADMIN = 0x00000002 LSA_TLN_DISABLED_CONFLICT = 0x00000004 LSA_SID_DISABLED_ADMIN = 0x00000001 LSA_SID_DISABLED_CONFLICT = 0x00000002 LSA_NB_DISABLED_ADMIN = 0x00000004 LSA_NB_DISABLED_CONFLICT = 0x00000008 LSA_FTRECORD_DISABLED_REASONS = 0x0000FFFF ################################################################################ # STRUCTURES ################################################################################ # 2.2.2.1 LSAPR_HANDLE class LSAPR_HANDLE(NDR): align = 1 structure = ( ('Data','20s=""'), ) # 2.2.2.3 LSA_UNICODE_STRING LSA_UNICODE_STRING = RPC_UNICODE_STRING # 2.2.3.1 STRING class STRING(NDRSTRUCT): commonHdr = ( ('MaximumLength','<H=len(Data)-12'), ('Length','<H=len(Data)-12'), ('ReferentID','<L=0xff'), ) commonHdr64 = ( ('MaximumLength','<H=len(Data)-24'), ('Length','<H=len(Data)-24'), ('ReferentID','<Q=0xff'), ) referent = ( ('Data',STR), ) def dump(self, msg = None, indent = 0): if msg is None: msg = self.__class__.__name__ ind = ' '*indent if msg != '': print "%s" % (msg), # Here just print the data print " %r" % (self['Data']), def __setitem__(self, key, value): if key == 'Data': self.fields['MaximumLength'] = None self.fields['Length'] = None self.data = None # force recompute return NDR.__setitem__(self, key, value) # 2.2.3.2 LSAPR_ACL class LSAPR_ACL(NDRSTRUCT): structure = ( ('AclRevision', UCHAR), ('Sbz1', UCHAR), ('AclSize', USHORT), ('Dummy1',NDRUniConformantArray), ) # 2.2.3.4 LSAPR_SECURITY_DESCRIPTOR LSAPR_SECURITY_DESCRIPTOR = SECURITY_DESCRIPTOR class PLSAPR_SECURITY_DESCRIPTOR(NDRPOINTER): referent = ( ('Data', LSAPR_SECURITY_DESCRIPTOR), ) # 2.2.3.5 SECURITY_IMPERSONATION_LEVEL class SECURITY_IMPERSONATION_LEVEL(NDRENUM): class enumItems(Enum): SecurityAnonymous = 0 SecurityIdentification = 1 SecurityImpersonation = 2 SecurityDelegation = 3 # 2.2.3.6 SECURITY_CONTEXT_TRACKING_MODE SECURITY_CONTEXT_TRACKING_MODE = UCHAR # 2.2.3.7 SECURITY_QUALITY_OF_SERVICE class SECURITY_QUALITY_OF_SERVICE(NDRSTRUCT): structure = ( ('Length', DWORD), ('ImpersonationLevel', SECURITY_IMPERSONATION_LEVEL), ('ContextTrackingMode', SECURITY_CONTEXT_TRACKING_MODE), ('EffectiveOnly', UCHAR), ) class PSECURITY_QUALITY_OF_SERVICE(NDRPOINTER): referent = ( ('Data', SECURITY_QUALITY_OF_SERVICE), ) # 2.2.2.4 LSAPR_OBJECT_ATTRIBUTES class LSAPR_OBJECT_ATTRIBUTES(NDRSTRUCT): structure = ( ('Length', DWORD), ('RootDirectory', LPWSTR), ('ObjectName', LPWSTR), ('Attributes', DWORD), ('SecurityDescriptor', PLSAPR_SECURITY_DESCRIPTOR), ('SecurityQualityOfService', PSECURITY_QUALITY_OF_SERVICE), ) # 2.2.2.5 LSAPR_SR_SECURITY_DESCRIPTOR class LSAPR_SR_SECURITY_DESCRIPTOR(NDRSTRUCT): structure = ( ('Length', DWORD), ('SecurityDescriptor', LPBYTE), ) class PLSAPR_SR_SECURITY_DESCRIPTOR(NDRPOINTER): referent = ( ('Data', 
LSAPR_SR_SECURITY_DESCRIPTOR), ) # 2.2.3.3 SECURITY_DESCRIPTOR_CONTROL SECURITY_DESCRIPTOR_CONTROL = ULONG # 2.2.4.1 POLICY_INFORMATION_CLASS class POLICY_INFORMATION_CLASS(NDRENUM): class enumItems(Enum):
tedunderwood/GenreProject
python/extract/GenerateExtractPBS.py
Python
mit
630
0.009524
for i in range(0, 53): filepath = '/Users/tunder/Dropbox/PythonScripts/requests/pbs/fic' + str(i) + '.pbs' with open(filepath, mode='w', encoding='utf-8') as file: file.write('#!/bin/bash\n') file.write('#PBS -l walltime=10:00:
00\n') file.write('#PBS -l nodes=1:ppn=12\n') file.write('#PBS -N Fiction' + str(i) + '\n') file.write('#PBS -q ichass\n') file.write('#PBS -m be\n') file.write('cd $PBS_O_WORKDIR\n') file.write('python3 extract.py -idfile
/projects/ichass/usesofscale/hathimeta/pre20cslices/slice' + str(i) + '.txt -g fic -v -sub -rh' + '\n')
ctuning/ck-env
module/artifact/module.py
Python
bsd-3-clause
12,275
0.037882
# # Collective Knowledge (artifact description (reproducibility, ACM meta, etc)) # # See CK LICENSE.txt for licensing details # See CK COPYRIGHT.txt for copyright details # # Developer: Grigori Fursin, [email protected], http://fursin.net # cfg={} # Will be updated by CK (meta description of this module) work={} # Will be updated by CK (temporal data) ck=None # Will be updated by CK (initialized CK kernel) # Local settings ############################################################################## # Initialize module def init(i): """ Input: {} Output: { return - return code = 0, if successful > 0, if error (error) - error text if return > 0 } """ return {'return':0} ############################################################################## def recursive_repos(i): import os repo=i['repo'] repo_deps=i.get('repo_deps',[]) level=i.get('level','') ilevel=i.get('ilevel',0) if ilevel>8: # Somewhere got into loop - quit # ck.out('Warning: you have a cyclic dependency in your repositories ...') return {'return':0, 'repo_deps':repo_deps} # Load repo r=ck.access({'action':'load', 'module_uoa':cfg['module_deps']['repo'], 'data_uoa':repo}) if r['return']>0: return r d=r['dict'] # Note that sometimes we update .ckr.json while CK keeps old deps cached p=d.get('path','') p1=os.path.join(p, ck.cfg['repo_file']) if os.path.isfile(p1): r=ck.load_json_file({'json_file':p1}) if r['return']==0: d=r['dict'].get('dict',{}) rd=d.get('repo_deps',{}) # print (level+repo) for q in rd: drepo=q['repo_uoa'] if drepo!=repo: repo_deps.append(drepo) r=recursive_repos({'repo':drepo, 'repo_deps':repo_deps, 'level':level+' ', 'ilevel':ilevel+1}) if r['return']>0: return r return {'return':0, 'repo_deps':repo_deps} ############################################################################## # prepare artifact snapshot def snapshot(i): """ Input: { repo - which repo to snapshot with all deps (file_name) - customize name ("ck-artifacts-" by default) (no_deps) - if 'yes', do not process repo dependencies (useful for results repo accompanying main repos) (copy_repos) - if 'yes', copy repositories instead of zipping (date) - use this date (YYYYMMDD) instead of current one } Output: { return - return code = 0, if successful > 0, if error (error) - error text if return > 0 } """ import os import platform import zipfile import shutil o=i.get('out','') repo=i.get('repo','') if repo=='': return {'return':1, 'error': '"repo" to snapshot is not defined'} no_deps=i.get('no_deps','')=='yes' copy_repos=i.get('copy_repos','')=='yes' force_clean=i.get('force_clean','')=='yes' # Preparing tmp directory where to zip repos and add scripts ... curdir0=os.getcwd() # ptmp=os.path.join(curdir0, 'tmp') import tempfile ptmp=os.path.join(tempfile.gettempdir(),'tmp-snapshot') if o=='con': ck.out('Temp directory: '+ptmp) ck.out('') if os.path.isdir(ptmp) and force_clean: shutil.rmtree(ptmp, onerror=ck.rm_read_only) if os.path.isdir(ptmp): r=ck.inp({'text':'Directory "'+ptmp+'" exists. 
Delete (Y/n)?'}) if r['return']>0: return r ck.out('') x=r['string'].strip().lower() if x=='' or x=='y' or x=='yes': r=ck.delete_directory({'path':ptmp}) if r['return']>0: return r if not os.path.isdir(ptmp): os.makedirs(ptmp) os.chdir(ptmp) curdir=os.getcwd() # Checking repo deps final_repo_deps=[] if not no_deps: if o=='con': ck.out('Checking dependencies on other repos ...') r=recursive_repos({'repo':repo}) if r['return']>0: return r # Removing redundant for q in reversed(r['repo_deps']): if q not in final_repo_deps: final_repo_deps.append(q) if repo not in final_repo_deps: final_repo_deps.append(repo) if o=='con': ck.out('') for q in final_repo_deps: ck.out(' * '+q) ck.out('') ck.out('Collecting revisions, can take some time ...') ck.out('') r=ck.reload_repo_cache({}) # Ignore errors pp=[] pp2={} il=0 path_to_main_repo='' for xrepo in final_repo_deps: # Reload repo to get UID r=ck.access({'action':'load', 'module_uoa':cfg['module_deps']['repo'], 'data_uoa':xrepo}) if r['return']>0: return r ruid=r['data_uid'] if ruid not in ck.cache_repo_info: return {'return':1, 'error':'"'+q+'" repo is not in cache - strange!'} # Get repo info qq=ck.cache_repo_info[ruid] d=qq['dict'] p=d.get('path','') if xrepo==repo: path_to_main_repo=p t=d.get('shared','') duoa=qq['data_uoa'] if t!='': if len(duoa)>il: il=len(duoa) url=d.get('url','') branch='' checkout='' if os.path.isdir(p): # Detect status pc=os.getcwd() os.chdir(p) # Get current branch r=ck.run_and_get_stdout({'cmd':['git','rev-parse','--abbrev-ref','HEAD']}) if r['return']==0 and r['return_code']==0: branch=r['stdout'].strip() # Get current ch
eckout r=ck.run_and_get_stdout({'cmd':['git','rev-parse','--short','HEAD']}) if r['
return']==0 and r['return_code']==0: checkout=r['stdout'].strip() os.chdir(pc) x={'branch':branch, 'checkout':checkout, 'path':p, 'type':t, 'url':url, 'data_uoa':duoa} else: x={'path':p, 'type':t, 'data_uoa':duoa} pp.append(x) pp2[duoa]=x if copy_repos: pu=os.path.join(ptmp,'CK') if not os.path.isdir(pu): os.mkdir(pu) pu1=os.path.join(pu,xrepo) if o=='con': ck.out(' * Copying repo '+xrepo+' ...') shutil.copytree(p,pu1,ignore=shutil.ignore_patterns('*.pyc', 'tmp', 'tmp*', '__pycache__')) # Copying Readme if exists fr='README.md' pr1=os.path.join(path_to_main_repo, fr) if os.path.isfile(pr1): pr2=os.path.join(ptmp, fr) if os.path.isfile(pr2): os.remove(pr2) shutil.copy(pr1,pr2) # Print if o=='con': ck.out('') for q in pp: name=q['data_uoa'] x=' * '+name+' '*(il-len(name)) branch=q.get('branch','') checkout=q.get('checkout','') url=q.get('url','') if branch!='' or checkout!='' or url!='': x+=' ( '+branch+' ; '+checkout+' ; '+url+' )' ck.out(x) os.chdir(curdir) # Archiving if o=='con': ck.out('') ck.out('Archiving ...') # Add some dirs and files to ignore for q in ['__pycache__', 'tmp', 'module.pyc', 'customize.pyc']: if q not in ck.cfg['ignore_directories_when_archive_repo']: ck.cfg['ignore_directories_when_archive_repo'].append(q) # Get current date in YYYYMMDD date=i.get('date','') if date=='': r=ck.get_current_date_time({}) if r['return']>0: return r a=r['array'] a1=str(a['date_year']) a2=str(a['date_month']) a2='0'*(2-len(a2))+a2 a3=str(a['date_day']) a3='0'*(2-len(a3))+a3 date=a1+a2+a3 date=date.strip() if not copy_repos: zips=[] for repo in final_repo_deps: if o=='con': ck.out('') ck.out(' * '+repo) ck.out('') an
rwl/PyCIM
CIM15/IEC61970/Informative/InfERPSupport/ErpInventory.py
Python
mit
2,914
0.001373
# Copyright (C) 2010-2011 Richard Lincoln # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to # deal in the Software without restriction, including without limitation the # rights to use, copy, modify, merge, publish, distribute, sublicense, and/or # sell copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCL
UDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A P
ARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. from CIM15.IEC61970.Core.IdentifiedObject import IdentifiedObject class ErpInventory(IdentifiedObject): """Utility inventory-related information about an item or part (and not for description of the item and its attributes). It is used by ERP applications to enable the synchronization of Inventory data that exists on separate Item Master databases. This data is not the master data that describes the attributes of the item such as dimensions, weight, or unit of measure - it describes the item as it exists at a specific location.Utility inventory-related information about an item or part (and not for description of the item and its attributes). It is used by ERP applications to enable the synchronization of Inventory data that exists on separate Item Master databases. This data is not the master data that describes the attributes of the item such as dimensions, weight, or unit of measure - it describes the item as it exists at a specific location. """ def __init__(self, Asset=None, status=None, *args, **kw_args): """Initialises a new 'ErpInventory' instance. @param Asset: @param status: """ self._Asset = None self.Asset = Asset self.status = status super(ErpInventory, self).__init__(*args, **kw_args) _attrs = [] _attr_types = {} _defaults = {} _enums = {} _refs = ["Asset", "status"] _many_refs = [] def getAsset(self): return self._Asset def setAsset(self, value): if self._Asset is not None: self._Asset._ErpInventory = None self._Asset = value if self._Asset is not None: self._Asset.ErpInventory = None self._Asset._ErpInventory = self Asset = property(getAsset, setAsset) status = None
oxfordinternetinstitute/scriptingcourse
DSR-Week 2/wk02_twitter_test.py
Python
gpl-3.0
1,313
0.033511
# -*- coding: utf-8 -*-# """ Basic Twitter Authentication requirements: Python 2.5+ tweepy (easy_install tweepy | pip install tweepy) """ __author__ = 'Bernie Hogan' __version__= '1.0' import string import codecs import os import pickle import copy import sys import json import webbrowser import tweepy from tweepy import Cursor import twitterhelpers as th def getFollowerCount(api, screen_name="BarackObama"): user = api.get_user(screen_name) return user.followers_count def getFollowingCount(api, screen_name="BarackObama"): user = api.get_user(screen_name) print user print dir(user) return user.friends_count if __name__=='__main__': CONSUMER_KEY = th.CONSUMER_KEY CONSUMER_SECRET = th.CONSUMER_SECRET auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET) ACCESS_TOKEN_SECRET = th.ACCESS_TOKEN_SECRET ACCESS_TOKEN = th.ACCESS_TOKEN auth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET) api = tweepy.API(auth) print "Now you have received an access token." print "O
r rather, your account has authorized this application to use the twitter api." print "You have this many hits to the API left this hour: " #
print json.dumps(api.rate_limit_status(), indent = 1) #['remaining_hits'] print getFollowerCount(api, "blurky") print getFollowingCount(api, "blurky")
dossorio/python-blog
reddit-data-extractor.py
Python
mit
891
0.001122
import json from urllib import request import pymongo connection = pymongo.MongoClient('mongodb://localhost') db = connection.reddit stories = db.stories # stories.drop() # req = request.Request('http://www.reddit.com/r/technology/.json') # req.add_header('User-agent', 'Mozilla/5.0') # reddit_page = request.urlopen(req) # # parsed_reddi
t = json.loads(reddit_page.read().decode()) # # print('Ad
ding reddit posts') # for item in parsed_reddit['data']['children']: # stories.insert_one(item['data']) # # print('Finished adding reddit posts') def find(): print('Keyword search started') query = {'title': {'$regex': 'apple|google', '$options': 'i'}} projection = {'title': 1, '_id': 0} try: cursor = stories.find(query, projection) except Exception as e: print('Unexpected error', type(e), e) for post in cursor: print(post) find()
thortex/rpi3-webiopi
webiopi_0.7.1/python/webiopi/devices/analog/mcp492X.py
Python
apache-2.0
1,847
0.005414
# Copyright 2012-2013 Eric Ptak - trouch.com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from webiopi.utils.types import toint from webiopi.devices.spi import SPI from webiopi.devices.analog import DAC class MCP492X(SPI, DAC): def __init__(self, chip, channelCount, vref): SPI.__init__(self, toint(chip), 0, 8, 10000000) DAC.__init__(self, channelCount, 12, float(vref)) self.buffered=False self.gain=False self.shutdown=False self.values = [0 for i in range(channelCount)] def __str__(self): return "MCP492%d(chip=%d)" % (self._analogCount, self.chip) def __analogRead__(self, channel, diff=False): return self.values[channel] def __analogWrite__(self, channel, value): d = bytearray(2) d[0] = 0 d[0] |= (channel
& 0x01) << 7 d[0] |= (self.buffered & 0x01) << 6 d[0] |= (not self.gain & 0x01) << 5 d[0] |= (not self.shutdown & 0x01) << 4 d[0] |= (value >> 8) & 0x0F d[1] = value & 0xFF self.writeBytes(d) self.values[channel] = value class MCP4921(MCP492X):
def __init__(self, chip=0, vref=3.3): MCP492X.__init__(self, chip, 1, vref) class MCP4922(MCP492X): def __init__(self, chip=0, vref=3.3): MCP492X.__init__(self, chip, 2, vref)
rankactive/cassandra-fdw
cassandra-fdw/__init__.py
Python
mit
4,389
0.00319
from multicorn import ForeignDataWrapper from cassandra_provider import CassandraProvider from properties import ISDEBUG import properties import schema_importer import time class CassandraFDW(ForeignDataWrapper): def __init__(self, options, columns): super(CassandraFDW, self).__init__(options, columns) self.init_options = options self.init_columns = columns self.cassandra_provider = None self.concurency_level = int(options.get('modify_concurency', properties.DEFAULT_CONCURENCY_LEVEL)) self.per_transaction_connection = options.get('per_transaction_connection', properties.PER_TRANSACTION_CONNECTION) == 'True' self.modify_items = [] def build_cassandra_provider(self): if self.cassandra_provider == None: self.cassandra_provider = CassandraProvider(self.init_options, self.init_columns) @classmethod def import_schema(self, schema, srv_options, options, restriction_type, restricts): return schema_importer.import_schema(schema, srv_options, options, restriction_type, restricts) def insert(self, new_values): if self.concurency_level > 1: self.modify_items.append(('insert', new_values)) if len(self.modify_items) >= properties.BATCH_MODIFY_THRESHOLD: self.end_modify() return new_values else: return self.cassandra_provider.insert(new_values) def delete(self, rowid): if self.concurency_level > 1: self.modify_items.append(('delete', rowid)) if len(self.modify_items) >= properties.BATCH_MODIFY_THRESHOLD: self.end_modify() return { } else: return self.cassandra_provider.delete(rowid) def update(self, rowid, new_values): if ISDEBUG: logger.log(u"requested update {0}".format(new_values)) self.insert(new_values) return new_values def execute(self, quals, columns, sort_keys=None): self.scan_start_time = time.time() return self.cassandra_provider.execute(quals, columns, sort_keys) def can_sort(self, sort_keys): return [] def begin(self, serializable): self.build_cassandra_provider() if ISDEBUG: logger.log("begin: {0}".format(serializable)) def commit(self): if ISDEBUG: logger.log("commit") if self.per_transaction_connection: self.close_cass_connection() pass def close_cass_connection(self): if self.cassandra_provider != None: self.cassandra_provider.close() self.cassandra_provider = None def end_modify(self): try: mod_len = len(self.modify_items) if mod_len > 0: if ISDEBUG: logger.log("end modify") logger.log("modify concurrency level: {0}".format(self.concurency_level)) self.cassandra_provider.execute_modify_items(self.modify_items, self.concurency_level) finally: self.modify_items = [] pass def explain(self, quals, columns, sortkeys=None, verbose=False): return self.cassandra_provider.build_select_stmt(quals, columns, self.cassandra_provider.allow_filtering, verbose) def end_scan(self): if ISDEBUG: logger.log("end_scan. Total time: {0} ms".format((time.time() - self.scan_start_time) * 1000)) pas
s def pre_commit(self): if ISDEBUG: logger.log("pre commit")
pass def rollback(self): if ISDEBUG: logger.log("rollback") pass def sub_begin(self, level): if ISDEBUG: logger.log("sub begin {0}".format(level)) pass def sub_commit(self, level): if ISDEBUG: logger.log("sub commit {0}".format(level)) pass def sub_rollback(self, level): if ISDEBUG: logger.log("sub rollback {0}".format(level)) pass @property def rowid_column(self): return self.cassandra_provider.get_row_id_column() def get_rel_size(self, quals, columns): return self.cassandra_provider.get_rel_size(quals, columns) def get_path_keys(self): self.scan_start_time = time.time() return self.cassandra_provider.get_path_keys()
lightningvapes/conky-ethereum-ticker-with-graph-chart
cb_balance_grabber.py
Python
gpl-3.0
267
0.011236
#! /usr/bin/env python import requests, json from os.path import exp
anduser from coinbase.wallet.client import Client home = expanduser('~') client = Client('YOUR_API_KEY', 'YOUR_API_SECRET') accounts = cli
ent.get_accounts() print accounts['data'][0]['balance']
mcdeoliveira/ctrl
test/test_packet.py
Python
apache-2.0
5,810
0.012909
import struct import numpy import io import pickle import pyctrl.packet as packet def testA(): # test A assert packet.pack('A','C') == b'AC' assert packet.pack('A','B') == b'AB' assert packet.pack('A','C') != b'AB' assert packet.unpack_stream(io.BytesIO(b'AC')) == ('A', 'C') assert packet.unpack_stream(io.BytesIO(b'AB')) == ('A', 'B') assert packet.unpack_stream(io.BytesIO(b'AB')) != ('A', 'C') def testC(): # test C assert packet.pack('C','C') == b'CC' assert packet.pack('C','B') == b'CB' assert packet.pack('C','C') != b'CB' assert packet.unpack_stream(io.BytesIO(b'CC')) == ('C', 'C') assert packet.unpack_stream(io.BytesIO(b'CB')) == ('C', 'B') assert packet.unpack_stream(io.BytesIO(b'CB')) != ('C', 'C') def testS(): # test S assert packet.pack('S','abc') == struct.pack('<cI3s', b'S', 3, b'abc') assert packet.pack('S','abcd') != struct.pack('<cI3s', b'S', 3, b'abc') assert packet.unpack_stream( io.BytesIO(struct.pack('<cI3s', b'S', 3, b'abc'))) == ('S', 'abc') assert packet.unpack_stream( io.BytesIO(struct.pack('<cI3s', b'S', 3, b'abc'))) != ('S', 'abcd') def testIFD(): # test I assert packet.pack('I',3) == struct.pack('<ci', b'I', 3) assert packet.pack('I',3) != struct.pack('<ci', b'I', 4) assert packet.unpack_stream( io.BytesIO(struct.pack('<ci', b'I', 3))) == ('I', 3) assert packet.unpack_stream( io.BytesIO(struct.pack('<ci', b'I', 4))) != ('I', 3) # test F assert packet.pack('F',3.3) == struct.pack('<cf', b'F', 3.3) assert packet.pack('F',3.3) != struct.pack('<cf', b'F', 4.3) assert packet.unpack_stream( io.BytesIO(struct.pack('<cf', b'F', numpy.float32(3.3)))) == ('F', numpy.float32(3.3)) assert packet.unpack_stream( io.BytesIO(struct.pack('<cf', b'F', 4.3))) != ('F', 3.3) # test D assert packet.pack('D',3.3) == struct.pack('<cd', b'D', 3.3) assert packet.pack('D',3.3) != struct.pack('<cd', b'D', 4.3) assert packet.unpack_stream( io.BytesIO(struct.pack('<cd', b'D', 3.3))) == ('D', 3.3) assert packet.unpack_stream( io.BytesIO(struct.pack('<cd', b'D', 4.3))) != ('D', 3.3) def testV(): # test VI vector = numpy.array((1,2,3), int) assert packet.pack('V',vector) == struct.pack('<ccIiii', b'V', b'I', 3, 1, 2, 3) (type, rvector) = packet.unpack_stream( io.BytesIO(struct.pack('<ccIiii', b'V', b'I', 3, 1, 2, 3))) assert type == 'V' assert numpy.all(rvector == vector) vector = numpy.array((1,-2,3), int) assert packet.pack('V',vector) == struct.pack('<ccIiii', b'V', b'I', 3, 1, -2, 3) (type, rvector) = packet.unpack_stream( io.BytesIO(struct.pack('<ccIiii', b'V', b'I', 3, 1, -2, 3))) assert type == 'V' assert numpy.all(rvector == vector) # test VF vector = numpy.array((1.3,-2,3), numpy.float32) assert packet.pack('V',vector) == struct.pack('<ccIfff', b'V', b'F', 3, 1.3, -2, 3) (type, rvector) = packet.unpack_stream( io.BytesIO(struct.pack('<ccIfff', b'V', b'F', 3, 1.3, -2, 3))) assert type == 'V' assert numpy.all(rvector == vector) # test VD vector = numpy.array((1.3,-2,3), float) assert pack
et.pack('V',vector) == struct.pack('<ccIddd', b'V', b'D', 3, 1.3, -2,
3) (type, rvector) = packet.unpack_stream( io.BytesIO(struct.pack('<ccIddd', b'V', b'D', 3, 1.3, -2, 3))) assert type == 'V' assert numpy.all(rvector == vector) def testM(): # test MI vector = numpy.array(((1,2,3), (3,4,5)), int) assert packet.pack('M',vector) == struct.pack('<cIccIiiiiii', b'M', 2, b'V', b'I', 6, 1, 2, 3, 3, 4, 5) (type, rvector) = packet.unpack_stream( io.BytesIO(struct.pack('<cIccIiiiiii', b'M', 2, b'V', b'I', 6, 1, 2, 3, 3, 4, 5))) assert type == 'M' assert numpy.all(rvector == vector) vector = numpy.array(((1,-2,3), (3,4,-5)), int) assert packet.pack('M',vector) == struct.pack('<cIccIiiiiii', b'M', 2, b'V', b'I', 6, 1, -2, 3, 3, 4, -5) (type, rvector) = packet.unpack_stream( io.BytesIO(struct.pack('<cIccIiiiiii', b'M', 2, b'V', b'I', 6, 1, -2, 3, 3, 4, -5))) assert type == 'M' assert numpy.all(rvector == vector) # test MF vector = numpy.array(((1.3,-2,3), (0,-1,2.5)), numpy.float32) assert packet.pack('M',vector) == struct.pack('<cIccIffffff', b'M', 2, b'V', b'F', 6, 1.3, -2, 3, 0, -1, 2.5) (type, rvector) = packet.unpack_stream( io.BytesIO(struct.pack('<cIccIffffff', b'M', 2, b'V', b'F', 6, 1.3, -2, 3, 0, -1, 2.5))) assert type == 'M' assert numpy.all(rvector == vector) # test MD vector = numpy.array(((1.3,-2,3), (0,-1,2.5)), numpy.float) assert packet.pack('M',vector) == struct.pack('<cIccIdddddd', b'M', 2, b'V', b'D', 6, 1.3, -2, 3, 0, -1, 2.5) (type, rvector) = packet.unpack_stream( io.BytesIO(struct.pack('<cIccIdddddd', b'M', 2, b'V', b'D', 6, 1.3, -2, 3, 0, -1, 2.5))) assert type == 'M' assert numpy.all(rvector == vector) def testP(): vector = numpy.array(((1.3,-2,3), (0,-1,2.5)), numpy.float) string = packet.pack('P', vector) (type, rvector) = packet.unpack_stream(io.BytesIO(string)) assert type == 'P' assert numpy.all(rvector == vector) def testKR(): args = { 'a': 1, 'b': 2 } string = packet.pack('K', args) (type, rargs) = packet.unpack_stream(io.BytesIO(string)) assert type == 'K' assert (args == rargs) args = ('a', 1, 'b', 2) string = packet.pack('R', args) (type, rargs) = packet.unpack_stream(io.BytesIO(string)) assert type == 'R' assert (args == rargs) if __name__ == "__main__": testA() testC() testS() testIFD() testV() testM() testP() testKR()
rvianello/rdkit
rdkit/sping/TK/pidTK.py
Python
bsd-3-clause
16,537
0.010099
""" A Tkinter based backend for piddle. Perry A. Stoll Created: February 15, 1999 Requires PIL for rotated string support. Known Problems: - Doesn't handle the interactive commands yet. - PIL based canvas inherits lack of underlining strings from piddlePIL You can find the latest version of this file: via http://piddle.sourceforge.net """ # we depend on PIL for rotated strings so watch for changes in PIL import Tkinter, tkFont tk = Tkinter import rdkit.sping.pid __version__ = "0.3" __date__ = "April 8, 1999" __author__ = "Perry Stoll, [email protected] " # fixups by chris lee, [email protected] # $Id$ # - added drawImage scaling support # - shifted baseline y parameter in drawString to work around font metric # shift due to Tkinter's Canvas text_item object # - fixed argument names so that argument keywords agreed with piddle.py (passes discipline.py) # # # ToDo: for TKCanvas # make sure that fontHeight() is returnng appropriate measure. Where is this info? # # $Log: pidTK.py,v $ # Revision 1.1 2002/07/12 18:34:47 glandrum # added # # Revision 1.6 2000/11/03 00:56:57 clee # fixed sizing error in TKCanvas # # Revision 1.5 2000/11/03 00:25:37 clee # removed reference to "BaseTKCanvas" (should just use TKCanvas as default) # # Revision 1.4 2000/10/29 19:35:31 clee # eliminated BaseTKCanvas in favor of straightforward "TKCanvas" name # # Revision 1.3 2000/10/29 01:57:41 clee # - added scrollbar support to both TKCanvas and TKCanvasPIL # - added getTKCanvas() access method to TKCanvasPIL # # Revision 1.2 2000/10/15 00:47:17 clee # commit before continuing after getting pil to work as package # # Revision 1.1.1.1 2000/09/27 03:53:15 clee # Simple Platform Independent Graphics # # Revision 1.6 2000/04/06 01:55:34 pmagwene # - TKCanvas now uses multiple inheritance from Tkinter.Canvas and piddle.Canvas # * for the most part works much like a normal Tkinter.Canvas object # - TKCanvas draws rotated strings using PIL image, other objects using normal Tk calls # - Minor fixes to FontManager and TKCanvas so can specify root window other than Tk() # - Removed Quit/Clear buttons from default canvas # # Revision 1.5 2000/03/12 07:07:42 clee # sync with 1_x # # Revision 1.4 2000/02/26 23:12:42 clee # turn off compression by default on piddlePDF # add doc string to new pil-based piddleTK # # Revision 1.3 2000/02/26 21:23:19 clee # update that makes PIL based TKCanvas the default Canvas for TK. # Updated piddletest.py. Also, added clear() methdo to piddlePIL's # canvas it clears to "white" is this correct behavior? Not well # specified in current documents. # class FontManager: __alt_faces = {"serif": "Times", "sansserif": "Helvetica", "monospaced": "Courier"} def __init__(self, master): self.master = master self.font_cache = {} # the main interface def stringWidth(self, s, font): tkfont = self.piddleToTkFont(font) return tkfont.measure(s) def fontHeight(self, font): tkfont = self.piddleToTkFont(font) return self._tkfontHeight(tkfont) def fontAscent(self, font): tkfont = self.piddleToTkFont(font) return self._tkfontAscent(tkfont) def fontDescent(self, font): tkfont = self.piddleToTkFont(font) return self._tkfontDescent(tkfont) def getTkFontString(self, font): """Return a string suitable to pass as the -font option to to a Tk widget based on the piddle-style FONT""" tkfont = self.piddleToTkFont(font) # XXX: should just return the internal tk font name? 
# return str(tkfont) return ('-family %(family)s -size %(size)s ' '-weight %(weight)s -slant %(slant)s ' '-underline %(underline)s' % tkfont.config()) def getTkFontName(self, font): """Return a the name associated with the piddle-style FONT""" tkfont = self.piddleToTkFont(font) return str(tkfont) def piddleToTkFont(self, font): """Return a tkFont instance based on the pid-style FONT""" if font is None: return '' #default 12 pt, "Times", non-bold, non-italic size = 12 family = "Times" weight = "normal" slant = "roman" underline = "false" if font.face: # check if the user specified a generic face type # like serif or monospaced. check is case-insenstive. f = font.face.lower() if f in self.__alt_faces: family = self.__alt_faces[f] else: family = font.face size = font.size or 12 if font.bold: weight = "bold" if font.italic: slant = "italic" if font.underline: underline = 'true' # ugh... is there a better way to do this? key = (family, size, weight, slant, underline) # check if we've already seen this font. if key in self.font_cache: # yep, don't bother creating a new one. just fetch it. font = self.font_cache[key] else: # nope, let's create a new tk font. # this way we will return info about the actual font # selected by Tk, which may be different than what we ask # for if it's not availible. font = tkFont.Font(self.master, family=family, size=size, weight=weight, slant=slant, underline=underline) self.font_cache[(family, size, weight, slant, underline)] = font return font def _tkfontAscent(self, tkfont): return tkfont.metrics("ascent") def _tkfontDescent(self, tkfont): return tkfont.metrics("descent") class TKCanvas(tk.Canvas, rdkit.sping.pid.Canvas): __TRANSPARENT = '' # transparent for Tk color def __init__(self, size=(300, 300), name="sping.TK", master=None,
scrollingViewPortSize=None, # a 2-tuple to define the size of the viewport **kw): """This canvas allows you to add a tk.Canvas with a sping API for drawing. To add scrollbars, the simplest method is to set the 'scrollingViewPortSize' equal to a tuple that describes the width and height of the visible portion
of the canvas on screen. This sets scrollregion=(0,0, size[0], size[1]). Then you can add scrollbars as you would any tk.Canvas. Note, because this is a subclass of tk.Canvas, you can use the normal keywords to specify a tk.Canvas with scrollbars, however, you should then be careful to set the "scrollregion" option to the same size as the 'size' passed to __init__. Tkinter's scrollregion option essentially makes 'size' ignored. """ rdkit.sping.pid.Canvas.__init__(self, size=size, name=size) if scrollingViewPortSize: # turn on ability to scroll kw["scrollregion"] = (0, 0, size[0], size[1]) kw["height"] = scrollingViewPortSize[0] kw["width"] = scrollingViewPortSize[1] else: kw["width"] = size[0] kw["height"] = size[1] apply(tk.Canvas.__init__, (self, master), kw) # use kw to pass other tk.Canvas options self.config(background="white") self.width, self.height = size self._font_manager = FontManager(self) self._configure() self._item_ids = [] self._images = [] def _configure(self): pass def _display(self): self.flush() self.mainloop() def _quit(self): self.quit() # Hmmm...the postscript generated by this causes my Ghostscript to barf... def _to_ps_file(self, filename): self.postscript(file=filename) def isInteractive(self): return 0 def onOver(self, event): pass def onClick(self, event): pass def onKey(self, event): pass def flush(self): tk.Canvas.update(self) def clear(self): map(self.delete, self._item_ids) self._item_ids = [] def _colorToTkColor(self, c): return "#%02X%02X%02X" % (int(c.red * 255), int(c.green * 255), int(c.blue * 255)) def _getTkColor(self, color, defaultColor): if color is None: color = defaultColor if color is rdkit.sping.pid.transparent: color = self.__TRANSPARENT else: color = self._colorToTkColor(color) return color def drawLine(self, x1, y1, x2, y2, color=None, width=None): color = self._getTkColor(
themiszamani/okeanos-LoD
core/fokia/provisioner.py
Python
agpl-3.0
21,373
0.001544
from __future__ import (absolute_import, division, print_function, unicode_literals) import logging import re logging.basicConfig(level=logging.INFO) logger = logging.getLogger(__name__) from kamaki.clients import astakos, cyclades from kamaki.clients import ClientError from kamaki.cli.config import Config as KamakiConfig from fokia.utils import patch_certs from fokia.cluster_error_constants import * from Crypto.PublicKey import RSA from base64 import b64encode storage_templates = ['drdb', 'ext_vlmc'] class Provisioner: """ provisions virtual machines on ~okeanos """ def __init__(self, auth_token, cloud_name=None): if auth_token is None and cloud_name is not None: # Load .kamakirc configuration logger.info("Retrieving .kamakirc configuration") self.config = KamakiConfig() patch_certs(self.config.get('global', 'ca_certs')) cloud_section = self.config._sections['cloud'].get(cloud_name) if not cloud_section: message = "Cloud '%s' was not found in you .kamakirc configuration file. " \ "Currently you have availablie in your configuration these clouds: %s" raise KeyError(message % (cloud_name, self.config._sections['cloud'].keys())) # Get the authentication url and token auth_url, auth_token = cloud_section['url'], cloud_section['token'] else: auth_url = "https://accounts.okeanos.grnet.gr/identity/v2.0" logger.info("Initiating Astakos Client") self.astakos = astakos.AstakosClient(auth_url, auth_token) logger.info("Retrieving cyclades endpoint url") compute_url = self.astakos.get_endpoint_url( cyclades.CycladesComputeClient.service_type) logger.info("Initiating Cyclades client") self.cyclades = cyclades.CycladesComputeClient(compute_url, auth_token) # Create the network client networkURL = self.astakos.get_endpoint_url( cyclades.CycladesNetworkClient.service_type) self.network_client = cyclades.CycladesNetworkClient(networkURL, auth_token) # Constants self.Bytes_to_GB = 1024 * 1024 * 1024 self.Bytes_to_MB = 1024 * 1024 self.master = None self.ips = None self.slaves = None self.vpn = None self.subnet = None self.private_key = None self.image_id = 'c6f5adce-21ad-4ce3-8591-acfe7eb73c02' """ FIND RESOURCES """ def find_flavor(self, **kwargs): """ :param kwargs: should contains the keys that specify the specs :return: first flavor objects that matches the specs criteria """ # Set all the default parameters kwargs.setdefault("vcpus", 1) kwargs.setdefault("ram", 1024) kwargs.setdefault("disk", 40) kwargs.setdefault("SNF:allow_create", True) logger.info("Retrieving flavor") for flavor in self.cyclades.list_flavors(detail=True): if all([kwargs[key] == flavor[key] for key in set(flavor.keys()).intersection(kwargs.keys())]): return flavor return None def find_image(self, **kwargs): """ :param image_name: Name of the image to filter by :param kwargs: :return: first image object that matches the name criteria """ image_name = kwargs['image_name'] logger.info("Retrieving image") for image in self.cyclades.list_images(detail=True): if image_name in image['name']: return image return None def find_project_id(self, **kwargs): """ :param kwargs: name, state, owner and mode to filter project by :return: first project_id that matches the project name """ filter = { 'name': kwargs.get("project_name"), 'state': kwargs.get("project_state"), 'owner': kwargs.get("project_owner"), 'mode': kwargs.get("project_mode"), } logger.info("Retrieving project") return self.astakos.get_projects(**filter)[0] """ CREATE RESOURCES """ def create_lambda_cluster(self, vm_name, wait=True, **kwargs): """ :param vm_name: hostname of the 
master :param kwargs: contains specifications of the vms. :return: dictionary object with the nodes of the cluster if it was successfully created """ quotas = self.get_quotas()
vcpus = kwargs['slaves'] * kwargs['vcpus_slave'] + kwargs['vcpus_master'] ram = kwargs['slaves'] * kwargs['ram_slave'] + kwargs['ram_master'] disk = kwargs['slaves'] * kwargs['disk_slave'] + kwargs['disk_master'] project_id = self.find_project_id(**kwargs)['id'] cluster_size = kwargs['slaves'] + 1 response = self.check_all_resources(quotas, cluster_size=cluster_size,
vcpus=vcpus, ram=ram, disk=disk, ip_allocation=kwargs['ip_allocation'], network_request=kwargs['network_request'], project_name=kwargs['project_name']) if response: # Check flavors for master and slaves master_flavor = self.find_flavor(vcpus=kwargs['vcpus_master'], ram=kwargs['ram_master'], disk=kwargs['disk_master']) if not master_flavor: msg = 'This flavor does not allow create.' raise ClientError(msg, error_flavor_list) slave_flavor = self.find_flavor(vcpus=kwargs['vcpus_slave'], ram=kwargs['ram_slave'], disk=kwargs['disk_slave']) if not slave_flavor: msg = 'This flavor does not allow create.' raise ClientError(msg, error_flavor_list) # Get ssh keys key = RSA.generate(2048) self.private_key = key.exportKey('PEM') pub_key = key.publickey().exportKey('OpenSSH') + ' root' public = dict(contents=b64encode(pub_key), path='/root/.ssh/id_rsa.pub', owner='root', group='root', mode=0600) authorized = dict(contents=b64encode(pub_key), path='/root/.ssh/authorized_keys', owner='root', group='root', mode=0600) private = dict(contents=b64encode(self.private_key), path='/root/.ssh/id_rsa', owner='root', group='root', mode=0600) master_personality = [authorized, public, private] slave_personality = [authorized] # Create private network for cluster self.vpn = self.create_vpn('lambda-vpn', project_id=project_id) vpn_id = self.vpn['id'] self.create_private_subnet(vpn_id) master_ip = None slave_ips = [None] * kwargs['slaves'] # reserve ip if kwargs['ip_allocation'] in ["master", "all"]: master_ip = self.reserve_ip(project_id=project_id) if kwargs['ip_allocation'] == "all": slave_ips = [self.reserve_ip(project_id=project_id) for i in range(kwargs['slaves'])] self.ips = [ip for ip in [master_ip] + slave_ips if ip] self.master = self.create_vm(vm_name=vm_name, ip=master_ip, net_id=vpn_id, flavor=master_flavor, personality=master_personality, **kwargs) # Create slaves self.slaves = list()
redcurrant/redcurrant
svc-monitor/contrib/rainx-monitor.py
Python
lgpl-3.0
1,285
0.010117
#!/usr/bin/python
import sys
import urllib2

RAINX_STAT_KEYS = [
    ("rainx.reqpersec", "total_reqpersec"),
    ("rainx.reqputpersec", "put_reqpersec"),
    ("rainx.reqgetpersec", "get_reqpersec"),
    ("rainx.avreqtime", "total_avreqtime"),
    ("rainx.avputreqtime", "put_avreqtime"),
    ("rainx.avgetreqtime", "get_avreqtime"),
]


def parse_info(stream):
    data = {}
    for line in stream.readlines():
        parts = line.split()
        if len(parts) > 1:
            # try to cast value to int or float
            try:
                value = int(parts[1])
            except ValueError:
                try:
                    value = float(parts[1])
                except ValueError:
                    value = parts[1]
            data[parts[0]] = value
        else:
            data[parts[0]] = None
    return data


def get_stat_lines(url, stat_keys):
    stream = urllib2.urlopen(url)
    data = parse_info(stream)
    stream.close()
    stats = [("stat.%s = %s" % (k[1], str(data[k[0]])))
             for k in stat_keys if k[0] in data]
    return stats


def main(args):
    ip_port = args[1].split("|")[2]
    stats_url = "http://%s/stat" % ip_port
    for stat in get_stat_lines(stats_url, RAINX_STAT_KEYS):
        print stat


if __name__ == "__main__":
    main(sys.argv)
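To show what the helpers above actually emit, here is a short, hypothetical illustration: parse_info is fed a fake /stat payload (the counter values are invented) and the resulting lines are printed the same way get_stat_lines formats them.

# Illustration only -- assumes the functions above are in scope; the payload is invented.
from StringIO import StringIO

sample = StringIO("rainx.reqpersec 12.5\n"
                  "rainx.avreqtime 3\n"
                  "route /stat\n")

data = parse_info(sample)
# values are cast to int/float when possible, e.g.
# {'rainx.reqpersec': 12.5, 'rainx.avreqtime': 3, 'route': '/stat'}

# only keys listed in RAINX_STAT_KEYS are reported, under their short names
for key, short in RAINX_STAT_KEYS:
    if key in data:
        print "stat.%s = %s" % (short, data[key])
# -> stat.total_reqpersec = 12.5
# -> stat.total_avreqtime = 3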
EBI-Metagenomics/emgapi
emgapi/migrations/0001_initial.py
Python
apache-2.0
19,658
0.006511
# -*- coding: utf-8 -*- # Generated by Django 1.11.1 on 2017-06-28 20:51 from __future__ import unicode_literals from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies = [ ] operations = [ migrations.CreateModel( name='PipelineRelease', fields=[ ('pipeline_id', models.AutoField(db_column='PIPELINE_ID', primary_key=True, serialize=False)), ('description', models.TextField(blank=True, db_column='DESCRIPTION', null=True)), ('changes', models.TextField(db_column='CHANGES')), ('release_version', models.CharField(db_column='RELEASE_VERSION', max_length=20)), ('release_date', models.DateField(db_column='RELEASE_DATE')), ], options={ 'db_table': 'PIPELINE_RELEASE', }, ), migrations.CreateModel( name='PipelineTool', fields=[ ('tool_id', models.SmallIntegerField(db_column='TOOL_ID', primary_key=True, serialize=False)), ('tool_name', models.CharField(db_column='TOOL_NAME', max_length=30)), ('description', models.TextField(db_column='DESCRIPTION')), ('web_link', models.CharField(blank=True, db_column='WEB_LINK', max_length=500, null=True)), ('version', models.CharField(db_column='VERSION', max_length=30)), ('exe_command', models.CharField(db_column='EXE_COMMAND', max_length=500)), ('installation_dir', models.CharField(blank=True, db_column='INSTALLATION_DIR', max_length=200, null=True)), ('configuration_file', models.TextField(blank=True, db_column='CONFIGURATION_FILE', null=True)), ('notes', models.TextField(blank=True, db_column='NOTES', null=True)), ], options={ 'db_table': 'PIPELINE_TOOL', }, ), migrations.CreateModel( name='PipelineReleaseTool', fields=[ ('pipeline', models.ForeignKey(db_column='PIPELINE_ID', on_delete=django.db.models.deletion.DO_NOTHING, primary_key=True, serialize=False, to='emgapi.PipelineRelease')), ('tool', models.ForeignKey(db_column='TOOL_ID', on_delete=django.db.models.deletion.DO_NOTHING, to='emgapi.PipelineTool')), ('tool_group_id', models.DecimalField(db_column='TOOL_GROUP_ID', decimal_places=3, max_digits=6)), ('how_tool_used_desc', models.TextField(db_column='HOW_TOOL_USED_DESC')), ], options={ 'db_table': 'PIPELINE_RELEASE_TOOL', }, ), migrations.CreateModel( name='AnalysisStatus', fields=[ ('analysis_status_id', models.AutoField(db_column='ANALYSIS_STATUS_ID', primary_key=True, serialize=False)), ('analysis_status', models.CharField(db_column='ANALYSIS_STATUS', max_length=25)), ], options={ 'db_table': 'ANALYSIS_STATUS', }, ), migrations.CreateModel( name='BiomeHierarchyTree', fields=[ ('biome_id', models.SmallIntegerField(db_column='BIOME_ID', primary_key=True, serialize=False)), ('biome_name', models.CharField(db_column='BIOME_NAME', max_length=60)), ('lft', models.SmallIntegerField(db_column='LFT')), ('rgt', models.SmallIntegerField(db_column='RGT')), ('depth', models.IntegerField(db_column='DEPTH')), ('lineage', models.CharField(db_column='LINEAGE', max_length=500)), ], options={ 'db_table': 'BIOME_HIERARCHY_TREE', }, ), migrations.CreateModel( name='Publication', fields=[ ('pub_id', models.AutoField(db_column='PUB_ID', primary_key=True, serialize=False)), ('authors', models.CharField(blank=True, db_column='AUTHORS', max_length=4000, null=True)), ('doi', models.CharField(blank=True, db_column='DOI', max_length=1500, null=True)), ('isbn', models.CharField(blank=True, db_column='ISBN', max_length=100, null=True)), ('iso_journal', models.CharField(blank=True, db_column='ISO_JOURNAL', max_length=255, null=True)), ('issue', models.CharField(blank=True, db_column='ISSUE', 
max_length=55, null=True)), ('medline_journal', models.CharField(blank=True, db_column='MEDLINE_JOURNAL', max_length=255, null=True)), ('pub_abstract', models.TextField(blank=True, db_column='PUB_ABSTRACT', null=True)), ('pubmed_central_id', models.IntegerField(blank=True, db_column='PUBMED_CENTRAL_ID', null=True)), ('pubmed_id', models.IntegerField(blank=True, db_column='PUBMED_ID', null=True)), ('pub_title', models.CharField(db_column='PUB_TITLE', max_length=740)), ('raw_pages', models.CharField(blank=True, db_column='RAW_PAGES', max_length=30, null=True)), ('url', models.CharField(blank=True, db_column='URL', max_length=740, null=True)), ('volume', models.CharField(blank=True, db_column='VOLUME', max_length=55, null=True)), ('published_year', models.SmallIntegerField(blank=True, db_column='PUBLISHED_YEAR', null=True)), ('pub_type', models.CharField(blank=True, db_column='PUB_TYPE', max_length=150, null=True)), ], options={ 'db_table': 'PUBLICATION', }, ), migrations.CreateModel( name='Study', fields=[ ('study_id', models.AutoField(db_column='STUDY_ID', primary_key=True, serialize=False)), ('centre_name', models.CharField(blank=True, db_column='CENTRE_NAME', max_length=255, null=True)), ('experimental_factor', models.CharField(blank=True, db_column='EXPERIMENTAL_FACTOR', max_length=255, null=True)), ('is_public', models.IntegerField(blank=True, db_column='IS_PUBLIC', null=True)), ('ncbi_project_id', models.IntegerField(blank=True, db_column='NCBI_PROJECT_ID', null=True)), ('public_release_date', models.DateField(blank=True, db_column='PUBLIC_RELEASE_DATE', null=True)), ('study_abstract', models.TextField(blank=True, db_column='STUDY_ABSTRACT', null=True)), ('ext_study_id', models.CharField(db_column='EXT_STUDY_ID', max_length=18)), ('study_name', models.CharField(blank=True, db_column='STUDY_NAME', max_length=255, null=True)), ('study_status', models.CharField(blank=True, db_column='STUDY_STATUS', max_length=30, null=True)), ('data_origination', models.CharField(blank=True, db_column='DATA_ORIGINATION', max_length=20, null=True)), ('author_email', models.CharField(blank=True, db_column='AUTHOR_EMAIL', max_length=100, null=True)),
('author_name', models.CharField(blank=True, db_column='AUTHOR_NAME', max_length=100, null=True)), ('last_update', models.DateTimeField(db_column='LAST_UPDATE')), ('submission_account_id', models.CharField(blank=True, db_column='SUBMISSION_ACC
OUNT_ID', max_length=15, null=True)), ('result_directory', models.CharField(blank=True, db_column='RESULT_DIRECTORY', max_length=100, null=True)), ('first_created', models.DateTimeField(db_column='FIRST_CREATED')), ('project_id', models.CharField(blank=True, db_column='PROJECT_ID', max_length=18, null=True)), ('biome', models.ForeignKey(db_column='BIOME_ID', on_delete=django.db.models.deletion.DO_NOTHING, to='emgapi.BiomeHierarchyTree')), # ('publications', models.ManyToManyField(through='emgapi.StudyPublication', to='emgapi.Publication')), ], options={ 'db_table': 'STUD
mgrouchy/django-stronghold
test_project/test_project/wsgi.py
Python
mit
402
0
""" WSGI config for test_project project. It exposes the WSGI callable as a module-level variable named ``application``. For more information on this file, see
https://docs.djangoproject.com/en/stable/howto/deployment/wsgi/ """ import os from django.core.wsgi import get_wsgi_application os.environ.setdefault("DJANGO_SETTI
NGS_MODULE", "test_project.settings") application = get_wsgi_application()
jokuf/hack-blog
posts/migrations/0006_auto_20170327_1906.py
Python
mit
843
0.003559
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-03-27 19:06
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('posts', '0005_post_author'),
    ]

    operations = [
        migrations.CreateModel(
            name='PostImage',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('alt_text', models.CharField(blank=True, max_length=96, null=True)),
                ('image', models.ImageField(upload_to='')),
            ],
        ),
        migrations.AddField(
            model_name='post',
            name='images',
            field=models.ManyToManyField(related_name='posts', to='posts.PostImage'),
        ),
    ]
ptitjes/quodlibet
tests/test_util_config.py
Python
gpl-2.0
11,883
0
# This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. import os from tests import TestCase, mkstemp from .helper import temp_filename from quodlibet.util.config import Config, Error, ConfigProxy class TConfig(TestCase): def test_set_default_only(self): conf = Config() self.assertRaises(Error, conf.set, "foo", "bar", 1) conf.defaults.add_section("foo") conf.set("foo", "bar", 1) def test_options(self): conf = Config() self.assertRaises(Error, conf.options, "foo") conf.defaults.add_section("foo") self.assertEqual(conf.options("foo"), []) conf.defaults.set("foo", "bar", 1) conf.defaults.set("foo", "blah", 1) conf.set("foo", "blah", 1) conf.set("foo", "quux", 1) self.assertEqual(conf.options("foo"), ['blah', 'quux', 'bar']) conf.defaults.clear() def test_options_no_default(self): conf = Config() conf.add_section("foo") self.assertEqual(conf.options("foo"), []) def test_has_section(self): conf = Config() self.assertFalse(conf.has_section("foo")) conf.defaults.add_section("foo") self.assertTrue(conf.has_section("foo")) conf.add_section("foo") conf.defaults.clear() self.assertTrue(conf.has_section("foo")) conf.clear() self.assertFalse(conf.has_section("foo")) def test_read_garbage_file(self): conf = Config() garbage = b"\xf1=\xab\xac" fd, filename = mkstemp() os.close(fd) with open(filename, "wb") as f: f.write(garbage) self.assertRaises(Error, conf.read, filename) os.remove(filename) def test_set(self): conf = Config() conf.add_section("foo") conf.set("foo", "bar", 1) self.failUnlessEqual(conf.get("foo", "bar"), "1") self.failUnlessEqual(conf.getint("foo", "bar"), 1) def test_setbytes(self): conf = Config() conf.add_section("foo") conf.setbytes("foo", "bar", b"\xff\xff") assert conf.getbytes("foo", "bar") == b"\xff\xff" def test_getbytes(self): conf = Config() assert conf.getbytes("foo", "bar", b"\xff") == b"\xff" def test_reset(self): conf = Config() conf.defaults.add_section("player") conf.defaults.set("player", "backend", "blah") conf.set("player", "backend", "foo") self.assertEqual(conf.get("player", "backend"), "foo") conf.reset("player", "backend") conf.defaults.set("player", "backend", "blah_new") self.assertEqual(conf.get("player", "backend"), "blah_new") def test_reset_no_section(self): conf = Config() conf.defaults.add_section("player") conf.defaults.set("player", "backend", "blah") conf.reset("player", "backend") assert conf.get("player", "backend") == "blah" def test_initial_after_set(self): conf = Config() conf.add_section("player") conf.set("player", "backend", "orig") conf.defaults.add_section("player") conf.defaults.set("player", "backend", "initial") self.assertEqual(conf.get("player", "backend"), "orig") self.assertEqual(conf.defaults.get("player", "backend"), "initial") conf.reset("player", "backend") self.assertEqual(conf.get("player", "backend"), "initial") def test_get_fallback_default(self): conf = Config() conf.defaults.add_section("get") self.assertRaises(Error, conf.get, "get", "bar") conf.defaults.set("get", "bar", 1) self.assertEqual(conf.get("get", "bar"), "1") conf.defaults.add_section("getboolean") self.assertRaises(Error, conf.getboolean, "getboolean", "bar") conf.defaults.set("getboolean", "bar", True) self.assertEqual(conf.getboolean("getboolean", "bar"), True) conf.defaults.add_section("getfloat") self.assertRaises(Error, conf.getfloat, "getfloat", "bar") 
conf.defaults.set("getfloat", "bar", 1.0) self.assertEqual(conf.getfloat("getfloat", "bar"), 1.0) conf.defaults.add_section("getint") self.assertRaises(Error, conf.getint, "getint", "bar") conf.defaults.set("getint", "bar", 42) self.assertEqual(conf.getint("getint", "bar"), 42) conf.defaults.add_section("getlist") self.assertRaises(Error, conf.getlist, "getlist", "bar") conf.defaults.setlist("getlist", "bar", [1, 2, 3]) self.assertEqual(conf.getlist("getlist", "bar"), ["1", "2", "3"]) def test_get(self): conf = Config() conf.add_section("foo") conf.set("foo", "int", "1") conf.set("foo", "float", "1.25") conf.set("foo", "str", "foobar") conf.set("foo", "bool", "True") self.failUnlessEqual(conf.getint("foo", "int"), 1) self.failUnlessEqual(conf.getfloat("foo", "float"), 1.25) self.failUnlessEqual(conf.get("foo", "str"), "foobar") self.failUnlessEqual(conf.getboolean("foo", "bool"), True) def test_get_invalid_data(self): conf = Config() conf.add_section("foo") conf.set("foo", "bla", "xx;,,;\n\n\naa") self.assertTrue(conf.getboolean("foo", "bla", True)) self.assertEqual(conf.getint("foo", "bla", 42), 42) self.assertEqual(conf.getfloat("foo", "bla", 1.5), 1.5) self.assertEqual(conf.getstringlist("foo", "bla", ["baz"]), ["baz"]) def test_getint_float(self): conf = Config() conf.add_section("foo") conf.set("foo", "float", "1.25") self.assertEqual(conf.getint("foo", "float"), 1) def test_get_default(self): conf = Config() conf.add_section("foo") self.failUnlessEqual(conf.getboolean("foo", "nothing", True), True) self.failUnlessEqual(conf.getint("foo", "nothing", 42), 42) self.failUnlessEqual(conf.getfloat("foo", "nothing", 42.42), 42.42) self.failUnlessEqual(conf.get("foo", "nothing", "foo"), "foo") def test_stringlist_simple(self): conf = Config() conf.add_section("foo") self.failIf(conf.get("foo", "bar", None)) vals = ["one",
"two", "three"] conf.setstringlist("foo", "bar", vals) self.failUnlessEqual(conf.getstringlist("foo", "bar"), vals)
def test_stringlist_mixed(self): conf = Config() conf.add_section("foo") self.failIf(conf.get("foo", "bar", None)) conf.setstringlist("foo", "bar", ["one", 2]) self.failUnlessEqual(conf.getstringlist("foo", "bar"), ["one", "2"]) def test_stringlist_quoting(self): conf = Config() conf.add_section("foo") self.failIf(conf.get("foo", "bar", None)) vals = ["foo's gold", "bar, \"best\" 'ever'", u"le goût d'œufs à Noël"] conf.setstringlist("foo", "bar", vals) self.failUnlessEqual(conf.getstringlist("foo", "bar"), vals) def test_stringlist_spaces(self): conf = Config() conf.add_section("foo") vals = [" ", " ", " \t ", " \n \n"] conf.setstringlist("foo", "bar", vals) self.failUnlessEqual(conf.getstringlist("foo", "bar"), vals) def test_stringlist_invalid_encoding(self): conf = Config() conf.add_section("foo") conf.setbytes("foo", "bar", b"\xff\xff\xff\xff\xff\xff") def test_getlist(self): conf = Config() conf.add_section("foo") self.assertEqual(conf.getlist("foo", "bar", ["arg"]), ["arg"]) conf.set("foo", "bar", "abc,fo:o\\,bar") self.assertEqual(conf.getlist("foo", "bar"), ["abc", "fo:o,bar"]) self.assertEqual(conf.getlist("foo", "bar", sep=":"), ["abc,fo", "o\\,bar"]) conf.set("foo", "bar", "") self.assertEqual(conf.getlist("foo", "bar"), [""]) def test_setlist(self): conf = Config()
Clinical-Genomics/scout
scout/commands/update/disease.py
Python
bsd-3-clause
3,712
0.002156
#!/usr/bin/env python # encoding: utf-8 """ update/disease.py Update the disease terms in database Created by Måns Magnusson on 2017-04-03. Copyright (c) 2017 __MoonsoInc__. All rights reserved. """ import logging import os import click from flask.cli import current_app, with_appcontext from scout.constants import UPDATE_DISEASES_RESOURCES from scout.load.hpo import load_disease_terms from scout.server.extensions import store from scout.utils.handle import get_file_handle from scout.utils.scout_requests import ( fetch_hpo_terms, fetch_hpo_to_genes_to_disease, fetch_mim_files, ) LOG = logging.getLogger(__name__) def _check_resources(resources): """Check that resource lines file contain valid data Args: resources(dict): resource names as keys and resource file lines as values """ for resname, lines in resources.items(): if not lines or lines[0].startswith("#") is False: LOG.error(f"Resource file '{resname}' doesn't contain valid data.") raise click.Abort() def _fetch_downloaded_resources(resources, downloads_folder): """Populate resource lines if a resource exists in downloads folder Args: resources(dict): downloads_folder(str): path to downloaded files or demo version of these files """ for resname, filenames in UPDATE_DISEASES_RESOURCES.items(): for filename in filenames: resource_path = os.path.join(downloads_folder, filename) resource_exists = os.path.isfile(resource_path) if resource_exists: resources[resname] = get_file_handle(resource_path).readlines() if resname not in resources: LOG.error(f"Resource file '{resname}' was not found in provided downloads folder.") raise click.Abort() @click.command("diseases", short_help="Update disease terms") @click.option( "-f", "--downloads-folder", type=click.Path(exists=True, dir_okay=True, readable=True), help="specify path to folder where files necessary to update diseases are pre-downloaded", ) @click.option( "--api-key", help="Download resources using an OMIM api key (required only if downloads folder is NOT specified)", ) @with_appcontext def diseases(downloads_folder, api_key): """ Update disease terms in mongo database. Use pre-downloaded resource files (phenotype_to_genes and genemap2) or download them from OMIM. Both options require using a valid omim api key. """ adapter = store api_key = api_key or current_app.config.get("OMIM_API_KEY") resources = {} if downloads_folder: api_key = None # Fetch required resource lines after making sure that are present in downloads folder and that contain valid data _fetch_downloaded_resources(resources, downloads_folder) else: # Download resources if not api_key: LOG.warning("Please provide a omim api key to load the omim gene panel") raise click.Abort() try: mim_files = fetch_mim_files(api_key, genemap2=True) resources["genemap_lines"] = mim_files["genemap2"] resources["hpo_gene_lines"] = fetch_hpo_to_genes_to_disease() except Exception as err: LOG.warning(err) raise click.Abort() _check_resources(resources) LOG.info("Dropping DiseaseTerms") adapter.disease_term_collection.delete_many({}) LOG.debug("DiseaseTerms dropped") load_disease_terms( adapter=adapter, genemap_lines=re
sources["genemap_lines"],
hpo_disease_lines=resources["hpo_gene_lines"], ) LOG.info("Successfully loaded all disease terms")
gvizquel/comunidad
comunidad/settings.py
Python
gpl-3.0
4,931
0.001825
import os # Build paths inside the project like this: os.path.join(BASE_DIR, ...) BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = ')p9u&kcu@_(8u&-%4(m9!&4*82sx97zyl-!i#m9kic2lycj%0)' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True ALLOWED_HOSTS = [] # Application definition INSTALLED_APPS = [ 'demografia.apps.DemografiaConfig', 'dal', 'dal_select2', 'suit', 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'debug_toolbar', #'input_mask', ] MIDDLEWARE_CLASSES = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.auth.middleware.SessionAuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', 'debug_toolbar.middleware.DebugToolbarMiddleware', ] ROOT_URLCONF = 'comunidad.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'comunidad.wsgi.application' # Database # https://docs.djangoproject.com/en/1.9/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.postgre
sql_psycopg2', 'HOST': '127.0.0.1', 'NAME': 'comunidad', 'PASSWORD': '123456', 'PORT': '5432', 'USER': 'postgres', 'SCHEMAS': 'public,demografia' } } # Password validation # https://docs.djangoproject.com/en
/1.9/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/1.9/topics/i18n/ LANGUAGE_CODE = 'es' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.9/howto/static-files/ PROJECT_DIR = os.path.dirname(os.path.abspath(__file__)) MEDIA_ROOT = os.path.join(BASE_DIR, 'media') MEDIA_URL = '/media/' STATIC_ROOT = os.path.join(BASE_DIR, 'static') STATIC_URL = '/static/' SUIT_CONFIG = { # header 'ADMIN_NAME': 'comunidad', 'HEADER_DATE_FORMAT': 'l, j. F Y', 'HEADER_TIME_FORMAT': 'H:i', # forms 'SHOW_REQUIRED_ASTERISK': True, # Default True 'CONFIRM_UNSAVED_CHANGES': True, # Default True # menu 'SEARCH_URL': '/admin/auth/user/', 'MENU_ICONS': { 'sites': 'icon-leaf', 'auth': 'icon-lock', }, # 'MENU_OPEN_FIRST_CHILD': True, # Default True 'MENU_EXCLUDE': ('demografia.miembrohogar',), # 'MENU': ( # 'sites', # {'app': 'auth', 'icon':'icon-lock', 'models': ('user', 'group')}, # {'label': 'Settings', 'icon':'icon-cog', 'models': ('auth.user', 'auth.group')}, # {'label': 'Support', 'icon':'icon-question-sign', 'url': '/support/'}, # ), # misc 'LIST_PER_PAGE': 20 } LOGIN_URL = 'login' LOGOUT_URL = 'logout' LOGIN_REDIRECT_URL = 'index' CACHE_BACKEND = 'simple:///' AUTH_PROFILE_MODULE = "demografia.persona" DEBUG_TOOLBAR_PANELS = [ 'debug_toolbar.panels.versions.VersionsPanel', 'debug_toolbar.panels.timer.TimerPanel', 'debug_toolbar.panels.settings.SettingsPanel', 'debug_toolbar.panels.headers.HeadersPanel', 'debug_toolbar.panels.request.RequestPanel', 'debug_toolbar.panels.sql.SQLPanel', 'debug_toolbar.panels.staticfiles.StaticFilesPanel', 'debug_toolbar.panels.templates.TemplatesPanel', 'debug_toolbar.panels.cache.CachePanel', 'debug_toolbar.panels.signals.SignalsPanel', 'debug_toolbar.panels.logging.LoggingPanel', 'debug_toolbar.panels.redirects.RedirectsPanel', ]
thilaire/missionBoard
src/AVR/genTableCountDown.py
Python
gpl-3.0
1,264
0.019778
# coding=utf-8

# dictionary value -> 7-segment data
Font = {
    0: 0b00111111,  # (48) 0
    1: 0b00000110,  # (49) 1
    2: 0b01011011,  # (50) 2
    3: 0b01001111,  # (51) 3
    4: 0b01100110,  # (52) 4
    5: 0b01101101,  # (53) 5
    6: 0b01111101,  # (54) 6
    7: 0b00100111,  # (55) 7
    8: 0b01111111,  # (56) 8
    9: 0b01101111,  # (57) 9
}

# build array10 and array100 of numbers such that
# i/16 = array10[i]/10 + array100[i&7]/100 (approximately)
array10 = []
array100 = []
for i in range(16):
    f = i/16.0
    if i < 8:
        array100.append(int(f * 100) % 10)
    array10.append(int(f * 10))

print(array10)
print(array100)

# check
for i in range(16):
    print("%d -> %s%s" % (i, array10[i], array100[i & 7]))

# print the C arrays
print("const uint8_t digit[16] = {" + ",".join(str(Font[i % 10]+128) for i in range(16)) + "};")
print("const uint8_t array10[16] = {" + ",".join(str(Font[array10[i]]) for i in range(16)) + "};")
print("const uint8_t array100[8] = {" + ",".join(str(Font[array100[i]]) for i in range(8)) + "};")

# check
for i in range(256):
    # if i&15:
    print("%s%d.%d%d%d%d" % ("1" if ((i >> 4) > 9) else " ", (i >> 4) % 10, array10[i & 15], array100[i & 7], array100[i & 3], array100[(i << 1) & 3]))
    # else:
    #     print("%d.%d%d%d%d" % (i >> 4, 0, 0, 0, 0))
raspberrywhite/django-sse
django_sse/views.py
Python
bsd-3-clause
1,543
0.000648
# -*- coding: utf-8 -*-

from django.views.generic import View
from django.views.decorators.csrf import csrf_exempt

try:
    from django.http import StreamingHttpResponse as HttpResponse
except ImportError:
    from django.http import HttpResponse

from django.utils.decorators import method_decorator

from sse import Sse


class BaseSseView(View):
    """
    This is a base class for sse streaming.
    """

    def get_last_id(self):
        if "HTTP_LAST_EVENT_ID" in self.request.META:
            return self.request.META['HTTP_LAST_EVENT_ID']
        return None

    def _iterator(self):
        for subiterator in self.iterator():
            for bufferitem in self.sse:
                yield bufferitem

    @method_decorator(csrf_exempt)
    def dispatch(self, request, *args, **kwargs):
        self.sse = Sse()

        self.request = request
        self.args = args
        self.kwargs = kwargs

        response = HttpResponse(self._iterator(), content_type="text/event-stream")
        response['Cache-Control'] = 'no-cache'
        response['Software'] = 'django-sse'
        return response

    def iterator(self):
        """
        This is a source of stream.
        Must use ``yield`` statement to flush content from sse
        object to the client.

        Example:

        def iterator(self):
            counter = 0
            while True:
                self.sse.add_message('foo', 'bar')
                self.sse.add_message('bar', 'foo')
                yield
        """
        raise NotImplementedError
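As a concrete illustration of the subclassing contract documented in iterator() above, here is a minimal, hypothetical view plus its URL wiring; the event name, payload and sleep interval are invented.

# Hypothetical subclass of BaseSseView (defined above); illustration only.
import time

class ClockSseView(BaseSseView):
    def iterator(self):
        while True:
            # queue an event on the Sse buffer that dispatch() created
            self.sse.add_message("tick", time.strftime("%H:%M:%S"))
            # yielding returns control to _iterator(), which flushes the buffer to the client
            yield
            time.sleep(1)

# urls.py wiring (assumed, not part of the record):
#   from django.conf.urls import url
#   urlpatterns = [url(r'^events/$', ClockSseView.as_view())]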
Crompulence/cpl-library
test/lammps/single/no_wall/time_varying_force/CFD_single_ball.py
Python
gpl-3.0
2,218
0.008566
import numpy as np import matplotlib.pyplot as plt from matplotlib.widgets import Slider from mpi4py import MPI import sys from cplpy import CPL #initialise MPI and CPL comm = MPI.COMM_WORLD CPL
= CPL() MD_COMM = CPL.init(CPL.CFD_REALM) nprocs_realm = MD_COMM.Get_size() ## P
arameters of the cpu topology (cartesian grid) npxyz = np.array([1, 1, 1], order='F', dtype=np.int32) NProcs = np.product(npxyz) print 'Number of arguments:', len(sys.argv), 'arguments: ', str(sys.argv) if len(sys.argv) > 1: g = float(sys.argv[1]) else: g = 9.81 xyzL = np.array([1.5000000000000000E-003, 1.5000000000000000E-003, 2.5000000000000001E-003], order='F', dtype=np.float64) xyz_orig = np.array([0.0, 0.0, 0.0], order='F', dtype=np.float64) ncxyz = np.array([8, 8, 8], order='F', dtype=np.int32) if (nprocs_realm != NProcs): print("Non-coherent number of processes in MD ", nprocs_realm, " no equal to ", npxyz[0], " X ", npxyz[1], " X ", npxyz[2]) MPI.Abort(errorcode=1) #Setup coupled simulation cart_comm = MD_COMM.Create_cart([npxyz[0], npxyz[1], npxyz[2]]) CPL.setup_cfd(cart_comm, xyzL, xyz_orig, ncxyz) #Get constraint region cnst_limits = CPL.get_cnst_limits(); cnst_portion = CPL.my_proc_portion(cnst_limits) [cnst_ncxl, cnst_ncyl, cnst_nczl] = CPL.get_no_cells(cnst_portion) #Get overlap region olap_limits = CPL.get_olap_limits() BC_limits = np.array([olap_limits[0], olap_limits[1], olap_limits[2], olap_limits[3], olap_limits[4], olap_limits[5]], dtype=np.int32) BC_portion = CPL.my_proc_portion(BC_limits) [BC_ncxl, BC_ncyl, BC_nczl] = CPL.get_no_cells(BC_portion) #Allocate send and recv arrays recv_array = np.zeros((4, BC_ncxl, BC_ncyl, BC_nczl), order='F', dtype=np.float64) send_array = np.zeros((9, cnst_ncxl, cnst_ncyl, cnst_nczl), order='F', dtype=np.float64) ft = True Nsteps = 21 for time in range(Nsteps): # send data to update send_array[2,:,:,:] = -5.9490638385009208e-08*g*np.sin(2.*np.pi*time/Nsteps) CPL.send(send_array, cnst_portion) # recv data and plot recv_array, ierr = CPL.recv(recv_array, BC_portion) print(time) CPL.finalize() MPI.Finalize()
shirk3y/cyclone
cyclone/httputil.py
Python
apache-2.0
9,933
0.000101
# coding: utf-8 # # Copyright 2010 Alexandre Fiori # based on the original Tornado by Facebook # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """HTTP utility code shared by clients and servers.""" from __future__ import absolute_import, division, with_statement import urllib import re from twisted.python import log from cyclone.util import b, ObjectDict class HTTPHeaders(dict): """A dictionary that maintains Http-Header-Case for all keys. Supports multiple values per key via a pair of new methods, add() and get_list(). The regular dictionary interface returns a single value per key, with multiple values joined by a comma. >>> h = HTTPHeaders({"content-type": "text/html"}) >>> h.keys() ['Content-Type'] >>> h["Content-Type"] 'text/html' >>> h.add("Set-Cookie", "A=B") >>> h.add("Set-Cookie", "C=D") >>> h["set-cookie"] 'A=B,C=D' >>> h.get_list("set-cookie") ['A=B', 'C=D'] >>> for (k,v) in sorted(h.get_all()): ... print '%s: %s' % (k,v) ... Content-Type: text/html Set-Cookie: A=B Set-Cookie: C=D """ def __init__(self, *args, **kwargs): # Don't pass args or kwargs to dict.__init__, as it will bypass # our __setitem__ dict.__init__(self) self._as_list = {} self._last_key = None if (len(args) == 1 and len(kwargs) == 0 and isinstance(args[0], HTTPHeaders)): # Copy constructor for k, v in args[0].get_all(): self.add(k, v) else: # Dict-style initialization self.update(*args, **kwargs) # new public methods def add(self, name, value): """Adds a new value for the given key.""" norm_name = HTTPHeaders._normalize_name(name) self._last_key = norm_name if norm_name in self: # bypass our override of __setitem__ since it modifies _as_list dict.__setitem__(self, norm_name, self[norm_name] + ',' + value) self._as_list[norm_name].append(value) else: self[norm_name] = value def get_list(self, name): """Returns all values for the given header as a list.""" norm_name = HTTPHeaders._normalize_name(name) return self._as_list.get(norm_name, []) def get_all(self): """Returns an iterable of all (name, value) pairs. If a header has multiple values, multiple pairs will be returned with the same name. """ for name, list in self._as_list.iteritems(): for value in list: yield (name, value) def parse_line(self, line): """Updates the dictionary with a single header line. >>> h = HTTPHeaders() >>> h.parse_line("Content-Type: text/html") >>> h.get('content-type') 'text/html' """ if line[0].isspace(): # continuation of a multi-line header new_part = ' ' + line.lstrip() self._as_list[self._last_key][-1] += new_part dict.__setitem__(self, self._last_key, self[self._last_key] + new_part) else: name, value = line.split(":", 1) self.add(name, value.strip()) @classmethod def parse(cls, headers): """Returns a dictionary from HTTP header text. 
>>> h = HTTPHeaders.parse( "Content-Type: text/html\\r\\nContent-Length: 42\\r\\n") >>> sorted(h.iteritems()) [('Content-Length', '42'), ('Content-Type', 'text/html')] """ h = cls() for line in headers.splitlines(): if line: h.parse_line(line) return h # dict implementation overrides def __setitem__(self, name, value): norm_name = HTTPHeaders._normalize_name(name) dict.__setitem__(self, norm_name, value) self._as_list[norm_name] = [value] def __getitem__(self, name): return dict.__getitem__(self, HTTPHeaders._normalize_name(name)) def __delitem__(self, name): norm_name = HTTPHeaders._normalize_name(name) dict.__delitem__(self, norm_name) del self._as_list[norm_name] def __contains__(self, name): norm_name = HTTPHeaders._normalize_name(name) return dict.__contains__(self, norm_name) def get(self, name, default=None): return dict.get(self, HTTPHeaders._normalize_name(name), default) def update(self, *args, **kwargs): # dict.update bypasses our __setitem__ for k, v in dict(*args, **kwargs).iteritems(): self[k] = v def copy(self): # default implementation returns dict(self), not the subclass return HTTPHeaders(self) _NORMALIZED_HEADER_RE = \ re.compile(r'^[A-Z0-9][a-z0-9]*(-[A-Z0-9][a-z0-9]*)*$') _normalized_headers = {} @staticmethod def _normalize_name(name): """Converts a name to Http-Header-Case. >>> HTTPHeaders._normalize_name("coNtent-TYPE") 'Content-Type' """ try: return HTTPHeaders._normalized_headers[name] except KeyError: if HTTPHeaders._NORMALIZED_HEADER_RE.match(name): normalized = name else: normalized = "-".join( [w.capitalize() for w in name.split("-")]) HTTPHeaders._normalized_headers[name] = normalized return normalized def url_concat(url, args): """Concatenate url and argument dictionary regardless of whether url has existing query parameters. >>> url_concat
("http://exampl
e.com/foo?a=b", dict(c="d")) 'http://example.com/foo?a=b&c=d' """ if not args: return url if url[-1] not in ('?', '&'): url += '&' if ('?' in url) else '?' return url + urllib.urlencode(args) class HTTPFile(ObjectDict): """Represents an HTTP file. For backwards compatibility, its instance attributes are also accessible as dictionary keys. :ivar filename: :ivar body: :ivar content_type: The content_type comes from the provided HTTP header and should not be trusted outright given that it can be easily forged. """ pass def parse_multipart_form_data(boundary, data, arguments, files): """Parses a multipart/form-data body. The boundary and data parameters are both byte strings. The dictionaries given in the arguments and files parameters will be updated with the contents of the body. """ # The standard allows for the boundary to be quoted in the header, # although it's rare (it happens at least for google app engine # xmpp). I think we're also supposed to handle backslash-escapes # here but I'll save that until we see a client that uses them # in the wild. if boundary.startswith(b('"')) and boundary.endswith(b('"')): boundary = boundary[1:-1] final_boundary_index = data.rfind(b("--") + boundary + b("--")) if final_boundary_index == -1: log.msg("Invalid multipart/form-data: no final boundary") return parts = data[:final_boundary_index].split(b("--") + boundary + b("\r\n")) for part in parts: if not part: continue eoh = part.find(b("\r\n\r\n")) if eoh == -1: log.msg("multipart/form-data missing headers") continue headers = HTTPHeaders.parse(part[:eoh].decode("utf-8")) disp_header = headers.get("Content-Disposition", "") disposition, disp_params = _parse_header(disp_header) if disposition != "form-data" or not part.endswith(b("\r\n")): log.m
alex-ip/agdc
api-examples/source/main/python/tool/summarise_dataset_time_series.py
Python
bsd-3-clause
25,656
0.004249
#!/usr/bin/env python # =============================================================================== # Copyright (c) 2014 Geoscience Australia # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # * Neither Geoscience Australia nor the names of its contributors may be # used to endorse or promote products derived from this software # without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # =============================================================================== from datetime import datetime, timedelta __author__ = "Simon Oldfield" import argparse import gdal import numpy from gdalconst import GA_ReadOnly, GA_Update import logging import os import resource from datacube.api.model import DatasetType, Satellite, get_bands, dataset_type_database from datacube.api.query import list_tiles_as_list from datacube.api.utils import PqaMask, get_dataset_metadata, get_dataset_data, get_dataset_data_with_pq, empty_array from datacube.api.utils import NDV, UINT16_MAX from datacube.api.workflow import writeable_dir from datacube.config import Config from enum import Enum _log = logging.getLogger() def satellite_arg(s): if s in Satellite._member_names_: return Satellite[s] raise argparse.ArgumentTypeError("{0} is not a supported satellite".format(s)) def pqa_mask_arg(s): if s in PqaMask._member_names_: return PqaMask[s] raise argparse.ArgumentTypeError("{0} is not a supported PQA mask".format(s)) def dataset_type_arg(s): if s in DatasetType._member_names_: return DatasetType[s] raise argparse.ArgumentTypeError("{0} is not a supported dataset type".format(s)) def summary_method_arg(s): if s in TimeSeriesSummaryMethod._member_names_: return TimeSeriesSummaryMethod[s] raise argparse.ArgumentTypeError("{0} is not a supported summary method".format(s)) class TimeSeriesSummaryMethod(Enum): __order__ = "YOUNGEST_PIXEL OLDEST_PIXEL MEDOID_PIXEL COUNT MIN MAX MEAN MEDIAN MEDIAN_NON_INTERPOLATED SUM STANDARD_DEVIATION VARIANCE PERCENTILE" YOUNGEST_PIXEL = 1 OLDEST_PIXEL = 2 MEDOID_PIXEL = 3 COUNT = 4 MIN = 5 MAX = 6 MEAN = 7 ME
DIAN = 8 MEDIAN_NON_INTERPOLATED = 9 SUM = 10 STANDARD_DEVIATION = 11 VARIANCE = 12 PERCENTILE = 13 class SummariseDatasetTimeSeriesWorkflow(): application_name = None x = None y = None acq_min
= None acq_max = None process_min = None process_max = None ingest_min = None ingest_max = None satellites = None apply_pqa_filter = None pqa_mask = None dataset_type = None output_directory = None overwrite = None list_only = None summary_method = None chunk_size_x = None chunk_size_y = None def __init__(self, application_name): self.application_name = application_name def parse_arguments(self): parser = argparse.ArgumentParser(prog=__name__, description=self.application_name) group = parser.add_mutually_exclusive_group() group.add_argument("--quiet", help="Less output", action="store_const", dest="log_level", const=logging.WARN) group.add_argument("--verbose", help="More output", action="store_const", dest="log_level", const=logging.DEBUG) parser.set_defaults(log_level=logging.INFO) parser.add_argument("--x", help="X grid reference", action="store", dest="x", type=int, choices=range(110, 155+1), required=True, metavar="[110 - 155]") parser.add_argument("--y", help="Y grid reference", action="store", dest="y", type=int, choices=range(-45, -10+1), required=True, metavar="[-45 - -10]") parser.add_argument("--acq-min", help="Acquisition Date (YYYY or YYYY-MM or YYYY-MM-DD)", action="store", dest="acq_min", type=str, required=True) parser.add_argument("--acq-max", help="Acquisition Date (YYYY or YYYY-MM or YYYY-MM-DD)", action="store", dest="acq_max", type=str, required=True) # parser.add_argument("--process-min", help="Process Date", action="store", dest="process_min", type=str) # parser.add_argument("--process-max", help="Process Date", action="store", dest="process_max", type=str) # # parser.add_argument("--ingest-min", help="Ingest Date", action="store", dest="ingest_min", type=str) # parser.add_argument("--ingest-max", help="Ingest Date", action="store", dest="ingest_max", type=str) parser.add_argument("--satellite", help="The satellite(s) to include", action="store", dest="satellite", type=satellite_arg, nargs="+", choices=Satellite, default=[Satellite.LS5, Satellite.LS7], metavar=" ".join([s.name for s in Satellite])) parser.add_argument("--apply-pqa", help="Apply PQA mask", action="store_true", dest="apply_pqa", default=False) parser.add_argument("--pqa-mask", help="The PQA mask to apply", action="store", dest="pqa_mask", type=pqa_mask_arg, nargs="+", choices=PqaMask, default=[PqaMask.PQ_MASK_CLEAR], metavar=" ".join([s.name for s in PqaMask])) supported_dataset_types = dataset_type_database parser.add_argument("--dataset-type", help="The types of dataset to retrieve", action="store", dest="dataset_type", type=dataset_type_arg, #nargs="+", choices=supported_dataset_types, default=DatasetType.ARG25, metavar=" ".join([s.name for s in supported_dataset_types])) parser.add_argument("--output-directory", help="Output directory", action="store", dest="output_directory", type=writeable_dir, required=True) parser.add_argument("--overwrite", help="Over write existing output file", action="store_true", dest="overwrite", default=False) parser.add_argument("--list-only", help="List the datasets that would be retrieved rather than retrieving them", action="store_true", dest="list_only", default=False) supported_summary_methods = [ TimeSeriesSummaryMethod.YOUNGEST_PIXEL, TimeSeriesSummaryMethod.OLDEST_PIXEL, # TimeSeriesSummaryMethod.MEDOID_PIXEL, TimeSeriesSummaryMethod.COUNT, TimeSeriesSummaryMethod.MIN, TimeSeriesSummaryMethod.MAX, TimeSeriesSummaryMethod.MEAN, TimeSeriesSummaryMethod.MEDIAN, TimeSeriesSummaryMethod.MEDIAN_NON_INTERPOLATED, TimeSeriesSummaryMethod.SUM, 
TimeSeriesSummaryMethod.STANDARD_DEVIATION, TimeSeriesSummaryMethod.VARIANCE, TimeSeriesSummaryMethod.PERCENTILE] parser.add_argument("--summary-method", help="The summary method to apply", action="store", dest="summary_method", type=summary_method_arg, #nargs="+", c
miptliot/edx-platform
lms/djangoapps/courseware/tests/test_lti_integration.py
Python
agpl-3.0
9,169
0.002508
"""LTI integration tests""" import json import urllib from collections import OrderedDict import mock import oauthlib from django.conf import settings from django.core.urlresolvers import reverse from nose.plugins.attrib import attr from courseware.tests import BaseTestXmodule from courseware.views.views import get_course_lti_endpoints from openedx.core.lib.url_utils import quote_slashes from xmodule.modulestore.tests.django_utils import SharedModuleStoreTestCase from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory from xmodule.x_module import STUDENT_VIEW @attr(shard=1) class TestLTI(BaseTestXmodule): """ Integration test for lti xmodule. It checks overall code, by assuring that context that goes to template is correct. As part of that, checks oauth signature generation by mocking signing function of `oauthlib` library. """ CATEGORY = "lti" def setUp(self): """ Mock oauth1 signing of requests library for testing. """ super(TestLTI, self).setUp() mocked_nonce = u'135685044251684026041377608307' mocked_timestamp = u'1234567890' mocked_signature_after_sign = u'my_signature%3D' mocked_decoded_signature = u'my_signature=' # Note: this course_id is actually a course_key context_id = self.item_descriptor.course_id.to_deprecated_string() user_id = unicode(self.item_descriptor.xmodule_runtime.anonymous_student_id) hostname = self.item_descriptor.xmodule_runtime.hostname resource_link_id = unicode(urllib.quote('{}-{}'.format(hostname, self.item_descriptor.location.html_id()))) sourcedId = "{context}:{resource_link}:{user_id}".format( context=urllib.quote(context_id), resource_link=resource_link_id, user_id=user_id ) self.correct_headers = { u'user_id': user_id, u'oauth_callback': u'about:blank', u'launch_presentation_return_url': '', u'lti_message_type': u'basic-lti-launch-request', u'lti_version': 'LTI-1p0', u'roles': u'Student', u'context_id': context_id, u'resource_link_id': resource_link_id, u'lis_result_sourcedid': sourcedId, u'oauth_nonce': mocked_nonce, u'oauth_timestamp': mocked_timestamp, u'oauth_consumer_key': u'', u'oauth_signature_method': u'HMAC-SHA1', u'oauth_version': u'1.0', u'oauth_signature': mocked_decoded_signature } saved_sign = oauthlib.oauth1.Client.sign self.expected_context = { 'display_name': self.item_descriptor.display_name, 'input_fields': self.correct_headers, 'element_class': self.item_descriptor.category, 'element_id': self.item_descriptor.location.html_id(), 'launch_url': u'http://www.example.com', # default value 'open_in_a_new_page': True, 'form_url': self.item_descriptor.xmodule_runtime.handler_url(self.item_descriptor, 'preview_handler').rstrip('/?'), 'hide_launch': False, 'has_score': False, 'module_score': None, 'comment': u'', 'weight': 1.0, 'ask_to_send_username': self.item_descriptor.ask_to_send_username, 'ask_to_send_email': self.item_descriptor.ask_to_send_email, 'description': self.item_descriptor.description, 'button_text': self.item_descriptor.button_text, 'accept_grades_past_due': self.item_descriptor.accept_grades_past_due, } def mocked_sign(self, *args, **kwargs): """ Mocked oauth1 sign function. 
""" # self is <oauthlib.oauth1.rfc5849.Client object> here: __, headers, __ = saved_sign(self, *args, **kwargs) # we should replace nonce, timestamp and signed_signature in headers: old = headers[u'Authorization'] old_parsed = OrderedDict([param.strip().replace('"', '').split('=') for param in old.split(',')]) old_parsed[u'OAuth oauth_nonce'] = mocked_nonce old_parsed[u'oauth_timestamp'] = mocked_timestamp old_parsed[u'oauth_signature'] = mocked_signature_after_sign headers[u'Authorization'] = ', '.join([k + '="' + v + '"' for k, v in old_parsed.items()]) return None, headers, None patcher = mock.patch.object(oauthlib.oauth1.Client, "sign", mocked_sign) patcher.start() self.addCleanup(patcher.stop) def test_lti_constructor(self): genera
ted_content = self.item_descriptor.render(STUDENT_VIEW).content expected_content = self.runtime.r
ender_template('lti.html', self.expected_context) self.assertEqual(generated_content, expected_content) def test_lti_preview_handler(self): generated_content = self.item_descriptor.preview_handler(None, None).body expected_content = self.runtime.render_template('lti_form.html', self.expected_context) self.assertEqual(generated_content, expected_content) @attr(shard=1) class TestLTIModuleListing(SharedModuleStoreTestCase): """ a test for the rest endpoint that lists LTI modules in a course """ # arbitrary constant COURSE_SLUG = "100" COURSE_NAME = "test_course" @classmethod def setUpClass(cls): super(TestLTIModuleListing, cls).setUpClass() cls.course = CourseFactory.create(display_name=cls.COURSE_NAME, number=cls.COURSE_SLUG) cls.chapter1 = ItemFactory.create( parent_location=cls.course.location, display_name="chapter1", category='chapter') cls.section1 = ItemFactory.create( parent_location=cls.chapter1.location, display_name="section1", category='sequential') cls.chapter2 = ItemFactory.create( parent_location=cls.course.location, display_name="chapter2", category='chapter') cls.section2 = ItemFactory.create( parent_location=cls.chapter2.location, display_name="section2", category='sequential') # creates one draft and one published lti module, in different sections cls.lti_published = ItemFactory.create( parent_location=cls.section1.location, display_name="lti published", category="lti", location=cls.course.id.make_usage_key('lti', 'lti_published'), ) cls.lti_draft = ItemFactory.create( parent_location=cls.section2.location, display_name="lti draft", category="lti", location=cls.course.id.make_usage_key('lti', 'lti_draft'), publish_item=False, ) def expected_handler_url(self, handler): """convenience method to get the reversed handler urls""" return "https://{}{}".format(settings.SITE_NAME, reverse( 'courseware.module_render.handle_xblock_callback_noauth', args=[ self.course.id.to_deprecated_string(), quote_slashes(unicode(self.lti_published.scope_ids.usage_id.to_deprecated_string()).encode('utf-8')), handler ] )) def test_lti_rest_bad_course(self): """Tests what happens when the lti listing rest endpoint gets a bad course_id""" bad_ids = [u"sf", u"dne/dne/dne", u"fo/ey/\\u5305"] for bad_course_id in bad_ids: lti_rest_endpoints_url = 'courses/{}/lti_rest_endpoints/'.format(bad_course_id) response = self.client.get(lti_rest_endpoints_url) self.assertEqual(404, response.status_code) def test_lti_rest_listing(self): """tests that the draft lti module is part of the endpoint response""" request = mock.Mock() request.method = 'GET' response = get_course_lti_endpoints(request, course_id=self.course.id.to_deprecated_string()) self.ass
XiaodunServerGroup/ddyedx
common/lib/xmodule/xmodule/vertical_module.py
Python
agpl-3.0
2,477
0.002019
from xblock.fragment import Fragment from xmodule.x_module import XModule from xmodule.seq_module import SequenceDescriptor from xmodule.progress import Progress from pkg_resources import resource_string # HACK: This shouldn't be hard-coded to two types # OBSOLETE: This obsoletes 'type' class_priority = ['video', 'problem'] class VerticalFields(object): has_children = True class VerticalModule(VerticalFields, XModule): ''' Layout module for laying out submodules vertically.''' def student_view(self, con
text): fragment = Fragment()
contents = [] for child in self.get_display_items(): rendered_child = child.render('student_view', context) fragment.add_frag_resources(rendered_child) contents.append({ 'id': child.id, 'content': rendered_child.content }) fragment.add_content(self.system.render_template('vert_module.html', { 'items': contents })) return fragment def mobi_student_view(self, context): fragment = Fragment() contents = [] for child in self.get_display_items(): rendered_child = child.render('mobi_student_view', context) fragment.add_frag_resources(rendered_child) contents.append({ 'id': child.id, 'content': rendered_child.content }) fragment.add_content(self.system.render_template('vert_module.html', { 'items': contents })) return fragment def get_progress(self): # TODO: Cache progress or children array? children = self.get_children() progresses = [child.get_progress() for child in children] progress = reduce(Progress.add_counts, progresses, None) return progress def get_icon_class(self): child_classes = set(child.get_icon_class() for child in self.get_children()) new_class = 'other' for c in class_priority: if c in child_classes: new_class = c return new_class class VerticalDescriptor(VerticalFields, SequenceDescriptor): module_class = VerticalModule js = {'coffee': [resource_string(__name__, 'js/src/vertical/edit.coffee')]} js_module_name = "VerticalDescriptor" # TODO (victor): Does this need its own definition_to_xml method? Otherwise it looks # like verticals will get exported as sequentials...
huggingface/transformers
tests/speech_to_text/test_feature_extraction_speech_to_text.py
Python
apache-2.0
10,984
0.003733
# coding=utf-8 # Copyright 2021 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import itertools import random import unittest import numpy as np from transformers import is_speech_available from transformers.testing_utils import require_torch, require_torchaudio from ..test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin if is_speech_available(): from transformers import Speech2TextFeatureExtractor global_rng = random.Random() def floats_list(shape, scale=1.0, rng=None, name=None): """Creates a random float32 tensor""" if rng is None: rng = global_rng values = [] for batch_idx in range(shape[0]): values.append([]) for _ in range(shape[1]): values[-1].append(rng.random() * scale) return values @require_torch @require_torchaudio class Speech2TextFeatureExtractionTester(unittest.TestCase): def __init__( self, parent, batch_size=7, min_seq_length=400, max_seq_length=2000, feature_size=24, num_mel_bins=24, padding_value=0.0, sampling_rate=16_000, return_attention_mask=True, do_normalize=True, ): self.parent = parent self.batch_size = batch_size self.min_seq_length = min_seq_length self.max_seq_length = max_seq_length self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) self.feature_size = feature_size self.num_mel_bins = num_mel_bins self.padding_value = padding_value self.sampling_rate = sampling_rate self.return_attention_mask = return_attention_mask self.do_normalize = do_normalize def prepare_feat_extract_dict(self): return { "feature_size": self.feature_size, "num_mel_bins": self.num_mel_bins, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "return_attention_mask": self.return_attention_mask, "do_normalize": self.do_normalize, } def prepare_inputs_for_common(self, equal_length=False, numpify=False): def _flatten(list_of_lists): return list(itertools.chain(*list_of_lists)) if equal_length: speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)] else: # make sure that inputs increase in size speech_inputs = [ floats_list((x, self.feature_size)) for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff) ] if numpify: speech_inputs = [np.asarray(x) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class Speech2TextFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase): feature_extraction_class = Speech2TextFeatureExtractor if is_speech_available() else None def setUp(self): self.feat_extract_tester = Speech2TextFeatureExtractionTester(self) def _check_zero_mean_unit_variance(self, input_vector): self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3)) self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3)) def test_call(self): # Tests that all call wrap to encode_plus and batch_encode_plus feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) # create three inputs of length 800, 1000, and 1200 speech_inputs = 
[floats_list((1, x))[0] for x in range(800, 1400, 200)] np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs] # Test feature size input_features = feature_extractor(np_speech_inputs, padding=True, return_tensors="np").input_features self.assertTrue(input_features.ndim == 3) self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size) # Test not batched input encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3)) # Test batched encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2): self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3)) def test_cepstral_mean_and_variance_normalization(self): feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)] paddings = ["longest", "max_length", "do_not_pad"] max_lengths = [None, 16, None] for max_length, padding in zip(max_lengths, paddings): inputs = feature_extractor( speech_inputs, padding=padding, max_length=max_length, return_attention_mask=True ) input_features = inputs.input_features attention_mask = inputs.attention_mask fbank_feat_lengths = [np.sum(x) for x in attention_mask] self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]]) self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]]) self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]]) def test_cepstral_mean_and_variance_normalization_np(self): feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)] paddings = ["longest", "max_length", "do_not_pad"] max_lengths = [None, 16, None] for max_length, padding in zip(max_lengths, paddings): inputs = feature_extractor( speech_inputs, max_length=max_length, padding=padding, return_tensors="np", return_attention_mask=True ) input_features = inputs.input_features attention_mask = inputs.attention_mask fbank_feat_lengths = [np.sum(x) for x in attention_mask] self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]]) self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1e-6) self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]]) self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1e-6) self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]]) def test_cepstral_mean
_and_variance_normalization_trunc_max_length(self): feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)] inputs = feature_extractor( speech_inputs, padding="max_length", max_length=4, truncation=True, return_tensors="np", return_atte
ntion_mask=True, ) input_features = inputs.input_features attention_mask = inputs.attention_mask fbank_feat_lengths = np.sum(attention_mask == 1, axis=1) self.
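The record above tests that each mel-bin column of the extracted features ends up with zero mean and unit variance. As a rough NumPy sketch of that utterance-level cepstral mean and variance normalization (illustration only; the real Speech2TextFeatureExtractor also handles padding and attention masks, and its exact implementation may differ):

import numpy as np

def utterance_cmvn(features, eps=1e-10):
    # features: (num_frames, num_mel_bins) array of filter-bank features.
    # Subtract the per-bin mean and divide by the per-bin standard deviation,
    # which is the property _check_zero_mean_unit_variance asserts above.
    mean = features.mean(axis=0)
    std = features.std(axis=0)
    return (features - mean) / (std + eps)

# quick self-check with the same 1e-3 tolerance used by the tests
feats = utterance_cmvn(np.random.rand(1000, 24))
assert np.all(np.abs(feats.mean(axis=0)) < 1e-3)
assert np.all(np.abs(feats.var(axis=0) - 1) < 1e-3)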
eltonsantos/dom
dom/urls.py
Python
mit
263
0.003802
f
rom django.conf.urls import patterns, include, url from django.contrib import admin admin.autodiscover() urlpatterns = patterns('', url(r'^$', include('launch.urls', namespace="launch", app_name="launch")), url(r'^admin/', include(admin.site.urls)),
)
naototty/pyflag
tests/init.py
Python
gpl-2.0
364
0.019231
#!/usr/bin/env python import pyflag.IO as IO import pyflag.Registry as Registry Registry.Init() import pyflag.FileSystem as FileSystem from FileSystem import DBF
S case = "demo" ## This gives us a handle to the VFS fsfd = Registry.FILESYSTEMS.fs['DBFS'](case) ## WE just open a file in the VFS: #fd=fsfd.open(inode="Itest|S1/2") ## And read it #print fd.rea
d()
pysofe/pysofe
pysofe/quadrature/base.py
Python
bsd-3-clause
977
0.002047
""" Provides the base class for all quadrature rules. """ import numpy as np import copy class QuadRule(object): """ Provides an abstract base class for all quadrature rules. Parameters ---------- order : int The polynomial order up
to which the quadrature should be exact """ def __init__(self, order, dimension):
self._order = order self._dimension = dimension self._points = [None] * (dimension + 1) self._weights = [None] * (dimension + 1) self._set_data() def _set_data(self): """ Sets the quadrature points and weights. """ raise NotImplementedError() @property def order(self): return self._order @property def dimension(self): return self._dimension @property def points(self): return copy.deepcopy(self._points) @property def weights(self): return copy.deepcopy(self._weights)
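To show how a concrete rule would plug into the QuadRule base class above, here is a hypothetical one-point midpoint rule on the reference interval [0, 1]; the import path and the per-dimension layout of points and weights are assumptions, and the actual pysofe subclasses may organize their data differently.

import numpy as np
from pysofe.quadrature.base import QuadRule  # assumed module path

class MidpointRule(QuadRule):
    """Illustrative one-point midpoint rule, exact for polynomials of order <= 1."""

    def __init__(self):
        # dimension=1 creates slots for entity dimensions 0 (vertices) and 1 (the interval)
        super(MidpointRule, self).__init__(order=1, dimension=1)

    def _set_data(self):
        # a single quadrature point at the midpoint of [0, 1] with weight 1
        self._points[1] = np.array([[0.5]])
        self._weights[1] = np.array([1.0])

# usage: integrating f(x) = 2*x over [0, 1] with the midpoint rule gives the exact value 1.0
rule = MidpointRule()
approx = np.dot(rule.weights[1], 2.0 * rule.points[1][:, 0])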
googleapis/python-dialogflow
samples/generated_samples/dialogflow_generated_dialogflow_v2_intents_batch_delete_intents_sync.py
Python
apache-2.0
1,696
0.00059
# -*- coding: utf-8 -*- # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Generated code. DO NOT EDIT! # # Snippet for BatchDeleteIntents # NOTE: This snippet has been automatically generated for illustrative purposes only. # It may require modifications to work in your environment. # To install the latest published package dependency, execute the following: # python3 -m pip install google-cloud-dialogflow # [START dialogflow_generated_dialogflow_v2_Intents_BatchDeleteIntents_sync] from google.cloud import dialogflow_v2 def sample_batch_delete_intents()
: # Create a client client = dialogflow_v2.IntentsClient() # Initialize request argument(s) intents = dialogflow_v2.Intent() intents.display_name = "display_name_value" request = dialogflow_v2.BatchDeleteIntentsRequest( parent="parent_value", intents=intents, ) # Make the request operation = client.batch_delete_intents(request=request) print
("Waiting for operation to complete...") response = operation.result() # Handle the response print(response) # [END dialogflow_generated_dialogflow_v2_Intents_BatchDeleteIntents_sync]
zentralopensource/zentral
zentral/contrib/okta/__init__.py
Python
apache-2.0
79
0
# django default_app
_config = "z
entral.contrib.okta.apps.ZentralOktaAppConfig"
xydinesh/b3notify
b3notify.py
Python
apache-2.0
3,613
0
import os import ConfigParser import click from base64 import b64encode import requests import json class B3Notify(object): """ Build status notifier for bitbucket server """ def __init__(self, home='~/.b3notifyrc'): self.home = home self.verbose = True self.config = {} self.build_url = '' self.key = '' self.name = '' self.commit = '' self.auth = '' def read_configuration(self, profile='default'): config = ConfigParser.ConfigParser() config.read([ os.path.expanduser('~/.b3notifyrc'), '.b3notifyrc', os.path.expanduser('{0}'.format(self.home)), ]) self.url = config.get(profile, 'url').strip("'") self.
username = config.get(profile, 'username').strip("'") self.password = config.get(profile, 'password').strip("'") self.auth = '{0}'.format( b64encode('{0}:{1}'.format(self.username, self.password)) ) @property def headers(self): return { 'Content-Type': 'application/json', 'Authorization': 'B
asic {0}'.format(self.auth) } def notify( self, commit, build_url, build_key, build_name, build_state='FAIL'): data = { # <INPROGRESS|SUCCESSFUL|FAILED>", 'state': build_state, 'key': build_key, 'name': build_name, 'url': build_url } self.commit_url = '{0}{1}'.format(self.url, commit) response = requests.post( self.commit_url, headers=self.headers, data=json.dumps(data)) return response @click.command() @click.option( '--config-file', envvar='CONFIG_FILE', default='.', help='Location to find configuration file') @click.option( '--profile', default='default', help='Profile to use for credentials') @click.option( '--host', '-h', help='Server URL') @click.option( '--verbose', '-v', is_flag=True, help='Enable verbose mode') @click.option( '--success', '-s', is_flag=True, default=False, help='Notify build success') @click.option( '--fail', '-f', is_flag=True, default=False, help='Notify build failure') @click.option( '--progress', '-p', is_flag=True, default=False, help='Notify inprogress build') @click.option( '--commit', '-c', envvar='GIT_COMMIT', help='Hash value of the commit') @click.option( '--build-url', '-b', envvar='BUILD_URL', help='Current build url') @click.option( '--key', '-k', envvar='BUILD_TAG', help='Build key') @click.option( '--name', '-n', envvar='BUILD_DISPLAY_NAME', help='Build name') @click.option( '--auth', '-a', envvar='BUILD_AUTH', required=False, help='Base64 encoded string of username:password') def cli( config_file, profile, host, verbose, success, fail, progress, commit, build_url, key, name, auth): """ Build status notifier for bitbucket server """ build_state = 'INPROGRESS' notify = B3Notify(config_file) notify.read_configuration(profile=profile) notify.verbose = verbose if host is not None: notify.url = host if auth is not None: notify.auth = auth if success is True: build_state = 'SUCCESSFUL' if fail is True: build_state = 'FAILED' response = notify.notify( commit=commit, build_url=build_url, build_key=key, build_name=name, build_state=build_state) print response.status_code, response.text
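A short usage sketch of the B3Notify class defined above; every value here is a hypothetical placeholder, and the import assumes the script is importable as a module named b3notify.

from b3notify import B3Notify  # assumed import; b3notify.py sits at the repo root

notify = B3Notify(home='~/.b3notifyrc')
notify.read_configuration(profile='default')  # loads url, username and password

# commit hash, build URL, key and name below are placeholders for illustration
response = notify.notify(
    commit='0123456789abcdef',
    build_url='https://ci.example.com/job/my-app/42/',
    build_key='jenkins-my-app-42',
    build_name='my-app #42',
    build_state='SUCCESSFUL',  # one of INPROGRESS, SUCCESSFUL, FAILED
)
print(response.status_code)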
MarkEEaton/api-workshop
4-json-to-csv.py
Python
mit
1,093
0.000915
# import the libraries that you need import requests import csv # make a GET request to the OneSearch X-Service API response = requests.get('http://onesearch.cuny.edu/PrimoWebServices' '/xservice/search/brief?' '&institution=KB' '&query=any,contains,obama' '&query=facet_rtype,exact,books' '&loc=adaptor,primo_central_multiple_fe' '&loc=local,scope:(KB,AL,CUNY_BEPRESS)' '&json=true') # take the JSON from the response # and store it in a variable called alldata alldata = response.json() # drill down into a smaller subset of the j
son # and print this smaller bit of json somedata = alldata['SEGMENTS']['JAGROOT']['RESULT']['FACETLIST']['FACET']\ [1]['FACET_VALUES'] print(somedata) # open a file called mycsv.csv, then loop through the data # and write to that file with open('mycsv.csv', 'wb') as f: writer = csv.writer(f) for x in somedata: writer.writerow([x['@KEY'], x['@VALUE']])
bzamecnik/tfr
tfr/tuning.py
Python
mit
1,850
0.001622
from __future__ import print_function, division import numpy as np class Tuning(): """ Equal temperament tuning - allows to convert between frequency and pitch. - unit pitch space - continous, unbounded - 1.0 ~ one octave - step pitch space - continous, unbounded - N steps ~ one octave - unit pitch space * N - unit pitch class space - continous, bounded [0, 1.0) - unit pitch space % 1.0 - step pitch class space - continous, bounded [0, N) - unit step pitch space % N - integer step pitch space - discrete, unbounded - floor(step pitch space) - integer step pitch class space - discrete, bounded {0, 1, .. N - 1} - floor(step pitch class space) """ def __init__(self, base_freq=440, steps_per_octave=12, octave_ratio=2): self.base_freq = base_freq self.steps_per_octave = steps_per_octave self.octave_ratio = octave_ratio def pitch_to_freq(self, pitch): factor = self.pitch_to_relative_freq(pitch) return factor * self.base_freq def freq_to_pitch(self, freq): rel_freq = freq / self.base_freq if self.octave_ratio == 2: p = np.log2(rel_freq) else: p = np.log(rel_freq) / np.log(2) return p * self.steps_per_octave def pitch_to_relative_freq(self, pitch): return pow(self.octave_ratio, pitch / self.steps_per_octave) class PitchQuantizer(): def __init__(self, tuning, bin_division=1): self.tuning = tuning self.b
in_division = bin_division def quantize(self, freqs): """ Quantizes frequencies to nearest pitch bins (with optional division of bins). """ retu
rn np.round(self.tuning.freq_to_pitch(freqs) * self.bin_division) / self.bin_division
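A brief, illustrative use of the Tuning and PitchQuantizer classes above; the import path is an assumption based on the file's location in the repository.

import numpy as np
from tfr.tuning import Tuning, PitchQuantizer  # assumed module path

tuning = Tuning(base_freq=440, steps_per_octave=12, octave_ratio=2)
tuning.pitch_to_freq(0)     # 440.0, the base frequency itself
tuning.pitch_to_freq(12)    # 880.0, since 12 steps = one octave = double the frequency
tuning.pitch_to_freq(-12)   # 220.0, one octave down
tuning.freq_to_pitch(880)   # 12.0

# quantize arbitrary frequencies to the nearest semitone
quantizer = PitchQuantizer(tuning, bin_division=1)
quantizer.quantize(np.array([450.0, 881.0]))  # array([ 0., 12.])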
qtekfun/htcDesire820Kernel
external/chromium_org/content/test/gpu/gpu_tests/webgl_conformance_expectations.py
Python
gpl-2.0
9,671
0.009616
# Copyright (c) 2013 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. from telemetry.page import test_expectations # Valid expectation conditions are: # # Operating systems: # win, xp, vista, win7, mac, leopard, snowleopard, lion, mountainlion, # linux, chromeos, android # # GPU vendors: # amd, arm, broadcom, hisilicon, intel, imagination, nvidia, qualcomm, # vivante # # Specific GPUs can be listed as a tuple with vendor name and device ID. # Examples: ('nvidia', 0x1234), ('arm', 'Mali-T604') # Device IDs must be paired with a GPU vendor. class WebGLConformanceExpectations(test_expectations.TestExpectations): def SetExpectations(self): # Sample Usage: # self.Fail('gl-enable-vertex-attrib.html', # ['mac', 'amd', ('nvidia', 0x1234)], bug=123) # Fails everywhere. self.Skip('conformance/glsl/misc/large-loop-compile.html', bug=322764) self.Skip('conformance/textures/texture-size-limit.html', bug=322789) # Windows failures. self.Fail('conformance/ogles/GL/atan/atan_001_to_008.html', ['win'], bug=322794) self.Fail('conformance/ogles/GL/atan/atan_009_to_012.html', ['win'], bug=322794) self.Skip('conformance/ogles/GL/control_flow/control_flow_001_to_008.html', ['win'], bug=322795) # Windows/Intel failures self.Fail('conformance/textures/texture-size.html', ['win', 'intel'], bug=121139) self.Fail('conformance/rendering/gl-scissor-test.html', ['win', 'intel'], bug=314997) # Windows/AMD failures self.Fail('conformance/rendering/more-than-65536-indices.html', ['win', 'amd'], bug=314997) # Windows 7/Intel failures self.Fail('conformance/context/context-lost-restored.html', ['win7', 'intel']) self.Fail('conformance/context/premultiplyalpha-test.html', ['win7', 'intel']) self.Fail('conformance/extensions/oes-texture-float-with-image-data.html', ['win7', 'intel']) self.Fail('conformance/extensions/oes-texture-float.html', ['win7', 'intel']) self.Fail('conformance/limits/gl-min-attribs.html', ['win7', 'intel']) self.Fail('conformance/limits/gl-max-texture-dimensions.html', ['win7', 'intel']) self.Fail('conformance/limits/gl-min-textures.html', ['win7', 'intel']) self.Fail('conformance/limits/gl-min-uniforms.html', ['win7', 'intel']) self.Fail('conformance/rendering/gl-clear.html', ['win7', 'intel']) self.Fail('conformance/textures/copy-tex-image-and-sub-image-2d.html', ['win7', 'intel']) self.Fail('conformance/textures/gl-teximage.html', ['win7', 'intel']) self.Fail('conformance/textures/tex-image-and-sub-image-2d-with-array-buffer-view.html', ['win7', 'intel']) self.Fail('conformance/textures/tex-image-and-sub-image-2d-with-image-data.html', ['win7', 'intel']) self.Fail('conformance/textures/tex-image-and-sub-image-2d-with-image-data-rgb565.html', ['win7', 'intel']) self.Fail('conformance/textures/tex-image-and-sub-image-2d-with-image-data-rgba4444.html', ['win7', 'intel']) self.Fail('conformance/textures/tex-image-and-sub-image-2d-with-image-data-rgba5551.html', ['win7', 'intel']) self.Fail('conformance/textures/tex-image-with-format-and-type.html', ['win7', 'intel']) self.Fail('conformance/textures/tex-sub-image-2d.html', ['win7', 'intel']) self.Fail('conformance/textures/texparameter-test.html', ['win7', 'intel']) self.Fail('conformance/textures/texture-active-bind-2.html', ['win7', 'intel']) self.Fail('conformance/textures/texture-active-bind.html', ['win7', 'intel']) self.Fail('conformance/textures/texture-complete.html', ['win7', 'intel']) self.Fail('conformance/textures/texture-formats-test.html', 
['win7', 'intel']) self.Fail('conformance/textures/texture-mips.html', ['win7', 'intel']) self.Fail('conformance/textures/texture-npot.html', ['win7', 'intel']) self.Fail('conformance/textures/texture-size-cube-maps.html', ['win7', 'intel']) self.Fail('conformance/context/context-attribute-preserve-drawing-buffer.html', ['win7', 'intel'], bug=322770) # Mac failures. self.Fail('conformance/glsl/misc/shaders-with-varyings.html', ['mac'], bug=322760) self.Fail('conformance/context/context-attribute-preserve-drawing-buffer.html', ['mac'], bug=322770) self.Skip('conformance/ogles/GL/control_flow/control_flow_001_to_008.html', ['mac'], bug=322795) # Mac/Intel failures self.Fail('conformance/rendering/gl-scissor-test.html', ['mac', 'intel'], bug=314997) # The following two tests hang the WindowServer. self.Skip('conformance/canvas/drawingbuffer-static-canvas-test.html', ['mac', 'intel'], bug=303915) self.Skip('conformance/canvas/drawingbuffer-test.html', ['mac', 'intel'], bug=303915) # The following three tests only fail. # Radar 13499677 self.Fail( 'conformance/glsl/functions/glsl-function-smoothstep-gentype.html', ['mac', 'intel'], bug=225642) # Radar 13499466 self.Fail('conformance/limits/gl-max-texture-dimensions.html', ['mac', 'intel'], bug=225642) # Radar 13499623 self.Fail('conformance/textures/texture-size.html', ['mac', 'intel'], bug=225642) self.Skip('conformance/ogles/GL/control_flow/control_flow_009_to_010.html', ['mac', 'intel'], bug=322795) self.Fail('conformance/ogles/GL/operators/operators_009_to_016.html', ['mac', 'intel'], bug=322795) # Mac/Intel failures on 10.7 self.Skip('conformance/glsl/functions/glsl-function-asin.html', ['lion', 'intel']) self.Skip('conformance/glsl/functions/glsl-function-dot.html', ['lion', 'intel']) self.Skip('conformance/glsl/functions/glsl-function-faceforward.html', ['lion', 'intel']) self.Skip('conformance/glsl/functions/glsl-function-length.html', ['lion', 'intel']) self.Skip('conformance/glsl/functions/glsl-function-normalize.html', ['lion', 'intel']) self.Skip('conformance/glsl/functions/glsl-function-reflect.html', ['lion', 'intel']) self.Skip( 'conformance/glsl/functions/glsl-function-smoothstep-gentype.html', ['lion', 'intel']) self.Skip('conformance/limits/gl-max-texture-dimensions.html', ['lion', 'intel']) self.Skip('conformance/rendering/line-loop-tri-fan.html', ['lion', 'intel']) self.Skip('conformance/ogles/GL/control_flow/control_flow_009_to_010.html', ['lion'], bug=322795) self.Skip('conformance/ogles/GL/dot/dot_001_to_006.html', ['lion', 'intel'], bug=323736) self.Skip('conformance/ogles/GL/faceforward/faceforward_001_to_006.html', ['lion', 'intel'], bug=323736) self.Skip('conformance/ogles/GL/length/length_001_to_006.html', ['lion', 'intel'], bug=323736) self.Skip('conformance/ogles/GL/normalize/normalize_001_to_006.html', ['lion', 'intel'], bug=323736) self.Skip('conformance/ogles/GL/reflect/reflect_001_to_006.html', ['lion', 'intel'], bug=323736) self.Skip('conformance/ogles/GL
/refract/refract_001_to_006.html', ['lion', 'intel'], bug=323736) self.Skip('conformance/ogles/GL/tan/tan_001_to_006.html', ['lion', 'intel'], bug=323736) # Mac/ATI failures self.Skip('conformance/extensions/oes-texture-float-with-image-data.html', ['mac', 'amd'], bug=308328) self.Skip('conformance/rendering/gl-clear.html', ['mac', 'amd'], bug=308328)
self.Skip('conformance/textures/tex-image-and-sub-image-2d-with-array-buffer-view.html', ['mac', 'amd'], bug=308328) self.Skip('conformance/textures/tex-image-and-sub-image-2d-with-image-data.html', ['mac', 'amd'], bug=308328) self.Skip('conformance/textures/tex-image-and-sub-image-2d-with-image-data-rgb565.html', ['mac', 'amd'], bug=308328) self.Skip
masom/Puck
client/pixie/lib/setup_plugin.py
Python
lgpl-3.0
24,567
0.001628
''' Pixie: FreeBSD virtualization guest configuration client Copyright (C) 2011 The Hotel Communication Network inc. This program is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. ''' import threading, Queue as queue, time, subprocess, shlex, datetime import urllib, tarfile, os, shutil, tempfile, pwd import cherrypy from cherrypy.process import wspbus, plugins from pixie.lib.jails import EzJail from pixie.lib.interfaces import NetInterfaces class SetupTask(object): def __init__(self, puck, queue): self.queue = queue self._puck = puck self.vm = puck.getVM() def run(self): raise NotImplementedError("`run` must be defined.") def log(self, msg): now = datetime.datetime.now() cherrypy.log("%s %s" % (self.__class__.__name__, msg)) tpl = "%s\t%s\t%s" date_format = "%Y-%m-%d %H:%M:%S" cls = self.__class__.__name__ self.queue.put(tpl % (now.strftime(date_format), cls, msg)) class RcReader(object): def _has_line(self, lines, line_start): for line in lines: if line.startswith(line_start): return True return False def _get_rc_content(self): rc = None try: with open('/etc/rc.conf', 'r') as f: rc = f.readlines() except IOError: pass if not rc: raise RuntimeError("File `/etc/rc.conf` is empty!") return rc class EZJailTask(SetupTask, RcReader): ''' Setups ezjail in the virtual machine. ''' def run(self): try: self.log("Enabling EZJail.") self._enable_ezjail() self.log("Installing EZJail") EzJail().install(cherrypy.config.get('setup_plugin.ftp_mirror')) except (IOError, OSError) as e: self.log("Error while installing ezjail: %s" % e) return False return True def _enable_ezjail(self): rc = self._get_rc_content() if self._has_line(rc, 'ezjail_enable'): self.log("EZJail is already enabled.") return self.log("Adding to rc: `%s`" % 'ezjail_enable="YES"') '''if we get here, it means ezjail_enable is not in rc.conf''' with open('/etc/rc.conf', 'a') as f: f.write("ezjail_enable=\"YES\"\n") class SSHTask(SetupTask): '''Create the base user `puck` and add the authorized ssh keys''' def run(self): self._setup_ssh() return True def _setup_ssh(self): if not self.vm.keys: self.log("No keys to install."); return True #@TODO Could be moved to config values instead of hardcoded. user = 'puck' try: pwd.getpwnam(user) except KeyError as e: cmd = 'pw user add %s -m -G wheel' % user self.log("Adding user. Executing `%s`" % cmd) subprocess.Popen(shlex.split(str(cmd))).wait() user_pwd = pwd.getpwnam(user) path = '/home/%s/.ssh' % user authorized_file = "%s/authorized_keys" % path if not os.path.exists(path): os.mkdir(path) os.chown(path, user_pwd.pw_uid, user_pwd.pw_gid) with open(authorized_file, 'a') as f: for key in self.vm.keys: self.log("Writing key `%s`" % key) f.write('%s\n' % self.vm.keys[key]['key']) os.chmod(authorized_file, 0400) os.chown(authorized_file, user_pwd.pw_uid, user_pwd.pw_gid) os.chmod(path, 0700) os.chmod('/home/%s' % user, 0700) class FirewallSetupTask(SetupTask, RcReader): def run(self): # TODO Move this to a congfiguration value from puck. 
Not high priority pf_conf = '/etc/pf.rules.conf' rc_conf = '/etc/rc.conf' self.setup_rc(rc_conf, pf_conf) self.setup_pf_conf(pf_conf) self.launch_pf() return True def launch_pf(self): # Stop it in case it commands = ['/etc/rc.d/pf stop', '/etc/rc.d/pf start'] for command in commands: self.log("Executing: `%s`" % command) subprocess.Popen(shlex.split(str(command))).wait() def setup_pf_conf(self, pf_conf): rules = self.vm.firewall if not rules: self.log("No firewall to write.") return False self.log("Writing firewall rules at `%s`." % pf_conf) with open(pf_conf, 'w') as f: f.write(rules.replace('\r\n', '\n').replace('\r', '\n')) f.flush() def setup_rc(self, rc_conf, pf_conf): #TODO Move this to a configuration value. Not high priority. rc_items = { 'pf_enable' : 'YES', 'pf_rules' : pf_conf, 'pflog_enable' : 'YES', 'gateway_enable' : 'YES' } rc_present = [] rc = self._get_rc_content() for line in rc: for k in rc_items: if line.startswith(k): rc_present.append(k) break missing = set(rc_items.keys()) - set(rc_present) tpl = 'Adding to rc: `%s="%s"`' [self.log(tpl % (k, rc_items[k])) for k in missing] template = '%s="%s"\n' with open(rc_conf, 'a') as f: [f.write(template % (k,rc_items[k])) for k in missing] f.flush() class InterfacesSetupTask(SetupTask, RcReader): '''Configures network interfaces for the jails.''' def run(self): (netaddrs, missing) = self._get_missing_netaddrs() self._add_missing_netaddrs(missing) self._add_missing_rc(netaddrs) return True def _add_missing_rc(self, netaddrs): rc_addresses = [] rc = self._get_rc_content() alias_count = self._calculate_alias_count(rc_addresses, rc) with open('/etc/rc.conf', 'a') as f: for netaddr in netaddrs: if self._add_rc_ip(rc_addresses, f, alias_count, netaddr): alias_count += 1 def _add_missing_netaddrs(self, netaddrs): for netaddr in netaddrs: self.log("Registering new ip address `%s`" % netaddr['ip']) self._add_addr(netaddr['ip'], netaddr['netmask']) def _get_missing_netaddrs(self): interfaces = NetInterfaces.getInterfaces() missing = [] netaddrs = [] for jail in self.vm.jails: netaddr = {'ip': jail.ip, 'netmask': jail.netmask} netaddrs.append(netaddr) if not interfaces.has_key(jail.ip): missing.append(netaddr) return (netaddrs, missing) def _calculate_alias_count(self, addresses, rc): alias_count = 0 for line in rc: if line.startswith('ifconfig_%s_alias' % self.vm.interface): alias_count += 1 addresses.append(line) return alias_count def _add_addr(self, ip, netmask): cmd = "ifconfig %s alias %s netmask %s"
command = cmd % (self.vm.interface, ip, netmask) self.log('executing: `%s`' % command) subprocess.Popen(shlex.split(str(command))).wait() def _add_rc_ip(self, rc_addresses, file, alias_count, netaddr): for item in rc_addresses: if item.find(netaddr['ip']) > 0: self.log("rc already knows about ip `%s`" % netaddr['ip']) return False
self.log("Registering new rc value for ip `%s`" % netaddr['ip']) template = 'ifconfig_%s_alias%s="inet %s netmask %s"' line = "%s\n" % template va