Dataset schema (one row per sampled source file):

    column     type      range / classes
    repo_name  string    length 5 to 100
    path       string    length 4 to 231
    language   string    1 class
    license    string    15 classes
    size       int64     6 to 947k
    score      float64   0 to 0.34
    prefix     string    length 0 to 8.16k
    middle     string    length 3 to 512
    suffix     string    length 0 to 8.17k
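Given this schema, a row's full file text can be reconstructed by concatenating prefix + middle + suffix. A minimal sketch using the Hugging Face datasets library; the dataset id here is hypothetical, only the column names come from the schema above:

    from datasets import load_dataset

    ds = load_dataset("org/python-fim-corpus", split="train")  # hypothetical dataset id
    row = ds[0]
    # Columns from the schema above: repo_name, path, language, license,
    # size, score, prefix, middle, suffix.
    full_text = row["prefix"] + row["middle"] + row["suffix"]
    print(row["repo_name"], row["path"], row["license"], len(full_text))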
[MichaelGrupp/evo] test/test_filters.py (Python, gpl-3.0, 6,476 bytes, score 0)
#!/usr/bin/env python
"""
unit test for filters module
author: Michael Grupp

This file is part of evo (github.com/MichaelGrupp/evo).

evo is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

evo is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with evo. If not, see <http://www.gnu.org/licenses/>.
"""

import math
import unittest

import numpy as np

from evo.core import filters
from evo.core import lie_algebra as lie

# TODO: clean these up and use proper fixtures.
POSES_1 = [
    lie.se3(np.eye(3), np.array([0, 0, 0])),
    lie.se3(np.eye(3), np.array([0, 0, 0.5])),
    lie.se3(np.eye(3), np.array([0, 0, 0])),
    lie.se3(np.eye(3), np.array([0, 0, 1]))
]

POSES_2 = [
    lie.se3(np.eye(3), np.array([0, 0, 0])),
    lie.se3(np.eye(3), np.array([0, 0, 0.5])),
    lie.se3(np.eye(3), np.array([0, 0, 0.99])),
    lie.se3(np.eye(3), np.array([0, 0, 1.0]))
]

POSES_3 = [
    lie.se3(np.eye(3), np.array([0, 0, 0.0])),
    lie.se3(np.eye(3), np.array([0, 0, 0.9])),
    lie.se3(np.eye(3), np.array([0, 0, 0.99])),
    lie.se3(np.eye(3), np.array([0, 0, 0.999])),
    lie.se3(np.eye(3), np.array([0, 0, 0.9999])),
    lie.se3(np.eye(3), np.array([0, 0, 0.99999])),
    lie.se3(np.eye(3), np.array([0, 0, 0.999999])),
    lie.se3(np.eye(3), np.array([0, 0, 0.9999999]))
]

POSES_4 = [
    lie.se3(np.eye(3), np.array([0, 0, 0])),
    lie.se3(np.eye(3), np.array([0, 0, 1])),
    lie.se3(np.eye(3), np.array([0, 0, 1])),
    lie.se3(np.eye(3), np.array([0, 0, 1]))
]


class TestFilterPairsByPath(unittest.TestCase):
    def test_poses1_all_pairs(self):
        target_path = 1.0
        tol = 0.0
        id_pairs = filters.filter_pairs_by_path(POSES_1, target_path, tol,
                                                all_pairs=True)
        self.assertEqual(id_pairs, [(0, 2), (2, 3)])

    def test_poses1_wrong_target(self):
        target_path = 2.5
        tol = 0.0
        id_pairs = filters.filter_pairs_by_path(POSES_1, target_path, tol,
                                                all_pairs=True)
        self.assertEqual(id_pairs, [])

    def test_poses2_all_pairs_low_tolerance(self):
        target_path = 1.0
        tol = 0.001
        id_pairs = filters.filter_pairs_by_path(POSES_2, target_path, tol,
                                                all_pairs=True)
        self.assertEqual(id_pairs, [(0, 3)])

    def test_convergence_all_pairs(self):
        target_path = 1.0
        tol = 0.2
        id_pairs = filters.filter_pairs_by_path(POSES_3, target_path, tol,
                                                all_pairs=True)
        self.assertEqual(id_pairs, [(0, 7)])


axis = np.array([1, 0, 0])
POSES_5 = [
    lie.se3(lie.so3_exp(axis * 0.0), np.array([0, 0, 0])),
    lie.se3(lie.so3_exp(axis * math.pi), np.array([0, 0, 0])),
    lie.se3(lie.so3_exp(axis * 0.0), np.array([0, 0, 0])),
    lie.se3(lie.so3_exp(axis * math.pi / 3), np.array([0, 0, 0])),
    lie.se3(lie.so3_exp(axis * math.pi), np.array([0, 0, 0]))
]
TRANSFORM = lie.random_se3()
POSES_5_TRANSFORMED = [TRANSFORM.dot(p) for p in POSES_5]

axis = np.array([1, 0, 0])
p0 = lie.se3(lie.so3_exp(axis * 0.0), np.array([0, 0, 0]))
pd = lie.se3(lie.so3_exp(axis * (math.pi / 3.)), np.array([1, 2, 3]))
p1 = np.dot(p0, pd)
p2 = np.dot(p1, pd)
p3 = np.dot(p2, pd)
POSES_6 = [p0, p1, p2, p3, p3]
POSES_6_TRANSFORMED = [TRANSFORM.dot(p) for p in POSES_6]


class TestFilterPairsByAngle(unittest.TestCase):
    def test_poses5(self):
        tol = 0.001
        expected_result = [(0, 1), (1, 2), (2, 4)]
        # Result should be unaffected by global transformation.
        for poses in (POSES_5, POSES_5_TRANSFORMED):
            target_angle = math.pi - tol
            id_pairs = filters.filter_pairs_by_angle(poses, target_angle, tol,
                                                     all_pairs=False)
            self.assertEqual(id_pairs, expected_result)
            # Check for same result when using degrees:
            target_angle = np.rad2deg(target_angle)
            id_pairs = filters.filter_pairs_by_angle(poses, target_angle, tol,
                                                     all_pairs=False,
                                                     degrees=True)
            self.assertEqual(id_pairs, expected_result)

    def test_poses5_all_pairs(self):
        tol = 0.01
        expected_result = [(0, 1), (0, 4), (1, 2), (2, 4)]
        # Result should be unaffected by global transformation.
        for poses in (POSES_5, POSES_5_TRANSFORMED):
            target_angle = math.pi
            id_pairs = filters.filter_pairs_by_angle(poses, target_angle, tol,
                                                     all_pairs=True)
            self.assertEqual(id_pairs, expected_result)
            # Check for same result when using degrees:
            target_angle = np.rad2deg(target_angle)
            id_pairs = filters.filter_pairs_by_angle(poses, target_angle, tol,
                                                     all_pairs=True,
                                                     degrees=True)
            self.assertEqual(id_pairs, expected_result)

    def test_poses6(self):
        tol = 0.001
        target_angle = math.pi - tol
        expected_result = [(0, 3)]
        # Result should be unaffected by global transformation.
        for poses in (POSES_6, POSES_6_TRANSFORMED):
            id_pairs = filters.filter_pairs_by_angle(poses, target_angle, tol,
                                                     all_pairs=False)
            self.assertEqual(id_pairs, expected_result)

    def test_poses6_all_pairs(self):
        target_angle = math.pi
        tol = 0.001
        expected_result = [(0, 3), (0, 4)]
        # Result should be unaffected by global transformation.
        for poses in (POSES_6, POSES_6_TRANSFORMED):
            id_pairs = filters.filter_pairs_by_angle(poses, target_angle, tol,
                                                     all_pairs=True)
            self.assertEqual(id_pairs, expected_result)


if __name__ == '__main__':
    unittest.main(verbosity=2)
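For intuition about what these tests assert: filter_pairs_by_path keeps index pairs whose accumulated translation distance along the trajectory matches target_path within tol. A minimal sketch of that idea, not evo's actual implementation (which also supports a non-all_pairs mode):

    import numpy as np

    def pairs_by_path(positions, target_path, tol):
        # Cumulative path length at each pose index.
        dists = np.cumsum([0.0] + [np.linalg.norm(b - a)
                                   for a, b in zip(positions[:-1], positions[1:])])
        n = len(positions)
        return [(i, j) for i in range(n) for j in range(i + 1, n)
                if abs((dists[j] - dists[i]) - target_path) <= tol]

    # Translations of POSES_1 above:
    positions = [np.array([0., 0., z]) for z in (0, 0.5, 0, 1)]
    print(pairs_by_path(positions, 1.0, 0.0))  # [(0, 2), (2, 3)], as in the test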
[kevin-zhaoshuai/zun] zun/db/sqlalchemy/alembic/versions/9fe371393a24_create_table_container.py (Python, apache-2.0, 1,820 bytes, score 0.001648)
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""create_table_container

Revision ID: 9fe371393a24
Revises: a9a92eebd9a8
Create Date: 2016-06-12 16:09:35.686539

"""

# revision identifiers, used by Alembic.
revision = '9fe371393a24'
down_revision = 'a9a92eebd9a8'
branch_labels = None
depends_on = None

from alembic import op
import sqlalchemy as sa

import zun


def upgrade():
    op.create_table(
        'container',
        sa.Column('created_at', sa.DateTime(), nullable=True),
        sa.Column('updated_at', sa.DateTime(), nullable=True),
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('project_id', sa.String(length=255), nullable=True),
        sa.Column('user_id', sa.String(length=255), nullable=True),
        sa.Column('uuid', sa.String(length=36), nullable=True),
        sa.Column('name', sa.String(length=255), nullable=True),
        sa.Column('image', sa.String(length=255), nullable=True),
        sa.Column('command', sa.String(length=255), nullable=True),
        sa.Column('status', sa.String(length=20), nullable=True),
        sa.Column('environment', zun.db.sqlalchemy.models.JSONEncodedDict(),
                  nullable=True),
        sa.PrimaryKeyConstraint('id'),
        sa.UniqueConstraint('uuid', name='uniq_container0uuid')
    )
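The dump ends at the upgrade step. Alembic migrations conventionally also define a downgrade; a sketch of what the reverse of this migration would look like (assumed, the source's downgrade is not shown in the dump):

    def downgrade():
        # Reverse of upgrade(): drop the table created above.
        op.drop_table('container')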
[dwillis/socialcongress] tracker/utils.py (Python, unlicense, 6,562 bytes, score 0.006248)
import datetime
from tracker.models import Member, Report
from django.template.defaultfilters import slugify
import csv
import urllib
import simplejson as json
from dateutil.parser import *
import time


def update_twitter(branch='house', official=True, batch=1):
    if official:
        screen_names = [x.official_twitter_name for x in Member.objects.filter(branch=branch, official_twitter_name__isnull=False).order_by('last_name')]
    else:
        screen_names = [x.campaign_twitter_name for x in Member.objects.filter(branch=branch, campaign_twitter_name__isnull=False).order_by('last_name')]
    if batch == 1:
        screen_names = screen_names[:100]
    elif batch == 2:
        screen_names = screen_names[100:200]
    elif batch == 3:
        screen_names = screen_names[200:300]
    elif batch == 4:
        screen_names = screen_names[300:400]
    elif batch == 5:
        screen_names = screen_names[400:]
    url = "http://api.twitter.com/1/users/lookup.json?screen_name=%s" % ",".join(screen_names)
    response = urllib.urlopen(url).read()
    results = json.loads(response)
    for result in results:
        if official:
            member = Member.objects.get(official_twitter_name__iexact=result['screen_name'])
            report, created = Report.objects.get_or_create(member=member, date=datetime.date.today())
            report.official_twitter_followers = result['followers_count']
            report.official_twitter_updates = result['statuses_count']
            report.save()
        else:
            member = Member.objects.get(campaign_twitter_name__iexact=result['screen_name'])
            report, created = Report.objects.get_or_create(member=member, date=datetime.date.today())
            report.campaign_twitter_followers = result['followers_count']
            report.campaign_twitter_updates = result['statuses_count']
            report.save()


def update_facebook(members, token):
    for member in members:
        print member
        report, created = Report.objects.get_or_create(member=member, date=datetime.date.today())
        params = {}
        params['access_token'] = token
        batch = [{'method': 'GET', 'relative_url': str(member.official_facebook_name)},
                 {'method': 'GET', 'relative_url': str(member.campaign_facebook_name)}]
        params['batch'] = [x for x in batch if x['relative_url'] != '']
        encoded_params = urllib.urlencode(params)
        f = urllib.urlopen("https://graph.facebook.com", encoded_params).read()
        results = json.loads(f)
        for result in results:
            try:
                body = json.loads(result['body'])
            except:
                continue
            if body == False:
                continue
            else:
                try:
                    if str(member.official_facebook_name.lower()) == body['username'].lower():
                        report.official_facebook_likes = body['likes']
                    elif str(member.campaign_facebook_name.lower()) == body['username'].lower():
                        report.campaign_facebook_likes = body['likes']
                except:
                    try:
                        if member.official_facebook_name == body['id']:
                            report.official_facebook_likes = body['likes']
                        elif member.campaign_facebook_name == body['id']:
                            report.campaign_facebook_likes = body['likes']
                    except KeyError:
                        print "No match found for %s" % member
        report.save()
        time.sleep(3)


def update_member(member):
    # NOTE: `token`, `official_twitter` and `campaign_twitter` are undefined
    # in this function as written (a pre-existing bug in the source).
    official_likes = member.facebook_likes(member.official_facebook_name, token)
    campaign_likes = member.facebook_likes(member.campaign_facebook_name, token)
    report, created = Report.objects.get_or_create(member=member, date=datetime.date.today(),
        official_twitter_followers=official_twitter, official_facebook_likes=official_likes,
        campaign_facebook_likes=campaign_likes, campaign_twitter_followers=campaign_twitter)
    report.save()


def load_chamber(chamber):
    if chamber == 'senate':
        f = open("senate.csv", "r")
    elif chamber == 'house':
        f = open("house.csv", "r")
    else:
        raise("Must be house or senate")
    rows = csv.DictReader(f, delimiter=',')
    for row in rows:
        member, created = Member.objects.get_or_create(last_name=row['last'], first_name=row['first'],
            slug=slugify(row['first'] + ' ' + row['last']), party=row['party'], branch=chamber,
            state=row['state'], district=row['district'])
        if row['username'] != '':
            member.official_facebook_name = row['username']
            member.save()
        elif row['username_campaign'] != '':
            member.campaign_facebook_name = row['username_campaign']
            member.save()
        if row['twitter'] != '':
            member.official_twitter_name = row['twitter']
            member.save()


def update_from_al():
    f = open("congress_upload_9_14_11.csv", "r")
    rows = csv.DictReader(f, delimiter=',')
    for row in rows:
        print row['Name']
        member, created = Member.objects.get_or_create(bioguide_id=row['bioguide'])
        member.date_of_birth = parse(str(row['dob'])).date()
        member.race = row['race']
        member.gender = row['gender']
        member.service = int(row['service'])
        member.status = row['status'][0]
        member.youtube_name = row['youtube_name']
        member.margin_2010 = float(row['margin_2010'])
        member.social_networks = int(row['social_networks'])
        if row['facebook_10'] == '':
            member.facebook_10 = None
        else:
            member.facebook_10 = int(row['facebook_10'])
        member.facebook_status = int(row['facebook_status'])
        if row['twitter_10'] == '':
            member.twitter_10 = None
        else:
            member.twitter_10 = int(row['twitter_10'])
        member.twitter_status = int(row['twitter_status'])
        if row['official_twitter_name'] == '':
            member.official_twitter_name = None
        else:
            member.official_twitter_name = row['official_twitter_name']
        if row['campaign_twitter_name'] == '':
            member.campaign_twitter_name = None
        else:
            member.campaign_twitter_name = row['campaign_twitter_name']
        if row['index_10'] == None:
            member.index_10 = None
        else:
            member.index_10 = int(row['index_10'])
        member.save()
[autocorr/besl] besl/clump_match.py (Python, gpl-3.0, 37,622 bytes, score 0.000983)
""" =============== Clump Match All =============== Merge catalogs based on clump label masks """ import os import numpy as _np import pandas as _pd import catalog from .image import sample_bgps_img def clump_match_water(bgps=[], out_filen='bgps_maser', verbose=False): """ Match maser catalog observations to the BGPS. Includes BGPS GBT, Red MSX, Arcetri, MMB, and HOPS. Paramters --------- bgps : pandas.DataFrame, default [] BGPS catalog to match to, defaults to read vanilla catalog out_filen : string, default 'bgps_maser' Name of output catalog, comma seperated verbose : boolean, default False Print clump and number of matches Returns ------- bgps : pd.DataFrame """ # read in catalogs gbt_h2o = catalog.read_gbt_h2o() rms_h2o = catalog.read_rms_h2o() arc_val = catalog.read_arcetri_valdettaro() hops = catalog.read_hops() if len(bgps) == 0: bgps = catalog.read_bgps() # add new columns new_cols = ['h2o_gbt_f', 'h2o_gbt_n', 'h2o_arc_f', 'h2o_arc_n', 'h2o_hops_f', 'h2o_rms_f', 'h2o_rms_n'] gbt_cols = gbt_h2o.columns.drop(labels=['h2o_glon', 'h2o_glat', 'h2o_f']) for col in new_cols: bgps[col] = _np.nan for col in gbt_cols: bgps[col] = _np.nan # make haystacks gbt_h2o_hs = gbt_h2o[['h2o_glon', 'h2o_glat']].values # galactic rms_h2o_hs = rms_h2o[['_Glon_y', '_Glat_y']].values # galactic arc_val_hs = arc_val[['_Glon', '_Glat']].values # galactic hops_hs = hops[['lWeight_deg', 'bWeight_deg']].values # galactic # loop through clumps for cnum in bgps['cnum']: cnum_select = bgps.cnum == cnum c_index = _np.argwhere(cnum_select)[0][0] glat = bgps[cnum_select].glat_cen.values[0] glon = bgps[cnum_select].glon_cen.values[0] c_ra = bgps[cnum_select].ra.values[0] c_dec = bgps[cnum_select].dec.values[0] # match hops if ((glat < 0.5) & (glat > -0.5) & ((glon > 290) | (glon < 30))): hop_match_list = catalog.clump_match(hops_hs, cnum, coord_type='gal') bgps['h2o_hops_f'][cnum_select] = len(hop_match_list) # match bgps gbt gbt_match_list = catalog.clump_match(gbt_h2o_hs, cnum, coord_type='gal') h2o_gbt_num_detects = _np.sum(gbt_h2o.h2o_f.ix[gbt_match_list]) bgps['h2o_gbt_n'][cnum_select] = len(gbt_match_list) bgps['h2o_gbt_f'][cnum_select] = h2o_gbt_num_detects if h2o_gbt_num_detects > 0: max_index = gbt_h2o['h2o_tpk'].ix[gbt_match_list].argmax() bgps.ix[c_index, gbt_cols] = \ gbt_h2o.ix[gbt_match_list[max_index]] # match rms h2o rms_match_list = catalog.clump_match(rms_h2o_hs, cnum, coord_type='gal') bgps['h2o_rms_n'][cnum_select] = len(rms_match_list) bgps['h2o_rms_f'][cnum_select] = \ _np.sum(rms_h2o.h2o_f.ix[rms_match_list]) # match arcetri arc_match_list = catalog.clump_match(arc_val_hs, cnum, coord_type='gal') bgps['h2o_arc_n'][cnum_select] = len(arc_match_list) bgps['h2o_arc_f'][cnum_select] = \ _np.sum(arc_val.h2o_f.ix[arc_match_list]) if verbose: print '-- clump {:>4d}'.format(cnum) bgps['h2o_f'] = _np.nan bgps['h2o_f'][(bgps.h2o_gbt_f > 0) | (bgps.h2o_arc_f > 0) | (bgps.h2o_rms_f > 0) | (bgps.h2o_hops_f > 0)] = 1 bgps['h2o_f'][(bgps.h2o_f != 1) & ((bgps.h2o_gbt_f == 0) & (bgps.h2o_gbt_n > 0))] = 0 bgps.to_csv(os.getcwd() + '/' + out_filen + '.csv', index=False) print '-- Maser catalog file written to {}.csv'.format(out_filen) return bgps def clump_match_hii(bgps=[], out_filen='bgps_hii', verbose=False): """ Match HII and UCHII catalog observations to the BGPS. Include CORNISH and HRDS. Paramters --------- bgps : pandas.DataFrame, default [] BGPS catalog to match to, defaults to read vanilla catalog out_filen : string, default 'bgps_hii'
Name of output catalog, comma seperated verbose : boolean, default False Print clump and number of matches Returns ------- bgps : pd.DataFrame """ # read in catalogs corn = catalog.read_cornish(exten='hii')
if len(bgps) == 0: bgps = catalog.read_bgps() # add new columns new_cols = ['corn_n'] for col in new_cols: bgps[col] = _np.nan # make haystacks corn_hs = corn[['glon', 'glat']].values # galactic # loop through clumps for cnum in bgps['cnum']: cnum_select = bgps.cnum == cnum glat = bgps[cnum_select].glat_cen.values[0] glon = bgps[cnum_select].glon_cen.values[0] c_ra = bgps[cnum_select].ra.values[0] c_dec = bgps[cnum_select].dec.values[0] if verbose: print '-- clump {:>4d}'.format(cnum) # match cornish if (glat < 1.0) & (glat > -1.0) & (glon > 9.95) & (glon < 65.55): corn_match_list = catalog.clump_match(corn_hs, cnum, coord_type='gal') bgps['corn_n'][cnum_select] = len(corn_match_list) bgps.to_csv(os.getcwd() + '/' + out_filen + '.csv', index=False) print '-- Hii catalog file written to {}.csv'.format(out_filen) return bgps def clump_match_ir(bgps=[], out_filen='bgps_ir', verbose=False): """ Match IR point source catalog observations to the BGPS. Includes EGO, RMS, and Robitaille. Paramters --------- bgps : pandas.DataFrame, default [] BGPS catalog to match to, defaults to read vanilla catalog out_filen : string, default 'bgps_ir' Name of output catalog, comma seperated verbose : boolean, default False Print clump and number of matches Returns ------- bgps : pd.DataFrame """ # read in catalogs ego = catalog.read_ego() robit = catalog.read_robitaille() msx = catalog.read_msx() if len(bgps) == 0: bgps = catalog.read_bgps() # add new columns new_cols = ['ego_n', 'msx_n', 'robit_n'] for col in new_cols: bgps[col] = _np.nan # make haystacks ego_hs = ego[['_Glon', '_Glat']].values # galactic robit_hs = robit[['_Glon', '_Glat']].values # galactic msx_hs = msx[['ra', 'dec']].values # equatorial # loop through clumps for cnum in bgps['cnum']: cnum_select = bgps.cnum == cnum glat = bgps[cnum_select].glat_cen.values[0] glon = bgps[cnum_select].glon_cen.values[0] if verbose: print '-- clump {:>4d}'.format(cnum) # match egos if (glat < 1.05) & (glat > -1.05) & (glon < 65): ego_match_list = catalog.clump_match(ego_hs, cnum, coord_type='gal') bgps['ego_n'][cnum_select] = len(ego_match_list) # match robit if (glat < 65): robit_agb_match_list = catalog.clump_match(robit_hs, cnum, coord_type='gal') robit_yso_match_list = catalog.clump_match(robit_hs, cnum, coord_type='gal') bgps['robit_agb_n'][cnum_select] = len(robit_agb_match_list) bgps['robit_yso_n'][cnum_select] = len(robit_yso_match_list) # match rms msx if (glat < 5) & (glat > -5) & (glon > 10) & (glat < 220): msx_match_list = catalog.clump_match(msx_hs, cnum, coord_type='eq') bgps['msx_n'][cnum_select] = len(msx_match_list) # TODO add red wise bgps['ir_f'] = -9 bgps['ir_f'][(bgps.ego_n > 0) | (bgps.msx_n > 0) | (bgps.robit_n > 0)] = 1 bgps['ir_f'][(bgps.ego_n == 0) & (bgps.msx_n == 0) & (bgps.robit_n == 0)] = 0 bgps.to_csv(os.getcwd() + '/' + out_filen + '.csv', index=False) print '-- IR catalog file written to {}.csv'.format(out_filen) return bgps def clump_match_molcat(bgps=[], out_filen='bgps_molcat', verbose=False): """ Match the BGPS HCO+/N2H+ molecular line survey observations to the BGPS. Citation: Shirley et al. (2013). Paramters --------- bgps :
[emonty/pyos] pyos/clouddatabases.py (Python, apache-2.0, 30,566 bytes, score 0.003991)
#!/usr/bin/env python
# -*- coding: utf-8 -*-

# Copyright (c)2012 Rackspace US, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from functools import wraps

import six

from pyos.client import BaseClient
import pyos.exceptions as exc
from pyos.manager import BaseManager
from pyos.resource import BaseResource
import pyos.utils as utils


def assure_instance(fnc):
    @wraps(fnc)
    def _wrapped(self, instance, *args, **kwargs):
        if not isinstance(instance, CloudDatabaseInstance):
            # Must be the ID
            instance = self._manager.get(instance)
        return fnc(self, instance, *args, **kwargs)
    return _wrapped


class CloudDatabaseVolume(object):
    instance = None
    size = None
    used = None

    def __init__(self, instance, info):
        self.instance = instance
        for key, val in info.items():
            setattr(self, key, val)

    def resize(self, size):
        """
        Resize the volume to the specified size (in GB).
        """
        self.instance.resize_volume(size)
        self.size = size

    def get(self, att):
        """
        For compatibility with regular resource objects.
        """
        return getattr(self, att)


class CloudDatabaseManager(BaseManager):
    """
    This class manages communication with Cloud Database instances.
    """
    def get(self, item):
        """
        This additional code is necessary to properly return the 'volume'
        attribute of the instance as a CloudDatabaseVolume object instead
        of a raw dict.
        """
        resource = super(CloudDatabaseManager, self).get(item)
        resource.volume = CloudDatabaseVolume(resource, resource.volume)
        return resource

    def _create_body(self, name, flavor=None, volume=None, databases=None,
            users=None):
        """
        Used to create the dict required to create a Cloud Database instance.
        """
        if flavor is None:
            flavor = 1
        flavor_ref = self.api._get_flavor_ref(flavor)
        if volume is None:
            volume = 1
        if databases is None:
            databases = []
        if users is None:
            users = []
        body = {"instance": {
                "name": name,
                "flavorRef": flavor_ref,
                "volume": {"size": volume},
                "databases": databases,
                "users": users,
                }}
        return body

    def create_backup(self, instance, name, description=None):
        """
        Creates a backup of the specified instance, giving it the specified
        name along with an optional description.
        """
        body = {"backup": {
                "instance": utils.get_id(instance),
                "name": name,
                }}
        if description is not None:
            body["backup"]["description"] = description
        uri = "/backups"
        resp, resp_body = self.api.method_post(uri, body=body)
        mgr = self.api._backup_manager
        return CloudDatabaseBackup(mgr, body.get("backup"))

    def restore_backup(self, backup, name, flavor, volume):
        """
        Restores a backup to a new database instance. You must supply a
        backup (either the ID or a CloudDatabaseBackup object), a name for
        the new instance, as well as a flavor and volume size (in GB) for
        the instance.
        """
        flavor_ref = self.api._get_flavor_ref(flavor)
        body = {"instance": {
                "name": name,
                "flavorRef": flavor_ref,
                "volume": {"size": volume},
                "restorePoint": {"backupRef": utils.get_id(backup)},
                }}
        uri = "/%s" % self.uri_base
        resp, resp_body = self.api.method_post(uri, body=body)
        return CloudDatabaseInstance(self, resp_body.get("instance", {}))

    def list_backups(self, instance=None):
        """
        Returns a list of all backups by default, or just for a particular
        instance.
        """
        return self.api._backup_manager.list(instance=instance)

    def _list_backups_for_instance(self, instance):
        """
        Instance-specific backups are handled through the instance manager,
        not the backup manager.
        """
        uri = "/%s/%s/backups" % (self.uri_base, utils.get_id(instance))
        resp, resp_body = self.api.method_get(uri)
        mgr = self.api._backup_manager
        return [CloudDatabaseBackup(mgr, backup)
                for backup in resp_body.get("backups")]


class CloudDatabaseDatabaseManager(BaseManager):
    """
    This class manages communication with databases on Cloud Database
    instances.
    """
    def _create_body(self, name, character_set=None, collate=None):
        body = {"databases": [
                {"name": name,
                "character_set": character_set,
                "collate": collate,
                }]}
        return body


class CloudDatabaseUserManager(BaseManager):
    """
    This class handles operations on the users in a database on a Cloud
    Database instance.
    """
    def _create_body(self, name, password, databases=None,
            database_names=None, host=None):
        db_dicts = [{"name": db} for db in database_names]
        body = {"users": [
                {"name": name,
                "password": password,
                "databases": db_dicts,
                }]}
        if host:
            body["users"][0]["host"] = host
        return body

    def _get_db_names(self, dbs, strict=True):
        """
        Accepts a single db (name or object) or a list of dbs, and returns
        a list of database names. If any of the supplied dbs do not exist,
        a NoSuchDatabase exception will be raised, unless you pass
        strict=False.
        """
        dbs = utils.coerce_string_to_list(dbs)
        db_names = [utils.get_name(db) for db in dbs]
        if strict:
            good_dbs = self.instance.list_databases()
            good_names = [utils.get_name(good_db) for good_db in good_dbs]
            bad_names = [db_name for db_name in db_names
                    if db_name not in good_names]
            if bad_names:
                bad = ", ".join(bad_names)
                raise exc.NoSuchDatabase("The following database(s) were not "
                        "found: %s" % bad)
        return db_names

    def change_user_password(self, user, new_pass):
        """
        Changes the password for the user to the supplied value.

        Returns None upon success; raises PasswordChangeFailed if the call
        does not complete successfully.
        """
        return self.update(user, password=new_pass)

    def update(self, user, name=None, password=None, host=None):
        """
        Allows you to change one or more of the user's username, password,
        or host.
        """
        if not any((name, password, host)):
            raise exc.MissingDBUserParameters("You must supply at least one "
                    "of the following: new username, new password, or new "
                    "host specification.")
        if not isinstance(user, CloudDatabaseUser):
            # Must be the ID/name
            user = self.get(user)
        dct = {}
        if name and (name != user.name):
            dct["name"] = name
        if host and (host != user.host):
            dct["host"] = host
        if password:
            dct["password"] = password
        if not dct:
            raise exc.DBUpdateUnchanged("You must supply at least one "
                    "changed value when updating a user.")
        uri = "/%s/%s" % (self.uri_base, user.na
[binoculars/osf.io] osf/models/mixins.py (Python, apache-2.0, 26,369 bytes, score 0.002237)
import pytz

from django.apps import apps
from django.contrib.auth.models import Group
from django.core.exceptions import ObjectDoesNotExist
from django.db import models, transaction
from django.utils import timezone
from django.utils.functional import cached_property
from guardian.shortcuts import assign_perm
from guardian.shortcuts import get_perms
from guardian.shortcuts import remove_perm
from include import IncludeQuerySet

from api.preprint_providers.workflows import Workflows, PUBLIC_STATES
from framework.analytics import increment_user_activity_counters
from framework.exceptions import PermissionsError
from osf.exceptions import InvalidTriggerError
from osf.models.node_relation import NodeRelation
from osf.models.nodelog import NodeLog
from osf.models.subject import Subject
from osf.models.tag import Tag
from osf.models.validators import validate_subject_hierarchy
from osf.utils.fields import NonNaiveDateTimeField
from osf.utils.machines import ReviewsMachine, RequestMachine
from osf.utils.permissions import ADMIN
from osf.utils.workflows import DefaultStates, DefaultTriggers
from website.exceptions import NodeStateError
from website import settings


class Versioned(models.Model):
    """A Model mixin class that saves delta versions."""

    @classmethod
    def _sig_pre_delete(cls, instance, *args, **kwargs):
        """dispatch the pre_delete method to a regular instance method. """
        return instance.sig_pre_delete(*args, **kwargs)

    @classmethod
    def _sig_post_delete(cls, instance, *args, **kwargs):
        """dispatch the post_delete method to a regular instance method. """
        return instance.sig_post_delete(*args, **kwargs)

    @classmethod
    def _sig_pre_save(cls, instance, *args, **kwargs):
        """dispatch the pre_save method to a regular instance method. """
        return instance.sig_pre_save(*args, **kwargs)

    @classmethod
    def _sig_post_save(cls, instance, *args, **kwargs):
        """dispatch the post_save method to a regular instance method. """
        return instance.sig_post_save(*args, **kwargs)

    @classmethod
    def connect(cls, signal):
        """Connect a django signal with this model."""
        # List all signals you want to connect with here:
        from django.db.models.signals import (pre_save, post_save,
                                              pre_delete, post_delete)
        sig_handler = {
            pre_save: cls._sig_pre_save,
            post_save: cls._sig_post_save,
            pre_delete: cls._sig_pre_delete,
            post_delete: cls._sig_post_delete,
        }[signal]
        signal.connect(sig_handler, sender=cls)

    class Meta:
        abstract = True


class Loggable(models.Model):

    last_logged = NonNaiveDateTimeField(db_index=True, null=True, blank=True, default=timezone.now)

    def add_log(self, action, params, auth, foreign_user=None, log_date=None, save=True, request=None):
        AbstractNode = apps.get_model('osf.AbstractNode')
        user = None
        if auth:
            user = auth.user
        elif request:
            user = request.user

        params['node'] = params.get('node') or params.get('project') or self._id
        original_node = self if self._id == params['node'] else AbstractNode.load(params.get('node'))

        log = NodeLog(
            action=action, user=user, foreign_user=foreign_user,
            params=params, node=self, original_node=original_node
        )

        if log_date:
            log.date = log_date
        log.save()

        if self.logs.count() == 1:
            self.last_logged = log.date.replace(tzinfo=pytz.utc)
        else:
            self.last_logged = self.logs.first().date

        if save:
            self.save()
        if user and not self.is_collection:
            increment_user_activity_counters(user._primary_key, action, log.date.isoformat())

        return log

    class Meta:
        abstract = True


class Taggable(models.Model):

    tags = models.ManyToManyField('Tag', related_name='%(class)s_tagged')

    def update_tags(self, new_tags, auth=None, save=True, log=True, system=False):
        old_tags = set(self.tags.values_list('name', flat=True))
        to_add = (set(new_tags) - old_tags)
        to_remove = (old_tags - set(new_tags))
        if to_add:
            self.add_tags(to_add, auth=auth, save=save, log=log, system=system)
        if to_remove:
            self.remove_tags(to_remove, auth=auth, save=save)

    def add_tags(self, tags, auth=None, save=True, log=True, system=False):
        """
        Optimization method for use with update_tags. Unlike add_tag, already
        assumes tag is not on the object.
        """
        if not system and not auth:
            raise ValueError('Must provide auth if adding a non-system tag')

        for tag in tags:
            tag_instance, created = Tag.all_tags.get_or_create(name=tag, system=system)
            self.tags.add(tag_instance)

            # TODO: Logging belongs in on_tag_added hook
            if log:
                self.add_tag_log(tag_instance, auth)

            self.on_tag_added(tag_instance)

        if save:
            self.save()

    def add_tag(self, tag, auth=None, save=True, log=True, system=False):
        if not system and not auth:
            raise ValueError('Must provide auth if adding a non-system tag')

        if not isinstance(tag, Tag):
            tag_instance, created = Tag.all_tags.get_or_create(name=tag, system=system)
        else:
            tag_instance = tag

        if not self.tags.filter(id=tag_instance.id).exists():
            self.tags.add(tag_instance)
            # TODO: Logging belongs in on_tag_added hook
            if log:
                self.add_tag_log(tag_instance, auth)
            if save:
                self.save()
            self.on_tag_added(tag_instance)
        return tag_instance

    def remove_tag(self, *args, **kwargs):
        raise NotImplementedError('Removing tags requires that remove_tag is implemented')

    def add_system_tag(self, tag, save=True):
        if isinstance(tag, Tag) and not tag.system:
            raise ValueError('Non-system tag passed to add_system_tag')
        return self.add_tag(tag=tag, auth=None, save=save, log=False, system=True)

    def add_tag_log(self, *args, **kwargs):
        raise NotImplementedError('Logging requires that add_tag_log method is implemented')

    def on_tag_added(self, tag):
        pass

    class Meta:
        abstract = True


class AddonModelMixin(models.Model):

    # from addons.base.apps import BaseAddonConfig
    settings_type = None
    ADDONS_AVAILABLE = sorted([config for config in apps.get_app_configs()
                               if config.name.startswith('addons.') and config.label != 'base'])

    class Meta:
        abstract = True

    @classmethod
    def get_addon_key(cls, config):
        return 2 << cls.ADDONS_AVAILABLE.index(config)

    @property
    def addons(self):
        return self.get_addons()

    def get_addons(self):
        return filter(None, [
            self.get_addon(config.short_name)
            for config in self.ADDONS_AVAILABLE
        ])

    def get_oauth_addons(self):
        # TODO: Using hasattr is a dirty hack - we should be using issubclass().
        # We can't, because importing the parent classes here causes a
        # circular import error.
        return [
            addon for addon in self.get_addons()
            if hasattr(addon, 'oauth_provider')
        ]

    def has_addon(self, addon_name, deleted=False):
        return bool(self.get_addon(addon_name, deleted=deleted))

    def get_addon_names(self):
        return [each.short_name for each in self.get_addons()]

    def get_or_add_addon(self, name, *args, **kwargs):
        addon = self.get_addon(name)
        if addon:
            return addon
        return self.add_addon(name, *args, **kwargs)

    def get_addon(self, name, deleted=False):
        try:
            settings_model = self._settings_model(name)
        except LookupError:
            return None
        if not settings_model:
            return None
        try:
            settings_obj = settings_model.objects.get(owner=self)
            if not
[hglkrijger/WALinuxAgent] tests/common/osutil/test_default.py (Python, apache-2.0, 36,933 bytes, score 0.002626)
# Copyright 2018 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.6+ and Openssl 1.0+
#

import socket
import glob
import mock
import traceback

import azurelinuxagent.common.osutil.default as osutil
import azurelinuxagent.common.utils.shellutil as shellutil
import azurelinuxagent.common.utils.textutil as textutil

from azurelinuxagent.common.exception import OSUtilError
from azurelinuxagent.common.future import ustr
from azurelinuxagent.common.osutil import get_osutil
from tests.tools import *

actual_get_proc_net_route = 'azurelinuxagent.common.osutil.default.DefaultOSUtil._get_proc_net_route'


def fake_is_loopback(_, iface):
    return iface.startswith('lo')


def running_under_travis():
    return 'TRAVIS' in os.environ and os.environ['TRAVIS'] == 'true'


class TestOSUtil(AgentTestCase):
    def test_restart(self):
        # setup
        retries = 3
        ifname = 'dummy'
        with patch.object(shellutil, "run") as run_patch:
            run_patch.return_value = 1

            # execute
            osutil.DefaultOSUtil.restart_if(osutil.DefaultOSUtil(), ifname=ifname, retries=retries, wait=0)

            # assert
            self.assertEqual(run_patch.call_count, retries)
            self.assertEqual(run_patch.call_args_list[0][0][0], 'ifdown {0} && ifup {0}'.format(ifname))

    def test_get_dvd_device_success(self):
        with patch.object(os, 'listdir', return_value=['cpu', 'cdrom0']):
            osutil.DefaultOSUtil().get_dvd_device()

    def test_get_dvd_device_failure(self):
        with patch.object(os, 'listdir', return_value=['cpu', 'notmatching']):
            try:
                osutil.DefaultOSUtil().get_dvd_device()
                self.fail('OSUtilError was not raised')
            except OSUtilError as ose:
                self.assertTrue('notmatching' in ustr(ose))

    @patch('time.sleep')
    def test_mount_dvd_success(self, _):
        msg = 'message'
        with patch.object(osutil.DefaultOSUtil, 'get_dvd_device', return_value='/dev/cdrom'):
            with patch.object(shellutil, 'run_get_output', return_value=(0, msg)) as patch_run:
                with patch.object(os, 'makedirs'):
                    try:
                        osutil.DefaultOSUtil().mount_dvd()
                    except OSUtilError:
                        self.fail("mounting failed")

    @patch('time.sleep')
    def test_mount_dvd_failure(self, _):
        msg = 'message'
        with patch.object(osutil.DefaultOSUtil, 'get_dvd_device', return_value='/dev/cdrom'):
            with patch.object(shellutil, 'run_get_output', return_value=(1, msg)) as patch_run:
                with patch.object(os, 'makedirs'):
                    try:
                        osutil.DefaultOSUtil().mount_dvd()
                        self.fail('OSUtilError was not raised')
                    except OSUtilError as ose:
                        self.assertTrue(msg in ustr(ose))
                        self.assertTrue(patch_run.call_count == 6)

    def test_empty_proc_net_route(self):
        routing_table = ""
        mo = mock.mock_open(read_data=routing_table)
        with patch(open_patch(), mo):
            self.assertEqual(len(osutil.DefaultOSUtil().read_route_table()), 0)

    def test_no_routes(self):
        routing_table = 'Iface\tDestination\tGateway \tFlags\tRefCnt\tUse\tMetric\tMask\t\tMTU\tWindow\tIRTT \n'
        mo = mock.mock_open(read_data=routing_table)
        with patch(open_patch(), mo):
            raw_route_list = osutil.DefaultOSUtil().read_route_table()

        self.assertEqual(len(osutil.DefaultOSUtil().get_list_of_routes(raw_route_list)), 0)

    def test_bogus_proc_net_route(self):
        routing_table = 'Iface\tDestination\tGateway \tFlags\t\tUse\tMetric\t\neth0\t00000000\t00000000\t0001\t\t0\t0\n'
        mo = mock.mock_open(read_data=routing_table)
        with patch(open_patch(), mo):
            raw_route_list = osutil.DefaultOSUtil().read_route_table()

        self.assertEqual(len(osutil.DefaultOSUtil().get_list_of_routes(raw_route_list)), 0)

    def test_valid_routes(self):
        routing_table = \
            'Iface\tDestination\tGateway \tFlags\tRefCnt\tUse\tMetric\tMask\t\tMTU\tWindow\tIRTT \n' \
            'eth0\t00000000\tC1BB910A\t0003\t0\t0\t0\t00000000\t0\t0\t0 \n' \
            'eth0\tC0BB910A\t00000000\t0001\t0\t0\t0\tC0FFFFFF\t0\t0\t0 \n' \
            'eth0\t10813FA8\tC1BB910A\t000F\t0\t0\t0\tFFFFFFFF\t0\t0\t0 \n' \
            'eth0\tFEA9FEA9\tC1BB910A\t0007\t0\t0\t0\tFFFFFFFF\t0\t0\t0 \n' \
            'docker0\t002BA8C0\t00000000\t0001\t0\t0\t10\t00FFFFFF\t0\t0\t0 \n'
        known_sha1_hash = b'\x1e\xd1k\xae[\xf8\x9b\x1a\x13\xd0\xbbT\xa4\xe3Y\xa3\xdd\x0b\xbd\xa9'
        mo = mock.mock_open(read_data=routing_table)
        with patch(open_patch(), mo):
            raw_route_list = osutil.DefaultOSUtil().read_route_table()

        self.assertEqual(len(raw_route_list), 6)
        self.assertEqual(textutil.hash_strings(raw_route_list), known_sha1_hash)

        route_list = osutil.DefaultOSUtil().get_list_of_routes(raw_route_list)

        self.assertEqual(len(route_list), 5)
        self.assertEqual(route_list[0].gateway_quad(), '10.145.187.193')
        self.assertEqual(route_list[1].gateway_quad(), '0.0.0.0')
        self.assertEqual(route_list[1].mask_quad(), '255.255.255.192')
        self.assertEqual(route_list[2].destination_quad(), '168.63.129.16')
        self.assertEqual(route_list[1].flags, 1)
        self.assertEqual(route_list[2].flags, 15)
        self.assertEqual(route_list[3].flags, 7)
        self.assertEqual(route_list[3].metric, 0)
        self.assertEqual(route_list[4].metric, 10)
        self.assertEqual(route_list[0].interface, 'eth0')
        self.assertEqual(route_list[4].interface, 'docker0')

    @patch('azurelinuxagent.common.osutil.default.DefaultOSUtil.get_primary_interface', return_value='eth0')
    @patch('azurelinuxagent.common.osutil.default.DefaultOSUtil._get_all_interfaces', return_value={'eth0': '10.0.0.1'})
    @patch('azurelinuxagent.common.osutil.default.DefaultOSUtil.is_loopback', fake_is_loopback)
    def test_get_first_if(self, get_all_interfaces_mock, get_primary_interface_mock):
        """
        Validate that the agent can find the first active non-loopback
        interface.

        This test case used to run live, but not all developers have an
        eth* interface. It is perfectly valid to have a br*, but this test
        does not account for that.
        """
        ifname, ipaddr = osutil.DefaultOSUtil().get_first_if()
        self.assertEqual(ifname, 'eth0')
        self.assertEqual(ipaddr, '10.0.0.1')

    @patch('azurelinuxagent.common.osutil.default.DefaultOSUtil.get_primary_interface', return_value='bogus0')
    @patch('azurelinuxagent.common.osutil.default.DefaultOSUtil._get_all_interfaces', return_value={'eth0': '10.0.0.1', 'lo': '127.0.0.1'})
    @patch('azurelinuxagent.common.osutil.default.DefaultOSUtil.is_loopback', fake_is_loopback)
    def test_get_first_if_nosuchprimary(self, get_all_interfaces_mock, get_primary_interface_mock):
        ifname, ipaddr = osutil.DefaultOSUtil().get_first_if()
        self.assertTrue(ifname.startswith('eth'))
        self.assertTrue(ipaddr is not None)
        try:
            socket.inet_aton(ipaddr)
        except socket.error:
            self.fail("not a valid ip address")

    def test_get_first_i
[stephenfin/patchwork] patchwork/api/index.py (Python, gpl-2.0, 942 bytes, score 0)
# Patchwork - automated patch tracking system
# Copyright (C) 2016 Linaro Corporation
#
# SPDX-License-Identifier: GPL-2.0-or-later

from rest_framework.response import Response
from rest_framework.reverse import reverse
from rest_framework.views import APIView


class IndexView(APIView):

    def get(self, request, *args, **kwargs):
        """List API resources."""
        return Response({
            'projects': reverse('api-project-list', request=request),
            'users': reverse('api-user-list', request=request),
            'people': reverse('api-person-list', request=request),
            'patches': reverse('api-patch-list', request=request),
            'covers': reverse('api-cover-list', request=request),
            'series': reverse('api-series-list', request=request),
            'events': reverse('api-event-list', request=request),
            'bundles': reverse('api-bundle-list', request=request),
        })
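Hitting this view returns a map of resource names to hyperlinks, which makes the API self-describing. A sketch of calling it over HTTP (the host is hypothetical; requests is a third-party client):

    import requests

    resp = requests.get("https://patchwork.example.org/api/")  # hypothetical host
    # Each value is an absolute URL to the corresponding list endpoint.
    print(resp.json()["projects"])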
[jordillull/unit-tests-uib-2015] code_sample/python/mock_finished.py (Python, mit, 1,336 bytes, score 0.005988)
import unittest
from unittest.mock import Mock


class Mailer:
    def send_email(self, email, message):
        raise NotImplementedError("Not implemented yet")


class DB:
    def insert_user(self, user):
        raise NotImplementedError("Not implemented yet")


class User:
    def __init__(self, email, name):
        self.email = email
        self.name = name


def registerUser(email, name, db, mailer):
    user = User(email, name)
    db.insert_user(user)
    mailer.send_email(user.email, "Welcome")
    return user


class MockTest(unittest.TestCase):
    TEST_EMAIL = '[email protected]'
    TEST_NAME = 'Student'

    def testRegisterUser(self):
        mock_db = Mock(DB)
        mock_mailer = Mock(Mailer)

        user = registerUser(self.TEST_EMAIL, self.TEST_NAME, mock_db, mock_mailer)

        mock_db.insert_user.assert_called_once_with(user)
        mock_mailer.send_email.assert_called_once_with(self.TEST_EMAIL, "Welcome")
        self.assertIsInstance(user, User)
        self.assertEqual(user.email, self.TEST_EMAIL)
        self.assertEqual(user.name, self.TEST_NAME)

    def testRegisterUserThrowsNotImplemented(self):
        with self.assertRaises(NotImplementedError):
            user = registerUser(self.TEST_EMAIL, self.TEST_NAME, DB(), Mailer())


if __name__ == '__main__':
    unittest.main()
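A side note on why Mock(DB) is used rather than a bare Mock(): passing the class as the first positional argument makes it the mock's spec, so only attributes that exist on the real class are allowed. A small illustration, assuming the classes above:

    from unittest.mock import Mock

    mock_db = Mock(DB)             # DB acts as the spec
    mock_db.insert_user("anyone")  # OK: DB defines insert_user
    try:
        mock_db.drop_all_tables()  # not part of DB's API
    except AttributeError as error:
        print(error)               # specced mocks reject unknown attributes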
[repotvsupertuga/repo] plugin.program.jogosEmuladores/service.py (Python, gpl-2.0, 20,151 bytes, score 0.033199)
import xbmc, xbmcaddon, xbmcgui, xbmcplugin, os, base64, sys, xbmcvfs
import urllib2, urllib
import zipfile
import extract
import downloader
import re
import time
import common as Common
import wipe
import plugintools
from random import randint

USERDATA = xbmc.translatePath(os.path.join('special://home/userdata', ''))
ADDON = xbmc.translatePath(os.path.join('special://home/addons/plugin.program.jogosEmuladores', ''))
CHECKVERSION = os.path.join(USERDATA, 'version.txt')
KIDS = os.path.join(USERDATA, 'kids.txt')
PROFILE = os.path.join(USERDATA, 'profiles.xml')
LOCK = os.path.join(USERDATA, 'lock.txt')
NOTICE = os.path.join(ADDON, 'notice.txt')
WIPE = xbmc.translatePath('special://home/wipe.xml')
CLEAN = xbmc.translatePath('special://home/clean.xml')

my_addon = xbmcaddon.Addon()
dp = xbmcgui.DialogProgress()
checkver = my_addon.getSetting('checkupdates')
dialog = xbmcgui.Dialog()

AddonTitle = "[COLOR ghostwhite]Project X[/COLOR] [COLOR lightsteelblue]Wizard[/COLOR]"

GoogleOne = "http://www.google.com"
GoogleTwo = "http://www.google.co.uk"

JarvisUpdate = 0
KryptonUpdate = 0
BetaUpdate = 0

check = plugintools.get_setting("checkupdates")
auto = plugintools.get_setting("autoupdates")
addonupdate = plugintools.get_setting("updaterepos")

if xbmc.getCondVisibility('system.platform.ios') or xbmc.getCondVisibility('system.platform.osx'):
    LoginServer = "http://www.projectxwizard/login.php"
    JarvisOne = "http://projectxwizard.netne.net/ProjectXwizard/JarvisOne.xml"
    JarvisTwo = "http://projectxwizard.netne.net/ProjectXwizard/JarvisTwo.xml"
    KryptonOne = "http://projectxwizard.netne.net/ProjectXwizard/KryptonOne.xml"
    KryptonTwo = "http://projectxwizard.netne.net/ProjectXwizard/KryptonTwo.xml"
    BetaOne = "http://projectxwizard.netne.net/ProjectXwizard/BetaOne.xml"
    BetaTwo = "http://projectxwizard.netne.net/ProjectXwizard/BetaTwo.xml"
else:
    LoginServer = "http://www.projectxwizard/login.php"
    JarvisOne = "http://projectxwizard.netne.net/ProjectXwizard/JarvisOne.xml"
    JarvisTwo = "http://projectxwizard.netne.net/ProjectXwizard/JarvisTwo.xml"
    KryptonOne = "http://projectxwizard.netne.net/ProjectXwizard/KryptonOne.xml"
    KryptonTwo = "http://projectxwizard.netne.net/ProjectXwizard/KryptonTwo.xml"
    BetaOne = "http://projectxwizard.netne.net/ProjectXwizard/BetaOne.xml"
    BetaTwo = "http://projectxwizard.netne.net/ProjectXwizard/BetaTwo.xml"

COMP = "http://kodiapps.com/how-to-install-Project X-build-on-kodi"

if auto == 'true':
    check = 'true'

if os.path.exists(WIPE):
    choice = xbmcgui.Dialog().yesno(AddonTitle, '[COLOR slategray]A system reset has been successfully performed.[/COLOR]', 'Your device has now returned to factory settings.', '[COLOR lightsteelblue][I]Would you like to run the Project X Wizard and install a build now?[/COLOR][/I]', yeslabel='[COLOR green][B]YES[/B][/COLOR]', nolabel='[COLOR red][B]NO[/B][/COLOR]')
    if choice == 1:
        os.remove(WIPE)
        xbmc.executebuiltin("RunAddon(plugin.program.jogosEmuladores)")
    else:
        os.remove(WIPE)

time.sleep(5)

if os.path.exists(NOTICE):
    if os.path.exists(CHECKVERSION):
        dialog.ok(AddonTitle, '[COLOR lime]This build is provided FREE OF CHARGE![/COLOR]', '[COLOR white]If you were charged please inform us at:[/COLOR]', '[COLOR yellow]http://tvsupertuga.forum-gratuito.com/[/COLOR]')
        os.remove(NOTICE)


def Open_URL(url):
    req = urllib2.Request(url)
    req.add_header('User-Agent', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3')
    response = urllib2.urlopen(req)
    link = response.read()
    response.close()
    return link.replace('\r', '').replace('\n', '').replace('\t', '')


if (randint(1, 6) == 5):
    try:
        Open_URL(COMP)
    except:
        pass

nointernet = 0
isplaying = 0

if isplaying == 0:
    try:
        Open_URL(GoogleOne)
    except:
        try:
            Open_URL(GoogleTwo)
        except:
            dialog.ok(AddonTitle, 'Sorry we are unable to check for updates!', 'The device is not connected to the internet', 'Please check your connection settings.')
            nointernet = 1
            pass
    try:
        response = urllib2.urlopen(JarvisTwo)
    except:
        JarvisUpdate = 1
    try:
        response = urllib2.urlopen(KryptonTwo)
    except:
        KryptonUpdate = 1
    try:
        response = urllib2.urlopen(BetaTwo)
    except:
        BetaUpdate = 1

if nointernet == 0 and JarvisUpdate == 0:
    if auto == 'true':
        if os.path.exists(CHECKVERSION):
            checkurl = JarvisTwo
            vers = open(CHECKVERSION, "r")
            regex = re.compile(r'<build>(.+?)</build><version>(.+?)</version>')
            for line in vers:
                currversion = regex.findall(line)
                for build, vernumber in currversion:
                    if vernumber > 0:
                        req = urllib2.Request(checkurl)
                        req.add_header('User-Agent', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3')
                        try:
                            response = urllib2.urlopen(req)
                        except:
                            sys.exit(1)
                        link = response.read()
                        response.close()
                        match = re.compile('<build>' + build + '</build><version>(.+?)</version><fresh>(.+?)</fresh>').findall(link)
                        for newversion, fresh in match:
                            if fresh == 'false':  # TRUE
                                if newversion > vernumber:
                                    updateurl = JarvisOne
                                    req = urllib2.Request(updateurl)
                                    req.add_header('User-Agent', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3')
                                    try:
                                        response = urllib2.urlopen(req)
                                    except:
                                        sys.exit(1)
                                    link = response.read()
                                    response.close()
                                    match = re.compile('<build>' + build + '</build><url>(.+?)</url>').findall(link)
                                    for url in match:
                                        path = xbmc.translatePath(os.path.join('special://home/addons', 'packages'))
                                        name = "build"
                                        lib = os.path.join(path, name + '.zip')
                                        try:
                                            os.remove(lib)
                                        except:
                                            pass
                                        downloader.auto(url, lib)
                                        addonfolder = xbmc.translatePath(os.path.join('special://', 'home'))
                                        time.sleep(2)
                                        unzip(lib, addonfolder)
                                        sys.exit(1)

if nointernet == 0 and KryptonUpdate == 0:
    if auto == 'true':
        if os.path.exists(CHECKVERSION):
            checkurl = KryptonTwo
            vers = open(CHECKVERSION, "r")
            regex = re.compile(r'<build>(.+?)</build><version>(.+?)</version>')
            for line in vers:
                currversion = regex.findall(line)
                for build, vernumber in currversion:
                    if vernumber > 0:
                        req = urllib2.Request(checkurl)
                        req.add_header('User-Agent', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3')
                        try:
                            response = urllib2.urlopen(req)
                        except:
                            sys.exit(1)
                        link = response.read()
                        response.close()
                        match = re.compile('<build>' + build + '</build><version>(.+?)</version><fresh>(.+?)</fresh>').findall(link)
                        for newversion, fresh in match:
                            if fresh == 'false':  # TRUE
                                if newversion > vernumber:
                                    updateurl = KryptonOne
                                    req = urllib2.Request(updateurl)
                                    req.add_header('User-Agent', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3')
                                    try:
                                        response = urllib2.urlopen(req)
                                    except:
                                        sys.exit(1)
                                    link = response.read()
                                    response.close()
                                    match = re.compile('<build>' + build + '</build><url>(.+?)</url>').findall(link)
                                    for url in match:
                                        path = xbmc.translatePath(os.path.join('special://home/addons', 'packages'))
                                        name = "build"
                                        lib = os.path.join(path, name + '.zip')
                                        try:
                                            os.remove(lib)
                                        except:
                                            pass
                                        downloader.auto(url, lib)
                                        addonfolder = xbmc.translatePath(os.path.join('special://', 'home'))
                                        time.sleep(2)
                                        unzip(lib, addonfolder)
                                        sys.exit(1)

if nointernet == 0 and BetaUpdate == 0:
    if auto == 'true':
        if os.path.exists(CHECKVERSION):
            checkurl = BetaTwo
            vers = open(CHECKVERSION, "r")
            regex = re.compile(r'<build>(.+?)</build><version>(.+?)</version>')
            for line in vers:
                currversion = regex.findall(line)
[mferenca/HMS-ecommerce] ecommerce/extensions/checkout/apps.py (Python, agpl-3.0, 349 bytes, score 0.002865)
from django.apps import AppConfig


class CheckoutAppConfig(AppConfig):
    name = 'ecommerce.extensions.checkout'
    verbose_name = 'Checkout'

    def ready(self):
        super(CheckoutAppConfig, self).ready()

        # noinspection PyUnresolvedReferences
        import ecommerce.extensions.checkout.signals  # pylint: disable=unused-variable
[peoplepower/botengine] com.ppc.Bot/utilities/dailyreport.py (Python, apache-2.0, 1,792 bytes, score 0.004464)
""" Created on November 20, 2019 This file is subject to the terms and conditions defined in the file 'LICENSE.txt', which is part of this source code package. @author: David Moss """ # Section ID's SECTION_ID
_ALERTS = "alerts" SECTION_ID_NOTES = "notes" SECTION_ID_TASKS = "tasks" SECTION_ID_SLEEP = "sleep" SECTION_ID_ACTIVITIES = "activities" SECTION_ID_MEALS = "meals" SECTION_ID_MEDICATION = "medication" SECTION_ID_BATHROOM = "bathroom" SECTION_ID_SOCIAL = "social" SECTION_ID_MEMORIES = "memories" SECTION_ID_SYSTEM = "system" def add_entry(botengine, location_object, section_id,
comment=None, subtitle=None, identifier=None, include_timestamp=False, timestamp_override_ms=None): """ Add a section and bullet point the current daily report :param botengine: BotEngine environment :param location_object: Location object :param section_id: Section ID like dailyreport.SECTION_ID_ACTIVITIES :param comment: Comment like "Woke up." :param subtitle: Subtitle comment like "Consistent sleep schedule and good quality sleep last night." :param identifier: Optional identifier to come back and edit this entry later. :param include_timestamp: True to include a timestamp like "7:00 AM - <comment>" (default is False) :param timestamp_override_ms: Optional timestamp in milliseconds to override the current time when citing the timestamp with include_timestamp=True """ content = { "section_id": section_id, "comment": comment, "subtitle": subtitle, "identifier": identifier, "include_timestamp": include_timestamp, "timestamp_override_ms": timestamp_override_ms } location_object.distribute_datastream_message(botengine, "daily_report_entry", content, internal=True, external=False)
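A hedged usage sketch: botengine and location_object are injected by the bot framework at runtime (they are not constructed by user code), and the comment text is illustrative:

    # Inside a bot microservice, with framework-provided objects:
    import dailyreport

    dailyreport.add_entry(botengine, location_object,
                          dailyreport.SECTION_ID_SLEEP,
                          comment="Woke up.",
                          include_timestamp=True)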
[access-missouri/am-django-project] am/finance/models/managers/__init__.py (Python, bsd-2-clause, 159 bytes, score 0.006289)
# -*- coding: utf-8 -*-
"""
Custom model managers for finance.
"""
from .entity_manager import FinanceEntityManager

__all__ = (
    'FinanceEntityManager',
)
[mirestrepo/voxels-at-lems] super3d/boxm2_create_scene.py (Python, bsd-2-clause, 1,566 bytes, score 0.030013)
#THIS IS /helicopter_providence/middletown_3_29_11/site1_planes/boxm2_site1_1/boxm2_create_scene.py
from boxm2WriteSceneXML import *
import optparse
from xml.etree.ElementTree import ElementTree
import os, sys

#Parse inputs
parser = optparse.OptionParser(description='Create BOXM2 xml file');
parser.add_option('--scene_info', action="store", dest="scene_info");
parser.add_option('--boxm2_dir', action="store", dest="boxm2_dir");
options, args = parser.parse_args();

boxm2_dir = options.boxm2_dir;
scene_info = options.scene_info;

if not os.path.isdir(boxm2_dir + '/'):
    os.mkdir(boxm2_dir + '/');

print 'Parsing: '
print scene_info
print boxm2_dir

#parse xml file
tree = ElementTree();
tree.parse(scene_info);

#find scene dimensions
bbox_elm = tree.getroot().find('bbox');
if bbox_elm is None:
    print "Invalid info file: No bbox"
    sys.exit(-1);

minx = float(bbox_elm.get('minx'));
miny = float(bbox_elm.get('miny'));
minz = float(bbox_elm.get('minz'));
maxx = float(bbox_elm.get('maxx'));
maxy = float(bbox_elm.get('maxy'));
maxz = float(bbox_elm.get('maxz'));

#find scene resolution
res_elm = tree.getroot().find('resolution');
if res_elm is None:
    print "Invalid info file: No resolution"
    sys.exit(-1);

resolution = float(res_elm.get('val'));
print("Resolution: " + str(resolution));

#PARAMETERS
ntrees = 32
max_num_lvls = 4
min_pt = [minx, miny, minz]
max_pt = [maxx, maxy, maxz]

writeSceneFromBox(boxm2_dir, resolution, min_pt, max_pt, ntrees, max_num_lvls);
[ivelum/django-debug-toolbar] debug_toolbar/panels/sql/views.py (Python, bsd-3-clause, 3,971 bytes, score 0.001259)
from __future__ import absolute_import, unicode_literals

from django.http import HttpResponseBadRequest
from django.shortcuts import render
from django.views.decorators.csrf import csrf_exempt

from debug_toolbar.panels.sql.forms import SQLSelectForm


@csrf_exempt
def sql_select(request):
    """Returns the output of the SQL SELECT statement"""
    form = SQLSelectForm(request.POST or None)

    if form.is_valid():
        sql = form.cleaned_data['raw_sql']
        params = form.cleaned_data['params']
        cursor = form.cursor
        cursor.execute(sql, params)
        headers = [d[0] for d in cursor.description]
        result = cursor.fetchall()
        cursor.close()
        context = {
            'result': result,
            'sql': form.reformat_sql(),
            'duration': form.cleaned_data['duration'],
            'headers': headers,
            'alias': form.cleaned_data['alias'],
        }
        return render(request, 'debug_toolbar/panels/sql_select.html', context)
    return HttpResponseBadRequest('Form errors')


@csrf_exempt
def sql_explain(request):
    """Returns the output of the SQL EXPLAIN on the given query"""
    form = SQLSelectForm(request.POST or None)

    if form.is_valid():
        sql = form.cleaned_data['raw_sql']
        params = form.cleaned_data['params']
        vendor = form.connection.vendor
        cursor = form.cursor

        if vendor == 'sqlite':
            # SQLite's EXPLAIN dumps the low-level opcodes generated for a query;
            # EXPLAIN QUERY PLAN dumps a more human-readable summary
            # See http://www.sqlite.org/lang_explain.html for details
            cursor.execute("EXPLAIN QUERY PLAN %s" % (sql,), params)
        elif vendor == 'postgresql':
            cursor.execute("EXPLAIN ANALYZE %s" % (sql,), params)
        else:
            cursor.execute("EXPLAIN %s" % (sql,), params)

        headers = [d[0] for d in cursor.description]
        result = cursor.fetchall()
        cursor.close()
        context = {
            'result': result,
            'sql': form.reformat_sql(),
            'duration': form.cleaned_data['duration'],
            'headers': headers,
            'alias': form.cleaned_data['alias'],
        }
        return render(request, 'debug_toolbar/panels/sql_explain.html', context)
    return HttpResponseBadRequest('Form errors')


@csrf_exempt
def sql_profile(request):
    """Returns the output of running the SQL and getting the profiling statistics"""
    form = SQLSelectForm(request.POST or None)

    if form.is_valid():
        sql = form.cleaned_data['raw_sql']
        params = form.cleaned_data['params']
        cursor = form.cursor
        result = None
        headers = None
        result_error = None
        try:
            cursor.execute("SET PROFILING=1")  # Enable profiling
            cursor.execute(sql, params)  # Execute SELECT
            cursor.execute("SET PROFILING=0")  # Disable profiling
            # The Query ID should always be 1 here but I'll subselect to get
            # the last one just in case...
            cursor.execute("""
                SELECT *
                FROM information_schema.profiling
                WHERE query_id = (
                    SELECT query_id
                    FROM information_schema.profiling
                    ORDER BY query_id DESC
                    LIMIT 1
                )
            """)
            headers = [d[0] for d in cursor.description]
            result = cursor.fetchall()
        except Exception:
            result_error = "Profiling is either not available or not supported by your database."
        cursor.close()
        context = {
            'result': result,
            'result_error': result_error,
            'sql': form.reformat_sql(),
            'duration': form.cleaned_data['duration'],
            'headers': headers,
            'alias': form.cleaned_data['alias'],
        }
        return render(request, 'debug_toolbar/panels/sql_profile.html', context)
    return HttpResponseBadRequest('Form errors')
[darioizzo/d-CGP] doc/examples/getting_started.py (Python, gpl-3.0, 1,313 bytes, score 0.020564)
from dcgpy import expression_gdual_double as expression
from dcgpy import kernel_set_gdual_double as kernel_set
from pyaudi import gdual_double as gdual

# 1 - Instantiate a random expression using the 4 basic arithmetic operations
ks = kernel_set(["sum", "diff", "div", "mul"])
ex = expression(inputs=1,
                outputs=1,
                rows=1,
                cols=6,
                levels_back=6,
                arity=2,
                kernels=ks(),
                n_eph=0,
                seed=4232123212)

# 2 - Define the symbol set to be used in visualizing the expression
# (in our case, 1 input variable named "x") and visualize the expression
in_sym = ["x"]
print("Expression:", ex(in_sym)[0])

# 3 - Print the simplified expression
print("Simplified expression:", ex.simplify(in_sym))

# 4 - Visualize the dCGP graph
ex.visualize(in_sym)

# 5 - Define a gdual number of value 1.2 and truncation order 2
x = gdual(1.2, "x", 2)

# 6 - Compute the output of the expression and its second derivative in x = 1.2 and print
print("Expression in x=1.2:", ex([x])[0])
print("Second derivative:", ex([x])[0].get_derivative([2]))

# 7 - Mutate the expression with 2 random mutations of active genes and print
ex.mutate_active(2)
print("Mutated expression:", ex(in_sym)[0])
plotly/python-api
packages/python/plotly/plotly/validators/histogram2dcontour/colorbar/_borderwidth.py
Python
mit
555
0
import _plotly_utils.basevalidators


class BorderwidthValidator(_plotly_utils.basevalidators.NumberValidator):
    def __init__(
        self,
        plotly_name="borderwidth",
        parent_name="histogram2dcontour.colorbar",
        **kwargs
    ):
        super(BorderwidthValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "colorbars"),
            min=kwargs.pop("min", 0),
            role=kwargs.pop("role", "style"),
            **kwargs
        )
falkTX/Cadence
src/systray.py
Python
gpl-2.0
23,718
0.008812
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # KDE, App-Indicator or Qt Systray # Copyright (C) 2011-2018 Filipe Coelho <[email protected]> # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # For a full copy of the GNU General Public License see the COPYING file # Imports (Global) import os, sys if True: from PyQt5.QtCore import QTimer from PyQt5.QtGui import QIcon from PyQt5.QtWidgets import QAction, QMainWindow, QMenu, QSystemTrayIcon else: from PyQt4.QtCore import QTimer from PyQt4.QtGui import QIcon from PyQt4.QtGui import QAction, QMainWindow, QMenu, QSystemTrayIcon try: if False and os.getenv("DESKTOP_SESSION") in ("ubuntu", "ubuntu-2d") and not os.path.exists("/var/cadence/no_app_indicators"): from gi import require_version require_version('Gtk', '3.0') from gi.repository import Gtk require_version('AppIndicator3', '0.1') from gi.repository import AppIndicator3 as AppIndicator TrayEngine = "AppIndicator" #elif os.getenv("KDE_SESSION_VERSION") >= 5: #TrayEngine = "Qt" #elif os.getenv("KDE_FULL_SESSION") or os.getenv("DESKTOP_SESSION") == "kde-plasma":
#from PyKDE5.k
deui import KAction, KIcon, KMenu, KStatusNotifierItem #TrayEngine = "KDE" else: TrayEngine = "Qt" except: TrayEngine = "Qt" print("Using Tray Engine '%s'" % TrayEngine) iActNameId = 0 iActWidget = 1 iActParentMenuId = 2 iActFunc = 3 iSepNameId = 0 iSepWidget = 1 iSepParentMenuId = 2 iMenuNameId = 0 iMenuWidget = 1 iMenuParentMenuId = 2 # Get Icon from user theme, using our own as backup (Oxygen) def getIcon(icon, size=16): return QIcon.fromTheme(icon, QIcon(":/%ix%i/%s.png" % (size, size, icon))) # Global Systray class class GlobalSysTray(object): def __init__(self, parent, name, icon): object.__init__(self) self._app = None self._parent = parent self._gtk_running = False self._quit_added = False self.act_indexes = [] self.sep_indexes = [] self.menu_indexes = [] if TrayEngine == "KDE": self.menu = KMenu(parent) self.menu.setTitle(name) self.tray = KStatusNotifierItem() self.tray.setAssociatedWidget(parent) self.tray.setCategory(KStatusNotifierItem.ApplicationStatus) self.tray.setContextMenu(self.menu) self.tray.setIconByPixmap(getIcon(icon)) self.tray.setTitle(name) self.tray.setToolTipTitle(" ") self.tray.setToolTipIconByPixmap(getIcon(icon)) # Double-click is managed by KDE elif TrayEngine == "AppIndicator": self.menu = Gtk.Menu() self.tray = AppIndicator.Indicator.new(name, icon, AppIndicator.IndicatorCategory.APPLICATION_STATUS) self.tray.set_menu(self.menu) # Double-click is not possible with App-Indicators elif TrayEngine == "Qt": self.menu = QMenu(parent) self.tray = QSystemTrayIcon(getIcon(icon)) self.tray.setContextMenu(self.menu) self.tray.setParent(parent) self.tray.activated.connect(self.qt_systray_clicked) # ------------------------------------------------------------------------------------------- def addAction(self, act_name_id, act_name_string, is_check=False): if TrayEngine == "KDE": act_widget = KAction(act_name_string, self.menu) act_widget.setCheckable(is_check) self.menu.addAction(act_widget) elif TrayEngine == "AppIndicator": if is_check: act_widget = Gtk.CheckMenuItem(act_name_string) else: act_widget = Gtk.ImageMenuItem(act_name_string) act_widget.set_image(None) act_widget.show() self.menu.append(act_widget) elif TrayEngine == "Qt": act_widget = QAction(act_name_string, self.menu) act_widget.setCheckable(is_check) self.menu.addAction(act_widget) else: act_widget = None act_obj = [None, None, None, None] act_obj[iActNameId] = act_name_id act_obj[iActWidget] = act_widget self.act_indexes.append(act_obj) def addSeparator(self, sep_name_id): if TrayEngine == "KDE": sep_widget = self.menu.addSeparator() elif TrayEngine == "AppIndicator": sep_widget = Gtk.SeparatorMenuItem() sep_widget.show() self.menu.append(sep_widget) elif TrayEngine == "Qt": sep_widget = self.menu.addSeparator() else: sep_widget = None sep_obj = [None, None, None] sep_obj[iSepNameId] = sep_name_id sep_obj[iSepWidget] = sep_widget self.sep_indexes.append(sep_obj) def addMenu(self, menu_name_id, menu_name_string): if TrayEngine == "KDE": menu_widget = KMenu(menu_name_string, self.menu) self.menu.addMenu(menu_widget) elif TrayEngine == "AppIndicator": menu_widget = Gtk.MenuItem(menu_name_string) menu_parent = Gtk.Menu() menu_widget.set_submenu(menu_parent) menu_widget.show() self.menu.append(menu_widget) elif TrayEngine == "Qt": menu_widget = QMenu(menu_name_string, self.menu) self.menu.addMenu(menu_widget) else: menu_widget = None menu_obj = [None, None, None] menu_obj[iMenuNameId] = menu_name_id menu_obj[iMenuWidget] = menu_widget self.menu_indexes.append(menu_obj) # 
------------------------------------------------------------------------------------------- def addMenuAction(self, menu_name_id, act_name_id, act_name_string, is_check=False): i = self.get_menu_index(menu_name_id) if i < 0: return menu_widget = self.menu_indexes[i][iMenuWidget] if TrayEngine == "KDE": act_widget = KAction(act_name_string, menu_widget) act_widget.setCheckable(is_check) menu_widget.addAction(act_widget) elif TrayEngine == "AppIndicator": menu_widget = menu_widget.get_submenu() if is_check: act_widget = Gtk.CheckMenuItem(act_name_string) else: act_widget = Gtk.ImageMenuItem(act_name_string) act_widget.set_image(None) act_widget.show() menu_widget.append(act_widget) elif TrayEngine == "Qt": act_widget = QAction(act_name_string, menu_widget) act_widget.setCheckable(is_check) menu_widget.addAction(act_widget) else: act_widget = None act_obj = [None, None, None, None] act_obj[iActNameId] = act_name_id act_obj[iActWidget] = act_widget act_obj[iActParentMenuId] = menu_name_id self.act_indexes.append(act_obj) def addMenuSeparator(self, menu_name_id, sep_name_id): i = self.get_menu_index(menu_name_id) if i < 0: return menu_widget = self.menu_indexes[i][iMenuWidget] if TrayEngine == "KDE": sep_widget = menu_widget.addSeparator() elif TrayEngine == "AppIndicator": menu_widget = menu_widget.get_submenu() sep_widget = Gtk.SeparatorMenuItem() sep_widget.show() menu_widget.append(sep_widget) elif TrayEngine == "Qt": sep_widget = menu_widget.addSeparator() else: sep_widget = None sep_obj = [None, None, None] sep_obj[iSepNameId] = sep_name_id sep_obj
JoaoRodrigues/pdb-tools
pdbtools/pdb_keepcoord.py
Python
apache-2.0
3,212
0
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright 2018 João Pedro Rodrigues # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Removes all non-coordinate records from the file. Keeps only MODEL, ENDMDL, END, ATOM, HETATM, CON
ECT. Usage: python pdb_keepcoord.py <pdb file> Example: python pdb_keepcoord.py 1CTF.pdb This program is part of the `pdb-tools` suite of utilities and should not be distributed isolatedly. The `pdb-tools` were created to quickly manipulate PDB files using the terminal, and can be used sequentially, with one tool streaming data to another. They are based on old FORTRAN77 code that was taking too much effort to maintain and compile. RIP. """ import
os import sys __author__ = "Joao Rodrigues" __email__ = "[email protected]" def check_input(args): """Checks whether to read from stdin/file and validates user input/options. """ # Defaults fh = sys.stdin # file handle if not len(args): # Reading from pipe with default option if sys.stdin.isatty(): sys.stderr.write(__doc__) sys.exit(1) elif len(args) == 1: if not os.path.isfile(args[0]): emsg = 'ERROR!! File not found or not readable: \'{}\'\n' sys.stderr.write(emsg.format(args[0])) sys.stderr.write(__doc__) sys.exit(1) fh = open(args[0], 'r') else: # Whatever ... emsg = 'ERROR!! Script takes 1 argument, not \'{}\'\n' sys.stderr.write(emsg.format(len(args))) sys.stderr.write(__doc__) sys.exit(1) return fh def keep_coordinates(fhandle): """Keeps only coordinate records in the PDB file. """ records = ('MODEL ', 'ATOM ', 'HETATM', 'ENDMDL', 'END ', 'TER ', 'CONECT') for line in fhandle: if line.startswith(records): yield line def main(): # Check Input pdbfh = check_input(sys.argv[1:]) # Do the job new_pdb = keep_coordinates(pdbfh) try: _buffer = [] _buffer_size = 5000 # write N lines at a time for lineno, line in enumerate(new_pdb): if not (lineno % _buffer_size): sys.stdout.write(''.join(_buffer)) _buffer = [] _buffer.append(line) sys.stdout.write(''.join(_buffer)) sys.stdout.flush() except IOError: # This is here to catch Broken Pipes # for example to use 'head' or 'tail' without # the error message showing up pass # last line of the script # We can close it even if it is sys.stdin pdbfh.close() sys.exit(0) if __name__ == '__main__': main()
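Since keep_coordinates is a plain generator over a file handle, it can also be sanity-checked with an in-memory handle; the PDB lines below are made up for illustration.

import io

sample = io.StringIO(
    "HEADER    made-up example\n"   # dropped: not in the records tuple
    "ATOM      1  N   ALA A   1\n"  # kept
    "END   \n"                      # kept
)
print(''.join(keep_coordinates(sample)), end='')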
spatialhast/clickfu
osm.py
Python
gpl-3.0
1,309
0.02139
from clickFuUtils import cfAction


class osmViewMap(cfAction):
    def __init__(self, iface):
        cfAction.__init__(self, self.name(), iface)
        return None

    def name(self):
        return "View OSM map"

    def desc(self):
        return "Goto Location on OpenStreetMap"

    def createURL(self, lat, long):
        url = "http://www.openstreetmap.org/#map=17/%s/%s" % (lat, long)
        return url


class osmEditMap(cfAction):
    def __init__(self, iface):
        cfAction.__init__(self, self.name(), iface)
        return None

    def name(self):
        return "Edit OSM with iD"

    def desc(self):
        return "Goto Location on OpenStreetMap and start editing with iD"

    def createURL(self, lat, long):
        url = "http://www.openstreetmap.org/edit?editor=id#map=17/%s/%s" % (lat, long)
        return url


class osmEditMapJOSM(cfAction):
    def __init__(self, iface):
        cfAction.__init__(self, self.name(), iface)
        return None

    def name(self):
        return "Edit OSM with JOSM"

    def desc(self):
        return "Goto Location on OpenStreetMap and start editing with JOSM"

    def createURL(self, lat, long):
        url = "http://127.0.0.1:8111/load_and_zoom?left=%s&top=%s&right=%s&bottom=%s" % (long - 0.005, lat + 0.005, long + 0.005, lat - 0.005)
        return url
kevindias/django-chalk
chalk/compat.py
Python
bsd-3-clause
358
0
from django.conf import settings

# Safe User import for Django < 1.5
try:
    from django.contrib.auth import get_user_model
except ImportError:
    from django.contrib.auth.models import User
else:
    User = get_user_model()

# Safe version of settings.AUTH_USER_MODEL for Django < 1.5
auth_user_model = getattr(settings, 'AUTH_USER_MODEL', 'auth.User')
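A hedged usage sketch: referencing auth_user_model keeps a model compatible with swapped user models on Django >= 1.5 and with auth.User before that. The Article model is hypothetical, and on_delete is omitted as on pre-2.0 Django.

from django.db import models

class Article(models.Model):
    # the string reference works with both custom and stock user models
    author = models.ForeignKey(auth_user_model)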
OpusVL/odoo-trading-as
trading_as/__openerp__.py
Python
agpl-3.0
1,741
0.001723
# -*- coding: utf-8 -*- ############################################################################## # # Trading As Brands # Copyright (C) 2015 OpusVL (<http://opusvl.com/>) # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero G
eneral Public License for more details. # # You should have received a copy of the GNU
Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## { 'name': 'Trading As Brands', 'version': '0.1', 'author': 'OpusVL', 'website': 'http://opusvl.com/', 'summary': 'Allow company to present different branding on documents sent to different customers', 'description': """Allow company to present different branding on documents sent to different customers, """, 'images': [ ], 'depends': [ ], 'data': [ 'security/brand_groups.xml', 'security/ir.model.access.csv', 'res_partner_view.xml', 'res_company_brand_view.xml', 'res_company_view.xml', 'report_external_layout_modification.xml', ], 'demo': [ ], 'test': [ ], 'license': 'AGPL-3', 'installable': True, 'auto_install': False, } # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
solus-project/package-management
pisi/actionsapi/shelltools.py
Python
gpl-2.0
9,041
0.008185
#-*- coding: utf-8 -*- # # Copyright (C) 2005-2010 TUBITAK/UEKAE # # This program is free software; you can redistribute it and/or modify it under # the terms of the GNU General Public License as published by the Free # Software Foundation; either version 2 of the License, or (at your option) # any later version. # # Please read the COPYING file. # Standart Python Modules import os import glob import shutil import string import pwd import grp import gettext __trans = gettext.translation('pisi', fallback=True) _ = __trans.ugettext # Pisi Modules import pisi.context as ctx # ActionsAPI Modules import pisi.actionsapi import pisi.actionsapi.get from pisi.actionsapi import error from pisi.util import run_logged from pisi.ut
il import join_path def can_access_file(filePath
): '''test the existence of file''' return os.access(filePath, os.F_OK) def can_access_directory(destinationDirectory): '''test readability, writability and executablility of directory''' return os.access(destinationDirectory, os.R_OK | os.W_OK | os.X_OK) def makedirs(destinationDirectory): '''recursive directory creation function''' try: if not os.access(destinationDirectory, os.F_OK): os.makedirs(destinationDirectory) except OSError: error(_('Cannot create directory %s') % destinationDirectory) def echo(destionationFile, content): try: f = open(destionationFile, 'a') f.write('%s\n' % content) f.close() except IOError: error(_('ActionsAPI [echo]: Can\'t append to file %s.') % (destionationFile)) def chmod(filePath, mode = 0755): '''change the mode of filePath to the mode''' filePathGlob = glob.glob(filePath) if len(filePathGlob) == 0: error(_("ActionsAPI [chmod]: No file matched pattern \"%s\".") % filePath) for fileName in filePathGlob: if can_access_file(fileName): try: os.chmod(fileName, mode) except OSError: ctx.ui.error(_('ActionsAPI [chmod]: Operation not permitted: %s (mode: 0%o)') \ % (fileName, mode)) else: ctx.ui.error(_('ActionsAPI [chmod]: File %s doesn\'t exists.') % (fileName)) def chown(filePath, uid = 'root', gid = 'root'): '''change the owner and group id of filePath to uid and gid''' if can_access_file(filePath): try: os.chown(filePath, pwd.getpwnam(uid)[2], grp.getgrnam(gid)[2]) except OSError: ctx.ui.error(_('ActionsAPI [chown]: Operation not permitted: %s (uid: %s, gid: %s)') \ % (filePath, uid, gid)) else: ctx.ui.error(_('ActionsAPI [chown]: File %s doesn\'t exists.') % filePath) def sym(source, destination): '''creates symbolic link''' try: os.symlink(source, destination) except OSError: ctx.ui.error(_('ActionsAPI [sym]: Permission denied: %s to %s') % (source, destination)) def unlink(pattern): '''remove the file path''' filePathGlob = glob.glob(pattern) if len(filePathGlob) == 0: ctx.ui.error(_("No file matched pattern \"%s\". 
Remove operation failed.") % pattern) return for filePath in filePathGlob: if isFile(filePath) or isLink(filePath): try: os.unlink(filePath) except OSError: ctx.ui.error(_('ActionsAPI [unlink]: Permission denied: %s.') % (filePath)) elif isDirectory(filePath): pass else: ctx.ui.error(_('ActionsAPI [unlink]: File %s doesn\'t exists.') % (filePath)) def unlinkDir(sourceDirectory): '''delete an entire directory tree''' if isDirectory(sourceDirectory) or isLink(sourceDirectory): try: shutil.rmtree(sourceDirectory) except OSError: error(_('ActionsAPI [unlinkDir]: Operation not permitted: %s') % (sourceDirectory)) elif isFile(sourceDirectory): pass else: error(_('ActionsAPI [unlinkDir]: Directory %s doesn\'t exists.') % (sourceDirectory)) def move(source, destination): '''recursively move a "source" file or directory to "destination"''' sourceGlob = glob.glob(source) if len(sourceGlob) == 0: error(_("ActionsAPI [move]: No file matched pattern \"%s\".") % source) for filePath in sourceGlob: if isFile(filePath) or isLink(filePath) or isDirectory(filePath): try: shutil.move(filePath, destination) except OSError: error(_('ActionsAPI [move]: Permission denied: %s to %s') % (filePath, destination)) else: error(_('ActionsAPI [move]: File %s doesn\'t exists.') % (filePath)) # FIXME: instead of passing a sym parameter, split copy and copytree into 4 different function def copy(source, destination, sym = True): '''recursively copy a "source" file or directory to "destination"''' sourceGlob = glob.glob(source) if len(sourceGlob) == 0: error(_("ActionsAPI [copy]: No file matched pattern \"%s\".") % source) for filePath in sourceGlob: if isFile(filePath) and not isLink(filePath): try: shutil.copy(filePath, destination) except IOError: error(_('ActionsAPI [copy]: Permission denied: %s to %s') % (filePath, destination)) elif isLink(filePath) and sym: if isDirectory(destination): os.symlink(os.readlink(filePath), join_path(destination, os.path.basename(filePath))) else: if isFile(destination): os.remove(destination) os.symlink(os.readlink(filePath), destination) elif isLink(filePath) and not sym: if isDirectory(filePath): copytree(filePath, destination) else: shutil.copy(filePath, destination) elif isDirectory(filePath): copytree(filePath, destination, sym) else: error(_('ActionsAPI [copy]: File %s does not exist.') % filePath) def copytree(source, destination, sym = True): '''recursively copy an entire directory tree rooted at source''' if isDirectory(source): if os.path.exists(destination): if isDirectory(destination): copytree(source, join_path(destination, os.path.basename(source.strip('/')))) return else: copytree(source, join_path(destination, os.path.basename(source))) return try: shutil.copytree(source, destination, sym) except OSError, e: error(_('ActionsAPI [copytree] %s to %s: %s') % (source, destination, e)) else: error(_('ActionsAPI [copytree]: Directory %s doesn\'t exists.') % (source)) def touch(filePath): '''changes the access time of the 'filePath', or creates it if it does not exist''' filePathGlob = glob.glob(filePath) if filePathGlob: if len(filePathGlob) == 0: error(_("ActionsAPI [touch]: No file matched pattern \"%s\".") % filePath) for f in filePathGlob: os.utime(f, None) else: try: f = open(filePath, 'w') f.close() except IOError: error(_('ActionsAPI [touch]: Permission denied: %s') % (filePath)) def cd(directoryName = ''): '''change directory''' current = os.getcwd() if directoryName: os.chdir(directoryName) else: os.chdir(os.path.dirname(current)) def ls(source): '''listdir''' if 
os.path.isdir(source): return os.listdir(source) else: return glob.glob(source) def export(key, value): '''export environ variable''' os.environ[key] = value def isLink(filePath): '''return True if filePath refers to a symbolic link''' return os.path.islink(filePath) def isFile(filePath): '''return True if filePath is an existing regular file''' return os.path.isfile(filePath) def isDirectory(filePath): '''Return True if filePath is an existing directory''' return
jcolekaplan/WNCYC
src/main/api/decEncoder.py
Python
mit
441
0.004535
"""Workaround for forma
tting issue Source: http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/GettingStarted.Python.04.html """ import decimal import json class DecimalEncoder(json.JSONEncoder): def default(self, o): if isinstance(o, dec
imal.Decimal): if o % 1 > 0: return float(o) else: return int(o) return super(DecimalEncoder, self).default(o)
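A usage sketch: DynamoDB hands numbers back as decimal.Decimal, which json.dumps cannot serialize on its own, so the encoder above converts them (fractional values to float, whole values to int).

import decimal
import json

item = {'price': decimal.Decimal('9.99'), 'count': decimal.Decimal('3')}
print(json.dumps(item, cls=DecimalEncoder))  # {"price": 9.99, "count": 3}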
msonnabaum/thrift
test/crossrunner/run.py
Python
apache-2.0
9,516
0.010824
# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # import contextlib import multiprocessing import multiprocessing.managers import os import platform import random import signal import socket import subprocess import sys import threading import time from .compat import str_join from .test import TestEntry, domain_socket_path from .report import ExecReporter, SummaryReporter RESULT_TIMEOUT = 128 RESULT_ERROR = 64 class ExecutionContext(object): def __init__(self, cmd, cwd, env, report): self._log = multiprocessing.get_logger() self.report = report self.cmd = cmd self.cwd = cwd self.env = env self.timer = None
self.expired = False def _expire(self): self._log.info('Timeout') self.expired = True self.kill() def kill(self): self._log.debug('Killing process : %d' % self.proc.pid) if platform.system() != 'Windows': try: os.killpg(
self.proc.pid, signal.SIGKILL) except Exception as err: self._log.info('Failed to kill process group : %s' % str(err)) try: self.proc.kill() except Exception as err: self._log.info('Failed to kill process : %s' % str(err)) self.report.killed() def _popen_args(self): args = { 'cwd': self.cwd, 'env': self.env, 'stdout': self.report.out, 'stderr': subprocess.STDOUT, } # make sure child processes doesn't remain after killing if platform.system() == 'Windows': DETACHED_PROCESS = 0x00000008 args.update(creationflags=DETACHED_PROCESS | subprocess.CREATE_NEW_PROCESS_GROUP) else: args.update(preexec_fn=os.setsid) return args def start(self, timeout=0): joined = str_join(' ', self.cmd) self._log.debug('COMMAND: %s', joined) self._log.debug('WORKDIR: %s', self.cwd) self._log.debug('LOGFILE: %s', self.report.logpath) self.report.begin() self.proc = subprocess.Popen(self.cmd, **self._popen_args()) if timeout > 0: self.timer = threading.Timer(timeout, self._expire) self.timer.start() return self._scoped() @contextlib.contextmanager def _scoped(self): yield self self._log.debug('Killing scoped process') self.kill() def wait(self): self.proc.communicate() if self.timer: self.timer.cancel() self.report.end(self.returncode) @property def returncode(self): return self.proc.returncode if self.proc else None def exec_context(port, testdir, test, prog): report = ExecReporter(testdir, test, prog) prog.build_command(port) return ExecutionContext(prog.command, prog.workdir, prog.env, report) def run_test(testdir, test_dict, async=True, max_retry=3): try: logger = multiprocessing.get_logger() retry_count = 0 test = TestEntry(testdir, **test_dict) while True: if stop.is_set(): logger.debug('Skipping because shutting down') return None logger.debug('Start') with PortAllocator.alloc_port_scoped(ports, test.socket) as port: logger.debug('Start with port %d' % port) sv = exec_context(port, testdir, test, test.server) cl = exec_context(port, testdir, test, test.client) logger.debug('Starting server') with sv.start(): if test.delay > 0: logger.debug('Delaying client for %.2f seconds' % test.delay) time.sleep(test.delay) cl_retry_count = 0 cl_max_retry = 10 cl_retry_wait = 0.5 while True: logger.debug('Starting client') cl.start(test.timeout) logger.debug('Waiting client') cl.wait() if not cl.report.maybe_false_positive() or cl_retry_count >= cl_max_retry: if cl_retry_count > 0 and cl_retry_count < cl_max_retry: logger.warn('[%s]: Connected after %d retry (%.2f sec each)' % (test.server.name, cl_retry_count, cl_retry_wait)) break logger.debug('Server may not be ready, waiting %.2f second...' % cl_retry_wait) time.sleep(cl_retry_wait) cl_retry_count += 1 if not sv.report.maybe_false_positive() or retry_count >= max_retry: logger.debug('Finish') return RESULT_TIMEOUT if cl.expired else cl.proc.returncode logger.warn('[%s]: Detected socket bind failure, retrying...' 
% test.server.name) retry_count += 1 except (KeyboardInterrupt, SystemExit): logger.info('Interrupted execution') if not async: raise stop.set() return None except Exception as ex: logger.warn('%s', ex) if not async: raise logger.debug('Error executing [%s]', test.name, exc_info=sys.exc_info()) return RESULT_ERROR class PortAllocator(object): def __init__(self): self._log = multiprocessing.get_logger() self._lock = multiprocessing.Lock() self._ports = set() self._dom_ports = set() self._last_alloc = 0 def _get_tcp_port(self): sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) sock.bind(('127.0.0.1', 0)) port = sock.getsockname()[1] self._lock.acquire() try: ok = port not in self._ports if ok: self._ports.add(port) self._last_alloc = time.time() finally: self._lock.release() sock.close() return port if ok else self._get_tcp_port() def _get_domain_port(self): port = random.randint(1024, 65536) self._lock.acquire() try: ok = port not in self._dom_ports if ok: self._dom_ports.add(port) finally: self._lock.release() return port if ok else self._get_domain_port() def alloc_port(self, socket_type): if socket_type in ('domain', 'abstract'): return self._get_domain_port() else: return self._get_tcp_port() # static method for inter-process invokation @staticmethod @contextlib.contextmanager def alloc_port_scoped(allocator, socket_type): port = allocator.alloc_port(socket_type) yield port allocator.free_port(socket_type, port) def free_port(self, socket_type, port): self._log.debug('free_port') self._lock.acquire() try: if socket_type == 'domain': self._dom_ports.remove(port) path = domain_socket_path(port) if os.path.exists(path): os.remove(path) elif socket_type == 'abstract': self._dom_ports.remove(port) else: self._ports.remove(port) except IOError as err: self._log.info('Error while freeing port : %s' % str(err)) finally: self._lock.release() class NonAsyncResult(object): def __init__(self, value): self._value = value def get(self, timeout=None): return self._value def wait(self, timeout=None): pass def ready(self): return True def successful(self): return self._value == 0 class TestDispatcher(object): def __init__(self, testdir, concurrency): self._log = multiprocessing.get_logger() self.testdir = testdir # seems needed for python 2.x to handle keyboard interrupt self._stop = multiprocessing.Event() self._async = concurrency > 1 if not self._async: self._pool = None global stop global ports stop = self._stop ports = PortAllocator() else: self._m = multiprocessing.managers.BaseManager() self._m.register('ports', PortAllocator) self._m.start() self._pool = multiprocessing.Pool(concurrency, sel
uber-common/opentracing-python-instrumentation
tests/opentracing_instrumentation/test_mysqldb.py
Python
mit
2,279
0.000878
import sys import pytest from opentracing.ext import tags from sqlalchemy import create_engine from sqlalchemy.orm import sessionmaker from opentracing_instrumentation.client_hooks import mysqldb as mysqldb_hooks from opentracing_instrumentation.request_context import span_in_context from .sql_common import metadata, User SKIP_REASON_PYTHON_3 = 'MySQLdb is not compatible with Python 3' SKIP_REASON_CONNECTION = 'MySQL is not running or cannot connect' MYSQL_CONNECTION_STRING = 'mysql://[email protected]/test' @pytest.fixture def session(): Session = sessionmaker() engine = create_engine(MYSQL_CONNECTION_STRING) Session.configure(bind=engine) metadata.create_all(engine) try: yield Session() except: pass @pytest.fixture(autouse=True, scope='module') def patch_sqlalchemy(): mysqldb_hooks.install_patches() try: yield finally: mysqldb_hooks.reset_patches() def is_mysql_running(): try:
import MySQLdb with MySQLdb.connect(host='127.0.0.1', user='root'): pass return True except: return False def assert_span(span, operation, parent=None): assert span.operation_name == 'MySQLdb:' + operation assert span.tags.get(tags.SPAN_KIND) == tags.SPAN_KIND_RPC_CLIENT if parent: assert span.parent_id == parent.context.span_id assert span.context.trace_id == parent.context.trace_id else:
assert span.parent_id is None @pytest.mark.skipif(not is_mysql_running(), reason=SKIP_REASON_CONNECTION) @pytest.mark.skipif(sys.version_info.major == 3, reason=SKIP_REASON_PYTHON_3) def test_db(tracer, session): root_span = tracer.start_span('root-span') # span recording works for regular operations within a context only with span_in_context(root_span): user = User(name='user', fullname='User', password='password') session.add(user) session.commit() spans = tracer.recorder.get_spans() assert len(spans) == 4 connect_span, insert_span, commit_span, rollback_span = spans assert_span(connect_span, 'Connect') assert_span(insert_span, 'INSERT', root_span) assert_span(commit_span, 'commit', root_span) assert_span(rollback_span, 'rollback', root_span)
garyjs/Newfiesautodialer
newfies/api/api_playgrounds/campaign_delete_cascade_playground.py
Python
mpl-2.0
1,138
0.001757
# # Newfies-Dialer License # http://www.newfies-dialer.org # # This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this file, # You can obtain one at http://mozilla.org/MPL/2.0/. # # Copyright (C) 2011-2013 Star2Billing S.L. # # The Initial Developer of the
Original Code is # Arezqui Belaid <[email protected]> # from django.utils.translation import gettext as _ from apiplayground import APIPlayground class CampaignDelCascadeAPIPlayground(APIPlayground): schema = { "title": _("campaign delete cascade"), "base_url": "http://localhost/api/v1/", "resources": [ { "name": "/campaign_delete_cascade/", "description
": _("this resource allows you to delete campaign."), "endpoints": [ { "method": "DELETE", "url": "/api/v1/campaign_delete_cascade/{campaign-id}/", "description": _("delete campaign"), } ] }, ] }
lmascare/utils
python/tutorials/oop6a.py
Python
artistic-2.0
811
0.008631
#!/usr/bin/env python3

class Employee:

    num_of_emps = 0
    raise_amount = 1.04

    def __init__(self, first, last):
        self.first = first
        self.last = last
        self.email = first + '.' + last + '@kellynoah.com'

    def fullname(self):
        return '{} {}'.format(self.first, self.last)

    def apply_raise(self):
        # note: __init__ never sets self.pay, so apply_raise, __repr__ and
        # __add__ raise AttributeError unless pay is assigned beforehand
        self.pay = int(self.pay * self.raise_amount)

    def __repr__(self):
        return "Employee('{}', '{}', '{}')".format(self.first, self.last, self.pay)

    def __str__(self):
        return '{} - {}'.format(self.fullname(), self.email)

    def __add__(self, other):
        return self.pay + other.pay

    def __len__(self):
        return len(self.fullname())


emp_1 = Employee('John', 'Smith')

print(emp_1.first)
print(emp_1.email)
print(emp_1.fullname())
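A short exercise of the dunder methods above; because __init__ never sets pay, it is attached by hand here purely for illustration.

emp_2 = Employee('Jane', 'Doe')
emp_1.pay, emp_2.pay = 50000, 60000  # pay is not set by __init__
print(emp_1 + emp_2)  # 110000, via __add__
print(len(emp_1))     # 10, the length of 'John Smith', via __len__
print(repr(emp_1))    # via __repr__, now that pay exists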
Tiger-C/python
python教程/第四集.py
Python
mit
3,007
0.006783
# Episode 4 (includes parts of file 3.py and parts of Episode 2)

# courses=['History','Math','Physics','Compsci']  # uncomment this line for everything before the Mutable section
# print(courses)
# courses.append('Art')  # append an element at the end
# courses.insert(0,'English')  # insert an element at position 0
# courses_2=['Chinese','Education']
# courses.insert(1,courses_2)  # compare this line with the two lines below
# courses.append(courses_2)
# courses.extend(courses_2)

# # see 3.py for the difference between deleting with pop and deleting with remove
# # courses.remove('Math')  # remove an element
# popped=courses.pop()  # remove an element and assign it to popped (with no number in the parentheses, it defaults to the last one)
# print(popped)  # print the removed element

# courses.reverse()  # reverse the elements
# courses.sort()  # sort by leading character; digits sort before letters
# print(courses)
# courses.sort(reverse=True)  # sort in descending order (=False has no effect)
# print(courses)
# sorted_courses=sorted(courses)
# print(sorted_courses)

# alphabet=['DA1','SA2','AD3','3AD']
# alphabet.sort()
# print(alphabet)

# nums=[3,5,1,4,2]
# nums.sort()
# print(nums)
# print(min(nums))  # print the smallest number
# print(max(nums))  # print the largest number
# print(sum(nums))  # print the sum

# # not sure what rule applies to Chinese characters
# Chinese=['啊了','吧即','啦']
# Chinese.sort()
# print(Chinese)

# print(courses.index('Math'))  # find the position of an element in the list
# print('Art' in courses)  # True means the element is in the list, False means it is not

# for and in statements
# for item in courses:  # print the elements of courses one by one
#     print(item)

# # print each element's position together with the element
# for course in enumerate(courses):
#     print(course)
# for index,course in enumerate(courses):
#     print(index,course)
# for index,course in enumerate(courses,start=1):
#     print(index,course)

# courses_str=' - '.join(courses)  # join the courses with ' - ' in between
# new_list=courses_str.split(' - ')  # split courses_str back apart on ' - '
# print(courses_str)
# print(new_list)

# # Mutable
# list_1=['History','Math','Physics','Compsci']
# list_2=list_1
# print(list_1)
# print(list_2)
# list_1[0]='Art'
# print(list_1)
# print(list_2)

# # Immutable (oddly, this fails in the video but works for me --
# # because these "tuples" are built with square brackets, they are really lists)
# tuple_1=['History','Math','Physics','Compsci']
# tuple_2=tuple_1
# print(tuple_1)
# print(tuple_2)
# tuple_1[0]='Art'
# print(tuple_1)
# print(tuple_2)

# # Sets
# cs_courses={'History', 'Math', 'Physics', 'Compsci','Math'}  # with braces, duplicated elements are kept only once
# art_courses={'History', 'Math', 'Art', 'Design'}
# print(cs_courses)
# print(cs_courses.intersection(art_courses))  # print the elements the two sets have in common
# print(cs_courses.difference(art_courses))  # print the elements that differ between the two sets
# print(cs_courses.union(art_courses))  # merge the two sets (the order changes on every run)

# Empty Lists
empty_list = []
empty_list = list()

# Empty Tuples
empty_tuple = ()
empty_tuple = tuple()

# Empty Sets
empty_set = {}  # wrong -- this creates a dict
empty_set = set()
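To make the Mutable/Immutable contrast above concrete: with a real tuple (parentheses rather than square brackets) the item assignment fails.

real_tuple = ('History', 'Math', 'Physics', 'Compsci')
try:
    real_tuple[0] = 'Art'
except TypeError as err:
    print(err)  # 'tuple' object does not support item assignment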
MichaelDoyle/Diamond
src/collectors/elb/test/testelb.py
Python
mit
8,325
0.00024
#!/usr/bin/python # coding=utf-8 import datetime import mock from test import CollectorTestCase from test import get_collector_config from test import unittest from mock import patch from test import run_only from mock import Mock from diamond.collector import Collector from elb import ElbCollector def run_only_if_boto_is_available(func): try: import boto except ImportError: boto = None pred = lambda: boto is not None return run_only(func, pred) class TestElbCollector(CollectorTestCase): @run_only_if_boto_is_available def test_throws_exception_when_interval_not_multiple_of_60(self): config = get_collector_config('ElbCollector', {'enabled': True, 'interval': 10}) assertRaisesAndContains(Exception, 'multiple of', ElbCollector, *[config, None]) @run_only_if_boto_is_available @patch('elb.cloudwatch') @patch('boto.ec2.connect_to_region') @patch('boto.ec2.elb.connect_to_region') @patch.object(Collector, 'publish_metric') def test_ignore(self, publish_metric, elb_connect_to_region, ec2_connect_to_region, cloudwatch): config = get_collector_config( 'ElbCollector', { 'enabled': True, 'interval': 60, 'regions': { 'us-west-1': {} }, 'elbs_ignored': ['^to_ignore', ], }) az = Mock() az.name = 'us-west-1a' ec2_conn = Mock() ec2_conn.get_all_zones = Mock() ec2_conn.get_all_zones.return_value = [az] ec2_connect_to_region.return_value = ec2_conn elb1 = Mock() elb1.name = 'elb1' elb2 = Mock() elb2.name = 'to_ignore' elb_conn = Mock() elb_conn.get_all_load_balancers = Mock() elb_conn.get_all_load_balancers.return_value = [elb1, elb2] elb_connect_to_region.return_value = elb_conn cw_conn = Mock() cw_conn.region = Mock() cw_conn.region.name = 'us-west-1' cw_conn.get_metric_statistics = Mock() ts = datetime.datetime.utcnow().replace(second=0, microsecond=0) cw_conn.get_metric_statistics.side_effect = [ [{u'Timestamp': ts, u'Average': 1.0, u'Unit': u'Count'}], [{u'Timestamp': ts, u'Average': 2.0, u'Unit': u'Count'}], [{u'Timestamp': ts, u'Sum': 3.0, u'Unit': u'Count'}], [{u'Timestamp': ts, u'Average': 4.0, u'Unit': u'Count'}], [{u'Timestamp': ts, u'Sum': 6.0, u'Unit': u'Count'}], [{u'Timestamp': ts, u'Sum': 7.0, u'Unit': u'Count'}], [{u'Timestamp': ts, u'Sum': 8.0, u'Unit': u'Count'}], [{u'Timestamp': ts, u'Sum': 9.0, u'Unit': u'Count'}], [{u'Timestamp': ts, u'Sum': 10.0, u'Unit': u'Count'}], [{u'Timestamp': ts, u'Sum': 11.0, u'Unit': u'Count'}], [{u'Timestamp': ts, u'Sum': 12.0, u'Unit': u'Count'}], [{u'Timestamp': ts, u'Maximum': 13.0, u'Unit': u'Count'}], [{u'Timestamp': ts, u'Sum': 14.0, u'Unit': u'Count'}], ] cloudwatch.connect_to_region = Mock() cloudwatch.connect_to_region.return_value = cw_conn collector = ElbCollector(config, handlers=[]) target = ts + datetime.timedelta(minutes=1) with mock.patch.object(datetime, 'datetime', mock.Mock(wraps=datetime.datetime)) as patched: patched.utcnow.return_value = target collector.collect() self.assertPublishedMetricMany( publish_metric, { 'us-west-1a.elb1.HealthyHostCount': 1, 'us-west-1a.elb1.UnHealthyHostCount': 2, 'us-west-1a.elb1.RequestCount': 3, 'us-west-1a.elb1.Latency': 4, 'us-west-1a.elb1.HTTPCode_ELB_4XX': 6, 'us-west-1a.elb1.HTTPCode_ELB_5XX': 7, 'us-west-1a.elb1.HTTPCode_Backend_2XX': 8, 'us-west-1a.elb1.HTTPCode_Backend_3XX': 9, 'us-west-1a.elb1.HTTPCode_Backend_4XX': 10, 'us-west-1a.elb1.HTTPCode_Backend_5XX': 11, 'us-west-1a.elb1.BackendConnectionErrors': 12, 'us-west-1a.elb1.SurgeQueueLength': 13, 'us-west-1a.elb1.SpilloverCount': 14, }) @run_only_if_boto_is_available @patch('elb.cloudwatch') @patch('boto.ec2.connect_to_region') 
@patch.object(Collector, 'publish_metric') def test_collect(self, publish_metric, connect_to_region, cloudwatch): config = get_collector_config( 'ElbCollector', { 'enabled': True, 'interval': 60, 'regions': { 'us-west-1': { 'elb_names': ['elb1'], } } }) az = Mock() az.name = 'us-west-1a' ec2_conn = Mock() ec2_conn.get_all_zones = Mock() ec2_conn.get_all_zones.return_value = [az] connect_to_region.return_value = ec2_conn cw_conn = Mock() cw_conn.region = Mock() cw_conn.region.name = 'us-west-1' cw_conn.get_metric_statistics = Mock() ts = datetime.datetime.utcnow().replace(second=0, microsecond=0) cw_conn.get_metric_statistics.side_effect = [ [{u'Timestamp': ts, u'Average': 1.0, u'Unit': u'Count'}], [{u'Timestamp': ts, u'Average': 2.0, u'Unit': u'Count'}], [{u'Timestamp': ts, u'Sum': 3.0, u'Unit': u'Count'}], [{u'Timestamp': ts, u'Average': 4.0, u'Unit': u'Count'}], [{u'Timestamp': ts, u'Sum': 6.0, u'Unit': u'Count'}], [{u'Timestamp': ts, u'Sum': 7.0, u'Unit': u'Count'}], [{u'Timestamp': ts, u'Sum': 8.0, u'Unit': u'Count'}], [{u'Timestamp': ts, u'Sum': 9.0, u'Unit': u'Count'}], [{u'Timestamp': ts, u'Sum': 10.0, u'Unit': u'Count'}], [{u'Timestamp': ts, u'Sum': 11.0, u'Unit': u'Count'}], [{u'Timestamp': ts, u'Sum': 12.0, u'Unit': u'Count'}], [{u'Timestamp': ts, u'Maximum': 13.0, u'Unit': u'Co
unt'}], [{u'Timestamp': ts, u'Sum': 14.0, u'Unit': u'Count'}], ] cloudwatch.connect_to_region = Mock() cloudwatch.connect_to_region.return_value = cw_conn collector = ElbCollector(config, handlers=[]) target = ts + datetime.timedelta(minutes=1) with mock.patch.object(datetime, 'datetime', mock.Mock(wraps=datetime.datetime)
) as patched: patched.utcnow.return_value = target collector.collect() self.assertPublishedMetricMany( publish_metric, { 'us-west-1a.elb1.HealthyHostCount': 1, 'us-west-1a.elb1.UnHealthyHostCount': 2, 'us-west-1a.elb1.RequestCount': 3, 'us-west-1a.elb1.Latency': 4, 'us-west-1a.elb1.HTTPCode_ELB_4XX': 6, 'us-west-1a.elb1.HTTPCode_ELB_5XX': 7, 'us-west-1a.elb1.HTTPCode_Backend_2XX': 8, 'us-west-1a.elb1.HTTPCode_Backend_3XX': 9, 'us-west-1a.elb1.HTTPCode_Backend_4XX': 10, 'us-west-1a.elb1.HTTPCode_Backend_5XX': 11, 'us-west-1a.elb1.BackendConnectionErrors': 12, 'us-west-1a.elb1.SurgeQueueLength': 13, 'us-west-1a.elb1.SpilloverCount': 14, }) def assertRaisesAndContains(excClass, contains_str, callableObj, *args, **kwargs): try: callableObj(*args, **kwargs) except excClass as e: msg = str(e) if contains_str in msg: return else: raise AssertionError( "Exception message does not contain '%s': '%s'" % ( contains_str, msg)) else: if hasattr(excClass, '__name__'): excName = excClass.__name__ e
auduno/gensim
gensim/corpora/bleicorpus.py
Python
gpl-3.0
3,768
0.00345
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright (C) 2010 Radim Rehurek <[email protected]> # Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html """ Blei's LDA-C format. """ from __future__ import with_statement import logging from gensim import interfaces, utils from gensim.corpora import IndexedCorpus logger = logging.getLogger('gensim.corpora.bleicorpus') class BleiCorpus(IndexedCorpus): """ Corpus in Blei's LDA-C format. The corpus is represented as two files: one describing the documents, and another describing the mapping between words and their ids. Each document is one line:: N fieldId1:fieldValue1 fieldId2:fieldValue2 ... fieldIdN:fieldValueN The vocabulary is a file with words, one word per line; word at line K has an implicit ``id=K``. """ def __init__(self, fname, fname_vocab=None): """ Initialize the corpus from a file. `fname_vocab` is the file with vocabulary; if not specified, it defaults to `fname.vocab`. """ IndexedCorpus.__init__(self, fname) logger.info("loading corpus from %s" % fname) if fname_vocab is None: fname_vocab = fname + '.vocab' self.fname = fname words = [word.rstrip() for word in open(fname_vocab)] self.id2word = dict(enumerate(words)) self.length = None def __iter__(self): """ Iterate over the corpus, returning one sparse vector at a time. """ length = 0 for lineNo, line in enumerate(open(self.fname)): length += 1 yield self.line2doc(line) self.length = length def line2doc(self, line): parts = line.split() if int(parts[0]) != len(parts) - 1: raise ValueError("invalid format in %s: %s" % (self.fname, repr(line))) doc = [part.rsplit(':', 1) for part in parts[1:]] doc = [(int(p1), float(p2)) for p1, p2 in doc] return doc @staticmethod def save_corpus(fname, corpus, id2word=None): """ Save a corpus in the LDA-C format. There are actually two files saved: `fname` and `fname.vocab`, where `fname.vocab` is the vocabulary file. This function is automatically called by `BleiCorpus.serialize`; don't call it directly, call `serialize` instead. """ if id2word is None: logger.info("no word id mapping provided; initializing from corpus") id2word = utils.dict_from_corpus(corpus) num_terms = len(id2word) else: num_terms = 1 + max([-1] + id2word.keys()) logger.info("storing corpus in Blei's LDA-C format: %s" % fname) with open(fname, 'w') as fout: offsets = [] for doc in corpus: doc = list(doc) offsets.append(fout.tell()) fout.write("%i %s\n" % (len(doc), ' '.join("%i:%s" % p for p in doc if abs(p[1]) > 1e-12))) # write out vocabulary, in a format compatible with Blei's topics.py script fname_vocab = fname + '.vocab' log
ger.info("saving vocabulary of %i words to %s" % (num_terms, fname_vocab)) with ope
n(fname_vocab, 'w') as fout: for featureid in xrange(num_terms): fout.write("%s\n" % utils.to_utf8(id2word.get(featureid, '---'))) return offsets def docbyoffset(self, offset): """ Return the document stored at file position `offset`. """ with open(self.fname) as f: f.seek(offset) return self.line2doc(f.readline()) #endclass BleiCorpus
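A worked example of the LDA-C line format that line2doc parses: the leading integer is the count of id:value pairs that follow.

line = "3 0:1.0 4:2.0 7:1.0"
parts = line.split()
assert int(parts[0]) == len(parts) - 1  # the format's own consistency check
doc = [(int(i), float(v)) for i, v in (p.rsplit(':', 1) for p in parts[1:])]
print(doc)  # [(0, 1.0), (4, 2.0), (7, 1.0)]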
lmazuel/azure-sdk-for-python
azure-eventgrid/azure/eventgrid/models/resource_write_failure_data.py
Python
mit
3,203
0.000312
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is # regenerated. # -------------------------------------------------------------------------- from msrest.serialization import Model class ResourceWriteFailureData(Model): """Schema of the Data property of an EventGridEvent for a Microsoft.Resources.ResourceWriteFailure event. This is raised when a resource create or update operation fails. :param tenant_id: The tenant ID of the resource. :type tenant_id: str :param subscription_id: The subscription ID of the resource. :type subscription_id: str :param resource_group: The resource group of the resource. :type resource_group: str :param resource_provider: The resource provider performing the operation. :type resource_provider: str :param resource_uri: The URI of the resource in the operation. :type resource_uri: str :param operation_name: The operation that was perfo
rmed. :type operation_name: str :param status: The status of the operation. :type status: str :param authorization: The requested authorization for the operation. :type authorization: str :param claims: The properties of the claims. :type claims: str :param correlation_id: An operation ID used for troubleshooting. :type
correlation_id: str :param http_request: The details of the operation. :type http_request: str """ _attribute_map = { 'tenant_id': {'key': 'tenantId', 'type': 'str'}, 'subscription_id': {'key': 'subscriptionId', 'type': 'str'}, 'resource_group': {'key': 'resourceGroup', 'type': 'str'}, 'resource_provider': {'key': 'resourceProvider', 'type': 'str'}, 'resource_uri': {'key': 'resourceUri', 'type': 'str'}, 'operation_name': {'key': 'operationName', 'type': 'str'}, 'status': {'key': 'status', 'type': 'str'}, 'authorization': {'key': 'authorization', 'type': 'str'}, 'claims': {'key': 'claims', 'type': 'str'}, 'correlation_id': {'key': 'correlationId', 'type': 'str'}, 'http_request': {'key': 'httpRequest', 'type': 'str'}, } def __init__(self, tenant_id=None, subscription_id=None, resource_group=None, resource_provider=None, resource_uri=None, operation_name=None, status=None, authorization=None, claims=None, correlation_id=None, http_request=None): super(ResourceWriteFailureData, self).__init__() self.tenant_id = tenant_id self.subscription_id = subscription_id self.resource_group = resource_group self.resource_provider = resource_provider self.resource_uri = resource_uri self.operation_name = operation_name self.status = status self.authorization = authorization self.claims = claims self.correlation_id = correlation_id self.http_request = http_request
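Deserialization from event JSON is normally handled by msrest elsewhere; constructing the model directly, with made-up values, looks like this:

data = ResourceWriteFailureData(
    subscription_id='00000000-0000-0000-0000-000000000000',  # made-up IDs
    resource_group='example-rg',
    operation_name='Microsoft.Storage/storageAccounts/write',
    status='Failed',
)
print(data.operation_name, data.status)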
notapresent/rutracker_rss
feeds.py
Python
apache-2.0
2,363
0
"""(Re)builds feeds for categories""" import os import datetime import jinja2 from google.appengine.api import app_identity import dao import util def build_and_save_for_category(cat, store, prefix): """Build and save feeds for category""" feed = build_feed(cat) save_feeds(store, feed, prefix, cat.key.id()) def build_feed(cat): """Build feed for category""" feed = Feed(title=cat.title, link=get_app_url()) items = dao.latest_torrents(feed_size(cat), cat.key) for item in items: feed.add_item(item) return feed def get_app_url(): """Returns full URL for app engine app""" app_id = app_identity.get_application_id() return 'http://{}.appspot.com/'.format(app_id) def save_feeds(store, feed, prefix, name): """Saves feeds to storage""" xml = feed.render_short_rss() path = os.path.join(prefix, 'short', '{}.xml'.format(name)) store.put(path, xml.encode('utf-8'), 'application/rss+xml') c
lass Feed(object): """Represents feed wi
th torrent entries""" def __init__(self, title, link, ttl=60, description=None): self.title = title self.link = link self.description = description or title self.ttl = ttl self.items = [] self.lastBuildDate = None self.latest_item_dt = datetime.datetime.utcfromtimestamp(0) def add_item(self, item): self.items.append(item) if self.latest_item_dt < item.dt: self.latest_item_dt = item.dt def render_short_rss(self): self.lastBuildDate = self.latest_item_dt env = make_jinja_env() template = env.get_template('rss_short.xml') return template.render(feed=self) def make_jinja_env(): jinja2_env = jinja2.Environment( loader=jinja2.FileSystemLoader('templates'), # loader=PackageLoader('package_name', 'templates'), autoescape=True, extensions=['jinja2.ext.autoescape'] ) jinja2_env.filters['rfc822date'] = util.datetime_to_rfc822 return jinja2_env def feed_size(category): """Returns number of feed entries for category""" if category.key.id() == 'r0': # Root category return 100 elif category.key.id().startswith('c'): # Level 2 category return 50 return 25 # category with subcategories
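A rough sketch of Feed in isolation: the stand-in item only needs the dt attribute that add_item reads, and rendering is skipped here since render_short_rss needs the templates directory.

import collections
import datetime

FakeItem = collections.namedtuple('FakeItem', 'title dt')

feed = Feed(title='Example', link='http://example.appspot.com/')
feed.add_item(FakeItem('older', datetime.datetime(2020, 1, 1)))
feed.add_item(FakeItem('newer', datetime.datetime(2021, 6, 1)))
print(feed.latest_item_dt)  # 2021-06-01 00:00:00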
SummerLW/Perf-Insight-Report
devil/devil/android/ports.py
Python
bsd-3-clause
6,336
0.007418
# Copyright (c) 2012 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Functions that deal with local and device ports.""" import contextlib import fcntl import httplib import logging import os import socket import traceback # The net test server is started from port 10201. _TEST_SERVER_PORT_FIRST = 10201 _TEST_SERVER_PORT_LAST = 30000 # A file to record next valid port of test server. _TEST_SERVER_PORT_FILE = '/tmp/test_server_port' _TEST_SERVER_PORT_LOCKFILE = '/tmp/test_server_port.lock' # The following two methods are used to allocate the port source for various # types of test servers. Because some net-related tests can be run on shards at # same time, it's important to have a mechanism to allocate the port # process-safe. In here, we implement the safe port allocation by leveraging # flock. def ResetTestServerPortAllocation(): """Resets the port allocation to start from TEST_SERVER_PORT_FIRST. Returns: Returns True if reset successes. Otherwise returns False. """ try: with open(_TEST_SERVER_PORT_FILE, 'w') as fp: fp.write('%d' % _TEST_SERVER_PORT_FIRST) if os.path.exists(_TEST_SERVER_PORT_LOCKFILE): os.unlink(_TEST_SERVER_PORT_LOCKFILE) return True except Exception: # pylint: disable=broad-except logging.exception('Error while resetting port allocation') return False def AllocateTestServerPort(): """Allocates a port incrementally. Returns: Returns a valid port which should be in between TEST_SERVER_PORT_FIRST and TEST_SERVER_PORT_LAST. Returning 0 means no more valid port can be used. """ port = 0 ports_tried = [] try: fp_lock = open(_TEST_SERVER_PORT_LOCKFILE, 'w') fcntl.flock(fp_lock, fcntl.LOCK_EX) # Get current valid port and calculate next valid port. if not os.path.exists(_TEST_SERVER_PORT_FILE): ResetTestServerPortAllocation() with open(_TEST_SERVER_PORT_FILE, 'r+') as fp: port = int(fp.read())
ports_tried.append(port) while not IsHostPortAvailable(port): port += 1 ports_tried.append(port) if (port > _TEST_SERVER_PORT_LAST or port < _TEST_SERVER_PORT_FIRST): port = 0 else: fp.seek(0, os.SEEK_SET) fp.write('%d' % (port + 1)) except Exception: # pylint: disable=broa
d-except logging.exception('ERror while allocating port') finally: if fp_lock: fcntl.flock(fp_lock, fcntl.LOCK_UN) fp_lock.close() if port: logging.info('Allocate port %d for test server.', port) else: logging.error('Could not allocate port for test server. ' 'List of ports tried: %s', str(ports_tried)) return port def IsHostPortAvailable(host_port): """Checks whether the specified host port is available. Args: host_port: Port on host to check. Returns: True if the port on host is available, otherwise returns False. """ s = socket.socket() try: s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) s.bind(('', host_port)) s.close() return True except socket.error: return False def IsDevicePortUsed(device, device_port, state=''): """Checks whether the specified device port is used or not. Args: device: A DeviceUtils instance. device_port: Port on device we want to check. state: String of the specified state. Default is empty string, which means any state. Returns: True if the port on device is already used, otherwise returns False. """ base_urls = ('127.0.0.1:%d' % device_port, 'localhost:%d' % device_port) netstat_results = device.RunShellCommand( ['netstat', '-a'], check_return=True, large_output=True) for single_connect in netstat_results: # Column 3 is the local address which we want to check with. connect_results = single_connect.split() if connect_results[0] != 'tcp': continue if len(connect_results) < 6: raise Exception('Unexpected format while parsing netstat line: ' + single_connect) is_state_match = connect_results[5] == state if state else True if connect_results[3] in base_urls and is_state_match: return True return False def IsHttpServerConnectable(host, port, tries=3, command='GET', path='/', expected_read='', timeout=2): """Checks whether the specified http server is ready to serve request or not. Args: host: Host name of the HTTP server. port: Port number of the HTTP server. tries: How many times we want to test the connection. The default value is 3. command: The http command we use to connect to HTTP server. The default command is 'GET'. path: The path we use when connecting to HTTP server. The default path is '/'. expected_read: The content we expect to read from the response. The default value is ''. timeout: Timeout (in seconds) for each http connection. The default is 2s. Returns: Tuple of (connect status, client error). connect status is a boolean value to indicate whether the server is connectable. client_error is the error message the server returns when connect status is false. """ assert tries >= 1 for i in xrange(0, tries): client_error = None try: with contextlib.closing(httplib.HTTPConnection( host, port, timeout=timeout)) as http: # Output some debug information when we have tried more than 2 times. http.set_debuglevel(i >= 2) http.request(command, path) r = http.getresponse() content = r.read() if r.status == 200 and r.reason == 'OK' and content == expected_read: return (True, '') client_error = ('Bad response: %s %s version %s\n ' % (r.status, r.reason, r.version) + '\n '.join([': '.join(h) for h in r.getheaders()])) except (httplib.HTTPException, socket.error) as e: # Probably too quick connecting: try again. exception_error_msgs = traceback.format_exception_only(type(e), e) if exception_error_msgs: client_error = ''.join(exception_error_msgs) # Only returns last client_error. return (False, client_error or 'Timeout')
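The cross-process safety above boils down to holding an exclusive flock on a sentinel file while the shared counter file is read and updated; a stripped-down sketch of that pattern (POSIX only, with a hypothetical lock path):

import fcntl

with open('/tmp/example_port.lock', 'w') as lock_fp:
    fcntl.flock(lock_fp, fcntl.LOCK_EX)   # blocks until no other process holds it
    try:
        pass  # read and bump the shared counter file here
    finally:
        fcntl.flock(lock_fp, fcntl.LOCK_UN)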
xtuyaowu/jtyd_python_spider
celery_tasks/weibo/search.py
Python
mit
2,317
0.003021
# coding:utf-8
from urllib import parse as url_parse

from logger.log import crawler
from apps.celery_init import celery
from page_get.basic import get_page
from config.conf import get_max_search_page
from page_parse import search as parse_search
from db.search_words import get_search_keywords
from db.keywords_wbdata import insert_keyword_wbid
from db.wb_data import insert_weibo_data, get_wb_by_mid


# This url is just for original weibos.
# If you want other kind of search, you can change the url below
url = 'http://s.weibo.com/weibo/{}&scope=ori&suball=1&page={}'
limit = get_max_search_page() + 1


@celery.task(ignore_result=True)
def search_keyword(keyword, keyword_id):
    cur_page = 1
    encode_keyword = url_parse.quote(keyword)
    while cur_page < limit:
        cur_url = url.format(encode_keyword, cur_page)
        search_page = get_page(cur_url)
        if not search_page:
            crawler.warning('No result for keyword {}, the source page is {}'.format(keyword, search_page))
            return

        search_list = parse_search.get_search_info(search_page)

        # Because the search results are sorted by time, if any result has been stored in mysql,
        # we need not crawl the same keyword in this turn
        for wb_data in search_list:
            rs = get_wb_by_mid(wb_data.weibo_id)
            if rs:
                crawler.info('keyword {} has been crawled in this turn'.format(keyword))
                return
            else:
                insert_weibo_data(wb_data)
                insert_keyword_wbid(keyword_id, wb_data.weibo_id)
                # send task for crawling user info
                celery.send_task('celery_tasks.weibo.user.crawl_person_infos', args=(wb_data.uid,),
                                 queue='user_crawler', routing_key='for_user_info')

        if 'page next S_txt1 S_line1' in search_page:
            cur_page += 1
        else:
            crawler.info('keyword {} has been crawled in this turn'.format(keyword))
            return


@celery.task(ignore_result=True)
def excute_search_task():
    keywords = get_search_keywords()
    for each in keywords:
        celery.send_task('celery_tasks.weibo.search.search_keyword', args=(each[0], each[1]),
                         queue='search_crawler', routing_key='for_search_info')
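The early exit in search_keyword relies on results arriving newest-first: the first already-stored weibo implies everything after it was crawled earlier. A minimal sketch of that pattern with illustrative names, not part of the crawler:

def store_until_seen(results, already_stored, store):
    for item in results:
        if already_stored(item):
            return  # everything older than this was crawled before
        store(item)

stored = []
store_until_seen(['c', 'b', 'a', 'x'], {'a'}.__contains__, stored.append)
print(stored)  # ['c', 'b'] -- stops at 'a', so 'x' is never stored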
sykora/django-ripwrap
setup.py
Python
gpl-3.0
712
0.025281
from distutils.core import setup

from ripwrap import __VERSION__

setup(
    name = 'ripwrap',
    version = __VERSION__,
    description = 'A wrapper for ReSTinPeace, for Django applications.',
    long_description = open('README').read(),
    author = 'P.C. Shyamshankar',
    packages = ['ripwrap'],
    url = 'http://github.com/sykora/django-ripwrap/',
    license = 'GNU General Public License v3.0',
    classifiers = (
        'Development Status :: 1 - Planning',
        'Environment :: Web Environment',
        'Framework :: Django',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: GNU General Public License (GPL)',
        'Programming Language :: Python',
    )
)
simongibbons/numpy
numpy/core/tests/test_casting_unittests.py
Python
bsd-3-clause
29,168
0.000583
""" The tests exercise the casting machinery in a more low-level manner. The reason is mostly to test a new implementation of the casting machinery. Unlike most tests in NumPy, these are closer to unit-tests rather than integration tests. """ import pytest import textwrap import enum import itertools import random import numpy as np from numpy.lib.stride_tricks import as_strided from numpy.testing import assert_array_equal from numpy.core._multiarray_umath import _get_castingimpl as get_castingimpl # Simple skips object, parametric and long double (unsupported by struct) simple_dtypes = "?bhilqBHILQefdFD" if np.dtype("l").itemsize != np.dtype("q").itemsize: # Remove l and L, the table was generated with 64bit linux in mind. simple_dtypes = simple_dtypes.replace("l", "").replace("L", "") simple_dtypes = [type(np.dtype(c)) for c in simple_dtypes] def simple_dtype_instances(): for dtype_class in simple_dtypes: dt = dtype_class() yield pytest.param(dt, id=str(dt)) if dt.byteorder != "|": dt = dt.newbyteorder() yield pytest.param(dt, id=str(dt)) def get_expect
ed_stringlength(dtype): """Returns the string length when casting the basic dtypes to strings. """
if dtype == np.bool_: return 5 if dtype.kind in "iu": if dtype.itemsize == 1: length = 3 elif dtype.itemsize == 2: length = 5 elif dtype.itemsize == 4: length = 10 elif dtype.itemsize == 8: length = 20 else: raise AssertionError(f"did not find expected length for {dtype}") if dtype.kind == "i": length += 1 # adds one character for the sign return length # Note: Can't do dtype comparison for longdouble on windows if dtype.char == "g": return 48 elif dtype.char == "G": return 48 * 2 elif dtype.kind == "f": return 32 # also for half apparently. elif dtype.kind == "c": return 32 * 2 raise AssertionError(f"did not find expected length for {dtype}") class Casting(enum.IntEnum): no = 0 equiv = 1 safe = 2 same_kind = 3 unsafe = 4 cast_is_view = 1 << 16 def _get_cancast_table(): table = textwrap.dedent(""" X ? b h i l q B H I L Q e f d g F D G S U V O M m ? # = = = = = = = = = = = = = = = = = = = = = . = b . # = = = = . . . . . = = = = = = = = = = = . = h . ~ # = = = . . . . . ~ = = = = = = = = = = . = i . ~ ~ # = = . . . . . ~ ~ = = ~ = = = = = = . = l . ~ ~ ~ # # . . . . . ~ ~ = = ~ = = = = = = . = q . ~ ~ ~ # # . . . . . ~ ~ = = ~ = = = = = = . = B . ~ = = = = # = = = = = = = = = = = = = = = . = H . ~ ~ = = = ~ # = = = ~ = = = = = = = = = = . = I . ~ ~ ~ = = ~ ~ # = = ~ ~ = = ~ = = = = = = . = L . ~ ~ ~ ~ ~ ~ ~ ~ # # ~ ~ = = ~ = = = = = = . ~ Q . ~ ~ ~ ~ ~ ~ ~ ~ # # ~ ~ = = ~ = = = = = = . ~ e . . . . . . . . . . . # = = = = = = = = = = . . f . . . . . . . . . . . ~ # = = = = = = = = = . . d . . . . . . . . . . . ~ ~ # = ~ = = = = = = . . g . . . . . . . . . . . ~ ~ ~ # ~ ~ = = = = = . . F . . . . . . . . . . . . . . . # = = = = = = . . D . . . . . . . . . . . . . . . ~ # = = = = = . . G . . . . . . . . . . . . . . . ~ ~ # = = = = . . S . . . . . . . . . . . . . . . . . . # = = = . . U . . . . . . . . . . . . . . . . . . . # = = . . V . . . . . . . . . . . . . . . . . . . . # = . . O . . . . . . . . . . . . . . . . . . . . = # . . M . . . . . . . . . . . . . . . . . . . . = = # . m . . . . . . . . . . . . . . . . . . . . = = . 
# """).strip().split("\n") dtypes = [type(np.dtype(c)) for c in table[0][2::2]] convert_cast = {".": Casting.unsafe, "~": Casting.same_kind, "=": Casting.safe, "#": Casting.equiv, " ": -1} cancast = {} for from_dt, row in zip(dtypes, table[1:]): cancast[from_dt] = {} for to_dt, c in zip(dtypes, row[2::2]): cancast[from_dt][to_dt] = convert_cast[c] return cancast CAST_TABLE = _get_cancast_table() class TestChanges: """ These test cases exercise some behaviour changes """ @pytest.mark.parametrize("string", ["S", "U"]) @pytest.mark.parametrize("floating", ["e", "f", "d", "g"]) def test_float_to_string(self, floating, string): assert np.can_cast(floating, string) # 100 is long enough to hold any formatted floating assert np.can_cast(floating, f"{string}100") def test_to_void(self): # But in general, we do consider these safe: assert np.can_cast("d", "V") assert np.can_cast("S20", "V") # Do not consider it a safe cast if the void is too smaller: assert not np.can_cast("d", "V1") assert not np.can_cast("S20", "V1") assert not np.can_cast("U1", "V1") # Structured to unstructured is just like any other: assert np.can_cast("d,i", "V", casting="same_kind") # Unstructured void to unstructured is actually no cast at all: assert np.can_cast("V3", "V", casting="no") assert np.can_cast("V0", "V", casting="no") class TestCasting: size = 1500 # Best larger than NPY_LOWLEVEL_BUFFER_BLOCKSIZE * itemsize def get_data(self, dtype1, dtype2): if dtype2 is None or dtype1.itemsize >= dtype2.itemsize: length = self.size // dtype1.itemsize else: length = self.size // dtype2.itemsize # Assume that the base array is well enough aligned for all inputs. arr1 = np.empty(length, dtype=dtype1) assert arr1.flags.c_contiguous assert arr1.flags.aligned values = [random.randrange(-128, 128) for _ in range(length)] for i, value in enumerate(values): # Use item assignment to ensure this is not using casting: arr1[i] = value if dtype2 is None: if dtype1.char == "?": values = [bool(v) for v in values] return arr1, values if dtype2.char == "?": values = [bool(v) for v in values] arr2 = np.empty(length, dtype=dtype2) assert arr2.flags.c_contiguous assert arr2.flags.aligned for i, value in enumerate(values): # Use item assignment to ensure this is not using casting: arr2[i] = value return arr1, arr2, values def get_data_variation(self, arr1, arr2, aligned=True, contig=True): """ Returns a copy of arr1 that may be non-contiguous or unaligned, and a matching array for arr2 (although not a copy). """ if contig: stride1 = arr1.dtype.itemsize stride2 = arr2.dtype.itemsize elif aligned: stride1 = 2 * arr1.dtype.itemsize stride2 = 2 * arr2.dtype.itemsize else: stride1 = arr1.dtype.itemsize + 1 stride2 = arr2.dtype.itemsize + 1 max_size1 = len(arr1) * 3 * arr1.dtype.itemsize + 1 max_size2 = len(arr2) * 3 * arr2.dtype.itemsize + 1 from_bytes = np.zeros(max_size1, dtype=np.uint8) to_bytes = np.zeros(max_size2, dtype=np.uint8) # Sanity check that the above is large enough: assert stride1 * len(arr1) <= from_bytes.nbytes assert stride2 * len(arr2) <= to_bytes.nbytes if aligned: new1 = as_strided(from_bytes[:-1].view(arr1.dtype), arr1.shape, (stride1,)) new2 = as_strided(to_bytes[:-1].view(arr2.dtype), arr2.shape, (stride2,)) else: new1 = as_strided(from_bytes[1:].view(arr1.dtype), arr1.shape, (stride1,)) new2 = as_strided(to_bytes[1:].view(arr2.dtype), arr2.shape, (stride2,)) new1[...] = arr1 if not contig: # Ensure we did not overwrite bytes that should not be written: offset = arr1.dtype.items
QualiSystems/vCenterShell
package/cloudshell/tests/test_network/vlan/test_factory.py
Python
apache-2.0
324
0
from unittest import TestCase

from cloudshell.cp.vcenter.network.vlan.factory import VlanSpecFactory


class TestVlanSpecFactory(TestCase):
    def test_get_vlan_spec(self):
        vlan_spec_factory = VlanSpecFactory()
        vlan_spec = vlan_spec_factory.get_vlan_spec('Access')
        self.assertIsNotNone(vlan_spec)
timtim17/IntroToGameProg
Labs/Paycheck.py
Python
gpl-2.0
746
0.002681
# Austin Jenchi
# 1/30/2015
# 8th Period
# Paycheck
print "Welcome to How to Job"
print
wage_per_hour = raw_input("How much is your hourly wage? ==> $")
if not wage_per_hour == "":
    try:
        wage_per_hour = float(wage_per_hour)
    except:
        wage_per_hour = 12.00
else:
    wage_per_hour = 12.00
print "Your pay is $%2.2f per hour." % wage_per_hour
print
print "You've worked 26 hours. (in one 24-hour day! remarkable!)"
print
total_wage = wage_per_hour * 26
print "Your Pay Before Taxes is $%2.2f" % total_wage
print
# A 23% tax leaves 77% of gross pay, so multiply by .77 (not .23).
print "After taxes of 23%%, your total pay is $%2.2f." % (total_wage * .77)
print
print "After paying your union fees, you received a measly $%2.2f of your previous $%2.2f." % ((total_wage * .77) - 25, total_wage)
ifduyue/sentry
src/sentry/web/frontend/generic.py
Python
bsd-3-clause
3,640
0.001099
""" sentry.web.frontend.generic ~~~~~~~~~~~~~~~~~~~~~~~~~~~ :copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details. :license: BSD, see LICENSE for more details. """ from __future__ import absolute_import import os import posixpath from django.conf import settings from django.http import HttpResponseNotFound, Http404 from django.contrib.staticfiles import finders from django.utils.six.moves.urllib.parse import unquote from django.views import static from django.views.generic import TemplateView as BaseTemplateView from sentry.web.helpers import render_to_response FOREVER_CACHE = 'max-age=315360000' NEVER_CACHE = 'max-age=0, no-cache, no-store, mu
st-revalidate' def dev_favicon(request): document_root, path = resolve('sentry/images/favicon_dev.png') return static.serve(request, path, document_root=document_root) def resolve(path): # Mostly yanked from Dja
ngo core and changed to return the path: # See: https://github.com/django/django/blob/1.6.11/django/contrib/staticfiles/views.py normalized_path = posixpath.normpath(unquote(path)).lstrip('/') try: absolute_path = finders.find(normalized_path) except Exception: # trying to access bad paths like, `../../etc/passwd`, etc that # Django rejects, but respond nicely instead of erroring. absolute_path = None if not absolute_path: raise Http404("'%s' could not be found" % path) if path[-1] == '/' or os.path.isdir(absolute_path): raise Http404('Directory indexes are not allowed here.') return os.path.split(absolute_path) def static_media(request, **kwargs): """ Serve static files below a given point in the directory structure. """ module = kwargs.get('module') path = kwargs.get('path', '') version = kwargs.get('version') if module: path = '%s/%s' % (module, path) try: document_root, path = resolve(path) except Http404: # Return back a simpler plain-text 404 response, more suitable # for static files, rather than our full blown HTML. return HttpResponseNotFound('', content_type='text/plain') if 'gzip' in request.META.get('HTTP_ACCEPT_ENCODING', '' ) and not path.endswith('.gz') and not settings.DEBUG: paths = (path + '.gz', path) else: paths = (path, ) for p in paths: try: response = static.serve(request, p, document_root=document_root) break except Http404: # We don't need to handle this since `resolve()` is assuring to us that # at least the non-gzipped version exists, so in theory, this can # only happen on the first .gz path continue # Make sure we Vary: Accept-Encoding for gzipped responses response['Vary'] = 'Accept-Encoding' # We need CORS for font files if path.endswith(('.js', '.ttf', '.ttc', '.otf', '.eot', '.woff', '.woff2')): response['Access-Control-Allow-Origin'] = '*' # If we have a version and not DEBUG, we can cache it FOREVER if version is not None and not settings.DEBUG: response['Cache-Control'] = FOREVER_CACHE else: # Otherwise, we explicitly don't want to cache at all response['Cache-Control'] = NEVER_CACHE return response class TemplateView(BaseTemplateView): def render_to_response(self, context, **response_kwargs): return render_to_response( request=self.request, template=self.get_template_names(), context=context, **response_kwargs )
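The cache policy at the end of static_media reduces to one decision: versioned assets are immutable and may be cached indefinitely, while everything else must revalidate. A self-contained sketch of that rule in isolation, reusing the constants defined above:

FOREVER_CACHE = 'max-age=315360000'
NEVER_CACHE = 'max-age=0, no-cache, no-store, must-revalidate'

def cache_control(version, debug):
    # Mirrors the branch at the end of static_media.
    return FOREVER_CACHE if (version is not None and not debug) else NEVER_CACHE

print(cache_control('abc123', False))  # long-lived cache for a versioned asset
print(cache_control(None, False))      # never cache an unversioned asset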
rahulunair/nova
nova/db/sqlalchemy/api_migrations/migrate_repo/versions/016_resource_providers.py
Python
apache-2.0
4,495
0.000222
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Database migrations for resource-providers.""" from migrate import UniqueConstraint from sqlalchemy import Column from sqlalchemy import DateTime from sqlalchemy import Float from sqlalchemy import Index from sqlalchemy import Integer from sqlalchemy import MetaData from sqlalchemy import String from sqlalchemy import Table from sqlalchemy import Unicode def upgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine if migrate_engine.name == 'mysql': nameargs = {'collation': 'utf8_bin'} else: nameargs = {} resource_providers = Table( 'resource_providers', meta, Column('created_at', DateTime), Column('updated_at', DateTime), Column('id', Integer, primary_key=True, nullable=False), Column('uuid', String(36), nullable=False), Column('name', Unicode(200, **nameargs), nullable=True), Column('generation', Integer, default=0), Column('can_host', Integer, default=0), UniqueConstraint('uuid', name='uniq_resource_providers0uuid'), UniqueConstraint('name', name='uniq_resource_providers0name'), Index('resource_providers_name_idx', 'name'), Index('resource_providers_uuid_idx', 'uuid'), mysql_engine='InnoDB', mysql_charset='latin1' ) inventories = Table( 'inventories', meta, Column('created_at', DateTime), Column('updated_at', DateTime), Column('id', Integer, primary_key=True, nullable=False), Column('resource_provider_id', Integer, nullable=False), Column('resource_class_id', Integer, nullable=False), Column('total', Integer, nullable=False), Column('reserved', Integer, nullable=False), Column('min_unit', Integer, nullable=False), Column('max_unit', Integer, nullable=False), Column('step_size', Integer, nullable=False), Column('allocation_ratio', Float, nullable=False), Index('inventories_resource_provider_id_idx', 'resource_provider_id'), Index('inventories_resource_provider_resource_class_idx', 'resource_provider_id', 'resource_class_id'), Index('inventories_resource_class_id_idx', 'resource_class_id'), UniqueConstraint('resource_provider_id', 'resource_class_id', name='uniq_inventories0resource_provider_resource_class'), mysql_engine='InnoDB', mysql_charset='latin1' ) allocations = Table( 'allocations', meta, Column('created_at', DateTime), Column('updated_at', DateTime), Column('id', Integer, primary_key=True, nullable=False), Column('resource_provider_id', Integer, nullable=False), Column('consumer_id', String(36), nullable=False), Column('resource_class_id', Integer, nullable=False), Column('used', Integer, nullable=False), Index('allocations_resource_provider_class_used_idx', 'resource_provider_id', 'resource_class_id', 'used'), Index('allocations_resourc
e_class_id_idx', 'resource_class_id'), I
ndex('allocations_consumer_id_idx', 'consumer_id'), mysql_engine='InnoDB', mysql_charset='latin1' ) resource_provider_aggregates = Table( 'resource_provider_aggregates', meta, Column('created_at', DateTime), Column('updated_at', DateTime), Column('resource_provider_id', Integer, primary_key=True, nullable=False), Column('aggregate_id', Integer, primary_key=True, nullable=False), Index('resource_provider_aggregates_aggregate_id_idx', 'aggregate_id'), mysql_engine='InnoDB', mysql_charset='latin1' ) for table in [resource_providers, inventories, allocations, resource_provider_aggregates]: table.create(checkfirst=True)
isovic/marginAlign
src/margin/marginAlign.py
Python
mit
4,995
0.015816
#!/usr/bin/env python import os import sys from optparse import OptionParser from jobTree.src.bioio import logger, setLoggingFromOptions from jobTree.scriptTree.stack import Stack from margin.mappers.last import Last, LastChain, LastRealign from margin.mappers.bwa import Bwa, BwaChain, BwaRealign from margin.mappers.graphmap import GraphMap, GraphMapChain, GraphMapRealign, GraphMapAnchor, GraphMapAnchorChain, GraphMapAnchorRealign from margin.utils import pathToBaseNanoporeDir import cPecan.cPecanEm from cPecan.cPecanEm import addExpectationMaximisationOptions def main(): #Parse the inputs args/options parser = OptionParser(usage="usage: inputFastqFile referenceFastaFile outputSamFile [options]", version="%prog 0.1") #Options parser.add_option("--em", dest="em", help="Run expectation maximisation (EM)", default=False, action="store_true") ##Most people would not want to use the following, but I put them here for debug purposes parser.add_option("--bwa", dest="bwa", help="Use BWA instead of LAST", default=False, action="store_true") parser.add_option("--graphmap", dest="graphmap", help="Use GraphMap instead of LAST", default=False, action="store_true") parser.add_option("--graphmapanchor", dest="graphmapanchor", help="Use GraphMap with anchor alignment instead of LAST", default=False, action="store_true") parser.add_option("--noRealign", dest="noRealign", help="Don't run any realignment step", default=False, action="store_true") parser.add_option("--noChain", dest="noChain", help="Don't run any chaining step", default=False, action="store_true") parser.add_option("--gapGamma", dest="gapGamma", help="Set the gap gamma for the AMAP function", default=0.5, type=float) parser.add_option("--matchGamma", dest="matchGamma", help="Set the match gamma for the AMAP function", default=0.0, type=float) #Add the cPecan expectation maximisation options options = cPecan.cPecanEm.Options() options.inputModel = os.path.join(pathToBaseNanoporeDir(), "src", "margin", "mappers", "last_hmm_20.txt") options.modelType="fiveStateAsymmetric" #"threeStateAsymmetric" options.optionsToRealign="--diagonalExpansion=10 --splitMatrixBiggerThanThis=300" options.randomStart = True options.trials = 3 options.outputTrialHmms = True options.iterations = 100 options.maxAlignmentLengthPerJob=700000 options.maxAlignmentLengthToSample = 50000000 #options.outputXMLModelFile = outputModel + ".xml" #options.updateTheBand = True #options.useDefaultModelAsStart = True #options.setJukesCantorStartingEmissions=0.3 options.trainEmissions=True #options.tieEmissions = True addExpectationMaximisationOptions(parser, options) #Add the jobTree options Stack.addJobTreeOptions(parser) #Parse the options/arguments options, args = parser.parse_args() #Setup logging setLoggingFromOptions(options) #Print help message if no input if len(sys.argv) == 1: parser.print_help() sys.exit(0) #Exit if the arguments are not what we expect if len(args) != 3: raise RuntimeError("Expected three arguments, got: %s" % " ".join(args)) #Set the mapper if options.noRealign: if options.noChain: # i.e. --noChain --noRealign # mapper = Bwa if options.bwa else Last mapper = Last; if (options.bwa): mapper = Bwa; if (options.graphmap): mapper = GraphMap; if (options
.graphmapanchor): mapper = GraphMapAnchor; else: # i.e. --noRealign # mapper = BwaChain if options.bwa else LastChain mapper = LastChain; if (options.bwa): mapper = BwaChain; if (options.graphmap): mapper = GraphMapChain; if (options.graphmapanchor): mapper = GraphMapAnchorChain; else: # mapp
er = BwaRealign if options.bwa else LastRealign mapper = LastRealign; if (options.bwa): mapper = BwaRealign; if (options.graphmap): mapper = GraphMapRealign; if (options.graphmapanchor): mapper = GraphMapAnchorRealign; #This line invokes jobTree i = Stack(mapper(readFastqFile=args[0], referenceFastaFile=args[1], outputSamFile=args[2], options=options)).startJobTree(options) #The return value of the jobtree script is the number of failed jobs. If we have any then #report this. if i != 0: raise RuntimeError("Got failed jobs") if __name__ == '__main__': from margin.marginAlign import * main()
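The nested mapper selection above can be read as a two-axis table: which aligner, and whether to stop after mapping, chaining, or realignment. A sketch of the equivalent lookup, assuming the same classes imported at the top of the script and preserving the original precedence (--noRealign --noChain gives the base mapper, --noRealign alone gives the chaining mapper, otherwise realign):

def pick_mapper(options):
    aligner = ("graphmap_anchor" if options.graphmapanchor else
               "graphmap" if options.graphmap else
               "bwa" if options.bwa else "last")
    mode = ("map" if options.noRealign and options.noChain else
            "chain" if options.noRealign else "realign")
    return {
        ("last", "map"): Last, ("last", "chain"): LastChain,
        ("last", "realign"): LastRealign,
        ("bwa", "map"): Bwa, ("bwa", "chain"): BwaChain,
        ("bwa", "realign"): BwaRealign,
        ("graphmap", "map"): GraphMap, ("graphmap", "chain"): GraphMapChain,
        ("graphmap", "realign"): GraphMapRealign,
        ("graphmap_anchor", "map"): GraphMapAnchor,
        ("graphmap_anchor", "chain"): GraphMapAnchorChain,
        ("graphmap_anchor", "realign"): GraphMapAnchorRealign,
    }[(aligner, mode)]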
ray-project/ray
rllib/examples/recsim_with_slateq.py
Python
apache-2.0
227
0
from ray.rllib.utils.deprecation import deprecation_warning

deprecation_warning(
    old="ray/rllib/examples/recsim_with_slateq.py",
    new="ray/rllib/examples/recommender_system_with_recsim_and_slateq.py",
    error=True,
)
aleSuglia/YAIEP
yaiep/graph/SearchGraph.py
Python
mit
1,169
0.001714
import networkx

from yaiep.graph.Node import Node


##
# Class representing the whole search space that is generated
# step by step as the search method inspects new nodes
#
class SearchGraph(networkx.DiGraph):
    ##
    # Creates the search graph as a directed graph whose initial
    # node is the initial state, from which the search method will
    # start in order to explore the solution space
    #
    # @param init_state initial state from which the search starts
    def __init__(self, init_state):
        networkx.DiGraph.__init__(self)
        self._init_state = Node(init_state.copy(), None)
        # insert the initial state from which the search space is inspected
        self.add_node(self._init_state)

    ##
    # Returns the reference to the initial state from which
    # the search started
    #
    def get_init_state(self):
        return self._init_state

    def __str__(self):
        res = ''
        for node in self:
            res += '{0} -> '.format(str(node.wm))
            for adj in self.neighbors(node):
                res += str(adj.wm) + '\n'
        return res
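A minimal usage sketch: any working-memory object exposing copy() satisfies the constructor, so a plain dict serves for illustration here (the dict contents are invented):

from yaiep.graph.SearchGraph import SearchGraph

graph = SearchGraph({'facts': []})
root = graph.get_init_state()  # the Node wrapping the initial state
print(root in graph)           # True: it was added as the first node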
brocade/pysdn
docs/source/conf.py
Python
bsd-3-clause
9,179
0.005992
# -*- coding: utf-8 -*- # # pysdn documentation build configuration file, created by # sphinx-quickstart on Wed Aug 5 08:56:12 2015. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys import os import shlex # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. sys.path.insert(0, os.path.abspath('../../pysdn')) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'sphinx.ext.autodoc', ] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # source_suffix = ['.rst', '.md'] source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'pysdn' copyright = u'2015, Sergei Garbuzov' author = u'Sergei Garbuzov' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = '1.3.4' # The full version, including alpha/beta/rc tags. release = '1.3.4' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = [] # The reST default role (used for this markup: `text`) to use for all # documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignore
d prefixes for module index sorting. #modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. #keep_warnings = Fa
lse # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'nature' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. #html_extra_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Language to be used for generating the HTML full-text search index. # Sphinx supports the following languages: # 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' # 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr' #html_search_language = 'en' # A dictionary with options for the search language support, empty by default. # Now only 'ja' uses this config value #html_search_options = {'type': 'default'} # The name of a javascript file (relative to the configuration directory) that # implements a search results scorer. If empty, the default will be used. 
#html_search_scorer = 'scorer.js' # Output file base name for HTML help builder. htmlhelp_basename = 'pysdndoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', # Latex figure (float) alignment #'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ (master_doc, 'pysdn.tex', u'pysdn Documentation', u'Sergei Garbuzov', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output --------------------------------------- # On
songzhw/Hello-kotlin
Python101/src/math/PrimeSieve.py
Python
apache-2.0
1,333
0.007048
import math


def isPrime(num):
    if num < 2:
        return False  # 0 and 1 are not prime
    # num == 100 cannot have a factor greater than sqrt(100): e.g.
    # 60 * ? = 100 is impossible, so trial division only needs to go
    # up to the square root.
    boundary = int(math.sqrt(num)) + 1
    for i in range(2, boundary):
        if num % i == 0:
            return False
    return True


def primeSieve(size):
    sieve = [True] * size  # a cell is set to False once it is a multiple
    sieve[0] = False
    sieve[1] = False  # 1 is not prime either
    # Same square-root bound as above: a composite below `size` always
    # has a factor no larger than sqrt(size).
    boundary = int(math.sqrt(size)) + 1
    for i in range(2, boundary):
        pointer = i * 2  # start position: e.g. 3 is prime, but its
                         # multiples 6, 9, 12, ... are not
        while pointer < size:
            sieve[pointer] = False
            pointer += i

    ret = []  # contains all the prime numbers within "size"
    for i in range(size):
        if sieve[i] == True:
            ret.append(str(i))
    return ret


if __name__ == '__main__':
    primes = primeSieve(100)
    primesString = ", ".join(primes)
    print("prime : ", primesString)

'''
prime :  2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61,
67, 71, 73, 79, 83, 89, 97
'''
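A quick cross-check of the two implementations against each other (this relies on the sieve correctly excluding 0 and 1, as fixed above):

limit = 200
sieve_primes = {int(p) for p in primeSieve(limit)}
trial_primes = {n for n in range(limit) if isPrime(n)}
assert sieve_primes == trial_primes
print(sorted(sieve_primes)[:8])  # [2, 3, 5, 7, 11, 13, 17, 19]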
yu-peng/cdru
graph_theory/tests.py
Python
gpl-3.0
1,214
0.014003
import unittest

from graph_theory.spfa import spfa


class GraphTheoryTests(unittest.TestCase):
    def setUp(self):
        source = 0
        num_nodes = 5
        neighbour_list = [[1],     # 0
                          [2],     # 1
                          [3],     # 2
                          [4, 1],  # 3
                          [1],     # 4
                          ]
        weights = {(0, 1): 20,
                   (1, 2): 1,
                   (2, 3): 2,
                   (3, 4): -2,
                   (4, 1): -1,
                   (3, 1): -4,
                   }
        self.example_graph = (source, num_nodes, weights, neighbour_list)
        self.example_graph_cycle = [1, 2, 3]

    def is_cyclicily_equal(self, list1, list2):
        if len(list1) != len(list2):
            return False
        n = len(list1)
        for shift in range(n):
            if list1 == list2[shift:] + list2[:shift]:
                return True
        return False

    def test_negative_cycle(self):
        _, negative_cycle = spfa(*self.example_graph)
        # Careful, double negation ahead
        assert(negative_cycle is not None)
        assert(self.is_cyclicily_equal(negative_cycle, self.example_graph_cycle))
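Calling spfa directly on a hand-built negative cycle, using the argument order the test unpacks — (source, num_nodes, weights, neighbour_list) returning (distances, negative_cycle); the shape of the first return value is assumed from the test, which discards it:

from graph_theory.spfa import spfa

neighbours = [[1], [2], [0]]
weights = {(0, 1): 1, (1, 2): 1, (2, 0): -3}
_, cycle = spfa(0, 3, weights, neighbours)
print(cycle)  # some rotation of [0, 1, 2]; total weight is -1 < 0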
jxta/cc
vendor/boto/boto/manage/volume.py
Python
apache-2.0
16,328
0.002511
# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/ # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, dis- # tribute, sublicense, and/or sell copies of the Software, and to permit # persons to whom the Software is furnished to do so, subject to the fol- # lowing conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT # SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. from __future__ import with_statement from boto.sdb.db.model import Model from boto.sdb.db.property import StringProperty, IntegerProperty, ListProperty, ReferenceProperty, CalculatedProperty from boto.manage.server import Server from boto.manage import propget import boto.ec2 import time import traceback from contextlib import closing import dateutil.parser import datetime class CommandLineGetter(object): def get_region(self, params): if not params.get('region', None): prop = self.cls.find_property('region_name') params['region'] = propget.get(prop, choices=boto.ec2.regions) def get_zone(self, params): if not params.get('zone', None): prop = StringProperty(name='zone', verbose_name='EC2 Availability Zone', choices=self.ec2.get_all_zones) params['zone'] = propget.get(prop) def get_name(self, params): if not params.get('name', None): prop = self.cls.find_property('name') params['name'] = propget.get(prop) def get_size(self, params): if not params.get('size', None): prop = IntegerProperty(name='size', verbose_name='Size (GB)') params['size'] = propget.get(prop) def get_mount_point(self, params): if not params.get('mount_point', None): prop = self.cls.find_property('mount_point') params['mount_point'] = propget.get(prop) def get_device(self, params): if not params.get('device', None): prop = self.cls.find_property('device') params['device'] = propget.get(prop) def get(self, cls, params): self.cls = cls self.get_region(params) self.ec2 = params['region'].connect() self.get_zone(params) self.get_name(params) self.get_size(params) self.get_mount_point(params) self.get_device(params) class Volume(Model): name = StringProperty(required=True, unique=True, verbose_name='Name') region_name = StringProperty(required=True, verbose_name='EC2 Region') zone_name = StringProperty(required=True, verbose
_name='EC2 Zone') mount_point = StringProperty(verbose_name='Mount Point') device = StringProperty(verbose_name="Device Name", default='/dev/sdp') volume_id = StringProperty(required=True) past_volume_ids = ListProperty(item_type=str) server = ReferenceProperty(Server, collection_name='volumes', verbose_name='Server Attached To') volume_state = CalculatedProperty(verbose_name="Volume State",
calculated_type=str, use_method=True) attachment_state = CalculatedProperty(verbose_name="Attachment State", calculated_type=str, use_method=True) size = CalculatedProperty(verbose_name="Size (GB)", calculated_type=int, use_method=True) @classmethod def create(cls, **params): getter = CommandLineGetter() getter.get(cls, params) region = params.get('region') ec2 = region.connect() zone = params.get('zone') size = params.get('size') ebs_volume = ec2.create_volume(size, zone.name) v = cls() v.ec2 = ec2 v.volume_id = ebs_volume.id v.name = params.get('name') v.mount_point = params.get('mount_point') v.device = params.get('device') v.region_name = region.name v.zone_name = zone.name v.put() return v @classmethod def create_from_volume_id(cls, region_name, volume_id, name): vol = None ec2 = boto.ec2.connect_to_region(region_name) rs = ec2.get_all_volumes([volume_id]) if len(rs) == 1: v = rs[0] vol = cls() vol.volume_id = v.id vol.name = name vol.region_name = v.region.name vol.zone_name = v.zone vol.put() return vol def create_from_latest_snapshot(self, name, size=None): snapshot = self.get_snapshots()[-1] return self.create_from_snapshot(name, snapshot, size) def create_from_snapshot(self, name, snapshot, size=None): if size < self.size: size = self.size ec2 = self.get_ec2_connection() if self.zone_name == None or self.zone_name == '': # deal with the migration case where the zone is not set in the logical volume: current_volume = ec2.get_all_volumes([self.volume_id])[0] self.zone_name = current_volume.zone ebs_volume = ec2.create_volume(size, self.zone_name, snapshot) v = Volume() v.ec2 = self.ec2 v.volume_id = ebs_volume.id v.name = name v.mount_point = self.mount_point v.device = self.device v.region_name = self.region_name v.zone_name = self.zone_name v.put() return v def get_ec2_connection(self): if self.server: return self.server.ec2 if not hasattr(self, 'ec2') or self.ec2 == None: self.ec2 = boto.ec2.connect_to_region(self.region_name) return self.ec2 def _volume_state(self): ec2 = self.get_ec2_connection() rs = ec2.get_all_volumes([self.volume_id]) return rs[0].volume_state() def _attachment_state(self): ec2 = self.get_ec2_connection() rs = ec2.get_all_volumes([self.volume_id]) return rs[0].attachment_state() def _size(self): if not hasattr(self, '__size'): ec2 = self.get_ec2_connection() rs = ec2.get_all_volumes([self.volume_id]) self.__size = rs[0].size return self.__size def install_xfs(self): if self.server: self.server.install('xfsprogs xfsdump') def get_snapshots(self): """ Returns a list of all completed snapshots for this volume ID. """ ec2 = self.get_ec2_connection() rs = ec2.get_all_snapshots() all_vols = [self.volume_id] + self.past_volume_ids snaps = [] for snapshot in rs: if snapshot.volume_id in all_vols: if snapshot.progress == '100%': snapshot.date = dateutil.parser.parse(snapshot.start_time) snapshot.keep = True snaps.append(snapshot) snaps.sort(cmp=lambda x,y: cmp(x.date, y.date)) return snaps def attach(self, server=None): if self.attachment_state == 'attached': print 'already attached' return None if server: self.server = server self.put() ec2 = self.get_ec2_connection() ec2.attach_volume(self.volume_id, self.server.instance_id, self.device) def detach(self, force=False): state = self.attachment_state if state == 'available' or state == None or state == 'detaching': print 'already detached'
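One portability note on get_snapshots above: list.sort(cmp=...) exists only in Python 2. Under Python 3 the same ordering would use a key function; a self-contained sketch with a stand-in snapshot type:

import datetime

class FakeSnapshot(object):  # stand-in for boto's snapshot objects
    def __init__(self, date):
        self.date = date

snaps = [FakeSnapshot(datetime.date(2020, 1, d)) for d in (3, 1, 2)]
snaps.sort(key=lambda s: s.date)  # replaces sort(cmp=lambda x, y: cmp(x.date, y.date))
print([s.date.day for s in snaps])  # [1, 2, 3]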
cmjatai/cmj
sapl/sessao/migrations/0002_sessaoplenaria_interativa.py
Python
gpl-3.0
518
0.001938
# -*- coding: utf-8 -*-
# Generated by Django 1.9.11 on 2017-05-10 15:49
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('sessao', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='sessaoplenaria',
            name='interativa',
            field=models.NullBooleanField(
                choices=[(True, 'Sim'), (False, 'Não')],
                verbose_name='Sessão interativa'),
        ),
    ]
ping/instagram_private_api_extensions
instagram_private_api_extensions/__init__.py
Python
mit
159
0
# Copyright (c) 2017 https://github.com/ping
#
# This software is released under the MIT License.
# https://opensource.org/licenses/MIT

__version__ = '0.3.9'
Azure/azure-sdk-for-python
sdk/containerservice/azure-mgmt-containerservice/azure/mgmt/containerservice/v2022_01_01/operations/_agent_pools_operations.py
Python
mit
40,397
0.004505
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- import functools from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union import warnings from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error from azure.core.paging import ItemPaged from azure.core.pipeline import PipelineResponse from azure.core.pipeline.transport import HttpResponse from azure.core.polling import LROPoller, NoPolling, PollingMethod from azure.core.rest import HttpRequest from azure.core.tracing.decorator import distributed_trace from azure.mgmt.core.exceptions import ARMErrorFormat from azure.mgmt.core.polling.arm_polling import ARMPolling from msrest import Serializer from .. import models as _models from .._vendor import _convert_request, _format_url_section T = TypeVar('T') JSONType = Any ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] _SERIALIZER = Serializer() _SERIALIZER.client_side_validation = False def build_list_request( subscription_id: str, resource_group_name: str, resource_name: str,
**kwargs: Any ) -> HttpRequest: api_version = "2022-01-01" accept = "application/json" # Construct URL url = kwargs.pop("t
emplate_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools') path_format_arguments = { "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'), "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', min_length=1), "resourceName": _SERIALIZER.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'), } url = _format_url_section(url, **path_format_arguments) # Construct parameters query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any] query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str') # Construct headers header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any] header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str') return HttpRequest( method="GET", url=url, params=query_parameters, headers=header_parameters, **kwargs ) def build_get_request( subscription_id: str, resource_group_name: str, resource_name: str, agent_pool_name: str, **kwargs: Any ) -> HttpRequest: api_version = "2022-01-01" accept = "application/json" # Construct URL url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools/{agentPoolName}') path_format_arguments = { "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'), "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', min_length=1), "resourceName": _SERIALIZER.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'), "agentPoolName": _SERIALIZER.url("agent_pool_name", agent_pool_name, 'str'), } url = _format_url_section(url, **path_format_arguments) # Construct parameters query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any] query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str') # Construct headers header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any] header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str') return HttpRequest( method="GET", url=url, params=query_parameters, headers=header_parameters, **kwargs ) def build_create_or_update_request_initial( subscription_id: str, resource_group_name: str, resource_name: str, agent_pool_name: str, *, json: JSONType = None, content: Any = None, **kwargs: Any ) -> HttpRequest: content_type = kwargs.pop('content_type', None) # type: Optional[str] api_version = "2022-01-01" accept = "application/json" # Construct URL url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools/{agentPoolName}') path_format_arguments = { "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'), "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', min_length=1), "resourceName": _SERIALIZER.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'), "agentPoolName": _SERIALIZER.url("agent_pool_name", agent_pool_name, 'str'), } url = _format_url_section(url, **path_format_arguments) # Construct parameters query_parameters = kwargs.pop("params", {}) # 
type: Dict[str, Any] query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str') # Construct headers header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any] if content_type is not None: header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str') header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str') return HttpRequest( method="PUT", url=url, params=query_parameters, headers=header_parameters, json=json, content=content, **kwargs ) def build_delete_request_initial( subscription_id: str, resource_group_name: str, resource_name: str, agent_pool_name: str, **kwargs: Any ) -> HttpRequest: api_version = "2022-01-01" accept = "application/json" # Construct URL url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools/{agentPoolName}') path_format_arguments = { "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'), "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', min_length=1), "resourceName": _SERIALIZER.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'), "agentPoolName": _SERIALIZER.url("agent_pool_name", agent_pool_name, 'str'), } url = _format_url_section(url, **path_format_arguments) # Construct parameters query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any] query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str') # Construct headers header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any] header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str') return HttpRequest( method="DELETE", url=url, params=query_parameters, headers=header_parameters, **kwargs ) def build_get_upgrade_profile_request( subscription_id: str, resource_group_name: str, resource_name: str, agent_pool_name: str, **kwargs: Any ) -> HttpRequest: api_version = "2022-01-01" accept = "application/json" # Construct URL url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourc
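Each build_* helper above ultimately returns an azure.core.rest.HttpRequest with the api-version pinned as a query parameter. A sketch of the end product with a made-up subscription, resource group, and cluster name:

from azure.core.rest import HttpRequest

request = HttpRequest(
    method="GET",
    url="/subscriptions/00000000-0000-0000-0000-000000000000"
        "/resourceGroups/rg/providers/Microsoft.ContainerService"
        "/managedClusters/cluster1/agentPools",
    params={"api-version": "2022-01-01"},
    headers={"Accept": "application/json"},
)
print(request.method, request.url)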
Microvellum/Fluid-Designer
win64-vc/2.78/scripts/startup/fluid_operators/fd_api_doc.py
Python
gpl-3.0
10,496
0.0121
''' Created on Jan 27, 2017 @author: montes ''' import bpy from inspect import * import mv import os import math from reportlab.pdfgen import canvas from reportlab.lib.pagesizes import legal,inch,cm from reportlab.platypus import Image from reportlab.platypus import Paragraph,Table,TableStyle from reportlab.platypus import SimpleDocTemplate, Table, TableStyle, Paragraph, Frame, Spacer, PageTemplate, PageBreak from reportlab.lib import colors from reportlab.lib.pagesizes import A3, A4, landscape, portrait from reportlab.lib.styles import ParagraphStyle, getSampleStyleSheet from reportlab.lib.enums import TA_LEFT, TA_RIGHT, TA_CENTER, TA_JUSTIFY from reportlab.platypus.flowables import HRFlowable class OPS_create_api_doc(bpy.types.Operator): bl_idname = "fd_api_doc.create_api_doc" bl_label = "Create Fluid API Documentation" output_path = bpy.props.StringProperty(name="Output Path") def esc_uscores(self, string): if string: return string.replace("_", "\_") else: return def exclude_builtins(self, classes, module): new_classes = [] for cls in classes: if module in cls[1].__module__: new_classes.append(cls) return new_classes def write_sidebar(self, modules): filepath = os.path.join(self.output_path, "FD_Sidebar.md") file = open(filepath, "w") fw = file.write fw("# Fluid Designer\n") fw("* [Home](Home)\n") fw("* [Understanding the User Interface](Understanding-the-User-Interface)\n") fw("* [Navigating the 3D Viewport](Navigating-the-3D-Viewport)\n") fw("* [Navigating the Library Browser](Navigating-the-Library-Browser)\n") fw("* [The Room Builder Panel](The-Room-Builder-Panel)\n") fw("* [Hotkeys](Fluid-Designer-Hot-Keys)\n\n") fw("# API Documentation\n") for mod in modules: fw("\n## mv.{}\n".format(mod[0])) classes = self.exclude_builtins(getmembers(mod[1], predicate=isclass), mod[0]) if len(classes) > 0: for cls in classes: fw("* [{}()]({})\n".format(self.esc_uscores(cls[0]), self.esc_uscores(cls[0]))) else: fw("* [mv.{}]({})\n".format(mod[0], mod[0])) file.close() def write_class_doc(self, cls): filepath = os.path.join(self.ou
tput_path, cls[0] + ".md") file = open(filepath, "w") fw = file.write fw("# class {}{}{}{}\n\n".format(cls[1].__module__, ".", cls[0], "():")) if getdoc(cls[1]): fw(self.esc_uscores(getdoc(cls[1])) + "\n\n") for func in getmembers(cls[1], predicate=isfunction): if cls[0] in func[1].__qualname__: args = getargspec(func[1])[0]
args_str = ', '.join(item for item in args if item != 'self') fw("## {}{}{}{}\n\n".format(self.esc_uscores(func[0]), "(", self.esc_uscores(args_str) if args_str else " ", ")")) if getdoc(func[1]): fw(self.esc_uscores(getdoc(func[1])) + "\n") else: fw("Undocumented.\n\n") file.close() def write_mod_doc(self, mod): filepath = os.path.join(self.output_path, mod[0] + ".md") file = open(filepath, "w") fw = file.write fw("# module {}{}:\n\n".format("mv.", mod[0])) if getdoc(mod[1]): fw(self.esc_uscores(getdoc(mod[1])) + "\n\n") for func in getmembers(mod[1], predicate=isfunction): args = getargspec(func[1])[0] args_str = ', '.join(item for item in args if item != 'self') fw("## {}{}{}{}\n\n".format(self.esc_uscores(func[0]), "(", self.esc_uscores(args_str if args_str else " "), ")")) if getdoc(func[1]): fw(self.esc_uscores(getdoc(func[1])) + "\n") else: fw("Undocumented.\n\n") file.close() def execute(self, context): modules = getmembers(mv, predicate=ismodule) self.write_sidebar(modules) for mod in modules: classes = self.exclude_builtins(getmembers(mod[1], predicate=isclass), mod[0]) if len(classes) > 0: for cls in classes: self.write_class_doc(cls) else: self.write_mod_doc(mod) return {'FINISHED'} class OPS_create_content_overview_doc(bpy.types.Operator): bl_idname = "fd_api_doc.create_content_overview" bl_label = "Create Fluid Content Overview Documentation" INCLUDE_FILE_NAME = "doc_include.txt" write_path = bpy.props.StringProperty(name="Write Path", default="") elements = [] package = None def write_html(self): pass def read_include_file(self, path): dirs = [] file_path = os.path.join(path, self.INCLUDE_FILE_NAME) if os.path.exists(file_path): file = open(os.path.join(path, self.INCLUDE_FILE_NAME), "r") dirs_raw = list(file) for dir in dirs_raw: dirs.append(dir.replace("\n", "")) return dirs def create_hdr(self, name, font_size): hdr_style = TableStyle([('TEXTCOLOR', (0, 0), (-1, -1), colors.black), ('BOTTOMPADDING', (0, 0), (-1, -1), 15), ('TOPPADDING', (0, 0), (-1, -1), 15), ('FONTSIZE', (0, 0), (-1, -1), 8), ('VALIGN', (0, 0), (-1, -1), 'TOP'), ('ALIGN', (0, 0), (-1, 0), 'LEFT'), ('LINEBELOW', (0, 0), (-1, -1), 2, colors.black), ('BACKGROUND', (0, 1), (-1, -1), colors.white)]) name_p = Paragraph(name, ParagraphStyle("Category name style", fontSize=font_size)) hdr_tbl = Table([[name_p]], colWidths = 500, rowHeights = None, repeatRows = 1) hdr_tbl.setStyle(hdr_style) self.elements.append(hdr_tbl) def create_img_table(self, dir): item_tbl_data = [] item_tbl_row = [] for i, file in enumerate(os.listdir(dir)): last_item = len(os.listdir(dir)) - 1 if ".png" in file: img = Image(os.path.join(dir, file), inch, inch) img_name = file.replace(".png", "") if len(item_tbl_row) == 4: item_tbl_data.append(item_tbl_row) item_tbl_row = [] elif i == last_item: item_tbl_data.append(item_tbl_row) i_tbl = Table([[img], [Paragraph(img_name, ParagraphStyle("item name style", wordWrap='CJK'))]]) item_tbl_row.append(i_tbl) if len(item_tbl_data) > 0: item_tbl = Table(item_tbl_data, colWidths=125) self.elements.append(item_tbl) self.elements.append(Spacer(1, inch * 0.5)) def search_dir(self, path): thumb_dir = False for file in os.listdir(path): if ".png" in file: thumb_dir = True if thumb_dir: self.create_img_table(path) for file in os.listdir(path): if os.path.isdir(os.path.join(path, file)): sel
rocky/python2-trepan
trepan/processor/command/edit.py
Python
gpl-3.0
2,860
0.003846
# -*- coding: utf-8 -*- # Copyright (C) 2009, 2013-2015 Rocky Bernstein # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import inspect, os # Our local modules from trepan.processor.command import base_cmd as Mbase_cmd class EditCommand(Mbase_cmd.DebuggerCommand): """**edit** *position* Edit specified file or module. With no argument, edits file containing most recent line listed. See also: --------- `list` """ aliases = ('ed',) category = 'files' min_args = 0 max_args = 1 name = os.path.basename(__file__).split('.')[0] need_stack = False short_help = 'Edit specified file or module' def run(self, args): curframe = self.proc.curframe if len(args) == 1: if curframe is None: self.errmsg('edit: no stack to pick up position from. ' 'Use edit FILE:LINE form.') return
filename = curframe.f_code.co_filename lineno = curframe
.f_lineno elif len(args) == 2: (modfunc, filename, lineno) = self.proc.parse_position(args[1]) if inspect.ismodule(modfunc) and lineno is None and len(args) > 2: val = self.proc.get_an_int(args[1], 'Line number expected, got %s.' % args[1]) if val is None: return lineno = val pass elif lineno is None: self.errmsg('edit: no linenumber provided') return pass editor = 'ex' if 'EDITOR' in os.environ: editor = os.environ['EDITOR'] pass if os.path.exists(filename): os.system("%s +%d %s" % (editor, lineno, filename)) else: self.errmsg("edit: file %s doesn't exist" % filename) pass return pass if __name__ == '__main__': from trepan import debugger as Mdebugger d = Mdebugger.Debugger() cmd = EditCommand(d.core.processor) for c in (['edit'], ['edit', './edit.py:34'], ['edit', './noogood.py'], ): cmd.run(c) pass pass
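The os.system call in EditCommand interpolates the filename into a shell string, which breaks on paths containing spaces or quotes. A safer sketch using an argument list and the same EDITOR +LINE FILE convention (the function name is hypothetical):

import os
import subprocess

def launch_editor(filename, lineno):
    editor = os.environ.get('EDITOR', 'ex')
    subprocess.call([editor, '+%d' % lineno, filename])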
whiteclover/Choco
choco/ui.py
Python
mit
3,191
0
# choco/ui.py
# Copyright (C) 2006-2016 the Choco authors and contributors <see AUTHORS file>
#
# This module is part of Choco and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php

import re
import os
import posixpath
from choco import errors
from choco import util
from choco.runtime import _kwargs_for_include


class UIModule(object):
    default_template = ""

    def __init__(self, context, template=None):
        self.lookup = context.lookup
        self.ui_container = self.lookup.ui_container
        self.context = context
        self.template = template or self.default_template
        self.initialize()

    def initialize(self):
        pass

    def get(self, key, default=None):
        """Get parent context local data by key."""
        return self.context.get(key, default)

    def _execute(self, *args, **kw):
        """Execute the template."""
        data = self.render(*args, **kw)
        t = self.get_template()
        return t.render_ui(self.context, *args, **data)

    def get_template(self):
        return self.ui_container.get_template(self.template)

    def render(self, *args, **kw):
        """Entry point and logic section for custom application actions."""
        raise NotImplementedError()


class UIContainer(object):

    def __init__(self, ui_paths, uis=None):
        """Init the UI container.

        :param ui_paths: the UI template paths.
        :param uis: a dict-like object that contains the UI module classes.
        """
        self.ui_paths = [posixpath.normpath(d) for d in
                         util.to_list(ui_paths, ())
                         ]
        self.uis = uis or dict()

    def put_ui(self, ui_name, uicls):
        self.uis[ui_name] = uicls

    def get_ui(self, ui_name):
        uicls = self.uis.get(ui_name)
        if uicls is None:
            raise errors.UINotFoundException("Can't find ui for %s" % ui_name)
        return uicls

    def set_lookup(self, lookup):
        """Set up template lookup."""
        self.lookup = lookup

    def get_template(self, uri):
        """Return a :class:`.Template` object corresponding to the given
        ``uri``.

        .. note:: The ``relativeto`` argument is not supported here at
           the moment.
        """
        # the special ui uri with prefix "ui#"
        uiuri = "ui#" + uri
        try:
            if self.lookup.filesystem_checks:
                return self.lookup.check(uiuri, self.lookup.collection[uiuri])
            else:
                return self.lookup.collection[uiuri]
        except KeyError:
            u = re.sub(r'^\/+', '', uri)
            for dir in self.ui_paths:
                # make sure the path separators are posix - os.altsep is empty
                # on POSIX and cannot be used.
                dir = dir.replace(os.path.sep, posixpath.sep)
                srcfile = posixpath.normpath(posixpath.join(dir, u))
                if os.path.isfile(srcfile):
                    return self.lookup.load(srcfile, uiuri)
            else:
                raise errors.TopLevelLookupException(
                    "Can't locate ui template for uri %r" % uiuri)
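A sketch of what a concrete UIModule looks like: render() returns the data dict handed to the template. The template path and context key here are invented for illustration, not part of choco:

class SidebarUI(UIModule):
    default_template = "ui/sidebar.html"

    def render(self, *args, **kw):
        return {"items": self.get("sidebar_items", [])}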
ThomasTheSpaceFox/furry-text-escape-2
TE2.py
Python
gpl-3.0
2,244
0.043226
#!/usr/bin/env python # coding=utf-8 # Furry Text Escape 2 main sc
ript gamevers = ('v1.0') n = ('null') tprint1 = ('1') tprint2 = ('1') while
n.strip()!="4": if tprint1==('1'): t = open('./art/title1.TCR', 'r') tcr_contents = t.read() print (chr(27) + "[2J" + chr(27) + "[H" + tcr_contents) t.close() tprint1=('0') print ( '''Furry Text Escape II (c) 2015-2016 Thomas Leathers ''' ) print ( '''Choose number: 1: Watch Intro 2: begin game 3: Credits 4: quit''' ) n = raw_input('choose number from the list above:') print(chr(27) + "[2A") if n=="2": #episode selection submenu print(chr(27) + "[2J" + chr(27) + "[H") episodeselection = ('null') tprint2 = ('1') t = open('./art/EPSEL-BANNER.TCR', 'r') tcr_contents = t.read() print (chr(27) + "[2J" + chr(27) + "[H" + tcr_contents + '''"which way?" ''') while episodeselection.strip()!="5": if tprint2==('1'): print(chr(27) + "[2J" + chr(27) + "[H") episodeselection = ('null') tprint2 = ('1') t = open('./art/EPSEL-BANNER.TCR', 'r') tcr_contents = t.read() print (chr(27) + "[2J" + chr(27) + "[H" + tcr_contents + '''"which way?"''') t.close() tprint2 = ('0') print ( '''episode selection: 1: episode 1: maintenance duties (RED) : episode 2 -coming soon- (BLUE) : episode 3 -coming soon- (GREEN) 4: BONUS! Playable flashback to Furry Text Escape 1! 5: return to main menu.''' ) episodeselection = raw_input('choice:') print(chr(27) + "[2A") if episodeselection=="1": print(chr(27) + "[2J" + chr(27) + "[H") execfile("EP1-intro.py") execfile("EP-1.py") execfile("EP1-outro.py") print(chr(27) + "[2J" + chr(27) + "[H") tprint2 = ('1') if episodeselection=="4": print(chr(27) + "[2J" + chr(27) + "[H") execfile("DARKROOM.py") print(chr(27) + "[2J" + chr(27) + "[H") tprint2 = ('1') print(chr(27) + "[2J" + chr(27) + "[H") tprint1 = ('1') if n=="1": print(chr(27) + "[2J" + chr(27) + "[H") execfile("CINA1-OPEN.py") print(chr(27) + "[2J" + chr(27) + "[H") tprint1 = ('1') if n=="3": print(chr(27) + "[2J" + chr(27) + "[H") execfile("CREDITS.py") print(chr(27) + "[2J" + chr(27) + "[H") tprint1 = ('1') t.close() #
jerbob92/CouchPotatoServer
libs/guessit/language.py
Python
gpl-3.0
13,849
0.003251
#!/usr/bin/env python # -*- coding: utf-8 -*- # # GuessIt - A library for guessing information from filenames # Copyright (c) 2011 Nicolas Wack <[email protected]> # # GuessIt is free software; you can redistribute it and/or modify it under # the terms of the Lesser GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # GuessIt is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # Lesser GNU General Public License for more details. # # You should have received a copy of the Lesser GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # from __future__ import unicode_literals from guessit import UnicodeMixin, base_text_type, u, s from guessit.fileutils import load_file_in_same_dir from guessit.textutils import find_words from guessit.country import Country import re import logging __all__ = [ 'is_iso_language', 'is_language', 'lang_set', 'Language', 'ALL_LANGUAGES', 'ALL_LANGUAGES_NAMES', 'UNDETERMINED', 'search_language', 'guess_language' ] log = logging.getLogger(__name__) # downloaded from http://www.loc.gov/standards/iso639-2/ISO-639-2_utf-8.txt # # Description of the fields: # "An alpha-3 (bibliographic) code, an alpha-3 (terminologic) code (when given), # an alpha-2 code (when given), an English name, and a French name of a language # are all separated by pipe (|) characters." _iso639_contents = load_file_in_same_dir(__file__, 'ISO-639-2_utf-8.txt') # drop the BOM from the beginning of the file _iso639_contents = _iso639_contents[1:] language_matrix = [ l.strip().split('|') for l in _iso639_contents.strip().split('\n') ] # update information in the language matrix language_matrix += [['mol', '', 'mo', 'Moldavian', 'moldave'], ['ass', '', '', 'Assyrian', 'assyrien']] for lang in language_matrix: # remove unused languages that shadow other common ones with a non-official form if (lang[2] == 'se' or # Northern Sami shadows Swedish lang[2] == 'br'): # Breton shadows Brazilian lang[2] = '' # add missing information if lang[0] == 'und': lang[2] = 'un' if lang[0] == 'srp': lang[1] = 'scc' # from OpenSubtitles lng3 = frozenset(l[0] for l in language_matrix if l[0]) lng3term = frozenset(l[1] for l in language_matrix if l[1]) lng2 = frozenset(l[2] for l in language_matrix if l[2]) lng_en_name = frozenset(lng for l in language_matrix for lng in l[3].lower().split('; ') if lng) lng_fr_name = frozenset(lng for l in language_matrix for lng in l[4].lower().split('; ') if lng) lng_all_names = lng3 | lng3term | lng2 | lng_en_name | lng_fr_name lng3_to_lng3term = dict((l[0], l[1]) for l in language_matrix if l[1]) lng3term_to_lng3 = dict((l[1], l[0]) for l in language_matrix if l[1]) lng3_to_lng2 = dict((l[0], l[2]) for l in language_matrix if l[2]) lng2_to_lng3 = dict((l[2], l[0]) for l in language_matrix if l[2]) # we only return the first given english name, hoping it is the most used one lng3_to_lng_en_name = dict((l[0], l[3].split('; ')[0]) for l in language_matrix if l[3]) lng_en_name_to_lng3 = dict((en_name.lower(), l[0]) for l in language_matrix if l[3] for en_name in l[3].split('; ')) # we only return the first given french name, hoping it is the most used one lng3_to_lng_fr_name = dict((l[0], l[4].split('; ')[0]) for l in language_matrix if l[4]) l
ng_fr_name_to_lng3 = dict((fr_name.lower(), l[0]) for l in language_matrix if l[4] for fr_name in l[4].split('; ')) # contains a list of exceptions: strings that shou
ld be parsed as a language # but which are not in an ISO form lng_exceptions = { 'unknown': ('und', None), 'inconnu': ('und', None), 'unk': ('und', None), 'un': ('und', None), 'gr': ('gre', None), 'greek': ('gre', None), 'esp': ('spa', None), 'español': ('spa', None), 'se': ('swe', None), 'po': ('pt', 'br'), 'pb': ('pt', 'br'), 'pob': ('pt', 'br'), 'br': ('pt', 'br'), 'brazilian': ('pt', 'br'), 'català': ('cat', None), 'cz': ('cze', None), 'ua': ('ukr', None), 'cn': ('chi', None), 'chs': ('chi', None), 'jp': ('jpn', None), 'scr': ('hrv', None) } def is_iso_language(language): return language.lower() in lng_all_names def is_language(language): return is_iso_language(language) or language in lng_exceptions def lang_set(languages, strict=False): """Return a set of guessit.Language created from their given string representation. if strict is True, then this will raise an exception if any language could not be identified. """ return set(Language(l, strict=strict) for l in languages) class Language(UnicodeMixin): """This class represents a human language. You can initialize it with pretty much anything, as it knows conversion from ISO-639 2-letter and 3-letter codes, English and French names. You can also distinguish languages for specific countries, such as Portuguese and Brazilian Portuguese. There are various properties on the language object that give you the representation of the language for a specific usage, such as .alpha3 to get the ISO 3-letter code, or .opensubtitles to get the OpenSubtitles language code. >>> Language('fr') Language(French) >>> s(Language('eng').french_name) 'anglais' >>> s(Language('pt(br)').country.english_name) 'Brazil' >>> s(Language('Español (Latinoamérica)').country.english_name) 'Latin America' >>> Language('Spanish (Latin America)') == Language('Español (Latinoamérica)') True >>> s(Language('zz', strict=False).english_name) 'Undetermined' >>> s(Language('pt(br)').opensubtitles) 'pob' """ _with_country_regexp = re.compile('(.*)\((.*)\)') _with_country_regexp2 = re.compile('(.*)-(.*)') def __init__(self, language, country=None, strict=False, scheme=None): language = u(language.strip().lower()) with_country = (Language._with_country_regexp.match(language) or Language._with_country_regexp2.match(language)) if with_country: self.lang = Language(with_country.group(1)).lang self.country = Country(with_country.group(2)) return self.lang = None self.country = Country(country) if country else None # first look for scheme specific languages if scheme == 'opensubtitles': if language == 'br': self.lang = 'bre' return elif language == 'se': self.lang = 'sme' return elif scheme is not None: log.warning('Unrecognized scheme: "%s" - Proceeding with standard one' % scheme) # look for ISO language codes if len(language) == 2: self.lang = lng2_to_lng3.get(language) elif len(language) == 3: self.lang = (language if language in lng3 else lng3term_to_lng3.get(language)) else: self.lang = (lng_en_name_to_lng3.get(language) or lng_fr_name_to_lng3.get(language)) # general language exceptions if self.lang is None and language in lng_exceptions: lang, country = lng_exceptions[language] self.lang = Language(lang).alpha3 self.country = Cou
tsdmgz/ansible
lib/ansible/plugins/action/ironware_config.py
Python
gpl-3.0
4,167
0.00072
# # (c) 2017, Red Hat, Inc. # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # from __future__ import (absolute_import, division, print_function) __metaclass__ = type import os import re import time import glob from ansible.plugins.action.ironware import ActionModule as _ActionModule from ansible.module_utils._text import to_text from ansible.module_utils.six.moves.urllib.parse import urlsplit from ansible.utils.vars import merge_hash PRIVATE_KEYS_RE = re.compile('__.+__') class ActionModule(_ActionModule): def run(self, tmp=None, task_vars=None): if self._task.args.get('src'): try: self._handle_template() except ValueError as exc: return dict(failed=True, msg=exc.message) result = super(ActionModule, self).run(tmp, task_vars) if self._task.args.get('backup') and result.get('__backup__'): # User requested backup and no error occurred in module. # NOTE: If there is a parameter error, _backup key may not be in results. filepath = self._write_backup(task_vars['inventory_hostname'], result['__backup__']) result['backup_path'] = filepath # strip out any keys that have two leading and two trailing # underscore characters for key in result.keys(): if PRIVATE_KEYS_RE.match(key): del result[key] return result def _get_working_path(self): cwd = self._loader.get_basedir() if self._task._role is not None:
cwd = self._ta
sk._role._role_path return cwd def _write_backup(self, host, contents): backup_path = self._get_working_path() + '/backup' if not os.path.exists(backup_path): os.mkdir(backup_path) for fn in glob.glob('%s/%s*' % (backup_path, host)): os.remove(fn) tstamp = time.strftime("%Y-%m-%d@%H:%M:%S", time.localtime(time.time())) filename = '%s/%s_config.%s' % (backup_path, host, tstamp) open(filename, 'w').write(contents) return filename def _handle_template(self): src = self._task.args.get('src') working_path = self._get_working_path() if os.path.isabs(src) or urlsplit(src).scheme: source = src else: source = self._loader.path_dwim_relative(working_path, 'templates', src) if not source: source = self._loader.path_dwim_relative(working_path, src) if not os.path.exists(source): raise ValueError('path specified in src not found') try: with open(source, 'r') as f: template_data = to_text(f.read()) except IOError: return dict(failed=True, msg='unable to load src file') # Create a template search path in the following order: # [working_path, self_role_path, dependent_role_paths, dirname(source)] searchpath = [working_path] if self._task._role is not None: searchpath.append(self._task._role._role_path) if hasattr(self._task, "_block"): dep_chain = self._task._block.get_dep_chain() if dep_chain is not None: for role in dep_chain: searchpath.append(role._role_path) searchpath.append(os.path.dirname(source)) self._templar.environment.loader.searchpath = searchpath self._task.args['src'] = self._templar.template(template_data)
julienmalard/Tikon
tikon/móds/rae/orgs/ecs/repr/triang.py
Python
agpl-3.0
857
0
import scipy.stats as estad from tikon.ecs.aprioris import APrio
riDist from tikon.ecs.árb_mód import Parám from tikon.móds.rae.orgs.ecs.repr._plntll_ec import EcuaciónReprCoh class N(Parám): no
mbre = 'n' líms = (0, None) unids = None apriori = APrioriDist(estad.expon(scale=500)) class A(Parám): nombre = 'a' líms = (0, None) unids = None apriori = APrioriDist(estad.expon(scale=100)) class B(Parám): nombre = 'b' líms = (0, None) unids = None apriori = APrioriDist(estad.expon(scale=100)) class C(Parám): nombre = 'c' líms = (0, 1) unids = None class Triang(EcuaciónReprCoh): nombre = 'Triang' cls_ramas = [N, A, B, C] _cls_dist = estad.triang def _prms_scipy(símismo): cf = símismo.cf return dict(loc=cf['a'], scale=cf['b'], c=cf['c'])
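A note on _prms_scipy above: scipy.stats.triang takes a shape parameter c in [0, 1] plus loc/scale, giving support [loc, loc + scale] and the mode at loc + c * scale, so cf['a'] is the lower bound, cf['b'] the width, and cf['c'] the relative position of the peak. A small standalone check (values invented):

import scipy.stats as estad

# Triangular distribution on [2, 10] with its peak at 2 + 0.25 * 8 = 4.
dist = estad.triang(c=0.25, loc=2.0, scale=8.0)
print(dist.ppf(0.0), dist.ppf(1.0))  # support bounds: 2.0 10.0
print(dist.pdf(4.0))                 # density is maximal at the mode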
shayan72/Courseware
Courseware/wsgi.py
Python
mit
395
0.002532
""" WSGI config for Courseware project. It expose
s the WSGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/ """ import os os.environ.setdefault("DJANGO_SETTINGS_MODULE", "Courseware.settings") from django.core.wsgi imp
ort get_wsgi_application application = get_wsgi_application()
plotly/python-api
packages/python/plotly/plotly/validators/mesh3d/colorbar/_tickvals.py
Python
mit
460
0.002174
import _plotly_utils.basevalidators class TickvalsValidator(_plotly_utils.basevalidators.DataArrayVa
lidator): def __init__(self, plotly_name="tickvals", parent_name="mesh3d.colorbar", **kwargs): super(TickvalsValidator, self).__init__( plotly_name=plotly_name, parent_name=parent_name, edit_type=kwargs.pop("edit_type", "colorbars"), role=kwargs.pop("role", "data"),
**kwargs )
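As a usage note, a hedged sketch of exercising the generated validator standalone; it assumes the DataArrayValidator base class exposes the usual validate_coerce() entry point for coercing array-like input:

v = TickvalsValidator()
coerced = v.validate_coerce([0, 5, 10])  # list-like tick values are accepted
# a scalar such as v.validate_coerce(42) should be rejected as non-array input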
s20121035/rk3288_android5.1_repo
bionic/libc/tools/zoneinfo/update-tzdata.py
Python
gpl-3.0
8,075
0.016966
#!/usr/bin/python """Updates the timezone data held in bionic and ICU.""" import ftplib import glob import httplib import os import re import shutil import subprocess import sys import tarfile import tempfile regions = ['africa', 'antarctica', 'asia', 'australasia', 'etcetera', 'europe', 'northamerica', 'southamerica', # These two deliberately come last so they override what came # before (and each other). 'backward', 'backzone' ] def CheckDirExists(dir, dirname): if not os.path.isdir(dir): print "Couldn't find %s (%s)!" % (dirname, dir) sys.exit(1) bionic_libc_tools_zoneinfo_dir = os.path.realpath(os.path.dirname(sys.argv[0])) # Find the bionic directory, searching upward from this script. bionic_dir = os.path.realpath('%s/../../..' % bionic_libc_tools_zoneinfo_dir) bionic_libc_zoneinfo_dir = '%s/libc/zoneinfo' % bionic_dir CheckDirExists(bionic_libc_zoneinfo_dir, 'bionic/libc
/zoneinfo') CheckDirExists(bionic_libc_tools_zoneinfo_dir, 'bionic/libc/tools/zoneinfo') print 'Found bionic in %s ...' % bionic_dir # Find the icu4c directory. icu_dir = os.path.realpath('%s/../external/icu/icu4c/source' % bionic_dir) CheckDirExists(icu_dir, 'external/icu/icu
4c/source') print 'Found icu in %s ...' % icu_dir def GetCurrentTzDataVersion(): return open('%s/tzdata' % bionic_libc_zoneinfo_dir).read().split('\x00', 1)[0] def WriteSetupFile(): """Writes the list of zones that ZoneCompactor should process.""" links = [] zones = [] for region in regions: for line in open('extracted/%s' % region): fields = line.split() if fields: if fields[0] == 'Link': links.append('%s %s %s' % (fields[0], fields[1], fields[2])) zones.append(fields[2]) elif fields[0] == 'Zone': zones.append(fields[1]) zones.sort() setup = open('setup', 'w') for link in sorted(set(links)): setup.write('%s\n' % link) for zone in sorted(set(zones)): setup.write('%s\n' % zone) setup.close() def SwitchToNewTemporaryDirectory(): tmp_dir = tempfile.mkdtemp('-tzdata') os.chdir(tmp_dir) print 'Created temporary directory "%s"...' % tmp_dir def FtpRetrieveFile(ftp, filename): ftp.retrbinary('RETR %s' % filename, open(filename, 'wb').write) def FtpRetrieveFileAndSignature(ftp, data_filename): """Downloads and repackages the given data from the given FTP server.""" print 'Downloading data...' FtpRetrieveFile(ftp, data_filename) print 'Downloading signature...' signature_filename = '%s.asc' % data_filename FtpRetrieveFile(ftp, signature_filename) def HttpRetrieveFile(http, path, output_filename): http.request("GET", path) f = open(output_filename, 'wb') f.write(http.getresponse().read()) f.close() def HttpRetrieveFileAndSignature(http, data_filename): """Downloads and repackages the given data from the given HTTP server.""" path = "/time-zones/repository/releases/%s" % data_filename print 'Downloading data...' HttpRetrieveFile(http, path, data_filename) print 'Downloading signature...' signature_filename = '%s.asc' % data_filename HttpRetrieveFile(http, "%s.asc" % path, signature_filename) def BuildIcuToolsAndData(data_filename): # Keep track of the original cwd so we can go back to it at the end. original_working_dir = os.getcwd() # Create a directory to run 'make' from. icu_working_dir = '%s/icu' % original_working_dir os.mkdir(icu_working_dir) os.chdir(icu_working_dir) # Build the ICU tools. print 'Configuring ICU tools...' subprocess.check_call(['%s/runConfigureICU' % icu_dir, 'Linux']) # Run the ICU tools. os.chdir('tools/tzcode') # The tz2icu tool only picks up icuregions and icuzones if they are in the CWD for icu_data_file in [ 'icuregions', 'icuzones']: icu_data_file_source = '%s/tools/tzcode/%s' % (icu_dir, icu_data_file) icu_data_file_symlink = './%s' % icu_data_file os.symlink(icu_data_file_source, icu_data_file_symlink) shutil.copyfile('%s/%s' % (original_working_dir, data_filename), data_filename) print 'Making ICU data...' # The Makefile assumes the existence of the bin directory. os.mkdir('%s/bin' % icu_working_dir) subprocess.check_call(['make']) # Copy the source file to its ultimate destination. icu_txt_data_dir = '%s/data/misc' % icu_dir print 'Copying zoneinfo64.txt to %s ...' % icu_txt_data_dir shutil.copy('zoneinfo64.txt', icu_txt_data_dir) # Regenerate the .dat file. os.chdir(icu_working_dir) subprocess.check_call(['make', '-j32']) # Copy the .dat file to its ultimate destination. icu_dat_data_dir = '%s/stubdata' % icu_dir datfiles = glob.glob('data/out/tmp/icudt??l.dat') if len(datfiles) != 1: print 'ERROR: Unexpectedly found %d .dat files (%s). Halting.' % (len(datfiles), datfiles) sys.exit(1) datfile = datfiles[0] print 'Copying %s to %s ...' % (datfile, icu_dat_data_dir) shutil.copy(datfile, icu_dat_data_dir) # Switch back to the original working directory.
os.chdir(original_working_dir) def CheckSignature(data_filename): signature_filename = '%s.asc' % data_filename print 'Verifying signature...' # If this fails for you, you probably need to import Paul Eggert's public key: # gpg --recv-keys ED97E90E62AA7E34 subprocess.check_call(['gpg', '--trusted-key=ED97E90E62AA7E34', '--verify', signature_filename, data_filename]) def BuildBionicToolsAndData(data_filename): new_version = re.search('(tzdata.+)\\.tar\\.gz', data_filename).group(1) print 'Extracting...' os.mkdir('extracted') tar = tarfile.open(data_filename, 'r') tar.extractall('extracted') print 'Calling zic(1)...' os.mkdir('data') zic_inputs = [ 'extracted/%s' % x for x in regions ] zic_cmd = ['zic', '-d', 'data' ] zic_cmd.extend(zic_inputs) subprocess.check_call(zic_cmd) WriteSetupFile() print 'Calling ZoneCompactor to update bionic to %s...' % new_version subprocess.check_call(['javac', '-d', '.', '%s/ZoneCompactor.java' % bionic_libc_tools_zoneinfo_dir]) subprocess.check_call(['java', 'ZoneCompactor', 'setup', 'data', 'extracted/zone.tab', bionic_libc_zoneinfo_dir, new_version]) # Run with no arguments from any directory, with no special setup required. # See http://www.iana.org/time-zones/ for more about the source of this data. def main(): print 'Looking for new tzdata...' tzdata_filenames = [] # The FTP server lets you download intermediate releases, and also lets you # download the signatures for verification, so it's your best choice. use_ftp = True if use_ftp: ftp = ftplib.FTP('ftp.iana.org') ftp.login() ftp.cwd('tz/releases') for filename in ftp.nlst(): if filename.startswith('tzdata20') and filename.endswith('.tar.gz'): tzdata_filenames.append(filename) tzdata_filenames.sort() else: http = httplib.HTTPConnection('www.iana.org') http.request("GET", "/time-zones") index_lines = http.getresponse().read().split('\n') for line in index_lines: # \w? matches the optional release letter (e.g. tzdata2015g) m = re.compile('.*href="/time-zones/repository/releases/(tzdata20\d\d\w?\.tar\.gz)".*').match(line) if m: tzdata_filenames.append(m.group(1)) # If you're several releases behind, we'll walk you through the upgrades # one by one. current_version = GetCurrentTzDataVersion() current_filename = '%s.tar.gz' % current_version for filename in tzdata_filenames: if filename > current_filename: print 'Found new tzdata: %s' % filename SwitchToNewTemporaryDirectory() if use_ftp: FtpRetrieveFileAndSignature(ftp, filename) else: HttpRetrieveFileAndSignature(http, filename) CheckSignature(filename) BuildIcuToolsAndData(filename) BuildBionicToolsAndData(filename) print 'Look in %s and %s for new data files' % (bionic_dir, icu_dir) sys.exit(0) print 'You already have the latest tzdata (%s)!' % current_version sys.exit(0) if __name__ == '__main__': main()
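To make WriteSetupFile()'s output format concrete: it writes the deduplicated Link lines first (three whitespace-separated fields, as read from the region files), then the sorted zone names one per line, and ZoneCompactor consumes this list. The entries below are invented for illustration:

# Example `setup` contents (invented):
# Link America/New_York US/Eastern
# America/New_York
# US/Eastern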
ActiveState/code
recipes/Python/496684_Splicing_of_lists/recipe-496684.py
Python
mit
1,158
0.025043
def splice(alists, recycle = True): """ Accepts a list of nonempty lists or indexable objects in argument alists (the element lists need not all be the same length) and a keyword argument recycle which, if true, reuses elements of the shorter lists. Any error results in an empty list being returned. """ try: nlists = len(alists) lens = [len(alist) for alist in alists] if not recycle: totlen = sum(lens) else: totlen = max(lens) * nlists pos = [0] * nlists R = [None] * totlen i, j = 0, 0 while i < totlen: if pos[j] < lens[j]: R[i] = alists[j][pos[j]] i += 1 pos[j] = pos[j] + 1 if recycle and pos[j] >= lens[j]: pos[j] = 0 j = (j + 1) % nlists return R except: return [] if __name__ == "__main__": print splice([[1,2,3], ['a','b'], [4], [-1,-2,-3,-4]], recycle = False) print splice([[1,2,3], ['a','b'], [4], [-1,-2,-3,-4]])
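For clarity, here are the two demo calls traced by hand against the loop above; these results are derived from reading the code, not from captured output:

# recycle=False: interleave until every list is exhausted
# (totlen = 3 + 2 + 1 + 4 = 10):
#   [1, 'a', 4, -1, 2, 'b', -2, 3, -3, -4]
# recycle=True: shorter lists wrap around until
# totlen = max(lens) * nlists = 4 * 4 = 16 elements are emitted:
#   [1, 'a', 4, -1, 2, 'b', 4, -2, 3, 'a', 4, -3, 1, 'b', 4, -4]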
tekton/DocuCanvas
socialplatform/migrations/0005_auto__add_field_facebookprofile_polls.py
Python
gpl-3.0
7,753
0.007868
# -*- coding: utf-8 -*- import datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Adding field 'FacebookProfile.polls' db.add_column(u'socialplatform_facebookprofile', 'polls', self.gf('django.db.models.fields.NullBooleanField')(default=True, null=True, blank=True), keep_default=False) def backwards(self, orm): # Deleting field 'FacebookProfile.polls' db.delete_column(u'socialplatform_facebookprofile', 'polls') models = { u'auth.group': { 'Meta': {'object_name': 'Group'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, u'auth.permission': { 'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, u'auth.user': { 'Meta': {'object_name': 'User'}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, u'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table'
: "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), u'id': ('django.db.m
odels.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, u'socialplatform.dmall': { 'Meta': {'object_name': 'DMAll'}, 'content': ('django.db.models.fields.CharField', [], {'max_length': '140'}), 'created': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'send_user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}) }, u'socialplatform.dmindividual': { 'Meta': {'object_name': 'DMIndividual'}, 'content': ('django.db.models.fields.CharField', [], {'max_length': '140'}), 'created': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'send_ind_user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}), 'target_user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['socialplatform.TwitterProfile']"}) }, u'socialplatform.facebookprofile': { 'Meta': {'object_name': 'FacebookProfile'}, 'access_token': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'active': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}), 'facebook_id': ('django.db.models.fields.CharField', [], {'max_length': '150'}), 'helpdesk': ('django.db.models.fields.NullBooleanField', [], {'default': 'True', 'null': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'image_url': ('django.db.models.fields.URLField', [], {'max_length': '200'}), 'issue': ('django.db.models.fields.NullBooleanField', [], {'default': 'True', 'null': 'True', 'blank': 'True'}), 'notifications': ('django.db.models.fields.NullBooleanField', [], {'default': 'True', 'null': 'True', 'blank': 'True'}), 'polls': ('django.db.models.fields.NullBooleanField', [], {'default': 'True', 'null': 'True', 'blank': 'True'}), 'profilePicture': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}), 'user': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True'}) }, u'socialplatform.fbnotification': { 'Meta': {'object_name': 'FBNotification'}, 'fb_profile': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['socialplatform.FacebookProfile']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'sender': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}), 'text': ('django.db.models.fields.TextField', [], {}) }, u'socialplatform.tweet': { 'Meta': {'object_name': 'Tweet'}, 'content': ('django.db.models.fields.CharField', [], {'max_length': '140'}), 'created': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'tweet_user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}) }, u'socialplatform.twitterprofile': { 'Meta': {'object_name': 'TwitterProfile'}, 'active': ('django.db.models.fields.NullBooleanField', [], {'default': 'True', 'null': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}), 
'user_name': ('django.db.models.fields.CharField', [], {'max_length': '200'}) } } complete_apps = ['socialplatform']
apache/airflow
airflow/migrations/versions/952da73b5eff_add_dag_code_table.py
Python
apache-2.0
2,906
0.001376
# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """Add ``dag_code`` table Revision ID: 952da73b5eff Revises: 852ae6c715af Create Date: 2020-03-12 12:39:01.797462 """ import sqlalchemy as sa from alembic import op from airflow.models.dagcode import DagCode # revision identifiers, used by Alembic. revision = '952da73b5eff' down_revision = '852ae6c715af' branch_labels = None depends_on = None airflow_version = '1.10.10' def upgrade(): """Create DagCode Table.""" from sqlalchemy.ext.declarative import declarative_base Base = declarative_base()
class SerializedDagModel(Base): __tablename__ = 'serialized_dag' # There are other columns here, but these are the only ones we need for the SELECT/UPDATE we are doing da
g_id = sa.Column(sa.String(250), primary_key=True) fileloc = sa.Column(sa.String(2000), nullable=False) fileloc_hash = sa.Column(sa.BigInteger, nullable=False) """Apply add source code table""" op.create_table( 'dag_code', sa.Column('fileloc_hash', sa.BigInteger(), nullable=False, primary_key=True, autoincrement=False), sa.Column('fileloc', sa.String(length=2000), nullable=False), sa.Column('source_code', sa.UnicodeText(), nullable=False), sa.Column('last_updated', sa.TIMESTAMP(timezone=True), nullable=False), ) conn = op.get_bind() if conn.dialect.name != 'sqlite': if conn.dialect.name == "mssql": op.drop_index('idx_fileloc_hash', 'serialized_dag') op.alter_column( table_name='serialized_dag', column_name='fileloc_hash', type_=sa.BigInteger(), nullable=False ) if conn.dialect.name == "mssql": op.create_index('idx_fileloc_hash', 'serialized_dag', ['fileloc_hash']) sessionmaker = sa.orm.sessionmaker() session = sessionmaker(bind=conn) serialized_dags = session.query(SerializedDagModel).all() for dag in serialized_dags: dag.fileloc_hash = DagCode.dag_fileloc_hash(dag.fileloc) session.merge(dag) session.commit() def downgrade(): """Unapply add source code table""" op.drop_table('dag_code')
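As a usage note: in a 1.10-era Airflow deployment this revision is normally reached with the database upgrade command rather than by calling Alembic by hand; a hedged sketch of both routes, using the revision ids declared above:

# Via Airflow (1.10.x command; Airflow drives Alembic internally):
#   airflow upgradedb
# Via Alembic directly, assuming an alembic.ini pointing at the same DB:
#   alembic upgrade 952da73b5eff    # apply: creates dag_code
#   alembic downgrade 852ae6c715af  # revert: drops dag_code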
honeynet/beeswarm
beeswarm/drones/client/baits/pop3.py
Python
gpl-3.0
3,174
0.001575
# Copyright (C) 2013 Johnny Vestergaard <[email protected]> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import poplib from datetime import datetime import logging from beeswarm.drones.client.baits.clientbase import ClientBase logg
er = logging.getLogger(__name__) class Pop3(ClientBase): def __init__(self, options): """ Initializes common values. :param options: A dict containing all options """
super(Pop3, self).__init__(options) def start(self): """ Launches a new POP3 client session on the server. """ username = self.options['username'] password = self.options['password'] server_host = self.options['server'] server_port = self.options['port'] honeypot_id = self.options['honeypot_id'] session = self.create_session(server_host, server_port, honeypot_id) conn = None # ensure 'conn' is bound for the finally block even if the connect fails try: logger.debug( 'Sending {0} bait session to {1}:{2}. (bait id: {3})'.format('pop3', server_host, server_port, session.id)) conn = poplib.POP3(server_host, server_port) session.source_port = conn.sock.getsockname()[1] banner = conn.getwelcome() session.protocol_data['banner'] = banner session.did_connect = True conn.user(username) conn.pass_(password) # TODO: Handle failed login session.add_auth_attempt('plaintext', True, username=username, password=password) session.did_login = True session.timestamp = datetime.utcnow() # except (poplib.error_proto, h_socket.error) as err: except Exception as err: logger.debug('Caught exception: {0} ({1})'.format(err, str(type(err)))) else: list_entries = conn.list()[1] for entry in list_entries: index, octets = entry.split(' ') conn.retr(index) conn.dele(index) logger.debug('Found and deleted {0} messages on {1}'.format(len(list_entries), server_host)) conn.quit() session.did_complete = True finally: session.all_done = True session.end_session() if conn: try: conn.file.close() except Exception: pass try: conn.sock.close() except Exception: pass
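To show the expected shape of the options dict, a hedged sketch with invented values; the keys are exactly those read in start() above, while create_session() and the session plumbing come from ClientBase, so this is illustrative rather than a standalone script:

options = {
    'username': 'alice',   # credentials the bait will present
    'password': 'secret',
    'server': '10.0.0.5',  # honeypot address (invented)
    'port': 110,
    'honeypot_id': 'hp-01',
}
bait = Pop3(options)
bait.start()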
horance-liu/tensorflow
tensorflow/python/keras/_impl/keras/utils/generic_utils.py
Python
apache-2.0
12,658
0.0079
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Python utilities required by Keras.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import marshal import os import sys import time import types as python_types import numpy as np import six from tensorflow.python.util import tf_decorator from tensorflow.python.util import tf_inspect _GLOBAL_CUSTOM_OBJECTS = {} class CustomObjectScope(object): """Provides a scope that changes to `_GLOBAL_CUSTOM_OBJECTS` cannot escape. Code within a `with` statement will be able to access custom objects by name. Changes to global custom objects persist within the enclosing `with` statement. At end of the `with` statement, global custom objects are reverted to state at beginning of the `with` statement. Example: Consider a custom object `MyObject` (e.g. a class): ```python with CustomObjectScope({'MyObject':MyObject}): layer = Dense(..., kernel_regularizer='MyObject') # save, load, etc. will recognize custom object by name ``` """ def __init__(self, *args): self.custom_objects = args self.backup = None def __enter__(self): self.backup = _GLOBAL_CUSTOM_OBJECTS.copy() for objects in self.custom_objects: _GLOBAL_CUSTOM_OBJECTS.update(objects) return self def __exit__(self, *args, **kwargs): _GLOBAL_CUSTOM_OBJECTS.clear() _GLOBAL_CUSTOM_OBJECTS.update(self.backup) def custom_object_scope(*args): """Provides a scope that changes to `_GLOBAL_CUSTOM_OBJECTS` cannot escape. Convenience wrapper for `CustomObjectScope`. Code within a `with` statement will be able to access custom objects by name. Changes to global custom objects persist within the enclosing `with` statement. At end of the `with` statement, global custom objects are reverted to state at beginning of the `with` statement. Example: Consider a custom object `MyObject` ```python with custom_object_scope({'MyObject':MyObject}): layer = Dense(..., kernel_regularizer='MyObject') # save, load, etc. will recognize custom object by name ``` Arguments: *args: Variable length list of dictionaries of name, class pairs to add to custom objects. Returns: Object of type `CustomObjectScope`. """ return CustomObjectScope(*args) def get_custom_objects(): """Retrieves a live reference to the global dictionary of custom objects. Updating and clearing custom objects using `custom_object_scope` is preferred, but `get_custom_objects` can be used to directly access `_GLOBAL_CUSTOM_OBJECTS`. Example: ```python get_custom_objects().clear() get_custom_objects()['MyObject'] = MyObject ``` Returns: Global dictionary of names to classes (`_GLOBAL_CUSTOM_OBJECTS`). 
""" return _GLOBAL_CUSTOM_OBJECTS def serialize_keras_object(instance): _, instance = tf_decorator.unwrap(instance) if instance is None: return None if hasattr(instance, 'get_config'): return { 'class_name': instance.__class__.__name__, 'config': instance.get_config() } if hasattr(instance, '__name__'): return instance.__name__ else: raise ValueError('Cannot serialize', instance) def deserialize_keras_object(identifier, module_objects=None, custom_objects=None, printable_module_name='object'): if isinstance(identifier, dict): # In this case we are dealing with a Keras config dictionary. config = identifier if 'class_name' not in config or 'config' not in config: raise ValueError('Improper config format: ' + str(config)) class_name = config['class_name'] if custom_objects and class_name in custom_objects: cls = custom_objects[class_name] elif class_name in _GLOBAL_CUSTOM_OBJECTS: cls = _GLOBAL_CUSTOM_OBJECTS[class_name] else: module_objects = module_objects or {} cls = module_objects.get(class_name) if cls is None: raise ValueError('Unknown ' + printable_module_name + ': ' + class_name) if hasattr(cls, 'from_config'): arg_spec = tf_inspect.getargspec(cls.from_config) custom_objects = custom_objects or {} if 'custom_objects' in arg_spec.args: return cls.from_config( config['config'], custom_objects=dict( list(_GLOBAL_CUSTOM_OBJECTS.items()) + list(custom_objects.items()))) with CustomObjectScope(custom_objects): return cls.from_config(config['config']) else: # Then `cls` may be a function returning a class. # in this case by convention `config` holds # the kwargs of the function. custom_objects = custom_objects or {} with CustomObjectScope(custom_objects): return cls(**config['config']) elif isinstance(identifier, six.string_types): function_name = identifier if custom_objects and function_name in custom_objects: fn = custom_objects.get(function_name) elif function_name in _GLOBAL_CUSTOM_OBJECTS: fn = _GLOBAL_CUSTOM_OBJECTS[function_name] else: fn = module_objects.get(function_name) if fn is None: raise ValueError('Unknown ' + printable_module_name + ':' + function_name) return fn else: raise ValueError('Could not interpret serialized ' + printable_module_name + ': ' + identifier) def func_dump(func): """Serializes a user defined function. Arguments: func: the function to serialize. Returns: A tuple `(code, defaults, closure)`. """ if os.name == 'nt': code = marshal.dumps( func.__code__).replace(b'\\', b'/').decode('raw_unicode_escape') else: code = marshal.dumps(func.__code__).decode('raw_unicode_escape') defaults = func.__defaults__ if func.__closure__: closure = tuple(c.cell_contents for c in func.__closure__) else: closure = None return code, defaults, closure def func_load(code, defaults=None, closure=None, globs=None): """Deserializes a user defined function. Arguments: code: bytecode of the function. defaults: defaults of the function. closure: closure of the function. globs: dictionary of global objects. Returns: A function object. """ if isinstance(code, (tuple, list)): # unpack previous dump code, defaults, closure = code if isinstance(defaults, list): defaults = tuple(defaults) code = marshal.loads(code.encode('raw_unicode_escape')) if globs is None: globs = globals() return python_types.FunctionType( code, globs, name=code.co_name, argdefs=defaults, closure=closure) def has_arg(fn, name, accept_all=False): """Checks if a callable accepts a given keyword argument. Arguments: fn: Callable to inspect. name: Check if `fn` can be called with `name` as a keyword argument. 
accept_all: What to return if there is no parameter called `name`
but the function accepts a `**kwargs` argument. Returns: bool, whether `fn` accepts a `name` keyword argument. """ arg_spec = tf_inspect.getargspec
(fn) if accept_all and arg_spec.keywords is not None: return True return name in arg_spec.args class Progbar(object): """Displays a progress bar. Arguments: target: Total number of steps expected, None if unknown. interval: Minimum visual progre
cwisecarver/osf.io
tests/test_conferences.py
Python
apache-2.0
25,084
0.000518
# -*- coding: utf-8 -*- import mock from nose.tools import * # noqa (PEP8 asserts) import hmac import hashlib from StringIO import StringIO from django.db import IntegrityError import furl from modularodm import Q from modularodm.exceptions import ValidationError from framework.auth import get_or_create_user from framework.auth.core import Auth from osf.models import OSFUser as User, AbstractNode as Node from website import settings from website.conferences import views from website.conferences import utils, message from website.util import api_url_for, web_url_for from tests.base import OsfTestCase, fake from osf_tests.factories import ConferenceFactory, ProjectFactory, UserFactory def assert_absolute(url): parsed_domain = furl.furl(settings.DOMAIN) parsed_url = furl.furl(url) assert_equal(parsed_domain.host, parsed_url.host) def assert_equal_urls(first, second): parsed_first = furl.furl(first) parsed_first.port = None parsed_second = furl.furl(second) parsed_second.port = None assert_equal(parsed_first, parsed_second) def create_fake_conference_nodes(n, endpoint): nodes = [] for i in range(n): node = ProjectFactory(is_public=True) node.add_tag(endpoint, Auth(node.creator)) node.save() nodes.append(node) return nodes class TestConferenceUtils(OsfTestCase): def test_get_or_create_user_exists(self): user = UserFactory() fetched, created = get_or_create_user(user.fullname, user.username, is_spam=True) assert_false(created) assert_equal(user._id, fetched._id) assert_false('is_spam' in fetched.system_tags) def test_get_or_create_user_not_exists(self): fullname = 'Roger Taylor' username = '[email protected]' fetched, created = get_or_create_user(fullname, username, is_spam=False) fetched.save() # in order to access m2m fields, e.g. tags assert_true(created) assert_equal(fetched.fullname, fullname) assert_equal(fetched.username, username) assert_false('is_spam' in fetched.system_tags) def test_get_or_create_user_is_spam(self): fullname = 'John Deacon' username = '[email protected]' fetched, created = get_or_create_user(fullname, username, is_spam=True) fetched.save() # in order to access m2m fields, e.g. 
tags assert_true(created) assert_equal(fetched.fullname, fullname) assert_equal(fetched.username, username) assert_true('is_spam' in fetched.system_tags) def test_get_or_create_node_exists(self): node = ProjectFactory() fetched, created = utils.get_or_create_node(node.title, node.creator) assert_false(created) assert_equal(node._id, fetched._id) def test_get_or_create_node_title_not_exists(self): title = 'Night at the Opera' creator = UserFactory() node = ProjectFactory(creator=creator) fetched, created = utils.get_or_create_node(title, creator) assert_true(created) assert_not_equal(node._id, fetched._id) def test_get_or_create_node_title_exists_deleted(self): title = 'Night at the Opera' creator = UserFactory() node = ProjectFactory(title=title) node.is_deleted = True node.save() fetched, created = utils.get_or_create_node(title, creator) assert_true(created) assert_not_equal(node._id, fetched._id) def test_get_or_create_node_title_exists_not_deleted(self): title = 'Night at the Opera' creator = UserFactory() node = ProjectFactory(title=title, creator=creator) node.is_deleted = False node.save() fetched, created = utils.get_or_create_node(title, creator) assert_false(created) assert_equal(node._id, fetched._id) def test_get_or_create_node_user_not_exists(self): title = 'Night at the Opera' creator = UserFactory() node = ProjectFactory(title=title) fetched, created = utils.get_or_create_node(title, creator) assert_true(created) assert_not_equal(node._id, fetched._id) def test_get_or_create_user_with_blacklisted_domain(self): fullname = 'Kanye West' username = '[email protected]' with assert_raises(ValidationError) as e: get_or_create_user(fullname, username, is_spam=True) assert_equal(e.exception.message, 'Invalid Email') class ContextTestCase(OsfTestCase): MAILGUN_API_KEY = 'mailkimp' @classmethod def setUpClass(cls): super(ContextTestCase, cls).setUpClass() settings.MAILGUN_API_KEY, cls._MAILGUN_API_KEY = cls.MAILGUN_API_KEY, settings.MAILGUN_API_KEY @classmethod def tearDownClass(cls): super(ContextTestCase, cls).tearDownClass() settings.MAILGUN_API_KEY = cls._MAILGUN_API_KEY def make_context(self, method='POST', **kwargs): data = { 'X-Mailgun-Sscore': 0, 'timestamp': '123', 'token': 'secret', 'signature': hmac.new( key=settings.MAILGUN_API_KEY, msg='{}{}'.format('123', 'secret'), digestmod=hashlib.sha256, ).hexdigest(), } data.update(kwargs.pop('data', {})) data = { key: value for key, value in data.iteritems() if value is not None } return self.app.app.test_request_context(method=method, data=data, **kwargs) class TestProvisionNode(ContextTestCase): def setUp(self): super(TestProvisionNode, self).setUp() self.node = ProjectFactory() self.user = self.node.creator self.conference = ConferenceFactory() self.body = 'dragon on my back' self.content = 'dragon attack' self.attachment = StringIO(self.content) self.recipient = '{0}{1}[email protected]'.format( 'test-' if settings.DEV_MODE else '', self.conference.endpoint, ) def make_context(self, **kwargs): data = { 'attachment-count': '1', 'attachment-1': (self.attachment, 'attachment-1'), 'X-Mailgun-Sscore': 0, 'recipient': self.recipient, 'stripped-text': self.body, } data.update(kwargs.pop('data', {})) return super(TestProvisionNode, self).make_context(data=data, **kwargs) def test_provision(self): with self.make_context(): msg = message.ConferenceMessage() utils.provision_node(self.conference, msg, self.node, self.user) assert_true(self.node.is_public) assert_in(self.conference.admins.first(), self.node.contributors) assert_in('emailed', 
self.node.system_tags) assert_in(self.conference.endpoint, self.node.system_tags) assert_true(self.node.tags.filter(name=self.conference.endpoint).e
xists()) assert_not_in('spam', self.node.system_tags) def test_provision_private(self): self.conference.public_projects = False self.conference.save() with self.make_context(): msg = me
ssage.ConferenceMessage() utils.provision_node(self.conference, msg, self.node, self.user) assert_false(self.node.is_public) assert_in(self.conference.admins.first(), self.node.contributors) assert_in('emailed', self.node.system_tags) assert_not_in('spam', self.node.system_tags) def test_provision_spam(self): with self.make_context(data={'X-Mailgun-Sscore': message.SSCORE_MAX_VALUE + 1}): msg = message.ConferenceMessage() utils.provision_node(self.conference, msg, self.node, self.user) assert_false(self.node.is_public) assert_in(self.conference.admins.first(), self.node.contributors) assert_in('emailed', self.node.system_tags) assert_in('spam', self.node.system_tags) @mock.patch('website.util.waterbutler_url_for') @mock.patch('website.conferences.utils.requests.put') def test_upload(self, mock_put, mock_get_url): mock_get_url.r
CTSRD-SOAAP/chromium-42.0.2311.135
tools/profile_chrome/perf_controller.py
Python
bsd-3-clause
6,890
0.00566
# Copyright 2014 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import logging import os import signal import subprocess import sys import tempfile from profile_chrome import controllers from profile_chrome import ui from pylib import android_commands from pylib import constants from pylib.perf import perf_control sys.path.append(os.path.join(constants.DIR_SOURCE_ROOT, 'tools', 'telemetry')) try: # pylint: disable=F0401 from telemetry.core.platform.profiler import android_profiling_helper from telemetry.util import support_binaries except ImportError: android_profiling_helper = None support_binaries = None _PERF_OPTIONS = [ # Sample across all processes and CPUs to so that the current CPU gets # recorded to each sample. '--all-cpus', # In perf 3.13 --call-graph requires an argument, so use the -g short-hand # which does not. '-g', # Increase priority to avoid dropping samples. Requires root. '--realtime', '80', # Record raw samples to get CPU information. '--raw-samples', # Increase sampling frequency for better coverage. '--freq', '2000', ] class _PerfProfiler(object): def __init__(self, device, perf_binary, categories): self._device = device self._output_file = android_commands.DeviceTempFile( self._device.old_interface, prefix='perf_output') self._log_file = tempfile.TemporaryFile() # TODO(jbudorick) Look at providing a way to unhandroll this once the # adb rewrite has fully landed. device_param = (['-s', str(self._device)] if str(self._device) else []) cmd = ['adb'] + device_param + \ ['shell', perf_binary, 'record', '--output', self._output_file.name] + _PERF_OPTIONS if categories: cmd += ['--event', ','.join(categories)] self._perf_control = perf_control.PerfControl(self._device) self._perf_control.SetPerfProfilingMode() self._perf_process = subprocess.Popen(cmd, stdout=self._log_file, stderr=subprocess.STDOUT) def SignalAndWait(self): self._device.KillAll('perf', signum=signal.SIGINT) self._perf_process.wait() self._perf_control.SetDefaultPerfMode() def _FailWithLog(self, msg): self._log_file.seek(0) log = self._log_file.read() raise RuntimeError('%s. 
Log output:\n%s' % (msg, log)) def PullResult(self, output_path): if not self._device.FileExists(self._output_file.name): self._FailWithLog('Perf recorded no data') perf_profile = os.path.join(output_path, os.path.basename(self._output_file.name)) self._device.PullFile(self._output_file.name, perf_profile) if not os.stat(perf_profile).st_size: os.remove(perf_profile) self._FailWithLog('Perf recorded a zero-sized file') self._log_file.close() self._output_file.close() return perf_profile class PerfProfilerController(controllers.BaseController): def __init__(self, device, categories): controllers.BaseController.__init__(self) self._device = device self._categories = categories self._perf_binary = self._PrepareDevice(device) self._perf_instance = None def __repr__(self): return 'perf profile' @staticmethod def IsSupported(): return bool(android_profiling_helper) @staticmethod def _PrepareDevice(device): if not 'BUILDTYPE' in os.environ: os.environ['BUILDTYPE'] = 'Release' return android_profiling_helper.PrepareDeviceForPerf(device) @classmethod def GetCategories(cls, device): perf_binary = cls._PrepareDevice(device) return device.RunShellCommand('%s list' % perf_binary) def StartTracing(self, _): self._perf_instance = _PerfProfiler(self._device, self._perf_binary, self._categories) def StopTracing(self): if not self._perf_instance: return self._perf_instance.SignalAndWait() @staticmethod def _GetInteractivePerfCommand(perfhost_path, perf_profile, symfs_dir, required_libs, kallsyms): cmd = '%s report -n -i %s --symfs %s --kallsyms %s' % ( os.path.relpath(perfhost_path, '.'), perf_profile, symfs_dir, kallsyms)
for lib in required_libs: lib = os.path.join(symfs_dir, lib[1:]) if not os.path.exists(lib): continue objdump_path = android_profiling_helper.GetToolchainBinaryPath( lib, 'objdump') if objdump_path:
cmd += ' --objdump %s' % os.path.relpath(objdump_path, '.') break return cmd def PullTrace(self): symfs_dir = os.path.join(tempfile.gettempdir(), os.path.expandvars('$USER-perf-symfs')) if not os.path.exists(symfs_dir): os.makedirs(symfs_dir) required_libs = set() # Download the recorded perf profile. perf_profile = self._perf_instance.PullResult(symfs_dir) required_libs = \ android_profiling_helper.GetRequiredLibrariesForPerfProfile( perf_profile) if not required_libs: logging.warning('No libraries required by perf trace. Most likely there ' 'are no samples in the trace.') # Build a symfs with all the necessary libraries. kallsyms = android_profiling_helper.CreateSymFs(self._device, symfs_dir, required_libs, use_symlinks=False) perfhost_path = support_binaries.FindPath( android_profiling_helper.GetPerfhostName(), 'x86_64', 'linux') ui.PrintMessage('\nNote: to view the profile in perf, run:') ui.PrintMessage(' ' + self._GetInteractivePerfCommand(perfhost_path, perf_profile, symfs_dir, required_libs, kallsyms)) # Convert the perf profile into JSON. perf_script_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'third_party', 'perf_to_tracing.py') json_file_name = os.path.basename(perf_profile) with open(os.devnull, 'w') as dev_null, \ open(json_file_name, 'w') as json_file: cmd = [perfhost_path, 'script', '-s', perf_script_path, '-i', perf_profile, '--symfs', symfs_dir, '--kallsyms', kallsyms] if subprocess.call(cmd, stdout=json_file, stderr=dev_null): logging.warning('Perf data to JSON conversion failed. The result will ' 'not contain any perf samples. You can still view the ' 'perf data manually as shown above.') return None return json_file_name
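A hedged sketch of the controller lifecycle as this module defines it; constructing the device object (via pylib) is omitted and the category name is illustrative:

controller = PerfProfilerController(device, ['cpu-cycles'])
controller.StartTracing(None)  # the argument is ignored by this controller
# ... exercise the app while perf records ...
controller.StopTracing()
trace_json = controller.PullTrace()  # path to the converted JSON trace, or None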
wincent/ultisnips
test/vim_interface.py
Python
gpl-3.0
6,882
0.000436
# encoding: utf-8 import os import re import shutil import subprocess import tempfile import textwrap import time from test.constant import (ARR_D, ARR_L, ARR_R, ARR_U, BS, ESC, PYTHON3, SEQUENCES) def wait_until_file_exists(file_path, times=None, interval=0.01): while times is None or times: if os.path.exists(file_path): return True time.sleep(interval) if times is not None: times -= 1 return False def read_text_file(filename): """Reads the contents of a text file.""" if PYTHON3: return open(filename, 'r', encoding='utf-8').read() else: return open(filename, 'r').read() def is_process_running(pid): """Returns true if a process with pid is running, false otherwise.""" # from # http://stackoverflow.com/questions/568271/how-to-check-if-there-exists-a-process-with-a-given-pid try: os.kill(pid, 0) except OSError: return False else: return True def silent_call(cmd): """Calls 'cmd' and returns the exit value.""" return subprocess.call(cmd, stderr=subprocess.PIPE, stdout=subprocess.PIPE) def create_directory(dirname): """Creates 'dirname' and its parents if it does not exist.""" try: os.makedirs(dirname) except OSError: pass class TempFileManager(object): def __init__(self, name=''): self._temp_dir = tempfile.mkdtemp(prefix='UltiSnipsTest_' + name) def name_temp(self, file_path): return os.path.join(self._temp_dir, file_path) def write_temp(self, file_path, content): abs_path = self.name_temp(file_path) create_directory(os.path.dirname(abs_path)) if PYTHON3: with open(abs_path, 'w', encoding='utf-8') as f: f.write(content) else: with open(abs_path, 'w') as f: f.write(content) return abs_path def unique_name_temp(self, suffix='', prefix=''): file_handler, abspath = tempfile.mkstemp( suffix, prefix, self._temp_dir) os.close(file_handler) os.remove(abspath) return abspath def clear_temp(self): shutil.rmtree(self._temp_dir) create_directory(self._temp_dir) class VimInterface(TempFileManager): def __init__(self, vim_executable, name): TempFileManager.__init__(self, name) self._vim_executable = vim_executable def get_buffer_data(self): buffer_path = self.unique_name_temp(prefix='buffer_') self.send(ESC + ':w! %s\n' % buffer_path) if wait_until_file_exists(buffer_path, 50): return read_text_file(buffer_path)[:-1] def send(self, s): raise NotImplementedError() def launch(self, config=[]): pid_file = self.name_temp('vim.pid') done_file = self.name_temp('loading_done') if os.path.exists(done_file): os.remove(done_file) post_config = [] post_config.append('%s << EOF' % ('py3' if PYTHON3 else 'py')) post_config.append('import vim') post_config.append( "with open('%s', 'w') as pid_file: pid_file.write(vim.eval('getpid()'))" % pid_file) post_config.append( "with open('%s', 'w') as done_file: pass" % done_file) post_config.append('EOF') config_path = self.write_temp('vim_config.vim', textwrap.dedent(os.linesep.join(config + post_config) + '\n')) # Note the space to exclude it from shell history. self.send(""" %s -u %s\r\n""" % (self._vim_executable, config_path)) wait_until_file_exists(done_file) self._vim_pid = int(open(pid_file, 'r').read()) def leave_with_wait(self): self.send(3 * ESC + ':qa!\n') while is_process_running(self._vim_pid): time.sleep(.05) class VimInterfaceTmux(VimInterface): def __init__(self, vim_executable, session): VimInterface.__init__(self, vim_executable, 'Tmux') self.session = session self._check_version() def send(self, s): # I did not find any documentation on what needs escaping when sending # to tmux, but it seems like this is all that is needed for now.
s = s.replace(';', r'\;') if PYTHON3: s = s.encode('utf-8') silent_call(['tmux', 'send-keys', '-t', self.session, '-l', s]) def _check_version(self): stdout, _ = subprocess.Popen(['tmux', '-V'], stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate() if PYTHON3: stdout = stdout.decode('utf-8') m = re.match(r"tmux (\d+).(\d+)", stdout) if not m or not (int(m.group(1)), int(m.group(2))) >= (1, 8): raise RuntimeError( 'Need at least tmux 1.8, you have %s.' % stdout.strip()) class VimInterfaceWindows(VimInterface): BRACES = re.compile('([}{])') WIN_ESCAPES = ['+', '^', '%', '~', '[', ']', '<', '>', '(', ')'] WIN_REPLACES = [ (BS, '{BS}'), (ARR_L, '{LEFT}'), (ARR_R, '{RIGHT}'), (ARR_U, '{UP}'), (ARR_D, '{DOWN}'), ('\t', '{TAB}'), ('\n', '~'), (ESC, '{ESC}'), # On my system ` waits for a second keystroke, so `+SPACE = "`". On # most systems,
`+Space = "` ". I work around this, by sending the host # ` as `+_+BS. Awkward, but the only way I found to get this working. ('`', '`_{BS}'), ('´', '´_{BS}'), ('{^}', '{^}_{BS}'), ] def __init__(self): # import windows specific modules
import win32com.client import win32gui self.win32gui = win32gui self.shell = win32com.client.Dispatch('WScript.Shell') def is_focused(self, title=None): cur_title = self.win32gui.GetWindowText( self.win32gui.GetForegroundWindow()) if (title or '- GVIM') in cur_title: return True return False def focus(self, title=None): if not self.shell.AppActivate(title or '- GVIM'): raise Exception('Failed to switch to GVim window') time.sleep(1) def convert_keys(self, keys): keys = self.BRACES.sub(r"{\1}", keys) for k in self.WIN_ESCAPES: keys = keys.replace(k, '{%s}' % k) for f, r in self.WIN_REPLACES: keys = keys.replace(f, r) return keys def send(self, keys): keys = self.convert_keys(keys) if not self.is_focused(): time.sleep(2) self.focus() if not self.is_focused(): # This is the only way I can find to stop test execution raise KeyboardInterrupt('Failed to focus GVIM') self.shell.SendKeys(keys)
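A minimal usage sketch of the tmux-backed interface above, assuming a tmux session named "vimtest" already exists and a `vim` binary is on PATH (both assumptions, not part of the original file):

# Usage sketch (assumed: tmux session "vimtest" is running, `vim` is on PATH).
vim = VimInterfaceTmux('vim', 'vimtest')
vim.launch(config=['set noswapfile'])   # writes a temp vimrc and polls for the done-file
vim.send('iHello from the harness' + ESC)
print(vim.get_buffer_data())            # round-trips the buffer through a temp file
vim.leave_with_wait()                   # sends :qa! and polls until the Vim process exits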
ypid/series60-remote
pc/ui/ui_calendar_edit.py
Python
gpl-2.0
14,073
0.003837
# -*- coding: utf-8 -*- # Form implementation generated from reading ui file 'ui/src/calendar_edit.ui' # # Created: Wed Nov 17 12:05:53 2010 # by: PyQt4 UI code generator 4.7.3 # # WARNING! All changes made in this file will be lost! from PyQt4 import QtCore, QtGui class Ui_CalendarEntryEdit(object): def setupUi(self, CalendarEntryEdit): CalendarEntryEdit.setObjectName("CalendarEntryEdit") CalendarEntryEdit.resize(543, 313) CalendarEntryEdit.setWindowTitle("New calendar entry") icon = QtGui.QIcon() icon.addPixmap(QtGui.QPixmap(":/view-calendar"), QtGui.QIcon.Normal, QtGui.QIcon.Off) CalendarEntryEdit.setWindowIcon(icon) self.verticalLayout = QtGui.QVBoxLayout(CalendarEntryEdit) self.verticalLayout.setContentsMargins(-1, -1, -1, 6) self.verticalLayout.setObjectName("verticalLayout") self.formLayout = QtGui.QFormLayout() self.formLayout.setFieldGrowthPolicy(QtGui.QFormLayout.ExpandingFieldsGrow) self.formLayout.setContentsMargins(10, 10, 15, 15) self.formLayout.setHorizontalSpacing(3) self.formLayout.setVerticalSpacing(6) self.formLayout.setObjectName("formLayout") self.label = QtGui.QLabel(CalendarEntryEdit) font = QtGui.QFont() font.setWeight(75) font.setBold(True) self.label.setFont(font) self.label.setObjectName("label") self.formLayout.setWidget(0, QtGui.QFormLayout.LabelRole, self.label) self.titleLine = QtGui.QLineEdit(CalendarEntryEdit) self.titleLine.setObjectName("titleLine") self.formLayout.setWidget(0, QtGui.QFormLayout.FieldRole, self.titleLine) self.label_2 = QtGui.QLabel(CalendarEntryEdit) self.label_2.setObjectName("label_2") self.formLayout.setWidget(1, QtGui.QFormLayout.LabelRole, self.label_2) self.locationLine = QtGui.QLineEdit(CalendarEntryEdit) self.locationLine.setObjectName("locationLine") self.formLayout.setWidget(1, QtGui.QFormLayout.FieldRole, self.locationLine) self.label_3 = QtGui.QLabel(CalendarEntryEdit) self.label_3.setObjectName("label_3") self.formLayout.setWidget(2, QtGui.QFormLayout.LabelRole, self.label_3) self.horizontalLayout_5 = QtGui.QHBoxLayout() self.horizontalLayout_5.setObjectName("horizontalLayout_5") self.startDate = QtGui.QDateEdit(CalendarEntryEdit) self.startDate.setCalendarPopup(True) self.startDate.setObjectName("startDate") self.horizontalLayout_5.addWidget(self.startDate) self.startTime = QtGui.QTimeEdit(CalendarEntryEdit) self.startTime.setObjectName("startTime") self.horizontalLayout_5.addWidget(self.startTime) self.formLayout.setLayout(2, QtGui.QFormLayout.FieldRole, self.horizontalLayout_5) self.label_4 = QtGui.QLabel(CalendarEntryEdit) self.label_4.setObjectName("label_4") self.formLayout.setWidget(3, QtGui.QFormLayout.LabelRole, self.label_4) self.horizontalLayout_6 = QtGui.QHBoxLayout() self.horizontalLayout_6.setObjectName("horizontalLayout_6") self.endDate = QtGui.QDateEdit(CalendarEntryEdit) self.endDate.setCalendarPopup(True) self.endDate.setObjectName("endDate") self.horizontalLayout_6.addWidget(self.endDate) self.endTime = QtGui.QTimeEdit(CalendarEntryEdit) self.endTime.setObjectName("endTime") self.horizontalLayout_6.addWidget(self.endTime) self.formLayout.setLayout(3, QtGui.QFormLayout.FieldRole, self.horizontalLayout_6) self.horizontalLayout_4 = QtGui.QHBoxLayout() self.horizontalLayout_4.setContentsMargins(4, -1, -1, -1) self.horizontalLayout_4.setObjectName("horizontalLayout_4") self.recurrenceLabel = QtGui.QLabel(CalendarEntryEdit) self.recurrenceLabel.setObjectName("recurrenceLabel") self.horizontalLayout_4.addWidget(self.recurrenceLabel) self.recurrenceButton = QtGui.QPushButton(CalendarEntryEdit) 
self.recurrenceButton.setObjectName("recurrenceButton") self.horizontalLayout_4.addWidget(self.recurrenceButton) self.formLayout.setLayout(4, QtGui.QFormLayout.FieldRole, self.horizontalLayout_4) self.label_6 = QtGui.QLabel(CalendarEntryEdit) self.label_6.setObjectName("label_6") self.formLayout.setWidget(5, QtGui.QFormLayout.LabelRole, self.label_6) self.horizontalLayout_3 = QtGui.QHBoxLayout() self.horizontalLayout_3.setContentsMargins(4, -1, -1, -1) self.horizontalLayout_3.setObjectName("horizontalLayout_3") self.reminderCheckBox = QtGui.QCheckBox(CalendarEntryEdit) self.reminderCheckBox.setObjectName("reminderCheckBox") self.horizontalLayout_3.addWidget(self.reminderCheckBox) self.reminderStack = QtGui.QStackedWidget(CalendarEntryEdit) self.reminderStack.setObjectName("reminderStack") self.basicReminderWidget = QtGui.QWidget() self.basicReminderWidget.setObjectName("basicReminderWidget") self.horizontalLayout = QtGui.QHBoxLayout(self.basicReminderWidget) self.horizontalLayout.setSpacing(0) self.horizontalLayout.setContentsMargins(0, 0, 5, 0) self.horizontalLayout.setObjectName("horizontalLayout") self.reminderTimeBox = QtGui.QSpinBox(self.basicReminderWidget) self.reminderTimeBox.setEnabled(False) self.reminderTimeBox.setMinimumSize(QtCore.QSize(70, 0)) self.reminderTimeBox.setMaximum(500) self.reminderTimeBox.setObjectName("reminderTimeBox") self.horizontalLayout.addWidget(self.reminderTimeBox) self.reminderUnitBox = QtGui.QComboBox(self.basicReminderWidget) self.reminderUnitBox.setEnabled(False) self.reminderUnitBox.setMinimumSize(QtCore.QSize(110, 0))
self.reminderUnitBox.setObjectName("reminderUnitBox") self.horizontalLayout.addWidget(self.reminderUnitBox) self.reminderStack.addWidget(self.basicReminderWidget)
self.advancedReminderWidget = QtGui.QWidget() self.advancedReminderWidget.setObjectName("advancedReminderWidget") self.horizontalLayout_2 = QtGui.QHBoxLayout(self.advancedReminderWidget) self.horizontalLayout_2.setSpacing(0) self.horizontalLayout_2.setContentsMargins(0, 0, 5, 0) self.horizontalLayout_2.setObjectName("horizontalLayout_2") self.reminderDateTime = QtGui.QDateTimeEdit(self.advancedReminderWidget) self.reminderDateTime.setEnabled(False) self.reminderDateTime.setObjectName("reminderDateTime") self.horizontalLayout_2.addWidget(self.reminderDateTime) self.reminderStack.addWidget(self.advancedReminderWidget) self.horizontalLayout_3.addWidget(self.reminderStack) self.reminderAdvancedButton = QtGui.QPushButton(CalendarEntryEdit) self.reminderAdvancedButton.setEnabled(False) self.reminderAdvancedButton.setCheckable(True) self.reminderAdvancedButton.setObjectName("reminderAdvancedButton") self.horizontalLayout_3.addWidget(self.reminderAdvancedButton) spacerItem = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum) self.horizontalLayout_3.addItem(spacerItem) self.formLayout.setLayout(5, QtGui.QFormLayout.FieldRole, self.horizontalLayout_3) self.label_7 = QtGui.QLabel(CalendarEntryEdit) self.label_7.setObjectName("label_7") self.formLayout.setWidget(6, QtGui.QFormLayout.LabelRole, self.label_7) self.priorityBox = QtGui.QComboBox(CalendarEntryEdit) self.priorityBox.setMinimumSize(QtCore.QSize(150, 0)) self.priorityBox.setObjectName("priorityBox") self.formLayout.setWidget(6, QtGui.QFormLayout.FieldRole, self.priorityBox) self.label_8 = QtGui.QLabel(CalendarEntryEdit) self.label_8.setObjectName("label_8") self.formLayout.setWidget(7, QtGui.QFormLayout.LabelRole, self.label_8) self.accessBox = QtGui.QComboBox(CalendarEntryEdit) self.accessBox.setMinimumSize(QtCore.QSize(150, 0)) self.accessBox.
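The `Ui_*` classes that pyuic4 generates (the one above is excerpted and cut off mid-`setupUi`) are consumed by instantiating them next to a live widget; a minimal sketch, assuming PyQt4 is installed and using an illustrative dialog variable:

# Standard pyuic4 consumption pattern (PyQt4 assumed installed; names illustrative).
import sys
from PyQt4 import QtGui

app = QtGui.QApplication(sys.argv)
dialog = QtGui.QDialog()
ui = Ui_CalendarEntryEdit()
ui.setupUi(dialog)     # builds the generated widget tree onto the dialog
dialog.show()
sys.exit(app.exec_())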
benjyw/pants
src/python/pants/backend/awslambda/python/register.py
Python
apache-2.0
577
0.001733
# Copyright 2019 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). """Create AWS Lambdas from Python code. See https://www.pantsbuild.org/docs/awslambda-python. """ from pants.backend.awslambda.python import rules as python_rules from pants.backend.awslambda.python.target_types import PythonAWSLambda from pants.backend.awslambda.python.target_types import rules as target_types_rules
def rules(): return (*python_rules.rules(), *target_types_rules()) def target_types(): return [PythonAWSLambda]
bouhlelma/smt
smt/sampling_methods/full_factorial.py
Python
bsd-3-clause
1,806
0.000554
""" Author: Dr. John T. Hwang <[email protected]> This package is distributed under New BSD license. Full-factorial sampling. """ import numpy as np from smt.sampling_methods.sampling_method import SamplingMethod class FullFactorial(SamplingMethod): def _initialize(self): self.options.declare( "weights", values=None, types=(list, np.ndarray), desc="relative sampling weights for each nx dimensions", ) self.options.declare( "clip", default=False, types=bool, desc="round number of samples to the sampling number product of each nx dimensions (> asked nt)", ) def _compute(self, nt): """ Compute the requested number of sampling points. Arguments --------- nt : int Number of points requested. Returns ------- ndarray[nt, nx] The sampling locations in the input space. """ xlimits = self.options["xlimits"] nx = xlimits.shape[0] if self.options["weights"] is None: weights = np.ones(nx) / nx else: weights = np.at
least_1d(self.options["weights"]) weights /= np.sum(weights) num_list = np.ones(nx, int) while np.prod(num_list) < nt: ind = np.argmax(weights - num_list / np.sum(num_list)) num_list[ind] += 1 lins_list = [np.linspace(0.0, 1.0, num_list[kx]) for kx in range(nx)] x_list = np.meshgrid(*lins_list, indexing="ij") if self.options["clip"]: nt = np.prod(num_list) x = n
p.zeros((nt, nx)) for kx in range(nx): x[:, kx] = x_list[kx].reshape(np.prod(num_list))[:nt] return x
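A minimal usage sketch of the sampler above, following smt's documented calling convention (construct with `xlimits`, call the object with the number of points); the limits below are illustrative:

# Usage sketch (smt convention: a sampling method is called with nt).
import numpy as np

xlimits = np.array([[0.0, 4.0], [0.0, 3.0]])  # one [min, max] row per dimension
sampling = FullFactorial(xlimits=xlimits)
x = sampling(50)     # SamplingMethod.__call__ delegates to _compute(nt=50)
print(x.shape)       # (50, 2) here; with clip=True the full grid product is kept instead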
jameshensman/pythonGPLVM
PCA_EM.py
Python
gpl-3.0
6,841
0.05087
# -*- coding: utf-8 -*- # Copyright 2009 James Hensman # Licensed under the Gnu General Public license, see COPYING from numpy import matlib as ml import numpy as np from scipy import linalg class PCA_EM_matrix: def __init__(self,data,target_dim): """Maximum likelihood PCA by the EM algorithm""" self.X = ml.matrix(data) self.N,self.d = self.X.shape self.q = target_dim def learn(self,niters): self.mu = self.X.mean(0).reshape(self.d,1)#ML solution for mu self.X2 = self.X - self.mu.T self.xxTsum = ml.sum([x*x.T for x in self.X2])#precalculate for speed #initialise parameters: self.W = ml.randn(self.d,self.q) self.sigma2 = 1.2 for i in range(niters): #print self.sigma2 self.E_step() self.M_step() def E_step(self): M = self.W.T*self.W + ml.eye(self.q)*self.sigma2 M_inv = ml.linalg.inv(M) self.m_Z = (M_inv*self.W.T*self.X2.T).T self.S_z = M_inv*self.sigma2 def M_step(self): zzT = self.m_Z.T*self.m_Z + self.N*self.S_z self.W = self.X2.T*self.m_Z*ml.linalg.inv(zzT) WTW = self.W.T*self.W self.sigma2 = self.xxTsum - 2*ml.multiply(self.m_Z*self.W.T,self.X2).sum() + ml.trace(zzT*WTW) #self.sigma2 = self.xxTsum - 2*ml.trace(self.m_Z*self.W.T*self.X2.T) + ml.trace(zzT*WTW) #self.sigma2 = self.xxTsum + ml.sum([- 2*z*self.W.T*x.T + ml.trace((z.T*z + self.S_z)*WTW) for z,x in zip(self.m_Z, self.X2)]) self.sigma2 /= self.N*self.d class PCA_EM: def __init__(self,data,target_dim): """Maximum likelihood PCA by the EM algorithm""" self.X = np.array(data) self.N,self.d = self.X.shape self.q = target_dim def learn(self,niters): self.mu = self.X.mean(0).reshape(self.d,1)#ML solution for mu self.X2 = self.X - self.mu.T self.xxTsum = np.sum([np.dot(x,x.T) for x in self.X2])#precalculate for speed #initialise parameters: self.W = np.random.randn(self.d,self.q) self.sigma2 = 1.2 for i in range(niters): #print self.sigma2 self.E_step() self.M_step() def E_step(self): M = np.dot(self.W.T,self.W) + np.eye(self.q)*self.sigma2 #M_inv = np.linalg.inv(M) #self.m_Z = np.dot(M_inv,np.dot(self.W.T,self.X2.T)).T #self.S_z = M_inv*self.sigma2 M_chol = linalg.cholesky(M) M_inv = linalg.cho_solve((M_chol,1),np.eye(self.q)) self.m_Z = linalg.cho_solve((M_chol,1),np.dot(self.W.T,self.X2.T)).T self.S_z = M_inv*self.sigma2 def M_step(self): zzT = np.dot(self.m_Z.T,self.m_Z) + self.N*self.S_z #self.W = np.dot(np.dot(self.X2.T,self.m_Z),np.linalg.inv(zzT)) zzT_chol = linalg.cholesky(zzT) self.W = linalg.cho_solve((zzT_chol,0),np.dot(self.m_Z.T,self.X2)).T WTW = np.dot(self.W.T,self.W) self.sigma2 = self.xxTsum - 2*np.sum(np.dot(self.m_Z,self.W.T)*self.X2) + np.trace(np.dot(zzT,WTW)) self.sigma2 /= self.N*self.d class PCA_EM_missing: def __init__(self,data,target_dim): """Maximum likelihood PCA by the EM algorithm, allowing for missing data. Missing entries of X are marked as NaN and tracked by their indices""" self.X = np.array(data) self.imask,self.jmask = np.nonzero(np.isnan(self.X))#positions that are missing. self.indices = [np.nonzero(~np.isnan(x))[0] for x in self.X] #positions that are not missing... 
self.N,self.d = self.X.shape self.q = target_dim def learn(self,niters): self.Xreconstruct = self.X.copy() self.Xreconstruct[self.imask,self.jmask] = 0 self.mu = np.sum(self.Xreconstruct,0)/(self.X.shape[0]-np.sum(np.isnan(self.X),0)) self.X2 = self.X.copy()-self.mu self.X2reconstruct = self.X.copy() - self.mu #initialise parameters: self.W = np.random.randn(self.d,self.q) self.sigma2 = 1.2 #pre-allocate self.m_Z and self.S_Z self.m_Z = np.zeros((self.X2.shape[0],self.q)) self.S_Z = np.zeros((self.X2.shape[0],self.q,self.q)) for i in range(niters): print(i, self.sigma2) self.E_step() self.M_step() self.Xreconstruct = self.X2reconstruct + self.mu def E_step(self): """ This should handle missing data, but needs testing (TODO)""" Ms = np.zeros((self.X.shape[0],self.q,self.q)) #M is going to be different for (potentially) every data point for m,x,i,mz,sz in zip(Ms,self.X2,self.indices,self.m_Z,self.S_Z): W = self.W.take(i,0)# get relevant bits of W x2 = np.array(x).take(i) # get relevant bits of x m[:,:] = np.dot(W.T,W) + np.eye(self.q)*self.sigma2 mchol = linalg.cholesky(m) minv = linalg.cho_solve((mchol,1),np.eye(self.q)) mz[:] = linalg.cho_solve((mchol,1),np.dot(W.T,x2.reshape(i.size,1))).T sz[:,:] = minv*self.sigma2 #calculate reconstructed X values self.X2reconstruct[self.imask,self.jmask] = np.dot(self.m_Z,self.W.T)[self.imask,self.jmask] self.xxTsum = np.sum(np.square(self.X2reconstruct))# can't be pre-calculated in the missing data version :( def M_step(self): """ This should handle missing data - needs testing (TODO)""" zzT = np.dot(self.m_Z.T,self.m_Z) + np.sum(self.S_Z,0) #self.W = np.dot(np.dot(self.X2.T,self.m_Z),np.linalg.inv(zzT)) zzT_chol = linalg.cholesky(zzT) self.W = linalg.cho_solve((zzT_chol,0),np.dot(self.m_Z.T,self.X2reconstruct)).T WTW = np.dot(self.W.T,self.W) self.sigma2 = self.xxTsum - 2*np.sum(np.dot(self.m_Z,self.W.T)*self.X2reconstruct) + np.trace(np.dot(zzT,WTW)) self.sigma2 /= self.N*self.d if __name__=='__main__': q=5#latent dimensions d=15# observed dimensions N=500 missing_pc = 100 # percentage of the data points to be 'missing' truesigma = .002 niters = 300 phases = np.random.rand(1,q)*2*np.pi frequencies = np.random.randn(1,q)*2 latents = np.sin(np.linspace(0,12,N).reshape(N,1)*frequencies-phases) trueW = np.random.randn(d,q) observed = np.dot(latents,trueW.T) + np.random.randn(N,d)*truesigma #PCA without missing values a = PCA_EM(observed,q) a.learn(niters) #a missing data problem Nmissing = int(N*missing_pc/100)
observed2 = observed.copy() missingi = np.argsort(np.random.rand(N))[:Nmissing] missingj = np.random.randint(0,d-q,Nmissing)#last q columns will be complete observed2[missingi,missingj] = np.NaN b = PCA_EM_missing(observed2,q)
b.learn(niters) from hinton import hinton import pylab colours = np.arange(N)# to colour the dots with hinton(linalg.qr(trueW.T)[1].T) pylab.title('true transformation') pylab.figure() hinton(linalg.qr(a.W.T)[1].T) pylab.title('reconstructed transformation') pylab.figure() hinton(linalg.qr(b.W.T)[1].T) pylab.title('reconstructed transformation (missing data)') pylab.figure() pylab.subplot(3,1,1) pylab.plot(latents) pylab.title('true latents') pylab.subplot(3,1,2) pylab.plot(a.m_Z) pylab.title('reconstructed latents') pylab.subplot(3,1,3) pylab.plot(b.m_Z) pylab.title('reconstructed latents (missing data)') pylab.figure() pylab.subplot(2,1,1) pylab.plot(observed) pylab.title('Observed values') pylab.subplot(2,1,2) pylab.plot(observed2,linewidth=2,marker='.') pylab.plot(b.Xreconstruct) pylab.show()
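The `__main__` block above is the full demo; a condensed sketch of the core object protocol, with shapes taken from the constructor (N x d data, q latent dimensions):

# Condensed usage sketch (shapes follow the constructor: X is N x d).
import numpy as np

X = np.random.randn(200, 10)    # 200 observations in 10 dimensions
pca = PCA_EM(X, 2)              # q = 2 latent dimensions
pca.learn(100)                  # run 100 EM iterations
Z = pca.m_Z                     # (200, 2) posterior means of the latents
X_hat = np.dot(Z, pca.W.T) + pca.mu.T   # reconstruction back in data space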
mnunberg/couchbase-python-client
txcouchbase/iops.py
Python
apache-2.0
3,633
0.001376
from twisted.internet import error as TxErrors import couchbase._libcouchbase as LCB from couchbase._libcouchbase import ( Event, TimerEvent, IOEvent, LCB_READ_EVENT, LCB_WRITE_EVENT, LCB_RW_EVENT, PYCBC_EVSTATE_ACTIVE, PYCBC_EVACTION_WATCH, PYCBC_EVACTION_UNWATCH, PYCBC_EVACTION_CLEANUP ) class TxIOEvent(IOEvent): """ IOEvent is a class implemented in C. It exposes a 'fileno()' method, so we don't have to. """ __slots__ = [] def __init__(self): super(TxIOEvent, self).__init__() def doRead(self): self.ready_r() def doWrite(self):
self.ready_w()
def connectionLost(self, reason): if self.state == PYCBC_EVSTATE_ACTIVE: self.ready_w() def logPrefix(self): return "Couchbase IOEvent" class TxTimer(TimerEvent): __slots__ = ['_txev', 'lcb_active'] def __init__(self): super(TxTimer, self).__init__() self.lcb_active = False self._txev = None def _timer_wrap(self): if not self.lcb_active: return self.lcb_active = False self.ready(0) def schedule(self, usecs, reactor): nsecs = usecs / 1000000.0 if not self._txev or not self._txev.active(): self._txev = reactor.callLater(nsecs, self._timer_wrap) else: self._txev.reset(nsecs) self.lcb_active = True def cancel(self): self.lcb_active = False def cleanup(self): if not self._txev: return try: self._txev.cancel() except (TxErrors.AlreadyCalled, TxErrors.AlreadyCancelled): pass self._txev = None class v0Iops(object): """ IOPS Implementation to be used with Twisted's "FD" based reactors """ __slots__ = [ 'reactor', 'is_sync', '_stop' ] def __init__(self, reactor, is_sync=False): self.reactor = reactor self.is_sync = is_sync self._stop = False def update_event(self, event, action, flags): """ Called by libcouchbase to add/remove event watchers """ if action == PYCBC_EVACTION_UNWATCH: if event.flags & LCB_READ_EVENT: self.reactor.removeReader(event) if event.flags & LCB_WRITE_EVENT: self.reactor.removeWriter(event) elif action == PYCBC_EVACTION_WATCH: if flags & LCB_READ_EVENT: self.reactor.addReader(event) if flags & LCB_WRITE_EVENT: self.reactor.addWriter(event) if flags & LCB_READ_EVENT == 0: self.reactor.removeReader(event) if flags & LCB_WRITE_EVENT == 0: self.reactor.removeWriter(event) def update_timer(self, timer, action, usecs): """ Called by libcouchbase to add/remove timers """ if action == PYCBC_EVACTION_WATCH: timer.schedule(usecs, self.reactor) elif action == PYCBC_EVACTION_UNWATCH: timer.cancel() elif action == PYCBC_EVACTION_CLEANUP: timer.cleanup() def io_event_factory(self): return TxIOEvent() def timer_event_factory(self): return TxTimer() def start_watching(self): """ Start/Stop operations. This is a no-op in twisted because it's a continuously running async loop """ if not self.is_sync: return self._stop = False while not self._stop: self.reactor.doIteration(0) def stop_watching(self): self._stop = True
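A wiring sketch for the classes above; in practice the txcouchbase connection drives these hooks itself, so this is only an illustration of the factory and timer contract, not a supported entry point:

# Illustration only: how the reactor-facing pieces fit together (assumed usage;
# the real driver is the txcouchbase connection, not user code).
from twisted.internet import reactor

iops = v0Iops(reactor)
timer = iops.timer_event_factory()   # a TxTimer
timer.schedule(500000, reactor)      # 500000 usecs -> reactor.callLater(0.5, ...)
timer.cancel()                       # clears lcb_active, so _timer_wrap becomes a no-op
timer.cleanup()                      # cancels the pending reactor call, if any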
eduNEXT/edunext-platform
import_shims/lms/certificates/apps.py
Python
agpl-3.0
374
0.008021
""
"Deprecated import support. Auto-generated by import_shims/generate_shims.sh.""" # pylint: disable=redefined-builtin,wrong-import-position,wildcard-import,useless-suppression,line-too-long from import_shims.warn import warn_deprecated_import warn_deprecated_import('certificates.apps', 'lms.djangoapps.certificates.apps') from lms.djangoapps.certificates.apps import *
crs4/ProMort
promort/reviews_manager/management/commands/build_prediction_reviews_worklist.py
Python
mit
5,988
0.00501
# Copyright (c) 2021, CRS4 # # Permission is hereby granted, free of charge, to any person obtaining a copy of # this software and associated documentation files (the "Software"), to deal in # the Software without restriction, including without limitation the rights to # use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of # the Software, and to permit persons to whom the Software is furnished to do so, # subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS # FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR # COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER # IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. from csv import DictReader, DictWriter from uuid import uuid4 import logging, sys from django.core.management.base import BaseCommand, CommandError from django.contrib.auth.models import Group from promort.settings import DEFAULT_GROUPS from predictions_manager.models import Prediction from reviews_manager.models import PredictionReview logger = logging.getLogger('promort_commands') class Command(BaseCommand): help = 'build Predictions reviews worklist' def add_arguments(self, parser): parser.add_argument('--prediction-type', choices=['TUMOR', 'GLEASON'], type=str, dest='prediction_type', help='the type of the Prediction objects that are going to be reviewed') parser.add_argument('--worklist-file', dest='worklist', type=str, default=None, help='a CSV file containing the worklist, if not present reviews will be assigned randomly') parser.add_argument('--allow-duplicated', action='store_true', dest='allow_duplicated', help='create worklist even for predictions that already have a related review') parser.add_argument('--report-file', dest='report_file', type=str, default=None, help='a CSV file containing a report of the created prediction reviews') def _get_prediction_reviews_manager_users(self): prev_manager_group = Group.objects.get(name=DEFAULT_GROUPS['prediction_manager']['name']) return prev_manager_group.user_set.all() def _get_predictions_list(self, prediction_type):
return Prediction.objects.filter(type=prediction_type, review_required=True).all() def _check_duplicated(self, prediction, reviewer): annotation_objs = PredictionReview.objects.filter(prediction=prediction, reviewer=reviewer)
if annotation_objs.count() > 0: logger.info('There are already %d reviews for prediction %s assigned to user %s', annotation_objs.count(), prediction.label, reviewer.username) return True else: return False def _create_prediction_annotation(self, prediction, reviewer, allow_duplicated): if not allow_duplicated: if self._check_duplicated(prediction, reviewer): return None prev_obj = PredictionReview( label=uuid4().hex, prediction=prediction, slide=prediction.slide, reviewer=reviewer ) prev_obj.save() return { 'review_id': prev_obj.id, 'slide': prev_obj.slide.id, 'prediction': prev_obj.prediction.label, 'review_label': prev_obj.label, 'reviewer': prev_obj.reviewer.username } def create_random_worklist(self, prediction_type, allow_duplicated, report_file=None): logger.info('Creating RANDOM worklist') prediction_rev_managers = self._get_prediction_reviews_manager_users() if len(prediction_rev_managers) < 1: raise CommandError('No prediction managers configured') predictions = self._get_predictions_list(prediction_type) for i, pred in enumerate(predictions): logger.info('Processing prediction %s', pred.label) pred_report = self._create_prediction_annotation(pred, prediction_rev_managers[i % len(prediction_rev_managers)], allow_duplicated) if report_file and pred_report: report_file.writerow(pred_report) def create_worklist_from_file(self, worklist_file, prediction_type, allow_duplicated, report_file=None): raise NotImplementedError() def handle(self, *args, **opts): logger.info('=== Starting Predictions Reviews worklist creation ===') worklist_file = opts['worklist'] allow_duplicated = opts['allow_duplicated'] if opts['report_file']: report_file = open(opts['report_file'], 'w') report_writer = DictWriter(report_file, ['review_id', 'review_label', 'slide', 'prediction', 'reviewer']) report_writer.writeheader() else: report_writer = None try: if worklist_file: self.create_worklist_from_file(worklist_file, opts['prediction_type'], allow_duplicated, report_writer) else: self.create_random_worklist(opts['prediction_type'], allow_duplicated, report_writer) except CommandError as cme: logger.error('A problem occurred while building the worklist, exit') sys.exit(cme) if report_writer: report_file.close() logger.info('=== Prediction Reviews worklist creation completed ===')
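Management commands like this one are run through manage.py or, programmatically, Django's standard `call_command`; a sketch with option names taken from `add_arguments` above (the report path is illustrative):

# Invocation sketch using Django's call_command (report path is hypothetical).
from django.core.management import call_command

call_command(
    'build_prediction_reviews_worklist',
    prediction_type='TUMOR',
    report_file='/tmp/prediction_reviews.csv',
    allow_duplicated=False,
)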
moodpulse/l2
integration_framework/views.py
Python
mit
76,926
0.002275
import base64 import os from django.test import Client as TC import datetime import logging import pytz from django.utils.module_loading import import_string from api.directions.sql_func import direction_by_card, get_lab_podr, get_confirm_direction_patient_year, get_type_confirm_direction from api.stationar.stationar_func import desc_to_data from api.views import mkb10_dict from clients.utils import find_patient from directory.utils import get_researches_details, get_can_created_patient from doctor_schedule.views import get_hospital_resource, get_available_hospital_plans, check_available_hospital_slot_before_save from integration_framework.authentication import can_use_schedule_only from laboratory import settings from plans.models import PlanHospitalization, PlanHospitalizationFiles, Messages from podrazdeleniya.models import Podrazdeleniya import random from collections import defaultdict import re import time import petrovna import simplejson as json from dateutil.relativedelta import relativedelta from django.db import transaction from django.db.models import Q, Prefetch from django.http import JsonResponse from django.utils import timezone from rest_framework.decorators import api_view, authentication_classes, permission_classes, parser_classes from rest_framework.parsers import JSONParser, FormParser, MultiPartParser from rest_framework.response import Response import directions.models as directions from appconf.manager import SettingManager from clients.models import Individual, Card from clients.sql_func import last_results_researches_by_time_ago from directory.models import Researches, Fractions, ReleationsFT from doctor_call.models import DoctorCall from hospitals.models import Hospitals from laboratory.settings import ( AFTER_DATE, CENTRE_GIGIEN_EPIDEMIOLOGY, MAX_DOC_CALL_EXTERNAL_REQUESTS_PER_DAY, REGION, SCHEDULE_AGE_LIMIT_LTE, LK_FORMS, LK_USER, LK_FILE_SIZE_BYTES, LK_FILE_COUNT, ) from laboratory.utils import current_time, strfdatetime from refprocessor.result_parser import ResultRight from researches.models import Tubes from results.sql_func import get_laboratory_results_by_directions, get_not_confirm_direction from rmis_integration.client import Client from slog.models import Log from tfoms.integration import match_enp, match_patient, get_ud_info_by_enp, match_patient_by_snils, get_dn_info_by_enp from users.models import DoctorProfile from utils.common import values_as_structure_data from utils.data_verification import data_parse from utils.dates import normalize_date, valid_date, try_strptime from utils.xh import check_type_research, short_fio_dots from . 
import sql_if from directions.models import DirectionDocument, DocumentSign, Napravleniya from .models import CrieOrder, ExternalService from laboratory.settings import COVID_RESEARCHES_PK from .utils import get_json_protocol_data, get_json_labortory_data, check_type_file from django.contrib.auth.models import User logger = logging.getLogger("IF") @api_view() def next_result_direction(request): from_pk = request.GET.get("fromPk") after_date = request.GET.get("afterDate") only_signed = request.GET.get("onlySigned") if after_date == '0': after_date = AFTER_DATE next_n = int(request.GET.get("nextN", 1)) type_researches = request.GET.get("research", '*') d_start = f'{after_date}' is_research = 1 researches = [-999] if type_researches == 'lab': researches = [x.pk for x in Researches.objects.filter(podrazdeleniye__p_type=Podrazdeleniya.LABORATORY)] elif type_researches != '*': researches = [int(i) for i in type_researches.split(',')] else: is_research = -1 if only_signed == '1': # TODO: return only signed directions and use the signing date, not the confirmation date, as next_time # flag – eds_total_signed=True, the datetime of full signing is eds_total_signed_at dirs = sql_if.direction_collect(d_start, researches, is_research, next_n) or [] else: dirs = sql_if.direction_collect(d_start, researches, is_research, next_n) or [] next_time = None naprs = [d[0] for d in dirs] if dirs: next_time = dirs[-1][3] return Response({"next": naprs, "next_time": next_time, "n": next_n, "fromPk": from_pk, "afterDate": after_date})
@api_view() def get_dir_amd(request): next_n = int(request.GET.get("nextN", 5))
dirs = sql_if.direction_resend_amd(next_n) result = {"ok": False, "next": []} if dirs: result = {"ok": True, "next": [i[0] for i in dirs]} return Response(result) @api_view() def get_dir_n3(request): next_n = int(request.GET.get("nextN", 5)) dirs = sql_if.direction_resend_n3(next_n) result = {"ok": False, "next": []} if dirs: result = {"ok": True, "next": [i[0] for i in dirs]} return Response(result) @api_view() def resend_dir_l2(request): next_n = int(request.GET.get("nextN", 5)) dirs = sql_if.direction_resend_l2(next_n) result = {"ok": False, "next": []} if dirs: result = {"ok": True, "next": [i[0] for i in dirs]} return Response(result) @api_view() def resend_dir_crie(request): next_n = int(request.GET.get("nextN", 5)) dirs = sql_if.direction_resend_crie(next_n) result = {"ok": False, "next": []} if dirs: result = {"ok": True, "next": [i[0] for i in dirs]} return Response(result) @api_view() def result_amd_send(request): result = json.loads(request.GET.get("result")) resp = {"ok": False} if result['error']: for i in result['error']: dir_pk = int(i.split(':')[0]) directions.Napravleniya.objects.filter(pk=dir_pk).update(need_resend_amd=False, error_amd=True) resp = {"ok": True} if result['send']: for i in result['send']: data_amd = i.split(':') dir_pk = int(data_amd[0]) amd_num = data_amd[1] directions.Napravleniya.objects.filter(pk=dir_pk).update(need_resend_amd=False, amd_number=amd_num, error_amd=False) resp = {"ok": True} return Response(resp) @api_view() def direction_data(request): pk = request.GET.get("pk") research_pks = request.GET.get("research", '*') direction: directions.Napravleniya = directions.Napravleniya.objects.select_related('istochnik_f', 'client', 'client__individual', 'client__base').get(pk=pk) card = direction.client individual = card.individual iss = directions.Issledovaniya.objects.filter(napravleniye=direction, time_confirmation__isnull=False).select_related('research', 'doc_confirmation') if research_pks != '*': iss = iss.filter(research__pk__in=research_pks.split(',')) if not iss: return Response({"ok": False}) iss_index = random.randrange(len(iss)) signed_documents = [] if direction.eds_total_signed: last_time_confirm = direction.last_time_confirm() for d in DirectionDocument.objects.filter(direction=direction, last_confirmed_at=last_time_confirm): document = { 'type': d.file_type.upper(), 'content': base64.b64encode(d.file.read()).decode('utf-8'), 'signatures': [], } for s in DocumentSign.objects.filter(document=d): document['signatures'].append( { "content": s.sign_value.replace('\n', ''), "type": s.sign_type, "executor": s.executor.uploading_data, } ) signed_documents.append(document) return Response( { "ok": True, "pk": pk, "createdAt": direction.data_sozdaniya, "patient": { **card.get_data_individual(full_empty=True, only_json_serializable=True), "family": individual.family, "name": individual.name, "patronymic": individual.patronymic, "birthday": individual.birthday, "docs": card.get_n3_documents(), "sex": individual.sex,
yngcan/patentprocessor
test/test_alchemy.py
Python
bsd-2-clause
14,826
0.000742
import unittest import os import sys import shutil sys.path.append('../lib/') sys.path.append(os.path.dirname(os.path.realpath(__file__))) import alchemy from alchemy.schema import * class TestAlchemy(unittest.TestCase): def setUp(self): # this basically resets our testing database path = config.get('sqlite').get('path') shutil.copyfile('{0}/alchemy.raw'.format(path), '{0}/test.db'.format(path)) def tearDown(self): # we keep this to tidy up our database if it fails session.close() def test_raw_clean(self): # add a Clean record to mark something against asg0 = session.query(RawAssignee).limit(10) asg1 = session.query(RawAssignee).limit(10).offset(10) alchemy.match(asg0, session) alchemy.match(asg1, session) alchemy.match([asg0[0], asg1[0].assignee], session) def test_match_all(self): alchemy.match(session.query(RawAssignee), session) def test_set_default(self): # create two items loc = session.query(RawLocation) alchemy.match(loc, session) alchemy.match(loc[0], session, {"city": u"Frisco", "state": u"Cali", "country": u"US", "longitude": 10.0, "latitude": 10.0}) self.assertEqual("Frisco, Cali, US", loc[0].location.address) alchemy.match(loc[0], session, keepexisting=True) self.assertEqual("Frisco, Cali, US", loc[0].location.address) self.assertEqual(10.0, loc[0].location.latitude) self.assertEqual(10.0, loc[0].location.longitude) alchemy.match(loc[0], session) self.assertEqual("Hong Kong, OH, US", loc[0].location.address) self.assertEqual(10.0, loc[0].location.latitude) self.assertEqual(10.0, loc[0].location.longitude) alchemy.match(loc[0], session, {"city": u"Frisco"}, keepexisting=True) self.assertEqual("Frisco, OH, US", loc[0].location.address) self.assertEqual(10.0, loc[0].location.latitude) self.assertEqual(10.0, loc[0].location.longitude) def test_unmatch_asgloc(self): loc = session.query(RawLocation).limit(20) asg = session.query(RawAssignee).limit(20) alchemy.match(asg, session) alchemy.match(loc[0:5], session) alchemy.match(loc[5:10], session) alchemy.match(loc[10:15], session) alchemy.match(loc[15:20], session) clean = asg[0].assignee alchemy.unmatch(asg[0], session) self.assertEqual(None, asg[0].assignee) self.assertEqual(19, len(clean.rawassignees)) self.assertEqual(19, len(clean.patents)) self.assertEqual(4, session.query(Location).count()) self.assertEqual(4, session.query(locationassignee).count()) clean = loc[0].location self.assertEqual(5, len(clean.rawlocations)) alchemy.unmatch(loc[0], session) self.assertEqual(4, len(clean.rawlocations)) alchemy.unmatch(loc[1], session) self.assertEqual(3, len(clean.rawlocations)) alchemy.unmatch(loc[2:5], session) self.assertEqual(None, loc[0].location) self.assertEqual(3, session.query(Location).count()) self.assertEqual(3, session.query(locationassignee).count()) alchemy.unmatch(loc[5].location, session) self.assertEqual(2, session.query(Location).count()) self.assertEqual(2, session.query(locationassignee).count()) alchemy.unmatch(asg[3:20], session) alchemy.unmatch(loc[10].location, session) self.assertEqual(1, session.query(Location).count()) self.assertEqual(0, session.query(locationassignee).count()) def test_unmatch_invloc(self): loc = session.query(RawLocation).limit(20) inv = session.query(RawInventor).limit(20) alchemy.match(inv, session) alchemy.match(loc[0:5], session) alchemy.match(loc[5:10], session) alchemy.match(loc[10:15], session) alchemy.match(loc[15:20], session) clean = inv[0].inventor alchemy.unmatch(inv[0], session) self.assertEqual(None, inv[0].inventor) self.assertEqual(19, len(clean.rawinventors)) 
self.assertEqual(10, len(clean.patents)) self.assertEqual(4, session.query(Location).count()) self.assertEqual(4, session.query(locationinventor).count()) clean = loc[0].location self.assertEqual(5, len(clean.rawlocations)) alchemy.unmatch(loc[0], session) self.assertEqual(4, len(clean.rawlocations)) alchemy.unmatch(loc[1], session) self.assertEqual(3, len(clean.rawlocations)) alchemy.unmatch(loc[2:5], session) self.assertEqual(None, loc[0].location) self.assertEqual(3, session.query(Location).count()) self.assertEqual(3, session.query(locationinventor).count()) clean = inv[5].inventor alchemy.unmatch(inv[1], session) self.assertEqual(None, inv[1].inventor) self.assertEqual(18, len(clean.rawinventors)) # this patent is repeated self.assertEqual(10, len(clean.patents)) alchemy.unmatch(inv[2], session) self.assertEqual(None, inv[2].inventor) self.assertEqual(17, len(clean.rawinventors)) self.assertEqual(9, len(clean.patents)) alchemy.unmatch(loc[5].location, session) self.assertEqual(2, session.query(Location).count()) self.assertEqual(2, session.query(locationinventor).count()) alchemy.unmatch(inv[3:20], session) alchemy.unmatch(loc[10].location, session) self.assertEqual(1, session.query(Location).count()) self.assertEqual(0, session.query(locationinventor).count()) def test_unmatch_lawyer(self): law = session.query(RawLawyer).limit(20) alchemy.match(law, session) alchemy.unmatch(law[0], session) self.assertEqual(None, law[0].lawyer)
self.assertEqual(19, len(law[1].lawyer.rawlawyers)) self.assertEqual(14, len(law[1].lawyer.patents)) def test_assigneematch(self): # blindly assume first 10 are the same asg0 = session.query(RawAssignee).limit(10) asg1 = session.query(RawAssignee).limit(10).offset(10)
asgs = session.query(Assignee) alchemy.match(asg0, session) alchemy.match(asg1, session) # create two items self.assertEqual(10, len(asg0[0].assignee.rawassignees)) self.assertEqual(10, len(asg1[0].assignee.rawassignees)) self.assertEqual(10, len(asg0[0].assignee.patents)) self.assertEqual(2, asgs.count()) self.assertEqual("CAFEPRESS.COM", asg0[0].assignee.organization) # merge the assignees together alchemy.match([asg0[0], asg1[0]], session) self.assertEqual(20, len(asg0[0].assignee.rawassignees)) self.assertEqual(20, len(asg1[0].assignee.rawassignees)) self.assertEqual(20, len(asg0[0].assignee.patents)) self.assertEqual(1, asgs.count()) # override the default values provided alchemy.match(asg0[0], session, {"organization": u"Kevin"}) self.assertEqual("Kevin", asg0[0].assignee.organization) # determine the most common organization name alchemy.match(session.query(RawAssignee).limit(40).all(), session) self.assertEqual(40, len(asg1[0].assignee.rawassignees)) self.assertEqual("The Procter & Gamble Company", asg0[0].assignee.organization) def test_inventormatch(self): # blindly assume first 10 are the same inv0 = session.query(RawInventor).limit(10) inv1 = session.query(RawInventor).limit(10).offset(10) invs = session.query(Inventor) alchemy.match(inv0, session) alchemy.match(inv1, session) # create two items self.assertEqual(10, len(inv0[0].inventor.rawinventors)) self.assertEqual(10, len(inv1[0].inventor.rawinventors)) self.assertEqual(2, invs.count()) self.assertEqual(6, len(inv0[0].inventor.patents)) self.assertEqual(5, len(inv1[0].inventor.patents)) self.assertEqual("David C. Mattison", inv0[0].inventor.name_full) # merge the assign
Venturi/cms
env/lib/python2.7/site-packages/cms/tests/test_plugins.py
Python
gpl-2.0
79,253
0.002801
# -*- coding: utf-8 -*- from __future__ import with_statement import base64 import datetime import json import os from django import http from django.conf import settings from django.conf.urls import url from django.contrib import admin from django.core import urlresolvers from django.core.cache import cache from django.core.exceptions import ValidationError, ImproperlyConfigured from django.core.files.uploadedfile import SimpleUploadedFile from django.core.management import call_command from django.forms.widgets import Media from django.test.testcases import TestCase from django.utils import timezone from cms import api from cms.constants import PLUGIN_MOVE_ACTION, PLUGIN_COPY_ACTION from cms.exceptions import PluginAlreadyRegistered, PluginNotRegistered, DontUsePageAttributeWarning
from cms.models import Page, Placeholder from cms.models.pluginmodel import CMSPlugin from cms.plugin_base import CMSPluginBase from cms.plugin_pool import plugin_pool from cms.sitemaps.cms_sitemap import CMSSitemap from cms.test_utils.project.pluginapp.plugins.manytomany_rel.models import ( Article, Section, ArticlePluginModel, FKModel, M2MTargetModel)
from cms.test_utils.project.pluginapp.plugins.meta.cms_plugins import ( TestPlugin, TestPlugin2, TestPlugin3, TestPlugin4, TestPlugin5) from cms.test_utils.project.pluginapp.plugins.validation.cms_plugins import ( NonExisitngRenderTemplate, NoRender, NoRenderButChildren, DynTemplate) from cms.test_utils.testcases import ( CMSTestCase, URL_CMS_PAGE, URL_CMS_PLUGIN_MOVE, URL_CMS_PAGE_ADD, URL_CMS_PLUGIN_ADD, URL_CMS_PLUGIN_EDIT, URL_CMS_PAGE_CHANGE, URL_CMS_PLUGIN_REMOVE, URL_CMS_PAGE_PUBLISH) from cms.test_utils.util.fuzzy_int import FuzzyInt from cms.toolbar.toolbar import CMSToolbar from cms.utils.conf import get_cms_setting from cms.utils.copy_plugins import copy_plugins_to from cms.utils.plugins import get_plugins_for_page, get_plugins from djangocms_googlemap.models import GoogleMap from djangocms_inherit.cms_plugins import InheritPagePlaceholderPlugin from djangocms_file.models import File from djangocms_inherit.models import InheritPagePlaceholder from djangocms_link.forms import LinkForm from djangocms_link.models import Link from djangocms_picture.models import Picture from djangocms_text_ckeditor.models import Text from djangocms_text_ckeditor.utils import plugin_tags_to_id_list, plugin_to_tag class DumbFixturePlugin(CMSPluginBase): model = CMSPlugin name = "Dumb Test Plugin. It does nothing." render_template = "" admin_preview = False render_plugin = False def render(self, context, instance, placeholder): return context class DumbFixturePluginWithUrls(DumbFixturePlugin): name = DumbFixturePlugin.name + " With custom URLs." render_plugin = False def _test_view(self, request): return http.HttpResponse("It works") def get_plugin_urls(self): return [ url(r'^testview/$', admin.site.admin_view(self._test_view), name='dumbfixtureplugin'), ] plugin_pool.register_plugin(DumbFixturePluginWithUrls) class PluginsTestBaseCase(CMSTestCase): def setUp(self): self.super_user = self._create_user("test", True, True) self.slave = self._create_user("slave", True) self.FIRST_LANG = settings.LANGUAGES[0][0] self.SECOND_LANG = settings.LANGUAGES[1][0] self._login_context = self.login_user_context(self.super_user) self._login_context.__enter__() def tearDown(self): self._login_context.__exit__(None, None, None) def approve_page(self, page): response = self.client.get(URL_CMS_PAGE + "%d/approve/" % page.pk) self.assertRedirects(response, URL_CMS_PAGE) # reload page return self.reload_page(page) def get_request(self, *args, **kwargs): request = super(PluginsTestBaseCase, self).get_request(*args, **kwargs) request.placeholder_media = Media() request.toolbar = CMSToolbar(request) return request def get_response_pk(self, response): return int(response.content.decode('utf8').split("/edit-plugin/")[1].split("/")[0]) class PluginsTestCase(PluginsTestBaseCase): def _create_text_plugin_on_page(self, page): plugin_data = { 'plugin_type': "TextPlugin", 'plugin_language': settings.LANGUAGES[0][0], 'placeholder_id': page.placeholders.get(slot="body").pk, 'plugin_parent': '', } response = self.client.post(URL_CMS_PLUGIN_ADD, plugin_data) self.assertEqual(CMSPlugin.objects.count(), 1) self.assertEqual(response.status_code, 200) created_plugin_id = self.get_response_pk(response) self.assertEqual(created_plugin_id, CMSPlugin.objects.all()[0].pk) return created_plugin_id def _edit_text_plugin(self, plugin_id, text): edit_url = "%s%s/" % (URL_CMS_PLUGIN_EDIT, plugin_id) response = self.client.get(edit_url) self.assertEqual(response.status_code, 200) data = { "body": text } response = self.client.post(edit_url, 
data) self.assertEqual(response.status_code, 200) txt = Text.objects.get(pk=plugin_id) return txt def test_add_edit_plugin(self): """ Test that you can add a text plugin """ # add a new text plugin page_data = self.get_new_page_data() self.client.post(URL_CMS_PAGE_ADD, page_data) page = Page.objects.all()[0] created_plugin_id = self._create_text_plugin_on_page(page) # now edit the plugin txt = self._edit_text_plugin(created_plugin_id, "Hello World") self.assertEqual("Hello World", txt.body) # edit body, but click cancel button data = { "body": "Hello World!!", "_cancel": True, } edit_url = '%s%d/' % (URL_CMS_PLUGIN_EDIT, created_plugin_id) response = self.client.post(edit_url, data) self.assertEqual(response.status_code, 200) txt = Text.objects.all()[0] self.assertEqual("Hello World", txt.body) def test_plugin_edit_marks_page_dirty(self): page_data = self.get_new_page_data() response = self.client.post(URL_CMS_PAGE_ADD, page_data) self.assertEqual(response.status_code, 302) page = Page.objects.all()[0] response = self.client.post(URL_CMS_PAGE_PUBLISH % (page.pk, 'en')) self.assertEqual(response.status_code, 302) created_plugin_id = self._create_text_plugin_on_page(page) page = Page.objects.all()[0] self.assertEqual(page.is_dirty('en'), True) response = self.client.post(URL_CMS_PAGE_PUBLISH % (page.pk, 'en')) self.assertEqual(response.status_code, 302) page = Page.objects.all()[0] self.assertEqual(page.is_dirty('en'), False) self._edit_text_plugin(created_plugin_id, "Hello World") page = Page.objects.all()[0] self.assertEqual(page.is_dirty('en'), True) def test_plugin_order(self): """ Test that plugin position is saved after creation """ page_en = api.create_page("PluginOrderPage", "col_two.html", "en", slug="page1", published=True, in_navigation=True) ph_en = page_en.placeholders.get(slot="col_left") # We check created objects and objects from the DB to be sure the position value # has been saved correctly text_plugin_1 = api.add_plugin(ph_en, "TextPlugin", "en", body="I'm the first") text_plugin_2 = api.add_plugin(ph_en, "TextPlugin", "en", body="I'm the second") db_plugin_1 = CMSPlugin.objects.get(pk=text_plugin_1.pk) db_plugin_2 = CMSPlugin.objects.get(pk=text_plugin_2.pk) with self.settings(CMS_PERMISSION=False): self.assertEqual(text_plugin_1.position, 0) self.assertEqual(db_plugin_1.position, 0) self.assertEqual(text_plugin_2.position, 1) self.assertEqual(db_plugin_2.position, 1) ## Finally we render the placeholder to test the actual co
ESOedX/edx-platform
openedx/core/djangoapps/embargo/tests/test_forms.py
Python
agpl-3.0
5,361
0.002425
# -*- coding: utf-8 -*- """ Unit tests for embargo app admin forms. """ from __future__ import absolute_import import six # Explicitly import the cache from ConfigurationModel so we can reset it after each test from config_models.models import cache from django.test import TestCase from opaque_keys.edx.locator import CourseLocator from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase from xmodule.modulestore.tests.factories import CourseFactory from ..forms import IPFilterForm, RestrictedCourseForm from ..models import IPFilter class RestrictedCourseFormTest(ModuleStoreTestCase): """Test the course form properly validates course IDs""" def test_save_valid_data(self): course = CourseFactory.create() data = { 'course_key': six.text_type(course.id), 'enroll_msg_key': 'default', 'access_msg_key': 'default' } form = RestrictedCourseForm(data=data) self.assertTrue(form.is_valid()) def test_invalid_course_key(self): # Invalid format for the course key form = RestrictedCourseForm(data={'course_key': 'not/valid'}) self._assert_course_field_error(form) def test_course_not_found(self): course_key = CourseLocator(org='test', course='test', run='test') form = RestrictedCourseForm(data={'course_key': course_key}) self._assert_course_field_error(form) def _assert_course_field_error(self, form): """ Validation shouldn't work. """ self.assertFalse(form.is_valid()) msg = 'COURSE NOT FOUND' self.assertIn(msg, form._errors['course_key'][0]) # pylint: disable=protected-access with self.assertRaisesRegexp( ValueError, "The RestrictedCourse could not be created because the data didn't validate." ): form.save() class IPFilterFormTest(TestCase): """Test form for adding [black|white]list IP addresses""" def tearDown(self): super(IPFilterFormTest, self).tearDown() # Explicitly clear ConfigurationModel's cache so tests have a clear cache # and don't interfere with each other cache.clear() def test_add_valid_ips(self): # test adding valid ip addresses
# should be able to do both ipv4 and ipv6 # spacing should not matter
form_data = { 'whitelist': u'127.0.0.1, 2003:dead:beef:4dad:23:46:bb:101, 1.1.0.1/32, 1.0.0.0/24', 'blacklist': u' 18.244.1.5 , 2002:c0a8:101::42, 18.36.22.1, 1.0.0.0/16' } form = IPFilterForm(data=form_data) self.assertTrue(form.is_valid()) form.save() whitelist = IPFilter.current().whitelist_ips blacklist = IPFilter.current().blacklist_ips for addr in u'127.0.0.1, 2003:dead:beef:4dad:23:46:bb:101'.split(','): self.assertIn(addr.strip(), whitelist) for addr in u'18.244.1.5, 2002:c0a8:101::42, 18.36.22.1'.split(','): self.assertIn(addr.strip(), blacklist) # Network tests # ips not in whitelist network for addr in [u'1.1.0.2', u'1.0.1.0']: self.assertNotIn(addr.strip(), whitelist) # ips in whitelist network for addr in [u'1.1.0.1', u'1.0.0.100']: self.assertIn(addr.strip(), whitelist) # ips not in blacklist network for addr in [u'2.0.0.0', u'1.1.0.0']: self.assertNotIn(addr.strip(), blacklist) # ips in blacklist network for addr in [u'1.0.100.0', u'1.0.0.10']: self.assertIn(addr.strip(), blacklist) # Test clearing by adding an empty list is OK too form_data = { 'whitelist': '', 'blacklist': '' } form = IPFilterForm(data=form_data) self.assertTrue(form.is_valid()) form.save() self.assertEqual(len(IPFilter.current().whitelist), 0) self.assertEqual(len(IPFilter.current().blacklist), 0) def test_add_invalid_ips(self): # test adding invalid ip addresses form_data = { 'whitelist': u'.0.0.1, :dead:beef:::, 1.0.0.0/55', 'blacklist': u' 18.244.* , 999999:c0a8:101::42, 1.0.0.0/' } form = IPFilterForm(data=form_data) self.assertFalse(form.is_valid()) if six.PY2: wmsg = "Invalid IP Address(es): [u'.0.0.1', u':dead:beef:::', u'1.0.0.0/55']" \ " Please fix the error(s) and try again." else: wmsg = "Invalid IP Address(es): ['.0.0.1', ':dead:beef:::', '1.0.0.0/55']" \ " Please fix the error(s) and try again." self.assertEquals(wmsg, form._errors['whitelist'][0]) # pylint: disable=protected-access if six.PY2: bmsg = "Invalid IP Address(es): [u'18.244.*', u'999999:c0a8:101::42', u'1.0.0.0/']" \ " Please fix the error(s) and try again." else: bmsg = "Invalid IP Address(es): ['18.244.*', '999999:c0a8:101::42', '1.0.0.0/']" \ " Please fix the error(s) and try again." self.assertEquals(bmsg, form._errors['blacklist'][0]) # pylint: disable=protected-access with self.assertRaisesRegexp(ValueError, "The IPFilter could not be created because the data didn't validate."): form.save()
Morgan-Stanley/treadmill
lib/python/treadmill/api/nodeinfo.py
Python
apache-2.0
1,004
0
"""Implementation of allocation API. """ from __future__ import absolute_import from __future__ import division from __future_
_ import print_function from __future__ import unicode_literals import logging from treadmill import discovery from treadmill import context _LO
GGER = logging.getLogger(__name__) class API: """Treadmill Local REST api.""" def __init__(self): def _get(hostname): """Get hostname nodeinfo endpoint info.""" _LOGGER.info('Redirect: %s', hostname) discovery_iter = discovery.iterator( context.GLOBAL.zk.conn, 'root.%s' % hostname, 'nodeinfo', False ) for (_app, hostport) in discovery_iter: if not hostport: continue _LOGGER.info('Found: %s - %s', hostname, hostport) return hostport _LOGGER.info('nodeinfo not found: %s', hostname) return None self.get = _get
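A usage sketch for the API above, assuming `context.GLOBAL` has already been configured with a live ZooKeeper connection (the hostname is illustrative):

# Usage sketch (assumes context.GLOBAL is initialized; hostname is illustrative).
api = API()
hostport = api.get('node1.example.com')
if hostport:
    print('nodeinfo endpoint at %s' % hostport)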
CommonClimate/teaching_notebooks
GEOL351/CoursewareModules/setpath.py
Python
mit
5,073
0.013996
#---------------------------------------------------------------------- #This utility sets up the python configuration files so as to #allow Python to find files in a specified directory, regardless #of what directory the user is working from. This is typically #used to create a directory where the user will put resources shared #by many Python scripts, such as courseware modules # #---------------------------------------------------------------------- #Usage: # (1) Put a copy of this file (setpath.py) in the directory # you want to share # # (2) Execute setpath.py, either by opening it and running it # in Canopy, or from the command line by changing directory # to the directory you want to share and then typing # python setpath.py # If you run it by opening it in the Canopy editor you need to # select the directory popup menu item that tells Canopy to # change the working directory to the Editor directory. # in Canopy, the working directory always appears at the upper # right corner of the Python interpreter window. # #---------------------------------------------------------------------- #Notes: # # This will create a startup file which will properly # initialize ipython (whether used directly or via Enthought # Canopy) to find your files, and will do that regardless # of your operating system. # # If you are using a Linux or Mac OSX operating system, it # will also edit your .cshrc and .bash_profile shell startup # scripts to set the environment variable PYTHONPATH so that # any version of the python interpreter started from the # command line (i.e. whether ipython or python) will find
# the shared files. This feature will not work on # Windows operating systems, so Windows users should either # start up python by clicking on the Canopy app, or start # ipython from the command line. It is possible # to set the PYTHONPATH environment variable in Windows, # but this script does not yet implement that feature. # # Note that it is also possible to manually set up a temporary # shared path (for example /home/MyModules) in a given script
# by executing the lines: # # import sys # sys.path.append('/home/MyModules') # # where you would replace '/home/MyModules' with the # actual full path to the directory you want on your own # system #---------------------------------------------------------------------- import os,glob,platform #Utility function to return an acceptable filename for the #startup file def makeFileName(startupDir): files = glob.glob(os.path.join(startupDir,'*.py')) #Make a startup filename that doesn't already exist for i in range(10000): if i<100: fname = '%02d-startup.py'%i else: fname ='%04d-startup.py'%i fname = os.path.join(startupDir,fname) if not fname in files: break return fname # #--------Main program starts here # #Get current path curPath = os.getcwd() #Get home directory home = os.path.expanduser('~') # #If this is a Linux or Mac OS X system, edit the #shell initialization files to set the PYTHONPATH environment #variable if ( (platform.system()=='Darwin') or ('inux' in platform.system())): #We are on a Linux or Mac system. Edit Shell startup files print('This is a Linux or Mac system. Adding path to shell startup scripts') # #csh script: (Note, should also do this for .tcshrc if it exists) cshFile = os.path.join(home,'.cshrc') print('csh family -- Editing ' + cshFile) #Make backup copy of file os.system('cp %s %s'%(cshFile,cshFile+'.setPathBackup')) #Append line to set PYTHONPATH outfile = open(cshFile,'a') outfile.write('#Line added by setPath.py. Original in %s\n'%(cshFile+'.setPathBackup')) #Note: the double quotes allow paths to contain spaces outfile.write('setenv PYTHONPATH \"%s:$PYTHONPATH\"\n'%curPath) outfile.close() # #bash script (ToDo: also edit .profile, for sh users) bashFile = os.path.join(home,'.bash_profile') print('sh family -- Editing ' + bashFile) #Make backup copy of file os.system('cp %s %s'%(bashFile,bashFile+'.setPathBackup')) #Append line to set PYTHONPATH outfile = open(bashFile,'a') outfile.write('#Line added by setPath.py. Original in %s\n'%(bashFile+'.setPathBackup')) #Note: the double quotes allow paths to contain spaces outfile.write('export PYTHONPATH=\"%s:$PYTHONPATH\"\n'%curPath) outfile.close() # # #Set paths for ipython startup. This takes care of starting up ipython from #double-clicking the Canopy app on any operating system # profilepath = os.path.join(home,'.ipython/profile_default/startup') if os.path.isdir(profilepath): fname = makeFileName(profilepath) else: print("Could not find .ipython startup directory. Exiting.") exit(1) # #Write the startup file contents = 'import sys \nsys.path.append(\'%s\')\n'%curPath outfile = open(fname,'w') outfile.write(contents) outfile.close()
teamclairvoyant/airflow-scheduler-failover-controller
scheduler_failover_controller/command_runner/command_runner.py
Python
apache-2.0
2,797
0.002503
import subprocess import os class CommandRunner: HOST_LIST_TO_RUN_LOCAL = ["l
ocalhost", "127.0.0.1"] def __init__(self, local_hostname, logger): logger.debug("Creating CommandRunner with Args - local_hostname: {local_hostname}, logger: {logger}".format(**locals())) self.local_hostname = local_hostname self.logger = logger # returns: is_successful, output def run_command(self, hos
t, base_command): self.logger.debug("Running Command: " + str(base_command)) if host == self.local_hostname or host in self.HOST_LIST_TO_RUN_LOCAL: return self._run_local_command(base_command) else: return self._run_ssh_command(host, base_command) # This starts the process as a child process, meaning that if the scheduler_failover_controller fails, the child process fails as well (unless you're running the systemctl command) def _run_local_command(self, base_command): self.logger.debug("Running command as Local command") output = os.popen(base_command).read() if output: output = output.split("\n") self.logger.debug("Run Command output: " + str(output)) return True, output def _run_ssh_command(self, host, base_command): self.logger.debug("Running command as SSH command") if base_command.startswith("sudo"): command_split = ["ssh", "-tt", host, base_command] else: command_split = ["ssh", host, base_command] return self._run_split_command( command_split=command_split ) def _run_split_command(self, command_split): self.logger.debug("Running command_split: " + str(command_split)) is_successful = True output = [] try: process = subprocess.Popen(command_split, stdout=subprocess.PIPE, stderr=subprocess.PIPE) process.wait() if process.stderr is not None: stderr_output = process.stderr.readlines() if stderr_output and len(stderr_output) > 0: output += [err.decode() if isinstance(err, bytes) else err for err in stderr_output] self.logger.debug("Run Command stderr output: " + str(stderr_output)) if process.stdout is not None: output += [out.decode() if isinstance(out, bytes) else out for out in process.stdout.readlines()] if process.returncode != 0: self.logger.warning("Process returned code '" + str(process.returncode) + "'") is_successful = False except Exception as e: is_successful = False output = str(e) self.logger.debug("Run Command output: " + str(output)) return is_successful, output
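One caveat with _run_split_command above: calling process.wait() before reading stdout/stderr through PIPE can deadlock once a child produces more output than the OS pipe buffer holds. A hedged sketch of the same logic built on subprocess.run (Python 3.7+), which drains both streams internally; the function name and timeout value are illustrative, not part of this project:

import subprocess

def run_split_command_safe(command_split, timeout=60):
    # Returns (is_successful, output) like the method above
    try:
        result = subprocess.run(command_split, capture_output=True,
                                text=True, timeout=timeout)
    except Exception as e:
        return False, str(e)
    output = result.stderr.splitlines() + result.stdout.splitlines()
    return result.returncode == 0, output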
IBM-Security/ibmsecurity
ibmsecurity/isam/base/network/felb/services/advanced_tuning.py
Python
apache-2.0
6,046
0.004135
import logging import ibmsecurity.utilities.tools logger = logging.getLogger(__name__) module_uri = "/isam/felb/configuration/services/" requires_modules = None requires_versions = None requires_model = "Appliance" def add(isamAppliance, service_name, name, value, check_mode=False, force=False): """ Creates a service attribute """ check_value, warnings = _check(isamAppliance, service_name, name) if force is True or check_value is False: if check_mode is True: return isamAppliance.create_return_object(changed=True, warnings=warnings) else: return isamAppliance.invoke_post("Creating a service attribute", "{0}{1}/attributes".format(module_uri, service_name), { "name": name, "value": value }, requires_version=requires_versions, requires_modules=requires_modules, requires_model=requires_model) else: return isamAppliance.create_return_object(warnings=warnings) def delete(isamAppliance, service_name, attribute_name, check_mode=False, force=False): """
Deletes a service level attribute """ check_value, warnings = _check(isamAppliance, service_name,
attribute_name) if force is True or check_value is True: if check_mode is True: return isamAppliance.create_return_object(changed=True, warnings=warnings) else: return isamAppliance.invoke_delete("Deleting a service attribute", "{0}{1}/attributes/{2}".format(module_uri, service_name, attribute_name), requires_version=requires_versions, requires_modules=requires_modules, requires_model=requires_model) else: return isamAppliance.create_return_object(warnings=warnings) def get(isamAppliance, service_name, attribute_name): """ Retrieving a service attribute """ return isamAppliance.invoke_get("Retrieving a service attribute", "{0}{1}/attributes/{2}".format(module_uri, service_name, attribute_name), requires_version=requires_versions, requires_modules=requires_modules, requires_model=requires_model) def get_all(isamAppliance, service_name): """ Retrieving service attribute names """ return isamAppliance.invoke_get("Retrieving service attribute names", "{0}{1}/attributes?includeAllValues=true".format(module_uri, service_name), requires_version=requires_versions, requires_modules=requires_modules, requires_model=requires_model) def update(isamAppliance, service_name, attribute_name, attribute_value, check_mode=False, force=False): """ Updating a service attribute """ check_value, warnings = _check_add(isamAppliance, service_name, attribute_name, attribute_value) if force is True or check_value is True: if check_mode is True: return isamAppliance.create_return_object(changed=True, warnings=warnings) else: return isamAppliance.invoke_put("Updating a service attribute", "{0}{1}/attributes/{2}".format(module_uri, service_name, attribute_name), { "value": attribute_value }, requires_modules=requires_modules, requires_version=requires_versions, requires_model=requires_model) else: return isamAppliance.create_return_object(warnings=warnings) def set(isamAppliance, service_name, attribute_name, attribute_value, check_mode=False, force=False): """ Determines if add or update is called """ check_value, warnings = _check(isamAppliance, service_name, attribute_name) if check_value is False: return add(isamAppliance, service_name, attribute_name, attribute_value, check_mode, force) else: return update(isamAppliance, service_name, attribute_name, attribute_value, check_mode, force) def compare(isamAppliance1, service_name1, isamAppliance2, service_name2): """ Compare configuration between two appliances """ ret_obj1 = get_all(isamAppliance1, service_name1) ret_obj2 = get_all(isamAppliance2, service_name2) return ibmsecurity.utilities.tools.json_compare(ret_obj1, ret_obj2, deleted_keys=[]) def _check_add(isamAppliance, service_name, name, value): """ Idempotency test for the add function """ check_obj = {} warnings = "" # check to see if the attribute under the service name exists; return True if it doesn't exist try: check_obj = get(isamAppliance, service_name, name) warnings = check_obj['warnings'] except Exception: return True, warnings if 'value' in check_obj['data']: if check_obj['data']['value'] != value: return True, warnings else: return False, warnings else: return False, warnings def _check(isamAppliance, service_name, attribute_name): """ Checks to see if attribute exists """ warnings = "" try: check_obj = get(isamAppliance, service_name, attribute_name) warnings = check_obj['warnings'] except Exception: return False, warnings if check_obj['data'] == {}: return False, warnings return True, warnings
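The set() function above illustrates a common idempotency pattern: probe the current state, then dispatch to add or update, with check_mode short-circuiting before any write. A library-agnostic sketch of that pattern, where a plain dict stands in for the appliance's REST state (purely illustrative, not part of ibmsecurity):

def set_attribute(store, name, value, check_mode=False):
    exists = name in store
    changed = (not exists) or (store[name] != value)
    if check_mode or not changed:
        return {'changed': changed}
    store[name] = value  # add and update collapse into one write here
    return {'changed': True}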
WindCanDie/spark
python/pyspark/tests/test_util.py
Python
apache-2.0
3,052
0.001311
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import unittest from py4j.protocol import Py4JJavaError from pyspark import keyword_only from pyspark.testing.utils import PySparkTestCase class KeywordOnlyTests(unittest.TestCase): class Wrapped(object): @keyword_only def set(self, x=None, y=None): if "x" in self._input_kwargs: self._x = self._input_kwargs["x"] if "y" in self._input_kwargs: self._y = self._input_kwargs["y"] return x, y def test_keywords(self): w = self.Wrapped() x, y = w.set(y=1) self.assertEqual(y, 1) self.assertEqual(y, w._y) self.assertIsNone(x) self.assertFalse(hasattr(w, "_x")) def test_non_keywords(self): w = self.Wrapped() self.assertRaises(TypeError, lambda: w.set(0, y=1)) def test_kwarg_ownership(self): # test _input_kwargs is owned by each class instance and not a shared static variable class Setter(object): @keyword_only def set(self, x=None, other=None, other_x=None): if "other" in self._input_kwargs: self._input_kwargs["other"].set(x=self._input_kwargs["other_x"]) self._x = self._input_kwargs["x"] a = Setter() b = Setter()
a.set(x=1, other=b, other_x=2) self.assertEqual(a._x, 1) self.assertEqual(b._x, 2) class UtilTests(PySparkTestCase): def test_py4j_exception_message(self): from pyspark.util import _exception_message with self.assertRaises(Py4JJavaError) as context: # This attempts java.lang.String(null) which throws an NPE. self.sc._jvm.java.lang.String(None) self.assertTrue('NullPointerException' in _exception_message(conte
xt.exception)) def test_parsing_version_string(self): from pyspark.util import VersionUtils self.assertRaises(ValueError, lambda: VersionUtils.majorMinorVersion("abced")) if __name__ == "__main__": from pyspark.tests.test_util import * try: import xmlrunner testRunner = xmlrunner.XMLTestRunner(output='target/test-reports') except ImportError: testRunner = None unittest.main(testRunner=testRunner, verbosity=2)
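For readers unfamiliar with pyspark's @keyword_only, here is a sketch of a decorator with the behavior these tests exercise: positional arguments raise TypeError, and the keyword arguments are stashed on the instance so each object gets its own _input_kwargs (a reconstruction, not pyspark's actual source):

import functools

def keyword_only(func):
    @functools.wraps(func)
    def wrapper(self, *args, **kwargs):
        if args:
            raise TypeError('Method %s forces keyword arguments.' % func.__name__)
        # Store on the instance, not the class, so kwargs are never shared
        self._input_kwargs = kwargs
        return func(self, **kwargs)
    return wrapper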
llgoncalves/harpia
harpia/model/diagrammodel.py
Python
gpl-2.0
854
0.001171
# -*- coding: utf-8 -*- from harpia.model.connectionmodel import ConnectionModel as ConnectionModel from harpia.system import System as System class DiagramModel(object): # ---------------------------------------------------------------------- def __init__(self): self.last_id = 1 # first block is n1, increments to each new block self.blocks = {} # GUI blocks self.connectors = [] self.zoom = 1.0 # pixels per unit self.file_name = "Untitled" self.modified = False self.language = None self.undo_stack = [] self.redo_stack = [] #
---------------------------------------------------------------------- @property def patch_name(self): return self.file_name.split
("/").pop() # ----------------------------------------------------------------------
lino-framework/xl
lino_xl/lib/orders/choicelists.py
Python
bsd-2-clause
992
0.006048
# -*- coding: UTF-8 -*- # Copyright 2019-2020 Rumma & Ko Ltd # License: GNU Affero General Public License v3 (see file COPYING for details) from django.db import models from lino_xl.lib.ledger.choicelists import VoucherStates from lino.api import dd, _ class OrderStates(VoucherStates): pass add = OrderStates.add_item add('10', _("Waiting"), 'draft', is_editable=True) add('20', _("Active"), 'active', is_editable=True) add('30', _("Urgent"), 'urgent', is_editable=True) add('40', _("Done"), 'registered') add('50', _("Canc
elled"), 'cancelled') OrderStates.draft.add_transition(required_states="active urgent registered cancelled") OrderStates.active.add_transition(required_states="draft urgent registered cancelled") OrderStates.urgent.add_transition(required_states="draft active registered cancelled") OrderStates.registered.add_transition(required_states="draft active urgent cancelled") OrderStates.cancelled.add_transition(r
equired_states="draft active urgent registered")
hfp/tensorflow-xsmm
tensorflow/contrib/distribute/python/minimize_loss_test.py
Python
apache-2.0
20,274
0.007695
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either ex
press or implied. # See the License for the specific language
governing permissions and # limitations under the License. # ============================================================================== """Tests for running legacy optimizer code with DistributionStrategy.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl.testing import parameterized import numpy from tensorflow.contrib.distribute.python import combinations from tensorflow.contrib.distribute.python.single_loss_example import batchnorm_example from tensorflow.contrib.distribute.python.single_loss_example import minimize_loss_example from tensorflow.python.data.ops import dataset_ops from tensorflow.python.distribute import reduce_util from tensorflow.python.eager import context from tensorflow.python.eager import test from tensorflow.python.framework import constant_op from tensorflow.python.framework import ops from tensorflow.python.layers import core from tensorflow.python.ops import array_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import variable_scope from tensorflow.python.ops import variables as variables_lib from tensorflow.python.ops.losses import losses_impl class MinimizeLossStepTest(test.TestCase, parameterized.TestCase): def _get_iterator(self, ds): if context.executing_eagerly(): iterator = ds.make_one_shot_iterator() else: iterator = ds.make_initializable_iterator() self.evaluate(iterator.initializer) return iterator @combinations.generate( combinations.times( combinations.distributions_and_v1_optimizers(), combinations.combine(mode=["graph"], use_callable_loss=[True, False]) + combinations.combine(mode=["eager"], use_callable_loss=[True])) + combinations.combine( distribution=[combinations.tpu_strategy], optimizer_fn=combinations.optimizers_v1, mode=["graph"], use_callable_loss=[True, False])) def testTrainNetwork(self, distribution, optimizer_fn, use_callable_loss): with distribution.scope(): model_fn, dataset_fn, layer = minimize_loss_example( optimizer_fn, use_bias=True, use_callable_loss=use_callable_loss) def step_fn(ctx, inputs): del ctx # Unused return distribution.group( distribution.call_for_each_replica(model_fn, args=(inputs,))) iterator = self._get_iterator(distribution.distribute_dataset(dataset_fn)) def run_step(): return distribution.run_steps_on_dataset( step_fn, iterator, iterations=2).run_op self.evaluate(distribution.initialize()) if not context.executing_eagerly(): with self.cached_session() as sess: run_step = sess.make_callable(run_step()) self.evaluate(variables_lib.global_variables_initializer()) weights, biases = [], [] for _ in range(5): run_step() weights.append(self.evaluate(layer.kernel)) biases.append(self.evaluate(layer.bias)) self.evaluate(distribution.finalize()) error = abs(numpy.add(numpy.squeeze(weights), numpy.squeeze(biases)) - 1) is_not_increasing = all(y <= x for x, y in zip(error, error[1:])) self.assertTrue(is_not_increasing) @combinations.generate( combinations.times( combinations.distributions_and_v1_optimizers(), combinations.combine(mode=["graph"], use_callable_loss=[True, False]) + combinations.combine(mode=["eager"], use_callable_loss=[True]))) def testTrainNetworkByCallForEachReplica(self, distribution, optimizer_fn, use_callable_loss): with distribution.scope(): model_fn, dataset_fn, layer = minimize_loss_example( optimizer_fn, use_bias=True, use_callable_loss=use_callable_loss) iterator = self._get_iterator(distribution.distribute_dataset(dataset_fn)) def run_step(): 
return distribution.group( distribution.call_for_each_replica( model_fn, args=(iterator.get_next(),))) if not context.executing_eagerly(): with self.cached_session() as sess: run_step = sess.make_callable(run_step()) self.evaluate(variables_lib.global_variables_initializer()) weights, biases = [], [] for _ in range(10): run_step() weights.append(self.evaluate(layer.kernel)) biases.append(self.evaluate(layer.bias)) error = abs(numpy.add(numpy.squeeze(weights), numpy.squeeze(biases)) - 1) is_not_increasing = all(y <= x for x, y in zip(error, error[1:])) self.assertTrue(is_not_increasing) @combinations.generate( combinations.times( combinations.distributions_and_v1_optimizers() + combinations.distributions_and_v2_optimizers(), combinations.combine(mode=["graph", "eager"])) + combinations.combine( distribution=[combinations.tpu_strategy], optimizer_fn=combinations.optimizers_v1+combinations.optimizers_v2, mode=["graph"])) def testOptimizerInsideModelFn(self, distribution, optimizer_fn): created_variables = [] trainable_variables = [] def appending_creator(next_creator, *args, **kwargs): v = next_creator(*args, **kwargs) created_variables.append(v.name) if "trainable" in kwargs and kwargs["trainable"]: trainable_variables.append(v.name) return v # Creator scope needs to be set before it's used inside # `distribution.scope`. with variable_scope.variable_creator_scope( appending_creator), distribution.scope(): model_fn, dataset_fn, layer = minimize_loss_example( optimizer_fn, use_bias=True, use_callable_loss=True, create_optimizer_inside_model_fn=True) def step_fn(ctx, inputs): del ctx # Unused return distribution.group( distribution.call_for_each_replica(model_fn, args=(inputs,))) iterator = self._get_iterator(distribution.distribute_dataset(dataset_fn)) def run_step(): return distribution.run_steps_on_dataset( step_fn, iterator, iterations=1).run_op self.evaluate(distribution.initialize()) if not context.executing_eagerly(): with self.cached_session() as sess: run_step = sess.make_callable(run_step()) self.evaluate(variables_lib.global_variables_initializer()) run_step() self.evaluate(distribution.finalize()) def get_expected_variables(optimizer_fn, num_parameter_devices): variables_map = { "GradientDescent": ["dense/kernel", "dense/bias"], "Adagrad": [ "dense/kernel/Adagrad", "dense/kernel", "dense/bias/Adagrad", "dense/bias" ] } variables = variables_map[optimizer_fn().get_name()] variables.extend([ v + "/replica_{}".format(replica) for v in variables for replica in range(1, num_parameter_devices) ]) return set([v + ":0" for v in variables]) self.assertEqual( get_expected_variables(optimizer_fn, len(distribution.parameter_devices)), set(created_variables)) @combinations.generate( combinations.times( combinations.combine(momentum=[0.8, 0.9, 0.99], renorm=[False, True]), combinations.times( combinations.distributions_and_v1_optimizers(), combinations.combine( mode=["g
matthiask/django-admin-sso
admin_sso/default_settings.py
Python
bsd-3-clause
1,071
0.001867
from django.conf import settings from django.utils.translation import gettext_lazy as _ ASSIGNMENT_ANY = 0 ASSIGNMENT_MATCH = 1 ASSIGNMENT_EXCEPT = 2 ASSIGNMENT_CHOICES = ( (ASSIGNMENT_ANY, _("any")), (ASSIGNMENT_MATCH, _("matches")), (ASSIGNMENT_EXCEPT, _("don't match")), ) DJANGO_ADMIN_SSO_ADD_LOGIN_BUTTON = getattr( settings, "DJANGO_ADMIN_SSO_ADD_LOGIN_BUTTON", True ) AUTH_USER_MODEL = getattr(settings, "AUTH_USER
_MODEL", "auth.User") DJANGO_ADMIN_SSO_OAUTH_CLIENT_ID = getattr( settings, "DJANGO_ADMIN_SSO_OAUTH_CLIENT_ID", None ) DJANGO_ADMIN_SSO_OAUTH_CLIENT_SECRET = getattr( settings, "DJANGO_ADMIN_SSO_OAUTH_CLIENT_SECRET", Non
e ) DJANGO_ADMIN_SSO_AUTH_URI = getattr( settings, "DJANGO_ADMIN_SSO_AUTH_URI", "https://accounts.google.com/o/oauth2/auth" ) DJANGO_ADMIN_SSO_TOKEN_URI = getattr( settings, "DJANGO_ADMIN_SSO_TOKEN_URI", "https://accounts.google.com/o/oauth2/token" ) DJANGO_ADMIN_SSO_REVOKE_URI = getattr( settings, "DJANGO_ADMIN_SSO_REVOKE_URI", "https://accounts.google.com/o/oauth2/revoke", )
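Every setting above follows the same getattr(settings, NAME, default) idiom, the standard way for a reusable Django app to expose overridable defaults. A tiny helper that factors out the repetition (hypothetical, not part of django-admin-sso):

from django.conf import settings

def app_setting(name, default=None):
    # Project-level settings win; fall back to the app's default otherwise
    return getattr(settings, name, default)

# e.g. DJANGO_ADMIN_SSO_AUTH_URI = app_setting(
#     'DJANGO_ADMIN_SSO_AUTH_URI', 'https://accounts.google.com/o/oauth2/auth')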
GREO/GNU-Radio
gr-audio-alsa/src/qa_alsa.py
Python
gpl-3.0
1,240
0.010484
#!/usr/bin/env python # # Copyright 2005,2007 Free Software Foundation, Inc. # # This file is part of GNU Radio # # GNU Radio is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3, or (at your option) # any later version. # # GNU Radio is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with GNU Radio; see the file COPYING. If not, write to # the Free Software Foundation, Inc., 51 Franklin Street, # Boston, MA 02110-1301, USA. # from gnuradio import gr, gr_unittest import audio_alsa class qa_alsa (gr_unittest.TestCase): def setUp (self): self.tb = gr.top_block () def tearDown (self): self.tb = None def test_000_nop (se
lf): """Just see if we can import the module... They may not have ALSA drivers, etc. Don't try to run anything""" pass if __name__ == '__main__': gr_unittest.main ()
levythu/swift-layerC
inapi/httpd.py
Python
gpl-2.0
287
0
# coding=utf-8 from tornado.wsgi import WSGIContainer from tornado.httpserver import HTTPServer from tornado.ioloop import IOL
oop from app import app if __name__ == "__main__": http_server = HTTPServer(WSGIContainer(app)) http_server.listen(5000)
IOLoop.instance().start()
timm/timmnix
pypy3-v5.5.0-linux64/lib-python/3/distutils/command/sdist.py
Python
mit
17,891
0.000391
"""distutils.command.sdist Implements the Distutils 'sdist' command (create a source distribution).""" import os import string import sys from types import * from glob import glob from warnings import warn from distutils.core import Command from distutils import dir_util, dep_util, file_util, archive_util from distutils.text_file import TextFile from distutils.errors import * from distutils.filelist import FileList from distutils import log from distutils.util import convert_path def show_formats(): """Print all possible values for the 'formats' option (used by the "--help-formats" command-line option). """ from distutils.fancy_getopt import FancyGetopt from distutils.archive_util import ARCHIVE_FORMATS formats = [] for format in ARCHIVE_FORMATS.keys(): formats.append(("formats=" + format, None, ARCHIVE_FORMATS[format][2])) formats.sort() FancyGetopt(formats).print_help( "List of available source distribution formats:") class sdist(Command): description = "create a source distribution (tarball, zip file, etc.)" def checking_metadata(self): """Callable used for the check sub-command. Placed here so user_options can view it""" return self.metadata_check user_options = [ ('template=', 't', "name of manifest template file [default: MANIFEST.in]"), ('manifest=', 'm', "name of manifest file [default: MANIFEST]"), ('use-defaults', None, "include the default file set in the manifest " "[default; disable with --no-defaults]"), ('no-defaults', None, "don't include the default file set"), ('prune', None, "specifically exclude files/directories that should not be " "distributed (build tree, RCS/CVS dirs, etc.) " "[default; disable with --no-prune]"), ('no-prune', None, "don't automatically exclude anything"), ('manifest-only', 'o', "just regenerate the manifest and then stop " "(implies --force-manifest)"), ('force-manifest', 'f', "forcibly regenerate the manifest and carry on as usual. " "Deprecated: now the manifest is always regenerated."), ('formats=', None, "formats for source distribution (comma-separated list)"), ('keep-temp', 'k', "keep the distribution tree around after creating " + "archive file(s)"), ('dist-dir=', 'd', "directory to put the source distribution archive(s) in " "[default: dist]"), ('metadata-check', None, "Ensure that all required elements of meta-data " "are supplied. Warn if any missing. [default]"), ] boolean_options = ['use-defaults', 'prune', 'manifest-only', 'force-manifest', 'keep-temp', 'metadata-check'] help_options = [ ('help-formats', None, "list available distribution formats", show_formats), ] negative_opt = {'no-defaults': 'use-defaults', 'no-prune': 'prune' } default_format = {'posix': 'gztar', 'nt': 'zip' } sub_commands = [('check', checking_metadata)] def initialize_options(self): # 'template' and 'manifest' are, respectively, the names of # the manifest template and manifest file. self.template = None self.manifest = None # 'use_defaults': if true, we will include the default file set # in the manifest self.use_defaults = 1 self.prune = 1 self.manifest_only = 0 self.force_manifest = 0 self.formats = None self.keep_temp = 0 self.dist_dir = None self.archive_files = None self.metadata_check = 1 def finalize_options(self): if self.manifest is None: self.manifest = "MANIFEST" if self.template is None: self.template = "MANIFEST.in" self.ensure_string_list('formats') if self.formats is None: try:
self.formats = [self.default_format[os.name]] except KeyError: raise DistutilsPlatformError( "don't know how to create source distributions " "on platform %s" % os.name) bad_format = archive_util.check_archive_formats(self.formats) if bad_format: raise DistutilsOptionError( "unknown archive format '%
s'" % bad_format) if self.dist_dir is None: self.dist_dir = "dist" def run(self): # 'filelist' contains the list of files that will make up the # manifest self.filelist = FileList() # Run sub commands for cmd_name in self.get_sub_commands(): self.run_command(cmd_name) # Do whatever it takes to get the list of files to process # (process the manifest template, read an existing manifest, # whatever). File list is accumulated in 'self.filelist'. self.get_file_list() # If user just wanted us to regenerate the manifest, stop now. if self.manifest_only: return # Otherwise, go ahead and create the source distribution tarball, # or zipfile, or whatever. self.make_distribution() def check_metadata(self): """Deprecated API.""" warn("distutils.command.sdist.check_metadata is deprecated, \ use the check command instead", PendingDeprecationWarning) check = self.distribution.get_command_obj('check') check.ensure_finalized() check.run() def get_file_list(self): """Figure out the list of files to include in the source distribution, and put it in 'self.filelist'. This might involve reading the manifest template (and writing the manifest), or just reading the manifest, or just using the default file set -- it all depends on the user's options. """ # new behavior when using a template: # the file list is recalculated every time because # even if MANIFEST.in or setup.py are not changed # the user might have added some files in the tree that # need to be included. # # This makes --force the default and only behavior with templates. template_exists = os.path.isfile(self.template) if not template_exists and self._manifest_is_not_generated(): self.read_manifest() self.filelist.sort() self.filelist.remove_duplicates() return if not template_exists: self.warn(("manifest template '%s' does not exist " + "(using default file list)") % self.template) self.filelist.findall() if self.use_defaults: self.add_defaults() if template_exists: self.read_template() if self.prune: self.prune_file_list() self.filelist.sort() self.filelist.remove_duplicates() self.write_manifest() def add_defaults(self): """Add all the default files to self.filelist: - README or README.txt - setup.py - test/test*.py - all pure Python modules mentioned in setup script - all files pointed by package_data (build_py) - all files defined in data_files. - all files defined as scripts. - all C sources listed as part of extensions or C libraries in the setup script (doesn't catch C headers!) Warns if (README or README.txt) or setup.py are missing; everything else is optional. """ standards = [('README', 'README.txt'), self.distribution.script_name] for fn in standards: if isinstance(fn, tuple): alts = fn got_it = False for fn in alts: if os.path.exists(fn): got_it = True self.filelist.append
RickMohr/nyc-trees
src/nyc_trees/nyc_trees/middleware.py
Python
apache-2.0
846
0
# -*- coding: utf-8 -*- from __future__ import print_function from __future__ import unicode_literals from __future__ import division import re import waffle from django.conf import settings from django.shortcuts import redirect class SoftLaunchMiddleware(object): def __init__(self): self.redirect_url = getattr(settings, 'SOFT_LAUNCH_REDIRECT_URL', '/') regexes = getattr(settings, 'SOFT
_LAUNCH_REGEXES', []) self.regexes = [re.compile(r) for r in regexes] def process_view(self, request, view_func, view_args, view_kwargs): if waffle.flag_is_active(request, 'full_access'): return None allowed = ((request.path == self.redirect_url) or any(r.match(request.path) for r in self.regexes)) if not allowed: return
redirect(self.redirect_url)
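To make the middleware concrete, here is a hypothetical settings block that keeps only the landing page, static assets, and the login view reachable until the 'full_access' waffle flag is granted (the paths are illustrative, not nyc-trees' actual configuration):

SOFT_LAUNCH_REDIRECT_URL = '/'
SOFT_LAUNCH_REGEXES = [
    r'^/static/',
    r'^/accounts/login/',
]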
dirkmoors/drf-tus
rest_framework_tus/storage.py
Python
mit
1,346
0.000743
# -*- coding: utf-8 -*- from __future__ import unicode_literals from abc import ABCMeta, abstractmethod from django.core.files import File from six import with_metaclass from django.utils.module_loading import import_string from rest_framework_tus import signals from .settings import TUS_SAVE_HANDLER_CLASS class AbstractUploadSaveHandler(with_metaclass(ABCMeta, object)): def __init__(self, upload): self.upload = upload @abstractmethod def handle_save(self):
pass def run(self): # Trigger state change self.upload.start_saving() self.upload.save() # Initialize saving self.handle_save() def finish(self): # Trigger signal signals.saved.send(sender=self.__class__, instance=self) # Finish self.upload.finish() self.upload.save() class Def
aultSaveHandler(AbstractUploadSaveHandler): destination_file_field = 'uploaded_file' def handle_save(self): # Save temporary field to file field file_field = getattr(self.upload, self.destination_file_field) file_field.save(self.upload.filename, File(open(self.upload.temporary_file_path))) # Finish upload self.finish() def get_save_handler(import_path=None): return import_string(import_path or TUS_SAVE_HANDLER_CLASS)
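Because get_save_handler resolves TUS_SAVE_HANDLER_CLASS via import_string, projects can swap in their own handler. A sketch of such a subclass, mirroring DefaultSaveHandler above but targeting a hypothetical archived_file model field:

class ArchiveSaveHandler(AbstractUploadSaveHandler):
    destination_file_field = 'archived_file'  # hypothetical field name

    def handle_save(self):
        # Same copy-then-finish flow as DefaultSaveHandler above
        file_field = getattr(self.upload, self.destination_file_field)
        file_field.save(self.upload.filename,
                        File(open(self.upload.temporary_file_path)))
        self.finish()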
Phonemetra/TurboCoin
test/functional/rpc_scantxoutset.py
Python
mit
12,820
0.008892
#!/usr/bin/env python3 # Copyright (c) 2018-2019 TurboCoin # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test the scantxoutset rpc call.""" from test_framework.test_framework import TurbocoinTestFramework from test_framework.util import assert_equal, assert_raises_rpc_error from decimal import Decimal import shutil import os def descriptors(out): return sorted(u['desc'] for u in out['unspents']) class ScantxoutsetTest(TurbocoinTestFramework): def set_test_params(self): self.num_nodes = 1 self.setup_clean_chain = True def skip_test_if_missing_module(self): self.skip_if_no_wallet() def run_test(self): self.log.info("Mining blocks...") self.nodes[0].generate(110) addr_P2SH_SEGWIT = self.nodes[0].getnewaddress("", "p2sh-segwit") pubk1 = self.nodes[0].getaddressinfo(addr_P2SH_SEGWIT)['pubkey'] addr_LEGACY = self.nodes[0].getnewaddress("", "legacy") pubk2 = self.nodes[0].getaddressinfo(addr_LEGACY)['pubkey'] addr_BECH32 = self.nodes[0].getnewaddress("", "bech32") pubk3 = self.nodes[0].getaddressinfo(addr_BECH32)['pubkey'] self.nodes[0].sendtoaddress(addr_P2SH_SEGWIT, 0.001) self.nodes[0].sendtoaddress(addr_LEGACY, 0.002) self.nodes[0].sendtoaddress(addr_BECH32, 0.004) #send to child keys of tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK self.nodes[0].sendtoaddress("mkHV1C6JLheLoUSSZYk7x3FH5tnx9bu7yc", 0.008) # (m/0'/0'/0') self.nodes[0].sendtoaddress("mipUSRmJAj2KrjSvsPQtnP8ynUon7FhpCR", 0.016) # (m/0'/0'/1') self.nodes[0].sendtoaddress("n37dAGe6Mq1HGM9t4b6rFEEsDGq7Fcgfqg", 0.032) # (m/0'/0'/1500') self.nodes[0].sendtoaddress("mqS9Rpg8nNLAzxFExsgFLCnzHBsoQ3PRM6", 0.064) # (m/0'/0'/0) self.nodes[0].sendtoaddress("mnTg5gVWr3rbhHaKjJv7EEEc76ZqHgSj4S", 0.128) # (m/0'/0'/1) self.nodes[0].sendtoaddress("mketCd6B9U9Uee1iCsppDJJBHfvi6U6ukC", 0.256) # (m/0'/0'/1500) self.nodes[0].sendtoaddress("mj8zFzrbBcdaWXowCQ1oPZ4qioBVzLzAp7", 0.512) # (m/1/1/0') self.nodes[0].sendtoaddress("mfnKpKQEftniaoE1iXuMMePQU3PUpcNisA", 1.024) # (m/1/1/1') self.nodes[0].sendtoaddress("mou6cB1kaP1nNJM1sryW6YRwnd4shTbXYQ", 2.048) # (m/1/1/1500') self.nodes[0].sendtoaddress("mtfUoUax9L4tzXARpw1oTGxWyoogp52KhJ", 4.096) # (m/1/1/0) self.nodes[0].sendtoaddress("mxp7w7j8S1Aq6L8StS2PqVvtt4HGxXEvdy", 8.192) # (m/1/1/1) self.nodes[0].sendtoaddress("mpQ8rokAhp1TAtJQR6F6TaUmjAWkAWYYBq", 16.384) # (m/1/1/1500) self.nodes[0].generate(1) self.log.info("Stop node, remove wallet, mine again some blocks...") self.stop_node(0) shutil.rmtree(os.path.join(self.nodes[0].datadir, "regtest", 'wallets')) self.start_node(0) self.nodes[0].generate(110) self.restart_node(0, ['-nowallet']) self.log.info("Test if we have found the non HD unspent outputs.") assert_equal(self.nodes[0].scantxoutset("start", [ "pkh(" + pubk1 + ")", "pkh(" + pubk2 + ")", "pkh(" + pubk3 + ")"])['total_amount'], Decimal("0.002")) assert_equal(self.nodes[0].scantxoutset("start", [ "wpkh(" + pubk1 + ")", "wpkh(" + pubk2 + ")", "wpkh(" + pubk3 + ")"])['total_amount'], Decimal("0.004")) assert_equal(self.nodes[0].scantxoutset("start", [ "sh(wpkh(" + pubk1 + "))", "sh(wpkh(" + pubk2 + "))", "sh(wpkh(" + pubk3 + "))"])['total_amount'], Decimal("0.001")) assert_equal(self.nodes[0].scantxoutset("start", [ "combo(" + pubk1 + ")", "combo(" + pubk2 + ")", "combo(" + pubk3 + ")"])['total_amount'], Decimal("0.007")) assert_equal(self.nodes[0].scantxoutset("start", [ "addr(" + addr_P2SH_SEGWIT + ")", "addr(" + addr_LEGACY + ")", "addr(" 
+ addr_BECH32 + ")"])['total_amount'], Decimal("0.007")) assert_equal(self.nodes[0].scantxoutset("start", [ "addr(" + addr_P2SH_SEGWIT + ")", "addr(" + addr_LEGACY + ")", "combo(" + pubk3 + ")"])['total_amount'], Decimal("0.007")) self.log.info("Test range validation.") assert_raises_rpc_error(-8, "End of range is too high", self.nodes[0].scantxoutset, "start", [ {"desc": "desc", "range": -1}]) assert_raises_rpc_error(-8, "Range should be greater or equal than 0", self.nodes[0].scantxoutset, "start", [ {"desc": "desc", "range": [-1, 10]}]) assert_raises_rpc_error(-8, "End of range is too high", self.nodes[0].scantxoutset, "start", [ {"desc": "desc", "range": [(2 << 31 + 1) - 1000000, (2 << 31 + 1)]}]) assert_raises_rpc_error(-8, "Range specified as [begin,end] must not have begin after end", self.nodes[0].scantxoutset, "start", [ {"desc": "desc", "range": [2, 1]}]) assert_raises_rpc_error(-8, "Range is too large", self.nodes[0].scantxoutset, "start", [ {"desc": "desc", "range": [0, 1000001]}]) self.log.info("Test extended key derivation.") # Run various scans, and verify that the sum of the amounts of the matches corresponds to the expected subset. # Note that all amounts in the UTXO set are powers of 2 multiplied by 0.001 TURBO, so each amounts uniquely identifies a subset. assert_equal(self.nodes[0].scantxoutset("start", [ "combo(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/0'/0h/0h)"])['total_amount'], Decimal("0.008")) assert_equal(self.nodes[0].scantxoutset("start", [ "combo(t
prv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/0'/0'/1h)"])['total_amount'], Decimal("0.016")) assert_equal(self.nodes[0].scantxoutset("start", [ "combo(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/0h/0'/1500')"])['total_amount'], Decimal("0.032")) assert_equal(self.nodes[0].scantxoutset("
start", [ "combo(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/0h/0h/0)"])['total_amount'], Decimal("0.064")) assert_equal(self.nodes[0].scantxoutset("start", [ "combo(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/0'/0h/1)"])['total_amount'], Decimal("0.128")) assert_equal(self.nodes[0].scantxoutset("start", [ "combo(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/0h/0'/1500)"])['total_amount'], Decimal("0.256")) assert_equal(self.nodes[0].scantxoutset("start", [ {"desc": "combo(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/0'/0h/*h)", "range": 1499}])['total_amount'], Decimal("0.024")) assert_equal(self.nodes[0].scantxoutset("start", [ {"desc": "combo(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/0'/0'/*h)", "range": 1500}])['total_amount'], Decimal("0.056")) assert_equal(self.nodes[0].scantxoutset("start", [ {"desc": "combo(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/0h/0'/*)", "range": 1499}])['total_amount'], Decimal("0.192")) assert_equal(self.nodes[0].scantxoutset("start", [ {"desc": "combo(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/0'/0h/*)", "range": 1500}])['total_amount'], Decimal("0.448")) assert_equal(self.nodes[0].scantxoutset("start", [ "combo(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/1/1/0')"])['total_amount'], Decimal("0.512")) assert_equal(self.nodes[0].scantxoutset("start", [ "combo(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/1/1/1')"])['total_amount'], Decimal("1.024")) assert_equal(self.nodes[0].scantxoutset("start", [ "combo(tprv8ZgxMBicQKsPd7Uf69XL1
eramirem/astroML
book_figures/chapter10/fig_wavelets.py
Python
bsd-2-clause
2,201
0.001363
""" Examples of Wavelets -------------------- Figure 10.9 Wavelets for several values of wavelet parameters Q and f0. Solid lines show the real part and dashed lines show the imaginary part (see eq. 10.16). """ # Author: Jake VanderPlas # License: BSD # The figure produced by this code is published in the textbook # "Statistics, Data Mining, and Machine Learning in Astronomy" (2013) # For more information, see http://astroML.github.com # To report a bug or issue, use the following forum: # https://groups.google.com/forum/#!forum/astroml-general import numpy as np from matplotlib import pyplot as plt from astroML.fourier import FT_continuous, IFT_continuous, sinegauss #---------------------------------------------------------------------- # This function adjusts matplotlib settings for a uniform feel in the textbook. # Note that with usetex=True, fonts are rendered with LaTeX. This may # result in an error if LaTeX is not installed on your system. In that case, # you can set usetex to False. from astroML.plotting import setup_text_plots setup_text_plots(fontsize=8, usetex=True) #-----------------------------------------------------
------- # Set up the wavelets t0 = 0 t = np.linspace(-0.4, 0.4, 10000) f0 = np.array([5, 5, 10, 10]) Q = np.array([1, 0.5, 1, 0.5]) # compute wavelets all at once W = sinegauss(t, t0, f0[:, None], Q[:, None]) #------------------------------------------------------------ # Plot the wavelets fig = plt.figure(figsize=(5, 3.75)) fig.subplots_adjust(hspace=0.05, wspace=0.05) # in each panel, plot and label a different wavelet for i in r
ange(4): ax = fig.add_subplot(221 + i) ax.plot(t, W[i].real, '-k') ax.plot(t, W[i].imag, '--k') ax.text(0.04, 0.95, "$f_0 = %i$\n$Q = %.1f$" % (f0[i], Q[i]), ha='left', va='top', transform=ax.transAxes) ax.set_ylim(-1.2, 1.2) ax.set_xlim(-0.35, 0.35) ax.xaxis.set_major_locator(plt.MultipleLocator(0.2)) if i in (0, 1): ax.xaxis.set_major_formatter(plt.NullFormatter()) else: ax.set_xlabel('$t$') if i in (1, 3): ax.yaxis.set_major_formatter(plt.NullFormatter()) else: ax.set_ylabel('$w(t)$') plt.show()
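For reference, the sine-Gaussian wavelet of eq. 10.16 plotted here is, in one common parameterization, a complex exponential under a Gaussian envelope whose width is set by Q. Check astroML.fourier.sinegauss for the library's exact convention; the sketch below only assumes this form:

import numpy as np

def sinegauss_sketch(t, t0, f0, Q):
    # w(t) = exp(-a (t - t0)^2) * exp(2 pi i f0 (t - t0)), with a = (f0 / Q)^2
    a = (f0 / Q) ** 2
    return np.exp(-a * (t - t0) ** 2) * np.exp(2j * np.pi * f0 * (t - t0))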
jgrizou/explauto
explauto/sensorimotor_model/inverse/__init__.py
Python
gpl-3.0
284
0.014085
fro
m .inverse import RandomInverseModel from .sciopt import BFGSInverseModel, COBYLAInverseModel from .nn import NNInverseModel from .wnn import WeightedNNInverseModel, ESWNNInverseModel from .cmamodel import CMAESInverseModel from .jacobian import JacobianInverseMod
el
kwailamchan/programming-languages
python/django/artdepot/artdepot/depot/migrations/0003_auto_20140930_2137.py
Python
mit
942
0.002123
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations import datetime class Migration(migrations.Migration): dependencies = [ ('depot', '0002_lineitem'), ] operations = [ migrations.CreateModel( name='Order', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('name', models.CharField(max_length=50)), ('address', models.T
extField()), ('email', models.EmailField(max_length=75)), ], options={ },
bases=(models.Model,), ), migrations.AddField( model_name='lineitem', name='order', field=models.ForeignKey(default=datetime.date(2014, 9, 30), to='depot.Order'), preserve_default=False, ), ]
pswaminathan/python_efficiency_tweaks
plots/plot_string_subst_bar.py
Python
gpl-3.0
686
0.008746
# Plotting performance of string_subst_.py scripts # bar chart of relative comparison with variances as error bars import numpy as np import matplotlib.pyplot as plt performance = [10.3882388499416,1,10.3212
281215746] variance = [0.790435196936213,0,0.827207394592818] scripts = ['string_subst_1.py', 'string_subst_2.py', 'string_subst_3.py'] x_pos = np.arange(len(scripts)) plt.bar(x_pos, performance, yerr=variance, align='center', alpha=0.5) plt.xticks(x_pos, scripts) plt.axhline(y=1, linestyle='--', color='black') plt.ylim([0,12]) plt.ylabel('rel. performance gain') plt.title('String subs
titution - Speed improvements') #plt.show() plt.savefig('PNGs/string_subst_bar.png')
andymckay/addons-server
src/olympia/amo/tests/test_decorators.py
Python
bsd-3-clause
5,822
0
from datetime import datetime, timedelta from django import http from django.conf import settings from django.core.exceptions import PermissionDenied import mock import pytest from olympia.amo.tests import BaseTestCase, TestCase from olympia.amo import decorators, get_user, set_user from olympia.amo.urlresolvers import reverse from olympia.users.models import UserProfile pytestmark = pytest.mark.django_db def test_post_required(): def func(request): return mock.sentinel.response g = decorators.post_required(func) request = mock.Mock() request.method = 'GET' assert isinstance(g(request), http.HttpResponseNotAllowed) request.method = 'POST' assert g(request) == mock.sentinel.response def test_json_view(): """Turns a Python object into a response.""" def func(request): return {'x': 1} response = decorators.json_view(func)(mock.Mock()) assert isinstance(response, http.HttpResponse) assert response.content == '{"x": 1}' assert response['Content-Type'] == 'application/json' assert response.status_code == 200 def test_json_view_normal_response(): """Normal responses get passed through.""" expected = http.HttpResponseForbidden() def func(request): return expected response = decorators.json_view(func)(mock.Mock()) assert expected is response assert response['Content-Type'] == 'text/html; charset=utf-8' def test_json_view_error(): """json_view.error returns 400 responses.""" response = decorators.json_view.error({'msg': 'error'}) assert isinstance(response, http.HttpResponseBadRequest) assert response.content == '{"msg": "error"}' assert response['Content-Type'] == 'application/json' def test_json_vie
w_status(): def func(request): return {'x': 1} response = decorators.json_view(func, status_code=202)(mock.Mock()) assert response.status_code == 202 def test_json_view_response_status(): response = decorators.json_response({'msg': 'error'}, status_code=202) assert response.content == '{"msg": "error"}' assert response['Content-Type'] == 'application/json' assert re
sponse.status_code == 202 class TestTaskUser(TestCase): fixtures = ['base/users'] def test_set_task_user(self): @decorators.set_task_user def some_func(): return get_user() set_user(UserProfile.objects.get(username='regularuser')) assert get_user().pk == 999 assert some_func().pk == int(settings.TASK_USER_ID) assert get_user().pk == 999 class TestLoginRequired(BaseTestCase): def setUp(self): super(TestLoginRequired, self).setUp() self.f = mock.Mock() self.f.__name__ = 'function' self.request = mock.Mock() self.request.user.is_authenticated.return_value = False self.request.get_full_path.return_value = 'path' def test_normal(self): func = decorators.login_required(self.f) response = func(self.request) assert not self.f.called assert response.status_code == 302 assert response['Location'] == ( '%s?to=%s' % (reverse('users.login'), 'path')) def test_no_redirect(self): func = decorators.login_required(self.f, redirect=False) response = func(self.request) assert not self.f.called assert response.status_code == 401 def test_decorator_syntax(self): # @login_required(redirect=False) func = decorators.login_required(redirect=False)(self.f) response = func(self.request) assert not self.f.called assert response.status_code == 401 def test_no_redirect_success(self): func = decorators.login_required(redirect=False)(self.f) self.request.user.is_authenticated.return_value = True func(self.request) assert self.f.called class TestSetModifiedOn(TestCase): fixtures = ['base/users'] @decorators.set_modified_on def some_method(self, worked): return worked def test_set_modified_on(self): users = list(UserProfile.objects.all()[:3]) self.some_method(True, set_modified_on=users) for user in users: assert UserProfile.objects.get(pk=user.pk).modified.date() == ( datetime.today().date()) def test_not_set_modified_on(self): yesterday = datetime.today() - timedelta(days=1) qs = UserProfile.objects.all() qs.update(modified=yesterday) users = list(qs[:3]) self.some_method(False, set_modified_on=users) for user in users: date = UserProfile.objects.get(pk=user.pk).modified.date() assert date < datetime.today().date() class TestPermissionRequired(TestCase): def setUp(self): super(TestPermissionRequired, self).setUp() self.f = mock.Mock() self.f.__name__ = 'function' self.request = mock.Mock() @mock.patch('olympia.access.acl.action_allowed') def test_permission_not_allowed(self, action_allowed): action_allowed.return_value = False func = decorators.permission_required('', '')(self.f) with self.assertRaises(PermissionDenied): func(self.request) @mock.patch('olympia.access.acl.action_allowed') def test_permission_allowed(self, action_allowed): action_allowed.return_value = True func = decorators.permission_required('', '')(self.f) func(self.request) assert self.f.called @mock.patch('olympia.access.acl.action_allowed') def test_permission_allowed_correctly(self, action_allowed): func = decorators.permission_required('Admin', '%')(self.f) func(self.request) action_allowed.assert_called_with(self.request, 'Admin', '%')
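For context, here is a decorator with exactly the behavior test_post_required checks for, returning 405 for anything but POST (a reconstruction; olympia's real implementation lives in olympia.amo.decorators and may differ):

import functools
from django import http

def post_required(view):
    @functools.wraps(view)
    def wrapper(request, *args, **kwargs):
        if request.method != 'POST':
            return http.HttpResponseNotAllowed(['POST'])
        return view(request, *args, **kwargs)
    return wrapper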