code stringlengths 3 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1
value | license stringclasses 15
values | size int64 3 1.05M |
|---|---|---|---|---|---|
"""
edraak_i18n app url
"""
from django.conf.urls import patterns, url
from edunext_openedx_extensions.edraak_i18n.views import set_language
# URL route for the Edraak language switcher view.
# NOTE(review): `patterns()` was deprecated in Django 1.8 and removed in 1.10;
# a plain list of url() entries is the modern equivalent — confirm the pinned
# Django version before migrating.
urlpatterns = patterns(
    '',
    url(r'^changelang/$', set_language, name='edraak_setlang'),
)
| eduNEXT/edunext-openedx-extensions | edunext_openedx_extensions/edraak_i18n/urls.py | Python | agpl-3.0 | 243 |
#! /usr/bin/env python
# coding: utf-8
# Python Script Collection for GEOS-Chem Chemistry Transport Model (gchem)
# Copyright (C) 2012 Gerrit Kuhlmann
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""\
Miscellaneous routine(s) (gchem.misc), which currently don't fit
somewhere else.
"""
import collections
import datetime
import struct
import numpy
import uff
def iter_dates(start, end, step=1):
    """\
    Iterate over datetime objects from `start` (inclusive) till `end`
    (exclusive) with `step` (default 1) days.

    Example:
    >>> from datetime import datetime
    >>> for date in iter_dates(datetime(2011,1,1), datetime(2011,2,1)):
    ...     print(date)
    """
    current = start
    # BUG FIX: the increment was hard-coded to one day, so the documented
    # `step` parameter was silently ignored.
    delta = datetime.timedelta(days=step)
    while current < end:
        yield current
        current += delta
def tau2time(tau, reference=datetime.datetime(1985,1,1)):
    """Convert `tau` hours since `reference` (default 1985-01-01 00:00)
    into a datetime object."""
    offset = datetime.timedelta(hours=tau)
    return reference + offset
def time2tau(time, reference=datetime.datetime(1985,1,1)):
    """Convert a datetime object into fractional hours since `reference`
    (default 1985-01-01 00:00)."""
    seconds_per_hour = 3600.0
    elapsed = time - reference
    return elapsed.total_seconds() / seconds_per_hour
def read_gmao(filename, endian='>', skip_rows=1):
    """\
    read_gmao(filename, endian='>', skip_rows=1)

    Read GMAO met fields from `filename`. Data are returned as nested
    dictionary with: field_name -> timestamp -> data.
    """
    # Known flat record sizes mapped to their Fortran-order grid shapes.
    SIZE2SHAPE = {
        943488: (144,91,72),
        956592: (144,91,73),
        13104: (144,91)
    }
    data = collections.defaultdict(dict)
    with uff.FortranFile(filename, 'rb', endian=endian) as f:
        # Skip header record(s).
        for i in range(skip_rows):
            f.readline()
        while True:
            try:
                name = f.readline().strip()
                content = f.readline('ii*f')
                # First two ints are date (YYYYMMDD) and time (HHMMSS).
                time = '%08d %06d' % content[:2]
                time = datetime.datetime.strptime(time, '%Y%m%d %H%M%S')
                values = numpy.array(content[2:])
                try:
                    values = values.reshape(SIZE2SHAPE[values.size], order='F')
                except (KeyError, ValueError):
                    # BUG FIX: the old Py2 form `except KeyError, ValueError:`
                    # caught only KeyError and bound it to the name
                    # `ValueError`, so a failed reshape propagated. Records
                    # with unknown sizes are kept as flat arrays.
                    pass
                data[name][time] = values
            except EOFError:
                break
    return data
| gkuhl/gchem | gchem/misc.py | Python | gpl-3.0 | 2,831 |
#!/usr/bin/env python
# coding : utf-8
"""
CUI interface for VOI analyzer.
"""
import pandas as pd
import argparse
from base import voi_analysis
if __name__ == "__main__":
    # Command line: one or more images, a required VOI map, a required CSV
    # output path, and an optional look-up table.
    parser = argparse.ArgumentParser(description="Extract VOI statistics")
    parser.add_argument('images', nargs='+', type=str,
                        help="Images to extract Statistics")
    parser.add_argument('-v', '--voi', dest='voi', nargs=1, type=str,
                        required=True, help="VOI map file")
    parser.add_argument('-o', '--out', dest='out', nargs=1, type=str,
                        required=True, help="Output file [CSV]")
    parser.add_argument('-l', '--lut', dest='lut', nargs=1, type=str,
                        default=[None], help="VOI look-up table file")
    args = parser.parse_args()
    # One statistics table per image, stacked into a single CSV.
    tables = [voi_analysis(image, args.voi[0], lut_file=args.lut[0])
              for image in args.images]
    pd.concat(tables).to_csv(args.out[0], index=False)
| spikefairway/VOIAnalyzer | VOIAnalyzerCUI.py | Python | mit | 1,113 |
"""2
3
: :
=+.+.
:T:Z
.+.+.
E:J:
1
O
T Stuff({"сокровище": 1})
E Exit(LEFT)
J Stuff({"дубина": 1})
Z EffectorSquare(lambda: Sleep(3, Position(1,0,0)))
"""
| nzinov/maze | test/1.py | Python | gpl-3.0 | 174 |
# #START_LICENSE###########################################################
#
#
# This file is part of the Environment for Tree Exploration program
# (ETE). http://etetoolkit.org
#
# ETE is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ETE is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
# License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ETE. If not, see <http://www.gnu.org/licenses/>.
#
#
# ABOUT THE ETE PACKAGE
# =====================
#
# ETE is distributed under the GPL copyleft license (2008-2015).
#
# If you make use of ETE in published work, please cite:
#
# Jaime Huerta-Cepas, Joaquin Dopazo and Toni Gabaldon.
# ETE: a python Environment for Tree Exploration. Jaime BMC
# Bioinformatics 2010,:24doi:10.1186/1471-2105-11-24
#
# Note that extra references to the specific methods implemented in
# the toolkit may be available in the documentation.
#
# More info at http://etetoolkit.org. Contact: [email protected]
#
#
# #END_LICENSE#############################################################
from __future__ import absolute_import
import os
import sys
import logging
log = logging.getLogger("main")
from ..master_task import AlgTask
from ..master_job import Job
from ..utils import (read_fasta, OrderedDict, GLOBALS, pjoin)
__all__ = ["Clustalo"]
class Clustalo(AlgTask):
    """Alignment task that runs Clustal-Omega on a multi-sequence FASTA file."""
    def __init__(self, nodeid, multiseq_file, seqtype, conf, confname):
        # Register the tool so it appears in the citation list.
        GLOBALS["citator"].add('clustalo')
        # '-i'/'-o' are placeholders, filled in later by load_jobs().
        base_args = OrderedDict({
            '-i': None,
            '-o': None,
            '--outfmt': "fa",
        })
        self.confname = confname
        self.conf = conf
        # Initialize task
        AlgTask.__init__(self, nodeid, "alg", "Clustal-Omega",
                         base_args, self.conf[self.confname])
        self.seqtype = seqtype
        self.multiseq_file = multiseq_file
        self.init()
    def load_jobs(self):
        """Create the single Clustal-Omega job that aligns the input file."""
        appname = self.conf[self.confname]["_app"]
        # Only one Clustal-Omega job is necessary to run this task
        args = OrderedDict(self.args)
        args["-i"] = pjoin(GLOBALS["input_dir"], self.multiseq_file)
        args["-o"] = "clustalo_alg.fasta"
        job = Job(self.conf["app"][appname], args, parent_ids=[self.nodeid])
        job.cores = self.conf["threading"].get(appname, 1)
        job.add_input_file(self.multiseq_file)
        self.jobs.append(job)
    def finish(self):
        """Store the alignment in both fasta and relaxed-phylip formats."""
        # Once executed, alignment is converted into relaxed
        # interleaved phylip format.
        alg_file = os.path.join(self.jobs[0].jobdir, "clustalo_alg.fasta")
        # ClustalO returns a tricky fasta file
        alg = read_fasta(alg_file, header_delimiter=" ")
        fasta = alg.write(format="fasta")
        phylip = alg.write(format="iphylip_relaxed")
        AlgTask.store_data(self, fasta, phylip)
| karrtikr/ete | ete3/tools/phylobuild_lib/task/clustalo.py | Python | gpl-3.0 | 3,233 |
# Definition for singly-linked list.
class ListNode:
    def __init__(self, x):
        self.val = x
        self.next = None


class Solution:
    def deleteDuplicates(self, head: ListNode) -> ListNode:
        """Remove every value that occurs more than once in the sorted list,
        returning the head of the remaining nodes."""
        sentinel = ListNode(-1)
        sentinel.next = head
        prev = sentinel
        while prev.next is not None and prev.next.next is not None:
            cur = prev.next
            if cur.val == cur.next.val:
                # Unlink the entire run of nodes sharing this value.
                dup = cur.val
                while prev.next is not None and prev.next.val == dup:
                    prev.next = prev.next.next
            else:
                prev = prev.next
        return sentinel.next
| jiadaizhao/LeetCode | 0001-0100/0082-Remove Duplicates from Sorted List II/0082-Remove Duplicates from Sorted List II.py | Python | mit | 539 |
import asyncio
import io
import re
import sys
# The display classes deal with output from subprocesses. The FancyDisplay
# gives a multi-line, real-time view of each running process that looks nice in
# the terminal. The VerboseDisplay collects output from each job and prints it
# all when the job is finished, in a way that's suitable for logs. The
# QuietDisplay prints nothing.
#
# All of the display types inherit from BaseDisplay and provide the same
# interface. Callers use get_handle() to get a display handle for each
# subprocess job that's going to run. The handle is used as a context manager
# (inside a with statement) to indicate when the job is starting and stopping,
# and all of the output from the subprocess is passed to the handle's write()
# method. There is also a print() method on the display, for output that's not
# tied to a particular job, which prints to the terminal in a way that won't
# get stomped on by FancyDisplay's redrawing.
#
# Like other errors, we handle job errors by throwing a PrintableError, which
# get caught in main. So the displays don't need to do anything special to show
# errors.
# ANSI escape sequences used by FancyDisplay to redraw its status area in
# place (erase previous frame, and disable wrapping so long lines don't
# push the cursor down).
ANSI_CURSOR_UP_ONE_LINE = '\x1b[1A'
ANSI_CLEAR_LINE = '\x1b[2K'
ANSI_DISABLE_LINE_WRAP = '\x1b[?7l'
ANSI_ENABLE_LINE_WRAP = '\x1b[?7h'
class BaseDisplay:
    """Common plumbing shared by every display type.

    Tracks a per-job id, title and output buffer, and funnels handle
    events into the _job_* callbacks that subclasses override.
    """

    def __init__(self, output=None):
        # Fall back to stdout when no explicit stream is supplied.
        self.output = output or sys.stdout
        # Monotonically increasing id handed to each new job/handle.
        self._next_job_id = 0
        # job_id -> StringIO accumulating everything the job has written.
        self.buffers = {}
        # job_id -> human-readable title, e.g. the module being fetched.
        self.titles = {}
        # Handles created but not yet finished; lets the FancyDisplay know
        # when the final cleanup can happen.
        self.outstanding_jobs = set()

    def get_handle(self, title):
        """Allocate a fresh job id and return its context-manager handle."""
        job_id = self._next_job_id
        self._next_job_id = job_id + 1
        self.outstanding_jobs.add(job_id)
        self.titles[job_id] = title
        self.buffers[job_id] = io.StringIO()
        return _DisplayHandle(self, job_id)

    def print(self, *args, **kwargs):
        """Print directly to the display's stream (FancyDisplay overrides
        this to avoid conflicting with its redraws)."""
        print(*args, file=self.output, **kwargs)

    # --- callbacks for subclasses to override ---------------------------
    def _job_started(self, job_id):
        pass

    def _job_written(self, job_id, string):
        pass

    def _job_finished(self, job_id):
        pass

    # --- entry points used by _DisplayHandle ----------------------------
    def _handle_start(self, job_id):
        self._job_started(job_id)

    def _handle_write(self, job_id, string):
        self.buffers[job_id].write(string)
        self._job_written(job_id, string)

    def _handle_finish(self, job_id):
        self.outstanding_jobs.remove(job_id)
        self._job_finished(job_id)
class QuietDisplay(BaseDisplay):
    '''Prints nothing: inherits the no-op _job_* callbacks unchanged.'''
    pass
class VerboseDisplay(BaseDisplay):
    '''Log-friendly display: buffers each job's output and emits it all at
    once when the job finishes, so concurrent jobs never interleave. Each
    job is bracketed by '===' marker lines to separate it from other
    output.'''

    def _marker(self, *words):
        # Print one '===' delimited marker line to the display's stream.
        print(*words, file=self.output)

    def _job_started(self, job_id):
        self._marker('===', 'started', self.titles[job_id], '===')

    def _job_finished(self, job_id):
        self._marker('===', 'finished', self.titles[job_id], '===')
        buffered = self.buffers[job_id].getvalue()
        if buffered:
            self.output.write(buffered)
        self._marker('===')
class FancyDisplay(BaseDisplay):
    '''Prints a multi-line, real-time display of all the latest output lines
    from each job.'''
    def __init__(self, *args):
        super().__init__(*args)
        # Every time we draw we need to erase the lines that were printed
        # before. This keeps track of that number. Note that we split output on
        # newlines and use no-wrap control codes in the terminal, so we only
        # need to count the number of jobs drawn.
        self._lines_printed = 0
        # This is the list of all active jobs. There's no guarantee that jobs
        # start in any particular order, so this list also helps us keep the
        # order stable.
        self._job_slots = []
        # The last line output from each job. This is what gets drawn.
        self._output_lines = {}
        # Lines that need to be printed above the display. This has to happen
        # during the next draw, right after the display is cleared.
        self._to_print = []
        # To avoid flicker, we draw on a short timeout instead of every time we
        # receive output. When this asyncio handle is set, it means a draw is
        # already pending.
        self._draw_later_handle = None
    def print(self, *args, **kwargs):
        '''Queue regular output so it ends up above the job display.'''
        output = io.StringIO()
        print(*args, file=output, **kwargs)
        self._to_print.append(output.getvalue())
        # If we use _draw_later, the program might exit before the draw timer
        # fires. Drawing right now ensures that output never gets dropped.
        self._draw()
    def _draw(self):
        '''Erase the previous frame and repaint queued text plus one status
        line per active job.'''
        self._cancel_draw_later()
        # Erase everything we printed before.
        for i in range(self._lines_printed):
            self.output.write(ANSI_CURSOR_UP_ONE_LINE)
            self.output.write(ANSI_CLEAR_LINE)
        self._lines_printed = 0
        # If we have any lines from print(), print them now. They will end up
        # above the display like regular output.
        for string in self._to_print:
            self.output.write(string)
        self._to_print.clear()
        # Redraw all the jobs.
        self.output.write(ANSI_DISABLE_LINE_WRAP)
        for slot, job_id in enumerate(self._job_slots):
            # Fancy unicode box characters in the left column.
            if slot == 0:
                self.output.write('┌' if len(self._job_slots) > 1 else '╶')
            elif slot < len(self._job_slots) - 1:
                self.output.write('├')
            else:
                self.output.write('└')
            self.output.write(' ')
            self.output.write(self.titles[job_id])
            self.output.write(': ')
            self.output.write(self._output_lines[job_id])
            # Some terminals keep overwriting the last character in no-wrap
            # mode. Make the trailing character a space.
            self.output.write(' ')
            self.output.write('\n')
            self._lines_printed += 1
        self.output.write(ANSI_ENABLE_LINE_WRAP)
        # Finally, flush output to the terminal. Hopefully everything gets
        # painted in one frame.
        self.output.flush()
    def _draw_later(self):
        '''Schedule a draw ~100ms from now, coalescing bursts of output.'''
        if self._draw_later_handle:
            # There is already a draw pending.
            return
        self._draw_later_handle = asyncio.get_event_loop().call_later(
            0.1, self._draw)
    def _cancel_draw_later(self):
        '''Cancel any pending scheduled draw.'''
        if self._draw_later_handle:
            self._draw_later_handle.cancel()
            self._draw_later_handle = None
    def _job_started(self, job_id):
        '''Give the new job a display slot and schedule a repaint.'''
        self._job_slots.append(job_id)
        self._output_lines[job_id] = ''
        self._draw_later()
    def _job_written(self, job_id, string):
        '''Remember the job's most recent non-empty output line.'''
        # We need to split output on newlines. Some programs (git) also use
        # carriage return to redraw a line, so we split on that too.
        any_newlines = '(?:\n|\r)+'  # (?: is non-capturing, for split()
        lines = [line.strip() for line in re.split(any_newlines, string)]
        # NB: We don't make any attempt here to join lines that might span
        # multiple write() calls. `create_subprocess_with_handle()` reads
        # output in 4096 byte chunks, so this isn't likely, but it's possible.
        for line in lines:
            # Ignore empty lines, both from the job and from re.split().
            if line:
                self._output_lines[job_id] = line
        self._draw_later()
    def _job_finished(self, job_id):
        '''Drop the job's slot; repaint immediately if it was the last job.'''
        self._job_slots.remove(job_id)
        if not self.outstanding_jobs:
            # If the last job is finished, the event loop might be about to
            # stop. Clear the terminal right now, because _draw_later might
            # never run.
            self._draw()
        else:
            # If there are pending jobs, don't clear the display immediately.
            # This avoids flickering between jobs when only one job is running
            # at a time (-j1).
            self._draw_later()
class _DisplayHandle:
def __init__(self, display, job_id):
self._display = display
self._job_id = job_id
self._opened = False
self._closed = False
def write(self, string):
assert self._opened and not self._closed
self._display._handle_write(self._job_id, string)
# Context manager interface. We're extra careful to make sure that the
# handle is only written to inside a with statment, and only used once.
def __enter__(self):
assert not self._opened and not self._closed
self._opened = True
self._display._handle_start(self._job_id)
return self
def __exit__(self, *args):
assert self._opened and not self._closed
self._display._handle_finish(self._job_id)
self._job_id = None
self._closed = True
| oconnor663/peru | peru/display.py | Python | mit | 9,334 |
# Copyright 2010-2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Base Test Case for all Unit Tests"""
import contextlib
import gc
import logging
import os
import os.path
import sys
import weakref
import eventlet.timeout
import fixtures
import mock
from oslo.config import cfg
from oslo.messaging import conffixture as messaging_conffixture
import testtools
from neutron.common import config
from neutron.common import rpc as n_rpc
from neutron.db import agentschedulers_db
from neutron import manager
from neutron.tests import fake_notifier
from neutron.tests import post_mortem_debug
# Global oslo.config object; 'state_path' is registered by neutron.common.config.
CONF = cfg.CONF
CONF.import_opt('state_path', 'neutron.common.config')
# Values of the OS_* environment toggles that count as "enabled".
TRUE_STRING = ['True', '1']
LOG_FORMAT = "%(asctime)s %(levelname)8s [%(name)s] %(message)s"
# Location of the test etc/ directory, relative to this file.
ROOTDIR = os.path.dirname(__file__)
ETCDIR = os.path.join(ROOTDIR, 'etc')
def etcdir(*p):
    """Return a path inside the test `etc` directory (joins *p onto ETCDIR)."""
    return os.path.join(ETCDIR, *p)
def fake_use_fatal_exceptions(*args):
    """Monkeypatch target: make NeutronException.use_fatal_exceptions
    always return True during tests."""
    return True
def fake_consume_in_threads(self):
    """Monkeypatch target: prevent RPC listeners from actually starting
    during tests by returning no listener threads."""
    return []
class BaseTestCase(testtools.TestCase):
    """Common base for Neutron unit tests.

    Sets up configuration, logging capture, temp directories, RPC fakes
    and the cleanup fixtures shared by the whole test suite. Most behavior
    is toggled through OS_* environment variables (see setUp).
    """
    def cleanup_core_plugin(self):
        """Ensure that the core plugin is deallocated."""
        nm = manager.NeutronManager
        if not nm.has_instance():
            return
        #TODO(marun) Fix plugins that do not properly initialize notifiers
        agentschedulers_db.AgentSchedulerDbMixin.agent_notifiers = {}
        # Perform a check for deallocation only if explicitly
        # configured to do so since calling gc.collect() after every
        # test increases test suite execution time by ~50%.
        check_plugin_deallocation = (
            os.environ.get('OS_CHECK_PLUGIN_DEALLOCATION') in TRUE_STRING)
        if check_plugin_deallocation:
            plugin = weakref.ref(nm._instance.plugin)
        nm.clear_instance()
        if check_plugin_deallocation:
            gc.collect()
            #TODO(marun) Ensure that mocks are deallocated?
            if plugin() and not isinstance(plugin(), mock.Base):
                self.fail('The plugin for this test was not deallocated.')
    def setup_coreplugin(self, core_plugin=None):
        """Override the configured core plugin for this test, if given."""
        if core_plugin is not None:
            cfg.CONF.set_override('core_plugin', core_plugin)
    def setup_notification_driver(self, notification_driver=None):
        """Route notifications through the fake notifier (reset on cleanup)."""
        self.addCleanup(fake_notifier.reset)
        if notification_driver is None:
            notification_driver = [fake_notifier.__name__]
        cfg.CONF.set_override("notification_driver", notification_driver)
    @staticmethod
    def config_parse(conf=None, args=None):
        """Create the default configurations."""
        # neutron.conf.test includes rpc_backend which needs to be cleaned up
        if args is None:
            args = ['--config-file', etcdir('neutron.conf.test')]
        if conf is None:
            config.init(args=args)
        else:
            conf(args)
    def setUp(self):
        """Assemble the standard fixture stack used by every Neutron test."""
        super(BaseTestCase, self).setUp()
        # Ensure plugin cleanup is triggered last so that
        # test-specific cleanup has a chance to release references.
        self.addCleanup(self.cleanup_core_plugin)
        # Configure this first to ensure pm debugging support for setUp()
        if os.environ.get('OS_POST_MORTEM_DEBUG') in TRUE_STRING:
            self.addOnException(post_mortem_debug.exception_handler)
        if os.environ.get('OS_DEBUG') in TRUE_STRING:
            _level = logging.DEBUG
        else:
            _level = logging.INFO
        capture_logs = os.environ.get('OS_LOG_CAPTURE') in TRUE_STRING
        if not capture_logs:
            logging.basicConfig(format=LOG_FORMAT, level=_level)
        self.log_fixture = self.useFixture(
            fixtures.FakeLogger(
                format=LOG_FORMAT,
                level=_level,
                nuke_handlers=capture_logs,
            ))
        # suppress all but errors here
        self.useFixture(
            fixtures.FakeLogger(
                name='neutron.api.extensions',
                format=LOG_FORMAT,
                level=logging.ERROR,
                nuke_handlers=capture_logs,
            ))
        test_timeout = int(os.environ.get('OS_TEST_TIMEOUT', 0))
        # -1 disables the per-test timeout entirely (handy when debugging).
        if test_timeout == -1:
            test_timeout = 0
        if test_timeout > 0:
            self.useFixture(fixtures.Timeout(test_timeout, gentle=True))
        # If someone does use tempfile directly, ensure that it's cleaned up
        self.useFixture(fixtures.NestedTempfile())
        self.useFixture(fixtures.TempHomeDir())
        self.temp_dir = self.useFixture(fixtures.TempDir()).path
        cfg.CONF.set_override('state_path', self.temp_dir)
        self.addCleanup(mock.patch.stopall)
        self.addCleanup(CONF.reset)
        if os.environ.get('OS_STDOUT_CAPTURE') in TRUE_STRING:
            stdout = self.useFixture(fixtures.StringStream('stdout')).stream
            self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout))
        if os.environ.get('OS_STDERR_CAPTURE') in TRUE_STRING:
            stderr = self.useFixture(fixtures.StringStream('stderr')).stream
            self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr))
        self.useFixture(fixtures.MonkeyPatch(
            'neutron.common.exceptions.NeutronException.use_fatal_exceptions',
            fake_use_fatal_exceptions))
        # don't actually start RPC listeners when testing
        self.useFixture(fixtures.MonkeyPatch(
            'neutron.common.rpc.Connection.consume_in_threads',
            fake_consume_in_threads))
        self.useFixture(fixtures.MonkeyPatch(
            'oslo.messaging.Notifier', fake_notifier.FakeNotifier))
        self.messaging_conf = messaging_conffixture.ConfFixture(CONF)
        self.messaging_conf.transport_driver = 'fake'
        self.messaging_conf.response_timeout = 15
        self.useFixture(self.messaging_conf)
        self.addCleanup(n_rpc.clear_extra_exmods)
        n_rpc.add_extra_exmods('neutron.test')
        self.addCleanup(n_rpc.cleanup)
        n_rpc.init(CONF)
        if sys.version_info < (2, 7) and getattr(self, 'fmt', '') == 'xml':
            raise self.skipException('XML Testing Skipped in Py26')
    def config(self, **kw):
        """Override some configuration values.
        The keyword arguments are the names of configuration options to
        override and their values.
        If a group argument is supplied, the overrides are applied to
        the specified configuration option group.
        All overrides are automatically cleared at the end of the current
        test by the fixtures cleanup process.
        """
        group = kw.pop('group', None)
        # NOTE: iteritems() is Python 2 only; use items() if ported to Py3.
        for k, v in kw.iteritems():
            CONF.set_override(k, v, group)
    @contextlib.contextmanager
    def assert_max_execution_time(self, max_execution_time=5):
        """Fail the test if the wrapped block runs longer than
        `max_execution_time` seconds (via eventlet's Timeout)."""
        with eventlet.timeout.Timeout(max_execution_time, False):
            yield
            return
        # Reached only when the eventlet Timeout fires: with exception=False
        # the timeout silently aborts the with-block, skipping the `return`.
        self.fail('Execution of this test timed out')
| onecloud/neutron | neutron/tests/base.py | Python | apache-2.0 | 7,506 |
#!/bin/python
import sys
# Python 2 script: read the array size and the space-separated elements.
n = int(raw_input().strip())
A = map(int,raw_input().strip().split(' '))
# For each value remember the index where it was last seen; the minimum
# distance between equal values is the smallest gap between consecutive
# occurrences of the same value.
res = sys.maxint
dic = {}
for i in xrange(n):
    if A[i] not in dic:
        dic[A[i]] = i
    else:
        res = min(res, i - dic[A[i]])
        dic[A[i]] = i
print res if res < sys.maxint else -1
| shree-shubham/Unitype | Minimum Distances.py | Python | gpl-3.0 | 301 |
from functools import wraps
import json
import decimal
from django.core.serializers import serialize
from django.core.serializers.json import DjangoJSONEncoder
from django.db.models.query import QuerySet
from django.http import HttpResponse, HttpResponseBadRequest
class EDXJSONEncoder(DjangoJSONEncoder):
    """
    JSON encoder that serializes Decimal values numerically.

    Since Django 1.5, DjangoJSONEncoder delegates to Python's json module
    (see https://docs.djangoproject.com/en/1.8/releases/1.5/#system-version-of-simplejson-no-longer-used),
    which would stringify Decimal objects. To keep the historical Django 1.4
    behaviour, a Decimal with no fractional part is emitted as an `int` and
    anything else as a `float`; every other type uses the parent encoder.
    """
    def default(self, o):  # pylint: disable=method-hidden
        """Encode Decimal as int (whole values) or float; defer otherwise."""
        if not isinstance(o, decimal.Decimal):
            return super(EDXJSONEncoder, self).default(o)
        return int(o) if o == o.to_integral() else float(o)
def expect_json(view_function):
    """
    View decorator for requests that may carry a JSON body.

    When the request's CONTENT_TYPE is application/json (possibly with a
    charset suffix) and a body is present, the parsed value is attached as
    ``request.json``; otherwise ``request.json`` is an empty dict. A
    malformed body short-circuits with a JSON 400 response.
    """
    @wraps(view_function)
    def parse_json_into_request(request, *args, **kwargs):
        # The POST 'content-type' header can include extras such as
        # 'charset', so test for containment rather than equality.
        content_type = request.META.get('CONTENT_TYPE', '')
        if request.body and "application/json" in content_type:
            try:
                request.json = json.loads(request.body)
            except ValueError:
                return JsonResponseBadRequest({"error": "Invalid JSON"})
        else:
            request.json = {}
        return view_function(request, *args, **kwargs)
    return parse_json_into_request
class JsonResponse(HttpResponse):
    """
    HttpResponse specialised for JSON payloads.

    Empty/None input produces an empty body with status 204; QuerySets are
    serialized with Django's 'json' serializer; anything else goes through
    ``json.dumps`` with the supplied encoder.
    """
    def __init__(self, resp_obj=None, status=None, encoder=EDXJSONEncoder,
                 *args, **kwargs):
        if resp_obj in (None, ""):
            content = ""
            # No payload: default to 204 No Content unless told otherwise.
            status = status or 204
        elif isinstance(resp_obj, QuerySet):
            content = serialize('json', resp_obj)
        else:
            content = json.dumps(resp_obj, cls=encoder, indent=2,
                                 ensure_ascii=False)
        kwargs.setdefault("content_type", "application/json")
        if status:
            kwargs["status"] = status
        super(JsonResponse, self).__init__(content, *args, **kwargs)
class JsonResponseBadRequest(HttpResponseBadRequest):
    """
    HTTP 400 (or caller-supplied status) response whose body is JSON.

    Use this to send a bad-request response together with a JSON object.
    Defaults:
        obj: empty -> empty body
        status: 400
        encoder: DjangoJSONEncoder
    """
    def __init__(self, obj=None, status=400, encoder=DjangoJSONEncoder, *args, **kwargs):
        empty = obj in (None, "")
        content = "" if empty else json.dumps(obj, cls=encoder, indent=2,
                                              ensure_ascii=False)
        kwargs.setdefault("content_type", "application/json")
        kwargs["status"] = status
        super(JsonResponseBadRequest, self).__init__(content, *args, **kwargs)
| solashirai/edx-platform | common/djangoapps/util/json_request.py | Python | agpl-3.0 | 3,842 |
"""
Cohorts API serializers.
"""
from django.contrib.auth.models import User # lint-amnesty, pylint: disable=imported-auth-user
from rest_framework import serializers
class CohortUsersAPISerializer(serializers.ModelSerializer):
    """
    Serializer for cohort users: username, email, and a computed
    full-name field.
    """
    # 'name' is derived from first_name/last_name via get_full_name below.
    name = serializers.SerializerMethodField('get_full_name')

    def get_full_name(self, model):
        """Build the user's display name from first and last name."""
        return '{} {}'.format(model.first_name, model.last_name)

    class Meta:
        model = User
        fields = ('username', 'email', 'name')
| eduNEXT/edx-platform | openedx/core/djangoapps/course_groups/serializers.py | Python | agpl-3.0 | 569 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-02-14 16:41
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: add a nullable `owner` foreign key
    (to the configured user model) on the `App` model."""
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('core', '0006_auto_20160209_2232'),
    ]
    operations = [
        migrations.AddField(
            model_name='app',
            name='owner',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
    ]
| inmagik/pollicino | pollicino/core/migrations/0007_app_owner.py | Python | mit | 657 |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
from nerve import files
from nerve.logs import log
from nerve import types
from nerve.objects import public, is_public, join_path, ObjectNode
from nerve import modules
from nerve.core import QueryHandler, Request, Controller, View, Server, Model, Device, PyCodeQuery, SymbolicLink, NotFoundError, ControllerError, TextView, PlainTextView, JsonView, FileView, HTMLView, delistify, singleton
from nerve import events
from nerve.db import Database
from nerve.threads import Thread
from nerve import asyncs
from nerve import users
from nerve.main import Main, loop, new_root, root, quit, save_config, set_object, get_object, del_object, has_object, register_scheme, query, subscribe, unsubscribe
| transistorfet/nerve | nerve/__init__.py | Python | gpl-3.0 | 737 |
#!/usr/bin/python
# Copyright (C) 2010-2014 by the Free Software Foundation, Inc.
#
# This file is part of mailman.client.
#
# mailman.client is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, version 3 of the License.
#
# mailman.client is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
# License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with mailman.client. If not, see <http://www.gnu.org/licenses/>.
#
# This file is part of the Mailman CLI Project, Google Summer Of Code, 2014
#
# Author : Rajeev S <[email protected]>
# Mentors : Stephen J. Turnbull <[email protected]>
# Abhilash Raj <[email protected]>
# Barry Warsaw <[email protected]>
from mailmanclient import Client, MailmanConnectionError
from mailman.config import config
from mailmanclient.cli.lib.utils import Utils
class MailmanUtils(Utils):
    """ Utilities relating to the Mailman
        Client or the REST API
    """
    def __init__(self):
        # Load the Mailman configuration so the schema lookups below work.
        config.load()
    def connect(self, *args, **kwargs):
        """ Connect to the Mailman REST API using the arguments specified.
            Missing arguments are taken from the mailman.cfg file.
            Returns a client object; exits the process on connection failure.
        """
        host, port, username, password = self.get_credentials_from_config()
        # Explicit (truthy) keyword arguments override mailman.cfg values.
        if 'host' in kwargs and kwargs['host']:
            host = kwargs['host']
        if 'port' in kwargs and kwargs['port']:
            port = kwargs['port']
        if 'username' in kwargs and kwargs['username']:
            username = kwargs['username']
        if 'password' in kwargs and kwargs['password']:
            password = kwargs['password']
        client = Client('%s:%s/3.0' % (host, port),
                        username,
                        password)
        try:
            # Touch a simple endpoint to verify connectivity/credentials.
            client.system
        except MailmanConnectionError as e:
            self.error(e)
            exit(1)
        return client
    def get_credentials_from_config(self):
        """ Return (host, port, username, password) for the Mailman REST
            API, read from the Mailman configuration.
        """
        host = 'http://' + config.schema['webservice']['hostname']
        port = config.schema['webservice']['port']
        username = config.schema['webservice']['admin_user']
        password = config.schema['webservice']['admin_pass']
        return host, port, username, password
    def get_new_domain_name(self):
        """ Generates the name of a non existent domain """
        client = self.connect()
        while True:
            domain_name = self.get_random_string(10) + '.com'
            try:
                # A successful lookup means the name is taken; try another.
                client.get_domain(domain_name)
                continue
            except Exception:
                # Lookup failure means the name is free to use.
                return domain_name
    def add_shell_vars(self, arg, shell):
        """ Replaces a variable used in the command with its respective
            value if the value is present in the shell environment, else
            uses the variable as such.
        """
        if not shell.env_on or not arg:
            return arg
        # `$name` expands to shell.env['name'] when defined.
        if arg[0] == '$' and arg[1:] in shell.env:
            arg = shell.env[arg[1:]]
        return arg
    def add_reserved_vars(self, args, shell):
        """ Adds the reserved variables to a filter query. The reserved
            variables are domain, list and user, which are mapped to their
            respective scopes and attribute names.
        """
        scope = args['scope']
        if 'filters' not in args:
            args['filters'] = []
        if not shell.env_on:
            return args
        filters = args['filters']
        if scope == 'list':
            # Inside a `domain` environment, restrict lists to that domain.
            if 'domain' in shell.env:
                filters.append(('mail_host', '=', shell.env['domain']))
        elif scope == 'user':
            # Inside a `list` environment, restrict users to its subscribers.
            if 'list' in shell.env:
                filters.append((shell.env['list'], 'in', 'subscriptions'))
        args['filters'] = filters
        return args
| rajeevs1992/mailmancli | src/mailmanclient/cli/lib/mailman_utils.py | Python | lgpl-3.0 | 4,303 |
from pycp2k.inputsection import InputSection
class _job1(InputSection):
    """Input section describing a single JOB entry."""
    def __init__(self):
        InputSection.__init__(self)
        # Keyword values; None until assigned by the user.
        self.Directory = None
        self.Input_file_name = None
        self.Output_file_name = None
        self.Job_id = None
        self.Dependencies = None
        self._name = "JOB"
        # Map attribute names to CP2K keyword names (same insertion order
        # as the generated original).
        self._keywords = {
            'Job_id': 'JOB_ID',
            'Directory': 'DIRECTORY',
            'Dependencies': 'DEPENDENCIES',
            'Input_file_name': 'INPUT_FILE_NAME',
            'Output_file_name': 'OUTPUT_FILE_NAME',
        }
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'lucy'
def error(obj, msg):
    """Print an error-level log line tagged with obj's class name."""
    tag = obj.__class__.__name__
    print('[%s][error] %s' % (tag, msg))
def info(obj, msg):
    """Print an info-level log line tagged with obj's class name."""
    tag = obj.__class__.__name__
    print('[%s][info] %s' % (tag, msg))
def warn(obj, msg):
    """Print *msg* as a warning, tagged with *obj*'s class name."""
    print('[{0}][warn] {1}'.format(obj.__class__.__name__, msg))
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
class DummyOperator(BaseOperator):
    """
    A no-op operator: it performs no work and exists only to group
    tasks in a DAG.
    """

    # Light green in the Airflow UI graph/tree views.
    ui_color = '#e8f7e4'

    @apply_defaults
    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)

    def execute(self, context):
        # Intentionally does nothing.
        pass
| wileeam/airflow | airflow/operators/dummy_operator.py | Python | apache-2.0 | 1,203 |
from django.core.management.base import BaseCommand
class Command(BaseCommand):
help = ('Loops through all of the photos to create a cached version.')
def handle(self, *args, **options):
from tendenci.apps.photos.models import Image
from tendenci.apps.photos.utils.caching import cache_photo_size
for photo in Image.objects.all().order_by('-pk'):
cache_kwargs_list = []
cache_kwargs_list.append({"id": photo.pk, "size": "422x700", "constrain": True})
cache_kwargs_list.append({"id": photo.pk, "size": "102x78", "crop": True})
cache_kwargs_list.append({"id": photo.pk, "size": "640x640", "constrain": True})
for cache_kwargs in cache_kwargs_list:
cache_photo_size(**cache_kwargs)
print photo.pk
| alirizakeles/tendenci | tendenci/apps/photos/management/commands/cache_photos.py | Python | gpl-3.0 | 821 |
import sys

# Minimal Django settings used only when running this package's test suite.
SECRET_KEY = 'fake-key'
INSTALLED_APPS = [
    "tests",
]
# GeoDjango backend: SpatiaLite keeps the test database self-contained
# in a single on-disk SQLite file.
DATABASES = {
    'default': {
        'ENGINE': 'django.contrib.gis.db.backends.spatialite',
        'NAME': 'db.sqlite3',
    }
}
STATIC_URL = '/static/'
# On macOS the SpatiaLite module is not found automatically; this is the
# usual Homebrew install location.
if sys.platform == 'darwin':
    SPATIALITE_LIBRARY_PATH = '/usr/local/lib/mod_spatialite.dylib'
class RecursionError(Exception):
    """Raised when a routine exceeds its recursion/subdivision limit.

    Note: within this package's namespace this shadows the Python 3.5+
    builtin of the same name.
    """
    def __init__(self, *args, **kwargs):
        super(RecursionError, self).__init__(*args, **kwargs)
class FailedIntegrationError(Exception):
    """Raised when a numerical integration fails to converge or complete."""
    def __init__(self, *args, **kwargs):
        super(FailedIntegrationError, self).__init__(*args, **kwargs)
__all__ = ["RecursionError", "FailedIntegrationError"]
| Microno95/DESolver | desolver/exception_types/exception_types.py | Python | mit | 321 |
from pyramid.response import Response
from pyramid.view import view_config
from sqlalchemy.exc import DBAPIError
from .models import (
DBSession,
MyModel,
)
@view_config(route_name='home', renderer='templates/mytemplate.pt')
def my_view(request):
    """Home view: fetch the MyModel row named 'one' for the template.

    On a database error, return a plain-text 500 with setup advice.
    """
    try:
        first_row = DBSession.query(MyModel).filter(MyModel.name == 'one').first()
    except DBAPIError:
        return Response(conn_err_msg, content_type='text/plain', status_int=500)
    return {'one': first_row, 'project': 'source'}
conn_err_msg = """\
Pyramid is having a problem using your SQL database. The problem
might be caused by one of the following things:
1. You may need to run the "initialize_source_db" script
to initialize your database tables. Check your virtual
environment's "bin" directory for this script and try to run it.
2. Your database server may not be running. Check that the
database server referred to by the "sqlalchemy.url" setting in
your "development.ini" file is running.
After you fix the problem, please restart the Pyramid application to
try it again.
"""
| jmercouris/PyramidStarter | source/source/views.py | Python | bsd-3-clause | 1,084 |
# Authors: Rob Crittenden <[email protected]>
# John Dennis <[email protected]>
#
# Copyright (C) 2009 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import print_function
import sys
import httplib
import getpass
import socket
from ipapython.ipa_log_manager import *
from nss.error import NSPRError
import nss.io as io
import nss.nss as nss
import nss.ssl as ssl
import nss.error as error
from ipaplatform.paths import paths
# NSS database currently open
current_dbdir = None
def auth_certificate_callback(sock, check_sig, is_server, certdb):
    """NSS auth-certificate callback: decide whether the peer's
    certificate is acceptable.

    Returns True only when the certificate verifies for the intended
    usage and -- on the client side -- its subject matches the hostname
    we asked to connect to (man-in-the-middle defense).
    """
    cert_is_valid = False

    cert = sock.get_peer_certificate()

    root_logger.debug("auth_certificate_callback: check_sig=%s is_server=%s\n%s",
                      check_sig, is_server, str(cert))

    pin_args = sock.get_pkcs11_pin_arg()
    if pin_args is None:
        pin_args = ()

    # Define how the cert is being used based upon the is_server flag.  This may
    # seem backwards, but isn't. If we're a server we're trying to validate a
    # client cert. If we're a client we're trying to validate a server cert.
    if is_server:
        intended_usage = nss.certificateUsageSSLClient
    else:
        intended_usage = nss.certificateUsageSSLServer

    try:
        # If the cert fails validation it will raise an exception, the errno attribute
        # will be set to the error code matching the reason why the validation failed
        # and the strerror attribute will contain a string describing the reason.
        approved_usage = cert.verify_now(certdb, check_sig, intended_usage, *pin_args)
    except Exception as e:
        root_logger.error('cert validation failed for "%s" (%s)', cert.subject, e.strerror)
        cert_is_valid = False
        return cert_is_valid

    root_logger.debug("approved_usage = %s intended_usage = %s",
                      ', '.join(nss.cert_usage_flags(approved_usage)),
                      ', '.join(nss.cert_usage_flags(intended_usage)))

    # Is the intended usage a proper subset of the approved usage
    if approved_usage & intended_usage:
        cert_is_valid = True
    else:
        cert_is_valid = False

    # If this is a server, we're finished
    if is_server or not cert_is_valid:
        root_logger.debug('cert valid %s for "%s"', cert_is_valid, cert.subject)
        return cert_is_valid

    # Certificate is OK.  Since this is the client side of an SSL
    # connection, we need to verify that the name field in the cert
    # matches the desired hostname.  This is our defense against
    # man-in-the-middle attacks.
    hostname = sock.get_hostname()
    try:
        # If the cert fails validation it will raise an exception
        cert_is_valid = cert.verify_hostname(hostname)
    except Exception as e:
        root_logger.error('failed verifying socket hostname "%s" matches cert subject "%s" (%s)',
                          hostname, cert.subject, e.strerror)
        cert_is_valid = False
        return cert_is_valid

    root_logger.debug('cert valid %s for "%s"', cert_is_valid, cert.subject)
    return cert_is_valid
def client_auth_data_callback(ca_names, chosen_nickname, password, certdb):
    """Select a certificate/private-key pair for SSL client authentication.

    If *chosen_nickname* is given, only that certificate is considered;
    otherwise every user certificate in *certdb* is scanned for one that
    is currently valid and signed by a CA in *ca_names*.  Returns a
    ``(cert, priv_key)`` tuple, or False when nothing usable is found or
    NSS reports an error.
    """
    if chosen_nickname:
        try:
            cert = nss.find_cert_from_nickname(chosen_nickname, password)
            return cert, nss.find_key_by_any_cert(cert, password)
        except NSPRError:
            return False

    nicknames = nss.get_cert_nicknames(certdb, nss.SEC_CERT_NICKNAMES_USER)
    try:
        for nickname in nicknames:
            cert = nss.find_cert_from_nickname(nickname, password)
            if cert.check_valid_times() and cert.has_signer_in_ca_names(ca_names):
                return cert, nss.find_key_by_any_cert(cert, password)
    except NSPRError:
        return False
    return False
# Map Python socket-module address families to their NSPR (python-nss)
# equivalents.
_af_dict = {
    socket.AF_INET: io.PR_AF_INET,
    socket.AF_INET6: io.PR_AF_INET6,
    socket.AF_UNSPEC: io.PR_AF_UNSPEC
}
class NSSAddressFamilyFallback(object):
    """Mixin that resolves a host name and tries to connect over every
    address returned, falling through to the next address (and address
    family) on failure.
    """

    def __init__(self, family):
        # Python socket-module family (e.g. socket.AF_UNSPEC).
        self.sock_family = family
        # Equivalent NSPR family used by python-nss.
        self.family = self._get_nss_family(self.sock_family)

    def _get_nss_family(self, sock_family):
        """
        Translate a family from python socket module to nss family.

        :raises ValueError: if the socket family has no NSPR equivalent
        """
        try:
            return _af_dict[sock_family]
        except KeyError:
            # Bug fix: the original passed the format string and its
            # argument as two separate ValueError arguments, so the
            # message was never interpolated (and "Unknown" was
            # misspelled).  Format the message explicitly instead.
            raise ValueError('Unknown socket family %d' % sock_family)

    def _create_socket(self):
        # Plain (non-SSL) NSPR socket; subclasses override this to create
        # an SSL socket instead.
        self.sock = io.Socket(family=self.family)

    def connect_socket(self, host, port):
        """Resolve *host* and connect to the first address that accepts.

        :raises NSPRError: when the host cannot be resolved or no
            resolved address accepts a connection
        """
        try:
            addr_info = io.AddrInfo(host, family=self.family)
        except Exception:
            raise NSPRError(
                error_code=error.PR_ADDRESS_NOT_SUPPORTED_ERROR,
                error_message="Cannot resolve %s using family %s" % (host,
                              io.addr_family_name(self.family)))

        for net_addr in addr_info:
            root_logger.debug("Connecting: %s", net_addr)
            net_addr.port = port
            # Remember the family that actually worked for later sockets.
            self.family = net_addr.family
            try:
                self._create_socket()
                self.sock.connect(net_addr)
                return
            except Exception as e:
                root_logger.debug("Could not connect socket to %s, error: %s",
                                  net_addr, str(e))
                root_logger.debug("Try to continue with next family...")
                continue

        raise NSPRError(
            error_code=error.PR_ADDRESS_NOT_SUPPORTED_ERROR,
            error_message="Could not connect to %s using any address" % host)
class NSSConnection(httplib.HTTPConnection, NSSAddressFamilyFallback):
    # An HTTPS connection backed by python-nss instead of the stdlib ssl
    # module, with address-family fallback from NSSAddressFamilyFallback.
    default_port = httplib.HTTPSConnection.default_port

    def __init__(self, host, port=None, strict=None,
                 dbdir=None, family=socket.AF_UNSPEC, no_init=False,
                 tls_version_min='tls1.1', tls_version_max='tls1.2'):
        """
        :param host: the server to connect to
        :param port: the port to use (default is set in HTTPConnection)
        :param dbdir: the NSS database directory
        :param family: network family to use (default AF_UNSPEC)
        :param no_init: do not initialize the NSS database. This requires
                        that the database has already been initialized or
                        the request will fail.
        :param tls_min_version: mininum version of SSL/TLS supported
        :param tls_max_version: maximum version of SSL/TLS supported.
        """
        httplib.HTTPConnection.__init__(self, host, port, strict)
        NSSAddressFamilyFallback.__init__(self, family)
        root_logger.debug('%s init %s', self.__class__.__name__, host)

        # If initialization is requested, initialize the new database.
        if not no_init:
            if nss.nss_is_initialized():
                # NSS is a process-wide singleton: shut down any previously
                # opened database before initializing the requested one.
                ssl.clear_session_cache()
                try:
                    nss.nss_shutdown()
                except NSPRError as e:
                    if e.errno != error.SEC_ERROR_NOT_INITIALIZED:
                        raise e
            if not dbdir:
                raise RuntimeError("dbdir is required")
            nss.nss_init(dbdir)
            # Track which database is open so callers can inspect it.
            global current_dbdir
            current_dbdir = dbdir
        ssl.set_domestic_policy()
        nss.set_password_callback(self.password_callback)
        self.tls_version_min = str(tls_version_min)
        self.tls_version_max = str(tls_version_max)

    def _create_socket(self):
        # TODO: remove the try block once python-nss is guaranteed to contain
        # these values
        try:
            #pylint: disable=E1101
            ssl_enable_renegotiation  = ssl.SSL_ENABLE_RENEGOTIATION        #pylint: disable=E1101
            ssl_require_safe_negotiation = ssl.SSL_REQUIRE_SAFE_NEGOTIATION #pylint: disable=E1101
            ssl_renegotiate_requires_xtn = ssl.SSL_RENEGOTIATE_REQUIRES_XTN #pylint: disable=E1101
        except:
            # NOTE(review): bare except silently falls back to hard-coded
            # option numbers for old python-nss builds; should be narrowed
            # to AttributeError.
            ssl_enable_renegotiation  = 20
            ssl_require_safe_negotiation = 21
            ssl_renegotiate_requires_xtn = 2

        # Create the socket here so we can do things like let the caller
        # override the NSS callbacks
        self.sock = ssl.SSLSocket(family=self.family)
        self.sock.set_ssl_option(ssl.SSL_SECURITY, True)
        self.sock.set_ssl_option(ssl.SSL_HANDSHAKE_AS_CLIENT, True)
        try:
            self.sock.set_ssl_version_range(self.tls_version_min, self.tls_version_max)
        except NSPRError as e:
            root_logger.error('Failed to set TLS range to %s, %s' % (self.tls_version_min, self.tls_version_max))
            raise
        self.sock.set_ssl_option(ssl_require_safe_negotiation, False)
        self.sock.set_ssl_option(ssl_enable_renegotiation, ssl_renegotiate_requires_xtn)
        # Provide a callback which notifies us when the SSL handshake is complete
        self.sock.set_handshake_callback(self.handshake_callback)

        # Provide a callback to verify the servers certificate
        self.sock.set_auth_certificate_callback(auth_certificate_callback,
                                                nss.get_default_certdb())
        self.sock.set_hostname(self.host)

    def password_callback(self, slot, retry, password):
        # Reuse a supplied password on the first attempt; otherwise prompt
        # interactively for the token's PIN.
        if not retry and password:
            return password
        return getpass.getpass("Enter password for %s: " % slot.token_name);

    def handshake_callback(self, sock):
        """
        Verify callback. If we get here then the certificate is ok.
        """
        channel = sock.get_ssl_channel_info()
        suite = ssl.get_cipher_suite_info(channel.cipher_suite)
        root_logger.debug("handshake complete, peer = %s", sock.get_peer_name())
        root_logger.debug('Protocol: %s' % channel.protocol_version_str.upper())
        root_logger.debug('Cipher: %s' % suite.cipher_suite_name)

    def connect(self):
        # Delegates to NSSAddressFamilyFallback's resolve-and-retry logic.
        self.connect_socket(self.host, self.port)

    def close(self):
        """Close the connection to the HTTP server."""
        if self.sock:
            self.sock.close()   # close it manually... there may be other refs
            self.sock = None
        ssl.clear_session_cache()

    def endheaders(self, message=None):
        """
        Explicitly close the connection if an error is returned after the
        headers are sent. This will likely mean the initial SSL handshake
        failed. If this isn't done then the connection is never closed and
        subsequent NSS activities will fail with a BUSY error.
        """
        try:
            # FIXME: httplib uses old-style classes so super doesn't work
            # Python 2.7 changed the API for endheaders. This is an attempt
            # to work across versions
            (major, minor, micro, releaselevel, serial) = sys.version_info
            if major == 2 and minor < 7:
                httplib.HTTPConnection.endheaders(self)
            else:
                httplib.HTTPConnection.endheaders(self, message)
        except NSPRError as e:
            self.close()
            raise e
class NSSHTTPS(httplib.HTTP):
    # Legacy httplib.HTTP wrapper around NSSConnection.
    #
    # We would like to use HTTP 1.1 not the older HTTP 1.0 but xmlrpclib
    # and httplib do not play well together. httplib when the protocol
    # is 1.1 will add a host header in the request. But xmlrpclib
    # always adds a host header regardless of the HTTP protocol
    # version. That means the request ends up with 2 host headers,
    # but Apache freaks out if it sees 2 host headers, a known Apache
    # issue. httplib has a mechanism to skip adding the host header
    # (i.e. skip_host in HTTPConnection.putrequest()) but xmlrpclib
    # doesn't use it. Oh well, back to 1.0 :-(
    #
    #_http_vsn = 11
    #_http_vsn_str = 'HTTP/1.1'

    _connection_class = NSSConnection

    def __init__(self, host='', port=None, strict=None, dbdir=None, no_init=False):
        # provide a default host, pass the X509 cert info

        # urf. compensate for bad input.
        if port == 0:
            port = None
        self._setup(self._connection_class(host, port, strict, dbdir=dbdir, no_init=no_init))

    def getreply(self):
        """
        Override so we can close duplicated file connection on non-200
        responses. This was causing nss_shutdown() to fail with a busy
        error.
        """
        (status, reason, msg) = httplib.HTTP.getreply(self)
        if status != 200:
            self.file.close()
        return (status, reason, msg)
#------------------------------------------------------------------------------
if __name__ == "__main__":
    # Manual smoke test: fetch https://www.verisign.com/ using the system
    # NSS database, logging the handshake details to nsslib.log.
    standard_logging_setup('nsslib.log', debug=True, filemode='a')
    root_logger.info("Start")

    # Disabled variant exercising NSSConnection directly.
    if False:
        conn = NSSConnection("www.verisign.com", 443, dbdir=paths.NSS_DB_DIR)
        conn.set_debuglevel(1)
        conn.connect()
        conn.request("GET", "/")
        response = conn.getresponse()
        print(response.status)
        #print response.msg
        print(response.getheaders())
        data = response.read()
        #print data
        conn.close()

    # Active variant exercising the legacy NSSHTTPS wrapper.
    if True:
        h = NSSHTTPS("www.verisign.com", 443, dbdir=paths.NSS_DB_DIR)
        h.connect()
        h.putrequest('GET', '/')
        h.endheaders()
        http_status, http_reason, headers = h.getreply()
        print("status = %s %s" % (http_status, http_reason))
        print("headers:\n%s" % headers)
        f = h.getfile()
        data = f.read() # Get the raw HTML
        f.close()
        #print data
| msimacek/freeipa | ipapython/nsslib.py | Python | gpl-3.0 | 14,219 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
def btc2satoshis(btc):
    """Convert an amount of bitcoin to an integer number of satoshis.

    :param btc: the amount, either as a string (e.g. '1.5', '10') or as
                an int/float number of BTC
    :return: the amount as an int (1 BTC == 100000000 satoshis)
    :raises Exception: on a malformed string or an unsupported type
    """
    satoshis_per_btc = 10 ** 8
    if isinstance(btc, str):
        if '.' in btc:
            parts = btc.split('.')
            if len(parts) == 2:
                whole_btc = int(parts[0])
                # Pad the fractional part to 8 digits; also truncate
                # anything beyond 8 digits, which previously inflated the
                # result for sub-satoshi inputs.
                satoshis = int(parts[1][:8].ljust(8, '0'))
                # Pure integer arithmetic: the old float multiply by 1e8
                # could lose precision for large amounts.
                # NOTE(review): negative fractional strings (e.g. '-0.5')
                # were mishandled before and still are -- the fractional
                # satoshis are always added, never subtracted.
                return whole_btc * satoshis_per_btc + satoshis
            else:
                raise Exception('String containing BTC value can only contain a single "."')
        else:
            return int(btc) * satoshis_per_btc
    elif isinstance(btc, (int, float)):
        # round() instead of int(): int() truncates, so float artifacts
        # like 0.29 * 1e8 == 28999999.999999996 lost a satoshi.
        return int(round(btc * satoshis_per_btc))
    else:
        raise Exception('Invalid type for btc: %s' % type(btc))
| ValyrianTech/BitcoinSpellbook-v0.3 | helpers/conversionhelpers.py | Python | gpl-3.0 | 675 |
import os
import sys

from setuptools import setup, find_packages

# Run from the directory containing this script so relative paths
# (README.rst, package directories) resolve correctly.
os.chdir(os.path.dirname(sys.argv[0]) or ".")

import libsongtext

# libsongtext.__version__ is a (major, minor, patch) tuple.
version = '%s.%s.%s' % libsongtext.__version__

try:
    # NOTE(review): open mode 'U' (universal newlines) was removed in
    # Python 3.11 -- confirm the supported Python range or drop the flag.
    long_description = open('README.rst', 'U').read()
except IOError:
    # Fall back to a pointer at the project page when README is absent
    # (e.g. when installing from an sdist that omitted it).
    long_description = 'See https://github.com/ysim/songtext'

setup(
    name='songtext',
    version=version,
    description='a command-line song lyric fetcher',
    long_description=long_description,
    url='https://github.com/ysim/songtext',
    author='ysim',
    author_email='[email protected]',
    packages=find_packages(),
    entry_points={
        'console_scripts': [
            'songtext = libsongtext.songtext:main',
        ],
    },
    install_requires=[
        'click==7.0',
        'cssselect==1.0.3',
        'lxml==4.3.4',
        'requests==2.21.0',
    ],
    license='BSD',
    keywords='console command line music song lyrics',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Environment :: Console',
        'Operating System :: OS Independent',
        'License :: OSI Approved :: BSD License',
        'Programming Language :: Python',
    ],
)
| ysim/songtext | setup.py | Python | bsd-2-clause | 1,179 |
"""
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2011 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Ying Liu, Cisco Systems, Inc.
#
"""
import logging
from webob import exc
from quantum import wsgi
from quantum.extensions import _credential_view as credential_view
from quantum.api import api_common as common
from quantum.extensions import extensions
from quantum.manager import QuantumManager
from quantum.plugins.cisco.common import cisco_exceptions as exception
from quantum.plugins.cisco.common import cisco_faults as faults
LOG = logging.getLogger('quantum.api.credentials')
class Credential(object):
    """Extension descriptor for the Cisco Credential resource."""

    def __init__(self):
        pass

    @classmethod
    def get_name(cls):
        """Return the extension resource name."""
        return "Cisco Credential"

    @classmethod
    def get_alias(cls):
        """Return the extension resource alias."""
        return "Cisco Credential"

    @classmethod
    def get_description(cls):
        """Return the extension resource description."""
        return "Credential include username and password"

    @classmethod
    def get_namespace(cls):
        """Return the extension resource XML namespace."""
        return "http://docs.ciscocloud.com/api/ext/credential/v1.0"

    @classmethod
    def get_updated(cls):
        """Return the extension resource last-update timestamp."""
        return "2011-07-25T13:25:27-06:00"

    @classmethod
    def get_resources(cls):
        """Return the extension resources served by this extension."""
        parent = dict(member_name="tenant",
                      collection_name="extensions/csco/tenants")
        controller = CredentialController(QuantumManager.get_plugin())
        resource = extensions.ResourceExtension('credentials', controller,
                                                parent=parent)
        return [resource]
class CredentialController(common.QuantumController, wsgi.Controller):
    """ credential API controller
        based on QuantumController """

    # Parameters required in create/update request bodies.
    _credential_ops_param_list = [{
        'param-name': 'credential_name',
        'required': True}, {
        'param-name': 'user_name',
        'required': True}, {
        'param-name': 'password',
        'required': True}]

    # Controls XML serialization of responses.
    _serialization_metadata = {
        "application/xml": {
            "attributes": {
                "credential": ["id", "name"],
            },
        },
    }

    def __init__(self, plugin):
        self._resource_name = 'credential'
        self._plugin = plugin

    def index(self, request, tenant_id):
        """ Returns a list of credential ids """
        return self._items(request, tenant_id, is_detail=False)

    def _items(self, request, tenant_id, is_detail):
        """ Returns a list of credentials. """
        credentials = self._plugin.get_all_credentials(tenant_id)
        builder = credential_view.get_view_builder(request)
        result = [builder.build(credential, is_detail)['credential']
                  for credential in credentials]
        return dict(credentials=result)

    # pylint: disable-msg=E1101,W0613
    def show(self, request, tenant_id, id):
        """ Returns credential details for the given credential id """
        try:
            credential = self._plugin.get_credential_details(
                            tenant_id, id)
            builder = credential_view.get_view_builder(request)
            #build response with details
            result = builder.build(credential, True)
            return dict(credentials=result)
        except exception.CredentialNotFound as exp:
            return faults.Fault(faults.CredentialNotFound(exp))

    def create(self, request, tenant_id):
        """ Creates a new credential for a given tenant """
        try:
            # Deserialize and validate the request body against the
            # required-parameter list; malformed bodies raise HTTPError.
            body = self._deserialize(request.body, request.get_content_type())
            req_body = \
                self._prepare_request_body(body,
                                           self._credential_ops_param_list)
            req_params = req_body[self._resource_name]
        except exc.HTTPError as exp:
            return faults.Fault(exp)
        credential = self._plugin.\
                       create_credential(tenant_id,
                                         req_params['credential_name'],
                                         req_params['user_name'],
                                         req_params['password'])
        builder = credential_view.get_view_builder(request)
        result = builder.build(credential)
        return dict(credentials=result)

    def update(self, request, tenant_id, id):
        """ Updates the name for the credential with the given id """
        try:
            body = self._deserialize(request.body, request.get_content_type())
            req_body = \
                self._prepare_request_body(body,
                                           self._credential_ops_param_list)
            req_params = req_body[self._resource_name]
        except exc.HTTPError as exp:
            return faults.Fault(exp)
        try:
            credential = self._plugin.\
            rename_credential(tenant_id,
                              id, req_params['credential_name'])

            builder = credential_view.get_view_builder(request)
            result = builder.build(credential, True)
            return dict(credentials=result)
        except exception.CredentialNotFound as exp:
            return faults.Fault(faults.CredentialNotFound(exp))

    def delete(self, request, tenant_id, id):
        """ Destroys the credential with the given id """
        try:
            self._plugin.delete_credential(tenant_id, id)
            return exc.HTTPOk()
        except exception.CredentialNotFound as exp:
            return faults.Fault(faults.CredentialNotFound(exp))
| sileht/deb-openstack-quantum | quantum/extensions/credential.py | Python | apache-2.0 | 6,330 |
# !/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Description:
Given a triangle, find the minimum path sum from top to bottom.
Each step you may move to adjacent numbers on the row below.
For example, given the following triangle
[
[2],
[3,4],
[6,5,7],
[4,1,8,3]
]
The minimum path sum from top to bottom is 11 (i.e., 2 + 3 + 5 + 1 = 11).
Note:
Bonus point if you are able to do this using only O(n) extra space, where n is the total number of rows in the triangle.
Tags: Array, Dynamic Programming
'''
class Solution(object):
    def minimumTotal(self, triangle):
        """Return the minimum top-to-bottom path sum through the triangle.

        Uses O(n) extra space where n is the number of rows.  Replaces
        the original ``xrange`` and bare ``reduce`` (Python 2 only) with
        ``range`` and ``min``, which work on both Python 2 and 3.

        :type triangle: List[List[int]]
        :rtype: int
        """
        if not triangle:
            return 0
        # current[j] is the minimum path sum ending at column j of the
        # last processed row; the trailing inf sentinel makes the
        # current[j-1] lookup at j == 0 harmless (wraps to the sentinel).
        current = triangle[0] + [float('inf')]
        for i in range(1, len(triangle)):
            next_row = []
            for j in range(i + 1):
                next_row.append(triangle[i][j] + min(current[j - 1], current[j]))
            current = next_row + [float('inf')]
        return min(current)
if __name__ == '__main__':
    # Demo run with the example triangle from the problem statement;
    # expected output: 11.  (Python 2 print statement.)
    triangle = [
        [2],
        [3,4],
        [6,5,7],
        [4,1,8,3]
    ]
    print Solution().minimumTotal(triangle)
| Jan-zou/LeetCode | python/Array/120_triangle.py | Python | mit | 1,281 |
'''
____ _ _ _ _ _ ____ ___
| _ \ ___ __| | \ | | ___| |_ / \ | _ \_ _|
| |_) / _ \ / _` | \| |/ _ \ __| / _ \ | |_) | |
| __/ (_) | (_| | |\ | __/ |_ / ___ \| __/| |
|_| \___/ \__,_|_| \_|\___|\__| /_/ \_\_| |___|
File: PodNet.py
Author: Zach Podbielniak
Last Update: 01/05/2018
Overview: This file sets forth forwarding the PodNet C API to Python, as
well as exposing all other Python related utilities.
This file is part of the PodNet API and comes with no warranty,
use with your own discretion.
'''
from PodNet.CAlgorithms import *
from PodNet.CAtomTable import *
from PodNet.CClock import *
from PodNet.CError import *
from PodNet.CSystem import *
from PodNet.CEvent import *
from PodNet.CCriticalSection import *
from PodNet.CHandle import *
from PodNet.CMemory import *
from PodNet.CLock import *
from PodNet.CMutex import *
from PodNet.CSemaphore import *
from PodNet.CSpinLock import *
from PodNet.CThread import *
from PodNet.CPromise import *
from PodNet.CFuture import *
from PodNet.CCallOnce import *
from PodNet.CLog import *
from PodNet.CFile import *
from PodNet.CShapes2D import *
from PodNet.CCoordinates import *
from PodNet.CLua import *
from PodNet.CGpio import *
from PodNet.CSerial import *
from PodNet.CFastApprox import *
from PodNet.CIpv4 import *
from PodNet.CModule import *
from PodNet.PyScripting import *
from PodNet.PyString import *
| zachpodbielniak/PodNet | Linux/Python/__init__.py | Python | gpl-3.0 | 1,448 |
#!/usr/bin/env python2.6

import sys

import setuptools

# protobuf ships under a different distribution name for Python 3.
if sys.version_info >= (3,):
    protobuf = 'protobuf-py3>=2.5.1,<3.0.0'
else:
    protobuf = 'protobuf>=2.3.0,<3.0.0'

setuptools.setup(
    name='riemann-client',
    version='6.1.2',

    author="Sam Clements",
    author_email="[email protected]",

    url="https://github.com/borntyping/python-riemann-client",
    description="A Riemann client and command line tool",
    long_description=open('README.rst').read(),
    license="MIT",

    packages=[
        'riemann_client',
    ],

    install_requires=[
        'click>=3.1,<4.0',
        protobuf
    ],

    # Documentation build requirements, installed via the [docs] extra.
    extras_require={
        'docs': [
            'sphinx',
            'sphinx_rtd_theme'
        ]
    },

    entry_points={
        'console_scripts': [
            'riemann-client = riemann_client.command:main',
        ]
    },

    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'License :: OSI Approved',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Topic :: Software Development :: Libraries',
        'Topic :: System :: Monitoring',
        'Topic :: System :: Networking',
        'Topic :: System :: Systems Administration'
    ],
)
| jrmccarthy/python-riemann-client | setup.py | Python | mit | 1,554 |
import os
mmsis = [370580000,
209108000,
209587000,
210228000,
210582000,
212338000,
226022000,
226270000,
228030800,
228036800,
228045600,
228045800,
228050600,
233694000,
235113838,
235338000,
236111851,
236111902,
236403000,
236483000,
236538000,
244711000,
246454000,
247028700,
257022000,
257239000,
257557000,
257728000,
257871000,
257908000,
258308000,
258325000,
258438000,
258739000,
258826000,
258830000,
258830000,
258992000,
259482000,
259645000,
259671000,
259765000,
259798000,
273350140,
273450600,
273452600,
273453600,
273454600,
273454700,
273455310,
273455700,
273457600,
273458600,
308014000,
308036000,
309325000,
309593000,
309666000,
311000065,
311000084,
311000115,
311000180,
311000238,
311000262,
311000263,
311012200,
311012300,
311012400,
311012500,
311029900,
311030800,
311049200,
311052800,
311052900,
311056100,
311065700,
311070100,
311070100,
311777000,
311778000,
311781000,
311783000,
311784000,
311786000,
311971000,
312516000,
312695000,
319011200,
319016000,
338100000,
338855000,
351133000,
352769000,
353208000,
353249000,
353481000,
353599000,
354915000,
355242000,
355242000,
355645000,
357065000,
357149000,
357229000,
357767000,
357937000,
366336000,
366404000,
366549000,
366625000,
366625000,
366813750,
366916540,
366947110,
367056202,
367074880,
367074890,
367338620,
367357020,
367454720,
367454750,
367454770,
367494190,
367494210,
367494220,
367581180,
367648560,
371235000,
371507000,
371729000,
371931000,
372048000,
372124000,
372334000,
372564000,
372620000,
372698000,
374283000,
412301230,
412461390,
412901000,
412902000,
412971000,
412974000,
413300120,
413300130,
413302640,
413303010,
413305080,
413305430,
413305460,
419006700,
436000013,
436000029,
436000037,
436000229,
508027000,
525005259,
525015926,
525015982,
533130951,
538002914,
563045000,
565799000,
574001490,
576250000,
577318000,
636013688,
710000340]
command = ''
# Build one conversion command per vessel (MMSI) per year and print it
# (Python 2 print statement) so the output can be piped to a shell.
# Note: the mmsis list above contains a few duplicate entries, which
# will produce duplicate commands.
for year in range(2013,2016):
    for m in mmsis:
        command = "python cs* ../data/seismic*/seismic_cvs/"+str(m)+"_"+str(year)+".csv ../data/seismic_vessels/seismic_kmls/"+str(m)+"_"+str(year)+".kml"
        print command
| GlobalFishingWatch/vessel-maps | utilities/convert_many.py | Python | apache-2.0 | 2,796 |
from django.conf.urls import patterns, url

from . import views

# Dashboard payment URLs: the list view at the root and a per-payment
# detail view keyed by primary key.
urlpatterns = patterns(
    '',
    url(r'^$', views.PaymentList.as_view(), name='payments'),
    url(r'^(?P<pk>[0-9]+)/$', views.payment_details, name='payment-details')
)
| hongquan/saleor | saleor/dashboard/payments/urls.py | Python | bsd-3-clause | 239 |
# Natural Language Toolkit: WordNet stemmer interface
#
# Copyright (C) 2001-2012 NLTK Project
# Author: Steven Bird <[email protected]>
# Edward Loper <[email protected]>
# URL: <http://www.nltk.org/>
# For license information, see LICENSE.TXT
from nltk.corpus.reader.wordnet import NOUN
from nltk.corpus import wordnet
class WordNetLemmatizer(object):
    """
    WordNet Lemmatizer

    Lemmatize using WordNet's built-in morphy function.
    Returns the input word unchanged if it cannot be found in WordNet.

        >>> from nltk.stem import WordNetLemmatizer
        >>> wnl = WordNetLemmatizer()
        >>> wnl.lemmatize('dogs')
        'dog'
        >>> wnl.lemmatize('churches')
        'church'
        >>> wnl.lemmatize('aardwolves')
        'aardwolf'
        >>> wnl.lemmatize('abaci')
        'abacus'
        >>> wnl.lemmatize('hardrock')
        'hardrock'
    """

    def __init__(self):
        pass

    def lemmatize(self, word, pos=NOUN):
        """Return the shortest morphy lemma of *word* for *pos*, or
        *word* itself when WordNet does not know it."""
        candidates = wordnet._morphy(word, pos)
        if not candidates:
            return word
        return min(candidates, key=len)

    def __repr__(self):
        return '<WordNetLemmatizer>'
if __name__ == "__main__":
    # Run the doctests embedded in WordNetLemmatizer's docstring.
    import doctest
    doctest.testmod(optionflags=doctest.NORMALIZE_WHITESPACE)
| cortext/crawtextV2 | ~/venvs/crawler/lib/python2.7/site-packages/nltk/stem/wordnet.py | Python | mit | 1,263 |
from stanczyk.proxy import connectProxy
from stanczyk.test.util import ConnectedCommandTestMixin
from twisted.trial.unittest import SynchronousTestCase
from twisted.test.proto_helpers import MemoryReactor
class ConnectProxyTests(ConnectedCommandTestMixin, SynchronousTestCase):
    # The console command under test and the kwargs the mixin passes to it.
    command = staticmethod(connectProxy)
    kwargs = {"identifier": "xyzzy"}

    def test_connect(self):
        """When a user initiates a proxy connection, it starts listening on a
        randomly available local port with a proxying factory with the
        requested identifier. Once it has started listening, it
        notifies the user. The command returns None.

        """
        reactor = MemoryReactor()
        result = connectProxy("xyzzy", self.namespace, _reactor=reactor)
        self.assertIdentical(result, None)

        # MemoryReactor records listenTCP calls instead of opening sockets.
        port, factory, _backlog, interface = reactor.tcpServers[0]
        self.assertEqual(port, 0)  # port 0 asks the OS for a free port
        self.assertEqual(interface, "localhost")

        self.assertEqual(self.manhole.line, 'xyzzy is now listening on 0.0.0.0:0')
| crypto101/stanczyk | stanczyk/test/test_proxy.py | Python | isc | 1,051 |
"""
Script to visualize the "Noise vs. Exposure" data from DevWare
According to section 2.2.4.7 of the DevWareX help (http://is.gd/mt7FyF) we get
signal levels and different kinds of noise measurements by using the "Sensor
Control" > "Diagnostics" > "Noise vs. Exposure" tool.
The analyis report gives
- Signal
- RMS Dyn (temporal noise), Avg Dyn (temporal noise)
- FPN (fixed pattern noise), Col FPN (columnwise FPN), Row FPN (rowwise FPN)
- Col Dyn (columnwise temporal noise) and Row Dyn (rowwise temporal noise).
See the wiki page linked above to see how the values are calculated.
"""
import glob
import os
import numpy
import matplotlib.pyplot as plt
def AskUser(Blurb, Choices):
    """Interactively ask the user to pick one item from Choices.

    Prints Blurb, then a numbered list of sorted(Choices), and keeps
    prompting until a valid index is entered.  Returns the chosen element
    of sorted(Choices).  Based on function in MasterThesisIvan.ini.

    BUG FIX: the original used Python 2 ``input()``, which *evaluates* the
    typed text (a security hole), and caught only SyntaxError -- so typing
    a word raised an uncaught NameError.  ``raw_input`` + ``int()`` with a
    ValueError handler covers every malformed entry safely.
    """
    print(Blurb)
    for Counter, Item in enumerate(sorted(Choices)):
        print(' * [' + str(Counter) + ']: ' + str(Item))
    Selection = None
    while Selection not in range(len(Choices)):
        try:
            Selection = int(raw_input(' '.join(['Please enter the choice you',
                                                'want [0-' +
                                                str(len(Choices) - 1) +
                                                ']:'])))
        except ValueError:
            print('You actually have to select *something*')
        if Selection not in range(len(Choices)):
            print('Try again with a valid choice')
    print('You selected {0}'.format(sorted(Choices)[Selection]))
    return sorted(Choices)[Selection]
# Directory holding the DevWare "Noise vs. Exposure" text exports.
Root = '/afs/psi.ch/project/EssentialMed/Images/NoiseVsExposure'
DataFiles = [os.path.basename(i) for i in
             glob.glob(os.path.join(Root, '*.txt'))]
# Which plot do we show?
whichone = DataFiles.index(AskUser('Which file should I show you?', DataFiles))
# If no manual selection, we can do
# for whichone in range(len(DataFiles)):
# in a loop...
# Tell what we do
# The file name encodes the acquisition parameters as
# Sensor_Lens_FramesPerSample_MaxExposure_Decades_SamplesPerDecade.txt,
# so strip the '.txt' suffix and split on '_' to recover them.
Sensor = DataFiles[whichone][:-4].split('_')[0]
Lens = DataFiles[whichone][:-4].split('_')[1]
FramesPerSample = DataFiles[whichone][:-4].split('_')[2]
MaximumExposure = DataFiles[whichone][:-4].split('_')[3]
Decades = DataFiles[whichone][:-4].split('_')[4]
SamplesPerDecade = DataFiles[whichone][:-4].split('_')[5]
print 'We are showing the data from the', Sensor, 'CMOS with the', Lens, \
    'lens. The analysis was done with', FramesPerSample, \
    'frames per sample,', MaximumExposure, 'ms maximum exposure over', \
    Decades, 'decades with', SamplesPerDecade, 'samples per decade.'
print
print 'If the exposure has not been recorded in "log scale" (you will see', \
    'it in the plots), the "Decades" correspond to the "minimal exposure"', \
    'and the "samples per decade" correspond to the "numbers of samples".'
# Generate figure title, so we can distinguish the output
Title = Sensor, Lens, FramesPerSample, 'Frames per Sample', \
    MaximumExposure, 'ms Maximum Exposure', Decades, 'Decades', \
    SamplesPerDecade, 'Samples/Decade'
# Load the data from the file
File = os.path.join(Root, DataFiles[whichone])
# Three header lines precede the numeric table.
Data = numpy.loadtxt(File, skiprows=3)
# First line gives the full range. Read it with the snippet based on
# http://stackoverflow.com/a/1904455
with open(File, 'r') as f:
    FullRange = int(f.readline().split('=')[1])
# Plot the data.  Column layout of the DevWare export: 0=exposure, 1=signal,
# 2=RMS Dyn, 3=Avg Dyn, 4=FPN, 5=Col FPN, 6=Row FPN, 7=Col Dyn, 8=Row Dyn.
Labels = ['Exposure time [ms]', 'Signal', 'RMS Dyn (temporal noise)',
          'Avg Dyn (temporal noise)', 'FPN (fixed pattern noise)',
          'columnwise FPN', 'rowwise FPN', 'columnwise temporal noise',
          'rowwise temporal noise']
# The title of the plot is split over all the suplots, otherwise it destroys
# the layout due to its long length
plt.figure(' '.join(Title), figsize=(16, 9))
# Signal
ax = plt.subplot(131)
plt.plot(Data[:, 0], Data[:, 1], 'o-', label=Labels[1])
plt.axhline(FullRange, linestyle='--', label='Full range')
plt.xlabel(Labels[0])
plt.ylabel(Labels[1])
plt.title(' '.join(Title[:2]))
# Shrink the axis upward so the legend fits below the plot.
box = ax.get_position()
ax.set_position([box.x0, box.y0 + box.height * 0.25,
                 box.width, box.height * 0.75])
plt.legend(loc='upper center', bbox_to_anchor=(0.5, -0.1))
# We want to fit some of the data, thus make a linspace to fit over
PolyfitRange = numpy.linspace(min(Data[:, 0]) - max(Data[:, 0]),
                              2 * max(Data[:, 0]), 200)
fit = 9  # degree of the polynomial fits below
# Fixed pattern noise
ax = plt.subplot(132)
maxy = 0
for i in range(4, 7):
    plt.plot(Data[:, 0], Data[:, i], 'o-', label=Labels[i])
    maxy = max(max(Data[:, i]), maxy)
plt.plot(Data[:, 0], (Data[:, 1] / max(Data[:, 1])) * max(Data[:, 4]), '--',
         label='"Signal" scaled to max(FPN)')
polynomial = numpy.poly1d(numpy.polyfit(Data[:, 0], Data[:, 4], fit))
plt.plot(PolyfitRange, polynomial(PolyfitRange), '--',
         label='Polynomial fit (' + str(fit) + ') of FPN')
plt.xlim([min(Data[:, 0]), max(Data[:, 0])])
plt.ylim([0, maxy * 1.1])
plt.xlabel(Labels[0])
plt.ylabel('FPN')
plt.title(' '.join(Title[2:6]))
box = ax.get_position()
ax.set_position([box.x0, box.y0 + box.height * 0.25,
                 box.width, box.height * 0.75])
plt.legend(loc='upper center', bbox_to_anchor=(0.5, -0.1))
# Temporal noise
ax = plt.subplot(133)
maxy = 0
for i in [2, 3, 7, 8]:
    plt.plot(Data[:, 0], Data[:, i], 'o-', label=Labels[i])
    maxy = max(max(Data[:, i]), maxy)
plt.plot(Data[:, 0], Data[:, 1] / max(Data[:, 1]) * max(Data[:, 2]), '--',
         label='"Signal" scaled to max(RMS Dyn)')
polynomial = numpy.poly1d(numpy.polyfit(Data[:, 0], Data[:, 2], fit))
plt.plot(PolyfitRange, polynomial(PolyfitRange), '--',
         label='Polynomial fit (' + str(fit) + ') of RMS Dyn')
plt.xlim([min(Data[:, 0]), max(Data[:, 0])])
plt.ylim([0, maxy * 1.1])
plt.xlabel(Labels[0])
plt.ylabel('Dyn')
plt.title(' '.join(Title[6:]))
box = ax.get_position()
ax.set_position([box.x0, box.y0 + box.height * 0.25,
                 box.width, box.height * 0.75])
plt.legend(loc='upper center', bbox_to_anchor=(0.5, -0.1))
# BUG FIX: the keyword was 'Transparent' (capitalized), which savefig does
# not recognize and therefore silently ignores; the real matplotlib
# parameter name is 'transparent'.
plt.savefig(os.path.join(Root, DataFiles[whichone][:-4] + '.png'),
            transparent=True, bbox_inches='tight')
plt.draw()
plt.show()
# ~ # Polynomial fit stuff
# ~ plt.figure()
# ~ x = Data[:, 0]
# ~ y = Data[:, 9]
# ~ xp = numpy.linspace(min(x)-max(x), 2*max(x), 222)
# ~
# ~ plt.plot(x, y, '-x', label='original')
# ~ for i in range(3,10,2):
# ~ polynomial = numpy.poly1d(numpy.polyfit(x, y, i))
# ~ plt.plot(xp, polynomial(xp), '--', label=str(i))
# ~ plt.legend(loc='best')
# ~ plt.xlim([min(x), max(x)])
# ~ plt.ylim([min(y), max(y)])
# ~ plt.draw()
# ~ plt.show()
| habi/GlobalDiagnostiX | aptina/NoiseVsExposure.py | Python | unlicense | 6,444 |
import kombu.connection
import kombu.entity
import kombu.messaging
import uuid
# Open a broker connection and channel on the local RabbitMQ instance.
connection = kombu.connection.BrokerConnection(hostname='localhost',
                                               port=5672,
                                               virtual_host='/')
connection.connect()
channel = connection.channel()

# A direct exchange routes each message to every queue whose binding key
# equals the message's routing key exactly.
exchange = kombu.entity.Exchange(name='direct-test',
                                 type='direct')
producer = kombu.messaging.Producer(exchange=exchange,
                                    channel=channel,
                                    routing_key='black')

# queue1-3 share the producer's routing key ('black') and will all receive
# the message; queue4 is bound with 'red' and therefore receives nothing.
for queue_name, binding_key in (('queue1', 'black'),
                                ('queue2', 'black'),
                                ('queue3', 'black'),
                                ('queue4', 'red')):
    queue = kombu.Queue(name=queue_name, exchange=exchange,
                        routing_key=binding_key)
    queue.maybe_bind(connection)
    queue.declare()

producer.publish('Foo {0}'.format(uuid.uuid4()))
| Skablam/Python-RabbitMQ | kombu-examples/direct_exchange/addmessage-direct.py | Python | mit | 1,245 |
#!/usr/bin/env python
import os, sys
from setuptools import setup, find_packages
# Convenience shortcut: `python setup.py publish` builds and uploads to PyPI.
if sys.argv[-1] == 'publish':
    os.system('python setup.py sdist upload')
    sys.exit(0)

# The README doubles as the PyPI long description.
with open('README.rst') as f:
    long_description = f.read()

# Single-source the version from the package itself.
VERSION = __import__('delivery').__version__

setup(
    name='django-delivery',
    version=VERSION,
    url='https://github.com/dakrauth/django-delivery',
    author_email='[email protected]',
    description='Basic Email Delivery.',
    long_description=long_description,
    author='David A Krauth',
    platforms=['any'],
    license='MIT License',
    classifiers=(
        'Environment :: Web Environment',
        'Framework :: Django',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3.5',
    ),
    packages=find_packages(),
)
| dakrauth/django-delivery | setup.py | Python | mit | 864 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from nyc_inspections import app
from database import db_session
from extensions import cache
from flask import abort
from flask import jsonify
from flask import request
from models import Cuisine
from models import Inspection
from models import Restaurant
def _make_cache_key(*args, **kwargs):
    """Build a cache key from the request path plus its submitted values.

    The cache decorator calls this with the view's arguments, which are
    ignored; the key depends only on the current Flask request.

    FIX: the original rebound the local name ``args``, shadowing the *args
    parameter -- renamed for clarity.
    NOTE(review): ``hash(frozenset(...))`` is not stable across interpreter
    restarts, so keys do not survive a process restart with a shared cache
    backend -- confirm this is acceptable.
    """
    path = request.path
    values_digest = str(hash(frozenset(request.values.items())))
    return (path + values_digest).encode('utf-8')
@app.teardown_request
def shutdown_session(exception=None):
    """Return the scoped SQLAlchemy session to the pool after each request."""
    db_session.remove()
@app.route("/restaurants/by_name", methods=['POST'])
@cache.cached(key_prefix=_make_cache_key)
def find_restaurants_by_name():
results = []
name = request.form.get('name', '', type=str).strip()
if name:
filterspec = Restaurant.name.like("%{0}%".format(name.upper()))
restaurants = Restaurant.query.filter(filterspec)
for restaurant in restaurants:
results.append(restaurant.serialize)
return jsonify(restaurants=results)
@app.route("/restaurants/by_cuisine", methods=['POST'])
@cache.cached(key_prefix=_make_cache_key)
def find_restaurants_by_cuisine():
results = []
cuisine = request.form.get('cuisine', '', type=str).strip()
if cuisine:
filterspec = Cuisine.name.like("%{0}%".format(cuisine.upper()))
inspections = Inspection.query.\
join(Cuisine).join(Restaurant).filter(filterspec)
for inspection in inspections:
results.append(inspection.restaurant.serialize)
return jsonify(restaurants=results)
@app.route("/restaurants/<int:restaurant_id>", methods=['GET'])
@cache.cached(key_prefix=_make_cache_key)
def get_restaurant_by_id(restaurant_id):
restaurant = {}
if restaurant_id:
restaurant = Restaurant.query.get(restaurant_id)
if restaurant:
return jsonify(restaurant.serialize)
return abort(404)
# vim: filetype=python
| ryankanno/nyc-inspections | nyc_inspections/api.py | Python | bsd-3-clause | 1,943 |
# coding: utf-8
import os
import timeit
import tempfile
import argparse
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib
matplotlib.style.use('seaborn')
def get_performance_multiplication():
    """Time two matrix-multiplication algorithms over growing sizes.

    For each power of two up to 2**(FLAGS.max_degree - 1), builds a pair of
    random integer matrices and times one run of each algorithm with
    timeit.  Returns a dict mapping 'shape' and each algorithm description
    to a list with one entry per matrix size.

    NOTE(review): the timed names naive_square_matrix_product and
    strassen_square_matrix_product are imported from __main__ but are not
    defined in this file -- confirm they are provided before running.
    """
    data = {'shape': []}
    for i in range(1, FLAGS.max_degree):
        sshape = 2 ** i
        min_max_value = sshape * 2
        a = np.random.randint(
            -min_max_value, min_max_value, size=(sshape, sshape)).tolist()
        b = np.random.randint(
            -min_max_value, min_max_value, size=(sshape, sshape)).tolist()
        data['shape'].append(sshape)
        for algorithm, desc in [
            ('naive_square_matrix_product', 'naive O(n^3)'),
            ('strassen_square_matrix_product', 'Strassen O(n^2.81)'),
        ]:
            # The operand matrices are embedded as literals in the timed
            # statement, so argument construction is excluded from the
            # measurement (at the cost of a large parse for big shapes).
            duration = timeit.Timer(
                algorithm + '({}, {})'.format(a, b),
                """from __main__ import {}""".format(algorithm)
            ).timeit(number=1)
            if desc not in data:
                data[desc] = []
            data[desc].append(duration)
    return data
def plot_chart():
    """Read the results CSV (written by main) and plot one line per column.

    Raises IOError when FLAGS.results_file does not exist.  Blocks in
    plt.show() until the window is closed.
    """
    # check results file
    if not os.path.isfile(FLAGS.results_file):
        raise IOError("No such file '{}'".format(FLAGS.results_file))
    # read DataFrame from results file; 'shape' becomes the x axis
    results = pd.read_csv(FLAGS.results_file, index_col='shape')
    # plot chart
    fig, ax = plt.subplots(1)
    for name in results.columns:
        (results[name]).plot(ax=ax)
    ax.set_title('Comparison of matrix multiplication algorithms')
    ax.set_ylabel('time duration, s')
    ax.set_xlabel('shape of square matrix')
    ax.legend()
    plt.show()
def main():
    """Ensure benchmark results exist (regenerating if --force), then plot."""
    if FLAGS.force or not os.path.isfile(FLAGS.results_file):
        # Create the output directory on first run.
        if not os.path.isdir(os.path.dirname(FLAGS.results_file)):
            os.makedirs(os.path.dirname(FLAGS.results_file))
        data = get_performance_multiplication()
        dataframe = pd.DataFrame(data).set_index('shape')
        dataframe.to_csv(FLAGS.results_file)
        print('Data saved to "{}" file'.format(FLAGS.results_file))
    plot_chart()
if __name__ in "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--force', action='store_true')
parser.add_argument('--max_degree', type=int, default=10)
parser.add_argument(
'--results_file',
type=str,
default=os.path.join(tempfile.gettempdir(),
'fun-with-algorithms',
'matrix_multiplication.csv'),
help='File with results')
FLAGS, unparsed = parser.parse_known_args()
main()
| dsysoev/fun-with-algorithms | matrix/performance.py | Python | mit | 2,703 |
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe, json
from frappe.website.website_generator import WebsiteGenerator
from frappe import _
from frappe.utils.file_manager import save_file, remove_file_by_url
class WebForm(WebsiteGenerator):
    """Website generator that renders a user-facing form for a DocType."""

    template = "templates/generators/web_form.html"
    condition_field = "published"  # only published forms are rendered
    page_title_field = "title"
    no_cache = 1

    def get_context(self, context):
        """Populate the template context for rendering this web form.

        For logged-in users of editable forms, either list all of the
        user's existing documents (allow_multiple) or preselect their
        single document.  A specific document requested via the query
        string is loaded outright.
        """
        context.params = frappe.form_dict
        if self.login_required and frappe.session.user != "Guest":
            if self.allow_edit:
                if self.allow_multiple:
                    meta = frappe.get_meta(self.doc_type)
                    # Prefer the DocType's title field for readable labels.
                    context.items = frappe.db.sql("""select name,
                        {0} as title, creation
                        from `tab{1}`
                        where owner=%s and docstatus = 0
                        order by creation desc""".format(meta.title_field or "name",
                            self.doc_type), frappe.session.user, as_dict=True)
                else:
                    name = frappe.db.get_value(self.doc_type, {"owner": frappe.session.user},
                        "name")
                    if name:
                        context.doc_name = name
        if frappe.form_dict.name:
            context.doc = frappe.get_doc(self.doc_type, frappe.form_dict.name)
        context.types = [f.fieldtype for f in self.web_form_fields]
        return context

    def get_parents(self, context):
        """Return breadcrumb entries stored as JSON on this Web Form."""
        if self.breadcrumbs:
            return json.loads(self.breadcrumbs)
@frappe.whitelist(allow_guest=True)
def accept():
    """Create or update a document from a submitted web form.

    Reads frappe.form_dict: 'web_form' names the Web Form, 'doctype' must
    match its configured doc_type, and an optional 'name' selects an
    existing document to update.  Field values posted as JSON containing
    '__file_attachment' are saved as file attachments after the document
    itself is saved.
    """
    args = frappe.form_dict
    files = []
    web_form = frappe.get_doc("Web Form", args.web_form)
    if args.doctype != web_form.doc_type:
        frappe.throw(_("Invalid Request"))
    if args.name:
        # update
        doc = frappe.get_doc(args.doctype, args.name)
    else:
        # insert
        doc = frappe.new_doc(args.doctype)
    # set values
    for fieldname, value in args.iteritems():
        if fieldname not in ("web_form", "cmd", "owner"):
            if value and value.startswith("{"):
                # Looks like JSON: file attachments arrive in this shape.
                try:
                    filedata = json.loads(value)
                    if "__file_attachment" in filedata:
                        files.append((fieldname, filedata))
                        continue
                except ValueError:
                    # Not actually JSON; fall through and store the raw string.
                    pass
            doc.set(fieldname, value)
    if args.name:
        if doc.owner==frappe.session.user:
            # Owners may always edit their own document.
            doc.save(ignore_permissions=True)
        else:
            # only if permissions are present
            doc.save()
    else:
        # insert
        if web_form.login_required and frappe.session.user=="Guest":
            frappe.throw(_("You must login to submit this form"))
        doc.insert(ignore_permissions = True)
    # add files
    if files:
        for f in files:
            fieldname, filedata = f
            # remove earlier attachmed file (if exists)
            if doc.get(fieldname):
                remove_file_by_url(doc.get(fieldname), doc.doctype, doc.name)
            # save new file
            filedoc = save_file(filedata["filename"], filedata["dataurl"],
                doc.doctype, doc.name, decode=True)
            # update values
            doc.set(fieldname, filedoc.file_url)
        doc.save()
@frappe.whitelist()
def delete(web_form, name):
    """Delete one document created through a web form.

    Only the document's owner may delete, and only when the Web Form has
    allow_delete enabled; anything else raises PermissionError.
    """
    web_form = frappe.get_doc("Web Form", web_form)
    owner = frappe.db.get_value(web_form.doc_type, name, "owner")
    if frappe.session.user == owner and web_form.allow_delete:
        frappe.delete_doc(web_form.doc_type, name, ignore_permissions=True)
    else:
        # Python 2 raise syntax; this module targets Python 2.
        raise frappe.PermissionError, "Not Allowed"
| geo-poland/frappe | frappe/website/doctype/web_form/web_form.py | Python | mit | 3,187 |
import gc, os
from tempfile import mkstemp
import warnings
import numpy as np
import nose.tools
from nipy.modalities.fmri.api import fmri_generator, FmriImageList
from nipy.core.api import parcels, fromarray, Image
from nipy.io.api import load_image, save_image
from nipy.testing import funcfile
def setup():
    """Module-level nose setup: silence all warnings for these tests."""
    # Suppress warnings during tests to reduce noise
    warnings.simplefilter("ignore")
def teardown():
    """Module-level nose teardown: restore the default warning filters."""
    # Clear list of warning filters
    warnings.resetwarnings()
def test_write():
    """Round-trip the sample fMRI image through save_image/load_image.

    nose generator test: each ``yield`` is one assertion.  The reloaded
    frames carry 4x4 affines while the in-memory frames carry 5x4 (the
    leading time axis is dropped on disk).
    """
    fp, fname = mkstemp('.nii')
    img = load_image(funcfile)
    save_image(img, fname)
    test = FmriImageList.from_image(load_image(fname))
    yield nose.tools.assert_equal, test[0].affine.shape, (4,4)
    yield nose.tools.assert_equal, img[0].affine.shape, (5,4)
    # Spatial part of the affine must survive the round trip.
    yield nose.tools.assert_true, np.allclose(test[0].affine, img[0].affine[1:])
    # Under windows, if you don't close before delete, you get a
    # locking error.
    os.close(fp)
    os.remove(fname)
def test_iter():
    """Iterate a time-first copy of the sample image and check slice shapes."""
    img = load_image(funcfile)
    # flip to time first version so this makes sense
    from nipy.core.reference.coordinate_map import reorder_input
    arr = np.asarray(img).T
    coordmap = reorder_input(img.coordmap)
    img_t1 = Image(arr, coordmap)
    # Each generated slice keeps the leading axis, drops the second.
    slice_shape = (arr.shape[0],) + arr.shape[2:]
    j = 0
    for i, d in fmri_generator(img_t1):
        j += 1
        yield nose.tools.assert_equal, d.shape, slice_shape
        # Release each (potentially large) slice before the next iteration.
        del(i); gc.collect()
    # The sample data yields exactly three slices.
    yield nose.tools.assert_equal, j, 3
def test_subcoordmap():
    """Indexing an image must slice its coordinate map consistently."""
    img = load_image(funcfile)
    subcoordmap = img[3].coordmap
    # Drop the first input column: the indexed axis is gone from the map.
    xform = img.coordmap.affine[:,1:]
    nose.tools.assert_true(np.allclose(subcoordmap.affine[1:], xform[1:]))
    ## XXX FIXME: why is it [0,0] entry instead of [0] below?
    nose.tools.assert_true(np.allclose(subcoordmap.affine[0], [0,0,0,img.coordmap([3,0,0,0])[0,0]]))
def test_labels1():
    """Parcel-wise iteration must visit every voxel exactly once."""
    img = load_image(funcfile)
    # Derive an integer parcel map from the first frame's intensities.
    parcelmap = fromarray(np.asarray(img[0]), 'kji', 'zyx')
    parcelmap = (np.asarray(parcelmap) * 100).astype(np.int32)
    v = 0
    for i, d in fmri_generator(img, parcels(parcelmap)):
        v += d.shape[1]
    nose.tools.assert_equal(v, parcelmap.size)
| yarikoptic/NiPy-OLD | nipy/modalities/fmri/tests/test_fmri.py | Python | bsd-3-clause | 2,192 |
# This snakemake file performs the following actions on one biosample.
# This biosample may contain multiple subsamples and be sequenced multiple times.
# Further, each sequencing run can be missing some subsamples.
# This snakemake file does not perform optimization of thresholding perameters.
#
# 1. quality trimming
# 2. binning and removing overrepresented reads
# 3. assembly
# 4. assembly assessment and realignment
# 5. blast to nt
# 6. combine from different subsamples and assess total results
#
# This is how to call snakemake
#
# module load use.singlecell
# module load python/3.4.2
# snakemake -n -s snakefile argument (for running just the names)
# snakemake --cluster "sbatch --job-name={params.name} --ntasks=1 --cpus-per-task={threads} \
# --partition={params.partition} --mem={params.mem} -o slurm_output/%j-{params.name}" -p -j -k
# to kill snakemake process call killall snakemake
#
# Cool Commands
# nohup: doesn't kill a process if a shell command is closed
# git add -A: to update changed, added, and deleted file before committing
#
# Revision History
# 2015.05.18 Brian Yu Created
# 2015.05.26 Updated to include combined analysis as well
# 2015.07.13 Updated to continue from Snakemake_toplevel1.py with super_contig
# related processing. Super contigs are created by spades on
# sherlock.stanford.edu bigmem nodes and the fasta files are copied back.
# Import packages
import os, glob, subprocess
import pandas as pd
import numpy as np
from collections import defaultdict
from snakemake.utils import read_job_properties
# Importing variables NEED TO CHANGE THIS ARGUMENT
# "/datastore/brianyu/2014.11.21_YNP_LowerGeyserBasin/"
root_folder = config["location"]
# Importing relavent bio/sub-sample folders, IDs and variables
sample_table = pd.read_table(root_folder+"/code_analysis/subsamples.txt", header=0, index_col=0)
subsampleIDs = list(set(sample_table.index))
parameters = pd.read_table(root_folder+"/code_analysis/parameter.txt", index_col=0)
#print(parameters)
# Pulling out variables from parameter.txt
biosample = parameters.ix["biosample_name",'entry']
code_dir = parameters.ix["code_directory",'entry']
tool_dir = parameters.ix['tool_directory','entry']
# similarity = parameters.ix['similarity','entry']
# kmer_len = parameters.ix['kmer_len','entry']
# split_size = parameters.ix['split_size','entry'] # this is in terms of reads
# contig_thresh = parameters.ix['contig_thresh','entry'] # this is contig length threshold for Blast
# Compute the number of files subcontigs from each subsamples (and total) needs to be split into
depth_table = pd.read_table(root_folder+"/results/snakemake_reads.cnt", index_col=0, header=None)
# total reads should be the 0th column with header 1. there should only be one column
depth_table.rename(columns={1: 'total_reads'}, inplace=True)
depth_table['subsample_clust_file_numbers'] = [int(max(1,np.floor(x/int(parameters.ix['subsample_split_size','entry'])))) \
for x in depth_table['total_reads']]
# biosample_clust_file_numbers = 4
biosample_clust_file_numbers = int(max(1, depth_table.sum(0)['total_reads']/int(parameters.ix['biosample_split_size','entry'])))
#print(depth_table)
#print(biosample_clust_file_numbers)
# Add include files or other snakefile rule files
include: "Snakefile.utils_Mark"
include: "Snakefile.utils_Felix"
include: "Snakefile_helper_Brian.py"
include: "Snakefile_import.py"
include: "Snakefile_subsample_assembly.py"
include: "Snakefile_biosample_assembly.py"
include: "Snakefile_combined_analysis.py"
include: "Snakefile_superContigAnalysis.py"
# User defined constants
workdir: "/local10G/brianyu/snakemake_results/"+parameters.ix["biosample_name",0]
#################################
# A list of all the rules
#################################

# Snakemake's default target: listing a file as input here makes the
# pipeline build everything required to produce it.
rule all:
  # sample should be a list of the subsample names in a biosample.
  # These are the long names in Miseq runs but ILxxxx-N7xx-N5xx in Nextseq runs
  # input: expand("{subsample}/BlastResults.{subsample}.txt", subsample=subsampleIDs)
  input:
    # These are possible outputs to request
    # expand("Combined_Analysis/super_contigs.{id}.similarity_matrix.txt", id=biosample),
    # expand("Combined_Analysis/subsample_contigs.{id}.similarity_matrix.txt", id=biosample),
    # expand("Combined_Analysis/super_contigs_distribution.{id}.txt", id=biosample),
    expand("Combined_Analysis/subsample_species_abundance.{id}.txt", id=biosample),
    expand("Combined_Analysis/BlastResults.{id}.txt", id=biosample),
    expand("Combined_Analysis/quast_report.{id}.txt", id=biosample),
    expand("Combined_Analysis/super_contigs.{id}.alignment_report.txt", id=biosample)
    # Combined analysis results
    # expand("{subsample}/contigCoverage.{subsample}.cnt", subsample=subsampleIDs),
    # expand("{subsample}/BlastResults.{subsample}.txt", subsample=subsampleIDs),
    # expand("{subsample}/quast_report.{subsample}.txt", subsample=subsampleIDs),
    # fastqc of fastq reads in subsamples
    # expand("{subsample}/P1.{subsample}.fastqc_results.txt", subsample=subsampleIDs),
    # expand("{subsample}/P2.{subsample}.fastqc_results.txt", subsample=subsampleIDs)
  params:
    # Used by the cluster submission wrapper (sbatch) for job metadata.
    name="top_level_assembly",
    partition="general",
    mem="3000"
  threads: 1
  version: "1.0"
| brianyu2010/Mini-Metagenomic_Analyses | Snakefile_toplevel2.py | Python | gpl-3.0 | 5,289 |
#
# Copyright (c) 2008--2016 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
#
# Oracle-specific stuff
#
# FIXME: need more documentation
#
import sys
from backend import Backend
from backendLib import DBint, DBstring, DBdateTime, Table, \
TableCollection
from spacewalk.server import rhnSQL
from spacewalk.server.rhnSQL.const import ORACLE, POSTGRESQL
from spacewalk.common.rhnConfig import CFG
from spacewalk.common import timezone_utils
class OracleBackend(Backend):
tables = TableCollection(
# NOTE: pk = primary keys
# attribute = attribute this table links back to
# map = mapping from database fields to generic attribute names
Table('rhnPackageProvides',
fields={
'package_id': DBint(),
'capability_id': DBint(),
'sense': DBint(),
},
pk=['package_id', 'capability_id', 'sense'],
attribute='provides',
map={'sense': 'flags', },
),
Table('rhnPackageRequires',
fields={
'package_id': DBint(),
'capability_id': DBint(),
'sense': DBint(),
},
pk=['package_id', 'capability_id', 'sense'],
attribute='requires',
map={'sense': 'flags', },
),
Table('rhnPackageConflicts',
fields={
'package_id': DBint(),
'capability_id': DBint(),
'sense': DBint(),
},
pk=['package_id', 'capability_id', 'sense'],
attribute='conflicts',
map={'sense': 'flags', },
),
Table('rhnPackageObsoletes',
fields={
'package_id': DBint(),
'capability_id': DBint(),
'sense': DBint(),
},
pk=['package_id', 'capability_id', 'sense'],
attribute='obsoletes',
map={'sense': 'flags', },
),
Table('rhnPackageRecommends',
fields={
'package_id': DBint(),
'capability_id': DBint(),
'sense': DBint(),
},
pk=['package_id', 'capability_id', 'sense'],
attribute='recommends',
map={'sense': 'flags', },
),
Table('rhnPackageSuggests',
fields={
'package_id': DBint(),
'capability_id': DBint(),
'sense': DBint(),
},
pk=['package_id', 'capability_id', 'sense'],
attribute='suggests',
map={'sense': 'flags', },
),
Table('rhnPackageSupplements',
fields={
'package_id': DBint(),
'capability_id': DBint(),
'sense': DBint(),
},
pk=['package_id', 'capability_id', 'sense'],
attribute='supplements',
map={'sense': 'flags', },
),
Table('rhnPackageEnhances',
fields={
'package_id': DBint(),
'capability_id': DBint(),
'sense': DBint(),
},
pk=['package_id', 'capability_id', 'sense'],
attribute='enhances',
map={'sense': 'flags', },
),
Table('rhnPackageBreaks',
fields={
'package_id': DBint(),
'capability_id': DBint(),
'sense': DBint(),
},
pk=['package_id', 'capability_id', 'sense'],
attribute='breaks',
map={'sense': 'flags', },
),
Table('rhnPackagePredepends',
fields={
'package_id': DBint(),
'capability_id': DBint(),
'sense': DBint(),
},
pk=['package_id', 'capability_id', 'sense'],
attribute='predepends',
map={'sense': 'flags', },
),
Table('rhnPackageChangeLogRec',
fields={
'id': DBint(),
'package_id': DBint(),
'changelog_data_id': DBint(),
},
pk=['package_id', 'changelog_data_id'],
attribute='changelog',
sequenceColumn='id',
),
Table('rhnPackageChangeLogData',
fields={
'id': DBint(),
'name': DBstring(128),
'text': DBstring(3000),
'time': DBdateTime()
},
),
Table('rhnPackageFile',
fields={
'package_id': DBint(),
'capability_id': DBint(),
'device': DBint(),
'inode': DBint(),
'file_mode': DBint(),
'username': DBstring(32),
'groupname': DBstring(32),
'rdev': DBint(),
'file_size': DBint(),
'mtime': DBdateTime(),
'checksum_id': DBint(),
'linkto': DBstring(256),
'flags': DBint(),
'verifyflags': DBint(),
'lang': DBstring(32),
},
pk=['package_id', 'capability_id'],
attribute='files',
severityHash={
'mtime': 0,
'file_size': 4,
},
),
Table('rhnPackage',
fields={
'id': DBint(),
'org_id': DBint(),
'name_id': DBint(),
'evr_id': DBint(),
'package_arch_id': DBint(),
'package_group': DBint(),
'rpm_version': DBstring(16),
'description': DBstring(4000),
'summary': DBstring(4000),
'package_size': DBint(),
'payload_size': DBint(),
'installed_size': DBint(),
'build_host': DBstring(256),
'build_time': DBdateTime(),
'source_rpm_id': DBint(),
'checksum_id': DBint(),
'vendor': DBstring(64),
'payload_format': DBstring(32),
'path': DBstring(1000),
'copyright': DBstring(128),
'cookie': DBstring(128),
'header_start': DBint(),
'header_end': DBint(),
'last_modified': DBdateTime(),
},
pk=['org_id', 'name_id', 'evr_id', 'package_arch_id',
'checksum_id'],
nullable=['org_id'],
severityHash={
'path': 1,
'package_size': 2,
'build_time': 3,
'build_host': 3,
'last_modified': 0.5,
# rpm got it wrong so now we have to ignore it
'payload_size': 0,
},
),
Table('rhnChannelPackage',
fields={
'package_id': DBint(),
'channel_id': DBint(),
},
pk=['channel_id', 'package_id'],
),
Table('rhnErrata',
fields={
'id': DBint(),
'advisory': DBstring(100),
'advisory_type': DBstring(32),
'advisory_name': DBstring(100),
'advisory_rel': DBint(),
'product': DBstring(64),
'description': DBstring(4000),
'synopsis': DBstring(4000),
'topic': DBstring(4000),
'solution': DBstring(4000),
'notes': DBstring(4000),
'refers_to': DBstring(4000),
'org_id': DBint(),
'locally_modified': DBstring(1),
'severity_id': DBint(),
'errata_from': DBstring(127),
# We will treat issue_date and update_date as regular dates
# with times instead of DBdate types, otherwise we'd have
# issues with timezones
'issue_date': DBdateTime(),
'update_date': DBdateTime(),
'last_modified': DBdateTime(),
},
pk=['advisory_name', 'org_id'],
defaultSeverity=4,
),
Table('rhnErrataBugList',
fields={
'errata_id': DBint(),
'bug_id': DBint(),
'summary': DBstring(4000),
'href': DBstring(255),
},
pk=['errata_id', 'bug_id'],
attribute='bugs',
defaultSeverity=4,
),
Table('rhnCVE',
fields={
'id': DBint(),
'name': DBstring(20),
},
pk=['name'],
),
Table('rhnErrataCVE',
fields={
'errata_id': DBint(),
'cve_id': DBint(),
},
pk=['errata_id', 'cve_id'],
attribute='cve',
defaultSeverity=4,
),
Table('rhnErrataFile',
fields={
'id': DBint(),
'errata_id': DBint(),
'type': DBint(),
'checksum_id': DBint(),
'filename': DBstring(4000),
},
pk=['errata_id', 'filename', 'checksum_id'],
attribute='files',
defaultSeverity=4,
sequenceColumn='id',
),
Table('rhnErrataFilePackage',
fields={
'errata_file_id': DBint(),
'package_id': DBint(),
},
pk=['errata_file_id', 'package_id'],
),
Table('rhnErrataFilePackageSource',
fields={
'errata_file_id': DBint(),
'package_id': DBint(),
},
pk=['errata_file_id', 'package_id'],
),
Table('rhnErrataFileChannel',
fields={
'errata_file_id': DBint(),
'channel_id': DBint(),
},
pk=['errata_file_id', 'channel_id'],
),
Table('rhnErrataKeyword',
fields={
'errata_id': DBint(),
'keyword': DBstring(64),
},
pk=['errata_id', 'keyword'],
attribute='keywords',
defaultSeverity=4,
),
Table('rhnErrataPackage',
fields={
'errata_id': DBint(),
'package_id': DBint(),
},
pk=['errata_id', 'package_id'],
attribute='packages',
defaultSeverity=4,
),
Table('rhnChannelErrata',
fields={
'errata_id': DBint(),
'channel_id': DBint(),
},
pk=['errata_id', 'channel_id'],
attribute='channels',
defaultSeverity=4,
),
Table('rhnChannel',
fields={
'id': DBint(),
'parent_channel': DBint(),
'org_id': DBint(),
'channel_arch_id': DBint(),
'label': DBstring(128),
'basedir': DBstring(256),
'name': DBstring(256),
'summary': DBstring(500),
'description': DBstring(4000),
'product_name_id': DBint(),
'gpg_key_url': DBstring(256),
'gpg_key_id': DBstring(14),
'gpg_key_fp': DBstring(50),
'end_of_life': DBdateTime(),
'receiving_updates': DBstring(1),
'last_modified': DBdateTime(),
'channel_product_id': DBint(),
'checksum_type_id': DBint(),
'channel_access': DBstring(10),
},
pk=['label'],
severityHash={
'channel_product_id': 0,
},
),
Table('rhnChannelFamily',
fields={
'id': DBint(),
'name': DBstring(128),
'label': DBstring(128),
'product_url': DBstring(128),
},
pk=['label'],
defaultSeverity=4,
),
Table('rhnDistChannelMap',
fields={
'os': DBstring(64),
'release': DBstring(64),
'channel_arch_id': DBint(),
'channel_id': DBint(),
'org_id': DBint(),
},
pk=['release', 'channel_arch_id', 'org_id'],
attribute='dists',
defaultSeverity=4,
),
Table('rhnReleaseChannelMap',
fields={
'product': DBstring(64),
'version': DBstring(64),
'release': DBstring(64),
'channel_arch_id': DBint(),
'channel_id': DBint()
},
pk=['product', 'version', 'release', 'channel_arch_id', 'channel_id'],
attribute='release',
defaultSeverity=4,
),
Table('rhnChannelTrust',
fields={
'channel_id': DBint(),
'org_trust_id': DBint(),
},
pk=['channel_id', 'org_trust_id'],
attribute='trust_list',
defaultSeverity=4,
),
Table('rhnChannelFamilyMembers',
fields={
'channel_id': DBint(),
'channel_family_id': DBint(),
},
pk=['channel_id', 'channel_family_id'],
attribute='families',
defaultSeverity=4,
),
Table('rhnPackageSource',
fields={
'id': DBint(),
'org_id': DBint(),
'source_rpm_id': DBint(),
'package_group': DBint(),
'rpm_version': DBstring(16),
'payload_size': DBint(),
'build_host': DBstring(256),
'build_time': DBdateTime(),
'path': DBstring(1000),
'package_size': DBint(),
'checksum_id': DBint(),
'sigchecksum_id': DBint(),
'vendor': DBstring(64),
'cookie': DBstring(128),
'last_modified': DBdateTime(),
},
pk=['source_rpm_id', 'org_id',
'sigchecksum_id', 'checksum_id'],
nullable=['org_id'],
severityHash={
'path': 1,
'file_size': 2,
'build_host': 3,
'build_time': 3,
# rpm got it wrong so now we have to ignore it
'payload_size': 0,
'last_modified': 0.5,
},
),
Table('rhnServerArch',
fields={
'id': DBint(),
'label': DBstring(64),
'name': DBstring(64),
'arch_type_id': DBint(),
},
pk=['label'],
),
Table('rhnPackageArch',
fields={
'id': DBint(),
'label': DBstring(64),
'name': DBstring(64),
'arch_type_id': DBint(),
},
pk=['label'],
),
Table('rhnChannelArch',
fields={
'id': DBint(),
'label': DBstring(64),
'name': DBstring(64),
'arch_type_id': DBint(),
},
pk=['label'],
),
Table('rhnCPUArch',
fields={
'id': DBint(),
'label': DBstring(64),
'name': DBstring(64),
},
pk=['label'],
),
Table('rhnServerPackageArchCompat',
fields={
'server_arch_id': DBint(),
'package_arch_id': DBint(),
'preference': DBint(),
},
pk=['server_arch_id', 'package_arch_id', 'preference'],
),
Table('rhnServerChannelArchCompat',
fields={
'server_arch_id': DBint(),
'channel_arch_id': DBint(),
},
pk=['server_arch_id', 'channel_arch_id'],
),
Table('rhnChannelPackageArchCompat',
fields={
'channel_arch_id': DBint(),
'package_arch_id': DBint(),
},
pk=['channel_arch_id', 'package_arch_id'],
),
Table('rhnServerServerGroupArchCompat',
fields={
'server_arch_id': DBint(),
'server_group_type': DBint(),
},
pk=['server_arch_id', 'server_group_type'],
),
Table('rhnKickstartableTree',
fields={
'id': DBint(),
'org_id': DBint(),
'base_path': DBstring(256),
'channel_id': DBint(),
'label': DBstring(64),
'boot_image': DBstring(128),
'kstree_type': DBint(),
'install_type': DBint(),
'last_modified': DBdateTime()
},
pk=['label', 'org_id'],
nullable=['org_id'],
),
Table('rhnKSTreeType',
# not used at the moment
fields={
'id': DBint(),
'label': DBstring(32),
'name': DBstring(64),
},
pk=['label'],
),
Table('rhnKSInstallType',
# not used at the moment
fields={
'id': DBint(),
'label': DBstring(32),
'name': DBstring(64),
},
pk=['label'],
),
Table('rhnKSTreeFile',
fields={
'kstree_id': DBint(),
'relative_filename': DBstring(256),
'checksum_id': DBint(),
'file_size': DBint(),
'last_modified': DBdateTime()
},
pk=['kstree_id', 'relative_filename', 'checksum_id'],
attribute='files',
map={
'relative_filename': 'relative_path',
},
),
Table('rhnProductName',
fields={
'id': DBint(),
'label': DBstring(128),
'name': DBstring(128),
},
pk=['id', 'label', 'name'],
),
Table('rhnContentSource',
fields={
'id': DBint(),
'org_id': DBint(),
'label': DBstring(128),
'source_url': DBstring(2048),
'type_id': DBint(),
},
pk=['label', 'org_id', 'type_id'],
nullable=['org_id'],
),
Table('rhnContentSourceSsl',
fields={
'content_source_id': DBint(),
'ssl_ca_cert_id': DBint(),
'ssl_client_cert_id': DBint(),
'ssl_client_key_id': DBint()
},
attribute='ssl-sets',
pk=['content_source_id', 'ssl_ca_cert_id', 'ssl_client_cert_id', 'ssl_client_key_id'],
nullable=['ssl_client_cert_id', 'ssl_client_key_id'],
),
)
    def __init__(self):
        # Bind this backend to the rhnSQL database module; all generic
        # Backend behavior is inherited unchanged.
        Backend.__init__(self, rhnSQL)
def setSessionTimeZoneToLocalTimeZone(self):
sth = self.dbmodule.prepare("alter session set time_zone = '%s'"
% timezone_utils.get_utc_offset())
sth.execute()
    def init(self):
        """
        Override parent to do explicit setting of the date format. (Oracle
        specific)

        Aligns the session time zone with the local one, fixes the session
        date format, then delegates to the generic Backend initialization.
        """
        # Set date format
        self.setSessionTimeZoneToLocalTimeZone()
        self.setDateFormat("YYYY-MM-DD HH24:MI:SS")
        return Backend.init(self)
class PostgresqlBackend(OracleBackend):
    """
    PostgreSQL-specific backend implementation.

    Most of OracleBackend is actually database agnostic, so this class
    inherits from it and overrides only the handful of methods that really
    are Oracle specific.
    """

    def setSessionTimeZoneToLocalTimeZone(self):
        """Set the session time zone to the local UTC offset."""
        offset = timezone_utils.get_utc_offset()
        cursor = self.dbmodule.prepare("set session time zone '%s'" % offset)
        cursor.execute()

    def init(self):
        """
        Initialize the backend, skipping the Oracle-only date format setup
        performed by the parent's init().
        """
        self.setSessionTimeZoneToLocalTimeZone()
        return Backend.init(self)

    # PostgreSQL has no autonomous transactions. We could open a brand new
    # connection per insert (as dblink_exec does in other stored
    # procedures), but a sat-sync performs millions of these lookups and
    # opening that many connections is slow. Instead a dedicated secondary
    # connection is kept open for these inserts, so they can be committed
    # independently of the main transaction.
    def processCapabilities(self, capabilityHash):
        """Resolve each (name, version) capability key to its database id."""
        # The table lock keeps rhnpush (or anybody else) from making this
        # transaction fail.
        try:
            self.dbmodule.execute_secondary(
                "lock table rhnPackageCapability in exclusive mode")
            cursor = self.dbmodule.prepare_secondary(
                "select lookup_package_capability_fast(:name, :version) as id from dual")
            for (name, version) in capabilityHash.keys():
                cursor.execute(name=name, version=(None if version == '' else version))
                row = cursor.fetchone_dict()
                capabilityHash[(name, version)] = row['id']
            # committing also releases the table lock
            self.dbmodule.commit_secondary()
        except Exception:
            err = sys.exc_info()[1]
            self.dbmodule.execute_secondary("rollback")
            raise err

    # Same rationale (and same secondary connection) as processCapabilities.
    def lookupChecksums(self, checksumHash):
        """Resolve each (checksum_type, checksum) key to its database id."""
        if not checksumHash:
            return
        # The table lock keeps rhnpush (or anybody else) from making this
        # transaction fail.
        try:
            self.dbmodule.execute_secondary(
                "lock table rhnChecksum in exclusive mode")
            cursor = self.dbmodule.prepare_secondary(
                "select lookup_checksum_fast(:ctype, :csum) id from dual")
            for (ctype, csum) in checksumHash.keys():
                if csum != '':
                    cursor.execute(ctype=ctype, csum=csum)
                    row = cursor.fetchone_dict()
                    if row:
                        checksumHash[(ctype, csum)] = row['id']
            # committing also releases the table lock
            self.dbmodule.commit_secondary()
        except Exception:
            err = sys.exc_info()[1]
            self.dbmodule.execute_secondary("rollback")
            raise err
def SQLBackend():
    """
    Factory: build and initialize the backend matching CFG.DB_BACKEND.

    Returns an initialized OracleBackend or PostgresqlBackend.
    Raises ValueError for an unsupported CFG.DB_BACKEND value (previously
    this fell through and crashed with an unbound-variable NameError).
    """
    if CFG.DB_BACKEND == ORACLE:
        backend = OracleBackend()
    elif CFG.DB_BACKEND == POSTGRESQL:
        backend = PostgresqlBackend()
    else:
        raise ValueError("Unsupported database backend: %s" % CFG.DB_BACKEND)
    backend.init()
    return backend
| lhellebr/spacewalk | backend/server/importlib/backendOracle.py | Python | gpl-2.0 | 24,786 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#-------------------------------------------------------------------------------
'''
This software has been developed by:
GI Genética, Fisiología e Historia Forestal
Dpto. Sistemas y Recursos Naturales
ETSI Montes, Forestal y del Medio Natural
Universidad Politécnica de Madrid
http://gfhforestal.com/
https://github.com/ggfhf/
Licence: GNU General Public Licence Version 3.
'''
#-------------------------------------------------------------------------------
'''
This file contains functions related to the GMAP-GSNAP process used in both console
mode and gui mode.
'''
#-------------------------------------------------------------------------------
import os
import re
import sys
import xbioinfoapp
import xcluster
import xconfiguration
import xec2
import xlib
import xssh
#-------------------------------------------------------------------------------
def create_gmap_config_file(experiment_id='exp001', reference_dataset_id='NONE', reference_file='NONE', assembly_dataset_id='sdnt-170101-235959', assembly_type='CONTIGS'):
    '''
    Create the GMAP config file with the default options. It is necessary to
    update the options in each run.

    Returns (OK, error_list): OK is False and error_list explains why when
    the file cannot be created.
    '''

    # initialize the control variable and the error list
    OK = True
    error_list = []

    # determine the assembly software from the assembly dataset id prefix
    if assembly_dataset_id.startswith(xlib.get_soapdenovotrans_code()):
        assembly_software = xlib.get_soapdenovotrans_code()
    elif assembly_dataset_id.startswith(xlib.get_transabyss_code()):
        assembly_software = xlib.get_transabyss_code()
    elif assembly_dataset_id.startswith(xlib.get_trinity_code()):
        assembly_software = xlib.get_trinity_code()
    elif assembly_dataset_id.startswith(xlib.get_star_code()):
        assembly_software = xlib.get_star_code()
    elif assembly_dataset_id.startswith(xlib.get_cd_hit_est_code()):
        assembly_software = xlib.get_cd_hit_est_code()
    elif assembly_dataset_id.startswith(xlib.get_transcript_filter_code()):
        assembly_software = xlib.get_transcript_filter_code()
    else:
        # fail early with a clear message: previously an unknown prefix left
        # "assembly_software" unbound and the resulting NameError was masked
        # by the bare "except" below as a misleading file-creation error
        error_list.append('*** ERROR: The assembly dataset id {0} does not correspond to a supported assembly software.'.format(assembly_dataset_id))
        return (False, error_list)

    # create the GMAP config file and write the default options
    try:
        if not os.path.exists(os.path.dirname(get_gmap_config_file())):
            os.makedirs(os.path.dirname(get_gmap_config_file()))
        with open(get_gmap_config_file(), mode='w', encoding='utf8') as file_id:
            file_id.write('{0}\n'.format('# You must review the information of this file and update the values with the corresponding ones to the current run.'))
            file_id.write('{0}\n'.format('#'))
            file_id.write('{0}\n'.format('# The reference file must be located in the cluster directory {0}/experiment_id/reference_dataset_id'.format(xlib.get_cluster_reference_dir())))
            file_id.write('{0}\n'.format('# The assembly files must be located in the cluster directory {0}/experiment_id/assembly_dataset_id'.format(xlib.get_cluster_result_dir())))
            file_id.write('{0}\n'.format('# The experiment_id, reference_dataset_id, reference_file and assembly_dataset_id are fixed in the identification section.'))
            file_id.write('{0}\n'.format('#'))
            file_id.write('{0}\n'.format('# You can consult the parameters of GMAP and their meaning in http://research-pub.gene.com/gmap/.'))
            file_id.write('{0}\n'.format('#'))
            file_id.write('{0}\n'.format('# In section "GMAP parameters", the key "other_parameters" allows you to input additional parameters in the format:'))
            file_id.write('{0}\n'.format('#'))
            file_id.write('{0}\n'.format('# other_parameters = --parameter-1[=value-1][; --parameter-2[=value-2][; ...; --parameter-n[=value-n]]]'))
            file_id.write('{0}\n'.format('#'))
            file_id.write('{0}\n'.format('# parameter-i is a parameter name of GMAP and value-i a valid value of parameter-i, e.g.'))
            file_id.write('{0}\n'.format('#'))
            file_id.write('{0}\n'.format('# other_parameters = --no-chimeras; --canonical-mode=2'))
            file_id.write('{0}\n'.format(''))
            file_id.write('{0}\n'.format('# This section has the information identifies the experiment.'))
            file_id.write('{0}\n'.format('[identification]'))
            file_id.write('{0:<50} {1}\n'.format('experiment_id = {0}'.format(experiment_id), '# experiment identification'))
            file_id.write('{0:<50} {1}\n'.format('reference_dataset_id = {0}'.format(reference_dataset_id), '# reference dataset identification or NONE'))
            file_id.write('{0:<50} {1}\n'.format('reference_file = {0}'.format(reference_file), '# reference file name or NONE'))
            file_id.write('{0:<50} {1}\n'.format('assembly_software = {0}'.format(assembly_software), '# assembly software: {0} ({1}) or {2} ({3}) or {4} ({5}) or {6} ({7}) or {8} ({9}) or {10} ({11})'.format(xlib.get_soapdenovotrans_code(), xlib.get_soapdenovotrans_name(), xlib.get_transabyss_code(), xlib.get_transabyss_name(), xlib.get_trinity_code(), xlib.get_trinity_name(), xlib.get_star_code(), xlib.get_star_name(), xlib.get_cd_hit_est_code(), xlib.get_cd_hit_est_name(), xlib.get_transcript_filter_code(), xlib.get_transcript_filter_name())))
            file_id.write('{0:<50} {1}\n'.format('assembly_dataset_id = {0}'.format(assembly_dataset_id), '# assembly dataset identification'))
            file_id.write('{0:<50} {1}\n'.format('assembly_type = {0}'.format(assembly_type), '# CONTIGS or SCAFFOLDS in {0}; NONE in {1}, {2}, {3}, {4} and {5}'.format(xlib.get_soapdenovotrans_name(), xlib.get_transabyss_name(), xlib.get_trinity_name(), xlib.get_star_name(), xlib.get_cd_hit_est_name(), xlib.get_transcript_filter_name())))
            file_id.write('{0}\n'.format(''))
            file_id.write('{0}\n'.format('# This section has the information to set the GMAP parameters'))
            file_id.write('{0}\n'.format('[GMAP parameters]'))
            file_id.write('{0:<50} {1}\n'.format('threads = 2', '# number of threads for use'))
            file_id.write('{0:<50} {1}\n'.format('kmer = NONE', '# kmer size to use in genome database or NONE (the program will find the highest available kmer size in the genome database)'))
            file_id.write('{0:<50} {1}\n'.format('sampling = NONE', '# Sampling to use in genome database or NONE (the program will find the smallest available sampling value in the genome database within selected k-mer size)'))
            file_id.write('{0:<50} {1}\n'.format('input-buffer-size = 1000', '# size of input buffer'))
            file_id.write('{0:<50} {1}\n'.format('output-buffer-size = 1000', '# size of buffer size in queries for output thread'))
            file_id.write('{0:<50} {1}\n'.format('prunelevel = 0', '# pruning level: 0 (no pruning) or 1 (poor seqs) or 2 (repetitive seqs) or 3 (poor and repetitive)'))
            file_id.write('{0:<50} {1}\n'.format('format = COMPRESS', '# format for output: COMPRESS or SUMMARY or ALIGN or PLS or GFF3_GENE or SPLICESITES or INTRONS or MAP_EXONS or MAP_RANGES or COORDS'))
            file_id.write('{0:<50} {1}\n'.format('other_parameters = NONE', '# additional parameters to the previous ones or NONE'))
    except Exception:
        error_list.append('*** ERROR: The file {0} can not be recreated'.format(get_gmap_config_file()))
        OK = False

    # return the control variable and the error list
    return (OK, error_list)
#-------------------------------------------------------------------------------
def run_gmap_process(cluster_name, log, function=None):
    '''
    Run a GMAP process: validate the config file, connect to the cluster,
    verify the requirements, upload the process script and its starter, and
    submit the job to the grid engine.

    Progress and errors are reported through "log"; "function", when given,
    is executed at the end. Returns the control variable OK.
    '''

    # initialize the control variable
    OK = True

    # get the GMAP option dictionary
    gmap_option_dict = xlib.get_option_dict(get_gmap_config_file())

    # get the experiment identification
    experiment_id = gmap_option_dict['identification']['experiment_id']

    # warn that the log window must not be closed
    if not isinstance(log, xlib.DevStdOut):
        log.write('This process might take several minutes. Do not close this window, please wait!\n')

    # validate the GMAP config file
    log.write('{0}\n'.format(xlib.get_separator()))
    log.write('Validating the {0} config file ...\n'.format(xlib.get_gmap_name()))
    (OK, error_list) = validate_gmap_config_file(strict=True)
    if OK:
        log.write('The config file is OK.\n')
    else:
        log.write('*** ERROR: The config file is not valid.\n')
        log.write('Please correct this file or recreate the config files.\n')

    # create the SSH client connection
    if OK:
        log.write('{0}\n'.format(xlib.get_separator()))
        log.write('Connecting the SSH client ...\n')
        (OK, error_list, ssh_client) = xssh.create_ssh_client_connection(cluster_name, 'master')
        if OK:
            log.write('The SSH client is connected.\n')
        else:
            for error in error_list:
                log.write('{0}\n'.format(error))

    # create the SSH transport connection
    if OK:
        log.write('{0}\n'.format(xlib.get_separator()))
        log.write('Connecting the SSH transport ...\n')
        (OK, error_list, ssh_transport) = xssh.create_ssh_transport_connection(cluster_name, 'master')
        if OK:
            log.write('The SSH transport is connected.\n')
        else:
            for error in error_list:
                log.write('{0}\n'.format(error))

    # create the SFTP client
    if OK:
        log.write('{0}\n'.format(xlib.get_separator()))
        log.write('Connecting the SFTP client ...\n')
        sftp_client = xssh.create_sftp_client(ssh_transport)
        log.write('The SFTP client is connected.\n')

    # warn that the requirements are being verified
    if OK:
        log.write('{0}\n'.format(xlib.get_separator()))
        log.write('Verifying process requirements ...\n')

    # verify the master is running
    if OK:
        (master_state_code, master_state_name) = xec2.get_node_state(cluster_name, 'master')
        if master_state_code != 16:
            log.write('*** ERROR: The cluster {0} is not running. Its state is {1} ({2}).\n'.format(cluster_name, master_state_code, master_state_name))
            OK = False

    # verify the GMAP-GSNAP is setup
    if OK:
        (OK, error_list, is_setup) = xbioinfoapp.is_setup_bioconda_package(xlib.get_gmap_gsnap_bioconda_code(), cluster_name, True, ssh_client)
        if OK:
            if not is_setup:
                log.write('*** ERROR: {0} is not setup.\n'.format(xlib.get_gmap_name()))
                OK = False
        else:
            log.write('*** ERROR: The verification of {0} setup could not be performed.\n'.format(xlib.get_gmap_name()))

    # warn that the requirements are OK
    if OK:
        log.write('Process requirements are OK.\n')

    # determine the run directory in the cluster
    if OK:
        log.write('{0}\n'.format(xlib.get_separator()))
        log.write('Determining the run directory in the cluster ...\n')
        current_run_dir = xlib.get_cluster_current_run_dir(experiment_id, xlib.get_gmap_code())
        command = 'mkdir --parents {0}'.format(current_run_dir)
        (OK, stdout, stderr) = xssh.execute_cluster_command(ssh_client, command)
        if OK:
            log.write('The directory path is {0}.\n'.format(current_run_dir))
        else:
            log.write('*** ERROR: Wrong command ---> {0}\n'.format(command))

    # build the GMAP process script
    if OK:
        log.write('{0}\n'.format(xlib.get_separator()))
        log.write('Building the process script {0} ...\n'.format(get_gmap_process_script()))
        (OK, error_list) = build_gmap_process_script(cluster_name, current_run_dir)
        if OK:
            log.write('The file is built.\n')
        else:
            log.write('*** ERROR: The file could not be built.\n')

    # upload the GMAP process script in the cluster
    if OK:
        log.write('{0}\n'.format(xlib.get_separator()))
        log.write('Uploading the process script {0} in the directory {1} of the master ...\n'.format(get_gmap_process_script(), current_run_dir))
        cluster_path = '{0}/{1}'.format(current_run_dir, os.path.basename(get_gmap_process_script()))
        (OK, error_list) = xssh.put_file(sftp_client, get_gmap_process_script(), cluster_path)
        if OK:
            log.write('The file is uploaded.\n')
        else:
            for error in error_list:
                log.write('{0}\n'.format(error))

    # set run permision to the GMAP process script in the cluster
    if OK:
        log.write('{0}\n'.format(xlib.get_separator()))
        log.write('Setting on the run permision of {0}/{1} ...\n'.format(current_run_dir, os.path.basename(get_gmap_process_script())))
        command = 'chmod u+x {0}/{1}'.format(current_run_dir, os.path.basename(get_gmap_process_script()))
        (OK, stdout, stderr) = xssh.execute_cluster_command(ssh_client, command)
        if OK:
            log.write('The run permision is set.\n')
        else:
            log.write('*** ERROR: Wrong command ---> {0}\n'.format(command))

    # build the GMAP process starter
    if OK:
        log.write('{0}\n'.format(xlib.get_separator()))
        log.write('Building the process starter {0} ...\n'.format(get_gmap_process_starter()))
        (OK, error_list) = build_gmap_process_starter(current_run_dir)
        if OK:
            log.write('The file is built.\n')
        else:
            # NOTE: error prefix normalized to '*** ERROR:' like every other message
            log.write('*** ERROR: The file could not be built.\n')

    # upload the GMAP process starter in the cluster
    if OK:
        log.write('{0}\n'.format(xlib.get_separator()))
        log.write('Uploading the process starter {0} in the directory {1} of the master ...\n'.format(get_gmap_process_starter(), current_run_dir))
        cluster_path = '{0}/{1}'.format(current_run_dir, os.path.basename(get_gmap_process_starter()))
        (OK, error_list) = xssh.put_file(sftp_client, get_gmap_process_starter(), cluster_path)
        if OK:
            log.write('The file is uploaded.\n')
        else:
            for error in error_list:
                log.write('{0}\n'.format(error))

    # set run permision to the GMAP process starter in the cluster
    if OK:
        log.write('{0}\n'.format(xlib.get_separator()))
        log.write('Setting on the run permision of {0}/{1} ...\n'.format(current_run_dir, os.path.basename(get_gmap_process_starter())))
        command = 'chmod u+x {0}/{1}'.format(current_run_dir, os.path.basename(get_gmap_process_starter()))
        (OK, stdout, stderr) = xssh.execute_cluster_command(ssh_client, command)
        if OK:
            log.write('The run permision is set.\n')
        else:
            log.write('*** ERROR: Wrong command ---> {0}\n'.format(command))

    # submit the GMAP process
    if OK:
        log.write('{0}\n'.format(xlib.get_separator()))
        log.write('Submitting the process script {0}/{1} ...\n'.format(current_run_dir, os.path.basename(get_gmap_process_starter())))
        sge_env = xcluster.get_sge_env()
        command = '{0}; qsub -V -b n -cwd {1}/{2}'.format(sge_env, current_run_dir, os.path.basename(get_gmap_process_starter()))
        (OK, stdout, stderr) = xssh.execute_cluster_command(ssh_client, command)
        if OK:
            for line in stdout:
                log.write('{0}\n'.format(line))
        else:
            log.write('*** ERROR: Wrong command ---> {0}\n'.format(command))

    # close the SSH transport connection
    if OK:
        log.write('{0}\n'.format(xlib.get_separator()))
        log.write('Closing the SSH transport connection ...\n')
        xssh.close_ssh_transport_connection(ssh_transport)
        log.write('The connection is closed.\n')

    # close the SSH client connection
    if OK:
        log.write('{0}\n'.format(xlib.get_separator()))
        log.write('Closing the SSH client connection ...\n')
        xssh.close_ssh_client_connection(ssh_client)
        log.write('The connection is closed.\n')

    # warn that the log window can be closed
    if not isinstance(log, xlib.DevStdOut):
        log.write('{0}\n'.format(xlib.get_separator()))
        log.write('You can close this window now.\n')

    # execute final function
    if function is not None:
        function()

    # return the control variable
    return OK
#-------------------------------------------------------------------------------
def validate_gmap_config_file(strict):
    '''
    Validate the GMAP config file of a run.

    Returns (OK, error_list): OK is True when the file is valid and
    error_list holds one message per detected problem.

    NOTE(review): the "strict" parameter is kept for interface compatibility
    with the other validation functions, but it is currently unused.
    '''

    # initialize the control variable and the error list
    OK = True
    error_list = []

    # initialize the value used when a key is not found
    not_found = '***NOTFOUND***'

    # get the option dictionary; a parsing failure means a syntax error
    try:
        gmap_option_dict = xlib.get_option_dict(get_gmap_config_file())
    except Exception:
        error_list.append('*** ERROR: The syntax is WRONG.')
        OK = False
    else:

        # get the sections list
        sections_list = sorted(gmap_option_dict.keys())

        # check section "identification"
        if 'identification' not in sections_list:
            error_list.append('*** ERROR: the section "identification" is not found.')
            OK = False
        else:

            # check section "identification" - key "experiment_id"
            experiment_id = gmap_option_dict.get('identification', {}).get('experiment_id', not_found)
            if experiment_id == not_found:
                error_list.append('*** ERROR: the key "experiment_id" is not found in the section "identification".')
                OK = False

            # check section "identification" - key "reference_dataset_id"
            reference_dataset_id = gmap_option_dict.get('identification', {}).get('reference_dataset_id', not_found)
            if reference_dataset_id == not_found:
                error_list.append('*** ERROR: the key "reference_dataset_id" is not found in the section "identification".')
                OK = False

            # check section "identification" - key "reference_file"
            reference_file = gmap_option_dict.get('identification', {}).get('reference_file', not_found)
            if reference_file == not_found:
                error_list.append('*** ERROR: the key "reference_file" is not found in the section "identification".')
                OK = False

            # check section "identification" - key "assembly_software"
            assembly_software = gmap_option_dict.get('identification', {}).get('assembly_software', not_found)
            if assembly_software == not_found:
                error_list.append('*** ERROR: the key "assembly_software" is not found in the section "identification".')
                OK = False
            elif assembly_software not in [xlib.get_soapdenovotrans_code(), xlib.get_transabyss_code(), xlib.get_trinity_code(), xlib.get_star_code(), xlib.get_cd_hit_est_code(), xlib.get_transcript_filter_code()]:
                error_list.append('*** ERROR: the key "assembly_software" value in the section "identification" must be {0} or {1} or {2} or {3} or {4} or {5}.'.format(xlib.get_soapdenovotrans_code(), xlib.get_transabyss_code(), xlib.get_trinity_code(), xlib.get_star_code(), xlib.get_cd_hit_est_code(), xlib.get_transcript_filter_code()))
                OK = False

            # check section "identification" - key "assembly_dataset_id"
            assembly_dataset_id = gmap_option_dict.get('identification', {}).get('assembly_dataset_id', not_found)
            if assembly_dataset_id == not_found:
                error_list.append('*** ERROR: the key "assembly_dataset_id" is not found in the section "identification".')
                OK = False
            elif not assembly_dataset_id.startswith((xlib.get_soapdenovotrans_code(), xlib.get_transabyss_code(), xlib.get_trinity_code(), xlib.get_star_code(), xlib.get_cd_hit_est_code(), xlib.get_transcript_filter_code())):
                error_list.append('*** ERROR: the key "assembly_dataset_id" value is not a {0} nor {1} nor {2} nor {3} nor {4} nor {5} assembly.'.format(xlib.get_soapdenovotrans_name(), xlib.get_transabyss_name(), xlib.get_trinity_name(), xlib.get_star_name(), xlib.get_cd_hit_est_name(), xlib.get_transcript_filter_name()))
                OK = False

            # check section "identification" - key "assembly_type"
            assembly_type = gmap_option_dict.get('identification', {}).get('assembly_type', not_found)
            if assembly_type == not_found:
                error_list.append('*** ERROR: the key "assembly_type" is not found in the section "identification".')
                OK = False
            elif assembly_dataset_id.startswith(xlib.get_soapdenovotrans_code()):
                if assembly_type.upper() not in ['CONTIGS', 'SCAFFOLDS']:
                    error_list.append('*** ERROR: the key "assembly_type" must be "CONTIGS" or "SCAFFOLDS" when {0} is the assembly software.'.format(xlib.get_soapdenovotrans_name()))
                    OK = False
            elif assembly_dataset_id.startswith((xlib.get_transabyss_code(), xlib.get_trinity_code(), xlib.get_star_code(), xlib.get_cd_hit_est_code(), xlib.get_transcript_filter_code())):
                if assembly_type.upper() != 'NONE':
                    error_list.append('*** ERROR: the key "assembly_type" must be "NONE" when {0} or {1} or {2} or {3} or {4} is the assembly software.'.format(xlib.get_transabyss_name(), xlib.get_trinity_name(), xlib.get_star_name(), xlib.get_cd_hit_est_name(), xlib.get_transcript_filter_name()))
                    OK = False

        # check section "GMAP parameters"
        if 'GMAP parameters' not in sections_list:
            error_list.append('*** ERROR: the section "GMAP parameters" is not found.')
            OK = False
        else:

            # check section "GMAP parameters" - key "threads"
            threads = gmap_option_dict.get('GMAP parameters', {}).get('threads', not_found)
            if threads == not_found:
                error_list.append('*** ERROR: the key "threads" is not found in the section "GMAP parameters".')
                OK = False
            else:
                try:
                    # a non-numeric value raises ValueError and shares the message below
                    if int(threads) < 1:
                        raise ValueError()
                except ValueError:
                    error_list.append('*** ERROR: the key "threads" in the section "GMAP parameters" must be an integer value greater or equal to 1.')
                    OK = False

            # check section "GMAP parameters" - key "kmer"
            kmer = gmap_option_dict.get('GMAP parameters', {}).get('kmer', not_found)
            if kmer == not_found:
                error_list.append('*** ERROR: the key "kmer" is not found in the section "GMAP parameters".')
                OK = False
            else:
                try:
                    if kmer.upper() != 'NONE' and (int(kmer) < 1 or int(kmer) > 16):
                        raise ValueError()
                except ValueError:
                    error_list.append('*** ERROR: the key "kmer" in the section "GMAP parameters" must be an integer value between 1 and 16 or NONE.')
                    OK = False

            # check section "GMAP parameters" - key "sampling"
            sampling = gmap_option_dict.get('GMAP parameters', {}).get('sampling', not_found)
            if sampling == not_found:
                error_list.append('*** ERROR: the key "sampling" is not found in the section "GMAP parameters".')
                OK = False
            else:
                try:
                    if sampling.upper() != 'NONE' and int(sampling) < 1:
                        raise ValueError()
                except ValueError:
                    error_list.append('*** ERROR: the key "sampling" in the section "GMAP parameters" must be an integer value greater or equal to 1 or NONE.')
                    OK = False

            # check section "GMAP parameters" - key "input-buffer-size"
            input_buffer_size = gmap_option_dict.get('GMAP parameters', {}).get('input-buffer-size', not_found)
            if input_buffer_size == not_found:
                error_list.append('*** ERROR: the key "input-buffer-size" is not found in the section "GMAP parameters".')
                OK = False
            else:
                try:
                    if int(input_buffer_size) < 1:
                        raise ValueError()
                except ValueError:
                    error_list.append('*** ERROR: the key "input-buffer-size" in the section "GMAP parameters" must be an integer value greater or equal to 1.')
                    OK = False

            # check section "GMAP parameters" - key "output-buffer-size"
            output_buffer_size = gmap_option_dict.get('GMAP parameters', {}).get('output-buffer-size', not_found)
            if output_buffer_size == not_found:
                error_list.append('*** ERROR: the key "output-buffer-size" is not found in the section "GMAP parameters".')
                OK = False
            else:
                try:
                    if int(output_buffer_size) < 1:
                        raise ValueError()
                except ValueError:
                    error_list.append('*** ERROR: the key "output-buffer-size" in the section "GMAP parameters" must be an integer value greater or equal to 1.')
                    OK = False

            # check section "GMAP parameters" - key "prunelevel"
            prunelevel = gmap_option_dict.get('GMAP parameters', {}).get('prunelevel', not_found)
            if prunelevel == not_found:
                error_list.append('*** ERROR: the key "prunelevel" is not found in the section "GMAP parameters".')
                OK = False
            elif prunelevel not in ['0', '1', '2', '3']:
                error_list.append('*** ERROR: the key "prunelevel" in the section "GMAP parameters" must be 0 (no pruning) or 1 (poor seqs) or 2 (repetitive seqs) or 3 (poor and repetitive).')
                OK = False

            # check section "GMAP parameters" - key "format"
            # (local renamed to avoid shadowing the built-in "format")
            output_format = gmap_option_dict.get('GMAP parameters', {}).get('format', not_found)
            if output_format == not_found:
                error_list.append('*** ERROR: the key "format" is not found in the section "GMAP parameters".')
                OK = False
            elif output_format.upper() not in ['COMPRESS', 'SUMMARY', 'ALIGN', 'PLS', 'GFF3_GENE', 'SPLICESITES', 'INTRONS', 'MAP_EXONS', 'MAP_RANGES', 'COORDS']:
                error_list.append('*** ERROR: the key "format" in the section "GMAP parameters" must be COMPRESS or SUMMARY or ALIGN or PLS or GFF3_GENE or SPLICESITES or INTRONS or MAP_EXONS or MAP_RANGES or COORDS.')
                OK = False

            # check section "GMAP parameters" - key "other_parameters"
            not_allowed_parameters_list = ['nthreads', 'kmer', 'sampling', 'input-buffer-size', 'output-buffer-size', 'prunelevel', 'compress', 'summary', 'align', 'format']
            other_parameters = gmap_option_dict.get('GMAP parameters', {}).get('other_parameters', not_found)
            if other_parameters == not_found:
                error_list.append('*** ERROR: the key "other_parameters" is not found in the section "GMAP parameters".')
                OK = False
            elif other_parameters.upper() != 'NONE':
                parameter_list = [x.strip() for x in other_parameters.split(';')]
                for parameter in parameter_list:
                    try:
                        if parameter.find('=') > 0:
                            mo = re.search(r'^--(.+)=(.+)$', parameter)
                            parameter_name = mo.group(1).strip()
                        else:
                            mo = re.search(r'^--(.+)$', parameter)
                            parameter_name = mo.group(1).strip()
                    except AttributeError:
                        # re.search returned None: the parameter is malformed
                        error_list.append('*** ERROR: the value of the key "other_parameters" in the section "GMAP parameters" must be NONE or a valid parameter list.')
                        OK = False
                        break
                    else:
                        if parameter_name in not_allowed_parameters_list:
                            error_list.append('*** ERROR: the parameter {0} is not allowed in the key "other_parameters" of the section "GMAP parameters" because it is controlled by NGScloud.'.format(parameter_name))
                            OK = False

    # warn that the results config file is not valid if there are any errors
    if not OK:
        error_list.append('\nThe {0} config file is not valid. Please, correct this file or recreate it.'.format(xlib.get_gmap_name()))

    # return the control variable and the error list
    return (OK, error_list)
#-------------------------------------------------------------------------------
def build_gmap_process_script(cluster_name, current_run_dir):
    '''
    Build the current GMAP process script.

    The generated file is a Bash script that, on a cluster node, activates the
    GMAP-GSNAP Bioconda environment, builds the GMAP database from the
    reference file and then runs "gmap" on the assembled transcriptome,
    e-mailing the contact address when the run ends OK or wrong.

    Parameters:
        cluster_name: name of the cluster (used only in log and mail texts).
        current_run_dir: cluster directory where the process will run and
            where its output files will be written.

    Returns:
        (OK, error_list): OK is False when the script file can not be
        created locally; error_list holds the corresponding messages.
    '''
    # initialize the control variable and the error list
    OK = True
    error_list = []
    # get the GMAP option dictionary
    gmap_option_dict = xlib.get_option_dict(get_gmap_config_file())
    # get the options
    experiment_id = gmap_option_dict['identification']['experiment_id']
    reference_dataset_id = gmap_option_dict['identification']['reference_dataset_id']
    reference_file = gmap_option_dict['identification']['reference_file']
    assembly_software = gmap_option_dict['identification']['assembly_software']
    assembly_dataset_id = gmap_option_dict['identification']['assembly_dataset_id']
    assembly_type = gmap_option_dict['identification']['assembly_type']
    threads = gmap_option_dict['GMAP parameters']['threads']
    kmer = gmap_option_dict['GMAP parameters']['kmer']
    sampling = gmap_option_dict['GMAP parameters']['sampling']
    input_buffer_size = gmap_option_dict['GMAP parameters']['input-buffer-size']
    output_buffer_size = gmap_option_dict['GMAP parameters']['output-buffer-size']
    prunelevel = gmap_option_dict['GMAP parameters']['prunelevel']
    # NOTE(review): 'format' shadows the builtin of the same name; it holds the
    # GMAP output format chosen in the config file (COMPRESS, SUMMARY, ALIGN,
    # PSL, GFF3_GENE, ...). Rename if this function is ever refactored.
    format = gmap_option_dict['GMAP parameters']['format']
    other_parameters = gmap_option_dict['GMAP parameters']['other_parameters']
    # set the cluster reference dataset directory
    cluster_reference_dataset_dir = xlib.get_cluster_reference_dataset_dir(reference_dataset_id)
    # set the cluster reference file
    cluster_reference_file = xlib.get_cluster_reference_file(reference_dataset_id, reference_file)
    # set the GMAP database name
    reference_file_name, reference_file_extension = os.path.splitext(reference_file)
    gmap_database = '{0}-gmap_database'.format(reference_file_name)
    # set the transcriptome file path
    # (each assembly software writes its result file under a different,
    # software-specific name inside the experiment result dataset directory)
    if assembly_software == xlib.get_soapdenovotrans_code():
        if assembly_type.upper() == 'CONTIGS':
            transcriptome_file = '{0}/{1}-{2}.contig'.format(xlib.get_cluster_experiment_result_dataset_dir(experiment_id, assembly_dataset_id), experiment_id, assembly_dataset_id)
        elif assembly_type.upper() == 'SCAFFOLDS':
            transcriptome_file = '{0}/{1}-{2}.scafSeq'.format(xlib.get_cluster_experiment_result_dataset_dir(experiment_id, assembly_dataset_id), experiment_id, assembly_dataset_id)
    elif assembly_software == xlib.get_transabyss_code():
        transcriptome_file = '{0}/transabyss-final.fa'.format(xlib.get_cluster_experiment_result_dataset_dir(experiment_id, assembly_dataset_id))
    elif assembly_software == xlib.get_trinity_code():
        transcriptome_file = '{0}/Trinity.fasta'.format(xlib.get_cluster_experiment_result_dataset_dir(experiment_id, assembly_dataset_id))
    elif assembly_software == xlib.get_star_code():
        transcriptome_file = '{0}/Trinity-GG.fasta'.format(xlib.get_cluster_experiment_result_dataset_dir(experiment_id, assembly_dataset_id))
    elif assembly_software == xlib.get_cd_hit_est_code():
        transcriptome_file = '{0}/clustered-transcriptome.fasta'.format(xlib.get_cluster_experiment_result_dataset_dir(experiment_id, assembly_dataset_id))
    elif assembly_software == xlib.get_transcript_filter_code():
        transcriptome_file = '{0}/filtered-transcriptome.fasta'.format(xlib.get_cluster_experiment_result_dataset_dir(experiment_id, assembly_dataset_id))
    # set the output file path
    # (the file is named after the lower-cased output format, e.g.
    # "gmap_output_summary.txt")
    output_file = 'gmap_output_{0}.txt'.format(format.lower())
    # get the GMAP process script name
    gmap_process_script = get_gmap_process_script()
    # write the GMAP process script
    try:
        if not os.path.exists(os.path.dirname(gmap_process_script)):
            os.makedirs(os.path.dirname(gmap_process_script))
        with open(gmap_process_script, mode='w', encoding='utf8', newline='\n') as file_id:
            file_id.write('{0}\n'.format('#!/bin/bash'))
            file_id.write('{0}\n'.format('#-------------------------------------------------------------------------------'))
            file_id.write('{0}\n'.format('GMAP_GSNAP_PATH={0}/{1}/envs/{2}/bin'.format(xlib.get_cluster_app_dir(), xlib.get_miniconda3_name(), xlib.get_gmap_gsnap_bioconda_code())))
            file_id.write('{0}\n'.format('PATH=$GMAP_GSNAP_PATH:$PATH'))
            file_id.write('{0}\n'.format('SEP="#########################################"'))
            file_id.write('{0}\n'.format('cd {0}/{1}/bin'.format(xlib.get_cluster_app_dir(), xlib.get_miniconda3_name())))
            file_id.write('{0}\n'.format('source activate {0}'.format(xlib.get_gmap_gsnap_bioconda_code())))
            file_id.write('{0}\n'.format('#-------------------------------------------------------------------------------'))
            file_id.write('{0}\n'.format('function init'))
            file_id.write('{0}\n'.format('{'))
            file_id.write('{0}\n'.format('    INIT_DATETIME=`date --utc +%s`'))
            file_id.write('{0}\n'.format('    FORMATTED_INIT_DATETIME=`date --date="@$INIT_DATETIME" "+%Y-%m-%d %H:%M:%S"`'))
            file_id.write('{0}\n'.format('    echo "$SEP"'))
            file_id.write('{0}\n'.format('    echo "Script started in node $HOSTNAME of cluster {0} at $FORMATTED_INIT_DATETIME UTC."'.format(cluster_name)))
            file_id.write('{0}\n'.format('}'))
            file_id.write('{0}\n'.format('#-------------------------------------------------------------------------------'))
            file_id.write('{0}\n'.format('function build_gmap_database'))
            file_id.write('{0}\n'.format('{'))
            file_id.write('{0}\n'.format('    cd {0}'.format(current_run_dir)))
            file_id.write('{0}\n'.format('    echo "$SEP"'))
            file_id.write('{0}\n'.format('    /usr/bin/time \\'))
            file_id.write('{0}\n'.format('        --format="$SEP\\nElapsed real time (s): %e\\nCPU time in kernel mode (s): %S\\nCPU time in user mode (s): %U\\nPercentage of CPU: %P\\nMaximum resident set size(Kb): %M\\nAverage total memory use (Kb):%K" \\'))
            file_id.write('{0}\n'.format('        gmap_build \\'))
            # NOTE(review): unlike the gmap invocation below, these two options
            # have no space before the trailing backslash; the shell still
            # separates the arguments via the next line's leading spaces.
            file_id.write('{0}\n'.format('            --dir={0}\\'.format(cluster_reference_dataset_dir)))
            file_id.write('{0}\n'.format('            --db={0}\\'.format(gmap_database)))
            if kmer.upper() != 'NONE':
                file_id.write('{0}\n'.format('            --kmer={0} \\'.format(kmer)))
            file_id.write('{0}\n'.format('            {0}'.format(cluster_reference_file)))
            file_id.write('{0}\n'.format('}'))
            file_id.write('{0}\n'.format('#-------------------------------------------------------------------------------'))
            file_id.write('{0}\n'.format('function run_gmap_process'))
            file_id.write('{0}\n'.format('{'))
            file_id.write('{0}\n'.format('    cd {0}'.format(current_run_dir)))
            file_id.write('{0}\n'.format('    echo "$SEP"'))
            file_id.write('{0}\n'.format('    gmap --version'))
            file_id.write('{0}\n'.format('    echo "$SEP"'))
            file_id.write('{0}\n'.format('    /usr/bin/time \\'))
            file_id.write('{0}\n'.format('        --format="$SEP\\nElapsed real time (s): %e\\nCPU time in kernel mode (s): %S\\nCPU time in user mode (s): %U\\nPercentage of CPU: %P\\nMaximum resident set size(Kb): %M\\nAverage total memory use (Kb):%K" \\'))
            file_id.write('{0}\n'.format('        gmap \\'))
            file_id.write('{0}\n'.format('            --nthreads={0} \\'.format(threads)))
            file_id.write('{0}\n'.format('            --dir={0} \\'.format(cluster_reference_dataset_dir)))
            file_id.write('{0}\n'.format('            --db={0} \\'.format(gmap_database)))
            if kmer.upper() != 'NONE':
                file_id.write('{0}\n'.format('            --kmer={0} \\'.format(kmer)))
            if sampling.upper() != 'NONE':
                file_id.write('{0}\n'.format('            --sampling={0} \\'.format(sampling)))
            file_id.write('{0}\n'.format('            --input-buffer-size={0} \\'.format(input_buffer_size)))
            file_id.write('{0}\n'.format('            --output-buffer-size={0} \\'.format(output_buffer_size)))
            file_id.write('{0}\n'.format('            --prunelevel={0} \\'.format(prunelevel)))
            # COMPRESS/SUMMARY/ALIGN are flags of their own; any other value is
            # passed through the generic --format option
            if format.upper() == 'COMPRESS':
                file_id.write('{0}\n'.format('            --compress \\'))
            elif format.upper() == 'SUMMARY':
                file_id.write('{0}\n'.format('            --summary \\'))
            elif format.upper() == 'ALIGN':
                file_id.write('{0}\n'.format('            --align \\'))
            else:
                file_id.write('{0}\n'.format('            --format={0} \\'.format(format.lower())))
            file_id.write('{0}\n'.format('            --ordered \\'))
            file_id.write('{0}\n'.format('            --nofails \\'))
            # pass through any user-supplied extra parameters; they were already
            # validated against the not-allowed list by the config checker
            if other_parameters.upper() != 'NONE':
                parameter_list = [x.strip() for x in other_parameters.split(';')]
                for i in range(len(parameter_list)):
                    if parameter_list[i].find('=') > 0:
                        pattern = r'^--(.+)=(.+)$'
                        mo = re.search(pattern, parameter_list[i])
                        parameter_name = mo.group(1).strip()
                        parameter_value = mo.group(2).strip()
                        file_id.write('{0}\n'.format('            --{0}={1} \\'.format(parameter_name, parameter_value)))
                    else:
                        pattern = r'^--(.+)$'
                        mo = re.search(pattern, parameter_list[i])
                        parameter_name = mo.group(1).strip()
                        file_id.write('{0}\n'.format('            --{0} \\'.format(parameter_name)))
            file_id.write('{0}\n'.format('            {0} \\'.format(transcriptome_file)))
            file_id.write('{0}\n'.format('            > {0}'.format(output_file)))
            file_id.write('{0}\n'.format('    RC=$?'))
            file_id.write('{0}\n'.format('    if [ $RC -ne 0 ]; then manage_error gmap $RC; fi'))
            file_id.write('{0}\n'.format('}'))
            file_id.write('{0}\n'.format('#-------------------------------------------------------------------------------'))
            file_id.write('{0}\n'.format('function end'))
            file_id.write('{0}\n'.format('{'))
            file_id.write('{0}\n'.format('    END_DATETIME=`date --utc +%s`'))
            file_id.write('{0}\n'.format('    FORMATTED_END_DATETIME=`date --date="@$END_DATETIME" "+%Y-%m-%d %H:%M:%S"`'))
            file_id.write('{0}\n'.format('    calculate_duration'))
            file_id.write('{0}\n'.format('    echo "$SEP"'))
            file_id.write('{0}\n'.format('    echo "Script ended OK at $FORMATTED_END_DATETIME UTC with a run duration of $DURATION s ($FORMATTED_DURATION)."'))
            file_id.write('{0}\n'.format('    echo "$SEP"'))
            file_id.write('{0}\n'.format('    RECIPIENT={0}'.format(xconfiguration.get_contact_data())))
            file_id.write('{0}\n'.format('    SUBJECT="{0}: {1} process"'.format(xlib.get_project_name(), xlib.get_gmap_name())))
            file_id.write('{0}\n'.format('    MESSAGE="The {0} process in node $HOSTNAME of cluster {1} ended OK at $FORMATTED_END_DATETIME with a run duration of $DURATION s ($FORMATTED_DURATION). Please review its log.<br/><br/>Regards,<br/>GI Genetica, Fisiologia e Historia Forestal<br/>Dpto. Sistemas y Recursos Naturales<br/>ETSI Montes, Forestal y del Medio Natural<br/>Universidad Politecnica de Madrid<br/>https://github.com/ggfhf/"'.format(xlib.get_gmap_name(), cluster_name)))
            file_id.write('{0}\n'.format('    mail --append "Content-type: text/html;" --subject "$SUBJECT" "$RECIPIENT" <<< "$MESSAGE"'))
            file_id.write('{0}\n'.format('    exit 0'))
            file_id.write('{0}\n'.format('}'))
            file_id.write('{0}\n'.format('#-------------------------------------------------------------------------------'))
            file_id.write('{0}\n'.format('function manage_error'))
            file_id.write('{0}\n'.format('{'))
            file_id.write('{0}\n'.format('    END_DATETIME=`date --utc +%s`'))
            file_id.write('{0}\n'.format('    FORMATTED_END_DATETIME=`date --date="@$END_DATETIME" "+%Y-%m-%d %H:%M:%S"`'))
            file_id.write('{0}\n'.format('    calculate_duration'))
            file_id.write('{0}\n'.format('    echo "$SEP"'))
            file_id.write('{0}\n'.format('    echo "ERROR: $1 returned error $2"'))
            file_id.write('{0}\n'.format('    echo "Script ended WRONG at $FORMATTED_END_DATETIME UTC with a run duration of $DURATION s ($FORMATTED_DURATION)."'))
            file_id.write('{0}\n'.format('    echo "$SEP"'))
            file_id.write('{0}\n'.format('    RECIPIENT={0}'.format(xconfiguration.get_contact_data())))
            file_id.write('{0}\n'.format('    SUBJECT="{0}: {1} process"'.format(xlib.get_project_name(), xlib.get_gmap_name())))
            file_id.write('{0}\n'.format('    MESSAGE="The {0} process in node $HOSTNAME of cluster {1} ended WRONG at $FORMATTED_END_DATETIME with a run duration of $DURATION s ($FORMATTED_DURATION). Please review its log.<br/><br/>Regards,<br/>GI Genetica, Fisiologia e Historia Forestal<br/>Dpto. Sistemas y Recursos Naturales<br/>ETSI Montes, Forestal y del Medio Natural<br/>Universidad Politecnica de Madrid<br/>https://github.com/ggfhf/"'.format(xlib.get_gmap_name(), cluster_name)))
            file_id.write('{0}\n'.format('    mail --append "Content-type: text/html;" --subject "$SUBJECT" "$RECIPIENT" <<< "$MESSAGE"'))
            file_id.write('{0}\n'.format('    exit 3'))
            file_id.write('{0}\n'.format('}'))
            file_id.write('{0}\n'.format('#-------------------------------------------------------------------------------'))
            file_id.write('{0}\n'.format('function calculate_duration'))
            file_id.write('{0}\n'.format('{'))
            file_id.write('{0}\n'.format('    DURATION=`expr $END_DATETIME - $INIT_DATETIME`'))
            file_id.write('{0}\n'.format('    HH=`expr $DURATION / 3600`'))
            file_id.write('{0}\n'.format('    MM=`expr $DURATION % 3600 / 60`'))
            file_id.write('{0}\n'.format('    SS=`expr $DURATION % 60`'))
            file_id.write('{0}\n'.format('    FORMATTED_DURATION=`printf "%03d:%02d:%02d\\n" $HH $MM $SS`'))
            file_id.write('{0}\n'.format('}'))
            file_id.write('{0}\n'.format('#-------------------------------------------------------------------------------'))
            file_id.write('{0}\n'.format('init'))
            file_id.write('{0}\n'.format('build_gmap_database'))
            file_id.write('{0}\n'.format('run_gmap_process'))
            file_id.write('{0}\n'.format('end'))
    # NOTE(review): bare "except:" also hides SystemExit/KeyboardInterrupt;
    # consider narrowing to "except Exception:" in a future change
    except:
        error_list.append('*** ERROR: The file {0} can not be created'.format(gmap_process_script))
        OK = False
    # return the control variable and the error list
    return (OK, error_list)
#-------------------------------------------------------------------------------
def build_gmap_process_starter(current_run_dir):
    '''
    Build the starter of the current GMAP process.

    The starter is a short Bash script that runs the GMAP process script in
    "current_run_dir", redirecting all of its output to the cluster log file.

    Returns:
        (OK, error_list): OK is False when the starter file can not be
        created locally; error_list holds the corresponding messages.
    '''
    # initialize the control variable and the error list
    OK = True
    error_list = []
    # get the starter path once: it is used to create directories, to write
    # the file and to report errors
    starter_path = get_gmap_process_starter()
    # write the GMAP process starter
    try:
        if not os.path.exists(os.path.dirname(starter_path)):
            os.makedirs(os.path.dirname(starter_path))
        with open(starter_path, mode='w', encoding='utf8', newline='\n') as file_id:
            file_id.write('{0}\n'.format('#!/bin/bash'))
            file_id.write('{0}\n'.format('#-------------------------------------------------------------------------------'))
            file_id.write('{0}\n'.format('{0}/{1} &>{0}/{2}'.format(current_run_dir, os.path.basename(get_gmap_process_script()), xlib.get_cluster_log_file())))
    # was a bare "except:", which would also swallow SystemExit and
    # KeyboardInterrupt; Exception is wide enough for any I/O failure here
    except Exception:
        error_list.append('*** ERROR: The file {0} can not be created'.format(starter_path))
        OK = False
    # return the control variable and the error list
    return (OK, error_list)
#-------------------------------------------------------------------------------
def get_gmap_config_file():
    '''
    Get the GMAP config file path.
    '''
    # the config file lives in the local config directory and is named after
    # the GMAP application code
    return '{0}/{1}-config.txt'.format(xlib.get_config_dir(), xlib.get_gmap_code())
#-------------------------------------------------------------------------------
def get_gmap_process_script():
    '''
    Get the GMAP process script path in the local computer.
    '''
    # the script is built in the local temporary directory and is named after
    # the GMAP application code
    return '{0}/{1}-process.sh'.format(xlib.get_temp_dir(), xlib.get_gmap_code())
#-------------------------------------------------------------------------------
def get_gmap_process_starter():
    '''
    Get the GMAP process starter path in the local computer.
    '''
    # the starter is built in the local temporary directory and is named after
    # the GMAP application code
    return '{0}/{1}-process-starter.sh'.format(xlib.get_temp_dir(), xlib.get_gmap_code())
#-------------------------------------------------------------------------------
if __name__ == '__main__':
    # this module is a library of GMAP helpers; when executed directly it only
    # prints an informational message and exits successfully
    print('This file contains functions related to the GMAP-GSNAP process used in both console mode and gui mode.')
    sys.exit(0)
| GGFHF/NGScloud | Package/xgmap.py | Python | gpl-3.0 | 50,068 |
"""
Helper functions for creating Form classes from Django models
and database field objects.
"""
from __future__ import unicode_literals
from collections import OrderedDict
import warnings
from django.core.exceptions import ValidationError, NON_FIELD_ERRORS, FieldError
from django.forms.fields import Field, ChoiceField
from django.forms.forms import DeclarativeFieldsMetaclass, BaseForm
from django.forms.formsets import BaseFormSet, formset_factory
from django.forms.utils import ErrorList
from django.forms.widgets import (SelectMultiple, HiddenInput,
MultipleHiddenInput, CheckboxSelectMultiple)
from django.utils.encoding import smart_text, force_text
from django.utils import six
from django.utils.text import get_text_list, capfirst
from django.utils.translation import ugettext_lazy as _, ugettext, string_concat
__all__ = (
'ModelForm', 'BaseModelForm', 'model_to_dict', 'fields_for_model',
'save_instance', 'ModelChoiceField', 'ModelMultipleChoiceField',
'ALL_FIELDS', 'BaseModelFormSet', 'modelformset_factory',
'BaseInlineFormSet', 'inlineformset_factory',
)
ALL_FIELDS = '__all__'
def construct_instance(form, instance, fields=None, exclude=None):
    """
    Constructs and returns a model instance from the bound ``form``'s
    ``cleaned_data``, but does not save the returned instance to the
    database.

    ``fields`` optionally restricts which model fields are populated;
    ``exclude`` optionally lists field names to skip.
    """
    from django.db import models
    opts = instance._meta
    cleaned_data = form.cleaned_data
    file_field_list = []
    for f in opts.fields:
        # skip non-editable and auto fields, and fields the form did not
        # clean (e.g. fields not present on the form at all)
        # (idiom fix: was "not f.name in cleaned_data", PEP 8 E713)
        if (not f.editable or isinstance(f, models.AutoField)
                or f.name not in cleaned_data):
            continue
        if fields is not None and f.name not in fields:
            continue
        if exclude and f.name in exclude:
            continue
        # Defer saving file-type fields until after the other fields, so a
        # callable upload_to can use the values from other fields.
        if isinstance(f, models.FileField):
            file_field_list.append(f)
        else:
            f.save_form_data(instance, cleaned_data[f.name])
    for f in file_field_list:
        f.save_form_data(instance, cleaned_data[f.name])
    return instance
def save_instance(form, instance, fields=None, fail_message='saved',
                  commit=True, exclude=None, construct=True):
    """
    Save the bound ``form``'s cleaned_data into the model ``instance``.

    When ``commit`` is True the instance (and its many-to-many data) is
    written to the database immediately; otherwise a ``save_m2m`` callable is
    attached to the form so the caller can persist the m2m data after saving
    the instance itself. Returns ``instance``.

    When ``construct`` is False, ``instance`` is assumed to be already
    populated and only needs saving.
    """
    if construct:
        instance = construct_instance(form, instance, fields, exclude)
    meta = instance._meta
    if form.errors:
        raise ValueError("The %s could not be %s because the data didn't"
                         " validate." % (meta.object_name, fail_message))
    def save_m2m():
        # Persist many-to-many data. virtual_fields is included for
        # historical reasons (GenericRelation was previously a fake m2m
        # field).
        data = form.cleaned_data
        for field in meta.many_to_many + meta.virtual_fields:
            if not hasattr(field, 'save_form_data'):
                continue
            if fields and field.name not in fields:
                continue
            if exclude and field.name in exclude:
                continue
            if field.name in data:
                field.save_form_data(instance, data[field.name])
    if not commit:
        # Deferred save: the caller is responsible for saving the instance
        # and then calling form.save_m2m().
        form.save_m2m = save_m2m
        return instance
    instance.save()
    save_m2m()
    return instance
# ModelForms #################################################################
def model_to_dict(instance, fields=None, exclude=None):
    """
    Returns a dict containing the data in ``instance`` suitable for passing as
    a Form's ``initial`` keyword argument.

    ``fields`` is an optional list of field names. If provided, only the named
    fields will be included in the returned dict.

    ``exclude`` is an optional list of field names. If provided, the named
    fields will be excluded from the returned dict, even if they are listed in
    the ``fields`` argument.
    """
    # avoid a circular import
    from django.db.models.fields.related import ManyToManyField
    opts = instance._meta
    data = {}
    for f in opts.concrete_fields + opts.virtual_fields + opts.many_to_many:
        if not getattr(f, 'editable', False):
            continue
        # idiom fix: was "not f.name in fields" (PEP 8 E713)
        if fields and f.name not in fields:
            continue
        if exclude and f.name in exclude:
            continue
        if isinstance(f, ManyToManyField):
            # If the object doesn't have a primary key yet, just use an empty
            # list for its m2m fields. Calling f.value_from_object will raise
            # an exception.
            if instance.pk is None:
                data[f.name] = []
            else:
                # MultipleChoiceWidget needs a list of pks, not object instances.
                qs = f.value_from_object(instance)
                if qs._result_cache is not None:
                    # queryset already evaluated: reuse the cached objects
                    # instead of hitting the database again
                    data[f.name] = [item.pk for item in qs]
                else:
                    data[f.name] = list(qs.values_list('pk', flat=True))
        else:
            data[f.name] = f.value_from_object(instance)
    return data
def fields_for_model(model, fields=None, exclude=None, widgets=None,
                     formfield_callback=None, localized_fields=None,
                     labels=None, help_texts=None, error_messages=None):
    """
    Returns a ``OrderedDict`` containing form fields for the given model.

    ``fields`` is an optional list of field names. If provided, only the named
    fields will be included in the returned fields.

    ``exclude`` is an optional list of field names. If provided, the named
    fields will be excluded from the returned fields, even if they are listed
    in the ``fields`` argument.

    ``widgets`` is a dictionary of model field names mapped to a widget.

    ``localized_fields`` is a list of names of fields which should be localized.

    ``labels`` is a dictionary of model field names mapped to a label.

    ``help_texts`` is a dictionary of model field names mapped to a help text.

    ``error_messages`` is a dictionary of model field names mapped to a
    dictionary of error messages.

    ``formfield_callback`` is a callable that takes a model field and returns
    a form field.
    """
    field_list = []
    ignored = []
    opts = model._meta
    # Avoid circular import
    from django.db.models.fields import Field as ModelField
    sortable_virtual_fields = [f for f in opts.virtual_fields
                               if isinstance(f, ModelField)]
    for f in sorted(opts.concrete_fields + sortable_virtual_fields + opts.many_to_many):
        if not getattr(f, 'editable', False):
            continue
        # idiom fix: was "not f.name in fields" (PEP 8 E713)
        if fields is not None and f.name not in fields:
            continue
        if exclude and f.name in exclude:
            continue
        kwargs = {}
        if widgets and f.name in widgets:
            kwargs['widget'] = widgets[f.name]
        if localized_fields == ALL_FIELDS or (localized_fields and f.name in localized_fields):
            kwargs['localize'] = True
        if labels and f.name in labels:
            kwargs['label'] = labels[f.name]
        if help_texts and f.name in help_texts:
            kwargs['help_text'] = help_texts[f.name]
        if error_messages and f.name in error_messages:
            kwargs['error_messages'] = error_messages[f.name]
        if formfield_callback is None:
            formfield = f.formfield(**kwargs)
        elif not callable(formfield_callback):
            raise TypeError('formfield_callback must be a function or callable')
        else:
            formfield = formfield_callback(f, **kwargs)
        if formfield:
            field_list.append((f.name, formfield))
        else:
            # formfield() returned None: the model field opted out of having
            # a form field; remember it so it is dropped from the ordering
            # pass below
            ignored.append(f.name)
    field_dict = OrderedDict(field_list)
    if fields:
        # reorder to match the caller-supplied field order, dropping ignored
        # and excluded names
        field_dict = OrderedDict(
            [(f, field_dict.get(f)) for f in fields
             if ((not exclude) or (exclude and f not in exclude)) and (f not in ignored)]
        )
    return field_dict
class ModelFormOptions(object):
    """
    Container for the options a ModelForm's inner ``Meta`` class may declare.

    Each supported option is read from ``options`` (the Meta class, or None)
    and defaults to None when absent.
    """
    def __init__(self, options=None):
        for option_name in ('model', 'fields', 'exclude', 'widgets',
                            'localized_fields', 'labels', 'help_texts',
                            'error_messages'):
            setattr(self, option_name, getattr(options, option_name, None))
class ModelFormMetaclass(DeclarativeFieldsMetaclass):
    # Metaclass for ModelForm: collects declaratively-defined form fields (via
    # DeclarativeFieldsMetaclass) and merges in fields generated from the
    # model named in the inner Meta class.
    def __new__(mcs, name, bases, attrs):
        """
        Build the new form class, populating ``base_fields`` from the model
        declared in ``Meta`` merged with any explicitly declared fields.
        """
        formfield_callback = attrs.pop('formfield_callback', None)
        new_class = super(ModelFormMetaclass, mcs).__new__(mcs, name, bases, attrs)
        # ModelForm itself (direct subclass of BaseModelForm): nothing to do
        if bases == (BaseModelForm,):
            return new_class
        opts = new_class._meta = ModelFormOptions(getattr(new_class, 'Meta', None))
        # We check if a string was passed to `fields` or `exclude`,
        # which is likely to be a mistake where the user typed ('foo') instead
        # of ('foo',)
        for opt in ['fields', 'exclude', 'localized_fields']:
            value = getattr(opts, opt)
            if isinstance(value, six.string_types) and value != ALL_FIELDS:
                msg = ("%(model)s.Meta.%(opt)s cannot be a string. "
                       "Did you mean to type: ('%(value)s',)?" % {
                           'model': new_class.__name__,
                           'opt': opt,
                           'value': value,
                       })
                raise TypeError(msg)
        if opts.model:
            # If a model is defined, extract form fields from it.
            if opts.fields is None and opts.exclude is None:
                # This should be some kind of assertion error once deprecation
                # cycle is complete.
                warnings.warn("Creating a ModelForm without either the 'fields' attribute "
                              "or the 'exclude' attribute is deprecated - form %s "
                              "needs updating" % name,
                              DeprecationWarning, stacklevel=2)
            if opts.fields == ALL_FIELDS:
                # Sentinel for fields_for_model to indicate "get the list of
                # fields from the model"
                opts.fields = None
            fields = fields_for_model(opts.model, opts.fields, opts.exclude,
                                      opts.widgets, formfield_callback,
                                      opts.localized_fields, opts.labels,
                                      opts.help_texts, opts.error_messages)
            # make sure opts.fields doesn't specify an invalid field
            none_model_fields = [k for k, v in six.iteritems(fields) if not v]
            missing_fields = (set(none_model_fields) -
                              set(new_class.declared_fields.keys()))
            if missing_fields:
                message = 'Unknown field(s) (%s) specified for %s'
                message = message % (', '.join(missing_fields),
                                     opts.model.__name__)
                raise FieldError(message)
            # Override default model fields with any custom declared ones
            # (plus, include all the other declared fields).
            fields.update(new_class.declared_fields)
        else:
            # No model: behave like a regular declarative form
            fields = new_class.declared_fields
        new_class.base_fields = fields
        return new_class
class BaseModelForm(BaseForm):
    # A Form bound to a model instance: initial data is derived from the
    # instance, validation includes model-level validation, and save() writes
    # the cleaned data back to the database.
    def __init__(self, data=None, files=None, auto_id='id_%s', prefix=None,
                 initial=None, error_class=ErrorList, label_suffix=None,
                 empty_permitted=False, instance=None):
        """
        Bind the form to ``instance`` (creating a fresh model instance when
        None) and seed the form's initial data from it; an explicit
        ``initial`` dict overrides values taken from the instance.
        """
        opts = self._meta
        if opts.model is None:
            raise ValueError('ModelForm has no model class specified.')
        if instance is None:
            # if we didn't get an instance, instantiate a new one
            self.instance = opts.model()
            object_data = {}
        else:
            self.instance = instance
            object_data = model_to_dict(instance, opts.fields, opts.exclude)
        # if initial was provided, it should override the values from instance
        if initial is not None:
            object_data.update(initial)
        # self._validate_unique will be set to True by BaseModelForm.clean().
        # It is False by default so overriding self.clean() and failing to call
        # super will stop validate_unique from being called.
        self._validate_unique = False
        super(BaseModelForm, self).__init__(data, files, auto_id, prefix, object_data,
                                            error_class, label_suffix, empty_permitted)
        # Apply ``limit_choices_to`` to each field.
        for field_name in self.fields:
            formfield = self.fields[field_name]
            if hasattr(formfield, 'queryset'):
                limit_choices_to = formfield.limit_choices_to
                if limit_choices_to is not None:
                    # limit_choices_to may be a callable returning the filter
                    if callable(limit_choices_to):
                        limit_choices_to = limit_choices_to()
                    formfield.queryset = formfield.queryset.complex_filter(limit_choices_to)
    def _get_validation_exclusions(self):
        """
        For backwards-compatibility, several types of fields need to be
        excluded from model validation. See the following tickets for
        details: #12507, #12521, #12553
        """
        exclude = []
        # Build up a list of fields that should be excluded from model field
        # validation and unique checks.
        for f in self.instance._meta.fields:
            field = f.name
            # Exclude fields that aren't on the form. The developer may be
            # adding these values to the model after form validation.
            if field not in self.fields:
                exclude.append(f.name)
            # Don't perform model validation on fields that were defined
            # manually on the form and excluded via the ModelForm's Meta
            # class. See #12901.
            elif self._meta.fields and field not in self._meta.fields:
                exclude.append(f.name)
            elif self._meta.exclude and field in self._meta.exclude:
                exclude.append(f.name)
            # Exclude fields that failed form validation. There's no need for
            # the model fields to validate them as well.
            elif field in self._errors.keys():
                exclude.append(f.name)
            # Exclude empty fields that are not required by the form, if the
            # underlying model field is required. This keeps the model field
            # from raising a required error. Note: don't exclude the field from
            # validation if the model field allows blanks. If it does, the blank
            # value may be included in a unique check, so cannot be excluded
            # from validation.
            else:
                form_field = self.fields[field]
                field_value = self.cleaned_data.get(field, None)
                if not f.blank and not form_field.required and field_value in form_field.empty_values:
                    exclude.append(f.name)
        return exclude
    def clean(self):
        """
        Hook for form-wide cleaning; also re-enables unique validation,
        which _post_clean() performs only after this method has run.
        """
        self._validate_unique = True
        return self.cleaned_data
    def _update_errors(self, errors):
        """
        Merge a ValidationError raised during model validation into the
        form's errors, preferring form-level error messages when available.
        """
        # Override any validation error messages defined at the model level
        # with those defined at the form level.
        opts = self._meta
        for field, messages in errors.error_dict.items():
            if (field == NON_FIELD_ERRORS and opts.error_messages and
                    NON_FIELD_ERRORS in opts.error_messages):
                error_messages = opts.error_messages[NON_FIELD_ERRORS]
            elif field in self.fields:
                error_messages = self.fields[field].error_messages
            else:
                continue
            for message in messages:
                if (isinstance(message, ValidationError) and
                        message.code in error_messages):
                    message.message = error_messages[message.code]
        self.add_error(None, errors)
    def _post_clean(self):
        """
        Construct the model instance from cleaned_data and run model-level
        validation (full_clean plus, when enabled by clean(), the uniqueness
        checks).
        """
        opts = self._meta
        # Update the model instance with self.cleaned_data.
        self.instance = construct_instance(self, self.instance, opts.fields, opts.exclude)
        exclude = self._get_validation_exclusions()
        # Foreign Keys being used to represent inline relationships
        # are excluded from basic field value validation. This is for two
        # reasons: firstly, the value may not be supplied (#12507; the
        # case of providing new values to the admin); secondly the
        # object being referred to may not yet fully exist (#12749).
        # However, these fields *must* be included in uniqueness checks,
        # so this can't be part of _get_validation_exclusions().
        for name, field in self.fields.items():
            if isinstance(field, InlineForeignKeyField):
                exclude.append(name)
        try:
            self.instance.full_clean(exclude=exclude, validate_unique=False)
        except ValidationError as e:
            self._update_errors(e)
        # Validate uniqueness if needed.
        if self._validate_unique:
            self.validate_unique()
    def validate_unique(self):
        """
        Calls the instance's validate_unique() method and updates the form's
        validation errors if any were raised.
        """
        exclude = self._get_validation_exclusions()
        try:
            self.instance.validate_unique(exclude=exclude)
        except ValidationError as e:
            self._update_errors(e)
    def save(self, commit=True):
        """
        Saves this ``form``'s cleaned_data into model instance
        ``self.instance``.

        If commit=True, then the changes to ``instance`` will be saved to the
        database. Returns ``instance``.
        """
        # fail_message only affects the wording of the error raised by
        # save_instance when the form did not validate
        if self.instance.pk is None:
            fail_message = 'created'
        else:
            fail_message = 'changed'
        return save_instance(self, self.instance, self._meta.fields,
                             fail_message, commit, self._meta.exclude,
                             construct=False)
    # admin and docs use alters_data to know save() has side effects
    save.alters_data = True
class ModelForm(six.with_metaclass(ModelFormMetaclass, BaseModelForm)):
    # concrete ModelForm class: all behavior lives in BaseModelForm and in
    # ModelFormMetaclass, which builds the form fields from Meta.model
    pass
def modelform_factory(model, form=ModelForm, fields=None, exclude=None,
                      formfield_callback=None, widgets=None, localized_fields=None,
                      labels=None, help_texts=None, error_messages=None):
    """
    Returns a ModelForm containing form fields for the given model.

    ``fields`` is an optional list of field names. If provided, only the named
    fields will be included in the returned fields. If omitted or '__all__',
    all fields will be used.

    ``exclude`` is an optional list of field names. If provided, the named
    fields will be excluded from the returned fields, even if they are listed
    in the ``fields`` argument.

    ``widgets`` is a dictionary of model field names mapped to a widget.

    ``localized_fields`` is a list of names of fields which should be localized.

    ``formfield_callback`` is a callable that takes a model field and returns
    a form field.

    ``labels`` is a dictionary of model field names mapped to a label.

    ``help_texts`` is a dictionary of model field names mapped to a help text.

    ``error_messages`` is a dictionary of model field names mapped to a
    dictionary of error messages.
    """
    # Collect the attributes for a temporary inner Meta class; only the
    # options the caller actually supplied are set on it.
    meta_attrs = {'model': model}
    optional_meta_options = (
        ('fields', fields),
        ('exclude', exclude),
        ('widgets', widgets),
        ('localized_fields', localized_fields),
        ('labels', labels),
        ('help_texts', help_texts),
        ('error_messages', error_messages),
    )
    for option_name, option_value in optional_meta_options:
        if option_value is not None:
            meta_attrs[option_name] = option_value
    # If the parent form class already has an inner Meta, the new Meta must
    # inherit from it so unspecified options are preserved.
    meta_bases = (form.Meta, object) if hasattr(form, 'Meta') else (object,)
    Meta = type(str('Meta'), meta_bases, meta_attrs)
    # Give this new form class a reasonable name.
    class_name = model.__name__ + str('Form')
    form_class_attrs = {
        'Meta': Meta,
        'formfield_callback': formfield_callback
    }
    # The ModelFormMetaclass will trigger a similar warning/error, but this
    # warning is also raised here so the stacklevel points at the caller,
    # which is easier to debug.
    if (getattr(Meta, 'fields', None) is None and
            getattr(Meta, 'exclude', None) is None):
        warnings.warn("Calling modelform_factory without defining 'fields' or "
                      "'exclude' explicitly is deprecated",
                      DeprecationWarning, stacklevel=2)
    # Instantiate type(form) so the new class uses the same metaclass as form.
    return type(form)(class_name, (form,), form_class_attrs)
# ModelFormSets ##############################################################
class BaseModelFormSet(BaseFormSet):
    """
    A ``FormSet`` for editing a queryset and/or adding new objects to it.
    """
    # The model class the formset edits; set by modelformset_factory().
    model = None
    def __init__(self, data=None, files=None, auto_id='id_%s', prefix=None,
                 queryset=None, **kwargs):
        # The queryset backs the *initial* forms; 'initial' data only applies
        # to the extra (new-object) forms, so it is popped and kept separately.
        self.queryset = queryset
        self.initial_extra = kwargs.pop('initial', None)
        defaults = {'data': data, 'files': files, 'auto_id': auto_id, 'prefix': prefix}
        defaults.update(kwargs)
        super(BaseModelFormSet, self).__init__(**defaults)
    def initial_form_count(self):
        """Returns the number of forms that are required in this FormSet."""
        if not (self.data or self.files):
            # Unbound formset: one initial form per object in the queryset.
            return len(self.get_queryset())
        return super(BaseModelFormSet, self).initial_form_count()
    def _existing_object(self, pk):
        # Lazily build and cache a pk -> instance map so repeated form
        # construction evaluates the queryset only once.
        if not hasattr(self, '_object_dict'):
            self._object_dict = dict((o.pk, o) for o in self.get_queryset())
        return self._object_dict.get(pk)
    def _get_to_python(self, field):
        """
        If the field is a related field, fetch the concrete field's (that
        is, the ultimate pointed-to field's) get_prep_value.
        """
        while field.rel is not None:
            field = field.rel.get_related_field()
        return field.to_python
    def _construct_form(self, i, **kwargs):
        # Form construction: bound initial forms recover their instance from
        # the submitted pk value rather than relying on queryset ordering.
        if self.is_bound and i < self.initial_form_count():
            pk_key = "%s-%s" % (self.add_prefix(i), self.model._meta.pk.name)
            pk = self.data[pk_key]
            pk_field = self.model._meta.pk
            to_python = self._get_to_python(pk_field)
            pk = to_python(pk)
            kwargs['instance'] = self._existing_object(pk)
        if i < self.initial_form_count() and 'instance' not in kwargs:
            # Unbound initial form: take the instance from the queryset.
            kwargs['instance'] = self.get_queryset()[i]
        if i >= self.initial_form_count() and self.initial_extra:
            # Set initial values for extra forms
            try:
                kwargs['initial'] = self.initial_extra[i - self.initial_form_count()]
            except IndexError:
                pass
        return super(BaseModelFormSet, self)._construct_form(i, **kwargs)
    def get_queryset(self):
        # Returns (and caches) the queryset backing the initial forms.
        if not hasattr(self, '_queryset'):
            if self.queryset is not None:
                qs = self.queryset
            else:
                qs = self.model._default_manager.get_queryset()
            # If the queryset isn't already ordered we need to add an
            # artificial ordering here to make sure that all formsets
            # constructed from this queryset have the same form order.
            if not qs.ordered:
                qs = qs.order_by(self.model._meta.pk.name)
            # Removed queryset limiting here. As per discussion re: #13023
            # on django-dev, max_num should not prevent existing
            # related objects/inlines from being displayed.
            self._queryset = qs
        return self._queryset
    def save_new(self, form, commit=True):
        """Saves and returns a new model instance for the given form."""
        return form.save(commit=commit)
    def save_existing(self, form, instance, commit=True):
        """Saves and returns an existing model instance for the given form."""
        return form.save(commit=commit)
    def save(self, commit=True):
        """Saves model instances for every form, adding and changing instances
        as necessary, and returns the list of instances.
        """
        if not commit:
            # With commit=False, expose save_m2m() so the caller can persist
            # many-to-many data later, mirroring ModelForm's behavior.
            self.saved_forms = []
            def save_m2m():
                for form in self.saved_forms:
                    form.save_m2m()
            self.save_m2m = save_m2m
        return self.save_existing_objects(commit) + self.save_new_objects(commit)
    save.alters_data = True
    def clean(self):
        self.validate_unique()
    def validate_unique(self):
        # Cross-form uniqueness validation: individual forms can only check
        # themselves against the database, not against each other.
        # Collect unique_checks and date_checks to run from all the forms.
        all_unique_checks = set()
        all_date_checks = set()
        forms_to_delete = self.deleted_forms
        valid_forms = [form for form in self.forms if form.is_valid() and form not in forms_to_delete]
        for form in valid_forms:
            exclude = form._get_validation_exclusions()
            unique_checks, date_checks = form.instance._get_unique_checks(exclude=exclude)
            all_unique_checks = all_unique_checks.union(set(unique_checks))
            all_date_checks = all_date_checks.union(set(date_checks))
        errors = []
        # Do each of the unique checks (unique and unique_together)
        for uclass, unique_check in all_unique_checks:
            seen_data = set()
            for form in valid_forms:
                # get data for each field of each of unique_check
                row_data = (form.cleaned_data[field]
                            for field in unique_check if field in form.cleaned_data)
                # Reduce Model instances to their primary key values
                row_data = tuple(d._get_pk_val() if hasattr(d, '_get_pk_val') else d
                                 for d in row_data)
                if row_data and not None in row_data:
                    # if we've already seen it then we have a uniqueness failure
                    if row_data in seen_data:
                        # poke error messages into the right places and mark
                        # the form as invalid
                        errors.append(self.get_unique_error_message(unique_check))
                        form._errors[NON_FIELD_ERRORS] = self.error_class([self.get_form_error()])
                        # remove the data from the cleaned_data dict since it was invalid
                        for field in unique_check:
                            if field in form.cleaned_data:
                                del form.cleaned_data[field]
                    # mark the data as seen
                    seen_data.add(row_data)
        # iterate over each of the date checks now
        for date_check in all_date_checks:
            seen_data = set()
            uclass, lookup, field, unique_for = date_check
            for form in valid_forms:
                # see if we have data for both fields
                if (form.cleaned_data and form.cleaned_data[field] is not None
                        and form.cleaned_data[unique_for] is not None):
                    # if it's a date lookup we need to get the data for all the fields
                    if lookup == 'date':
                        date = form.cleaned_data[unique_for]
                        date_data = (date.year, date.month, date.day)
                    # otherwise it's just the attribute on the date/datetime
                    # object
                    else:
                        date_data = (getattr(form.cleaned_data[unique_for], lookup),)
                    data = (form.cleaned_data[field],) + date_data
                    # if we've already seen it then we have a uniqueness failure
                    if data in seen_data:
                        # poke error messages into the right places and mark
                        # the form as invalid
                        errors.append(self.get_date_error_message(date_check))
                        form._errors[NON_FIELD_ERRORS] = self.error_class([self.get_form_error()])
                        # remove the data from the cleaned_data dict since it was invalid
                        del form.cleaned_data[field]
                    # mark the data as seen
                    seen_data.add(data)
        if errors:
            raise ValidationError(errors)
    def get_unique_error_message(self, unique_check):
        # Human-readable message for a (class, fields) uniqueness failure.
        if len(unique_check) == 1:
            return ugettext("Please correct the duplicate data for %(field)s.") % {
                "field": unique_check[0],
            }
        else:
            return ugettext("Please correct the duplicate data for %(field)s, "
                "which must be unique.") % {
                    "field": get_text_list(unique_check, six.text_type(_("and"))),
                }
    def get_date_error_message(self, date_check):
        # Human-readable message for a unique_for_date/month/year failure.
        return ugettext("Please correct the duplicate data for %(field_name)s "
            "which must be unique for the %(lookup)s in %(date_field)s.") % {
            'field_name': date_check[2],
            'date_field': date_check[3],
            'lookup': six.text_type(date_check[1]),
        }
    def get_form_error(self):
        # Generic per-form error attached alongside the formset-level message.
        return ugettext("Please correct the duplicate values below.")
    def save_existing_objects(self, commit=True):
        # Saves (or deletes) instances behind the initial forms; populates
        # self.changed_objects and self.deleted_objects as a side effect.
        self.changed_objects = []
        self.deleted_objects = []
        if not self.initial_forms:
            return []
        saved_instances = []
        forms_to_delete = self.deleted_forms
        for form in self.initial_forms:
            obj = form.instance
            if form in forms_to_delete:
                # If the pk is None, it means that the object can't be
                # deleted again. Possible reason for this is that the
                # object was already deleted from the DB. Refs #14877.
                if obj.pk is None:
                    continue
                self.deleted_objects.append(obj)
                if commit:
                    obj.delete()
            elif form.has_changed():
                self.changed_objects.append((obj, form.changed_data))
                saved_instances.append(self.save_existing(form, obj, commit=commit))
                if not commit:
                    self.saved_forms.append(form)
        return saved_instances
    def save_new_objects(self, commit=True):
        # Saves instances for the extra forms; populates self.new_objects.
        self.new_objects = []
        for form in self.extra_forms:
            if not form.has_changed():
                continue
            # If someone has marked an add form for deletion, don't save the
            # object.
            if self.can_delete and self._should_delete_form(form):
                continue
            self.new_objects.append(self.save_new(form, commit=commit))
            if not commit:
                self.saved_forms.append(form)
        return self.new_objects
    def add_fields(self, form, index):
        """Add a hidden field for the object's primary key."""
        from django.db.models import AutoField, OneToOneField, ForeignKey
        self._pk_field = pk = self.model._meta.pk
        # If a pk isn't editable, then it won't be on the form, so we need to
        # add it here so we can tell which object is which when we get the
        # data back. Generally, pk.editable should be false, but for some
        # reason, auto_created pk fields and AutoField's editable attribute is
        # True, so check for that as well.
        def pk_is_not_editable(pk):
            return ((not pk.editable) or (pk.auto_created or isinstance(pk, AutoField))
                or (pk.rel and pk.rel.parent_link and pk_is_not_editable(pk.rel.to._meta.pk)))
        if pk_is_not_editable(pk) or pk.name not in form.fields:
            if form.is_bound:
                pk_value = form.instance.pk
            else:
                try:
                    if index is not None:
                        pk_value = self.get_queryset()[index].pk
                    else:
                        pk_value = None
                except IndexError:
                    pk_value = None
            # The hidden pk field validates against the related model's
            # queryset when the pk is itself a relation (inherited models).
            if isinstance(pk, OneToOneField) or isinstance(pk, ForeignKey):
                qs = pk.rel.to._default_manager.get_queryset()
            else:
                qs = self.model._default_manager.get_queryset()
            qs = qs.using(form.instance._state.db)
            if form._meta.widgets:
                widget = form._meta.widgets.get(self._pk_field.name, HiddenInput)
            else:
                widget = HiddenInput
            form.fields[self._pk_field.name] = ModelChoiceField(qs, initial=pk_value, required=False, widget=widget)
        super(BaseModelFormSet, self).add_fields(form, index)
def modelformset_factory(model, form=ModelForm, formfield_callback=None,
                         formset=BaseModelFormSet, extra=1, can_delete=False,
                         can_order=False, max_num=None, fields=None, exclude=None,
                         widgets=None, validate_max=False, localized_fields=None,
                         labels=None, help_texts=None, error_messages=None):
    """
    Returns a FormSet class for the given Django model class.
    """
    # modelform_factory emits the same deprecation warning, but it would be
    # hard to trace back to the caller; warn from this frame too so the
    # stacklevel points at user code. This duplicated check can go away once
    # the deprecation cycle finishes.
    meta = getattr(form, 'Meta', None) or type(str('Meta'), (object,), {})
    no_fields = getattr(meta, 'fields', fields) is None
    no_exclude = getattr(meta, 'exclude', exclude) is None
    if no_fields and no_exclude:
        warnings.warn("Calling modelformset_factory without defining 'fields' or "
                      "'exclude' explicitly is deprecated",
                      DeprecationWarning, stacklevel=2)
    # Build the ModelForm subclass first, then wrap it in a formset.
    form = modelform_factory(model, form=form, fields=fields, exclude=exclude,
                             formfield_callback=formfield_callback,
                             widgets=widgets, localized_fields=localized_fields,
                             labels=labels, help_texts=help_texts,
                             error_messages=error_messages)
    FormSet = formset_factory(form, formset, extra=extra, max_num=max_num,
                              can_order=can_order, can_delete=can_delete,
                              validate_max=validate_max)
    FormSet.model = model
    return FormSet
# InlineFormSets #############################################################
class BaseInlineFormSet(BaseModelFormSet):
    """A formset for child objects related to a parent."""
    def __init__(self, data=None, files=None, instance=None,
                 save_as_new=False, prefix=None, queryset=None, **kwargs):
        # self.fk is the ForeignKey from self.model back to the parent; it is
        # attached by inlineformset_factory(). With no instance given, an
        # unsaved blank parent is created so attribute access still works.
        if instance is None:
            self.instance = self.fk.rel.to()
        else:
            self.instance = instance
        self.save_as_new = save_as_new
        if queryset is None:
            queryset = self.model._default_manager
        if self.instance.pk is not None:
            # Only show children that belong to this parent.
            qs = queryset.filter(**{self.fk.name: self.instance})
        else:
            # Unsaved parent: it cannot have related children yet.
            qs = queryset.none()
        super(BaseInlineFormSet, self).__init__(data, files, prefix=prefix,
                                                queryset=qs, **kwargs)
    def initial_form_count(self):
        # When saving as new, all forms are treated as extra (new) forms.
        if self.save_as_new:
            return 0
        return super(BaseInlineFormSet, self).initial_form_count()
    def _construct_form(self, i, **kwargs):
        form = super(BaseInlineFormSet, self)._construct_form(i, **kwargs)
        if self.save_as_new:
            # Remove the primary key from the form's data, we are only
            # creating new instances
            form.data[form.add_prefix(self._pk_field.name)] = None
            # Remove the foreign key from the form's data
            form.data[form.add_prefix(self.fk.name)] = None
        # Set the fk value here so that the form can do its validation.
        setattr(form.instance, self.fk.get_attname(), self.instance.pk)
        return form
    @classmethod
    def get_default_prefix(cls):
        # Derive the form prefix from the reverse accessor name of the fk
        # (stripping the '+' used for no-reverse-relation accessors).
        from django.db.models.fields.related import RelatedObject
        return RelatedObject(cls.fk.rel.to, cls.model, cls.fk).get_accessor_name().replace('+', '')
    def save_new(self, form, commit=True):
        # Use commit=False so we can assign the parent key afterwards, then
        # save the object.
        obj = form.save(commit=False)
        pk_value = getattr(self.instance, self.fk.rel.field_name)
        setattr(obj, self.fk.get_attname(), getattr(pk_value, 'pk', pk_value))
        if commit:
            obj.save()
        # form.save_m2m() can be called via the formset later on if commit=False
        if commit and hasattr(form, 'save_m2m'):
            form.save_m2m()
        return obj
    def add_fields(self, form, index):
        # Replace (or add) the field pointing at the parent with an
        # InlineForeignKeyField pinned to self.instance.
        super(BaseInlineFormSet, self).add_fields(form, index)
        if self._pk_field == self.fk:
            name = self._pk_field.name
            kwargs = {'pk_field': True}
        else:
            # The foreign key field might not be on the form, so we poke at the
            # Model field to get the label, since we need that for error messages.
            name = self.fk.name
            kwargs = {
                'label': getattr(form.fields.get(name), 'label', capfirst(self.fk.verbose_name))
            }
            if self.fk.rel.field_name != self.fk.rel.to._meta.pk.name:
                kwargs['to_field'] = self.fk.rel.field_name
        form.fields[name] = InlineForeignKeyField(self.instance, **kwargs)
        # Add the generated field to form._meta.fields if it's defined to make
        # sure validation isn't skipped on that field.
        if form._meta.fields:
            if isinstance(form._meta.fields, tuple):
                form._meta.fields = list(form._meta.fields)
            form._meta.fields.append(self.fk.name)
    def get_unique_error_message(self, unique_check):
        # The fk to the parent is implicit in an inline, so drop it from the
        # fields named in the uniqueness error message.
        unique_check = [field for field in unique_check if field != self.fk.name]
        return super(BaseInlineFormSet, self).get_unique_error_message(unique_check)
def _get_foreign_key(parent_model, model, fk_name=None, can_fail=False):
    """
    Finds and returns the ForeignKey from model to parent if there is one
    (returns None if can_fail is True and no such field exists). If fk_name is
    provided, assume it is the name of the ForeignKey field. Unless can_fail is
    True, an exception is raised if there is no ForeignKey from model to
    parent_model.
    """
    # avoid circular import
    from django.db.models import ForeignKey
    opts = model._meta
    if fk_name:
        # Caller named the field explicitly: verify it exists and really is a
        # ForeignKey to parent_model (or to one of parent_model's parents).
        fks_to_parent = [f for f in opts.fields if f.name == fk_name]
        if len(fks_to_parent) == 1:
            fk = fks_to_parent[0]
            if not isinstance(fk, ForeignKey) or \
                    (fk.rel.to != parent_model and
                     fk.rel.to not in parent_model._meta.get_parent_list()):
                # Bug fix: the format string previously ended with '%s.%',
                # which is an invalid conversion and raised
                # "ValueError: unsupported format character" instead of the
                # intended error message.
                raise ValueError(
                    "fk_name '%s' is not a ForeignKey to '%s.%s'."
                    % (fk_name, parent_model._meta.app_label, parent_model._meta.object_name))
        elif len(fks_to_parent) == 0:
            raise ValueError(
                "'%s.%s' has no field named '%s'."
                % (model._meta.app_label, model._meta.object_name, fk_name))
    else:
        # Try to discover what the ForeignKey from model to parent_model is
        fks_to_parent = [
            f for f in opts.fields
            if isinstance(f, ForeignKey)
            and (f.rel.to == parent_model
                or f.rel.to in parent_model._meta.get_parent_list())
        ]
        if len(fks_to_parent) == 1:
            fk = fks_to_parent[0]
        elif len(fks_to_parent) == 0:
            if can_fail:
                return
            raise ValueError(
                "'%s.%s' has no ForeignKey to '%s.%s'."
                % (model._meta.app_label, model._meta.object_name, parent_model._meta.app_label, parent_model._meta.object_name))
        else:
            raise ValueError(
                "'%s.%s' has more than one ForeignKey to '%s.%s'."
                % (model._meta.app_label, model._meta.object_name, parent_model._meta.app_label, parent_model._meta.object_name))
    return fk
def inlineformset_factory(parent_model, model, form=ModelForm,
                          formset=BaseInlineFormSet, fk_name=None,
                          fields=None, exclude=None, extra=3, can_order=False,
                          can_delete=True, max_num=None, formfield_callback=None,
                          widgets=None, validate_max=False, localized_fields=None,
                          labels=None, help_texts=None, error_messages=None):
    """
    Returns an ``InlineFormSet`` for the given kwargs.
    You must provide ``fk_name`` if ``model`` has more than one ``ForeignKey``
    to ``parent_model``.
    """
    fk = _get_foreign_key(parent_model, model, fk_name=fk_name)
    # enforce a max_num=1 when the foreign key to the parent model is unique.
    if fk.unique:
        max_num = 1
    FormSet = modelformset_factory(
        model,
        form=form,
        formfield_callback=formfield_callback,
        formset=formset,
        extra=extra,
        can_delete=can_delete,
        can_order=can_order,
        fields=fields,
        exclude=exclude,
        max_num=max_num,
        widgets=widgets,
        validate_max=validate_max,
        localized_fields=localized_fields,
        labels=labels,
        help_texts=help_texts,
        error_messages=error_messages,
    )
    # Record the fk so BaseInlineFormSet can filter children by parent.
    FormSet.fk = fk
    return FormSet
# Fields #####################################################################
class InlineForeignKeyField(Field):
    """
    A basic integer field that deals with validating the given value to a
    given parent instance in an inline.
    """
    widget = HiddenInput
    default_error_messages = {
        'invalid_choice': _('The inline foreign key did not match the parent instance primary key.'),
    }
    def __init__(self, parent_instance, *args, **kwargs):
        self.parent_instance = parent_instance
        self.pk_field = kwargs.pop("pk_field", False)
        self.to_field = kwargs.pop("to_field", None)
        if self.parent_instance is not None:
            # Pre-fill with the parent's key (or its to_field value).
            kwargs["initial"] = (getattr(self.parent_instance, self.to_field)
                                 if self.to_field
                                 else self.parent_instance.pk)
        # The hidden fk field is never required from the user.
        kwargs["required"] = False
        super(InlineForeignKeyField, self).__init__(*args, **kwargs)
    def clean(self, value):
        if value in self.empty_values:
            # if there is no value act as we did before.
            return None if self.pk_field else self.parent_instance
        # ensure the we compare the values as equal types.
        expected = (getattr(self.parent_instance, self.to_field)
                    if self.to_field
                    else self.parent_instance.pk)
        if force_text(value) != force_text(expected):
            raise ValidationError(self.error_messages['invalid_choice'], code='invalid_choice')
        return self.parent_instance
    def _has_changed(self, initial, data):
        # The value is fixed to the parent; it can never "change".
        return False
class ModelChoiceIterator(object):
    """Iterates a ModelChoiceField's queryset as (value, label) choice pairs."""
    def __init__(self, field):
        self.field = field
        self.queryset = field.queryset
    def __iter__(self):
        empty_label = self.field.empty_label
        if empty_label is not None:
            # The blank "no selection" choice comes first.
            yield ("", empty_label)
        if not self.field.cache_choices:
            # Re-evaluate the queryset each time for fresh choices.
            for obj in self.queryset.all():
                yield self.choice(obj)
            return
        # Caching requested: materialize once, then serve from the cache.
        if self.field.choice_cache is None:
            self.field.choice_cache = [
                self.choice(obj) for obj in self.queryset.all()
            ]
        for cached in self.field.choice_cache:
            yield cached
    def __len__(self):
        extra = 0 if self.field.empty_label is None else 1
        return len(self.queryset) + extra
    def choice(self, obj):
        # A choice is (form value, human label) for one model instance.
        return (self.field.prepare_value(obj), self.field.label_from_instance(obj))
class ModelChoiceField(ChoiceField):
    """A ChoiceField whose choices are a model QuerySet."""
    # This class is a subclass of ChoiceField for purity, but it doesn't
    # actually use any of ChoiceField's implementation.
    default_error_messages = {
        'invalid_choice': _('Select a valid choice. That choice is not one of'
                            ' the available choices.'),
    }
    def __init__(self, queryset, empty_label="---------", cache_choices=False,
                 required=True, widget=None, label=None, initial=None,
                 help_text='', to_field_name=None, limit_choices_to=None,
                 *args, **kwargs):
        # A required field with an initial value never needs the blank choice.
        if required and (initial is not None):
            self.empty_label = None
        else:
            self.empty_label = empty_label
        self.cache_choices = cache_choices
        # Call Field instead of ChoiceField __init__() because we don't need
        # ChoiceField.__init__().
        Field.__init__(self, required, widget, label, initial, help_text,
                       *args, **kwargs)
        self.queryset = queryset
        self.limit_choices_to = limit_choices_to   # limit the queryset later.
        self.choice_cache = None
        self.to_field_name = to_field_name
    def __deepcopy__(self, memo):
        result = super(ChoiceField, self).__deepcopy__(memo)
        # Need to force a new ModelChoiceIterator to be created, bug #11183
        result.queryset = result.queryset
        return result
    def _get_queryset(self):
        return self._queryset
    def _set_queryset(self, queryset):
        # Assigning a new queryset must also refresh the widget's choices.
        self._queryset = queryset
        self.widget.choices = self.choices
    queryset = property(_get_queryset, _set_queryset)
    # this method will be used to create object labels by the QuerySetIterator.
    # Override it to customize the label.
    def label_from_instance(self, obj):
        """
        This method is used to convert objects into strings; it's used to
        generate the labels for the choices presented by this object. Subclasses
        can override this method to customize the display of the choices.
        """
        return smart_text(obj)
    def _get_choices(self):
        # If self._choices is set, then somebody must have manually set
        # the property self.choices. In this case, just return self._choices.
        if hasattr(self, '_choices'):
            return self._choices
        # Otherwise, execute the QuerySet in self.queryset to determine the
        # choices dynamically. Return a fresh ModelChoiceIterator that has not been
        # consumed. Note that we're instantiating a new ModelChoiceIterator *each*
        # time _get_choices() is called (and, thus, each time self.choices is
        # accessed) so that we can ensure the QuerySet has not been consumed. This
        # construct might look complicated but it allows for lazy evaluation of
        # the queryset.
        return ModelChoiceIterator(self)
    choices = property(_get_choices, ChoiceField._set_choices)
    def prepare_value(self, value):
        # Model instances are reduced to their pk (or to_field) value.
        if hasattr(value, '_meta'):
            if self.to_field_name:
                return value.serializable_value(self.to_field_name)
            else:
                return value.pk
        return super(ModelChoiceField, self).prepare_value(value)
    def to_python(self, value):
        # Resolve the submitted key back to a model instance (or None).
        if value in self.empty_values:
            return None
        try:
            key = self.to_field_name or 'pk'
            value = self.queryset.get(**{key: value})
        except (ValueError, self.queryset.model.DoesNotExist):
            raise ValidationError(self.error_messages['invalid_choice'], code='invalid_choice')
        return value
    def validate(self, value):
        # Membership was already checked in to_python(); only the base
        # required-ness validation applies here.
        return Field.validate(self, value)
    def _has_changed(self, initial, data):
        initial_value = initial if initial is not None else ''
        data_value = data if data is not None else ''
        return force_text(self.prepare_value(initial_value)) != force_text(data_value)
class ModelMultipleChoiceField(ModelChoiceField):
    """A MultipleChoiceField whose choices are a model QuerySet."""
    widget = SelectMultiple
    hidden_widget = MultipleHiddenInput
    default_error_messages = {
        'list': _('Enter a list of values.'),
        'invalid_choice': _('Select a valid choice. %(value)s is not one of the'
                            ' available choices.'),
        'invalid_pk_value': _('"%(pk)s" is not a valid value for a primary key.')
    }
    def __init__(self, queryset, cache_choices=False, required=True,
                 widget=None, label=None, initial=None,
                 help_text='', *args, **kwargs):
        # empty_label is forced to None: a multi-select has no blank choice.
        super(ModelMultipleChoiceField, self).__init__(queryset, None,
            cache_choices, required, widget, label, initial, help_text,
            *args, **kwargs)
        # Remove this in Django 1.8
        if isinstance(self.widget, SelectMultiple) and not isinstance(self.widget, CheckboxSelectMultiple):
            msg = _('Hold down "Control", or "Command" on a Mac, to select more than one.')
            self.help_text = string_concat(self.help_text, ' ', msg)
    def to_python(self, value):
        # Convert each submitted key to a model instance.
        if not value:
            return []
        to_py = super(ModelMultipleChoiceField, self).to_python
        return [to_py(val) for val in value]
    def clean(self, value):
        """Validate the submitted keys and return the matching queryset."""
        if self.required and not value:
            raise ValidationError(self.error_messages['required'], code='required')
        elif not self.required and not value:
            return self.queryset.none()
        if not isinstance(value, (list, tuple)):
            raise ValidationError(self.error_messages['list'], code='list')
        key = self.to_field_name or 'pk'
        # First pass: each key must at least be coercible by the field
        # (filter() raises ValueError on a malformed pk).
        for pk in value:
            try:
                self.queryset.filter(**{key: pk})
            except ValueError:
                raise ValidationError(
                    self.error_messages['invalid_pk_value'],
                    code='invalid_pk_value',
                    params={'pk': pk},
                )
        # Second pass: every submitted key must match an existing object.
        qs = self.queryset.filter(**{'%s__in' % key: value})
        pks = set(force_text(getattr(o, key)) for o in qs)
        for val in value:
            if force_text(val) not in pks:
                raise ValidationError(
                    self.error_messages['invalid_choice'],
                    code='invalid_choice',
                    params={'value': val},
                )
        # Since this overrides the inherited ModelChoiceField.clean
        # we run custom validators here
        self.run_validators(value)
        return qs
    def prepare_value(self, value):
        # Map each element of an iterable (but not a string or a single model
        # instance) through the single-value prepare_value.
        if (hasattr(value, '__iter__') and
                not isinstance(value, six.text_type) and
                not hasattr(value, '_meta')):
            return [super(ModelMultipleChoiceField, self).prepare_value(v) for v in value]
        return super(ModelMultipleChoiceField, self).prepare_value(value)
    def _has_changed(self, initial, data):
        # Compare as sets of text values; order is irrelevant.
        if initial is None:
            initial = []
        if data is None:
            data = []
        if len(initial) != len(data):
            return True
        initial_set = set(force_text(value) for value in self.prepare_value(initial))
        data_set = set(force_text(value) for value in data)
        return data_set != initial_set
def modelform_defines_fields(form_class):
    """
    Return True if *form_class* has a ``_meta`` that explicitly declares
    either ``fields`` or ``exclude``; False otherwise (including for None).
    """
    if form_class is None or not hasattr(form_class, '_meta'):
        return False
    opts = form_class._meta
    return opts.fields is not None or opts.exclude is not None
| beckastar/django | django/forms/models.py | Python | bsd-3-clause | 53,499 |
# ubuntu-drivers-common custom detect plugin for x86 CPU microcodes
#
# Author: Dimitri John Ledkov <[email protected]>
#
# This plugin detects CPU microcode packages based on pattern matching
# against the "vendor_id" line in /proc/cpuinfo.
#
# To add a new microcode family, simply insert a line into the db
# variable with the following format:
#
# '<Pattern from your cpuinfo output>': '<Name of the driver package>',
#
import logging
# Map of /proc/cpuinfo "vendor_id" values to their microcode package.
db = {
    'GenuineIntel': 'intel-microcode',
    'AuthenticAMD': 'amd64-microcode',
}
def detect(apt_cache):
    """
    Return a one-element list with the microcode package for this machine's
    CPU vendor, or None when the vendor is unknown or cpuinfo is unreadable.
    ``apt_cache`` is accepted for plugin-API compatibility but unused.
    """
    try:
        with open('/proc/cpuinfo') as cpuinfo:
            for line in cpuinfo:
                if not line.startswith('vendor_id'):
                    continue
                vendor = line.split(':')[1].strip()
                package = db.get(vendor)
                if package is not None:
                    return [package]
    except IOError as err:
        logging.debug('could not open /proc/cpuinfo: %s', err)
| linuxmint/ubuntu-drivers-common | detect-plugins/cpu-microcode.py | Python | gpl-2.0 | 920 |
# -*- coding: utf-8 -*-
# Copyright: (C) 2013 Canonical
#
# Authors:
# Didier Roche
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; version 3.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
import os
from ..tools import basetestcase
class BaseUnitTestCase(basetestcase.BaseTestCase):
    '''Base unit test case module, add all mocks to PATH'''
    @classmethod
    def setUpClass(cls):
        super(BaseUnitTestCase, cls).setUpClass()
        # Offline mocks are added first so they win PATH lookups.
        for flavour in ("offline", "online"):
            cls.addToPath(os.path.join("mocks", flavour))
    def setUp(self):
        super(BaseUnitTestCase, self).setUp()
        self.create_temp_workdir()
        # Default both mock switches to "off" for every test.
        os.environ.update({'MOCK_MODE': "0", 'MOCK_ERROR_MODE': "0"})
class BaseUnitTestCaseWithErrors(BaseUnitTestCase):
    '''Base unit test case module, simulating errors in mock objects'''
    def setUp(self):
        '''Reset the error mode to 1'''
        super(BaseUnitTestCaseWithErrors, self).setUp()
        os.environ['MOCK_ERROR_MODE'] = "1"
    @classmethod
    def tearDownClass(cls):
        '''Clear the error-mode flag and run the normal class teardown.'''
        # Bug fix: this previously called super(BaseUnitTestCase,
        # cls).setUpClass(), i.e. it re-ran *setup* (of the wrong class)
        # during teardown instead of tearing down.
        super(BaseUnitTestCaseWithErrors, cls).tearDownClass()
        # pop() with a default replaces the old bare "except: pass", which
        # silently swallowed every exception type.
        os.environ.pop('MOCK_ERROR_MODE', None)
| didrocks/cupstream2distro | tests/unit/__init__.py | Python | gpl-3.0 | 1,751 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._file_services_operations import build_get_service_properties_request, build_list_request, build_set_service_properties_request
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class FileServicesOperations:
"""FileServicesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.storage.v2021_02_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
    @distributed_trace_async
    async def list(
        self,
        resource_group_name: str,
        account_name: str,
        **kwargs: Any
    ) -> "_models.FileServiceItems":
        """List all file services in storage accounts.

        :param resource_group_name: The name of the resource group within the user's subscription. The
         name is case insensitive.
        :type resource_group_name: str
        :param account_name: The name of the storage account within the specified resource group.
         Storage account names must be between 3 and 24 characters in length and use numbers and
         lower-case letters only.
        :type account_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: FileServiceItems, or the result of cls(response)
        :rtype: ~azure.mgmt.storage.v2021_02_01.models.FileServiceItems
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.FileServiceItems"]
        # Map auth/404/409 status codes to specific exception types; callers
        # may extend this mapping via the 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        
        request = build_list_request(
            resource_group_name=resource_group_name,
            account_name=account_name,
            subscription_id=self._config.subscription_id,
            template_url=self.list.metadata['url'],
        )
        # Convert the azure.core.rest request to the pipeline transport form
        # and expand the URL template with the client's base endpoint.
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('FileServiceItems', pipeline_response)

        if cls:
            # Caller-supplied hook gets the raw response and deserialized body.
            return cls(pipeline_response, deserialized, {})

        return deserialized

    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/fileServices'}  # type: ignore
    @distributed_trace_async
    async def set_service_properties(
        self,
        resource_group_name: str,
        account_name: str,
        parameters: "_models.FileServiceProperties",
        **kwargs: Any
    ) -> "_models.FileServiceProperties":
        """Sets the properties of file services in storage accounts, including CORS (Cross-Origin Resource
        Sharing) rules.

        :param resource_group_name: The name of the resource group within the user's subscription. The
         name is case insensitive.
        :type resource_group_name: str
        :param account_name: The name of the storage account within the specified resource group.
         Storage account names must be between 3 and 24 characters in length and use numbers and
         lower-case letters only.
        :type account_name: str
        :param parameters: The properties of file services in storage accounts, including CORS
         (Cross-Origin Resource Sharing) rules.
        :type parameters: ~azure.mgmt.storage.v2021_02_01.models.FileServiceProperties
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: FileServiceProperties, or the result of cls(response)
        :rtype: ~azure.mgmt.storage.v2021_02_01.models.FileServiceProperties
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.FileServiceProperties"]
        # Map auth/404/409 status codes to specific exception types; callers
        # may extend this mapping via the 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]

        # Serialize the model into the JSON request body.
        _json = self._serialize.body(parameters, 'FileServiceProperties')

        request = build_set_service_properties_request(
            resource_group_name=resource_group_name,
            account_name=account_name,
            subscription_id=self._config.subscription_id,
            content_type=content_type,
            json=_json,
            template_url=self.set_service_properties.metadata['url'],
        )
        # Convert the azure.core.rest request to the pipeline transport form
        # and expand the URL template with the client's base endpoint.
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('FileServiceProperties', pipeline_response)

        if cls:
            # Caller-supplied hook gets the raw response and deserialized body.
            return cls(pipeline_response, deserialized, {})

        return deserialized

    set_service_properties.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/fileServices/{FileServicesName}'}  # type: ignore
@distributed_trace_async
async def get_service_properties(
self,
resource_group_name: str,
account_name: str,
**kwargs: Any
) -> "_models.FileServiceProperties":
"""Gets the properties of file services in storage accounts, including CORS (Cross-Origin Resource
Sharing) rules.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: FileServiceProperties, or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2021_02_01.models.FileServiceProperties
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.FileServiceProperties"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_service_properties_request(
resource_group_name=resource_group_name,
account_name=account_name,
subscription_id=self._config.subscription_id,
template_url=self.get_service_properties.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('FileServiceProperties', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_service_properties.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/fileServices/{FileServicesName}'} # type: ignore
| Azure/azure-sdk-for-python | sdk/storage/azure-mgmt-storage/azure/mgmt/storage/v2021_02_01/aio/operations/_file_services_operations.py | Python | mit | 10,108 |
"""PermMissingElem (https://codility.com/c/run/demoXVU8HJ-5VU)
Analysis:
- Space O(1) requires that there is only 1 variable for temporary storage.
- Time O(n) requires that we only iterate array A in linear time.
"""
__author__ = 'au9ustine'
def solution(A):
    """Return the element missing from A, a permutation of 1..len(A)+1 with
    exactly one value absent.

    Uses the closed form: (sum of 1..n+1) - sum(A), where n = len(A).
    O(n) time, O(1) extra space.
    """
    n = len(A) + 1
    return n * (n + 1) // 2 - sum(A)
def solution1(A):
    """Return the missing element of A via XOR accumulation over GF(2).

    XOR-ing each expected index 1..n+1 against each actual element cancels
    every value that appears on both sides, leaving only the absent one. See
    https://en.wikipedia.org/wiki/Exclusive_or#Relation_to_modern_algebra
    """
    acc = len(A) + 1
    for expected, actual in enumerate(A, start=1):
        acc ^= expected ^ actual
    return acc
return s
| au9ustine/org.au9ustine.puzzles.codility | lessons/lesson01_time_complexity/PermMissingElem.py | Python | mit | 650 |
# ------------------------------------------------------------------
# Copyright (c) 2020 PyInstaller Development Team.
#
# This file is distributed under the terms of the GNU General Public
# License (version 2.0 or later).
#
# The full license is available in LICENSE.GPL.txt, distributed with
# this software.
#
# SPDX-License-Identifier: GPL-2.0-or-later
# ------------------------------------------------------------------
from PyInstaller.utils.hooks import copy_metadata
# Bundle the distribution metadata (dist-info/egg-info) so that
# pkg_resources / importlib.metadata can resolve 'google-cloud-pubsub'
# inside the frozen application at runtime.
datas = copy_metadata('google-cloud-pubsub')
| etherkit/OpenBeacon2 | macos/venv/lib/python3.8/site-packages/_pyinstaller_hooks_contrib/hooks/stdhooks/hook-google.cloud.pubsub_v1.py | Python | gpl-3.0 | 524 |
import flask
import flask.ext.login
import flask.ext.bcrypt
import rod
import rod.model.staff
import rod.model.schemas
auth_handler = flask.Blueprint('auth', __name__)
@auth_handler.route('/auth', methods=['GET'])
def verify():
    """Return the serialized currently logged-in staff member.

    Raises a 401 APIError when there is no authenticated session.
    """
    current = flask.ext.login.current_user
    if not current.is_authenticated:
        raise rod.APIError('Not authorized', status_code=401)
    schema = rod.model.schemas.StaffSchema()
    return flask.jsonify(schema.dump(current).data)
@auth_handler.route('/auth', methods=['POST'])
def login():
    """Authenticate a staff member from a JSON body with 'email'/'password'.

    On success the user is logged into the session and the serialized staff
    record is returned; on failure a 401 APIError is raised.
    """
    staff = rod.model.db.session.query(rod.model.staff.Staff).filter_by(
        email=flask.request.json['email']
    ).first()
    if staff is None:
        # User doesn't exist
        # Instantiate a fake user to cycle through the whole authentication
        # process, so unknown e-mails take roughly as long as known ones
        # (a bcrypt hash is still computed and checked).
        staff = rod.model.staff.Staff()
        staff.password = flask.ext.bcrypt.generate_password_hash('any pass')  # Whatever
        # Authentication will fail even if the typed password matches the one above,
        # due to staff check
        # NOTE(review): 'if staff' below is always truthy at this point (staff
        # is an object), so the guard described above may not actually reject
        # the dummy user -- confirm a fake Staff cannot be logged in.
    bcrypt = flask.ext.bcrypt.Bcrypt(flask.current_app)
    is_password_correct = bcrypt.check_password_hash(
        staff.password,
        flask.request.json['password']
    )
    if staff and is_password_correct:
        flask.ext.login.login_user(staff)
        staff_schema = rod.model.schemas.StaffSchema()
        return flask.jsonify(staff_schema.dump(staff).data)
    raise rod.APIError('Authorization failed', status_code=401)
@auth_handler.route('/auth', methods=['DELETE'])
def logout():
    """Terminate the current session and return an empty 200 response."""
    flask.ext.login.logout_user()
    return ''
| sssilver/angler | rod/rod/handler/auth.py | Python | bsd-3-clause | 1,576 |
from django.conf.urls import include, url
from django.contrib import admin
from . import views
# URL routes for the search app: GET /users/ is handled by the
# searchUsers view.
urlpatterns = [
    url(r'^users/$', views.searchUsers),
]
| bitsnbytes7c8/django-site | search/urls.py | Python | mit | 155 |
# Copyright 2014 Mellanox Technologies, Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import os
import re
from neutron_lib.utils import helpers
from oslo_log import log as logging
from neutron._i18n import _
from neutron.agent import rpc as agent_rpc
from neutron.plugins.ml2.drivers.mech_sriov.agent.common \
import exceptions as exc
from neutron.plugins.ml2.drivers.mech_sriov.agent import pci_lib
LOG = logging.getLogger(__name__)
IP_LINK_CAPABILITY_STATE = 'state'
IP_LINK_CAPABILITY_VLAN = 'vlan'
IP_LINK_CAPABILITY_RATE = 'max_tx_rate'
IP_LINK_CAPABILITY_MIN_TX_RATE = 'min_tx_rate'
IP_LINK_CAPABILITY_SPOOFCHK = 'spoofchk'
IP_LINK_SUB_CAPABILITY_QOS = 'qos'
class PciOsWrapper(object):
    """Sysfs helper for inspecting SR-IOV virtual functions of a PF netdev."""
    DEVICE_PATH = "/sys/class/net/%s/device"
    PCI_PATH = "/sys/class/net/%s/device/virtfn%s/net"
    NUMVFS_PATH = "/sys/class/net/%s/device/sriov_numvfs"
    VIRTFN_FORMAT = r"^virtfn(?P<vf_index>\d+)"
    VIRTFN_REG_EX = re.compile(VIRTFN_FORMAT)
    MAC_VTAP_PREFIX = "upper_macvtap*"
    @classmethod
    def scan_vf_devices(cls, dev_name):
        """Scan os directories to get VF devices
        @param dev_name: pf network device name
        @return: list of (pci_slot, vf_index) tuples
        """
        dev_path = cls.DEVICE_PATH % dev_name
        if not os.path.isdir(dev_path):
            LOG.error("Failed to get devices for %s", dev_name)
            raise exc.InvalidDeviceError(dev_name=dev_name,
                                         reason=_("Device not found"))
        vf_list = []
        for entry in os.listdir(dev_path):
            match = cls.VIRTFN_REG_EX.match(entry)
            if not match:
                continue
            entry_path = os.path.join(dev_path, entry)
            if os.path.islink(entry_path):
                # Each "virtfnN" entry is a symlink to the VF's PCI device
                # directory; its basename is the VF's PCI slot address.
                pci_slot = os.path.basename(os.readlink(entry_path))
                vf_list.append((pci_slot, int(match.group("vf_index"))))
        return vf_list
    @classmethod
    def pf_device_exists(cls, dev_name):
        """Return True if the PF's sysfs device directory is present."""
        return os.path.isdir(cls.DEVICE_PATH % dev_name)
    @classmethod
    def is_assigned_vf_direct(cls, dev_name, vf_index):
        """Check if VF is assigned as PCI passthrough.
        A directly assigned VF no longer exposes a netdev under its PCI
        path, so the VF counts as assigned when PCI_PATH is missing.
        @param dev_name: pf network device name
        @param vf_index: vf index
        @return: True if VF is assigned, False otherwise
        """
        return not os.path.isdir(cls.PCI_PATH % (dev_name, vf_index))
    @classmethod
    def get_vf_macvtap_upper_devs(cls, dev_name, vf_index):
        """Retrieve VF netdev upper (macvtap) devices.
        @param dev_name: pf network device name
        @param vf_index: vf index
        @return: list of upper net devices associated with the VF
        """
        pattern = os.path.join(cls.PCI_PATH % (dev_name, vf_index),
                               "*", cls.MAC_VTAP_PREFIX)
        # Matches are named "upper_<netdev>"; strip the prefix to get the
        # upper netdev names.
        return [os.path.basename(path).split('_')[1]
                for path in glob.glob(pattern)]
    @classmethod
    def is_assigned_vf_macvtap(cls, dev_name, vf_index):
        """Check if VF is assigned via macvtap.
        The VF counts as assigned when at least one upper_macvtap entry
        exists under its netdev path.
        @param dev_name: pf network device name
        @param vf_index: vf index
        @return: True if VF is assigned, False otherwise
        """
        return bool(cls.get_vf_macvtap_upper_devs(dev_name, vf_index))
    @classmethod
    def get_numvfs(cls, dev_name):
        """Get configured number of VFs on device
        @param dev_name: pf network device name
        @return: integer number of VFs, or -1 when sriov_numvfs is absent
        (the device does not support this configuration knob)
        """
        try:
            with open(cls.NUMVFS_PATH % dev_name) as f:
                numvfs = int(f.read())
        except IOError:
            LOG.warning("Error reading sriov_numvfs file for device %s, "
                        "probably not supported by this device", dev_name)
            return -1
        LOG.debug("Number of VFs configured on device %s: %s",
                  dev_name, numvfs)
        return numvfs
class EmbSwitch(object):
    """Class to manage logical embedded switch entity.
    Embedded Switch object is logical entity representing all VFs
    connected to same physical network
    Each physical network is mapped to PF network device interface,
    meaning all its VF, excluding the devices in exclude_device list.
    @ivar pci_slot_map: dictionary for mapping each pci slot to vf index
    @ivar pci_dev_wrapper: pci device wrapper
    """
    def __init__(self, dev_name, exclude_devices):
        """Constructor
        @param dev_name: network device name
        @param exclude_devices: list of pci slots to exclude
        """
        self.dev_name = dev_name
        # pci slot (str) -> vf index (int); populated by _load_devices()
        self.pci_slot_map = {}
        # raw (pci_slot, vf_index) pairs from the sysfs scan, including
        # excluded devices
        self.scanned_pci_list = []
        self.pci_dev_wrapper = pci_lib.PciDeviceIPWrapper(dev_name)
        self._load_devices(exclude_devices)
    def _load_devices(self, exclude_devices):
        """Load devices from driver and filter if needed.
        @param exclude_devices: excluded devices mapping device_name: pci slots
        """
        self.scanned_pci_list = PciOsWrapper.scan_vf_devices(self.dev_name)
        for pci_slot, vf_index in self.scanned_pci_list:
            if pci_slot not in exclude_devices:
                self.pci_slot_map[pci_slot] = vf_index
    def get_pci_slot_list(self):
        """Get list of VF addresses."""
        return self.pci_slot_map.keys()
    def get_assigned_devices_info(self):
        """Get assigned Virtual Functions mac and pci slot
        information and populates vf_to_pci_slot mappings
        @return: list of VF pair (mac address, pci slot)
        """
        assigned_devices_info = []
        for pci_slot, vf_index in self.pci_slot_map.items():
            # get_pci_device() returns None for unassigned VFs, which are
            # skipped here.
            mac = self.get_pci_device(pci_slot)
            if mac:
                assigned_devices_info.append(
                    agent_rpc.DeviceInfo(mac, pci_slot))
        return assigned_devices_info
    def get_device_state(self, pci_slot):
        """Get device state.
        @param pci_slot: Virtual Function address
        """
        vf_index = self._get_vf_index(pci_slot)
        return self.pci_dev_wrapper.get_vf_state(vf_index)
    def set_device_state(self, pci_slot, state, propagate_uplink_state):
        """Set device state.
        @param pci_slot: Virtual Function address
        @param state: link state
        @param propagate_uplink_state: when True the VF follows the PF's
            link state ("auto") instead of a fixed up/down state
        """
        vf_index = self._get_vf_index(pci_slot)
        return self.pci_dev_wrapper.set_vf_state(vf_index, state,
                                                 auto=propagate_uplink_state)
    def set_device_rate(self, pci_slot, rate_type, rate_kbps):
        """Set device rate: max_tx_rate, min_tx_rate
        @param pci_slot: Virtual Function address
        @param rate_type: device rate name type. Could be 'max_tx_rate' and
                          'min_tx_rate' ('rate' is not supported anymore).
        @param rate_kbps: device rate in kbps
        """
        vf_index = self._get_vf_index(pci_slot)
        # NOTE(ralonsoh): ip link sets rate in Mbps therefore we need to
        # convert the rate_kbps value from kbps to Mbps.
        # Zero means to disable the rate so the lowest rate available is 1Mbps.
        # Floating numbers are not allowed
        if 0 < rate_kbps < 1000:
            # Sub-1Mbps requests are clamped up to 1 Mbps rather than
            # rounded down to 0 (which would disable the limit).
            rate_mbps = 1
        else:
            rate_mbps = helpers.round_val(rate_kbps / 1000.0)
        log_dict = {
            'rate_mbps': rate_mbps,
            'rate_kbps': rate_kbps,
            'vf_index': vf_index,
            'rate_type': rate_type
        }
        if rate_kbps % 1000 != 0:
            # Rounding occurred; tell the operator the effective value.
            LOG.debug("'%(rate_type)s' for SR-IOV ports is counted in Mbps; "
                      "setting %(rate_mbps)s Mbps limit for port %(vf_index)s "
                      "instead of %(rate_kbps)s kbps",
                      log_dict)
        else:
            LOG.debug("Setting %(rate_mbps)s Mbps limit for port %(vf_index)s",
                      log_dict)
        return self.pci_dev_wrapper.set_vf_rate(vf_index, rate_type, rate_mbps)
    def _get_vf_index(self, pci_slot):
        # Resolve a PCI slot to its VF index; raises InvalidPciSlotError for
        # slots this switch does not manage.
        vf_index = self.pci_slot_map.get(pci_slot)
        if vf_index is None:
            LOG.warning("Cannot find vf index for pci slot %s",
                        pci_slot)
            raise exc.InvalidPciSlotError(pci_slot=pci_slot)
        return vf_index
    def set_device_spoofcheck(self, pci_slot, enabled):
        """Set device spoofchecking
        @param pci_slot: Virtual Function address
        @param enabled: True to enable spoofcheck, False to disable
        """
        vf_index = self.pci_slot_map.get(pci_slot)
        if vf_index is None:
            raise exc.InvalidPciSlotError(pci_slot=pci_slot)
        return self.pci_dev_wrapper.set_vf_spoofcheck(vf_index, enabled)
    def _get_macvtap_mac(self, vf_index):
        # Read the MAC directly from the macvtap upper device of the VF.
        upperdevs = PciOsWrapper.get_vf_macvtap_upper_devs(
            self.dev_name, vf_index)
        # NOTE(adrianc) although there can be many macvtap upper
        # devices, we expect to have excatly one.
        if len(upperdevs) > 1:
            LOG.warning("Found more than one macvtap upper device for PF "
                        "%(pf)s with VF index %(vf_index)s.",
                        {"pf": self.dev_name, "vf_index": vf_index})
        upperdev = upperdevs[0]
        return pci_lib.PciDeviceIPWrapper(
            upperdev).device(upperdev).link.address
    def get_pci_device(self, pci_slot):
        """Get mac address for given Virtual Function address
        @param pci_slot: pci slot
        @return: MAC address of virtual function, or None when the VF is
            not assigned (or the PF itself has disappeared)
        """
        if not PciOsWrapper.pf_device_exists(self.dev_name):
            # If the root PCI path does not exist, then the VF cannot
            # actually have been allocated and there is no way we can
            # manage it.
            return None
        vf_index = self.pci_slot_map.get(pci_slot)
        mac = None
        if vf_index is not None:
            # NOTE(adrianc) for VF passthrough take administrative mac from PF
            # netdevice, for macvtap take mac directly from macvtap interface.
            # This is done to avoid relying on hypervisor [lack of] logic to
            # keep effective and administrative mac in sync.
            if PciOsWrapper.is_assigned_vf_direct(self.dev_name, vf_index):
                macs = self.pci_dev_wrapper.get_assigned_macs([vf_index])
                mac = macs.get(vf_index)
            elif PciOsWrapper.is_assigned_vf_macvtap(
                    self.dev_name, vf_index):
                mac = self._get_macvtap_mac(vf_index)
        return mac
class ESwitchManager(object):
    """Manages logical Embedded Switch entities for physical network."""
    def __new__(cls):
        # make it a singleton
        # NOTE: the shared state (emb_switches_map, pci_slot_map,
        # skipped_devices) is stored on the class, so every instantiation
        # returns the same data.
        if not hasattr(cls, '_instance'):
            cls._instance = super(ESwitchManager, cls).__new__(cls)
            # physical network name -> list of EmbSwitch objects
            cls.emb_switches_map = {}
            # pci slot -> EmbSwitch managing that slot
            cls.pci_slot_map = {}
            # PF names whose VFs were not (fully) initialized yet; retried
            # on the next discover_devices() pass
            cls.skipped_devices = set()
        return cls._instance
    def device_exists(self, device_mac, pci_slot):
        """Verify if device exists.
        Check if a device mac exists and matches the given VF pci slot
        @param device_mac: device mac
        @param pci_slot: VF address
        """
        embedded_switch = self._get_emb_eswitch(device_mac, pci_slot)
        if embedded_switch:
            return True
        return False
    def get_assigned_devices_info(self, phys_net=None):
        """Get all assigned devices.
        Get all assigned devices belongs to given embedded switch
        @param phys_net: physical network, if none get all assigned devices
        @return: set of assigned VFs (mac address, pci slot) pair
        """
        if phys_net:
            eswitch_objects = self.emb_switches_map.get(phys_net, set())
        else:
            # No network given: collect across every embedded switch.
            eswitch_objects = set()
            for eswitch_list in self.emb_switches_map.values():
                eswitch_objects |= set(eswitch_list)
        assigned_devices = set()
        for embedded_switch in eswitch_objects:
            for device in embedded_switch.get_assigned_devices_info():
                assigned_devices.add(device)
        return assigned_devices
    def get_device_state(self, device_mac, pci_slot):
        """Get device state.
        Get the device state (up/enable, down/disable, or auto)
        @param device_mac: device mac
        @param pci_slot: VF PCI slot
        @return: device state (enable/disable/auto); reported as disabled
            when the device is unknown or the mac does not match
        """
        embedded_switch = self._get_emb_eswitch(device_mac, pci_slot)
        if embedded_switch:
            return embedded_switch.get_device_state(pci_slot)
        return pci_lib.LinkState.disable.name
    def set_device_max_rate(self, device_mac, pci_slot, max_kbps):
        """Set device max rate
        Sets the device max rate in kbps
        @param device_mac: device mac
        @param pci_slot: pci slot
        @param max_kbps: device max rate in kbps
        """
        embedded_switch = self._get_emb_eswitch(device_mac, pci_slot)
        if embedded_switch:
            embedded_switch.set_device_rate(
                pci_slot, IP_LINK_CAPABILITY_RATE, max_kbps)
    def set_device_min_tx_rate(self, device_mac, pci_slot, min_kbps):
        """Set device min_tx_rate
        Sets the device min_tx_rate in kbps
        @param device_mac: device mac
        @param pci_slot: pci slot
        @param min_kbps: device min_tx_rate in kbps
        """
        embedded_switch = self._get_emb_eswitch(device_mac, pci_slot)
        if embedded_switch:
            embedded_switch.set_device_rate(
                pci_slot, IP_LINK_CAPABILITY_MIN_TX_RATE, min_kbps)
    def set_device_state(self, device_mac, pci_slot, admin_state_up,
                         propagate_uplink_state):
        """Set device state
        Sets the device state (up or down)
        @param device_mac: device mac
        @param pci_slot: pci slot
        @param admin_state_up: device admin state True/False
        @param propagate_uplink_state: follow uplink state True/False
        """
        embedded_switch = self._get_emb_eswitch(device_mac, pci_slot)
        if embedded_switch:
            embedded_switch.set_device_state(pci_slot,
                                             admin_state_up,
                                             propagate_uplink_state)
    def set_device_spoofcheck(self, device_mac, pci_slot, enabled):
        """Set device spoofcheck
        Sets device spoofchecking (enabled or disabled)
        @param device_mac: device mac
        @param pci_slot: pci slot
        @param enabled: device spoofchecking
        """
        embedded_switch = self._get_emb_eswitch(device_mac, pci_slot)
        if embedded_switch:
            embedded_switch.set_device_spoofcheck(pci_slot,
                                                  enabled)
    def _process_emb_switch_map(self, phys_net, dev_name, exclude_devices):
        """Process emb_switch_map
        Drops a known switch whose PF disappeared (e.g. PF given to a VM as
        PCI passthrough) or creates a switch for a newly visible PF.
        @param phys_net: physical network
        @param dev_name: device name
        @param exclude_devices: PCI devices to ignore.
        """
        emb_switches = self.emb_switches_map.get(phys_net, [])
        for switch in emb_switches:
            if switch.dev_name == dev_name:
                if not PciOsWrapper.pf_device_exists(dev_name):
                    # If the device is given to the VM as PCI-PT
                    # then delete the respective emb_switch from map
                    self.emb_switches_map.get(phys_net).remove(switch)
                return
        # We don't know about this device at the moment, so add to the map.
        if PciOsWrapper.pf_device_exists(dev_name):
            self._create_emb_switch(
                phys_net, dev_name,
                exclude_devices.get(dev_name, set()))
    def discover_devices(self, device_mappings, exclude_devices):
        """Discover which Virtual functions to manage.
        Discover devices, and create embedded switch object for network device
        @param device_mappings: device mapping physical_network:device_name
        @param exclude_devices: excluded devices mapping device_name: pci slots
        """
        if exclude_devices is None:
            exclude_devices = {}
        for phys_net, dev_names in device_mappings.items():
            for dev_name in dev_names:
                self._process_emb_switch_map(phys_net, dev_name,
                                             exclude_devices)
    def _create_emb_switch(self, phys_net, dev_name, exclude_devices):
        # Build an EmbSwitch for this PF, unless its VFs are not (yet)
        # initialized, in which case the device is parked in skipped_devices
        # for a retry on the next discovery pass.
        embedded_switch = EmbSwitch(dev_name, exclude_devices)
        numvfs = PciOsWrapper.get_numvfs(dev_name)
        if numvfs == 0:
            # numvfs might be 0 on pre-up state of a device
            # giving such devices one more chance to initialize
            if dev_name not in self.skipped_devices:
                self.skipped_devices.add(dev_name)
                LOG.info("Device %s has 0 VFs configured. Skipping "
                         "for now to let the device initialize", dev_name)
                return
            else:
                # looks like device indeed has 0 VFs configured
                # it is probably used just as direct-physical
                LOG.info("Device %s has 0 VFs configured", dev_name)
        # Compare the configured VF count with what the sysfs scan found.
        numvfs_cur = len(embedded_switch.scanned_pci_list)
        if numvfs >= 0 and numvfs > numvfs_cur:
            LOG.info("Not all VFs were initialized on device %(device)s: "
                     "expected - %(expected)s, actual - %(actual)s. Skipping.",
                     {'device': dev_name, 'expected': numvfs,
                      'actual': numvfs_cur})
            self.skipped_devices.add(dev_name)
            return
        self.emb_switches_map.setdefault(phys_net, []).append(embedded_switch)
        for pci_slot in embedded_switch.get_pci_slot_list():
            self.pci_slot_map[pci_slot] = embedded_switch
        self.skipped_devices.discard(dev_name)
    def _get_emb_eswitch(self, device_mac, pci_slot):
        """Get embedded switch.
        Get embedded switch by pci slot and validate pci has device mac
        @param device_mac: device mac
        @param pci_slot: pci slot
        @return: the EmbSwitch, or None when unknown or on mac mismatch
        """
        embedded_switch = self.pci_slot_map.get(pci_slot)
        if embedded_switch:
            used_device_mac = embedded_switch.get_pci_device(pci_slot)
            if used_device_mac != device_mac:
                LOG.warning("device pci mismatch: %(device_mac)s "
                            "- %(pci_slot)s",
                            {"device_mac": device_mac, "pci_slot": pci_slot})
                embedded_switch = None
        return embedded_switch
    def clear_max_rate(self, pci_slot):
        """Clear the VF "rate" parameter
        Clear the "rate" configuration from VF by setting it to 0.
        @param pci_slot: VF PCI slot
        """
        self._clear_rate(pci_slot, IP_LINK_CAPABILITY_RATE)
    def clear_min_tx_rate(self, pci_slot):
        """Clear the VF "min_tx_rate" parameter
        Clear the "min_tx_rate" configuration from VF by setting it to 0.
        @param pci_slot: VF PCI slot
        """
        self._clear_rate(pci_slot, IP_LINK_CAPABILITY_MIN_TX_RATE)
    def _clear_rate(self, pci_slot, rate_type):
        """Clear the VF rate parameter specified in rate_type
        Clear the rate configuration from VF by setting it to 0.
        @param pci_slot: VF PCI slot
        @param rate_type: rate to clear ('max_tx_rate', 'min_tx_rate')
        """
        # NOTE(Moshe Levi): we don't use the self._get_emb_eswitch here,
        # because when clearing the VF it may be not assigned. This happens
        # when libvirt releases the VF back to the hypervisor on delete VM.
        # Therefore we should just clear the VF rate according to pci_slot no
        # matter if VF is assigned or not.
        embedded_switch = self.pci_slot_map.get(pci_slot)
        if embedded_switch:
            # NOTE(Moshe Levi): check the pci_slot is not assigned to some
            # other port before resetting the rate.
            if embedded_switch.get_pci_device(pci_slot) is None:
                embedded_switch.set_device_rate(pci_slot, rate_type, 0)
            else:
                LOG.warning("VF with PCI slot %(pci_slot)s is already "
                            "assigned; skipping reset for '%(rate_type)s' "
                            "device configuration parameter",
                            {'pci_slot': pci_slot, 'rate_type': rate_type})
        else:
            LOG.error("PCI slot %(pci_slot)s has no mapping to Embedded "
                      "Switch; skipping", {'pci_slot': pci_slot})
| mahak/neutron | neutron/plugins/ml2/drivers/mech_sriov/agent/eswitch_manager.py | Python | apache-2.0 | 21,892 |
#!/usr/bin/env python3
import subprocess
import sys
from os.path import abspath, dirname, join
from time import sleep, perf_counter
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
class Test(object):
    """Drives a Chrome browser through a Joomla + VirtueMart installation
    and verifies that the WebwinkelKeur sidebar renders on the storefront.
    """

    def run(self, ip):
        """Open the site at *ip* and execute the whole installation flow."""
        self.br = webdriver.Chrome()
        self.br.get('http://' + ip)
        self.br.implicitly_wait(1)
        self.install()

    def install(self):
        """Walk through the Joomla web installer, install the extension
        package, configure it and check the widget on the storefront."""
        self.fill('jform[site_name]', 'test')
        self.fill('jform[admin_email]', '[email protected]')
        self.fill('jform[admin_user]', 'test')
        self.fill('jform[admin_password]', 'test')
        self.fill('jform[admin_password2]', 'test')
        self.click('a[title=Next]')
        self.fill('jform[db_host]', 'db')
        self.fill('jform[db_user]', 'root')
        self.fill('jform[db_name]', 'test')
        self.fill('jform[db_prefix]', 'test_')
        self.click('a[title=Next]')
        # Installation is expected to fail first (alert shown) until the
        # stale installer artifacts are removed inside the container.
        # NOTE: find_element_by_* helpers were removed in Selenium 4; use
        # the find_element(By...) form, which the file already imports.
        self.br.find_element(By.CSS_SELECTOR, '.alert.alert-error')
        subprocess.run(['bash', '-c', "docker exec ${COMPOSE_PROJECT_NAME}_www_1 bash -c 'rm -f /var/www/html/installation/_Joomla*'"], check=True)
        self.click('a[title=Next]')
        self.click('a[title=Install]')
        self.click('[value="Remove installation folder"]', wait=30)
        self.wait_for('[value="Installation folder successfully removed."]')
        self.click('a[title=Administrator]')
        self.fill('username', 'test')
        self.fill('passwd', 'test')
        self.click('.login-button')
        self.click_text('Install VirtueMart with sample data')
        self.click('a.menu-install')
        # The legacy uploader is hidden by default; unhide it so the file
        # input can receive the package path.
        self.br.execute_script('arguments[0].style.display = ""', self.wait_for('#legacy-uploader'))
        self.br.find_element(By.NAME, 'install_package').send_keys(
            abspath(join(dirname(__file__), '..', 'dist', 'pkg_webwinkelkeur.zip')))
        sleep(1)
        self.click_text('Components')
        self.click_text('WebwinkelKeur')
        self.fill('webwinkelkeur_wwk_shop_id', '1')
        self.fill('webwinkelkeur_wwk_api_key', 'abcd')
        self.click('[name=webwinkelkeur_invite]')
        self.click('.button-apply')
        self.click('a[title="Preview test"]')
        self.focus_tab()
        self.wait_for('.wwk--sidebar')

    def fill(self, name, value):
        """Set the value of the form field *name* and fire a change event."""
        el = self.br.find_element(By.NAME, name)
        self.br.execute_script('''
            arguments[0].value = arguments[1]
            var e = document.createEvent('HTMLEvents')
            e.initEvent('change', false, true)
            arguments[0].dispatchEvent(e)
        ''', el, str(value))

    def click(self, css_selector, **kwargs):
        """Click the element matching *css_selector*."""
        self.click_by(By.CSS_SELECTOR, css_selector, **kwargs)

    def click_text(self, text, **kwargs):
        """Click the link whose text contains *text*."""
        self.click_by(By.PARTIAL_LINK_TEXT, text, **kwargs)

    def wait_for(self, css_selector, wait=30):
        """Wait up to *wait* seconds for *css_selector* and return it."""
        return self.wait_by(By.CSS_SELECTOR, css_selector, wait=wait)

    def click_by(self, *by, wait=1):
        # Click via JS to avoid overlay/visibility interception issues.
        el = self.wait_by(*by, wait=wait)
        self.br.execute_script('arguments[0].click()', el)

    def wait_by(self, *by, wait=1):
        """Wait up to *wait* seconds for the locator *by* and return it."""
        return WebDriverWait(self.br, wait).until(EC.presence_of_element_located(by))

    def focus_tab(self):
        """Switch to the most recently opened browser tab, waiting up to
        five seconds for it to appear."""
        wait_until = perf_counter() + 5
        while len(self.br.window_handles) < 2:
            if perf_counter() > wait_until:
                raise RuntimeError("Waiting for a new tab, but none found")
            sleep(0.1)
        # NOTE: driver.switch_to_window() was removed in Selenium 4;
        # switch_to.window() is the supported API.
        self.br.switch_to.window(self.br.window_handles[-1])
if __name__ == '__main__':
    # Usage: test.py <ip> -- CLI arguments are forwarded to Test.run().
    Test().run(*sys.argv[1:])
| apeschar/webwinkelkeur-virtuemart | test/test.py | Python | gpl-3.0 | 3,695 |
"""
This file defines the types for type annotations.
These names aren't part of the module namespace, but they are used in the
annotations in the function signatures. The functions in the module are only
valid for inputs that match the given type annotations.
"""
__all__ = [
"Array",
"Device",
"Dtype",
"SupportsDLPack",
"SupportsBufferProtocol",
"PyCapsule",
]
from typing import Any, Literal, Sequence, Type, Union
from . import (
Array,
int8,
int16,
int32,
int64,
uint8,
uint16,
uint32,
uint64,
float32,
float64,
)
# This should really be recursive, but that isn't supported yet. See the
# similar comment in numpy/typing/_array_like.py
NestedSequence = Sequence[Sequence[Any]]
# The only device the reference implementation supports is the CPU.
Device = Literal["cpu"]
# Union of the dtype objects accepted by the array API functions.
Dtype = Type[
    Union[int8, int16, int32, int64, uint8, uint16, uint32, uint64, float32, float64]
]
# Placeholder aliases: the DLPack/buffer/capsule protocols cannot yet be
# expressed precisely with the typing machinery, so 'Any' is used.
SupportsDLPack = Any
SupportsBufferProtocol = Any
PyCapsule = Any
| simongibbons/numpy | numpy/array_api/_typing.py | Python | bsd-3-clause | 949 |
# Jupyter notebook server configuration: password protection, optional
# Postgres-backed contents storage and Cloud Foundry origin settings.
# Any failure here aborts startup so the server never comes up unprotected.
try:
    import os
    import json
    import traceback
    import IPython.lib
    import pgcontents
    # get_config() is injected into this file's namespace by Jupyter when
    # it evaluates the config script.
    c = get_config()
    ### Password protection ###
    # http://jupyter-notebook.readthedocs.io/en/latest/security.html
    if os.environ.get('JUPYTER_NOTEBOOK_PASSWORD_DISABLED') != 'DangerZone!':
        # NOTE(review): IPython.lib.passwd is deprecated in favor of
        # notebook.auth.passwd -- confirm against the installed notebook
        # version.
        passwd = os.environ['JUPYTER_NOTEBOOK_PASSWORD']
        c.NotebookApp.password = IPython.lib.passwd(passwd)
    else:
        # Explicit opt-out: empty token and password disable all auth.
        c.NotebookApp.token = ''
        c.NotebookApp.password = ''
    ### PostresContentsManager ###
    database_url = os.getenv('DATABASE_URL', None)
    if database_url:
        # Tell IPython to use PostgresContentsManager for all storage.
        c.NotebookApp.contents_manager_class = pgcontents.PostgresContentsManager
        # Set the url for the database used to store files.  See
        # http://docs.sqlalchemy.org/en/rel_0_9/core/engines.html#postgresql
        # for more info on db url formatting.
        c.PostgresContentsManager.db_url = database_url
        # PGContents associates each running notebook server with a user, allowing
        # multiple users to connect to the same database without trampling each other's
        # notebooks. By default, we use the result of result of getpass.getuser(), but
        # a username can be specified manually like so:
        c.PostgresContentsManager.user_id = 'heroku'
        # Set a maximum file size, if desired.
        #c.PostgresContentsManager.max_file_size_bytes = 1000000 # 1MB File cap
    ### CloudFoundry specific settings
    vcap_application_json = os.getenv('VCAP_APPLICATION', None)
    if vcap_application_json:
        # Use the first bound route to configure allowed origin and the
        # secure websocket endpoint.
        vcap_application = json.loads(vcap_application_json)
        uri = vcap_application['uris'][0]
        c.NotebookApp.allow_origin = 'https://{}'.format(uri)
        c.NotebookApp.websocket_url = 'wss://{}:4443'.format(uri)
except Exception:
    traceback.print_exc()
    # if an exception occues, notebook normally would get started
    # without password set. For security reasons, execution is stopped.
    exit(-1)
| chrisbarnettster/cfg-analysis-on-heroku-jupyter | .jupyter/jupyter_notebook_config.py | Python | mit | 2,062 |
#!/usr/bin/env python
from setuptools import setup
# Packaging metadata for the Flask-QiniuStorage extension (a single
# module, flask_qiniustorage, plus its runtime dependencies).
setup(
    name='Flask-QiniuStorage',
    version='0.9.5',
    url='https://github.com/csuzhangxc/Flask-QiniuStorage',
    license='MIT',
    author='Zhang Xuecheng',
    author_email='[email protected]',
    description='Qiniu Storage for Flask',
    long_description='Qiniu Storage for Flask. Please visit: https://github.com/csuzhangxc/Flask-QiniuStorage',
    py_modules=['flask_qiniustorage'],
    zip_safe=False,
    include_package_data=True,
    platforms='any',
    keywords='qiniu for flask',
    package_data={'': ['LICENSE']},
    install_requires=[
        'setuptools',
        'Flask',
        'qiniu'
    ],
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
        'Topic :: Software Development :: Libraries :: Python Modules'
    ]
)
| csuzhangxc/Flask-QiniuStorage | setup.py | Python | mit | 1,139 |
# -*- coding: utf-8 -*-
"""
Sycamore - revert action
This action allows you to revert a page. Note that the standard
config lists this action as excluded!
@copyright: 2006-2007 Philip Neustrom <[email protected]>,
2004 by Jürgen Hermann <[email protected]>
@license: GNU GPL, see COPYING for details.
"""
# Imports
import hashlib
import time
import types
from Sycamore import config
from Sycamore import wikiutil
from Sycamore import wikiaction
from Sycamore import caching
from Sycamore import search
from Sycamore.PageEditor import PageEditor
from Sycamore.Page import Page
def delete_all_newer(oldpage, request, showrc):
    """Delete every revision of oldpage newer than oldpage.prev_date.

    Removes the newer rows from allPages (and the cached page info), clears
    map points created in the reverted time window and, unless showrc is
    set, restores the map point that was current at the reverted-to
    revision from oldMapPoints back into mapPoints.
    """
    version_date = oldpage.prev_date
    d = {'pagename':oldpage.page_name, 'version_date':version_date,
         'wiki_id':oldpage.wiki_id}
    # Drop the cached info for the revisions we are about to delete.
    caching.deleteNewerPageInfo(oldpage.page_name, version_date, request)
    request.cursor.execute("""DELETE from allPages
                              where name=%(pagename)s and
                                    editTime>%(version_date)s and
                                    wiki_id=%(wiki_id)s""", d, isWrite=True)
    # clear out map points set in this time period.
    # move old map point into current map points.
    request.cursor.execute("""DELETE from oldMapPoints
                              where pagename=%(pagename)s and
                                    created_time>%(version_date)s and
                                    wiki_id=%(wiki_id)s""", d, isWrite=True)
    if not showrc:
        # Check to see if there's actually a map on this page version.
        # If there is, we move the map point over from oldMapPoints to
        # mapPoints, accordingly.
        request.cursor.execute(
            """SELECT pagename, x, y, created_time, created_by, created_by_ip,
                      pagename_propercased, address, wiki_id, deleted_time
               from oldMapPoints
               where wiki_id=%(wiki_id)s and pagename=%(pagename)s and
                     created_time <= %(version_date)s and
                     deleted_time >= %(version_date)s""", d)
        mapdata = request.cursor.fetchall()
        cleared_out_already = False
        for (pagename, x, y, created_time, created_by, created_by_ip,
             pagename_propercased, address, wiki_id, deleted_time) in mapdata:
            this_version_has_map = version_date <= deleted_time
            if this_version_has_map:
                if not cleared_out_already:
                    # Remove the current map points once, before restoring
                    # the archived ones.
                    request.cursor.execute(
                        """DELETE from mapPoints
                           where pagename=%(pagename)s and
                                 wiki_id=%(wiki_id)s""", d, isWrite=True)
                    cleared_out_already = True
                # add this map point version into the current map points table
                d['pagename'] = pagename
                d['x'] = x
                d['y'] = y
                d['created_time'] = created_time
                d['created_by'] = created_by
                d['created_by_ip'] = created_by_ip
                d['pagename_propercased'] = pagename_propercased
                d['address'] = address
                d['wiki_id'] = wiki_id
                request.cursor.execute(
                    """INSERT into mapPoints
                       (pagename, x, y, created_time, created_by,
                        created_by_ip, pagename_propercased, address, wiki_id)
                       values (%(pagename)s, %(x)s, %(y)s, %(created_time)s,
                               %(created_by)s, %(created_by_ip)s,
                               %(pagename_propercased)s, %(address)s,
                               %(wiki_id)s)""", d, isWrite=True)
            # these are now in mapPoints, so it's safe to clear them out
            # of the archive table
            # NOTE(review): unlike every other DELETE/INSERT in this
            # function, this execute() does not pass isWrite=True --
            # confirm whether that omission is intentional.
            request.cursor.execute("""DELETE from oldMapPoints
                                      where wiki_id=%(wiki_id)s and
                                            pagename=%(pagename)s and
                                            created_time <= %(version_date)s and
                                            deleted_time >= %(version_date)s""", d)
def set_current_pagetext(oldpage, request):
    """Force the curPages table to reflect ``oldpage``'s text and version.

    Used by 'permanent' reverts that bypass the normal save path: the
    live row is updated in place, or re-inserted when the page row is
    missing and the surviving newest edit is not a DELETE.
    """
    version_date = oldpage.prev_date
    d = {'pagename':oldpage.page_name, 'version_date':version_date,
         'wiki_id':oldpage.wiki_id, 'oldtext': oldpage.get_raw_body()}
    if oldpage.exists(fresh=True):
        # page currently exists: overwrite the live row; cachedText and
        # cachedTime are nulled so the page is re-rendered on next view
        request.cursor.execute(
            """UPDATE curPages set
               text=%(oldtext)s, cachedText=NULL, editTime=%(version_date)s,
               cachedTime=NULL,
               userEdited=(select userEdited from allPages
                           where name=%(pagename)s and wiki_id=%(wiki_id)s and
                                 editTime=%(version_date)s),
               propercased_name=(select propercased_name from allPages
                                 where name=%(pagename)s and
                                       wiki_id=%(wiki_id)s and
                                       editTime=%(version_date)s)
               where name=%(pagename)s and wiki_id=%(wiki_id)s""",
            d, isWrite=True)
    else:
        # page has no live row; check how the target version was recorded
        request.cursor.execute("""SELECT editType from allPages
                                  where name=%(pagename)s and
                                        wiki_id=%(wiki_id)s and
                                        editTime=%(version_date)s""", d)
        result = request.cursor.fetchone()
        if not result or result[0] != 'DELETE':
            # page wasn't most recently deleted, so we restore it
            request.cursor.execute(
                """INSERT into curPages
                   (name, text, cachedText, editTime, cachedTime, userEdited,
                    propercased_name, wiki_id)
                   values
                   (%(pagename)s, %(oldtext)s, NULL, %(version_date)s, NULL,
                    (select userEdited from allPages
                     where name=%(pagename)s and wiki_id=%(wiki_id)s and
                           editTime=%(version_date)s),
                    (select propercased_name from allPages
                     where name=%(pagename)s and wiki_id=%(wiki_id)s and
                           editTime=%(version_date)s),
                    %(wiki_id)s)""", d, isWrite=True)
def _set_proper_pagename(request, page):
    """Look up the proper-cased name stored with this page version and
    prime the per-request pagename cache with it.

    Returns the proper-cased page name.
    """
    query_args = {'pagename': page.page_name, 'wiki_id': page.wiki_id,
                  'version_date': page.prev_date}
    request.cursor.execute("""SELECT propercased_name from allPages
                              where name=%(pagename)s and
                                    wiki_id=%(wiki_id)s and
                                    editTime=%(version_date)s""", query_args)
    row = request.cursor.fetchone()
    proper_name = row[0]
    cache_key = (page.page_name, request.config.wiki_name)
    request.req_cache['pagenames'][cache_key] = proper_name
    return proper_name
def revert_to_page(oldpg, request, pg, comment=None, permanent=False,
                   showrc=True):
    """Revert page ``pg`` to the older version ``oldpg``.

    @param comment: optional edit comment recorded with the revert.
    @param permanent: if True, all newer versions are deleted outright
                      (admin-only 'hard' revert).
    @param showrc: if False (with permanent), the revert is not logged on
                   Recent Changes and page state is patched directly.
    @return: localized status message describing the outcome.
    """
    _ = request.getText
    if permanent:
        delete_all_newer(oldpg, request, showrc)
        if not showrc:
            set_current_pagetext(oldpg, request)
    # Default so the cache update below always has a value.  Previously
    # `pagename` was only assigned inside the try block, so a pg.Unchanged
    # or pg.SaveError raised by saveText left it unbound and the function
    # died with NameError instead of returning the error message.
    pagename = pg.proper_name()
    try:
        # don't log on RC if admin doesn't want it
        if not (permanent and not showrc):
            pg.saveText(oldpg.get_raw_body(), '0', stripspaces=0, notify=1,
                        comment=comment, action="SAVE/REVERT")
            pagename = pg.proper_name()
        else:
            #doing hard work ourselves..
            # should be abstracted into the page object.
            pg.set_raw_body(oldpg.get_raw_body())
            # deal with the case of macros / other items that change state
            # by /not/ being in the page
            search.add_to_index(pg)
            pg.buildCache()
            caching.CacheEntry(pg.page_name, request).clear()
            caching.updateRecentChanges(pg)
            # if we revert to a version with a differently-cased pagename
            pagename = _set_proper_pagename(request, oldpg)
        savemsg = _("Page reverted to version %s" % oldpg.version)
    except pg.Unchanged:
        savemsg = _("The current page is the same as the older page you wish "
                    "to revert to!")
    except pg.SaveError:
        savemsg = _("An error occurred while reverting the page.")
    # clear req cache so user sees proper page state (exist)
    request.req_cache['pagenames'][(
        pagename.lower(), request.config.wiki_name)] = pagename
    return savemsg
def execute(pagename, request):
    """Entry point for the revert action.

    On a plain GET carrying a ``version`` parameter this renders the
    confirmation form (protected by a tamper-resistant ticket); when the
    form is submitted it performs the actual revert via revert_to_page().
    """
    from Sycamore.PageEditor import PageEditor
    _ = request.getText
    actname = __name__.split('.')[-1]
    page = Page(pagename, request)
    permanent = False
    showrc = True
    if not request.user.may.edit(page):
        return page.send_page(
            msg = _('You are not allowed to revert this page!'))
    # check whether the user clicked the delete button
    if request.form.has_key('button') and request.form.has_key('ticket'):
        # check whether this is a valid deletion request (make outside
        # attacks harder by requiring two full HTTP transactions)
        if not _checkTicket(request.form['ticket'][0]):
            return page.send_page(
                msg = _('Please use the interactive user interface to '
                        'revert pages!'))
        # revert the page
        if request.form.has_key('version'):
            version = int(request.form['version'][0])
            oldpg = Page(pagename, request, version=version)
            date = oldpg.prev_date
            if request.form.has_key('comment'):
                entered_comment = request.form['comment'][0]
            else:
                entered_comment = ''
            if len(entered_comment) > wikiaction.MAX_COMMENT_LENGTH:
                return page.send_page(msg = _(
                    'Comments must be less than %s characters long.' %
                    wikiaction.MAX_COMMENT_LENGTH))
            else:
                # encode as "v<version>c<comment>" for the edit log
                comment = 'v%s' % str(version)
                comment = "%sc%s" % (comment, entered_comment)
        else:
            return
        # 'permanent' (hard-delete newer versions) is admin-only;
        # 'noshowrc' additionally hides the revert from Recent Changes
        if (request.form.has_key('permanent') and
            request.form['permanent'][0] and request.user.may.admin(page)):
            permanent = True
            if (request.form.has_key('noshowrc') and
                request.form['noshowrc'][0]):
                showrc = False
        pg = PageEditor(pagename, request)
        savemsg = revert_to_page(oldpg, request, pg, comment=comment,
                                 permanent=permanent, showrc=showrc)
        return pg.send_page(msg=savemsg,
                            force_regenerate_content=(permanent and not
                                                      showrc))
    # get version
    if request.form.has_key('version'):
        version = request.form['version'][0]
    else:
        return page.send_page(msg= _(
            'Please use the interactive user interface to revert pages!'))
    oldpg = Page(pagename, request, version=version)
    # send revert form
    url = page.url()
    ticket = _createTicket()
    button = _('Revert')
    comment_label = _("Reason for the revert:")
    if request.user.may.admin(page):
        # admins also get the 'permanent' checkbox; inline JavaScript
        # reveals the "don't log on Recent Changes" option when checked
        admin_label = (
            '<p>Permanently remove newer versions: '
            '<input id="noshowrctoggle" type="checkbox" name="permanent" '
            'value="1"><span id="noshowrc">'
            "Don't log on Recent Changes: "
            '<input type="checkbox" name="noshowrc" value="1"></span></p>\n'
            '<script type="text/javascript">\n'
            "document.getElementById('noshowrc').style.visibility = "
            "'hidden';\n"
            "document.getElementById('noshowrc').style.paddingLeft = "
            "'1em';\n"
            "document.getElementById('noshowrctoggle').onclick = "
            "function () {\n"
            "document.getElementById('noshowrc').style.visibility = "
            "document.getElementById('noshowrctoggle').checked ? "
            "'visible' : 'hidden'; \n"
            "}\n"
            "</script>\n"
            )
    else:
        admin_label = ''
    formhtml = (
        '<form method="GET" action="%(url)s">'
        '<input type="hidden" name="action" value="%(actname)s">'
        '<input type="hidden" name="ticket" value="%(ticket)s">'
        '<input type="hidden" name="version" value="%(version)s">'
        '<p>'
        '%(comment_label)s'
        '</p>'
        '<input type="text" name="comment" size="60" maxlength="80">'
        '<input type="submit" name="button" value="%(button)s">'
        '%(admin_label)s'
        '</form>' % {
            'url': url,
            'actname': actname,
            'ticket': ticket,
            'button': button,
            'comment_label': comment_label,
            'version': version,
            'admin_label': admin_label,
        })
    return oldpg.send_page(msg=formhtml)
def _createTicket(tm = None):
    """
    Create a ticket using a site-specific secret (the config)

    The ticket is "<timestamp>.<sha1 hexdigest>", where the digest mixes
    the timestamp with every string-valued config setting so tickets are
    only valid for this site's configuration.
    """
    # NOTE: "%010x" % time.time() formats a float with %x, which only
    # works on Python 2 (this module is Python 2 code throughout).
    ticket = (tm or "%010x" % time.time())
    digest = hashlib.new('sha1')
    digest.update(ticket)
    cfgvars = vars(config)
    # fold every string config value into the digest (order of dict
    # values is not guaranteed but is stable within one process run)
    for var in cfgvars.values():
        if type(var) is types.StringType:
            digest.update(repr(var))
    return ticket + '.' + digest.hexdigest()
def _checkTicket(ticket):
    """Return whether ``ticket`` was minted by _createTicket on this site.

    Recreates the ticket from its timestamp prefix and compares the two.
    """
    timestamp = ticket.partition('.')[0]
    expected = _createTicket(timestamp)
    return expected == ticket
| rtucker/sycamore | Sycamore/action/revert.py | Python | gpl-2.0 | 13,563 |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Unit tests for application package."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import functools
import json
import os
import shutil
import socket
import tempfile
try:
# python version >= 3.3
from unittest import mock
except ImportError:
import mock
import posixpath
import ntpath
import six
import tensorflow as tf
from werkzeug import test as werkzeug_test
from werkzeug import wrappers
from tensorboard import main as tensorboard
from tensorboard.backend import application
from tensorboard.backend.event_processing import plugin_event_multiplexer as event_multiplexer # pylint: disable=line-too-long
from tensorboard.plugins import base_plugin
class FakePlugin(base_plugin.TBPlugin):
  """A plugin with no functionality, used to exercise application wiring."""

  def __init__(self, context, plugin_name, is_active_value, routes_mapping,
               construction_callback=None):
    """Constructs a fake plugin.

    Args:
      context: The TBContext magic container; forwarded only to
        `construction_callback`.
      plugin_name: The name this plugin reports.
      is_active_value: The value `is_active()` should return.
      routes_mapping: A dict mapping route paths (string URL paths) to the
        handlers invoked for requests on those routes.
      construction_callback: An optional callable invoked with the TBContext
        when the plugin is constructed.
    """
    self.plugin_name = plugin_name
    self._active = is_active_value
    self._routes = routes_mapping
    if construction_callback:
      construction_callback(context)

  def get_plugin_apps(self):
    """Returns the route-to-handler mapping offered by this plugin."""
    return self._routes

  def is_active(self):
    """Returns whether this plugin reports itself as active."""
    return self._active
class TensorboardServerTest(tf.test.TestCase):
  """Exercises the basic TensorBoardWSGI app with two fake plugins."""

  _only_use_meta_graph = False  # Server data contains only a GraphDef

  def setUp(self):
    # One active ('foo') and one inactive ('bar') plugin.
    plugins = [
        FakePlugin(
            None, plugin_name='foo', is_active_value=True, routes_mapping={}),
        FakePlugin(
            None, plugin_name='bar', is_active_value=False, routes_mapping={}),
    ]
    app = application.TensorBoardWSGI(plugins)
    # werkzeug's test client drives the WSGI app without a real server.
    self.server = werkzeug_test.Client(app, wrappers.BaseResponse)

  def _get_json(self, path):
    """GETs `path`, asserts a 200 JSON response, and returns the payload."""
    response = self.server.get(path)
    self.assertEqual(200, response.status_code)
    self.assertEqual('application/json', response.headers.get('Content-Type'))
    return json.loads(response.get_data().decode('utf-8'))

  def testBasicStartup(self):
    """Start the server up and then shut it down immediately."""
    pass

  def testRequestNonexistentPage(self):
    """Request a page that doesn't exist; it should 404."""
    response = self.server.get('/asdf')
    self.assertEqual(404, response.status_code)

  def testPluginsListing(self):
    """Test the format of the data/plugins_listing endpoint."""
    parsed_object = self._get_json('/data/plugins_listing')
    # Plugin foo is active. Plugin bar is not.
    self.assertEqual(parsed_object, {'foo': True, 'bar': False})
class TensorboardServerBaseUrlTest(tf.test.TestCase):
  """Exercises TensorBoardWSGI routing when mounted under a path prefix."""

  _only_use_meta_graph = False  # Server data contains only a GraphDef
  path_prefix = '/test'

  def setUp(self):
    plugins = [
        FakePlugin(
            None, plugin_name='foo', is_active_value=True, routes_mapping={}),
        FakePlugin(
            None, plugin_name='bar', is_active_value=False, routes_mapping={}),
    ]
    app = application.TensorBoardWSGI(plugins, path_prefix=self.path_prefix)
    self.server = werkzeug_test.Client(app, wrappers.BaseResponse)

  def _get_json(self, path):
    """GETs `path`, asserts a 200 JSON response, and returns the payload."""
    response = self.server.get(path)
    self.assertEqual(200, response.status_code)
    self.assertEqual('application/json', response.headers.get('Content-Type'))
    return json.loads(response.get_data().decode('utf-8'))

  def testBaseUrlRequest(self):
    """Request the bare prefix itself; it should 404 (no root route)."""
    response = self.server.get(self.path_prefix)
    self.assertEqual(404, response.status_code)

  def testBaseUrlRequestNonexistentPage(self):
    """Request a page that doesn't exist; it should 404."""
    response = self.server.get(self.path_prefix + '/asdf')
    self.assertEqual(404, response.status_code)

  def testBaseUrlNonexistentPluginsListing(self):
    """A plugins listing outside the configured prefix should 404."""
    response = self.server.get('/non_existent_prefix/data/plugins_listing')
    self.assertEqual(404, response.status_code)

  def testPluginsListing(self):
    """Test the format of the data/plugins_listing endpoint."""
    parsed_object = self._get_json(self.path_prefix + '/data/plugins_listing')
    # Plugin foo is active. Plugin bar is not.
    self.assertEqual(parsed_object, {'foo': True, 'bar': False})
class TensorboardServerPluginNameTest(tf.test.TestCase):
  """Validates the plugin-name checking done by TensorBoardWSGIApp."""

  def _test(self, name, should_be_okay):
    """Builds an app containing a plugin named `name`.

    Expects construction to raise ValueError unless `should_be_okay`.
    """
    temp_dir = tempfile.mkdtemp(prefix=self.get_temp_dir())
    self.addCleanup(shutil.rmtree, temp_dir)
    multiplexer = event_multiplexer.EventMultiplexer(
        size_guidance=application.DEFAULT_SIZE_GUIDANCE,
        purge_orphaned_data=True)
    plugins = [
        FakePlugin(
            None, plugin_name='foo', is_active_value=True, routes_mapping={}),
        FakePlugin(
            None, plugin_name=name, is_active_value=True, routes_mapping={}),
        FakePlugin(
            None, plugin_name='bar', is_active_value=False, routes_mapping={}),
    ]
    if should_be_okay:
      application.TensorBoardWSGIApp(
          temp_dir, plugins, multiplexer, reload_interval=0,
          path_prefix='')
    else:
      with six.assertRaisesRegex(self, ValueError, r'invalid name'):
        application.TensorBoardWSGIApp(
            temp_dir, plugins, multiplexer, reload_interval=0,
            path_prefix='')

  def testEmptyName(self):
    self._test('', False)

  def testNameWithSlashes(self):
    self._test('scalars/data', False)

  def testNameWithSpaces(self):
    self._test('my favorite plugin', False)

  def testSimpleName(self):
    self._test('scalars', True)

  def testComprehensiveName(self):
    self._test('Scalar-Dashboard_3000.1', True)
class TensorboardServerPluginRouteTest(tf.test.TestCase):
  """Validates the plugin-route checking done by TensorBoardWSGIApp."""

  def _test(self, route, should_be_okay):
    """Builds an app whose plugin offers `route`.

    Expects construction to raise ValueError unless `should_be_okay`.
    """
    temp_dir = tempfile.mkdtemp(prefix=self.get_temp_dir())
    self.addCleanup(shutil.rmtree, temp_dir)
    multiplexer = event_multiplexer.EventMultiplexer(
        size_guidance=application.DEFAULT_SIZE_GUIDANCE,
        purge_orphaned_data=True)
    plugins = [
        FakePlugin(
            None,
            plugin_name='foo',
            is_active_value=True,
            routes_mapping={route: lambda environ, start_response: None}),
    ]
    if should_be_okay:
      application.TensorBoardWSGIApp(
          temp_dir, plugins, multiplexer, reload_interval=0, path_prefix='')
    else:
      with six.assertRaisesRegex(self, ValueError, r'invalid route'):
        application.TensorBoardWSGIApp(
            temp_dir, plugins, multiplexer, reload_interval=0, path_prefix='')

  def testNormalRoute(self):
    self._test('/runs', True)

  def testEmptyRoute(self):
    self._test('', False)

  def testSlashlessRoute(self):
    # Routes must begin with a forward slash.
    self._test('runaway', False)
class TensorboardServerUsingMetagraphOnlyTest(TensorboardServerTest):
  """Re-runs TensorboardServerTest with MetaGraphDef-only server data."""
  # Tests new ability to use only the MetaGraphDef
  _only_use_meta_graph = True  # Server data contains only a MetaGraphDef
class ParseEventFilesSpecTest(tf.test.TestCase):
  """Tests parse_event_files_spec against POSIX and Windows path rules."""

  def assertPlatformSpecificLogdirParsing(self, pathObj, logdir, expected):
    """
    A custom assertion to test :func:`parse_event_files_spec` under various
    systems.

    Args:
        pathObj: a custom replacement object for `os.path`, typically
          `posixpath` or `ntpath`
        logdir: the string to be parsed by
          :func:`~application.TensorBoardWSGIApp.parse_event_files_spec`
        expected: the expected dictionary as returned by
          :func:`~application.TensorBoardWSGIApp.parse_event_files_spec`
    """
    # Patching os.path simulates running on the target OS's path rules.
    with mock.patch('os.path', pathObj):
      self.assertEqual(application.parse_event_files_spec(logdir), expected)

  def testRunName(self):
    self.assertPlatformSpecificLogdirParsing(
        posixpath, 'lol:/cat', {'/cat': 'lol'})
    self.assertPlatformSpecificLogdirParsing(
        ntpath, 'lol:C:\\cat', {'C:\\cat': 'lol'})

  def testPathWithColonThatComesAfterASlash_isNotConsideredARunName(self):
    self.assertPlatformSpecificLogdirParsing(
        posixpath, '/lol:/cat', {'/lol:/cat': None})

  def testMultipleDirectories(self):
    self.assertPlatformSpecificLogdirParsing(
        posixpath, '/a,/b', {'/a': None, '/b': None})
    self.assertPlatformSpecificLogdirParsing(
        ntpath, 'C:\\a,C:\\b', {'C:\\a': None, 'C:\\b': None})

  def testNormalizesPaths(self):
    self.assertPlatformSpecificLogdirParsing(
        posixpath, '/lol/.//cat/../cat', {'/lol/cat': None})
    self.assertPlatformSpecificLogdirParsing(
        ntpath, 'C:\\lol\\.\\\\cat\\..\\cat', {'C:\\lol\\cat': None})

  def testAbsolutifies(self):
    self.assertPlatformSpecificLogdirParsing(
        posixpath, 'lol/cat', {posixpath.realpath('lol/cat'): None})
    self.assertPlatformSpecificLogdirParsing(
        ntpath, 'lol\\cat', {ntpath.realpath('lol\\cat'): None})

  def testRespectsGCSPath(self):
    self.assertPlatformSpecificLogdirParsing(
        posixpath, 'gs://foo/path', {'gs://foo/path': None})
    self.assertPlatformSpecificLogdirParsing(
        ntpath, 'gs://foo/path', {'gs://foo/path': None})

  def testRespectsHDFSPath(self):
    self.assertPlatformSpecificLogdirParsing(
        posixpath, 'hdfs://foo/path', {'hdfs://foo/path': None})
    self.assertPlatformSpecificLogdirParsing(
        ntpath, 'hdfs://foo/path', {'hdfs://foo/path': None})

  def testDoesNotExpandUserInGCSPath(self):
    self.assertPlatformSpecificLogdirParsing(
        posixpath, 'gs://~/foo/path', {'gs://~/foo/path': None})
    self.assertPlatformSpecificLogdirParsing(
        ntpath, 'gs://~/foo/path', {'gs://~/foo/path': None})

  def testDoesNotNormalizeGCSPath(self):
    self.assertPlatformSpecificLogdirParsing(
        posixpath, 'gs://foo/./path//..', {'gs://foo/./path//..': None})
    self.assertPlatformSpecificLogdirParsing(
        ntpath, 'gs://foo/./path//..', {'gs://foo/./path//..': None})

  def testRunNameWithGCSPath(self):
    self.assertPlatformSpecificLogdirParsing(
        posixpath, 'lol:gs://foo/path', {'gs://foo/path': 'lol'})
    self.assertPlatformSpecificLogdirParsing(
        ntpath, 'lol:gs://foo/path', {'gs://foo/path': 'lol'})

  def testSingleLetterGroup(self):
    self.assertPlatformSpecificLogdirParsing(
        posixpath, 'A:/foo/path', {'/foo/path': 'A'})
    # single letter groups are not supported on Windows
    # (they are indistinguishable from drive letters there)
    with self.assertRaises(AssertionError):
      self.assertPlatformSpecificLogdirParsing(
          ntpath, 'A:C:\\foo\\path', {'C:\\foo\\path': 'A'})
class TensorBoardPluginsTest(tf.test.TestCase):
  """Checks plugin route registration and the name-to-instance mapping."""

  def setUp(self):
    self.context = None
    # Plugins are passed as factories (partials); the app constructs them
    # and each construction records the TBContext via the callback below.
    plugins = [
        functools.partial(
            FakePlugin,
            plugin_name='foo',
            is_active_value=True,
            routes_mapping={'/foo_route': self._foo_handler},
            construction_callback=self._construction_callback),
        functools.partial(
            FakePlugin,
            plugin_name='bar',
            is_active_value=True,
            routes_mapping={'/bar_route': self._bar_handler},
            construction_callback=self._construction_callback),
    ]
    # The application should have added routes for both plugins.
    self.app = application.standard_tensorboard_wsgi('', True, 60, plugins)

  def _foo_handler(self):
    pass

  def _bar_handler(self):
    pass

  def _construction_callback(self, context):
    """Called when a plugin is constructed."""
    self.context = context

  def testPluginsAdded(self):
    # The routes are prefixed with /data/plugin/[plugin name].
    self.assertDictContainsSubset({
        '/data/plugin/foo/foo_route': self._foo_handler,
        '/data/plugin/bar/bar_route': self._bar_handler,
    }, self.app.data_applications)

  def testNameToPluginMapping(self):
    # The mapping from plugin name to instance should include both plugins.
    mapping = self.context.plugin_name_to_instance
    self.assertItemsEqual(['foo', 'bar'], list(mapping.keys()))
    self.assertEqual('foo', mapping['foo'].plugin_name)
    self.assertEqual('bar', mapping['bar'].plugin_name)
class TensorboardSimpleServerConstructionTest(tf.test.TestCase):
  """Tests that the default HTTP server is constructed without error.

  Mostly useful for IPv4/IPv6 testing. This test should run with only IPv4,
  only IPv6, and both IPv4 and IPv6 enabled.
  """

  class _StubApplication(object):
    # Minimal stand-in for the WSGI app; only needs to exist.
    tag = ''

  def testMakeServerBlankHost(self):
    # Test that we can bind to all interfaces without throwing an error
    server, url = tensorboard.make_simple_server(
        self._StubApplication(),
        host='',
        port=0)  # Grab any available port
    self.assertTrue(server)
    self.assertTrue(url)

  def testSpecifiedHost(self):
    # At least one of IPv4/IPv6 must bind successfully on this machine.
    one_passed = False
    try:
      _, url = tensorboard.make_simple_server(
          self._StubApplication(),
          host='127.0.0.1',
          port=0)
      self.assertStartsWith(actual=url, expected_start='http://127.0.0.1:')
      one_passed = True
    except socket.error:
      # IPv4 is not supported
      pass
    try:
      _, url = tensorboard.make_simple_server(
          self._StubApplication(),
          host='::1',
          port=0)
      self.assertStartsWith(actual=url, expected_start='http://[::1]:')
      one_passed = True
    except socket.error:
      # IPv6 is not supported
      pass
    self.assertTrue(one_passed)  # We expect either IPv4 or IPv6 to be supported
# NOTE: class name misspells "Application"; kept as-is since test runners
# and any references select it by this name.
class TensorBoardApplcationConstructionTest(tf.test.TestCase):
  """Checks that invalid plugin configurations are rejected."""

  def testExceptions(self):
    logdir = '/fake/foo'
    multiplexer = event_multiplexer.EventMultiplexer()

    # Fails if there is an unnamed plugin
    with self.assertRaises(ValueError):
      # This plugin lacks a name.
      plugins = [
          FakePlugin(
              None, plugin_name=None, is_active_value=True, routes_mapping={}),
      ]
      application.TensorBoardWSGIApp(logdir, plugins, multiplexer, 0, '')

    # Fails if there are two plugins with same name
    with self.assertRaises(ValueError):
      plugins = [
          FakePlugin(
              None, plugin_name='foo', is_active_value=True, routes_mapping={}),
          FakePlugin(
              None, plugin_name='foo', is_active_value=True, routes_mapping={}),
      ]
      application.TensorBoardWSGIApp(logdir, plugins, multiplexer, 0, '')
class DbTest(tf.test.TestCase):
  """Tests the SQLite database URI handling and connection provider."""

  def testSqliteDb(self):
    db_uri = 'sqlite:' + os.path.join(self.get_temp_dir(), 'db')
    db_module, db_connection_provider = application.get_database_info(db_uri)
    # The returned module must implement the DB-API 2.0 surface.
    self.assertTrue(hasattr(db_module, 'Date'))
    with contextlib.closing(db_connection_provider()) as conn:
      with conn:
        with contextlib.closing(conn.cursor()) as c:
          c.execute('create table peeps (name text)')
          c.execute('insert into peeps (name) values (?)', ('justine',))
    # Re-opening via a fresh provider must see the committed data.
    _, db_connection_provider = application.get_database_info(db_uri)
    with contextlib.closing(db_connection_provider()) as conn:
      with contextlib.closing(conn.cursor()) as c:
        c.execute('select name from peeps')
        self.assertEqual(('justine',), c.fetchone())

  def testTransactionRollback(self):
    db_uri = 'sqlite:' + os.path.join(self.get_temp_dir(), 'db')
    _, db_connection_provider = application.get_database_info(db_uri)
    with contextlib.closing(db_connection_provider()) as conn:
      with conn:
        with contextlib.closing(conn.cursor()) as c:
          c.execute('create table peeps (name text)')
      try:
        # Raising out of the `with conn` block must roll back the insert.
        with conn:
          with contextlib.closing(conn.cursor()) as c:
            c.execute('insert into peeps (name) values (?)', ('justine',))
          raise IOError('hi')
      except IOError:
        pass
      with contextlib.closing(conn.cursor()) as c:
        c.execute('select name from peeps')
        self.assertIsNone(c.fetchone())

  def testTransactionRollback_doesntDoAnythingIfIsolationLevelIsNone(self):
    # NOTE: This is a terrible idea. Don't do this.
    db_uri = ('sqlite:' + os.path.join(self.get_temp_dir(), 'db') +
              '?isolation_level=null')
    _, db_connection_provider = application.get_database_info(db_uri)
    with contextlib.closing(db_connection_provider()) as conn:
      with conn:
        with contextlib.closing(conn.cursor()) as c:
          c.execute('create table peeps (name text)')
      try:
        with conn:
          with contextlib.closing(conn.cursor()) as c:
            c.execute('insert into peeps (name) values (?)', ('justine',))
          raise IOError('hi')
      except IOError:
        pass
      with contextlib.closing(conn.cursor()) as c:
        c.execute('select name from peeps')
        # Autocommit mode: the insert survived despite the exception.
        self.assertEqual(('justine',), c.fetchone())

  def testSqliteUriErrors(self):
    with self.assertRaises(ValueError):
      application.create_sqlite_connection_provider("lol:cat")
    with self.assertRaises(ValueError):
      application.create_sqlite_connection_provider("sqlite::memory:")
    with self.assertRaises(ValueError):
      application.create_sqlite_connection_provider("sqlite://foo.example/bar")
if __name__ == '__main__':
  # Run every TestCase in this module under the TensorFlow test runner.
  tf.test.main()
| francoisluus/tensorboard-supervise | tensorboard/backend/application_test.py | Python | apache-2.0 | 18,600 |
"""
.. _tut_io_export_pandas:
=================================
Export epochs to Pandas DataFrame
=================================
In this example the pandas exporter will be used to produce a DataFrame
object. After exploring some basic features a split-apply-combine
work flow will be conducted to examine the latencies of the response
maxima across epochs and conditions.
Note. Equivalent methods are available for raw and evoked data objects.
Short Pandas Primer
-------------------
Pandas Data Frames
~~~~~~~~~~~~~~~~~~
A data frame can be thought of as a combination of matrix, list and dict:
It knows about linear algebra and element-wise operations but is size mutable
and allows for labeled access to its data. In addition, the pandas data frame
class provides many useful methods for restructuring, reshaping and visualizing
data. As most methods return data frame instances, operations can be chained
with ease; this allows to write efficient one-liners. Technically a DataFrame
can be seen as a high-level container for numpy arrays and hence switching
back and forth between numpy arrays and DataFrames is very easy.
Taken together, these features qualify data frames for inter operation with
databases and for interactive data exploration / analysis.
Additionally, pandas interfaces with the R statistical computing language that
covers a huge amount of statistical functionality.
Export Options
~~~~~~~~~~~~~~
The pandas exporter comes with a few options worth being commented.
Pandas DataFrame objects use a so called hierarchical index. This can be
thought of as an array of unique tuples, in our case, representing the higher
dimensional MEG data in a 2D data table. The column names are the channel names
from the epoch object. The channels can be accessed like entries of a
dictionary:
df['MEG 2333']
Epochs and time slices can be accessed with the .ix method:
epochs_df.ix[(1, 2), 'MEG 2333']
However, it is also possible to include this index as regular categorial data
columns which yields a long table format typically used for repeated measure
designs. To take control of this feature, on export, you can specify which
of the three dimensions 'condition', 'epoch' and 'time' is passed to the Pandas
index using the index parameter. Note that this decision is revertible any
time, as demonstrated below.
Similarly, for convenience, it is possible to scale the times, e.g. from
seconds to milliseconds.
Some Instance Methods
~~~~~~~~~~~~~~~~~~~~~
Most numpy methods and many ufuncs can be found as instance methods, e.g.
mean, median, var, std, mul, max, argmax, etc.
Below an incomplete listing of additional useful data frame instance methods:
apply : apply function to data.
Any kind of custom function can be applied to the data. In combination with
lambda this can be very useful.
describe : quickly generate summary stats
Very useful for exploring data.
groupby : generate subgroups and initialize a 'split-apply-combine' operation.
Creates a group object. Subsequently, methods like apply, agg, or transform
can be used to manipulate the underlying data separately but
simultaneously. Finally, reset_index can be used to combine the results
back into a data frame.
plot : wrapper around plt.plot
However it comes with some special options. For examples see below.
shape : shape attribute
gets the dimensions of the data frame.
values :
return underlying numpy array.
to_records :
export data as numpy record array.
to_dict :
export data as dict of arrays.
Reference
~~~~~~~~~
More information and additional introductory materials can be found at the
pandas doc sites: http://pandas.pydata.org/pandas-docs/stable/
"""
# Author: Denis Engemann <[email protected]>
#
# License: BSD (3-clause)
import mne
import matplotlib.pyplot as plt
import numpy as np
from mne.datasets import sample
print(__doc__)
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'

raw = mne.io.read_raw_fif(raw_fname)

# For simplicity we will only consider the first 10 epochs
events = mne.read_events(event_fname)[:10]

# Add a bad channel
raw.info['bads'] += ['MEG 2443']
picks = mne.pick_types(raw.info, meg='grad', eeg=False, eog=True,
                       stim=False, exclude='bads')

tmin, tmax = -0.2, 0.5
baseline = (None, 0)
reject = dict(grad=4000e-13, eog=150e-6)

event_id = dict(auditory_l=1, auditory_r=2, visual_l=3, visual_r=4)

epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True, picks=picks,
                    baseline=baseline, preload=True, reject=reject)

###############################################################################
# Export DataFrame

# The following parameters will scale the channels and times plotting
# friendly. The info columns 'epoch' and 'time' will be used as hierarchical
# index whereas the condition is treated as categorial data. Note that
# this is optional. By passing None you could also print out all nesting
# factors in a long table style commonly used for analyzing repeated measure
# designs.

index, scale_time, scalings = ['epoch', 'time'], 1e3, dict(grad=1e13)

df = epochs.to_data_frame(picks=None, scalings=scalings, scale_time=scale_time,
                          index=index)

# Create MEG channel selector and drop EOG channel.
meg_chs = [c for c in df.columns if 'MEG' in c]

df.pop('EOG 061')  # this works just like with a list.

###############################################################################
# Explore Pandas MultiIndex

# Pandas is using a MultiIndex or hierarchical index to handle higher
# dimensionality while at the same time representing data in a flat 2d manner.

print(df.index.names, df.index.levels)

# Inspecting the index object unveils that 'epoch', 'time' are used
# for subsetting data. We can take advantage of that by using the
# .ix attribute, where in this case the first position indexes the MultiIndex
# and the second the columns, that is, channels.

# Plot some channels across the first three epochs
xticks, sel = np.arange(3, 600, 120), meg_chs[:15]
df.ix[:3, sel].plot(xticks=xticks)
mne.viz.tight_layout()

# slice the time starting at t0 in epoch 2 and ending 500ms after
# the base line in epoch 3. Note that the second part of the tuple
# represents time in milliseconds from stimulus onset.
df.ix[(1, 0):(3, 500), sel].plot(xticks=xticks)
mne.viz.tight_layout()

# Note: For convenience the index was converted from floating point values
# to integer values. To restore the original values you can e.g. say
# df['times'] = np.tile(epoch.times, len(epochs_times)

# We now reset the index of the DataFrame to expose some Pandas
# pivoting functionality. To simplify the groupby operation we
# we drop the indices to treat epoch and time as categroial factors.
df = df.reset_index()

# The ensuing DataFrame then is split into subsets reflecting a crossing
# between condition and trial number. The idea is that we can broadcast
# operations into each cell simultaneously.
factors = ['condition', 'epoch']
sel = factors + ['MEG 1332', 'MEG 1342']
grouped = df[sel].groupby(factors)

# To make the plot labels more readable let's edit the values of 'condition'.
# (Done once; an accidental second application used to append the trailing
# space twice.)
df.condition = df.condition.apply(lambda name: name + ' ')

# Now we compare the mean of two channels response across conditions.
grouped.mean().plot(kind='bar', stacked=True, title='Mean MEG Response',
                    color=['steelblue', 'orange'])
mne.viz.tight_layout()

# We can even accomplish more complicated tasks in a few lines calling
# apply method and passing a function. Assume we wanted to know the time
# slice of the maximum response for each condition.
max_latency = grouped[sel[2]].apply(lambda x: df.time[x.argmax()])

print(max_latency)

plt.figure()
max_latency.plot(kind='barh', title='Latency of Maximum Reponse',
                 color=['steelblue'])
mne.viz.tight_layout()

# Finally, we will again remove the index to create a proper data table that
# can be used with statistical packages like statsmodels or R.

final_df = max_latency.reset_index()
# DataFrame.rename returns a new frame (it is not in-place), so the result
# must be assigned back; previously the renamed frame was discarded and the
# printed table kept the anonymous column label 0.
final_df = final_df.rename(columns={0: sel[2]})  # as the index is oblivious of names.

# The index is now written into regular columns so it can be used as factor.
print(final_df)

plt.show()

# To save as csv file, uncomment the next line.
# final_df.to_csv('my_epochs.csv')

# Note. Data Frames can be easily concatenated, e.g., across subjects.
# E.g. say:
#
# import pandas as pd
# group = pd.concat([df_1, df_2])
# group['subject'] = np.r_[np.ones(len(df_1)), np.ones(len(df_2)) + 1]
#!/usr/bin/env python
'''
EC2 external inventory script
=================================
Generates inventory that Ansible can understand by making API request to
AWS EC2 using the Boto library.
NOTE: This script assumes Ansible is being executed where the environment
variables needed for Boto have already been set:
export AWS_ACCESS_KEY_ID='AK123'
export AWS_SECRET_ACCESS_KEY='abc123'
This script also assumes there is an ec2.ini file alongside it. To specify a
different path to ec2.ini, define the EC2_INI_PATH environment variable:
export EC2_INI_PATH=/path/to/my_ec2.ini
If you're using eucalyptus you need to set the above variables and
you need to define:
export EC2_URL=http://hostname_of_your_cc:port/services/Eucalyptus
For more details, see: http://docs.pythonboto.org/en/latest/boto_config_tut.html
When run against a specific host, this script returns the following variables:
- ec2_ami_launch_index
- ec2_architecture
- ec2_association
- ec2_attachTime
- ec2_attachment
- ec2_attachmentId
- ec2_client_token
- ec2_deleteOnTermination
- ec2_description
- ec2_deviceIndex
- ec2_dns_name
- ec2_eventsSet
- ec2_group_name
- ec2_hypervisor
- ec2_id
- ec2_image_id
- ec2_instanceState
- ec2_instance_type
- ec2_ipOwnerId
- ec2_ip_address
- ec2_item
- ec2_kernel
- ec2_key_name
- ec2_launch_time
- ec2_monitored
- ec2_monitoring
- ec2_networkInterfaceId
- ec2_ownerId
- ec2_persistent
- ec2_placement
- ec2_platform
- ec2_previous_state
- ec2_private_dns_name
- ec2_private_ip_address
- ec2_publicIp
- ec2_public_dns_name
- ec2_ramdisk
- ec2_reason
- ec2_region
- ec2_requester_id
- ec2_root_device_name
- ec2_root_device_type
- ec2_security_group_ids
- ec2_security_group_names
- ec2_shutdown_state
- ec2_sourceDestCheck
- ec2_spot_instance_request_id
- ec2_state
- ec2_state_code
- ec2_state_reason
- ec2_status
- ec2_subnet_id
- ec2_tenancy
- ec2_virtualization_type
- ec2_vpc_id
These variables are pulled out of a boto.ec2.instance object. There is a lack of
consistency with variable spellings (camelCase and underscores) since this
just loops through all variables the object exposes. It is preferred to use the
ones with underscores when multiple exist.
In addition, if an instance has AWS Tags associated with it, each tag is a new
variable named:
- ec2_tag_[Key] = [Value]
Security groups are comma-separated in 'ec2_security_group_ids' and
'ec2_security_group_names'.
'''
# (c) 2012, Peter Sankauskas
#
# This file is part of Ansible,
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
######################################################################
import sys
import os
import argparse
import re
from time import time
import boto
from boto import ec2
from boto import rds
from boto import route53
import ConfigParser
from collections import defaultdict
try:
import json
except ImportError:
import simplejson as json
class Ec2Inventory(object):
    """Ansible dynamic-inventory provider backed by AWS EC2/RDS/Route53.

    Running the script builds (or reads from a file cache) a JSON inventory
    mapping groups (regions, zones, instance types, tags, security groups,
    ...) to reachable host addresses, and prints it to stdout.

    NOTE(review): this module targets Python 2 (print statements,
    ``except Exc, e`` syntax, ``dict.iteritems``); it will not run on
    Python 3 without porting.
    """
    def _empty_inventory(self):
        # Minimal skeleton Ansible expects: a "_meta" section for hostvars.
        return {"_meta" : {"hostvars" : {}}}
    def __init__(self):
        ''' Main execution path '''
        # Inventory grouped by instance IDs, tags, security groups, regions,
        # and availability zones
        self.inventory = self._empty_inventory()
        # Index of hostname (address) to instance ID
        self.index = {}
        # Read settings and parse CLI arguments
        self.read_settings()
        self.parse_cli_args()
        # Cache
        if self.args.refresh_cache:
            self.do_api_calls_update_cache()
        elif not self.is_cache_valid():
            self.do_api_calls_update_cache()
        # Data to print
        if self.args.host:
            data_to_print = self.get_host_info()
        elif self.args.list:
            # Display list of instances for inventory
            if self.inventory == self._empty_inventory():
                data_to_print = self.get_inventory_from_cache()
            else:
                data_to_print = self.json_format_dict(self.inventory, True)
        print data_to_print
    def is_cache_valid(self):
        ''' Determines if the cache files have expired, or if it is still valid '''
        if os.path.isfile(self.cache_path_cache):
            mod_time = os.path.getmtime(self.cache_path_cache)
            current_time = time()
            if (mod_time + self.cache_max_age) > current_time:
                # Both cache files must exist for the cache to be usable.
                if os.path.isfile(self.cache_path_index):
                    return True
        return False
    def read_settings(self):
        ''' Reads the settings from the ec2.ini file '''
        config = ConfigParser.SafeConfigParser()
        ec2_default_ini_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'ec2.ini')
        ec2_ini_path = os.environ.get('EC2_INI_PATH', ec2_default_ini_path)
        config.read(ec2_ini_path)
        # is eucalyptus?
        self.eucalyptus_host = None
        self.eucalyptus = False
        if config.has_option('ec2', 'eucalyptus'):
            self.eucalyptus = config.getboolean('ec2', 'eucalyptus')
        if self.eucalyptus and config.has_option('ec2', 'eucalyptus_host'):
            self.eucalyptus_host = config.get('ec2', 'eucalyptus_host')
        # Regions
        self.regions = []
        configRegions = config.get('ec2', 'regions')
        configRegions_exclude = config.get('ec2', 'regions_exclude')
        if (configRegions == 'all'):
            if self.eucalyptus_host:
                self.regions.append(boto.connect_euca(host=self.eucalyptus_host).region.name)
            else:
                for regionInfo in ec2.regions():
                    if regionInfo.name not in configRegions_exclude:
                        self.regions.append(regionInfo.name)
        else:
            self.regions = configRegions.split(",")
        # Destination addresses
        self.destination_variable = config.get('ec2', 'destination_variable')
        self.vpc_destination_variable = config.get('ec2', 'vpc_destination_variable')
        # Route53
        self.route53_enabled = config.getboolean('ec2', 'route53')
        self.route53_excluded_zones = []
        if config.has_option('ec2', 'route53_excluded_zones'):
            # NOTE(review): the third positional argument of ConfigParser.get
            # is 'raw', not a default value - verify this call's intent.
            self.route53_excluded_zones.extend(
                config.get('ec2', 'route53_excluded_zones', '').split(','))
        # Include RDS instances?
        self.rds_enabled = True
        if config.has_option('ec2', 'rds'):
            self.rds_enabled = config.getboolean('ec2', 'rds')
        # Return all EC2 and RDS instances (if RDS is enabled)
        if config.has_option('ec2', 'all_instances'):
            self.all_instances = config.getboolean('ec2', 'all_instances')
        else:
            self.all_instances = False
        if config.has_option('ec2', 'all_rds_instances') and self.rds_enabled:
            self.all_rds_instances = config.getboolean('ec2', 'all_rds_instances')
        else:
            self.all_rds_instances = False
        # Cache related
        cache_dir = os.path.expanduser(config.get('ec2', 'cache_path'))
        if not os.path.exists(cache_dir):
            os.makedirs(cache_dir)
        self.cache_path_cache = cache_dir + "/ansible-ec2.cache"
        self.cache_path_index = cache_dir + "/ansible-ec2.index"
        self.cache_max_age = config.getint('ec2', 'cache_max_age')
        # Configure nested groups instead of flat namespace.
        if config.has_option('ec2', 'nested_groups'):
            self.nested_groups = config.getboolean('ec2', 'nested_groups')
        else:
            self.nested_groups = False
        # Do we need to just include hosts that match a pattern?
        try:
            pattern_include = config.get('ec2', 'pattern_include')
            if pattern_include and len(pattern_include) > 0:
                self.pattern_include = re.compile(pattern_include)
            else:
                self.pattern_include = None
        except ConfigParser.NoOptionError, e:
            self.pattern_include = None
        # Do we need to exclude hosts that match a pattern?
        try:
            pattern_exclude = config.get('ec2', 'pattern_exclude');
            if pattern_exclude and len(pattern_exclude) > 0:
                self.pattern_exclude = re.compile(pattern_exclude)
            else:
                self.pattern_exclude = None
        except ConfigParser.NoOptionError, e:
            self.pattern_exclude = None
        # Instance filters (see boto and EC2 API docs)
        self.ec2_instance_filters = defaultdict(list)
        if config.has_option('ec2', 'instance_filters'):
            for x in config.get('ec2', 'instance_filters', '').split(','):
                filter_key, filter_value = x.split('=')
                self.ec2_instance_filters[filter_key].append(filter_value)
    def parse_cli_args(self):
        ''' Command line argument processing '''
        parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on EC2')
        parser.add_argument('--list', action='store_true', default=True,
                           help='List instances (default: True)')
        parser.add_argument('--host', action='store',
                           help='Get all the variables about a specific instance')
        parser.add_argument('--refresh-cache', action='store_true', default=False,
                           help='Force refresh of cache by making API requests to EC2 (default: False - use cache files)')
        self.args = parser.parse_args()
    def do_api_calls_update_cache(self):
        ''' Do API calls to each region, and save data in cache files '''
        if self.route53_enabled:
            self.get_route53_records()
        for region in self.regions:
            self.get_instances_by_region(region)
            if self.rds_enabled:
                self.get_rds_instances_by_region(region)
        self.write_to_cache(self.inventory, self.cache_path_cache)
        self.write_to_cache(self.index, self.cache_path_index)
    def get_instances_by_region(self, region):
        ''' Makes an AWS EC2 API call to the list of instances in a particular
        region '''
        try:
            if self.eucalyptus:
                conn = boto.connect_euca(host=self.eucalyptus_host)
                conn.APIVersion = '2010-08-31'
            else:
                conn = ec2.connect_to_region(region)
            # connect_to_region will fail "silently" by returning None if the region name is wrong or not supported
            if conn is None:
                print("region name: %s likely not supported, or AWS is down. connection to region failed." % region)
                sys.exit(1)
            reservations = []
            if self.ec2_instance_filters:
                # Each configured filter becomes a separate API call.
                for filter_key, filter_values in self.ec2_instance_filters.iteritems():
                    reservations.extend(conn.get_all_instances(filters = { filter_key : filter_values }))
            else:
                reservations = conn.get_all_instances()
            for reservation in reservations:
                for instance in reservation.instances:
                    self.add_instance(instance, region)
        except boto.exception.BotoServerError, e:
            if not self.eucalyptus:
                print "Looks like AWS is down again:"
                print e
                sys.exit(1)
    def get_rds_instances_by_region(self, region):
        ''' Makes an AWS API call to the list of RDS instances in a particular
        region '''
        try:
            conn = rds.connect_to_region(region)
            if conn:
                instances = conn.get_all_dbinstances()
                for instance in instances:
                    self.add_rds_instance(instance, region)
        except boto.exception.BotoServerError, e:
            # "Forbidden" simply means the account has no RDS access; that is
            # not a fatal condition for EC2-only inventories.
            if not e.reason == "Forbidden":
                print "Looks like AWS RDS is down: "
                print e
                sys.exit(1)
    def get_instance(self, region, instance_id):
        ''' Gets details about a specific instance '''
        if self.eucalyptus:
            conn = boto.connect_euca(self.eucalyptus_host)
            conn.APIVersion = '2010-08-31'
        else:
            conn = ec2.connect_to_region(region)
        # connect_to_region will fail "silently" by returning None if the region name is wrong or not supported
        if conn is None:
            print("region name: %s likely not supported, or AWS is down. connection to region failed." % region)
            sys.exit(1)
        reservations = conn.get_all_instances([instance_id])
        # Returns the first instance found; implicitly returns None if the
        # instance id matched nothing.
        for reservation in reservations:
            for instance in reservation.instances:
                return instance
    def add_instance(self, instance, region):
        ''' Adds an instance to the inventory and index, as long as it is
        addressable '''
        # Only want running instances unless all_instances is True
        if not self.all_instances and instance.state != 'running':
            return
        # Select the best destination address
        if instance.subnet_id:
            dest = getattr(instance, self.vpc_destination_variable)
        else:
            dest = getattr(instance, self.destination_variable)
        if not dest:
            # Skip instances we cannot address (e.g. private VPC subnet)
            return
        # if we only want to include hosts that match a pattern, skip those that don't
        if self.pattern_include and not self.pattern_include.match(dest):
            return
        # if we need to exclude hosts that match a pattern, skip those
        if self.pattern_exclude and self.pattern_exclude.match(dest):
            return
        # Add to index
        self.index[dest] = [region, instance.id]
        # Inventory: Group by instance ID (always a group of 1)
        self.inventory[instance.id] = [dest]
        if self.nested_groups:
            self.push_group(self.inventory, 'instances', instance.id)
        # Inventory: Group by region
        if self.nested_groups:
            self.push_group(self.inventory, 'regions', region)
        else:
            self.push(self.inventory, region, dest)
        # Inventory: Group by availability zone
        self.push(self.inventory, instance.placement, dest)
        if self.nested_groups:
            self.push_group(self.inventory, region, instance.placement)
        # Inventory: Group by instance type
        type_name = self.to_safe('type_' + instance.instance_type)
        self.push(self.inventory, type_name, dest)
        if self.nested_groups:
            self.push_group(self.inventory, 'types', type_name)
        # Inventory: Group by key pair
        if instance.key_name:
            key_name = self.to_safe('key_' + instance.key_name)
            self.push(self.inventory, key_name, dest)
            if self.nested_groups:
                self.push_group(self.inventory, 'keys', key_name)
        # Inventory: Group by security group
        try:
            for group in instance.groups:
                key = self.to_safe("security_group_" + group.name)
                self.push(self.inventory, key, dest)
                if self.nested_groups:
                    self.push_group(self.inventory, 'security_groups', key)
        except AttributeError:
            # Older boto Instance objects have no .groups attribute.
            print 'Package boto seems a bit older.'
            print 'Please upgrade boto >= 2.3.0.'
            sys.exit(1)
        # Inventory: Group by tag keys
        for k, v in instance.tags.iteritems():
            key = self.to_safe("tag_" + k + "=" + v)
            self.push(self.inventory, key, dest)
            if self.nested_groups:
                self.push_group(self.inventory, 'tags', self.to_safe("tag_" + k))
                self.push_group(self.inventory, self.to_safe("tag_" + k), key)
        # Inventory: Group by Route53 domain names if enabled
        if self.route53_enabled:
            route53_names = self.get_instance_route53_names(instance)
            for name in route53_names:
                self.push(self.inventory, name, dest)
                if self.nested_groups:
                    self.push_group(self.inventory, 'route53', name)
        # Global Tag: tag all EC2 instances
        self.push(self.inventory, 'ec2', dest)
        self.inventory["_meta"]["hostvars"][dest] = self.get_host_info_dict_from_instance(instance)
    def add_rds_instance(self, instance, region):
        ''' Adds an RDS instance to the inventory and index, as long as it is
        addressable '''
        # Only want available instances unless all_rds_instances is True
        if not self.all_rds_instances and instance.status != 'available':
            return
        # Select the best destination address
        #if instance.subnet_id:
            #dest = getattr(instance, self.vpc_destination_variable)
        #else:
            #dest = getattr(instance, self.destination_variable)
        dest = instance.endpoint[0]
        if not dest:
            # Skip instances we cannot address (e.g. private VPC subnet)
            return
        # Add to index
        self.index[dest] = [region, instance.id]
        # Inventory: Group by instance ID (always a group of 1)
        self.inventory[instance.id] = [dest]
        if self.nested_groups:
            self.push_group(self.inventory, 'instances', instance.id)
        # Inventory: Group by region
        if self.nested_groups:
            self.push_group(self.inventory, 'regions', region)
        else:
            self.push(self.inventory, region, dest)
        # Inventory: Group by availability zone
        self.push(self.inventory, instance.availability_zone, dest)
        if self.nested_groups:
            self.push_group(self.inventory, region, instance.availability_zone)
        # Inventory: Group by instance type
        type_name = self.to_safe('type_' + instance.instance_class)
        self.push(self.inventory, type_name, dest)
        if self.nested_groups:
            self.push_group(self.inventory, 'types', type_name)
        # Inventory: Group by security group
        try:
            if instance.security_group:
                key = self.to_safe("security_group_" + instance.security_group.name)
                self.push(self.inventory, key, dest)
                if self.nested_groups:
                    self.push_group(self.inventory, 'security_groups', key)
        except AttributeError:
            print 'Package boto seems a bit older.'
            print 'Please upgrade boto >= 2.3.0.'
            sys.exit(1)
        # Inventory: Group by engine
        self.push(self.inventory, self.to_safe("rds_" + instance.engine), dest)
        if self.nested_groups:
            self.push_group(self.inventory, 'rds_engines', self.to_safe("rds_" + instance.engine))
        # Inventory: Group by parameter group
        self.push(self.inventory, self.to_safe("rds_parameter_group_" + instance.parameter_group.name), dest)
        if self.nested_groups:
            self.push_group(self.inventory, 'rds_parameter_groups', self.to_safe("rds_parameter_group_" + instance.parameter_group.name))
        # Global Tag: all RDS instances
        self.push(self.inventory, 'rds', dest)
        self.inventory["_meta"]["hostvars"][dest] = self.get_host_info_dict_from_instance(instance)
    def get_route53_records(self):
        ''' Get and store the map of resource records to domain names that
        point to them. '''
        r53_conn = route53.Route53Connection()
        all_zones = r53_conn.get_zones()
        # zone.name carries a trailing dot; strip it before comparing with
        # the configured exclusion list.
        route53_zones = [ zone for zone in all_zones if zone.name[:-1]
                          not in self.route53_excluded_zones ]
        self.route53_records = {}
        for zone in route53_zones:
            rrsets = r53_conn.get_all_rrsets(zone.id)
            for record_set in rrsets:
                record_name = record_set.name
                if record_name.endswith('.'):
                    record_name = record_name[:-1]
                for resource in record_set.resource_records:
                    self.route53_records.setdefault(resource, set())
                    self.route53_records[resource].add(record_name)
    def get_instance_route53_names(self, instance):
        ''' Check if an instance is referenced in the records we have from
        Route53. If it is, return the list of domain names pointing to said
        instance. If nothing points to it, return an empty list. '''
        instance_attributes = [ 'public_dns_name', 'private_dns_name',
                                'ip_address', 'private_ip_address' ]
        name_list = set()
        for attrib in instance_attributes:
            try:
                value = getattr(instance, attrib)
            except AttributeError:
                continue
            if value in self.route53_records:
                name_list.update(self.route53_records[value])
        return list(name_list)
    def get_host_info_dict_from_instance(self, instance):
        ''' Flatten a boto instance object into a dict of ec2_* hostvars. '''
        instance_vars = {}
        for key in vars(instance):
            value = getattr(instance, key)
            key = self.to_safe('ec2_' + key)
            # Handle complex types
            # state/previous_state changed to properties in boto in https://github.com/boto/boto/commit/a23c379837f698212252720d2af8dec0325c9518
            if key == 'ec2__state':
                instance_vars['ec2_state'] = instance.state or ''
                instance_vars['ec2_state_code'] = instance.state_code
            elif key == 'ec2__previous_state':
                instance_vars['ec2_previous_state'] = instance.previous_state or ''
                instance_vars['ec2_previous_state_code'] = instance.previous_state_code
            elif type(value) in [int, bool]:
                instance_vars[key] = value
            elif type(value) in [str, unicode]:
                instance_vars[key] = value.strip()
            elif type(value) == type(None):
                instance_vars[key] = ''
            elif key == 'ec2_region':
                instance_vars[key] = value.name
            elif key == 'ec2__placement':
                instance_vars['ec2_placement'] = value.zone
            elif key == 'ec2_tags':
                for k, v in value.iteritems():
                    key = self.to_safe('ec2_tag_' + k)
                    instance_vars[key] = v
            elif key == 'ec2_groups':
                group_ids = []
                group_names = []
                for group in value:
                    group_ids.append(group.id)
                    group_names.append(group.name)
                instance_vars["ec2_security_group_ids"] = ','.join(group_ids)
                instance_vars["ec2_security_group_names"] = ','.join(group_names)
            else:
                pass
                # TODO Product codes if someone finds them useful
                #print key
                #print type(value)
                #print value
        return instance_vars
    def get_host_info(self):
        ''' Get variables about a specific host '''
        if len(self.index) == 0:
            # Need to load index from cache
            self.load_index_from_cache()
        if not self.args.host in self.index:
            # try updating the cache
            self.do_api_calls_update_cache()
            if not self.args.host in self.index:
                # host migh not exist anymore
                return self.json_format_dict({}, True)
        (region, instance_id) = self.index[self.args.host]
        instance = self.get_instance(region, instance_id)
        return self.json_format_dict(self.get_host_info_dict_from_instance(instance), True)
    def push(self, my_dict, key, element):
        ''' Push an element onto an array that may not have been defined in
        the dict '''
        group_info = my_dict.setdefault(key, [])
        if isinstance(group_info, dict):
            # Nested-group layout: hosts live under the 'hosts' key.
            host_list = group_info.setdefault('hosts', [])
            host_list.append(element)
        else:
            group_info.append(element)
    def push_group(self, my_dict, key, element):
        ''' Push a group as a child of another group. '''
        parent_group = my_dict.setdefault(key, {})
        if not isinstance(parent_group, dict):
            # Convert a flat host list into the nested-group dict layout.
            parent_group = my_dict[key] = {'hosts': parent_group}
        child_groups = parent_group.setdefault('children', [])
        if element not in child_groups:
            child_groups.append(element)
    def get_inventory_from_cache(self):
        ''' Reads the inventory from the cache file and returns it as a JSON
        object '''
        # NOTE(review): file handle is never closed (no 'with'/close()).
        cache = open(self.cache_path_cache, 'r')
        json_inventory = cache.read()
        return json_inventory
    def load_index_from_cache(self):
        ''' Reads the index from the cache file sets self.index '''
        # NOTE(review): file handle is never closed (no 'with'/close()).
        cache = open(self.cache_path_index, 'r')
        json_index = cache.read()
        self.index = json.loads(json_index)
    def write_to_cache(self, data, filename):
        ''' Writes data in JSON format to a file '''
        json_data = self.json_format_dict(data, True)
        cache = open(filename, 'w')
        cache.write(json_data)
        cache.close()
    def to_safe(self, word):
        ''' Converts 'bad' characters in a string to underscores so they can be
        used as Ansible groups '''
        # Anything except ASCII letters, digits and hyphens becomes '_'.
        return re.sub("[^A-Za-z0-9\-]", "_", word)
    def json_format_dict(self, data, pretty=False):
        ''' Converts a dict to a JSON object and dumps it as a formatted
        string '''
        if pretty:
            return json.dumps(data, sort_keys=True, indent=2)
        else:
            return json.dumps(data)
# Run the inventory build only when executed as a script, so the module can
# be imported (e.g. by tests or other tooling) without triggering AWS API
# calls or printing to stdout.
if __name__ == '__main__':
    Ec2Inventory()
import subprocess
import sys
import csv
import re
import os, os.path
from fnmatch import fnmatch
# Template for the Java scheduler's config file; the placeholder receives
# the path of the preference csv to schedule from.
CONFIG = "DATA_FILE={}\nALLOW_GREEDY=true\n"
# Matches dates such as 1/2/2014, 01-02-2014 or 1.2.2014; the backreference
# \1 forces the same separator character in both positions.
DATE_CHECK = re.compile("[0-9]{1,2}([/\\-\\.])[0-9]{1,2}\\1[0-9]{4}")
def print_command_err(command, ret_code):
    """Report a failed subprocess command, then exit with its return code."""
    # Escape spaces so the printed command can be copy/pasted into a shell.
    make_arg = lambda x: x.replace(" ", "\\ ")
    command = [make_arg(_) for _ in command]
    cmd_str = " ".join(command)
    print "ExecutionError: command {%s} returned error code %d when executing" % (cmd_str, ret_code)
    sys.exit(ret_code)
def call_command(command):
    """Run *command* via subprocess; on a nonzero exit status, report the
    failure and terminate the process with that status."""
    status = subprocess.call(command)
    if status != 0:
        print_command_err(command, status)
def write_config_file(filename):
    """(Re)write scheduler.config so the scheduler reads *filename*."""
    with open("scheduler.config", "w+") as config_file:
        config_file.write(CONFIG.format(filename))
def get_temp_file(original_file):
    """Return the matching .temp path, with spaces replaced by underscores."""
    base = os.path.splitext(original_file)[0]
    return base.replace(" ", "_") + ".temp"
def merge_prefs(pref_files):
    """ Merges the primary preference files into one map. NOTE: Assumes csv files are identically structured """
    ra_map = {}
    name_index = 0
    for ff in pref_files:
        with open(ff, "r") as rf:
            csv_reader = csv.reader(rf, lineterminator="\n")
            header = None
            for row in csv_reader:
                if not header:
                    # First row is the header: build column-name -> index map.
                    header = {row[i]: i for i in xrange(len(row))}
                    name_index = header["Name"]
                else:
                    # Keep only the date columns and the "Duties" column.
                    ra_map[row[name_index]] = {i: row[header[i]]
                                               for i in header
                                               if (DATE_CHECK.match(i) or i == "Duties")}
    # NOTE(review): 'header' leaks out of the loop; this raises NameError if
    # pref_files is empty, and reflects only the LAST file's header.
    return ra_map, [_ for _ in header.keys()
                    if (DATE_CHECK.match(_) or _ == "Duties" or _ == "Name")]
def update_secondary_prefs(ra_map, primary_files):
    """Mark each RA's assigned primary duties in *ra_map*.

    Each row of a primary-duty file is ``name, duty, duty, ...``; every
    listed duty is given preference "0" and the RA's "Duties" field is set
    to "UNKNOWN".  The map is mutated in place and returned for convenience.
    """
    for path in primary_files:
        with open(path, "r") as handle:
            for record in csv.reader(handle, lineterminator="\n"):
                name, duties = record[0], record[1:]
                prefs = ra_map[name]
                prefs["Duties"] = "UNKNOWN"
                for duty in duties:
                    prefs[duty] = "0"
    return ra_map
def write_new_prefs(new_prefs, headings):
    """Write secondary.csv: Name column first, date columns ordered by
    day-of-month, Duties column last."""
    def column_rank(heading):
        # Name sorts first (0), Duties last (32); date columns order by the
        # second date component.
        if heading == "Name":
            return 0
        elif heading == "Duties":
            return 32
        elif DATE_CHECK.match(heading):
            return int(heading.split("/")[1])
    with open("secondary.csv", "w+") as out:
        writer = csv.writer(out, lineterminator="\n")
        ordered = sorted(headings, key=column_rank)
        writer.writerow(ordered)
        for name in new_prefs.keys():
            prefs = new_prefs[name]
            writer.writerow([name] + [prefs[col] for col in ordered if col != "Name"])
def create_secondary_prefs(pref_files, primary_data_files):
    """Derive and write the secondary-duty preference file (secondary.csv)
    from the primary preference csvs and the scheduler's primary output."""
    merged, headers = merge_prefs(pref_files)
    updated = update_secondary_prefs(merged, primary_data_files)
    write_new_prefs(updated, headers)
# Driver: parse the Excel preference workbook, compile and run the Java
# scheduler for primary duties per csv, then build and schedule secondary
# duties.  NOTE(review): Python 2 only (raw_input, python2 subcommand).
if __name__ == "__main__":
    excel_file = sys.argv[1]
    # Convert the Excel workbook of preferences into per-group csv files.
    call_command(["python2", "pref_parser.py", excel_file])
    call_command(["ant", "compile"])
    csv_files = [_ for _ in os.listdir(".") if fnmatch(_, "*.csv")]
    # Move the generated csv files next to the compiled Java scheduler.
    for ff in csv_files:
        os.rename(ff, "/".join(["build", ff]))
    os.chdir("build/")
    for filename in csv_files:
        write_config_file(filename)
        raw_input("Please fill in the number of primary duties for each RA in the file %s.\nPress Enter when finished" % (filename))
        call_command(["java", "duty_scheduler.Scheduler"])
    create_secondary_prefs(csv_files, [get_temp_file(filename) for filename in csv_files])
    raw_input("Please fill in the number of secondary duties for each RA in the file secondary.csv.\nPress Enter when finished")
    write_config_file("secondary.csv")
    # "-s" switches the Java scheduler into secondary-duty mode.
    call_command(["java", "duty_scheduler.Scheduler", "-s"])
| Unit4TechProjects/ChoiceOptimization | run.py | Python | gpl-2.0 | 3,621 |
import pygame
import random
import item
import mob
import tile
class Mapgen(object):
def __init__(self, level):
self.xsiz = 10
self.ysiz = 10
self.biome = "random"
self.procedure = 0
self.zone = []
self.level = level
self.sizefactor = 2
#self.items = pygame.sprite.Group()
#self.mobs = pygame.sprite.Group()
#creates the base map
def generate(self,x,y,biome):
self.zone = []
self.xsiz = x
self.ysiz = y
self.biome = biome
self.sizefactor = (x/10)+(y/10)
landtype = 0
#for num in range(sizefactor*3):
# itemo = item.Item(self.level, self.level.items)
# itemo.set_type(random.randrange(6)+1)
#for umb in range(sizefactor*3):
# mobbo = mob.Mob(self.level, self.level.mobs)
# mobbo.set_type(random.randrange(7))
# mobbo.set_species(random.randrange(4)+1)
#main land generation
for a in range(x):
mapcol = []
for b in range(y):
#Purely Random
if (self.procedure == 0):
landtype = random.randrange(17)+1
#probability manipulation
if (self.procedure == 1):
if (biome == "grassland"):
common = [1,2,3,13]
uncommon = [4,5,6,7]
rare = [8,9,10]
vrare = [12,15]
self.level.passable = 1
if(biome == "forest"):
common = [3,4,5,9]
uncommon = [1,2,6]
rare = [7,13]
vrare = [10,11,12]
self.level.passable = 2
if(biome == "desert"):
common = [8,7]
uncommon = [16,17]
rare = [9,13]
vrare = [1,2]
self.level.passable = 7
landex = random.randrange(256)
if landex < 256:
landtype = random.choice(common)
if landex < 64:
landtype = random.choice(uncommon)
if landex < 16:
landtype = random.choice(rare)
if landex < 2:
landtype = random.choice(vrare)
#generate the tiles
acre = tile.Land(self.level, self.level.terrain)
if a == 0 or b == 0 or a == x-1 or b == y-1:
acre.set_type(0)
self.level.space.add(acre)
for mobbo in self.level.mobs:
mobbo.unpassable.add(acre)
else:
acre.set_type(landtype)
acre.get_image()
acre.spawn(a, b)
self.level.background.add(acre)
mapcol.append(acre)
self.zone.append( mapcol )
for a in range(len(self.zone)):
for b in range(len(self.zone[0])):
place = self.zone[a][b]
if place in self.level.space:
pass
else:
for wa in range(3):
for ha in range(3):
if a+wa-1 >= len(self.zone) or b+ha-1 >= len(self.zone[0]):
pass
else:
place.neighbors.add(self.zone[a+wa-1][b+ha-1])
return self.zone
#causes deserts to expand
def desertify(self):
for place in self.level.terrain:
place.desert_check()
#causes forests to grow
def grow_forest(self):
for place in self.level.terrain:
place.forest_check()
#lowers sea level
def sea_lower(self):
for place in self.level.terrain:
if place.flavnum == 15:
if random.randrange(100) < 80:
place.set_type(14)
if place.flavnum == 14:
if random.randrange(100) < 70:
place.set_type(13)
if place.flavnum == 13:
if random.randrange(100) < 60:
place.set_type(1)
#raises sea level
def sea_fill(self):
for place in self.level.terrain:
excepts = [0,15,14,12,11,10]
if place.flavnum == 15:
for location in place.neighbors:
if location.flavnum in excepts:
pass
else:
location.set_type(14)
if place.flavnum == 14:
for location in place.neighbors:
if location.flavnum in excepts:
pass
else:
location.set_type(13)
if place.flavnum == 13:
for location in place.neighbors:
if random.randrange(100) < 10:
if location.flavnum in excepts:
pass
else:
location.set_type(13)
#populates the map with mobs
def populate(self, density):
for a in range(self.sizefactor*density):
mobbo = mob.Mob(self.level, self.level.mobs)
mobbo.set_type(random.randrange(7))
mobbo.set_species(random.randrange(4)+1)
mobbo.unpassable.add(self.level.space)
mobbo.spawn(random.randrange(len(self.zone)-2)+1,random.randrange(len(self.zone[0])-2)+1)
if mobbo.mapx == self.level.player1.mapx and mobbo.mapy == self.level.player1.mapy:
mobbo.kill()
#adds items to the map
def litter(self, density):
for a in range(self.sizefactor*density):
itemo = item.Item(self.level, self.level.items)
itemo.set_type(random.randrange(8))
itemo.spawn(random.randrange(len(self.zone)-2)+1,random.randrange(len(self.zone[0])-2)+1)
if itemo.mapx == self.level.player1.mapx and itemo.mapy == self.level.player1.mapy:
itemo.kill()
#adds landmarks
def monumentalize(self, number):
for a in range(number):
monument = tile.Landmark(self.level, self.level.background)
monument.set_type(random.randrange(4))
monument.spawn(random.randrange(len(self.zone)-3)+1,random.randrange(len(self.zone[0])-3)+1)
pygame.sprite.spritecollide(monument, self.level.landmarks, True)
self.level.landmarks.add(monument)
| Lincoln-Cybernetics/Explore- | mapgen.py | Python | unlicense | 7,161 |
import pytest
from killranswers.connections import cassandra
@pytest.fixture(scope="module", autouse=True)
def connect():
cassandra()
| rustyrazorblade/killranswers | tests/conftest.py | Python | bsd-3-clause | 139 |
"""Provide functionality to interact with the vlc telnet interface."""
from __future__ import annotations
from datetime import datetime
from functools import wraps
from typing import Any, Callable, TypeVar, cast
from aiovlc.client import Client
from aiovlc.exceptions import AuthError, CommandError, ConnectError
from homeassistant.components.media_player import MediaPlayerEntity
from homeassistant.components.media_player.const import (
MEDIA_TYPE_MUSIC,
SUPPORT_CLEAR_PLAYLIST,
SUPPORT_NEXT_TRACK,
SUPPORT_PAUSE,
SUPPORT_PLAY,
SUPPORT_PLAY_MEDIA,
SUPPORT_PREVIOUS_TRACK,
SUPPORT_SEEK,
SUPPORT_SHUFFLE_SET,
SUPPORT_STOP,
SUPPORT_VOLUME_MUTE,
SUPPORT_VOLUME_SET,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_NAME, STATE_IDLE, STATE_PAUSED, STATE_PLAYING
from homeassistant.core import HomeAssistant
from homeassistant.helpers.device_registry import DeviceEntryType
from homeassistant.helpers.entity import DeviceInfo
from homeassistant.helpers.entity_platform import AddEntitiesCallback
import homeassistant.util.dt as dt_util
from .const import DATA_AVAILABLE, DATA_VLC, DEFAULT_NAME, DOMAIN, LOGGER
MAX_VOLUME = 500
SUPPORT_VLC = (
SUPPORT_CLEAR_PLAYLIST
| SUPPORT_NEXT_TRACK
| SUPPORT_PAUSE
| SUPPORT_PLAY
| SUPPORT_PLAY_MEDIA
| SUPPORT_PREVIOUS_TRACK
| SUPPORT_SEEK
| SUPPORT_SHUFFLE_SET
| SUPPORT_STOP
| SUPPORT_VOLUME_MUTE
| SUPPORT_VOLUME_SET
)
Func = TypeVar("Func", bound=Callable[..., Any])
async def async_setup_entry(
hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback
) -> None:
"""Set up the vlc platform."""
# CONF_NAME is only present in imported YAML.
name = entry.data.get(CONF_NAME) or DEFAULT_NAME
vlc = hass.data[DOMAIN][entry.entry_id][DATA_VLC]
available = hass.data[DOMAIN][entry.entry_id][DATA_AVAILABLE]
async_add_entities([VlcDevice(entry, vlc, name, available)], True)
def catch_vlc_errors(func: Func) -> Func:
"""Catch VLC errors."""
@wraps(func)
async def wrapper(self: VlcDevice, *args: Any, **kwargs: Any) -> Any:
"""Catch VLC errors and modify availability."""
try:
await func(self, *args, **kwargs)
except CommandError as err:
LOGGER.error("Command error: %s", err)
except ConnectError as err:
# pylint: disable=protected-access
if self._available:
LOGGER.error("Connection error: %s", err)
self._available = False
return cast(Func, wrapper)
class VlcDevice(MediaPlayerEntity):
    """Representation of a vlc player."""

    def __init__(
        self, config_entry: ConfigEntry, vlc: Client, name: str, available: bool
    ) -> None:
        """Initialize the vlc device."""
        self._config_entry = config_entry
        self._name = name
        # Playback state mirrored from the VLC telnet interface; all None
        # until the first successful update.
        self._volume: float | None = None
        self._muted: bool | None = None
        self._state: str | None = None
        self._media_position_updated_at: datetime | None = None
        self._media_position: int | None = None
        self._media_duration: int | None = None
        self._vlc = vlc
        self._available = available
        # Volume level saved before muting, restored on unmute.
        self._volume_bkp = 0.0
        self._media_artist: str | None = None
        self._media_title: str | None = None
        config_entry_id = config_entry.entry_id
        self._attr_unique_id = config_entry_id
        self._attr_device_info = DeviceInfo(
            entry_type=DeviceEntryType.SERVICE,
            identifiers={(DOMAIN, config_entry_id)},
            manufacturer="VideoLAN",
            name=name,
        )

    @catch_vlc_errors
    async def async_update(self) -> None:
        """Get the latest details from the device."""
        if not self._available:
            # (Re)establish the telnet connection before polling.
            try:
                await self._vlc.connect()
            except ConnectError as err:
                LOGGER.debug("Connection error: %s", err)
                return
            try:
                await self._vlc.login()
            except AuthError:
                # Credentials rejected: reload the config entry so the user
                # can re-authenticate.
                LOGGER.debug("Failed to login to VLC")
                self.hass.async_create_task(
                    self.hass.config_entries.async_reload(self._config_entry.entry_id)
                )
                return
            self._state = STATE_IDLE
            self._available = True
            LOGGER.info("Connected to vlc host: %s", self._vlc.host)
        status = await self._vlc.status()
        LOGGER.debug("Status: %s", status)
        # VLC reports volume on a 0..MAX_VOLUME scale; normalize to 0..1.
        self._volume = status.audio_volume / MAX_VOLUME
        state = status.state
        if state == "playing":
            self._state = STATE_PLAYING
        elif state == "paused":
            self._state = STATE_PAUSED
        else:
            self._state = STATE_IDLE
        if self._state != STATE_IDLE:
            self._media_duration = (await self._vlc.get_length()).length
            time_output = await self._vlc.get_time()
            vlc_position = time_output.time
            # Check if current position is stale.
            if vlc_position != self._media_position:
                self._media_position_updated_at = dt_util.utcnow()
                self._media_position = vlc_position
        info = await self._vlc.info()
        data = info.data
        LOGGER.debug("Info data: %s", data)
        # NOTE(review): metadata appears keyed both by integer index 0 and by
        # the literal key "data" — confirm against the telnet client's info()
        # payload shape.
        self._media_artist = data.get(0, {}).get("artist")
        self._media_title = data.get(0, {}).get("title")
        if not self._media_title:
            # Fall back to filename.
            if data_info := data.get("data"):
                self._media_title = data_info["filename"]

    @property
    def name(self) -> str:
        """Return the name of the device."""
        return self._name

    @property
    def state(self) -> str | None:
        """Return the state of the device."""
        return self._state

    @property
    def available(self) -> bool:
        """Return True if entity is available."""
        return self._available

    @property
    def volume_level(self) -> float | None:
        """Volume level of the media player (0..1)."""
        return self._volume

    @property
    def is_volume_muted(self) -> bool | None:
        """Boolean if volume is currently muted."""
        return self._muted

    @property
    def supported_features(self) -> int:
        """Flag media player features that are supported."""
        return SUPPORT_VLC

    @property
    def media_content_type(self) -> str:
        """Content type of current playing media."""
        return MEDIA_TYPE_MUSIC

    @property
    def media_duration(self) -> int | None:
        """Duration of current playing media in seconds."""
        return self._media_duration

    @property
    def media_position(self) -> int | None:
        """Position of current playing media in seconds."""
        return self._media_position

    @property
    def media_position_updated_at(self) -> datetime | None:
        """When was the position of the current playing media valid."""
        return self._media_position_updated_at

    @property
    def media_title(self) -> str | None:
        """Title of current playing media."""
        return self._media_title

    @property
    def media_artist(self) -> str | None:
        """Artist of current playing media, music track only."""
        return self._media_artist

    @catch_vlc_errors
    async def async_media_seek(self, position: float) -> None:
        """Seek the media to a specific location."""
        await self._vlc.seek(round(position))

    @catch_vlc_errors
    async def async_mute_volume(self, mute: bool) -> None:
        """Mute the volume."""
        assert self._volume is not None
        if mute:
            # Remember the current level so unmuting can restore it.
            self._volume_bkp = self._volume
            await self.async_set_volume_level(0)
        else:
            await self.async_set_volume_level(self._volume_bkp)
        self._muted = mute

    @catch_vlc_errors
    async def async_set_volume_level(self, volume: float) -> None:
        """Set volume level, range 0..1."""
        await self._vlc.set_volume(round(volume * MAX_VOLUME))
        self._volume = volume
        if self._muted and self._volume > 0:
            # This can happen if we were muted and then see a volume_up.
            self._muted = False

    @catch_vlc_errors
    async def async_media_play(self) -> None:
        """Send play command."""
        await self._vlc.play()
        self._state = STATE_PLAYING

    @catch_vlc_errors
    async def async_media_pause(self) -> None:
        """Send pause command."""
        status = await self._vlc.status()
        if status.state != "paused":
            # Make sure we're not already paused since VLCTelnet.pause() toggles
            # pause.
            await self._vlc.pause()
        self._state = STATE_PAUSED

    @catch_vlc_errors
    async def async_media_stop(self) -> None:
        """Send stop command."""
        await self._vlc.stop()
        self._state = STATE_IDLE

    @catch_vlc_errors
    async def async_play_media(
        self, media_type: str, media_id: str, **kwargs: Any
    ) -> None:
        """Play media from a URL or file."""
        if media_type != MEDIA_TYPE_MUSIC:
            LOGGER.error(
                "Invalid media type %s. Only %s is supported",
                media_type,
                MEDIA_TYPE_MUSIC,
            )
            return
        await self._vlc.add(media_id)
        self._state = STATE_PLAYING

    @catch_vlc_errors
    async def async_media_previous_track(self) -> None:
        """Send previous track command."""
        await self._vlc.prev()

    @catch_vlc_errors
    async def async_media_next_track(self) -> None:
        """Send next track command."""
        await self._vlc.next()

    @catch_vlc_errors
    async def async_clear_playlist(self) -> None:
        """Clear players playlist."""
        await self._vlc.clear()

    @catch_vlc_errors
    async def async_set_shuffle(self, shuffle: bool) -> None:
        """Enable/disable shuffle mode."""
        # The telnet interface expects the literal strings "on"/"off".
        shuffle_command = "on" if shuffle else "off"
        await self._vlc.random(shuffle_command)
| home-assistant/home-assistant | homeassistant/components/vlc_telnet/media_player.py | Python | apache-2.0 | 10,179 |
# -*- coding: utf-8 -*-
from antispoofing.spectralcubes.datasets.replayattack import ReplayAttack
from antispoofing.spectralcubes.datasets.casia import Casia
from antispoofing.spectralcubes.datasets.maskattack import MaskAttack
from antispoofing.spectralcubes.datasets.uvad import UVAD
# Registry mapping numeric dataset identifiers (0..3) to dataset classes.
registered_datasets = dict(
    enumerate((ReplayAttack, Casia, MaskAttack, UVAD))
)
| allansp84/spectralcubes | antispoofing/spectralcubes/datasets/registered_datasets.py | Python | agpl-3.0 | 456 |
#! /usr/bin/env python
from pySecDec.loop_integral import loop_package
import pySecDec as psd
# Two-loop integral defined by its propagators; the replacement rules fix the
# external kinematics to the on-shell point s = mZ^2 with mZ normalized to 1.
li = psd.loop_integral.LoopIntegralFromPropagators(
propagators = ['k1^2', '(k1-k2)^2', '(k1-k2+p1)^2-mZ^2', '(k2)^2', '(k2+p2)^2', '(k1+p1+p2)^2', '(k2+p1)^2'],
loop_momenta = ['k1','k2'],
external_momenta = ['p1','p2','p3','p4'],
replacement_rules = [
                        ('p1*p1', 0),
                        ('p2*p2', 0),
                        ('p3*p3', 's'),
                        ('p1*p2', 's/2'),
                        ('p2*p3', '-s/2'),
                        ('p1*p3', '-s/2'),
                        ('s', 'mZ^2'),
                        ('mZ', 1)
                    ],
dimensionality='6-2*eps',
# propagator powers: entries with 0 drop the propagator, 2 squares it
powerlist=[2, 0, 2, 2, 0, 1, 0]
)
# No free kinematic parameters remain after the replacement rules above.
Mandelstam_symbols = []
mass_symbols = []
loop_package(
name = 'F1_45_2_alt',
additional_prefactor = '-exp(2*EulerGamma*eps)',
loop_integral = li,
real_parameters = Mandelstam_symbols + mass_symbols,
# the highest order of the final epsilon expansion --> change this value to whatever you think is appropriate
requested_order = 3,
# the optimization level to use in FORM (can be 0, 1, 2, 3, 4)
form_optimization_level = 2,
# the WorkSpace parameter for FORM
form_work_space = '1G',
# the method to be used for the sector decomposition
# valid values are ``iterative`` or ``geometric`` or ``geometric_ku``
decomposition_method = 'iterative',
# if you choose ``geometric[_ku]`` and 'normaliz' is not in your
# $PATH, you can set the path to the 'normaliz' command-line
# executable here
#normaliz_executable='/path/to/normaliz',
# whether or not to produce code to perform the contour deformation
# contour deformation is not required if we only want to compute euclidean points (all Mandelstam invariants negative)
contour_deformation = True,
)
| mppmu/secdec | nodist_examples/Zbb_vertex_correction_finite/F1_45_2_alt.py | Python | gpl-3.0 | 1,810 |
# Opus/UrbanSim urban simulation software.
# Copyright (C) 2005-2009 University of Washington
# See opus_core/LICENSE
from opus_core.variables.variable import Variable
from variable_functions import my_attribute_label
from opus_core.misc import safe_array_divide
class average_land_value_per_acre_from_buildings(Variable):
    """Average land value per acre of a gridcell: the total land value
    (derived from the buildings dataset) divided by the gridcell's acres."""

    land_value = "total_land_value_from_buildings"
    acres = "acres_of_land"

    def dependencies(self):
        # Both the numerator and denominator are attributes of this dataset.
        return [my_attribute_label(attr) for attr in (self.land_value, self.acres)]

    def compute(self, dataset_pool):
        dataset = self.get_dataset()
        total_value = dataset.get_attribute(self.land_value)
        # safe_array_divide yields 0 where the denominator is 0.
        return safe_array_divide(total_value, dataset.get_attribute(self.acres))

    def post_check(self, values, dataset_pool):
        # A land value per acre can never be negative.
        self.do_check("x >= 0", values)
from opus_core.tests import opus_unittest
from opus_core.tests.utils.variable_tester import VariableTester
from numpy import array
class Tests(opus_unittest.OpusTestCase):
    def test_my_inputs(self):
        """The variable should equal total land value / acres per gridcell."""
        land_values = array([0, 20050, 20050])
        acres = array([1995, 2005, 33])
        tester = VariableTester(
            __file__,
            package_order=['urbansim'],
            test_data={
                "gridcell": {
                    "grid_id": array([1, 2, 3]),
                    "total_land_value_from_buildings": land_values,
                    "acres_of_land": acres,
                }
            }
        )
        # 0/1995 = 0.0, 20050/2005 = 10.0, 20050/33 ~= 607.58
        expected = array([0.0, 10.0, 607.5757576])
        tester.test_is_close_for_variable_defined_by_this_module(self, expected)
# Run this module's unit tests when executed directly.
if __name__=='__main__':
    opus_unittest.main()
# Copyright (C) 2008 University of Maryland
# All rights reserved.
# See LICENSE.txt for details.
# Author: Christopher Metting
#Starting Date:6/12/2009
from pylab import *
from numpy import *
import numpy as np
from time import time
from ..model.sample_prep import Q_space
from .approximations import wavefunction_format
# Debug switch: when True, scatCalc skips the DWBA wavefunction solve and uses
# plain Born-approximation coefficients (pio=1, pit=0, poo=0, pot=1).
SIMULATE_BA = False
def DWBA_form(cell, lattice, beam, q, angle_in):
    """Run the DWBA calculation; returns (intensity, x angles, y angles)."""
    intensity, xvals, yvals = scatCalc(cell, lattice, beam, q, angle_in)
    return intensity, xvals, yvals
def scatCalc(cell,lattice,beam,q, angle_in):
    '''
    DWBA off-specular scattering calculation.

    Math from Kentzinger et al. in Physical Review B, 77, 1044335(2008)

    Returns (scat, xvals, yvals): a complex scattering amplitude array of
    shape (q.points[1], q.points[2]) and the in-plane / out-of-plane angle
    grids in degrees.
    '''
    #Front of Eq (20)
    # NOTE(review): m matches the neutron mass in kg; h_bar = 6.62607e-14 looks
    # like a unit-rescaled Planck constant — confirm the intended unit system
    # before reusing Vfac elsewhere.
    m = 1.674e-27
    h_bar = 6.62607e-14
    Vfac = -m/(2*pi*h_bar**2)
    wavelength = beam.wavelength
    # convert angle to radians
    angle_in = angle_in * (pi / 180)
    # determine wave vector (k)
    kvec = 2.0*pi/wavelength
    # upper and lowerbounds for reflected angle
    alphaf_min = angle_in
    alphaf_max = 100 * angle_in
    # upper and lowerbounds for in-plane angle (from the largest qy requested)
    iptheta_max = arcsin((q.q_list[1][q.q_list[1].argmax()] / kvec))
    iptheta_min = -iptheta_max
    # grab equally spaced intervals between upper and lowerbound angles
    angle_out = linspace(alphaf_min, alphaf_max, size(q.q_list[2]))
    iptheta = linspace(iptheta_min, iptheta_max, size(q.q_list[1]))
    #alphai = angle_in
    #angle_in = np.zeros_like(angle_out)
    #angle_in.fill(alphai)
    # wavevector components of the incoming and outgoing beams
    kz_out = -kvec * sin( angle_out )
    kx_out = kvec * cos( angle_out )
    ky_out = -kvec * sin( iptheta )
    kz_in = kvec * sin( angle_in )
    kx_in = kvec * cos( angle_in )
    ky_in = zeros_like( ky_out )
    scat = zeros((q.points[1], q.points[2]), dtype = 'complex')
    # Per-layer wavefunction coefficients:
    # PSI in one
    # PSI in two
    # PSI out one
    # PSI out two
    pio = [None]*cell.n[2]
    pit = [None]*cell.n[2]
    poo = [None]*cell.n[2]
    pot = [None]*cell.n[2]
    pil = [None]*cell.n[2]
    pfl = [None]*cell.n[2]
    q_piopoo = [None]*cell.n[2]
    q_piopot = [None]*cell.n[2]
    q_pitpoo = [None]*cell.n[2]
    q_pitpot = [None]*cell.n[2]
    # broadcastable coordinate axes of the unit cell
    x = cell.value_list[0].reshape((cell.n[0],1,1))
    y = cell.value_list[1].reshape((1,cell.n[1],1))
    z = cell.value_list[2].reshape((1,1,cell.n[2]))
    #Averages the in-plane scattering length density and formats the new
    #object as [SLD,Thickeness,Absorbtion] for each z layer
    SLDArray = wavefunction_format(cell.unit, cell.step[2], absorbtion = None)
    #This is the calculation of the critical edge. It is needed for the
    #calculation of p.
    pcl = sqrt(4*pi*SLDArray[:,0])
    #The cell is originally oriented so that the the bottom of the unit cell
    #is located at the origin. This flips the cell so that the stack is ordered
    #in the opposite direction.
    flipCell = zeros(shape(cell.unit))
    for i in range(cell.n[2]):
        flipCell[:,:,i] = cell.unit[:,:,shape(cell.unit)[2]-i-1]
    #This calculates the residual potential by taking the difference between
    #the reference potential and the actual potential
    Vres = flipCell - (SLDArray[:,0]).reshape((1,1,cell.n[2]))
    #This is the rho used in eq. 20. The integration is the residual potential
    #relative to the reference potential.
    rhoTilOverRho = Vres/(SLDArray[:,0]).reshape((1,1,cell.n[2]))
    # layers with zero reference SLD would divide 0/0; treat them as no residual
    rhoTilOverRho[isnan(rhoTilOverRho)] = 0.0
    #The next few lines calculate the c and d values for each layer.
    #This is done by calculating the specular reflectivity and then
    #tracing the final reflected intensity back into the sample.
    for i in range(size(iptheta)):
        print 'iptheta:', degrees(iptheta[i]), 'calculating (', i+1, 'of', size(iptheta), ')'
        for ii in range(size(angle_out)):
            if SIMULATE_BA:
                # plain Born approximation: unit incident / exit amplitudes
                pio = ones((cell.n[2],), dtype='complex')
                pit = zeros((cell.n[2],), dtype='complex')
                poo = zeros((cell.n[2],), dtype='complex')
                pot = ones((cell.n[2],), dtype='complex')
            else:
                poskiWavePar = dwbaWavefunction(kz_in,SLDArray)
                negkfWavePar = dwbaWavefunction(-kz_out[ii],SLDArray)
                pio = poskiWavePar.c
                pit = poskiWavePar.d
                poo = negkfWavePar.c
                pot = negkfWavePar.d
            for l in range(cell.n[2]):
                #Solves the equation shown after eq. 11 on page 5.
                pil[l]=sqrt(asarray((kz_in**2)-(pcl[l]**2),
                                    dtype = 'complex'))
                pfl[l]=sqrt(asarray((kz_out[ii]**2)-(pcl[l]**2),
                                    dtype = 'complex'))
                #Equations directly after eq (18).
                q_piopoo[l] = -pfl[l] - pil[l]
                q_piopot[l] = -pfl[l] + pil[l]
                q_pitpoo[l] = pfl[l] - pil[l]
                q_pitpot[l] = pfl[l] + pil[l]
            pil = asarray(pil)
            pfl = asarray(pfl)
            q_piopoo = asarray(q_piopoo)
            q_piopot = asarray(q_piopot)
            q_pitpoo = asarray(q_pitpoo)
            q_pitpot = asarray(q_pitpot)
            pio = asarray(pio)
            pit = asarray(pit)
            poo = asarray(poo)
            pot = asarray(pot)
            ########
            # EDIT: bbm 07/20/2012
            # this is not Eq. 18, which refers only to the out-of-plane (z) Laue factor
            # this is the necessary Laue factor to do the integral in eq. 20
            # as a finite sum over blocks of constant rho in the x-y plane
            ########
            qx = kx_in - kx_out[ii]
            if qx != 0:
                laux = ((-1j / qx) * (exp(1j * qx * cell.step[0]) - 1.0))
            else:
                laux = complex(cell.step[0])
            # ky_in is identically zero, so qy reduces to -ky_out[i]
            qy = -ky_out[i]
            if qy != 0:
                lauy = ((-1j / qy) * (exp(1j * qy * cell.step[1]) - 1.0))
            else:
                lauy = complex(cell.step[1])
            #Eq. 20 (including only rhoN - rhoM is assumed to be zero)
            ftwRef = (Vfac*sum(sum(rhoTilOverRho * exp(1j*qx*x)*
                           exp(1j*qy*y),axis=0),axis=0))
            # finite-sum corrections for the x and y directions
            ftwRef *= laux
            ftwRef *= lauy
            #Eq. 19
            ftwRef = ((SLDArray[:,0]).reshape((1,1,cell.n[2]))*
                      ftwRef.reshape((1,1,cell.n[2])))
            ft = ftwRef.copy()
            pioSel = pio.reshape((1,1,cell.n[2]))
            pitSel = pit.reshape((1,1,cell.n[2]))
            pooSel = poo.reshape((1,1,cell.n[2]))
            potSel = pot.reshape((1,1,cell.n[2]))
            q_piopoo_sel = q_piopoo.reshape((1,1,cell.n[2]))
            q_piopot_sel = q_piopot.reshape((1,1,cell.n[2]))
            q_pitpoo_sel = q_pitpoo.reshape((1,1,cell.n[2]))
            q_pitpot_sel = q_pitpot.reshape((1,1,cell.n[2]))
            pil_sel = pil.reshape((1,1,cell.n[2]))
            pfl_sel = pfl.reshape((1,1,cell.n[2]))
            #equation 15
            scat_PioPoo = (pioSel * exp(1j*pil_sel*z)*ft*
                           exp(1j*pfl_sel*z) * pooSel)
            scat_PioPot = (pioSel * exp(1j*pil_sel*z)*ft*
                           exp(-1j*pfl_sel*z)*potSel)
            scat_PitPoo = (pitSel * exp(-1j*pil_sel*z)*ft*
                           exp(1j*pfl_sel*z) *pooSel)
            scat_PitPot = (pitSel * exp(-1j*pil_sel*z)*ft*
                           exp(-1j*pfl_sel*z)* potSel)
            # z-direction Laue factors; the q == 0 limit is the layer thickness
            mask = (q_piopoo_sel != 0)
            scat_PioPoo[mask] *= ((-1j / q_piopoo_sel[mask]) *
                           (exp(1j *q_piopoo_sel[mask] * cell.step[2]) - 1.0))
            scat_PioPoo[q_piopoo_sel == 0] *= cell.step[2]
            mask = (q_piopot_sel != 0)
            scat_PioPot[mask] *= ((-1j / q_piopot_sel[mask]) *
                           (exp(1j *q_piopot_sel[mask] * cell.step[2]) - 1.0))
            scat_PioPot[q_piopot_sel == 0] *= cell.step[2]
            mask = (q_pitpoo_sel != 0)
            scat_PitPoo[mask] *= ((-1j / q_pitpoo_sel[mask]) *
                           (exp(1j *q_pitpoo_sel[mask] * cell.step[2]) - 1.0))
            scat_PitPoo[q_pitpoo_sel == 0] *= cell.step[2]
            mask = (q_pitpot_sel != 0)
            scat_PitPot[mask] *= ((-1j / q_pitpot_sel[mask]) *
                           (exp(1j *q_pitpot_sel[mask] * cell.step[2]) - 1.0))
            scat_PitPot[q_pitpot_sel == 0] *= cell.step[2]
            #Exactly equation15
            scat[i, ii]= sum(scat_PioPoo + scat_PioPot + scat_PitPoo + scat_PitPot)
    xvals = degrees(iptheta)
    yvals = degrees(angle_out)
    return scat, xvals, yvals
class dwbaWavefunction:
    """Specular transfer-matrix solution for a layered SLD profile.

    For each incident kz, computes the reflection amplitude ``r`` and the
    per-layer wavefunction coefficients ``c`` (downward-going) and ``d``
    (upward-going) used by scatCalc.  Negative kz values are handled by
    solving the flipped stack (see calc_r_cd).
    """
    def __init__(self, kz, SLDArray):
        # accept a scalar kz by promoting it to a 1-element complex array
        if not isinstance(kz, ndarray):
            kz = array([kz], dtype=complex)
        #kz = array([kz]).flatten().astype(complex)
        self.kz = kz
        kzlen = kz.shape
        sldlen = len(SLDArray)
        self.SLDArray = SLDArray
        self.r = zeros(kzlen, dtype=complex)
        self.kz_l = zeros((sldlen,) + kzlen, dtype=complex)
        self.c = zeros((sldlen,) + kzlen, dtype=complex)
        self.d = zeros((sldlen,) + kzlen, dtype=complex)
        # split the kz grid by sign and solve each half separately
        neg_k_mask = (self.kz < 0)
        pos_k_mask = logical_not(neg_k_mask)
        kz_ln, cn, dn, rn = self.calc_r_cd(self.kz[neg_k_mask], kz_neg=True)
        self.kz_l[:,neg_k_mask] = kz_ln
        self.c[:,neg_k_mask] = cn
        self.d[:,neg_k_mask] = dn
        self.r[neg_k_mask] = rn
        kz_l, c, d, r = self.calc_r_cd(self.kz[pos_k_mask], kz_neg=False)
        self.kz_l[:,pos_k_mask] = kz_l
        self.c[:,pos_k_mask] = c
        self.d[:,pos_k_mask] = d
        self.r[pos_k_mask] = r
    def calc_r_cd(self, kz, kz_neg=False):
        """Return [kz_l, c, d, r] for one sign of kz.

        When kz_neg is True the layer stack is reversed and the results are
        flipped back before returning.
        """
        if kz_neg==True:
            workingSLD = self.SLDArray[::-1]
        else:
            workingSLD = self.SLDArray
        layerCount = workingSLD.shape[0]
        thickness = sum(workingSLD[1:-1,1])
        # complex SLDs of the incident medium and substrate (absorption in col 2)
        SLD_inc = workingSLD[0,0] + 1j * workingSLD[0,2]
        SLD_sub = workingSLD[-1,0] + 1j * workingSLD[-1,2]
        # running product of the 2x2 layer transfer matrices
        B11 = ones(shape(kz),dtype='complex')
        B22 = ones(shape(kz),dtype='complex')
        B21 = zeros(shape(kz),dtype='complex')
        B12 = zeros(shape(kz),dtype='complex')
        M11 = [None]*layerCount
        M12 = [None]*layerCount
        M21 = [None]*layerCount
        M22 = [None]*layerCount
        Bl11 = [None]*layerCount
        Bl12 = [None]*layerCount
        Bl21 = [None]*layerCount
        Bl22 = [None]*layerCount
        # NOTE(review): the 12/22 entries are seeded with B22/B12 swapped
        # relative to the naming — verify this is intentional.
        Bl11[0] = B11
        Bl12[0] = B22
        Bl21[0] = B21
        Bl22[0] = B12
        c = [None]*layerCount
        d = [None]*layerCount
        nz =[None]*layerCount
        k0z = sqrt(asarray(kz**2 + 4 * pi * SLD_inc,dtype = 'complex'))# always positive!
        nz[0] = sqrt( complex(1) - 4 * pi * SLD_inc / k0z**2 )
        nz[-1] = sqrt( complex(1) - 4 * pi * SLD_sub / k0z**2 )
        for l in range(1, layerCount-1):
            #leaving off the incident medium and substrate from sum
            SLD,thickness,mu = workingSLD[l]
            nz[l] = sqrt(complex(1) - 4 * pi * (SLD+1j*mu)/ k0z**2 )
            kzl =( nz[l] * k0z ) # edit: BBM 02/10/2012
            n = nz[l]
            # standard optical transfer matrix for a homogeneous layer
            M11[l] = asarray(cos(kzl * thickness),dtype = 'complex')
            M12[l] = asarray(1/n * sin(kzl * thickness),dtype = 'complex')
            M21[l] = asarray((-n) * sin(kzl * thickness),dtype = 'complex')
            M22[l] = asarray(cos(kzl * thickness),dtype = 'complex')
            C1 = B11*M11[l] + B21*M12[l]
            C2 = B11*M21[l] + B21*M22[l]
            B11 = C1
            B21 = C2
            C1 = B12*M11[l] + B22*M12[l]
            C2 = B12*M21[l] + B22*M22[l]
            B12 = C1
            B22 = C2
            Bl11[l] = B11
            Bl21[l] = B21
            Bl12[l] = B12
            Bl22[l] = B22
        kz_l = nz * k0z
        # reflection amplitude from the accumulated transfer matrix
        r = (B11 + (1j * nz[0] * B12) + (1/(1j * nz[-1])*(
             -B21 - 1j * nz[0] * B22))) / (-B11 + (1j * nz[0] * B12) + (
             1/(1j * nz[-1])*( B21 - 1j * nz[0] * B22)))
        Bl11[-1] = ones(shape(kz))
        Bl12[-1] = zeros(shape(kz))
        Bl21[-1] = ones(shape(kz))
        Bl22[-1] = zeros(shape(kz))
        #self.r = r
        self.t = zeros(shape(r),dtype = 'complex')
        #self.t[nz[-1].real != 0.0] = 1.0 + self.r[nz[-1].real != 0.0]
        c[0] = ones(shape(kz),dtype='complex') # incident beam has intensity 1
        d[0] = r # reflected beam has intensity |r|**2
        p = asarray(1.0 + r,dtype ='complex') #psi
        pp = asarray(1j * kz_l[0] * (1 - r),dtype='complex') #psi prime
        #M11[0] = ones(shape(kz),dtype='complex')
        #M12[0] = ones(shape(kz),dtype='complex')
        #M21[0] = ones(shape(kz),dtype='complex')
        #M22[0] = ones(shape(kz),dtype='complex')
        z_interface = 0.0
        # propagate (psi, psi') through the stack, extracting c, d per layer
        for l in range(1,layerCount-1):
            ## this algorithm works all the way into the substrate
            SLD,thickness,mu = workingSLD[l]
            pForDot = copy(p)
            ppForDot = copy(pp)
            #Fine, This is c and d
            kzl =( nz[l] * k0z )
            c[l] = (.5* exp(-1j*kzl*(z_interface))*(p + (pp/(1j*kzl))))
            d[l] = (.5* exp( 1j*kzl*(z_interface))*(p - (pp/(1j*kzl))))
            ## Moved ^ above v to model wavefunction.js WRT 7/16/12
            p = (M11[l]*pForDot) + (M12[l]*ppForDot/k0z)
            pp = (k0z*M21[l]*pForDot) + (M22[l]*ppForDot)
            z_interface += thickness
        # fill final c,d
        l=-1
        kzl =( nz[l] * k0z )
        c[l] = (.5* exp(-1j*kzl*(z_interface))*(p + (pp/(1j*kzl))))
        #self.c[-1] = (.5* exp(-1j*kzl*(z_interface))*(p + (pp/(1j*kzl))))
        d[-1] = zeros(shape(kz),dtype='complex')
        if kz_neg==True:
            print "neg_kz!", len(kz_l)
            # un-flip the per-layer results to match the original stack order
            return [-kz_l[::-1], c[::-1], d[::-1], r[::-1]]
        else:
            return [kz_l, c, d, r]
def _test():
    """Smoke test building a gold parallelepiped sample and running DWBA_form."""
    # run from ipython by starting in root osrefl directory,
    # from osrefl.theory.DWBA import _test
    # test()
    # ...
    from osrefl.model.sample_prep import Parallelapiped, Layer, Scene, GeomUnit, Rectilinear, Beam
    Au = Parallelapiped(SLD = 4.506842e-6,dim=[3.7e4,3.7e4,630.0])#, curve = .56)
    Cr = Layer(SLD = 3.01e-6,thickness_value = 48.0)
    #Au.on_top_of(Cr)
    #scene = Scene([Au,Cr])
    scene = Scene([Au])
    GeoUnit = GeomUnit(Dxyz = [10.0e4,10.0e4,700.0], n = [20,21,40],
                       #scene = scene, inc_sub = [0.0,0.0])
                       scene = scene, inc_sub = [0.0,2.07e-6])
    unit = GeoUnit.buildUnit()
    unit.add_media()
    lattice = Rectilinear([20.0,20.0,1.0],unit)
    beam = Beam(5.0,.02,None,0.02,None)
    q = Q_space([-.0002,-0.002,0.00002],[.0002,.002,0.1],[100,5,150])
    SLDArray = wavefunction_format(unit.unit, unit.step[2], absorbtion = None)
    '''
    kiWavePar = dwbaWavefunction(q.kin,SLDArray)

    test = 2

    bbmTest = neutron_wavefunction(q.kin[test,2,50],SLDArray)

    cCollect = zeros(shape(kiWavePar.c)[0])
    dCollect = zeros(shape(kiWavePar.d)[0])

    c = asarray(kiWavePar.c)
    d = asarray(kiWavePar.d)
    for i in range(shape(kiWavePar.c)[0]):

        temp = kiWavePar.c[i]
        cCollect[i] = temp[test,2,50]

        temp = kiWavePar.d[i]
        dCollect[i] = temp[test,2,50]

    cCollect=c[:,test,2,50]
    dCollect=d[:,test,2,50]

    plot(bbmTest.c,label = 'bbm')
    plot(cCollect,label = 'py')
    legend()
    figure()
    plot(bbmTest.d,label = 'bbm')
    plot(dCollect,label = 'py')
    legend()
    figure()
    diff = abs(bbmTest.c.real-cCollect.real)/((abs(bbmTest.c.real)+abs(cCollect.real))/2.0)
    plot(diff,label = 'diff')
    show()
    '''
    # NOTE(review): DWBA_form takes 5 arguments (angle_in is missing here), so
    # this call raises TypeError as written — supply an incident angle.
    DWBA_form(unit,lattice,beam,q)
if __name__=="__main__": _test()
| reflectometry/osrefl | osrefl/theory/DWBAGISANS.py | Python | bsd-3-clause | 16,197 |
# -*- coding: utf-8 -*-
"""
Copyright (C) 2012 Fco. Javier Lucena Lucena (https://forja.rediris.es/users/franlu/)
Copyright (C) 2012 Serafín Vélez Barrera (https://forja.rediris.es/users/seravb/)
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation; either version 3 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU Affero General Public
License along with this program; if not, write to the
Free Software Foundation, Inc., 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.
"""
| pupils/pupils | pupils/actividad/__init__.py | Python | agpl-3.0 | 894 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import utilities
from base import Base, Error, loadable
import media
class MalformedMangaPageError(media.MalformedMediaPageError):
  """Indicates that a manga-related page on MAL has irreparably broken markup in some way.
  Raised by parsers when expected markup cannot be recovered.
  """
  pass
class InvalidMangaError(media.InvalidMediaError):
  """Indicates that the manga requested does not exist on MAL.
  Raised for non-positive IDs or when MAL reports no such series.
  """
  pass
class Manga(media.Media):
  """Primary interface to manga resources on MAL.
  """
  _status_terms = [
    u'Unknown',
    u'Publishing',
    u'Finished',
    u'Not yet published'
  ]
  _consuming_verb = "read"
  def __init__(self, session, manga_id):
    """Creates a new instance of Manga.

    :type session: :class:`myanimelist.session.Session`
    :param session: A valid MAL session
    :type manga_id: int
    :param manga_id: The desired manga's ID on MAL

    :raises: :class:`.InvalidMangaError`

    """
    if not isinstance(manga_id, int) or int(manga_id) < 1:
      raise InvalidMangaError(manga_id)
    super(Manga, self).__init__(session, manga_id)
    # lazily-loaded attributes; populated by parse_sidebar via load()
    self._volumes = None
    self._chapters = None
    self._published = None
    self._authors = None
    self._serialization = None
  def parse_sidebar(self, manga_page):
    """Parses the DOM and returns manga attributes in the sidebar.

    :type manga_page: :class:`bs4.BeautifulSoup`
    :param manga_page: MAL manga page's DOM

    :rtype: dict
    :return: manga attributes

    :raises: :class:`.InvalidMangaError`, :class:`.MalformedMangaPageError`
    """
    # if MAL says the series doesn't exist, raise an InvalidMangaError.
    error_tag = manga_page.find(u'div', {'class': 'badresult'})
    if error_tag:
      raise InvalidMangaError(self.id)
    # otherwise, begin parsing.
    manga_info = super(Manga, self).parse_sidebar(manga_page)
    info_panel_first = manga_page.find(u'div', {'id': 'content'}).find(u'table').find(u'td')
    try:
      volumes_tag = info_panel_first.find(text=u'Volumes:').parent.parent
      utilities.extract_tags(volumes_tag.find_all(u'span', {'class': 'dark_text'}))
      manga_info[u'volumes'] = int(volumes_tag.text.strip()) if volumes_tag.text.strip() != 'Unknown' else None
    except:
      if not self.session.suppress_parse_exceptions:
        raise
    try:
      chapters_tag = info_panel_first.find(text=u'Chapters:').parent.parent
      utilities.extract_tags(chapters_tag.find_all(u'span', {'class': 'dark_text'}))
      manga_info[u'chapters'] = int(chapters_tag.text.strip()) if chapters_tag.text.strip() != 'Unknown' else None
    except:
      if not self.session.suppress_parse_exceptions:
        raise
    try:
      published_tag = info_panel_first.find(text=u'Published:').parent.parent
      utilities.extract_tags(published_tag.find_all(u'span', {'class': 'dark_text'}))
      published_parts = published_tag.text.strip().split(u' to ')
      if len(published_parts) == 1:
        # this published once.
        try:
          published_date = utilities.parse_profile_date(published_parts[0])
        except ValueError:
          raise MalformedMangaPageError(self.id, published_parts[0], message="Could not parse single publish date")
        manga_info[u'published'] = (published_date,)
      else:
        # two publishing dates.
        try:
          publish_start = utilities.parse_profile_date(published_parts[0])
        except ValueError:
          raise MalformedMangaPageError(self.id, published_parts[0], message="Could not parse first of two publish dates")
        # BUGFIX: compare the *end-date* string, not the whole list.
        # The original `published_parts == u'?'` was always False, so ongoing
        # series ("<date> to ?") fell through and raised on parsing u'?'.
        if published_parts[1] == u'?':
          # this is still publishing.
          publish_end = None
        else:
          try:
            publish_end = utilities.parse_profile_date(published_parts[1])
          except ValueError:
            raise MalformedMangaPageError(self.id, published_parts[1], message="Could not parse second of two publish dates")
        manga_info[u'published'] = (publish_start, publish_end)
    except:
      if not self.session.suppress_parse_exceptions:
        raise
    try:
      authors_tag = info_panel_first.find(text=u'Authors:').parent.parent
      utilities.extract_tags(authors_tag.find_all(u'span', {'class': 'dark_text'}))
      manga_info[u'authors'] = {}
      for author_link in authors_tag.find_all('a'):
        link_parts = author_link.get('href').split('/')
        # of the form /people/1867/Naoki_Urasawa
        person = self.session.person(int(link_parts[2])).set({'name': author_link.text})
        role = author_link.nextSibling.replace(' (', '').replace(')', '')
        manga_info[u'authors'][person] = role
    except:
      if not self.session.suppress_parse_exceptions:
        raise
    try:
      serialization_tag = info_panel_first.find(text=u'Serialization:').parent.parent
      publication_link = serialization_tag.find('a')
      manga_info[u'serialization'] = None
      if publication_link:
        link_parts = publication_link.get('href').split('mid=')
        # of the form /manga.php?mid=1
        manga_info[u'serialization'] = self.session.publication(int(link_parts[1])).set({'name': publication_link.text})
    except:
      if not self.session.suppress_parse_exceptions:
        raise
    return manga_info
  @property
  @loadable(u'load')
  def volumes(self):
    """The number of volumes in this manga.
    """
    return self._volumes
  @property
  @loadable(u'load')
  def chapters(self):
    """The number of chapters in this manga.
    """
    return self._chapters
  @property
  @loadable(u'load')
  def published(self):
    """A tuple(2) containing up to two :class:`datetime.date` objects representing the start and end dates of this manga's publishing.

      Potential configurations:

        None -- Completely-unknown publishing dates.

        (:class:`datetime.date`, None) -- Manga start date is known, end date is unknown.

        (:class:`datetime.date`, :class:`datetime.date`) -- Manga start and end dates are known.
    """
    return self._published
  @property
  @loadable(u'load')
  def authors(self):
    """An author dict with :class:`myanimelist.person.Person` objects of the authors as keys, and strings describing the duties of these authors as values.
    """
    return self._authors
  @property
  @loadable(u'load')
  def serialization(self):
    """The :class:`myanimelist.publication.Publication` involved in the first serialization of this manga.
    """
    return self._serialization
| XueAlfred/MALAnalysis | scraper-code/myanimelist/manga.py | Python | mit | 6,438 |
#!/usr/bin/env python
#
# Copyright 2014-2020 Matthew Wall
# Copyright 2014 Nate Bargmann <[email protected]>
# See the file LICENSE.txt for your rights.
#
# Credit to and contributions from:
# Jay Nugent (WB8TKL) and KRK6 for weather-2.kr6k-V2.1
# http://server1.nuge.com/~weather/
# Steve (sesykes71) for testing the first implementations of this driver
# Garret Power for improved decoding and proper handling of negative values
# Chris Thompstone for testing the fast-read implementation
#
# Thanks to PeetBros for publishing the communication protocols and details
# about each model they manufacture.
"""Driver for Peet Bros Ultimeter weather stations except the Ultimeter II
This driver assumes the Ultimeter is emitting data in Peet Bros Data Logger
mode format. This driver will set the mode automatically on stations
manufactured after 2004. Stations manufactured before 2004 must be set to
data logger mode using the buttons on the console.
Resources for the Ultimeter stations
Ultimeter Models 2100, 2000, 800, & 100 serial specifications:
http://www.peetbros.com/shop/custom.aspx?recid=29
Ultimeter 2000 Pinouts and Parsers:
http://www.webaugur.com/ham-radio/52-ultimeter-2000-pinouts-and-parsers.html
Ultimeter II
not supported by this driver
All models communicate over an RS-232 compatible serial port using three
wires--RXD, TXD, and Ground (except Ultimeter II which omits TXD). Port
parameters are 2400, 8N1, with no flow control.
The Ultimeter hardware supports several "modes" for providing station data
to the serial port. This driver utilizes the "modem mode" to set the date
and time of the Ultimeter upon initialization and then sets it into Data
Logger mode for continuous updates.
Modem Mode commands used by the driver
>Addddmmmm Set Date and Time (decimal digits dddd = day of year,
mmmm = minute of day; Jan 1 = 0000, Midnight = 0000)
>I Set output mode to Data Logger Mode (continuous output)
"""
from __future__ import with_statement
from __future__ import absolute_import
from __future__ import print_function
import logging
import time
import serial
import weewx.drivers
import weewx.wxformulas
from weewx.units import INHG_PER_MBAR, MILE_PER_KM
from weeutil.weeutil import timestamp_to_string
log = logging.getLogger(__name__)
# Identifiers this driver reports to the weewx engine; DRIVER_NAME is also the
# config-dict section key used by loader().
DRIVER_NAME = 'Ultimeter'
DRIVER_VERSION = '0.41'
def loader(config_dict, _):
    """Entry point used by the weewx engine to instantiate the driver."""
    stn_config = config_dict[DRIVER_NAME]
    return UltimeterDriver(**stn_config)
def confeditor_loader():
    """Entry point used by weewx's configuration tool."""
    editor = UltimeterConfEditor()
    return editor
def _fmt(x):
return ' '.join(["%0.2X" % c for c in x])
class UltimeterDriver(weewx.drivers.AbstractDevice):
    """weewx driver that communicates with a Peet Bros Ultimeter station

    model: station model, e.g., 'Ultimeter 2000' or 'Ultimeter 100'
    [Optional. Default is 'Ultimeter']

    port - serial port
    [Required. Default is /dev/ttyUSB0]

    max_tries - how often to retry serial communication before giving up
    [Optional. Default is 5]
    """
    def __init__(self, **stn_dict):
        self.model = stn_dict.get('model', 'Ultimeter')
        self.port = stn_dict.get('port', Station.DEFAULT_PORT)
        self.max_tries = int(stn_dict.get('max_tries', 5))
        self.retry_wait = int(stn_dict.get('retry_wait', 3))
        debug_serial = int(stn_dict.get('debug_serial', 0))
        # last cumulative rain total; used to derive per-interval rain deltas
        self.last_rain = None
        log.info('Driver version is %s', DRIVER_VERSION)
        log.info('Using serial port %s', self.port)
        self.station = Station(self.port, debug_serial=debug_serial)
        self.station.open()
    def closePort(self):
        # Idempotent: safe to call when the port is already closed.
        if self.station:
            self.station.close()
            self.station = None
    @property
    def hardware_name(self):
        # Name shown by weewx for this hardware.
        return self.model
    def DISABLED_getTime(self):
        # Intentionally disabled (rename to getTime to enable clock reads).
        return self.station.get_time()
    def DISABLED_setTime(self):
        # Intentionally disabled (rename to setTime to enable clock sync).
        self.station.set_time(int(time.time()))
    def genLoopPackets(self):
        """Generator yielding one US-unit LOOP packet per station reading."""
        # put the console into continuous (data logger) output mode
        self.station.set_logger_mode()
        while True:
            packet = {'dateTime': int(time.time() + 0.5),
                      'usUnits': weewx.US}
            readings = self.station.get_readings_with_retry(self.max_tries,
                                                            self.retry_wait)
            data = Station.parse_readings(readings)
            packet.update(data)
            self._augment_packet(packet)
            yield packet
    def _augment_packet(self, packet):
        # Convert the station's cumulative rain counter into a per-packet delta.
        packet['rain'] = weewx.wxformulas.calculate_rain(packet['rain_total'], self.last_rain)
        self.last_rain = packet['rain_total']
class Station(object):
    """Serial-line protocol handler for a Peet Bros Ultimeter console.

    Owns the serial port (open/close plus context-manager support), switches
    the station between Data Logger and Modem modes, reads raw '!!...'
    sentences and decodes them into a weewx-style data dict.
    """

    DEFAULT_PORT = '/dev/ttyUSB0'

    def __init__(self, port, debug_serial=0):
        self._debug_serial = debug_serial
        self.port = port
        self.baudrate = 2400
        self.timeout = 3  # seconds
        self.serial_port = None
        # setting the year works only for models 2004 and later
        self.can_set_year = True
        # modem mode is available only on models 2004 and later
        # not available on pre-2004 models 50/100/500/700/800
        self.has_modem_mode = True

    def __enter__(self):
        self.open()
        return self

    def __exit__(self, _, value, traceback):
        self.close()

    def open(self):
        """Open the serial port and discard any stale buffered input."""
        log.debug("Open serial port %s", self.port)
        self.serial_port = serial.Serial(self.port, self.baudrate, timeout=self.timeout)
        self.serial_port.flushInput()

    def close(self):
        """Close the serial port; safe to call more than once."""
        if self.serial_port:
            log.debug("Close serial port %s", self.port)
            self.serial_port.close()
            self.serial_port = None

    def get_time(self):
        """Return the station clock as a unix epoch, or system time on failure.

        The station only reports day-of-year and minute-of-day, so the year
        and seconds components are taken from the local clock.
        """
        try:
            self.set_logger_mode()
            buf = self.get_readings_with_retry()
            data = Station.parse_readings(buf)
            d = data['day_of_year']  # seems to start at 0
            m = data['minute_of_day']  # 0 is midnight before start of day
            tt = time.localtime()
            y = tt.tm_year
            s = tt.tm_sec
            ts = time.mktime((y, 1, 1, 0, 0, s, 0, 0, -1)) + d * 86400 + m * 60
            log.debug("Station time: day:%s min:%s (%s)", d, m, timestamp_to_string(ts))
            return ts
        except (serial.serialutil.SerialException, weewx.WeeWxIOError) as e:
            log.error("get_time failed: %s", e)
            return int(time.time())

    def set_time(self, ts):
        """Push local time *ts* (epoch seconds) to the station clock."""
        # go to modem mode so we do not get logger chatter
        self.set_modem_mode()
        # set time should work on all models
        tt = time.localtime(ts)
        cmd = b">A%04d%04d" % (tt.tm_yday - 1, tt.tm_min + tt.tm_hour * 60)
        log.debug("Set station time to %s (%s)", timestamp_to_string(ts), cmd)
        self.serial_port.write(b"%s\r" % cmd)
        # year works only for models 2004 and later
        if self.can_set_year:
            cmd = b">U%s" % tt.tm_year
            log.debug("Set station year to %s (%s)", tt.tm_year, cmd)
            self.serial_port.write(b"%s\r" % cmd)

    def set_logger_mode(self):
        # in logger mode, station sends logger mode records continuously
        if self._debug_serial:
            log.debug("Set station to logger mode")
        self.serial_port.write(b">I\r")

    def set_modem_mode(self):
        # setting to modem mode should stop data logger output
        if self.has_modem_mode:
            if self._debug_serial:
                log.debug("Set station to modem mode")
            self.serial_port.write(b">\r")

    def get_readings(self):
        """Read an Ultimeter sentence.
        Returns: a bytearray containing the sentence.
        """
        # Search for the character '!', which marks the beginning of a "sentence":
        while True:
            c = self.serial_port.read(1)
            if c == b'!':
                break
        # Save the first '!' ...
        buf = bytearray(c)
        # ... then read until we get to a '\r' or '\n'
        while True:
            c = self.serial_port.read(1)
            if c == b'\n' or c == b'\r':
                # We found a carriage return or newline, so we have the complete sentence.
                # NB: Because the Ultimeter terminates a sentence with a '\r\n', this will
                # leave a newline in the buffer. We don't care: it will get skipped over when
                # we search for the next sentence.
                break
            buf += c
        if self._debug_serial:
            log.debug("Station said: %s", _fmt(buf))
        return buf

    @staticmethod
    def validate_string(buf):
        """Raise WeeWxIOError unless *buf* looks like a sentence; return it."""
        # 42/46/50 bytes: base record, +daily_rain, +wind_average (see parse_readings)
        if len(buf) not in [42, 46, 50]:
            raise weewx.WeeWxIOError("Unexpected buffer length %d" % len(buf))
        if buf[0:2] != b'!!':
            raise weewx.WeeWxIOError("Unexpected header bytes '%s'" % buf[0:2])
        return buf

    def get_readings_with_retry(self, max_tries=5, retry_wait=3):
        """Read and validate one sentence, retrying transient serial errors."""
        for ntries in range(max_tries):
            try:
                buf = self.get_readings()
                self.validate_string(buf)
                return buf
            except (serial.serialutil.SerialException, weewx.WeeWxIOError) as e:
                log.info("Failed attempt %d of %d to get readings: %s", ntries + 1, max_tries, e)
                time.sleep(retry_wait)
        else:
            msg = "Max retries (%d) exceeded for readings" % max_tries
            log.error(msg)
            raise weewx.RetriesExceeded(msg)

    @staticmethod
    def parse_readings(raw):
        """Ultimeter stations emit data in PeetBros format. Each line has 52
        characters - 2 header bytes, 48 data bytes, and a carriage return
        and line feed (new line):
        !!000000BE02EB000027700000023A023A0025005800000000\r\n
          SSSSXXDDTTTTLLLLPPPPttttHHHHhhhhddddmmmmRRRRWWWW
          SSSS - wind speed (0.1 kph)
          XX - wind direction calibration
          DD - wind direction (0-255)
          TTTT - outdoor temperature (0.1 F)
          LLLL - long term rain (0.01 in)
          PPPP - pressure (0.1 mbar)
          tttt - indoor temperature (0.1 F)
          HHHH - outdoor humidity (0.1 %)
          hhhh - indoor humidity (0.1 %)
          dddd - date (day of year)
          mmmm - time (minute of day)
          RRRR - daily rain (0.01 in)
          WWWW - one minute wind average (0.1 kph)
        "pressure" reported by the Ultimeter 2000 is correlated to the local
        official barometer reading as part of the setup of the station
        console so this value is assigned to the 'barometer' key and
        the pressure and altimeter values are calculated from it.
        Some stations may omit daily_rain or wind_average, so check for those.
        raw: A bytearray containing the sentence.
        returns: A dictionary containing the data.
        """
        # Convert from bytearray to text
        buf = raw[2:].decode('ascii')
        data = dict()
        data['windSpeed'] = Station._decode(buf[0:4], 0.1 * MILE_PER_KM)  # mph
        data['windDir'] = Station._decode(buf[6:8], 1.411764)  # compass deg
        data['outTemp'] = Station._decode(buf[8:12], 0.1, neg=True)  # degree_F
        data['rain_total'] = Station._decode(buf[12:16], 0.01)  # inch
        data['barometer'] = Station._decode(buf[16:20], 0.1 * INHG_PER_MBAR)  # inHg
        data['inTemp'] = Station._decode(buf[20:24], 0.1, neg=True)  # degree_F
        data['outHumidity'] = Station._decode(buf[24:28], 0.1)  # percent
        data['inHumidity'] = Station._decode(buf[28:32], 0.1)  # percent
        data['day_of_year'] = Station._decode(buf[32:36])
        data['minute_of_day'] = Station._decode(buf[36:40])
        if len(buf) > 40:
            data['daily_rain'] = Station._decode(buf[40:44], 0.01)  # inch
        if len(buf) > 44:
            data['wind_average'] = Station._decode(buf[44:48], 0.1 * MILE_PER_KM)  # mph
        return data

    @staticmethod
    def _decode(s, multiplier=None, neg=False):
        """Decode hex text *s* into a number, or None for uninstalled sensors.

        Ultimeter puts hyphens in the string when a sensor is not installed.
        When we get a hyphen or any other non-hex character, return None.
        Negative values are represented in twos complement format. Only do the
        check for negative values if requested, since some parameters use the
        full set of bits (e.g., wind direction) and some do not (e.g.,
        temperature).

        s: A text string, encoding the value as hexadecimal digits.
        multiplier: Multiply the results by this value
        neg: If True, calculate twos-complement
        """
        v = None
        try:
            # Under Python 2, the variable s must be a string, not a bytearray.
            v = int(s, 16)
            if neg:
                bits = 4 * len(s)
                if v & (1 << (bits - 1)) != 0:
                    v -= (1 << bits)
            if multiplier is not None:
                v *= multiplier
        except ValueError as e:
            # BUG FIX: parse_readings hands this method *text* slices, so the
            # old comparison `s != b'----'` was always true on Python 3 and
            # the expected '----' sentinel was logged as a decode failure.
            # Accept the sentinel in either bytes or text form.
            if s not in (b'----', '----'):
                log.debug("Decode failed for '%s': %s", s, e)
        return v
class UltimeterConfEditor(weewx.drivers.AbstractConfEditor):
    """Supplies the default [Ultimeter] stanza and prompts for wee_config."""
    @property
    def default_stanza(self):
        # The returned text is inserted verbatim into weewx.conf; keep the
        # formatting exactly as-is.
        return """
[Ultimeter]
    # This section is for the PeetBros Ultimeter series of weather stations.
    # Serial port such as /dev/ttyS0, /dev/ttyUSB0, or /dev/cua0
    port = %s
    # The station model, e.g., Ultimeter 2000, Ultimeter 100
    model = Ultimeter
    # The driver to use:
    driver = weewx.drivers.ultimeter
""" % Station.DEFAULT_PORT
    def prompt_for_settings(self):
        # Interactively ask the user for the one required setting (the port).
        print("Specify the serial port on which the station is connected, for")
        print("example: /dev/ttyUSB0 or /dev/ttyS0 or /dev/cua0.")
        port = self._prompt('port', Station.DEFAULT_PORT)
        return {'port': port}
# define a main entry point for basic testing of the station without weewx
# engine and service overhead. invoke this as follows from the weewx root dir:
#
# PYTHONPATH=bin python bin/weewx/drivers/ultimeter.py
if __name__ == '__main__':
    # Standalone test harness: dumps raw station sentences (as hex) with a
    # timestamp, bypassing the weewx engine entirely.
    import optparse
    import weewx
    import weeutil.logger
    usage = """%prog [options] [--help]"""
    parser = optparse.OptionParser(usage=usage)
    parser.add_option('--version', dest='version', action='store_true',
                      help='display driver version')
    parser.add_option('--debug', dest='debug', action='store_true',
                      help='provide additional debug output in log')
    parser.add_option('--port', dest='port', metavar='PORT',
                      help='serial port to which the station is connected',
                      default=Station.DEFAULT_PORT)
    (options, args) = parser.parse_args()
    if options.version:
        print("ultimeter driver version %s" % DRIVER_VERSION)
        exit(0)
    if options.debug:
        weewx.debug = 1
    weeutil.logger.setup('ultimeter', {})
    # Context manager guarantees the serial port is closed on exit/Ctrl-C.
    with Station(options.port, debug_serial=options.debug) as station:
        station.set_logger_mode()
        while True:
            print(time.time(), _fmt(station.get_readings()))
| hes19073/hesweewx | bin/weewx/drivers/ultimeter.py | Python | gpl-3.0 | 15,014 |
import numpy as np
import os
import speakerInfo as sinfo
import infoSingleFile
from unpackMFC import run as unmfc
from pyAudioAnalysis import audioBasicIO, audioFeatureExtraction
from datetime import datetime
import sklearn
from threading import Thread, BoundedSemaphore
import modelStorage as mds
from enum import Enum
WindowGTVmodes = Enum('average', 'midpoint')
# primary inputs
inputPath = "/home/jkih/Music/sukwoo_2min_utt/"
manualTrainTestSet = False
trainLabels = ['kim', 'lee', 'seo', 'yoon']
testLabels = ['joo']
autoflipOutputIfBelow50 = True
# leave blank to ignore
manualTestFile = ""
manualTestDiaFilePath = "joo proc pass 3.wav.diarization.comp"
# outputPath = inputPath + '1 0.1 avg'
outputPath = inputPath + str(datetime.now().time()) + '/'
numSets = 1
numThreads = 3
printTestingTimes = True
normalizeTrainingSet = True
# if true normalizes testing set using the normalization parameters found during the training set normalization
# unless it is a single file testing set, in which case we use a per window normalization
normalizeTestSet = True
windowGTVmode = WindowGTVmodes.average
# in number of the feature vectors used. MFCC is 30ms
# large window sizes leads to OOM failure
# at least I think it's OOM; python quits silently after filling available RAM (16GB)
# might be able to batch SVM training? Depends on how svm.fit() works
svmWindowSize = 1000 // 30
# also in number of feature vectors
svmStride = int(svmWindowSize *.1)
# pAA settings
# https://github.com/tyiannak/pyAudioAnalysis/wiki/3.-Feature-Extraction
# in ms
windowSize = 25.625
timeStep = 10
# don't change unless necessary
zeroThresh = 1e-10
featureVectorSize = 13
threadSemaphore = BoundedSemaphore(value=numThreads)
# no touch
featureVectors = dict()
featureVectorCache = dict()
MfccCache = dict()
groundTruths = dict()
lastSpeaker = -1
def clearVariables():
    # Reset per-run module state (the per-speaker feature/ground-truth stores
    # and the round-robin test-speaker cursor) before loading a new feature set.
    # The file-level caches (MfccCache, featureVectorCache) are deliberately
    # kept so repeated runs skip re-parsing the audio.
    global featureVectors
    global groundTruths
    global lastSpeaker
    featureVectors = dict()
    groundTruths = dict()
    lastSpeaker = -1
def forgivingFloatEquivalence(value, standard):
    # Returns True when `value` lies OUTSIDE the band
    # [-standard - zeroThresh, standard + zeroThresh]; callers in
    # validateNormalization treat True as "check failed".
    # NOTE(review): despite the name this is an asymmetric band test, not
    # |value - standard| <= zeroThresh. E.g. a std-dev of 0.5 against
    # standard=1 passes silently. That may be intentionally "forgiving" --
    # confirm before tightening.
    return value < -1 * standard - zeroThresh or value > standard + zeroThresh
def pairwiseComparison(a, b):
    """Element-wise equality of two sequences (truncated to the shorter one).

    Returns a list of plain bools, one per compared pair.
    """
    return [bool(lhs == rhs) for lhs, rhs in zip(a, b)]
def recallCalc(test, truth):
    """Recall of `test` predictions against `truth` labels: TP / positives.

    Raises ZeroDivisionError when `truth` contains no positive labels,
    matching the original behaviour.
    """
    # Keep only the predictions made where the ground truth is positive.
    predictionsOnPositives = [tst for tst, trt in zip(test, truth) if trt]
    truePositives = sum(1 for tst in predictionsOnPositives if tst)
    return float(truePositives) / len(predictionsOnPositives)
def validateNormalization(featureVector):
    # Sanity-check that a standardised matrix has ~zero column means and
    # ~unit column std-devs (per forgivingFloatEquivalence's tolerance band);
    # warns and returns False on the first violation, True otherwise.
    for mean in featureVector.mean(axis=0):
        if forgivingFloatEquivalence(mean, 0):
            print "WARN: validateNormalization failed with mean " + str(mean)
            return False
    for stdv in featureVector.std(axis=0):
        if forgivingFloatEquivalence(stdv, 1):
            print "WARN: validateNormalization failed with stddev " + str(stdv)
            return False
    return True
def loadFeatureVector(inputPath, featureType, paaFunction = -1):
    # Dispatch to the right loader: 'mfcc' reads Sphinx .mfc files, 'paa'
    # extracts one pyAudioAnalysis feature row (paaFunction) from .wav files.
    if featureType == 'mfcc':
        loadMFCCFiles(inputPath)
    elif featureType == 'paa':
        loadWAVwithPAA(inputPath, paaFunction)
    else:
        print "ERR: unknown feature type", featureType
        assert False
def storeFeature(sid, data, filePath):
    # Append one file's feature vectors to the per-speaker store, together
    # with a matching ground-truth label array (one label per vector, the
    # whole file sharing the truth value derived from its path).
    global featureVectors
    global groundTruths
    if sid in featureVectors:
        featureVectors[sid].append(data)
        groundTruths[sid].append(np.full(len(data), sinfo.getTruthValue(filePath), dtype='int8'))
    else:
        # First entry for this speaker is stored as plain lists.
        # NOTE(review): the append branch above keeps ndarrays as-is while
        # this branch converts to lists; collateData later flattens both,
        # but confirm the asymmetry is intended.
        if type(data) is np.ndarray:
            data = data.tolist()
        featureVectors[sid] = [data]
        groundTruths[sid] = [np.full(len(data), sinfo.getTruthValue(filePath), dtype='int8').tolist()]
def loadMFCCFiles(inputPath):
    """Load every .mfc file under *inputPath* into the per-speaker store.

    Parsed MFCC arrays are memoized in MfccCache so repeated runs over the
    same file set skip the unpacking step.
    """
    filePaths = [inputPath + f for f in os.listdir(inputPath)
                 if os.path.isfile(inputPath + f) and f.endswith('.mfc')]
    for filePath in filePaths:
        sid = sinfo.getSpeakerID(filePath)
        # direct dict membership: O(1), and avoids materialising the key
        # list that `.keys()` builds on Python 2
        if filePath in MfccCache:
            data = MfccCache[filePath]
        else:
            data = unmfc(filePath, featureVectorSize)
            MfccCache[filePath] = data
        storeFeature(sid, data, filePath)
def loadWAVwithPAA(inputPath, paaFunction):
    # Extract one pyAudioAnalysis short-term feature row (index paaFunction,
    # 0..33) from every .wav under inputPath; the full per-file feature
    # matrix is cached so different paaFunction runs reuse the extraction.
    filePaths = [inputPath+f for f in os.listdir(inputPath) if os.path.isfile(inputPath+f) and f.endswith('.wav')]
    for filePath in filePaths:
        sid = sinfo.getSpeakerID(filePath)
        data = None
        if filePath in featureVectorCache.keys():
            data = featureVectorCache[filePath]
        else:
            [Fs, x] = audioBasicIO.readAudioFile(filePath)
            assert paaFunction > -1 and paaFunction < 34
            # windowSize/timeStep are in ms; pAA expects sample counts
            data = audioFeatureExtraction.stFeatureExtraction(x, Fs, 0.001 * windowSize * Fs, 0.001 * timeStep * Fs)
            featureVectorCache[filePath] = data
        data = data[paaFunction,:]
        # using 1D feature vector breaks my code, sklearn code, and probably the law
        if len(np.array(data).shape) < 2:
            data = [[datum] for datum in data]
        storeFeature(sid, data, filePath)
def windowing(x, y, normalizeEachWindow = False):
    # Slide a window of svmWindowSize feature vectors over x with stride
    # svmStride, flattening each window into one long vector.  Each window's
    # ground-truth label is either the midpoint label or the rounded mean of
    # the window's labels, depending on windowGTVmode.
    def reduceArrDimension(a):
        # flatten one nesting level: [[...], [...]] -> [...]
        retval = []
        for iter in a:
            retval.extend(iter)
        return retval
    newX = []
    newY = []
    iterRange = len(x) - svmWindowSize + 1
    if iterRange % svmStride > 0:
        print "WARN: SVM window stride misaligned by:", iterRange % svmStride
    i = 0
    while i < iterRange:
        xi = x[i : i + svmWindowSize]
        if normalizeEachWindow:
            # per-window standardisation (used for the single-file test set)
            sklSS = sklearn.preprocessing.StandardScaler()
            xi = sklSS.fit_transform(xi)
        xi = reduceArrDimension(xi)
        newX.append(xi)
        if windowGTVmode == WindowGTVmodes.midpoint:
            newY.append(y[int(i + svmWindowSize / 2)])
        elif windowGTVmode == WindowGTVmodes.average:
            newY.append(round(np.mean(y[i : i + svmWindowSize])))
        else:
            print 'ERR: invalid windowGTVmode:', windowGTVmode
            assert False
        i += svmStride
    return newX, newY
# returns: feature vector array (2D), ground truth array (1D)
def collateData(speakerList, divider = None, subtractor = None, shuffle = False):
def reduceArrDimension(a):
retval = []
for iter in a:
retval.extend(iter)
return retval
x = []
y = []
for speaker in speakerList:
if speaker in featureVectors:
xi = featureVectors[speaker]
yi = groundTruths[speaker]
if shuffle:
rng_state = np.random.get_state()
np.random.shuffle(xi)
np.random.set_state(rng_state)
np.random.shuffle(yi)
else:
print "ERR: unknown speaker", str(speaker)
print featureVectors.keys()
print groundTruths.keys()
assert False
for i in range(len(xi)):
x.append(xi[i])
y.append(yi[i])
x = reduceArrDimension(x)
y = reduceArrDimension(y)
sklSS = sklearn.preprocessing.StandardScaler()
if divider == None:
x = sklSS.fit_transform(x)
if not validateNormalization(x):
print "ERR: data not normalized for speakers " + str(speakerList)
print "Check if bounds are too close"
assert False
elif divider[0] == False:
# Don't normalize
pass
else:
sklSS.scale_ = divider
sklSS.mean_ = subtractor
x = sklSS.transform(x)
if not validateNormalization(x):
print "WARN: data not normalized for speakers " + str(speakerList)
print "divider", divider
print "subtractor", subtractor
x, y = windowing(x, y)
retScale = None
retMean = None
try:
retScale = sklSS.scale_
retMean = sklSS.mean_
except AttributeError:
pass
return x, y, retScale, retMean
def loadManualTestFile(filePath, diarizationFilePath, divider, subtractor):
if not (filePath in MfccCache.keys()):
MfccCache[filePath] = unmfc(filePath, featureVectorSize)
infoSingleFile.init(diarizationFilePath, len(MfccCache[filePath]))
x = MfccCache[filePath]
if not ((divider == None) or (divider[0] == False)):
sklSS = sklearn.preprocessing.StandardScaler()
sklSS.scale_ = divider
sklSS.mean_ = subtractor
x = sklSS.transform(x)
x, y = windowing(x, infoSingleFile.getTruthValues(), True)
x = np.array(x)
if not validateNormalization(x):
print "WARN: data not normalized for the manual test set"
print "divider", divider
print "subtractor", subtractor
return x, y
def getSubset():
if manualTrainTestSet:
datA = None
if not normalizeTrainingSet:
datA = [False]
trainFeatureVector, trainTruthVector, datA, datB = collateData(trainLabels, shuffle = True, divider = datA)
if not normalizeTestSet:
datA = [False]
if len(manualTestFile) > 0:
testFeatureVector, testTruthVector = loadManualTestFile(manualTestFile, manualTestDiaFilePath, datA, datB)
else:
testFeatureVector, testTruthVector, datA, datB = collateData(testLabels, datA, datB, True)
else:
global lastSpeaker
testSpeaker = lastSpeaker + 1
if testSpeaker >= len(featureVectors.keys()):
testSpeaker = 0
speakers = featureVectors.keys()
datA = None
if not normalizeTrainingSet:
datA = [False]
trainFeatureVector, trainTruthVector, datA, datB = collateData([speaker for speaker in speakers if speaker != speakers[testSpeaker]], shuffle = True, divider = datA)
if not normalizeTestSet:
datA = [False]
testFeatureVector, testTruthVector, datA, datB = collateData([speakers[testSpeaker]], datA, datB, True)
lastSpeaker = testSpeaker
print "Testing with speaker #" + str(testSpeaker) + ", label: " + str(speakers[testSpeaker])
return trainFeatureVector, testFeatureVector, trainTruthVector, testTruthVector
# flips 0 to 1 and non-0 to 0 for any given 1D array
def flipTruthValues(truthVect):
    """Swap binary labels: 0 becomes 1, anything non-zero becomes 0.

    Returns the same kind of object as the original (``map`` result), so
    callers may iterate it exactly once.
    """
    return map(lambda label: 1 if label == 0 else 0, truthVect)
def modelProcess(modelFunc, tag, ms):
global threadSemaphore
def resetModel():
if ms.args == 'ensembleOverride':
return modelFunc
elif ms.args != None:
return modelFunc(ms.args)
else:
return modelFunc()
gtvWasFlipped = False
trainFeatureVector = ms.trainFeatureVector
trainTruthVector = ms.trainTruthVector
testFeatureVector = ms.testFeatureVector
testTruthVector = ms.testTruthVector
model = resetModel()
model.fit(trainFeatureVector, trainTruthVector)
accuracy = -1
f1 = -1
try:
if modelFunc == mds.model_MiniK:
model = resetModel()
model.dummyattributethatdoesntexist
# MiniK score is not accuracy
# raise an attribute error to skip in to the hand-written accuracy code
if printTestingTimes:
print 'TESTING BEGIN', datetime.now()
predicted_labels = model.predict(testFeatureVector)
if printTestingTimes:
print 'TESTING END', datetime.now()
accuracy = model.score(testFeatureVector, testTruthVector)
if autoflipOutputIfBelow50 and accuracy < .5:
accuracy = 1 - accuracy
gtvWasFlipped = True
testTruthVector = flipTruthValues(testTruthVector)
f1 = sklearn.metrics.f1_score(testTruthVector, predicted_labels)
except AttributeError:
# some models only have online modes
if printTestingTimes:
print 'TESTING BEGIN', datetime.now()
predicted_labels = model.fit_predict(testFeatureVector)
if printTestingTimes:
print 'TESTING END', datetime.now()
accuracy = float(pairwiseComparison(predicted_labels, testTruthVector).count(True)) / len(testTruthVector)
if autoflipOutputIfBelow50 and accuracy < .5:
accuracy = 1 - accuracy
gtvWasFlipped = True
testTruthVector = flipTruthValues(testTruthVector)
recall = recallCalc(predicted_labels, testTruthVector)
f1 = float(2) * accuracy * recall / (accuracy + recall)
if accuracy < 0 or accuracy > 1:
print 'INVALID ACC ' + str(accuracy)
print 'MODEL ' + str(model)
print str(predicted_labels)
print str(testTruthVector)
os.exit
elif f1 < 0 or f1 > 1:
print 'INVALID F1 ' + str(f1)
print 'MODEL ' + str(model)
print str(predicted_labels)
print str(testTruthVector)
os.exit
f = open(outputPath + tag + '.log', 'w')
f.write('accuracy: ' + str(accuracy) + '\tf1: ' + str(f1))
f.write('\n')
f.write('predicted labels followed by truth values')
f.write('\n')
f.write(str(predicted_labels.tolist()))
f.write('\n')
f.write(str(testTruthVector))
f.write('\n')
f.write('Ground Truth Values Auto-Flipped: ' + str(gtvWasFlipped))
f.close()
threadSemaphore.release()
def runPaaFunctions():
if not os.path.exists(outputPath):
os.mkdir(outputPath)
for paaFunction in [21, 20]:
print "Running feature extraction #" + str(paaFunction)
clearVariables()
loadFeatureVector(inputPath, 'paa', paaFunction)
for i in range(numSets * len(featureVectors.keys())):
trainFeatureVector, testFeatureVector, trainTruthVector, testTruthVector = getSubset()
ms = mds.ModelSettings(i, paaFunction, trainFeatureVector, testFeatureVector, trainTruthVector, testTruthVector, featureVectors.keys()[lastSpeaker])
mds.runAllModelsPAA(ms, windowSize, iterDone, iterTotal)
def runSphinxFiles():
if not os.path.exists(outputPath):
os.mkdir(outputPath)
clearVariables()
loadFeatureVector(inputPath, 'mfcc')
iterlen = numSets * len(featureVectors.keys())
for i in range(iterlen):
print "PROCESSING: " + str(i) + " / " + str(iterlen)
trainFeatureVector, testFeatureVector, trainTruthVector, testTruthVector = getSubset()
ms = mds.ModelSettings(i, -1, trainFeatureVector, testFeatureVector, trainTruthVector, testTruthVector, featureVectors.keys()[lastSpeaker])
mds.runAllModelsMFCC(ms, iterDone, iterTotal)
def runRBFvariants():
if not os.path.exists(outputPath):
os.mkdir(outputPath)
clearVariables()
loadFeatureVector(inputPath, 'mfcc')
if manualTrainTestSet:
iterlen = numSets
else:
iterlen = numSets * len(featureVectors.keys())
for i in range(iterlen):
print "PROCESSING: " + str(i) + " / " + str(iterlen)
trainFeatureVector, testFeatureVector, trainTruthVector, testTruthVector = getSubset()
testSpeaker = featureVectors.keys()[lastSpeaker]
if lastSpeaker < 0:
testSpeaker = 'manual'
ms = mds.ModelSettings(i, -1, trainFeatureVector, testFeatureVector, trainTruthVector, testTruthVector, testSpeaker)
mds.runRBFvariantsGamma(ms, [0.015], i, iterlen)
# mds.runRBFvariants2DList(ms, [1, 10, 50, 100], [50, 0.01, 0.02, 0.03, 0.04, 0.5, 2, .78125, .617284], i, iterlen)
# mds.runRBFvariantsCList(ms, np.arange(1.98, 3, 0.02), 0.03, i, iterlen)
# mds.runRBFvariantsCList(ms, [1], 0.03, i, iterlen)
def runRandomForest():
global outputPath
outputPathPrefix = outputPath
clearVariables()
loadFeatureVector(inputPath, 'mfcc')
if manualTrainTestSet:
iterlen = numSets
else:
iterlen = numSets * len(featureVectors.keys())
forestCount = [1024, 2048, 3072, 4096, 5121, 6045, 8193]
maxDepth = [3, 5, 10, 20]
mds.resetETAtimer(iterlen * len(forestCount) * len(maxDepth))
for fc in forestCount:
for md in maxDepth:
for i in range(iterlen):
# outputPath = outputPathPrefix + ' ' + str(fc) + 'forests ' + str(md) + 'depth/'
if not os.path.exists(outputPath):
os.mkdir(outputPath)
print "PROCESSING: " + str(i) + " / " + str(iterlen) + ' ' + str(fc) + ' forests ' + str(md) + ' depth'
trainFeatureVector, testFeatureVector, trainTruthVector, testTruthVector = getSubset()
testSpeaker = featureVectors.keys()[lastSpeaker]
if lastSpeaker < 0:
testSpeaker = 'manual'
ms = mds.ModelSettings(i, -1, trainFeatureVector, testFeatureVector, trainTruthVector, testTruthVector, testSpeaker, mds.factory_RandomForest(fc, 4, md))
mds.runModel(mds.model_RandomForest, 'MFCC_' + str(ms.paaFunction) + '_RandomForest_fc_' + str(fc) + '_md_' + str(md) + '_' + str(ms.i) + '_' + ms.speakerName, ms)
mds.incrementETAtimer()
def runSvmRfEnsemble():
clearVariables()
loadFeatureVector(inputPath, 'mfcc')
if manualTrainTestSet:
iterlen = numSets
else:
iterlen = numSets * len(featureVectors.keys())
forestCount = 4096
maxDepth = 3
gamma = 0.015
cVal = 1
if not os.path.exists(outputPath):
os.mkdir(outputPath)
mds.resetETAtimer(iterlen)
for i in range(iterlen):
fc = forestCount
md = maxDepth
g = gamma
c = cVal
trainFeatureVector, testFeatureVector, trainTruthVector, testTruthVector = getSubset()
testSpeaker = featureVectors.keys()[lastSpeaker]
if lastSpeaker < 0:
testSpeaker = 'manual'
ms = mds.ModelSettings(i, -1, trainFeatureVector, testFeatureVector, trainTruthVector, testTruthVector, testSpeaker, 'ensembleOverride')
mds.runModel(mds.ensemble_VotingSvmRf(g, c, fc, md), 'MFCC_' + str(ms.paaFunction) + '_E_SVMRF_fc_' + str(fc) + '_md_' + str(md) + '_' + str(ms.i) + '_' + ms.speakerName, ms)
mds.incrementETAtimer()
mds.init(threadSemaphore, modelProcess)
# runPaaFunctions()
# runSphinxFiles()
# runRBFvariants()
# runRandomForest()
runSvmRfEnsemble() | dare0021/ClusteringBasedID | run.py | Python | mit | 16,148 |
#decode.py
#Driver program for translating Morse code strings.
#By Timothy Cohen
from morsetree import MorseCodeTree
def main(filename):
    """Translate each line of Morse code in *filename*, printing the source
    line followed by its decoded text.

    Fixes over the original: the input file is opened in a with-block so the
    handle is always closed, and it no longer shadows the built-in `file`.
    """
    with open(filename, 'r') as morse_file:
        for line in morse_file:
            line = line.rstrip("\n")
            # NOTE(review): the tree looks loop-invariant and could be hoisted
            # if MorseCodeTree.translate is stateless; kept per-line here to
            # preserve the original behaviour exactly.
            tree = MorseCodeTree()
            words = tree.translate(line)
            words = ''.join(words)  # convert the discrete letters into one string
            print(line)
            print(words)
filename = "morse.txt"
if __name__ == "__main__":#Call the main
main(filename) | Tacohen/Morse-Code | decode.py | Python | bsd-2-clause | 510 |
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
import logging
import os
import sys
import unittest
from fedora.rest.api import Fedora
from fedora.rest.ds import DatastreamProfile, FileItemMetadata, RelsExt, AdministrativeMetadata, ObjectDatastreams, \
EasyMetadata
test_file = "easy-file:1950715"
test_dataset = "easy-dataset:5958"
#@unittest.skip("on-line test")
class TestDatastreamProfile(unittest.TestCase):
@classmethod
def setUpClass(cls):
# set up logging
root = logging.getLogger()
root.setLevel(logging.DEBUG)
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(name)s - %(message)s')
ch.setFormatter(formatter)
root.addHandler(ch)
# set up connection to teasy
cfg_file = os.path.join(os.path.expanduser("~"), "src", "teasy.cfg")
cls.fedora = Fedora(cfg_file=cfg_file)
def test_fetch_easy_file_profile(self):
dsp = DatastreamProfile(test_file, "EASY_FILE")
dsp.fetch()
print(dsp.props)
self.assertEqual(15, len(dsp.props))
self.assertIsNotNone(dsp.ds_size)
def test_fetch_easy_file_metadata_profile(self):
dsp = DatastreamProfile(test_file, "EASY_FILE_METADATA")
dsp.fetch()
print(dsp.props)
self.assertEqual(15, len(dsp.props))
self.assertIsNotNone(dsp.ds_size)
def test_fetch_rels_ext_profile(self):
dsp = DatastreamProfile(test_file, "RELS-EXT")
dsp.fetch()
print(dsp.props)
self.assertEqual(15, len(dsp.props))
self.assertIsNotNone(dsp.ds_size)
class TestFileItemMetadata(unittest.TestCase):
@classmethod
def setUpClass(cls):
# set up logging
root = logging.getLogger()
root.setLevel(logging.DEBUG)
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(name)s - %(message)s')
ch.setFormatter(formatter)
root.addHandler(ch)
def test_fetch(self):
fim = FileItemMetadata(test_file)
fim.fetch()
print(fim.props)
self.assertEqual(10, len(fim.props))
self.assertIsNotNone(fim.fmd_size)
class TestAdministrativeMetadata(unittest.TestCase):
@classmethod
def setUpClass(cls):
# set up logging
root = logging.getLogger()
root.setLevel(logging.DEBUG)
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(name)s - %(message)s')
ch.setFormatter(formatter)
root.addHandler(ch)
# set up connection to teasy
Fedora(cfg_file=os.path.expanduser("~/src/teasy.cfg"))
def test_fetch(self):
amd = AdministrativeMetadata(test_dataset)
amd.fetch()
print(amd.props)
class TestEasyMetadata(unittest.TestCase):
@classmethod
def setUpClass(cls):
# set up logging
root = logging.getLogger()
root.setLevel(logging.DEBUG)
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(name)s - %(message)s')
ch.setFormatter(formatter)
root.addHandler(ch)
# set up connection to teasy
Fedora(cfg_file=os.path.expanduser("~/src/teasy.cfg"))
def test_fetch(self):
emd = EasyMetadata('easy-dataset:20')
emd.fetch()
self.assertEqual('10.5072/dans-249-exct', emd.doi)
class TestRelsExt(unittest.TestCase):
    """Integration test: fetch the RELS-EXT datastream of a known easy-file object."""
    @classmethod
    def setUpClass(cls):
        # set up logging (DEBUG to stdout, same boilerplate as sibling classes)
        root = logging.getLogger()
        root.setLevel(logging.DEBUG)
        ch = logging.StreamHandler(sys.stdout)
        ch.setLevel(logging.DEBUG)
        formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(name)s - %(message)s')
        ch.setFormatter(formatter)
        root.addHandler(ch)
    def test_fetch(self):
        # NOTE(review): unlike the sibling test classes, no Fedora(...)
        # connection is created here -- this appears to rely on the singleton
        # set up by a previously-run class; confirm test-order independence.
        rex = RelsExt(test_file)
        rex.fetch()
        dsid = rex.get_is_subordinate_to()
        print(dsid)
class TestObjectDatastreams(unittest.TestCase):
@classmethod
def setUpClass(cls):
# set up logging
root = logging.getLogger()
root.setLevel(logging.DEBUG)
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(name)s - %(message)s')
ch.setFormatter(formatter)
root.addHandler(ch)
# set up connection to teasy
Fedora.reset()
cfg_file = os.path.join(os.path.expanduser("~"), "src", "teasy.cfg")
cls.fedora = Fedora(cfg_file=cfg_file)
def test_fetch(self):
pid = 'easy-dataset:450'
ods = ObjectDatastreams(pid)
dss = ods.fetch()
print(dss['DATASET_LICENSE'])
print('EMD' in dss)
| DANS-repo/fedora-api | fedora/rest/test/test_ds.py | Python | apache-2.0 | 5,008 |
import json
import datetime
import os
import urlparse
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User, Group, Permission
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.utils import timezone
import mock
from nose.tools import eq_, ok_, assert_raises
from eventlog.models import Log
from crashstats.status.models import StatusMessage
from crashstats.symbols.models import SymbolsUpload
from crashstats.tokens.models import Token
from crashstats.supersearch.models import (
SuperSearchFields,
SuperSearchMissingFields,
)
from crashstats.crashstats import models
from crashstats.crashstats.tests.test_views import (
BaseTestViews,
)
class TestViews(BaseTestViews):
    def setUp(self):
        # Stub the ProductVersions middleware model with a canned response
        # (one featured WaterWolf 19.0 build) and prime its cache so views
        # under test never hit the real service.
        super(TestViews, self).setUp()
        def mocked_product_versions(**params):
            hits = [
                {
                    'is_featured': True,
                    'throttle': 1.0,
                    'end_date': 'string',
                    'start_date': 'integer',
                    'build_type': 'string',
                    'product': 'WaterWolf',
                    'version': '19.0',
                    'has_builds': True
                }
            ]
            return {
                'hits': hits,
                'total': len(hits),
            }
        models.ProductVersions.implementation().get.side_effect = (
            mocked_product_versions
        )
        # prime the cache
        models.ProductVersions().get(active=True)
    def _login(self, is_superuser=True):
        # Sign in via the BaseTestViews helper, then (by default) promote the
        # user to superuser so the manage views are reachable.
        user = super(TestViews, self)._login(
            username='kairo',
            email='[email protected]',
        )
        user.is_superuser = is_superuser
        user.save()
        return user
    def _create_permission(self, name='Mess Around', codename='mess_around'):
        # Create a throwaway Permission for group-assignment tests; the
        # blank-model ContentType exists only to satisfy the FK constraint.
        ct, __ = ContentType.objects.get_or_create(
            model='',
            app_label='crashstats',
        )
        return Permission.objects.create(
            name=name,
            codename=codename,
            content_type=ct
        )
def test_home_page_not_signed_in(self):
home_url = reverse('manage:home')
response = self.client.get(home_url)
assert response.status_code == 302
# because the home also redirects to the first product page
# we can't use assertRedirects
eq_(
urlparse.urlparse(response['location']).path,
settings.LOGIN_URL
)
# if you're logged in, but not a superuser you'll get thrown
# back on the home page with a message
self._login(is_superuser=False)
response = self.client.get(home_url, follow=True)
assert response.status_code == 200
ok_(
'You are signed in but you do not have sufficient permissions '
'to reach the resource you requested.' in response.content
)
def test_home_page_signed_in(self):
user = self._login()
# at the moment it just redirects
home_url = reverse('manage:home')
response = self.client.get(home_url)
eq_(response.status_code, 200)
# certain links on that page
fields_missing_url = reverse('manage:supersearch_fields_missing')
ok_(fields_missing_url in response.content)
users_url = reverse('manage:users')
ok_(users_url in response.content)
products_url = reverse('manage:products')
ok_(products_url in response.content)
releases_url = reverse('manage:releases')
ok_(releases_url in response.content)
user.is_active = False
user.save()
home_url = reverse('manage:home')
response = self.client.get(home_url)
eq_(response.status_code, 302)
def test_users_page(self):
url = reverse('manage:users')
response = self.client.get(url)
eq_(response.status_code, 302)
self._login()
response = self.client.get(url)
eq_(response.status_code, 200)
Group.objects.create(name='Wackos')
response = self.client.get(url)
eq_(response.status_code, 200)
ok_('Wackos' in response.content)
    def test_users_data(self):
        """The users_data JSON endpoint requires login and reports each
        user's email, id, flags and group memberships."""
        url = reverse('manage:users_data')
        response = self.client.get(url)
        eq_(response.status_code, 302)
        user = self._login()
        response = self.client.get(url)
        eq_(response.status_code, 200)
        data = json.loads(response.content)
        eq_(data['count'], 1)
        eq_(data['users'][0]['email'], user.email)
        eq_(data['users'][0]['id'], user.pk)
        eq_(data['users'][0]['is_superuser'], True)
        eq_(data['users'][0]['is_active'], True)
        eq_(data['users'][0]['groups'], [])
        # after joining a group, it should show up in the payload
        austrians = Group.objects.create(name='Austrians')
        user.groups.add(austrians)
        response = self.client.get(url)
        eq_(response.status_code, 200)
        data = json.loads(response.content)
        groups = data['users'][0]['groups']
        group = groups[0]
        eq_(group['name'], 'Austrians')
        eq_(group['id'], austrians.pk)
    def test_users_data_pagination(self):
        """users_data pages by USERS_ADMIN_BATCH_SIZE, sorted by most
        recent last_login first."""
        url = reverse('manage:users_data')
        response = self.client.get(url)
        eq_(response.status_code, 302)
        user = self._login()
        # push the logged-in user to the very end of the sort order
        user.last_login -= datetime.timedelta(days=365)
        user.save()
        now = timezone.now()
        for i in range(1, 101): # 100 times, 1-100
            User.objects.create(
                username='user%03d' % i,
                email='user%[email protected]' % i,
                last_login=now - datetime.timedelta(days=i)
            )
        response = self.client.get(url)
        eq_(response.status_code, 200)
        data = json.loads(response.content)
        eq_(data['count'], 101)
        # because it's sorted by last_login
        eq_(data['users'][0]['email'], '[email protected]')
        eq_(len(data['users']), settings.USERS_ADMIN_BATCH_SIZE)
        eq_(data['page'], 1)
        eq_(data['batch_size'], settings.USERS_ADMIN_BATCH_SIZE)
        # let's go to page 2
        response = self.client.get(url, {'page': 2})
        eq_(response.status_code, 200)
        data = json.loads(response.content)
        eq_(data['count'], 101)
        # because it's sorted by last_login
        eq_(data['users'][0]['email'], '[email protected]')
        eq_(len(data['users']), settings.USERS_ADMIN_BATCH_SIZE)
        eq_(data['page'], 2)
        eq_(data['batch_size'], settings.USERS_ADMIN_BATCH_SIZE)
        # the last page holds only the year-old logged-in user
        response = self.client.get(url, {'page': 11})
        eq_(response.status_code, 200)
        data = json.loads(response.content)
        eq_(data['count'], 101)
        # because it's sorted by last_login
        eq_(data['users'][0]['email'], user.email)
        eq_(len(data['users']), 1)
        eq_(data['page'], 11)
        eq_(data['batch_size'], settings.USERS_ADMIN_BATCH_SIZE)
def test_users_data_pagination_bad_request(self):
url = reverse('manage:users_data')
self._login()
response = self.client.get(url)
eq_(response.status_code, 200)
response = self.client.get(url, {'page': 0})
eq_(response.status_code, 400)
response = self.client.get(url, {'page': -1})
eq_(response.status_code, 400)
response = self.client.get(url, {'page': 'NaN'})
eq_(response.status_code, 400)
def test_users_data_filter(self):
url = reverse('manage:users_data')
self._login()
group_a = Group.objects.create(name='Group A')
group_b = Group.objects.create(name='Group B')
def create_user(username, **kwargs):
return User.objects.create(
username=username,
email=username + '@example.com',
last_login=datetime.datetime.utcnow(),
**kwargs
)
bob = create_user('bob')
bob.groups.add(group_a)
dick = create_user('dick')
dick.groups.add(group_b)
harry = create_user('harry')
harry.groups.add(group_b)
harry.groups.add(group_b)
create_user('bill', is_active=False)
# filter by email
response = self.client.get(url, {'email': 'b'})
eq_(response.status_code, 200)
data = json.loads(response.content)
eq_(data['count'], 2)
eq_(
['[email protected]', '[email protected]'],
[x['email'] for x in data['users']]
)
# filter by email and group
response = self.client.get(url, {
'email': 'b',
'group': group_a.pk
})
eq_(response.status_code, 200)
data = json.loads(response.content)
eq_(data['count'], 1)
eq_(
['[email protected]'],
[x['email'] for x in data['users']]
)
# filter by active and superuser
response = self.client.get(url, {
'active': '1',
'superuser': '-1'
})
eq_(response.status_code, 200)
data = json.loads(response.content)
eq_(data['count'], 3)
eq_(
['[email protected]', '[email protected]', '[email protected]'],
[x['email'] for x in data['users']]
)
# don't send in junk
response = self.client.get(url, {
'group': 'xxx',
})
eq_(response.status_code, 400)
    def test_edit_user(self):
        """Posting to the user edit page updates flags and groups, and
        logs an event recording the before/after values."""
        group_a = Group.objects.create(name='Group A')
        group_b = Group.objects.create(name='Group B')
        bob = User.objects.create(
            username='bob',
            email='[email protected]',
            is_active=False,
            is_superuser=True
        )
        bob.groups.add(group_a)
        url = reverse('manage:user', args=(bob.pk,))
        response = self.client.get(url)
        eq_(response.status_code, 302)
        user = self._login()
        response = self.client.get(url)
        eq_(response.status_code, 200)
        ok_('[email protected]' in response.content)
        # flip every property: new group, activate, strip superuser
        response = self.client.post(url, {
            'groups': group_b.pk,
            'is_active': 'true',
            'is_superuser': ''
        })
        eq_(response.status_code, 302)
        # reload from database
        bob = User.objects.get(pk=bob.pk)
        ok_(bob.is_active)
        ok_(not bob.is_superuser)
        eq_(list(bob.groups.all()), [group_b])
        # check that the event got logged
        event, = Log.objects.all()
        eq_(event.user, user)
        eq_(event.action, 'user.edit')
        eq_(event.extra['id'], bob.pk)
        # 'change' records [old, new] pairs per field
        change = event.extra['change']
        eq_(change['is_superuser'], [True, False])
        eq_(change['is_active'], [False, True])
        eq_(change['groups'], [['Group A'], ['Group B']])
    def test_groups(self):
        """The groups page requires login and lists each group with its
        attached permissions."""
        url = reverse('manage:groups')
        response = self.client.get(url)
        eq_(response.status_code, 302)
        self._login()
        response = self.client.get(url)
        eq_(response.status_code, 200)
        wackos = Group.objects.create(name='Wackos')
        # Attach a known permission to it
        permission = self._create_permission()
        wackos.permissions.add(permission)
        response = self.client.get(url)
        eq_(response.status_code, 200)
        ok_('Wackos' in response.content)
        ok_('Mess Around' in response.content)
    def test_group(self):
        """Full CRUD cycle for groups: create, edit, delete — each step
        must be recorded in the Log with the relevant before/after data."""
        url = reverse('manage:groups')
        user = self._login()
        ct = ContentType.objects.create(
            model='',
            app_label='crashstats.crashstats',
        )
        p1 = Permission.objects.create(
            name='Mess Around',
            codename='mess_around',
            content_type=ct
        )
        p2 = Permission.objects.create(
            name='Launch Missiles',
            codename='launch_missiles',
            content_type=ct
        )
        response = self.client.get(url)
        eq_(response.status_code, 200)
        ok_(p1.name in response.content)
        ok_(p2.name in response.content)
        # create
        data = {
            'name': 'New Group',
            'permissions': [p2.id]
        }
        response = self.client.post(url, data)
        eq_(response.status_code, 302)
        group = Group.objects.get(name=data['name'])
        eq_(list(group.permissions.all()), [p2])
        # check that it got logged
        event, = Log.objects.all()
        eq_(event.user, user)
        eq_(event.action, 'group.add')
        eq_(event.extra, {
            'id': group.id,
            'name': 'New Group',
            'permissions': ['Launch Missiles']
        })
        # edit it
        edit_url = reverse('manage:group', args=(group.pk,))
        response = self.client.get(edit_url)
        eq_(response.status_code, 200)
        data = {
            'name': 'New New Group',
            'permissions': [p1.id]
        }
        response = self.client.post(edit_url, data)
        eq_(response.status_code, 302)
        group = Group.objects.get(name=data['name'])
        eq_(list(group.permissions.all()), [p1])
        # most recent log entry records the diff
        event, = Log.objects.all()[:1]
        eq_(event.user, user)
        eq_(event.action, 'group.edit')
        eq_(event.extra['change']['name'], ['New Group', 'New New Group'])
        eq_(event.extra['change']['permissions'], [
            ['Launch Missiles'],
            ['Mess Around']
        ])
        # delete it
        response = self.client.post(url, {'delete': group.pk})
        eq_(response.status_code, 302)
        ok_(not Group.objects.filter(name=data['name']))
        event, = Log.objects.all()[:1]
        eq_(event.user, user)
        eq_(event.action, 'group.delete')
        eq_(event.extra['name'], data['name'])
def test_analyze_model_fetches(self):
self._login()
url = reverse('manage:analyze_model_fetches')
response = self.client.get(url)
eq_(response.status_code, 200)
def test_render_graphics_devices_page(self):
url = reverse('manage:graphics_devices')
response = self.client.get(url)
eq_(response.status_code, 302)
self._login()
response = self.client.get(url)
eq_(response.status_code, 200)
    def test_graphics_devices_lookup(self):
        """The lookup endpoint requires vendor_hex and adapter_hex query
        parameters and proxies the GraphicsDevices model hit."""
        self._login()
        url = reverse('manage:graphics_devices_lookup')
        def mocked_get(**params):
            # only answer the exact vendor/adapter pair this test sends
            if (
                'adapter_hex' in params and
                params['adapter_hex'] == 'xyz123' and
                'vendor_hex' in params and
                params['vendor_hex'] == 'abc123'
            ):
                return {
                    "hits": [
                        {
                            "vendor_hex": "abc123",
                            "adapter_hex": "xyz123",
                            "vendor_name": "Logictech",
                            "adapter_name": "Webcamera"
                        }
                    ],
                    "total": 1
                }
            raise NotImplementedError(url)
        models.GraphicsDevices.implementation().get.side_effect = (
            mocked_get
        )
        # missing required parameters -> bad request
        response = self.client.get(url)
        eq_(response.status_code, 400)
        response = self.client.get(url, {
            'vendor_hex': 'abc123',
            'adapter_hex': 'xyz123',
        })
        eq_(response.status_code, 200)
        content = json.loads(response.content)
        eq_(content['total'], 1)
        eq_(
            content['hits'][0],
            {
                'vendor_hex': 'abc123',
                'adapter_hex': 'xyz123',
                'vendor_name': 'Logictech',
                'adapter_name': 'Webcamera'
            }
        )
    def test_graphics_devices_edit(self):
        """Submitting a single graphics device posts it to the model and
        logs a 'graphicsdevices.add' event with the payload."""
        user = self._login()
        url = reverse('manage:graphics_devices')
        def mocked_post(**payload):
            # the view must forward exactly the submitted record
            data = payload['data']
            eq_(
                data[0],
                {
                    'vendor_hex': 'abc123',
                    'adapter_hex': 'xyz123',
                    'vendor_name': 'Logictech',
                    'adapter_name': 'Webcamera'
                }
            )
            return True
        models.GraphicsDevices.implementation().post.side_effect = (
            mocked_post
        )
        data = {
            'vendor_hex': 'abc123',
            'adapter_hex': 'xyz123',
            'vendor_name': 'Logictech',
            'adapter_name': 'Webcamera'
        }
        response = self.client.post(url, data)
        eq_(response.status_code, 302)
        ok_(url in response['location'])
        event, = Log.objects.all()
        eq_(event.user, user)
        eq_(event.action, 'graphicsdevices.add')
        eq_(event.extra['payload'], [data])
        eq_(event.extra['success'], True)
    def test_graphics_devices_csv_upload_pcidatabase_com(self):
        """Uploading a pcidatabase.com-style CSV parses all rows, posts
        them to the model and logs the upload."""
        user = self._login()
        url = reverse('manage:graphics_devices')
        def mocked_post(**payload):
            data = payload['data']
            # first parsed row of the sample fixture
            eq_(
                data[0],
                {
                    'vendor_hex': '0x0033',
                    'adapter_hex': '0x002f',
                    'vendor_name': 'Paradyne Corp.',
                    'adapter_name': '.43 ieee 1394 controller'
                }
            )
            eq_(len(data), 7)
            return True
        models.GraphicsDevices.implementation().post.side_effect = (
            mocked_post
        )
        # fixture file shipped next to this test module
        sample_file = os.path.join(
            os.path.dirname(__file__),
            'sample-graphics.csv'
        )
        with open(sample_file) as fp:
            response = self.client.post(url, {
                'file': fp,
                'database': 'pcidatabase.com',
            })
            eq_(response.status_code, 302)
            ok_(url in response['location'])
        event, = Log.objects.all()
        eq_(event.user, user)
        eq_(event.action, 'graphicsdevices.post')
        eq_(event.extra['success'], True)
        eq_(event.extra['database'], 'pcidatabase.com')
        eq_(event.extra['no_lines'], 7)
    def test_graphics_devices_csv_upload_pci_ids(self):
        """Uploading a pci.ids-format file parses all rows, posts them to
        the model and logs the upload."""
        user = self._login()
        url = reverse('manage:graphics_devices')
        def mocked_post(**payload):
            data = payload['data']
            # first parsed row of the sample fixture
            eq_(
                data[0],
                {
                    'vendor_hex': '0x0010',
                    'adapter_hex': '0x8139',
                    'vendor_name': 'Allied Telesis, Inc',
                    'adapter_name': 'AT-2500TX V3 Ethernet'
                }
            )
            eq_(len(data), 6)
            return True
        models.GraphicsDevices.implementation().post.side_effect = (
            mocked_post
        )
        # fixture file shipped next to this test module
        sample_file = os.path.join(
            os.path.dirname(__file__),
            'sample-pci.ids'
        )
        with open(sample_file) as fp:
            response = self.client.post(url, {
                'file': fp,
                'database': 'pci.ids',
            })
            eq_(response.status_code, 302)
            ok_(url in response['location'])
        event, = Log.objects.all()
        eq_(event.user, user)
        eq_(event.action, 'graphicsdevices.post')
        eq_(event.extra['success'], True)
        eq_(event.extra['database'], 'pci.ids')
        eq_(event.extra['no_lines'], 6)
def test_symbols_uploads(self):
url = reverse('manage:symbols_uploads')
response = self.client.get(url)
eq_(response.status_code, 302)
self._login()
response = self.client.get(url)
eq_(response.status_code, 200)
    def test_supersearch_fields_missing(self):
        """The missing-fields page lists every field reported missing by
        the SuperSearchMissingFields service."""
        self._login()
        url = reverse('manage:supersearch_fields_missing')
        def mocked_supersearchfields(**params):
            # minimal known-fields payload: a single 'product' field
            return {
                'product': {
                    'name': 'product',
                    'namespace': 'processed_crash',
                    'in_database_name': 'product',
                    'query_type': 'enum',
                    'form_field_choices': None,
                    'permissions_needed': [],
                    'default_value': None,
                    'is_exposed': True,
                    'is_returned': True,
                    'is_mandatory': False,
                }
            }
        def mocked_supersearchfields_get_missing_fields(**params):
            return {
                'hits': [
                    'field_a',
                    'namespace1.field_b',
                    'namespace2.subspace1.field_c',
                ],
                'total': 3
            }
        supersearchfields_mock_get = mock.Mock()
        supersearchfields_mock_get.side_effect = mocked_supersearchfields
        # NOTE(review): this patches the class attribute directly and never
        # restores it — presumably acceptable because tests re-mock it;
        # confirm there is no cross-test leakage.
        SuperSearchFields.get = supersearchfields_mock_get
        SuperSearchMissingFields.implementation().get.side_effect = (
            mocked_supersearchfields_get_missing_fields
        )
        response = self.client.get(url)
        eq_(response.status_code, 200)
        ok_('field_a' in response.content)
        ok_('namespace1.field_b' in response.content)
        ok_('namespace2.subspace1.field_c' in response.content)
    def test_create_product(self):
        """Creating a product rejects duplicates of an existing product
        and logs a 'product.add' event on success."""
        def mocked_post(**options):
            eq_(options['product'], 'WaterCat')
            eq_(options['version'], '1.0')
            return True
        models.ProductVersions.implementation().post.side_effect = (
            mocked_post
        )
        user = self._login()
        url = reverse('manage:products')
        response = self.client.get(url)
        eq_(response.status_code, 200)
        # the initial version field is pre-filled
        ok_('value="1.0"' in response.content)
        # first attempt to create an existing combo
        response = self.client.post(url, {
            'product': 'WaterWolf',
            'initial_version': '1.0'
        })
        eq_(response.status_code, 200)
        ok_('WaterWolf already exists' in response.content)
        # now with a new unique product
        response = self.client.post(url, {
            'product': 'WaterCat',
            'initial_version': '1.0'
        })
        eq_(response.status_code, 302)
        event, = Log.objects.all()
        eq_(event.user, user)
        eq_(event.action, 'product.add')
        eq_(event.extra['product'], 'WaterCat')
    def test_create_release(self):
        """Release creation validates build_id format/age and numeric
        fields before posting to the Releases service and logging."""
        def mocked_release_post(**params):
            eq_(params['product'], 'WaterCat')
            eq_(params['version'], '19.0')
            eq_(params['beta_number'], 1)
            eq_(params['throttle'], 0)
            return True
        models.Releases.implementation().post.side_effect = mocked_release_post
        user = self._login()
        url = reverse('manage:releases')
        response = self.client.get(url)
        eq_(response.status_code, 200)
        # there should be a dropdown with some known platforms
        ok_('value="Windows"' in response.content)
        ok_('value="Mac OS X"' in response.content)
        # first attempt to create with a product version that doesn't exist
        now = datetime.datetime.utcnow()
        data = {
            'product': 'WaterCat',
            'version': '99.9',
            'update_channel': 'beta',
            'build_id': now.strftime('%Y%m%d%H%M'),
            'platform': 'Windows',
            'beta_number': '0',
            'release_channel': 'Beta',
            'throttle': '1'
        }
        # set some bad values that won't pass validation
        data['throttle'] = 'xxx'
        data['beta_number'] = 'yyy'
        data['version'] = '19.0'
        data['build_id'] = 'XX'
        response = self.client.post(url, data)
        eq_(response.status_code, 200)
        ok_('Must start with YYYYMMDD' in response.content)
        # both throttle and beta_number fail the numeric check
        eq_(response.content.count('not a number'), 2)
        data['build_id'] = '20140101XXXXX'
        response = self.client.post(url, data)
        eq_(response.status_code, 200)
        ok_('Date older than 30 days' in response.content)
        # finally, all with good parameters
        data['beta_number'] = '1'
        data['throttle'] = '0'
        data['build_id'] = now.strftime('%Y%m%d%H%M')
        response = self.client.post(url, data)
        eq_(response.status_code, 302)
        event, = Log.objects.all()
        eq_(event.user, user)
        eq_(event.action, 'release.add')
        eq_(event.extra['product'], 'WaterCat')
    # NOTE(review): `rpost` is unused; presumably the patch exists only to
    # stop any real HTTP POST escaping during the test — confirm before
    # removing the decorator.
    @mock.patch('requests.post')
    def test_create_release_with_null_beta_number(self, rpost):
        """A whitespace-only beta_number is submitted as None to the
        Releases service."""
        mock_calls = []
        def mocked_release_post(**params):
            # record that the service was actually hit
            mock_calls.append(True)
            eq_(params['beta_number'], None)
            return True
        models.Releases.implementation().post.side_effect = mocked_release_post
        self._login()
        now = datetime.datetime.utcnow()
        data = {
            'product': 'WaterWolf',
            'version': '99.9',
            'update_channel': 'beta',
            'build_id': now.strftime('%Y%m%d%H%M'),
            'platform': 'Windows',
            'beta_number': ' ',
            'release_channel': 'Beta',
            'throttle': '1'
        }
        url = reverse('manage:releases')
        response = self.client.post(url, data)
        eq_(response.status_code, 302)
        # make sure it really called the POST to /releases/release/
        ok_(mock_calls)
    def test_view_events_page(self):
        """The events page requires login and renders each distinct Log
        action exactly once in the filter dropdown."""
        url = reverse('manage:events')
        response = self.client.get(url)
        eq_(response.status_code, 302)
        user = self._login()
        # this page will iterate over all unique possible Log actions
        Log.objects.create(
            user=user,
            action='actionA'
        )
        Log.objects.create(
            user=user,
            action='actionB'
        )
        Log.objects.create(
            user=user,
            action='actionA'
        )
        response = self.client.get(url)
        eq_(response.status_code, 200)
        # for the action filter drop-downs
        eq_(response.content.count('value="actionA"'), 1)
        eq_(response.content.count('value="actionB"'), 1)
    def test_events_data(self):
        """events_data pages by EVENTS_ADMIN_BATCH_SIZE, newest first, and
        supports filtering by user and by action."""
        url = reverse('manage:events_data')
        response = self.client.get(url)
        eq_(response.status_code, 302)
        user = self._login()
        Log.objects.create(
            user=user,
            action='actionA',
            extra={'foo': True}
        )
        other_user = User.objects.create(
            username='other',
            email='[email protected]'
        )
        Log.objects.create(
            user=other_user,
            action='actionB',
            extra={'bar': False}
        )
        third_user = User.objects.create(
            username='third',
            email='[email protected]',
        )
        # two full batches of older events to force pagination
        now = timezone.now()
        for i in range(settings.EVENTS_ADMIN_BATCH_SIZE * 2):
            Log.objects.create(
                user=third_user,
                action='actionX',
                timestamp=now - datetime.timedelta(
                    seconds=i + 1
                )
            )
        response = self.client.get(url)
        eq_(response.status_code, 200)
        data = json.loads(response.content)
        eq_(data['count'], 2 + settings.EVENTS_ADMIN_BATCH_SIZE * 2)
        # the most recent should be "actionB"
        eq_(len(data['events']), settings.EVENTS_ADMIN_BATCH_SIZE)
        first = data['events'][0]
        eq_(first['action'], 'actionB')
        eq_(first['extra'], {'bar': False})
        # try to go to another page
        response = self.client.get(url, {'page': 'xxx'})
        eq_(response.status_code, 400)
        response = self.client.get(url, {'page': '0'})
        eq_(response.status_code, 400)
        response = self.client.get(url, {'page': '2'})
        eq_(response.status_code, 200)
        data = json.loads(response.content)
        first = data['events'][0]
        # we should now be on one of the actionX events
        eq_(first['action'], 'actionX')
        # we can filter by user
        response = self.client.get(url, {'user': 'other'})
        eq_(response.status_code, 200)
        data = json.loads(response.content)
        eq_(data['count'], 1)
        # we can filter by action
        response = self.client.get(url, {'action': 'actionX'})
        eq_(response.status_code, 200)
        data = json.loads(response.content)
        eq_(data['count'], settings.EVENTS_ADMIN_BATCH_SIZE * 2)
    def test_events_data_urls(self):
        """some logged events have a URL associated with them"""
        user = self._login()
        Log.objects.create(
            user=user,
            action='user.edit',
            extra={'id': user.id}
        )
        group = Group.objects.create(name='Wackos')
        Log.objects.create(
            user=user,
            action='group.add',
            extra={'id': group.id}
        )
        Log.objects.create(
            user=user,
            action='group.edit',
            extra={'id': group.id}
        )
        url = reverse('manage:events_data')
        response = self.client.get(url)
        data = json.loads(response.content)
        eq_(data['count'], 3)
        # events come back newest-first, so unpack in reverse
        three, two, one = data['events']
        eq_(one['url'], reverse('manage:user', args=(user.id,)))
        eq_(two['url'], reverse('manage:group', args=(group.id,)))
        eq_(three['url'], reverse('manage:group', args=(group.id,)))
    def test_api_tokens(self):
        """The API tokens page requires login and offers the available
        permissions in a dropdown."""
        permission = self._create_permission()
        url = reverse('manage:api_tokens')
        response = self.client.get(url)
        # because we're not logged in
        eq_(response.status_code, 302)
        self._login()
        response = self.client.get(url)
        eq_(response.status_code, 200)
        # expects some permissions to be available as dropdowns
        ok_(
            '<option value="%s">%s</option>' % (
                permission.id,
                permission.name
            ) in response.content
        )
    def test_create_api_token(self):
        """A token can be created for a user who holds the requested
        permission; expiry and details are logged."""
        superuser = self._login()
        user = User.objects.create_user(
            'user',
            '[email protected]',
            'secret'
        )
        permission = self._create_permission()
        # the user must belong to a group that has this permission
        wackos = Group.objects.create(name='Wackos')
        wackos.permissions.add(permission)
        user.groups.add(wackos)
        assert user.has_perm('crashstats.' + permission.codename)
        url = reverse('manage:api_tokens')
        # the email lookup is case-insensitive
        response = self.client.post(url, {
            'user': user.email.upper(),
            'permissions': [permission.id],
            'notes': 'Some notes',
            'expires': 7
        })
        eq_(response.status_code, 302)
        token = Token.objects.get(
            user=user,
            notes='Some notes',
        )
        eq_(list(token.permissions.all()), [permission])
        # expiry should be 7 days in the future
        lasting = (timezone.now() - token.expires).days * -1
        eq_(lasting, 7)
        event, = Log.objects.all()
        eq_(event.user, superuser)
        eq_(event.action, 'api_token.create')
        eq_(event.extra['notes'], 'Some notes')
        ok_(event.extra['expires'])
        eq_(event.extra['expires_days'], 7)
        eq_(event.extra['permissions'], permission.name)
    def test_create_api_token_with_no_permissions(self):
        """Omitting the permissions field creates a token carrying no
        permissions at all."""
        superuser = self._login()
        user = User.objects.create_user(
            'user',
            '[email protected]',
            'secret'
        )
        permission = self._create_permission()
        # the user must belong to a group that has this permission
        wackos = Group.objects.create(name='Wackos')
        wackos.permissions.add(permission)
        user.groups.add(wackos)
        assert user.has_perm('crashstats.' + permission.codename)
        url = reverse('manage:api_tokens')
        # note: no 'permissions' key in the POST data
        response = self.client.post(url, {
            'user': user.email.upper(),
            'notes': 'Some notes',
            'expires': 7
        })
        eq_(response.status_code, 302)
        token = Token.objects.get(
            user=user,
            notes='Some notes',
        )
        eq_(list(token.permissions.all()), [])
        lasting = (timezone.now() - token.expires).days * -1
        eq_(lasting, 7)
        event, = Log.objects.all()
        eq_(event.user, superuser)
        eq_(event.action, 'api_token.create')
        eq_(event.extra['notes'], 'Some notes')
        ok_(event.extra['expires'])
        eq_(event.extra['expires_days'], 7)
        eq_(event.extra['permissions'], '')
    def test_create_api_token_rejected(self):
        """Token creation is rejected for unknown, ambiguous, permission-
        lacking, or inactive users, with a distinct message for each."""
        self._login()
        user = User.objects.create_user(
            'koala',
            '[email protected]',
            'secret'
        )
        permission = self._create_permission()
        url = reverse('manage:api_tokens')
        # no user matches 'xxx'
        response = self.client.post(url, {
            'user': 'xxx',
            'permissions': [permission.id],
            'notes': '',
            'expires': 7
        })
        eq_(response.status_code, 200)
        ok_('No user found by that email address' in response.content)
        response = self.client.post(url, {
            'user': 'k', # there will be two users whose email starts with k
            'permissions': [permission.id],
            'notes': '',
            'expires': 7
        })
        eq_(response.status_code, 200)
        ok_(
            'More than one user found by that email address'
            in response.content
        )
        # user matched, but has no permissions at all yet
        response = self.client.post(url, {
            'user': 'koala@example',
            'permissions': [permission.id],
            'notes': '',
            'expires': 7
        })
        eq_(response.status_code, 200)
        ok_(
            '[email protected] does not have the permission '
            '"Mess Around"'
            in response.content
        )
        ok_(
            '[email protected] has no permissions!'
            in response.content
        )
        # suppose the user has some other permission, only
        permission2 = self._create_permission(
            'Do Things',
            'do_things'
        )
        group = Group.objects.create(name='Noobs')
        group.permissions.add(permission2)
        user.groups.add(group)
        assert user.has_perm('crashstats.do_things')
        response = self.client.post(url, {
            'user': 'koala@example',
            'permissions': [permission.id],
            'notes': '',
            'expires': 7
        })
        eq_(response.status_code, 200)
        ok_(
            '[email protected] does not have the permission '
            '"Mess Around"'
            in response.content
        )
        ok_(
            'Only permissions possible are: Do Things'
            in response.content
        )
        # you can't create a token for an inactive user
        user.is_active = False
        user.save()
        response = self.client.post(url, {
            'user': 'koala',
            'permissions': [permission.id],
            'notes': '',
            'expires': 7
        })
        eq_(response.status_code, 200)
        ok_(
            '[email protected] is not an active user'
            in response.content
        )
    def test_api_tokens_data(self):
        """api_tokens_data serializes tokens and supports filtering by
        email, key and expired state; bad page/expired values are 400."""
        url = reverse('manage:api_tokens_data')
        response = self.client.get(url)
        eq_(response.status_code, 302)
        user = self._login()
        response = self.client.get(url)
        eq_(response.status_code, 200)
        result = json.loads(response.content)
        eq_(result['tokens'], [])
        eq_(result['count'], 0)
        eq_(result['page'], 1)
        eq_(result['batch_size'], settings.API_TOKENS_ADMIN_BATCH_SIZE)
        expires = timezone.now()
        expires += datetime.timedelta(
            days=settings.TOKENS_DEFAULT_EXPIRATION_DAYS
        )
        token = Token.objects.create(
            user=user,
            notes='Some notes',
            expires=expires
        )
        assert token.key # automatically generated
        permission = self._create_permission()
        token.permissions.add(permission)
        response = self.client.get(url)
        eq_(response.status_code, 200)
        result = json.loads(response.content)
        # full serialized form of a single token
        expected_token = {
            'created': token.created.isoformat(),
            'notes': 'Some notes',
            'expires': expires.isoformat(),
            'id': token.id,
            'expired': False,
            'permissions': [permission.name],
            'user': user.email,
            'key': token.key,
        }
        eq_(result['tokens'], [expected_token])
        eq_(result['count'], 1)
        # mess with the page parameter
        response = self.client.get(url, {'page': '0'})
        eq_(response.status_code, 400)
        response = self.client.get(url, {'expired': 'junk'})
        eq_(response.status_code, 400)
        # filter by email
        response = self.client.get(url, {'email': user.email[:5]})
        eq_(response.status_code, 200)
        result = json.loads(response.content)
        eq_(result['tokens'], [expected_token])
        eq_(result['count'], 1)
        # NOTE(review): 'user' here appears to act as the email filter with
        # a non-matching value — confirm the view's parameter name.
        response = self.client.get(url, {'user': 'junk'})
        eq_(response.status_code, 200)
        result = json.loads(response.content)
        eq_(result['tokens'], [])
        eq_(result['count'], 0)
        # filter by key
        response = self.client.get(url, {'key': token.key[:5]})
        eq_(response.status_code, 200)
        result = json.loads(response.content)
        eq_(result['tokens'], [expected_token])
        eq_(result['count'], 1)
        response = self.client.get(url, {'key': 'junk'})
        eq_(response.status_code, 200)
        result = json.loads(response.content)
        eq_(result['tokens'], [])
        eq_(result['count'], 0)
        # filter by expired
        response = self.client.get(url, {'expired': 'no'})
        eq_(response.status_code, 200)
        result = json.loads(response.content)
        eq_(result['tokens'], [expected_token])
        eq_(result['count'], 1)
        response = self.client.get(url, {'expired': 'yes'})
        eq_(response.status_code, 200)
        result = json.loads(response.content)
        eq_(result['tokens'], [])
        eq_(result['count'], 0)
        # force the token into the past so it counts as expired
        token.expires = timezone.now() - datetime.timedelta(days=1)
        token.save()
        response = self.client.get(url, {'expired': 'yes'})
        eq_(response.status_code, 200)
        result = json.loads(response.content)
        expected_token['expires'] = token.expires.isoformat()
        expected_token['expired'] = True
        eq_(result['tokens'], [expected_token])
        eq_(result['count'], 1)
    def test_api_tokens_delete(self):
        """Deleting a token is POST-only, requires login, 404s on unknown
        ids, and logs the deletion details."""
        url = reverse('manage:api_tokens_delete')
        response = self.client.get(url)
        eq_(response.status_code, 405)
        response = self.client.post(url)
        eq_(response.status_code, 302)
        user = self._login()
        # missing id -> bad request
        response = self.client.post(url)
        eq_(response.status_code, 400)
        response = self.client.post(url, {'id': '99999'})
        eq_(response.status_code, 404)
        expires = timezone.now()
        expires += datetime.timedelta(
            days=settings.TOKENS_DEFAULT_EXPIRATION_DAYS
        )
        token = Token.objects.create(
            user=user,
            notes='Some notes',
            expires=expires
        )
        assert token.key # automatically generated
        permission = self._create_permission()
        token.permissions.add(permission)
        response = self.client.post(url, {'id': token.id})
        eq_(response.status_code, 200) # it's AJAX
        ok_(not Token.objects.all())
        event, = Log.objects.all()
        eq_(event.user, user)
        eq_(event.action, 'api_token.delete')
        eq_(event.extra['notes'], 'Some notes')
        eq_(event.extra['user'], user.email)
        eq_(event.extra['permissions'], permission.name)
    def test_status_message(self):
        """The status message page requires login and offers severity
        options in a dropdown."""
        url = reverse('manage:status_message')
        # Test while logged out.
        response = self.client.get(url)
        eq_(response.status_code, 302)
        self._login()
        response = self.client.get(url)
        eq_(response.status_code, 200)
        # expects some severity options to be available as dropdowns
        ok_(
            '<option value="%s">%s</option>' % (
                'critical',
                'Critical'
            ) in response.content
        )
    def test_create_status_message(self):
        """Posting a status message requires login and both fields, and
        logs a 'status_message.create' event with the severity."""
        url = reverse('manage:status_message')
        # Test while logged out.
        response = self.client.post(url)
        eq_(response.status_code, 302)
        user = self._login()
        # empty form -> validation errors re-render the page
        response = self.client.post(url)
        eq_(response.status_code, 200)
        ok_('This field is required' in response.content)
        response = self.client.post(url, {
            'message': 'Foo',
            'severity': 'critical'
        })
        eq_(response.status_code, 302)
        event, = Log.objects.all()
        eq_(event.user, user)
        eq_(event.action, 'status_message.create')
        eq_(event.extra['severity'], 'critical')
    def test_disable_status_message(self):
        """Disabling a status message is POST-only, 404s on unknown ids,
        disables the record and logs the action."""
        url = reverse('manage:status_message_disable', args=('99999',))
        response = self.client.get(url)
        eq_(response.status_code, 302)
        user = self._login()
        # GET is not allowed on this endpoint
        response = self.client.get(url)
        eq_(response.status_code, 405)
        response = self.client.post(url)
        eq_(response.status_code, 404)
        status = StatusMessage.objects.create(
            message='foo',
            severity='critical',
        )
        url = reverse('manage:status_message_disable', args=(status.id,))
        response = self.client.post(url)
        eq_(response.status_code, 302) # redirect on success
        # Verify there is no enabled statuses anymore.
        ok_(not StatusMessage.objects.filter(enabled=True))
        event, = Log.objects.all()
        eq_(event.user, user)
        eq_(event.action, 'status_message.disable')
    def test_crash_me_now(self):
        """The crash-me-now page requires login; posting it deliberately
        raises the exception type named in the form."""
        url = reverse('manage:crash_me_now')
        response = self.client.get(url)
        eq_(response.status_code, 302)
        self._login()
        response = self.client.get(url)
        eq_(response.status_code, 200)
        # the POST is expected to propagate the requested exception
        assert_raises(
            NameError,
            self.client.post,
            url,
            {
                'exception_type': 'NameError',
                'exception_value': 'Crash!'
            }
        )
    def test_symbols_uploads_data_pagination(self):
        """symbols_uploads_data pages by SYMBOLS_UPLOADS_ADMIN_BATCH_SIZE,
        newest first, and filters by user email and filename."""
        url = reverse('manage:symbols_uploads_data')
        response = self.client.get(url)
        eq_(response.status_code, 302)
        self._login()
        # fill exactly one batch with another user's uploads
        other = User.objects.create(username='o', email='[email protected]')
        for i in range(settings.SYMBOLS_UPLOADS_ADMIN_BATCH_SIZE):
            SymbolsUpload.objects.create(
                user=other,
                filename='file-%d.zip' % i,
                size=1000 + i,
                content='Some Content'
            )
        # add this last so it shows up first
        user = User.objects.create(username='user', email='[email protected]')
        upload = SymbolsUpload.objects.create(
            user=user,
            filename='file.zip',
            size=123456,
            content='Some Content'
        )
        response = self.client.get(url)
        eq_(response.status_code, 200)
        data = json.loads(response.content)
        eq_(data['count'], settings.SYMBOLS_UPLOADS_ADMIN_BATCH_SIZE + 1)
        eq_(data['batch_size'], settings.SYMBOLS_UPLOADS_ADMIN_BATCH_SIZE)
        eq_(data['page'], 1)
        items = data['items']
        eq_(len(items), settings.SYMBOLS_UPLOADS_ADMIN_BATCH_SIZE)
        first, = items[:1]
        eq_(first['id'], upload.id)
        eq_(first['created'], upload.created.isoformat())
        eq_(first['filename'], 'file.zip')
        eq_(first['size'], 123456)
        eq_(first['url'], reverse('symbols:content', args=(first['id'],)))
        eq_(first['user'], {
            'email': user.email,
            'url': reverse('manage:user', args=(user.id,)),
            'id': user.id,
        })
        # let's go to page 2
        response = self.client.get(url, {'page': 2})
        eq_(response.status_code, 200)
        data = json.loads(response.content)
        eq_(data['count'], settings.SYMBOLS_UPLOADS_ADMIN_BATCH_SIZE + 1)
        items = data['items']
        eq_(len(items), 1)
        eq_(data['page'], 2)
        # filter by user email (case-insensitive substring)
        response = self.client.get(url, {'email': user.email[:5].upper()})
        eq_(response.status_code, 200)
        data = json.loads(response.content)
        eq_(data['count'], 1)
        first, = data['items']
        eq_(first['user']['id'], user.id)
        # filter by filename
        response = self.client.get(url, {'filename': 'FILE.ZI'})
        eq_(response.status_code, 200)
        data = json.loads(response.content)
        eq_(data['count'], 1)
        first, = data['items']
        eq_(first['filename'], 'file.zip')
    def test_symbols_uploads_data_content_search(self):
        """The content filter matches uploads whose stored content
        contains the search string."""
        url = reverse('manage:symbols_uploads_data')
        response = self.client.get(url)
        eq_(response.status_code, 302)
        self._login()
        other = User.objects.create(username='o', email='[email protected]')
        # three uploads whose content differs only by a trailing index
        for i in range(3):
            SymbolsUpload.objects.create(
                user=other,
                filename='file-%d.zip' % i,
                size=1000 + i,
                content='Some Content {}'.format(i)
            )
        response = self.client.get(url)
        eq_(response.status_code, 200)
        data = json.loads(response.content)
        eq_(data['count'], 3)
        # common prefix matches all three
        response = self.client.get(url, {'content': 'Some Content'})
        eq_(response.status_code, 200)
        data = json.loads(response.content)
        eq_(data['count'], 3)
        response = self.client.get(url, {'content': 'Some Content 1'})
        eq_(response.status_code, 200)
        data = json.loads(response.content)
        eq_(data['count'], 1)
        # no match at all
        response = self.client.get(url, {'content': 'Some Content X'})
        eq_(response.status_code, 200)
        data = json.loads(response.content)
        eq_(data['count'], 0)
def test_symbols_uploads_data_pagination_bad_request(self):
url = reverse('manage:symbols_uploads_data')
self._login()
response = self.client.get(url)
eq_(response.status_code, 200)
response = self.client.get(url, {'page': 0})
eq_(response.status_code, 400)
response = self.client.get(url, {'page': -1})
eq_(response.status_code, 400)
response = self.client.get(url, {'page': 'NaN'})
eq_(response.status_code, 400)
def test_reprocessing(self):
url = reverse('manage:reprocessing')
response = self.client.get(url)
eq_(response.status_code, 302)
good_crash_id = '11cb72f5-eb28-41e1-a8e4-849982120611'
bad_crash_id = '00000000-0000-0000-0000-000000020611'
def mocked_reprocess(crash_ids):
assert isinstance(crash_ids, list)
if crash_ids == [good_crash_id]:
return True
elif crash_ids == [bad_crash_id]:
return
raise NotImplementedError(crash_ids)
models.Reprocessing.implementation().reprocess = mocked_reprocess
self._login()
response = self.client.get(url)
eq_(response.status_code, 200)
response = self.client.post(
url,
{'crash_id': 'junk'},
)
eq_(response.status_code, 200)
ok_('Does not appear to be a valid crash ID' in response.content)
response = self.client.post(
url,
{'crash_id': good_crash_id},
)
eq_(response.status_code, 302)
self.assertRedirects(
response,
url + '?crash_id=' + good_crash_id
)
response = self.client.post(
url,
{'crash_id': bad_crash_id},
)
eq_(response.status_code, 302)
self.assertRedirects(
response,
url # note lack of `?crash_id=...`
)
| m8ttyB/socorro | webapp-django/crashstats/manage/tests/test_views.py | Python | mpl-2.0 | 48,676 |
'''
Template tags for WorldPay offsite payments
'''
from django import template
from django.template.loader import render_to_string
register = template.Library()
class WorldPayNode(template.Node):
    """Template node that renders the WorldPay payment form for an integration."""

    def __init__(self, integration):
        # Stored as a template.Variable so it is resolved lazily, against the
        # context active at render time.
        self.integration = template.Variable(integration)

    def render(self, context):
        integration_obj = self.integration.resolve(context)
        return render_to_string(
            "billing/world_pay.html",
            {"form": integration_obj.generate_form(),
             "integration": integration_obj},
            context,
        )
@register.tag
def world_pay(parser, token):
    """Template tag: ``{% world_pay integration %}``.

    Takes exactly one argument, the WorldPay integration object, and returns
    a node that renders its offsite-payment form.

    Raises:
        template.TemplateSyntaxError: if the tag is given anything other
            than a single argument.
    """
    # Split once instead of re-splitting inside an except clause just to
    # recover the tag name for the error message.
    bits = token.split_contents()
    if len(bits) != 2:
        raise template.TemplateSyntaxError("%r was expecting a single argument" % bits[0])
    tag, int_obj = bits
    return WorldPayNode(int_obj)
| yceruto/merchant | billing/templatetags/world_pay_tags.py | Python | bsd-3-clause | 872 |
"""
Common utilities for the course experience, including course outline.
"""
from django.utils import timezone
from opaque_keys.edx.keys import CourseKey
from lms.djangoapps.course_api.blocks.api import get_blocks
from lms.djangoapps.course_blocks.api import get_course_blocks
from openedx.core.djangoapps.content.course_overviews.models import CourseOverview
from openedx.core.lib.cache_utils import request_cached
from openedx.features.course_experience import RELATIVE_DATES_FLAG
from common.djangoapps.student.models import CourseEnrollment
from xmodule.modulestore.django import modulestore
@request_cached()
def get_course_outline_block_tree(request, course_id, user=None, allow_start_dates_in_future=False):  # lint-amnesty, pylint: disable=too-many-statements
    """
    Returns the root block of the course outline, with children as blocks.

    allow_start_dates_in_future (bool): When True, will allow blocks to be
        returned that can bypass the StartDateTransformer's filter to show
        blocks with start dates in the future.
    """

    assert user is None or user.is_authenticated

    def populate_children(block, all_blocks):
        """
        Replace each child id with the full block for the child.

        Given a block, replaces each id in its children array with the full
        representation of that child, which will be looked up by id in the
        passed all_blocks dict. Recursively do the same replacement for children
        of those children.
        """
        children = block.get('children', [])

        for i in range(len(children)):
            child_id = block['children'][i]
            child_detail = populate_children(all_blocks[child_id], all_blocks)
            block['children'][i] = child_detail

        return block

    def recurse_mark_scored(block):
        """
        Mark this block as 'scored' if any of its descendents are 'scored' (that is, 'has_score' and 'weight' > 0).
        """
        is_scored = block.get('has_score', False) and block.get('weight', 1) > 0
        # Use a list comprehension (NOT a bare generator) to force the recursion over
        # all children: any() short-circuits a generator at the first truthy value,
        # which would leave later subtrees without a 'scored' flag.
        children_scored = any([recurse_mark_scored(child) for child in block.get('children', [])])
        if is_scored or children_scored:
            block['scored'] = True
            return True
        else:
            block['scored'] = False
            return False

    def recurse_num_graded_problems(block):
        """
        Marks each block with the number of graded and scored leaf blocks below it as 'num_graded_problems'
        """
        is_scored = block.get('has_score') and block.get('weight', 1) > 0
        is_graded = block.get('graded')
        # LTI problems are excluded from the graded-problem count.
        is_countable = block.get('type') not in ('lti', 'lti_consumer')
        is_graded_problem = is_scored and is_graded and is_countable

        num_graded_problems = 1 if is_graded_problem else 0
        num_graded_problems += sum(recurse_num_graded_problems(child) for child in block.get('children', []))

        block['num_graded_problems'] = num_graded_problems
        return num_graded_problems

    def recurse_mark_auth_denial(block):
        """
        Collect the 'authorization_denial_reason' values found on this block and all
        of its descendants, storing their union on the block as 'all_denial_reasons'.
        """
        own_denial_reason = {block['authorization_denial_reason']} if 'authorization_denial_reason' in block else set()
        # Star-unpacking the generator into union() evaluates every child, so the
        # whole subtree is visited (no short-circuiting here).
        child_denial_reasons = own_denial_reason.union(
            *(recurse_mark_auth_denial(child) for child in block.get('children', []))
        )
        if child_denial_reasons:
            block['all_denial_reasons'] = child_denial_reasons

        return child_denial_reasons

    course_key = CourseKey.from_string(course_id)
    course_usage_key = modulestore().make_course_usage_key(course_key)

    # Deeper query for course tree traversing/marking complete
    # and last completed block
    all_blocks = get_blocks(
        request,
        course_usage_key,
        user=user,
        nav_depth=3,
        requested_fields=[
            'children',
            'contains_gated_content',
            'display_name',
            'due',
            'effort_activities',
            'effort_time',
            'format',
            'graded',
            'has_score',
            'show_gated_sections',
            'special_exam_info',
            'start',
            'type',
            'weight',
            'completion',
            'complete',
            'resume_block',
        ],
        allow_start_dates_in_future=allow_start_dates_in_future,
    )

    course_outline_root_block = all_blocks['blocks'].get(all_blocks['root'], None)
    if course_outline_root_block:
        populate_children(course_outline_root_block, all_blocks['blocks'])
        recurse_mark_scored(course_outline_root_block)
        recurse_num_graded_problems(course_outline_root_block)
        recurse_mark_auth_denial(course_outline_root_block)
    return course_outline_root_block
def get_resume_block(block):
    """
    Gets the deepest block marked as 'resume_block'.

    Returns None when this block is auth-denied or not itself a resume block;
    otherwise returns the deepest descendant flagged 'resume_block', falling
    back to this block when no child carries the flag.
    """
    if block.get('authorization_denial_reason') or not block.get('resume_block'):
        return None

    for child in block.get('children') or []:
        deeper = get_resume_block(child)
        if deeper is not None:
            return deeper
    return block
def get_start_block(block):
    """
    Gets the deepest block to use as the starting block.

    Follows the chain of first children down to a leaf.
    """
    current = block
    while current.get('children'):
        current = current['children'][0]
    return current
def dates_banner_should_display(course_key, user):
    """
    Return whether or not the reset banner should display,
    determined by whether or not a course has any past-due,
    incomplete sequentials and which enrollment mode is being
    dealt with for the current user and course.

    Returns:
        (missed_deadlines, missed_gated_content):
            missed_deadlines is True if the user has missed any graded content deadlines
            missed_gated_content is True if the first content that the user missed was gated content
    """
    # Cheap feature-flag check first, before touching the database at all.
    if not RELATIVE_DATES_FLAG.is_enabled(course_key):
        return False, False
    course_overview = CourseOverview.objects.get(id=str(course_key))
    course_end_date = getattr(course_overview, 'end_date', None)
    is_self_paced = getattr(course_overview, 'self_paced', False)
    # Only display the banner for self-paced courses
    if not is_self_paced:
        return False, False
    # Only display the banner for enrolled users
    if not CourseEnrollment.is_enrolled(user, course_key):
        return False, False
    # Don't display the banner if the course has ended
    if course_end_date and course_end_date < timezone.now():
        return False, False
    store = modulestore()
    course_usage_key = store.make_course_usage_key(course_key)
    # include_completion=True so the per-block 'complete' fields are available to
    # is_block_structure_complete_for_assignments() below.
    block_data = get_course_blocks(user, course_usage_key, include_completion=True)
    # Walk sections -> subsections; returns on the *first* overdue, incomplete
    # subsection found, so `missed_gated_content` reflects that first miss only.
    for section_key in block_data.get_children(course_usage_key):
        for subsection_key in block_data.get_children(section_key):
            subsection_due_date = block_data.get_xblock_field(subsection_key, 'due', None)
            if (subsection_due_date and subsection_due_date < timezone.now() and
                    not is_block_structure_complete_for_assignments(block_data, subsection_key)):
                # Display the banner if the due date for an incomplete graded subsection has passed
                return True, block_data.get_xblock_field(subsection_key, 'contains_gated_content', False)
    # Don't display the banner if there were no missed deadlines
    return False, False
def is_block_structure_complete_for_assignments(block_data, block_key):
    """
    Considers a block complete only if all scored & graded leaf blocks are complete.

    This is different from the normal `complete` flag because children of the block that are informative (like
    readings or videos) do not count. We only care about actual homework content.
    """
    children = block_data.get_children(block_key)
    if children:
        return all(
            is_block_structure_complete_for_assignments(block_data, child_key)
            for child_key in children
        )

    if block_data.get_xblock_field(block_key, 'category') in ('course', 'chapter', 'sequential', 'vertical'):
        # A childless "hierarchy" block means the content isn't visible to this
        # user (start date in the future, hide_after_due, or some transformer
        # cut it off). If we kept going we might call it complete merely
        # because hierarchy blocks usually aren't scored — so treat it as
        # incomplete instead.
        return False

    weight = block_data.get_xblock_field(block_key, 'weight', 1)
    scored = block_data.get_xblock_field(block_key, 'has_score', False) and (weight is None or weight > 0)
    graded = block_data.get_xblock_field(block_key, 'graded', False)
    if not graded or not scored:
        # Informative content (videos, readings, ungraded practice) never
        # blocks completeness.
        return True
    return block_data.get_xblock_field(block_key, 'complete', False)
| EDUlib/edx-platform | openedx/features/course_experience/utils.py | Python | agpl-3.0 | 9,472 |
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: drop the unique constraint on Gallery.title.

    Galleries remain uniquely identified by ``title_slug`` (still unique in the
    frozen model definitions below), so duplicate display titles become legal
    after this migration is applied.
    """

    def forwards(self, orm):
        """Apply the migration."""
        # Removing unique constraint on 'Gallery', fields ['title']
        db.delete_unique('photologue_gallery', ['title'])

    def backwards(self, orm):
        """Revert the migration."""
        # Adding unique constraint on 'Gallery', fields ['title']
        db.create_unique('photologue_gallery', ['title'])

    # Frozen ORM snapshot used by South to construct the ``orm`` object passed
    # to forwards()/backwards(); it mirrors the app's models at the time this
    # migration was written and is not live model code.
    models = {
        'photologue.gallery': {
            'Meta': {'object_name': 'Gallery'},
            'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
            'photos': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'galleries'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['photologue.Photo']"}),
            'tags': ('tagging.fields.TagField', [], {}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'title_slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'})
        },
        'photologue.galleryupload': {
            'Meta': {'object_name': 'GalleryUpload'},
            'caption': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'gallery': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['photologue.Gallery']", 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
            'tags': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '75'}),
            'zip_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'})
        },
        'photologue.photo': {
            'Meta': {'object_name': 'Photo'},
            'caption': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'crop_from': ('django.db.models.fields.CharField', [], {'default': "'center'", 'max_length': '10', 'blank': 'True'}),
            'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'date_taken': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'effect': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'photo_related'", 'null': 'True', 'to': "orm['photologue.PhotoEffect']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
            'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
            'tags': ('tagging.fields.TagField', [], {}),
            'title': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
            'title_slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}),
            'view_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
        },
        'photologue.photoeffect': {
            'Meta': {'object_name': 'PhotoEffect'},
            'background_color': ('django.db.models.fields.CharField', [], {'default': "'#FFFFFF'", 'max_length': '7'}),
            'brightness': ('django.db.models.fields.FloatField', [], {'default': '1.0'}),
            'color': ('django.db.models.fields.FloatField', [], {'default': '1.0'}),
            'contrast': ('django.db.models.fields.FloatField', [], {'default': '1.0'}),
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'filters': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
            'reflection_size': ('django.db.models.fields.FloatField', [], {'default': '0'}),
            'reflection_strength': ('django.db.models.fields.FloatField', [], {'default': '0.59999999999999998'}),
            'sharpness': ('django.db.models.fields.FloatField', [], {'default': '1.0'}),
            'transpose_method': ('django.db.models.fields.CharField', [], {'max_length': '15', 'blank': 'True'})
        },
        'photologue.photosize': {
            'Meta': {'object_name': 'PhotoSize'},
            'crop': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'effect': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'photo_sizes'", 'null': 'True', 'to': "orm['photologue.PhotoEffect']"}),
            'height': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'increment_count': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20'}),
            'pre_cache': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'quality': ('django.db.models.fields.PositiveIntegerField', [], {'default': '70'}),
            'upscale': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'watermark': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'photo_sizes'", 'null': 'True', 'to': "orm['photologue.Watermark']"}),
            'width': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
        },
        'photologue.set': {
            'Meta': {'object_name': 'Set'},
            'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'galleries': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'gallery_groups'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['photologue.Gallery']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'})
        },
        'photologue.watermark': {
            'Meta': {'object_name': 'Watermark'},
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
            'opacity': ('django.db.models.fields.FloatField', [], {'default': '1'}),
            'style': ('django.db.models.fields.CharField', [], {'default': "'scale'", 'max_length': '5'})
        }
    }

    complete_apps = ['photologue']
| iberben/django-photologue | photologue/migrations/0003_auto__del_unique_gallery_title.py | Python | bsd-3-clause | 7,918 |
# Work out the first ten digits of the sum of the following one-hundred 50-digit numbers.
#
# Answer: 5537376230
def run(digits=10):
    """Project Euler problem 13: sum one hundred 50-digit numbers.

    Args:
        digits: how many leading digits of the sum to return. Defaults to 10,
            matching the original problem statement (answer: "5537376230").

    Returns:
        The first ``digits`` digits of the sum, as a string.
    """
    numbers = [
        37107287533902102798797998220837590246510135740250,
        46376937677490009712648124896970078050417018260538,
        74324986199524741059474233309513058123726617309629,
        91942213363574161572522430563301811072406154908250,
        23067588207539346171171980310421047513778063246676,
        89261670696623633820136378418383684178734361726757,
        28112879812849979408065481931592621691275889832738,
        44274228917432520321923589422876796487670272189318,
        47451445736001306439091167216856844588711603153276,
        70386486105843025439939619828917593665686757934951,
        62176457141856560629502157223196586755079324193331,
        64906352462741904929101432445813822663347944758178,
        92575867718337217661963751590579239728245598838407,
        58203565325359399008402633568948830189458628227828,
        80181199384826282014278194139940567587151170094390,
        35398664372827112653829987240784473053190104293586,
        86515506006295864861532075273371959191420517255829,
        71693888707715466499115593487603532921714970056938,
        54370070576826684624621495650076471787294438377604,
        53282654108756828443191190634694037855217779295145,
        36123272525000296071075082563815656710885258350721,
        45876576172410976447339110607218265236877223636045,
        17423706905851860660448207621209813287860733969412,
        81142660418086830619328460811191061556940512689692,
        51934325451728388641918047049293215058642563049483,
        62467221648435076201727918039944693004732956340691,
        15732444386908125794514089057706229429197107928209,
        55037687525678773091862540744969844508330393682126,
        18336384825330154686196124348767681297534375946515,
        80386287592878490201521685554828717201219257766954,
        78182833757993103614740356856449095527097864797581,
        16726320100436897842553539920931837441497806860984,
        48403098129077791799088218795327364475675590848030,
        87086987551392711854517078544161852424320693150332,
        59959406895756536782107074926966537676326235447210,
        69793950679652694742597709739166693763042633987085,
        41052684708299085211399427365734116182760315001271,
        65378607361501080857009149939512557028198746004375,
        35829035317434717326932123578154982629742552737307,
        94953759765105305946966067683156574377167401875275,
        88902802571733229619176668713819931811048770190271,
        25267680276078003013678680992525463401061632866526,
        36270218540497705585629946580636237993140746255962,
        24074486908231174977792365466257246923322810917141,
        91430288197103288597806669760892938638285025333403,
        34413065578016127815921815005561868836468420090470,
        23053081172816430487623791969842487255036638784583,
        11487696932154902810424020138335124462181441773470,
        63783299490636259666498587618221225225512486764533,
        67720186971698544312419572409913959008952310058822,
        95548255300263520781532296796249481641953868218774,
        76085327132285723110424803456124867697064507995236,
        37774242535411291684276865538926205024910326572967,
        23701913275725675285653248258265463092207058596522,
        29798860272258331913126375147341994889534765745501,
        18495701454879288984856827726077713721403798879715,
        38298203783031473527721580348144513491373226651381,
        34829543829199918180278916522431027392251122869539,
        40957953066405232632538044100059654939159879593635,
        29746152185502371307642255121183693803580388584903,
        41698116222072977186158236678424689157993532961922,
        62467957194401269043877107275048102390895523597457,
        23189706772547915061505504953922979530901129967519,
        86188088225875314529584099251203829009407770775672,
        11306739708304724483816533873502340845647058077308,
        82959174767140363198008187129011875491310547126581,
        97623331044818386269515456334926366572897563400500,
        42846280183517070527831839425882145521227251250327,
        55121603546981200581762165212827652751691296897789,
        32238195734329339946437501907836945765883352399886,
        75506164965184775180738168837861091527357929701337,
        62177842752192623401942399639168044983993173312731,
        32924185707147349566916674687634660915035914677504,
        99518671430235219628894890102423325116913619626622,
        73267460800591547471830798392868535206946944540724,
        76841822524674417161514036427982273348055556214818,
        97142617910342598647204516893989422179826088076852,
        87783646182799346313767754307809363333018982642090,
        10848802521674670883215120185883543223812876952786,
        71329612474782464538636993009049310363619763878039,
        62184073572399794223406235393808339651327408011116,
        66627891981488087797941876876144230030984490851411,
        60661826293682836764744779239180335110989069790714,
        85786944089552990653640447425576083659976645795096,
        66024396409905389607120198219976047599490197230297,
        64913982680032973156037120041377903785566085089252,
        16730939319872750275468906903707539413042652315011,
        94809377245048795150954100921645863754710598436791,
        78639167021187492431995700641917969777599028300699,
        15368713711936614952811305876380278410754449733078,
        40789923115535562561142322423255033685442488917353,
        44889911501440648020369068063960672322193204149535,
        41503128880339536053299340368006977710650566631954,
        81234880673210146739058568557934581403627822703280,
        82616570773948327592232845941706525094512325230608,
        22918802058777319719839450180888072429661980811197,
        77158542502016545090413245809786882778948721859617,
        72107838435069186155435662884062257473692284509516,
        20849603980134001723930671666823555245252804609722,
        53503534226472524250874054075591789781264330331690
    ]
    # Python ints are arbitrary precision, so the sum is exact; slice the
    # decimal representation for the requested number of leading digits.
    return str(sum(numbers))[:digits]
| tgetzoya/project-euler-python | problems/pe13.py | Python | bsd-2-clause | 6,182 |
# saved_state.py
# Ronald L. Rivest
# August 1, 2017
# Routines to save and restore some stage between audit stages.
import json
import os
import multi
import utils
def write_initial_saved_state(e):
"""
Write the first saved-state, after the election-spec has been read."
"""
initial_stage_time = "0000-00-00-00-00-00" # stage_time for initial saved-state
e.sn_tp[initial_stage_time] = {}
for pbcid in e.pbcids:
# no sampling done yet
e.sn_tp[initial_stage_time][pbcid] = 0
e.plan_tp[initial_stage_time] = {}
for pbcid in e.pbcids:
e.plan_tp[initial_stage_time][pbcid] = int(e.max_audit_rate_p[pbcid])
e.status_tm[initial_stage_time] = {}
for mid in e.mids:
# initial contest state
e.status_tm[initial_stage_time][mid] = e.initial_status_m[mid]
ss = {} # saved state dict, to be written out
ss["stage_time"] = initial_stage_time
ss["sn_tp"] = e.sn_tp # sample sizes, by stage and pbcid
ss["status_tm"] = e.status_tm # measurement statuses, by stage and mid
ss["plan_tp"] = e.plan_tp # plan for next stage of audit
write_state(e, ss)
def write_intermediate_saved_state(e):
"""
Write an intermediate saved-state,
after the election-spec has been read and the first audit stage done.
"""
ss = {} # saved state dict, to be written out
ss["stage_time"] = e.stage_time
ss["sn_tp"] = e.sn_tp # sample sizes, by stage and pbcid
ss["status_tm"] = e.status_tm # measurement statuses, by stage and mid
ss["plan_tp"] = e.plan_tp # plan for next stage of audit
write_state(e, ss)
def write_state(e, ss):
"""
Save some state to 3-audit/34-audit-output/audit-output-saved-state.json
Data ss saved is needed in the next audit stage.
ss is a dict with the saved-state information, including
the stage_time.
"""
dirpath = os.path.join(multi.ELECTIONS_ROOT,
e.election_dirname,
"3-audit",
"34-audit-output")
os.makedirs(dirpath, exist_ok=True)
filename = os.path.join(dirpath,
"audit-output-saved-state-"+ss["stage_time"]+".json")
with open(filename, "w") as file:
json.dump(ss, file, indent=2)
def read_saved_state(e):
"""
Read state from latest 3-audit/34-audit-output/audit-output-saved-state.json
"""
dirpath = os.path.join(multi.ELECTIONS_ROOT,
e.election_dirname,
"3-audit",
"34-audit-output")
filename = utils.greatest_name(dirpath,
"audit-output-saved-state",
".json")
file_pathname = os.path.join(dirpath, filename)
file = open(file_pathname, "r")
e.saved_state = json.load(file)
if __name__ == "__main__":
pass
| ron-rivest/2017-bayes-audit | 2017-code/saved_state.py | Python | mit | 3,044 |
from __future__ import unicode_literals
import boto
import sure # noqa
from freezegun import freeze_time
from moto import mock_dynamodb
from boto.dynamodb import condition
from boto.dynamodb.exceptions import DynamoDBKeyNotFoundError
from boto.exception import DynamoDBResponseError
def create_table(conn):
message_table_schema = conn.create_schema(
hash_key_name='forum_name',
hash_key_proto_value=str,
)
table = conn.create_table(
name='messages',
schema=message_table_schema,
read_units=10,
write_units=10
)
return table
@freeze_time("2012-01-14")
@mock_dynamodb
def test_create_table():
conn = boto.connect_dynamodb()
create_table(conn)
expected = {
'Table': {
'CreationDateTime': 1326499200.0,
'ItemCount': 0,
'KeySchema': {
'HashKeyElement': {
'AttributeName': 'forum_name',
'AttributeType': 'S'
},
},
'ProvisionedThroughput': {
'ReadCapacityUnits': 10,
'WriteCapacityUnits': 10
},
'TableName': 'messages',
'TableSizeBytes': 0,
'TableStatus': 'ACTIVE',
}
}
conn.describe_table('messages').should.equal(expected)
@mock_dynamodb
def test_delete_table():
conn = boto.connect_dynamodb()
create_table(conn)
conn.list_tables().should.have.length_of(1)
conn.layer1.delete_table('messages')
conn.list_tables().should.have.length_of(0)
conn.layer1.delete_table.when.called_with('messages').should.throw(DynamoDBResponseError)
@mock_dynamodb
def test_update_table_throughput():
conn = boto.connect_dynamodb()
table = create_table(conn)
table.read_units.should.equal(10)
table.write_units.should.equal(10)
table.update_throughput(5, 6)
table.refresh()
table.read_units.should.equal(5)
table.write_units.should.equal(6)
@mock_dynamodb
def test_item_add_and_describe_and_update():
conn = boto.connect_dynamodb()
table = create_table(conn)
item_data = {
'Body': 'http://url_to_lolcat.gif',
'SentBy': 'User A',
'ReceivedTime': '12/9/2011 11:36:03 PM',
}
item = table.new_item(
hash_key='LOLCat Forum',
attrs=item_data,
)
item.put()
returned_item = table.get_item(
hash_key='LOLCat Forum',
attributes_to_get=['Body', 'SentBy']
)
dict(returned_item).should.equal({
'forum_name': 'LOLCat Forum',
'Body': 'http://url_to_lolcat.gif',
'SentBy': 'User A',
})
item['SentBy'] = 'User B'
item.put()
returned_item = table.get_item(
hash_key='LOLCat Forum',
attributes_to_get=['Body', 'SentBy']
)
dict(returned_item).should.equal({
'forum_name': 'LOLCat Forum',
'Body': 'http://url_to_lolcat.gif',
'SentBy': 'User B',
})
@mock_dynamodb
def test_item_put_without_table():
conn = boto.connect_dynamodb()
conn.layer1.put_item.when.called_with(
table_name='undeclared-table',
item=dict(
hash_key='LOLCat Forum',
),
).should.throw(DynamoDBResponseError)
@mock_dynamodb
def test_get_missing_item():
conn = boto.connect_dynamodb()
table = create_table(conn)
table.get_item.when.called_with(
hash_key='tester',
).should.throw(DynamoDBKeyNotFoundError)
@mock_dynamodb
def test_get_item_with_undeclared_table():
conn = boto.connect_dynamodb()
conn.layer1.get_item.when.called_with(
table_name='undeclared-table',
key={
'HashKeyElement': {'S': 'tester'},
},
).should.throw(DynamoDBKeyNotFoundError)
@mock_dynamodb
def test_delete_item():
conn = boto.connect_dynamodb()
table = create_table(conn)
item_data = {
'Body': 'http://url_to_lolcat.gif',
'SentBy': 'User A',
'ReceivedTime': '12/9/2011 11:36:03 PM',
}
item = table.new_item(
hash_key='LOLCat Forum',
attrs=item_data,
)
item.put()
table.refresh()
table.item_count.should.equal(1)
response = item.delete()
response.should.equal({u'Attributes': [], u'ConsumedCapacityUnits': 0.5})
table.refresh()
table.item_count.should.equal(0)
item.delete.when.called_with().should.throw(DynamoDBResponseError)
@mock_dynamodb
def test_delete_item_with_attribute_response():
conn = boto.connect_dynamodb()
table = create_table(conn)
item_data = {
'Body': 'http://url_to_lolcat.gif',
'SentBy': 'User A',
'ReceivedTime': '12/9/2011 11:36:03 PM',
}
item = table.new_item(
hash_key='LOLCat Forum',
attrs=item_data,
)
item.put()
table.refresh()
table.item_count.should.equal(1)
response = item.delete(return_values='ALL_OLD')
response.should.equal({
u'Attributes': {
u'Body': u'http://url_to_lolcat.gif',
u'forum_name': u'LOLCat Forum',
u'ReceivedTime': u'12/9/2011 11:36:03 PM',
u'SentBy': u'User A',
},
u'ConsumedCapacityUnits': 0.5
})
table.refresh()
table.item_count.should.equal(0)
item.delete.when.called_with().should.throw(DynamoDBResponseError)
@mock_dynamodb
def test_delete_item_with_undeclared_table():
conn = boto.connect_dynamodb()
conn.layer1.delete_item.when.called_with(
table_name='undeclared-table',
key={
'HashKeyElement': {'S': 'tester'},
},
).should.throw(DynamoDBResponseError)
@mock_dynamodb
def test_query():
conn = boto.connect_dynamodb()
table = create_table(conn)
item_data = {
'Body': 'http://url_to_lolcat.gif',
'SentBy': 'User A',
'ReceivedTime': '12/9/2011 11:36:03 PM',
}
item = table.new_item(
hash_key='the-key',
attrs=item_data,
)
item.put()
results = table.query(hash_key='the-key')
results.response['Items'].should.have.length_of(1)
@mock_dynamodb
def test_query_with_undeclared_table():
conn = boto.connect_dynamodb()
conn.layer1.query.when.called_with(
table_name='undeclared-table',
hash_key_value={'S': 'the-key'},
).should.throw(DynamoDBResponseError)
@mock_dynamodb
def test_scan():
conn = boto.connect_dynamodb()
table = create_table(conn)
item_data = {
'Body': 'http://url_to_lolcat.gif',
'SentBy': 'User A',
'ReceivedTime': '12/9/2011 11:36:03 PM',
}
item = table.new_item(
hash_key='the-key',
attrs=item_data,
)
item.put()
item = table.new_item(
hash_key='the-key2',
attrs=item_data,
)
item.put()
item_data = {
'Body': 'http://url_to_lolcat.gif',
'SentBy': 'User B',
'ReceivedTime': '12/9/2011 11:36:03 PM',
'Ids': set([1, 2, 3]),
'PK': 7,
}
item = table.new_item(
hash_key='the-key3',
attrs=item_data,
)
item.put()
results = table.scan()
results.response['Items'].should.have.length_of(3)
results = table.scan(scan_filter={'SentBy': condition.EQ('User B')})
results.response['Items'].should.have.length_of(1)
results = table.scan(scan_filter={'Body': condition.BEGINS_WITH('http')})
results.response['Items'].should.have.length_of(3)
results = table.scan(scan_filter={'Ids': condition.CONTAINS(2)})
results.response['Items'].should.have.length_of(1)
results = table.scan(scan_filter={'Ids': condition.NOT_NULL()})
results.response['Items'].should.have.length_of(1)
results = table.scan(scan_filter={'Ids': condition.NULL()})
results.response['Items'].should.have.length_of(2)
results = table.scan(scan_filter={'PK': condition.BETWEEN(8, 9)})
results.response['Items'].should.have.length_of(0)
results = table.scan(scan_filter={'PK': condition.BETWEEN(5, 8)})
results.response['Items'].should.have.length_of(1)
@mock_dynamodb
def test_scan_with_undeclared_table():
    """Scanning a table that does not exist must raise DynamoDBResponseError."""
    client = boto.connect_dynamodb()
    sent_by_filter = {
        "SentBy": {
            "AttributeValueList": [{"S": "User B"}],
            "ComparisonOperator": "EQ"
        }
    }
    pending_call = client.layer1.scan.when.called_with(
        table_name='undeclared-table',
        scan_filter=sent_by_filter,
    )
    pending_call.should.throw(DynamoDBResponseError)
@mock_dynamodb
def test_write_batch():
    """Batch-write two items, then batch-delete one, checking item_count each time."""
    conn = boto.connect_dynamodb()
    table = create_table(conn)
    puts = [
        table.new_item(
            hash_key='the-key',
            attrs={
                'Body': 'http://url_to_lolcat.gif',
                'SentBy': 'User A',
                'ReceivedTime': '12/9/2011 11:36:03 PM',
            },
        ),
        table.new_item(
            hash_key='the-key2',
            attrs={
                'Body': 'http://url_to_lolcat.gif',
                'SentBy': 'User B',
                'ReceivedTime': '12/9/2011 11:36:03 PM',
                'Ids': set([1, 2, 3]),
                'PK': 7,
            },
        ),
    ]
    batch = conn.new_batch_write_list()
    batch.add_batch(table, puts=puts)
    conn.batch_write_item(batch)
    table.refresh()
    table.item_count.should.equal(2)
    # A second batch list deletes one of the two items.
    batch = conn.new_batch_write_list()
    batch.add_batch(table, deletes=['the-key'])
    conn.batch_write_item(batch)
    table.refresh()
    table.item_count.should.equal(1)
@mock_dynamodb
def test_batch_read():
    """batch_get_item returns exactly the requested hash keys."""
    conn = boto.connect_dynamodb()
    table = create_table(conn)
    shared_attrs = {
        'Body': 'http://url_to_lolcat.gif',
        'SentBy': 'User A',
        'ReceivedTime': '12/9/2011 11:36:03 PM',
    }
    for key in ('the-key1', 'the-key2'):
        table.new_item(hash_key=key, attrs=shared_attrs).put()
    table.new_item(
        hash_key='another-key',
        attrs={
            'Body': 'http://url_to_lolcat.gif',
            'SentBy': 'User B',
            'ReceivedTime': '12/9/2011 11:36:03 PM',
            'Ids': set([1, 2, 3]),
            'PK': 7,
        },
    ).put()
    items = table.batch_get_item(['the-key1', 'another-key'])
    # Iterate so that the lazy batch call is actually issued.
    count = len(list(items))
    count.should.have.equal(2)
| pior/moto | tests/test_dynamodb/test_dynamodb_table_without_range_key.py | Python | apache-2.0 | 10,450 |
# Copyright (C) 2012 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
| ASlave2Audio/Restaurant-App | mingw/share/gdb/python/gdb/function/__init__.py | Python | mit | 687 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Repoint the certificacion FKs (obra, periodo) and widen registro.dia choices."""
    dependencies = [
        ('registro', '0015_auto_20151220_1216'),
    ]
    operations = [
        # certificacion.obra now points at core.Obras with an explicit
        # reverse accessor name.
        migrations.AlterField(
            model_name='certificacion',
            name='obra',
            field=models.ForeignKey(to='core.Obras', related_name='certificaciones_obras'),
        ),
        # certificacion.periodo gains a verbose name and reverse accessor.
        migrations.AlterField(
            model_name='certificacion',
            name='periodo',
            field=models.ForeignKey(related_name='certificaciones_periodo', verbose_name='Periodo', to='parametros.Periodo'),
        ),
        # registro.dia becomes a choice field distinguishing Saturday,
        # Sunday and any other day.
        migrations.AlterField(
            model_name='registro',
            name='dia',
            field=models.CharField(choices=[('otro', 'OTRO'), ('SAB', 'SÁBADO'), ('DOM', 'DOMINGO')], db_column='DIA', max_length=16, help_text='Indica si es sábado, domingo o otro.', verbose_name='Tipo de día'),
        ),
    ]
| infoINGenieria/Zweb | z_web/registro/migrations/0016_auto_20151230_0957.py | Python | gpl-2.0 | 1,002 |
#!/usr/bin/python
import logging
import os
from optparse import OptionParser
import random
import time
from cinderclient import client as cinderclient
from novaclient.v2 import client as novaclient
USER = os.getenv('OS_USERNAME')
TENANT = os.getenv('OS_TENANT_NAME')
PASSWORD = os.getenv('OS_PASSWORD')
AUTH_URL = os.getenv('OS_AUTH_URL')
def process_options():
    """Parse command-line options for the boot_volumes test script.

    Returns the optparse options object with:
      instance_count (int, default 2), base_name (default 'verification'),
      flavor_list (comma separated string, default None),
      template (suffix marking template volumes, default '-template'),
      net_UUID (network UUID, default None).
    """
    # Fixes vs. original: removed the dead `config = {}` local and the unused
    # positional-args binding; cleaned up the garbled -n/-f help strings
    # ("base name name", missing space before "(default:", "seperated").
    usage = "usage: %prog [options]\nboot_volumes.py."
    parser = OptionParser(usage, version='%prog 1.0')
    parser.add_option('-c', '--instance-count', action='store',
                      type='int',
                      default=2,
                      dest='instance_count',
                      help='Number of instances to boot (default = 2).')
    parser.add_option('-n', '--name', action='store',
                      type='string',
                      default='verification',
                      dest='base_name',
                      help='Base name to use; this is the base name '
                           'for the volume-templates you want to boot. '
                           'You probably do not want to run the template '
                           'itself (default: verification)')
    # Other tests use random flavors, but this one we MUST specify flavor
    parser.add_option('-f', '--flavors', action='store',
                      type='string',
                      dest='flavor_list',
                      help='Comma separated list of flavors to choose from')
    parser.add_option('-t', '--template', action='store',
                      type='string',
                      default='-template',
                      dest='template',
                      help='The suffix to designate the template (default: -template)')
    parser.add_option('-e', '--network', action='store',
                      type='string',
                      dest='net_UUID',
                      help='The UUID of the network to attach the instances to')
    options, _ = parser.parse_args()
    return options
def init_clients():
    """Build the Cinder (v2) and Nova compute clients from the OS_* env credentials."""
    cinder = cinderclient.Client('2', USER, PASSWORD,
                                 TENANT, AUTH_URL)
    nova = novaclient.Client(USER, PASSWORD, TENANT,
                             AUTH_URL, service_type="compute")
    return cinder, nova
if __name__ == '__main__':
    # NOTE(review): this script is Python 2 only (print statements, xrange).
    start_time = time.time()
    options = process_options()
    (cc, nc) = init_clients()
    counter = 0
    flavor_list = options.flavor_list.split(',')
    # Only ask for a fresh list of ready volumes when we need to
    # ie don't grab an update every iteration, no need to and it
    # introduces significant overhead
    def _ready():
        # Available volumes cloned from the template; the template itself
        # (name containing options.template) is excluded.
        return [v for v in cc.volumes.list(search_opts={'status': 'available'})
                if options.base_name in v.name
                if options.template not in v.name]
    ready_vlist = _ready()
    instance_start_time = time.time()
    for i in xrange(options.instance_count):
        # Block until at least one bootable volume is available.
        while len(ready_vlist) < 1:
            print('No ready volumes to boot, wait and rescan...')
            ready_vlist = _ready()
            counter = 0
            time.sleep(1)
            continue
        src_vol = random.choice(ready_vlist)
        create_kwargs = {}
        # Boot-from-volume: map the chosen volume to vda.
        bdm = {'vda': src_vol.id + ':::0'}
        create_kwargs['block_device_mapping'] = bdm
        create_kwargs['nics'] = [{ 'net-id': options.net_UUID }]
        flavor_id = random.choice(flavor_list)
        try:
            nc.servers.create(src_vol.name, None, flavor_id, **create_kwargs)
            ready_vlist.remove(src_vol)
        except Exception as ex:
            # Best-effort: a failed boot request is logged and skipped.
            print 'Caught exception booting instance: %s' % ex
            pass
        counter += 1
        # Back off briefly every 10 boot requests to avoid hammering the API.
        if counter % 10 == 0:
            time.sleep(5)
    print('Boot process completed in %s secs (elapsed test time %s secs)' %
          (time.time() - instance_start_time, time.time() - start_time))
    # Now we just have to wait for the instances to become ACTIVE
    done_count = 0
    while done_count < options.instance_count:
        active_list = [s for s in nc.servers.list(search_opts={'status': 'ACTIVE'})
                       if options.base_name in s.name]
        error_list = nc.servers.list(search_opts={'status': 'ERROR'})
        done_count = len(active_list) + len(error_list)
        print " Active/Ready Instances: %s" % len(active_list)
        print " Error/Failed Instances: %s\n" % len(error_list)
        time.sleep(5)
        # Give up after 30 minutes of total elapsed test time.
        if ((time.time() - start_time) / 60) > 30:
            break
    print " Active/Ready Instances: %s" % len(active_list)
    print " Error/Failed Instances: %s\n" % len(error_list)
    print "completion time: %s minutes" % ((time.time() - start_time) / 60)
| solidfire/solidfire-ai | sfai-openstack/verification_scripts/boot_volumes.py | Python | apache-2.0 | 4,798 |
__author__ = 'mslabicki'
import pygmo as pg
#
from pyltes.powerOptimizationProblemsDef import maximalThroughputProblemRR
from pyltes.powerOptimizationProblemsDef import local_maximalThroughputProblemRR
from pyltes.powerOptimizationProblemsDef import maximalMedianThrProblemRR
from pyltes.powerOptimizationProblemsDef import local_maximalMedianThrProblemRR
from pyltes.powerOptimizationProblemsDef import minInterQuartileRangeroblemRR
from pyltes.powerOptimizationProblemsDef import local_minInterQuartileRangeroblemRR
import copy
import math
import numpy as np
class pygmoPowerConfigurator:
    """Optimizes base-station transmit powers of a PyLTEs network with pygmo's SGA.

    Fixes vs. original:
    - `return len(localBsVector)` raised NameError whenever method == "global"
      (the default), since localBsVector was only assigned on the local path;
      the global path now returns the number of stations it updated.
    - `== None` comparisons replaced with `is None`.
    - The local base-station list is computed once instead of being rebuilt
      inside every objective-function branch.
    - An unknown objectiveFunction now raises ValueError immediately instead
      of a confusing NameError later.
    """

    def __init__(self, parent):
        # parent: the network instance whose `bs` list is being tuned.
        self.parent = parent

    def findPowersRR(self, objectiveFunction="averageThr", sgaGenerations=100,
                     numberOfThreads=11, numOfIndividuals=10, evolveTimes=10,
                     method="global", x_arg=None, y_arg=None,
                     expectedSignalLoss_arg=None):
        """Run an SGA search and write champion powers into parent.bs[*].outsidePower.

        objectiveFunction: "averageThr", "medianThr" or "minIQRthr".
        method: "global" optimizes every station; "local" only those within
            a radius of (x_arg, y_arg) derived from expectedSignalLoss_arg.
        numberOfThreads / evolveTimes are kept for backward compatibility with
        the old pygmo-1 archipelago API and are currently unused.
        Returns the number of base stations whose power was updated.
        """
        # Map each objective to its (local, global) problem classes.
        problem_classes = {
            "averageThr": (local_maximalThroughputProblemRR,
                           maximalThroughputProblemRR),
            "medianThr": (local_maximalMedianThrProblemRR,
                          maximalMedianThrProblemRR),
            "minIQRthr": (local_minInterQuartileRangeroblemRR,
                          minInterQuartileRangeroblemRR),
        }
        if objectiveFunction not in problem_classes:
            raise ValueError("Unknown objectiveFunction: %r" % (objectiveFunction,))
        local_cls, global_cls = problem_classes[objectiveFunction]

        localBsVector = []
        localListBS = []
        if method == "local":
            # Default the optimization centre to the middle of the area.
            x = self.parent.constraintAreaMaxX / 2 if x_arg is None else x_arg
            y = self.parent.constraintAreaMaxY / 2 if y_arg is None else y_arg
            if expectedSignalLoss_arg is None:
                maxDistance = min(self.parent.constraintAreaMaxX / 2,
                                  self.parent.constraintAreaMaxY / 2)
            else:
                maxDistance = returnDistanceFromSNR(expectedSignalLoss_arg)
            # Collect [ID, distance] rows for stations inside the radius.
            for bs in self.parent.bs:
                distance = math.sqrt((bs.x - x)**2 + (bs.y - y)**2)
                if distance < maxDistance:
                    localBsVector.append([int(bs.ID), distance])
            localBsVector = np.asarray(localBsVector)
            localListBS = [row[0] for row in localBsVector]

        if method == "local":
            prob = pg.problem(local_cls(
                dim=len(localBsVector), networkInstance=self.parent,
                lowerTxLimit=self.parent.minTxPower,
                upperTxLimit=self.parent.maxTxPower,
                localListBS=localListBS))
        else:
            prob = pg.problem(global_cls(
                dim=len(self.parent.bs), networkInstance=self.parent,
                lowerTxLimit=self.parent.minTxPower,
                upperTxLimit=self.parent.maxTxPower))

        # The problem evaluates candidates against its own deep copy of the net.
        prob.siec = copy.deepcopy(self.parent)
        algo = pg.algorithm(pg.sga(gen=sgaGenerations))
        population = pg.population(prob, numOfIndividuals)
        population = algo.evolve(population)

        if method == "global":
            for i in range(len(self.parent.bs)):
                self.parent.bs[i].outsidePower = population.champion_x[i]
            return len(self.parent.bs)
        for i in range(len(localListBS)):
            self.parent.bs[int(localListBS[i])].outsidePower = population.champion_x[i]
        return len(localBsVector)
def returnDistanceFromSNR(expectedSignalLoss):
    """Invert an SUI/Erceg-style path-loss model: given an expected signal
    loss (dB) return the corresponding distance.

    Model constants are hard-coded: 2.1 GHz carrier, 40 m transmitter,
    1.5 m receiver, 20 MHz bandwidth, 15.8 dB shadowing margin.
    """
    wavelength = 0.142758313333
    tx_height = 40
    rx_height = 1.5
    carrier_ghz = 2.1
    # Terrain-dependent model constants.
    a = 4.0
    b = 0.0065
    c = 17.1
    d = 10.8
    shadowing = 15.8
    # Path-loss exponent and frequency/height correction terms.
    gamma = a - b * tx_height + c / tx_height
    Xf = 6 * math.log10(carrier_ghz / 2)
    Xh = -d * math.log10(rx_height / 2)
    # Corrected reference distance.
    R0 = 100.0
    R0p = R0 * pow(10.0, -((Xf + Xh) / (10 * gamma)))
    # Thermal noise floor over the channel bandwidth.
    bandwidth = 20
    k = 1.3806488 * math.pow(10, -23)
    T = 293.0
    BW = bandwidth * 1000 * 1000
    N = 10 * math.log10(k * T) + 10 * math.log10(BW)
    # Free-space loss at the corrected reference distance.
    alpha = 20 * math.log10((4 * math.pi * R0p) / wavelength)
    return R0 * math.pow(
        10, (expectedSignalLoss - alpha - Xf - Xh - shadowing - N) / (10 * gamma))
| iitis/PyLTEs | pyltes/powerConfigurator.py | Python | mit | 5,818 |
from locust import HttpLocust, TaskSet, task
class WebsiteTasks(TaskSet):
    """Load-test tasks: four daily-suggestion pages and four song pages."""

    @task
    def page1(self):
        path = "/sugestoes-para/6a-feira-da-quarta-semana-da-pascoa/"
        self.client.get(path)

    @task
    def page2(self):
        path = "/sugestoes-para/5a-feira-da-quarta-semana-da-pascoa/"
        self.client.get(path)

    @task
    def page3(self):
        path = "/sugestoes-para/4a-feira-da-quarta-semana-da-pascoa/"
        self.client.get(path)

    @task
    def page4(self):
        path = "/sugestoes-para/3a-feira-da-quarta-semana-da-pascoa/"
        self.client.get(path)

    @task
    def musica1(self):
        path = "/musica/ressuscitou/"
        self.client.get(path)

    @task
    def musica2(self):
        path = "/musica/prova-de-amor-maior-nao-ha/"
        self.client.get(path)

    @task
    def musica3(self):
        path = "/musica/porque-ele-vive/"
        self.client.get(path)

    @task
    def musica4(self):
        path = "/musica/o-senhor-ressuscitou-aleluia/"
        self.client.get(path)
class WebsiteUser(HttpLocust):
    """Simulated visitor: runs WebsiteTasks with a 5-15 s pause between tasks."""
    task_set = WebsiteTasks
    # Wait times are in milliseconds.
    min_wait = 5000
    max_wait = 15000
| gustavofoa/pympm | scripts/locustfile.py | Python | apache-2.0 | 988 |
from corehq.apps.users.models import CommCareUser
from corehq.apps.hqcase.management.commands.ptop_fast_reindexer import PtopReindexer
from corehq.pillows.user import UserPillow
CHUNK_SIZE = 500
POOL_SIZE = 15
class Command(PtopReindexer):
    """Management command: fast full reindex of CommCareUser docs into the
    user Elasticsearch index, feeding UserPillow from the users/by_username view."""
    help = "Fast reindex of user elastic index by using the domain view and reindexing users"
    # Couch document class to reindex.
    doc_class = CommCareUser
    # Couch view used to enumerate the documents.
    view_name = 'users/by_username'
    # Pillow that performs the ES transforms/writes.
    pillow_class = UserPillow
| gmimano/commcaretest | corehq/apps/users/management/commands/ptop_fast_reindex_users.py | Python | bsd-3-clause | 433 |
# Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core, workspace
from caffe2.python.test_util import TestCase
import numpy as np
import numpy.testing as npt
from hypothesis import given
import hypothesis.strategies as st
import functools
def primefac(n):
    """Return the prime factorization of n as a non-decreasing list.

    primefac(12) -> [2, 2, 3]; primefac(1) -> [].
    """
    factors = []
    p = 2
    # Trial division: once p*p > n, any remaining n > 1 is itself prime.
    while p * p <= n:
        while n % p == 0:
            factors.append(p)
            n //= p
        p += 1
    if n > 1:
        factors.append(n)
    return factors
class TestReBatchingQueue(TestCase):
    """Tests for caffe2's RebatchingQueue operators: enqueue/dequeue in
    varying batch sizes, queue closing, multi-blob entries, and a randomized
    parallel producer/consumer plan."""

    def test_rebatching_queue_single_enqueue_dequeue(self):
        """Three single-element enqueues are matched by three single dequeues."""
        net = core.Net('net')
        tensors = [
            net.ConstantFill([], 1, value=1.0, run_once=False)
            for times in range(3)
        ]
        queue = net.CreateRebatchingQueue([], 1, capacity=10, num_blobs=1)
        net.EnqueueRebatchingQueue([queue, tensors[0]], [])
        net.EnqueueRebatchingQueue([queue, tensors[1]], [])
        net.EnqueueRebatchingQueue([queue, tensors[2]], [])
        results = [
            net.DequeueRebatchingQueue([queue], 1),
            net.DequeueRebatchingQueue([queue], 1),
            net.DequeueRebatchingQueue([queue], 1),
        ]
        workspace.RunNetOnce(net)
        for idx in range(3):
            # NOTE(review): assertEquals is a deprecated alias of assertEqual.
            self.assertEquals(workspace.FetchBlob(results[idx]), [1.0])

    def test_rebatching_queue_multi_enqueue_dequeue(self):
        """One batched enqueue of 10 elements dequeued as two batches of 5,
        preserving order."""
        net = core.Net('net')
        workspace.FeedBlob(
            "tensors", np.array([x for x in range(10)], np.int32)
        )
        queue = net.CreateRebatchingQueue([], 1, capacity=10, num_blobs=1)
        net.EnqueueRebatchingQueue([queue, "tensors"], [], enqueue_batch=True)
        results = [
            net.DequeueRebatchingQueue([queue], 1, num_elements=5),
            net.DequeueRebatchingQueue([queue], 1, num_elements=5),
        ]
        workspace.RunNetOnce(net)
        npt.assert_array_equal(
            workspace.FetchBlob(results[0]), workspace.FetchBlob("tensors")[:5]
        )
        npt.assert_array_equal(
            workspace.FetchBlob(results[1]), workspace.FetchBlob("tensors")[5:]
        )

    def test_rebatching_queue_closes_properly(self):
        """After CloseRebatchingQueue, buffered elements can still be drained,
        but further enqueues and dequeues raise."""
        net = core.Net('net')
        workspace.FeedBlob(
            "tensors", np.array([x for x in range(10)], np.int32)
        )
        queue = net.CreateRebatchingQueue([], 1, capacity=10, num_blobs=1)
        net.EnqueueRebatchingQueue([queue, "tensors"], 0, enqueue_batch=True)
        net.CloseRebatchingQueue([queue], 0)
        results = [
            net.DequeueRebatchingQueue([queue], 1, num_elements=5),
            net.DequeueRebatchingQueue([queue], 1, num_elements=5),
        ]
        workspace.RunNetOnce(net)
        npt.assert_array_equal(
            workspace.FetchBlob(results[0]), workspace.FetchBlob("tensors")[:5]
        )
        npt.assert_array_equal(
            workspace.FetchBlob(results[1]), workspace.FetchBlob("tensors")[5:]
        )
        # Enqueuing more should fail now since the queue is closed
        net.EnqueueRebatchingQueue([queue, "tensors"], [], enqueue_batch=True)
        with self.assertRaises(RuntimeError):
            workspace.RunNetOnce(net)
        # Dequeuing more should fail now since the queue is closed
        results = [
            net.DequeueRebatchingQueue([queue], 1, num_elements=5),
        ]
        with self.assertRaises(RuntimeError):
            workspace.RunNetOnce(net)

    def test_rebatching_queue_multiple_components(self):
        """Entries made of several blobs (int, float, bool, 2-D) stay aligned
        through a batched enqueue and a partial dequeue."""
        NUM_BLOBS = 4
        NUM_ELEMENTS = 10
        net = core.Net('net')
        workspace.blobs['complex_tensor'] = np.array(
            [[x, x + 1] for x in range(NUM_ELEMENTS)], dtype=np.int32
        )
        tensors = [
            net.GivenTensorIntFill(
                [],
                1,
                shape=[NUM_ELEMENTS],
                values=[x for x in range(NUM_ELEMENTS)]
            ),
            net.GivenTensorFill(
                [],
                1,
                shape=[NUM_ELEMENTS],
                values=[x * 1.0 for x in range(NUM_ELEMENTS)]
            ),
            net.GivenTensorBoolFill(
                [],
                1,
                shape=[NUM_ELEMENTS],
                values=[(x % 2 == 0) for x in range(NUM_ELEMENTS)]
            ),
            'complex_tensor',
        ]
        queue = net.CreateRebatchingQueue(
            [], 1, capacity=10, num_blobs=NUM_BLOBS
        )
        net.EnqueueRebatchingQueue([queue] + tensors, [], enqueue_batch=True)
        results = net.DequeueRebatchingQueue([queue], NUM_BLOBS, num_elements=5)
        workspace.RunNetOnce(net)
        for idx in range(NUM_BLOBS):
            npt.assert_array_equal(
                workspace.FetchBlob(results[idx]),
                workspace.FetchBlob(tensors[idx])[:5]
            )

    @given(
        num_producers=st.integers(1, 5),
        num_consumers=st.integers(1, 5),
        producer_input_size=st.integers(1, 10),
        producer_num_iterations=st.integers(1, 10),
        capacity=st.integers(1, 10)
    )
    def test_rebatching_parallel_producer_consumer(
        self, num_producers, num_consumers, producer_input_size,
        producer_num_iterations, capacity
    ):
        """Hypothesis-driven test: N producers enqueue concurrently while M
        consumers dequeue concurrently; the union of consumed elements must be
        a permutation of everything produced."""
        ### Init ###
        total_inputs = producer_num_iterations * producer_input_size * num_producers
        inputs = []
        init_net = core.Net('init_net')
        queue = init_net.CreateRebatchingQueue(
            [], 1, capacity=capacity, num_blobs=1
        )
        ### Producers ###
        producer_steps = []
        for i in range(num_producers):
            name = 'producer_%d' % i
            net = core.Net(name)
            # Each producer enqueues its own disjoint value range.
            values = [
                producer_input_size * i + x for x in range(producer_input_size)
            ]
            for _ in range(producer_num_iterations):
                inputs.extend(values)
            tensors = net.GivenTensorIntFill(
                [], 1, shape=[producer_input_size], values=values
            )
            net.EnqueueRebatchingQueue([queue, tensors], [], enqueue_batch=True)
            step = core.execution_step(
                name, net, num_iter=producer_num_iterations
            )
            producer_steps.append(step)
        producer_step = core.execution_step(
            'producer', [
                core.execution_step(
                    'producers', producer_steps, concurrent_substeps=True
                )
            ]
        )
        ### Consumers ###
        outputs = []

        def append(ins, outs):
            # Extend is atomic
            outputs.extend(ins[0].data.tolist())
        consumer_steps = []
        for i in range(num_consumers):
            # This is just a way of deterministally read all the elements.
            # We make `num_consumers` almost equal splits
            # (the reminder goes to the last consumer).
            num_elements_to_read = total_inputs // num_consumers
            if i == num_consumers - 1:
                num_elements_to_read = num_elements_to_read \
                    + total_inputs % num_consumers
            # If we have nothing to read this consumer will be idle
            if (num_elements_to_read == 0):
                continue
            # Now we have to make a split on number of iterations and the read
            # size for each iteration. This is again just one of many
            # deterministic ways of doing it. We factorize the total number of
            # elements we have to read and assign half of the factors to the
            # iterations half to the read size.
            factors = list(primefac(num_elements_to_read))
            num_elements_per_iteration = functools.reduce(
                lambda x, y: x * y, factors[len(factors) // 2:], 1
            )
            num_iterations = functools.reduce(
                lambda x, y: x * y, factors[:len(factors) // 2], 1
            )
            name = 'consumer_%d' % i
            net = core.Net(name)
            blobs = net.DequeueRebatchingQueue(
                [queue], 1, num_elements=num_elements_per_iteration
            )
            net.Python(append)([blobs], 0)
            consumer_steps.append(
                core.execution_step(name, net, num_iter=num_iterations)
            )
        consumer_step = core.execution_step(
            'consumer', consumer_steps, concurrent_substeps=True
        )
        init_step = core.execution_step('init', init_net)
        worker_step = core.execution_step(
            'worker', [consumer_step, producer_step], concurrent_substeps=True
        )
        ### Execute Plan ###
        plan = core.Plan('test')
        plan.AddStep(init_step)
        plan.AddStep(worker_step)
        # NOTE(review): self.ws appears to be the workspace wrapper supplied
        # by caffe2's TestCase base class — confirm.
        self.ws.run(plan)
        ### Check Results ###
        # We check that the outputs are a permutation of inputs
        inputs.sort()
        outputs.sort()
        self.assertEquals(inputs, outputs)
if __name__ == "__main__":
    # Allow running this test module directly, outside the caffe2 test runner.
    import unittest
    unittest.main()
| davinwang/caffe2 | caffe2/python/operator_test/rebatching_queue_test.py | Python | apache-2.0 | 9,823 |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
# vim:set shiftwidth=4 tabstop=4 expandtab textwidth=80:
__version__ = '1.2'
import os, sys
import csv
from lxml import etree
from types import *
def main(config):
    """For each csv row, locate the matching xml file and overwrite the text of
    each tag named after a csv column; results go to a 'test_'-prefixed copy.

    config: object with attributes `csvfile` (path to csv) and `xmlfiles`
    (directory of xml files).
    NOTE(review): Python 2 only (string.maketrans on str, dict.iteritems);
    requires the third-party lxml package.
    """
    import string
    # Spaces become underscores; the translate() call below additionally
    # deletes '-', '"' and "'" via the Python 2 deletechars argument.
    translationTable = string.maketrans(' ','_')
    # Read each line of the csv file, find the xml file to edit
    # then do a find&replace for each csv column.
    #
    # TODO define appropriate dialect (excel, excel-tab or own)
    # see http://docs.python.org/library/csv.html#csv-fmt-params
    csvReader = csv.DictReader(open(config.csvfile,'rU'))
    for cLine in csvReader:
        # Normalised entity name -> expected xml file name.
        name = cLine['name'].translate(translationTable,'-"\'').lower()
        gFilename = os.path.join(config.xmlfiles, name + '.xml')
        if not os.path.isfile(gFilename):
            print("ERROR: file {} doesn't exists.".format(gFilename))
            continue
        xmlReader = etree.parse(gFilename)
        # Match the tag one level under the document root.
        expr = '*/%s'
        for (cKey, cValue) in cLine.iteritems():
            if cKey == 'name':
                continue
            elem = xmlReader.find(expr % cKey)
            if type(elem) is NoneType:
                # A missing tag aborts the whole run.
                print('Cannot find tag {} in current file. Process cancelled.'.format(cKey))
                exit()
            elem.text = cValue
        # Output goes to a 'test_' copy so the original file stays untouched.
        newxmlname = os.path.join(os.path.dirname(gFilename),
                            'test_' + os.path.basename(gFilename))
        if not os.path.exists(newxmlname):
            open(newxmlname,'w').close()
        print("Since it's beta, please do a diff and validate %s"%newxmlname)
        xmlReader.write( newxmlname )
if __name__ == '__main__':
    # Command-line entry point: parse csvfile/xmlfiles paths and run main().
    from argparse import ArgumentParser
    parser = ArgumentParser(description="""
    Naev csv to xml tool v%s.
    This is a very basic tool, do not expect too much.
    """ % __version__)
    parser.add_argument('--version', action='version',
                        version='%(prog)s '+__version__)
    parser.add_argument('--verbose', action='store_true', default=False,
                        help='Going verbose to see hidden secrets')
    parser.add_argument('csvfile',
                        help='Path to csv files directory')
    parser.add_argument('xmlfiles',
                        help='Path to xml files')
    args = parser.parse_args()
    # Normalise both paths before handing them to main().
    args.csvfile = os.path.abspath(args.csvfile)
    args.xmlfiles = os.path.abspath(args.xmlfiles)
    main(args)
| Mutos/StarsOfCall-NAEV | utils/csv2xml/main.py | Python | gpl-3.0 | 2,441 |
from coilsnake.modules.eb.EbModule import EbModule
from coilsnake.util.common.yml import yml_load, yml_dump
from coilsnake.util.eb.pointer import to_snes_address
from coilsnake.util.eb.text import standard_text_to_byte_list
class SkipNamingModule(EbModule):
    """CoilSnake module that can patch the EarthBound ROM to skip the naming
    screen, hard-coding the party/pet/food/thing names from naming_skip.yml."""
    NAME = "Skip Names"

    def write_to_project(self, resource_open):
        """Write the default naming_skip.yml (skip disabled, canonical names)."""
        out = {"Enable Skip": False,
               "Enable Summary": False,
               "Name1": "Ness",
               "Name2": "Paula",
               "Name3": "Jeff",
               "Name4": "Poo",
               "Pet": "King",
               "Food": "Steak",
               "Thing": "Rockin"}
        with resource_open("naming_skip", "yml", True) as f:
            yml_dump(out, f, default_flow_style=False)

    def read_from_project(self, resource_open):
        """Load naming_skip.yml into self.data for write_to_rom()."""
        with resource_open("naming_skip", "yml", True) as f:
            self.data = yml_load(f)

    def write_loader_asm(self, rom, offset, s, strlen, mem_offset, byte2):
        """Emit one LDA #imm / STA addr pair per character of `s` (padded to
        `strlen`) starting at `offset`; returns the offset past the emitted
        code.

        0xa9/0x8d are 65C816 LDA-immediate / STA-absolute opcodes; the target
        address is (byte2 << 8 | mem_offset) + character index.
        """
        i = 0
        byte_list = standard_text_to_byte_list(s, strlen, False)
        for byte in byte_list:
            rom[offset:offset+5] = [0xa9, byte, 0x8d, mem_offset + i, byte2]
            i += 1
            offset += 5
        return offset

    def write_to_rom(self, rom):
        """If 'Enable Skip' is set, hook address 0x1faae with a long jump
        (0x5c = JML on the 65C816) into a freshly allocated routine that
        stores all configured names, then jumps back to one of two resume
        addresses depending on 'Enable Summary'."""
        if self.data["Enable Skip"]:
            rom[0x1faae] = 0x5c
            # 10 bytes of pre/postamble + 4 names of 5 chars + 3 items of 6
            # chars, at 5 bytes of code per character.
            offset = rom.allocate(size=(10 + 4 * 5 * 5 + 3 * 6 * 5))
            rom.write_multi(0x1faaf, to_snes_address(offset), 3)
            # Preamble: PHA, PHP, SEP #$20 (switch accumulator to 8-bit).
            rom[offset:offset+4] = [0x48, 0x08, 0xe2, 0x20]
            offset += 4
            offset = self.write_loader_asm(rom, offset, self.data["Name1"], 5, 0xce, 0x99)
            offset = self.write_loader_asm(rom, offset, self.data["Name2"], 5, 0x2d, 0x9a)
            offset = self.write_loader_asm(rom, offset, self.data["Name3"], 5, 0x8c, 0x9a)
            offset = self.write_loader_asm(rom, offset, self.data["Name4"], 5, 0xeb, 0x9a)
            offset = self.write_loader_asm(rom, offset, self.data["Pet"], 6, 0x19, 0x98)
            offset = self.write_loader_asm(rom, offset, self.data["Food"], 6, 0x1f, 0x98)
            offset = self.write_loader_asm(rom, offset, self.data["Thing"], 6, 0x29, 0x98)
            # Postamble: PLP, PLA, then JML back; the resume address differs
            # depending on whether the naming summary screen is shown.
            if self.data["Enable Summary"]:
                rom[offset:offset+6] = [0x28, 0x68, 0x5c, 0xc0, 0xfa, 0xc1]
            else:
                rom[offset:offset+6] = [0x28, 0x68, 0x5c, 0x05, 0xfd, 0xc1]
# encoding: utf-8
# Copyright 2011 Tree.io Limited
# This file is part of Treeio.
# License www.tree.io/license
#-*- coding: utf-8 -*-
from __future__ import absolute_import, with_statement
__all__ = ['EventHandler', ]
from treeio.events.models import Event
from treeio.events.forms import EventForm
from treeio.core.api.handlers import ObjectHandler
class EventHandler(ObjectHandler):
    "Entrypoint for Event model."
    model = Event
    form = EventForm

    @staticmethod
    def resource_uri():
        """Return the URL-pattern name and arguments used to reverse this resource."""
        return ('api_events', ['id'])

    def check_create_permission(self, request, mode):
        """Any caller reaching this handler may create events."""
        return True

    def flatten_dict(self, request):
        """Flatten the request like the base handler, but blank out the
        'date' and 'hour' fields so they are never taken from raw input."""
        # FIX: the original used super(self.__class__, self), which recurses
        # infinitely as soon as this class is subclassed; name the class
        # explicitly instead.
        dct = super(EventHandler, self).flatten_dict(request)
        dct["date"] = None
        dct["hour"] = None
        return dct
| havard024/prego | events/api/handlers.py | Python | mit | 797 |
import json
import os
import sys
import gettext as gettext_module
from django import http
from django.conf import settings
from django.template import Context, Template
from django.utils import importlib
from django.utils.translation import to_locale
from django.utils.encoding import smart_text
from django.utils.formats import get_format_modules, get_format
from django.utils import six
try:
    from django.utils._os import upath
except ImportError:
    # Older Django releases lack django.utils._os.upath; provide a local shim.
    def upath(path):
        """
        Always return a unicode path.
        """
        # On Python 2 a bytestring path is decoded with the filesystem
        # encoding; on Python 3 (and for unicode input) it is returned as-is.
        if six.PY2 and not isinstance(path, six.text_type):
            fs_encoding = sys.getfilesystemencoding() or sys.getdefaultencoding()
            return path.decode(fs_encoding)
        return path
def get_formats():
    """
    Returns all format strings required for i18n to work, as a dict mapping
    setting name to a text value (or list of text values).
    """
    FORMAT_SETTINGS = (
        'DATE_FORMAT', 'DATETIME_FORMAT', 'TIME_FORMAT',
        'YEAR_MONTH_FORMAT', 'MONTH_DAY_FORMAT', 'SHORT_DATE_FORMAT',
        'SHORT_DATETIME_FORMAT', 'FIRST_DAY_OF_WEEK', 'DECIMAL_SEPARATOR',
        'THOUSAND_SEPARATOR', 'NUMBER_GROUPING',
        'DATE_INPUT_FORMATS', 'TIME_INPUT_FORMATS', 'DATETIME_INPUT_FORMATS'
    )
    # FIX: the original looped over [settings] + get_format_modules(...) but
    # never used the module variable — get_format(attr) already consults the
    # settings and every format module itself, so the loop recomputed the
    # identical dict once per module.  One call per setting suffices.
    result = {}
    for attr in FORMAT_SETTINGS:
        result[attr] = get_format(attr)
    # Normalise: scalars become text; tuples/lists become lists of text.
    # Values of any other type are dropped, as in the original.
    formats = {}
    for k, v in result.items():
        if isinstance(v, (six.string_types, int)):
            formats[k] = smart_text(v)
        elif isinstance(v, (tuple, list)):
            formats[k] = [smart_text(value) for value in v]
    return formats
js_catalog_template = r"""
{% autoescape off %}
(function (globals) {
var django = globals.django || (globals.django = {});
{% if plural %}
django.pluralidx = function (n) {
var v={{ plural }};
if (typeof(v) == 'boolean') {
return v ? 1 : 0;
} else {
return v;
}
};
{% else %}
django.pluralidx = function (count) { return (count == 1) ? 0 : 1; };
{% endif %}
{% if catalog_str %}
/* gettext library */
django.catalog = {{ catalog_str }};
django.gettext = function (msgid) {
var value = django.catalog[msgid];
if (typeof(value) == 'undefined') {
return msgid;
} else {
return (typeof(value) == 'string') ? value : value[0];
}
};
django.ngettext = function (singular, plural, count) {
var value = django.catalog[singular];
if (typeof(value) == 'undefined') {
return (count == 1) ? singular : plural;
} else {
return value[django.pluralidx(count)];
}
};
django.gettext_noop = function (msgid) { return msgid; };
django.pgettext = function (context, msgid) {
var value = django.gettext(context + '\x04' + msgid);
if (value.indexOf('\x04') != -1) {
value = msgid;
}
return value;
};
django.npgettext = function (context, singular, plural, count) {
var value = django.ngettext(context + '\x04' + singular, context + '\x04' + plural, count);
if (value.indexOf('\x04') != -1) {
value = django.ngettext(singular, plural, count);
}
return value;
};
{% else %}
/* gettext identity library */
django.gettext = function (msgid) { return msgid; };
django.ngettext = function (singular, plural, count) { return (count == 1) ? singular : plural; };
django.gettext_noop = function (msgid) { return msgid; };
django.pgettext = function (context, msgid) { return msgid; };
django.npgettext = function (context, singular, plural, count) { return (count == 1) ? singular : plural; };
{% endif %}
django.interpolate = function (fmt, obj, named) {
if (named) {
return fmt.replace(/%\(\w+\)s/g, function(match){return String(obj[match.slice(2,-2)])});
} else {
return fmt.replace(/%s/g, function(match){return String(obj.shift())});
}
};
/* formatting library */
django.formats = {{ formats_str }};
django.get_format = function (format_type) {
var value = django.formats[format_type];
if (typeof(value) == 'undefined') {
return format_type;
} else {
return value;
}
};
/* add to global namespace */
globals.pluralidx = django.pluralidx;
globals.gettext = django.gettext;
globals.ngettext = django.ngettext;
globals.gettext_noop = django.gettext_noop;
globals.pgettext = django.pgettext;
globals.npgettext = django.npgettext;
globals.interpolate = django.interpolate;
globals.get_format = django.get_format;
}(this));
{% endautoescape %}
"""
def render_javascript_catalog(catalog=None, plural=None):
    """Render the JS i18n catalog template to a text/javascript HttpResponse.

    catalog: optional dict of msgid -> translation(s), embedded as JSON.
    plural: optional Plural-Forms expression string embedded verbatim.
    """
    template = Template(js_catalog_template)

    # PEP 8 (E731): use a named def rather than a lambda bound to a name.
    def indent(s):
        # Shift nested JSON lines right so they align inside the template.
        return s.replace('\n', '\n  ')

    context = Context({
        'catalog_str': indent(json.dumps(
            catalog, sort_keys=True, indent=2)) if catalog else None,
        'formats_str': indent(json.dumps(
            get_formats(), sort_keys=True, indent=2)),
        'plural': plural,
    })

    return http.HttpResponse(template.render(context), 'text/javascript')
def get_javascript_catalog(locale, domain, packages):
    """
    Build the gettext catalog served by the javascript i18n view.

    Merges the ``domain`` catalogs found under each package in ``packages``
    (restricted to 'django.conf' and installed apps) plus the paths in
    settings.LOCALE_PATHS, layering: english defaults, then the
    settings.LANGUAGE_CODE catalog, then the requested ``locale``.

    Returns:
        A ``(catalog, plural)`` tuple: ``catalog`` maps msgids to
        translations — plural msgids map to a list indexed by plural form —
        and ``plural`` is the plural-forms expression from the catalog
        metadata (or None).
    """
    default_locale = to_locale(settings.LANGUAGE_CODE)
    packages = [p for p in packages if p == 'django.conf' or p in settings.INSTALLED_APPS]
    t = {}
    paths = []
    en_selected = locale.startswith('en')
    en_catalog_missing = True
    # paths of requested packages
    for package in packages:
        p = importlib.import_module(package)
        path = os.path.join(os.path.dirname(upath(p.__file__)), 'locale')
        paths.append(path)
    # add the filesystem paths listed in the LOCALE_PATHS setting
    paths.extend(list(reversed(settings.LOCALE_PATHS)))
    # first load all english languages files for defaults
    for path in paths:
        try:
            catalog = gettext_module.translation(domain, path, ['en'])
            t.update(catalog._catalog)
        except IOError:
            pass
        else:
            # 'en' is the selected language and at least one of the packages
            # listed in `packages` has an 'en' catalog
            if en_selected:
                en_catalog_missing = False
    # next load the settings.LANGUAGE_CODE translations if it isn't english
    if default_locale != 'en':
        for path in paths:
            try:
                catalog = gettext_module.translation(domain, path, [default_locale])
            except IOError:
                catalog = None
            if catalog is not None:
                t.update(catalog._catalog)
    # last load the currently selected language, if it isn't identical to the default.
    if locale != default_locale:
        # If the currently selected language is English but it doesn't have a
        # translation catalog (presumably due to being the language translated
        # from) then a wrong language catalog might have been loaded in the
        # previous step. It needs to be discarded.
        if en_selected and en_catalog_missing:
            t = {}
        else:
            locale_t = {}
            for path in paths:
                try:
                    catalog = gettext_module.translation(domain, path, [locale])
                except IOError:
                    catalog = None
                if catalog is not None:
                    locale_t.update(catalog._catalog)
            if locale_t:
                t = locale_t
    plural = None
    if '' in t:
        # The metadata entry (empty msgid) carries the Plural-Forms header.
        for l in t[''].split('\n'):
            if l.startswith('Plural-Forms:'):
                plural = l.split(':', 1)[1].strip()
    if plural is not None:
        # this should actually be a compiled function of a typical plural-form:
        # Plural-Forms: nplurals=3; plural=n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2;
        plural = [el.strip() for el in plural.split(';') if el.strip().startswith('plural=')][0].split('=', 1)[1]
    pdict = {}
    maxcnts = {}
    catalog = {}
    for k, v in t.items():
        if k == '':
            continue
        if isinstance(k, six.string_types):
            catalog[k] = v
        elif isinstance(k, tuple):
            msgid = k[0]
            cnt = k[1]
            maxcnts[msgid] = max(cnt, maxcnts.get(msgid, 0))
            pdict.setdefault(msgid, {})[cnt] = v
        else:
            raise TypeError(k)
    for k, v in pdict.items():
        # BUG FIX: index maxcnts by the msgid currently being emitted (k),
        # not by the loop-carried `msgid` left over from the previous loop,
        # which sized every plural list after whichever tuple key happened
        # to be iterated last.
        catalog[k] = [v.get(i, '') for i in range(maxcnts[k] + 1)]
    return catalog, plural
| devs1991/test_edx_docmode | venv/lib/python2.7/site-packages/statici18n/compat.py | Python | agpl-3.0 | 8,409 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for morphological filtering operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import nn_ops
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
class DilationTest(test.TestCase):
  """Tests forward values and gradients of nn_ops.dilation2d on CPU/GPU."""

  def _VerifyValues(self, image, kernel, strides, rates, padding, out, use_gpu):
    """Verifies the output values of the dilation function.

    Args:
      image: Input tensor with shape: [batch, in_height, in_width, channels].
      kernel: Filter tensor with shape: [filter_height, filter_width, channels].
      strides: Output strides, specified as [stride_height, stride_width].
      rates: Atrous rates, specified as [rate_height, rate_width].
      padding: Padding type.
      out: Expected output.
      use_gpu: Whether we are running on GPU.
    """
    # dilation2d expects NHWC-style 4-element strides/rates.
    strides = [1] + strides + [1]
    rates = [1] + rates + [1]
    with self.cached_session(use_gpu=use_gpu):
      out_tensor = nn_ops.dilation2d(
          constant_op.constant(image),
          constant_op.constant(kernel),
          strides=strides,
          rates=rates,
          padding=padding,
          name="dilation2d")
      self.assertAllClose(out, self.evaluate(out_tensor))

  def _testDilationValidPadding(self, use_gpu):
    # [1, 2, 2, 1]
    image = [[[[.1], [.2]], [[.3], [.4]]]]
    # [2, 2, 1]
    kernel = [[[.4], [.3]], [[.1], [.0]]]
    # [1, 1, 1, 1]
    out = [[[[.5]]]]
    self._VerifyValues(
        image,
        kernel,
        strides=[1, 1],
        rates=[1, 1],
        padding="VALID",
        out=out,
        use_gpu=use_gpu)

  def _testDilationSamePadding(self, use_gpu):
    # [1, 2, 2, 1]
    image = [[[[.1], [.2]], [[.3], [.4]]]]
    # [2, 2, 1]
    kernel = [[[.4], [.3]], [[.1], [.0]]]
    # [1, 2, 2, 1]
    out = [[[[.5], [.6]], [[.7], [.8]]]]
    self._VerifyValues(
        image,
        kernel,
        strides=[1, 1],
        rates=[1, 1],
        padding="SAME",
        out=out,
        use_gpu=use_gpu)

  def _testDilationSamePaddingDepth(self, use_gpu):
    # [1, 2, 2, 3]
    image = [[[[.1, .2, .0], [.2, .3, .1]], [[.3, .4, .2], [.4, .5, .3]]]]
    # [2, 2, 3]
    kernel = [[[.4, .5, .3], [.3, .4, .2]], [[.1, .2, .0], [.0, .1, -.1]]]
    # [1, 2, 2, 3]
    out = [[[[.5, .7, .3], [.6, .8, .4]], [[.7, .9, .5], [.8, 1., .6]]]]
    self._VerifyValues(
        image,
        kernel,
        strides=[1, 1],
        rates=[1, 1],
        padding="SAME",
        out=out,
        use_gpu=use_gpu)

  def _testDilationSamePaddingBatch(self, use_gpu):
    # [2, 2, 2, 1]
    image = [[[[.1], [.2]], [[.3], [.4]]], [[[.2], [.3]], [[.4], [.5]]]]
    # [2, 2, 1]
    kernel = [[[.4], [.3]], [[.1], [.0]]]
    # [2, 2, 2, 1]
    out = [[[[.5], [.6]], [[.7], [.8]]], [[[.6], [.7]], [[.8], [.9]]]]
    self._VerifyValues(
        image,
        kernel,
        strides=[1, 1],
        rates=[1, 1],
        padding="SAME",
        out=out,
        use_gpu=use_gpu)

  def _testDilationValidPaddingNonSquareWindow(self, use_gpu):
    # [1, 2, 2, 1]
    image = [[[[.1], [.2]], [[.3], [.4]]]]
    # [1, 2, 1]
    kernel = [[[.4], [.3]]]
    # [1, 2, 1, 1]
    out = [[[[.5]], [[.7]]]]
    self._VerifyValues(
        image,
        kernel,
        strides=[1, 1],
        rates=[1, 1],
        padding="VALID",
        out=out,
        use_gpu=use_gpu)

  def _testDilationSamePaddingRate(self, use_gpu):
    # [1, 3, 3, 1]
    image = [[[[.1], [.2], [.3]], [[.4], [.5], [.6]], [[.7], [.8], [.9]]]]
    # [2, 2, 1]
    kernel = [[[.4], [.3]], [[.1], [.2]]]
    # Because rate = 2, the effective kernel is [3, 3, 1]:
    # kernel_eff = [[[.4], [.0], [.3]],
    #               [[.0], [.0], [.0]],
    #               [[.1], [.0], [.2]]]
    # [1, 3, 3, 1]
    out = [[[[.7], [.8], [.6]], [[1.0], [1.1], [.9]], [[.8], [.9], [.9]]]]
    self._VerifyValues(
        image,
        kernel,
        strides=[1, 1],
        rates=[2, 2],
        padding="SAME",
        out=out,
        use_gpu=use_gpu)

  def _testDilationValidPaddingUnevenStride(self, use_gpu):
    # [1, 3, 4, 1]  (3 rows x 4 cols)
    image = [[[[.1], [.2], [.3], [.4]], [[.5], [.6], [.7], [.8]],
              [[.9], [1.0], [1.1], [1.2]]]]
    # [2, 2, 1]
    kernel = [[[.4], [.3]], [[.1], [.2]]]
    # [1, 2, 2, 1]
    out = [[[[.8], [1.0]], [[1.2], [1.4]]]]
    self._VerifyValues(
        image,
        kernel,
        strides=[1, 2],
        rates=[1, 1],
        padding="VALID",
        out=out,
        use_gpu=use_gpu)

  def testDilation(self):
    for use_gpu in True, False:
      self._testDilationValidPadding(use_gpu)
      self._testDilationSamePadding(use_gpu)
      self._testDilationSamePaddingDepth(use_gpu)
      self._testDilationSamePaddingBatch(use_gpu)
      self._testDilationValidPaddingNonSquareWindow(use_gpu)
      self._testDilationSamePaddingRate(use_gpu)
      self._testDilationValidPaddingUnevenStride(use_gpu)

  def _ConstructAndTestGradient(self, image_shape, kernel_shape, strides, rates,
                                padding, use_gpu):
    """Verifies the gradients of the dilation function.

    Args:
      image_shape: Input shape, [batch, in_height, in_width, channels].
      kernel_shape: Filter shape, [filter_height, filter_width, channels].
      strides: Output strides, specified as [stride_height, stride_width].
      rates: Atrous rates, specified as [rate_height, rate_width].
      padding: Padding type.
      use_gpu: Whether we are running on GPU.
    """
    assert image_shape[3] == kernel_shape[2]
    np.random.seed(1)  # Make it reproducible.
    image = np.random.random_sample(image_shape).astype(np.float32)
    kernel = np.random.random_sample(kernel_shape).astype(np.float32)
    image_init = np.random.random_sample(image_shape).astype(np.float32)
    kernel_init = np.random.random_sample(kernel_shape).astype(np.float32)
    strides = [1] + strides + [1]
    rates = [1] + rates + [1]
    with self.cached_session(use_gpu=use_gpu):
      image_tensor = constant_op.constant(
          image, shape=image_shape, name="input")
      kernel_tensor = constant_op.constant(
          kernel, shape=kernel_shape, name="filter")
      out_tensor = nn_ops.dilation2d(
          image_tensor,
          kernel_tensor,
          strides=strides,
          rates=rates,
          padding=padding,
          name="dilation2d")
      out_shape = self.evaluate(out_tensor).shape
      # Small delta is necessary for argmax to remain the same.
      err = gradient_checker.compute_gradient_error(
          [image_tensor, kernel_tensor], [image_shape, kernel_shape],
          out_tensor,
          out_shape, [image_init, kernel_init],
          delta=1e-3)
    print("Dilation gradient error = %f" % err)
    self.assertLess(err, 1e-4)

  def _testDilationGradValidPadding_1x1x1(self, use_gpu):
    self._ConstructAndTestGradient(
        image_shape=[1, 3, 3, 1],
        kernel_shape=[1, 1, 1],
        strides=[1, 1],
        rates=[1, 1],
        padding="VALID",
        use_gpu=use_gpu)

  def _testDilationGradSamePadding_1x1x1(self, use_gpu):
    self._ConstructAndTestGradient(
        image_shape=[1, 3, 3, 1],
        kernel_shape=[1, 1, 1],
        strides=[1, 1],
        rates=[1, 1],
        padding="SAME",
        use_gpu=use_gpu)

  def _testDilationGradSamePadding_1x1x2(self, use_gpu):
    self._ConstructAndTestGradient(
        image_shape=[1, 3, 3, 2],
        kernel_shape=[1, 1, 2],
        strides=[1, 1],
        rates=[1, 1],
        padding="SAME",
        use_gpu=use_gpu)

  def _testDilationGradValidPadding_2x2x1(self, use_gpu):
    self._ConstructAndTestGradient(
        image_shape=[1, 3, 3, 1],
        kernel_shape=[2, 2, 1],
        strides=[1, 1],
        rates=[1, 1],
        padding="VALID",
        use_gpu=use_gpu)

  def _testDilationGradSamePadding_2x2x1(self, use_gpu):
    self._ConstructAndTestGradient(
        image_shape=[1, 3, 3, 1],
        kernel_shape=[2, 2, 1],
        strides=[1, 1],
        rates=[1, 1],
        padding="SAME",
        use_gpu=use_gpu)

  def _testDilationGradSamePaddingBatch_2x2x1(self, use_gpu):
    self._ConstructAndTestGradient(
        image_shape=[4, 3, 3, 1],
        kernel_shape=[2, 2, 1],
        strides=[1, 1],
        rates=[1, 1],
        padding="SAME",
        use_gpu=use_gpu)

  def _testDilationGradSamePadding_2x2x4(self, use_gpu):
    self._ConstructAndTestGradient(
        image_shape=[1, 3, 3, 4],
        kernel_shape=[2, 2, 4],
        strides=[1, 1],
        rates=[1, 1],
        padding="SAME",
        use_gpu=use_gpu)

  def testDilationGrad(self):
    for use_gpu in True, False:
      self._testDilationGradValidPadding_1x1x1(use_gpu)
      self._testDilationGradSamePadding_1x1x1(use_gpu)
      self._testDilationGradSamePadding_1x1x2(use_gpu)
      self._testDilationGradValidPadding_2x2x1(use_gpu)
      self._testDilationGradSamePadding_2x2x1(use_gpu)
      self._testDilationGradSamePaddingBatch_2x2x1(use_gpu)
      self._testDilationGradSamePadding_2x2x4(use_gpu)
class ErosionTest(test.TestCase):
  """Tests forward values and gradients of nn_ops.erosion2d on CPU/GPU."""

  def _VerifyValues(self, image, kernel, strides, rates, padding, out, use_gpu):
    """Verifies the output values of the erosion function.

    Args:
      image: Input tensor with shape: [batch, in_height, in_width, channels].
      kernel: Filter tensor with shape: [filter_height, filter_width, channels].
      strides: Output strides, specified as [stride_height, stride_width].
      rates: Atrous rates, specified as [rate_height, rate_width].
      padding: Padding type.
      out: Expected output.
      use_gpu: Whether we are running on GPU.
    """
    # erosion2d expects NHWC-style 4-element strides/rates.
    strides = [1] + strides + [1]
    rates = [1] + rates + [1]
    with self.cached_session(use_gpu=use_gpu):
      out_tensor = nn_ops.erosion2d(
          constant_op.constant(image),
          constant_op.constant(kernel),
          strides=strides,
          rates=rates,
          padding=padding,
          name="erosion2d")
      self.assertAllClose(out, self.evaluate(out_tensor))

  def _testErosionValidPadding(self, use_gpu):
    # [1, 2, 2, 1]
    image = [[[[.1], [.2]], [[.3], [.4]]]]
    # [2, 2, 1]
    kernel = [[[.4], [.3]], [[.1], [.0]]]
    # [1, 1, 1, 1]
    out = [[[[.0]]]]
    self._VerifyValues(
        image,
        kernel,
        strides=[1, 1],
        rates=[1, 1],
        padding="VALID",
        out=out,
        use_gpu=use_gpu)

  def _testErosionSamePadding(self, use_gpu):
    # [1, 2, 2, 1]
    image = [[[[.1], [.2]], [[.3], [.4]]]]
    # [2, 2, 1]
    kernel = [[[.4], [.3]], [[.1], [.0]]]
    # [1, 2, 2, 1]
    out = [[[[.0], [.1]], [[.3], [.4]]]]
    self._VerifyValues(
        image,
        kernel,
        strides=[1, 1],
        rates=[1, 1],
        padding="SAME",
        out=out,
        use_gpu=use_gpu)

  def _testErosionSamePaddingDepth(self, use_gpu):
    # [1, 2, 2, 3]
    image = [[[[.1, .2, .0], [.2, .3, .1]], [[.3, .4, .2], [.4, .5, .3]]]]
    # [2, 2, 3]
    kernel = [[[.4, .5, .3], [.3, .4, .2]], [[.1, .2, .0], [.0, .1, -.1]]]
    # [1, 2, 2, 3]
    out = [[[[.0, .0, .0], [.1, .1, .1]], [[.3, .3, .3], [.4, .4, .4]]]]
    self._VerifyValues(
        image,
        kernel,
        strides=[1, 1],
        rates=[1, 1],
        padding="SAME",
        out=out,
        use_gpu=use_gpu)

  def _testErosionSamePaddingBatch(self, use_gpu):
    # [2, 2, 2, 1]
    image = [[[[.1], [.2]], [[.3], [.4]]], [[[.2], [.3]], [[.4], [.5]]]]
    # [2, 2, 1]
    kernel = [[[.4], [.3]], [[.1], [.0]]]
    # [2, 2, 2, 1]
    out = [[[[.0], [.1]], [[.3], [.4]]], [[[.1], [.2]], [[.4], [.5]]]]
    self._VerifyValues(
        image,
        kernel,
        strides=[1, 1],
        rates=[1, 1],
        padding="SAME",
        out=out,
        use_gpu=use_gpu)

  def _testErosionValidPaddingNonSquareWindow(self, use_gpu):
    # [1, 2, 2, 1]
    image = [[[[.1], [.2]], [[.3], [.4]]]]
    # [1, 2, 1]
    kernel = [[[.4], [.3]]]
    # [1, 2, 1, 1]
    out = [[[[-.2]], [[.0]]]]
    self._VerifyValues(
        image,
        kernel,
        strides=[1, 1],
        rates=[1, 1],
        padding="VALID",
        out=out,
        use_gpu=use_gpu)

  def _testErosionSamePaddingRate(self, use_gpu):
    # [1, 3, 3, 1]
    image = [[[[.1], [.2], [.3]], [[.4], [.5], [.6]], [[.7], [.8], [.9]]]]
    # [2, 2, 1]
    kernel = [[[.4], [.3]], [[.1], [.2]]]
    # Because rate = 2, the effective kernel is [3, 3, 1]:
    # kernel_eff = [[[.4], [.0], [.3]],
    #               [[.0], [.0], [.0]],
    #               [[.1], [.0], [.2]]]
    # [1, 3, 3, 1]
    out = [[[[.1], [.1], [.2]], [[0.1], [-.1], [.0]], [[.4], [.2], [.3]]]]
    self._VerifyValues(
        image,
        kernel,
        strides=[1, 1],
        rates=[2, 2],
        padding="SAME",
        out=out,
        use_gpu=use_gpu)

  def _testErosionValidPaddingUnevenStride(self, use_gpu):
    # [1, 3, 4, 1]  (3 rows x 4 cols)
    image = [[[[.1], [.2], [.3], [.4]], [[.5], [.6], [.7], [.8]],
              [[.9], [1.0], [1.1], [1.2]]]]
    # [2, 2, 1]
    kernel = [[[.4], [.3]], [[.1], [.2]]]
    # [1, 2, 2, 1]
    out = [[[[-.1], [.1]], [[.3], [.5]]]]
    self._VerifyValues(
        image,
        kernel,
        strides=[1, 2],
        rates=[1, 1],
        padding="VALID",
        out=out,
        use_gpu=use_gpu)

  def testErosion(self):
    for use_gpu in True, False:
      self._testErosionValidPadding(use_gpu)
      self._testErosionSamePadding(use_gpu)
      self._testErosionSamePaddingDepth(use_gpu)
      self._testErosionSamePaddingBatch(use_gpu)
      self._testErosionValidPaddingNonSquareWindow(use_gpu)
      self._testErosionSamePaddingRate(use_gpu)
      self._testErosionValidPaddingUnevenStride(use_gpu)

  def _ConstructAndTestGradient(self, image_shape, kernel_shape, strides, rates,
                                padding, use_gpu):
    """Verifies the gradients of the erosion function.

    Args:
      image_shape: Input shape, [batch, in_height, in_width, channels].
      kernel_shape: Filter shape, [filter_height, filter_width, channels].
      strides: Output strides, specified as [stride_height, stride_width].
      rates: Atrous rates, specified as [rate_height, rate_width].
      padding: Padding type.
      use_gpu: Whether we are running on GPU.
    """
    assert image_shape[3] == kernel_shape[2]
    np.random.seed(1)  # Make it reproducible.
    image = np.random.random_sample(image_shape).astype(np.float32)
    kernel = np.random.random_sample(kernel_shape).astype(np.float32)
    image_init = np.random.random_sample(image_shape).astype(np.float32)
    kernel_init = np.random.random_sample(kernel_shape).astype(np.float32)
    strides = [1] + strides + [1]
    rates = [1] + rates + [1]
    with self.cached_session(use_gpu=use_gpu):
      image_tensor = constant_op.constant(
          image, shape=image_shape, name="input")
      kernel_tensor = constant_op.constant(
          kernel, shape=kernel_shape, name="filter")
      out_tensor = nn_ops.erosion2d(
          image_tensor,
          kernel_tensor,
          strides=strides,
          rates=rates,
          padding=padding,
          name="erosion2d")
      out_shape = self.evaluate(out_tensor).shape
      # Small delta is necessary for argmax to remain the same.
      err = gradient_checker.compute_gradient_error(
          [image_tensor, kernel_tensor], [image_shape, kernel_shape],
          out_tensor,
          out_shape, [image_init, kernel_init],
          delta=1e-3)
    print("Erosion gradient error = %f" % err)
    self.assertLess(err, 1e-4)

  def _testErosionGradValidPadding_1x1x1(self, use_gpu):
    self._ConstructAndTestGradient(
        image_shape=[1, 3, 3, 1],
        kernel_shape=[1, 1, 1],
        strides=[1, 1],
        rates=[1, 1],
        padding="VALID",
        use_gpu=use_gpu)

  def _testErosionGradSamePadding_1x1x1(self, use_gpu):
    self._ConstructAndTestGradient(
        image_shape=[1, 3, 3, 1],
        kernel_shape=[1, 1, 1],
        strides=[1, 1],
        rates=[1, 1],
        padding="SAME",
        use_gpu=use_gpu)

  def _testErosionGradSamePadding_1x1x2(self, use_gpu):
    self._ConstructAndTestGradient(
        image_shape=[1, 3, 3, 2],
        kernel_shape=[1, 1, 2],
        strides=[1, 1],
        rates=[1, 1],
        padding="SAME",
        use_gpu=use_gpu)

  def _testErosionGradValidPadding_2x2x1(self, use_gpu):
    self._ConstructAndTestGradient(
        image_shape=[1, 3, 3, 1],
        kernel_shape=[2, 2, 1],
        strides=[1, 1],
        rates=[1, 1],
        padding="VALID",
        use_gpu=use_gpu)

  def _testErosionGradSamePadding_2x2x1(self, use_gpu):
    self._ConstructAndTestGradient(
        image_shape=[1, 3, 3, 1],
        kernel_shape=[2, 2, 1],
        strides=[1, 1],
        rates=[1, 1],
        padding="SAME",
        use_gpu=use_gpu)

  def _testErosionGradSamePaddingBatch_2x2x1(self, use_gpu):
    self._ConstructAndTestGradient(
        image_shape=[4, 3, 3, 1],
        kernel_shape=[2, 2, 1],
        strides=[1, 1],
        rates=[1, 1],
        padding="SAME",
        use_gpu=use_gpu)

  def _testErosionGradSamePadding_2x2x4(self, use_gpu):
    self._ConstructAndTestGradient(
        image_shape=[1, 3, 3, 4],
        kernel_shape=[2, 2, 4],
        strides=[1, 1],
        rates=[1, 1],
        padding="SAME",
        use_gpu=use_gpu)

  def testErosionGrad(self):
    for use_gpu in True, False:
      self._testErosionGradValidPadding_1x1x1(use_gpu)
      self._testErosionGradSamePadding_1x1x1(use_gpu)
      self._testErosionGradSamePadding_1x1x2(use_gpu)
      self._testErosionGradValidPadding_2x2x1(use_gpu)
      self._testErosionGradSamePadding_2x2x1(use_gpu)
      self._testErosionGradSamePaddingBatch_2x2x1(use_gpu)
      self._testErosionGradSamePadding_2x2x4(use_gpu)
if __name__ == "__main__":
  # Run all test cases in this module.
  test.main()
| hehongliang/tensorflow | tensorflow/python/kernel_tests/morphological_ops_test.py | Python | apache-2.0 | 18,659 |
# -*- coding: utf-8 -*-
# Copyright (C) 2012-2013 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Switch postgres to json native type.
Revision ID: 2ad4984f2864
Revises: 3162c0f3f8e4
Create Date: 2015-06-04 13:08:36.667948
"""
# revision identifiers, used by Alembic.
revision = '2ad4984f2864'
down_revision = '3162c0f3f8e4'
from alembic import op
# Upgrade template: casts the existing TEXT payload to the native postgres
# JSON type (the USING clause performs the conversion in place).
_ALTER_TO_JSON_TPL = 'ALTER TABLE %s ALTER COLUMN %s TYPE JSON USING %s::JSON'

# (table, column) pairs whose payloads are stored as serialized JSON.
_TABLES_COLS = tuple([
    ('logbooks', 'meta'),
    ('flowdetails', 'meta'),
    ('atomdetails', 'meta'),
    ('atomdetails', 'failure'),
    ('atomdetails', 'revert_failure'),
    ('atomdetails', 'results'),
    ('atomdetails', 'revert_results'),
])

# Downgrade template: JSON -> TEXT needs no explicit USING cast.
_ALTER_TO_TEXT_TPL = 'ALTER TABLE %s ALTER COLUMN %s TYPE TEXT'
def upgrade():
    """Convert the serialized-JSON text columns to native JSON (postgres only)."""
    bind = op.get_bind()
    if not bind.dialect.name.startswith('postgresql'):
        # Other dialects keep their TEXT columns; nothing to migrate.
        return
    for table_name, col_name in _TABLES_COLS:
        op.execute(_ALTER_TO_JSON_TPL % (table_name, col_name, col_name))
def downgrade():
    """Revert the native JSON columns back to plain TEXT (postgres only)."""
    bind = op.get_bind()
    if not bind.dialect.name.startswith('postgresql'):
        # Other dialects were never altered; nothing to revert.
        return
    for table_name, col_name in _TABLES_COLS:
        op.execute(_ALTER_TO_TEXT_TPL % (table_name, col_name))
| jimbobhickville/taskflow | taskflow/persistence/backends/sqlalchemy/alembic/versions/2ad4984f2864_switch_postgres_to_json_native.py | Python | apache-2.0 | 1,778 |
#!/usr/bin/env python
import socket, ssl
# This is a copy of _RESTRICTED_SERVER_CIPHERS from the current tip of ssl.py
# <https://hg.python.org/cpython/file/af793c7580f1/Lib/ssl.py#l174> except that
# RC4 has been added back in, since it was removed in Python 2.7.10,
# but SSLStreamConnection only supports RC4 ciphers.
CIPHERS = (
    'ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128:DH+AES:ECDH+HIGH:'
    'DH+HIGH:ECDH+3DES:DH+3DES:RSA+AESGCM:RSA+AES:RSA+HIGH:RSA+3DES:!aNULL:'
    '!eNULL:!MD5:!DSS:RC4'
)

# Listen on localhost:54443 and echo TLS traffic back to each client,
# one connection at a time.
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind(('localhost', 54443))
s.listen(5)

while True:
    newsocket, fromaddr = s.accept()
    try:
        # NOTE(review): ssl.wrap_socket is deprecated (removed in Python
        # 3.12); kept here since this script targets older interpreters.
        connstream = ssl.wrap_socket(newsocket,
                                     server_side=True,
                                     certfile="cert.pem",
                                     keyfile="cert.pem",
                                     ciphers=CIPHERS)
    except ssl.SSLError as e:
        # Catch occurrences of:
        #   ssl.SSLEOFError: EOF occurred in violation of protocol (_ssl.c:581)
        #
        # In theory, setting ssl_version to ssl.PROTOCOL_TLSv1 will resolve
        # the problem, but it didn't do so for me, and it caused the error:
        #   ssl.SSLError: [SSL: WRONG_VERSION_NUMBER] wrong version number (_ssl.c:581)
        #
        # Whereas the SSLEOFError doesn't prevent the server from working
        # (it seems to happen only when the server is first started, and it
        # stops happening if we simply ignore it and try again a few times)
        # so we leave ssl_version at ssl.PROTOCOL_SSLv3 and ignore that error.
        #
        # If we catch SSLEOFError specifically, then Travis fails with:
        #   AttributeError: 'module' object has no attribute 'SSLEOFError'
        # So we catch the more general exception SSLError.
        #
        # NOTE(review): newsocket is not closed on handshake failure here —
        # possible fd leak under repeated failed handshakes; confirm intent.
        continue
    try:
        # Echo loop: read() returns b'' when the peer closes the stream.
        data = connstream.read()
        while data:
            connstream.write(data)
            data = connstream.read()
    finally:
        try:
            connstream.shutdown(socket.SHUT_RDWR)
        except socket.error as e:
            # On Mac, if the other side has already closed the connection,
            # then socket.shutdown will fail, but we can ignore this failure.
            pass
        connstream.close()
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Exercise the listtransactions API
from test_framework.test_framework import InfinitumTestFramework
from test_framework.util import *
from test_framework.mininode import CTransaction, COIN
from io import BytesIO
def txFromHex(hexstring):
    """Deserialize a hex-encoded raw transaction into a CTransaction."""
    tx = CTransaction()
    tx.deserialize(BytesIO(hex_str_to_bytes(hexstring)))
    return tx
class ListTransactionsTest(InfinitumTestFramework):
    """Exercise the listtransactions RPC, including the BIP125 (opt-in RBF)
    "bip125-replaceable" field for sent and received transactions."""

    def __init__(self):
        super().__init__()
        self.num_nodes = 4
        self.setup_clean_chain = False

    def setup_nodes(self):
        #This test requires mocktime
        enable_mocktime()
        return start_nodes(self.num_nodes, self.options.tmpdir)

    def run_test(self):
        # Simple send, 0 to 1:
        txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1)
        self.sync_all()
        assert_array_result(self.nodes[0].listtransactions(),
                            {"txid":txid},
                            {"category":"send","account":"","amount":Decimal("-0.1"),"confirmations":0})
        assert_array_result(self.nodes[1].listtransactions(),
                            {"txid":txid},
                            {"category":"receive","account":"","amount":Decimal("0.1"),"confirmations":0})
        # mine a block, confirmations should change:
        self.nodes[0].generate(1)
        self.sync_all()
        assert_array_result(self.nodes[0].listtransactions(),
                            {"txid":txid},
                            {"category":"send","account":"","amount":Decimal("-0.1"),"confirmations":1})
        assert_array_result(self.nodes[1].listtransactions(),
                            {"txid":txid},
                            {"category":"receive","account":"","amount":Decimal("0.1"),"confirmations":1})
        # send-to-self:
        txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 0.2)
        assert_array_result(self.nodes[0].listtransactions(),
                            {"txid":txid, "category":"send"},
                            {"amount":Decimal("-0.2")})
        assert_array_result(self.nodes[0].listtransactions(),
                            {"txid":txid, "category":"receive"},
                            {"amount":Decimal("0.2")})
        # sendmany from node1: twice to self, twice to node2:
        send_to = { self.nodes[0].getnewaddress() : 0.11,
                    self.nodes[1].getnewaddress() : 0.22,
                    self.nodes[0].getaccountaddress("from1") : 0.33,
                    self.nodes[1].getaccountaddress("toself") : 0.44 }
        txid = self.nodes[1].sendmany("", send_to)
        self.sync_all()
        assert_array_result(self.nodes[1].listtransactions(),
                            {"category":"send","amount":Decimal("-0.11")},
                            {"txid":txid} )
        assert_array_result(self.nodes[0].listtransactions(),
                            {"category":"receive","amount":Decimal("0.11")},
                            {"txid":txid} )
        assert_array_result(self.nodes[1].listtransactions(),
                            {"category":"send","amount":Decimal("-0.22")},
                            {"txid":txid} )
        assert_array_result(self.nodes[1].listtransactions(),
                            {"category":"receive","amount":Decimal("0.22")},
                            {"txid":txid} )
        assert_array_result(self.nodes[1].listtransactions(),
                            {"category":"send","amount":Decimal("-0.33")},
                            {"txid":txid} )
        assert_array_result(self.nodes[0].listtransactions(),
                            {"category":"receive","amount":Decimal("0.33")},
                            {"txid":txid, "account" : "from1"} )
        assert_array_result(self.nodes[1].listtransactions(),
                            {"category":"send","amount":Decimal("-0.44")},
                            {"txid":txid, "account" : ""} )
        assert_array_result(self.nodes[1].listtransactions(),
                            {"category":"receive","amount":Decimal("0.44")},
                            {"txid":txid, "account" : "toself"} )
        # Watch-only: import a multisig redeemScript and check the received
        # entry only shows up when include_watchonly is requested.
        multisig = self.nodes[1].createmultisig(1, [self.nodes[1].getnewaddress()])
        self.nodes[0].importaddress(multisig["redeemScript"], "watchonly", False, True)
        txid = self.nodes[1].sendtoaddress(multisig["address"], 0.1)
        self.nodes[1].generate(1)
        self.sync_all()
        assert(len(self.nodes[0].listtransactions("watchonly", 100, 0, False)) == 0)
        assert_array_result(self.nodes[0].listtransactions("watchonly", 100, 0, True),
                            {"category":"receive","amount":Decimal("0.1")},
                            {"txid":txid, "account" : "watchonly"} )
        self.run_rbf_opt_in_test()

    # Check that the opt-in-rbf flag works properly, for sent and received
    # transactions.
    def run_rbf_opt_in_test(self):
        # Check whether a transaction signals opt-in RBF itself
        def is_opt_in(node, txid):
            rawtx = node.getrawtransaction(txid, 1)
            for x in rawtx["vin"]:
                # BIP125: any input sequence below 0xfffffffe signals RBF.
                if x["sequence"] < 0xfffffffe:
                    return True
            return False

        # Find an unconfirmed output matching a certain txid
        def get_unconfirmed_utxo_entry(node, txid_to_match):
            utxo = node.listunspent(0, 0)
            for i in utxo:
                if i["txid"] == txid_to_match:
                    return i
            return None

        # 1. Chain a few transactions that don't opt-in.
        txid_1 = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 1)
        assert(not is_opt_in(self.nodes[0], txid_1))
        assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_1}, {"bip125-replaceable":"no"})
        sync_mempools(self.nodes)
        assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_1}, {"bip125-replaceable":"no"})
        # Tx2 will build off txid_1, still not opting in to RBF.
        utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[1], txid_1)
        # Create tx2 using createrawtransaction
        inputs = [{"txid":utxo_to_use["txid"], "vout":utxo_to_use["vout"]}]
        outputs = {self.nodes[0].getnewaddress(): 0.999}
        tx2 = self.nodes[1].createrawtransaction(inputs, outputs)
        tx2_signed = self.nodes[1].signrawtransaction(tx2)["hex"]
        txid_2 = self.nodes[1].sendrawtransaction(tx2_signed)
        # ...and check the result
        assert(not is_opt_in(self.nodes[1], txid_2))
        assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_2}, {"bip125-replaceable":"no"})
        sync_mempools(self.nodes)
        assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_2}, {"bip125-replaceable":"no"})
        # Tx3 will opt-in to RBF
        utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[0], txid_2)
        inputs = [{"txid": txid_2, "vout":utxo_to_use["vout"]}]
        outputs = {self.nodes[1].getnewaddress(): 0.998}
        tx3 = self.nodes[0].createrawtransaction(inputs, outputs)
        tx3_modified = txFromHex(tx3)
        tx3_modified.vin[0].nSequence = 0
        tx3 = bytes_to_hex_str(tx3_modified.serialize())
        tx3_signed = self.nodes[0].signrawtransaction(tx3)['hex']
        txid_3 = self.nodes[0].sendrawtransaction(tx3_signed)
        assert(is_opt_in(self.nodes[0], txid_3))
        assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_3}, {"bip125-replaceable":"yes"})
        sync_mempools(self.nodes)
        assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_3}, {"bip125-replaceable":"yes"})
        # Tx4 will chain off tx3.  Doesn't signal itself, but depends on one
        # that does.
        utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[1], txid_3)
        inputs = [{"txid": txid_3, "vout":utxo_to_use["vout"]}]
        outputs = {self.nodes[0].getnewaddress(): 0.997}
        tx4 = self.nodes[1].createrawtransaction(inputs, outputs)
        tx4_signed = self.nodes[1].signrawtransaction(tx4)["hex"]
        txid_4 = self.nodes[1].sendrawtransaction(tx4_signed)
        assert(not is_opt_in(self.nodes[1], txid_4))
        assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_4}, {"bip125-replaceable":"yes"})
        sync_mempools(self.nodes)
        assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_4}, {"bip125-replaceable":"yes"})
        # Replace tx3, and check that tx4 becomes unknown
        tx3_b = tx3_modified
        tx3_b.vout[0].nValue -= int(Decimal("0.004") * COIN)  # bump the fee
        tx3_b = bytes_to_hex_str(tx3_b.serialize())
        tx3_b_signed = self.nodes[0].signrawtransaction(tx3_b)['hex']
        txid_3b = self.nodes[0].sendrawtransaction(tx3_b_signed, True)
        assert(is_opt_in(self.nodes[0], txid_3b))
        assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_4}, {"bip125-replaceable":"unknown"})
        sync_mempools(self.nodes)
        assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_4}, {"bip125-replaceable":"unknown"})
        # Check gettransaction as well:
        for n in self.nodes[0:2]:
            assert_equal(n.gettransaction(txid_1)["bip125-replaceable"], "no")
            assert_equal(n.gettransaction(txid_2)["bip125-replaceable"], "no")
            assert_equal(n.gettransaction(txid_3)["bip125-replaceable"], "yes")
            assert_equal(n.gettransaction(txid_3b)["bip125-replaceable"], "yes")
            assert_equal(n.gettransaction(txid_4)["bip125-replaceable"], "unknown")
        # After mining a transaction, it's no longer BIP125-replaceable
        self.nodes[0].generate(1)
        assert(txid_3b not in self.nodes[0].getrawmempool())
        assert_equal(self.nodes[0].gettransaction(txid_3b)["bip125-replaceable"], "no")
        assert_equal(self.nodes[0].gettransaction(txid_4)["bip125-replaceable"], "unknown")
if __name__ == '__main__':
    # Run the functional test when invoked directly.
    ListTransactionsTest().main()
| fcecin/infinitum | qa/rpc-tests/listtransactions.py | Python | mit | 10,223 |
# -*- coding: utf-8 -*-
############################################################################
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU Affero General Public License as #
# published by the Free Software Foundation, either version 3 of the #
# License, or (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU Affero General Public License for more details. #
# #
# You should have received a copy of the GNU Affero General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
############################################################################
from module.plugins.internal.MultiHoster import MultiHoster
class DebridItaliaCom(MultiHoster):
    __name__ = "DebridItaliaCom"
    __version__ = "0.07"
    __type__ = "hook"
    __config__ = [("activated", "bool", "Activated", "False"),
                  ("hosterListMode", "all;listed;unlisted", "Use for hosters (if supported)", "all"),
                  ("hosterList", "str", "Hoster list (comma separated)", ""),
                  ("unloadFailing", "bool", "Revert to standard download if download fails", "False"),
                  ("interval", "int", "Reload interval in hours (0 to disable)", "24")]
    __description__ = """Debriditalia.com hook plugin"""
    __author_name__ = ("stickell")
    __author_mail__ = ("[email protected]")

    def getHoster(self):
        """Return the hoster domains supported by the debriditalia.com service."""
        return ["netload.in", "hotfile.com", "rapidshare.com", "multiupload.com",
                "uploading.com", "megashares.com", "crocko.com", "filepost.com",
                "bitshare.com", "share-links.biz", "putlocker.com", "uploaded.to",
                "speedload.org", "rapidgator.net", "likeupload.net", "cyberlocker.ch",
                "depositfiles.com", "extabit.com", "filefactory.com", "sharefiles.co",
                "ryushare.com", "tusfiles.net", "nowvideo.co", "cloudzer.net", "letitbit.net",
                "easybytez.com", "uptobox.com", "ddlstorage.com"]
| Rusk85/pyload | module/plugins/hooks/DebridItaliaCom.py | Python | gpl-3.0 | 2,462 |
"""
This module implements a set of functions that are commonly used by plugins.
"""
import sys
from functools import wraps
from os.path import exists, dirname, join
from os.path import abspath
from traceback import print_exc as debug

from vyapp.app import root
from vyapp.areavi import AreaVi
def set_line(area, line, col=0):
    """
    Focus the given AreaVi widget and place its cursor at (line, col).

    It receives an AreaVi widget instance, a line number and an optional
    column (defaults to the start of the line), selects the notebook tab
    holding the widget, gives it keyboard focus and moves the cursor.
    """
    # Log which file we are jumping to; helps trace plugin-driven jumps.
    sys.stderr.write(area.filename + '\n')
    # Bring the tab containing this AreaVi to the front.
    # area.master.master.master is presumably the tab frame owned by
    # root.note -- TODO confirm against the widget hierarchy.
    root.note.select(area.master.master.master)
    area.focus()
    area.setcur(line, col)
def findline(filename, line, col=0):
    """
    Open (or focus) *filename* and jump to (line, col).

    If the file is already open in some AreaVi, that widget is reused;
    otherwise the file is opened in a new tab.  The cursor is then
    placed at the requested position via set_line.
    """
    files = AreaVi.get_opened_files(root)
    filename = abspath(filename)
    try:
        area = files[filename]
    except KeyError:
        # Not open yet: open it in a new tab.
        area = root.note.open(filename)
    # BUG FIX: `col` was accepted but never forwarded to set_line, so the
    # cursor always landed at column 0.  Also, set_line used to run in a
    # `finally` clause, which raised UnboundLocalError (masking the real
    # error) whenever root.note.open failed with a non-KeyError.
    set_line(area, line, col)
def match_sub_pattern(pattern, lst):
    """
    Yield (entry, offset) pairs for every entry of *lst* that starts
    with the suffix pattern[offset:] of *pattern*, for each possible
    non-empty suffix.  Entries are examined in order; for each entry the
    offsets are tried from 0 upwards.
    """
    # Precompute the (offset, suffix) pairs once instead of slicing
    # the pattern again for every candidate.
    suffixes = [(off, pattern[off:]) for off in range(len(pattern))]
    for entry in lst:
        for off, suffix in suffixes:
            if entry.startswith(suffix):
                yield entry, off
def error(handle):
    """
    Decorator that reports exceptions on the status bar.

    Wraps *handle* so that any exception it raises is shown in the
    status bar as 'Error :<exc>' and then re-raised unchanged.
    """
    # IMPROVEMENT: functools.wraps preserves the wrapped function's
    # __name__/__doc__, which the original decorator discarded.
    @wraps(handle)
    def shell(*args, **kwargs):
        try:
            return handle(*args, **kwargs)
        except Exception as e:
            root.status.set_msg('Error :%s' % e)
            raise
    return shell
def get_project_root(path):
    """
    Return the project root for *path*, or *path* itself.

    Walks up the directory tree while the parent directory contains an
    '__init__.py' (i.e. while we are still inside a python package) and
    returns the topmost package directory reached.
    """
    while True:
        parent = dirname(path)
        # Stop at the filesystem root: dirname('/') == '/', so without
        # this guard a stray __init__.py at the root would loop forever.
        # This also covers the original early-return for root paths.
        if parent == path:
            return path
        if not exists(join(parent, '__init__.py')):
            return path
        path = parent
def execute(handle, *args, **kwargs):
    """
    Call *handle* while shielding the caller from exceptions.

    Returns whatever *handle* returns; if it raises, the traceback is
    printed and None is returned instead.

    Example:
        # Doesn't propagate the ZeroDivisionError: the traceback is
        # printed and r ends up as None.
        r = execute(lambda a, b: a / b, 1, 0)
    """
    try:
        return handle(*args, **kwargs)
    except Exception:
        debug()
def exec_quiet(handle, *args, **kwargs):
    """
    Call *handle*, discarding any exception it raises.

    Like execute() but completely silent: nothing is printed on
    failure, and None is returned in that case.
    """
    try:
        return handle(*args, **kwargs)
    except Exception:
        return None
def exec_pipe(data, env):
    """
    Execute the python code *data* inside the dictionary *env*.

    sys.stderr is temporarily pointed at sys.stdout so that any
    traceback -- whether printed by the executed code itself or by the
    except clause below -- lands on stdout.  The original stderr is
    restored afterwards.

    Note: mostly used to execute python code typed from inside vy.
    """
    import sys
    # Redirect before running: code that catches an exception and calls
    # print_exc would otherwise write to the real stderr.
    saved_stderr = sys.stderr
    sys.stderr = sys.stdout
    try:
        exec(data, env)
    except Exception:
        debug()
    finally:
        sys.stderr = saved_stderr
| iogf/vy | vyapp/tools.py | Python | mit | 3,130 |
"""Contains the implementation of the command entry widget."""
from mudpyl.gui.commandhistory import CommandHistory
from mudpyl.gui.tabcomplete import Trie
from mudpyl.gui.keychords import from_gtk_event
import gtk
import pango
class CommandView(gtk.Entry):
    """The area where the user enters commands to be sent to the MUD.

    Wraps a gtk.Entry with command history, tab completion and macro
    dispatch for the surrounding GUI.
    """

    def __init__(self, gui):
        gtk.Entry.__init__(self)
        self.realm = gui.realm
        self.gui = gui
        # Trie of known words used for tab completion.
        self.tabdict = Trie()
        # Remember up to 200 previously entered commands.
        self.hist = CommandHistory(200)
        self.connect('key-press-event', self.key_pressed_cb)
        self.modify_font(pango.FontDescription('monospace 8'))

    def key_pressed_cb(self, widget, event):
        """The user's pressed a key.

        First, this checks to see if there is a macro bound for the
        keychord, and if there is then it is run; if not, the key is
        handled by PyGTK (returning False propagates the event).
        """
        chord = from_gtk_event(event)
        if not self.gui.realm.maybe_do_macro(chord):
            # not a macro, so keep processing.
            return False
        return True

    def history_up(self):
        """Move up (ie, back in time) one command in the history."""
        # cursor will be at the end of the line, as it has no left gravity.
        self.set_text(self.hist.advance())
        self.set_position(-1)

    def history_down(self):
        """Move down (ie, forwards in time) one command in the history."""
        self.set_text(self.hist.retreat())
        self.set_position(-1)

    def get_all_text(self):
        """Finger-saving method to get all the text from the buffer."""
        # FIX: local was named `bytes`, shadowing the builtin.
        raw = self.get_chars(0, -1)
        return raw.decode('utf-8')

    def escape_pressed(self):
        """Add the current line to the list of previous commands, and clear
        the buffer.
        """
        self.hist.add_command(self.get_all_text())
        self.set_text('')

    def submit_line(self):
        """Send the current line to the MUD and clear the buffer."""
        text = self.get_all_text()
        self.set_text('')
        self.realm.receive_gui_line(text)
        # Don't record echoed-back (e.g. password) input in the history.
        if not self.realm.server_echo:
            self.hist.add_command(text)

    def tab_complete(self):
        """Tab-completion at the current cursor position."""
        line = self.get_all_text()
        # cursor position as an integer from the start of the line
        ind = self.get_position()
        line, ind = self.tabdict.complete(line, ind)
        self.set_text(line)
        # move the cursor to where the tabdict wants it
        self.set_position(ind)

    def add_line_to_tabdict(self, line):
        """Add all the new words in the line to our tabdict."""
        self.tabdict.add_line(line)
| Nik0las1984/mudpyl | mudpyl/gui/gtkcommandline.py | Python | gpl-2.0 | 2,673 |
# Copyright (c) 2007 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
from m5.params import *
from m5.proxy import *
from BaseTLB import BaseTLB
from MemObject import MemObject
# Hardware page-table walker for the x86 TLB: a MemObject so it can issue
# its own memory accesses for page-table reads.
class X86PagetableWalker(MemObject):
    type = 'X86PagetableWalker'
    # C++ class and header this SimObject is bound to.
    cxx_class = 'X86ISA::Walker'
    cxx_header = 'arch/x86/pagetable_walker.hh'
    # Memory-side port the walker uses for its page-table accesses.
    port = MasterPort("Port for the hardware table walker")
    # Resolved from the enclosing configuration hierarchy.
    system = Param.System(Parent.any, "system object")
# x86 TLB SimObject: a fixed-size translation cache backed by a hardware
# page-table walker for misses.
class X86TLB(BaseTLB):
    type = 'X86TLB'
    # C++ class and header this SimObject is bound to.
    cxx_class = 'X86ISA::TLB'
    cxx_header = 'arch/x86/tlb.hh'
    # Number of TLB entries.
    size = Param.Int(64, "TLB size")
    # Each TLB owns its own walker instance by default.
    walker = Param.X86PagetableWalker(\
        X86PagetableWalker(), "page table walker")
| Dexhub/MTX | src/arch/x86/X86TLB.py | Python | bsd-3-clause | 2,737 |
#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Streamlet for Apache Pulsar"""
from heronpy.streamlet.src.python.streamlet import Streamlet
from heronpy.connectors.pulsar.src.python.pulsarspout import PulsarSpout
# pylint: disable=access-member-before-definition
# pylint: disable=attribute-defined-outside-init
class PulsarStreamlet(Streamlet):
    """Streamlet facade on top of PulsarSpout.

    Stores the pulsar connection details at construction time; the
    underlying PulsarSpout is only created when _build_this() runs.
    """

    def __init__(self, service_url, topic_name, stage_name=None, parallelism=None,
                 receive_timeout_ms=None, input_schema=None):
        super(PulsarStreamlet, self).__init__(parents=[],
                                              stage_name=stage_name,
                                              parallelism=parallelism)
        self._pulsar_service_url = service_url
        self._pulsar_topic_name = topic_name
        self._pulsar_receive_timeout_ms = receive_timeout_ms
        self._pulsar_input_schema = input_schema

    @staticmethod
    def pulsarStreamlet(service_url, topic_name, stage_name=None, parallelism=None,
                        receive_timeout_ms=None, input_schema=None):
        """Validated factory: rejects missing service url / topic name."""
        if service_url is None:
            raise RuntimeError("Pulsar Service Url cannot be None")
        if topic_name is None:
            raise RuntimeError("Pulsar Topic Name cannot be None")
        return PulsarStreamlet(service_url, topic_name, stage_name=stage_name,
                               parallelism=parallelism,
                               receive_timeout_ms=receive_timeout_ms,
                               input_schema=input_schema)

    def _calculate_stage_name(self, existing_stage_names):
        """Pick a unique stage name of the form pulsarspout-<topic>[N]."""
        base = "pulsarspout-" + self._pulsar_topic_name
        candidate = base
        suffix = 1
        while candidate in existing_stage_names:
            suffix = suffix + 1
            candidate = base + str(suffix)
        return candidate

    def _build_this(self, bldr):
        """Register the underlying PulsarSpout with the topology builder."""
        spout_config = {PulsarSpout.serviceUrl: self._pulsar_service_url,
                        PulsarSpout.topicName: self._pulsar_topic_name}
        # Optional settings are only included when explicitly supplied.
        if self._pulsar_receive_timeout_ms is not None:
            spout_config[PulsarSpout.receiveTimeoutMs] = self._pulsar_receive_timeout_ms
        if self._pulsar_input_schema is not None:
            spout_config[PulsarSpout.deserializer] = self._pulsar_input_schema
        bldr.add_spout(self._stage_name, PulsarSpout, par=self._parallelism,
                       config=spout_config)
| twitter/heron | heronpy/connectors/pulsar/pulsarstreamlet.py | Python | apache-2.0 | 3,080 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.