code stringlengths 3 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1
value | license stringclasses 15
values | size int64 3 1.05M |
|---|---|---|---|---|---|
import os
from fabric.api import run, sudo, cd, env, task, settings
from fabric.contrib.files import upload_template
from ..literals import OS_UBUNTU, OS_FEDORA, OS_DEBIAN
def install_site(site_name='mayan'):
    """
    Install Mayan EDMS's site file in Apache's configuration.

    :param site_name: name of the Apache site/config file to install.
        Defaults to 'mayan', preserving the previously hard-coded value,
        so existing callers are unaffected.
    """
    if env.os in [OS_UBUNTU, OS_DEBIAN]:
        # Debian-style layout: drop the vhost into sites-available and
        # activate it with a2ensite.
        upload_template(
            filename=os.path.join('fabfile', 'templates', 'apache_site'),
            destination='/etc/apache2/sites-available/%s' % site_name,
            context=env, use_sudo=True)
        sudo('a2ensite %s' % site_name)
    elif env.os == OS_FEDORA:
        # Fedora's httpd automatically loads every *.conf under conf.d.
        upload_template(
            filename=os.path.join('fabfile', 'templates', 'apache_site'),
            destination='/etc/httpd/conf.d/%s.conf' % site_name,
            context=env, use_sudo=True)
def remove_site():
    """
    Remove Mayan EDMS's site file from Apache's configuration
    """
    # warn_only: a missing site or file is not fatal during removal.
    if env.os in [OS_UBUNTU, OS_DEBIAN]:
        with settings(warn_only=True):
            sudo('a2dissite mayan')
    elif env.os == OS_FEDORA:
        with settings(warn_only=True):
            sudo('rm /etc/httpd/conf.d/mayan.conf')
def restart():
    """
    Restart Apache
    """
    # Pick the restart command matching the detected distribution; do
    # nothing for unrecognized platforms.
    if env.os in [OS_UBUNTU, OS_DEBIAN]:
        command = '/etc/init.d/apache2 restart'
    elif env.os == OS_FEDORA:
        command = 'systemctl restart httpd.service'
    else:
        return
    sudo(command)
def reload():
    """
    Reload Apache configuration files
    """
    # A reload re-reads config without dropping active connections.
    if env.os in [OS_UBUNTU, OS_DEBIAN]:
        command = '/etc/init.d/apache2 reload'
    elif env.os == OS_FEDORA:
        command = 'systemctl reload httpd.service'
    else:
        return
    sudo(command)
| appsembler/mayan_appsembler | fabfile/webservers/apache.py | Python | gpl-3.0 | 1,515 |
from builtins import next
from builtins import object
import mock
import unittest
try:
from bugwarrior.services.mplan import MegaplanService
except SyntaxError:
raise unittest.SkipTest(
'Upstream python-megaplan does not support python3 yet.')
from .base import ServiceTest, AbstractServiceTest
class FakeMegaplanClient(object):
    """Minimal stand-in for the real megaplan client.

    Holds a single canned task record and returns it from
    ``get_actual_tasks`` so tests need no server connection.
    """

    def __init__(self, record):
        self.record = record

    def get_actual_tasks(self):
        """Return the canned record wrapped in a one-element list."""
        return [self.record]
class TestMegaplanIssue(AbstractServiceTest, ServiceTest):
    # Service-level configuration consumed by ServiceTest when it
    # instantiates MegaplanService.
    SERVICE_CONFIG = {
        'megaplan.hostname': 'something',
        'megaplan.login': 'something_else',
        'megaplan.password': 'aljlkj',
    }

    # Megaplan encodes hierarchy in the task name, '|'-separated; the
    # last component is treated as the issue title.
    name_parts = ['one', 'two', 'three']
    arbitrary_issue = {
        'Id': 10,
        'Name': '|'.join(name_parts)
    }

    def setUp(self):
        super(TestMegaplanIssue, self).setUp()
        # Patch megaplan.Client so constructing the service makes no
        # network connection.
        with mock.patch('megaplan.Client'):
            self.service = self.get_mock_service(MegaplanService)

    def get_mock_service(self, *args, **kwargs):
        # After the base class builds the service, swap its client for
        # the canned fake so issue data is deterministic.
        service = super(TestMegaplanIssue, self).get_mock_service(
            *args, **kwargs)
        service.client = FakeMegaplanClient(self.arbitrary_issue)
        return service

    def test_to_taskwarrior(self):
        # Verify the raw record maps onto the expected taskwarrior
        # fields; URL/project getters are mocked out to fixed values.
        arbitrary_project = 'one'
        arbitrary_url = 'http://one.com/'
        issue = self.service.get_issue_for_record(self.arbitrary_issue)
        expected_output = {
            'project': arbitrary_project,
            'priority': self.service.default_priority,
            issue.FOREIGN_ID: self.arbitrary_issue['Id'],
            issue.URL: arbitrary_url,
            issue.TITLE: self.name_parts[-1]
        }

        def get_url(*args):
            return arbitrary_url

        def get_project(*args):
            return arbitrary_project

        with mock.patch.multiple(
            issue, get_project=mock.DEFAULT, get_issue_url=mock.DEFAULT
        ) as mocked:
            mocked['get_project'].side_effect = get_project
            mocked['get_issue_url'].side_effect = get_url
            actual_output = issue.to_taskwarrior()
        self.assertEqual(actual_output, expected_output)

    def test_issues(self):
        # End-to-end: the fake client's record should surface as a full
        # taskwarrior record with the UDAs the service defines.
        issue = next(self.service.issues())
        expected = {
            'description':
                '(bw)Is#10 - three .. https://something/task/10/card/',
            'megaplanid': 10,
            'megaplantitle': 'three',
            'megaplanurl': 'https://something/task/10/card/',
            'priority': 'M',
            'project': 'something',
            'tags': []}
        self.assertEqual(issue.get_taskwarrior_record(), expected)
| lyarwood/bugwarrior | tests/test_megaplan.py | Python | gpl-3.0 | 2,656 |
"""
Line plotting functions, draw boundary and gridlines.
"""
from numpy import arange
from matplotlib.lines import Line2D
from .helpers import project_point
## Lines ##
def line(ax, p1, p2, permutation=None, **kwargs):
    """
    Draws a line on `ax` from p1 to p2.

    Parameters
    ----------
    ax: Matplotlib AxesSubplot, None
        The subplot to draw on.
    p1: 2-tuple
        The (x,y) starting coordinates
    p2: 2-tuple
        The (x,y) ending coordinates
    kwargs:
        Any kwargs to pass through to Matplotlib.
    """
    # Project both simplex points into plot coordinates, then give
    # Matplotlib the x values and y values as separate sequences.
    start = project_point(p1, permutation=permutation)
    end = project_point(p2, permutation=permutation)
    xs = (start[0], end[0])
    ys = (start[1], end[1])
    ax.add_line(Line2D(xs, ys, **kwargs))
def horizontal_line(ax, scale, i, **kwargs):
    """
    Draws the i-th horizontal line parallel to the lower axis.

    Parameters
    ----------
    ax: Matplotlib AxesSubplot
        The subplot to draw on.
    scale: float, 1.0
        Simplex scale size.
    i: float
        The index of the line to draw
    kwargs: Dictionary
        Any kwargs to pass through to Matplotlib.
    """
    # The second simplex coordinate stays fixed at i along the line.
    line(ax, (0, i, scale - i), (scale - i, i, 0), **kwargs)
def left_parallel_line(ax, scale, i, **kwargs):
    """
    Draws the i-th line parallel to the left axis.

    Parameters
    ----------
    ax: Matplotlib AxesSubplot
        The subplot to draw on.
    scale: float
        Simplex scale size.
    i: float
        The index of the line to draw
    kwargs: Dictionary
        Any kwargs to pass through to Matplotlib.
    """
    # The first simplex coordinate stays fixed at i along the line.
    line(ax, (i, scale - i, 0), (i, 0, scale - i), **kwargs)
def right_parallel_line(ax, scale, i, **kwargs):
    """
    Draws the i-th line parallel to the right axis.

    Parameters
    ----------
    ax: Matplotlib AxesSubplot
        The subplot to draw on.
    scale: float
        Simplex scale size.
    i: float
        The index of the line to draw
    kwargs: Dictionary
        Any kwargs to pass through to Matplotlib.
    """
    # The third simplex coordinate stays fixed at i along the line.
    line(ax, (0, scale - i, i), (scale - i, 0, i), **kwargs)
## Boundary, Gridlines ##
def boundary(ax, scale, axes_colors=None, **kwargs):
    """
    Plots the boundary of the simplex. Creates and returns matplotlib axis if
    none given.

    Parameters
    ----------
    ax: Matplotlib AxesSubplot, None
        The subplot to draw on.
    scale: float
        Simplex scale size.
    kwargs:
        Any kwargs to pass through to matplotlib.
    axes_colors: dict
        Option for coloring boundaries different colors.
        e.g. {'l': 'g'} for coloring the left axis boundary green
    """
    if axes_colors is None:
        axes_colors = dict()
    # Fill in black for any axis the caller did not specify.  This
    # deliberately updates the passed-in dict in place.
    for _axis in ('l', 'r', 'b'):
        axes_colors.setdefault(_axis, 'black')

    # The three boundary edges are the 0-th line of each family.
    horizontal_line(ax, scale, 0, color=axes_colors['b'], **kwargs)
    left_parallel_line(ax, scale, 0, color=axes_colors['l'], **kwargs)
    right_parallel_line(ax, scale, 0, color=axes_colors['r'], **kwargs)
    return ax
def merge_dicts(base, updates):
    """
    Given two dicts, merge them into a new dict as a shallow copy.

    Parameters
    ----------
    base: dict
        The base dictionary.
    updates: dict
        Secondary dictionary whose values override the base.
    """
    # Treat None (or any falsy value) as an empty dict; neither input
    # is modified.
    merged = dict(base or {})
    merged.update(updates or {})
    return merged
def gridlines(ax, scale, multiple=None, horizontal_kwargs=None,
              left_kwargs=None, right_kwargs=None, **kwargs):
    """
    Plots grid lines excluding boundary.

    Parameters
    ----------
    ax: Matplotlib AxesSubplot, None
        The subplot to draw on.
    scale: float
        Simplex scale size.
    multiple: float, None
        Specifies which inner gridelines to draw. For example, if scale=30 and
        multiple=6, only 5 inner gridlines will be drawn.
    horizontal_kwargs: dict, None
        Any kwargs to pass through to matplotlib for horizontal gridlines
    left_kwargs: dict, None
        Any kwargs to pass through to matplotlib for left parallel gridlines
    right_kwargs: dict, None
        Any kwargs to pass through to matplotlib for right parallel gridlines
    kwargs:
        Any kwargs to pass through to matplotlib, if not using
        horizontal_kwargs, left_kwargs, or right_kwargs
    """
    # Thin dotted lines unless the caller asked for something else.
    kwargs.setdefault("linewidth", 0.5)
    kwargs.setdefault("linestyle", ':')

    # Per-family kwargs override the shared defaults.
    horizontal_kwargs = merge_dicts(kwargs, horizontal_kwargs)
    left_kwargs = merge_dicts(kwargs, left_kwargs)
    right_kwargs = merge_dicts(kwargs, right_kwargs)

    if not multiple:
        multiple = 1.

    # Horizontal family stops short of the apex; the two parallel
    # families run one step further.
    for i in arange(0, scale, multiple):
        horizontal_line(ax, scale, i, **horizontal_kwargs)
    for i in arange(0, scale + multiple, multiple):
        left_parallel_line(ax, scale, i, **left_kwargs)
        right_parallel_line(ax, scale, i, **right_kwargs)
    return ax
def normalize_tick_formats(tick_formats):
    """
    Normalize the `tick_formats` argument of `ticks` to a per-axis dict.

    Parameters
    ----------
    tick_formats: None, dict, or str
        None selects the default integer format '%d'; a single format
        string is applied to all three axes; a dict (keyed by 'b', 'l',
        'r') is returned unchanged.

    Returns
    -------
    dict
        Mapping of axis character ('b', 'l', 'r') to a format string.

    Raises
    ------
    TypeError
        If tick_formats is not None, a dict, or a string.
    """
    # A dict is assumed to already be in canonical per-axis form.
    if isinstance(tick_formats, dict):
        return tick_formats
    if tick_formats is None:
        s = '%d'
    elif isinstance(tick_formats, str):
        s = tick_formats
    else:
        raise TypeError("tick_formats must be a dictionary of strings,"
                        " a string, or None.")
    return {'b': s, 'l': s, 'r': s}
def ticks(ax, scale, ticks=None, locations=None, multiple=1, axis='b',
          offset=0.01, clockwise=False, axes_colors=None, fontsize=10,
          tick_formats=None, **kwargs):
    """
    Sets tick marks and labels.

    Parameters
    ----------
    ax: Matplotlib AxesSubplot, None
        The subplot to draw on.
    scale: float, 1.0
        Simplex scale size.
    ticks: list of strings, None
        The tick labels
    locations: list of points, None
        The locations of the ticks
    multiple: float, None
        Specifies which ticks gridelines to draw. For example, if scale=30 and
        multiple=6, only 5 ticks will be drawn.
    axis: str, 'b'
        The axis or axes to draw the ticks for. `axis` must be a substring of
        'lrb' (as sets)
    offset: float, 0.01
        controls the length of the ticks
    clockwise: bool, False
        Draw ticks marks clockwise or counterclockwise
    axes_colors: Dict, None
        Option to color ticks differently for each axis, 'l', 'r', 'b'
        e.g. {'l': 'g', 'r':'b', 'b': 'y'}
    tick_formats: None, Dict, Str
        If None, all axes will be labelled with ints. If Dict, the keys are
        'b', 'l' and 'r' and the values are format strings e.g. "%.3f" for
        a float with 3 decimal places or "%.3e" for scientific format with
        3 decimal places or "%d" for ints. If tick_formats is a string, it
        is assumed that this is a format string to be applied to all axes.
    kwargs:
        Any kwargs to pass through to matplotlib.
    """
    axis = axis.lower()
    valid_axis_chars = set(['l', 'r', 'b'])
    axis_chars = set(axis)
    if not axis_chars.issubset(valid_axis_chars):
        raise ValueError("axis must be some combination of 'l', 'r', and 'b'")

    if ticks and not locations:
        # Labels given but no positions: spread them evenly over the axis.
        # NOTE(review): a single-element `ticks` list would divide by zero
        # here (num_ticks - 1) -- confirm callers always pass >= 2 labels.
        num_ticks = len(ticks)
        if num_ticks != 0:
            multiple = scale / (num_ticks - 1)
            locations = arange(0, scale + multiple, multiple)

    if not ticks:
        # No labels given: label each gridline location with its value.
        locations = arange(0, scale + multiple, multiple)
        ticks = locations

    # Map None / str / dict to a canonical {'b','l','r'} format dict.
    tick_formats = normalize_tick_formats(tick_formats)

    # Default color: black
    if axes_colors is None:
        axes_colors = dict()
    for _axis in valid_axis_chars:
        if _axis not in axes_colors:
            axes_colors[_axis] = 'black'

    # Tick length and label offsets are expressed as a fraction of scale.
    offset *= scale

    # For each requested axis: draw a short tick segment from loc1 to
    # loc2 and place the label at text_location.  The clockwise flag
    # flips both the tick direction and the label ordering (labels are
    # read from the far end of the list).  The numeric factors (2, 3.1,
    # 0.5, ...) are empirically chosen label placements.
    if 'r' in axis:
        for index, i in enumerate(locations):
            loc1 = (scale - i, i, 0)
            if clockwise:
                # Right parallel
                loc2 = (scale - i, i + offset, 0)
                text_location = (scale - i, i + 2 * offset, 0)
                tick = ticks[-(index+1)]
            else:
                # Horizontal
                loc2 = (scale - i + offset, i, 0)
                text_location = (scale - i + 3.1 * offset, i - 0.5 * offset, 0)
                tick = ticks[index]
            line(ax, loc1, loc2, color=axes_colors['r'], **kwargs)
            x, y = project_point(text_location)
            # String labels are used verbatim; numbers go through the
            # axis' format string.
            if isinstance(tick, str):
                s = tick
            else:
                s = tick_formats['r'] % tick
            ax.text(x, y, s, horizontalalignment="center",
                    color=axes_colors['r'], fontsize=fontsize)

    if 'l' in axis:
        for index, i in enumerate(locations):
            loc1 = (0, i, 0)
            if clockwise:
                # Horizontal
                loc2 = (-offset, i, 0)
                text_location = (-2 * offset, i - 0.5 * offset, 0)
                tick = ticks[index]
            else:
                # Right parallel
                loc2 = (-offset, i + offset, 0)
                text_location = (-2 * offset, i + 1.5 * offset, 0)
                tick = ticks[-(index+1)]
            line(ax, loc1, loc2, color=axes_colors['l'], **kwargs)
            x, y = project_point(text_location)
            if isinstance(tick, str):
                s = tick
            else:
                s = tick_formats['l'] % tick
            ax.text(x, y, s, horizontalalignment="center",
                    color=axes_colors['l'], fontsize=fontsize)

    if 'b' in axis:
        for index, i in enumerate(locations):
            loc1 = (i, 0, 0)
            if clockwise:
                # Right parallel
                loc2 = (i + offset, -offset, 0)
                text_location = (i + 3 * offset, -3.5 * offset, 0)
                tick = ticks[-(index+1)]
            else:
                # Left parallel
                loc2 = (i, -offset, 0)
                text_location = (i + 0.5 * offset, -3.5 * offset, 0)
                tick = ticks[index]
            line(ax, loc1, loc2, color=axes_colors['b'], **kwargs)
            x, y = project_point(text_location)
            if isinstance(tick, str):
                s = tick
            else:
                s = tick_formats['b'] % tick
            ax.text(x, y, s, horizontalalignment="center",
                    color=axes_colors['b'], fontsize=fontsize)
| marcharper/python-ternary | ternary/lines.py | Python | mit | 10,552 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Isaku Yamahata <yamahata at private email ne jp>
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import exc as sa_exc
from sqlalchemy import func
from sqlalchemy.orm import exc as orm_exc
from quantum.common import exceptions as q_exc
import quantum.db.api as db
from quantum.db import models_v2
from quantum.openstack.common import log as logging
from quantum.plugins.ryu.db import models_v2 as ryu_models_v2
LOG = logging.getLogger(__name__)
def set_ofp_servers(hosts):
    """Replace all stored OpenFlow controller records with `hosts`.

    `hosts` is an iterable of (address, host_type) pairs.
    """
    session = db.get_session()
    # Wipe the table first so it ends up reflecting exactly `hosts`.
    session.query(ryu_models_v2.OFPServer).delete()
    for host_address, host_type in hosts:
        session.add(ryu_models_v2.OFPServer(address=host_address,
                                            host_type=host_type))
    session.flush()
def network_all_tenant_list():
    """Return every network row, across all tenants."""
    return db.get_session().query(models_v2.Network).all()
class TunnelKey(object):
    """Allocator for per-network tunnel keys persisted in the database.

    Keys are handed out in increasing order, wrapping around within
    [key_min, key_max]; the most recently allocated key is tracked in
    the singleton TunnelKeyLast row.
    """
    # VLAN: 12 bits
    # GRE, VXLAN: 24bits
    # TODO(yamahata): STT: 64bits
    _KEY_MIN_HARD = 1
    _KEY_MAX_HARD = 0xffffffff

    def __init__(self, key_min=_KEY_MIN_HARD, key_max=_KEY_MAX_HARD):
        # Allowed key range for this allocator; validated against the
        # hard limits above.
        self.key_min = key_min
        self.key_max = key_max
        if (key_min < self._KEY_MIN_HARD or key_max > self._KEY_MAX_HARD or
                key_min > key_max):
            # NOTE(review): the message says 'Using default value' but the
            # code raises instead of falling back -- message and behaviour
            # disagree; confirm which was intended.
            raise ValueError(_('Invalid tunnel key options '
                               'tunnel_key_min: %(key_min)d '
                               'tunnel_key_max: %(key_max)d. '
                               'Using default value') % {'key_min': key_min,
                                                         'key_max': key_max})

    def _last_key(self, session):
        # Fetch (or repair/create) the singleton row recording the most
        # recently allocated key.
        try:
            return session.query(ryu_models_v2.TunnelKeyLast).one()
        except orm_exc.MultipleResultsFound:
            # Table was corrupted with several rows: collapse to one,
            # keeping the maximum recorded key.
            max_key = session.query(
                func.max(ryu_models_v2.TunnelKeyLast.last_key))
            # NOTE(review): max_key is a Query object here, not a scalar;
            # the comparison below looks suspect -- `.scalar()` was likely
            # intended.  TODO confirm.
            if max_key > self.key_max:
                max_key = self.key_min
            session.query(ryu_models_v2.TunnelKeyLast).delete()
            last_key = ryu_models_v2.TunnelKeyLast(last_key=max_key)
        except orm_exc.NoResultFound:
            # First allocation ever: start from the bottom of the range.
            last_key = ryu_models_v2.TunnelKeyLast(last_key=self.key_min)
        session.add(last_key)
        session.flush()
        return session.query(ryu_models_v2.TunnelKeyLast).one()

    def _find_key(self, session, last_key):
        """
        Try to find unused tunnel key in TunnelKey table starting
        from last_key + 1.
        When all keys are used, raise sqlalchemy.orm.exc.NoResultFound
        """
        # key 0 is used for special meanings. So don't allocate 0.
        # sqlite doesn't support
        # '(select order by limit) union all (select order by limit) '
        # 'order by limit'
        # so the two candidate queries are issued separately instead of
        # as a single UNION ALL statement.
        try:
            new_key = session.query("new_key").from_statement(
                # If last_key + 1 isn't used, it's the result
                'SELECT new_key '
                'FROM (SELECT :last_key + 1 AS new_key) q1 '
                'WHERE NOT EXISTS '
                '(SELECT 1 FROM tunnelkeys WHERE tunnel_key = :last_key + 1) '
            ).params(last_key=last_key).one()
        except orm_exc.NoResultFound:
            new_key = session.query("new_key").from_statement(
                # if last_key + 1 used,
                # find the least unused key from last_key + 1
                '(SELECT t.tunnel_key + 1 AS new_key '
                'FROM tunnelkeys t '
                'WHERE NOT EXISTS '
                '(SELECT 1 FROM tunnelkeys ti '
                ' WHERE ti.tunnel_key = t.tunnel_key + 1) '
                'AND t.tunnel_key >= :last_key '
                'ORDER BY new_key LIMIT 1) '
            ).params(last_key=last_key).one()
        new_key = new_key[0]  # the result is tuple.
        LOG.debug(_("last_key %(last_key)s new_key %(new_key)s") %
                  {"last_key": last_key, "new_key": new_key})
        if new_key > self.key_max:
            # Ran off the top of the range; caller retries from key_min.
            LOG.debug(_("no key found"))
            raise orm_exc.NoResultFound()
        return new_key

    def _allocate(self, session, network_id):
        # Allocate the next free key, wrapping around to key_min when
        # the search from the last allocated key finds nothing.
        last_key = self._last_key(session)
        try:
            new_key = self._find_key(session, last_key.last_key)
        except orm_exc.NoResultFound:
            new_key = self._find_key(session, self.key_min)
        tunnel_key = ryu_models_v2.TunnelKey(network_id=network_id,
                                             tunnel_key=new_key)
        last_key.last_key = new_key
        session.add(tunnel_key)
        return new_key

    # Upper bound on transaction retries in allocate().
    _TRANSACTION_RETRY_MAX = 16

    def allocate(self, session, network_id):
        """Allocate a tunnel key for `network_id`, retrying on DB conflicts."""
        count = 0
        while True:
            session.begin(subtransactions=True)
            try:
                new_key = self._allocate(session, network_id)
                session.commit()
                break
            except sa_exc.SQLAlchemyError:
                # Concurrent allocator likely won the race; retry.
                session.rollback()
                count += 1
                if count > self._TRANSACTION_RETRY_MAX:
                    # if this happens too often, increase _TRANSACTION_RETRY_MAX
                    LOG.warn(_("Transaction retry reaches to %d. "
                               "abandan to allocate tunnel key."), count)
                    raise q_exc.ResourceExhausted()
        return new_key

    def delete(self, session, network_id):
        """Release the tunnel key held by `network_id`, if any."""
        session.query(ryu_models_v2.TunnelKey).filter_by(
            network_id=network_id).delete()
        session.flush()

    def all_list(self):
        """Return every allocated tunnel key row."""
        session = db.get_session()
        return session.query(ryu_models_v2.TunnelKey).all()
def set_port_status(session, port_id, status):
    """Set the status of the port identified by `port_id`.

    Raises PortNotFound when no such port exists (net_id=None since only
    the port id is known at this point).
    """
    try:
        port = session.query(models_v2.Port).filter_by(id=port_id).one()
        port['status'] = status
        session.merge(port)
        session.flush()
    except orm_exc.NoResultFound:
        raise q_exc.PortNotFound(port_id=port_id, net_id=None)
| aristanetworks/arista-ovs-quantum | quantum/plugins/ryu/db/api_v2.py | Python | apache-2.0 | 7,427 |
#!/usr/bin/env python3
# Copyright 2021 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This is generated, do not edit. Update BuildConfigGenerator.groovy and
# 3ppFetch.template instead.
import argparse
import json
import os
import re
import urllib.request
# Maven coordinates of the artifact this 3pp package wraps.
_REPO_URL = 'https://repo.maven.apache.org/maven2'
_GROUP_NAME = 'org/robolectric'
_MODULE_NAME = 'shadows-multidex'
_FILE_EXT = 'jar'
# When set, pins the version instead of querying maven-metadata.xml.
_OVERRIDE_LATEST = None
# Chromium-local patch revision appended to the upstream version.
_PATCH_VERSION = 'cr1'
def do_latest():
    """Print the latest upstream version, suffixed with our patch version."""
    if _OVERRIDE_LATEST is not None:
        # A pinned version takes precedence over whatever upstream reports.
        print(_OVERRIDE_LATEST + f'.{_PATCH_VERSION}')
        return
    maven_metadata_url = '{}/{}/{}/maven-metadata.xml'.format(
        _REPO_URL, _GROUP_NAME, _MODULE_NAME)
    metadata = urllib.request.urlopen(maven_metadata_url).read().decode(
        'utf-8')
    # Do not parse xml with the python included parser since it is susceptible
    # to maliciously crafted xmls. Only use regular expression parsing to be
    # safe. RE should be enough to handle what we need to extract.
    match = re.search('<latest>([^<]+)</latest>', metadata)
    if match:
        latest = match.group(1)
    else:
        # if no latest info was found just hope the versions are sorted and the
        # last one is the latest (as is commonly the case).
        latest = re.findall('<version>([^<]+)</version>', metadata)[-1]
    print(latest + f'.{_PATCH_VERSION}')
def get_download_url(version):
    """Print a 3pp partial manifest (JSON) for the given package version."""
    # Strip our 'crN' patch suffix; the upstream artifact name has no
    # such component.
    base_version, patch = version.rsplit('.', 1)
    if patch.startswith('cr'):
        version = base_version
    file_url = '{0}/{1}/{2}/{3}/{2}-{3}.{4}'.format(_REPO_URL, _GROUP_NAME,
                                                    _MODULE_NAME, version,
                                                    _FILE_EXT)
    partial_manifest = {
        'url': [file_url],
        'name': [file_url.rsplit('/', 1)[-1]],
        'ext': '.' + _FILE_EXT,
    }
    print(json.dumps(partial_manifest))
def main():
    """Dispatch the 'latest' / 'get_url' subcommands used by 3pp."""
    parser = argparse.ArgumentParser()
    subparsers = parser.add_subparsers()

    latest_cmd = subparsers.add_parser('latest')
    latest_cmd.set_defaults(func=lambda _opts: do_latest())

    get_url_cmd = subparsers.add_parser('get_url')
    # 3pp passes the resolved version via the _3PP_VERSION env variable.
    get_url_cmd.set_defaults(
        func=lambda _opts: get_download_url(os.environ['_3PP_VERSION']))

    opts = parser.parse_args()
    opts.func(opts)


if __name__ == '__main__':
    main()
| chromium/chromium | third_party/android_deps/libs/org_robolectric_shadows_multidex/3pp/fetch.py | Python | bsd-3-clause | 2,494 |
##
# Copyright (c) 2007-2013 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
from caldavclientlibrary.admin.xmlaccounts.commands.command import Command
import getopt
class ChangePassword(Command):
    """
    Command to change the password of an existing directory record.
    """
    # NOTE: this module is Python 2 code (print statements).

    CMDNAME = "passwd"

    def __init__(self):
        super(ChangePassword, self).__init__(self.CMDNAME, "Change the password for a record.")
        # UID of the record to operate on, filled in from --uid.
        self.uid = None

    def usage(self):
        # Prints the command-line help text verbatim.
        print """USAGE: %s TYPE [OPTIONS]
TYPE: One of "users", "groups", "locations" or "resources". Also,
"u", "g", "l" or "r" as shortcuts.
Options:
-f file path to accounts.xml
--uid UID of record to change
""" % (self.cmdname,)

    def execute(self, argv):
        """
        Execute the command specified by the command line arguments.
        @param argv: command line arguments.
        @type argv: C{list}
        @return: 1 for success, 0 for failure.
        @rtype: C{int}
        """
        # Check first argument for type
        argv = self.getTypeArgument(argv)
        if argv is None:
            return 0
        opts, args = getopt.getopt(argv, 'f:h', ["help", "uid=", ])
        for name, value in opts:
            if name == "-f":
                self.path = value
            elif name in ("-h", "--help"):
                self.usage()
                return 1
            elif name == "--uid":
                self.uid = value
            else:
                print "Unknown option: %s." % (name,)
                self.usage()
                return 0
        # Both -f and --uid are required; positional args are rejected.
        if not self.path:
            print "Must specify a path."
            self.usage()
            return 0
        if not self.uid:
            print "Must specify a UID."
            self.usage()
            return 0
        if args:
            print "Arguments not allowed."
            self.usage()
            return 0
        if not self.loadAccounts():
            return 0
        return self.doCommand()

    def doCommand(self):
        """
        Run the command.
        """
        # Only write the accounts file back if the change succeeded.
        if self.doChangePassword():
            return self.writeAccounts()
        return 0

    def doChangePassword(self):
        """
        Prompts the user for details and then changes the password of a record in the directory.
        """
        # First check record exists
        record = self.directory.getRecord(self.recordType, self.uid)
        if record is None:
            print "No '%s' record matching uid '%s'" % (self.recordType, self.uid,)
            return 0
        record.password = self.promptPassword()
        return 1
| skarra/CalDAVClientLibrary | caldavclientlibrary/admin/xmlaccounts/commands/changepassword.py | Python | apache-2.0 | 3,153 |
import webapp2
import jinja2
import os
from google.appengine.api import users,mail
from google.appengine.ext import ndb
from google.appengine.api import urlfetch
from webapp2_extras import sessions
from time import sleep
import json
import logging
import model
import time
import collections
# True when running under the local App Engine dev server.
DEBUG = False
if os.environ.get('SERVER_SOFTWARE','').startswith('Development'):
    DEBUG = True

# webapp2 session configuration; the secret key signs session cookies.
config = {}
config['webapp2_extras.sessions'] = {
    'secret_key': 'prahji',
}

# Template engine rooted at this file's directory, with autoescaping on.
JINJA_ENVIRONMENT = jinja2.Environment(loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),extensions=['jinja2.ext.autoescape'])
#Lets create a universal session handler
#Lets create a universal session handler
class BaseHandler(webapp2.RequestHandler):
    """Request handler base class that wires up webapp2 sessions."""

    def get_current_user(self):
        # Session-stored user name, or '' when nobody is logged in.
        # (Python 2 dict.has_key.)
        if self.session.has_key('user'):
            return self.session['user']
        else:
            return ''

    def dispatch(self):
        # Get a session store for this request.
        self.session_store = sessions.get_store(request=self.request)
        try:
            # Dispatch the request.
            webapp2.RequestHandler.dispatch(self)
        finally:
            # Save all sessions.
            self.session_store.save_sessions(self.response)

    @webapp2.cached_property
    def session(self):
        # Returns a session using the default cookie key.
        sess = self.session_store.get_session()
        # add some default values:
        if not sess.get('theme'):
            sess['theme']='bootstrap'#'slate'
        return sess
class Home(BaseHandler):
    """Serves the landing page."""

    def get(self):
        template = JINJA_ENVIRONMENT.get_template('index.html')
        self.response.write(template.render())
class JsonRefresh(BaseHandler):
    """Returns the 40 most recent benchmark results as JSON."""

    def get(self):
        # Newest first; 40 rows keeps the payload small for the UI.
        results = model.Result.query().order(-model.Result.timestamp).fetch(40)
        # OrderedDict preserves the property ordering from the model.
        obj = [collections.OrderedDict(p.to_dict()) for p in results]
        logging.debug('now calling encoder.')
        # MyJsonEncoder handles the model's non-JSON-native types
        # (presumably timestamps -- confirm in model.py).
        robj= json.dumps(obj, cls=model.MyJsonEncoder)
        self.response.write(robj)
class JsonShowDetails(BaseHandler):
    """Stub endpoint: returns placeholder details for a result."""

    def get(self):
        # NOTE(review): 'timestamp' is read but never used and the
        # response is hard coded -- the datastore lookup is still to be
        # implemented.
        stime = self.request.get('timestamp')
        # Artificial delay, presumably to exercise the client's spinner.
        sleep(3) #time in seconds
        li = ['here is the key']
        self.response.write(json.dumps(li));
class JsonDoTest(BaseHandler):
    """Runs one benchmark fetch against a provider and stores the result.

    The provider name arrives as a query parameter value; per the
    original author's note, only a single result is expected per request.
    """

    def get(self):
        results={}
        # Presently, there will be only one result.
        # I might consider multiple in future.
        for k,v in self.request.params.iteritems():
            # The parameter's value names the provider to benchmark.
            host= v
            logging.debug('host:'+host)
            url = model.hosturls[host]
            logging.debug('the url:'+url)
            logging.debug('now calling urlfetch')
            # Time the fetch; 'micros' actually holds seconds here and
            # is converted to milliseconds below.
            micros = time.time()
            result = urlfetch.fetch(url, deadline=60)
            micros = time.time() - micros
            millis = int(round(micros * 1000))
            logging.debug('result received: ' + str(len(result.content)))
            # 108000 appears to be a minimum expected payload size used
            # to reject truncated responses -- TODO confirm the origin
            # of this magic number.
            if result.status_code == 200 and len(result.content)>108000:
                # OrderedDict keeps the response's key order intact.
                obj = json.loads(result.content, object_pairs_hook=collections.OrderedDict)
                cnt = len(obj['payload'])
                results[host] = collections.OrderedDict({})
                results[host]['host'] = host
                results[host]['time_taken_millis'] = millis
                results[host].update( obj)
                # Replace the bulky payload with just its length.
                results[host]['payload'] = str(cnt) + ' bytes' #convert payload to its length
                r = model.Result.save(results[host])
                js=json.dumps( r.to_dict(), cls=model.MyJsonEncoder)
            else:
                logging.debug('Request failed')
                results[host] = 'Request Failed'
                js=json.dumps( results)
        self.response.write(js)
class MailHandler(BaseHandler):
    """Receives inbound email for *@hostmetric.appspotmail.com."""

    def get(self, address):
        # Simple liveness check for the mail route.
        self.response.write('Handler is working fine')

    def post(self, address):
        # App Engine's mail service delivers inbound mail as a POST.
        # Currently only logged; the body is not processed.
        logging.debug('START_EMAIL_ARRIVED for ' + address)
        logging.debug('END_EMAIL_ARRIVED')
# URL routing table; debug=True makes webapp2 show stack traces in
# responses.
application = webapp2.WSGIApplication([
    ('/', Home),
    (r'/_ah/mail/(\w+)@hostmetric.appspotmail.com', MailHandler), #(\d+)
    ('/json_show_details', JsonShowDetails),
    ('/json_do_test', JsonDoTest),
    ('/json_refresh', JsonRefresh),
], debug=True,config=config)
| prahladyeri/benchthemall | client/gae/main.py | Python | mit | 6,012 |
"""
Copyright 2014 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
This file serves two main purposes
- it serves up the main html page
- and it provides a simple set of apis to the javascript
"""
import httplib2
import time
import jinja2
import json
import logging
import os
import re
import socket
import webapp2
# Give outbound HTTP calls a generous timeout; backend APIs can be slow.
socket.setdefaulttimeout(60)
http = httplib2.Http(timeout=60)

# Jinja environment rooted at this file's directory, autoescaping on.
JINJA_ENVIRONMENT = jinja2.Environment(
    loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),
    autoescape=True,
    extensions=['jinja2.ext.autoescape'])
# TODO: Dataset information should come from the list datasets api call
# But that call is not in the GA4GH API yet
SUPPORTED_BACKENDS = {
    # Each backend: display name, URL template with %s slots for the API
    # path and extra query params, and a dataset-name -> id map.
    'Ensembl' : {'name': 'Ensembl',
                 'url': 'http://193.62.52.232:8081/%s?%s',
                 'datasets': {'1000 genomes pilot 2': '2'}}
}

# Google requires a valid API key. If the file 'google_api_key.txt' exists
# then the Google API will be enabled.
google_api_key_file = os.path.join(os.path.dirname(__file__), 'google_api_key.txt')
if os.path.isfile(google_api_key_file):
    with open(google_api_key_file, 'r') as file:
        api_key = file.readline().strip()
    SUPPORTED_BACKENDS['GOOGLE'] = {
        'name': 'Google',
        # The key is baked into the URL template so every request is
        # authenticated.
        'url': 'https://www.googleapis.com/genomics/v1beta2/%s?key='
               + api_key + '&%s',
        'supportsPartialResponse': True,
        'datasets': {'1000 Genomes': '10473108253681171589',
                     'Platinum Genomes': '3049512673186936334',
                     'DREAM SMC Challenge': '337315832689',
                     'PGP': '383928317087',
                     'Simons Foundation' : '461916304629'}
    }
# Expected API failure whose message is safe to show to the client;
# handle_exception turns these into 400 responses.
class ApiException(Exception):
    pass
# Request handlers
# Request handlers
class BaseRequestHandler(webapp2.RequestHandler):
    """Shared plumbing for talking to a GA4GH-style genomics backend."""

    def handle_exception(self, exception, debug_mode):
        if isinstance(exception, ApiException):
            # ApiExceptions are expected, and will return nice error
            # messages to the client
            self.response.write(exception.message)
            self.response.set_status(400)
        else:
            # All other exceptions are unexpected and should be logged
            logging.exception('Unexpected exception')
            self.response.write('Unexpected internal exception')
            self.response.set_status(500)

    def get_backend(self):
        # The client must name one of the SUPPORTED_BACKENDS keys.
        backend = self.request.get('backend')
        if not backend:
            raise ApiException('Backend parameter must be set')
        return backend

    def supports_name_filter(self):
        # Python 2 dict.has_key; True when the backend declares the
        # optional capability flag.
        return SUPPORTED_BACKENDS[self.get_backend()].has_key('supportsNameFilter')

    def supports_partial_response(self):
        return SUPPORTED_BACKENDS[self.get_backend()]\
            .has_key('supportsPartialResponse')

    def get_base_api_url(self):
        # URL template with %s slots for (api path, query params).
        return SUPPORTED_BACKENDS[self.get_backend()]['url']

    def get_content(self, path, method='POST', body=None, params=''):
        """Call `path` on the backend and return the parsed JSON response.

        Raises ApiException on non-JSON content or HTTP status >= 300.
        """
        uri= self.get_base_api_url() % (path, params)
        startTime = time.clock()
        response, content = http.request(
            uri,
            method=method, body=json.dumps(body) if body else None,
            headers={'Content-Type': 'application/json; charset=UTF-8'})
        contentLen = len(content)
        try:
            content = json.loads(content)
        except ValueError:
            logging.error('while requesting {}'.format(uri))
            # Only log a prefix; responses can be very large.
            logging.error('non-json api content %s' % content[:1000])
            raise ApiException('The API returned invalid JSON')
        if response.status >= 300:
            logging.error('error api response %s' % response)
            logging.error('error api content %s' % content)
            if 'error' in content:
                raise ApiException(content['error']['message'])
            else:
                raise ApiException('Something went wrong with the API call!')
        logging.info('get_content {}: {}kb {}s'
                     .format(uri, contentLen/1024, time.clock() - startTime))
        return content

    def write_response(self, content):
        self.response.headers['Content-Type'] = "application/json"
        self.response.write(json.dumps(content))

    def write_content(self, path, method='POST', body=None, params=''):
        # Convenience: fetch from the backend and relay as JSON.
        self.write_response(self.get_content(path, method, body, params))
class SetSearchHandler(BaseRequestHandler):
    """Searches for, and fetches, read group sets and call sets."""

    def write_read_group_sets(self, dataset_id, name):
        """Write read group sets in `dataset_id` whose name matches `name`."""
        self.write_content('readgroupsets/search',
                           body={'datasetIds': [dataset_id], 'name': name},
                           params='fields=readGroupSets(id,name)')

    def write_call_sets(self, dataset_id, name):
        """Write call sets in `dataset_id` whose name matches `name`."""
        # The call set search API is keyed by variant set, so first look
        # up the dataset's (first) variant set.
        variant_sets = self.get_content('variantsets/search',
                                        body={'datasetIds': [dataset_id]})
        variant_set_id = variant_sets['variantSets'][0]['id']
        self.write_content('callsets/search',
                           body={'variantSetIds': [variant_set_id], 'name': name},
                           params='fields=callSets(id,name)')

    def write_read_group_set(self, set_id):
        """Write one read group set plus its reference names and lengths."""
        # Renamed local from `set` to avoid shadowing the builtin.
        rg_set = self.get_content('readgroupsets/%s' % set_id, method='GET')
        # For read group sets, we also load up the reference set data
        reference_set_id = rg_set.get('referenceSetId') or \
            rg_set['readGroups'][0].get('referenceSetId')
        if not reference_set_id:
            # TODO: Get coverage API added to GA4GH
            buckets = self.get_content('readgroupsets/%s/coveragebuckets' % set_id,
                                       method='GET')
            rg_set['references'] = [{'name': b['range']['referenceName'],
                                     'length': b['range']['end']}
                                    for b in buckets['coverageBuckets']]
        else:
            # TODO: Get search by refSetId added to GA4GH
            references = self.get_content('references/search',
                                          body={'referenceSetId': [reference_set_id]},
                                          params='fields=references(name,length)')
            rg_set['references'] = references['references']
        self.response.write(json.dumps(rg_set))

    def write_call_set(self, set_id):
        """Write one call set plus reference bounds from its variant set."""
        call_set = self.get_content('callsets/%s' % set_id, method='GET')
        # For call sets, we also load up the variant set data to get
        # the available reference names and lengths
        variant_set_id = call_set['variantSetIds'][0]
        variant_set = self.get_content('variantsets/%s' % variant_set_id,
                                       method="GET")
        # TODO: Get variantset.refSetId added to GA4GH
        call_set['references'] = [{'name': b['referenceName'],
                                   'length': b['upperBound']}
                                  for b in variant_set['referenceBounds']]
        self.response.write(json.dumps(call_set))

    def get(self):
        """Dispatch on setType/setId: fetch one set, or search by name."""
        use_callsets = self.request.get('setType') == 'CALLSET'
        set_id = self.request.get('setId')
        if set_id:
            if use_callsets:
                self.write_call_set(set_id)
            else:
                self.write_read_group_set(set_id)
        else:
            dataset_id = self.request.get('datasetId')
            name = self.request.get('name')
            try:
                if use_callsets:
                    self.write_call_sets(dataset_id, name)
                else:
                    self.write_read_group_sets(dataset_id, name)
            except Exception:
                # BUG FIX: was a bare `except:`, which also swallows
                # SystemExit/KeyboardInterrupt. Failed searches still
                # degrade to an empty JSON object for the client.
                self.response.write('{}')
class ReadSearchHandler(BaseRequestHandler):
    """Proxies read search requests to the active backend."""

    def get(self):
        """Search reads in a genomic window for the given read group sets."""
        body = {
            'readGroupSetIds': self.request.get('setIds').split(','),
            'referenceName': self.request.get('sequenceName'),
            # Window start is clamped at zero.
            'start': max(0, int(self.request.get('sequenceStart'))),
            'end': int(self.request.get('sequenceEnd')),
        }
        readFields = self.request.get('readFields')
        params = ''
        if readFields and self.supports_partial_response():
            # Ask the backend itself for only the requested fields.
            params = 'fields=nextPageToken,alignments(%s)' % readFields
            body['pageSize'] = 1024
        pageToken = self.request.get('pageToken')
        if pageToken:
            body['pageToken'] = pageToken
        content = self.get_content('reads/search', body=body, params=params)
        # Emulate support for partial responses by supplying only the
        # requested fields to the client.
        if readFields and not self.supports_partial_response():
            fields = readFields.split(',')
            # NOTE(review): the parameter shadows the `dict` builtin, and
            # this raises KeyError if a requested field is missing from a
            # read -- presumably clients only request returned fields.
            def filterKeys(dict, keys):
                return { key: dict[key] for key in keys }
            newReads = [ filterKeys(read, fields) for read in content['alignments']]
            content['alignments'] = newReads
        self.write_response(content)
class VariantSearchHandler(BaseRequestHandler):
    """Proxies variant search requests to the active backend."""

    def get(self):
        """Search variants in a genomic window for the given call sets."""
        window_start = max(0, int(self.request.get('sequenceStart')))
        search_body = {
            'callSetIds': self.request.get('setIds').split(','),
            'referenceName': self.request.get('sequenceName'),
            'start': window_start,
            'end': int(self.request.get('sequenceEnd')),
            'pageSize': 100,
        }
        page_token = self.request.get('pageToken')
        if page_token:
            search_body['pageToken'] = page_token
        self.write_content('variants/search', body=search_body)
class BaseSnpediaHandler(webapp2.RequestHandler):
    """Shared helpers for handlers that read SNPedia wiki pages."""

    def getSnppediaPageContent(self, snp):
        """Fetch the raw wikitext of the SNPedia page named `snp`."""
        uri = ("http://bots.snpedia.com/api.php?action=query&prop=revisions&"
               "format=json&rvprop=content&titles=%s" % snp)
        response, content = http.request(uri=uri)
        pages = json.loads(content)['query']['pages']
        page_id, page = pages.popitem()
        return page['revisions'][0]['*']

    def getContentValue(self, content, key):
        """Return the value of a 'key=value' line in wikitext, or ''."""
        match = re.search('%s=(.*)\n' % key, content, re.I)
        return match.group(1) if match else ''

    def complement(self, base):
        """Return the Watson-Crick complement of a single DNA base."""
        pairs = {'A': 'T', 'T': 'A', 'G': 'C', 'C': 'G'}
        return pairs[base]
class SnpSearchHandler(BaseSnpediaHandler):
    """Looks up a SNP (or gene) on SNPedia and returns basic info."""

    def getSnpResponse(self, name, content):
        """Build a small dict describing one SNP from its page wikitext."""
        return {
            'name': name,
            'link': 'http://www.snpedia.com/index.php/%s' % name,
            'position': self.getContentValue(content, 'position'),
            'chr': self.getContentValue(content, 'chromosome')
        }

    def get(self):
        """Handle /api/snps?snp=...; writes {'snps': [...]}."""
        snp = self.request.get('snp')
        try:
            content = self.getSnppediaPageContent(snp)
            if snp[:2].lower() == 'rs':
                snps = [self.getSnpResponse(snp, content)]
            else:
                # Try a gene format: gene pages link their SNPs as [[rs123]].
                snps = re.findall('\[\[(rs\d+?)\]\]', content, re.I)
                snps = [self.getSnpResponse(s, self.getSnppediaPageContent(s))
                        for s in set(snps)]
        except (ValueError, KeyError, AttributeError):
            # Unknown page or unexpected wiki markup -> empty result.
            snps = []
        self.response.write(json.dumps({'snps' : snps}))
class AlleleSearchHandler(BaseSnpediaHandler):
    """Looks up a specific SNP genotype (allele pair) on SNPedia."""

    def getAlleleResponse(self, name, content):
        """Build a dict describing one genotype page's wikitext fields."""
        return {
            'name': name,
            'link': 'http://www.snpedia.com/index.php/%s' % name,
            'repute': self.getContentValue(content, 'repute'),
            'summary': self.getContentValue(content, 'summary') or 'Unknown',
            'magnitude': self.getContentValue(content, 'magnitude')
        }

    def get(self):
        """Handle /api/alleles?snp=...&a1=...&a2=...; writes {} if unknown."""
        snp = self.request.get('snp')
        a1 = self.request.get('a1')
        a2 = self.request.get('a2')
        # SNPedia may record a genotype in either allele order and/or on
        # the complementary strand, so try all four page-name variants.
        a1c = self.complement(a1)
        a2c = self.complement(a2)
        possible_names = [(snp, a1, a2), (snp, a2, a1),
                          (snp, a1c, a2c), (snp, a2c, a1c)]
        for name in possible_names:
            try:
                page = "%s(%s;%s)" % name
                content = self.getSnppediaPageContent(page)
                self.response.write(json.dumps(self.getAlleleResponse(page, content)))
                return
            except (ValueError, KeyError, AttributeError):
                pass # Continue trying the next allele name
        self.response.write(json.dumps({}))
class MainHandler(webapp2.RequestHandler):
    """Serves the single-page application shell."""

    def get(self):
        """Render main.html with the list of configured backends."""
        context = {'backends': SUPPORTED_BACKENDS}
        template = JINJA_ENVIRONMENT.get_template('main.html')
        self.response.write(template.render(context))
# WSGI route table mapping URL paths to their request handlers.
web_app = webapp2.WSGIApplication(
    [
        ('/', MainHandler),
        ('/api/reads', ReadSearchHandler),
        ('/api/variants', VariantSearchHandler),
        ('/api/sets', SetSearchHandler),
        ('/api/snps', SnpSearchHandler),
        ('/api/alleles', AlleleSearchHandler),
    ],
    debug=True)
| bcl-io/hmDNA | api-client-python/main.py | Python | artistic-2.0 | 12,222 |
import numpy as np
def dataprep(puf, Stage_I_factors, Stage_II_targets, year):
    """Build the Stage-II LP coefficient matrices and target vector.

    Arguments:
    puf -- PUF records exposing the tax variables as attributes/columns
    Stage_I_factors -- mapping: year -> {factor name -> growth factor}
    Stage_II_targets -- mapping: str(year) -> {target name -> published total}
    year -- calendar year (int) being targeted

    Side effect: writes '<year>_input.npz' containing A1, A2 and b.
    """
    print("Preparing coefficient matrix for year {} .....".format(year))
    # Weight records: social-security recipients are grown with the senior
    # population factor, everyone else with the total-returns factor.
    s006 = np.where(
        puf.e02400 > 0,
        puf.s006 * Stage_I_factors[year]["APOPSNR"] / 100,
        puf.s006 * Stage_I_factors[year]["ARETS"] / 100,
    )
    # Weighted filer counts by filing status.
    single_return = np.where((puf.mars == 1) & (puf.filer == 1), s006, 0)
    joint_return = np.where(
        ((puf.mars == 2) | (puf.mars == 3)) & (puf.filer == 1), s006, 0
    )
    hh_return = np.where((puf.mars == 4) & (puf.filer == 1), s006, 0)
    return_w_SS = np.where((puf.e02400 > 0) & (puf.filer == 1), s006, 0)
    dependent_exempt_num = (puf.xocah + puf.xocawh + puf.xoodep + puf.xopar) * s006
    # Weighted income amounts; gains and losses are kept as separate
    # nonnegative series.
    interest = puf.e00300 * s006
    dividend = puf.e00600 * s006
    biz_income = np.where(puf.e00900 > 0, puf.e00900, 0) * s006
    biz_loss = np.where(puf.e00900 < 0, -puf.e00900, 0) * s006
    cap_gain = (
        np.where((puf.p23250 + puf.p22250) > 0, puf.p23250 + puf.p22250, 0) * s006
    )
    annuity_pension = puf.e01700 * s006
    sch_e_income = np.where(puf.e02000 > 0, puf.e02000, 0) * s006
    sch_e_loss = np.where(puf.e02000 < 0, -puf.e02000, 0) * s006
    ss_income = np.where(puf.filer == 1, puf.e02400, 0) * s006
    unemployment_comp = puf.e02300 * s006
    # Wage distribution by AGI bracket. Each bracket is (low, high] with
    # None meaning unbounded on that side, i.e. bracket 1 is AGI <= 0 and
    # bracket 12 is AGI > $1M -- identical to the former wage_1..wage_12.
    agi_brackets = [
        (None, 0), (0, 10000), (10000, 20000), (20000, 30000),
        (30000, 40000), (40000, 50000), (50000, 75000), (75000, 100000),
        (100000, 200000), (200000, 500000), (500000, 1000000),
        (1000000, None),
    ]

    def _bracket_wage(low, high):
        # Weighted wages of records whose AGI falls in (low, high].
        if low is None:
            mask = puf.e00100 <= high
        elif high is None:
            mask = puf.e00100 > low
        else:
            mask = (puf.e00100 > low) & (puf.e00100 <= high)
        return np.where(mask, puf.e00200, 0) * s006

    wages = [_bracket_wage(low, high) for low, high in agi_brackets]
    # Stack one constraint row per target; the r and s adjustment vectors
    # enter the LP with coefficients +LHS and -LHS respectively.
    One_half_LHS = np.vstack(
        [
            single_return,
            joint_return,
            hh_return,
            return_w_SS,
            dependent_exempt_num,
            interest,
            dividend,
            biz_income,
            biz_loss,
            cap_gain,
            annuity_pension,
            sch_e_income,
            sch_e_loss,
            ss_income,
            unemployment_comp,
        ]
        + wages
    )
    A1 = np.array(One_half_LHS)
    A2 = np.array(-One_half_LHS)
    print("Preparing targets for year {} .....".format(year))
    APOPN = Stage_I_factors[year]["APOPN"]
    APOPSNR = Stage_I_factors[year]["APOPSNR"]
    ystr = "{}".format(year)
    targets = Stage_II_targets[ystr]
    # Count targets: gap between the published total and the weighted sum.
    b = [
        targets["Single Returns"] - single_return.sum(),
        targets["Joint Returns"] - joint_return.sum(),
        targets["Head of Household Returns"] - hh_return.sum(),
        targets["Number of Returns w/ Gross Security Income"] - return_w_SS.sum(),
        targets["Number of Dependent Exemptions"] - dependent_exempt_num.sum(),
    ]

    def _amount_gap(target_name, factor_name, series, population=APOPN):
        # Dollar targets are published in $1000s and are scaled by
        # population / per-capita growth before differencing, matching
        # the original `target * pop / factor * 1000 - series.sum()`.
        factor = Stage_I_factors[year][factor_name]
        return targets[target_name] * population / factor * 1000 - series.sum()

    b.append(_amount_gap("Taxable Interest Income", "AINTS", interest))
    b.append(_amount_gap("Ordinary Dividends", "ADIVS", dividend))
    b.append(_amount_gap("Business Income (Schedule C)", "ASCHCI", biz_income))
    b.append(_amount_gap("Business Loss (Schedule C)", "ASCHCL", biz_loss))
    b.append(_amount_gap("Net Capital Gains in AGI", "ACGNS", cap_gain))
    b.append(_amount_gap("Taxable Pensions and Annuities", "ATXPY", annuity_pension))
    b.append(_amount_gap("Supplemental Income (Schedule E)", "ASCHEI", sch_e_income))
    b.append(_amount_gap("Supplemental Loss (Schedule E)", "ASCHEL", sch_e_loss))
    # Social security scales with the senior population, not APOPN.
    b.append(
        _amount_gap("Gross Social Security Income", "ASOCSEC", ss_income,
                    population=APOPSNR)
    )
    b.append(_amount_gap("Unemployment Compensation", "AUCOMP", unemployment_comp))
    wage_target_names = [
        "Wages and Salaries: Zero or Less",
        "Wages and Salaries: $1 Less Than $10,000",
        "Wages and Salaries: $10,000 Less Than $20,000",
        "Wages and Salaries: $20,000 Less Than $30,000",
        "Wages and Salaries: $30,000 Less Than $40,000",
        "Wages and Salaries: $40,000 Less Than $50,000",
        "Wages and Salaries: $50,000 Less Than $75,000",
        "Wages and Salaries: $75,000 Less Than $100,000",
        "Wages and Salaries: $100,000 Less Than $200,000",
        "Wages and Salaries: $200,000 Less Than $500,000",
        "Wages and Salaries: $500,000 Less Than $1 Million",
        "Wages and Salaries: $1 Million and Over",
    ]
    for target_name, wage in zip(wage_target_names, wages):
        b.append(_amount_gap(target_name, "AWAGE", wage))
    # export to .npz file
    np.savez(str(str(year) + "_input.npz"), A1=A1, A2=A2, b=b)
| OpenSourcePolicyCenter/taxdata | puf_stage2/dataprep.py | Python | mit | 8,370 |
import driver
import time
from job import Job, AsyncExecute
class PrintObject:
    """Base class for something drawable on a span of 7-segment digits."""

    def __init__(self, start_digit: int=0, nbr_digit: int=None):
        # When nbr_digit is omitted, span from start_digit to the end of
        # the 4-digit display.
        self.nbr_digit = 4 - start_digit if nbr_digit is None else nbr_digit
        self.start_digit = start_digit

    def need_print(self):
        """Return True when this object wants to be redrawn."""
        raise NotImplementedError

    def print(self, drv: driver.Driver_7Seg):
        """Draw this object using the given display driver."""
        raise NotImplementedError
class BaseStatic(PrintObject):
    """A drawable that renders once and stays until explicitly updated."""

    def __init__(self, start_digit: int=0, nbr_digit: int=None):
        super(BaseStatic, self).__init__(start_digit, nbr_digit)
        # Force an initial draw.
        self.redraw = True

    def need_print(self):
        return self.redraw

    def print(self, drv: driver.Driver_7Seg):
        # Once drawn, stay quiet until someone requests a redraw again.
        self.redraw = False
class Percentage(BaseStatic):
    """Shows a progress value as 'NNN%' across all four digits."""

    def __init__(self, max_value: int):
        self.max_value = max_value
        self.first = True
        self.percent = 0
        super(Percentage, self).__init__(0, 4)

    def print_percent_sign(self, drv):
        """Draw a makeshift percent sign on the right-hand digit."""
        drv.write_dot(drv.DOT_APOSTROPHE | drv.DOT_DIGIT4)
        drv.write_seg(3, drv.SEG_G)

    def update(self, i):
        """Set progress to i out of max_value and request a redraw."""
        self.percent = 100.0 * i / self.max_value
        self.redraw = True

    def print(self, drv: driver.Driver_7Seg):
        if self.first:
            # The percent sign never changes, so draw it only once.
            self.print_percent_sign(drv)
            self.first = False
        drv.set_cursor(0)
        super(Percentage, self).print(drv)
        drv.print('%3d' % self.percent)
class BaseAnim(PrintObject):
    """A drawable that advances through nbr_step frames on a timer."""

    def __init__(self, nbr_step: int, step_time: float, start_digit: int=0, nbr_digit: int=None):
        super(BaseAnim, self).__init__(start_digit, nbr_digit)
        self.nbr_step = nbr_step
        self.step = 0
        self.step_time = step_time
        self.next_update = 0

    def need_print(self):
        # Redraw only once the per-frame deadline has passed.
        now = time.time()
        if now <= self.next_update:
            return False
        self.next_update = now + self.step_time
        return True

    def print(self, drv: driver.Driver_7Seg):
        # Advance to the next frame, wrapping around.
        self.step = (self.step + 1) % self.nbr_step
class WheelAnim(BaseAnim):
    """A four-frame 'spinning segment' animation on a single digit."""

    def __init__(self, digit: int, step_time: float=0.2):
        super(WheelAnim, self).__init__(4, step_time, digit, 1)

    def print(self, drv: driver.Driver_7Seg):
        # One segment per frame: top, right, middle, left.
        segments = (drv.SEG_A, drv.SEG_B, drv.SEG_G, drv.SEG_F)
        drv.write_seg(self.start_digit, segments[self.step])
        super(WheelAnim, self).print(drv)
class TextAnim(BaseAnim):
    """Scrolls a text through a window of digits, one character per step."""

    def __init__(self, txt: str, start_digit: int=0, nbr_digit: int=None, step_time: float=0.7):
        # Texts longer than the display scroll one position per step;
        # shorter texts are static (a single frame).
        steps = len(txt) if len(txt) > 4 else 1
        super(TextAnim, self).__init__(steps, step_time, start_digit, nbr_digit)
        self.txt = txt

    def print(self, drv: driver.Driver_7Seg):
        drv.set_cursor(self.start_digit)
        window = self.txt[self.step:]
        # Pad with spaces, then clip to the window width.
        window = (window + ' ' * (self.nbr_digit - len(window)))[:self.nbr_digit]
        drv.print(window)
        super(TextAnim, self).print(drv)
class Simple7Seg(Job):
    """Job that repeatedly renders a list of printable objects."""

    def __init__(self, driver_settings):
        self.do_clear = True
        self.drv = driver.driver_factory(driver_settings)
        self.anim = []

    def print_anim(self, anim: []):
        """Replace the displayed objects; the display is cleared first."""
        self.anim = anim
        self.do_clear = True

    def process(self):
        """One scheduler tick: clear if requested, then redraw as needed."""
        if self.do_clear:
            self.drv.clear()
            self.do_clear = False
        for drawable in self.anim:
            if drawable.need_print():
                drawable.print(self.drv)
if __name__ == '__main__':
    # Demo: run the display job in the background, show a progress
    # percentage, then two spinners flanking a scrolling number.
    from settings import Settings
    helper = Simple7Seg(Settings.Driver)
    task = AsyncExecute(helper)
    task.start()
    progress = Percentage(15)
    helper.print_anim([progress])
    for i in range(15):
        progress.update(i)
        time.sleep(0.4)
    anims = []
    anims.append(TextAnim('123456', 1, 2))
    anims.append(WheelAnim(0))
    anims.append(WheelAnim(3))
    helper.print_anim(anims)
    time.sleep(10)
    task.join()
| AmberAussie/SevenSegments | simple7seg.py | Python | gpl-3.0 | 4,284 |
#!/usr/bin/python3
# coding: utf-8
# # TensorFlow Tutorial
#
# Welcome to this week's programming assignment. Until now, you've always used numpy to build neural networks. Now we will step you through a deep learning framework that will allow you to build neural networks more easily. Machine learning frameworks like TensorFlow, PaddlePaddle, Torch, Caffe, Keras, and many others can speed up your machine learning development significantly. All of these frameworks also have a lot of documentation, which you should feel free to read. In this assignment, you will learn to do the following in TensorFlow:
#
# - Initialize variables
# - Start your own session
# - Train algorithms
# - Implement a Neural Network
#
# Programing frameworks can not only shorten your coding time, but sometimes also perform optimizations that speed up your code.
#
# ## 1 - Exploring the Tensorflow Library
#
# To start, you will import the library:
#
# In[38]:
import math
import numpy as np
import h5py
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.python.framework import ops
from tf_utils import load_dataset, random_mini_batches, convert_to_one_hot, predict
#get_ipython().magic('matplotlib inline')
np.random.seed(1)
# Now that you have imported the library, we will walk you through its different applications. You will start with an example, where we compute for you the loss of one training example.
# $$loss = \mathcal{L}(\hat{y}, y) = (\hat y^{(i)} - y^{(i)})^2 \tag{1}$$
# In[39]:
y_hat = tf.constant(36, name='y_hat') # Define y_hat constant. Set to 36.
y = tf.constant(39, name='y') # Define y. Set to 39
loss = tf.Variable((y - y_hat)**2, name='loss') # Create a variable for the loss
init = tf.global_variables_initializer() # When init is run later (session.run(init)),
# the loss variable will be initialized and ready to be computed
with tf.Session() as session: # Create a session and print the output
session.run(init) # Initializes the variables
print(session.run(loss)) # Prints the loss
# Writing and running programs in TensorFlow has the following steps:
#
# 1. Create Tensors (variables) that are not yet executed/evaluated.
# 2. Write operations between those Tensors.
# 3. Initialize your Tensors.
# 4. Create a Session.
# 5. Run the Session. This will run the operations you'd written above.
#
# Therefore, when we created a variable for the loss, we simply defined the loss as a function of other quantities, but did not evaluate its value. To evaluate it, we had to run `init=tf.global_variables_initializer()`. That initialized the loss variable, and in the last line we were finally able to evaluate the value of `loss` and print its value.
#
# Now let us look at an easy example. Run the cell below:
# In[40]:
a = tf.constant(2)
b = tf.constant(10)
c = tf.multiply(a,b)
print(c)
# As expected, you will not see 20! You got a tensor saying that the result is a tensor that does not have the shape attribute, and is of type "int32". All you did was put in the 'computation graph', but you have not run this computation yet. In order to actually multiply the two numbers, you will have to create a session and run it.
# In[41]:
sess = tf.Session()
print(sess.run(c))
# Great! To summarize, **remember to initialize your variables, create a session and run the operations inside the session**.
#
# Next, you'll also have to know about placeholders. A placeholder is an object whose value you can specify only later.
# To specify values for a placeholder, you can pass in values by using a "feed dictionary" (`feed_dict` variable). Below, we created a placeholder for x. This allows us to pass in a number later when we run the session.
# In[42]:
# Change the value of x in the feed_dict
x = tf.placeholder(tf.int64, name = 'x')
print(sess.run(2 * x, feed_dict = {x: 3}))
sess.close()
# When you first defined `x` you did not have to specify a value for it. A placeholder is simply a variable that you will assign data to only later, when running the session. We say that you **feed data** to these placeholders when running the session.
#
# Here's what's happening: When you specify the operations needed for a computation, you are telling TensorFlow how to construct a computation graph. The computation graph can have some placeholders whose values you will specify only later. Finally, when you run the session, you are telling TensorFlow to execute the computation graph.
# ### 1.1 - Linear function
#
# Lets start this programming exercise by computing the following equation: $Y = WX + b$, where $W$ and $X$ are random matrices and b is a random vector.
#
# **Exercise**: Compute $WX + b$ where $W, X$, and $b$ are drawn from a random normal distribution. W is of shape (4, 3), X is (3,1) and b is (4,1). As an example, here is how you would define a constant X that has shape (3,1):
# ```python
# X = tf.constant(np.random.randn(3,1), name = "X")
#
# ```
# You might find the following functions helpful:
# - tf.matmul(..., ...) to do a matrix multiplication
# - tf.add(..., ...) to do an addition
# - np.random.randn(...) to initialize randomly
#
# In[43]:
# GRADED FUNCTION: linear_function
def linear_function():
    """
    Implements a linear function:
            Initializes W to be a random tensor of shape (4,3)
            Initializes X to be a random tensor of shape (3,1)
            Initializes b to be a random tensor of shape (4,1)
    Returns:
    result -- runs the session for Y = WX + b
    """
    np.random.seed(1)
    # Build the graph: constants X, W, b (created in this order so the
    # random draws match) and the affine expression Y = WX + b.
    X = tf.constant(np.random.randn(3, 1), name="X")
    W = tf.constant(np.random.randn(4, 3), name="W")
    b = tf.constant(np.random.randn(4, 1), name="b")
    Y = tf.add(tf.matmul(W, X), b)
    # Evaluate the graph; the context manager closes the session for us.
    with tf.Session() as sess:
        result = sess.run(Y)
    return result
# In[44]:
print( "result = " + str(linear_function()))
# *** Expected Output ***:
#
# <table>
# <tr>
# <td>
# **result**
# </td>
# <td>
# [[-2.15657382]
# [ 2.95891446]
# [-1.08926781]
# [-0.84538042]]
# </td>
# </tr>
#
# </table>
# ### 1.2 - Computing the sigmoid
# Great! You just implemented a linear function. Tensorflow offers a variety of commonly used neural network functions like `tf.sigmoid` and `tf.softmax`. For this exercise lets compute the sigmoid function of an input.
#
# You will do this exercise using a placeholder variable `x`. When running the session, you should use the feed dictionary to pass in the input `z`. In this exercise, you will have to (i) create a placeholder `x`, (ii) define the operations needed to compute the sigmoid using `tf.sigmoid`, and then (iii) run the session.
#
# ** Exercise **: Implement the sigmoid function below. You should use the following:
#
# - `tf.placeholder(tf.float32, name = "...")`
# - `tf.sigmoid(...)`
# - `sess.run(..., feed_dict = {x: z})`
#
#
# Note that there are two typical ways to create and use sessions in tensorflow:
#
# **Method 1:**
# ```python
# sess = tf.Session()
# # Run the variables initialization (if needed), run the operations
# result = sess.run(..., feed_dict = {...})
# sess.close() # Close the session
# ```
# **Method 2:**
# ```python
# with tf.Session() as sess:
# # run the variables initialization (if needed), run the operations
# result = sess.run(..., feed_dict = {...})
# # This takes care of closing the session for you :)
# ```
#
# In[45]:
# GRADED FUNCTION: sigmoid
def sigmoid(z):
    """
    Computes the sigmoid of z
    Arguments:
    z -- input value, scalar or vector
    Returns:
    results -- the sigmoid of z
    """
    # A placeholder lets us feed z in at run time.
    x = tf.placeholder(tf.float32, name='x')
    # Renamed local (was `sigmoid`) so the op doesn't shadow the function.
    sigmoid_op = tf.sigmoid(x)
    # The context manager closes the session for us.
    with tf.Session() as sess:
        result = sess.run(sigmoid_op, feed_dict={x: z})
    return result
# In[46]:
print ("sigmoid(0) = " + str(sigmoid(0)))
print ("sigmoid(12) = " + str(sigmoid(12)))
# *** Expected Output ***:
#
# <table>
# <tr>
# <td>
# **sigmoid(0)**
# </td>
# <td>
# 0.5
# </td>
# </tr>
# <tr>
# <td>
# **sigmoid(12)**
# </td>
# <td>
# 0.999994
# </td>
# </tr>
#
# </table>
# <font color='blue'>
# **To summarize, you how know how to**:
# 1. Create placeholders
# 2. Specify the computation graph corresponding to operations you want to compute
# 3. Create the session
# 4. Run the session, using a feed dictionary if necessary to specify placeholder variables' values.
# ### 1.3 - Computing the Cost
#
# You can also use a built-in function to compute the cost of your neural network. So instead of needing to write code to compute this as a function of $a^{[2](i)}$ and $y^{(i)}$ for i=1...m:
# $$ J = - \frac{1}{m} \sum_{i = 1}^m \large ( \small y^{(i)} \log a^{ [2] (i)} + (1-y^{(i)})\log (1-a^{ [2] (i)} )\large )\small\tag{2}$$
#
# you can do it in one line of code in tensorflow!
#
# **Exercise**: Implement the cross entropy loss. The function you will use is:
#
#
# - `tf.nn.sigmoid_cross_entropy_with_logits(logits = ..., labels = ...)`
#
# Your code should input `z`, compute the sigmoid (to get `a`) and then compute the cross entropy cost $J$. All this can be done using one call to `tf.nn.sigmoid_cross_entropy_with_logits`, which computes
#
# $$- \frac{1}{m} \sum_{i = 1}^m \large ( \small y^{(i)} \log \sigma(z^{[2](i)}) + (1-y^{(i)})\log (1-\sigma(z^{[2](i)})\large )\small\tag{2}$$
#
#
# In[47]:
# GRADED FUNCTION: cost
def cost(logits, labels):
    """
    Computes the cost using the sigmoid cross entropy
    Arguments:
    logits -- vector containing z, output of the last linear unit (before the final sigmoid activation)
    labels -- vector of labels y (1 or 0)
    Note: What we've been calling "z" and "y" in this class are respectively called "logits" and "labels"
    in the TensorFlow documentation. So logits will feed into z, and labels into y.
    Returns:
    cost -- runs the session of the cost (formula (2))
    """
    # Placeholders for the logits (z) and the true labels (y).
    z = tf.placeholder(tf.float32, name='z')
    # BUG FIX: the labels placeholder was previously misnamed 'x'; the
    # graph-node name is now 'y'. Computed values are unaffected.
    y = tf.placeholder(tf.float32, name='y')
    # Element-wise sigmoid cross-entropy loss.
    cost = tf.nn.sigmoid_cross_entropy_with_logits(logits=z, labels=y)
    # Evaluate; the context manager closes the session for us.
    with tf.Session() as sess:
        cost = sess.run(cost, feed_dict={z: logits, y: labels})
    return cost
# In[48]:
logits = sigmoid(np.array([0.2,0.4,0.7,0.9]))
cost = cost(logits, np.array([0,0,1,1]))
print ("cost = " + str(cost))
# ** Expected Output** :
#
# <table>
# <tr>
# <td>
# **cost**
# </td>
# <td>
# [ 1.00538719 1.03664088 0.41385433 0.39956614]
# </td>
# </tr>
#
# </table>
# ### 1.4 - Using One Hot encodings
#
# Many times in deep learning you will have a y vector with numbers ranging from 0 to C-1, where C is the number of classes. If C is for example 4, then you might have the following y vector which you will need to convert as follows:
#
#
# <img src="images/onehot.png" style="width:600px;height:150px;">
#
# This is called a "one hot" encoding, because in the converted representation exactly one element of each column is "hot" (meaning set to 1). To do this conversion in numpy, you might have to write a few lines of code. In tensorflow, you can use one line of code:
#
# - tf.one_hot(labels, depth, axis)
#
# **Exercise:** Implement the function below to take one vector of labels and the total number of classes $C$, and return the one hot encoding. Use `tf.one_hot()` to do this.
# In[49]:
# GRADED FUNCTION: one_hot_matrix
def one_hot_matrix(labels, C):
    """
    Creates a matrix where the i-th row corresponds to the ith class number and the jth column
    corresponds to the jth training example. So if example j had a label i. Then entry (i,j)
    will be 1.
    Arguments:
    labels -- vector containing the labels
    C -- number of classes, the depth of the one hot dimension
    Returns:
    one_hot -- one hot matrix
    """
    # A constant for the depth; kept the graph name 'C' of the original.
    depth = tf.constant(C, name="C")
    # axis=0 puts classes along rows and examples along columns.
    one_hot_op = tf.one_hot(labels, depth, axis=0)
    # Evaluate; the context manager closes the session for us.
    with tf.Session() as sess:
        one_hot = sess.run(one_hot_op)
    return one_hot
# In[50]:
labels = np.array([1,2,3,0,2,1])
one_hot = one_hot_matrix(labels, C = 4)
print ("one_hot = " + str(one_hot))
# **Expected Output**:
#
# <table>
# <tr>
# <td>
# **one_hot**
# </td>
# <td>
# [[ 0. 0. 0. 1. 0. 0.]
# [ 1. 0. 0. 0. 0. 1.]
# [ 0. 1. 0. 0. 1. 0.]
# [ 0. 0. 1. 0. 0. 0.]]
# </td>
# </tr>
#
# </table>
#
# ### 1.5 - Initialize with zeros and ones
#
# Now you will learn how to initialize a vector of zeros and ones. The function you will be calling is `tf.ones()`. To initialize with zeros you could use tf.zeros() instead. These functions take in a shape and return an array of dimension shape full of zeros and ones respectively.
#
# **Exercise:** Implement the function below to take in a shape and to return an array (of the shape's dimension of ones).
#
# - tf.ones(shape)
#
# In[51]:
# GRADED FUNCTION: ones
def ones(shape):
    """
    Creates an array of ones of dimension shape
    Arguments:
    shape -- shape of the array you want to create
    Returns:
    ones -- array containing only ones
    """
    # Build the constant tensor of ones.
    ones_op = tf.ones(shape)
    # Evaluate; the context manager closes the session for us.
    with tf.Session() as sess:
        ones = sess.run(ones_op)
    return ones
# In[52]:
print ("ones = " + str(ones([3])))
# **Expected Output:**
#
# <table>
# <tr>
# <td>
# **ones**
# </td>
# <td>
# [ 1. 1. 1.]
# </td>
# </tr>
#
# </table>
# # 2 - Building your first neural network in tensorflow
#
# In this part of the assignment you will build a neural network using tensorflow. Remember that there are two parts to implement a tensorflow model:
#
# - Create the computation graph
# - Run the graph
#
# Let's delve into the problem you'd like to solve!
#
# ### 2.0 - Problem statement: SIGNS Dataset
#
# One afternoon, with some friends we decided to teach our computers to decipher sign language. We spent a few hours taking pictures in front of a white wall and came up with the following dataset. It's now your job to build an algorithm that would facilitate communications from a speech-impaired person to someone who doesn't understand sign language.
#
# - **Training set**: 1080 pictures (64 by 64 pixels) of signs representing numbers from 0 to 5 (180 pictures per number).
# - **Test set**: 120 pictures (64 by 64 pixels) of signs representing numbers from 0 to 5 (20 pictures per number).
#
# Note that this is a subset of the SIGNS dataset. The complete dataset contains many more signs.
#
# Here are examples for each number, and how an explanation of how we represent the labels. These are the original pictures, before we lowered the image resolutoion to 64 by 64 pixels.
# <img src="images/hands.png" style="width:800px;height:350px;"><caption><center> <u><font color='purple'> **Figure 1**</u><font color='purple'>: SIGNS dataset <br> <font color='black'> </center>
#
#
# Run the following code to load the dataset.
# In[53]:
# Loading the dataset
X_train_orig, Y_train_orig, X_test_orig, Y_test_orig, classes = load_dataset()
# Change the index below and run the cell to visualize some examples in the dataset.
# In[54]:
# Example of a picture
index = 0
plt.imshow(X_train_orig[index])
print ("y = " + str(np.squeeze(Y_train_orig[:, index])))
# As usual you flatten the image dataset, then normalize it by dividing by 255. On top of that, you will convert each label to a one-hot vector as shown in Figure 1. Run the cell below to do so.
# In[55]:
# Flatten the training and test images
# Each 64x64x3 image becomes one 12288-element column vector.
X_train_flatten = X_train_orig.reshape(X_train_orig.shape[0], -1).T
X_test_flatten = X_test_orig.reshape(X_test_orig.shape[0], -1).T
# Normalize image vectors
# Pixel values are scaled from [0, 255] to [0, 1].
X_train = X_train_flatten/255.
X_test = X_test_flatten/255.
# Convert training and test labels to one hot matrices
Y_train = convert_to_one_hot(Y_train_orig, 6)
Y_test = convert_to_one_hot(Y_test_orig, 6)
print ("number of training examples = " + str(X_train.shape[1]))
print ("number of test examples = " + str(X_test.shape[1]))
print ("X_train shape: " + str(X_train.shape))
print ("Y_train shape: " + str(Y_train.shape))
print ("X_test shape: " + str(X_test.shape))
print ("Y_test shape: " + str(Y_test.shape))
# **Note** that 12288 comes from $64 \times 64 \times 3$. Each image is square, 64 by 64 pixels, and 3 is for the RGB colors. Please make sure all these shapes make sense to you before continuing.
# **Your goal** is to build an algorithm capable of recognizing a sign with high accuracy. To do so, you are going to build a tensorflow model that is almost the same as one you have previously built in numpy for cat recognition (but now using a softmax output). It is a great occasion to compare your numpy implementation to the tensorflow one.
#
# **The model** is *LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SOFTMAX*. The SIGMOID output layer has been converted to a SOFTMAX. A SOFTMAX layer generalizes SIGMOID to when there are more than two classes.
# ### 2.1 - Create placeholders
#
# Your first task is to create placeholders for `X` and `Y`. This will allow you to later pass your training data in when you run your session.
#
# **Exercise:** Implement the function below to create the placeholders in tensorflow.
# In[56]:
# GRADED FUNCTION: create_placeholders
def create_placeholders(n_x, n_y):
    """
    Creates the placeholders for the tensorflow session.
    Arguments:
    n_x -- scalar, size of an image vector (num_px * num_px = 64 * 64 * 3 = 12288)
    n_y -- scalar, number of classes (from 0 to 5, so -> 6)
    Returns:
    X -- placeholder for the data input, of shape [n_x, None] and dtype "float"
    Y -- placeholder for the input labels, of shape [n_y, None] and dtype "float"
    Tips:
    - You will use None because it let's us be flexible on the number of examples you will for the placeholders.
      In fact, the number of examples during test/train is different.
    """
    ### START CODE HERE ### (approx. 2 lines)
    # None in the second dimension leaves the batch size flexible.
    X = tf.placeholder(tf.float32, shape=(n_x, None))
    Y = tf.placeholder(tf.float32, shape=(n_y, None))
    ### END CODE HERE ###
    return X, Y
# In[57]:
X, Y = create_placeholders(12288, 6)
print ("X = " + str(X))
print ("Y = " + str(Y))
# **Expected Output**:
#
# <table>
# <tr>
# <td>
# **X**
# </td>
# <td>
# Tensor("Placeholder_1:0", shape=(12288, ?), dtype=float32) (not necessarily Placeholder_1)
# </td>
# </tr>
# <tr>
# <td>
# **Y**
# </td>
# <td>
# Tensor("Placeholder_2:0", shape=(10, ?), dtype=float32) (not necessarily Placeholder_2)
# </td>
# </tr>
#
# </table>
# ### 2.2 - Initializing the parameters
#
# Your second task is to initialize the parameters in tensorflow.
#
# **Exercise:** Implement the function below to initialize the parameters in tensorflow. You are going use Xavier Initialization for weights and Zero Initialization for biases. The shapes are given below. As an example, to help you, for W1 and b1 you could use:
#
# ```python
# W1 = tf.get_variable("W1", [25,12288], initializer = tf.contrib.layers.xavier_initializer(seed = 1))
# b1 = tf.get_variable("b1", [25,1], initializer = tf.zeros_initializer())
# ```
# Please use `seed = 1` to make sure your results match ours.
# In[58]:
# GRADED FUNCTION: initialize_parameters
def initialize_parameters():
    """Build the tf.Variables for a 3-layer network.

    Shapes:
        W1 : [25, 12288]    b1 : [25, 1]
        W2 : [12, 25]       b2 : [12, 1]
        W3 : [6, 12]        b3 : [6, 1]

    Weights use Xavier initialization (seed fixed to 1 so results are
    reproducible); biases are initialized to zero.

    Returns:
    parameters -- a dictionary of tensors containing W1, b1, W2, b2, W3, b3
    """
    tf.set_random_seed(1)  # so that the "random" numbers are reproducible

    # (rows, cols) of each weight matrix; biases are column vectors of `rows`.
    layer_dims = [(25, 12288), (12, 25), (6, 12)]
    parameters = {}
    for layer, (rows, cols) in enumerate(layer_dims, start=1):
        parameters["W%d" % layer] = tf.get_variable(
            "W%d" % layer, [rows, cols],
            initializer=tf.contrib.layers.xavier_initializer(seed=1))
        parameters["b%d" % layer] = tf.get_variable(
            "b%d" % layer, [rows, 1],
            initializer=tf.zeros_initializer())
    return parameters
# In[59]:
tf.reset_default_graph()
with tf.Session() as sess:
parameters = initialize_parameters()
print("W1 = " + str(parameters["W1"]))
print("b1 = " + str(parameters["b1"]))
print("W2 = " + str(parameters["W2"]))
print("b2 = " + str(parameters["b2"]))
# **Expected Output**:
#
# <table>
# <tr>
# <td>
# **W1**
# </td>
# <td>
# < tf.Variable 'W1:0' shape=(25, 12288) dtype=float32_ref >
# </td>
# </tr>
# <tr>
# <td>
# **b1**
# </td>
# <td>
# < tf.Variable 'b1:0' shape=(25, 1) dtype=float32_ref >
# </td>
# </tr>
# <tr>
# <td>
# **W2**
# </td>
# <td>
# < tf.Variable 'W2:0' shape=(12, 25) dtype=float32_ref >
# </td>
# </tr>
# <tr>
# <td>
# **b2**
# </td>
# <td>
# < tf.Variable 'b2:0' shape=(12, 1) dtype=float32_ref >
# </td>
# </tr>
#
# </table>
# As expected, the parameters haven't been evaluated yet.
# ### 2.3 - Forward propagation in tensorflow
#
# You will now implement the forward propagation module in tensorflow. The function will take in a dictionary of parameters and it will complete the forward pass. The functions you will be using are:
#
# - `tf.add(...,...)` to do an addition
# - `tf.matmul(...,...)` to do a matrix multiplication
# - `tf.nn.relu(...)` to apply the ReLU activation
#
# **Question:** Implement the forward pass of the neural network. We commented for you the numpy equivalents so that you can compare the tensorflow implementation to numpy. It is important to note that the forward propagation stops at `z3`. The reason is that in tensorflow the last linear layer output is given as input to the function computing the loss. Therefore, you don't need `a3`!
#
#
# In[60]:
# GRADED FUNCTION: forward_propagation
def forward_propagation(X, parameters):
    """LINEAR -> RELU -> LINEAR -> RELU -> LINEAR forward pass.

    The final softmax is deliberately omitted: tensorflow's cross-entropy
    op consumes the raw linear output Z3 directly, so no activation (and
    no cache) is needed after the last layer.

    Arguments:
    X -- input dataset placeholder, of shape (input size, number of examples)
    parameters -- python dictionary containing "W1", "b1", ..., "b3"
                  (shapes as given in initialize_parameters)

    Returns:
    Z3 -- the output of the last LINEAR unit
    """
    # Hidden layers 1 and 2: linear step followed by ReLU.
    activation = X
    for layer in (1, 2):
        W = parameters["W%d" % layer]
        b = parameters["b%d" % layer]
        activation = tf.nn.relu(tf.add(tf.matmul(W, activation), b))
    # Output layer: linear only (softmax is folded into the cost).
    Z3 = tf.add(tf.matmul(parameters["W3"], activation), parameters["b3"])
    return Z3
# In[61]:
tf.reset_default_graph()
with tf.Session() as sess:
X, Y = create_placeholders(12288, 6)
parameters = initialize_parameters()
Z3 = forward_propagation(X, parameters)
print("Z3 = " + str(Z3))
# **Expected Output**:
#
# <table>
# <tr>
# <td>
# **Z3**
# </td>
# <td>
# Tensor("Add_2:0", shape=(6, ?), dtype=float32)
# </td>
# </tr>
#
# </table>
# You may have noticed that the forward propagation doesn't output any cache. You will understand why below, when we get to backpropagation.
# ### 2.4 Compute cost
#
# As seen before, it is very easy to compute the cost using:
# ```python
# tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits = ..., labels = ...))
# ```
# **Question**: Implement the cost function below.
# - It is important to know that the "`logits`" and "`labels`" inputs of `tf.nn.softmax_cross_entropy_with_logits` are expected to be of shape (number of examples, num_classes). We have thus transposed Z3 and Y for you.
# - Besides, `tf.reduce_mean` basically does the summation over the examples.
# In[62]:
# GRADED FUNCTION: compute_cost
def compute_cost(Z3, Y):
    """Mean softmax cross-entropy cost over all examples.

    Arguments:
    Z3 -- output of forward propagation (output of the last LINEAR unit),
          of shape (6, number of examples)
    Y -- "true" labels vector placeholder, same shape as Z3

    Returns:
    cost -- scalar Tensor of the cost
    """
    # tf.nn.softmax_cross_entropy_with_logits expects inputs of shape
    # (number of examples, num_classes), hence the transposes.
    per_example = tf.nn.softmax_cross_entropy_with_logits(
        logits=tf.transpose(Z3), labels=tf.transpose(Y))
    # tf.reduce_mean performs the averaging over the examples.
    return tf.reduce_mean(per_example)
# In[63]:
tf.reset_default_graph()
with tf.Session() as sess:
X, Y = create_placeholders(12288, 6)
parameters = initialize_parameters()
Z3 = forward_propagation(X, parameters)
cost = compute_cost(Z3, Y)
print("cost = " + str(cost))
# **Expected Output**:
#
# <table>
# <tr>
# <td>
# **cost**
# </td>
# <td>
# Tensor("Mean:0", shape=(), dtype=float32)
# </td>
# </tr>
#
# </table>
# ### 2.5 - Backward propagation & parameter updates
#
# This is where you become grateful to programming frameworks. All the backpropagation and the parameters update is taken care of in 1 line of code. It is very easy to incorporate this line in the model.
#
# After you compute the cost function. You will create an "`optimizer`" object. You have to call this object along with the cost when running the tf.session. When called, it will perform an optimization on the given cost with the chosen method and learning rate.
#
# For instance, for gradient descent the optimizer would be:
# ```python
# optimizer = tf.train.GradientDescentOptimizer(learning_rate = learning_rate).minimize(cost)
# ```
#
# To make the optimization you would do:
# ```python
# _ , c = sess.run([optimizer, cost], feed_dict={X: minibatch_X, Y: minibatch_Y})
# ```
#
# This computes the backpropagation by passing through the tensorflow graph in the reverse order. From cost to inputs.
#
# **Note** When coding, we often use `_` as a "throwaway" variable to store values that we won't need to use later. Here, `_` takes on the evaluated value of `optimizer`, which we don't need (and `c` takes the value of the `cost` variable).
# ### 2.6 - Building the model
#
# Now, you will bring it all together!
#
# **Exercise:** Implement the model. You will be calling the functions you had previously implemented.
# In[65]:
def model(X_train, Y_train, X_test, Y_test, learning_rate = 0.0001,
          num_epochs = 1500, minibatch_size = 32, print_cost = True):
    """
    Implements a three-layer tensorflow neural network: LINEAR->RELU->LINEAR->RELU->LINEAR->SOFTMAX.

    Arguments:
    X_train -- training set, of shape (input size = 12288, number of training examples = 1080)
    Y_train -- training labels, of shape (output size = 6, number of training examples = 1080)
    X_test -- test set, of shape (input size = 12288, number of test examples = 120)
    Y_test -- test labels, of shape (output size = 6, number of test examples = 120)
    learning_rate -- learning rate of the optimization
    num_epochs -- number of epochs of the optimization loop
    minibatch_size -- size of a minibatch
    print_cost -- True to print the cost every 100 epochs

    Returns:
    parameters -- parameters learnt by the model. They can then be used to predict.
    """

    ops.reset_default_graph()  # to be able to rerun the model without overwriting tf variables
    tf.set_random_seed(1)      # to keep consistent results
    seed = 3                   # minibatch-shuffling seed, incremented per epoch
    (n_x, m) = X_train.shape   # (n_x: input size, m: number of examples in the train set)
    n_y = Y_train.shape[0]     # n_y: output size
    costs = []                 # to keep track of the cost for plotting

    # Create placeholders of shape (n_x, None) / (n_y, None).
    X, Y = create_placeholders(n_x, n_y)

    # Initialize parameters (Xavier weights, zero biases).
    parameters = initialize_parameters()

    # Forward propagation: build the forward pass in the tensorflow graph.
    Z3 = forward_propagation(X, parameters)

    # Cost function: add the softmax cross-entropy cost to the graph.
    cost = compute_cost(Z3, Y)

    # Backpropagation: the AdamOptimizer node computes gradients and
    # updates the variables when run -- no manual backprop needed.
    optimizer = tf.train.AdamOptimizer(learning_rate = learning_rate).minimize(cost)

    # Initialize all the variables
    init = tf.global_variables_initializer()

    # Start the session to compute the tensorflow graph
    with tf.Session() as sess:

        # Run the initialization
        sess.run(init)

        # Do the training loop
        for epoch in range(num_epochs):

            epoch_cost = 0.                            # cost accumulated over this epoch
            num_minibatches = int(m / minibatch_size)  # number of minibatches of size minibatch_size in the train set
            seed = seed + 1                            # reshuffle differently (but deterministically) each epoch
            minibatches = random_mini_batches(X_train, Y_train, minibatch_size, seed)

            for minibatch in minibatches:

                # Select a minibatch
                (minibatch_X, minibatch_Y) = minibatch

                # IMPORTANT: the line that runs the graph on a minibatch.
                # Running "optimizer" performs one Adam update; "_" discards
                # its (unneeded) return value, and the cost is kept.
                _ , minibatch_cost = sess.run([optimizer, cost], feed_dict={X: minibatch_X, Y: minibatch_Y})

                epoch_cost += minibatch_cost / num_minibatches

            # Print the cost every 100 epochs; record it every 5 for the plot.
            if print_cost == True and epoch % 100 == 0:
                print ("Cost after epoch %i: %f" % (epoch, epoch_cost))
            if print_cost == True and epoch % 5 == 0:
                costs.append(epoch_cost)

        # Plot the cost curve.
        plt.plot(np.squeeze(costs))
        plt.ylabel('cost')
        plt.xlabel('iterations (per tens)')
        plt.title("Learning rate =" + str(learning_rate))
        plt.show()

        # Evaluate the variables into numpy arrays so they survive the session.
        parameters = sess.run(parameters)
        print ("Parameters have been trained!")

        # Calculate the correct predictions (argmax over the 6 classes).
        correct_prediction = tf.equal(tf.argmax(Z3), tf.argmax(Y))

        # Calculate accuracy as the mean of the 0/1 correctness vector.
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))

        print ("Train Accuracy:", accuracy.eval({X: X_train, Y: Y_train}))
        print ("Test Accuracy:", accuracy.eval({X: X_test, Y: Y_test}))

        return parameters
# Run the following cell to train your model! On our machine it takes about 5 minutes. Your "Cost after epoch 100" should be 1.016458. If it's not, don't waste time; interrupt the training by clicking on the square (⬛) in the upper bar of the notebook, and try to correct your code. If it is the correct cost, take a break and come back in 5 minutes!
# In[ ]:
parameters = model(X_train, Y_train, X_test, Y_test)
# **Expected Output**:
#
# <table>
# <tr>
# <td>
# **Train Accuracy**
# </td>
# <td>
# 0.999074
# </td>
# </tr>
# <tr>
# <td>
# **Test Accuracy**
# </td>
# <td>
# 0.716667
# </td>
# </tr>
#
# </table>
#
# Amazing, your algorithm can recognize a sign representing a figure between 0 and 5 with 71.7% accuracy.
#
# **Insights**:
# - Your model seems big enough to fit the training set well. However, given the difference between train and test accuracy, you could try to add L2 or dropout regularization to reduce overfitting.
# - Think about the session as a block of code to train the model. Each time you run the session on a minibatch, it trains the parameters. In total you have run the session a large number of times (1500 epochs) until you obtained well trained parameters.
# ### 2.7 - Test with your own image (optional / ungraded exercise)
#
# Congratulations on finishing this assignment. You can now take a picture of your hand and see the output of your model. To do that:
# 1. Click on "File" in the upper bar of this notebook, then click "Open" to go on your Coursera Hub.
# 2. Add your image to this Jupyter Notebook's directory, in the "images" folder
# 3. Write your image's name in the following code
# 4. Run the code and check if the algorithm is right!
# In[ ]:
import scipy
from PIL import Image
from scipy import ndimage

## START CODE HERE ## (PUT YOUR IMAGE NAME)
my_image = "thumbs_up.jpg"
## END CODE HERE ##

# We preprocess your image to fit your algorithm:
# load it, resize to 64x64, flatten to a (12288, 1) column vector.
fname = "images/" + my_image
# NOTE(review): scipy.ndimage.imread and scipy.misc.imresize were removed in
# SciPy >= 1.2; this cell presumably targets the old course environment — verify.
image = np.array(ndimage.imread(fname, flatten=False))
my_image = scipy.misc.imresize(image, size=(64,64)).reshape((1, 64*64*3)).T
my_image_prediction = predict(my_image, parameters)

plt.imshow(image)
print("Your algorithm predicts: y = " + str(np.squeeze(my_image_prediction)))
# You indeed deserved a "thumbs-up" although as you can see the algorithm seems to classify it incorrectly. The reason is that the training set doesn't contain any "thumbs-up", so the model doesn't know how to deal with it! We call that a "mismatched data distribution" and it is one of the various topics covered in the next course on "Structuring Machine Learning Projects".
# <font color='blue'>
# **What you should remember**:
# - Tensorflow is a programming framework used in deep learning
# - The two main object classes in tensorflow are Tensors and Operators.
# - When you code in tensorflow you have to take the following steps:
# - Create a graph containing Tensors (Variables, Placeholders ...) and Operations (tf.matmul, tf.add, ...)
# - Create a session
# - Initialize the session
# - Run the session to execute the graph
# - You can execute the graph multiple times as you've seen in model()
# - The backpropagation and optimization is automatically done when running the session on the "optimizer" object.
| jinzishuai/learn2deeplearn | deeplearning.ai/C2.ImproveDeepNN/week3/tensorflow_assignment/tf.py | Python | gpl-3.0 | 37,883 |
import logging
import time
import struct
from dispatch import receiver
from stoppable import StoppableLoopThread
import signals
from db import DB
import utils
import rlp
import blocks
import processblock
from transactions import Transaction
import indexdb
logger = logging.getLogger(__name__)
rlp_hash_hex = lambda data: utils.sha3(rlp.encode(data)).encode('hex')
NUM_BLOCKS_PER_REQUEST = 32
class Miner():

    """
    Mines on the current head.
    Stores received transactions.

    Holds one candidate block (built on `parent`) and searches for a
    proof-of-work nonce for it in bounded steps, so the caller can
    interleave mining with other work.
    """

    def __init__(self, parent, uncles, coinbase):
        # Next nonce value to try; advanced by mine() so repeated calls
        # resume the search instead of restarting from zero.
        self.nonce = 0
        block = self.block = blocks.Block.init_from_parent(
            parent, coinbase, uncles=[u.hash for u in uncles])
        block.finalize()
        logger.debug('Mining #%d %s', block.number, block.hex_hash())
        logger.debug('Difficulty %s', block.difficulty)

    def add_transaction(self, transaction):
        """Apply `transaction` to the candidate block.

        Returns True if it was applied; False if it was invalid or could
        not be applied, in which case the block state is unchanged (the
        asserts check this invariant via the state root).
        """
        block_state = self.block.state_root
        try:
            success, output = processblock.apply_transaction(
                self.block, transaction)
        except processblock.InvalidTransaction as e:
            # The prerequisites were not fulfilled and the tx is invalid;
            # the state must not have changed.
            logger.debug('Invalid Transaction %r: %r', transaction, e)
            assert block_state == self.block.state_root
            return False
        if not success:
            logger.debug('transaction %r not applied', transaction)
            assert block_state == self.block.state_root
        else:
            assert transaction in self.block.get_transactions()
            logger.debug(
                'transaction %r applied to %r res: %r',
                transaction, self.block, output)
            assert block_state != self.block.state_root
        return True

    def get_transactions(self):
        # Transactions already applied to the candidate block.
        return self.block.get_transactions()

    def mine(self, steps=1000):
        """
        It is formally defined as PoW: PoW(H, n) = BE(SHA3(SHA3(RLP(Hn)) o n))
        where:
        RLP(Hn) is the RLP encoding of the block header H, not including the
        final nonce component;
        SHA3 is the SHA3 hash function accepting an arbitrary length series of
        bytes and evaluating to a series of 32 bytes (i.e. 256-bit);
        n is the nonce, a series of 32 bytes;
        o is the series concatenation operator;
        BE(X) evaluates to the value equal to X when interpreted as a
        big-endian-encoded integer.

        Tries at most `steps` nonce values; returns the mined block on
        success, False otherwise.
        """
        # 32-byte nonce: zero padding followed by an 8-byte big-endian int.
        nonce_bin_prefix = '\x00' * (32 - len(struct.pack('>q', 0)))
        # A hash below this threshold satisfies the difficulty
        # (Python 2 integer division).
        target = 2 ** 256 / self.block.difficulty
        rlp_Hn = self.block.serialize_header_without_nonce()
        for nonce in range(self.nonce, self.nonce + steps):
            nonce_bin = nonce_bin_prefix + struct.pack('>q', nonce)
            # BE(SHA3(SHA3(RLP(Hn)) o n))
            h = utils.sha3(utils.sha3(rlp_Hn) + nonce_bin)
            l256 = utils.big_endian_to_int(h)
            if l256 < target:
                self.block.nonce = nonce_bin
                assert self.block.check_proof_of_work(self.block.nonce) is True
                assert self.block.get_parent()
                logger.debug(
                    'Nonce found %d %r', nonce, self.block)
                return self.block
            # Remember progress so the next mine() call resumes here
            # (the last tried nonce is re-tested on the next call).
            self.nonce = nonce
        return False
class ChainManager(StoppableLoopThread):

    """
    Manages the chain and requests to it.

    Owns the block database, the current head pointer, a child index used
    to find uncles, and the Miner working on top of the head.
    """

    def __init__(self):
        super(ChainManager, self).__init__()
        # initialized after configure
        self.miner = None
        self.blockchain = None
        self._children_index = None

    def configure(self, config, genesis=None):
        """Open the block DB, optionally seed it with `genesis`, start mining."""
        self.config = config
        logger.info('Opening chain @ %s', utils.get_db_path())
        self.blockchain = DB(utils.get_db_path())
        self._children_index = indexdb.Index('ci')
        if genesis:
            self._initialize_blockchain(genesis)
        logger.debug('Chain @ #%d %s', self.head.number, self.head.hex_hash())
        self.log_chain()
        self.new_miner()

    @property
    def head(self):
        """Block at the tip of the canonical chain (creates genesis on first use)."""
        if 'HEAD' not in self.blockchain:
            self._initialize_blockchain()
        ptr = self.blockchain.get('HEAD')
        return blocks.get_block(ptr)

    def _update_head(self, block):
        # Persist the new head pointer and restart mining on top of it.
        self.blockchain.put('HEAD', block.hash)
        self.blockchain.commit()
        self.new_miner()  # reset mining

    def get(self, blockhash):
        assert isinstance(blockhash, str)
        assert len(blockhash) == 32
        return blocks.get_block(blockhash)

    def has_block(self, blockhash):
        assert isinstance(blockhash, str)
        assert len(blockhash) == 32
        return blockhash in self.blockchain

    def __contains__(self, blockhash):
        return self.has_block(blockhash)

    def _store_block(self, block):
        self.blockchain.put(block.hash, block.serialize())
        self.blockchain.commit()

    def _initialize_blockchain(self, genesis=None):
        logger.info('Initializing new chain @ %s', utils.get_db_path())
        if not genesis:
            genesis = blocks.genesis()
        self._store_block(genesis)
        self._update_head(genesis)

    def synchronize_newer_blockchain(self):
        """Ask peers for blocks that are children of our current head."""
        logger.info('sync newer request for head %r', self.head)
        signals.remote_chain_requested.send(
            sender=None, parents=[self.head.hash], count=NUM_BLOCKS_PER_REQUEST)

    def synchronize_older_blockchain(self, block_number):
        """Ask peers for ancestors around `block_number`.

        block_number: number of the block we search the parent for.
        """
        # seek 1st possible branching block
        logger.info('sync older request for parent of block #%r', block_number)
        blk = self.head
        while blk.number > block_number:
            blk = blk.get_parent()
        # collect blocks
        requested = []
        while len(requested) < NUM_BLOCKS_PER_REQUEST and blk.has_parent():
            blk = blk.get_parent()
            requested.append(blk)
        logger.debug('requesting %d blocks', len(requested))
        # newest first; GetChain will try to answer w/ older ones if the
        # newest is not in the canonical chain. Expected answer is the
        # first here-known block in the canonical chain.
        signals.remote_chain_requested.send(sender=None,
            parents=[b.hash for b in requested], count=NUM_BLOCKS_PER_REQUEST)

    def loop_body(self):
        # Mine for a slice of time proportional to the configured CPU share.
        ts = time.time()
        pct_cpu = self.config.getint('misc', 'mining')
        if pct_cpu > 0:
            self.mine()
            delay = (time.time() - ts) * (100. / pct_cpu - 1)
            time.sleep(min(delay, 1.))
        else:
            time.sleep(.01)

    def new_miner(self):
        "new miner is initialized if HEAD is updated"
        uncles = self.get_uncles(self.head)
        miner = Miner(self.head, uncles, self.config.get('wallet', 'coinbase'))
        if self.miner:
            # carry pending transactions over to the new candidate block
            for tx in self.miner.get_transactions():
                miner.add_transaction(tx)
        self.miner = miner

    def mine(self):
        with self.lock:
            block = self.miner.mine()
            if block:
                # create new block
                self.add_block(block)
                logger.debug("broadcasting new %r" % block)
                signals.send_local_blocks.send(
                    sender=None, blocks=[block])

    def receive_chain(self, transient_blocks, disconnect_cb=None):
        """Deserialize and add blocks received from a peer.

        Triggers further synchronization when a parent is missing or the
        head advanced; disconnects the peer on an incompatible genesis.
        """
        old_head = self.head
        with self.lock:
            # assuming to receive chain order w/ newest block first
            for t_block in reversed(transient_blocks):
                logger.debug('Trying to deserialize %r', t_block)
                logger.debug(t_block.rlpdata.encode('hex'))
                try:
                    block = blocks.Block.deserialize(t_block.rlpdata)
                except processblock.InvalidTransaction as e:
                    logger.debug(
                        'Malicious %r w/ invalid Transaction %r', t_block, e)
                    continue
                except blocks.UnknownParentException:
                    if t_block.prevhash == blocks.GENESIS_PREVHASH:
                        logger.debug('Rec Incompatible Genesis %r', t_block)
                        if disconnect_cb:
                            disconnect_cb(reason='Wrong genesis block')
                    else:
                        logger.debug('%s with unknown parent', t_block)
                        if t_block.number > self.head.number:
                            self.synchronize_newer_blockchain()
                        else:
                            logger.debug(
                                'Need parent of %s', t_block)
                            self.synchronize_older_blockchain(t_block.number)
                    break
                if block.hash in self:
                    logger.debug('Known %r', block)
                else:
                    if block.has_parent():
                        success = self.add_block(block)
                        if success:
                            logger.debug('Added %r', block)
                    else:
                        logger.debug('Orphant %r', block)
        if self.head != old_head:
            self.synchronize_newer_blockchain()

    def add_block(self, block):
        "returns True if block was added successfully"
        # make sure we know the parent
        if not block.has_parent() and not block.is_genesis():
            logger.debug('Missing parent for block %r', block)
            return False
        # make sure we know the uncles
        for uncle_hash in block.uncles:
            if uncle_hash not in self:
                logger.debug('Missing uncle for block %r', block)
                return False
        # check PoW
        if not len(block.nonce) == 32:
            logger.debug('Nonce not set %r', block)
            return False
        elif not block.check_proof_of_work(block.nonce) and\
                not block.is_genesis():
            logger.debug('Invalid nonce %r', block)
            return False
        if block.has_parent():
            try:
                processblock.verify(block, block.get_parent())
            except AssertionError as e:
                # BUG FIX: verify() used to be called a second time here
                # (leftover debugging), which re-raised the AssertionError
                # and made this `return False` unreachable -- a failing
                # block crashed the caller instead of being rejected.
                logger.debug('verification failed: %s', str(e))
                return False
        self._children_index.append(block.prevhash, block.hash)
        self._store_block(block)
        # set to head if this makes the longest chain w/ most work
        if block.chain_difficulty() > self.head.chain_difficulty():
            logger.debug('New Head %r', block)
            self._update_head(block)
        return True

    def get_children(self, block):
        return [self.get(c) for c in self._children_index.get(block.hash)]

    def get_uncles(self, block):
        """Siblings of `block`'s parent, i.e. other children of its grandparent."""
        if not block.has_parent():
            return []
        parent = block.get_parent()
        if not parent.has_parent():
            return []
        return [u for u in self.get_children(parent.get_parent())
                if u != parent]

    def add_transaction(self, transaction):
        logger.debug("add transaction %r" % transaction)
        with self.lock:
            res = self.miner.add_transaction(transaction)
            if res:
                logger.debug("broadcasting valid %r" % transaction)
                signals.send_local_transactions.send(
                    sender=None, transactions=[transaction])

    def get_transactions(self):
        logger.debug("get_transactions called")
        return self.miner.get_transactions()

    def get_chain(self, start='', count=NUM_BLOCKS_PER_REQUEST):
        "return 'count' blocks starting from head or start"
        logger.debug("get_chain: start:%s count%d", start.encode('hex'), count)
        # NOTE: this local shadows the `blocks` module inside this method.
        blocks = []
        block = self.head
        if start:
            if start not in self:
                return []
            block = self.get(start)
            if not self.in_main_branch(block):
                return []
        for i in range(count):
            blocks.append(block)
            if block.is_genesis():
                break
            block = block.get_parent()
        return blocks

    def in_main_branch(self, block):
        if block.is_genesis():
            return True
        # A block is canonical iff it is its parent's canonical descendent.
        return block == self.get_descendents(block.get_parent(), count=1)[0]

    def get_descendents(self, block, count=1):
        """Canonical-chain descendents of `block`, oldest first, at most `count`."""
        logger.debug("get_descendents: %r ", block)
        assert block.hash in self
        # FIXME inefficient implementation: walks back from the head
        res = []
        cur = self.head
        while cur != block:
            res.append(cur)
            if cur.has_parent():
                cur = cur.get_parent()
            else:
                break
            if cur.number == block.number and cur != block:
                # no descendents on main branch
                logger.debug("no descendents on main branch for: %r ", block)
                return []
        res.reverse()
        return res[:count]

    def log_chain(self):
        num = self.head.number + 1
        for b in reversed(self.get_chain(count=num)):
            logger.debug(b)
            for tx in b.get_transactions():
                logger.debug('\t%r', tx)
chain_manager = ChainManager()
@receiver(signals.local_chain_requested)
def handle_local_chain_requested(sender, peer, block_hashes, count, **kwargs):
    """
    [0x14, Parent1, Parent2, ..., ParentN, Count]
    Request the peer to send Count (to be interpreted as an integer) blocks
    in the current canonical block chain that are children of Parent1
    (to be interpreted as a SHA3 block hash). If Parent1 is not present in
    the block chain, it should instead act as if the request were for Parent2
    &c. through to ParentN.

    If none of the parents are in the current
    canonical block chain, then NotInChain should be sent along with ParentN
    (i.e. the last Parent in the parents list).

    If the designated parent is the present block chain head,
    an empty reply should be sent.

    If no parents are passed, then reply need not be made.
    """
    logger.debug(
        "local_chain_requested: %r %d",
        [b.encode('hex') for b in block_hashes], count)
    found_blocks = []
    # Try each parent in order; answer with the descendents of the first
    # one we know about.
    for i, b in enumerate(block_hashes):
        if b in chain_manager:
            block = chain_manager.get(b)
            logger.debug("local_chain_requested: found: %r", block)
            found_blocks = chain_manager.get_descendents(block, count=count)
            if found_blocks:
                logger.debug("sending: found: %r ", found_blocks)
                # if b == head: no descendents == no reply
                with peer.lock:
                    peer.send_Blocks(found_blocks)
                return

    if len(block_hashes):
        # handle genesis special case: genesis has no parent in the chain,
        # so it must not be reported as "not in chain".
        if block_hashes[-1] in chain_manager:
            assert chain_manager.get(block_hashes[-1]).is_genesis()
            block_hashes.pop(-1)
            if not block_hashes:
                return
        assert block_hashes[-1] not in chain_manager
        # If none of the parents are in the current canonical chain,
        # reply NotInChain with the last parent.
        logger.debug(
            "Sending NotInChain: %r", block_hashes[-1].encode('hex')[:4])
        peer.send_NotInChain(block_hashes[-1])
    else:
        # If no parents are passed, then reply need not be made.
        pass
@receiver(signals.config_ready)
def config_chainmanager(sender, config, **kwargs):
    """Wire up the global chain_manager as soon as the config is available."""
    chain_manager.configure(config)
@receiver(signals.peer_handshake_success)
def new_peer_connected(sender, peer, **kwargs):
    """After a successful handshake, ask the peer for its transactions and chain."""
    logger.debug("received new_peer_connected")
    # request transactions
    with peer.lock:
        logger.debug("send get transactions")
        peer.send_GetTransactions()
    # request chain (our most recent block hashes serve as the parents list;
    # note this local `blocks` shadows the `blocks` module in this function)
    blocks = [b.hash for b in chain_manager.get_chain(
        count=NUM_BLOCKS_PER_REQUEST)]
    with peer.lock:
        peer.send_GetChain(blocks, count=NUM_BLOCKS_PER_REQUEST)
        logger.debug("send get chain %r", [b.encode('hex') for b in blocks])
@receiver(signals.remote_transactions_received)
def remote_transactions_received_handler(sender, transactions, **kwargs):
    """Re-serialize rlp.decoded transaction data and feed it to the miner."""
    deserialized = []
    for tx_data in transactions:
        deserialized.append(Transaction.deserialize(rlp.encode(tx_data)))
    logger.debug('remote_transactions_received: %r', deserialized)
    for tx in deserialized:
        chain_manager.add_transaction(tx)
@receiver(signals.local_transaction_received)
def local_transaction_received_handler(sender, transaction, **kwargs):
    "receives transaction object"
    # Already a Transaction object (unlike the remote handler, which must
    # deserialize first); hand it straight to the chain manager.
    logger.debug('local_transaction_received: %r', transaction)
    chain_manager.add_transaction(transaction)
@receiver(signals.gettransactions_received)
def gettransactions_received_handler(sender, peer, **kwargs):
    """Reply to a GetTransactions request with our pending transactions."""
    serialized = [rlp.decode(tx.serialize())
                  for tx in chain_manager.get_transactions()]
    peer.send_Transactions(serialized)
@receiver(signals.remote_blocks_received)
def remote_blocks_received_handler(sender, transient_blocks, peer, **kwargs):
    # Feed blocks received from a peer into the chain; receive_chain will
    # use the callback to disconnect the peer on an incompatible genesis.
    logger.debug("recv %d remote blocks: %r", len(
        transient_blocks), transient_blocks)
    chain_manager.receive_chain(
        transient_blocks, disconnect_cb=peer.send_Disconnect)
| elkingtowa/azove | azove/chainmanager.py | Python | mit | 17,385 |
# -*- coding: utf-8 -*-
from django.db import migrations, models
import django.db.models.deletion
from django.conf import settings
class Migration(migrations.Migration):
    """Initial schema for the dark_lang app: creates the DarkLangConfig model."""

    dependencies = [
        # DarkLangConfig.changed_by is a FK to the (swappable) user model.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='DarkLangConfig',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('change_date', models.DateTimeField(auto_now_add=True, verbose_name='Change date')),
                ('enabled', models.BooleanField(default=False, verbose_name='Enabled')),
                ('released_languages', models.TextField(help_text=u'A comma-separated list of language codes to release to the public.', blank=True)),
                # PROTECT prevents deleting a user who authored a config row.
                ('changed_by', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, editable=False, to=settings.AUTH_USER_MODEL, null=True, verbose_name='Changed by')),
            ],
            options={
                # newest configuration first
                'ordering': ('-change_date',),
                'abstract': False,
            },
        ),
    ]
| stvstnfrd/edx-platform | openedx/core/djangoapps/dark_lang/migrations/0001_initial.py | Python | agpl-3.0 | 1,164 |
from pe3 import listPrimeFactors
def nSmallestMultiples(n):
    """Return a multiset (as a list) of prime factors whose product is
    lcm(1, 2, ..., n) -- the smallest number evenly divisible by 1..n.

    For each i from n down to 1 the prime factorization of i is merged
    into the accumulated factor list, appending only the factors not
    already available, i.e. keeping each prime at its maximum multiplicity.
    """
    multipliers = []
    for i in range(n, 0, -1):
        # Factors collected so far that are still free to "cover" i.
        unmatched = list(multipliers)
        for factor in listPrimeFactors(i):
            if factor in unmatched:
                # Already covered by an earlier number; consume one copy.
                unmatched.remove(factor)
            else:
                multipliers.append(factor)
    return multipliers
if __name__ == "__main__":
    # Project Euler 5: smallest positive number evenly divisible by 1..20.
    limit = 20
    product = 1
    for factor in nSmallestMultiples(limit):
        product *= factor
    print(product)  # expected: 232792560
| henxing/projecteulerslns | python/pe005.py | Python | mit | 742 |
# Future
# Django
from django.db import models
from django.utils.translation import ugettext_lazy as _
# Third Party
from image_cropping import ImageRatioField
class TeamMember(models.Model):
    """A person shown on the site's team page, sorted by `position`."""

    name = models.CharField(_("Name"), max_length=150)
    job = models.CharField(_("Job"), max_length=150)
    info = models.TextField(_("Info"), max_length=1000)
    image = models.ImageField(upload_to="images/team", blank=True, default="")
    # django-image-cropping field: stores crop coordinates for `image`
    # at a 400x400 aspect ratio.
    cropped = ImageRatioField("image", "400x400")
    # Manual sort order; used by Meta.ordering below.
    position = models.PositiveIntegerField(default=0, blank=False, null=False)

    def __str__(self):
        return self.name

    def __unicode__(self):
        # Python 2 counterpart of __str__.
        return self.name

    def admin_image(self):
        """Thumbnail HTML for the admin change list (placeholder if no image)."""
        img = "http://placehold.it/75x75" if not self.image else self.image.url
        return '<img src="%s" height="75"/>' % img
    # Tell the (pre-2.0 Django) admin not to escape the returned HTML.
    admin_image.allow_tags = True

    class Meta:
        ordering = ("position",)
        verbose_name = _("Team Member")
        verbose_name_plural = _("Team Members")
| bengosney/romrescue.org | team/models.py | Python | gpl-3.0 | 1,004 |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
# Search
from __future__ import unicode_literals
import frappe, json
from frappe.utils import cstr, unique
from frappe import _
from six import string_types
# this is called by the Link Field
@frappe.whitelist()
def search_link(doctype, txt, query=None, filters=None, page_length=20, searchfield=None):
	"""Run a link-field search and publish it as autosuggest results."""
	search_widget(doctype, txt, query, searchfield=searchfield,
		page_length=page_length, filters=filters)
	# Move the raw rows out of "values" and into "results" in one step.
	frappe.response['results'] = build_for_autosuggest(frappe.response.pop("values"))
# this is called by the search box
@frappe.whitelist()
def search_widget(doctype, txt, query=None, searchfield=None, start=0,
	page_length=10, filters=None, filter_fields=None, as_dict=False):
	"""Populate ``frappe.response["values"]`` with search results for
	*doctype* matching *txt*.

	Dispatch order: an explicit non-SQL *query* is treated as a dotted
	method path and called; otherwise a hook-registered standard query
	for the doctype is used; otherwise results are built from the
	doctype's own search fields. Raw SQL queries are rejected.
	"""
	if isinstance(filters, string_types):
		filters = json.loads(filters)
	meta = frappe.get_meta(doctype)
	if not searchfield:
		searchfield = "name"
	standard_queries = frappe.get_hooks().standard_queries or {}
	if query and query.split()[0].lower()!="select":
		# by method
		frappe.response["values"] = frappe.call(query, doctype, txt,
			searchfield, start, page_length, filters, as_dict=as_dict)
	elif not query and doctype in standard_queries:
		# from standard queries
		search_widget(doctype, txt, standard_queries[doctype][0],
			searchfield, start, page_length, filters)
	else:
		if query:
			frappe.throw(_("This query style is discontinued"))
			# custom query
			# frappe.response["values"] = frappe.db.sql(scrub_custom_query(query, searchfield, txt))
		else:
			# Normalize a dict of filters into the list-of-lists form
			# expected by frappe.get_list.
			if isinstance(filters, dict):
				filters_items = filters.items()
				filters = []
				for f in filters_items:
					if isinstance(f[1], (list, tuple)):
						filters.append([doctype, f[0], f[1][0], f[1][1]])
					else:
						filters.append([doctype, f[0], "=", f[1]])
			if filters==None:
				filters = []
			or_filters = []
			# build from doctype
			if txt:
				# Match the text against every text-like search field.
				search_fields = ["name"]
				if meta.title_field:
					search_fields.append(meta.title_field)
				if meta.search_fields:
					search_fields.extend(meta.get_search_fields())
				for f in search_fields:
					fmeta = meta.get_field(f.strip())
					if f == "name" or (fmeta and fmeta.fieldtype in ["Data", "Text", "Small Text", "Long Text",
						"Link", "Select", "Read Only", "Text Editor"]):
						or_filters.append([doctype, f.strip(), "like", "%{0}%".format(txt)])
			# Honour enabled/disabled flags when the doctype defines them.
			if meta.get("fields", {"fieldname":"enabled", "fieldtype":"Check"}):
				filters.append([doctype, "enabled", "=", 1])
			if meta.get("fields", {"fieldname":"disabled", "fieldtype":"Check"}):
				filters.append([doctype, "disabled", "!=", 1])
			# format a list of fields combining search fields and filter fields
			fields = get_std_fields_list(meta, searchfield or "name")
			if filter_fields:
				fields = list(set(fields + json.loads(filter_fields)))
			formatted_fields = ['`tab%s`.`%s`' % (meta.name, f.strip()) for f in fields]
			# find relevance as location of search term from the beginning of string `name`. used for sorting results.
			formatted_fields.append("""locate("{_txt}", `tab{doctype}`.`name`) as `_relevance`""".format(
				_txt=frappe.db.escape((txt or "").replace("%", "")), doctype=frappe.db.escape(doctype)))
			# In order_by, `idx` gets second priority, because it stores link count
			from frappe.model.db_query import get_order_by
			order_by_based_on_meta = get_order_by(doctype, meta)
			order_by = "if(_relevance, _relevance, 99999), idx desc, {0}".format(order_by_based_on_meta)
			values = frappe.get_list(doctype,
				filters=filters, fields=formatted_fields,
				or_filters = or_filters, limit_start = start,
				limit_page_length=page_length,
				order_by=order_by,
				ignore_permissions = True if doctype == "DocType" else False, # for dynamic links
				as_list=not as_dict)
			# remove _relevance from results
			if as_dict:
				for r in values:
					r.pop("_relevance")
				frappe.response["values"] = values
			else:
				frappe.response["values"] = [r[:-1] for r in values]
def get_std_fields_list(meta, key):
	"""Combine "name", the doctype's search fields, its title field and
	*key* into an ordered field list (duplicates of *key* avoided)."""
	extra = meta.search_fields.split(",") if meta.search_fields else []
	fields = ["name"] + extra
	# NB: the title field is only de-duplicated against the extra search
	# fields, matching the historical behaviour.
	if meta.title_field and meta.title_field not in extra:
		fields.append(meta.title_field)
	if key not in fields:
		fields.append(key)
	return fields
def build_for_autosuggest(res):
	"""Map raw result rows to {value, description} dicts for the UI.

	The first column becomes the value; the remaining non-empty,
	de-duplicated columns are joined into the description.
	"""
	return [{
		"value": row[0],
		"description": ", ".join(unique(cstr(cell) for cell in row if cell)[1:]),
	} for row in res]
def scrub_custom_query(query, key, txt):
	"""Substitute the legacy ``%(key)s`` and ``%s`` placeholders in a
	custom query: the key name first, then the LIKE pattern ``txt%``."""
	for placeholder, replacement in (('%(key)s', key), ('%s', (txt or '') + '%')):
		if placeholder in query:
			query = query.replace(placeholder, replacement)
	return query
| bohlian/frappe | frappe/desk/search.py | Python | mit | 4,772 |
import os
from wb import preprocess, home_fn, autogen, dict_def
def main(config):
    """Generate the tap-win32 build files from their .in templates.

    Produces SOURCES and the i386 OemWin2k.inf, then a second inf for
    amd64 with the AMD64 define set. Substitutions are quoted with @@.
    """
    preprocess(config,
               in_fn=home_fn('tap-win32/SOURCES.in'),
               out_fn=home_fn('tap-win32/SOURCES'),
               quote_begin='@@',
               quote_end='@@',
               head_comment='# %s\n\n' % autogen)
    preprocess(config,
               in_fn=home_fn('tap-win32/i386/OemWin2k.inf.in'),
               out_fn=home_fn('tap-win32/i386/OemWin2k.inf'),
               quote_begin='@@',
               quote_end='@@',
               if_prefix='!',
               head_comment='; %s\n\n' % autogen)
    # Tolerate the directory already existing; unlike the original bare
    # "except", do not hide unrelated failures (e.g. permission errors
    # would previously have been swallowed too).
    try:
        os.mkdir(home_fn('tap-win32/amd64'))
    except OSError:
        pass
    preprocess(dict_def(config, [('AMD64', '1')]),
               in_fn=home_fn('tap-win32/i386/OemWin2k.inf.in'),
               out_fn=home_fn('tap-win32/amd64/OemWin2k.inf'),
               quote_begin='@@',
               quote_end='@@',
               if_prefix='!',
               head_comment='; %s\n\n' % autogen)
# if we are run directly, and not loaded as a module
if __name__ == "__main__":
    # Pull the build configuration from wb and regenerate the TAP files.
    from wb import config
    main(config)
| jjo/openvpn-ipv6-legacy21 | win/config_tap.py | Python | gpl-2.0 | 1,181 |
# -*- coding: utf-8 -*-
import os
def get(key):
    """Return the value of environment variable ``PRJ_<KEY>``, or None.

    *key* is upper-cased and prefixed with ``PRJ_`` before the lookup,
    so ``get("debug")`` reads ``PRJ_DEBUG``.
    """
    # dict.get gives the same None-on-missing behaviour as the original
    # explicit membership test, in a single lookup.
    return os.environ.get("PRJ_%s" % key.upper())
| caiyunapp/gaia | gaia/environ.py | Python | mit | 182 |
#!/usr/bin/env python
import re
from setuptools import setup, find_packages
from $PROJECT$.constants import __PROJECT__, __DESCRIPTION__, __VERSION__
def _get_requirements(path):
    """Read a pip requirements file and return the list of non-empty,
    non-comment requirement lines (whitespace stripped).

    Raises RuntimeError if the file cannot be opened.
    """
    try:
        with open(path) as f:
            packages = f.read().splitlines()
    except (IOError, OSError) as ex:
        # Bug fix: the original passed the format string and the argument
        # as two separate args, so the message was never interpolated.
        raise RuntimeError("Can't open file with requirements: %r" % ex)
    # Raw string for the regex; drop comment lines (leading whitespace
    # allowed), then drop lines that are empty after stripping.
    packages = (p.strip() for p in packages if not re.match(r"^\s*#", p))
    return list(filter(None, packages))
def _install_requirements():
    """Requirements consumed by ``install_requires`` below."""
    return _get_requirements('requirements.txt')
# Distribution metadata; name, description and version are single-sourced
# from the project's constants module imported above.
setup(
    name = __PROJECT__,
    version = __VERSION__,
    description = __DESCRIPTION__,
    author = "Vaclav Pavlin",
    author_email = '[email protected]',
    url = '',
    license = "LGPL3",
    # Expose <project>.cli.main:main as a console command named after
    # the project itself.
    entry_points = {
        'console_scripts': ['%s=%s.cli.main:main' % (__PROJECT__, __PROJECT__)],
    },
    packages = find_packages(),
    install_requires = _install_requirements()
)
| vpavlin/python-project | setup.py | Python | lgpl-3.0 | 1,022 |
#!/usr/bin/python
# check_glider_netcdf.py - Verifies that a glider NetCDF file from a provider
# contains all the required global attributes, dimensions, scalar variables
# and dimensioned variables. Prints out missing items.
#
# Returns:
# 0 - File complies to NGDAC standard
# 1+ - Number of errors
#
# By: Michael Lindemuth <[email protected]>
# University of South Florida
# College of Marine Science
# Ocean Technology Group
import argparse
import sys
from os import path
import json
from netCDF4 import Dataset
def test_global_attributes(nc, requirements):
""" Tests for required global attributes
"""
retVal = 0
global_attributes = nc.ncattrs()
for req_attribute in requirements['global_attributes']:
if req_attribute not in global_attributes:
print "Global Attribute Missing: %s" % (req_attribute)
retVal += 1
return retVal
def test_dimensions(nc, requirements):
""" Tests for required dimensions
"""
retVal = 0
for req_dimension in requirements['dimensions']:
if req_dimension not in nc.dimensions:
print "Dimension Missing: %s" % (req_dimension)
retVal += 1
return retVal
def test_required_variables(nc, requirements):
""" Tests for required variables
"""
retVal = 0
for req_variable in requirements['required_variables']:
variables = nc.variables
if req_variable not in variables:
print "Missing required variable %s" % req_variable
retVal += 1
return retVal
def test_variable_attributes(nc, requirements):
""" Tests for required variable attributes
"""
retVal = 0
for variable_name in nc.variables:
# Skip QC variables
if variable_name[-2:] == "qc":
continue
# Ignore configured variables
if variable_name in requirements['ignore_variable_check']:
continue
variable = nc.variables[variable_name]
# Skip scalar and descriptive variables
if variable.size < 2:
continue
var_attrs = nc.variables[variable_name].ncattrs()
for req_var_attr in requirements['variable_attributes']:
if req_var_attr not in var_attrs:
print("Variable attribute %s "
"missing in %s variable" % (req_var_attr, variable_name))
retVal += 1
return retVal
def test_qc_variables(nc, requirements):
""" Tests that all variables have a corresponding qc variable
"""
retVal = 0
for variable_name in nc.variables:
# Skip QC variables
if variable_name[-2:] == "qc":
continue
# Ignore configured variables
if variable_name in requirements['ignore_variable_check']:
continue
variable = nc.variables[variable_name]
if variable.size < 2:
continue
qc_name = "%s_qc" % variable_name
if qc_name not in nc.variables:
print("QC variable missing for %s" % variable_name)
retVal += 1
return retVal
def test_platform_attributes(nc, requirements):
""" Tests for required platform attributes
"""
retVal = 0
platform_attrs = nc.variables['platform'].ncattrs()
for req_platform_attr in requirements['platform_attributes']:
if req_platform_attr not in platform_attrs:
print "Platform attribute %s missing" % req_platform_attr
retVal += 1
return retVal
def test_ctd_attributes(nc, requirements):
""" Tests for required ctd attributes
"""
retVal = 0
ctd_attrs = nc.variables['instrument_ctd'].ncattrs()
for req_ctd_attr in requirements['ctd_attributes']:
if req_ctd_attr not in ctd_attrs:
print "CTD attribute %s missing" % req_ctd_attr
retVal += 1
return retVal
# All compliance checks, run in order by main(); each takes (nc,
# requirements) and returns its error count.
test_functions = [
    test_global_attributes,
    test_dimensions,
    test_required_variables,
    test_variable_attributes,
    test_qc_variables,
    test_platform_attributes,
    test_ctd_attributes
]
def main():
    """Validate a glider NetCDF file against the NGDAC standard spec.

    Parses command-line arguments, loads the JSON requirements spec and
    the NetCDF file, and runs every check in ``test_functions``.
    Returns the total number of compliance errors (0 means PASS).
    """
    parser = argparse.ArgumentParser(
        description='Verifies that a glider NetCDF file from a provider '
                    'contains all the required global attributes, dimensions,'
                    'scalar variables and dimensioned variables.'
    )

    # Spec file shipped alongside this script by default.
    default_standard_path = (
        path.join(
            path.dirname(__file__),
            '..',
            'etc',
            'glider_DAC-2.0.json'
        )
    )
    parser.add_argument(
        '-s', '--path_to_standard',
        default=default_standard_path
    )
    parser.add_argument(
        'path_to_glider_netcdf',
        help='Path to Glider NetCDF file.'
    )

    args = parser.parse_args()

    # Load requirements spec
    with open(args.path_to_standard, 'r') as f:
        contents = f.read()
    requirements = json.loads(contents)

    # Load NetCDF file
    nc = Dataset(
        args.path_to_glider_netcdf, 'r',
        format='NETCDF4_CLASSIC'
    )

    # Initialize return value
    retVal = 0
    for test_fun in test_functions:
        retVal += test_fun(nc, requirements)

    if retVal == 0:
        # Fixed: was a Python 2-only print statement.
        print("PASS")

    return retVal
# Exit status is the number of compliance errors (0 == compliant).
if __name__ == '__main__':
    sys.exit(main())
| USF-COT/glider_netcdf_writer | scripts/scripts-bin/check_glider_netcdf.py | Python | mit | 5,287 |
"""
Parser for to pofile translation format.
"""
from datetime import datetime
from django.utils import timezone
import polib
from pontoon.base.formats.base import ParsedResource
from pontoon.base.vcs_models import VCSTranslation
class POEntity(VCSTranslation):
    """A single gettext entry exposed through the VCSTranslation API."""

    def __init__(self, po_entry, order):
        self.po_entry = po_entry

        # Plural entries keep a dict of {plural_form: string}; singular
        # entries are normalized to a one-item dict keyed on None.
        if po_entry.msgstr_plural:
            strings = po_entry.msgstr_plural
        else:
            strings = {None: po_entry.msgstr}

        # Remove empty strings from the string dict.
        strings = {key: value for key, value in strings.items() if value}

        super(POEntity, self).__init__(
            key=po_entry.msgid, # Pofiles use the source as the key.
            source_string=po_entry.msgid,
            source_string_plural=po_entry.msgid_plural,
            strings=strings,
            comments=po_entry.comment.split('\n') if po_entry.comment else [],
            fuzzy='fuzzy' in po_entry.flags,
            order=order,
            source=po_entry.occurrences
        )

    def update_entry(self, locale):
        """Update the POEntry associated with this translation."""
        if self.po_entry.msgstr_plural:
            # Write every plural form the locale defines; missing forms
            # fall back to the empty string.
            self.po_entry.msgstr_plural = {
                plural_form: self.strings.get(plural_form, '')
                for plural_form in range(locale.nplurals or 1)
            }
        else:
            self.po_entry.msgstr = self.strings.get(None, '')

        # Keep the pofile's "fuzzy" flag in sync with our fuzzy state.
        if self.fuzzy and 'fuzzy' not in self.po_entry.flags:
            self.po_entry.flags.append('fuzzy')
        elif not self.fuzzy and 'fuzzy' in self.po_entry.flags:
            self.po_entry.flags.remove('fuzzy')

    def __repr__(self):
        return '<POEntity {key}>'.format(key=self.key.encode('utf-8'))
class POResource(ParsedResource):
    """A parsed pofile: wraps polib's pofile and its non-obsolete entries."""

    def __init__(self, pofile):
        self.pofile = pofile
        # Obsolete (commented-out) entries are not editable translations.
        self.entities = [
            POEntity(entry, k) for k, entry in enumerate(self.pofile)
            if not entry.obsolete
        ]

    @property
    def translations(self):
        return self.entities

    def save(self, locale):
        """Write all translations back to disk, refreshing the metadata
        (revision date, last translator, language, plural rules)."""
        for entity in self.translations:
            entity.update_entry(locale)

        metadata = self.pofile.metadata
        if len(self.translations) > 0:
            # Stamp the header with the most recent translation's info;
            # entries without a date sort as datetime.min (aware).
            latest_translation = max(
                self.translations,
                key=lambda t: t.last_updated or timezone.make_aware(datetime.min)
            )
            if latest_translation.last_updated:
                metadata['PO-Revision-Date'] = latest_translation.last_updated.strftime(
                    '%Y-%m-%d %H:%M%z'
                )
            if latest_translation.last_translator:
                metadata['Last-Translator'] = latest_translation.last_translator.display_name
        metadata.update({
            'Language': locale.code.replace('-', '_'),
            'X-Generator': 'Pontoon',
            'Plural-Forms': ('nplurals={locale.nplurals}; plural={locale.plural_rule};'
                             .format(locale=locale))
        })

        self.pofile.save()

    def __repr__(self):
        return '<POResource {self.pofile.fpath}>'.format(self=self)
def parse(path, source_path=None):
    """Parse the pofile at *path* into a POResource.

    *source_path* is accepted for interface parity with the other
    format parsers and is unused for gettext files.
    """
    return POResource(polib.pofile(path))
| vivekanand1101/pontoon | pontoon/base/formats/po.py | Python | bsd-3-clause | 3,269 |
## @file
# This file is used to create a database used by build tool
#
# Copyright (c) 2008 - 2011, Intel Corporation. All rights reserved.<BR>
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
##
# Import Modules
#
import sqlite3
import os
import os.path
import pickle
import uuid
import Common.EdkLogger as EdkLogger
import Common.GlobalData as GlobalData
from Common.String import *
from Common.DataType import *
from Common.Misc import *
from types import *
from CommonDataClass.CommonClass import SkuInfoClass
from MetaDataTable import *
from MetaFileTable import *
from MetaFileParser import *
from BuildClassObject import *
## Platform build information from DSC file
#
# This class is used to retrieve information stored in database and convert them
# into PlatformBuildClassObject form for easier use for AutoGen.
#
class DscBuildData(PlatformBuildClassObject):
    """Platform build information retrieved from the DSC database,
    presented as a PlatformBuildClassObject for AutoGen.

    All members are computed lazily: getters cache into underscore
    attributes which _Clear() resets to None.
    """
    # dict used to convert PCD type in database to string used by build tool
    _PCD_TYPE_STRING_ = {
        MODEL_PCD_FIXED_AT_BUILD : "FixedAtBuild",
        MODEL_PCD_PATCHABLE_IN_MODULE : "PatchableInModule",
        MODEL_PCD_FEATURE_FLAG : "FeatureFlag",
        MODEL_PCD_DYNAMIC : "Dynamic",
        MODEL_PCD_DYNAMIC_DEFAULT : "Dynamic",
        MODEL_PCD_DYNAMIC_HII : "DynamicHii",
        MODEL_PCD_DYNAMIC_VPD : "DynamicVpd",
        MODEL_PCD_DYNAMIC_EX : "DynamicEx",
        MODEL_PCD_DYNAMIC_EX_DEFAULT : "DynamicEx",
        MODEL_PCD_DYNAMIC_EX_HII : "DynamicExHii",
        MODEL_PCD_DYNAMIC_EX_VPD : "DynamicExVpd",
    }

    # dict used to convert part of [Defines] to members of DscBuildData directly
    # (keys not listed here receive special handling in _GetHeaderInfo)
    _PROPERTY_ = {
        #
        # Required Fields
        #
        TAB_DSC_DEFINES_PLATFORM_NAME : "_PlatformName",
        TAB_DSC_DEFINES_PLATFORM_GUID : "_Guid",
        TAB_DSC_DEFINES_PLATFORM_VERSION : "_Version",
        TAB_DSC_DEFINES_DSC_SPECIFICATION : "_DscSpecification",
        #TAB_DSC_DEFINES_OUTPUT_DIRECTORY : "_OutputDirectory",
        #TAB_DSC_DEFINES_SUPPORTED_ARCHITECTURES : "_SupArchList",
        #TAB_DSC_DEFINES_BUILD_TARGETS : "_BuildTargets",
        #TAB_DSC_DEFINES_SKUID_IDENTIFIER : "_SkuName",
        #TAB_DSC_DEFINES_FLASH_DEFINITION : "_FlashDefinition",
        TAB_DSC_DEFINES_BUILD_NUMBER : "_BuildNumber",
        TAB_DSC_DEFINES_MAKEFILE_NAME : "_MakefileName",
        TAB_DSC_DEFINES_BS_BASE_ADDRESS : "_BsBaseAddress",
        TAB_DSC_DEFINES_RT_BASE_ADDRESS : "_RtBaseAddress",
        #TAB_DSC_DEFINES_RFC_LANGUAGES : "_RFCLanguages",
        #TAB_DSC_DEFINES_ISO_LANGUAGES : "_ISOLanguages",
    }

    # used to compose dummy library class name for those forced library instances
    _NullLibraryNumber = 0
## Constructor of DscBuildData
#
# Initialize object of DscBuildData
#
# @param FilePath The path of platform description file
# @param RawData The raw data of DSC file
# @param BuildDataBase Database used to retrieve module/package information
# @param Arch The target architecture
# @param Platform (not used for DscBuildData)
# @param Macros Macros used for replacement in DSC file
#
    def __init__(self, FilePath, RawData, BuildDataBase, Arch='COMMON', Target=None, Toolchain=None):
        self.MetaFile = FilePath
        self._RawData = RawData         # parsed DSC table (MetaFileParser output)
        self._Bdb = BuildDataBase       # workspace database for module/package lookups
        self._Arch = Arch
        self._Target = Target
        self._Toolchain = Toolchain
        # Initialize all lazy caches to None.
        self._Clear()
    ## XXX[key] = value
    def __setitem__(self, key, value):
        # Map the [Defines] key to its backing attribute via _PROPERTY_.
        self.__dict__[self._PROPERTY_[key]] = value

    ## value = XXX[key]
    def __getitem__(self, key):
        return self.__dict__[self._PROPERTY_[key]]

    ## "in" test support
    def __contains__(self, key):
        # True only for [Defines] keys handled directly via _PROPERTY_.
        return key in self._PROPERTY_
## Set all internal used members of DscBuildData to None
    def _Clear(self):
        """Reset every lazily-computed member to None so the next getter
        call re-retrieves it from the database."""
        self._Header            = None
        self._PlatformName      = None
        self._Guid              = None
        self._Version           = None
        self._DscSpecification  = None
        self._OutputDirectory   = None
        self._SupArchList       = None
        self._BuildTargets      = None
        self._SkuName           = None
        self._FlashDefinition   = None
        self._BuildNumber       = None
        self._MakefileName      = None
        self._BsBaseAddress     = None
        self._RtBaseAddress     = None
        self._SkuIds            = None
        self._Modules           = None
        self._LibraryInstances  = None
        self._LibraryClasses    = None
        self._Pcds              = None
        self._BuildOptions      = None
        self._LoadFixAddress    = None
        self._RFCLanguages      = None
        self._ISOLanguages      = None
        self._VpdToolGuid       = None
        self.__Macros           = None
## Get current effective macros
    def _GetMacros(self):
        """Return (and cache) the macros currently in effect: platform
        defines, global defines and command-line defines, in that order
        (later updates win on key collisions)."""
        if self.__Macros == None:
            self.__Macros = {}
            self.__Macros.update(GlobalData.gPlatformDefines)
            self.__Macros.update(GlobalData.gGlobalDefines)
            self.__Macros.update(GlobalData.gCommandLineDefines)
        return self.__Macros
## Get architecture
    def _GetArch(self):
        """Return the currently selected target architecture."""
        return self._Arch

    ## Set architecture
    #
    #   Changing the default ARCH to another may affect all other information
    # because all information in a platform may be ARCH-related. That's
    # why we need to clear all internal used members, in order to cause all
    # information to be re-retrieved.
    #
    #   @param  Value   The value of ARCH
    #
    def _SetArch(self, Value):
        if self._Arch == Value:
            return
        self._Arch = Value
        self._Clear()
## Retrieve all information in [Defines] section
#
# (Retriving all [Defines] information in one-shot is just to save time.)
#
    def _GetHeaderInfo(self):
        """Parse every [Defines] record for the current arch in one pass.

        Keys listed in _PROPERTY_ are stored directly via __setitem__;
        the rest (output dir, FDF, arch list, SKU, languages, VPD tool
        GUID, fix-load address) get validation and special handling.
        Sets self._Header to a sentinel so this only runs once.
        """
        RecordList = self._RawData[MODEL_META_DATA_HEADER, self._Arch]
        for Record in RecordList:
            Name = Record[1]
            # items defined _PROPERTY_ don't need additional processing
            if Name in self:
                self[Name] = Record[2]
            # some special items in [Defines] section need special treatment
            elif Name == TAB_DSC_DEFINES_OUTPUT_DIRECTORY:
                self._OutputDirectory = NormPath(Record[2], self._Macros)
                if ' ' in self._OutputDirectory:
                    EdkLogger.error("build", FORMAT_NOT_SUPPORTED, "No space is allowed in OUTPUT_DIRECTORY",
                                    File=self.MetaFile, Line=Record[-1],
                                    ExtraData=self._OutputDirectory)
            elif Name == TAB_DSC_DEFINES_FLASH_DEFINITION:
                self._FlashDefinition = PathClass(NormPath(Record[2], self._Macros), GlobalData.gWorkspace)
                ErrorCode, ErrorInfo = self._FlashDefinition.Validate('.fdf')
                if ErrorCode != 0:
                    EdkLogger.error('build', ErrorCode, File=self.MetaFile, Line=Record[-1],
                                    ExtraData=ErrorInfo)
            elif Name == TAB_DSC_DEFINES_SUPPORTED_ARCHITECTURES:
                self._SupArchList = GetSplitValueList(Record[2], TAB_VALUE_SPLIT)
            elif Name == TAB_DSC_DEFINES_BUILD_TARGETS:
                self._BuildTargets = GetSplitValueList(Record[2])
            elif Name == TAB_DSC_DEFINES_SKUID_IDENTIFIER:
                # first SKUID_IDENTIFIER wins; later records are ignored
                if self._SkuName == None:
                    self._SkuName = Record[2]
            elif Name == TAB_FIX_LOAD_TOP_MEMORY_ADDRESS:
                try:
                    self._LoadFixAddress = int (Record[2], 0)
                except:
                    EdkLogger.error("build", PARAMETER_INVALID, "FIX_LOAD_TOP_MEMORY_ADDRESS %s is not valid dec or hex string" % (Record[2]))
            elif Name == TAB_DSC_DEFINES_RFC_LANGUAGES:
                # value must be a quoted, semicolon-separated RFC4646 list
                if not Record[2] or Record[2][0] != '"' or Record[2][-1] != '"' or len(Record[2]) == 1:
                    EdkLogger.error('build', FORMAT_NOT_SUPPORTED, 'language code for RFC_LANGUAGES must have double quotes around it, for example: RFC_LANGUAGES = "en-us;zh-hans"',
                                    File=self.MetaFile, Line=Record[-1])
                LanguageCodes = Record[2][1:-1]
                if not LanguageCodes:
                    EdkLogger.error('build', FORMAT_NOT_SUPPORTED, 'one or more RFC4646 format language code must be provided for RFC_LANGUAGES statement',
                                    File=self.MetaFile, Line=Record[-1])
                LanguageList = GetSplitValueList(LanguageCodes, TAB_SEMI_COLON_SPLIT)
                # check whether there is empty entries in the list
                if None in LanguageList:
                    EdkLogger.error('build', FORMAT_NOT_SUPPORTED, 'one or more empty language code is in RFC_LANGUAGES statement',
                                    File=self.MetaFile, Line=Record[-1])
                self._RFCLanguages = LanguageList
            elif Name == TAB_DSC_DEFINES_ISO_LANGUAGES:
                # value is a quoted string of concatenated 3-letter codes
                if not Record[2] or Record[2][0] != '"' or Record[2][-1] != '"' or len(Record[2]) == 1:
                    EdkLogger.error('build', FORMAT_NOT_SUPPORTED, 'language code for ISO_LANGUAGES must have double quotes around it, for example: ISO_LANGUAGES = "engchn"',
                                    File=self.MetaFile, Line=Record[-1])
                LanguageCodes = Record[2][1:-1]
                if not LanguageCodes:
                    EdkLogger.error('build', FORMAT_NOT_SUPPORTED, 'one or more ISO639-2 format language code must be provided for ISO_LANGUAGES statement',
                                    File=self.MetaFile, Line=Record[-1])
                if len(LanguageCodes)%3:
                    EdkLogger.error('build', FORMAT_NOT_SUPPORTED, 'bad ISO639-2 format for ISO_LANGUAGES',
                                    File=self.MetaFile, Line=Record[-1])
                LanguageList = []
                for i in range(0, len(LanguageCodes), 3):
                    LanguageList.append(LanguageCodes[i:i+3])
                self._ISOLanguages = LanguageList
            elif Name == TAB_DSC_DEFINES_VPD_TOOL_GUID:
                #
                # try to convert GUID to a real UUID value to see whether the GUID is format
                # for VPD_TOOL_GUID is correct.
                #
                try:
                    uuid.UUID(Record[2])
                except:
                    EdkLogger.error("build", FORMAT_INVALID, "Invalid GUID format for VPD_TOOL_GUID", File=self.MetaFile)
                self._VpdToolGuid = Record[2]
        # set _Header to non-None in order to avoid database re-querying
        self._Header = 'DUMMY'
## Retrieve platform name
    def _GetPlatformName(self):
        """Return PLATFORM_NAME; error out if the DSC does not define it."""
        if self._PlatformName == None:
            if self._Header == None:
                self._GetHeaderInfo()
            if self._PlatformName == None:
                EdkLogger.error('build', ATTRIBUTE_NOT_AVAILABLE, "No PLATFORM_NAME", File=self.MetaFile)
        return self._PlatformName

    ## Retrieve file guid
    def _GetFileGuid(self):
        """Return PLATFORM_GUID; error out if the DSC does not define it."""
        if self._Guid == None:
            if self._Header == None:
                self._GetHeaderInfo()
            if self._Guid == None:
                EdkLogger.error('build', ATTRIBUTE_NOT_AVAILABLE, "No PLATFORM_GUID", File=self.MetaFile)
        return self._Guid

    ## Retrieve platform version
    def _GetVersion(self):
        """Return PLATFORM_VERSION; error out if the DSC does not define it."""
        if self._Version == None:
            if self._Header == None:
                self._GetHeaderInfo()
            if self._Version == None:
                EdkLogger.error('build', ATTRIBUTE_NOT_AVAILABLE, "No PLATFORM_VERSION", File=self.MetaFile)
        return self._Version

    ## Retrieve platform description file version
    def _GetDscSpec(self):
        """Return DSC_SPECIFICATION; error out if missing."""
        if self._DscSpecification == None:
            if self._Header == None:
                self._GetHeaderInfo()
            if self._DscSpecification == None:
                EdkLogger.error('build', ATTRIBUTE_NOT_AVAILABLE, "No DSC_SPECIFICATION", File=self.MetaFile)
        return self._DscSpecification

    ## Retrieve OUTPUT_DIRECTORY
    def _GetOutpuDir(self):
        """Return OUTPUT_DIRECTORY, defaulting to Build/<PLATFORM_NAME>."""
        if self._OutputDirectory == None:
            if self._Header == None:
                self._GetHeaderInfo()
            if self._OutputDirectory == None:
                self._OutputDirectory = os.path.join("Build", self._PlatformName)
        return self._OutputDirectory
## Retrieve SUPPORTED_ARCHITECTURES
    def _GetSupArch(self):
        """Return SUPPORTED_ARCHITECTURES; error out if missing."""
        if self._SupArchList == None:
            if self._Header == None:
                self._GetHeaderInfo()
            if self._SupArchList == None:
                EdkLogger.error('build', ATTRIBUTE_NOT_AVAILABLE, "No SUPPORTED_ARCHITECTURES", File=self.MetaFile)
        return self._SupArchList

    ## Retrieve BUILD_TARGETS
    def _GetBuildTarget(self):
        """Return BUILD_TARGETS; error out if missing."""
        if self._BuildTargets == None:
            if self._Header == None:
                self._GetHeaderInfo()
            if self._BuildTargets == None:
                EdkLogger.error('build', ATTRIBUTE_NOT_AVAILABLE, "No BUILD_TARGETS", File=self.MetaFile)
        return self._BuildTargets

    ## Retrieve SKUID_IDENTIFIER
    def _GetSkuName(self):
        """Return the active SKU name, falling back to 'DEFAULT' when the
        DSC names none or names an unknown SKU."""
        if self._SkuName == None:
            if self._Header == None:
                self._GetHeaderInfo()
            if self._SkuName == None or self._SkuName not in self.SkuIds:
                self._SkuName = 'DEFAULT'
        return self._SkuName
## Override SKUID_IDENTIFIER
    def _SetSkuName(self, Value):
        """Override the active SKU; only accepted if Value is a known SKU.

        Invalidates the PCD cache since PCD values are SKU-dependent.
        """
        if Value in self.SkuIds:
            self._SkuName = Value
            # Needs to re-retrieve the PCD information
            self._Pcds = None

    ## Retrieve FLASH_DEFINITION (path of the FDF file; '' when unset)
    def _GetFdfFile(self):
        if self._FlashDefinition == None:
            if self._Header == None:
                self._GetHeaderInfo()
            if self._FlashDefinition == None:
                self._FlashDefinition = ''
        return self._FlashDefinition

    ## Retrieve BUILD_NUMBER ('' when unset)
    def _GetBuildNumber(self):
        if self._BuildNumber == None:
            if self._Header == None:
                self._GetHeaderInfo()
            if self._BuildNumber == None:
                self._BuildNumber = ''
        return self._BuildNumber

    ## Retrieve MAKEFILE_NAME ('' when unset)
    def _GetMakefileName(self):
        if self._MakefileName == None:
            if self._Header == None:
                self._GetHeaderInfo()
            if self._MakefileName == None:
                self._MakefileName = ''
        return self._MakefileName

    ## Retrieve BsBaseAddress ('' when unset)
    def _GetBsBaseAddress(self):
        if self._BsBaseAddress == None:
            if self._Header == None:
                self._GetHeaderInfo()
            if self._BsBaseAddress == None:
                self._BsBaseAddress = ''
        return self._BsBaseAddress

    ## Retrieve RtBaseAddress ('' when unset)
    def _GetRtBaseAddress(self):
        if self._RtBaseAddress == None:
            if self._Header == None:
                self._GetHeaderInfo()
            if self._RtBaseAddress == None:
                self._RtBaseAddress = ''
        return self._RtBaseAddress
## Retrieve the top address for the load fix address
    def _GetLoadFixAddress(self):
        """Return FIX_LOAD_TOP_MEMORY_ADDRESS as an int, validated to be
        non-negative and 4K-aligned (or the all-ones sentinel); a value
        given on the command line overrides the DSC."""
        if self._LoadFixAddress == None:
            if self._Header == None:
                self._GetHeaderInfo()

            if self._LoadFixAddress == None:
                self._LoadFixAddress = self._Macros.get(TAB_FIX_LOAD_TOP_MEMORY_ADDRESS, '0')

            try:
                self._LoadFixAddress = int (self._LoadFixAddress, 0)
            except:
                EdkLogger.error("build", PARAMETER_INVALID, "FIX_LOAD_TOP_MEMORY_ADDRESS %s is not valid dec or hex string" % (self._LoadFixAddress))

        #
        # If command line defined, should override the value in DSC file.
        #
        if 'FIX_LOAD_TOP_MEMORY_ADDRESS' in GlobalData.gCommandLineDefines.keys():
            try:
                self._LoadFixAddress = int(GlobalData.gCommandLineDefines['FIX_LOAD_TOP_MEMORY_ADDRESS'], 0)
            except:
                EdkLogger.error("build", PARAMETER_INVALID, "FIX_LOAD_TOP_MEMORY_ADDRESS %s is not valid dec or hex string" % (GlobalData.gCommandLineDefines['FIX_LOAD_TOP_MEMORY_ADDRESS']))

        if self._LoadFixAddress < 0:
            EdkLogger.error("build", PARAMETER_INVALID, "FIX_LOAD_TOP_MEMORY_ADDRESS is set to the invalid negative value 0x%x" % (self._LoadFixAddress))
        if self._LoadFixAddress != 0xFFFFFFFFFFFFFFFF and self._LoadFixAddress % 0x1000 != 0:
            EdkLogger.error("build", PARAMETER_INVALID, "FIX_LOAD_TOP_MEMORY_ADDRESS is set to the invalid unaligned 4K value 0x%x" % (self._LoadFixAddress))

        return self._LoadFixAddress

    ## Retrieve RFCLanguage filter (empty list when unset)
    def _GetRFCLanguages(self):
        if self._RFCLanguages == None:
            if self._Header == None:
                self._GetHeaderInfo()
            if self._RFCLanguages == None:
                self._RFCLanguages = []
        return self._RFCLanguages

    ## Retrieve ISOLanguage filter (empty list when unset)
    def _GetISOLanguages(self):
        if self._ISOLanguages == None:
            if self._Header == None:
                self._GetHeaderInfo()
            if self._ISOLanguages == None:
                self._ISOLanguages = []
        return self._ISOLanguages

    ## Retrieve the GUID string for VPD tool ('' when unset)
    def _GetVpdToolGuid(self):
        if self._VpdToolGuid == None:
            if self._Header == None:
                self._GetHeaderInfo()
            if self._VpdToolGuid == None:
                self._VpdToolGuid = ''
        return self._VpdToolGuid
## Retrieve [SkuIds] section information
    def _GetSkuIds(self):
        """Return the {sku name: sku id} mapping from [SkuIds], always
        including a 'DEFAULT' entry mapped to '0'."""
        if self._SkuIds == None:
            self._SkuIds = sdict()
            RecordList = self._RawData[MODEL_EFI_SKU_ID, self._Arch]
            for Record in RecordList:
                if Record[0] in [None, '']:
                    EdkLogger.error('build', FORMAT_INVALID, 'No Sku ID number',
                                    File=self.MetaFile, Line=Record[-1])
                if Record[1] in [None, '']:
                    EdkLogger.error('build', FORMAT_INVALID, 'No Sku ID name',
                                    File=self.MetaFile, Line=Record[-1])
                self._SkuIds[Record[1]] = Record[0]
            if 'DEFAULT' not in self._SkuIds:
                self._SkuIds['DEFAULT'] = '0'
        return self._SkuIds
## Retrieve [Components] section information
    def _GetModules(self):
        """Return (and cache) the {module path: ModuleBuildClassObject}
        mapping built from the [Components] section, including each
        module's source-override path, private library instances,
        module-scoped PCD settings and module-scoped build options."""
        if self._Modules != None:
            return self._Modules

        self._Modules = sdict()
        RecordList = self._RawData[MODEL_META_DATA_COMPONENT, self._Arch]
        Macros = self._Macros
        Macros["EDK_SOURCE"] = GlobalData.gEcpSource
        for Record in RecordList:
            ModuleFile = PathClass(NormPath(Record[0], Macros), GlobalData.gWorkspace, Arch=self._Arch)
            ModuleId = Record[5]
            LineNo = Record[6]

            # check the file validation
            ErrorCode, ErrorInfo = ModuleFile.Validate('.inf')
            if ErrorCode != 0:
                EdkLogger.error('build', ErrorCode, File=self.MetaFile, Line=LineNo,
                                ExtraData=ErrorInfo)
            # Check duplication
            # If arch is COMMON, no duplicate module is checked since all modules in all component sections are selected
            if self._Arch != 'COMMON' and ModuleFile in self._Modules:
                EdkLogger.error('build', FILE_DUPLICATED, File=self.MetaFile, ExtraData=str(ModuleFile), Line=LineNo)

            Module = ModuleBuildClassObject()
            Module.MetaFile = ModuleFile

            # get module override path
            RecordList = self._RawData[MODEL_META_DATA_COMPONENT_SOURCE_OVERRIDE_PATH, self._Arch, None, ModuleId]
            if RecordList != []:
                Module.SourceOverridePath = os.path.join(GlobalData.gWorkspace, NormPath(RecordList[0][0], Macros))

                # Check if the source override path exists
                if not os.path.isdir(Module.SourceOverridePath):
                    EdkLogger.error('build', FILE_NOT_FOUND, Message = 'Source override path does not exist:', File=self.MetaFile, ExtraData=Module.SourceOverridePath, Line=LineNo)

                #Add to GlobalData Variables
                GlobalData.gOverrideDir[ModuleFile.Key] = Module.SourceOverridePath

            # get module private library instance
            RecordList = self._RawData[MODEL_EFI_LIBRARY_CLASS, self._Arch, None, ModuleId]
            for Record in RecordList:
                LibraryClass = Record[0]
                LibraryPath = PathClass(NormPath(Record[1], Macros), GlobalData.gWorkspace, Arch=self._Arch)
                LineNo = Record[-1]

                # check the file validation
                ErrorCode, ErrorInfo = LibraryPath.Validate('.inf')
                if ErrorCode != 0:
                    EdkLogger.error('build', ErrorCode, File=self.MetaFile, Line=LineNo,
                                    ExtraData=ErrorInfo)

                if LibraryClass == '' or LibraryClass == 'NULL':
                    # anonymous (forced) library: synthesize a NULL<n> class name
                    self._NullLibraryNumber += 1
                    LibraryClass = 'NULL%d' % self._NullLibraryNumber
                    EdkLogger.verbose("Found forced library for %s\n\t%s [%s]" % (ModuleFile, LibraryPath, LibraryClass))
                Module.LibraryClasses[LibraryClass] = LibraryPath
                if LibraryPath not in self.LibraryInstances:
                    self.LibraryInstances.append(LibraryPath)

            # get module private PCD setting
            for Type in [MODEL_PCD_FIXED_AT_BUILD, MODEL_PCD_PATCHABLE_IN_MODULE, \
                         MODEL_PCD_FEATURE_FLAG, MODEL_PCD_DYNAMIC, MODEL_PCD_DYNAMIC_EX]:
                RecordList = self._RawData[Type, self._Arch, None, ModuleId]
                for TokenSpaceGuid, PcdCName, Setting, Dummy1, Dummy2, Dummy3, Dummy4 in RecordList:
                    TokenList = GetSplitValueList(Setting)
                    DefaultValue = TokenList[0]
                    if len(TokenList) > 1:
                        MaxDatumSize = TokenList[1]
                    else:
                        MaxDatumSize = ''
                    TypeString = self._PCD_TYPE_STRING_[Type]
                    Pcd = PcdClassObject(
                            PcdCName,
                            TokenSpaceGuid,
                            TypeString,
                            '',
                            DefaultValue,
                            '',
                            MaxDatumSize,
                            {},
                            False,
                            None
                            )
                    Module.Pcds[PcdCName, TokenSpaceGuid] = Pcd

            # get module private build options
            RecordList = self._RawData[MODEL_META_DATA_BUILD_OPTION, self._Arch, None, ModuleId]
            for ToolChainFamily, ToolChain, Option, Dummy1, Dummy2, Dummy3, Dummy4 in RecordList:
                if (ToolChainFamily, ToolChain) not in Module.BuildOptions:
                    Module.BuildOptions[ToolChainFamily, ToolChain] = Option
                else:
                    # concatenate repeated options for the same (family, tool)
                    OptionString = Module.BuildOptions[ToolChainFamily, ToolChain]
                    Module.BuildOptions[ToolChainFamily, ToolChain] = OptionString + " " + Option

            self._Modules[ModuleFile] = Module
        return self._Modules
## Retrieve all possible library instances used in this platform
def _GetLibraryInstances(self):
if self._LibraryInstances == None:
self._GetLibraryClasses()
return self._LibraryInstances
    ## Retrieve [LibraryClasses] information
    def _GetLibraryClasses(self):
        """Resolve the platform's library-class -> library-instance mapping.

        Builds an (Arch, ModuleType, LibraryClass) -> INF path table from the
        [LibraryClasses] records, then flattens it for self._Arch over every
        supported module type.  Edk-style [Libraries] instances are appended
        afterwards, keyed by their module BaseName under the pseudo module
        type ':dummy:'.  Side effect: populates self._LibraryInstances.
        Result is cached until _Clear()/_SetArch() invalidates it.
        """
        if self._LibraryClasses == None:
            self._LibraryInstances = []
            #
            # tdict is a special dict kind of type, used for selecting correct
            # library instance for given library class and module type
            #
            LibraryClassDict = tdict(True, 3)
            # track all library class names
            LibraryClassSet = set()
            RecordList = self._RawData[MODEL_EFI_LIBRARY_CLASS, self._Arch, None, -1]
            Macros = self._Macros
            for Record in RecordList:
                LibraryClass, LibraryInstance, Dummy, Arch, ModuleType, Dummy, LineNo = Record
                # an empty or 'NULL' class name is a forced (NULL) library link;
                # give it a unique synthetic class name
                if LibraryClass == '' or LibraryClass == 'NULL':
                    self._NullLibraryNumber += 1
                    LibraryClass = 'NULL%d' % self._NullLibraryNumber
                    EdkLogger.verbose("Found forced library for arch=%s\n\t%s [%s]" % (Arch, LibraryInstance, LibraryClass))
                LibraryClassSet.add(LibraryClass)
                LibraryInstance = PathClass(NormPath(LibraryInstance, Macros), GlobalData.gWorkspace, Arch=self._Arch)
                # check the file validation
                ErrorCode, ErrorInfo = LibraryInstance.Validate('.inf')
                if ErrorCode != 0:
                    EdkLogger.error('build', ErrorCode, File=self.MetaFile, Line=LineNo,
                                    ExtraData=ErrorInfo)
                if ModuleType != 'COMMON' and ModuleType not in SUP_MODULE_LIST:
                    EdkLogger.error('build', OPTION_UNKNOWN, "Unknown module type [%s]" % ModuleType,
                                    File=self.MetaFile, ExtraData=LibraryInstance, Line=LineNo)
                LibraryClassDict[Arch, ModuleType, LibraryClass] = LibraryInstance
                if LibraryInstance not in self._LibraryInstances:
                    self._LibraryInstances.append(LibraryInstance)
            # resolve the specific library instance for each class and each module type
            self._LibraryClasses = tdict(True)
            for LibraryClass in LibraryClassSet:
                # try all possible module types
                for ModuleType in SUP_MODULE_LIST:
                    # tdict falls back to the 'common' scope when self._Arch
                    # has no exact entry
                    LibraryInstance = LibraryClassDict[self._Arch, ModuleType, LibraryClass]
                    if LibraryInstance == None:
                        continue
                    self._LibraryClasses[LibraryClass, ModuleType] = LibraryInstance
            # for Edk style library instances, which are listed in different section
            Macros["EDK_SOURCE"] = GlobalData.gEcpSource
            RecordList = self._RawData[MODEL_EFI_LIBRARY_INSTANCE, self._Arch]
            for Record in RecordList:
                File = PathClass(NormPath(Record[0], Macros), GlobalData.gWorkspace, Arch=self._Arch)
                LineNo = Record[-1]
                # check the file validation
                ErrorCode, ErrorInfo = File.Validate('.inf')
                if ErrorCode != 0:
                    EdkLogger.error('build', ErrorCode, File=self.MetaFile, Line=LineNo,
                                    ExtraData=ErrorInfo)
                if File not in self._LibraryInstances:
                    self._LibraryInstances.append(File)
                #
                # we need the module name as the library class name, so we have
                # to parse it here. (self._Bdb[] will trigger a file parse if it
                # hasn't been parsed)
                #
                Library = self._Bdb[File, self._Arch, self._Target, self._Toolchain]
                self._LibraryClasses[Library.BaseName, ':dummy:'] = Library
        return self._LibraryClasses
## Retrieve all PCD settings in platform
def _GetPcds(self):
if self._Pcds == None:
self._Pcds = sdict()
self._Pcds.update(self._GetPcd(MODEL_PCD_FIXED_AT_BUILD))
self._Pcds.update(self._GetPcd(MODEL_PCD_PATCHABLE_IN_MODULE))
self._Pcds.update(self._GetPcd(MODEL_PCD_FEATURE_FLAG))
self._Pcds.update(self._GetDynamicPcd(MODEL_PCD_DYNAMIC_DEFAULT))
self._Pcds.update(self._GetDynamicHiiPcd(MODEL_PCD_DYNAMIC_HII))
self._Pcds.update(self._GetDynamicVpdPcd(MODEL_PCD_DYNAMIC_VPD))
self._Pcds.update(self._GetDynamicPcd(MODEL_PCD_DYNAMIC_EX_DEFAULT))
self._Pcds.update(self._GetDynamicHiiPcd(MODEL_PCD_DYNAMIC_EX_HII))
self._Pcds.update(self._GetDynamicVpdPcd(MODEL_PCD_DYNAMIC_EX_VPD))
return self._Pcds
## Retrieve [BuildOptions]
def _GetBuildOptions(self):
if self._BuildOptions == None:
self._BuildOptions = sdict()
#
# Retrieve build option for EDKII style module
#
RecordList = self._RawData[MODEL_META_DATA_BUILD_OPTION, self._Arch, EDKII_NAME]
for ToolChainFamily, ToolChain, Option, Dummy1, Dummy2, Dummy3, Dummy4 in RecordList:
self._BuildOptions[ToolChainFamily, ToolChain, EDKII_NAME] = Option
#
# Retrieve build option for EDK style module
#
RecordList = self._RawData[MODEL_META_DATA_BUILD_OPTION, self._Arch, EDK_NAME]
for ToolChainFamily, ToolChain, Option, Dummy1, Dummy2, Dummy3, Dummy4 in RecordList:
self._BuildOptions[ToolChainFamily, ToolChain, EDK_NAME] = Option
return self._BuildOptions
    ## Retrieve non-dynamic PCD settings
    #
    #   @param  Type    PCD type
    #
    #   @retval a dict object contains settings of given PCD type
    #
    def _GetPcd(self, Type):
        """Collect FixedAtBuild / PatchableInModule / FeatureFlag PCDs
        effective for self._Arch, keyed by (PcdCName, TokenSpaceGuid)."""
        Pcds = sdict()
        #
        # tdict is a special dict kind of type, used for selecting correct
        # PCD settings for certain ARCH
        #
        PcdDict = tdict(True, 3)
        PcdSet = set()
        # Find out all possible PCD candidates for self._Arch
        RecordList = self._RawData[Type, self._Arch]
        for TokenSpaceGuid, PcdCName, Setting, Arch, SkuName, Dummy3, Dummy4 in RecordList:
            PcdSet.add((PcdCName, TokenSpaceGuid))
            PcdDict[Arch, PcdCName, TokenSpaceGuid] = Setting
        # Remove redundant PCD candidates
        for PcdCName, TokenSpaceGuid in PcdSet:
            # tdict falls back to the 'common' ARCH scope when self._Arch
            # has no exact match
            Setting = PcdDict[self._Arch, PcdCName, TokenSpaceGuid]
            if Setting == None:
                continue
            PcdValue, DatumType, MaxDatumSize = AnalyzePcdData(Setting)
            Pcds[PcdCName, TokenSpaceGuid] = PcdClassObject(
                                                PcdCName,
                                                TokenSpaceGuid,
                                                self._PCD_TYPE_STRING_[Type],
                                                DatumType,
                                                PcdValue,
                                                '',
                                                MaxDatumSize,
                                                {},
                                                False,
                                                None
                                                )
        return Pcds
    ## Retrieve dynamic PCD settings
    #
    #   @param  Type    PCD type
    #
    #   @retval a dict object contains settings of given PCD type
    #
    def _GetDynamicPcd(self, Type):
        """Collect Dynamic / DynamicEx default-value PCDs effective for
        self._Arch and the currently selected SKU (self.SkuName)."""
        Pcds = sdict()
        #
        # tdict is a special dict kind of type, used for selecting correct
        # PCD settings for certain ARCH and SKU
        #
        PcdDict = tdict(True, 4)
        PcdList = []
        # Find out all possible PCD candidates for self._Arch
        RecordList = self._RawData[Type, self._Arch]
        for TokenSpaceGuid, PcdCName, Setting, Arch, SkuName, Dummy3, Dummy4 in RecordList:
            PcdList.append((PcdCName, TokenSpaceGuid))
            PcdDict[Arch, SkuName, PcdCName, TokenSpaceGuid] = Setting
        # Remove redundant PCD candidates, per the ARCH and SKU
        for PcdCName, TokenSpaceGuid in PcdList:
            Setting = PcdDict[self._Arch, self.SkuName, PcdCName, TokenSpaceGuid]
            if Setting == None:
                continue
            PcdValue, DatumType, MaxDatumSize = AnalyzePcdData(Setting)
            # record the value under the active SKU only
            SkuInfo = SkuInfoClass(self.SkuName, self.SkuIds[self.SkuName], '', '', '', '', '', PcdValue)
            Pcds[PcdCName, TokenSpaceGuid] = PcdClassObject(
                                                PcdCName,
                                                TokenSpaceGuid,
                                                self._PCD_TYPE_STRING_[Type],
                                                DatumType,
                                                PcdValue,
                                                '',
                                                MaxDatumSize,
                                                {self.SkuName : SkuInfo},
                                                False,
                                                None
                                                )
        return Pcds
    ## Retrieve dynamic HII PCD settings
    #
    #   @param  Type    PCD type
    #
    #   @retval a dict object contains settings of given PCD type
    #
    def _GetDynamicHiiPcd(self, Type):
        """Collect DynamicHii / DynamicExHii PCDs (values stored in an EFI
        variable) effective for self._Arch and the current SKU."""
        Pcds = sdict()
        #
        # tdict is a special dict kind of type, used for selecting correct
        # PCD settings for certain ARCH and SKU
        #
        PcdDict = tdict(True, 4)
        PcdSet = set()
        RecordList = self._RawData[Type, self._Arch]
        # Find out all possible PCD candidates for self._Arch
        for TokenSpaceGuid, PcdCName, Setting, Arch, SkuName, Dummy3, Dummy4 in RecordList:
            PcdSet.add((PcdCName, TokenSpaceGuid))
            PcdDict[Arch, SkuName, PcdCName, TokenSpaceGuid] = Setting
        # Remove redundant PCD candidates, per the ARCH and SKU
        for PcdCName, TokenSpaceGuid in PcdSet:
            Setting = PcdDict[self._Arch, self.SkuName, PcdCName, TokenSpaceGuid]
            if Setting == None:
                continue
            # HII PCD setting carries the variable storage description
            VariableName, VariableGuid, VariableOffset, DefaultValue = AnalyzeHiiPcdData(Setting)
            SkuInfo = SkuInfoClass(self.SkuName, self.SkuIds[self.SkuName], VariableName, VariableGuid, VariableOffset, DefaultValue)
            Pcds[PcdCName, TokenSpaceGuid] = PcdClassObject(
                                                PcdCName,
                                                TokenSpaceGuid,
                                                self._PCD_TYPE_STRING_[Type],
                                                '',
                                                DefaultValue,
                                                '',
                                                '',
                                                {self.SkuName : SkuInfo},
                                                False,
                                                None
                                                )
        return Pcds
    ## Retrieve dynamic VPD PCD settings
    #
    #   @param  Type    PCD type
    #
    #   @retval a dict object contains settings of given PCD type
    #
    def _GetDynamicVpdPcd(self, Type):
        """Collect DynamicVpd / DynamicExVpd PCDs (values stored in the VPD
        region) effective for self._Arch and the current SKU."""
        Pcds = sdict()
        #
        # tdict is a special dict kind of type, used for selecting correct
        # PCD settings for certain ARCH and SKU
        #
        PcdDict = tdict(True, 4)
        PcdList = []
        # Find out all possible PCD candidates for self._Arch
        RecordList = self._RawData[Type, self._Arch]
        for TokenSpaceGuid, PcdCName, Setting, Arch, SkuName, Dummy3, Dummy4 in RecordList:
            PcdList.append((PcdCName, TokenSpaceGuid))
            PcdDict[Arch, SkuName, PcdCName, TokenSpaceGuid] = Setting
        # Remove redundant PCD candidates, per the ARCH and SKU
        for PcdCName, TokenSpaceGuid in PcdList:
            Setting = PcdDict[self._Arch, self.SkuName, PcdCName, TokenSpaceGuid]
            if Setting == None:
                continue
            #
            # For the VOID* type, it can have optional data of MaxDatumSize and InitialValue
            # For the Integer & Boolean type, the optional data can only be InitialValue.
            # At this point, we put all the data into the PcdClssObject for we don't know the PCD's datumtype
            # until the DEC parser has been called.
            #
            VpdOffset, MaxDatumSize, InitialValue = AnalyzeVpdPcdData(Setting)
            SkuInfo = SkuInfoClass(self.SkuName, self.SkuIds[self.SkuName], '', '', '', '', VpdOffset, InitialValue)
            Pcds[PcdCName, TokenSpaceGuid] = PcdClassObject(
                                                PcdCName,
                                                TokenSpaceGuid,
                                                self._PCD_TYPE_STRING_[Type],
                                                '',
                                                '',
                                                '',
                                                MaxDatumSize,
                                                {self.SkuName : SkuInfo},
                                                False,
                                                None
                                                )
        return Pcds
## Add external modules
#
# The external modules are mostly those listed in FDF file, which don't
# need "build".
#
# @param FilePath The path of module description file
#
def AddModule(self, FilePath):
FilePath = NormPath(FilePath)
if FilePath not in self.Modules:
Module = ModuleBuildClassObject()
Module.MetaFile = FilePath
self.Modules.append(Module)
## Add external PCDs
#
# The external PCDs are mostly those listed in FDF file to specify address
# or offset information.
#
# @param Name Name of the PCD
# @param Guid Token space guid of the PCD
# @param Value Value of the PCD
#
def AddPcd(self, Name, Guid, Value):
if (Name, Guid) not in self.Pcds:
self.Pcds[Name, Guid] = PcdClassObject(Name, Guid, '', '', '', '', '', {}, False, None)
self.Pcds[Name, Guid].DefaultValue = Value
    def IsPlatformPcdDeclared(self, DecPcds):
        """Validate that every PCD set by this DSC is declared in a DEC.

        @param DecPcds  dict keyed by (PcdCName, TokenSpaceGuid) holding the
                        PCD declarations collected from the packages.

        On an undeclared PCD, or a value failing datum-type validation,
        EdkLogger.error aborts the build; otherwise the method simply
        returns (no meaningful return value).
        """
        for PcdType in (MODEL_PCD_FIXED_AT_BUILD, MODEL_PCD_PATCHABLE_IN_MODULE, MODEL_PCD_FEATURE_FLAG,
                        MODEL_PCD_DYNAMIC_DEFAULT, MODEL_PCD_DYNAMIC_HII, MODEL_PCD_DYNAMIC_VPD,
                        MODEL_PCD_DYNAMIC_EX_DEFAULT, MODEL_PCD_DYNAMIC_EX_HII, MODEL_PCD_DYNAMIC_EX_VPD):
            RecordList = self._RawData[PcdType, self._Arch]
            for TokenSpaceGuid, PcdCName, Setting, Arch, SkuName, Dummy3, Dummy4 in RecordList:
                if (PcdCName, TokenSpaceGuid) not in DecPcds:
                    EdkLogger.error('build', PARSER_ERROR,
                                    "Pcd (%s.%s) defined in DSC is not declared in DEC files." % (TokenSpaceGuid, PcdCName),
                                    File=self.MetaFile, Line=Dummy4)
                PcdValue = ''
                if PcdType in (MODEL_PCD_DYNAMIC_VPD, MODEL_PCD_DYNAMIC_EX_VPD):
                    # VOID* VPD settings carry the value in the 3rd field,
                    # other datum types in the 2nd (see AnalyzeVpdPcdData use
                    # in _GetDynamicVpdPcd)
                    if DecPcds[PcdCName, TokenSpaceGuid].DatumType == "VOID*":
                        PcdValue = AnalyzeVpdPcdData(Setting)[2]
                    else:
                        PcdValue = AnalyzeVpdPcdData(Setting)[1]
                elif PcdType in (MODEL_PCD_DYNAMIC_HII, MODEL_PCD_DYNAMIC_EX_HII):
                    # HII settings: 4th field is the default value
                    PcdValue = AnalyzeHiiPcdData(Setting)[3]
                else:
                    PcdValue = AnalyzePcdData(Setting)[0]
                # empty values are not validated
                if PcdValue:
                    Valid, ErrStr = CheckPcdDatum(DecPcds[PcdCName, TokenSpaceGuid].DatumType, PcdValue)
                    if not Valid:
                        EdkLogger.error('build', FORMAT_INVALID, ErrStr, File=self.MetaFile, Line=Dummy4,
                                        ExtraData="%s.%s" % (TokenSpaceGuid, PcdCName))
_Macros = property(_GetMacros)
Arch = property(_GetArch, _SetArch)
Platform = property(_GetPlatformName)
PlatformName = property(_GetPlatformName)
Guid = property(_GetFileGuid)
Version = property(_GetVersion)
DscSpecification = property(_GetDscSpec)
OutputDirectory = property(_GetOutpuDir)
SupArchList = property(_GetSupArch)
BuildTargets = property(_GetBuildTarget)
SkuName = property(_GetSkuName, _SetSkuName)
FlashDefinition = property(_GetFdfFile)
BuildNumber = property(_GetBuildNumber)
MakefileName = property(_GetMakefileName)
BsBaseAddress = property(_GetBsBaseAddress)
RtBaseAddress = property(_GetRtBaseAddress)
LoadFixAddress = property(_GetLoadFixAddress)
RFCLanguages = property(_GetRFCLanguages)
ISOLanguages = property(_GetISOLanguages)
VpdToolGuid = property(_GetVpdToolGuid)
SkuIds = property(_GetSkuIds)
Modules = property(_GetModules)
LibraryInstances = property(_GetLibraryInstances)
LibraryClasses = property(_GetLibraryClasses)
Pcds = property(_GetPcds)
BuildOptions = property(_GetBuildOptions)
## Platform build information from DEC file
#
# This class is used to retrieve information stored in database and convert them
# into PackageBuildClassObject form for easier use for AutoGen.
#
class DecBuildData(PackageBuildClassObject):
    """Package (DEC file) build information.

    Retrieves records stored in the workspace metafile database and converts
    them into PackageBuildClassObject form for use by AutoGen.  All getters
    are lazy and cache their results until _Clear()/_SetArch() invalidates
    them.
    """
    # dict used to convert PCD type in database to string used by build tool
    _PCD_TYPE_STRING_ = {
        MODEL_PCD_FIXED_AT_BUILD : "FixedAtBuild",
        MODEL_PCD_PATCHABLE_IN_MODULE : "PatchableInModule",
        MODEL_PCD_FEATURE_FLAG : "FeatureFlag",
        MODEL_PCD_DYNAMIC : "Dynamic",
        MODEL_PCD_DYNAMIC_DEFAULT : "Dynamic",
        MODEL_PCD_DYNAMIC_HII : "DynamicHii",
        MODEL_PCD_DYNAMIC_VPD : "DynamicVpd",
        MODEL_PCD_DYNAMIC_EX : "DynamicEx",
        MODEL_PCD_DYNAMIC_EX_DEFAULT : "DynamicEx",
        MODEL_PCD_DYNAMIC_EX_HII : "DynamicExHii",
        MODEL_PCD_DYNAMIC_EX_VPD : "DynamicExVpd",
    }
    # dict used to convert part of [Defines] to members of DecBuildData directly
    _PROPERTY_ = {
        #
        # Required Fields
        #
        TAB_DEC_DEFINES_PACKAGE_NAME : "_PackageName",
        TAB_DEC_DEFINES_PACKAGE_GUID : "_Guid",
        TAB_DEC_DEFINES_PACKAGE_VERSION : "_Version",
        TAB_DEC_DEFINES_PKG_UNI_FILE : "_PkgUniFile",
    }
    ## Constructor of DecBuildData
    #
    #  Initialize object of DecBuildData
    #
    #   @param      File            The path of the package description (DEC) file
    #   @param      RawData         The raw data of the DEC file
    #   @param      BuildDataBase   Database used to retrieve module information
    #   @param      Arch            The target architecture
    #   @param      Target          Build target name (unused by the getters here)
    #   @param      Toolchain       Toolchain tag (unused by the getters here)
    #
    def __init__(self, File, RawData, BuildDataBase, Arch='COMMON', Target=None, Toolchain=None):
        self.MetaFile = File
        self._PackageDir = File.Dir
        self._RawData = RawData
        self._Bdb = BuildDataBase
        self._Arch = Arch
        self._Target = Target
        self._Toolchain = Toolchain
        self._Clear()
    ## XXX[key] = value
    def __setitem__(self, key, value):
        self.__dict__[self._PROPERTY_[key]] = value
    ## value = XXX[key]
    def __getitem__(self, key):
        return self.__dict__[self._PROPERTY_[key]]
    ## "in" test support
    def __contains__(self, key):
        return key in self._PROPERTY_
    ## Set all internal used members of DecBuildData to None
    def _Clear(self):
        self._Header = None
        self._PackageName = None
        self._Guid = None
        self._Version = None
        self._PkgUniFile = None
        self._Protocols = None
        self._Ppis = None
        self._Guids = None
        self._Includes = None
        self._LibraryClasses = None
        self._Pcds = None
        self.__Macros = None
    ## Get current effective macros
    def _GetMacros(self):
        # only the global defines apply to a DEC file
        if self.__Macros == None:
            self.__Macros = {}
            self.__Macros.update(GlobalData.gGlobalDefines)
        return self.__Macros
    ## Get architecture
    def _GetArch(self):
        return self._Arch
    ## Set architecture
    #
    #   Changing the default ARCH to another may affect all other information
    # because all information in a platform may be ARCH-related. That's
    # why we need to clear all internal used members, in order to cause all
    # information to be re-retrieved.
    #
    #   @param  Value   The value of ARCH
    #
    def _SetArch(self, Value):
        if self._Arch == Value:
            return
        self._Arch = Value
        self._Clear()
    ## Retrieve all information in [Defines] section
    #
    #   (Retriving all [Defines] information in one-shot is just to save time.)
    #
    def _GetHeaderInfo(self):
        RecordList = self._RawData[MODEL_META_DATA_HEADER, self._Arch]
        for Record in RecordList:
            Name = Record[1]
            # only keys listed in _PROPERTY_ are stored
            if Name in self:
                self[Name] = Record[2]
        # sentinel: avoid re-querying the database on later getter calls
        self._Header = 'DUMMY'
    ## Retrieve package name
    def _GetPackageName(self):
        if self._PackageName == None:
            if self._Header == None:
                self._GetHeaderInfo()
            # PACKAGE_NAME is mandatory; a missing one aborts the build
            if self._PackageName == None:
                EdkLogger.error("build", ATTRIBUTE_NOT_AVAILABLE, "No PACKAGE_NAME", File=self.MetaFile)
        return self._PackageName
    ## Retrieve file guid
    def _GetFileGuid(self):
        if self._Guid == None:
            if self._Header == None:
                self._GetHeaderInfo()
            # PACKAGE_GUID is mandatory; a missing one aborts the build
            if self._Guid == None:
                EdkLogger.error("build", ATTRIBUTE_NOT_AVAILABLE, "No PACKAGE_GUID", File=self.MetaFile)
        return self._Guid
    ## Retrieve package version
    def _GetVersion(self):
        if self._Version == None:
            if self._Header == None:
                self._GetHeaderInfo()
            # version is optional; default to empty string
            if self._Version == None:
                self._Version = ''
        return self._Version
    ## Retrieve protocol definitions (name/value pairs)
    def _GetProtocol(self):
        if self._Protocols == None:
            #
            # tdict is a special kind of dict, used for selecting correct
            # protocol defition for given ARCH
            #
            ProtocolDict = tdict(True)
            NameList = []
            # find out all protocol definitions for specific and 'common' arch
            RecordList = self._RawData[MODEL_EFI_PROTOCOL, self._Arch]
            for Name, Guid, Dummy, Arch, ID, LineNo in RecordList:
                if Name not in NameList:
                    NameList.append(Name)
                ProtocolDict[Arch, Name] = Guid
            # use sdict to keep the order
            self._Protocols = sdict()
            for Name in NameList:
                #
                # limit the ARCH to self._Arch, if no self._Arch found, tdict
                # will automatically turn to 'common' ARCH for trying
                #
                self._Protocols[Name] = ProtocolDict[self._Arch, Name]
        return self._Protocols
    ## Retrieve PPI definitions (name/value pairs)
    def _GetPpi(self):
        if self._Ppis == None:
            #
            # tdict is a special kind of dict, used for selecting correct
            # PPI defition for given ARCH
            #
            PpiDict = tdict(True)
            NameList = []
            # find out all PPI definitions for specific arch and 'common' arch
            RecordList = self._RawData[MODEL_EFI_PPI, self._Arch]
            for Name, Guid, Dummy, Arch, ID, LineNo in RecordList:
                if Name not in NameList:
                    NameList.append(Name)
                PpiDict[Arch, Name] = Guid
            # use sdict to keep the order
            self._Ppis = sdict()
            for Name in NameList:
                #
                # limit the ARCH to self._Arch, if no self._Arch found, tdict
                # will automatically turn to 'common' ARCH for trying
                #
                self._Ppis[Name] = PpiDict[self._Arch, Name]
        return self._Ppis
    ## Retrieve GUID definitions (name/value pairs)
    def _GetGuid(self):
        if self._Guids == None:
            #
            # tdict is a special kind of dict, used for selecting correct
            # GUID defition for given ARCH
            #
            GuidDict = tdict(True)
            NameList = []
            # find out all protocol definitions for specific and 'common' arch
            RecordList = self._RawData[MODEL_EFI_GUID, self._Arch]
            for Name, Guid, Dummy, Arch, ID, LineNo in RecordList:
                if Name not in NameList:
                    NameList.append(Name)
                GuidDict[Arch, Name] = Guid
            # use sdict to keep the order
            self._Guids = sdict()
            for Name in NameList:
                #
                # limit the ARCH to self._Arch, if no self._Arch found, tdict
                # will automatically turn to 'common' ARCH for trying
                #
                self._Guids[Name] = GuidDict[self._Arch, Name]
        return self._Guids
    ## Retrieve public include paths declared in this package
    def _GetInclude(self):
        if self._Includes == None:
            self._Includes = []
            RecordList = self._RawData[MODEL_EFI_INCLUDE, self._Arch]
            Macros = self._Macros
            Macros["EDK_SOURCE"] = GlobalData.gEcpSource
            for Record in RecordList:
                File = PathClass(NormPath(Record[0], Macros), self._PackageDir, Arch=self._Arch)
                LineNo = Record[-1]
                # validate the path
                ErrorCode, ErrorInfo = File.Validate()
                if ErrorCode != 0:
                    EdkLogger.error('build', ErrorCode, ExtraData=ErrorInfo, File=self.MetaFile, Line=LineNo)
                # avoid duplicate include path
                if File not in self._Includes:
                    self._Includes.append(File)
        return self._Includes
    ## Retrieve library class declarations (not used in build at present)
    def _GetLibraryClass(self):
        if self._LibraryClasses == None:
            #
            # tdict is a special kind of dict, used for selecting correct
            # library class declaration for given ARCH
            #
            LibraryClassDict = tdict(True)
            LibraryClassSet = set()
            RecordList = self._RawData[MODEL_EFI_LIBRARY_CLASS, self._Arch]
            Macros = self._Macros
            for LibraryClass, File, Dummy, Arch, ID, LineNo in RecordList:
                File = PathClass(NormPath(File, Macros), self._PackageDir, Arch=self._Arch)
                # check the file validation
                ErrorCode, ErrorInfo = File.Validate()
                if ErrorCode != 0:
                    EdkLogger.error('build', ErrorCode, ExtraData=ErrorInfo, File=self.MetaFile, Line=LineNo)
                LibraryClassSet.add(LibraryClass)
                LibraryClassDict[Arch, LibraryClass] = File
            self._LibraryClasses = sdict()
            for LibraryClass in LibraryClassSet:
                self._LibraryClasses[LibraryClass] = LibraryClassDict[self._Arch, LibraryClass]
        return self._LibraryClasses
    ## Retrieve PCD declarations
    def _GetPcds(self):
        if self._Pcds == None:
            self._Pcds = sdict()
            self._Pcds.update(self._GetPcd(MODEL_PCD_FIXED_AT_BUILD))
            self._Pcds.update(self._GetPcd(MODEL_PCD_PATCHABLE_IN_MODULE))
            self._Pcds.update(self._GetPcd(MODEL_PCD_FEATURE_FLAG))
            self._Pcds.update(self._GetPcd(MODEL_PCD_DYNAMIC))
            self._Pcds.update(self._GetPcd(MODEL_PCD_DYNAMIC_EX))
        return self._Pcds
    ## Retrieve PCD declarations for given type
    def _GetPcd(self, Type):
        """Collect the PCD declarations of one model type, keyed by
        (PcdCName, TokenSpaceGuid, type string)."""
        Pcds = sdict()
        #
        # tdict is a special kind of dict, used for selecting correct
        # PCD declaration for given ARCH
        #
        PcdDict = tdict(True, 3)
        # for summarizing PCD
        PcdSet = set()
        # find out all PCDs of the 'type'
        RecordList = self._RawData[Type, self._Arch]
        for TokenSpaceGuid, PcdCName, Setting, Arch, Dummy1, Dummy2 in RecordList:
            PcdDict[Arch, PcdCName, TokenSpaceGuid] = Setting
            PcdSet.add((PcdCName, TokenSpaceGuid))
        for PcdCName, TokenSpaceGuid in PcdSet:
            #
            # limit the ARCH to self._Arch, if no self._Arch found, tdict
            # will automatically turn to 'common' ARCH and try again
            #
            Setting = PcdDict[self._Arch, PcdCName, TokenSpaceGuid]
            if Setting == None:
                continue
            DefaultValue, DatumType, TokenNumber = AnalyzePcdData(Setting)
            Pcds[PcdCName, TokenSpaceGuid, self._PCD_TYPE_STRING_[Type]] = PcdClassObject(
                                                                            PcdCName,
                                                                            TokenSpaceGuid,
                                                                            self._PCD_TYPE_STRING_[Type],
                                                                            DatumType,
                                                                            DefaultValue,
                                                                            TokenNumber,
                                                                            '',
                                                                            {},
                                                                            False,
                                                                            None
                                                                            )
        return Pcds
    # Public attribute views over the lazy getters above
    _Macros = property(_GetMacros)
    Arch = property(_GetArch, _SetArch)
    PackageName = property(_GetPackageName)
    Guid = property(_GetFileGuid)
    Version = property(_GetVersion)
    Protocols = property(_GetProtocol)
    Ppis = property(_GetPpi)
    Guids = property(_GetGuid)
    Includes = property(_GetInclude)
    LibraryClasses = property(_GetLibraryClass)
    Pcds = property(_GetPcds)
## Module build information from INF file
#
#  This class is used to retrieve information stored in database and convert them
# into ModuleBuildClassObject form for easier use for AutoGen.
#
class InfBuildData(ModuleBuildClassObject):
    # dict used to convert PCD type in database to string used by build tool
    _PCD_TYPE_STRING_ = {
        MODEL_PCD_FIXED_AT_BUILD : "FixedAtBuild",
        MODEL_PCD_PATCHABLE_IN_MODULE : "PatchableInModule",
        MODEL_PCD_FEATURE_FLAG : "FeatureFlag",
        MODEL_PCD_DYNAMIC : "Dynamic",
        MODEL_PCD_DYNAMIC_DEFAULT : "Dynamic",
        MODEL_PCD_DYNAMIC_HII : "DynamicHii",
        MODEL_PCD_DYNAMIC_VPD : "DynamicVpd",
        MODEL_PCD_DYNAMIC_EX : "DynamicEx",
        MODEL_PCD_DYNAMIC_EX_DEFAULT : "DynamicEx",
        MODEL_PCD_DYNAMIC_EX_HII : "DynamicExHii",
        MODEL_PCD_DYNAMIC_EX_VPD : "DynamicExVpd",
    }
    # dict used to convert part of [Defines] to members of InfBuildData directly
    _PROPERTY_ = {
        #
        # Required Fields
        #
        TAB_INF_DEFINES_BASE_NAME : "_BaseName",
        TAB_INF_DEFINES_FILE_GUID : "_Guid",
        TAB_INF_DEFINES_MODULE_TYPE : "_ModuleType",
        #
        # Optional Fields
        #
        #TAB_INF_DEFINES_INF_VERSION : "_AutoGenVersion",
        TAB_INF_DEFINES_COMPONENT_TYPE : "_ComponentType",
        TAB_INF_DEFINES_MAKEFILE_NAME : "_MakefileName",
        #TAB_INF_DEFINES_CUSTOM_MAKEFILE : "_CustomMakefile",
        TAB_INF_DEFINES_DPX_SOURCE :"_DxsFile",
        TAB_INF_DEFINES_VERSION_NUMBER : "_Version",
        TAB_INF_DEFINES_VERSION_STRING : "_Version",
        TAB_INF_DEFINES_VERSION : "_Version",
        TAB_INF_DEFINES_PCD_IS_DRIVER : "_PcdIsDriver",
        TAB_INF_DEFINES_SHADOW : "_Shadow",
        TAB_COMPONENTS_SOURCE_OVERRIDE_PATH : "_SourceOverridePath",
    }
    # dict used to convert Component type (Edk style) to Module type (EDK II style)
    _MODULE_TYPE_ = {
        "LIBRARY" : "BASE",
        "SECURITY_CORE" : "SEC",
        "PEI_CORE" : "PEI_CORE",
        "COMBINED_PEIM_DRIVER" : "PEIM",
        "PIC_PEIM" : "PEIM",
        "RELOCATABLE_PEIM" : "PEIM",
        "PE32_PEIM" : "PEIM",
        "BS_DRIVER" : "DXE_DRIVER",
        "RT_DRIVER" : "DXE_RUNTIME_DRIVER",
        "SAL_RT_DRIVER" : "DXE_SAL_DRIVER",
        "DXE_SMM_DRIVER" : "DXE_SMM_DRIVER",
#        "SMM_DRIVER" : "DXE_SMM_DRIVER",
#        "BS_DRIVER" : "DXE_SMM_DRIVER",
#        "BS_DRIVER" : "UEFI_DRIVER",
        "APPLICATION" : "UEFI_APPLICATION",
        "LOGO" : "BASE",
    }
    # regular expression for converting XXX_FLAGS in [nmake] section to new type
    _NMAKE_FLAG_PATTERN_ = re.compile("(?:EBC_)?([A-Z]+)_(?:STD_|PROJ_|ARCH_)?FLAGS(?:_DLL|_ASL|_EXE)?", re.UNICODE)
    # dict used to convert old tool name used in [nmake] section to new ones
    _TOOL_CODE_ = {
        "C" : "CC",
        "LIB" : "SLINK",
        "LINK" : "DLINK",
    }
    ## Constructor of InfBuildData
    #
    #  Initialize object of InfBuildData
    #
    #   @param      FilePath        The path of the module description (INF) file
    #   @param      RawData         The raw data of the INF file
    #   @param      BuildDatabase   Database used to retrieve module/package information
    #   @param      Arch            The target architecture
    #   @param      Target          Build target name
    #   @param      Toolchain       Toolchain tag
    #
    def __init__(self, FilePath, RawData, BuildDatabase, Arch='COMMON', Target=None, Toolchain=None):
        self.MetaFile = FilePath
        self._ModuleDir = FilePath.Dir
        self._RawData = RawData
        self._Bdb = BuildDatabase
        self._Arch = Arch
        self._Target = Target
        self._Toolchain = Toolchain
        self._Platform = 'COMMON'
        self._SourceOverridePath = None
        # honor a source-override directory registered for this INF (FDF use)
        if FilePath.Key in GlobalData.gOverrideDir:
            self._SourceOverridePath = GlobalData.gOverrideDir[FilePath.Key]
        self._Clear()
## XXX[key] = value
def __setitem__(self, key, value):
self.__dict__[self._PROPERTY_[key]] = value
## value = XXX[key]
def __getitem__(self, key):
return self.__dict__[self._PROPERTY_[key]]
## "in" test support
def __contains__(self, key):
return key in self._PROPERTY_
    ## Set all internal used members of InfBuildData to None
    def _Clear(self):
        """Drop every cached value so the next getter re-queries the database."""
        # [Defines]-derived header state
        self._Header_ = None
        self._AutoGenVersion = None
        self._BaseName = None
        self._DxsFile = None
        self._ModuleType = None
        self._ComponentType = None
        self._BuildType = None
        self._Guid = None
        self._Version = None
        self._PcdIsDriver = None
        self._BinaryModule = None
        self._Shadow = None
        self._MakefileName = None
        self._CustomMakefile = None
        self._Specification = None
        self._LibraryClass = None
        self._ModuleEntryPointList = None
        self._ModuleUnloadImageList = None
        self._ConstructorList = None
        self._DestructorList = None
        self._Defs = None
        # section-derived collections
        self._Binaries = None
        self._Sources = None
        self._LibraryClasses = None
        self._Libraries = None
        self._Protocols = None
        self._Ppis = None
        self._Guids = None
        self._Includes = None
        self._Packages = None
        self._Pcds = None
        self._BuildOptions = None
        self._Depex = None
        self._DepexExpression = None
        # macro cache
        self.__Macros = None
## Get current effective macros
def _GetMacros(self):
if self.__Macros == None:
self.__Macros = {}
# EDK_GLOBAL defined macros can be applied to EDK module
if self.AutoGenVersion < 0x00010005:
self.__Macros.update(GlobalData.gEdkGlobal)
self.__Macros.update(GlobalData.gGlobalDefines)
# VBox hack begin - Required for referencing files outside the workspace, like the reset vectors and logo.
self.__Macros.update(GlobalData.gCommandLineDefines);
# VBox hack end.
return self.__Macros
## Get architecture
def _GetArch(self):
return self._Arch
## Set architecture
#
# Changing the default ARCH to another may affect all other information
# because all information in a platform may be ARCH-related. That's
# why we need to clear all internal used members, in order to cause all
# information to be re-retrieved.
#
# @param Value The value of ARCH
#
def _SetArch(self, Value):
if self._Arch == Value:
return
self._Arch = Value
self._Clear()
## Return the name of platform employing this module
def _GetPlatform(self):
return self._Platform
## Change the name of platform employing this module
#
# Changing the default name of platform to another may affect some information
# because they may be PLATFORM-related. That's why we need to clear all internal
# used members, in order to cause all information to be re-retrieved.
#
def _SetPlatform(self, Value):
if self._Platform == Value:
return
self._Platform = Value
self._Clear()
    ## Retrieve all information in [Defines] section
    #
    #   (Retriving all [Defines] information in one-shot is just to save time.)
    #
    def _GetHeaderInfo(self):
        """Parse every [Defines] record once, filling the private members.

        Keys in _PROPERTY_ are assigned directly; a handful of special keys
        (spec versions, LIBRARY_CLASS, ENTRY_POINT, ...) get bespoke
        handling; everything else lands in self._Defs.  Afterwards the
        EDK II (INF_VERSION >= 0x00010005) or Edk.x branch derives
        _BuildType, sources and build options.
        """
        RecordList = self._RawData[MODEL_META_DATA_HEADER, self._Arch, self._Platform]
        for Record in RecordList:
            Name, Value = Record[1], ReplaceMacro(Record[2], self._Macros, False)
            # items defined _PROPERTY_ don't need additional processing
            if Name in self:
                self[Name] = Value
            # some special items in [Defines] section need special treatment
            elif Name in ('EFI_SPECIFICATION_VERSION', 'UEFI_SPECIFICATION_VERSION', 'EDK_RELEASE_VERSION', 'PI_SPECIFICATION_VERSION'):
                # EFI_* is the legacy spelling of UEFI_*; fold them together
                if Name in ('EFI_SPECIFICATION_VERSION', 'UEFI_SPECIFICATION_VERSION'):
                    Name = 'UEFI_SPECIFICATION_VERSION'
                if self._Specification == None:
                    self._Specification = sdict()
                self._Specification[Name] = GetHexVerValue(Value)
                if self._Specification[Name] == None:
                    EdkLogger.error("build", FORMAT_NOT_SUPPORTED,
                                    "'%s' format is not supported for %s" % (Value, Name),
                                    File=self.MetaFile, Line=Record[-1])
            elif Name == 'LIBRARY_CLASS':
                if self._LibraryClass == None:
                    self._LibraryClass = []
                ValueList = GetSplitValueList(Value)
                LibraryClass = ValueList[0]
                # optional second token restricts the supported module types
                if len(ValueList) > 1:
                    SupModuleList = GetSplitValueList(ValueList[1], ' ')
                else:
                    SupModuleList = SUP_MODULE_LIST
                self._LibraryClass.append(LibraryClassObject(LibraryClass, SupModuleList))
            elif Name == 'ENTRY_POINT':
                if self._ModuleEntryPointList == None:
                    self._ModuleEntryPointList = []
                self._ModuleEntryPointList.append(Value)
            elif Name == 'UNLOAD_IMAGE':
                if self._ModuleUnloadImageList == None:
                    self._ModuleUnloadImageList = []
                if not Value:
                    continue
                self._ModuleUnloadImageList.append(Value)
            elif Name == 'CONSTRUCTOR':
                if self._ConstructorList == None:
                    self._ConstructorList = []
                if not Value:
                    continue
                self._ConstructorList.append(Value)
            elif Name == 'DESTRUCTOR':
                if self._DestructorList == None:
                    self._DestructorList = []
                if not Value:
                    continue
                self._DestructorList.append(Value)
            elif Name == TAB_INF_DEFINES_CUSTOM_MAKEFILE:
                TokenList = GetSplitValueList(Value)
                if self._CustomMakefile == None:
                    self._CustomMakefile = {}
                # a single token applies to both tool families
                if len(TokenList) < 2:
                    self._CustomMakefile['MSFT'] = TokenList[0]
                    self._CustomMakefile['GCC'] = TokenList[0]
                else:
                    if TokenList[0] not in ['MSFT', 'GCC']:
                        EdkLogger.error("build", FORMAT_NOT_SUPPORTED,
                                        "No supported family [%s]" % TokenList[0],
                                        File=self.MetaFile, Line=Record[-1])
                    self._CustomMakefile[TokenList[0]] = TokenList[1]
            else:
                if self._Defs == None:
                    self._Defs = sdict()
                self._Defs[Name] = Value
        #
        # Retrieve information in sections specific to Edk.x modules
        #
        if self.AutoGenVersion >= 0x00010005:
            if not self._ModuleType:
                EdkLogger.error("build", ATTRIBUTE_NOT_AVAILABLE,
                                "MODULE_TYPE is not given", File=self.MetaFile)
            if self._ModuleType not in SUP_MODULE_LIST:
                RecordList = self._RawData[MODEL_META_DATA_HEADER, self._Arch, self._Platform]
                for Record in RecordList:
                    Name = Record[1]
                    if Name == "MODULE_TYPE":
                        LineNo = Record[6]
                        break
                EdkLogger.error("build", FORMAT_NOT_SUPPORTED,
                                "MODULE_TYPE %s is not supported for EDK II, valid values are:\n %s" % (self._ModuleType,' '.join(l for l in SUP_MODULE_LIST)),
                                File=self.MetaFile, Line=LineNo)
            if (self._Specification == None) or (not 'PI_SPECIFICATION_VERSION' in self._Specification) or (int(self._Specification['PI_SPECIFICATION_VERSION'], 16) < 0x0001000A):
                if self._ModuleType == SUP_MODULE_SMM_CORE:
                    EdkLogger.error("build", FORMAT_NOT_SUPPORTED, "SMM_CORE module type can't be used in the module with PI_SPECIFICATION_VERSION less than 0x0001000A", File=self.MetaFile)
            # PCI option-ROM / HII-resource modules get dedicated build types
            if self._Defs and 'PCI_DEVICE_ID' in self._Defs and 'PCI_VENDOR_ID' in self._Defs \
               and 'PCI_CLASS_CODE' in self._Defs:
                self._BuildType = 'UEFI_OPTIONROM'
            elif self._Defs and 'UEFI_HII_RESOURCE_SECTION' in self._Defs \
               and self._Defs['UEFI_HII_RESOURCE_SECTION'] == 'TRUE':
                self._BuildType = 'UEFI_HII'
            else:
                self._BuildType = self._ModuleType.upper()
            if self._DxsFile:
                File = PathClass(NormPath(self._DxsFile), self._ModuleDir, Arch=self._Arch)
                # check the file validation
                ErrorCode, ErrorInfo = File.Validate(".dxs", CaseSensitive=False)
                if ErrorCode != 0:
                    # NOTE(review): LineNo may be unbound here -- it is only
                    # assigned in the MODULE_TYPE error loop above; confirm
                    # against upstream BaseTools before relying on this path.
                    EdkLogger.error('build', ErrorCode, ExtraData=ErrorInfo,
                                    File=self.MetaFile, Line=LineNo)
                if self.Sources == None:
                    self._Sources = []
                self._Sources.append(File)
        else:
            if not self._ComponentType:
                EdkLogger.error("build", ATTRIBUTE_NOT_AVAILABLE,
                                "COMPONENT_TYPE is not given", File=self.MetaFile)
            self._BuildType = self._ComponentType.upper()
            if self._ComponentType in self._MODULE_TYPE_:
                self._ModuleType = self._MODULE_TYPE_[self._ComponentType]
            if self._ComponentType == 'LIBRARY':
                self._LibraryClass = [LibraryClassObject(self._BaseName, SUP_MODULE_LIST)]
            # make use some [nmake] section macros
            Macros = self._Macros
            Macros["EDK_SOURCE"] = GlobalData.gEcpSource
            Macros['PROCESSOR'] = self._Arch
            RecordList = self._RawData[MODEL_META_DATA_NMAKE, self._Arch, self._Platform]
            for Name,Value,Dummy,Arch,Platform,ID,LineNo in RecordList:
                Value = ReplaceMacro(Value, Macros, True)
                if Name == "IMAGE_ENTRY_POINT":
                    if self._ModuleEntryPointList == None:
                        self._ModuleEntryPointList = []
                    self._ModuleEntryPointList.append(Value)
                elif Name == "DPX_SOURCE":
                    File = PathClass(NormPath(Value), self._ModuleDir, Arch=self._Arch)
                    # check the file validation
                    ErrorCode, ErrorInfo = File.Validate(".dxs", CaseSensitive=False)
                    if ErrorCode != 0:
                        EdkLogger.error('build', ErrorCode, ExtraData=ErrorInfo,
                                        File=self.MetaFile, Line=LineNo)
                    if self.Sources == None:
                        self._Sources = []
                    self._Sources.append(File)
                else:
                    ToolList = self._NMAKE_FLAG_PATTERN_.findall(Name)
                    # NOTE(review): 'len(ToolList) == 0 or len(ToolList) != 1'
                    # reduces to 'len(ToolList) != 1'
                    if len(ToolList) == 0 or len(ToolList) != 1:
                        pass
#                        EdkLogger.warn("build", "Don't know how to do with macro [%s]" % Name,
#                                       File=self.MetaFile, Line=LineNo)
                    else:
                        if self._BuildOptions == None:
                            self._BuildOptions = sdict()
                        # map old Edk tool names (C/LIB/LINK) to new codes
                        if ToolList[0] in self._TOOL_CODE_:
                            Tool = self._TOOL_CODE_[ToolList[0]]
                        else:
                            Tool = ToolList[0]
                        ToolChain = "*_*_*_%s_FLAGS" % Tool
                        ToolChainFamily = 'MSFT'    # Edk.x only support MSFT tool chain
                        #ignore not replaced macros in value
                        ValueList = GetSplitList(' ' + Value, '/D')
                        Dummy = ValueList[0]
                        for Index in range(1, len(ValueList)):
                            if ValueList[Index][-1] == '=' or ValueList[Index] == '':
                                continue
                            Dummy = Dummy + ' /D ' + ValueList[Index]
                        Value = Dummy.strip()
                        if (ToolChainFamily, ToolChain) not in self._BuildOptions:
                            self._BuildOptions[ToolChainFamily, ToolChain] = Value
                        else:
                            OptionString = self._BuildOptions[ToolChainFamily, ToolChain]
                            self._BuildOptions[ToolChainFamily, ToolChain] = OptionString + " " + Value
        # set _Header to non-None in order to avoid database re-querying
        self._Header_ = 'DUMMY'
## Retrieve file version
def _GetInfVersion(self):
if self._AutoGenVersion == None:
RecordList = self._RawData[MODEL_META_DATA_HEADER, self._Arch, self._Platform]
for Record in RecordList:
if Record[1] == TAB_INF_DEFINES_INF_VERSION:
self._AutoGenVersion = int(Record[2], 0)
break
if self._AutoGenVersion == None:
self._AutoGenVersion = 0x00010000
return self._AutoGenVersion
## Retrieve BASE_NAME
def _GetBaseName(self):
if self._BaseName == None:
if self._Header_ == None:
self._GetHeaderInfo()
if self._BaseName == None:
EdkLogger.error('build', ATTRIBUTE_NOT_AVAILABLE, "No BASE_NAME name", File=self.MetaFile)
return self._BaseName
## Retrieve DxsFile
def _GetDxsFile(self):
if self._DxsFile == None:
if self._Header_ == None:
self._GetHeaderInfo()
if self._DxsFile == None:
self._DxsFile = ''
return self._DxsFile
## Retrieve MODULE_TYPE
def _GetModuleType(self):
if self._ModuleType == None:
if self._Header_ == None:
self._GetHeaderInfo()
if self._ModuleType == None:
self._ModuleType = 'BASE'
if self._ModuleType not in SUP_MODULE_LIST:
self._ModuleType = "USER_DEFINED"
return self._ModuleType
## Retrieve COMPONENT_TYPE
def _GetComponentType(self):
if self._ComponentType == None:
if self._Header_ == None:
self._GetHeaderInfo()
if self._ComponentType == None:
self._ComponentType = 'USER_DEFINED'
return self._ComponentType
## Retrieve "BUILD_TYPE"
def _GetBuildType(self):
if self._BuildType == None:
if self._Header_ == None:
self._GetHeaderInfo()
if not self._BuildType:
self._BuildType = "BASE"
return self._BuildType
## Retrieve file guid
def _GetFileGuid(self):
if self._Guid == None:
if self._Header_ == None:
self._GetHeaderInfo()
if self._Guid == None:
self._Guid = '00000000-0000-0000-000000000000'
return self._Guid
## Retrieve module version
def _GetVersion(self):
if self._Version == None:
if self._Header_ == None:
self._GetHeaderInfo()
if self._Version == None:
self._Version = '0.0'
return self._Version
## Retrieve PCD_IS_DRIVER
def _GetPcdIsDriver(self):
if self._PcdIsDriver == None:
if self._Header_ == None:
self._GetHeaderInfo()
if self._PcdIsDriver == None:
self._PcdIsDriver = ''
return self._PcdIsDriver
## Retrieve SHADOW
def _GetShadow(self):
if self._Shadow == None:
if self._Header_ == None:
self._GetHeaderInfo()
if self._Shadow != None and self._Shadow.upper() == 'TRUE':
self._Shadow = True
else:
self._Shadow = False
return self._Shadow
## Retrieve CUSTOM_MAKEFILE
def _GetMakefile(self):
if self._CustomMakefile == None:
if self._Header_ == None:
self._GetHeaderInfo()
if self._CustomMakefile == None:
self._CustomMakefile = {}
return self._CustomMakefile
## Retrieve EFI_SPECIFICATION_VERSION
def _GetSpec(self):
if self._Specification == None:
if self._Header_ == None:
self._GetHeaderInfo()
if self._Specification == None:
self._Specification = {}
return self._Specification
## Retrieve LIBRARY_CLASS
def _GetLibraryClass(self):
if self._LibraryClass == None:
if self._Header_ == None:
self._GetHeaderInfo()
if self._LibraryClass == None:
self._LibraryClass = []
return self._LibraryClass
## Retrieve ENTRY_POINT
def _GetEntryPoint(self):
if self._ModuleEntryPointList == None:
if self._Header_ == None:
self._GetHeaderInfo()
if self._ModuleEntryPointList == None:
self._ModuleEntryPointList = []
return self._ModuleEntryPointList
## Retrieve UNLOAD_IMAGE
def _GetUnloadImage(self):
if self._ModuleUnloadImageList == None:
if self._Header_ == None:
self._GetHeaderInfo()
if self._ModuleUnloadImageList == None:
self._ModuleUnloadImageList = []
return self._ModuleUnloadImageList
## Retrieve CONSTRUCTOR
def _GetConstructor(self):
if self._ConstructorList == None:
if self._Header_ == None:
self._GetHeaderInfo()
if self._ConstructorList == None:
self._ConstructorList = []
return self._ConstructorList
## Retrieve DESTRUCTOR
def _GetDestructor(self):
if self._DestructorList == None:
if self._Header_ == None:
self._GetHeaderInfo()
if self._DestructorList == None:
self._DestructorList = []
return self._DestructorList
## Retrieve definies other than above ones
def _GetDefines(self):
if self._Defs == None:
if self._Header_ == None:
self._GetHeaderInfo()
if self._Defs == None:
self._Defs = sdict()
return self._Defs
    ## Retrieve binary files
    def _GetBinaryFiles(self):
        """Lazily parse the [Binaries] section into validated PathClass objects.

        Each record carries (FileType, Path, Target|FeatureFlag, ..., LineNo);
        a missing file is a fatal build error.
        """
        if self._Binaries == None:
            self._Binaries = []
            RecordList = self._RawData[MODEL_EFI_BINARY_FILE, self._Arch, self._Platform]
            Macros = self._Macros
            Macros["EDK_SOURCE"] = GlobalData.gEcpSource
            Macros['PROCESSOR'] = self._Arch
            for Record in RecordList:
                FileType = Record[0]
                LineNo = Record[-1]
                Target = 'COMMON'
                FeatureFlag = []
                if Record[2]:
                    TokenList = GetSplitValueList(Record[2], TAB_VALUE_SPLIT)
                    if TokenList:
                        Target = TokenList[0]
                    if len(TokenList) > 1:
                        # NOTE(review): FeatureFlag is assigned but never used below,
                        # and Record[1:] (not TokenList[1:]) looks suspicious — confirm
                        # against upstream BaseTools before relying on it.
                        FeatureFlag = Record[1:]
                File = PathClass(NormPath(Record[1], Macros), self._ModuleDir, '', FileType, True, self._Arch, '', Target)
                # check the file validation
                ErrorCode, ErrorInfo = File.Validate()
                if ErrorCode != 0:
                    EdkLogger.error('build', ErrorCode, ExtraData=ErrorInfo, File=self.MetaFile, Line=LineNo)
                self._Binaries.append(File)
        return self._Binaries
    ## Retrieve source files
    def _GetSourceFiles(self):
        """Lazily parse the [Sources] section into validated PathClass objects.

        Old-style (Edk, INF_VERSION < 0x00010005) modules are resolved against
        EDK_SOURCE/PROCESSOR macros with case-insensitive validation; missing
        .h files only produce a warning and are skipped, other missing files
        are fatal.  New-style modules are validated case-sensitively.
        """
        if self._Sources == None:
            self._Sources = []
            RecordList = self._RawData[MODEL_EFI_SOURCE_FILE, self._Arch, self._Platform]
            Macros = self._Macros
            for Record in RecordList:
                LineNo = Record[-1]
                ToolChainFamily = Record[1]
                TagName = Record[2]
                ToolCode = Record[3]
                FeatureFlag = Record[4]
                if self.AutoGenVersion < 0x00010005:
                    Macros["EDK_SOURCE"] = GlobalData.gEcpSource
                    Macros['PROCESSOR'] = self._Arch
                    # old module source files (Edk)
                    File = PathClass(NormPath(Record[0], Macros), self._ModuleDir, self._SourceOverridePath,
                                     '', False, self._Arch, ToolChainFamily, '', TagName, ToolCode)
                    # check the file validation
                    ErrorCode, ErrorInfo = File.Validate(CaseSensitive=False)
                    if ErrorCode != 0:
                        if File.Ext.lower() == '.h':
                            # headers may legitimately live on an include path
                            EdkLogger.warn('build', 'Include file not found', ExtraData=ErrorInfo,
                                           File=self.MetaFile, Line=LineNo)
                            continue
                        else:
                            EdkLogger.error('build', ErrorCode, ExtraData=File, File=self.MetaFile, Line=LineNo)
                else:
                    File = PathClass(NormPath(Record[0], Macros), self._ModuleDir, '',
                                     '', False, self._Arch, ToolChainFamily, '', TagName, ToolCode)
                    # check the file validation
                    ErrorCode, ErrorInfo = File.Validate()
                    if ErrorCode != 0:
                        EdkLogger.error('build', ErrorCode, ExtraData=ErrorInfo, File=self.MetaFile, Line=LineNo)
                self._Sources.append(File)
        return self._Sources
## Retrieve library classes employed by this module
def _GetLibraryClassUses(self):
if self._LibraryClasses == None:
self._LibraryClasses = sdict()
RecordList = self._RawData[MODEL_EFI_LIBRARY_CLASS, self._Arch, self._Platform]
for Record in RecordList:
Lib = Record[0]
Instance = Record[1]
if Instance:
Instance = NormPath(Instance, self._Macros)
self._LibraryClasses[Lib] = Instance
return self._LibraryClasses
## Retrieve library names (for Edk.x style of modules)
def _GetLibraryNames(self):
if self._Libraries == None:
self._Libraries = []
RecordList = self._RawData[MODEL_EFI_LIBRARY_INSTANCE, self._Arch, self._Platform]
for Record in RecordList:
LibraryName = ReplaceMacro(Record[0], self._Macros, False)
# in case of name with '.lib' extension, which is unusual in Edk.x inf
LibraryName = os.path.splitext(LibraryName)[0]
if LibraryName not in self._Libraries:
self._Libraries.append(LibraryName)
return self._Libraries
## Retrieve protocols consumed/produced by this module
def _GetProtocols(self):
if self._Protocols == None:
self._Protocols = sdict()
RecordList = self._RawData[MODEL_EFI_PROTOCOL, self._Arch, self._Platform]
for Record in RecordList:
CName = Record[0]
Value = ProtocolValue(CName, self.Packages)
if Value == None:
PackageList = "\n\t".join([str(P) for P in self.Packages])
EdkLogger.error('build', RESOURCE_NOT_AVAILABLE,
"Value of Protocol [%s] is not found under [Protocols] section in" % CName,
ExtraData=PackageList, File=self.MetaFile, Line=Record[-1])
self._Protocols[CName] = Value
return self._Protocols
## Retrieve PPIs consumed/produced by this module
def _GetPpis(self):
if self._Ppis == None:
self._Ppis = sdict()
RecordList = self._RawData[MODEL_EFI_PPI, self._Arch, self._Platform]
for Record in RecordList:
CName = Record[0]
Value = PpiValue(CName, self.Packages)
if Value == None:
PackageList = "\n\t".join([str(P) for P in self.Packages])
EdkLogger.error('build', RESOURCE_NOT_AVAILABLE,
"Value of PPI [%s] is not found under [Ppis] section in " % CName,
ExtraData=PackageList, File=self.MetaFile, Line=Record[-1])
self._Ppis[CName] = Value
return self._Ppis
## Retrieve GUIDs consumed/produced by this module
def _GetGuids(self):
if self._Guids == None:
self._Guids = sdict()
RecordList = self._RawData[MODEL_EFI_GUID, self._Arch, self._Platform]
for Record in RecordList:
CName = Record[0]
Value = GuidValue(CName, self.Packages)
if Value == None:
PackageList = "\n\t".join([str(P) for P in self.Packages])
EdkLogger.error('build', RESOURCE_NOT_AVAILABLE,
"Value of Guid [%s] is not found under [Guids] section in" % CName,
ExtraData=PackageList, File=self.MetaFile, Line=Record[-1])
self._Guids[CName] = Value
return self._Guids
    ## Retrieve include paths necessary for this module (for Edk.x style of modules)
    def _GetIncludes(self):
        """Lazily build the include path list for Edk.x modules.

        Paths containing EDK_SOURCE are expanded twice — once against the
        ECP source tree and once against the Edk source tree — so the
        compiler can pick whichever header actually exists.  Relative paths
        are resolved against the module directory, others against the
        workspace; paths that do not exist on disk (RealPath -> '') are
        dropped.
        """
        if self._Includes == None:
            self._Includes = []
            if self._SourceOverridePath:
                self._Includes.append(self._SourceOverridePath)
            Macros = self._Macros
            if 'PROCESSOR' in GlobalData.gEdkGlobal.keys():
                Macros['PROCESSOR'] = GlobalData.gEdkGlobal['PROCESSOR']
            else:
                Macros['PROCESSOR'] = self._Arch
            RecordList = self._RawData[MODEL_EFI_INCLUDE, self._Arch, self._Platform]
            for Record in RecordList:
                if Record[0].find('EDK_SOURCE') > -1:
                    Macros['EDK_SOURCE'] = GlobalData.gEcpSource
                    File = NormPath(Record[0], self._Macros)
                    if File[0] == '.':
                        File = os.path.join(self._ModuleDir, File)
                    else:
                        File = os.path.join(GlobalData.gWorkspace, File)
                    File = RealPath(os.path.normpath(File))
                    if File:
                        self._Includes.append(File)
                    #TRICK: let compiler to choose correct header file
                    Macros['EDK_SOURCE'] = GlobalData.gEdkSource
                    File = NormPath(Record[0], self._Macros)
                    if File[0] == '.':
                        File = os.path.join(self._ModuleDir, File)
                    else:
                        File = os.path.join(GlobalData.gWorkspace, File)
                    File = RealPath(os.path.normpath(File))
                    if File:
                        self._Includes.append(File)
                else:
                    File = NormPath(Record[0], Macros)
                    if File[0] == '.':
                        File = os.path.join(self._ModuleDir, File)
                    else:
                        File = os.path.join(GlobalData.gWorkspace, File)
                    File = RealPath(os.path.normpath(File))
                    if File:
                        self._Includes.append(File)
        return self._Includes
## Retrieve packages this module depends on
def _GetPackages(self):
if self._Packages == None:
self._Packages = []
RecordList = self._RawData[MODEL_META_DATA_PACKAGE, self._Arch, self._Platform]
Macros = self._Macros
Macros['EDK_SOURCE'] = GlobalData.gEcpSource
for Record in RecordList:
File = PathClass(NormPath(Record[0], Macros), GlobalData.gWorkspace, Arch=self._Arch)
LineNo = Record[-1]
# check the file validation
ErrorCode, ErrorInfo = File.Validate('.dec')
if ErrorCode != 0:
EdkLogger.error('build', ErrorCode, ExtraData=ErrorInfo, File=self.MetaFile, Line=LineNo)
# parse this package now. we need it to get protocol/ppi/guid value
Package = self._Bdb[File, self._Arch, self._Target, self._Toolchain]
self._Packages.append(Package)
return self._Packages
## Retrieve PCDs used in this module
def _GetPcds(self):
if self._Pcds == None:
self._Pcds = sdict()
self._Pcds.update(self._GetPcd(MODEL_PCD_FIXED_AT_BUILD))
self._Pcds.update(self._GetPcd(MODEL_PCD_PATCHABLE_IN_MODULE))
self._Pcds.update(self._GetPcd(MODEL_PCD_FEATURE_FLAG))
self._Pcds.update(self._GetPcd(MODEL_PCD_DYNAMIC))
self._Pcds.update(self._GetPcd(MODEL_PCD_DYNAMIC_EX))
return self._Pcds
## Retrieve build options specific to this module
def _GetBuildOptions(self):
if self._BuildOptions == None:
self._BuildOptions = sdict()
RecordList = self._RawData[MODEL_META_DATA_BUILD_OPTION, self._Arch, self._Platform]
for Record in RecordList:
ToolChainFamily = Record[0]
ToolChain = Record[1]
Option = Record[2]
if (ToolChainFamily, ToolChain) not in self._BuildOptions:
self._BuildOptions[ToolChainFamily, ToolChain] = Option
else:
# concatenate the option string if they're for the same tool
OptionString = self._BuildOptions[ToolChainFamily, ToolChain]
self._BuildOptions[ToolChainFamily, ToolChain] = OptionString + " " + Option
return self._BuildOptions
    ## Retrieve dependency expression
    def _GetDepex(self):
        """Lazily build the [Depex] token lists keyed by (Arch, ModuleType).

        Binaries-only modules skip [Depex] entirely.  Driver-type modules
        without any depex (and without library classes that could supply one)
        are a fatal error.  Tokens are resolved, in order, as depex opcodes,
        module .inf references (replaced by the module GUID), or
        protocol/PPI/GUID C names (replaced by their GUID values).
        """
        if self._Depex == None:
            self._Depex = tdict(False, 2)
            RecordList = self._RawData[MODEL_EFI_DEPEX, self._Arch]
            # If the module has only Binaries and no Sources, then ignore [Depex]
            if self.Sources == None or self.Sources == []:
                if self.Binaries != None and self.Binaries != []:
                    return self._Depex
            # PEIM and DXE drivers must have a valid [Depex] section
            if len(self.LibraryClass) == 0 and len(RecordList) == 0:
                if self.ModuleType == 'DXE_DRIVER' or self.ModuleType == 'PEIM' or self.ModuleType == 'DXE_SMM_DRIVER' or \
                    self.ModuleType == 'DXE_SAL_DRIVER' or self.ModuleType == 'DXE_RUNTIME_DRIVER':
                    EdkLogger.error('build', RESOURCE_NOT_AVAILABLE, "No [Depex] section or no valid expression in [Depex] section for [%s] module" \
                                    % self.ModuleType, File=self.MetaFile)
            Depex = sdict()
            for Record in RecordList:
                DepexStr = ReplaceMacro(Record[0], self._Macros, False)
                Arch = Record[3]
                ModuleType = Record[4]
                TokenList = DepexStr.split()
                if (Arch, ModuleType) not in Depex:
                    Depex[Arch, ModuleType] = []
                DepexList = Depex[Arch, ModuleType]
                for Token in TokenList:
                    if Token in DEPEX_SUPPORTED_OPCODE:
                        DepexList.append(Token)
                    elif Token.endswith(".inf"):  # module file name
                        ModuleFile = os.path.normpath(Token)
                        Module = self.BuildDatabase[ModuleFile]
                        if Module == None:
                            EdkLogger.error('build', RESOURCE_NOT_AVAILABLE, "Module is not found in active platform",
                                            ExtraData=Token, File=self.MetaFile, Line=Record[-1])
                        DepexList.append(Module.Guid)
                    else:
                        # get the GUID value now
                        Value = ProtocolValue(Token, self.Packages)
                        if Value == None:
                            Value = PpiValue(Token, self.Packages)
                            if Value == None:
                                Value = GuidValue(Token, self.Packages)
                        if Value == None:
                            PackageList = "\n\t".join([str(P) for P in self.Packages])
                            EdkLogger.error('build', RESOURCE_NOT_AVAILABLE,
                                            "Value of [%s] is not found in" % Token,
                                            ExtraData=PackageList, File=self.MetaFile, Line=Record[-1])
                        DepexList.append(Value)
            for Arch, ModuleType in Depex:
                self._Depex[Arch, ModuleType] = Depex[Arch, ModuleType]
        return self._Depex
## Retrieve depedency expression
def _GetDepexExpression(self):
if self._DepexExpression == None:
self._DepexExpression = tdict(False, 2)
RecordList = self._RawData[MODEL_EFI_DEPEX, self._Arch]
DepexExpression = sdict()
for Record in RecordList:
DepexStr = ReplaceMacro(Record[0], self._Macros, False)
Arch = Record[3]
ModuleType = Record[4]
TokenList = DepexStr.split()
if (Arch, ModuleType) not in DepexExpression:
DepexExpression[Arch, ModuleType] = ''
for Token in TokenList:
DepexExpression[Arch, ModuleType] = DepexExpression[Arch, ModuleType] + Token.strip() + ' '
for Arch, ModuleType in DepexExpression:
self._DepexExpression[Arch, ModuleType] = DepexExpression[Arch, ModuleType]
return self._DepexExpression
    ## Retrieve PCD for given type
    def _GetPcd(self, Type):
        """Collect the module's PCDs of access type *Type* and resolve each one
        (type, token value, datum type, default) against the declaring package.

        Returns an sdict keyed by (PcdCName, TokenSpaceGuid).  'Dynamic' PCDs
        in an INF are pending: their concrete type is taken from the first
        matching declaration found in the package, in the order FixedAtBuild,
        PatchableInModule, FeatureFlag, Dynamic, DynamicEx.  A PCD that no
        dependent package declares, or whose token value is missing/malformed,
        is a fatal build error.
        """
        Pcds = sdict()
        PcdDict = tdict(True, 4)
        PcdList = []
        RecordList = self._RawData[Type, self._Arch, self._Platform]
        for TokenSpaceGuid, PcdCName, Setting, Arch, Platform, Dummy1, LineNo in RecordList:
            PcdDict[Arch, Platform, PcdCName, TokenSpaceGuid] = (Setting, LineNo)
            PcdList.append((PcdCName, TokenSpaceGuid))
            # get the guid value
            if TokenSpaceGuid not in self.Guids:
                Value = GuidValue(TokenSpaceGuid, self.Packages)
                if Value == None:
                    PackageList = "\n\t".join([str(P) for P in self.Packages])
                    EdkLogger.error('build', RESOURCE_NOT_AVAILABLE,
                                    "Value of Guid [%s] is not found under [Guids] section in" % TokenSpaceGuid,
                                    ExtraData=PackageList, File=self.MetaFile, Line=LineNo)
                self.Guids[TokenSpaceGuid] = Value
        # resolve PCD type, value, datum info, etc. by getting its definition from package
        for PcdCName, TokenSpaceGuid in PcdList:
            Setting, LineNo = PcdDict[self._Arch, self.Platform, PcdCName, TokenSpaceGuid]
            if Setting == None:
                continue
            ValueList = AnalyzePcdData(Setting)
            DefaultValue = ValueList[0]
            Pcd = PcdClassObject(
                    PcdCName,
                    TokenSpaceGuid,
                    '',
                    '',
                    DefaultValue,
                    '',
                    '',
                    {},
                    False,
                    self.Guids[TokenSpaceGuid]
                    )
            # get necessary info from package declaring this PCD
            for Package in self.Packages:
                #
                # 'dynamic' in INF means its type is determined by platform;
                # if platform doesn't give its type, use 'lowest' one in the
                # following order, if any
                #
                #   "FixedAtBuild", "PatchableInModule", "FeatureFlag", "Dynamic", "DynamicEx"
                #
                PcdType = self._PCD_TYPE_STRING_[Type]
                if Type == MODEL_PCD_DYNAMIC:
                    Pcd.Pending = True
                    for T in ["FixedAtBuild", "PatchableInModule", "FeatureFlag", "Dynamic", "DynamicEx"]:
                        if (PcdCName, TokenSpaceGuid, T) in Package.Pcds:
                            PcdType = T
                            break
                else:
                    Pcd.Pending = False
                if (PcdCName, TokenSpaceGuid, PcdType) in Package.Pcds:
                    PcdInPackage = Package.Pcds[PcdCName, TokenSpaceGuid, PcdType]
                    Pcd.Type = PcdType
                    Pcd.TokenValue = PcdInPackage.TokenValue
                    #
                    # Check whether the token value exist or not.
                    #
                    if Pcd.TokenValue == None or Pcd.TokenValue == "":
                        EdkLogger.error(
                                'build',
                                FORMAT_INVALID,
                                "No TokenValue for PCD [%s.%s] in [%s]!" % (TokenSpaceGuid, PcdCName, str(Package)),
                                File =self.MetaFile, Line=LineNo,
                                ExtraData=None
                                )
                    #
                    # Check hexadecimal token value length and format.
                    #
                    ReIsValidPcdTokenValue = re.compile(r"^[0][x|X][0]*[0-9a-fA-F]{1,8}$", re.DOTALL)
                    if Pcd.TokenValue.startswith("0x") or Pcd.TokenValue.startswith("0X"):
                        if ReIsValidPcdTokenValue.match(Pcd.TokenValue) == None:
                            EdkLogger.error(
                                    'build',
                                    FORMAT_INVALID,
                                    "The format of TokenValue [%s] of PCD [%s.%s] in [%s] is invalid:" % (Pcd.TokenValue, TokenSpaceGuid, PcdCName, str(Package)),
                                    File =self.MetaFile, Line=LineNo,
                                    ExtraData=None
                                    )
                    #
                    # Check decimal token value length and format.
                    #
                    else:
                        try:
                            TokenValueInt = int (Pcd.TokenValue, 10)
                            if (TokenValueInt < 0 or TokenValueInt > 4294967295):
                                EdkLogger.error(
                                        'build',
                                        FORMAT_INVALID,
                                        "The format of TokenValue [%s] of PCD [%s.%s] in [%s] is invalid, as a decimal it should between: 0 - 4294967295!"% (Pcd.TokenValue, TokenSpaceGuid, PcdCName, str(Package)),
                                        File =self.MetaFile, Line=LineNo,
                                        ExtraData=None
                                        )
                        except:
                            EdkLogger.error(
                                    'build',
                                    FORMAT_INVALID,
                                    "The format of TokenValue [%s] of PCD [%s.%s] in [%s] is invalid, it should be hexadecimal or decimal!"% (Pcd.TokenValue, TokenSpaceGuid, PcdCName, str(Package)),
                                    File =self.MetaFile, Line=LineNo,
                                    ExtraData=None
                                    )
                    Pcd.DatumType = PcdInPackage.DatumType
                    Pcd.MaxDatumSize = PcdInPackage.MaxDatumSize
                    Pcd.InfDefaultValue = Pcd.DefaultValue
                    if Pcd.DefaultValue in [None, '']:
                        Pcd.DefaultValue = PcdInPackage.DefaultValue
                    break
                else:
                    EdkLogger.error(
                            'build',
                            FORMAT_INVALID,
                            "PCD [%s.%s] in [%s] is not found in dependent packages:" % (TokenSpaceGuid, PcdCName, self.MetaFile),
                            File =self.MetaFile, Line=LineNo,
                            ExtraData="\t%s" % '\n\t'.join([str(P) for P in self.Packages])
                            )
            Pcds[PcdCName, TokenSpaceGuid] = Pcd
        return Pcds
_Macros = property(_GetMacros)
Arch = property(_GetArch, _SetArch)
Platform = property(_GetPlatform, _SetPlatform)
AutoGenVersion = property(_GetInfVersion)
BaseName = property(_GetBaseName)
ModuleType = property(_GetModuleType)
ComponentType = property(_GetComponentType)
BuildType = property(_GetBuildType)
Guid = property(_GetFileGuid)
Version = property(_GetVersion)
PcdIsDriver = property(_GetPcdIsDriver)
Shadow = property(_GetShadow)
CustomMakefile = property(_GetMakefile)
Specification = property(_GetSpec)
LibraryClass = property(_GetLibraryClass)
ModuleEntryPointList = property(_GetEntryPoint)
ModuleUnloadImageList = property(_GetUnloadImage)
ConstructorList = property(_GetConstructor)
DestructorList = property(_GetDestructor)
Defines = property(_GetDefines)
DxsFile = property(_GetDxsFile)
Binaries = property(_GetBinaryFiles)
Sources = property(_GetSourceFiles)
LibraryClasses = property(_GetLibraryClassUses)
Libraries = property(_GetLibraryNames)
Protocols = property(_GetProtocols)
Ppis = property(_GetPpis)
Guids = property(_GetGuids)
Includes = property(_GetIncludes)
Packages = property(_GetPackages)
Pcds = property(_GetPcds)
BuildOptions = property(_GetBuildOptions)
Depex = property(_GetDepex)
DepexExpression = property(_GetDepexExpression)
## Database
#
# This class defines the build database for all modules, packages and platforms.
# It will call the corresponding parser for a given file if it cannot find it in
# the database.
#
#   @param DbPath        Path of database file
#   @param RenewDb=False Create a new database file even if one already exists
#
class WorkspaceDatabase(object):
    # default database file path
    _DB_PATH_ = "Conf/.cache/build.db"
    #
    # internal class used for call corresponding file parser and caching the result
    # to avoid unnecessary re-parsing
    #
    class BuildObjectFactory(object):
        # file extension -> model type
        _FILE_TYPE_ = {
            ".inf" : MODEL_FILE_INF,
            ".dec" : MODEL_FILE_DEC,
            ".dsc" : MODEL_FILE_DSC,
        }
        # file parser
        _FILE_PARSER_ = {
            MODEL_FILE_INF : InfParser,
            MODEL_FILE_DEC : DecParser,
            MODEL_FILE_DSC : DscParser,
        }
        # convert to xxxBuildData object
        _GENERATOR_ = {
            MODEL_FILE_INF : InfBuildData,
            MODEL_FILE_DEC : DecBuildData,
            MODEL_FILE_DSC : DscBuildData,
        }
        # class-level cache, shared by every factory instance
        _CACHE_ = {}    # (FilePath, Arch) : <object>
        # constructor
        def __init__(self, WorkspaceDb):
            self.WorkspaceDb = WorkspaceDb
        # key = (FilePath, Arch=None)
        def __contains__(self, Key):
            FilePath = Key[0]
            if len(Key) > 1:
                Arch = Key[1]
            else:
                Arch = None
            return (FilePath, Arch) in self._CACHE_
        # key = (FilePath, Arch=None, Target=None, Toolchain=None)
        def __getitem__(self, Key):
            FilePath = Key[0]
            KeyLength = len(Key)
            if KeyLength > 1:
                Arch = Key[1]
            else:
                Arch = None
            if KeyLength > 2:
                Target = Key[2]
            else:
                Target = None
            if KeyLength > 3:
                Toolchain = Key[3]
            else:
                Toolchain = None
            # if it's generated before, just return the cached one
            Key = (FilePath, Arch, Target, Toolchain)
            if Key in self._CACHE_:
                return self._CACHE_[Key]
            # check file type
            Ext = FilePath.Type
            if Ext not in self._FILE_TYPE_:
                return None
            FileType = self._FILE_TYPE_[Ext]
            if FileType not in self._GENERATOR_:
                return None
            # get the parser ready for this file
            MetaFile = self._FILE_PARSER_[FileType](
                            FilePath,
                            FileType,
                            MetaFileStorage(self.WorkspaceDb.Cur, FilePath, FileType)
                            )
            # always do post-process, in case of macros change
            MetaFile.DoPostProcess()
            # object the build is based on
            BuildObject = self._GENERATOR_[FileType](
                                FilePath,
                                MetaFile,
                                self,
                                Arch,
                                Target,
                                Toolchain
                                )
            self._CACHE_[Key] = BuildObject
            return BuildObject
    # placeholder for file format conversion
    class TransformObjectFactory:
        def __init__(self, WorkspaceDb):
            self.WorkspaceDb = WorkspaceDb
        # key = FilePath, Arch
        def __getitem__(self, Key):
            pass
    ## Constructor of WorkspaceDatabase
    #
    #   @param DbPath        Path of database file
    #   @param RenewDb=False Create a new database file even if one already exists
    #
    def __init__(self, DbPath, RenewDb=False):
        self._DbClosedFlag = False
        if not DbPath:
            DbPath = os.path.normpath(os.path.join(GlobalData.gWorkspace, self._DB_PATH_))
        # don't create necessary path for db in memory
        if DbPath != ':memory:':
            DbDir = os.path.split(DbPath)[0]
            if not os.path.exists(DbDir):
                os.makedirs(DbDir)
        # remove db file in case inconsistency between db and file in file system
        if self._CheckWhetherDbNeedRenew(RenewDb, DbPath):
            os.remove(DbPath)
        # create db with optimized parameters
        self.Conn = sqlite3.connect(DbPath, isolation_level='DEFERRED')
        self.Conn.execute("PRAGMA synchronous=OFF")
        self.Conn.execute("PRAGMA temp_store=MEMORY")
        self.Conn.execute("PRAGMA count_changes=OFF")
        self.Conn.execute("PRAGMA cache_size=8192")
        #self.Conn.execute("PRAGMA page_size=8192")
        # to avoid non-ascii character conversion issue
        self.Conn.text_factory = str
        self.Cur = self.Conn.cursor()
        # create table for internal uses
        self.TblDataModel = TableDataModel(self.Cur)
        self.TblFile = TableFile(self.Cur)
        self.Platform = None
        # conversion object for build or file format conversion purpose
        self.BuildObject = WorkspaceDatabase.BuildObjectFactory(self)
        self.TransformObject = WorkspaceDatabase.TransformObjectFactory(self)
    ## Check whether workspace database need to be renewed.
    #  The renew reason may be:
    #    1) If user forces renewal;
    #    2) Otherwise,
    #       a) if the last-modified time of any python source is newer than the database file;
    #       b) if the last-modified time of the frozen executable is newer than the database file.
    #
    #   @param force  User force renew database
    #   @param DbPath The absolute path of workspace database file
    #
    #   @return Bool value for whether need renew workspace databse
    #
    def _CheckWhetherDbNeedRenew (self, force, DbPath):
        # if database does not exist, we need do nothing
        if not os.path.exists(DbPath): return False
        # if user force to renew database, then not check whether database is out of date
        if force: return True
        #
        # Check the time of last modified source file or build.exe
        # if is newer than time of database, then database need to be re-created.
        #
        timeOfToolModified = 0
        if hasattr(sys, "frozen"):
            exePath = os.path.abspath(sys.executable)
            timeOfToolModified = os.stat(exePath).st_mtime
        else:
            curPath = os.path.dirname(__file__) # curPath is the path of WorkspaceDatabase.py
            rootPath = os.path.split(curPath)[0] # rootPath is root path of python source, such as /BaseTools/Source/Python
            if rootPath == "" or rootPath == None:
                EdkLogger.verbose("\nFail to find the root path of build.exe or python sources, so can not \
determine whether database file is out of date!\n")
            # walk the root path of source or build's binary to get the time last modified.
            for root, dirs, files in os.walk (rootPath):
                for dir in dirs:
                    # bypass source control folder
                    if dir.lower() in [".svn", "_svn", "cvs"]:
                        dirs.remove(dir)
                for file in files:
                    ext = os.path.splitext(file)[1]
                    if ext.lower() == ".py": # only check .py files
                        fd = os.stat(os.path.join(root, file))
                        if timeOfToolModified < fd.st_mtime:
                            timeOfToolModified = fd.st_mtime
        if timeOfToolModified > os.stat(DbPath).st_mtime:
            EdkLogger.verbose("\nWorkspace database is out of data!")
            return True
        return False
    ## Initialize build database
    def InitDatabase(self):
        EdkLogger.verbose("\nInitialize build database started ...")
        #
        # Create new tables
        #
        self.TblDataModel.Create(False)
        self.TblFile.Create(False)
        #
        # Initialize table DataModel
        #
        self.TblDataModel.InitTable()
        EdkLogger.verbose("Initialize build database ... DONE!")
    ## Query a table
    #
    # @param Table:  The instance of the table to be queried
    #
    def QueryTable(self, Table):
        Table.Query()
    ## Ensure the database is flushed and closed when the object is collected
    def __del__(self):
        self.Close()
    ## Close entire database
    #
    # Commit all first
    # Close the connection and cursor
    #
    def Close(self):
        # idempotent: guarded by _DbClosedFlag so repeated calls are safe
        if not self._DbClosedFlag:
            self.Conn.commit()
            self.Cur.close()
            self.Conn.close()
            self._DbClosedFlag = True
    ## Summarize all packages in the database
    def GetPackageList(self, Platform, Arch, TargetName, ToolChainTag):
        self.Platform = Platform
        PackageList =[]
        Pa = self.BuildObject[self.Platform, 'COMMON']
        #
        # Get Package related to Modules
        #
        for Module in Pa.Modules:
            ModuleObj = self.BuildObject[Module, Arch, TargetName, ToolChainTag]
            for Package in ModuleObj.Packages:
                if Package not in PackageList:
                    PackageList.append(Package)
        #
        # Get Packages related to Libraries
        #
        for Lib in Pa.LibraryInstances:
            LibObj = self.BuildObject[Lib, Arch, TargetName, ToolChainTag]
            for Package in LibObj.Packages:
                if Package not in PackageList:
                    PackageList.append(Package)
        return PackageList
    ## Summarize all platforms in the database
    def _GetPlatformList(self):
        PlatformList = []
        for PlatformFile in self.TblFile.GetFileList(MODEL_FILE_DSC):
            try:
                Platform = self.BuildObject[PathClass(PlatformFile), 'COMMON']
            except:
                # a DSC that fails to parse is simply skipped
                Platform = None
            if Platform != None:
                PlatformList.append(Platform)
        return PlatformList
    PlatformList = property(_GetPlatformList)
##
#
# This acts like the main() function for the script, unless it is 'import'ed into another
# script.
#
# The module is intended to be imported; running it directly is a no-op.
if __name__ == '__main__':
    pass
| svn2github/vbox | src/VBox/Devices/EFI/Firmware/BaseTools/Source/Python/Workspace/WorkspaceDatabase.py | Python | gpl-2.0 | 114,290 |
import sys
import copy
import functools
import datetime
import decimal
from functools import update_wrapper
from inspect import getargspec
from django import forms
from django.utils.encoding import force_unicode
from django.conf import settings
from django.contrib import messages
from django.core.exceptions import ValidationError
from django.core.serializers.json import DjangoJSONEncoder
from django.core.urlresolvers import reverse
from django.http import HttpResponse
from django.template import Context, Template
from django.template.response import TemplateResponse
from django.utils.datastructures import SortedDict
from django.utils.decorators import method_decorator, classonlymethod
from django.utils.encoding import smart_unicode
from django.utils.http import urlencode
from django.utils.itercompat import is_iterable
from django.utils.safestring import mark_safe
from django.utils.text import capfirst
from django.utils.translation import ugettext as _
from django.views.decorators.csrf import csrf_protect
from django.views.generic import View
from xadmin.util import static, json, vendor, sortkeypicker
# Method decorator applying Django's CSRF protection to class-based view methods.
csrf_protect_m = method_decorator(csrf_protect)
class IncorrectPluginArg(Exception):
    """Raised when a plugin filter method returns nothing usable for a parent
    method result it was expected to receive (see ``filter_chain``)."""
    pass
def filter_chain(filters, token, func, *args, **kwargs):
    """Recursively wrap *func* with the plugin filter methods in *filters*.

    *token* is the index of the current filter (walked from the end down to
    -1, at which point the innermost *func* is simply called).  A filter that
    takes only ``self`` may not consume the parent result; one whose second
    argument is named ``__`` receives the parent callable itself, otherwise
    it receives the parent's return value.
    """
    if token == -1:
        return func()
    else:
        def _inner_method():
            fm = filters[token]
            fargs = getargspec(fm)[0]
            if len(fargs) == 1:
                # Only self arg
                result = func()
                if result is None:
                    return fm()
                else:
                    raise IncorrectPluginArg(u'Plugin filter method need a arg to receive parent method result.')
            else:
                return fm(func if fargs[1] == '__' else func(), *args, **kwargs)
        return filter_chain(filters, token - 1, _inner_method, *args, **kwargs)
def filter_hook(func):
    """Decorator marking an admin-view method as a plugin extension point.

    At call time, plugin methods with the same name are collected, sorted by
    their ``priority`` attribute (default 10), and chained around the original
    method via ``filter_chain``.
    """
    tag = func.__name__
    func.__doc__ = "``filter_hook``\n\n" + (func.__doc__ or "")
    @functools.wraps(func)
    def method(self, *args, **kwargs):
        def _inner_method():
            # innermost call: the undecorated view method
            return func(self, *args, **kwargs)
        if self.plugins:
            filters = [(getattr(getattr(p, tag), 'priority', 10), getattr(p, tag))
                       for p in self.plugins if callable(getattr(p, tag, None))]
            filters = [f for p, f in sorted(filters, key=lambda x:x[0])]
            return filter_chain(filters, len(filters) - 1, _inner_method, *args, **kwargs)
        else:
            return _inner_method()
    return method
def inclusion_tag(file_name, context_class=Context, takes_context=False):
    """Decorator factory: render *file_name* with the dict returned by the
    wrapped method and append the result to *nodes*.

    *file_name* may be a Template instance, a template name, or an iterable
    of candidate names.  Selected context flags, ``admin_view`` and the CSRF
    token are propagated into the new template context.
    """
    def wrap(func):
        @functools.wraps(func)
        def method(self, context, nodes, *arg, **kwargs):
            _dict = func(self, context, nodes, *arg, **kwargs)
            # imported lazily to avoid template machinery at module load
            from django.template.loader import get_template, select_template
            if isinstance(file_name, Template):
                t = file_name
            elif not isinstance(file_name, basestring) and is_iterable(file_name):
                t = select_template(file_name)
            else:
                t = get_template(file_name)
            new_context = context_class(_dict, **{
                'autoescape': context.autoescape,
                'current_app': context.current_app,
                'use_l10n': context.use_l10n,
                'use_tz': context.use_tz,
            })
            new_context['admin_view'] = context['admin_view']
            csrf_token = context.get('csrf_token', None)
            if csrf_token is not None:
                new_context['csrf_token'] = csrf_token
            nodes.append(t.render(new_context))
        return method
    return wrap
class JSONEncoder(DjangoJSONEncoder):
    """JSON encoder with fixed-format date/datetime/Decimal output and a
    smart_unicode fallback for anything DjangoJSONEncoder cannot handle."""
    def default(self, o):
        # datetime.datetime is a subclass of datetime.date, so it must be
        # checked first; the original order serialized datetimes date-only,
        # silently dropping the time component.
        if isinstance(o, datetime.datetime):
            return o.strftime('%Y-%m-%d %H:%M:%S')
        elif isinstance(o, datetime.date):
            return o.strftime('%Y-%m-%d')
        elif isinstance(o, decimal.Decimal):
            return str(o)
        else:
            try:
                return super(JSONEncoder, self).default(o)
            except Exception:
                # last resort: string representation
                return smart_unicode(o)
class BaseAdminObject(object):
    """Shared helpers for admin views and plugins: URL reversing, permission
    strings, query-string manipulation and response rendering.

    Subclasses are expected to provide ``self.admin_site``, ``self.request``
    and ``self.user``.
    """

    def get_view(self, view_class, option_class=None, *args, **kwargs):
        """Instantiate *view_class* through the admin site's class factory."""
        opts = kwargs.pop('opts', {})
        return self.admin_site.get_view_class(view_class, option_class, **opts)(self.request, *args, **kwargs)

    def get_model_view(self, view_class, model, *args, **kwargs):
        """Like get_view, but look up the option class registered for *model*."""
        return self.get_view(view_class, self.admin_site._registry.get(model), *args, **kwargs)

    def get_admin_url(self, name, *args, **kwargs):
        """Reverse a site-level admin URL by name."""
        return reverse('%s:%s' % (self.admin_site.app_name, name), args=args, kwargs=kwargs)

    def get_model_url(self, model, name, *args, **kwargs):
        """Reverse a model-level admin URL, e.g. ``app_model_changelist``."""
        return reverse(
            '%s:%s_%s_%s' % (self.admin_site.app_name, model._meta.app_label,
                             model._meta.module_name, name),
            args=args, kwargs=kwargs, current_app=self.admin_site.name)

    def get_model_perm(self, model, name):
        """Build the Django permission string ``app_label.name_modulename``."""
        return '%s.%s_%s' % (model._meta.app_label, name, model._meta.module_name)

    def has_model_perm(self, model, name, user=None):
        """Check *name* permission on *model*; 'view' is implied by 'change'."""
        user = user or self.user
        return user.has_perm(self.get_model_perm(model, name)) or (name == 'view' and self.has_model_perm(model, 'change', user))

    def _filtered_params(self, new_params, remove):
        """Copy request.GET, drop keys starting with any prefix in *remove*,
        then apply *new_params* (a value of None deletes the key).

        Extracted from get_query_string/get_form_params, which previously
        duplicated this logic; uses list(p.keys()) so deleting while
        iterating is safe on Python 3 as well.
        """
        if new_params is None:
            new_params = {}
        if remove is None:
            remove = []
        p = dict(self.request.GET.items()).copy()
        for r in remove:
            for k in list(p.keys()):
                if k.startswith(r):
                    del p[k]
        for k, v in new_params.items():
            if v is None:
                if k in p:
                    del p[k]
            else:
                p[k] = v
        return p

    def get_query_string(self, new_params=None, remove=None):
        """Rebuild the current query string with additions/removals applied."""
        return '?%s' % urlencode(self._filtered_params(new_params, remove))

    def get_form_params(self, new_params=None, remove=None):
        """Render the current GET params as hidden form inputs."""
        p = self._filtered_params(new_params, remove)
        return mark_safe(''.join(
            '<input type="hidden" name="%s" value="%s"/>' % (k, v) for k, v in p.items() if v))

    def render_response(self, content, response_type='json'):
        """Serialize *content*; 'json' yields a JSON HttpResponse, else raw."""
        if response_type == 'json':
            response = HttpResponse(mimetype="application/json; charset=UTF-8")
            response.write(
                json.dumps(content, cls=JSONEncoder, ensure_ascii=False))
            return response
        return HttpResponse(content)

    def template_response(self, template, context):
        """Return a lazy TemplateResponse bound to this request."""
        return TemplateResponse(self.request, template, context, current_app=self.admin_site.name)

    def message_user(self, message, level='info'):
        """
        Send a message to the user. The default implementation
        posts a message using the django.contrib.messages backend.
        """
        # Only dispatch when the backend exposes the requested level.
        if hasattr(messages, level) and callable(getattr(messages, level)):
            getattr(messages, level)(self.request, message)

    def static(self, path):
        return static(path)

    def vendor(self, *tags):
        return vendor(*tags)
class BaseAdminPlugin(BaseAdminObject):
    """Base class for xadmin plugins: binds the plugin to its owning view
    and mirrors the view's model/meta when the view has one."""

    def __init__(self, admin_view):
        view = admin_view
        self.admin_view = view
        self.admin_site = view.admin_site

        # Model-bound views expose their model to the plugin as well.
        if hasattr(view, 'model'):
            self.model = view.model
            self.opts = view.model._meta

    def init_request(self, *args, **kwargs):
        """Hook called per request; return False to deactivate the plugin."""
        pass
class BaseAdminView(BaseAdminObject, View):
    """ Base Admin view, support some comm attrs."""

    # Template every admin page ultimately extends.
    base_template = 'xadmin/base.html'
    # Whether site-level access is required to reach this view.
    need_site_permission = True

    def __init__(self, request, *args, **kwargs):
        # Cache request facts used throughout the view's lifetime.
        self.request = request
        self.request_method = request.method.lower()
        self.user = request.user

        # Instantiate plugin classes attached to this view class.
        self.base_plugins = [p(self) for p in getattr(self,
                                                      "plugin_classes", [])]

        self.args = args
        self.kwargs = kwargs
        # Plugins must initialize before the view's own request setup.
        self.init_plugin(*args, **kwargs)
        self.init_request(*args, **kwargs)

    @classonlymethod
    def as_view(cls):
        # Unlike Django's View.as_view, a fresh view instance is built per
        # request with the request passed to __init__.
        def view(request, *args, **kwargs):
            self = cls(request, *args, **kwargs)

            # HEAD falls back to GET when not explicitly defined.
            if hasattr(self, 'get') and not hasattr(self, 'head'):
                self.head = self.get

            if self.request_method in self.http_method_names:
                handler = getattr(
                    self, self.request_method, self.http_method_not_allowed)
            else:
                handler = self.http_method_not_allowed

            return handler(request, *args, **kwargs)

        # take name and docstring from class
        update_wrapper(view, cls, updated=())
        view.need_site_permission = cls.need_site_permission

        return view

    def init_request(self, *args, **kwargs):
        # Hook for subclasses; called after plugins are initialized.
        pass

    def init_plugin(self, *args, **kwargs):
        # Keep only the plugins whose init_request does not return False.
        plugins = []
        for p in self.base_plugins:
            p.request = self.request
            p.user = self.user
            p.args = self.args
            p.kwargs = self.kwargs
            result = p.init_request(*args, **kwargs)
            if result is not False:
                plugins.append(p)
        self.plugins = plugins

    @filter_hook
    def get_context(self):
        # Base template context; extended by subclasses and plugins.
        return {'admin_view': self, 'media': self.media, 'base_template': self.base_template}

    @property
    def media(self):
        return self.get_media()

    @filter_hook
    def get_media(self):
        # Plugins contribute CSS/JS by filtering this empty Media object.
        return forms.Media()
class CommAdminView(BaseAdminView):
    """Admin view providing the common site chrome: navigation menu,
    breadcrumbs and the site title."""

    base_template = 'xadmin/base_site.html'
    menu_template = 'xadmin/includes/sitemenu_default.html'

    site_title = None

    # Per-model icon overrides, keyed by model class.
    global_models_icon = {}
    default_model_icon = None
    # App-level presentation overrides, keyed by lower-cased app label.
    apps_label_title = {}
    apps_icons = {}
    apps_label_order = {}

    def get_site_menu(self):
        # Hook: return a hand-written menu structure, or None to rely on
        # the automatically generated per-model menu.
        return None

    @filter_hook
    def get_nav_menu(self):
        """Build the navigation menu: the hand-written site menu first,
        then one auto-generated entry per registered model, grouped by app."""
        site_menu = list(self.get_site_menu() or [])
        had_urls = []

        def get_url(menu, had_urls):
            # Collect every URL already present in the hand-written menu so
            # auto-generated entries do not duplicate them.
            if 'url' in menu:
                had_urls.append(menu['url'])
            if 'menus' in menu:
                for m in menu['menus']:
                    get_url(m, had_urls)
        get_url({'menus': site_menu}, had_urls)

        nav_menu = SortedDict()

        for model, model_admin in self.admin_site._registry.items():
            if getattr(model_admin, 'hidden_menu', False):
                continue
            app_label = model._meta.app_label
            app_icon = None
            model_dict = {
                'title': unicode(capfirst(model._meta.verbose_name_plural)),
                'url': self.get_model_url(model, "changelist"),
                'icon': self.get_model_icon(model),
                'perm': self.get_model_perm(model, 'view'),
                'order': model_admin.order,
            }
            if model_dict['url'] in had_urls:
                continue

            app_key = "app:%s" % app_label
            if app_key in nav_menu:
                nav_menu[app_key]['menus'].append(model_dict)
            else:
                # Find app title
                app_title = unicode(app_label.title())
                if app_label.lower() in self.apps_label_title:
                    app_title = self.apps_label_title[app_label.lower()]
                else:
                    # Fall back to the app module's own verbose_name/app_title.
                    mods = model.__module__.split('.')
                    if len(mods) > 1:
                        mod = '.'.join(mods[0:-1])
                        if mod in sys.modules:
                            mod = sys.modules[mod]
                            if 'verbose_name' in dir(mod):
                                app_title = getattr(mod, 'verbose_name')
                            elif 'app_title' in dir(mod):
                                app_title = getattr(mod, 'app_title')
                #find app icon
                if app_label.lower() in self.apps_icons:
                    app_icon = self.apps_icons[app_label.lower()]
                #find app order
                app_order = None
                if app_label in self.apps_label_order:
                    app_order = self.apps_label_order[app_label]

                nav_menu[app_key] = {
                    'title': app_title,
                    'menus': [model_dict],
                    'order': app_order
                }

            # Use the first available icon/url as the app-level defaults.
            app_menu = nav_menu[app_key]
            if app_icon:
                app_menu['first_icon'] = app_icon
            elif ('first_icon' not in app_menu or
                    app_menu['first_icon'] == self.default_model_icon) and model_dict.get('icon'):
                app_menu['first_icon'] = model_dict['icon']

            if 'first_url' not in app_menu and model_dict.get('url'):
                app_menu['first_url'] = model_dict['url']

        for menu in nav_menu.values():
            menu['menus'].sort(key=sortkeypicker(['order', 'title']))

        nav_menu = nav_menu.values()
        nav_menu.sort(key=lambda x: (x['order'], x['title']))

        site_menu.extend(nav_menu)

        return site_menu

    @filter_hook
    def get_context(self):
        context = super(CommAdminView, self).get_context()

        # Outside DEBUG, the permission-filtered menu is cached per session.
        if not settings.DEBUG and 'nav_menu' in self.request.session:
            nav_menu = json.loads(self.request.session['nav_menu'])
        else:
            menus = copy.copy(self.get_nav_menu())

            def check_menu_permission(item):
                # 'perm' may be absent (always visible), a callable, the
                # literal 'super', or a Django permission string.
                need_perm = item.pop('perm', None)
                if need_perm is None:
                    return True
                elif callable(need_perm):
                    return need_perm(self.user)
                elif need_perm == 'super':
                    return self.user.is_superuser
                else:
                    return self.user.has_perm(need_perm)

            def filter_item(item):
                if 'menus' in item:
                    before_filter_length = len(item['menus'])
                    item['menus'] = [filter_item(
                        i) for i in item['menus'] if check_menu_permission(i)]
                    after_filter_length = len(item['menus'])
                    # if user have no access to all submenus, this menu item should be removed
                    if after_filter_length == 0 and before_filter_length > 0:
                        return None
                return item

            nav_menu = [filter_item(item) for item in menus if check_menu_permission(item)]
            nav_menu = filter(lambda x:x, nav_menu)

            if not settings.DEBUG:
                self.request.session['nav_menu'] = json.dumps(nav_menu)
                self.request.session.modified = True

        def check_selected(menu, path):
            # Mark the menu chain matching the current request path.
            selected = False

            if 'url' in menu:
                # Compare without the query string, if any.
                chop_index = menu['url'].find('?')
                if chop_index == -1:
                    selected = path.startswith(menu['url'])
                else:
                    selected = path.startswith(menu['url'][:chop_index])

            if 'menus' in menu:
                for m in menu['menus']:
                    _s = check_selected(m, path)
                    if _s:
                        selected = True

            if selected:
                menu['selected'] = True
            return selected
        for menu in nav_menu:
            check_selected(menu, self.request.path)

        context.update({
            'menu_template': self.menu_template,
            'nav_menu': nav_menu,
            'site_title': self.site_title or _(u'Django Xadmin'),
            'breadcrumbs': self.get_breadcrumb()
        })

        return context

    @filter_hook
    def get_model_icon(self, model):
        # Explicit override first, then the registered admin's model_icon.
        icon = self.global_models_icon.get(model)
        if icon is None and model in self.admin_site._registry:
            icon = getattr(self.admin_site._registry[model],
                           'model_icon', self.default_model_icon)
        return icon

    @filter_hook
    def get_breadcrumb(self):
        # Root breadcrumb; model views append their own entries.
        return [{
            'url': self.get_admin_url('index'),
            'title': _('Home')
        }]
class ModelAdminView(CommAdminView):
    """Base class for admin views bound to a single model class."""

    fields = None
    exclude = None
    ordering = None
    model = None
    # Permission names ('view', 'add', 'change', 'delete') to force-deny
    # regardless of the user's actual Django permissions.
    remove_permissions = []

    def __init__(self, request, *args, **kwargs):
        # Cache model metadata used for URL and permission construction.
        self.opts = self.model._meta
        self.app_label = self.model._meta.app_label
        self.module_name = self.model._meta.module_name
        self.model_info = (self.app_label, self.module_name)

        super(ModelAdminView, self).__init__(request, *args, **kwargs)

    @filter_hook
    def get_context(self):
        new_context = {
            "opts": self.opts,
            "app_label": self.app_label,
            "module_name": self.module_name,
            "verbose_name": force_unicode(self.opts.verbose_name),
            'model_icon': self.get_model_icon(self.model),
        }
        context = super(ModelAdminView, self).get_context()
        context.update(new_context)
        return context

    @filter_hook
    def get_breadcrumb(self):
        # Append a model-level crumb; only link it if the user may view.
        bcs = super(ModelAdminView, self).get_breadcrumb()
        item = {'title': self.opts.verbose_name_plural}
        if self.has_view_permission():
            item['url'] = self.model_admin_url('changelist')
        bcs.append(item)
        return bcs

    @filter_hook
    def get_object(self, object_id):
        """
        Get model object instance by object_id, used for change admin view
        """
        # first get base admin view property queryset, return default model queryset
        queryset = self.queryset()
        model = queryset.model
        try:
            object_id = model._meta.pk.to_python(object_id)
            return queryset.get(pk=object_id)
        except (model.DoesNotExist, ValidationError):
            # Invalid or missing primary key: signal "not found" with None.
            return None

    @filter_hook
    def get_object_url(self, obj):
        # Prefer the edit page; fall back to the read-only detail page.
        if self.has_change_permission(obj):
            return self.model_admin_url("change", getattr(obj, self.opts.pk.attname))
        elif self.has_view_permission(obj):
            return self.model_admin_url("detail", getattr(obj, self.opts.pk.attname))
        else:
            return None

    def model_admin_url(self, name, *args, **kwargs):
        # Reverse a named URL belonging to this model's admin.
        return reverse(
            "%s:%s_%s_%s" % (self.admin_site.app_name, self.opts.app_label,
                             self.module_name, name), args=args, kwargs=kwargs)

    def get_model_perms(self):
        """
        Returns a dict of all perms for this model. This dict has the keys
        ``add``, ``change``, and ``delete`` mapping to the True/False for each
        of those actions.
        """
        return {
            'view': self.has_view_permission(),
            'add': self.has_add_permission(),
            'change': self.has_change_permission(),
            'delete': self.has_delete_permission(),
        }

    def get_template_list(self, template_name):
        # Candidate template paths, most specific (per-model) first.
        opts = self.opts
        return (
            "xadmin/%s/%s/%s" % (
                opts.app_label, opts.object_name.lower(), template_name),
            "xadmin/%s/%s" % (opts.app_label, template_name),
            "xadmin/%s" % template_name,
        )

    def get_ordering(self):
        """
        Hook for specifying field ordering.
        """
        return self.ordering or ()  # otherwise we might try to *None, which is bad ;)

    def queryset(self):
        """
        Returns a QuerySet of all model instances that can be edited by the
        admin site. This is used by changelist_view.
        """
        return self.model._default_manager.get_query_set()

    def has_view_permission(self, obj=None):
        # 'view' is implied by 'change'.
        return ('view' not in self.remove_permissions) and (self.user.has_perm('%s.view_%s' % self.model_info) or \
            self.user.has_perm('%s.change_%s' % self.model_info))

    def has_add_permission(self):
        return ('add' not in self.remove_permissions) and self.user.has_perm('%s.add_%s' % self.model_info)

    def has_change_permission(self, obj=None):
        return ('change' not in self.remove_permissions) and self.user.has_perm('%s.change_%s' % self.model_info)

    def has_delete_permission(self, obj=None):
        return ('delete' not in self.remove_permissions) and self.user.has_perm('%s.delete_%s' % self.model_info)
| cgcgbcbc/django-xadmin | xadmin/views/base.py | Python | bsd-3-clause | 20,760 |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015 Red Hat, Inc.
#
# Authors:
# Thomas Woerner <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import os.path
from firewall.core.prog import runProg
from firewall.core.logger import log
from firewall.functions import tempFile, readfile
from firewall.config import COMMANDS
# Kernel limit on the length of an ipset name.
IPSET_MAXNAMELEN = 32

# Set types this wrapper currently supports.
IPSET_TYPES = [
    # bitmap and set types are currently not supported
    # "bitmap:ip",
    # "bitmap:ip,mac",
    # "bitmap:port",
    # "list:set",
    "hash:ip",
    #"hash:ip,port",
    #"hash:ip,port,ip",
    #"hash:ip,port,net",
    #"hash:ip,mark",
    "hash:net",
    #"hash:net,net",
    #"hash:net,port",
    #"hash:net,port,net",
    #"hash:net,iface",
    "hash:mac",
]
# Options accepted by ``ipset create``, mapped to a human-readable
# description of the value they expect.
IPSET_CREATE_OPTIONS = {
    "family": "inet|inet6",
    "hashsize": "value",
    "maxelem": "value",
    "timeout": "value in secs",
    # "counters": None,
    # "comment": None,
}
class ipset:
    """Thin wrapper around the ipset(8) command line tool."""

    def __init__(self):
        self._command = COMMANDS["ipset"]

    def __run(self, args):
        """Run the ipset binary with *args*; raise ValueError on failure."""
        # convert to string list
        _args = ["%s" % item for item in args]
        log.debug2("%s: %s %s", self.__class__, self._command, " ".join(_args))
        (status, ret) = runProg(self._command, _args)
        if status != 0:
            raise ValueError("'%s %s' failed: %s" % (self._command,
                                                     " ".join(_args), ret))
        return ret

    def check_name(self, name):
        """Validate *name* against the kernel's name-length limit."""
        # NOTE(review): FirewallError and INVALID_NAME are not present in this
        # module's visible import block -- confirm a
        # ``from firewall.errors import *`` (or equivalent) exists upstream,
        # otherwise this line raises NameError instead of FirewallError.
        if len(name) > IPSET_MAXNAMELEN:
            raise FirewallError(INVALID_NAME,
                                "ipset name '%s' is not valid" % name)

    def supported_types(self):
        """Parse ``ipset --help`` and return {type_name: description}."""
        ret = { }
        output = ""
        try:
            output = self.__run(["--help"])
        except ValueError as e:
            log.debug1("ipset error: %s" % e)
        lines = output.splitlines()

        # Everything after the "Supported set types:" banner lists types.
        in_types = False
        for line in lines:
            #print(line)
            if in_types:
                splits = line.strip().split(None, 2)
                ret[splits[0]] = splits[2]
            if line.startswith("Supported set types:"):
                in_types = True
        return ret

    def check_type(self, type_name):
        """Validate *type_name* against the supported IPSET_TYPES."""
        # NOTE(review): same un-imported FirewallError concern as check_name.
        if len(type_name) > IPSET_MAXNAMELEN or type_name not in IPSET_TYPES:
            raise FirewallError(INVALID_TYPE,
                                "ipset type name '%s' is not valid" % type_name)

    def create(self, set_name, type_name, options=None):
        """Create a new set; *options* maps create options to values."""
        self.check_name(set_name)
        self.check_type(type_name)

        args = [ "create", set_name, type_name ]
        if options:
            for k,v in options.items():
                args.append(k)
                if v != "":
                    args.append(v)
        return self.__run(args)

    def destroy(self, set_name):
        """Destroy the named set."""
        self.check_name(set_name)
        return self.__run([ "destroy", set_name ])

    def add(self, set_name, entry, options=None):
        """Add *entry* to *set_name*."""
        args = [ "add", set_name, entry ]
        if options:
            args.append("%s" % " ".join(options))
        return self.__run(args)

    def delete(self, set_name, entry, options=None):
        """Remove *entry* from *set_name*."""
        args = [ "del", set_name, entry ]
        if options:
            args.append("%s" % " ".join(options))
        return self.__run(args)

    def test(self, set_name, entry, options=None):
        """Test whether *entry* is in *set_name* (non-zero exit raises)."""
        args = [ "test", set_name, entry ]
        if options:
            args.append("%s" % " ".join(options))
        return self.__run(args)

    def list(self, set_name=None):
        """List one set, or all sets if *set_name* is None."""
        args = [ "list" ]
        if set_name:
            args.append(set_name)
        return self.__run(args).split()

    def save(self, set_name=None):
        """Dump one set (or all sets) in restore format."""
        args = [ "save" ]
        if set_name:
            args.append(set_name)
        return self.__run(args)

    def restore(self, set_name, type_name, entries,
                create_options=None, entry_options=None):
        """Bulk-create *set_name* and add *entries* via ``ipset restore``.

        The restore script is written to a temporary file which is fed to
        ipset on stdin and removed afterwards.
        """
        self.check_name(set_name)
        self.check_type(type_name)

        temp_file = tempFile()

        # Quote names/entries containing spaces for the restore parser.
        if ' ' in set_name:
            set_name = "'%s'" % set_name
        args = [ "create", set_name, type_name, "-exist" ]
        if create_options:
            for k,v in create_options.items():
                args.append(k)
                if v != "":
                    args.append(v)
        temp_file.write("%s\n" % " ".join(args))

        for entry in entries:
            if ' ' in entry:
                entry = "'%s'" % entry
            if entry_options:
                temp_file.write("add %s %s %s\n" % (set_name, entry,
                                                    " ".join(entry_options)))
            else:
                temp_file.write("add %s %s\n" % (set_name, entry))
        temp_file.close()

        stat = os.stat(temp_file.name)
        log.debug2("%s: %s restore %s", self.__class__, self._command,
                   "%s: %d" % (temp_file.name, stat.st_size))

        args = [ "restore" ]
        (status, ret) = runProg(self._command, args,
                                stdin=temp_file.name)

        if log.getDebugLogLevel() > 2:
            # Log the restore script for debugging.  FIX: read the file once
            # and reuse the lines (previously it was read twice), and catch
            # Exception instead of a bare except that hid real errors.
            try:
                lines = readfile(temp_file.name)
            except Exception:
                pass
            else:
                i = 1
                for line in lines:
                    log.debug3("%8d: %s" % (i, line), nofmt=1, nl=0)
                    if not line.endswith("\n"):
                        log.debug3("", nofmt=1)
                    i += 1

        os.unlink(temp_file.name)

        if status != 0:
            raise ValueError("'%s %s' failed: %s" % (self._command,
                                                     " ".join(args), ret))
        return ret

    def flush(self, set_name=None):
        """Remove all entries from *set_name*, or from every set if None.

        FIX: *set_name* now defaults to None, matching list()/save() and the
        existing ``if set_name:`` guard in the body (backward compatible).
        """
        args = [ "flush" ]
        if set_name:
            args.append(set_name)
        return self.__run(args)

    def rename(self, old_set_name, new_set_name):
        """Rename a set."""
        return self.__run([ "rename", old_set_name, new_set_name ])

    def swap(self, set_name_1, set_name_2):
        """Atomically swap the contents of two sets."""
        return self.__run([ "swap", set_name_1, set_name_2 ])

    def version(self):
        """Return the ipset version string."""
        return self.__run([ "version" ])
def check_ipset_name(ipset):
    """Return True when *ipset* is a valid (length-limited) ipset name."""
    return len(ipset) <= IPSET_MAXNAMELEN
| divereigh/firewalld | src/firewall/core/ipset.py | Python | gpl-2.0 | 6,843 |
"""SCons
The main package for the SCons software construction utility.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "/home/scons/scons/branch.0/baseline/src/engine/SCons/__init__.py 0.96.1.D001 2004/08/23 09:55:29 knight"
__version__ = "0.96.1"
__build__ = "D002"
__buildsys__ = "casablanca"
__date__ = "2004/08/23 09:55:29"
__developer__ = "OpenSG"
| bilke/OpenSG-1.8 | SConsLocal/scons-local-0.96.1/SCons/__init__.py | Python | lgpl-2.1 | 1,456 |
# Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import datetime
import logging
import os
import types
import urlparse
import uuid
import eventlet.event
import murano.common.config as config
import murano.common.exceptions as exceptions
import murano.common.messaging as messaging
import murano.dsl.murano_class as murano_class
import murano.dsl.murano_object as murano_object
import murano.dsl.yaql_expression as yaql_expression
import murano.engine.system.common as common
LOG = logging.getLogger(__name__)
class AgentException(Exception):
    """Raised when the murano agent reports a failed execution plan or command."""
    pass
@murano_class.classname('io.murano.system.Agent')
class Agent(murano_object.MuranoObject):
    """MuranoPL binding that ships execution plans to a murano-agent over
    RabbitMQ and optionally waits for (and unpacks) the agent's reply."""

    def initialize(self, _context, host):
        self._enabled = False
        if config.CONF.engine.disable_murano_agent:
            LOG.debug('Use of murano-agent is disallowed '
                      'by the server configuration')
            return

        self._environment = self._get_environment(_context)
        self._enabled = True
        # One queue per (environment, host) pair.
        self._queue = str('e%s-h%s' % (
            self._environment.object_id, host.object_id)).lower()

    def _get_environment(self, _context):
        """Locate the owning io.murano.Environment object via YAQL."""
        return yaql_expression.YaqlExpression(
            "$host.find('io.murano.Environment').require()"
        ).evaluate(_context)

    @property
    def enabled(self):
        return self._enabled

    def prepare(self):
        # (sjmc7) - turn this into a no-op if agents are disabled
        if config.CONF.engine.disable_murano_agent:
            LOG.debug('Use of murano-agent is disallowed '
                      'by the server configuration')
            return

        with common.create_rmq_client() as client:
            client.declare(self._queue, enable_ha=True, ttl=86400000)

    def queueName(self):
        return self._queue

    def _check_enabled(self):
        """Raise PolicyViolationException if agents are disabled server-side."""
        if config.CONF.engine.disable_murano_agent:
            raise exceptions.PolicyViolationException(
                'Use of murano-agent is disallowed '
                'by the server configuration')

    def _prepare_message(self, template, msg_id):
        msg = messaging.Message()
        msg.body = template
        msg.id = msg_id
        return msg

    def _send(self, template, wait_results, timeout, _context):
        """Send a message over the MQ interface."""
        msg_id = template.get('ID', uuid.uuid4().hex)
        if wait_results:
            # Subscribe before sending so the reply cannot be missed.
            event = eventlet.event.Event()
            listener = self._environment.agentListener
            listener.subscribe(msg_id, event, _context)

        msg = self._prepare_message(template, msg_id)
        with common.create_rmq_client() as client:
            client.send(message=msg, key=self._queue)

        if wait_results:
            try:
                with eventlet.Timeout(timeout):
                    result = event.wait()
            except eventlet.Timeout:
                listener.unsubscribe(msg_id)
                # FIX: added the missing space between 'respond' and 'within'.
                raise exceptions.TimeoutException(
                    'The Agent does not respond '
                    'within {0} seconds'.format(timeout))

            if not result:
                return None

            # Dispatch on the result format version (1.x vs 2.x).
            if result.get('FormatVersion', '1.0.0').startswith('1.'):
                return self._process_v1_result(result)
            else:
                return self._process_v2_result(result)
        else:
            return None

    def call(self, template, resources, _context, timeout=600):
        """Build a plan from *template* and wait for the agent's result."""
        self._check_enabled()
        plan = self.buildExecutionPlan(template, resources)
        return self._send(plan, True, timeout, _context)

    def send(self, template, resources, _context):
        """Build a plan from *template* and send it without waiting."""
        self._check_enabled()
        plan = self.buildExecutionPlan(template, resources)
        return self._send(plan, False, 0, _context)

    def callRaw(self, plan, _context, timeout=600):
        """Send an already-built plan and wait for the result."""
        self._check_enabled()
        return self._send(plan, True, timeout, _context)

    def sendRaw(self, plan, _context):
        """Send an already-built plan without waiting."""
        self._check_enabled()
        return self._send(plan, False, 0, _context)

    def isReady(self, _context, timeout=100):
        """Non-raising readiness probe: True when the agent answers in time."""
        try:
            self.waitReady(_context, timeout)
        except exceptions.TimeoutException:
            return False
        else:
            return True

    def waitReady(self, _context, timeout=100):
        """Block until the agent answers a trivial plan or the timeout hits."""
        self._check_enabled()
        template = {'Body': 'return', 'FormatVersion': '2.0.0', 'Scripts': {}}
        self.call(template, False, _context, timeout)

    def _process_v1_result(self, result):
        """Unpack a v1 (1.x) agent result, raising AgentException on errors."""
        if result['IsException']:
            raise AgentException(dict(self._get_exception_info(
                result.get('Result', [])), source='execution_plan'))
        else:
            results = result.get('Result', [])
            # BUG FIX: the guard must test the extracted list ('results'),
            # not the outer dict ('result'); otherwise an empty result list
            # raised IndexError on results[-1] below.
            if not results:
                return None
            value = results[-1]
            if value['IsException']:
                raise AgentException(dict(self._get_exception_info(
                    value.get('Result', [])), source='command'))
            else:
                return value.get('Result')

    def _process_v2_result(self, result):
        """Unpack a v2 agent result; non-zero ErrorCode raises AgentException."""
        error_code = result.get('ErrorCode', 0)
        if not error_code:
            return result.get('Body')
        else:
            body = result.get('Body') or {}
            err = {
                'message': body.get('Message'),
                'details': body.get('AdditionalInfo'),
                'errorCode': error_code,
                'time': result.get('Time')
            }
            # Remaining body keys are preserved as 'extra' diagnostics.
            for attr in ('Message', 'AdditionalInfo'):
                if attr in body:
                    del body[attr]
            err['extra'] = body if body else None
            raise AgentException(err)

    def _get_array_item(self, array, index):
        """Safe indexed access: None when the array is too short."""
        return array[index] if len(array) > index else None

    def _get_exception_info(self, data):
        """Map the agent's positional exception payload to named fields."""
        data = data or []
        return {
            'type': self._get_array_item(data, 0),
            'message': self._get_array_item(data, 1),
            'command': self._get_array_item(data, 2),
            'details': self._get_array_item(data, 3),
            'timestamp': datetime.datetime.now().isoformat()
        }

    def buildExecutionPlan(self, template, resources):
        """Turn a plan template plus package *resources* into a wire plan."""
        template = copy.deepcopy(template)
        if not isinstance(template, types.DictionaryType):
            raise ValueError('Incorrect execution plan ')
        format_version = template.get('FormatVersion')
        if not format_version or format_version.startswith('1.'):
            return self._build_v1_execution_plan(template, resources)
        else:
            return self._build_v2_execution_plan(template, resources)

    def _build_v1_execution_plan(self, template, resources):
        """v1 plans: inline each script file base64-encoded."""
        scripts_folder = 'scripts'
        script_files = template.get('Scripts', [])
        scripts = []
        for script in script_files:
            script_path = os.path.join(scripts_folder, script)
            scripts.append(resources.string(
                script_path).encode('base64'))
        template['Scripts'] = scripts
        return template

    def _build_v2_execution_plan(self, template, resources):
        """v2 plans: register each referenced file under a generated ID."""
        scripts_folder = 'scripts'
        plan_id = uuid.uuid4().hex
        template['ID'] = plan_id
        if 'Action' not in template:
            template['Action'] = 'Execute'
        if 'Files' not in template:
            template['Files'] = {}

        # Reverse index: file name -> file ID, for already-declared files.
        files = {}
        for file_id, file_descr in template['Files'].items():
            files[file_descr['Name']] = file_id

        for name, script in template.get('Scripts', {}).items():
            if 'EntryPoint' not in script:
                raise ValueError('No entry point in script ' + name)

            if 'Application' in script['Type']:
                script['EntryPoint'] = self._place_file(scripts_folder,
                                                        script['EntryPoint'],
                                                        template, resources,
                                                        files)
            if 'Files' in script:
                for i, file in enumerate(script['Files']):
                    script['Files'][i] = self._place_file(scripts_folder,
                                                          file, template,
                                                          resources,
                                                          files)
        return template

    def _is_url(self, file):
        """True when the file reference is a fully-qualified URL."""
        file = self._get_url(file)
        parts = urlparse.urlsplit(file)
        if not parts.scheme or not parts.netloc:
            return False
        else:
            return True

    def _get_url(self, file):
        # A dict maps a display name to the actual URL/path.
        if isinstance(file, dict):
            return file.values()[0]
        else:
            return file

    def _get_name(self, file):
        """Derive a file's display name from its reference form."""
        if isinstance(file, dict):
            name = file.keys()[0]
        else:
            name = file
        if self._is_url(name):
            # Use the last URL path segment as the name.
            name = name[name.rindex('/') + 1:len(name)]
        elif name.startswith('<') and name.endswith('>'):
            # '<path>' marks base64 content; strip the markers for the name.
            name = name[1: -1]

        return name

    def _get_file_value(self, file):
        if isinstance(file, dict):
            file = file.values()[0]
        return file

    def _get_body(self, file, resources, folder):
        """Read a file's body; '<...>' references are base64-encoded."""
        use_base64 = self._is_base64(file)
        if use_base64 and file.startswith('<') and file.endswith('>'):
            file = file[1: -1]
        body = resources.string(os.path.join(folder, file))
        if use_base64:
            body = body.encode('base64')
        return body

    def _is_base64(self, file):
        return file.startswith('<') and file.endswith('>')

    def _get_body_type(self, file):
        return 'Base64' if self._is_base64(file) else 'Text'

    def _place_file(self, folder, file, template, resources, files):
        """Register *file* in the plan's Files section; return its new ID."""
        file_value = self._get_file_value(file)
        name = self._get_name(file)
        file_id = uuid.uuid4().hex

        if self._is_url(file_value):
            template['Files'][file_id] = self._get_file_des_downloadable(file)
            files[name] = file_id

        else:
            template['Files'][file_id] = self._get_file_description(file,
                                                                    resources,
                                                                    folder)
            files[name] = file_id

        return file_id

    def _get_file_des_downloadable(self, file):
        """Descriptor for a file the agent downloads itself."""
        name = self._get_name(file)
        file = self._get_file_value(file)
        return {
            'Name': str(name),
            'URL': file,
            'Type': 'Downloadable'
        }

    def _get_file_description(self, file, resources, folder):
        """Descriptor for a file whose body is inlined into the plan."""
        name = self._get_name(file)
        file_value = self._get_file_value(file)

        body_type = self._get_body_type(file_value)
        body = self._get_body(file_value, resources, folder)
        return {
            'Name': name,
            'BodyType': body_type,
            'Body': body
        }
| chenyujie/hybrid-murano | murano/engine/system/agent.py | Python | apache-2.0 | 11,603 |
#!/usr/bin/python
#
# bitehist.py Block I/O size histogram.
# For Linux, uses BCC, eBPF. Embedded C.
#
# Written as a basic example of using histograms to show a distribution.
#
# A Ctrl-C will print the gathered histogram then exit.
#
# Copyright (c) 2015 Brendan Gregg.
# Licensed under the Apache License, Version 2.0 (the "License")
#
# 15-Aug-2015 Brendan Gregg Created this.
# 03-Feb-2019 Xiaozhou Liu added linear histogram.
from __future__ import print_function
from bcc import BPF
from time import sleep
# load BPF program
# The embedded C attaches a kprobe to blk_account_io_done and records each
# completed block I/O's size (in KiB) into two in-kernel histograms:
# 'dist' bucketed by log2, 'dist_linear' bucketed linearly.
b = BPF(text="""
#include <uapi/linux/ptrace.h>
#include <linux/blkdev.h>
BPF_HISTOGRAM(dist);
BPF_HISTOGRAM(dist_linear);
int kprobe__blk_account_io_done(struct pt_regs *ctx, struct request *req)
{
dist.increment(bpf_log2l(req->__data_len / 1024));
dist_linear.increment(req->__data_len / 1024);
return 0;
}
""")

# header
print("Tracing... Hit Ctrl-C to end.")

# trace until Ctrl-C
try:
    sleep(99999999)
except KeyboardInterrupt:
    print()

# output: read the kernel-side histogram maps and pretty-print them.
print("log2 histogram")
print("~~~~~~~~~~~~~~")
b["dist"].print_log2_hist("kbytes")
print("\nlinear histogram")
print("~~~~~~~~~~~~~~~~")
b["dist_linear"].print_linear_hist("kbytes")
| tuxology/bcc | examples/tracing/bitehist.py | Python | apache-2.0 | 1,181 |
# vim:ts=4:sw=4:et:
# Copyright 2016-present Facebook, Inc.
# Licensed under the Apache License, Version 2.0
# no unicode literals
from __future__ import absolute_import, division, print_function
import os
import os.path
import signal
import subprocess
import Interrupt
import WatchmanInstance
try:
import unittest2 as unittest
except ImportError:
import unittest
# Root of the watchman source tree; used to locate the in-tree test binary.
WATCHMAN_SRC_DIR = os.environ.get("WATCHMAN_SRC_DIR", os.getcwd())
# Allow the harness to point at a prebuilt cppclient binary; otherwise fall
# back to the default in-tree build location.  (Replaces the needless
# ``x if k in os.environ.keys() else y`` with the idiomatic environ.get.)
TEST_BINARY = os.environ.get(
    "WATCHMAN_CPPCLIENT_BINARY",
    os.path.join(WATCHMAN_SRC_DIR, "tests/integration/cppclient.t"),
)
class TestCppClient(unittest.TestCase):
    """Run the compiled C++ client binary against the shared watchman
    instance and fail with its output if it exits non-zero."""

    @unittest.skipIf(not os.path.isfile(TEST_BINARY), "test binary not built")
    def test_cppclient(self):
        # Point the child at the shared watchman socket via its environment.
        child_env = os.environ.copy()
        child_env["WATCHMAN_SOCK"] = WatchmanInstance.getSharedInstance().getSockPath()

        child = subprocess.Popen(
            TEST_BINARY, env=child_env, stdout=subprocess.PIPE, stderr=subprocess.PIPE
        )
        out, err = child.communicate()
        exit_code = child.poll()

        # A SIGINT death means the whole run was interrupted by the user.
        if exit_code == -signal.SIGINT:
            Interrupt.setInterrupted()
            self.fail("Interrupted by SIGINT")
            return

        if exit_code != 0:
            self.fail(
                "Exit status %d\n%s\n%s\n"
                % (exit_code, out.decode("utf-8"), err.decode("utf-8"))
            )
            return

        self.assertTrue(True, TEST_BINARY)
| wez/watchman | tests/integration/test_cppclient.py | Python | apache-2.0 | 1,486 |
# --*-- coding: utf-8 --*--
from local.manager import Manager
class webdbauthManager(Manager):
    """
    Performs authentication operations against a PostgreSQL relational
    database (login/logout dispatched through the pgdb manager).
    """
    _name = 'webdbauthManager'
    _alias = 'auth'
    _inherit = 'web.db'

    def Login(self, **kwargs):
        # Short-circuit when this session is already authenticated.
        if self._dispatcher._isdbLogged():
            res = [self._dispatcher._db_logged_in, 'Your a logged as %s' % (self._dispatcher._db_logged_kwargs['user'],)]
        else:
            res = self._dispatcher._dispatch(['db.pgdb.login'],kwargs)
            # Persist or clear the session login state based on the result.
            if res[0]:
                self._dispatcher._setdbLogin(kwargs)
            else:
                self._dispatcher._cleardbLogin()
        return res

    def Logout(self):
        if self._dispatcher._db_logged_in:
            # NOTE(review): Login dispatches via self._dispatcher._dispatch(...)
            # while this uses self._dispatch(...) -- confirm Manager defines
            # a _dispatch method, otherwise this is likely a typo.
            return self._dispatch(['db.pgdb.logout'])
        else:
            return [self._dispatcher._db_logged_in, 'Your a as not logged in']

# Instantiate on import so the manager registers itself.
webdbauthManager()
| NikolayChesnokov/webgsrp3 | local/webdbauthmanager.py | Python | agpl-3.0 | 840 |
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import scipy.optimize
# ASSERT_SIZE (defined below) is used to enforce the dimensionality of
# matrices, since NumPy is a bit aggressive about allowing operators over
# non-matching dimensions; save_as_figure renders a matrix to a grayscale
# image file.
def save_as_figure(arr, filepath="output/frame.png"):
array = (arr - np.min(arr))/(np.max(arr)-np.min(arr))
# plt.imshow(array, interpolation='nearest', cmap=plt.cm.gray)
plt.imshow(array, cmap=plt.cm.gray)
plt.savefig(filepath)
print "Saving to ", filepath
def ASSERT_SIZE(matrix, shape):
  """Raise AssertionError unless matrix.shape equals the expected shape.

  NumPy broadcasting silently accepts many mismatched dimensions, so each
  intermediate result is checked explicitly.  (Fixes the "expexted" typo
  in the original error message.)
  """
  if matrix.shape != shape:
    raise AssertionError("Wrong shape: %s expected: %s" %
                         (matrix.shape, shape))
# This wraps the parameters for the sparse autoencoder.
class SparseAutoEncoderOptions:
  """Hyper-parameters for training a sparse autoencoder.

  The defaults are the network parameters specified by Andrew Ng for the
  MNIST data set:
  [[http://ufldl.stanford.edu/wiki/index.php/Exercise:Vectorization]]
  """
  def __init__(self,
               visible_size,
               hidden_size,
               sparsity = 0.1,
               learning_rate = 3e-3,
               beta = 3,
               output_dir = "output",
               max_iterations = 500):
    # visible_size: units in the input/output layer.
    self.visible_size = visible_size
    # hidden_size: units in the hidden layer.
    self.hidden_size = hidden_size
    # sparsity_param (rho): target mean activation of each hidden unit.
    self.sparsity_param = sparsity
    # learning_rate (lambda): weight-decay coefficient in the cost.
    self.learning_rate = learning_rate
    # beta: weight of the sparsity (KL divergence) penalty.
    self.beta = beta
    # output_dir: where progress images are written.
    self.output_dir = output_dir
    # max_iterations: cap on L-BFGS function evaluations.
    self.max_iterations = max_iterations
class SparseAutoEncoderSolution:
  """Container for the learned weights (W1, W2) and biases (b1, b2)."""
  def __init__(self, W1, W2, b1, b2):
    self.W1 = W1
    self.W2 = W2
    self.b1 = b1
    self.b2 = b2
# The SparseAutoEncoder object wraps all the data needed in order to train a
# sparse autoencoder. Its constructor takes a SparseAutoEncoderOptions and a
# v x m matrix where v is the size of the visible layer of the network.
class SparseAutoEncoder:
  """Wraps everything needed to train a sparse autoencoder.

  `options` is a SparseAutoEncoderOptions; `data` is a v x m matrix where
  v is the visible-layer size and m the number of training examples.
  """
  def __init__(self, options, data):
    self.options = options
    self.data = data
    # Counts cost-function evaluations so weight images can be dumped
    # every 100 calls (see sparse_autoencoder).
    self.frame_number = 0

  # Convert the matrices to a flat vector. This is needed by 'fmin_l_bfgs_b'.
  def flatten(self, W1, W2, b1, b2):
    return np.array(np.hstack([W1.ravel('F'), W2.ravel('F'),
                    b1.ravel('F'), b2.ravel('F')]), order='F')

  # Convert the flat vector back to the W1, W2, b1, and b2 matrices.
  def unflatten(self, theta):
    hidden_size = self.options.hidden_size
    visible_size = self.options.visible_size
    hv = hidden_size * visible_size
    W1 = theta[0:hv].reshape([hidden_size, visible_size], order='F')
    W2 = theta[hv:2*hv].reshape([visible_size, hidden_size], order='F')
    b1 = theta[2*hv:2*hv+hidden_size].reshape([hidden_size, 1], order='F')
    b2 = theta[2*hv+hidden_size:].reshape([visible_size, 1], order='F')
    return (W1, W2, b1, b2)

  # Create the random values for the parameters to begin learning.
  def initialize_parameters(self):
    hidden_size = self.options.hidden_size
    visible_size = self.options.visible_size
    # Uniform initialization in [-r, r]; the usual scaling for sigmoid nets.
    r = np.sqrt(6) / np.sqrt(hidden_size + visible_size + 1)
    W1 = np.random.random([hidden_size, visible_size]) * 2 * r - r;
    W2 = np.random.random([visible_size, hidden_size]) * 2 * r - r;
    b1 = np.zeros([hidden_size, 1])
    b2 = np.zeros([visible_size, 1])
    return self.flatten(W1, W2, b1, b2)

  # <div class='math'>1/(1 + e^{-x})</div>
  def sigmoid(self, x):
    return 1.0 / (1.0 + np.exp(-x))

  # ==Forward pass==
  # Note: even though the dimensionality doesn't match because <p>$$b1$$</p>
  # is a vector, numpy will apply b1 to every column.
  def feed_forward(self, x, W1, W2, b1, b2):
    """Return hidden (a2) and output (a3) activations for input x."""
    visible_size = self.options.visible_size
    hidden_size = self.options.hidden_size
    ASSERT_SIZE(W1, (hidden_size, visible_size))
    m = x.shape[1]
    z2 = np.dot(W1, x) + b1
    a2 = self.sigmoid(z2)
    ASSERT_SIZE(a2, (hidden_size, m))
    z3 = np.dot(W2, a2) + b2 # W2 * a2 + b2
    a3 = self.sigmoid(z3)
    ASSERT_SIZE(a3, (visible_size, m))
    return a2, a3

  # Compute the cost function J and the gradient for an input.  Note that this
  # takes a flattened W1, W2, b1, b2 because of fmin_l_bfgs_b.
  def sparse_autoencoder(self, theta):
    visible_size = self.options.visible_size
    hidden_size = self.options.hidden_size
    lamb = self.options.learning_rate
    rho = self.options.sparsity_param
    beta = self.options.beta
    x = self.data
    m = x.shape[1]

    W1, W2, b1, b2 = self.unflatten(theta)

    ASSERT_SIZE(W1, (hidden_size, visible_size))
    ASSERT_SIZE(W2, (visible_size, hidden_size))
    ASSERT_SIZE(b1, (hidden_size, 1))
    ASSERT_SIZE(b2, (visible_size, 1))

    # Dump the weights as images every 100 evaluations to visualize progress.
    if self.frame_number % 100 == 0:
      save_as_figure(W1.T,
                     "%s/w1frame%03d.png" % (self.options.output_dir,
                                             self.frame_number))
      save_as_figure(W2.T,
                     "%s/w2frame%03d.png" % (self.options.output_dir,
                                             self.frame_number))
    self.frame_number += 1

    a2, a3 = self.feed_forward(x, W1, W2, b1, b2)

    # Compute average activation for an edge over all data
    rho_hat = np.mean(a2, 1)[:, np.newaxis]
    ASSERT_SIZE(rho_hat, (hidden_size, 1))
    # KL divergence between target sparsity rho and observed rho_hat.
    kl = rho*np.log(rho/rho_hat) + (1-rho)*np.log((1-rho)/(1-rho_hat))

    # Cost = reconstruction error + weight decay + sparsity penalty.
    cost = 0.5/m * np.sum((a3 - x)**2) + \
           (lamb/2.)*(np.sum(W1**2) + np.sum(W2**2)) + \
           beta*np.sum(kl)

    # We set <span class='math'>y</span> equal to the input since we're learning
    # an identity function
    y = x
    delta3 = -(y - a3) * a3*(1-a3)
    ASSERT_SIZE(delta3, (visible_size, m))

    sparsity = -rho/rho_hat + (1-rho)/(1-rho_hat)
    ASSERT_SIZE(sparsity, (hidden_size, 1))

    delta2 = (np.dot(W2.T, delta3) + beta * sparsity) * a2 * (1-a2)
    ASSERT_SIZE(delta2, (hidden_size, m))

    W2_grad = 1./m * np.dot(delta3, a2.T) + lamb * W2
    ASSERT_SIZE(W2_grad, (visible_size, hidden_size))

    # [:, newaxis] makes this into a matrix
    b2_grad = 1./m * np.sum(delta3, 1)[:, np.newaxis]
    ASSERT_SIZE(b2_grad, (visible_size, 1))

    # sum the rows of delta3 and then mult by 1/m
    W1_grad = 1./m * np.dot(delta2, x.T) + lamb * W1
    ASSERT_SIZE(W1_grad, (hidden_size, visible_size))

    b1_grad = 1./m * np.sum(delta2, 1)[:, np.newaxis]
    ASSERT_SIZE(b1_grad, (hidden_size, 1))

    grad = self.flatten(W1_grad, W2_grad, b1_grad, b2_grad)
    return (cost, grad)

  # Actually run gradient descent. Call mySparseAutoEncoder.learn() to learn
  # the parameters of W1, W2, b1, and b2 for this network and this data.
  def learn(self):
    def f(theta):
      return self.sparse_autoencoder(theta)

    theta = self.initialize_parameters()
    # NOTE(review): same_theta is never used after this point.
    same_theta = theta.copy()

    x, f, d = scipy.optimize.fmin_l_bfgs_b(f, theta,
                                           maxfun= self.options.max_iterations,
                                           iprint=1, m=20)
    W1, W2, b1, b2 = self.unflatten(x)
    save_as_figure(W1.T, "%s/network.png" % self.options.output_dir)
    return SparseAutoEncoderSolution(W1, W2, b1, b2)
| AndreasMadsen/grace | Code/sAe.py | Python | mit | 7,042 |
import sys
sys.path.append('./..')
from gp2kronSum import gp2kronSum
#from gp2kronSumSvd import gp2kronSumSvd
from gp3kronSum import gp3kronSum
from gp3kronSumLR import gp3kronSumLR
from gp2kronSumLR import gp2kronSumLR
| PMBio/GNetLMM | GNetLMM/pycore/mtSet/gp/__init__.py | Python | apache-2.0 | 220 |
# -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014, 2015 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
import logging
from eve.utils import date_to_str, config
import superdesk
from superdesk.celery_task_utils import is_task_running, mark_task_as_not_running
from superdesk.utc import utcnow
from superdesk.metadata.item import ITEM_STATE, CONTENT_STATE
from apps.archive.commands import get_overdue_scheduled_items
logger = logging.getLogger(__name__)
REMOVE_SPIKE_DEFAULT = {'minutes': 30}
UPDATE_OVERDUE_SCHEDULED_DEFAULT = {'minutes': 10}
class UpdateOverdueScheduledPublishedContent(superdesk.Command):
    """
    Finds stories in the published collection whose scheduled publish time
    has passed and flips their state to 'published'.
    """

    def run(self):
        self.update_overdue_scheduled()

    def update_overdue_scheduled(self):
        """
        Updates the overdue scheduled content on published collection.
        """
        logger.info('Updating overdue scheduled content')

        # Cross-process task lock: skip this run if another worker is
        # already processing overdue items.
        if is_task_running("publish", "update_overdue_scheduled", UPDATE_OVERDUE_SCHEDULED_DEFAULT):
            return

        try:
            now = date_to_str(utcnow())
            items = get_overdue_scheduled_items(now, 'published')

            for item in items:
                logger.info('updating overdue scheduled article with id {} and headline {} -- expired on: {} now: {}'.
                            format(item[config.ID_FIELD], item['headline'], item['publish_schedule'], now))

                superdesk.get_resource_service('published').\
                    update_published_items(item['item_id'], ITEM_STATE, CONTENT_STATE.PUBLISHED)
        finally:
            # Always release the task lock, even if the update raised.
            mark_task_as_not_running("publish", "update_overdue_scheduled")

# Register the command with superdesk's CLI.
superdesk.command('publish:remove_overdue_scheduled', UpdateOverdueScheduledPublishedContent())
| sivakuna-aap/superdesk | server/apps/publish/commands.py | Python | agpl-3.0 | 1,987 |
#!/usr/bin/python2.7
"""
Read a MAF from standard input and determine the mean length of ungapped pieces
in each block.
usage: %prog < maf > out
"""
from __future__ import division
import sys
import bx.align.maf
def main():
for m in bx.align.maf.Reader( sys.stdin ):
ungapped_columns = 0
ungapped_runs = 0
in_ungapped = False
for col in m.column_iter():
is_gap = ( '-' in col )
if not is_gap: ungapped_columns += 1
if in_ungapped and is_gap:
ungapped_runs += 1
in_ungapped = not is_gap
if in_ungapped: ungapped_runs += 1
print ungapped_columns / ungapped_runs
if __name__ == "__main__": main()
| poojavade/Genomics_Docker | Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/bx_python-0.7.1-py2.7-linux-x86_64.egg/EGG-INFO/scripts/maf_mean_length_ungapped_piece.py | Python | apache-2.0 | 739 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial migration for the entity test app: creates the
    test models (Account, Competitor, Team, ...) and wires up their
    foreign-key / many-to-many relations."""

    dependencies = [
        ('entity', '0002_entitykind_is_active'),
    ]

    operations = [
        migrations.CreateModel(
            name='Account',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('email', models.CharField(max_length=256)),
                ('is_active', models.BooleanField(default=True)),
                ('is_captain', models.BooleanField(default=False)),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='Competitor',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=64)),
                ('is_active', models.BooleanField(default=True)),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='DummyModel',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('dummy_data', models.CharField(max_length=64)),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='EntityPointer',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('entity', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='entity.Entity')),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='M2mEntity',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='MultiInheritEntity',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('data', models.CharField(max_length=64)),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='PointsToAccount',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('account', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tests.Account')),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='PointsToM2mEntity',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('m2m_entity', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='tests.M2mEntity')),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='Team',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=256)),
                ('is_active', models.BooleanField(default=True)),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='TeamGroup',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=256)),
            ],
            options={
                'abstract': False,
            },
        ),
        # Relations are added after all models exist to avoid forward
        # references between CreateModel operations.
        migrations.AddField(
            model_name='team',
            name='team_group',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tests.TeamGroup', null=True),
        ),
        migrations.AddField(
            model_name='m2mentity',
            name='teams',
            field=models.ManyToManyField(to='tests.Team'),
        ),
        migrations.AddField(
            model_name='account',
            name='competitor',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tests.Competitor', null=True),
        ),
        migrations.AddField(
            model_name='account',
            name='team',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tests.Team', null=True),
        ),
        migrations.AddField(
            model_name='account',
            name='team2',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to='tests.Team', null=True),
        ),
        migrations.AddField(
            model_name='account',
            name='team_group',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tests.TeamGroup', null=True),
        ),
    ]
| wesokes/django-entity | entity/tests/migrations/0001_initial.py | Python | mit | 5,497 |
from distutils.core import setup
# Packaging metadata for the statpipe distribution.
setup(
    name='Statpipe',
    version='0.1.9',
    author='Ben Whalley',
    author_email='[email protected]',
    packages=['statpipe'],
    # Command-line entry points shipped with the package.
    scripts=['bin/statpipe', 'bin/statpipe_image'],
    url='https://github.com/benwhalley/statpipe',
    license='LICENSE.txt',
    description='Pipe stuff to Stata, get results back.',
    long_description=open('README.rst').read(),
    install_requires=['clint', ],
)
| benwhalley/statpipe | setup.py | Python | mit | 448 |
"""
Copyright (c) 2017, University of Southern Denmark
All rights reserved.
This code is licensed under BSD 2-clause license.
See LICENSE file in the project root for license terms.
"""
import unittest
import tempfile
import shutil
import json
import os
import random
import pandas as pd
import numpy as np
from modestpy.estim.ga_parallel.ga_parallel import MODESTGA
from modestpy.utilities.sysarch import get_sys_arch
from modestpy.loginit import config_logger
class TestMODESTGA(unittest.TestCase):
    """Integration tests for the parallel GA estimator (MODESTGA) using the
    bundled Simple2R1C FMU resources."""

    def setUp(self):
        """Locate the platform-specific FMU and load inputs, ideal results
        and parameter specs; create a scratch directory."""
        # Platform (win32, win64, linux32, linux64)
        platform = get_sys_arch()
        assert platform, 'Unsupported platform type!'

        # Temp directory
        self.tmpdir = tempfile.mkdtemp()

        # Parent directory
        parent = os.path.dirname(__file__)

        # Resources
        self.fmu_path = os.path.join(parent, 'resources', 'simple2R1C',
                                     'Simple2R1C_{}.fmu'.format(platform))
        inp_path = os.path.join(parent, 'resources', 'simple2R1C',
                                'inputs.csv')
        ideal_path = os.path.join(parent, 'resources', 'simple2R1C',
                                  'result.csv')
        est_path = os.path.join(parent, 'resources', 'simple2R1C', 'est.json')
        known_path = os.path.join(parent, 'resources', 'simple2R1C',
                                  'known.json')

        # Assert there is an FMU for this platform
        assert os.path.exists(self.fmu_path), \
            "FMU for this platform ({}) doesn't exist.\n".format(platform) + \
            "No such file: {}".format(self.fmu_path)

        self.inp = pd.read_csv(inp_path).set_index('time')
        self.ideal = pd.read_csv(ideal_path).set_index('time')

        with open(est_path) as f:
            self.est = json.load(f)
        with open(known_path) as f:
            self.known = json.load(f)

        # MODESTGA settings
        self.gen = 2
        self.pop = None  # Set individually
        self.trm = None  # Set individually
        self.workers = None  # Set individually

    def tearDown(self):
        """Remove the scratch directory."""
        shutil.rmtree(self.tmpdir)

    def test_modestga_default(self):
        """Estimation with default GA settings."""
        ga = MODESTGA(self.fmu_path, self.inp, self.known, self.est, self.ideal,
                      generations=self.gen)
        par_df = ga.estimate()
        assert type(par_df) is pd.DataFrame

    def test_modestga_1_worker(self):
        """Estimation constrained to a single worker process."""
        ga = MODESTGA(self.fmu_path, self.inp, self.known, self.est, self.ideal,
                      generations=self.gen,
                      pop_size=6,
                      trm_size=self.trm,
                      tol=1e-3,
                      inertia=5,
                      workers=1)
        par_df = ga.estimate()
        assert type(par_df) is pd.DataFrame

    def test_modestga_2_workers_small_pop(self):
        """Two workers with a minimal population and tournament size."""
        ga = MODESTGA(self.fmu_path, self.inp, self.known, self.est, self.ideal,
                      generations=self.gen,
                      pop_size=2,
                      trm_size=1,
                      tol=1e-3,
                      inertia=5,
                      workers=2)
        par_df = ga.estimate()
        assert type(par_df) is pd.DataFrame

    def test_modestga_2_workers_large_pop(self):
        """Two workers with a larger population."""
        ga = MODESTGA(self.fmu_path, self.inp, self.known, self.est, self.ideal,
                      generations=self.gen,
                      pop_size=32,
                      trm_size=3,
                      tol=1e-3,
                      inertia=5,
                      workers=2)
        par_df = ga.estimate()
        assert type(par_df) is pd.DataFrame
def suite():
    """Assemble the MODESTGA test cases into a suite, preserving order."""
    case_names = (
        'test_modestga_default',
        'test_modestga_1_worker',
        'test_modestga_2_workers_small_pop',
        'test_modestga_2_workers_large_pop',
    )
    collected = unittest.TestSuite()
    for name in case_names:
        collected.addTest(TestMODESTGA(name))
    return collected
if __name__ == '__main__':
config_logger(filename='unit_tests.log', level='DEBUG')
unittest.main()
| sdu-cfei/modest-py | modestpy/test/test_ga_parallel.py | Python | bsd-2-clause | 4,063 |
# Copyright 2017 Priscilla Boyd. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
The RNN_LSTM module implements a recurrent neural network using LSTM.
"""
import csv
import numpy as np
from keras.layers.core import Dense, Activation, Dropout
from keras.layers.recurrent import LSTM
from keras.models import Sequential
from tools.Utils import current_dt, get_latest_dataset_folder, get_latest_dataset
def split_test_training(data_path, sequence_length):
    """
    Split data between test and training examples.

    The 'result' column (index 2) of the CSV is read as a float series and
    cut into overlapping windows of ``sequence_length`` values.  The windows
    are split 80/20 into training and test sets; within each window the
    first ``sequence_length - 1`` values form the example and the last value
    is the target.  Training rows are shuffled in place.

    :param string data_path: Location of CSV-formatted data (with header row)
    :param int sequence_length: Sequence length (temporal window) to be used
    :return: Training examples (X_train), training targets (y_train),
        test examples (X_test) and test targets (y_test)
    :rtype: list of ndarray
    """
    # Load the target column, skipping the header row.  (Removed the dead
    # nb_of_values counter from the original.)
    with open(data_path) as f:
        record = csv.reader(f, delimiter=",")
        next(record, None)
        spat = [float(line[2]) for line in record]

    # Break the series into overlapping chunks of sequence_length values.
    result = np.array([spat[index: index + sequence_length]
                       for index in range(len(spat) - sequence_length)])

    # Divide set into 20% for test, 80% for training.
    row = int(round(0.8 * result.shape[0]))
    train = result[:row, :]
    np.random.shuffle(train)

    X_train = train[:, :-1]
    y_train = train[:, -1]
    X_test = result[row:, :-1]
    y_test = result[row:, -1]

    # LSTM layers expect 3-D input: (samples, timesteps, features).
    X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1))
    X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 1))

    return [X_train, y_train, X_test, y_test]
def build_model():
    """
    Build and compile the learning network: two stacked LSTM layers with
    dropout, followed by a single linear output unit for regression.

    :return: compiled Keras model
    :rtype: Sequential
    """
    # Layer widths: 1-d input, two recurrent layers, 1-d output.
    layer_sizes = [1, 50, 100, 1]

    net = Sequential()

    # First recurrent layer returns the full sequence so it can feed the
    # second recurrent layer; implicit linear activation on input.
    net.add(LSTM(layer_sizes[1], input_shape=(None, layer_sizes[0]),
                 return_sequences=True))
    net.add(Dropout(0.2))

    # Second recurrent layer collapses the sequence to a single vector.
    net.add(LSTM(layer_sizes[2], return_sequences=False))
    net.add(Dropout(0.2))

    # Linear output unit for the regression target.
    net.add(Dense(layer_sizes[3]))
    net.add(Activation("linear"))

    # MSE loss for regression, RMSProp as the optimiser.
    net.compile(loss="mse", optimizer="RMSProp", metrics=['accuracy'])

    return net
def run_rnn(file):
    """
    Run the process to train/test a recurrent neural network using LSTM
    on a given dataset file.

    :param string file: Location of CSV-formatted dataset file
    :return: Model with expected (test) targets and associated predictions
    :rtype: object, dataframe, object
    """
    # Model parameters: short run (2 epochs) with a 20-sample window.
    num_epochs = 2
    sequence_length = 20

    # grab train and test data from CSV
    X_train, y_train, X_test, y_test = split_test_training(file, sequence_length)
    # NOTE(review): debug print of the full training tensor; consider removing.
    print(X_train)

    # build model
    model = build_model()
    model.fit(X_train, y_train, epochs=num_epochs, batch_size=64, validation_split=0.2)

    # predict on the held-out windows and flatten to a 1-d vector
    predict = model.predict(X_test)
    predict = np.reshape(predict, predict.size)

    # evaluate
    score = model.evaluate(X_test, y_test, verbose=0)
    print("Accuracy: ", score[1]*100, "%")

    # save model to h5 file (same folder as data)
    model_location_folder = get_latest_dataset_folder()
    model.save(model_location_folder + '/RNN_' + current_dt + '.h5')

    return model, y_test, predict
| priscillaboyd/SPaT_Prediction | src/neural_network/RNN_LSTM.py | Python | apache-2.0 | 4,376 |
import numpy as np
import cv2
# Open the default camera (device index 0).
cap = cv2.VideoCapture(0)

while(True):
    # Capture frame-by-frame
    # NOTE(review): ret is not checked; cvtColor will fail if the read fails.
    ret, frame = cap.read()

    # Convert the BGR frame to single-channel grayscale.
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    # Display the resulting frame; waitKey(1) also pumps the GUI event loop.
    cv2.imshow('frame', gray)
    # Mask to 8 bits (waitKey may return more) and quit on 'q'.
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# When everything done, release the capture
cap.release()
cv2.destroyAllWindows()
| cemrich/opencv-experiments | camera-test.py | Python | mit | 448 |
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""Miscellaneous utility functions
"""
from cPickle import dumps, loads
import inspect
from distutils.version import LooseVersion
import numpy as np
from textwrap import dedent
import sys
import re
from nipype.external.six import Iterator
def human_order_sorted(l):
    """Sort strings in human order (i.e. 'stat10' will go after 'stat2').

    Tuples are keyed on their first element.
    """
    def _natural_key(text):
        if isinstance(text, tuple):
            text = text[0]
        # Split into digit/non-digit runs; digit runs compare numerically.
        return [int(chunk) if chunk.isdigit() else chunk
                for chunk in re.split(r'(\d+)', text)]
    return sorted(l, key=_natural_key)
def trim(docstring, marker=None):
    """Dedent *docstring* following the PEP 257 rules.

    Tabs are expanded, the common indentation of all lines after the first
    is removed, and leading/trailing blank lines are stripped.  If *marker*
    is given, any line made up of a single repeated character (a REST
    underline, except ':') has that character replaced by *marker*.

    Fix: the original used ``sys.maxint`` as the indent sentinel, which no
    longer exists in Python 3; the minimum indent is now computed directly.
    """
    if not docstring:
        return ''
    # Convert tabs to spaces (following the normal Python rules)
    # and split into a list of lines:
    lines = docstring.expandtabs().splitlines()
    # Determine minimum indentation (first line doesn't count); blank
    # lines do not contribute.
    indents = [len(line) - len(line.lstrip())
               for line in lines[1:] if line.lstrip()]
    # Remove indentation (first line is special):
    trimmed = [lines[0].strip()]
    if indents:
        indent = min(indents)
        for line in lines[1:]:
            # replace existing REST marker with doc level marker
            stripped = line.strip()
            if marker is not None and stripped and \
               all([s == stripped[0] for s in stripped]) and \
               stripped[0] not in [':']:
                line = line.replace(stripped[0], marker)
            trimmed.append(line[indent:].rstrip())
    # Strip off trailing and leading blank lines:
    while trimmed and not trimmed[-1]:
        trimmed.pop()
    while trimmed and not trimmed[0]:
        trimmed.pop(0)
    # Return a single string:
    return '\n'.join(trimmed)
def getsource(function):
    """Return the pickled, dedented source code of *function*."""
    source = inspect.getsource(function)
    return dumps(dedent(source))
def create_function_from_source(function_source, imports=None):
    """Return a function object from a function source

    NOTE: uses the Python 2 ``exec ... in ns`` statement form, so this
    module is not importable under Python 3 without modification.

    Parameters
    ----------
    function_source : pickled string
        string in pickled form defining a function
    imports : list of strings
        list of import statements in string form that allow the function
        to be executed in an otherwise empty namespace
    """
    ns = {}
    import_keys = []
    try:
        # Run the import statements first so their names are in scope when
        # the function body is executed below.
        if imports is not None:
            for statement in imports:
                exec statement in ns
        import_keys = ns.keys()
        exec loads(function_source) in ns
    except Exception, msg:
        msg = str(msg) + '\nError executing function:\n %s\n'%function_source
        msg += '\n'.join(["Functions in connection strings have to be standalone.",
                          "They cannot be declared either interactively or inside",
                          "another function or inline in the connect string. Any",
                          "imports should be done inside the function"
                          ])
        raise RuntimeError(msg)
    # Whatever was added beyond the imports and builtins must be the
    # single function that was defined.
    ns_funcs = list(set(ns) - set(import_keys + ['__builtins__']))
    assert len(ns_funcs) == 1, "Function or inputs are ill-defined"
    funcname = ns_funcs[0]
    func = ns[funcname]
    return func
def find_indices(condition):
    """Return the indices where ravel(condition) is true."""
    (indices,) = np.nonzero(np.ravel(condition))
    return indices
def is_container(item):
    """Checks if item is a container (list, tuple, dict, set)

    The check is duck-typed: anything exposing ``__iter__`` counts.

    Parameters
    ----------
    item : object
        object to check for .__iter__

    Returns
    -------
    output : Boolean
        True if container
        False if not (eg string)
    """
    return hasattr(item, '__iter__')
def container_to_string(cont):
    """Convert a container to a command line string.

    Elements of the container are joined with a space between them,
    suitable for a command line parameter.  A non-container argument
    (anything without ``__iter__``) is returned as ``str(cont)``.

    Parameters
    ----------
    cont : container
        A container object like a list, tuple, dict, or a set.

    Returns
    -------
    cont_str : string
        Container elements joined into a string.
    """
    if not hasattr(cont, '__iter__'):
        return str(cont)
    return ' '.join(cont)
# Dependency checks. Copied this from Nipy, with some modificiations
# (added app as a parameter).
def package_check(pkg_name, version=None, app=None, checker=LooseVersion,
                  exc_failed_import=ImportError,
                  exc_failed_check=RuntimeError):
    """Check that the required package is importable, optionally at a
    minimal version.

    Parameters
    ----------
    pkg_name : string
        Name of the required package.
    version : string, optional
        Minimal version number for required package.
    app : string, optional
        Application performing the check (used in the error message);
        defaults to *Nipype*.
    checker : object, optional
        Class performing the version comparison; default is
        distutils.version.LooseVersion.
    exc_failed_import : Exception, optional
        Exception class raised when the import fails.
    exc_failed_check : Exception, optional
        Exception class raised when the version check fails.

    Examples
    --------
    package_check('numpy', '1.3')
    package_check('networkx', '1.0', 'tutorial1')
    """
    # Build the requirement message once; it is reused by both failure modes.
    prefix = '%s requires' % app if app else 'Nipype requires'
    msg = '%s %s' % (prefix, pkg_name)
    if version:
        msg += ' with version >= %s' % (version,)

    try:
        mod = __import__(pkg_name)
    except ImportError:
        raise exc_failed_import(msg)

    if not version:
        return

    _missing = object()
    have_version = getattr(mod, '__version__', _missing)
    if have_version is _missing:
        raise exc_failed_check('Cannot find version for %s' % pkg_name)
    if checker(have_version) < checker(version):
        raise exc_failed_check(msg)
def str2bool(v):
    """Coerce a bool or a yes/no-style string (case-insensitive) to bool.

    Raises ValueError for anything unrecognised.
    """
    if isinstance(v, bool):
        return v
    lower = v.lower()
    truthy = ("yes", "true", "t", "1")
    falsy = ("no", "false", "n", "f", "0")
    if lower in truthy:
        return True
    if lower in falsy:
        return False
    raise ValueError("%s cannot be converted to bool" % v)
def flatten(S):
    """Recursively flatten nested lists into a single flat list."""
    if S == []:
        return S
    head = S[0]
    if isinstance(head, list):
        # Flatten the nested head before continuing with the tail.
        return flatten(head) + flatten(S[1:])
    return S[:1] + flatten(S[1:])
def unflatten(in_list, prev_structure):
    """Inverse of flatten: consume the flat iterable *in_list* and rebuild
    the nested list shape of *prev_structure*.

    NOTE: relies on the Python 2 ``.next()`` iterator method (via the
    bundled six ``Iterator``); not directly portable to Python 3.
    """
    if not isinstance(in_list, Iterator):
        in_list = iter(in_list)
    if not isinstance(prev_structure, list):
        # Leaf position: take the next flat value.
        return in_list.next()
    else:
        out = []
        for item in prev_structure:
            out.append(unflatten(in_list, item))
        return out
| grlee77/nipype | nipype/utils/misc.py | Python | bsd-3-clause | 7,047 |
from django.db import models
class Creatable(models.Model):
    """Simple named model with a symmetrical self-referential M2M, used to
    exercise autocomplete creation in the test project."""
    name = models.CharField(max_length=100)
    # blank=True lets admin/autocomplete forms leave the relation empty.
    related = models.ManyToManyField('self', blank=True)

    def __unicode__(self):
        return self.name
| spookylukey/django-autocomplete-light | test_project/ajax_create/models.py | Python | mit | 216 |
#!/usr/bin/python
# encoding: utf-8
# (c) 2012, Matt Wright <[email protected]>
# (c) 2013, Alexander Saltanov <[email protected]>
# (c) 2014, Rutger Spiertz <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Module documentation consumed by ansible-doc.  Fix: "an APT repositories"
# -> "an APT repository".
DOCUMENTATION = '''
---
module: apt_repository
short_description: Add and remove APT repositories
description:
    - Add or remove an APT repository in Ubuntu and Debian.
notes:
    - This module works on Debian, Ubuntu and their derivatives.
    - This module supports Debian Squeeze (version 6) as well as its successors.
options:
    repo:
        required: true
        default: none
        description:
            - A source string for the repository.
    state:
        required: false
        choices: [ "absent", "present" ]
        default: "present"
        description:
            - A source string state.
    mode:
        required: false
        default: 0644
        description:
            - The octal mode for newly created files in sources.list.d
        version_added: "1.6"
    update_cache:
        description:
            - Run the equivalent of C(apt-get update) when a change occurs. Cache updates are run after making changes.
        required: false
        default: "yes"
        choices: [ "yes", "no" ]
    validate_certs:
        version_added: '1.8'
        description:
            - If C(no), SSL certificates for the target repo will not be validated. This should only be used
              on personally controlled sites using self-signed certificates.
        required: false
        default: 'yes'
        choices: ['yes', 'no']
    filename:
        version_added: '2.1'
        description:
            - Sets the name of the source list file in sources.list.d.
              Defaults to a file name based on the repository source url.
              The .list extension will be automatically added.
        required: false
    codename:
        version_added: '2.3'
        description:
            - Override the distribution codename to use for PPA repositories.
              Should usually only be set when working with a PPA on a non-Ubuntu target (e.g. Debian or Mint)
        required: false
author: "Alexander Saltanov (@sashka)"
version_added: "0.7"
requirements:
   - python-apt (python 2)
   - python3-apt (python 3)
'''
EXAMPLES = '''
# Add specified repository into sources list.
apt_repository: repo='deb http://archive.canonical.com/ubuntu hardy partner' state=present
# Add specified repository into sources list using specified filename.
apt_repository: repo='deb http://dl.google.com/linux/chrome/deb/ stable main' state=present filename='google-chrome'
# Add source repository into sources list.
apt_repository: repo='deb-src http://archive.canonical.com/ubuntu hardy partner' state=present
# Remove specified repository from sources list.
apt_repository: repo='deb http://archive.canonical.com/ubuntu hardy partner' state=absent
# Add nginx stable repository from PPA and install its signing key.
# On Ubuntu target:
apt_repository: repo='ppa:nginx/stable'
# On Debian target
apt_repository: repo='ppa:nginx/stable' codename='trusty'
'''
import glob
import os
import re
import sys
import tempfile
# python-apt (or python3-apt) may be absent on the target; record
# availability so the module can offer to auto-install it later
# (see install_python_apt).
try:
    import apt
    import apt_pkg
    import aptsources.distro as aptsources_distro
    distro = aptsources_distro.get_distro()
    HAVE_PYTHON_APT = True
except ImportError:
    distro = None
    HAVE_PYTHON_APT = False

# The apt bindings package name differs between Python 2 and Python 3.
if sys.version_info[0] < 3:
    PYTHON_APT = 'python-apt'
else:
    PYTHON_APT = 'python3-apt'

# Default permissions (octal 0644) for files created under sources.list.d.
DEFAULT_SOURCES_PERM = int('0644', 8)

# Recognised source entry types in a sources.list line.
VALID_SOURCE_TYPES = ('deb', 'deb-src')
def install_python_apt(module):
    """Best-effort installation of python-apt/python3-apt via apt-get.

    On success the module-level apt imports, ``distro`` and
    ``HAVE_PYTHON_APT`` are re-initialised; on any failure the Ansible
    module fails out with a message.  Check mode cannot install, so it
    fails immediately.
    """
    if not module.check_mode:
        apt_get_path = module.get_bin_path('apt-get')
        if apt_get_path:
            rc, so, se = module.run_command([apt_get_path, 'update'])
            if rc != 0:
                module.fail_json(msg="Failed to auto-install %s. Error was: '%s'" % (PYTHON_APT, se.strip()))
            rc, so, se = module.run_command([apt_get_path, 'install', PYTHON_APT, '-y', '-q'])
            if rc == 0:
                # Re-import and refresh the module-level state now that
                # the bindings are installed.
                global apt, apt_pkg, aptsources_distro, distro, HAVE_PYTHON_APT
                import apt
                import apt_pkg
                import aptsources.distro as aptsources_distro
                distro = aptsources_distro.get_distro()
                HAVE_PYTHON_APT = True
            else:
                module.fail_json(msg="Failed to auto-install %s. Error was: '%s'" % (PYTHON_APT, se.strip()))
    else:
        module.fail_json(msg="%s must be installed to use check mode" % PYTHON_APT)
class InvalidSource(Exception):
    """Raised when a repository line cannot be parsed as a valid source."""
    pass
# Simple version of aptsources.sourceslist.SourcesList.
# No advanced logic and no backups inside.
class SourcesList(object):
    """In-memory model of sources.list and every *.list file in sources.list.d.

    Each file maps to a list of ``(n, valid, enabled, source, comment)``
    tuples, where ``n`` is the line index recorded at load time, ``valid``
    marks a parseable deb/deb-src spec, and ``enabled`` is False for
    commented-out sources.
    """
    def __init__(self, module):
        self.module = module
        self.files = {}  # group sources by file
        # Repositories that we're adding -- used to implement mode param
        self.new_repos = set()
        self.default_file = self._apt_cfg_file('Dir::Etc::sourcelist')
        # read sources.list if it exists
        if os.path.isfile(self.default_file):
            self.load(self.default_file)
        # read sources.list.d
        for file in glob.iglob('%s/*.list' % self._apt_cfg_dir('Dir::Etc::sourceparts')):
            self.load(file)
    def __iter__(self):
        '''Simple iterator to go over all sources. Empty, non-source, and other not valid lines will be skipped.'''
        for file, sources in self.files.items():
            for n, valid, enabled, source, comment in sources:
                if valid:
                    yield file, n, enabled, source, comment
        # Fix: the previous trailing `raise StopIteration` becomes a
        # RuntimeError inside a generator on Python 3.7+ (PEP 479); simply
        # falling off the end finishes iteration on every Python version.
    def _expand_path(self, filename):
        """Return an absolute path below sources.list.d for bare file names."""
        if '/' in filename:
            return filename
        else:
            return os.path.abspath(os.path.join(self._apt_cfg_dir('Dir::Etc::sourceparts'), filename))
    def _suggest_filename(self, line):
        """Derive a ``*.list`` file name from a source line, honouring the
        user-supplied ``filename`` parameter when present."""
        def _cleanup_filename(s):
            filename = self.module.params['filename']
            if filename is not None:
                return filename
            return '_'.join(re.sub('[^a-zA-Z0-9]', ' ', s).split())
        def _strip_username_password(s):
            if '@' in s:
                s = s.split('@', 1)
                s = s[-1]
            return s
        # Drop options and protocols.  Fix: raw strings keep the backslash
        # escapes valid -- non-raw '\[' / '\w' are deprecated invalid
        # escape sequences on Python 3.6+.
        line = re.sub(r'\[[^\]]+\]', '', line)
        line = re.sub(r'\w+://', '', line)
        # split line into valid keywords
        parts = [part for part in line.split() if part not in VALID_SOURCE_TYPES]
        # Drop usernames and passwords
        parts[0] = _strip_username_password(parts[0])
        return '%s.list' % _cleanup_filename(' '.join(parts[:1]))
    def _parse(self, line, raise_if_invalid_or_disabled=False):
        """Split one sources.list line into (valid, enabled, source, comment)."""
        valid = False
        enabled = True
        source = ''
        comment = ''
        line = line.strip()
        if line.startswith('#'):
            enabled = False
            line = line[1:]
        # Check for another "#" in the line and treat a part after it as a comment.
        i = line.find('#')
        if i > 0:
            comment = line[i+1:].strip()
            line = line[:i]
        # Split a source into substring to make sure that it is source spec.
        # Duplicated whitespaces in a valid source spec will be removed.
        source = line.strip()
        if source:
            chunks = source.split()
            if chunks[0] in VALID_SOURCE_TYPES:
                valid = True
                source = ' '.join(chunks)
        if raise_if_invalid_or_disabled and (not valid or not enabled):
            raise InvalidSource(line)
        return valid, enabled, source, comment
    @staticmethod
    def _apt_cfg_file(filespec):
        '''
        Wrapper for `apt_pkg` module for running with Python 2.5
        '''
        try:
            result = apt_pkg.config.find_file(filespec)
        except AttributeError:
            result = apt_pkg.Config.FindFile(filespec)
        return result
    @staticmethod
    def _apt_cfg_dir(dirspec):
        '''
        Wrapper for `apt_pkg` module for running with Python 2.5
        '''
        try:
            result = apt_pkg.config.find_dir(dirspec)
        except AttributeError:
            result = apt_pkg.Config.FindDir(dirspec)
        return result
    def load(self, file):
        """Parse *file* line by line and record it under self.files[file]."""
        group = []
        f = open(file, 'r')
        for n, line in enumerate(f):
            valid, enabled, source, comment = self._parse(line)
            group.append((n, valid, enabled, source, comment))
        self.files[file] = group
    def save(self):
        """Write every tracked file back atomically; delete emptied files."""
        for filename, sources in list(self.files.items()):
            if sources:
                d, fn = os.path.split(filename)
                fd, tmp_path = tempfile.mkstemp(prefix=".%s-" % fn, dir=d)
                f = os.fdopen(fd, 'w')
                for n, valid, enabled, source, comment in sources:
                    chunks = []
                    if not enabled:
                        chunks.append('# ')
                    chunks.append(source)
                    if comment:
                        chunks.append(' # ')
                        chunks.append(comment)
                    chunks.append('\n')
                    line = ''.join(chunks)
                    try:
                        f.write(line)
                    except IOError:
                        err = get_exception()
                        # Fix: `unicode` does not exist on Python 3 and this
                        # module also targets python3-apt; to_native (already
                        # used elsewhere in this file) works on both.
                        self.module.fail_json(msg="Failed to write to file %s: %s" % (tmp_path, to_native(err)))
                self.module.atomic_move(tmp_path, filename)
                # allow the user to override the default mode
                if filename in self.new_repos:
                    this_mode = self.module.params.get('mode', DEFAULT_SOURCES_PERM)
                    self.module.set_mode_if_different(filename, this_mode, False)
            else:
                del self.files[filename]
                if os.path.exists(filename):
                    os.remove(filename)
    def dump(self):
        """Render each tracked file back to text; returns {filename: text}."""
        dumpstruct = {}
        for filename, sources in self.files.items():
            if sources:
                lines = []
                for n, valid, enabled, source, comment in sources:
                    chunks = []
                    if not enabled:
                        chunks.append('# ')
                    chunks.append(source)
                    if comment:
                        chunks.append(' # ')
                        chunks.append(comment)
                    chunks.append('\n')
                    lines.append(''.join(chunks))
                dumpstruct[filename] = ''.join(lines)
        return dumpstruct
    def _choice(self, new, old):
        # Keep the old value when no replacement was given.
        if new is None:
            return old
        return new
    def modify(self, file, n, enabled=None, source=None, comment=None):
        '''
        This function to be used with iterator, so we don't care of invalid sources.
        If source, enabled, or comment is None, original value from line ``n`` will be preserved.
        '''
        valid, enabled_old, source_old, comment_old = self.files[file][n][1:]
        self.files[file][n] = (n, valid, self._choice(enabled, enabled_old), self._choice(source, source_old), self._choice(comment, comment_old))
    def _add_valid_source(self, source_new, comment_new, file):
        # We'll try to reuse disabled source if we have it.
        # If we have more than one entry, we will enable them all - no advanced logic, remember.
        found = False
        for filename, n, enabled, source, comment in self:
            if source == source_new:
                self.modify(filename, n, enabled=True)
                found = True
        if not found:
            if file is None:
                file = self.default_file
            else:
                file = self._expand_path(file)
            if file not in self.files:
                self.files[file] = []
            files = self.files[file]
            files.append((len(files), True, True, source_new, comment_new))
            self.new_repos.add(file)
    def add_source(self, line, comment='', file=None):
        """Add (or re-enable) a repository line; raises InvalidSource on junk."""
        source = self._parse(line, raise_if_invalid_or_disabled=True)[2]
        # Prefer separate files for new sources.
        self._add_valid_source(source, comment, file=file or self._suggest_filename(source))
    def _remove_valid_source(self, source):
        # If we have more than one entry, we will remove them all (not comment, remove!)
        # NOTE(review): entries are popped by their load-time index ``n``; if
        # the same source occurs several times within one file, list indices
        # shift after the first pop and later pops may mis-target -- confirm.
        for filename, n, enabled, src, comment in self:
            if source == src and enabled:
                self.files[filename].pop(n)
    def remove_source(self, line):
        """Remove every enabled occurrence of the given repository line."""
        source = self._parse(line, raise_if_invalid_or_disabled=True)[2]
        self._remove_valid_source(source)
class UbuntuSourcesList(SourcesList):
    """SourcesList specialisation that understands Launchpad ``ppa:`` shortcuts."""
    # Launchpad REST endpoint used to look up a PPA's metadata/signing key.
    LP_API = 'https://launchpad.net/api/1.0/~%s/+archive/%s'
    def __init__(self, module, add_ppa_signing_keys_callback=None):
        # Callback used to import PPA signing keys (None disables importing,
        # e.g. in check mode).
        self.module = module
        self.add_ppa_signing_keys_callback = add_ppa_signing_keys_callback
        # Release codename may be overridden via the `codename` parameter
        # (useful when the detected distro codename is not wanted).
        self.codename = module.params['codename'] or distro.codename
        super(UbuntuSourcesList, self).__init__(module)
    def _get_ppa_info(self, owner_name, ppa_name):
        """Fetch PPA metadata (incl. signing key fingerprint) from Launchpad."""
        lp_api = self.LP_API % (owner_name, ppa_name)
        headers = dict(Accept='application/json')
        response, info = fetch_url(self.module, lp_api, headers=headers)
        if info['status'] != 200:
            self.module.fail_json(msg="failed to fetch PPA information, error was: %s" % info['msg'])
        return json.loads(to_native(response.read()))
    def _expand_ppa(self, path):
        """Expand ``ppa:owner[/name]`` into a full deb line; name defaults to 'ppa'."""
        ppa = path.split(':')[1]
        ppa_owner = ppa.split('/')[0]
        try:
            ppa_name = ppa.split('/')[1]
        except IndexError:
            ppa_name = 'ppa'
        line = 'deb http://ppa.launchpad.net/%s/%s/ubuntu %s main' % (ppa_owner, ppa_name, self.codename)
        return line, ppa_owner, ppa_name
    def _key_already_exists(self, key_fingerprint):
        # An empty stderr from `apt-key export` is treated as "key present";
        # unknown keys produce a warning on stderr.
        rc, out, err = self.module.run_command('apt-key export %s' % key_fingerprint, check_rc=True)
        return len(err) == 0
    def add_source(self, line, comment='', file=None):
        """Add a source line; ``ppa:`` lines get expanded and their key imported."""
        if line.startswith('ppa:'):
            source, ppa_owner, ppa_name = self._expand_ppa(line)
            if source in self.repos_urls:
                # repository already exists
                return
            if self.add_ppa_signing_keys_callback is not None:
                info = self._get_ppa_info(ppa_owner, ppa_name)
                if not self._key_already_exists(info['signing_key_fingerprint']):
                    command = ['apt-key', 'adv', '--recv-keys', '--keyserver', 'hkp://keyserver.ubuntu.com:80', info['signing_key_fingerprint']]
                    self.add_ppa_signing_keys_callback(command)
            file = file or self._suggest_filename('%s_%s' % (line, self.codename))
        else:
            source = self._parse(line, raise_if_invalid_or_disabled=True)[2]
            file = file or self._suggest_filename(source)
        self._add_valid_source(source, comment, file)
    def remove_source(self, line):
        """Remove a source line, expanding ``ppa:`` shortcuts first."""
        if line.startswith('ppa:'):
            source = self._expand_ppa(line)[0]
        else:
            source = self._parse(line, raise_if_invalid_or_disabled=True)[2]
        self._remove_valid_source(source)
    @property
    def repos_urls(self):
        """List of source lines considered already configured.

        NOTE(review): entries are (n, valid, enabled, source, comment)
        tuples per SourcesList.load, so index 1 is `valid` and index 2 is
        `enabled`; this reads index 1 as `enabled` -- confirm intended.
        """
        _repositories = []
        for parsed_repos in self.files.values():
            for parsed_repo in parsed_repos:
                enabled = parsed_repo[1]
                source_line = parsed_repo[3]
                if not enabled:
                    continue
                if source_line.startswith('ppa:'):
                    source, ppa_owner, ppa_name = self._expand_ppa(source_line)
                    _repositories.append(source)
                else:
                    _repositories.append(source_line)
        return _repositories
def get_add_ppa_signing_key_callback(module):
    """Return a callable that imports a PPA signing key via *module*.

    In check mode, return None so no key is ever imported during a dry run.
    """
    if module.check_mode:
        return None

    def run_key_command(command):
        # check_rc=True makes any non-zero exit status fail the module run.
        module.run_command(command, check_rc=True)

    return run_key_command
def main():
    """Entry point: parse module arguments and add/remove the repository.

    Builds a (Ubuntu)SourcesList, applies the requested change, reports a
    diff when asked, and persists the result plus an optional cache update.
    """
    module = AnsibleModule(
        argument_spec=dict(
            repo=dict(required=True),
            state=dict(choices=['present', 'absent'], default='present'),
            mode=dict(required=False, type='raw'),
            update_cache = dict(aliases=['update-cache'], type='bool', default='yes'),
            filename=dict(required=False, default=None),
            # this should not be needed, but exists as a failsafe
            install_python_apt=dict(required=False, default="yes", type='bool'),
            validate_certs = dict(default='yes', type='bool'),
            codename = dict(required=False),
        ),
        supports_check_mode=True,
    )
    params = module.params
    repo = module.params['repo']
    state = module.params['state']
    update_cache = module.params['update_cache']
    # Note: mode is referenced in SourcesList class via the passed in module (self here)
    sourceslist = None
    if not HAVE_PYTHON_APT:
        if params['install_python_apt']:
            install_python_apt(module)
        else:
            module.fail_json(msg='%s is not installed, and install_python_apt is False' % PYTHON_APT)
    # aptsources only provides a Distribution object on Debian-family targets.
    if isinstance(distro, aptsources_distro.Distribution):
        sourceslist = UbuntuSourcesList(module,
                                        add_ppa_signing_keys_callback=get_add_ppa_signing_key_callback(module))
    else:
        module.fail_json(msg='Module apt_repository is not supported on target.')
    sources_before = sourceslist.dump()
    try:
        if state == 'present':
            sourceslist.add_source(repo)
        elif state == 'absent':
            sourceslist.remove_source(repo)
    except InvalidSource:
        err = get_exception()
        # Fix: `unicode` does not exist on Python 3 and this module also
        # targets python3-apt; to_native works on both interpreter majors.
        module.fail_json(msg='Invalid repository string: %s' % to_native(err))
    # Changed iff the serialized view of all source files differs.
    sources_after = sourceslist.dump()
    changed = sources_before != sources_after
    if changed and module._diff:
        diff = []
        for filename in set(sources_before.keys()).union(sources_after.keys()):
            diff.append({'before': sources_before.get(filename, ''),
                         'after': sources_after.get(filename, ''),
                         'before_header': (filename, '/dev/null')[filename not in sources_before],
                         'after_header': (filename, '/dev/null')[filename not in sources_after]})
    else:
        diff = {}
    if changed and not module.check_mode:
        try:
            sourceslist.save()
            if update_cache:
                cache = apt.Cache()
                cache.update()
        except OSError:
            err = get_exception()
            # Fix: same Python 3 `unicode` NameError as above.
            module.fail_json(msg=to_native(err))
    module.exit_json(changed=changed, repo=repo, state=state, diff=diff)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
main()
| maxamillion/ansible-modules-core | packaging/os/apt_repository.py | Python | gpl-3.0 | 19,659 |
import sys, os
sys.path.append(os.pardir) # 親ディレクトリのファイルをインポートするための設定
from common.functions import *
from common.gradient import numerical_gradient
class TwoLayerNet:
    """Two-layer fully connected network: affine -> sigmoid -> affine -> softmax."""
    def __init__(self, input_size, hidden_size, output_size, weight_init_std=0.01):
        # Initialize the weights: small Gaussian noise for W1/W2, zeros for biases.
        self.params = {}
        self.params['W1'] = weight_init_std * np.random.randn(input_size, hidden_size)
        self.params['W2'] = weight_init_std * np.random.randn(hidden_size, output_size)
        self.params['b1'] = np.zeros(hidden_size)
        self.params['b2'] = np.zeros(output_size)
    def predict(self, x):
        """Forward pass: return softmax class probabilities for batch x."""
        W1, W2 = self.params['W1'], self.params['W2']
        b1, b2 = self.params['b1'], self.params['b2']
        a1 = np.dot(x, W1) + b1
        z1 = sigmoid(a1)
        a2 = np.dot(z1, W2) + b2
        y = softmax(a2)
        return y
    # x: input data
    # t: teacher (label) data
    def loss(self, x, t):
        """Cross-entropy loss of the prediction for x against labels t."""
        y = self.predict(x)
        return cross_entropy_error(y, t)
    # Compute classification accuracy (labels taken as argmax over rows)
    def accuracy(self, x, t):
        y = self.predict(x)
        y = np.argmax(y, axis=1)
        t = np.argmax(t, axis=1)
        accuracy = np.sum(y == t) / float(x.shape[0])
        return accuracy
    # Compute the gradient of the loss w.r.t. each weight parameter
    # via numerical differentiation (slow; used to cross-check backprop).
    def numerical_gradient(self, x, t):
        loss_W = lambda W: self.loss(x, t)
        grads = {}
        grads['W1'] = numerical_gradient(loss_W, self.params['W1'])
        grads['b1'] = numerical_gradient(loss_W, self.params['b1'])
        grads['W2'] = numerical_gradient(loss_W, self.params['W2'])
        grads['b2'] = numerical_gradient(loss_W, self.params['b2'])
        return grads
    # Compute the gradient of the loss w.r.t. each weight parameter
    # via error backpropagation (fast analytic version).
    def gradient(self, x, t):
        W1, W2 = self.params['W1'], self.params['W2']
        b1, b2 = self.params['b1'], self.params['b2']
        grads = {}
        batch_num = x.shape[0]
        # forward
        a1 = np.dot(x, W1) + b1
        z1 = sigmoid(a1)
        a2 = np.dot(z1, W2) + b2
        y = softmax(a2)
        # backward
        dy = (y - t) / batch_num
        grads['W2'] = np.dot(z1.T, dy)
        grads['b2'] = np.sum(dy, axis=0)
        da1 = np.dot(dy, W2.T)
        dz1 = sigmoid_grad(a1) * da1
        grads['W1'] = np.dot(x.T, dz1)
        grads['b1'] = np.sum(dz1, axis=0)
        return grads
| nobukatsu/deep-learning-from-scratch | ch04/two_layer_net.py | Python | mit | 2,553 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""setuptools packaging script for the pycovfefy package."""
from setuptools import setup
# The PyPI long description is the README followed by the changelog.
with open('README.rst') as readme_file:
    readme = readme_file.read()
with open('HISTORY.rst') as history_file:
    history = history_file.read()
requirements = [
    'Click>=6.0',
    # TODO: put package requirements here
]
test_requirements = [
    # TODO: put package test requirements here
]
setup(
    name='pycovfefy',
    version='0.1.0',
    # NOTE(review): this description is still the cookiecutter boilerplate
    # text -- replace it before publishing.
    description="Python Boilerplate contains all the boilerplate you need to create a Python package.",
    long_description=readme + '\n\n' + history,
    author="Pyconfefy",
    author_email='[email protected]',
    url='https://github.com/pycovfefy/pycovfefy',
    packages=[
        'pycovfefy',
    ],
    package_dir={'pycovfefy':
                 'pycovfefy'},
    # Installs a `pycovfefy` console command backed by pycovfefy.cli:main.
    entry_points={
        'console_scripts': [
            'pycovfefy=pycovfefy.cli:main'
        ]
    },
    include_package_data=True,
    install_requires=requirements,
    license="MIT license",
    zip_safe=False,
    keywords='pycovfefy',
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        "Programming Language :: Python :: 2",
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.5',
    ],
    test_suite='tests',
    tests_require=test_requirements
)
| pycovfefy/pycovfefy | setup.py | Python | mit | 1,504 |
from datetime import datetime
import time
import bashtasks.rabbit_util as rabbit_util
def assertMessageInQueue(queue_name, channel=None, timeout=3,
                         host='127.0.0.1', usr='guest', port=5672, pas='guest'):
    """Poll *queue_name* until a message arrives, ack it, and return its body.

    A channel is opened from a fresh connection unless one is supplied.
    Raises Exception when no message shows up within *timeout* seconds.
    """
    if not channel:
        channel = rabbit_util.connect(host=host, usr=usr, port=port, pas=pas).channel()
    start_waiting = datetime.now()
    while True:
        method_frame, header_frame, body = channel.basic_get(queue_name)
        # basic_get returns (None, None, None) when the queue is empty, so
        # the presence of a method frame -- not a truthy body -- signals a
        # delivery.  Fix: the previous `if body:` check spun forever (and
        # never acked) on a legitimate message with an empty payload.
        if method_frame is not None:
            channel.basic_ack(method_frame.delivery_tag)
            return body
        if (datetime.now() - start_waiting).total_seconds() > timeout:
            raise Exception('Timeout ({}secs) exceeded waiting for message in queue: "{}"'
                            .format(timeout, queue_name))
        time.sleep(0.01)
| javierarilos/bashtasks | src/bashtasks/pika_assertions.py | Python | apache-2.0 | 838 |
__author__ = 'yinjun'
import unittest
import os
import imp
class TestSolutionFuncs(unittest.TestCase):
    """Unit tests for the Solution class loaded from a sibling solution.py."""
    def setUp(self):
        # Load solution.py from the current working directory at runtime so
        # the test does not depend on package layout.
        path = os.getcwd() + '/' + 'solution.py'
        so = imp.load_source('solution', path)
        self.s = so.Solution()
    def test_removeDuplicates(self):
        # Occurrences beyond two must be dropped in place; the return value
        # is the new logical length of the list.
        expected = [1,1,2,2,3]
        actual = [1,1, 1,2, 2, 3]
        actualLen = self.s.removeDuplicates(actual)
        self.assertEqual(actualLen, len(expected))
        self.assertEqual(actual, expected)
    # unittest.main()
if __name__ == '__main__':
    # Fix: `print ''` is Python-2-only syntax (SyntaxError on Python 3);
    # the parenthesised form prints a blank separator line on both majors.
    print('')
    # Build and run the suite explicitly for verbose per-test output.
    suite = unittest.TestLoader().loadTestsFromTestCase(TestSolutionFuncs)
    unittest.TextTestRunner(verbosity=2).run(suite)
| shootsoft/practice | LeetCode/python/061-090/080-remove-duplicates-from-sorted-array-ii/test.py | Python | apache-2.0 | 805 |
from django.db import models
from cms.models import CMSPlugin
from django.utils.html import strip_tags
from django.utils.text import truncate_words
from django.utils.translation import ugettext_lazy as _
from djangocms_text_ckeditor.utils import plugin_tags_to_id_list, replace_plugin_tags
from djangocms_text_ckeditor.html import clean_html, extract_images
class Text(CMSPlugin):
    """Abstract Text Plugin Class"""
    body = models.TextField(_("body"))
    search_fields = ('body',)
    def __unicode__(self):
        # Admin display string: first words of the body, tags stripped,
        # capped at 30 characters plus an ellipsis.
        return u"%s" % (truncate_words(strip_tags(self.body), 3)[:30] + "...")
    def save(self, *args, **kwargs):
        # Normalise the HTML before persisting: pull inline images out into
        # child plugins, then sanitise the remaining markup.
        body = self.body
        body = extract_images(body, self)
        body = clean_html(body, full=False)
        self.body = body
        super(Text, self).save(*args, **kwargs)
    def clean_plugins(self):
        """Delete child plugins no longer referenced by tags in the body."""
        ids = plugin_tags_to_id_list(self.body)
        plugins = CMSPlugin.objects.filter(parent=self)
        for plugin in plugins:
            if not plugin.pk in ids:
                plugin.delete() #delete plugins that are not referenced in the text anymore
    def post_copy(self, old_instance, ziplist):
        """
        Fix references to plugins
        """
        # ziplist pairs each copied plugin with its original; rewrite the
        # plugin tags in the body to point at the new primary keys.
        replace_ids = {}
        for new, old in ziplist:
            replace_ids[old.pk] = new.pk
        self.body = replace_plugin_tags(old_instance.get_plugin_instance()[0].body, replace_ids)
        self.save()
| ojii/djangocms-text-ckeditor | djangocms_text_ckeditor/models.py | Python | bsd-3-clause | 1,437 |
# Copyright 2020 Alfredo de la Fuente - AvanzOSC
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
# Odoo addon manifest for the pricelist-item menu module.
{
    "name": "Product Pricelist Item Menu",
    "version": "12.0.1.0.0",
    "license": "AGPL-3",
    "depends": [
        "sale",
    ],
    "author": "AvanzOSC",
    "website": "http://www.avanzosc.es",
    "category": "Sales",
    # View definitions loaded when the addon is installed.
    "data": [
        "views/product_pricelist_item_view.xml",
        "views/res_partner_view.xml",
    ],
    "installable": True,
}
| oihane/odoo-addons | product_pricelist_item_menu/__manifest__.py | Python | agpl-3.0 | 480 |
#!/usr/bin/env python3
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from http.server import BaseHTTPRequestHandler, HTTPServer
import logging
class S(BaseHTTPRequestHandler):
    """Minimal handler: answers every GET with a plain "Hello World" echo."""
    def _set_response(self):
        # Common 200/text-html response preamble.
        self.send_response(200)
        self.send_header('Content-type', 'text/html')
        self.end_headers()
    def do_GET(self):
        # Log the request line and headers, then echo the path back.
        logging.info("GET request,\nPath: %s\nHeaders:\n%s\n", str(self.path), str(self.headers))
        self._set_response()
        self.wfile.write("Hello World! {}".format(self.path).encode('utf-8'))
# HTTP Server runs on port 8080
def run(server_class=HTTPServer, handler_class=S, port=8080):
    """Serve HTTP on *port* (all interfaces) until interrupted, then close."""
    logging.basicConfig(level=logging.INFO)
    address = ('', port)
    server = server_class(address, handler_class)
    logging.info('Starting server...\n')
    # Block here handling requests; Ctrl-C falls through to the shutdown path.
    try:
        server.serve_forever()
    except KeyboardInterrupt:
        pass
    server.server_close()
    logging.info('Stopping...\n')
if __name__ == '__main__':
    from sys import argv
    # Optional single CLI argument selects the listening port.
    if len(argv) == 2:
        run(port=int(argv[1]))
    else:
        run()
from __future__ import absolute_import
import six
from sentry.api.serializers import Serializer, register, serialize
from sentry.models import Activity
@register(Activity)
class ActivitySerializer(Serializer):
    def get_attrs(self, item_list, user):
        """Bulk-serialize the distinct users behind the given activities."""
        # TODO(dcramer); assert on relations
        users = {
            d['id']: d
            for d in serialize(set(i.user for i in item_list if i.user_id), user)
        }
        return {
            item: {
                'user': users[six.text_type(item.user_id)] if item.user_id else None,
            } for item in item_list
        }
    def serialize(self, obj, attrs, user):
        # Flatten one Activity row into its API representation.
        return {
            'id': six.text_type(obj.id),
            'user': attrs['user'],
            'type': obj.get_type_display(),
            'data': obj.data,
            'dateCreated': obj.datetime,
        }
class OrganizationActivitySerializer(ActivitySerializer):
    def get_attrs(self, item_list, user):
        """Extend the base attrs with serialized issue (group) and project."""
        # TODO(dcramer); assert on relations
        attrs = super(OrganizationActivitySerializer, self).get_attrs(
            item_list, user,
        )
        groups = {
            d['id']: d
            for d in serialize(set(i.group for i in item_list if i.group_id), user)
        }
        projects = {
            d['id']: d
            for d in serialize(set(i.project for i in item_list), user)
        }
        for item in item_list:
            attrs[item]['issue'] = groups[six.text_type(item.group_id)] if item.group_id else None
            attrs[item]['project'] = projects[six.text_type(item.project_id)]
        return attrs
    def serialize(self, obj, attrs, user):
        # Base payload plus the org-level context resolved in get_attrs.
        context = super(OrganizationActivitySerializer, self).serialize(
            obj, attrs, user,
        )
        context['issue'] = attrs['issue']
        context['project'] = attrs['project']
        return context
| alexm92/sentry | src/sentry/api/serializers/models/activity.py | Python | bsd-3-clause | 1,885 |
import json
from binascii import hexlify
from flask import Response, request
from userver.object.device import Device
from userver.object.group import Group
from .decorators import msg_filter_valid
from userver.object.application import Application
from ..http_auth import auth
from userver.object.message import MsgUp, MsgDn
from . import api, root
@api.route(root+'msg-up', methods=['GET'])
@auth.auth_required
@msg_filter_valid
def msg_up(user=None, app=None, device=None, group=None, start_ts=0, end_ts=-1):
    """Return uplink messages filtered by device, by app, or across apps."""
    if request.method == 'GET':
        if group is not None:
            # Uplinks originate from devices, so a group filter is rejected.
            return 'Group has no msg up', 406
        if device is not None:
            msgs = MsgUp.objects.all(device.app_eui, device.dev_eui, start_ts=start_ts, end_ts=end_ts, cur_cnt=500)
            msg_up_json = json.dumps([msg.obj_to_dict() for msg in msgs])
        elif app is not None:
            msgs = MsgUp.objects.all(app.app_eui, start_ts=start_ts, end_ts=end_ts, cur_cnt=500)
            msg_up_json = json.dumps([msg.obj_to_dict() for msg in msgs])
        else:
            # No explicit filter: gather per application.  With no user the
            # query spans all applications -- presumably an admin request;
            # confirm against the auth decorator's behaviour.
            if user is not None:
                apps = Application.query.filter_by(user_id=user.id)
            else:
                apps = Application.query.order_by(Application.user_id)
            msg_all = []
            for app in apps:
                msgs = MsgUp.objects.all(app.app_eui, start_ts=start_ts, end_ts=end_ts, cur_cnt=500)
                msg_all.append({'app': hexlify(app.app_eui).decode(), 'msg_up': [msg.obj_to_dict() for msg in msgs]})
            msg_up_json = json.dumps(msg_all)
        return Response(status=200, response=msg_up_json)
@api.route(root+'msg-down', methods=['GET'])
@auth.auth_required
@msg_filter_valid
def msg_down(user=None, app=None, group=None, device=None, start_ts=0, end_ts=-1):
    """Return downlink messages filtered by device/group, by app, or per user."""
    if request.method == 'GET':
        if device is None and group is None and app is not None:
            # App-level query: collect downlinks for every device and every
            # multicast group registered under the application.
            msg_list = []
            devices = Device.objects.all(app.app_eui)
            groups = Group.objects.all(app.app_eui)
            for device in devices:
                msgs = MsgDn.objects.all(type='DEV', eui=device.dev_eui, start_ts=start_ts, end_ts=end_ts)
                for msg in msgs:
                    msg_list.append(msg.obj_to_dict())
            for group in groups:
                msgs = MsgDn.objects.all(type='GROUP', eui=group.id, start_ts=start_ts, end_ts=end_ts)
                for msg in msgs:
                    msg_list.append(msg.obj_to_dict())
            return Response(status=200, response=json.dumps(msg_list))
        elif app is None and group is None and device is None:
            # No filter at all: aggregate the same device/group collection
            # for every application the requesting user owns.
            apps = Application.query.filter_by(user_id=user.id)
            app_list = []
            for app in apps:
                msg_list = []
                devices = Device.objects.all(app.app_eui)
                groups = Group.objects.all(app.app_eui)
                for device in devices:
                    msgs = MsgDn.objects.all(type='DEV', eui=device.dev_eui, start_ts=start_ts, end_ts=end_ts)
                    for msg in msgs:
                        msg_list.append(msg.obj_to_dict())
                for group in groups:
                    msgs = MsgDn.objects.all(type='GROUP', eui=group.id, start_ts=start_ts, end_ts=end_ts)
                    for msg in msgs:
                        msg_list.append(msg.obj_to_dict())
                app_list.append({'app': hexlify(app.app_eui).decode(),
                                 'message_down': msg_list})
            return Response(status=200, response=json.dumps(app_list))
        else:
            # Explicit device and/or group filter.
            msg_list = []
            if device is not None:
                msgs = MsgDn.objects.all(type='DEV', eui=device.dev_eui, start_ts=start_ts, end_ts=end_ts)
                for msg in msgs:
                    msg_list.append(msg.obj_to_dict())
            if group is not None:
                msgs = MsgDn.objects.all(type='GROUP', eui=group.id, start_ts=start_ts, end_ts=end_ts)
                for msg in msgs:
                    msg_list.append(msg.obj_to_dict())
            return Response(status=200, response=json.dumps(msg_list))
"""
This module contains various configuration settings via
waffle switches for the Grades app.
"""
from openedx.core.djangoapps.waffle_utils import WaffleSwitchNamespace
# Namespace
WAFFLE_NAMESPACE = u'grades'
# Switches
WRITE_ONLY_IF_ENGAGED = u'write_only_if_engaged'
ASSUME_ZERO_GRADE_IF_ABSENT = u'assume_zero_grade_if_absent'
ESTIMATE_FIRST_ATTEMPTED = u'estimate_first_attempted'
def waffle():
    """
    Returns the namespaced, cached, audited Waffle class for Grades.
    """
    # NOTE(review): a new WaffleSwitchNamespace wrapper is constructed on
    # every call -- presumably cheap, with any caching handled inside the
    # namespace class; confirm before hoisting to a module-level constant.
    return WaffleSwitchNamespace(name=WAFFLE_NAMESPACE, log_prefix=u'Grades: ')
| fintech-circle/edx-platform | lms/djangoapps/grades/config/waffle.py | Python | agpl-3.0 | 572 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for array_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import time
import unittest
from absl.testing import parameterized
import numpy as np
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import test_ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gradient_checker_v2
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import list_ops
from tensorflow.python.ops import map_fn
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test as test_lib
@test_util.run_all_in_graph_and_eager_modes
class BatchMatrixTransposeTest(test_util.TensorFlowTestCase):
  """Tests for array_ops.matrix_transpose on single and batched matrices."""
  def testNonBatchMatrix(self):
    matrix = [[1, 2, 3], [4, 5, 6]]  # Shape (2, 3)
    expected_transposed = [[1, 4], [2, 5], [3, 6]]  # Shape (3, 2)
    transposed = array_ops.matrix_transpose(matrix)
    self.assertEqual((3, 2), transposed.get_shape())
    self.assertAllEqual(expected_transposed, transposed)
  def testConjugate(self):
    # conjugate=True must both transpose and negate the imaginary parts.
    m = [[1 + 1j, 2 + 2j, 3 + 3j], [4 + 4j, 5 + 5j, 6 + 6j]]
    expected_transposed = [[1 - 1j, 4 - 4j], [2 - 2j, 5 - 5j], [3 - 3j, 6 - 6j]]
    matrix = ops.convert_to_tensor(m)
    transposed = array_ops.matrix_transpose(matrix, conjugate=True)
    self.assertEqual((3, 2), transposed.get_shape())
    self.assertAllEqual(expected_transposed, transposed)
  def testBatchMatrix(self):
    # Only the two innermost dimensions are transposed; the batch dim stays.
    matrix_0 = [[1, 2, 3], [4, 5, 6]]
    matrix_0_t = [[1, 4], [2, 5], [3, 6]]
    matrix_1 = [[11, 22, 33], [44, 55, 66]]
    matrix_1_t = [[11, 44], [22, 55], [33, 66]]
    batch_matrix = [matrix_0, matrix_1]  # Shape (2, 2, 3)
    expected_transposed = [matrix_0_t, matrix_1_t]  # Shape (2, 3, 2)
    transposed = array_ops.matrix_transpose(batch_matrix)
    self.assertEqual((2, 3, 2), transposed.get_shape())
    self.assertAllEqual(expected_transposed, transposed)
  def testNonBatchMatrixDynamicallyDefined(self):
    # needs explicit `constant` because lists are not automatically
    # converted to tensors when applying `transpose` below
    matrix = constant_op.constant([[1, 2, 3], [4, 5, 6]])  # Shape (2, 3)
    expected_transposed = [[1, 4], [2, 5], [3, 6]]  # Shape (3, 2)
    @def_function.function(input_signature=[
        tensor_spec.TensorSpec(shape=None, dtype=dtypes.int32)
    ])
    def transpose(matrix):
      # shape=None in the signature makes the rank unknown inside the trace.
      self.assertIs(matrix.shape.ndims, None)
      return array_ops.matrix_transpose(matrix)
    self.assertAllEqual(expected_transposed, transpose(matrix))
  def testBatchMatrixDynamicallyDefined(self):
    matrix_0 = [[1, 2, 3], [4, 5, 6]]
    matrix_0_t = [[1, 4], [2, 5], [3, 6]]
    matrix_1 = [[11, 22, 33], [44, 55, 66]]
    matrix_1_t = [[11, 44], [22, 55], [33, 66]]
    # needs explicit `constant` because lists are not automatically
    # converted to tensors when applying `transpose` below
    batch_matrix = constant_op.constant([matrix_0, matrix_1])  # Shape (2, 2, 3)
    expected_transposed = [matrix_0_t, matrix_1_t]  # Shape (2, 3, 2)
    @def_function.function(input_signature=[
        tensor_spec.TensorSpec(shape=None, dtype=dtypes.int32)
    ])
    def transpose(matrix):
      self.assertIs(matrix.shape.ndims, None)
      return array_ops.matrix_transpose(matrix)
    self.assertAllEqual(expected_transposed, transpose(batch_matrix))
  def testTensorWithStaticRankLessThanTwoRaisesBecauseNotAMatrix(self):
    vector = [1, 2, 3]
    with self.assertRaisesRegex(ValueError, "should be a "):
      array_ops.matrix_transpose(vector)
  def testNarrowMatrixConjugateTranspose(self):
    # Tall, purely-imaginary matrix; negating on conjugation flips the sign
    # of every element, which the expected value reproduces below.
    for dtype in (dtypes.float32, dtypes.float64):
      for conjugate in (True, False):
        with self.subTest(complex_type=dtype, conjugate=conjugate):
          vector = math_ops.complex(
              constant_op.constant(0, dtype=dtype),
              math_ops.range(96, dtype=dtype))
          column_vector = array_ops.expand_dims(vector, axis=-1)
          row_vector = array_ops.expand_dims(vector, axis=0)
          narrow_matrix = array_ops.tile(column_vector, [1, 2])  # [96, 2]
          expected_transposed = array_ops.tile(row_vector, [2, 1])  # [2, 96]
          if conjugate:
            expected_transposed = -expected_transposed
          transposed = array_ops.matrix_transpose(
              narrow_matrix, conjugate=conjugate)
          self.assertEqual((2, 96), transposed.get_shape())
          self.assertAllEqual(expected_transposed, transposed)
class BooleanMaskTest(test_util.TensorFlowTestCase):
  """Tests array_ops.boolean_mask against equivalent numpy fancy indexing."""
  def setUp(self):
    # Fixed seed so mask generation is reproducible across runs.
    self.rng = np.random.RandomState(42)
  def CheckVersusNumpy(self, ndims_mask, arr_shape, make_mask=None, axis=None):
    """Check equivalence between boolean_mask and numpy masking."""
    if make_mask is None:
      make_mask = lambda shape: self.rng.randint(0, 2, size=shape).astype(bool)
    arr = np.random.rand(*arr_shape)
    mask = make_mask(arr_shape[:ndims_mask])
    if axis is not None:
      # The mask covers ndims_mask consecutive dimensions starting at `axis`.
      mask = make_mask(arr_shape[axis:ndims_mask + axis])
    if axis is None or axis == 0:
      masked_arr = arr[mask]
    elif axis == 1:
      masked_arr = arr[:, mask]
    elif axis == 2:
      masked_arr = arr[:, :, mask]
    with self.cached_session():
      masked_tensor = array_ops.boolean_mask(arr, mask, axis=axis)
      # Leading dimension size of masked_tensor is always unknown until runtime
      # since we don't how many elements will be kept.
      leading = 1 if axis is None else axis + 1
      self.assertAllEqual(masked_tensor.get_shape()[leading:],
                          masked_arr.shape[leading:])
      self.assertAllClose(masked_arr, masked_tensor)
  @test_util.run_deprecated_v1
  def testMaskDim1ArrDim2Axis1(self):
    ndims_mask = 1
    for arr_shape in [(1, 1), (2, 2), (2, 5)]:
      with self.subTest(arr_shape=arr_shape):
        self.CheckVersusNumpy(ndims_mask, arr_shape, axis=1)
  @test_util.run_deprecated_v1
  def testMaskDim2ArrDim2Axis1(self):
    ndims_mask = 2
    for arr_shape in [(1, 1), (2, 2), (2, 5)]:
      with self.subTest(arr_shape=arr_shape):
        self.CheckVersusNumpy(ndims_mask, arr_shape, axis=1)
  @test_util.run_deprecated_v1
  def testMaskDim1ArrDim1(self):
    ndims_mask = 1
    for arr_shape in [(1,), (2,), (3,), (10,)]:
      with self.subTest(arr_shape=arr_shape):
        self.CheckVersusNumpy(ndims_mask, arr_shape)
  @test_util.run_deprecated_v1
  def testMaskDim1ArrDim2(self):
    ndims_mask = 1
    for arr_shape in [(1, 1), (2, 2), (2, 5)]:
      with self.subTest(arr_shape=arr_shape):
        self.CheckVersusNumpy(ndims_mask, arr_shape)
  @test_util.run_deprecated_v1
  def testMaskDim2ArrDim2(self):
    ndims_mask = 2
    for arr_shape in [(1, 1), (2, 2), (2, 5)]:
      with self.subTest(arr_shape=arr_shape):
        self.CheckVersusNumpy(ndims_mask, arr_shape)
  @test_util.run_deprecated_v1
  def testMaskDim2ArrDim3(self):
    ndims_mask = 2
    for arr_shape in [(1, 1, 1), (1, 2, 2), (2, 2, 1)]:
      with self.subTest(arr_shape=arr_shape):
        self.CheckVersusNumpy(ndims_mask, arr_shape)
  @test_util.run_deprecated_v1
  def testEmptyInput2D(self):
    # Masking a (2, 0) array: rows selected, zero columns survive.
    mask = np.array([True, False])
    arr = np.array([[], []]).astype(np.float32)
    numpy_result = arr[mask]
    tf_result = array_ops.boolean_mask(arr, mask)
    self.assertAllEqual(numpy_result.shape[1:], tf_result.get_shape()[1:])
    with self.cached_session():
      self.assertAllClose(numpy_result, tf_result)
  @test_util.run_deprecated_v1
  def testEmptyInput1D(self):
    # Fully empty input and mask must produce an empty result, not an error.
    mask = np.array([]).astype(bool)
    arr = np.array([]).astype(np.float32)
    numpy_result = arr[mask]
    tf_result = array_ops.boolean_mask(arr, mask)
    self.assertAllEqual(numpy_result.shape[1:], tf_result.get_shape()[1:])
    with self.cached_session():
      self.assertAllClose(numpy_result, tf_result)
  @test_util.run_deprecated_v1
  def testEmptyOutput(self):
    # All-False masks must produce empty (but correctly shaped) outputs.
    make_mask = lambda shape: np.zeros(shape, dtype=bool)
    for ndims_mask in range(1, 4):
      for ndims_arr in range(ndims_mask, ndims_mask + 3):
        for _ in range(3):
          with self.subTest(ndims_mask=ndims_mask, ndims_arr=ndims_arr, _=_):
            arr_shape = np.random.randint(1, 5, size=ndims_arr)
            self.CheckVersusNumpy(ndims_mask, arr_shape, make_mask=make_mask)
  @test_util.run_deprecated_v1
  def testWorksWithDimensionsEqualToNoneDuringGraphBuild(self):
    # The rank of the mask tensor must be specified. This is explained
    # in the docstring as well.
    with self.cached_session() as sess:
      ph_tensor = array_ops.placeholder(dtypes.int32, shape=None)
      ph_mask = array_ops.placeholder(dtypes.bool, shape=[None])
      arr = np.array([[1, 2], [3, 4]])
      mask = np.array([False, True])
      masked_tensor = sess.run(
          array_ops.boolean_mask(ph_tensor, ph_mask),
          feed_dict={
              ph_tensor: arr,
              ph_mask: mask
          })
      np.testing.assert_allclose(masked_tensor, arr[mask])
  @test_util.run_deprecated_v1
  def testMaskDimensionsSetToNoneRaises(self):
    # The rank of the mask tensor must be specified. This is explained
    # in the docstring as well.
    with self.cached_session():
      tensor = array_ops.placeholder(dtypes.int32, shape=[None, 2])
      mask = array_ops.placeholder(dtypes.bool, shape=None)
      with self.assertRaisesRegex(ValueError, "dimensions must be specified"):
        array_ops.boolean_mask(tensor, mask)
  def testMaskHasMoreDimsThanTensorRaises(self):
    mask = [[True, True], [False, False]]
    tensor = [1, 2, 3, 4]
    with self.cached_session():
      with self.assertRaisesRegex(ValueError, "incompatible"):
        array_ops.boolean_mask(tensor, mask).eval()
  def testMaskIsScalarRaises(self):
    mask = True
    tensor = 1
    with self.cached_session():
      with self.assertRaisesRegex(ValueError, "mask.*scalar"):
        array_ops.boolean_mask(tensor, mask).eval()
  def testMaskShapeDifferentThanFirstPartOfTensorShapeRaises(self):
    mask = [True, True, True]
    tensor = [[1, 2], [3, 4]]
    with self.cached_session():
      with self.assertRaisesRegex(ValueError, "incompatible"):
        array_ops.boolean_mask(tensor, mask).eval()
  @test_util.run_deprecated_v1
  def testStringMask(self):
    # Reproduces b/111171330, where the optimized boolean_mask graph would
    # be incorrectly placed on GPU.
    with ops.Graph().as_default():
      tile_placeholder = array_ops.placeholder(dtypes.int32, [2])
      string_tensor = array_ops.tile([["hello"]], tile_placeholder)
      bool_tensor = array_ops.tile([[True]], tile_placeholder)
      masked_tensor = array_ops.boolean_mask(string_tensor, bool_tensor)
      config = config_pb2.ConfigProto()
      config.graph_options.rewrite_options.shape_optimization = 1
      config.gpu_options.per_process_gpu_memory_fraction = 0.3
      with session.Session(config=config) as sess:
        result = sess.run(masked_tensor, feed_dict={tile_placeholder: [2, 2]})
        self.assertAllEqual([b"hello", b"hello", b"hello", b"hello"], result)
  def testMaskWithAxisTensor(self):
    # `axis` may be a constant Tensor rather than a Python int.
    @def_function.function(autograph=False)
    def f():
      return array_ops.boolean_mask([1, 2, 3], [True, False, True],
                                    axis=constant_op.constant(
                                        0, dtype=dtypes.int32))
    self.assertAllEqual(self.evaluate(f()), [1, 3])
  def testMaskWithAxisNonConstTensor(self):
    # `axis` may even be a non-constant Tensor fed through a function input.
    @def_function.function(
        autograph=False,
        input_signature=[
            tensor_spec.TensorSpec(shape=None, dtype=dtypes.int32)
        ])
    def f(axis):
      return array_ops.boolean_mask([1, 2, 3], [True, False, True], axis=axis)
    self.assertAllEqual(
        self.evaluate(f(constant_op.constant(0, dtype=dtypes.int32))), [1, 3])
@test_util.run_all_in_graph_and_eager_modes
class OperatorShapeTest(test_util.TensorFlowTestCase):
  """Shape-inference behavior of expand_dims/squeeze on scalars and matrices."""
  def testExpandScalar(self):
    scalar = "hello"
    scalar_expanded = array_ops.expand_dims(scalar, [0])
    self.assertEqual(scalar_expanded.get_shape(), (1,))
  def testSqueezeScalar(self):
    # Squeezing with an empty axis list leaves the scalar shape () intact.
    scalar = "hello"
    scalar_squeezed = array_ops.squeeze(scalar, ())
    self.assertEqual(scalar_squeezed.get_shape(), ())
  def testSqueezeMatrix(self):
    matrix = [[1, 2, 3]]
    matrix_squeezed = array_ops.squeeze(matrix, [0])
    # NOTE(review): (3) is the int 3, not a 1-tuple; the shape equality here
    # accepts it, but (3,) would make the intent clearer.
    self.assertEqual(matrix_squeezed.get_shape(), (3))
    with self.assertRaisesRegex(
        Exception, "Can not squeeze dim.1., expected a dimension of 1, got 3"):
      matrix_squeezed = array_ops.squeeze(matrix, [1])
  def testSqueezeScalarDim(self):
    # A bare int axis (not wrapped in a list) is also accepted.
    matrix = [[1, 2, 3]]
    matrix_squeezed = array_ops.squeeze(matrix, 0)
    self.assertEqual(matrix_squeezed.get_shape(), (3))
  def testExpandDimsWithNonScalarDim(self):
    with self.assertRaisesRegex(Exception,
                                "must be a tensor with a single value"):
      array_ops.expand_dims(1, axis=[0, 1])
class ReverseV2Test(test_util.TensorFlowTestCase):
  """Tests array_ops.reverse_v2/reverse against numpy [::-1] slicing."""

  @test_util.run_deprecated_v1
  def testReverse0DimAuto(self):
    # Reversing over an empty axis list is the identity, even for a scalar.
    x_np = 4
    for use_gpu in [False, True]:
      with self.subTest(use_gpu=use_gpu):
        with self.cached_session(use_gpu=use_gpu):
          x_tf = array_ops.reverse_v2(x_np, []).eval()
          self.assertAllEqual(x_tf, x_np)

  def _reverse1DimAuto(self, np_dtype):
    """Check 1-D reversal of dtype `np_dtype` on CPU/GPU, int32/int64 axes."""
    x_np = np.array([1, 200, 3, 40, 5], dtype=np_dtype)
    for use_gpu in [False, True]:
      for axis_dtype in [dtypes.int32, dtypes.int64]:
        with self.subTest(use_gpu=use_gpu, axis_dtype=axis_dtype):
          with self.cached_session(use_gpu=use_gpu):
            x_tf = array_ops.reverse_v2(
                x_np, constant_op.constant([0], dtype=axis_dtype)).eval()
            self.assertAllEqual(x_tf, np.asarray(x_np)[::-1])

  def _reverse2DimAuto(self, np_dtype):
    """Check 2-D reversal: each axis, negative axes, and both axes at once."""
    x_np = np.array([[1, 200, 3], [4, 5, 60]], dtype=np_dtype)
    for reverse_f in [array_ops.reverse_v2, array_ops.reverse]:
      for use_gpu in [False, True]:
        for axis_dtype in [dtypes.int32, dtypes.int64]:
          with self.subTest(
              reverse_f=reverse_f, use_gpu=use_gpu, axis_dtype=axis_dtype):
            with self.cached_session(use_gpu=use_gpu):
              x_tf_1 = reverse_f(x_np,
                                 constant_op.constant([0],
                                                      dtype=axis_dtype)).eval()
              x_tf_2 = reverse_f(x_np,
                                 constant_op.constant([-2],
                                                      dtype=axis_dtype)).eval()
              x_tf_3 = reverse_f(x_np,
                                 constant_op.constant([1],
                                                      dtype=axis_dtype)).eval()
              x_tf_4 = reverse_f(x_np,
                                 constant_op.constant([-1],
                                                      dtype=axis_dtype)).eval()
              x_tf_5 = reverse_f(x_np,
                                 constant_op.constant([1, 0],
                                                      dtype=axis_dtype)).eval()
              self.assertAllEqual(x_tf_1, np.asarray(x_np)[::-1, :])
              self.assertAllEqual(x_tf_2, np.asarray(x_np)[::-1, :])
              self.assertAllEqual(x_tf_3, np.asarray(x_np)[:, ::-1])
              self.assertAllEqual(x_tf_4, np.asarray(x_np)[:, ::-1])
              self.assertAllEqual(x_tf_5, np.asarray(x_np)[::-1, ::-1])

  # This test covers the axis validation in the shape function
  # (no eval())
  @test_util.run_deprecated_v1
  def testInvalidAxis(self):
    x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32)
    with self.assertRaisesRegex(ValueError, "is out of valid range"):
      array_ops.reverse_v2(x_np, [-30])
    with self.assertRaisesRegex(ValueError, "is out of valid range"):
      array_ops.reverse_v2(x_np, [2])
    with self.assertRaisesRegex(ValueError, "axis 0 specified more than once"):
      array_ops.reverse_v2(x_np, [0, -2])

  # This is the version of reverse that uses axis indices rather than
  # bool tensors
  # TODO(b/32254538): Change this test to use array_ops.reverse
  #
  # Note: this test passes placeholder as constant axis is validated
  # in shape function (see testInvalidAxis)
  @test_util.run_deprecated_v1
  def testInvalid(self):
    x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32)
    axis = array_ops.placeholder(dtypes.int32)
    with self.cached_session():
      with self.assertRaisesRegex(errors_impl.InvalidArgumentError,
                                  "is out of.*range"):
        array_ops.reverse_v2(x_np, axis).eval(feed_dict={axis: [-30]})
      with self.assertRaisesRegex(errors_impl.InvalidArgumentError,
                                  "is out of.*range"):
        array_ops.reverse_v2(x_np, axis).eval(feed_dict={axis: [2]})
      with self.assertRaisesRegex(
          errors_impl.InvalidArgumentError,
          "(axis 0 specified more than once|canonicalized axis 0 was repeated.)"
      ):
        array_ops.reverse_v2(x_np, axis).eval(feed_dict={axis: [0, -2]})

  @test_util.run_deprecated_v1
  def testReverse1DimAuto(self):
    # np.bool_ replaces the np.bool alias, which was deprecated in NumPy 1.20
    # and removed in NumPy 1.24.
    for dtype in [
        np.uint8, np.int8, np.uint16, np.int16, np.int32, np.int64, np.bool_,
        np.float16, np.float32, np.float64, np.complex64, np.complex128,
        np.array(b"").dtype.type
    ]:
      self._reverse1DimAuto(dtype)

  @test_util.run_deprecated_v1
  def testReverse2DimAuto(self):
    # np.bool_ replaces the removed np.bool alias (see testReverse1DimAuto).
    for dtype in [
        np.uint8, np.int8, np.uint16, np.int16, np.int32, np.int64, np.bool_,
        np.float16, np.float32, np.float64, np.complex64, np.complex128,
        np.array(b"").dtype.type
    ]:
      self._reverse2DimAuto(dtype)

  @test_util.run_deprecated_v1
  def testUnknownDims(self):
    """Shape inference when the data and/or axis shapes are unknown."""
    reverse_v2 = array_ops.reverse_v2
    data_t = array_ops.placeholder(dtypes.float32)
    axis_known_t = array_ops.placeholder(dtypes.int32, shape=[3])
    reverse_known_t = reverse_v2(data_t, axis_known_t)
    # Unlike V1 we cannot know this anymore
    self.assertEqual(None, reverse_known_t.get_shape().ndims)
    axis_unknown_t = array_ops.placeholder(dtypes.int32)
    reverse_unknown_t = reverse_v2(data_t, axis_unknown_t)
    self.assertIs(None, reverse_unknown_t.get_shape().ndims)
    data_2d_t = array_ops.placeholder(dtypes.float32, shape=[None, None])
    axis_2d_t = array_ops.placeholder(dtypes.int32, shape=[3])
    reverse_2d_t = reverse_v2(data_2d_t, axis_2d_t)
    self.assertEqual(2, reverse_2d_t.get_shape().ndims)

  @test_util.run_deprecated_v1
  def testReverseRowsOf3Channels(self):
    """Tests optimized code for reversing rows with last dim size = 3."""
    with self.session():
      for reverse_f in [array_ops.reverse_v2, array_ops.reverse]:
        for outer_size in (1, 2):
          for middle_size in list(range(50)) + [100000]:
            with self.subTest(
                reverse_f=reverse_f,
                outer_size=outer_size,
                middle_size=middle_size):
              x_np = np.reshape(
                  np.arange(outer_size * middle_size * 3, dtype=np.float32),
                  newshape=(outer_size, middle_size, 3))
              x_tf = reverse_f(x_np, [1]).eval()
              np_answer = x_np[:, ::-1, :]
              self.assertAllEqual(x_tf, np_answer)

  @test_util.run_deprecated_v1
  def testReverseRowsOf4Channels(self):
    """Same as the 3-channel case but with last dim size = 4."""
    with self.session():
      for reverse_f in [array_ops.reverse_v2, array_ops.reverse]:
        for outer_size in (1, 2):
          for middle_size in list(range(50)) + [100000]:
            with self.subTest(
                reverse_f=reverse_f,
                outer_size=outer_size,
                middle_size=middle_size):
              x_np = np.reshape(
                  np.arange(outer_size * middle_size * 4, dtype=np.float32),
                  newshape=(outer_size, middle_size, 4))
              x_tf = reverse_f(x_np, [1]).eval()
              np_answer = x_np[:, ::-1, :]
              self.assertAllEqual(x_tf, np_answer)

  @test_util.run_deprecated_v1
  def testReverseColumnsOf3Channels(self):
    """Reverses axis 0 for tensors whose trailing dims are (m, 3)."""
    with self.session():
      for reverse_f in [array_ops.reverse_v2, array_ops.reverse]:
        for outer_size in list(range(50)) + [100000]:
          for middle_size in (1, 2):
            with self.subTest(
                reverse_f=reverse_f,
                outer_size=outer_size,
                middle_size=middle_size):
              x_np = np.reshape(
                  np.arange(outer_size * middle_size * 3, dtype=np.float32),
                  newshape=(outer_size, middle_size, 3))
              x_tf = reverse_f(x_np, [0]).eval()
              np_answer = x_np[::-1, :, :]
              self.assertAllEqual(x_tf, np_answer)

  def testReverseInvalidShape(self):
    # Reversing a zero-sized tensor must not crash; the result equals itself.
    x = np.ndarray(shape=[0, 1, 1])
    v = array_ops.reverse_v2(x, axis=[1])
    self.assertAllEqual(self.evaluate(v), v)
class MeshgridTest(test_util.TensorFlowTestCase):
  """Tests array_ops.meshgrid against np.meshgrid for both indexing modes."""

  def _compareDiff(self, x, y, use_gpu):
    """Compare tf and numpy meshgrid outputs for two given inputs."""
    for index in ("ij", "xy"):
      numpy_out = np.meshgrid(x, y, indexing=index)
      tf_out = array_ops.meshgrid(x, y, indexing=index)
      with self.cached_session(use_gpu=use_gpu):
        for xx, yy in zip(numpy_out, tf_out):
          self.assertAllEqual(xx, yy)

  def _compareDiffType(self, n, np_dtype, use_gpu):
    """Compare tf and numpy meshgrid over n inputs of dtype np_dtype."""
    for index in ("ij", "xy"):
      # Rebuild the input list for each indexing mode. Previously the list
      # was created once outside this loop and kept growing, so the "xy"
      # pass silently meshed 2*n grids instead of the intended n.
      inputs = []
      for _ in range(n):
        x = np.linspace(-10, 10, 5).astype(np_dtype)
        if np_dtype in (np.complex64, np.complex128):
          # Give complex inputs a nonzero imaginary component.
          x += 1j
        inputs.append(x)
      numpy_out = np.meshgrid(*inputs, indexing=index)
      with self.cached_session(use_gpu=use_gpu):
        tf_out = array_ops.meshgrid(*inputs, indexing=index)
        for x_np, x_tf in zip(numpy_out, tf_out):
          self.assertAllEqual(x_np, x_tf)

  @test_util.run_deprecated_v1
  def testCompare(self):
    for t in (np.float16, np.float32, np.float64, np.int32, np.int64,
              np.complex64, np.complex128):
      with self.subTest(t=t):
        self._compareDiffType(2, t, False)
        self._compareDiffType(3, t, False)
    x = [1, 2, 3]
    y = [4, 5]
    a = [[1, 1], [1, 1]]
    self._compareDiff(x, y, False)
    self._compareDiff(x, a, False)
class StridedSliceChecker(object):
  """Check a given tensor against the numpy result."""
  # Shared reference tensors used by StridedSliceTest below.
  REF_TENSOR = np.arange(1, 19, dtype=np.float32).reshape(3, 2, 3)
  REF_TENSOR_ALIGNED = np.arange(1, 97, dtype=np.float32).reshape(3, 4, 8)

  def __init__(self, test, x, tensor_type=dtypes.int32, check_type_infer=True):
    self.x_np = np.array(x).astype(tensor_type.as_numpy_dtype)
    if tensor_type.is_bool:
      # np.bool_ replaces the deprecated np.bool alias (removed in NumPy
      # 1.24).  `x % 3` gives a mix of zero and nonzero values to booleanize.
      self.x_np = np.array(x % 3).astype(np.bool_)
    # Give the value a non-zero imaginary component for complex types.
    if tensor_type.is_complex:
      self.x_np -= 1j * self.x_np
    self.test = test
    self.x = constant_op.constant(self.x_np, dtype=tensor_type)
    self.check_type_infer = check_type_infer

  def __getitem__(self, spec):
    """Slice the wrapped tensor with `spec` and verify against numpy."""
    op = self.x.__getitem__(spec)

    def eval_if_tensor(x):
      # Slice components may themselves be Tensors; evaluate them so the
      # same spec can be applied to the numpy reference value.
      try:
        return x.eval()
      except AttributeError:
        return x

    if isinstance(spec, bool) or \
        (isinstance(spec, ops.Tensor) and spec.dtype == dtypes.bool) or \
        (isinstance(spec, np.ndarray) and spec.dtype == bool) or \
        (isinstance(spec, (list, tuple)) and np.asarray(spec).dtype == bool):
      # Boolean-mask specs: compare values only (result shape is dynamic).
      tensor = op.eval()
      np_spec = eval_if_tensor(spec)
      self.test.assertAllEqual(self.x_np[np_spec], tensor)
      return tensor

    if not isinstance(spec, (list, tuple)):
      spec = [spec]
    tensor = op.eval()
    # Make a numpy spec that pre-evals the tensors
    np_specs = []
    for s in spec:
      if isinstance(s, slice):
        start = eval_if_tensor(s.start)
        stop = eval_if_tensor(s.stop)
        step = eval_if_tensor(s.step)
        np_specs.append(slice(start, stop, step))
      else:
        np_specs.append(eval_if_tensor(s))
    self.test.assertAllEqual(self.x_np[tuple(np_specs)], tensor)
    if self.check_type_infer:
      # Statically inferred shape must match the runtime shape.
      self.test.assertAllEqual(tensor.shape, op.get_shape())
    return tensor
# Element dtypes exercised by StridedSliceTest / SliceAssignTest below.
STRIDED_SLICE_TYPES = [
    dtypes.int32, dtypes.int64, dtypes.int16, dtypes.int8, dtypes.float32,
    dtypes.float64, dtypes.complex64, dtypes.complex128, dtypes.bool
]
class StridedSliceTest(test_util.TensorFlowTestCase):
  """Test the strided slice operation with variants of slices."""
  @test_util.run_deprecated_v1
  def test_basic_slice(self):
    """Identity, strided, reversed and negative-index slices per dtype."""
    for tensor_type in STRIDED_SLICE_TYPES:
      with self.subTest(tensor_type=tensor_type):
        with self.cached_session():
          checker = StridedSliceChecker(
              self, StridedSliceChecker.REF_TENSOR, tensor_type=tensor_type)
          _ = checker[:, :, :]
          # Various ways of representing identity slice
          _ = checker[:, :, :]
          _ = checker[::, ::, ::]
          _ = checker[::1, ::1, ::1]
          # Not zero slice
          _ = checker[::1, ::5, ::2]
          # Reverse in each dimension independently
          _ = checker[::-1, :, :]
          _ = checker[:, ::-1, :]
          _ = checker[:, :, ::-1]
          ## negative index tests i.e. n-2 in first component
          _ = checker[-2::-1, :, ::1]
          # negative index tests i.e. n-2 in first component, non-unit stride
          _ = checker[-2::-1, :, ::2]
          # Check rank-0 examples
          checker2 = StridedSliceChecker(self, 5, tensor_type=tensor_type)
          _ = checker2[None]
          _ = checker2[...]
          _ = checker2[tuple()]
  def testInt64GPU(self):
    if not test_util.is_gpu_available():
      self.skipTest("No GPU available")
    with test_util.force_gpu():
      x = constant_op.constant([1., 2., 3.])
      begin = constant_op.constant([2], dtype=dtypes.int64)
      end = constant_op.constant([3], dtype=dtypes.int64)
      strides = constant_op.constant([1], dtype=dtypes.int64)
      s = array_ops.strided_slice(x, begin, end, strides)
      self.assertAllEqual([3.], self.evaluate(s))
  @test_util.assert_no_new_pyobjects_executing_eagerly
  @test_util.assert_no_garbage_created
  def testTensorSliceEagerMemory(self):
    with context.eager_mode():
      inputs = constant_op.constant([[[1], [2], [3], [4]]],
                                    dtype=dtypes.float32)
      # Tests that slicing an EagerTensor doesn't leak memory
      inputs[0]  # pylint: disable=pointless-statement
  @test_util.assert_no_new_pyobjects_executing_eagerly
  @test_util.assert_no_garbage_created
  def testVariableSliceEagerMemory(self):
    # Same leak check as above, but slicing a Variable.
    with context.eager_mode():
      v = variables.Variable([1., 2.])
      v[0]  # pylint: disable=pointless-statement
  @test_util.run_deprecated_v1
  def testDegenerateSlices(self):
    """Slices whose interval is empty must yield empty results, not errors."""
    with self.session():
      checker = StridedSliceChecker(self, StridedSliceChecker.REF_TENSOR)
      # degenerate by offering a forward interval with a negative stride
      _ = checker[0:-1:-1, :, :]
      # degenerate with a reverse interval with a positive stride
      _ = checker[-1:0, :, :]
      # empty interval in every dimension
      _ = checker[-1:0, 2:2, 2:3:-1]
      # empty first dimension only (used to break for aligned tensors).
      checker = StridedSliceChecker(self,
                                    StridedSliceChecker.REF_TENSOR_ALIGNED)
      _ = checker[1:0]
  @test_util.run_deprecated_v1
  def testSliceWithUndefinedDimension(self):
    # Dimension(None) in a slice behaves like an omitted bound.
    t = constant_op.constant([1, 2, 3])
    d = tensor_shape.Dimension(None)
    self.assertAllEqual(t[d:d:d], t)
  @test_util.run_deprecated_v1
  def testEllipsis(self):
    with self.session():
      raw = [[[[[1, 2], [3, 4], [5, 6]]], [[[7, 8], [9, 10], [11, 12]]]]]
      checker = StridedSliceChecker(self, raw)
      _ = checker[0:]
      # implicit ellipsis
      _ = checker[0:, ...]
      # ellipsis alone
      _ = checker[...]
      # ellipsis at end
      _ = checker[0:1, ...]
      # ellipsis at begin
      _ = checker[..., 0:1]
      # ellipsis at middle
      _ = checker[0:1, ..., 0:1]
      # multiple ellipses not allowed
      with self.assertRaisesRegex(ValueError, "Multiple ellipses"):
        _ = checker[..., :, ...].eval()
  @test_util.run_deprecated_v1
  def testShrink(self):
    """Integer indices shrink (remove) the indexed dimension."""
    with self.session():
      raw = [[[[[1, 2, 4, 5], [5, 6, 7, 8], [9, 10, 11, 12]]],
              [[[13, 14, 15, 16], [17, 18, 19, 20], [21, 22, 23, 24]]]]]
      checker = StridedSliceChecker(self, raw)
      _ = checker[:, :, :, :, 3]
      _ = checker[..., 3]
      _ = checker[:, 0]
      _ = checker[:, :, 0]
  @test_util.run_deprecated_v1
  def testBothNewAxisAndShrink(self):
    with self.session():
      ones = array_ops.placeholder(shape=[2, 2], dtype=dtypes.int16)
      self.assertAllEqual(
          ones[array_ops.newaxis, :,
               0].eval(feed_dict={ones: [[1, 1], [1, 1]]}), [[1, 1]])
  @test_util.run_deprecated_v1
  def testTensorIndexing(self):
    """Slice bounds and indices may be Tensors."""
    with self.session():
      raw = [[[[[1, 2, 4, 5], [5, 6, 7, 8], [9, 10, 11, 12]]],
              [[[13, 14, 15, 16], [17, 18, 19, 20], [21, 22, 23, 24]]]]]
      checker = StridedSliceChecker(self, raw, check_type_infer=False)
      bar = constant_op.constant(2)
      bar2 = constant_op.constant(3)
      _ = checker[..., bar:bar2]
      _ = checker[..., bar]
      _ = checker[..., 3]
      _ = checker[..., 2**64 // 2**63]  # Test longs in Python 2
  def testTensorIndexingTypeError(self):
    """Non-integer specs must raise the canonical slice TypeError."""
    with self.session():
      checker = StridedSliceChecker(self, StridedSliceChecker.REF_TENSOR)
      expected = re.escape(array_ops._SLICE_TYPE_ERROR)
      with self.assertRaisesRegex(TypeError, expected):
        _ = checker["foo"]
      with self.assertRaisesRegex(TypeError, expected):
        _ = checker[constant_op.constant("foo")]
      with self.assertRaisesRegex(TypeError, expected):
        _ = checker[0.0]
      with self.assertRaisesRegex(TypeError, expected):
        _ = checker[constant_op.constant(0.0)]
      with self.assertRaisesRegex(TypeError, expected):
        _ = checker[constant_op.constant([1, 2, 3])]
      with self.assertRaisesRegex(TypeError, expected):
        _ = checker[[2.1, -0.7, 1.5]]
  @test_util.run_deprecated_v1
  def testExpand(self):
    """np.newaxis in various positions inserts size-1 dimensions."""
    with self.session():
      raw = [[[[[1, 2, 4, 5], [5, 6, 7, 8], [9, 10, 11, 12]]],
              [[[13, 14, 15, 16], [17, 18, 19, 20], [21, 22, 23, 24]]]]]
      checker = StridedSliceChecker(self, raw)
      # new axis (followed by implicit ellipsis)
      _ = checker[np.newaxis]
      # newaxis after ellipsis
      _ = checker[..., np.newaxis]
      # newaxis in between ellipsis and explicit range
      _ = checker[..., np.newaxis, :]
      _ = checker[:, ..., np.newaxis, :, :]
      # Reverse final dimension with new axis
      _ = checker[:, :, np.newaxis, :, 2::-1]
      # Ellipsis in middle of two newaxis
      _ = checker[np.newaxis, ..., np.newaxis]
  @test_util.run_deprecated_v1
  def testExpandVariable(self):
    with self.session():
      x = variables.Variable(7, dtype=dtypes.int32)
      self.evaluate(x.initializer)
      y = x[None].eval()
      self.assertEqual(y.shape, (1,))
      self.assertAllEqual(y, (7,))
  @test_util.run_deprecated_v1
  def testOptimizedCases(self):
    """Cases handled by the optimized (aligned) slice code path."""
    with self.session():
      checker = StridedSliceChecker(self,
                                    StridedSliceChecker.REF_TENSOR_ALIGNED)
      # Identity
      _ = checker[:]
      # Identity
      _ = checker[...]
      # Identity
      _ = checker[np.newaxis, ..., np.newaxis]
      # First axis slice
      _ = checker[1:]
      # First axis slice
      _ = checker[np.newaxis, 1:]
  @test_util.run_v1_only("currently failing on v2")
  def testMasks(self):
    """Boolean-mask indexing with tensor, ndarray and list masks."""
    with self.session():
      scalar = np.array(0)
      # Test tensor type mask
      checker = StridedSliceChecker(self, StridedSliceChecker.REF_TENSOR)
      _ = checker[checker.x > 2]
      _ = checker[checker.x <= 5]
      _ = checker[ops.convert_to_tensor(scalar)]
      # Test numpy array type mask
      raw = np.array([[[[[1, 2, 4, 5], [5, 6, 7, 8], [9, 10, 11, 12]]],
                       [[[13, 14, 15, 16], [17, 18, 19, 20], [21, 22, 23,
                                                              24]]]]])
      checker1 = StridedSliceChecker(self, raw)
      _ = checker1[raw >= 4]
      _ = checker1[raw < 19]
      _ = checker1[scalar]
      # Test boolean and non boolean cases
      mask = np.array([True, False, True])
      raw1 = np.array([[1, 2, 4, 5], [5, 6, 7, 8], [9, 10, 11, 12]])
      checker2 = StridedSliceChecker(self, raw1)
      _ = checker2[mask]
      _ = checker2[ops.convert_to_tensor(mask)]
class StridedSliceShapeChecker(object):
  """Applies a slice spec to a wrapped tensor and returns only the
  statically inferred shape of the result."""

  def __init__(self, x):
    self.x = x

  def __getitem__(self, spec):
    sliced = self.x[spec]
    return sliced.get_shape()
class StridedSliceShapeTest(test_util.TensorFlowTestCase):
  """Test the shape inference of StridedSliceShapes."""

  @test_util.run_deprecated_v1
  def testUnknown(self):
    # Slicing a tensor of unknown rank yields a shape of unknown rank.
    with self.session():
      uncertain_tensor = array_ops.placeholder(dtypes.float32)
      a = StridedSliceShapeChecker(uncertain_tensor)
      a_slice_shape = a[...]
      self.assertAllEqual(a_slice_shape.ndims, None)

  def tensorShapeEqual(self, x, y):
    """Assert x and y are both None or TensorShapes with identical dims."""
    if x is None or y is None:
      # Previously the code fell through to x.as_list() unconditionally, so
      # a both-None pair crashed with AttributeError instead of passing.
      self.assertTrue(x is None and y is None)
    else:
      self.assertEqual(x.as_list(), y.as_list())

  @test_util.run_deprecated_v1
  def testTensorShapeUncertain(self):
    # Partially known input shape (5, None, 7); None dims propagate.
    with self.session():
      uncertain_tensor = array_ops.placeholder(
          dtypes.float32, shape=(5, None, 7))
      a = StridedSliceShapeChecker(uncertain_tensor)
      self.tensorShapeEqual(a[3:5], tensor_shape.TensorShape([2, None, 7]))
      self.tensorShapeEqual(a[3:5, :, 4], tensor_shape.TensorShape([2, None]))
      self.tensorShapeEqual(a[3:5, 3:4, 4], tensor_shape.TensorShape([2, None]))
      self.tensorShapeEqual(a[3:5, :, 5:10],
                            tensor_shape.TensorShape([2, None, 2]))
      self.tensorShapeEqual(a[3:5, :, 50:3],
                            tensor_shape.TensorShape([2, None, 0]))
      self.tensorShapeEqual(a[3:5, :, array_ops.newaxis, 50:3,],
                            tensor_shape.TensorShape([2, None, 1, 0]))
      self.tensorShapeEqual(a[1:5:2, :, array_ops.newaxis, 50:3,],
                            tensor_shape.TensorShape([2, None, 1, 0]))
      self.tensorShapeEqual(a[:5:3, :, array_ops.newaxis, 50:3,],
                            tensor_shape.TensorShape([2, None, 1, 0]))
      self.tensorShapeEqual(a[:2:3, :, array_ops.newaxis, 50:3,],
                            tensor_shape.TensorShape([1, None, 1, 0]))
      self.tensorShapeEqual(a[::-1, :, array_ops.newaxis, ::-2],
                            tensor_shape.TensorShape([5, None, 1, 4]))

  @test_util.run_deprecated_v1
  def testTensorValuedIndexShape(self):
    # A scalar Tensor index shrinks the dimension but its value is unknown.
    with self.session():
      defined_shape_tensor = array_ops.placeholder(
          dtypes.float32, shape=(5, 3, 7))
      index_value = array_ops.placeholder(dtypes.int32, shape=())
      a = StridedSliceShapeChecker(defined_shape_tensor)
      self.tensorShapeEqual(a[index_value], tensor_shape.TensorShape([3, 7]))
      self.tensorShapeEqual(a[index_value, ::-1],
                            tensor_shape.TensorShape([3, 7]))
      self.tensorShapeEqual(a[index_value, ::-2],
                            tensor_shape.TensorShape([2, 7]))
      other_scalar = array_ops.placeholder(dtypes.int32, shape=())
      self.tensorShapeEqual(a[index_value, other_scalar:2],
                            tensor_shape.TensorShape([None, 7]))
class GradSliceChecker(object):
  """Tests that we can compute a gradient for var^2."""
  def __init__(self, test, sess, var, varnp):
    self.test = test
    self.sess = sess
    # val = var^2, so d(val)/d(var) = 2 * var.
    self.val = var * var
    self.var = var
    self.varnp = varnp
  def __getitem__(self, spec):
    """Slice val/var with `spec` and verify first and second derivatives."""
    slice_var = self.var[spec]
    slice_val = self.val[spec]
    # compute analytic 2nd derivative
    analytic_grad2 = 2 * slice_val
    dy = variables.Variable(
        array_ops.ones_like(slice_var, dtype=dtypes.float32))
    assign = dy.assign(slice_var)
    slice_val_grad, = gradients_impl.gradients(slice_val, self.var, grad_ys=dy)
    slice_val_grad2, = gradients_impl.gradients(
        slice_val_grad, dy, grad_ys=self.var)
    self.sess.run(assign)
    slice_val_grad_evaled, slice_val_grad2_evaled = (
        self.sess.run([slice_val_grad, slice_val_grad2]))
    analytic_grad2_evaled = analytic_grad2.eval()
    self.test.assertAllEqual(slice_val_grad2_evaled, analytic_grad2_evaled)
    # compute analytic gradient for slice
    np_val_grad = (2 * self.varnp * self.varnp)
    np_sliceval_grad = np.zeros(self.var.get_shape())
    if isinstance(spec, ops.Tensor):
      # Tensor specs (e.g. boolean masks) must be evaluated before they can
      # index the numpy reference array.
      spec = self.sess.run([spec])
    np_sliceval_grad[spec] = np_val_grad[spec]
    # verify gradient
    self.test.assertAllEqual(slice_val_grad_evaled, np_sliceval_grad)
class StridedSliceGradTest(test_util.TensorFlowTestCase):
  """Test that strided slice's custom gradient produces correct gradients."""
  @test_util.run_v1_only("b/120545219")
  def testGradient(self):
    with self.session() as sess:
      var = variables.Variable(
          array_ops.reshape(
              math_ops.range(1, 97, 1, dtype=dtypes.float32), shape=(6, 4, 4)))
      init = variables.global_variables_initializer()
      sess.run(init)
      raw = np.array(range(1, 97, 1)).reshape((6, 4, 4))
      grad = GradSliceChecker(self, sess, var, raw)
      _ = grad[2:6:2, 1:3, 1:3]
      _ = grad[3:0:-2, 1:3, 1:3]
      _ = grad[3:0:-2, array_ops.newaxis, 1:3, 2, array_ops.newaxis]
      _ = grad[3:0:-2, 1:3, 2]
      _ = grad[:, -1, :]
      _ = grad[:, -2, :]
      # Out-of-bounds constant indices are rejected at graph-build time.
      with self.assertRaisesRegex(ValueError, "out of bounds"):
        _ = grad[:, -200, :]
      with self.assertRaisesRegex(ValueError, "out of bounds"):
        _ = grad[:, 200, :]
      # Test numpy array type mask
      _ = grad[raw > 51]
      # Test tensor type mask
      _ = grad[ops.convert_to_tensor(raw) <= 76]
  @test_util.run_v1_only("b/120545219")
  def testGradientZero(self):
    # Rank-0 (scalar) variable sliced with an empty spec.
    with self.session() as sess:
      var = variables.Variable(8.)
      init = variables.global_variables_initializer()
      sess.run(init)
      grad = GradSliceChecker(self, sess, var, np.array(8))
      _ = grad[tuple()]
  @test_util.run_deprecated_v1
  def testInt64Indices(self):
    # int64 scalar index: only the selected element receives gradient.
    with self.session():
      a = math_ops.range(3, dtype=dtypes.float32)
      index = constant_op.constant(1, dtype=dtypes.int64)
      b = 2. * a[index]
      grad, = gradients_impl.gradients(b, a)
      self.assertAllEqual(self.evaluate(grad), [0., 2., 0.])
class StridedSliceGradTypeTest(test_util.TensorFlowTestCase):
  """Test varied index types and host located memory."""
  @test_util.run_deprecated_v1
  def testHostVsDevice(self):
    with self.session() as sess:
      var2 = variables.Variable(
          array_ops.reshape(
              math_ops.cast(math_ops.range(1, 5, 1), dtypes.float32),
              shape=(4, 1, 1)))
      varshape = variables.Variable([6, 4, 4], dtype=dtypes.int32)
      self.evaluate(variables.global_variables_initializer())
      begin = constant_op.constant([0, 0, 0])
      end = constant_op.constant([4, 1, 1])
      strides = constant_op.constant([1, 1, 1])
      foo = array_ops.strided_slice_grad(varshape, begin, end, strides, var2)
      sess.run(foo)
  @test_util.run_deprecated_v1
  def testInt64Shape(self):
    # All index inputs (shape/begin/end/strides) given as int64.
    with self.session() as sess:
      original_dy = array_ops.reshape(
          math_ops.cast(math_ops.range(1, 5, 1), dtypes.float32),
          shape=(4, 1, 1))
      original_shape = constant_op.constant([6, 4, 4], dtype=dtypes.int64)
      self.evaluate(variables.global_variables_initializer())
      begin = constant_op.constant([0, 0, 0], dtype=dtypes.int64)
      end = constant_op.constant([4, 1, 1], dtype=dtypes.int64)
      strides = constant_op.constant([1, 1, 1], dtype=dtypes.int64)
      dx = array_ops.strided_slice_grad(original_shape, begin, end, strides,
                                        original_dy)
      sess.run(dx)
  @test_util.run_deprecated_v1
  def testMixedIndexTypes(self):
    # Mixing int32 and int64 index inputs is rejected at op construction.
    with self.session() as sess:
      original_dy = array_ops.reshape(
          math_ops.cast(math_ops.range(1, 5, 1), dtypes.float32),
          shape=(4, 1, 1))
      original_shape = constant_op.constant([6, 4, 4], dtype=dtypes.int64)
      self.evaluate(variables.global_variables_initializer())
      begin = constant_op.constant([0, 0, 0], dtype=dtypes.int32)
      end = constant_op.constant([4, 1, 1], dtype=dtypes.int64)
      strides = constant_op.constant([1, 1, 1], dtype=dtypes.int64)
      with self.assertRaisesRegex(
          TypeError, "Input 'begin' of 'StridedSliceGrad' Op has type int32"
          " that does not match type int64 of argument 'shape'"):
        dx = array_ops.strided_slice_grad(original_shape, begin, end, strides,
                                          original_dy)
        sess.run(dx)
class BenchmarkSlice(object):
  """Forwards item access to a wrapped tensor (used by the benchmarks
  below to build slice ops via Python subscript syntax)."""

  def __init__(self, tensor):
    self.tensor = tensor

  def __getitem__(self, spec):
    sliced = self.tensor[spec]
    return sliced
class StridedSliceBenchmark(test_lib.Benchmark):
  """Benchmark new strided slice operation on non-trivial case."""

  def run_and_time(self, slice_op):
    """Warm up, then report the mean wall time of evaluating slice_op."""
    self.evaluate(variables.global_variables_initializer())
    # Warm-up evaluations so one-time setup costs are excluded from timing.
    for _ in range(10):
      _ = self.evaluate(slice_op)
    iters = 1000
    t0 = time.time()
    for _ in range(iters):
      self.evaluate(slice_op)
    t1 = time.time()
    # Derive the per-iteration time from `iters` instead of repeating the
    # hard-coded 1000.0, so the divisor cannot drift if `iters` changes.
    self.report_benchmark(iters=iters, wall_time=(t1 - t0) / iters)

  def make_variable(self):
    """Create a 256^3 float32 variable filled with 1..256^3."""
    n = 256
    shape = (n, n, n)
    items = n**3
    var = variables.Variable(
        array_ops.reshape(math_ops.linspace(1., float(items), items), shape),
        dtype=dtypes.float32)
    return var

  def benchmark_strided_slice_skip(self):
    """Strided slice with non-unit strides (via the Python helper)."""
    with session.Session():
      var = self.make_variable()
      helper = BenchmarkSlice(var)
      slice_op = helper[::2, ::1, ::2]
      self.run_and_time(slice_op)

  def benchmark_strided_slice_easy(self):
    """Contiguous unit-stride slice (via the Python helper)."""
    with session.Session():
      var = self.make_variable()
      helper = BenchmarkSlice(var)
      slice_op = helper[3::1, 3::1, 3::1]
      self.run_and_time(slice_op)

  def benchmark_slice_easy(self):
    """Contiguous unit-stride slice applied directly to the variable."""
    with session.Session():
      var = self.make_variable()
      slice_op = var[3::1, 3::1, 3::1]
      self.run_and_time(slice_op)
class StridedSliceAssignChecker(object):
  """Compares sliced assignment to a variable against numpy item assignment."""
  def __init__(self, test, x, tensor_type=dtypes.float32, use_resource=False):
    self.tensor_type = tensor_type
    self.test = test
    # Whether to back the assignment with a ResourceVariable or a ref Variable.
    self._use_resource = use_resource
    self.x_np = np.array(x).astype(tensor_type.as_numpy_dtype)
    # Give the value a non-zero imaginary component for complex types.
    if tensor_type.is_complex:
      self.x_np -= 1j * self.x_np
    self.x = constant_op.constant(self.x_np, dtype=tensor_type)
  def __setitem__(self, index, value):
    """Assign `value` to var[index] and check both assign paths vs numpy."""
    value = np.array(value).astype(self.tensor_type.as_numpy_dtype)
    # Give the value a non-zero imaginary component for complex types.
    if self.tensor_type.is_complex:
      value -= 1j * value
    with self.test.test_session() as sess:
      if self._use_resource:
        var = resource_variable_ops.ResourceVariable(self.x)
      else:
        var = variables.Variable(self.x)
      sess.run(variables.variables_initializer([var]))
      val = sess.run(var[index].assign(value))
      # val_copy is used to check that tf.compat.v1.assign works equivalently
      # to the assign method above.
      val_copy = sess.run(state_ops.assign(var[index], value))
      valnp = np.copy(self.x_np)
      valnp[index] = np.array(value)
      self.test.assertAllEqual(val, valnp)
      self.test.assertAllEqual(val_copy, valnp)
class SliceAssignTest(test_util.TensorFlowTestCase, parameterized.TestCase):
  def testInvalidSlice(self):
    # Slices of a constant (not a variable) have no .assign method.
    foo = constant_op.constant([1, 2, 3])
    with self.assertRaisesRegex(AttributeError, "no attribute 'assign'"):
      bar = foo[:2].assign(constant_op.constant([1, 2]))
      self.evaluate(bar)
def doTestSliceAssign(self, use_resource):
for dtype in STRIDED_SLICE_TYPES:
with self.subTest(dtype=dtype):
checker = StridedSliceAssignChecker(
self, [[1, 2, 3], [4, 5, 6]],
use_resource=use_resource,
tensor_type=dtype)
# Check if equal
checker[:] = [[10, 20, 30], [40, 50, 60]]
# Check trivial (1,1) shape tensor
checker[1:2, 1:2] = [[66]]
# shrinks shape changes
checker[1:2, 1] = [66]
checker[1, 1:2] = [66]
checker[1, 1] = 66
# newaxis shape changes
checker[:, None, :] = [[[10, 20, 30]], [[40, 50, 50]]]
# shrink and newaxis
checker[None, None, 0, 0:1] = [[[99]]]
# Non unit strides
checker[::1, ::-2] = [[3, 33], [4, 44]]
# degenerate interval
checker[8:10, 0] = []
checker[8:10, 8:10] = [[]]
# Assign vector to scalar (rank-0) using newaxis
checker2 = StridedSliceAssignChecker(self, 222)
checker2[()] = 6 # no indices
checker2[...] = 6 # ellipsis
checker2[None] = [6] # new axis
@test_util.run_deprecated_v1
@test_util.disable_xla("b/123559667")
def testSliceAssign(self):
self.doTestSliceAssign(use_resource=False)
@test_util.run_deprecated_v1
@test_util.disable_xla("b/123559667")
def testSliceAssignResource(self):
self.doTestSliceAssign(use_resource=True)
@test_util.run_v1_only("b/120545219")
def testUninitialized(self):
with self.assertRaisesRegex(
errors.FailedPreconditionError,
"Attempting to use uninitialized value Variable"):
with self.cached_session() as sess:
v = variables.VariableV1([1, 2])
sess.run(v[:].assign([1, 2]))
@test_util.run_v1_only("b/120545219")
def testTypeError(self):
init_val = constant_op.constant([1, 2], dtype=dtypes.int32)
too_small_val = constant_op.constant([3, 4], dtype=dtypes.int8)
too_large_val = constant_op.constant([3, 4], dtype=dtypes.int64)
v = variables.VariableV1(init_val)
with self.assertRaises(TypeError):
v[:].assign(too_small_val)
with self.assertRaises(TypeError):
v[:].assign(too_large_val)
@test_util.run_deprecated_v1
def testTypeErrorResource(self):
init_val = constant_op.constant([1, 2], dtype=dtypes.int32)
too_small_val = constant_op.constant([3, 4], dtype=dtypes.int8)
too_large_val = constant_op.constant([3, 4], dtype=dtypes.int64)
v = resource_variable_ops.ResourceVariable(init_val)
with self.cached_session() as sess:
self.evaluate(v.initializer)
with self.assertRaises(ValueError):
sess.run(v[:].assign(too_large_val))
with self.assertRaises(ValueError):
sess.run(v[:].assign(too_small_val))
@test_util.disable_xla("b/123559667")
@test_util.run_in_graph_and_eager_modes
def testTensorStridedSliceUpdateWithInputForward(self):
"""Tests tensor_strided_slice_update with input-forwarding taking effect."""
@def_function.function
def assign(x):
y = x + 1
return gen_array_ops.tensor_strided_slice_update(y, [0], [1], [1], [0])
self.assertAllEqual([0, 1], self.evaluate(assign(array_ops.zeros([2]))))
@test_util.disable_xla("b/123559667")
@test_util.run_in_graph_and_eager_modes
def testTensorStridedSliceUpdateNoInputForward(self):
"""Tests tensor_strided_slice_update with no input-forwarding."""
x = constant_op.constant([0.2, 0.3])
y = x + 1
# y's buffer won't be forwarded to z because y and z will be alive at the
# same time later.
z = gen_array_ops.tensor_strided_slice_update(y, [0], [1], [1], [0.4])
ans = y + z
self.assertAllClose([1.6, 2.6], self.evaluate(ans))
@test_util.disable_xla("b/123559667")
def testTensorStridedSliceUpdateGradSimple(self):
original = constant_op.constant([0.2, 0.3])
updates = constant_op.constant([0.4])
with backprop.GradientTape() as tape:
tape.watch([original, updates])
updated = gen_array_ops.tensor_strided_slice_update(
original, [0], [1], [1], updates)
d1, d2 = tape.gradient(updated, [original, updates],
output_gradients=constant_op.constant([2.0, 3.0]))
self.assertAllClose([0.0, 3.0], d1)
self.assertAllClose([2.0], d2)
@parameterized.named_parameters(
("_%s" % i, *args) for i, args in enumerate([ # pylint:disable=g-complex-comprehension
([2, 5], [0, 1], [1, 0], [1, 2], [2], 0, 2, 0, 0, 1),
([4], [5], [3], [1], [3], 1, 0, 0, 0, 0),
([2, 2, 3, 2], [0, 0, 1], [1, 0, 2], [1, 0, 1], [2, 3], 0, 0, 2, 0, 5)
]))
@test_util.disable_xla("b/123559667")
def testTensorStridedSliceUpdateGrad(
self, shape, begin, end, strides, updates_shape, *args):
with self.cached_session():
def f(a, b):
return gen_array_ops.tensor_strided_slice_update(
a, begin, end, strides, b, *args)
theoretical, numerical = gradient_checker_v2.compute_gradient(
f, [array_ops.zeros(shape), array_ops.ones(updates_shape)], delta=1.0)
self.assertAllClose(theoretical, numerical)
class ShapeSizeRankTest(test_util.TensorFlowTestCase):
  """Tests array_ops.shape / size / rank on dense and sparse inputs."""

  @test_util.run_in_graph_and_eager_modes
  def testDenseShape(self):
    """shape/size/rank work on both raw Python values and Tensors."""
    t_value = [[0, 42], [24, 0]]
    self.assertAllEqual((2, 2), self.evaluate(array_ops.shape(t_value)))
    self.assertEqual(4, self.evaluate(array_ops.size(t_value)))
    self.assertEqual(2, self.evaluate(array_ops.rank(t_value)))
    t = constant_op.constant(t_value)
    self.assertAllEqual((2, 2), self.evaluate(array_ops.shape(t)))
    self.assertEqual(4, self.evaluate(array_ops.size(t)))
    self.assertEqual(2, self.evaluate(array_ops.rank(t)))

  @test_util.run_in_graph_and_eager_modes
  def testSparseShape(self):
    """shape/size/rank accept SparseTensor and SparseTensorValue inputs."""
    sp_value = sparse_tensor.SparseTensorValue(
        indices=((0, 1), (1, 0)), values=(42, 24), dense_shape=(2, 2))
    self.assertAllEqual((2, 2), self.evaluate(array_ops.shape(sp_value)))
    self.assertEqual(4, self.evaluate(array_ops.size(sp_value)))
    self.assertEqual(2, self.evaluate(array_ops.rank(sp_value)))
    sp = sparse_tensor.SparseTensor.from_value(sp_value)
    self.assertAllEqual((2, 2), self.evaluate(array_ops.shape(sp)))
    self.assertEqual(4, self.evaluate(array_ops.size(sp)))
    self.assertEqual(2, self.evaluate(array_ops.rank(sp)))

  @test_util.run_in_graph_and_eager_modes
  def testSizeDtype(self):
    """size defaults to int32 and honors an explicit out_type."""
    tensor = [1]
    self.assertEqual(dtypes.int32, self.evaluate(array_ops.size(tensor)).dtype)
    self.assertEqual(
        dtypes.int64,
        self.evaluate(array_ops.size(tensor, out_type=dtypes.int64)).dtype)
class SequenceMaskTest(test_util.TensorFlowTestCase):
  """Tests for array_ops.sequence_mask."""

  def testExceptions(self):
    """maxlen must be a scalar."""
    with self.cached_session():
      with self.assertRaisesRegex(ValueError, "maxlen must be scalar"):
        array_ops.sequence_mask([10, 20], [10, 20])

  @test_util.run_deprecated_v1
  def testOneDimensionalWithMaxlen(self):
    """1-D lengths with an explicit maxlen produce a [len(lengths), maxlen] mask."""
    with self.cached_session():
      res = array_ops.sequence_mask(constant_op.constant([1, 3, 2]), 5)
      self.assertAllEqual(res.get_shape(), [3, 5])
      self.assertAllEqual(
          res,
          [[True, False, False, False, False], [True, True, True, False, False],
           [True, True, False, False, False]])

  @test_util.run_deprecated_v1
  def testOneDimensionalDtypeWithoutMaxlen(self):
    """Without maxlen the mask width defaults to max(lengths); dtype is honored."""
    with self.cached_session():
      # test dtype and default maxlen:
      res = array_ops.sequence_mask(
          constant_op.constant([0, 1, 4]), dtype=dtypes.float32)
      self.assertAllEqual(res.get_shape().as_list(), [3, 4])
      self.assertAllEqual(
          res,
          [[0.0, 0.0, 0.0, 0.0], [1.0, 0.0, 0.0, 0.0], [1.0, 1.0, 1.0, 1.0]])

  @test_util.run_deprecated_v1
  def testOneDimensionalWithoutMaxlen(self):
    with self.cached_session():
      res = array_ops.sequence_mask(constant_op.constant([0, 1, 4]))
      self.assertAllEqual(res.get_shape().as_list(), [3, 4])
      self.assertAllEqual(
          res, [[False, False, False, False], [True, False, False, False],
                [True, True, True, True]])

  @test_util.run_deprecated_v1
  def testTwoDimensional(self):
    """2-D lengths produce a mask with one extra leading batch dimension."""
    with self.cached_session():
      res = array_ops.sequence_mask(constant_op.constant([[1, 3, 2]]), 5)
      self.assertAllEqual(res.get_shape(), [1, 3, 5])
      self.assertAllEqual(res, [[[True, False, False, False, False],
                                 [True, True, True, False, False],
                                 [True, True, False, False, False]]])
      # test dtype and default maxlen:
      res = array_ops.sequence_mask(
          constant_op.constant([[0, 1, 4], [1, 2, 3]]), dtype=dtypes.float32)
      self.assertAllEqual(res.get_shape().as_list(), [2, 3, 4])
      self.assertAllEqual(
          res,
          [[[0.0, 0.0, 0.0, 0.0], [1.0, 0.0, 0.0, 0.0], [1.0, 1.0, 1.0, 1.0]],
           [[1.0, 0.0, 0.0, 0.0], [1.0, 1.0, 0.0, 0.0], [1.0, 1.0, 1.0, 0.0]]])

  @test_util.run_deprecated_v1
  def testUnknownShape(self):
    """Unknown-rank lengths yield an unknown-shape mask."""
    lengths = array_ops.placeholder(dtype=dtypes.int32)
    res = array_ops.sequence_mask(lengths)
    self.assertEqual(res.shape, None)

  @test_util.run_deprecated_v1
  def testDtypes(self):
    """All int32/int64 combinations of lengths and maxlen are accepted."""

    def check_dtypes(lengths_dtype, maxlen_dtype):
      res = array_ops.sequence_mask(
          constant_op.constant([1, 3, 2], dtype=lengths_dtype),
          constant_op.constant(5, dtype=maxlen_dtype))
      self.assertAllEqual(res.get_shape(), [3, 5])
      self.assertAllEqual(
          res,
          [[True, False, False, False, False], [True, True, True, False, False],
           [True, True, False, False, False]])

    with self.cached_session():
      check_dtypes(dtypes.int32, dtypes.int32)
      check_dtypes(dtypes.int32, dtypes.int64)
      check_dtypes(dtypes.int64, dtypes.int32)
      check_dtypes(dtypes.int64, dtypes.int64)

  def testOutputDtype(self):
    """The `dtype` argument accepts tf dtypes, strings, and numpy types."""

    def check_output_dtype(output_dtype):
      res = self.evaluate(
          array_ops.sequence_mask(
              constant_op.constant([1, 3, 2], dtype=dtypes.int32),
              constant_op.constant(5, dtype=dtypes.int32),
              dtype=output_dtype))
      self.assertAllEqual(
          res,
          self.evaluate(
              math_ops.cast([[True, False, False, False, False],
                             [True, True, True, False, False],
                             [True, True, False, False, False]], output_dtype)))

    check_output_dtype(dtypes.bool)
    check_output_dtype("bool")
    # np.bool was a deprecated alias for the builtin `bool` and was removed in
    # NumPy 1.24; np.bool_ is the supported spelling.
    check_output_dtype(np.bool_)
    check_output_dtype(dtypes.int32)
    check_output_dtype("int32")
    check_output_dtype(np.int32)
    check_output_dtype(dtypes.float32)
    check_output_dtype("float32")
    check_output_dtype(np.float32)
    check_output_dtype(dtypes.int64)
    # NOTE(review): this triple mixes int64 with "float64"/np.float64, unlike
    # the uniform triples above — presumably extra coverage, but confirm it is
    # not a typo for dtypes.float64 / "int64".
    check_output_dtype("float64")
    check_output_dtype(np.float64)
class ConcatSliceResourceTest(test_util.TensorFlowTestCase):
  """Tests that slicing a stack of resource handles preserves the resource."""

  @test_util.run_in_graph_and_eager_modes
  @test_util.run_deprecated_v1
  def testConcatSlice(self):
    handle_b = test_ops.stub_resource_handle_op(container="a", shared_name="b")
    handle_c = test_ops.stub_resource_handle_op(container="a", shared_name="c")
    stacked = array_ops.stack([handle_b, handle_c])
    sliced = array_ops.strided_slice(stacked, [1], [2])
    self.evaluate(test_ops.resource_create_op(sliced))
    # The slice refers to the same underlying resource as handle_c, so
    # creating that resource a second time must fail.
    with self.assertRaises(errors.AlreadyExistsError):
      self.evaluate(test_ops.resource_create_op(handle_c))
class IdentityTest(test_util.TensorFlowTestCase):
  """Tests device placement of array_ops.identity in eager mode."""

  @test_util.run_gpu_only
  def testEagerIdentity(self):
    with context.eager_mode():

      def _test(x, y, device):
        # identity must preserve the value and place its output on the device
        # of the enclosing scope.
        self.assertAllEqual(x.numpy(), y.numpy())
        self.assertTrue(device in y.device.lower())

      with test_util.force_gpu():
        a = constant_op.constant([[2], [3]], dtype=dtypes.float32)
      with test_util.force_gpu():
        b = array_ops.identity(a)
        _test(a, b, "gpu")
      with test_util.force_cpu():
        c = array_ops.identity(b)
        _test(b, c, "cpu")
      with test_util.force_cpu():
        d = array_ops.identity(c)
        _test(c, d, "cpu")
      with test_util.force_gpu():
        e = array_ops.identity(d)
        _test(d, e, "gpu")
class PadTest(test_util.TensorFlowTestCase):
  """Tests for array_ops.pad and the mirror_pad_grad kernel."""

  def testEager(self):
    """CONSTANT padding in eager mode zero-pads by the given margins."""
    with context.eager_mode():
      t = constant_op.constant([[1, 2, 3], [4, 5, 6]])
      paddings = constant_op.constant([[
          1,
          1,
      ], [2, 2]])
      padded = array_ops.pad(t, paddings, "CONSTANT")
      self.assertAllEqual(padded.numpy(),
                          [[0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 2, 3, 0, 0],
                           [0, 0, 4, 5, 6, 0, 0], [0, 0, 0, 0, 0, 0, 0]])

  def testSymmetricMirrorPadGrad(self):
    """mirror_pad_grad (SYMMETRIC) folds padded regions back into the core."""
    t = np.broadcast_to(np.arange(0, 7), (3, 2, 1, 7))
    paddings = constant_op.constant([
        [1, 1],
        [0, 0],
        [0, 0],
        [2, 2],
    ])
    expected = np.broadcast_to(np.array([9, 27, 27]), (1, 2, 1, 3))
    result = gen_array_ops.mirror_pad_grad(t, paddings, "SYMMETRIC")
    self.assertAllEqual(result, expected)

  def testReflectMirrorPadGrad(self):
    """mirror_pad_grad (REFLECT) folds padded regions excluding the border."""
    t = np.broadcast_to(np.reshape(np.arange(0, 7), (7, 1)), (1, 4, 7, 1))
    paddings = constant_op.constant([
        [0, 0],
        [1, 1],
        [2, 2],
        [0, 0],
    ])
    expected = np.broadcast_to(
        np.reshape(np.array([16, 18, 8]), (3, 1)), (1, 2, 3, 1))
    result = gen_array_ops.mirror_pad_grad(t, paddings, "REFLECT")
    self.assertAllEqual(result, expected)
class InvertPermutationTest(test_util.TensorFlowTestCase):
  """Tests for array_ops.invert_permutation."""

  @test_util.run_deprecated_v1
  def testInvertPermutation(self):
    for dtype in [dtypes.int32, dtypes.int64]:
      with self.subTest(dtype=dtype):
        with self.cached_session():
          perm = constant_op.constant([3, 4, 0, 2, 1], dtype=dtype)
          inverse = array_ops.invert_permutation(perm)
          self.assertAllEqual(inverse.get_shape(), [5])
          # inverse[perm[i]] == i for every i.
          self.assertAllEqual(inverse, [2, 4, 3, 0, 1])
class UnravelIndexTest(test_util.TensorFlowTestCase):
  """Tests for array_ops.unravel_index."""

  # TODO(b/73086570): Reenable test.
  @unittest.skip("Test does not pass internally.")
  def testUnravelIndex(self):
    """Scalar, vector, and multi-index inputs unravel to coordinate arrays."""
    with self.cached_session():
      for dtype in [dtypes.int32, dtypes.int64]:
        with self.subTest(dtype=dtype):
          indices_1 = constant_op.constant(1621, dtype=dtype)
          dims_1 = constant_op.constant([6, 7, 8, 9], dtype=dtype)
          out_1 = array_ops.unravel_index(indices_1, dims_1)
          self.assertAllEqual(out_1, [3, 1, 4, 1])
          indices_2 = constant_op.constant([1621], dtype=dtype)
          dims_2 = constant_op.constant([6, 7, 8, 9], dtype=dtype)
          out_2 = array_ops.unravel_index(indices_2, dims_2)
          self.assertAllEqual(out_2, [[3], [1], [4], [1]])
          indices_3 = constant_op.constant([22, 41, 37], dtype=dtype)
          dims_3 = constant_op.constant([7, 6], dtype=dtype)
          out_3 = array_ops.unravel_index(indices_3, dims_3)
          self.assertAllEqual(out_3, [[3, 6, 6], [4, 5, 1]])

  # Test case for GitHub issue 40204.
  def testUnravelIndexZeroDim(self):
    """A zero-sized dimension makes every index out of bounds."""
    with self.cached_session():
      for dtype in [dtypes.int32, dtypes.int64]:
        with self.assertRaisesRegex(errors.InvalidArgumentError,
                                    "index is out of bound as with dims"):
          indices = constant_op.constant([2, 5, 7], dtype=dtype)
          dims = constant_op.constant([3, 0], dtype=dtype)
          self.evaluate(array_ops.unravel_index(indices=indices, dims=dims))
class GuaranteeConstOpTest(test_util.TensorFlowTestCase):
  """Tests for array_ops.guarantee_const."""

  @test_util.run_deprecated_v1
  def testSimple(self):
    """guarantee_const passes a constant through unchanged."""
    with self.cached_session():
      a = array_ops.constant(10)
      guarantee_a = array_ops.guarantee_const(a)
      self.assertEqual(10, self.evaluate(guarantee_a))

  @test_util.run_deprecated_v1
  def testVariables(self):
    """Both ref and resource variables (read as tensors) are accepted."""
    with self.cached_session() as sess:
      for use_resource in [False, True]:
        with self.subTest(use_resource=use_resource):
          a = variable_scope.get_variable(
              "var_{}".format(use_resource), [],
              initializer=init_ops.constant_initializer(10.0),
              use_resource=use_resource)
          guarantee_a = array_ops.guarantee_const(a)
          self.evaluate(variables.global_variables_initializer())
          self.assertEqual(10.0, self.evaluate(guarantee_a))

  @test_util.run_deprecated_v1
  def testResourceRejection(self):
    """A raw resource handle (not its value) is rejected at runtime."""
    with self.cached_session() as sess:
      a = variable_scope.get_variable(
          "resource_var", [],
          initializer=init_ops.constant_initializer(10.0),
          use_resource=True)
      guarantee_a = array_ops.guarantee_const(a.handle)
      self.evaluate(variables.global_variables_initializer())
      with self.assertRaisesWithPredicateMatch(errors.InvalidArgumentError,
                                               "cannot be a resource variable"):
        self.evaluate(guarantee_a)
class SnapshotOpTest(test_util.TensorFlowTestCase):
  """Tests that gen_array_ops.snapshot returns its input values unchanged."""

  @test_util.run_deprecated_v1
  def testInvertPermutation(self):
    tested_dtypes = [dtypes.int32, dtypes.int64, dtypes.float32,
                     dtypes.float64]
    for dtype in tested_dtypes:
      with self.subTest(dtype=dtype):
        with self.cached_session():
          source = constant_op.constant([0, 1, 2, 3], dtype=dtype)
          copied = gen_array_ops.snapshot(source)
          self.assertAllEqual(copied, [0, 1, 2, 3])
@test_util.run_all_in_graph_and_eager_modes
class QuantizeAndDequantizeTest(test_util.TensorFlowTestCase):
  """Tests for array_ops.quantize_and_dequantize_v2."""

  # Generates a tensor of the specified `shape` using values from `values`
  # scaled by (slice_idx + 1) along `axis` dimension.
  def _scale_per_slice(self, shape, axis, values):
    # Note: repeats the values if the shape is larger than values.
    out = np.take(values, np.remainder(np.arange(np.prod(shape)),
                                       len(values))).reshape(shape)
    if axis is not None:
      scale_shape = [1] * len(shape)
      scale_shape[axis] = shape[axis]
      out *= np.arange(1, shape[axis] + 1).reshape(scale_shape)
    return out

  def testAxis(self):
    """Per-axis quantization matches expected values; negative axes agree."""
    shape = np.array([2, 3, 4, 5])
    values = np.array([-1, -0.5, 0, 0.3, 0.8, 0.555, 0.5], dtype=np.float32)
    # Expected int8 HALF_UP quantized/dequantized values for `values`.
    quant_values = np.array(
        [-1, -0.5, 0, 38.0 / 128, 102.0 / 128, 71.0 / 128, 0.5],
        dtype=np.float32)
    for axis in [None, 0, 1, 2, 3]:
      with self.subTest(axis=axis):
        inputs = constant_op.constant(
            self._scale_per_slice(shape, axis, values))
        expected = self._scale_per_slice(shape, axis, quant_values)
        # min/max are ignored when range_given=False.
        unused_minmax_value = 0 if axis is None else [0] * shape[axis]
        fake_quantized = self.evaluate(
            array_ops.quantize_and_dequantize_v2(
                inputs,
                unused_minmax_value,
                unused_minmax_value,
                range_given=False,
                round_mode="HALF_UP",
                axis=axis))
        self.assertAllEqual(fake_quantized, expected)
        if axis is not None:
          # A negative axis must behave like its positive counterpart.
          fake_quantized = self.evaluate(
              array_ops.quantize_and_dequantize_v2(
                  inputs,
                  unused_minmax_value,
                  unused_minmax_value,
                  range_given=False,
                  axis=(axis - 4)))
          self.assertAllClose(fake_quantized, expected)

  def testBadAxis(self):
    """An axis beyond the input rank is rejected."""
    input_tensor = [2.5, 2.5]
    input_min = [0, 0]
    input_max = [1, 1]
    error_message_pattern = "Shape must be at least rank 11 but is rank 1"
    # TODO(b/171260356): Eager mode and graph mode throw different error types
    error = errors.InvalidArgumentError if context.executing_eagerly(
    ) else ValueError
    with self.assertRaisesRegex(error, error_message_pattern):
      self.evaluate(
          array_ops.quantize_and_dequantize_v2(
              input=input_tensor,
              input_min=input_min,
              input_max=input_max,
              axis=10))

  def testQuantizeDequantizeGrad(self):
    """Checks the gradient jacobian of a range-given quantize/dequantize."""
    shape = (2, 2)
    max_threshold = 0
    min_threshold = -10
    input_value = np.random.rand(2, 2) * 40.0 - 20.0
    input_tensor = constant_op.constant(input_value, shape=shape,
                                        name="input_tensor")
    with self.cached_session():
      def f(a):
        return array_ops.quantize_and_dequantize_v2(
            a,
            input_min=min_threshold,
            input_max=max_threshold,
            range_given=True)
      output_grad = gradient_checker_v2.compute_gradient(f, [input_tensor])
      # output_grad[0] is the list of theoretical jacobians (one 4x4 entry).
      self.assertAllClose(output_grad[0], np.zeros([1, 4, 4]))
@test_util.run_all_in_graph_and_eager_modes
class SortedSearchTest(test_util.TensorFlowTestCase):
  """Tests array_ops.searchsorted against np.searchsorted for both sides."""

  def testUpperBoundFloatHandCoded(self):
    cdf = np.array([0, .2, .5, .6, .8, 1.], dtype=np.float32)
    arr = np.array([.04, .99, .53, .58, .31, .01, .79, .8, .21],
                   dtype=np.float32)
    result = np.searchsorted(cdf, arr, side="right")
    tf_result = self.evaluate(array_ops.searchsorted(cdf, arr, side="right"))
    self.assertAllEqual(result, tf_result)

  def testUpperBoundFloatRandomNd(self):
    """N-d inputs search along the innermost axis, batched over the rest."""
    dim_size = 7
    for d in range(1, 5):
      shape = [dim_size] * d
      cdf = np.cumsum(
          np.random.uniform(size=shape).astype(np.float32), axis=(d - 1))
      arr = np.random.uniform(size=shape).astype(np.float32) * dim_size
      tf_result = self.evaluate(array_ops.searchsorted(cdf, arr, side="right"))
      # Flatten all batch dims and compare row by row against numpy.
      cdf = cdf.reshape([-1, dim_size])
      arr = arr.reshape([-1, dim_size])
      result = np.zeros(arr.shape, dtype=np.int32)
      for i in range(dim_size**(d - 1)):
        result[i, :] = np.searchsorted(cdf[i, :], arr[i, :], side="right")
      result = result.reshape(shape)
      self.assertAllEqual(result, tf_result)

  def testUpperBoundFloatUneven(self):
    """Search array and values may have different innermost sizes."""
    batch_size = 7
    size_search_array = 1000
    size_values = 47
    cdf = np.cumsum(
        np.random.uniform(size=[batch_size, size_search_array]).astype(
            np.float32),
        axis=1)
    arr = np.random.uniform(size=[batch_size, size_values]).astype(
        np.float32) * size_search_array
    tf_result = self.evaluate(array_ops.searchsorted(cdf, arr, side="right"))
    result = np.zeros(arr.shape, dtype=np.int32)
    for i in range(batch_size):
      result[i, :] = np.searchsorted(cdf[i, :], arr[i, :], side="right")
    self.assertAllEqual(result, tf_result)

  def testLowerBoundFloatHandCoded(self):
    cdf = np.array([0, .2, .5, .6, .8, 1.], dtype=np.float32)
    arr = np.array([.04, .99, .53, .58, .31, .01, .79, .8, .21],
                   dtype=np.float32)
    result = np.searchsorted(cdf, arr, side="left")
    tf_result = self.evaluate(array_ops.searchsorted(cdf, arr, side="left"))
    self.assertAllEqual(result, tf_result)

  def testLowerBoundFloatRandomNd(self):
    dim_size = 7
    for d in range(1, 5):
      shape = [dim_size] * d
      cdf = np.cumsum(
          np.random.uniform(size=shape).astype(np.float32), axis=(d - 1))
      arr = np.random.uniform(size=shape).astype(np.float32) * dim_size
      tf_result = self.evaluate(array_ops.searchsorted(cdf, arr, side="left"))
      cdf = cdf.reshape([-1, dim_size])
      arr = arr.reshape([-1, dim_size])
      result = np.zeros(arr.shape, dtype=np.int32)
      for i in range(dim_size**(d - 1)):
        result[i, :] = np.searchsorted(cdf[i, :], arr[i, :], side="left")
      result = result.reshape(shape)
      self.assertAllEqual(result, tf_result)

  def testLowerBoundFloatUneven(self):
    batch_size = 7
    size_search_array = 1000
    size_values = 47
    cdf = np.cumsum(
        np.random.uniform(size=[batch_size, size_search_array]).astype(
            np.float32),
        axis=1)
    arr = np.random.uniform(size=[batch_size, size_values]).astype(
        np.float32) * size_search_array
    tf_result = self.evaluate(array_ops.searchsorted(cdf, arr, side="left"))
    result = np.zeros(arr.shape, dtype=np.int32)
    for i in range(batch_size):
      result[i, :] = np.searchsorted(cdf[i, :], arr[i, :], side="left")
    self.assertAllEqual(result, tf_result)

  def testUpperBoundIntHandCoded(self):
    cdf = np.array([0, 20, 50, 60, 80, 100], dtype=np.int64)
    arr = np.array([4, 99, 53, 58, 31, 1, 79, 8, 21], dtype=np.int64)
    result = np.searchsorted(cdf, arr, side="right")
    tf_result = self.evaluate(array_ops.searchsorted(cdf, arr, side="right"))
    self.assertAllEqual(result, tf_result)

  def testUpperBoundIntRandomNd(self):
    dim_size = 7
    for d in range(1, 5):
      shape = [dim_size] * d
      cdf = np.cumsum(
          np.random.randint(low=0, high=10, size=shape).astype(np.int64),
          axis=(d - 1))
      arr = np.random.randint(
          low=0, high=10 * dim_size, size=shape).astype(np.int64)
      tf_result = self.evaluate(array_ops.searchsorted(cdf, arr, side="right"))
      cdf = cdf.reshape([-1, dim_size])
      arr = arr.reshape([-1, dim_size])
      result = np.zeros(arr.shape, dtype=np.int32)
      for i in range(dim_size**(d - 1)):
        result[i, :] = np.searchsorted(cdf[i, :], arr[i, :], side="right")
      result = result.reshape(shape)
      self.assertAllEqual(result, tf_result)

  def testUpperBoundIntUneven(self):
    batch_size = 7
    size_search_array = 1000
    size_values = 47
    cdf = np.cumsum(
        np.random.randint(low=0, high=10,
                          size=[batch_size,
                                size_search_array]).astype(np.int64),
        axis=1)
    arr = np.random.randint(
        low=0, high=10 * size_search_array, size=[batch_size,
                                                  size_values]).astype(np.int64)
    tf_result = self.evaluate(array_ops.searchsorted(cdf, arr, side="right"))
    result = np.zeros(arr.shape, dtype=np.int32)
    for i in range(batch_size):
      result[i, :] = np.searchsorted(cdf[i, :], arr[i, :], side="right")
    self.assertAllEqual(result, tf_result)

  def testLowerBoundIntHandCoded(self):
    cdf = np.array([0, 20, 50, 60, 80, 100], dtype=np.int64)
    arr = np.array([4, 99, 53, 58, 31, 1, 79, 8, 21], dtype=np.int64)
    result = np.searchsorted(cdf, arr, side="left")
    tf_result = self.evaluate(array_ops.searchsorted(cdf, arr, side="left"))
    self.assertAllEqual(result, tf_result)

  def testLowerBoundIntRandomNd(self):
    dim_size = 7
    for d in range(1, 5):
      shape = [dim_size] * d
      cdf = np.cumsum(
          np.random.randint(low=0, high=10, size=shape).astype(np.int64),
          axis=(d - 1))
      arr = np.random.randint(
          low=0, high=10 * dim_size, size=shape).astype(np.int64)
      tf_result = self.evaluate(array_ops.searchsorted(cdf, arr, side="left"))
      cdf = cdf.reshape([-1, dim_size])
      arr = arr.reshape([-1, dim_size])
      result = np.zeros(arr.shape, dtype=np.int32)
      for i in range(dim_size**(d - 1)):
        result[i, :] = np.searchsorted(cdf[i, :], arr[i, :], side="left")
      result = result.reshape(shape)
      self.assertAllEqual(result, tf_result)

  def testLowerBoundIntUneven(self):
    batch_size = 7
    size_search_array = 1000
    size_values = 47
    cdf = np.cumsum(
        np.random.randint(low=0, high=10,
                          size=[batch_size,
                                size_search_array]).astype(np.int64),
        axis=1)
    arr = np.random.randint(
        low=0, high=10 * size_search_array, size=[batch_size,
                                                  size_values]).astype(np.int64)
    tf_result = self.evaluate(array_ops.searchsorted(cdf, arr, side="left"))
    result = np.zeros(arr.shape, dtype=np.int32)
    for i in range(batch_size):
      result[i, :] = np.searchsorted(cdf[i, :], arr[i, :], side="left")
    self.assertAllEqual(result, tf_result)

  def testZeroSequenceSize(self):
    """An empty search array yields all-zero indices."""
    dtype = dtypes.int32
    for side in ("left", "right"):
      with self.subTest(side=side):
        self.assertAllEqual(
            array_ops.searchsorted(
                array_ops.ones([2, 0]),
                array_ops.ones([2, 3]),
                side=side,
                out_type=dtype), array_ops.zeros([2, 3], dtype))

  def testZeroValueSize(self):
    """An empty values tensor yields an empty result."""
    dtype = dtypes.int32
    for side in ("left", "right"):
      with self.subTest(side=side):
        self.assertAllEqual(
            array_ops.searchsorted(
                array_ops.ones([2, 3]),
                array_ops.ones([2, 0]),
                side=side,
                out_type=dtype), array_ops.zeros([2, 0], dtype))
class BatchGatherNdTest(test_util.TensorFlowTestCase):
  """Tests for array_ops.batch_gather_nd / gather_nd with batch_dims."""

  def testShapesMatch(self):
    """Tests for various different shape combinations."""
    shapes = []
    # params_shape, indices_shape, batch_dims
    shapes.append(((2, 2, 2), (2, 1), 1),)
    shapes.append(((2, 2, 2), (2, 2), 1),)
    shapes.append(((2, 2, 2), (2, 3), 0),)
    shapes.append(((2, 2, 2), (3,), 0),)
    shapes.append(((2, 2, 2), (1,), 0),)
    shapes.append(((2, 2, 3, 2), (2, 3), 1),)
    shapes.append(((2, 2, 3, 2), (2, 2), 1),)
    shapes.append(((2, 2, 3, 2), (2, 1), 1),)
    shapes.append(((2, 2, 3, 2), (2, 1, 3), 1),)
    shapes.append(((2, 2, 3, 2), (2, 2, 2), 1),)
    shapes.append(((2, 2, 3, 2), (2, 3, 1), 1),)
    shapes.append(((3, 2, 2, 3, 4), (3, 2, 3), 2),)
    shapes.append(((3, 2, 2, 3, 4), (3, 2, 2), 2),)
    shapes.append(((3, 2, 2, 3, 4), (3, 2, 1), 2),)
    shapes.append(((3, 2, 2, 3, 4), (3, 2, 1, 3), 2),)
    shapes.append(((3, 2, 2, 3, 4), (3, 2, 2, 2), 2),)
    shapes.append(((3, 2, 2, 3, 4), (3, 2, 3, 1), 2),)
    for params_shape, indices_shape, batch_dims in shapes:
      with self.subTest(
          params_shape=params_shape,
          indices_shape=indices_shape,
          batch_dims=batch_dims):
        params = constant_op.constant(1.0, shape=(params_shape))
        indices = constant_op.constant(
            1, shape=(indices_shape), dtype=dtypes.int32)
        out = array_ops.batch_gather_nd(
            params=params, indices=indices, batch_dims=batch_dims)
        # Expected output shape: indices batch/outer dims followed by any
        # remaining (ungathered) inner params dims.
        ndims_params = len(params_shape) - batch_dims
        ndims_rows = ndims_params - indices_shape[-1]
        expected_out_shape = indices_shape[:-1]
        if ndims_rows > 0:
          expected_out_shape += params_shape[-ndims_rows:]
        self.assertSequenceEqual(out.shape, expected_out_shape)

  def testReducesToGatherNDWhenBatchDimIsZero(self):
    """Confirms setting batch_dims to zero reduces to tf.gather_nd."""
    params = constant_op.constant(np.random.uniform(0.0, 1.0, size=(7, 8, 9)))
    indices_shapes = []
    indices_shapes.append((1,))
    indices_shapes.append((3, 1))
    indices_shapes.append((3, 3, 1))
    indices_shapes.append((2,))
    indices_shapes.append((3, 2))
    indices_shapes.append((3, 3, 2))
    indices_shapes.append((3,))
    indices_shapes.append((3, 3))
    indices_shapes.append((3, 3, 3))
    for indices_shape in indices_shapes:
      with self.subTest(indices_shape=indices_shape):
        indices = np.random.randint(0, 7, size=indices_shape)
        gather_nd_result = gen_array_ops.gather_nd(params, indices)
        batch_gather_nd_result = array_ops.batch_gather_nd(
            params=params, indices=indices, batch_dims=0)
        self.assertAllEqual(gather_nd_result, batch_gather_nd_result)

  def testSameResultAsMapFn(self):
    """Compares results with gather_nd called on every element with map_fn."""
    shapes = []
    # params_shape, indices_shape, batch_dims
    shapes.append(((2, 2, 2), (2, 1), 1),)
    shapes.append(((2, 2, 2), (2, 2), 1),)
    shapes.append(((2, 2, 3, 2), (2, 3), 1),)
    shapes.append(((2, 2, 3, 2), (2, 2), 1),)
    shapes.append(((2, 2, 3, 2), (2, 1), 1),)
    shapes.append(((2, 2, 3, 2), (2, 1, 3), 1),)
    shapes.append(((2, 2, 3, 2), (2, 2, 2), 1),)
    shapes.append(((2, 2, 3, 2), (2, 3, 1), 1),)
    shapes.append(((3, 2, 2, 3, 4), (3, 2, 3), 2),)
    shapes.append(((3, 2, 2, 3, 4), (3, 2, 2), 2),)
    shapes.append(((3, 2, 2, 3, 4), (3, 2, 1), 2),)
    shapes.append(((3, 2, 2, 3, 4), (3, 2, 1, 3), 2),)
    shapes.append(((3, 2, 2, 3, 4), (3, 2, 2, 2), 2),)
    shapes.append(((3, 2, 2, 3, 4), (3, 2, 3, 1), 2),)
    for params_shape, indices_shape, batch_dims in shapes:
      with self.subTest(
          params_shape=params_shape,
          indices_shape=indices_shape,
          batch_dims=batch_dims):
        params = constant_op.constant(
            np.random.uniform(0.0, 1.0, size=(params_shape)))
        indices = np.random.randint(0, 2, size=indices_shape)
        batch_gather_nd_result = array_ops.batch_gather_nd(
            params=params, indices=indices, batch_dims=batch_dims)
        # map_fn maps over a single leading dim, so fold extra batch dims
        # into the first dim before mapping and restore them afterwards.
        if batch_dims > 1:
          params = array_ops.reshape(
              params, shape=[-1] + list(params_shape[batch_dims:]))
          indices = array_ops.reshape(
              indices, shape=[-1] + list(indices_shape[batch_dims:]))
        map_fn_gather_nd_result = map_fn.map_fn(
            fn=self._map_fn_body, elems=(params, indices), dtype=dtypes.float64)
        if batch_dims > 1:
          out_shape = map_fn_gather_nd_result.shape.as_list()
          out_shape = list(params_shape[:batch_dims]) + out_shape[1:]
          map_fn_gather_nd_result = array_ops.reshape(
              map_fn_gather_nd_result, shape=out_shape)
        self.assertAllEqual(map_fn_gather_nd_result, batch_gather_nd_result)

  def _map_fn_body(self, elems):
    """Applies gather_nd to one (params, indices) batch element pair."""
    return gen_array_ops.gather_nd(elems[0], elems[1])

  def testBatchDimsAsTensor(self):
    """Tests Tensor batch_dims as input works as intended."""
    shapes = []
    # params_shape, indices_shape, batch_dims
    shapes.append(((3, 2, 2, 3, 4), (3, 2, 3, 1), 0),)
    shapes.append(((3, 2, 2, 3, 4), (3, 2, 3, 1), 1),)
    shapes.append(((3, 2, 2, 3, 4), (3, 2, 3, 1), 2),)
    for params_shape, indices_shape, batch_dims in shapes:
      with self.subTest(
          params_shape=params_shape,
          indices_shape=indices_shape,
          batch_dims=batch_dims):
        params = constant_op.constant(
            np.random.uniform(0.0, 1.0, size=(params_shape)))
        indices = np.random.randint(0, 2, size=indices_shape)
        batch_gather_nd_result = array_ops.gather_nd(
            params=params, indices=indices, batch_dims=batch_dims)
        batch_dims_tensor = constant_op.constant([batch_dims])
        batch_gather_nd_tensor_batch_dims_result = array_ops.gather_nd(
            params=params, indices=indices, batch_dims=batch_dims_tensor)
        self.assertAllEqual(batch_gather_nd_tensor_batch_dims_result,
                            batch_gather_nd_result)

  def testInvalidBatchDimsRaisesException(self):
    """Tests whether invalid batch_dims raise expected exceptions."""
    params = constant_op.constant(
        np.random.uniform(0.0, 1.0, size=(3, 2, 2, 3, 4)))
    indices = np.random.randint(0, 2, size=(3, 2, 3))
    with self.assertRaises(TypeError):
      array_ops.batch_gather_nd(
          params=params,
          indices=indices,
          batch_dims=constant_op.constant((0, 1)))
    with self.assertRaises(ValueError):
      array_ops.batch_gather_nd(params=params, indices=indices, batch_dims=-1)
    with self.assertRaises(ValueError):
      array_ops.batch_gather_nd(params=params, indices=indices, batch_dims=4)

  @test_util.run_deprecated_v1
  def testNoneBatchDimensions(self):
    """Tests gather_nd works with None dimensions."""
    shapes = []
    # params_shape, indices_shape, batch_dims
    shapes.append(((2, 2, 2), (2, 1), 1),)
    shapes.append(((2, 2, 2), (2, 2), 1),)
    shapes.append(((2, 2, 3, 2), (2, 3), 1),)
    shapes.append(((2, 2, 3, 2), (2, 2), 1),)
    shapes.append(((2, 2, 3, 2), (2, 1), 1),)
    shapes.append(((2, 2, 3, 2), (2, 1, 3), 1),)
    shapes.append(((2, 2, 3, 2), (2, 2, 2), 1),)
    shapes.append(((2, 2, 3, 2), (2, 3, 1), 1),)
    shapes.append(((3, 2, 2, 3, 4), (3, 2, 3), 2),)
    shapes.append(((3, 2, 2, 3, 4), (3, 2, 2), 2),)
    shapes.append(((3, 2, 2, 3, 4), (3, 2, 1), 2),)
    shapes.append(((3, 2, 2, 3, 4), (3, 2, 1, 3), 2),)
    shapes.append(((3, 2, 2, 3, 4), (3, 2, 2, 2), 2),)
    shapes.append(((3, 2, 2, 3, 4), (3, 2, 3, 1), 2),)
    for params_shape, indices_shape, batch_dims in shapes:
      # Mark all batch dims unknown in the placeholder shapes.
      params_ph_shape = list(params_shape)
      indices_ph_shape = list(indices_shape)
      for i in range(batch_dims):
        params_ph_shape[i] = None
        indices_ph_shape[i] = None
      params = array_ops.placeholder(dtypes.float32, shape=params_ph_shape)
      indices = array_ops.placeholder(dtypes.int32, shape=indices_ph_shape)
      out = array_ops.batch_gather_nd(
          params=params, indices=indices, batch_dims=batch_dims)
      with self.cached_session() as sess:
        params_val = np.ones(dtype=np.float32, shape=params_shape)
        indices_val = np.ones(dtype=np.int32, shape=indices_shape)
        res = sess.run(
            out, feed_dict={
                params: params_val,
                indices: indices_val
            })
      row_ndims = len(params_shape) - batch_dims - indices_shape[-1]
      expected_out_shape = indices_shape[:-1]
      if row_ndims > 0:
        expected_out_shape += params_shape[-row_ndims:]
      self.assertSequenceEqual(res.shape, expected_out_shape)

  @test_util.run_deprecated_v1
  def testUnknownIndices(self):
    """Tests whether indices with unknown rank works correctly."""
    params = constant_op.constant(((0, 1, 2),))
    indices = array_ops.placeholder(dtypes.int32)
    gather_nd_t = array_ops.gather_nd(params, indices, batch_dims=1)
    shape = gather_nd_t.get_shape()
    self.assertEqual(None, shape.ndims)
    self.assertEqual(None, tensor_shape.dimension_value(shape[0]))
@test_util.run_all_in_graph_and_eager_modes
class RepeatTest(test_util.TensorFlowTestCase, parameterized.TestCase):
  """Tests array_ops.repeat against np.repeat, eagerly and inside tf.function."""

  @parameterized.parameters(
      (3, 4, None),
      ([[1, 2], [3, 4]], 2, None),
      ([[1, 2], [3, 4]], [1, 2], 0),
      ([[1, 2], [3, 4]], [1, 2], 1),
      ([[1, 2], [3, 4]], 3, 1),
      ([[1, 2], [3, 4]], [1, 2, 3, 4], None),
      (np.ones([0, 4]), 0, 1),
      (np.ones([1, 2]), [2], None),
  )
  def testRepeat(self, array, repeats, axis):
    array = np.array(array)

    # The function variant forces unknown shapes via the TensorSpec signature.
    @def_function.function(
        input_signature=[tensor_spec.TensorSpec(None, dtypes.int32)] * 2)
    def repeat_fn(array, repeats):
      return array_ops.repeat(array, repeats, axis)

    v_tf = array_ops.repeat(constant_op.constant(array), repeats, axis)
    v_tf_fn = repeat_fn(
        constant_op.constant(array, dtype=dtypes.int32), repeats)
    v_np = np.repeat(array, repeats, axis)
    self.assertAllEqual(v_tf, v_np)
    self.assertAllEqual(v_tf_fn, v_np)
@test_util.run_all_in_graph_and_eager_modes
class TileVariantTest(test_util.TensorFlowTestCase):
    """Tests tiling of variant-dtype tensors (TensorList handles)."""

    def test_tile_tensor_list(self):
        t = constant_op.constant(np.random.uniform(size=[2, 3, 4]))
        handle = list_ops.tensor_list_from_tensor(t, element_shape=None)
        with ops.device("CPU:0"):
            # Tiling a rank-1 tensor of list handles must copy the lists.
            tiled_handles = array_ops.tile(array_ops.reshape(handle, [1]), [2])
        tiled_tensor_0 = list_ops.tensor_list_stack(tiled_handles[0], t.dtype, 2,
                                                    [3, 4])
        tiled_tensor_1 = list_ops.tensor_list_stack(tiled_handles[1], t.dtype, 2,
                                                    [3, 4])
        # Both tiled copies must hold the original contents.
        self.assertAllEqual(t, tiled_tensor_0)
        self.assertAllEqual(t, tiled_tensor_1)
        # Now mutate some of the lists and make sure the changes are not reflected
        # in the tiled handles (i.e. tile performed a deep copy, not aliasing).
        with ops.control_dependencies([
            list_ops.tensor_list_scatter([t[0] + 1], [0], input_handle=handle),
            list_ops.tensor_list_set_item(tiled_handles[0], 0, t[0] + 2)]):
            tiled_tensor_0 = list_ops.tensor_list_stack(tiled_handles[0], t.dtype, 2,
                                                        [3, 4])
            tiled_tensor_1 = list_ops.tensor_list_stack(tiled_handles[1], t.dtype, 2,
                                                        [3, 4])
        self.assertAllEqual(t, tiled_tensor_0)
        self.assertAllEqual(t, tiled_tensor_1)
if __name__ == "__main__":
    # Run every test class in this module through TensorFlow's test runner.
    test_lib.main()
| annarev/tensorflow | tensorflow/python/kernel_tests/array_ops_test.py | Python | apache-2.0 | 85,558 |
from __future__ import absolute_import
from datetime import timedelta
from random import randint
from flask import current_app
from celery import Celery
from celery.schedules import crontab
from celery.signals import worker_process_init
from redash import __version__, create_app, settings
from redash.metrics import celery as celery_metrics
# The Celery application shared by all redash background workers; the broker
# URL and all feature flags below come from the environment-driven settings.
celery = Celery('redash',
                broker=settings.CELERY_BROKER,
                include='redash.tasks')

# Static beat schedule: maintenance jobs that always run.
celery_schedule = {
    'refresh_queries': {
        'task': 'redash.tasks.refresh_queries',
        'schedule': timedelta(seconds=30)
    },
    'cleanup_tasks': {
        'task': 'redash.tasks.cleanup_tasks',
        'schedule': timedelta(minutes=5)
    },
    'refresh_schemas': {
        'task': 'redash.tasks.refresh_schemas',
        'schedule': timedelta(minutes=settings.SCHEMAS_REFRESH_SCHEDULE)
    }
}

# Optional periodic jobs, each gated by a settings feature flag.
if settings.GOOGLE_GROUP_MEMBER_SYNC_ENABLED:
    celery_schedule['sync_google_group_members'] = {
        'task': 'redash.tasks.sync_google_group_members',
        'schedule': timedelta(hours=1)
    }

if settings.VERSION_CHECK:
    celery_schedule['version_check'] = {
        'task': 'redash.tasks.version_check',
        # We need to schedule the version check to run at a random hour/minute, to spread the requests from all users
        # evenly.
        'schedule': crontab(minute=randint(0, 59), hour=randint(0, 23))
    }

if settings.QUERY_RESULTS_CLEANUP_ENABLED:
    celery_schedule['cleanup_query_results'] = {
        'task': 'redash.tasks.cleanup_query_results',
        'schedule': timedelta(minutes=5)
    }

celery.conf.update(CELERY_RESULT_BACKEND=settings.CELERY_BACKEND,
                   CELERYBEAT_SCHEDULE=celery_schedule,
                   CELERY_TIMEZONE='UTC',
                   CELERY_TASK_RESULT_EXPIRES=settings.CELERY_TASK_RESULT_EXPIRES,
                   CELERYD_LOG_FORMAT=settings.CELERYD_LOG_FORMAT,
                   CELERYD_TASK_LOG_FORMAT=settings.CELERYD_TASK_LOG_FORMAT)

# Report worker/task errors to Sentry when a DSN is configured.  Imported
# lazily so raven is only required when the feature is enabled.
if settings.SENTRY_DSN:
    from raven import Client
    from raven.contrib.celery import register_signal

    client = Client(settings.SENTRY_DSN, release=__version__,
                    install_logging_hook=False)
    register_signal(client)
# Create a new Task base class that pushes a new Flask app context to allow
# DB connections if needed.
TaskBase = celery.Task


class ContextTask(TaskBase):
    # Abstract so Celery does not register this base class as a task itself.
    abstract = True

    def __call__(self, *args, **kwargs):
        # Run every task inside the Flask application context so task code
        # can use current_app, extensions, and the database session.
        with current_app.app_context():
            return TaskBase.__call__(self, *args, **kwargs)


# Make the context-aware class the default base for all tasks.
celery.Task = ContextTask
# Create Flask app after forking a new worker, to make sure no resources are
# shared between processes.
@worker_process_init.connect
def init_celery_flask_app(**kwargs):
    """Build a fresh Flask app per worker process and keep its context pushed.

    The pushed context lives for the lifetime of the worker process, so
    `current_app` (used by ContextTask above) always resolves.
    """
    app = create_app()
    app.app_context().push()
| crowdworks/redash | redash/worker.py | Python | bsd-2-clause | 2,788 |
import os
def valid_triangles(triangles):
    """Return how many side-length triples describe a possible triangle.

    A triple is valid when the two shorter sides together exceed the
    longest one, i.e. sum(sides) - max(sides) > max(sides).  Degenerate
    triangles (equality) do not count.
    """
    return sum(sum(sides) - max(sides) > max(sides) for sides in triangles)
def make_groups(triangles):
    """Regroup the input column-wise, three values at a time.

    Each column of the input (read top to bottom) is split into
    consecutive runs of three; all runs are returned in column order.
    """
    groups = []
    for column in zip(*triangles):
        column = list(column)
        groups.extend(column[start:start + 3]
                      for start in range(0, len(column), 3))
    return groups
if __name__ == '__main__':
    # Resolve input.txt relative to this script so it works from any CWD.
    dir = os.path.dirname(__file__)
    file = os.path.join(dir, 'input.txt')
    # Each input line holds three whitespace-separated integers (one triangle).
    triangles = []
    with open(file) as fd:
        for line in fd:
            triangles.append(list(map(int, line.strip().split())))
    print('Part1: ', valid_triangles(triangles))
    # Part 2 reinterprets the same numbers column-wise, three at a time.
    print('Part 2:', valid_triangles(make_groups(triangles)))
| bbglab/adventofcode | 2016/iker/day3/squares_with_three_sides.py | Python | mit | 788 |
#!/usr/bin/env python
"""
Error messages that might be raised when creating datasets.
"""
# Error codes mapped to the HTML-formatted messages shown to the user when
# dataset creation or validation fails.  Each value is a single string
# literal built with backslash line continuations, so every continued line
# must end with "\" — an unescaped newline in these single-quoted strings
# is a SyntaxError.
DATA_ERROR_MESSAGES = {
    "NOT_EXIST" : "\
<p>The folder you have selected does not exist.</p> \
<p>Please select a folder that <strong>exists</strong> and, preferably, \
contains some valid data files.</p> \
",
    "FOLDER_EMPTY" : "\
<p>The folder that you have selected is empty.</p> \
<p>Please select the folder that contains your data files.</p> \
",
    "BAD_FORMAT" : "\
<p>The folder contains a file in an unrecognised format.</p> \
",
    "CONTAINS_DIR" : "\
<p>The folder that you have selected contains another folder.</p> \
<p>Directories containing data to be analysed should not contain \
other folders. Please check that you have selected the correct \
folder and that your data is arranged as it should be.</p> \
",
    "FORMAT_MISMATCH" : "\
<p>Data files have been found in two or more valid formats.</p> \
<p>This suggests that the data in the selected folder has been \
collected in one or more separate runs with differing detector \
settings. Please check your data and the detector configurations \
used.</p> \
",
    "MISSING_DSC" : "\
<p>One or more of the data files in the selected folder is missing \
a DSC (detector settings) file.</p> \
",
    "MISSING_DAT" : "\
<p>One or more of the DSC files in the selected folder is missing \
a data file.</p> \
",
    "DET_DIFF_CHIPIDS" : "\
<p>The Pixelman dataset supplied contains data from different \
detectors.</p> \
",
    "DET_DIFF_NANDSN" : "\
<p>The Pixelman dataset supplied contains data from different \
detectors.</p> \
",
    "PIXEL_MASK_IN_DB" : "\
<p>A pixel mask with that name is already in the database.</p>\
",
    "NO_SOURCE_NAME" : "\
<p>There is currently no source name specified.</p> \
<p>Please select a source from the database or enter a new name \
in the <strong>Source Name</strong> field.</p>\
",
    "NO_SOURCE_DESC" : "\
<p>There is currently no source description specified.</p> \
<p>Please enter a description of the source for the dataset to \
import, or select a source from the database.</p> \
",
    # FIX: the two entries below were missing line-continuation backslashes,
    # leaving unescaped newlines inside single-quoted strings — the whole
    # module failed to import with a SyntaxError.
    "FRAME_NO_HV" : "\
<p>You are trying to create a frame but no bias voltage (HV) \
value has been supplied.</p> \
",
    "FRAME_NO_IKRUM" : "\
<p>You are trying to create a frame but no I_Krum value \
has been supplied.</p> \
"
    }
| gridpp/dirac-getting-started | cernatschool/dataerrs.py | Python | mit | 2,334 |
from django.conf.urls import patterns, include, url
from django.contrib import admin
import settings
from django.contrib.auth import views as auth_views
# NOTE(review): patterns() and string view references belong to pre-1.10
# Django; this URLconf targets an older release.
urlpatterns = patterns('',
    # Examples:
    # url(r'^telco_billing/', include('telco_billing.urls'), name='home'),
    # url(r'^blog/', include('blog.urls')),

    url(r'^admin/', include(admin.site.urls)),
    url(r'^wedding/', include('wedding.urls')),
)

# Serve the collected static tree under /static/.  django.views.static.serve
# is a development helper; production should serve STATIC_ROOT from the
# web server instead.
urlpatterns += patterns('',
    (r'^static/(?P<path>.*)$', 'django.views.static.serve', {'document_root': settings.STATIC_ROOT, 'show_indexes':True}),
)

# A second static tree (STATIC_ROOT_2) exposed under /wedding_static/.
urlpatterns += patterns('',
    (r'^wedding_static/(?P<path>.*)$', 'django.views.static.serve', {'document_root': settings.STATIC_ROOT_2, 'show_indexes':True}),
)
) | harshittrivedi78/hotel_listing | myproject/urls.py | Python | apache-2.0 | 750 |
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from oslo_utils import uuidutils
from openstack_dashboard.api import base
from openstack_dashboard.api import neutron
from openstack_dashboard.test.test_data import utils
from openstack_dashboard.usage import quotas as usage_quotas
def data(TEST):
# Data returned by openstack_dashboard.api.neutron wrapper.
TEST.agents = utils.TestDataContainer()
TEST.networks = utils.TestDataContainer()
TEST.subnets = utils.TestDataContainer()
TEST.subnetpools = utils.TestDataContainer()
TEST.ports = utils.TestDataContainer()
TEST.trunks = utils.TestDataContainer()
TEST.routers = utils.TestDataContainer()
TEST.routers_with_rules = utils.TestDataContainer()
TEST.routers_with_routes = utils.TestDataContainer()
TEST.floating_ips = utils.TestDataContainer()
TEST.security_groups = utils.TestDataContainer()
TEST.security_group_rules = utils.TestDataContainer()
TEST.providers = utils.TestDataContainer()
TEST.pools = utils.TestDataContainer()
TEST.vips = utils.TestDataContainer()
TEST.members = utils.TestDataContainer()
TEST.monitors = utils.TestDataContainer()
TEST.neutron_quotas = utils.TestDataContainer()
TEST.neutron_quota_usages = utils.TestDataContainer()
TEST.ip_availability = utils.TestDataContainer()
TEST.qos_policies = utils.TestDataContainer()
TEST.tp_ports = utils.TestDataContainer()
# Data return by neutronclient.
TEST.api_agents = utils.TestDataContainer()
TEST.api_networks = utils.TestDataContainer()
TEST.api_subnets = utils.TestDataContainer()
TEST.api_subnetpools = utils.TestDataContainer()
TEST.api_ports = utils.TestDataContainer()
TEST.api_trunks = utils.TestDataContainer()
TEST.api_routers = utils.TestDataContainer()
TEST.api_routers_with_routes = utils.TestDataContainer()
TEST.api_floating_ips = utils.TestDataContainer()
TEST.api_security_groups = utils.TestDataContainer()
TEST.api_security_group_rules = utils.TestDataContainer()
TEST.api_pools = utils.TestDataContainer()
TEST.api_vips = utils.TestDataContainer()
TEST.api_members = utils.TestDataContainer()
TEST.api_monitors = utils.TestDataContainer()
TEST.api_extensions = utils.TestDataContainer()
TEST.api_ip_availability = utils.TestDataContainer()
TEST.api_qos_policies = utils.TestDataContainer()
TEST.api_tp_trunks = utils.TestDataContainer()
TEST.api_tp_ports = utils.TestDataContainer()
# 1st network.
network_dict = {'admin_state_up': True,
'id': '82288d84-e0a5-42ac-95be-e6af08727e42',
'name': 'net1',
'status': 'ACTIVE',
'subnets': ['e8abc972-eb0c-41f1-9edd-4bc6e3bcd8c9'],
'tenant_id': '1',
'router:external': False,
'shared': False}
subnet_dict = {'allocation_pools': [{'end': '10.0.0.254',
'start': '10.0.0.2'}],
'dns_nameservers': [],
'host_routes': [],
'cidr': '10.0.0.0/24',
'enable_dhcp': True,
'gateway_ip': '10.0.0.1',
'id': network_dict['subnets'][0],
'ip_version': 4,
'name': 'mysubnet1',
'network_id': network_dict['id'],
'tenant_id': network_dict['tenant_id']}
TEST.api_networks.add(network_dict)
TEST.api_subnets.add(subnet_dict)
network = copy.deepcopy(network_dict)
subnet = neutron.Subnet(subnet_dict)
network['subnets'] = [subnet]
TEST.networks.add(neutron.Network(network))
TEST.subnets.add(subnet)
# Ports on 1st network.
port_dict = {
'admin_state_up': True,
'device_id': 'af75c8e5-a1cc-4567-8d04-44fcd6922890',
'device_owner': 'network:dhcp',
'fixed_ips': [{'ip_address': '10.0.0.3',
'subnet_id': subnet_dict['id']}],
'id': '063cf7f3-ded1-4297-bc4c-31eae876cc91',
'mac_address': 'fa:16:3e:9c:d5:7e',
'name': '',
'network_id': network_dict['id'],
'status': 'ACTIVE',
'tenant_id': network_dict['tenant_id'],
'binding:vnic_type': 'normal',
'binding:host_id': 'host',
'allowed_address_pairs': [
{'ip_address': '174.0.0.201',
'mac_address': 'fa:16:3e:7a:7b:18'}
],
'security_groups': [],
}
TEST.api_ports.add(port_dict)
TEST.ports.add(neutron.Port(port_dict))
port_dict = {
'admin_state_up': True,
'device_id': '1',
'device_owner': 'compute:nova',
'fixed_ips': [{'ip_address': '10.0.0.4',
'subnet_id': subnet_dict['id']}],
'id': '7e6ce62c-7ea2-44f8-b6b4-769af90a8406',
'mac_address': 'fa:16:3e:9d:e6:2f',
'name': '',
'network_id': network_dict['id'],
'status': 'ACTIVE',
'tenant_id': network_dict['tenant_id'],
'binding:vnic_type': 'normal',
'binding:host_id': 'host',
'security_groups': [
# sec_group_1 ID below
'faad7c80-3b62-4440-967c-13808c37131d',
# sec_group_2 ID below
'27a5c9a1-bdbb-48ac-833a-2e4b5f54b31d'
],
}
TEST.api_ports.add(port_dict)
TEST.ports.add(neutron.Port(port_dict))
assoc_port = port_dict
port_dict = {
'admin_state_up': True,
'device_id': '279989f7-54bb-41d9-ba42-0d61f12fda61',
'device_owner': 'network:router_interface',
'fixed_ips': [{'ip_address': '10.0.0.1',
'subnet_id': subnet_dict['id']}],
'id': '9036eedb-e7fa-458e-bc6e-d9d06d9d1bc4',
'mac_address': 'fa:16:3e:9c:d5:7f',
'name': '',
'network_id': network_dict['id'],
'status': 'ACTIVE',
'tenant_id': network_dict['tenant_id'],
'binding:vnic_type': 'normal',
'binding:host_id': 'host',
'security_groups': [],
}
TEST.api_ports.add(port_dict)
TEST.ports.add(neutron.Port(port_dict))
# 2nd network.
network_dict = {'admin_state_up': True,
'id': '72c3ab6c-c80f-4341-9dc5-210fa31ac6c2',
'name': 'net2',
'status': 'ACTIVE',
'subnets': ['3f7c5d79-ee55-47b0-9213-8e669fb03009'],
'tenant_id': '2',
'router:external': False,
'shared': True}
subnet_dict = {'allocation_pools': [{'end': '172.16.88.254',
'start': '172.16.88.2'}],
'dns_nameservers': ['10.56.1.20', '10.56.1.21'],
'host_routes': [{'destination': '192.168.20.0/24',
'nexthop': '172.16.88.253'},
{'destination': '192.168.21.0/24',
'nexthop': '172.16.88.252'}],
'cidr': '172.16.88.0/24',
'enable_dhcp': True,
'gateway_ip': '172.16.88.1',
'id': '3f7c5d79-ee55-47b0-9213-8e669fb03009',
'ip_version': 4,
'name': 'aaaa',
'network_id': network_dict['id'],
'tenant_id': network_dict['tenant_id']}
TEST.api_networks.add(network_dict)
TEST.api_subnets.add(subnet_dict)
network = copy.deepcopy(network_dict)
subnet = neutron.Subnet(subnet_dict)
network['subnets'] = [subnet]
TEST.networks.add(neutron.Network(network))
TEST.subnets.add(subnet)
port_dict = {
'admin_state_up': True,
'device_id': '2',
'device_owner': 'compute:nova',
'fixed_ips': [{'ip_address': '172.16.88.3',
'subnet_id': subnet_dict['id']}],
'id': '1db2cc37-3553-43fa-b7e2-3fc4eb4f9905',
'mac_address': 'fa:16:3e:56:e6:2f',
'name': '',
'network_id': network_dict['id'],
'status': 'ACTIVE',
'tenant_id': network_dict['tenant_id'],
'binding:vnic_type': 'normal',
'binding:host_id': 'host',
'security_groups': [
# sec_group_1 ID below
'faad7c80-3b62-4440-967c-13808c37131d',
],
}
TEST.api_ports.add(port_dict)
TEST.ports.add(neutron.Port(port_dict))
# External network.
network_dict = {'admin_state_up': True,
'id': '9b466b94-213a-4cda-badf-72c102a874da',
'name': 'ext_net',
'status': 'ACTIVE',
'subnets': ['d6bdc71c-7566-4d32-b3ff-36441ce746e8'],
'tenant_id': '3',
'router:external': True,
'shared': False}
subnet_dict = {'allocation_pools': [{'start': '172.24.4.226.',
'end': '172.24.4.238'}],
'dns_nameservers': [],
'host_routes': [],
'cidr': '172.24.4.0/28',
'enable_dhcp': False,
'gateway_ip': '172.24.4.225',
'id': 'd6bdc71c-7566-4d32-b3ff-36441ce746e8',
'ip_version': 4,
'name': 'ext_subnet',
'network_id': network_dict['id'],
'tenant_id': network_dict['tenant_id']}
ext_net = network_dict
TEST.api_networks.add(network_dict)
TEST.api_subnets.add(subnet_dict)
network = copy.deepcopy(network_dict)
subnet = neutron.Subnet(subnet_dict)
network['subnets'] = [subnet]
TEST.networks.add(neutron.Network(network))
TEST.subnets.add(subnet)
# 1st v6 network.
network_dict = {'admin_state_up': True,
'id': '96688ea1-ffa5-78ec-22ca-33aaabfaf775',
'name': 'v6_net1',
'status': 'ACTIVE',
'subnets': ['88ddd443-4377-ab1f-87dd-4bc4a662dbb6'],
'tenant_id': '1',
'router:external': False,
'shared': False}
subnet_dict = {'allocation_pools': [{'end': 'ff09::ff',
'start': 'ff09::02'}],
'dns_nameservers': [],
'host_routes': [],
'cidr': 'ff09::/64',
'enable_dhcp': True,
'gateway_ip': 'ff09::1',
'id': network_dict['subnets'][0],
'ip_version': 6,
'name': 'v6_subnet1',
'network_id': network_dict['id'],
'tenant_id': network_dict['tenant_id'],
'ipv6_modes': 'none/none'}
TEST.api_networks.add(network_dict)
TEST.api_subnets.add(subnet_dict)
network = copy.deepcopy(network_dict)
subnet = neutron.Subnet(subnet_dict)
network['subnets'] = [subnet]
TEST.networks.add(neutron.Network(network))
TEST.subnets.add(subnet)
# 2nd v6 network - slaac.
network_dict = {'admin_state_up': True,
'id': 'c62e4bb3-296a-4cd1-8f6b-aaa7a0092326',
'name': 'v6_net2',
'status': 'ACTIVE',
'subnets': ['5d736a21-0036-4779-8f8b-eed5f98077ec'],
'tenant_id': '1',
'router:external': False,
'shared': False}
subnet_dict = {'allocation_pools': [{'end': 'ff09::ff',
'start': 'ff09::02'}],
'dns_nameservers': [],
'host_routes': [],
'cidr': 'ff09::/64',
'enable_dhcp': True,
'gateway_ip': 'ff09::1',
'id': network_dict['subnets'][0],
'ip_version': 6,
'name': 'v6_subnet2',
'network_id': network_dict['id'],
'tenant_id': network_dict['tenant_id'],
'ipv6_modes': 'slaac/slaac'}
TEST.api_networks.add(network_dict)
TEST.api_subnets.add(subnet_dict)
network = copy.deepcopy(network_dict)
subnet = neutron.Subnet(subnet_dict)
network['subnets'] = [subnet]
TEST.networks.add(neutron.Network(network))
TEST.subnets.add(subnet)
# Set up router data.
port_dict = {
'admin_state_up': True,
'device_id': '7180cede-bcd8-4334-b19f-f7ef2f331f53',
'device_owner': 'network:router_gateway',
'fixed_ips': [{'ip_address': '10.0.0.3',
'subnet_id': subnet_dict['id']}],
'id': '44ec6726-4bdc-48c5-94d4-df8d1fbf613b',
'mac_address': 'fa:16:3e:9c:d5:7e',
'name': '',
'network_id': TEST.networks.get(name="ext_net")['id'],
'status': 'ACTIVE',
'tenant_id': '1',
'binding:vnic_type': 'normal',
'binding:host_id': 'host',
'security_groups': [],
}
TEST.api_ports.add(port_dict)
TEST.ports.add(neutron.Port(port_dict))
trunk_dict = {'status': 'UP',
'sub_ports': [],
'name': 'trunk1',
'admin_state_up': True,
'tenant_id': '1',
'project_id': '1',
'port_id': '895d375c-1447-11e7-a52f-f7f280bbc809',
'id': '94fcb9e8-1447-11e7-bed6-8b8c4ac74491'}
TEST.api_trunks.add(trunk_dict)
TEST.trunks.add(neutron.Trunk(trunk_dict))
router_dict = {'id': '279989f7-54bb-41d9-ba42-0d61f12fda61',
'name': 'router1',
'status': 'ACTIVE',
'admin_state_up': True,
'distributed': True,
'external_gateway_info':
{'network_id': ext_net['id']},
'tenant_id': '1'}
TEST.api_routers.add(router_dict)
TEST.routers.add(neutron.Router(router_dict))
router_dict = {'id': '10e3dc42-1ce1-4d48-87cf-7fc333055d6c',
'name': 'router2',
'status': 'ACTIVE',
'admin_state_up': False,
'distributed': False,
'external_gateway_info': None,
'tenant_id': '1'}
TEST.api_routers.add(router_dict)
TEST.routers.add(neutron.Router(router_dict))
router_dict = {'id': '7180cede-bcd8-4334-b19f-f7ef2f331f53',
'name': 'rulerouter',
'status': 'ACTIVE',
'admin_state_up': True,
'distributed': False,
'external_gateway_info':
{'network_id': ext_net['id']},
'tenant_id': '1',
'router_rules': [{'id': '101',
'action': 'deny',
'source': 'any',
'destination': 'any',
'nexthops': []},
{'id': '102',
'action': 'permit',
'source': 'any',
'destination': '8.8.8.8/32',
'nexthops': ['1.0.0.2', '1.0.0.1']}]}
TEST.api_routers.add(router_dict)
TEST.routers_with_rules.add(neutron.Router(router_dict))
router_dict_with_route = {'id': '725c24c9-061b-416b-b9d4-012392b32fd9',
'name': 'routerouter',
'status': 'ACTIVE',
'admin_state_up': True,
'distributed': False,
'external_gateway_info':
{'network_id': ext_net['id']},
'tenant_id': '1',
'routes': [{'nexthop': '10.0.0.1',
'destination': '172.0.0.0/24'},
{'nexthop': '10.0.0.2',
'destination': '172.1.0.0/24'}]}
TEST.api_routers_with_routes.add(router_dict_with_route)
TEST.routers_with_routes.add(neutron.Router(router_dict_with_route))
# Floating IP.
# Unassociated.
fip_dict = {'tenant_id': '1',
'floating_ip_address': '172.16.88.227',
'floating_network_id': ext_net['id'],
'id': '9012cd70-cfae-4e46-b71e-6a409e9e0063',
'fixed_ip_address': None,
'port_id': None,
'router_id': None}
TEST.api_floating_ips.add(fip_dict)
fip_with_instance = copy.deepcopy(fip_dict)
fip_with_instance.update({'instance_id': None,
'instance_type': None})
TEST.floating_ips.add(neutron.FloatingIp(fip_with_instance))
# Associated (with compute port on 1st network).
fip_dict = {'tenant_id': '1',
'floating_ip_address': '172.16.88.228',
'floating_network_id': ext_net['id'],
'id': 'a97af8f2-3149-4b97-abbd-e49ad19510f7',
'fixed_ip_address': assoc_port['fixed_ips'][0]['ip_address'],
'port_id': assoc_port['id'],
'router_id': router_dict['id']}
TEST.api_floating_ips.add(fip_dict)
fip_with_instance = copy.deepcopy(fip_dict)
fip_with_instance.update({'instance_id': '1',
'instance_type': 'compute'})
TEST.floating_ips.add(neutron.FloatingIp(fip_with_instance))
# Security group.
sec_group_1 = {'tenant_id': '1',
'description': 'default',
'id': 'faad7c80-3b62-4440-967c-13808c37131d',
'name': 'default'}
sec_group_2 = {'tenant_id': '1',
'description': 'NotDefault',
'id': '27a5c9a1-bdbb-48ac-833a-2e4b5f54b31d',
'name': 'other_group'}
sec_group_3 = {'tenant_id': '1',
'description': 'NotDefault',
'id': '443a4d7a-4bd2-4474-9a77-02b35c9f8c95',
'name': 'another_group'}
def add_rule_to_group(secgroup, default_only=True):
    """Populate ``secgroup['security_group_rules']`` in place.

    The two default egress-allow rules (IPv4 and IPv6) are always
    included.  When ``default_only`` is False, four extra test rules are
    prepended: ingress TCP/80, ingress ICMP 5-8, an ingress rule keyed on
    membership of ``sec_group_1`` (closed over from the enclosing scope),
    and an egress all-TCP rule limited to 0.0.0.0/24.
    """
    # Default egress rules: allow all outbound traffic, one per ethertype.
    rule_egress_ipv4 = {
        'id': uuidutils.generate_uuid(),
        'direction': u'egress', 'ethertype': u'IPv4',
        'port_range_min': None, 'port_range_max': None,
        'protocol': None, 'remote_group_id': None,
        'remote_ip_prefix': None,
        'security_group_id': secgroup['id'],
        'tenant_id': secgroup['tenant_id']}
    rule_egress_ipv6 = {
        'id': uuidutils.generate_uuid(),
        'direction': u'egress', 'ethertype': u'IPv6',
        'port_range_min': None, 'port_range_max': None,
        'protocol': None, 'remote_group_id': None,
        'remote_ip_prefix': None,
        'security_group_id': secgroup['id'],
        'tenant_id': secgroup['tenant_id']}
    # Ingress HTTP from anywhere.
    rule_tcp_80 = {
        'id': uuidutils.generate_uuid(),
        'direction': u'ingress', 'ethertype': u'IPv4',
        'port_range_min': 80, 'port_range_max': 80,
        'protocol': u'tcp', 'remote_group_id': None,
        'remote_ip_prefix': u'0.0.0.0/0',
        'security_group_id': secgroup['id'],
        'tenant_id': secgroup['tenant_id']}
    # Ingress ICMP, type/code range 5-8, from anywhere.
    rule_icmp = {
        'id': uuidutils.generate_uuid(),
        'direction': u'ingress', 'ethertype': u'IPv4',
        'port_range_min': 5, 'port_range_max': 8,
        'protocol': u'icmp', 'remote_group_id': None,
        'remote_ip_prefix': u'0.0.0.0/0',
        'security_group_id': secgroup['id'],
        'tenant_id': secgroup['tenant_id']}
    # Ingress HTTP restricted to members of sec_group_1 rather than a CIDR.
    rule_group = {
        'id': uuidutils.generate_uuid(),
        'direction': u'ingress', 'ethertype': u'IPv4',
        'port_range_min': 80, 'port_range_max': 80,
        'protocol': u'tcp', 'remote_group_id': sec_group_1['id'],
        'remote_ip_prefix': None,
        'security_group_id': secgroup['id'],
        'tenant_id': secgroup['tenant_id']}
    # Egress: the full TCP port range, limited to one /24.
    rule_all_tcp = {
        'id': uuidutils.generate_uuid(),
        'direction': u'egress', 'ethertype': u'IPv4',
        'port_range_min': 1, 'port_range_max': 65535,
        'protocol': u'tcp', 'remote_group_id': None,
        'remote_ip_prefix': u'0.0.0.0/24',
        'security_group_id': secgroup['id'],
        'tenant_id': secgroup['tenant_id']}

    rules = []
    if not default_only:
        rules += [rule_tcp_80, rule_icmp, rule_group, rule_all_tcp]
    # Defaults always come last, matching the order callers rely on.
    rules += [rule_egress_ipv4, rule_egress_ipv6]
    secgroup['security_group_rules'] = rules
add_rule_to_group(sec_group_1, default_only=False)
add_rule_to_group(sec_group_2)
add_rule_to_group(sec_group_3)
groups = [sec_group_1, sec_group_2, sec_group_3]
sg_name_dict = dict([(sg['id'], sg['name']) for sg in groups])
for sg in groups:
# Neutron API.
TEST.api_security_groups.add(sg)
for rule in sg['security_group_rules']:
TEST.api_security_group_rules.add(copy.copy(rule))
# OpenStack Dashboard internaly API.
TEST.security_groups.add(
neutron.SecurityGroup(copy.deepcopy(sg), sg_name_dict))
for rule in sg['security_group_rules']:
TEST.security_group_rules.add(
neutron.SecurityGroupRule(copy.copy(rule), sg_name_dict))
# Subnetpools
# 1st subnetpool
subnetpool_dict = {'default_prefixlen': 24,
'default_quota': None,
'id': '419eb314-e244-4088-aed7-851af9d9500d',
'ip_version': 4,
'max_prefixlen': 32,
'min_prefixlen': 12,
'name': 'mysubnetpool1',
'prefixes': ['172.16.0.0/12'],
'shared': False,
'tenant_id': '1'}
TEST.api_subnetpools.add(subnetpool_dict)
subnetpool = neutron.SubnetPool(subnetpool_dict)
TEST.subnetpools.add(subnetpool)
# 2nd subnetpool (v6)
subnetpool_dict = {'default_prefixlen': 64,
'default_quota': None,
'id': 'dcdad289-46f3-4298-bec6-41d91c942efa',
'ip_version': 6,
'max_prefixlen': 64,
'min_prefixlen': 60,
'name': 'mysubnetpool2',
'prefixes': ['2001:db8:42::/48'],
'shared': False,
'tenant_id': '1'}
TEST.api_subnetpools.add(subnetpool_dict)
subnetpool = neutron.SubnetPool(subnetpool_dict)
TEST.subnetpools.add(subnetpool)
# Quotas.
quota_data = {'network': '10',
'subnet': '10',
'port': '50',
'router': '10',
'floatingip': '50',
'security_group': '20',
'security_group_rule': '100',
}
TEST.neutron_quotas.add(base.QuotaSet(quota_data))
# Quota Usages
quota_usage_data = {'networks': {'used': 0, 'quota': 5},
'subnets': {'used': 0, 'quota': 5},
'ports': {'used': 0, 'quota': 5},
'routers': {'used': 0, 'quota': 5},
}
quota_usage = usage_quotas.QuotaUsage()
for k, v in quota_usage_data.items():
quota_usage.add_quota(base.Quota(k, v['quota']))
quota_usage.tally(k, v['used'])
TEST.neutron_quota_usages.add(quota_usage)
# Extensions.
extension_1 = {"name": "security-group",
"alias": "security-group",
"description": "The security groups extension."}
extension_2 = {"name": "Quota management support",
"alias": "quotas",
"description": "Expose functions for quotas management"}
extension_3 = {"name": "Provider network",
"alias": "provider",
"description": "Provider network extension"}
extension_4 = {"name": "Distributed Virtual Router",
"alias": "dvr",
"description":
"Enables configuration of Distributed Virtual Routers."}
extension_5 = {"name": "HA Router extension",
"alias": "l3-ha",
"description": "Add HA capability to routers."}
extension_6 = {"name": "Trunks",
"alias": "trunk",
"description": "Provides support for trunk ports."}
TEST.api_extensions.add(extension_1)
TEST.api_extensions.add(extension_2)
TEST.api_extensions.add(extension_3)
TEST.api_extensions.add(extension_4)
TEST.api_extensions.add(extension_5)
TEST.api_extensions.add(extension_6)
# 1st agent.
agent_dict = {"binary": "neutron-openvswitch-agent",
"description": None,
"admin_state_up": True,
"heartbeat_timestamp": "2013-07-26 06:51:47",
"alive": True,
"id": "c876ff05-f440-443e-808c-1d34cda3e88a",
"topic": "N/A",
"host": "devstack001",
"agent_type": "Open vSwitch agent",
"started_at": "2013-07-26 05:23:28",
"created_at": "2013-07-26 05:23:28",
"configurations": {"devices": 2}}
TEST.api_agents.add(agent_dict)
TEST.agents.add(neutron.Agent(agent_dict))
# 2nd agent.
agent_dict = {"binary": "neutron-dhcp-agent",
"description": None,
"admin_state_up": True,
"heartbeat_timestamp": "2013-07-26 06:51:48",
"alive": True,
"id": "f0d12e3d-1973-41a2-b977-b95693f9a8aa",
"topic": "dhcp_agent",
"host": "devstack001",
"agent_type": "DHCP agent",
"started_at": "2013-07-26 05:23:30",
"created_at": "2013-07-26 05:23:30",
"configurations": {
"subnets": 1,
"use_namespaces": True,
"dhcp_lease_duration": 120,
"dhcp_driver": "neutron.agent.linux.dhcp.Dnsmasq",
"networks": 1,
"ports": 1}}
TEST.api_agents.add(agent_dict)
TEST.agents.add(neutron.Agent(agent_dict))
# Service providers.
provider_1 = {"service_type": "LOADBALANCER",
"name": "haproxy",
"default": True}
TEST.providers.add(provider_1)
# ports on 4th network
port_dict = {
'admin_state_up': True,
'device_id': '9872faaa-b2b2-eeee-9911-21332eedaa77',
'device_owner': 'network:dhcp',
'fixed_ips': [{'ip_address': '11.10.0.3',
'subnet_id':
TEST.subnets.first().id}],
'id': 'a21dcd22-6733-cccc-aa32-22adafaf16a2',
'mac_address': '78:22:ff:1a:ba:23',
'name': 'port5',
'network_id': TEST.networks.first().id,
'status': 'ACTIVE',
'tenant_id': TEST.networks.first().tenant_id,
'binding:vnic_type': 'normal',
'binding:host_id': 'host',
'security_groups': [],
}
TEST.api_ports.add(port_dict)
TEST.ports.add(neutron.Port(port_dict))
availability = {'network_ip_availability': {
'used_ips': 2,
'subnet_ip_availability': [{
'used_ips': 1,
'subnet_id': '2c90f321-9cc7-41b4-a3cf-88110f120a94',
'subnet_name': 'ipv6-public-subnet',
'ip_version': 6,
'cidr': '2001:db8::/64',
'total_ips': 18446744073709551614},
{'used_ips': 1,
'subnet_id': '4d77d5fb-c26c-4ac5-b2ca-fca2f89b0fc1',
'subnet_name': 'public-subnet',
'ip_version': 4,
'cidr': '172.24.4.0/24',
'total_ips': 253}],
'network_id': 'd87d5be5-cfca-486f-8db5-a446330e4513',
'tenant_id': 'd564b2a4fc0544fb89f8a0434dd96863',
'network_name': 'public',
'total_ips': 18446744073709551867}
}
TEST.ip_availability.add(availability)
TEST.api_ip_availability.add(availability)
# qos policies
policy_dict = {'id': 'a21dcd22-7189-cccc-aa32-22adafaf16a7',
'name': 'policy 1',
'tenant_id': '1'}
TEST.api_qos_policies.add(policy_dict)
TEST.qos_policies.add(neutron.QoSPolicy(policy_dict))
policy_dict1 = {'id': 'a21dcd22-7189-ssss-aa32-22adafaf16a7',
'name': 'policy 2',
'tenant_id': '1'}
TEST.api_qos_policies.add(policy_dict1)
TEST.qos_policies.add(neutron.QoSPolicy(policy_dict1))
# TRUNKPORT
#
# The test setup was created by the following command sequence:
# openstack network create tst
# openstack subnet create tstsub --network tst\
# --subnet-range 10.10.16.128/26
# openstack network create tstalt
# openstack subnet create tstaltsub --network tstalt\
# --subnet-range 10.10.17.128/26
# openstack port create --network tst plain
# openstack port create --network tst parent
# openstack port create --network tst child1
# openstack port create --network tstalt child2
# openstack network trunk create --parent-port parent trunk
# openstack network trunk set\
# --subport port=child1,segmentation-type=vlan,segmentation-id=100 trunk
# openstack network trunk set\
# --subport port=child2,segmentation-type=vlan,segmentation-id=200 trunk
# ids/uuids are captured from a live setup.
# This collection holds the test setup.
tdata = {'tenant_id': '19c9123a944644cb9e923497a018d0b7',
'trunk_id': '920625a3-13de-46b4-b6c9-8b35f29b3cfe',
'security_group': '3fd8c007-9093-4aa3-b475-a0c178d4e1e4',
'tag_1': 100,
'tag_2': 200,
'net': {'tst_id': '5a340332-cc92-42aa-8980-15f47c0d0f3d',
'tstalt_id': '0fb41ffd-3933-4da4-8a83-025d328aedf3'},
'subnet': {'tst_id': '0b883baf-5a21-4605-ab56-229a24ec585b',
'tstalt_id': '0e184cf2-97dc-4738-b4b3-1871faf5d685'},
'child1': {'id': '9c151ffb-d7a6-4f15-8eae-d0950999fdfe',
'ip': '10.10.16.140',
'mac': 'fa:16:3e:22:63:6f',
'device_id': '279989f7-54bb-41d9-ba42-0d61f12fda61'},
'child2': {'id': 'cedb145f-c163-4630-98a3-e1990744bdef',
'ip': '10.10.17.137',
'mac': 'fa:16:3e:0d:ca:eb',
'device_id': '9872faaa-b2b2-eeee-9911-21332eedaa77'},
'parent': {'id': '5b27429d-048b-40fa-88f9-8e2c4ff7d28b',
'ip': '10.10.16.141',
'mac': 'fa:16:3e:ab:a8:22',
'device_id': 'af75c8e5-a1cc-4567-8d04-44fcd6922890'},
'plain': {'id': 'bc04da56-d7fc-461e-b95d-a2c66e77ad9a',
'ip': '10.10.16.135',
'mac': 'fa:16:3e:9c:d5:7f',
'device_id': '7180cede-bcd8-4334-b19f-f7ef2f331f53'}}
# network tst
# trunk
tp_trunk_dict = {
'status': 'UP',
'sub_ports': [{'segmentation_type': 'vlan',
'segmentation_id': tdata['tag_1'],
'port_id': tdata['child1']['id']},
{'segmentation_type': u'vlan',
'segmentation_id': tdata['tag_2'],
'port_id': tdata['child2']['id']}],
'name': 'trunk',
'admin_state_up': True,
'tenant_id': tdata['tenant_id'],
'project_id': tdata['tenant_id'],
'port_id': tdata['parent']['id'],
'id': tdata['trunk_id']
}
TEST.api_tp_trunks.add(tp_trunk_dict)
# port parent
parent_port_dict = {
'admin_state_up': True,
'device_id': tdata['parent']['device_id'],
'device_owner': 'compute:nova',
'fixed_ips': [{'ip_address': tdata['parent']['ip'],
'subnet_id': tdata['subnet']['tst_id']}],
'id': tdata['parent']['id'],
'mac_address': tdata['parent']['mac'],
'name': 'parent',
'network_id': tdata['net']['tst_id'],
'status': 'ACTIVE',
'tenant_id': tdata['tenant_id'],
'binding:vnic_type': 'normal',
'binding:host_id': 'host',
'security_groups': [tdata['security_group']],
'trunk_details': {
'sub_ports': [{'segmentation_type': 'vlan',
'mac_address': tdata['child1']['mac'],
'segmentation_id': tdata['tag_1'],
'port_id': tdata['child1']['id']},
{'segmentation_type': 'vlan',
'mac_address': tdata['child2']['mac'],
'segmentation_id': tdata['tag_2'],
'port_id': tdata['child2']['id']}],
'trunk_id': tdata['trunk_id']}
}
TEST.api_tp_ports.add(parent_port_dict)
TEST.tp_ports.add(neutron.PortTrunkParent(parent_port_dict))
# port child1
child1_port_dict = {
'admin_state_up': True,
'device_id': tdata['child1']['device_id'],
'device_owner': 'compute:nova',
'fixed_ips': [{'ip_address': tdata['child1']['ip'],
'subnet_id': tdata['subnet']['tst_id']}],
'id': tdata['child1']['id'],
'mac_address': tdata['child1']['mac'],
'name': 'child1',
'network_id': tdata['net']['tst_id'],
'status': 'ACTIVE',
'tenant_id': tdata['tenant_id'],
'binding:vnic_type': 'normal',
'binding:host_id': 'host',
'security_groups': [tdata['security_group']]
}
TEST.api_tp_ports.add(child1_port_dict)
TEST.tp_ports.add(neutron.PortTrunkSubport(
child1_port_dict,
{'trunk_id': tdata['trunk_id'],
'segmentation_type': 'vlan',
'segmentation_id': tdata['tag_1']}))
# port plain
port_dict = {
'admin_state_up': True,
'device_id': tdata['plain']['device_id'],
'device_owner': 'compute:nova',
'fixed_ips': [{'ip_address': tdata['plain']['ip'],
'subnet_id': tdata['subnet']['tst_id']}],
'id': tdata['plain']['id'],
'mac_address': tdata['plain']['mac'],
'name': 'plain',
'network_id': tdata['net']['tst_id'],
'status': 'ACTIVE',
'tenant_id': tdata['tenant_id'],
'binding:vnic_type': 'normal',
'binding:host_id': 'host',
'security_groups': [tdata['security_group']]
}
TEST.api_tp_ports.add(port_dict)
TEST.tp_ports.add(neutron.Port(port_dict))
# network tstalt
# port child2
child2_port_dict = {
'admin_state_up': True,
'device_id': tdata['child2']['device_id'],
'device_owner': 'compute:nova',
'fixed_ips': [{'ip_address': tdata['child2']['ip'],
'subnet_id': tdata['subnet']['tstalt_id']}],
'id': tdata['child2']['id'],
'mac_address': tdata['child2']['mac'],
'name': 'child2',
'network_id': tdata['net']['tstalt_id'],
'status': 'ACTIVE',
'tenant_id': tdata['tenant_id'],
'binding:vnic_type': 'normal',
'binding:host_id': 'host',
'security_groups': [tdata['security_group']]
}
TEST.api_tp_ports.add(child2_port_dict)
TEST.tp_ports.add(neutron.PortTrunkSubport(
child2_port_dict,
{'trunk_id': tdata['trunk_id'],
'segmentation_type': 'vlan',
'segmentation_id': tdata['tag_2']}))
| BiznetGIO/horizon | openstack_dashboard/test/test_data/neutron_data.py | Python | apache-2.0 | 36,665 |
#!/usr/bin/env python
'''`rethinkdb restore` loads data into a RethinkDB cluster from an archive'''
from __future__ import print_function
import copy, datetime, multiprocessing, optparse, os, shutil, sys, tarfile, tempfile, time, traceback
from . import net, utils_common, _import
usage = "rethinkdb restore FILE [-c HOST:PORT] [--tls-cert FILENAME] [-p] [--password-file FILENAME] [--clients NUM] [--shards NUM_SHARDS] [--replicas NUM_REPLICAS] [--force] [-i (DB | DB.TABLE)]..."
help_epilog = '''
FILE:
the archive file to restore data from;
if FILE is -, use standard input (note that
intermediate files will still be written to
the --temp-dir directory)
EXAMPLES:
rethinkdb restore rdb_dump.tar.gz -c mnemosyne:39500
Import data into a cluster running on host 'mnemosyne' with a client port at 39500 using
the named archive file.
rethinkdb restore rdb_dump.tar.gz -i test
Import data into a local cluster from only the 'test' database in the named archive file.
rethinkdb restore rdb_dump.tar.gz -i test.subscribers -c hades -p
Import data into a cluster running on host 'hades' which requires a password from only
a specific table from the named archive file.
rethinkdb restore rdb_dump.tar.gz --clients 4 --force
Import data to a local cluster from the named archive file using only 4 client connections
and overwriting any existing rows with the same primary key.
'''
def parse_options(argv, prog=None):
    """Parse and validate `rethinkdb restore` command-line arguments.

    Returns an options object whose `in_file` is normalized to either
    sys.stdin (for the `-` archive) or the absolute path of an existing
    archive file, and whose `create_args` is always a dict.
    """
    parser = utils_common.CommonOptionsParser(usage=usage, epilog=help_epilog, prog=prog)
    parser.add_option("-i", "--import", dest="db_tables", metavar="DB|DB.TABLE", default=[], help="limit restore to the given database or table (may be specified multiple times)", action="append", type="db_table")
    parser.add_option("--temp-dir", dest="temp_dir", metavar="DIR", default=None, help="directory to use for intermediary results")
    parser.add_option("--clients", dest="clients", metavar="CLIENTS", default=8, help="client connections to use (default: 8)", type="pos_int")
    parser.add_option("--hard-durability", dest="durability", action="store_const", default="soft", help="use hard durability writes (slower, uses less memory)", const="hard")
    parser.add_option("--force", dest="force", action="store_true", default=False, help="import data even if a table already exists")
    parser.add_option("--no-secondary-indexes", dest="sindexes", action="store_false", default=True, help="do not create secondary indexes for the restored tables")
    parser.add_option("--writers-per-table", dest="writers", metavar="WRITERS", default=multiprocessing.cpu_count(), help=optparse.SUPPRESS_HELP, type="pos_int")
    parser.add_option("--batch-size", dest="batch_size", metavar="BATCH", default=_import.default_batch_size, help=optparse.SUPPRESS_HELP, type="pos_int")
    # Replication settings
    replicationOptionsGroup = optparse.OptionGroup(parser, 'Replication Options')
    replicationOptionsGroup.add_option("--shards", dest="create_args", metavar="SHARDS", help="shards to setup on created tables (default: 1)", type="pos_int", action="add_key")
    replicationOptionsGroup.add_option("--replicas", dest="create_args", metavar="REPLICAS", help="replicas to setup on created tables (default: 1)", type="pos_int", action="add_key")
    parser.add_option_group(replicationOptionsGroup)
    options, args = parser.parse_args(argv)
    # -- Check validity of arguments
    # - archive: exactly one positional argument, with `-` meaning stdin
    if len(args) == 0:
        parser.error("Archive to import not specified. Provide an archive file created by rethinkdb-dump.")
    elif len(args) != 1:
        parser.error("Only one positional argument supported")
    options.in_file = args[0]
    if options.in_file == '-':
        options.in_file = sys.stdin
    else:
        if not os.path.isfile(options.in_file):
            parser.error("Archive file does not exist: %s" % options.in_file)
        options.in_file = os.path.realpath(options.in_file)
    # - temp_dir: must be an existing, writable directory
    if options.temp_dir:
        if not os.path.isdir(options.temp_dir):
            parser.error("Temporary directory doesn't exist or is not a directory: %s" % options.temp_dir)
        # BUG FIX: this previously read `res["temp_dir"]`, an undefined name,
        # so an unwritable temp dir raised NameError instead of a clean error.
        if not os.access(options.temp_dir, os.W_OK):
            parser.error("Temporary directory inaccessible: %s" % options.temp_dir)
    # - create_args: normalize "no --shards/--replicas given" to an empty dict
    if options.create_args is None:
        options.create_args = {}
    # --
    return options
def do_unzip(temp_dir, options):
    '''Extract the data files from the archive into `temp_dir`.

    Only regular files matching the expected `<top>/<db>/<table>.<ext>`
    layout (ext one of .json/.csv/.info) are extracted; every other member
    is recorded and returned as the list of ignored member names. Raises
    RuntimeError on an unexpected layout or if no data files are found.
    '''
    tables_to_export = set(options.db_tables)
    top_level = None
    files_ignored = []
    files_found = False
    # tarfile takes the archive either as a file object (stdin) or by name
    tarfileOptions = {
        "mode": "r|*",
        "fileobj" if hasattr(options.in_file, "read") else "name": options.in_file
    }
    with tarfile.open(**tarfileOptions) as archive:
        for tarinfo in archive:
            # skip without comment anything but files
            if not tarinfo.isfile():
                continue # skip everything but files
            # normalize the path
            relpath = os.path.relpath(os.path.realpath(tarinfo.name.strip().lstrip(os.sep)))
            # skip things that try to jump out of the folder
            if relpath.startswith(os.path.pardir):
                files_ignored.append(tarinfo.name)
                continue
            # skip files types other than what we use
            if not os.path.splitext(relpath)[1] in (".json", ".csv", ".info"):
                files_ignored.append(tarinfo.name)
                continue
            # ensure this looks like our structure
            try:
                top, db, file_name = relpath.split(os.sep)
            except ValueError:
                raise RuntimeError("Error: Archive file has an unexpected directory structure: %s" % tarinfo.name)
            if not top_level:
                top_level = top
            elif top != top_level:
                raise RuntimeError("Error: Archive file has an unexpected directory structure (%s vs %s)" % (top, top_level))
            # filter out tables we are not looking for
            # BUG FIX: os.path.splitext() returns a (root, ext) tuple; the
            # previous code compared the whole tuple against table names, so
            # `-i DB.TABLE` filters never matched any file. Use just the root.
            table = os.path.splitext(file_name)[0]
            if tables_to_export and not ((db, table) in tables_to_export or (db, None) in tables_to_export):
                continue # skip without comment
            # write the file out
            files_found = True
            dest_path = os.path.join(temp_dir, db, file_name)
            if not os.path.exists(os.path.dirname(dest_path)):
                os.makedirs(os.path.dirname(dest_path))
            with open(dest_path, 'wb') as dest:
                source = archive.extractfile(tarinfo)
                chunk = True
                while chunk:
                    chunk = source.read(1024 * 128)
                    dest.write(chunk)
                source.close()
            assert os.path.isfile(os.path.join(temp_dir, db, file_name))
    if not files_found:
        raise RuntimeError("Error: Archive file had no files")
    # - send the location and ignored list back to our caller
    return files_ignored
def do_restore(options):
    """Unpack the archive into a scratch directory, then run the importer.

    The scratch directory is created under options.temp_dir (or the system
    default) and is always removed, even on failure.
    """
    scratch_dir = tempfile.mkdtemp(dir=options.temp_dir)
    try:
        # Step 1: unpack the archive into the scratch directory
        if not options.quiet:
            print("Extracting archive file...")
        unzip_started = time.time()
        ignored_files = do_unzip(scratch_dir, options)
        if not options.quiet:
            print(" Done (%d seconds)" % (time.time() - unzip_started))

        # Step 2: hand the extracted tree to the import machinery, using a
        # shallow copy of the options so the caller's object is untouched
        options = copy.copy(options)
        options.fields = None
        options.directory = scratch_dir

        if not options.quiet:
            print("Importing from directory...")
        try:
            _import.import_directory(options, ignored_files)
        except RuntimeError as ex:
            if options.debug:
                traceback.print_exc()
            # Translate importer errors into restore-flavored messages
            if str(ex) == "Warnings occurred during import":
                raise RuntimeError("Warning: import did not create some secondary indexes.")
            message = str(ex)
            if message.startswith('Error: '):
                message = message[len('Error: '):]
            raise RuntimeError("Error: import failed: %s" % message)
        # 'Done' message will be printed by the import script
    finally:
        shutil.rmtree(scratch_dir)
def main(argv=None, prog=None):
    """CLI entry point: parse arguments, run the restore, report failures.

    Returns 0 on success and 1 on a RuntimeError (printed to stderr).
    """
    options = parse_options(sys.argv[1:] if argv is None else argv, prog=prog)
    try:
        do_restore(options)
    except RuntimeError as err:
        print(err, file=sys.stderr)
        return 1
    return 0


if __name__ == "__main__":
    exit(main())
| bchavez/rethinkdb | drivers/python/rethinkdb/_restore.py | Python | agpl-3.0 | 9,062 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._resolve_private_link_service_id_operations import build_post_request
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ResolvePrivateLinkServiceIdOperations:
    """ResolvePrivateLinkServiceIdOperations async operations.
    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.
    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.containerservice.v2021_10_01.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """
    models = _models
    def __init__(self, client, config, serializer, deserializer) -> None:
        # NOTE: auto-generated (AutoRest) code -- manual edits will be lost on
        # regeneration. Client/serializer objects are owned by the caller.
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    @distributed_trace_async
    async def post(
        self,
        resource_group_name: str,
        resource_name: str,
        parameters: "_models.PrivateLinkResource",
        **kwargs: Any
    ) -> "_models.PrivateLinkResource":
        """Gets the private link service ID for the specified managed cluster.
        Gets the private link service ID for the specified managed cluster.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param resource_name: The name of the managed cluster resource.
        :type resource_name: str
        :param parameters: Parameters required in order to resolve a private link service ID.
        :type parameters: ~azure.mgmt.containerservice.v2021_10_01.models.PrivateLinkResource
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: PrivateLinkResource, or the result of cls(response)
        :rtype: ~azure.mgmt.containerservice.v2021_10_01.models.PrivateLinkResource
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.PrivateLinkResource"]
        # Map auth/not-found/conflict responses onto typed exceptions; callers
        # may extend or override this mapping via the 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
        # Serialize the request body from the model object.
        _json = self._serialize.body(parameters, 'PrivateLinkResource')
        request = build_post_request(
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            resource_name=resource_name,
            content_type=content_type,
            json=_json,
            template_url=self.post.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        # Send the request through the client pipeline (non-streaming body).
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('PrivateLinkResource', pipeline_response)
        if cls:
            # A custom 'cls' callback gets full access to the raw response.
            return cls(pipeline_response, deserialized, {})
        return deserialized
    post.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/resolvePrivateLinkServiceId'}  # type: ignore
| Azure/azure-sdk-for-python | sdk/containerservice/azure-mgmt-containerservice/azure/mgmt/containerservice/v2021_10_01/aio/operations/_resolve_private_link_service_id_operations.py | Python | mit | 4,868 |
class Book:
    """A simple record of a published book."""

    def __init__(self, title, authors, publisher, isbn):
        """Create a book with a title, a list of author names, a publisher
        name, and an ISBN string."""
        self.title = title
        # Keep a private copy so later mutation of the caller's list does not
        # affect this book.
        self.authors = list(authors)
        self.publisher = publisher
        self.ISBN = isbn

    def num_authors(self):
        """Return how many authors this book has."""
        return len(self.authors)
if __name__ == '__main__':
    # Small smoke demo: build one Book and query its author count.
    author_names = ["Campbell", "Gries", "Montojo"]
    pybook = Book(
        "Practical Programming",
        author_names,
        "Pragmatic Bookshelf",
        "978-1-93778-545-1")
    pybook.num_authors()
| simontakite/sysadmin | pythonscripts/practicalprogramming/oop/book_memmodel.py | Python | gpl-2.0 | 572 |
#!/usr/bin/env python
"""Test POD module"""
import unittest
import os
from os.path import join
from shutil import rmtree
import copy
import numpy as np
from modred import pod, parallel, util
from modred.py2to3 import range
from modred.vectorspace import VectorSpaceArrays, VectorSpaceHandles
from modred.vectors import VecHandlePickle
#@unittest.skip('Testing something else.')
@unittest.skipIf(parallel.is_distributed(), 'Serial only.')
class TestPODArraysFunctions(unittest.TestCase):
    """Tests the array-based POD functions (method of snapshots and direct
    method): correlation array, eigendecomposition properties, modes, and
    projection coefficients, for several inner-product weightings."""

    def setUp(self):
        # Data dimensions for the random snapshot matrices.
        self.num_states = 30
        self.num_vecs = 10

    def test_compute_modes(self):
        rtol = 1e-10
        atol = 1e-12

        # Generate weights to test different inner products.
        weights_1D = np.random.random(self.num_states)
        # BUG FIX: use the builtin `complex` dtype; the `np.complex` alias was
        # deprecated in NumPy 1.20 and removed in NumPy 1.24, so the old code
        # raises AttributeError on modern NumPy.
        weights_2D = np.identity(self.num_states, dtype=complex)
        weights_2D[0, 0] = 2.
        weights_2D[2, 1] = 0.3j
        weights_2D[1, 2] = weights_2D[2, 1].conj()

        # Generate random snapshot data
        vecs_array = (
            np.random.random((self.num_states, self.num_vecs)) +
            1j * np.random.random((self.num_states, self.num_vecs)))

        # Test both method of snapshots and direct method
        for method in ['snaps', 'direct']:
            if method == 'snaps':
                compute_POD = pod.compute_POD_arrays_snaps_method
            elif method == 'direct':
                compute_POD = pod.compute_POD_arrays_direct_method
            else:
                raise ValueError('Invalid method choice.')

            # Loop through different inner product weights
            for weights in [None, weights_1D, weights_2D]:
                IP = VectorSpaceArrays(
                    weights=weights).compute_inner_product_array

                # Compute POD
                POD_res = compute_POD(vecs_array, inner_product_weights=weights)

                # For method of snapshots, test correlation array values
                if method == 'snaps':
                    np.testing.assert_allclose(
                        IP(vecs_array, vecs_array), POD_res.correlation_array,
                        rtol=rtol, atol=atol)

                # Check POD eigenvalues and eigenvectors
                np.testing.assert_allclose(
                    IP(vecs_array, vecs_array).dot(POD_res.eigvecs),
                    POD_res.eigvecs.dot(np.diag(POD_res.eigvals)),
                    rtol=rtol, atol=atol)

                # Check POD modes
                np.testing.assert_allclose(
                    vecs_array.dot(IP(vecs_array, POD_res.modes)),
                    POD_res.modes.dot(np.diag(POD_res.eigvals)),
                    rtol=rtol, atol=atol)

                # Check projection coefficients
                np.testing.assert_allclose(
                    POD_res.proj_coeffs, IP(POD_res.modes, vecs_array),
                    rtol=rtol, atol=atol)

                # Choose a random subset of modes to compute, for testing mode
                # indices argument. Test both an explicit selection of mode
                # indices and a None argument.
                mode_indices_trunc = np.unique(np.random.randint(
                    0, high=np.linalg.matrix_rank(vecs_array),
                    size=np.linalg.matrix_rank(vecs_array) // 2))
                for mode_idxs_arg, mode_idxs_vals in zip(
                        [None, mode_indices_trunc],
                        [range(POD_res.eigvals.size), mode_indices_trunc]):

                    # Compute POD
                    POD_res_sliced = compute_POD(
                        vecs_array, mode_indices=mode_idxs_arg,
                        inner_product_weights=weights)

                    # Check that if mode indices are passed in, the correct
                    # modes are returned.
                    np.testing.assert_allclose(
                        POD_res_sliced.modes,
                        POD_res.modes[:, mode_idxs_vals],
                        rtol=rtol, atol=atol)
#@unittest.skip('Testing something else.')
class TestPODHandles(unittest.TestCase):
    """Tests for the handle-based (file-backed, possibly parallel) POD class."""
    def setUp(self):
        # Each test gets a scratch directory of pickled snapshot files; rank 0
        # creates it and all ranks synchronize on it.
        # Specify output locations
        if not os.access('.', os.W_OK):
            raise RuntimeError('Cannot write to current directory')
        self.test_dir = 'files_POD_DELETE_ME'
        if not os.path.isdir(self.test_dir):
            parallel.call_from_rank_zero(os.mkdir, self.test_dir)
        self.vec_path = join(self.test_dir, 'vec_%03d.pkl')
        self.mode_path = join(self.test_dir, 'mode_%03d.pkl')
        # Specify data dimensions
        self.num_states = 30
        self.num_vecs = 10
        # Generate random data and write to disk using handles.  The same
        # random array is broadcast to all ranks so every process sees
        # identical snapshot data.
        self.vecs_array = (
            parallel.call_and_bcast(
                np.random.random, (self.num_states, self.num_vecs)) +
            1j * parallel.call_and_bcast(
                np.random.random, (self.num_states, self.num_vecs)))
        self.vec_handles = [
            VecHandlePickle(self.vec_path % i) for i in range(self.num_vecs)]
        for idx, hdl in enumerate(self.vec_handles):
            hdl.put(self.vecs_array[:, idx])
        parallel.barrier()
    def tearDown(self):
        # Remove the scratch directory; barriers keep ranks in lockstep so no
        # rank deletes files another rank is still reading.
        parallel.barrier()
        parallel.call_from_rank_zero(rmtree, self.test_dir, ignore_errors=True)
        parallel.barrier()
    #@unittest.skip('Testing something else.')
    def test_init(self):
        """Test arguments passed to the constructor are assigned properly"""
        # Get default data member values
        # Set verbosity to false, to avoid printing warnings during tests
        def my_load(): pass
        def my_save(): pass
        def my_IP(): pass
        data_members_default = {
            'put_array': util.save_array_text, 'get_array':util.load_array_text,
            'verbosity': 0, 'eigvecs': None, 'eigvals': None,
            'correlation_array': None, 'vec_handles': None, 'vecs': None,
            'vec_space': VectorSpaceHandles(inner_product=my_IP, verbosity=0)}
        for k,v in util.get_data_members(
            pod.PODHandles(inner_product=my_IP, verbosity=0)).items():
            self.assertEqual(v, data_members_default[k])
        my_POD = pod.PODHandles(inner_product=my_IP, verbosity=0)
        data_members_modified = copy.deepcopy(data_members_default)
        data_members_modified['vec_space'] = VectorSpaceHandles(
            inner_product=my_IP, verbosity=0)
        for k,v in util.get_data_members(my_POD).items():
            self.assertEqual(v, data_members_modified[k])
        # Overriding get_array must only change that member.
        my_POD = pod.PODHandles(
            inner_product=my_IP, get_array=my_load, verbosity=0)
        data_members_modified = copy.deepcopy(data_members_default)
        data_members_modified['get_array'] = my_load
        for k,v in util.get_data_members(my_POD).items():
            self.assertEqual(v, data_members_modified[k])
        # Overriding put_array must only change that member.
        my_POD = pod.PODHandles(
            inner_product=my_IP, put_array=my_save, verbosity=0)
        data_members_modified = copy.deepcopy(data_members_default)
        data_members_modified['put_array'] = my_save
        for k,v in util.get_data_members(my_POD).items():
            self.assertEqual(v, data_members_modified[k])
        # max_vecs_per_node propagates into the vector space's per-proc limit.
        max_vecs_per_node = 500
        my_POD = pod.PODHandles(
            inner_product=my_IP, max_vecs_per_node=max_vecs_per_node,
            verbosity=0)
        data_members_modified = copy.deepcopy(data_members_default)
        data_members_modified['vec_space'].max_vecs_per_node = max_vecs_per_node
        data_members_modified['vec_space'].max_vecs_per_proc = (
            max_vecs_per_node *
            parallel.get_num_nodes() /
            parallel.get_num_procs())
        for k,v in util.get_data_members(my_POD).items():
            self.assertEqual(v, data_members_modified[k])
    #@unittest.skip('Testing something else.')
    def test_puts_gets(self):
        # Round-trip test: put decomposition arrays to text files with one POD
        # object, get them back with another, and compare.
        # Generate some random data
        correlation_array_true = parallel.call_and_bcast(
            np.random.random, ((self.num_vecs, self.num_vecs)))
        eigvals_true = parallel.call_and_bcast(
            np.random.random, self.num_vecs)
        eigvecs_true = parallel.call_and_bcast(
            np.random.random, ((self.num_states, self.num_vecs)))
        proj_coeffs_true = parallel.call_and_bcast(
            np.random.random, ((self.num_vecs, self.num_vecs)))
        # Create a POD object and store the data in it
        POD_save = pod.PODHandles(verbosity=0)
        POD_save.correlation_array = correlation_array_true
        POD_save.eigvals = eigvals_true
        POD_save.eigvecs = eigvecs_true
        POD_save.proj_coeffs = proj_coeffs_true
        # Write the data to disk
        eigvecs_path = join(self.test_dir, 'eigvecs.txt')
        eigvals_path = join(self.test_dir, 'eigvals.txt')
        correlation_array_path = join(self.test_dir, 'correlation.txt')
        proj_coeffs_path = join(self.test_dir, 'proj_coeffs.txt')
        POD_save.put_decomp(eigvals_path, eigvecs_path)
        POD_save.put_correlation_array(correlation_array_path)
        POD_save.put_proj_coeffs(proj_coeffs_path)
        parallel.barrier()
        # Create a new POD object and use it to load the data
        POD_load = pod.PODHandles(verbosity=0)
        POD_load.get_decomp(eigvals_path, eigvecs_path)
        POD_load.get_correlation_array(correlation_array_path)
        POD_load.get_proj_coeffs(proj_coeffs_path)
        # Check that the loaded data is correct
        np.testing.assert_equal(POD_load.eigvals, eigvals_true)
        np.testing.assert_equal(POD_load.eigvecs, eigvecs_true)
        np.testing.assert_equal(
            POD_load.correlation_array, correlation_array_true)
        np.testing.assert_equal(POD_load.proj_coeffs, proj_coeffs_true)
    #@unittest.skip('Testing something else.')
    def test_compute_decomp(self):
        """Test computation of the correlation array and SVD arrays."""
        rtol = 1e-10
        atol = 1e-12
        # Compute POD using modred
        POD = pod.PODHandles(inner_product=np.vdot, verbosity=0)
        eigvals, eigvecs = POD.compute_decomp(self.vec_handles)
        # Test correlation array values by simply recomputing them. Here simply
        # take all inner products, rather than assuming a symmetric inner
        # product.
        np.testing.assert_allclose(
            POD.correlation_array,
            POD.vec_space.compute_inner_product_array(
                self.vec_handles, self.vec_handles),
            rtol=rtol, atol=atol)
        # Check POD eigenvectors and eigenvalues
        np.testing.assert_allclose(
            self.vecs_array.conj().T.dot(self.vecs_array.dot(eigvecs)),
            eigvecs.dot(np.diag(eigvals)), rtol=rtol, atol=atol)
        # Check that returned values match internal values
        np.testing.assert_equal(eigvals, POD.eigvals)
        np.testing.assert_equal(eigvecs, POD.eigvecs)
    #@unittest.skip('Testing something else.')
    def test_compute_modes(self):
        rtol = 1e-10
        atol = 1e-12
        # Compute POD using modred. (The properties defining a POD mode require
        # manipulations involving the correct decomposition, so we cannot
        # isolate the mode computation from the decomposition step.)
        POD = pod.PODHandles(inner_product=np.vdot, verbosity=0)
        POD.compute_decomp(self.vec_handles)
        # Select a subset of modes to compute. Compute at least half
        # the modes, and up to all of them. Make sure to use unique
        # values. (This may reduce the number of modes computed.)
        num_modes = parallel.call_and_bcast(
            np.random.randint,
            POD.eigvals.size // 2, POD.eigvals.size + 1)
        mode_idxs = np.unique(parallel.call_and_bcast(
            np.random.randint,
            0, POD.eigvals.size, num_modes))
        # Create handles for the modes
        mode_handles = [VecHandlePickle(self.mode_path % i) for i in mode_idxs]
        # Compute modes
        POD.compute_modes(mode_idxs, mode_handles, vec_handles=self.vec_handles)
        # Test modes
        np.testing.assert_allclose(
            POD.vec_space.compute_inner_product_array(
                mode_handles, self.vec_handles).dot(
                POD.vec_space.compute_inner_product_array(
                    self.vec_handles, mode_handles)),
            np.diag(POD.eigvals[mode_idxs]),
            rtol=rtol, atol=atol)
    #@unittest.skip('Testing something else.')
    def test_compute_proj_coeffs(self):
        rtol = 1e-10
        atol = 1e-12
        # Compute POD using modred. (The properties defining a projection onto
        # POD modes require manipulations involving the correct decomposition
        # and modes, so we cannot isolate the mode computation from those
        # computations.)
        POD = pod.PODHandles(inner_product=np.vdot, verbosity=0)
        POD.compute_decomp(self.vec_handles)
        mode_idxs = range(POD.eigvals.size)
        mode_handles = [VecHandlePickle(self.mode_path % i) for i in mode_idxs]
        POD.compute_modes(mode_idxs, mode_handles, vec_handles=self.vec_handles)
        # Compute true projection coefficients by computing the inner products
        # between modes and snapshots.
        proj_coeffs_true = POD.vec_space.compute_inner_product_array(
            mode_handles, self.vec_handles)
        # Compute projection coefficients using POD object, which avoids
        # actually manipulating handles and computing their inner products,
        # instead using elements of the decomposition for a more efficient
        # computations.
        proj_coeffs = POD.compute_proj_coeffs()
        # Test values
        np.testing.assert_allclose(
            proj_coeffs, proj_coeffs_true, rtol=rtol, atol=atol)
# Allow running this test module directly as a script.
if __name__ == "__main__":
    unittest.main()
| belson17/modred | modred/tests/testpod.py | Python | bsd-2-clause | 13,822 |
from django.contrib import admin
from .models import Member,Contact,Technology,Project,Contributor
# Register each model with the Django admin site, in the same order as before.
for _model in (Member, Contact, Technology, Project, Contributor):
    admin.site.register(_model)
| o-d-i-n/HelloWorld | api/admin.py | Python | mit | 251 |
from django.http import HttpResponse
from django.core.serializers.json import DjangoJSONEncoder
import json
def json_response(content="", status="OK", status_code=200, error=""):
    """
    Return an HTTP response wrapping the content in the standard JSON envelope

    Keyword arguments:
    content -- string with the contents of the response
    status -- string with the status of the response
    status_code -- HTTP status code
        (See http://goo.gl/DKyBHK for status codes)
    error -- string with the error message if there is one
    """
    # Standard envelope shared by all API responses.
    envelope = {
        'status': status,
        'status_code': status_code,
        'output': content,
        'error': error
    }
    body = json.dumps(envelope, cls=DjangoJSONEncoder, indent=4)
    return HttpResponse(body, content_type='application/json', status=status_code)
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
from dataclasses import dataclass, field
from typing import List, Optional, Tuple
import torch
from fairseq import utils
from fairseq.data import (
Dictionary,
TokenBlockDataset,
data_utils,
iterators,
)
from fairseq.dataclass import FairseqDataclass
from fairseq.distributed import utils as dist_utils
from fairseq.tasks import FairseqTask, register_task
from omegaconf import II
logger = logging.getLogger(__name__)
@dataclass
class TruncatedBPTTLMConfig(FairseqDataclass):
    """Configuration for the truncated-BPTT language modeling task."""
    # "???" marks the field as required (OmegaConf missing-value sentinel).
    data: str = field(default="???", metadata={"help": "path to data directory"})
    tokens_per_sample: int = field(
        default=1024, metadata={"help": "max number of tokens per sequence"},
    )
    # II("...") is an OmegaConf interpolation: mirrors dataset.batch_size.
    batch_size: int = II("dataset.batch_size")
    # Some models use *max_target_positions* to know how many positional
    # embeddings to learn. We use II(...) to make it default to
    # *tokens_per_sample*, but in principle there could be more positional
    # embeddings than tokens in a single batch. This may also be irrelevant for
    # custom model implementations.
    max_target_positions: int = II("task.tokens_per_sample")
    # these will be populated automatically if not provided
    # (see TruncatedBPTTLMTask.__init__)
    data_parallel_rank: Optional[int] = None
    data_parallel_size: Optional[int] = None
@register_task("truncated_bptt_lm", dataclass=TruncatedBPTTLMConfig)
class TruncatedBPTTLMTask(FairseqTask):
    """Language modeling task that feeds the model contiguous streams of text,
    enabling truncated backpropagation through time (e.g. Transformer-XL)."""
    def __init__(self, cfg: TruncatedBPTTLMConfig):
        super().__init__(cfg)
        # Fill in data-parallel rank/size if the caller did not supply them.
        if cfg.data_parallel_rank is None or cfg.data_parallel_size is None:
            if torch.distributed.is_initialized():
                cfg.data_parallel_rank = dist_utils.get_data_parallel_rank()
                cfg.data_parallel_size = dist_utils.get_data_parallel_world_size()
            else:
                cfg.data_parallel_rank = 0
                cfg.data_parallel_size = 1
        # load the dictionary
        paths = utils.split_paths(cfg.data)
        assert len(paths) > 0
        self.dictionary = Dictionary.load(os.path.join(paths[0], "dict.txt"))
        logger.info("dictionary: {} types".format(len(self.dictionary)))
    def load_dataset(self, split, epoch=1, combine=False, **kwargs):
        """Load a given dataset split (e.g., train, valid, test)"""
        # support sharded datasets
        paths = utils.split_paths(self.cfg.data)
        assert len(paths) > 0
        data_path = paths[(epoch - 1) % len(paths)]
        split_path = os.path.join(data_path, split)
        # each element of *data* will be a tensorized line from the original
        # text dataset, similar to ``open(split_path).readlines()``
        data = data_utils.load_indexed_dataset(
            split_path, self.dictionary, combine=combine
        )
        if data is None:
            raise FileNotFoundError(
                "Dataset not found: {} ({})".format(split, split_path)
            )
        # this is similar to ``data.view(-1).split(tokens_per_sample)``
        data = TokenBlockDataset(
            data,
            data.sizes,
            block_size=self.cfg.tokens_per_sample,
            pad=None, # unused
            eos=None, # unused
            break_mode="none",
        )
        self.datasets[split] = TruncatedBPTTDataset(
            data=data,
            bsz_per_shard=self.cfg.batch_size,
            shard_id=self.cfg.data_parallel_rank,
            num_shards=self.cfg.data_parallel_size,
        )
    def dataset(self, split):
        """Return the loaded dataset for *split* (must be loaded already)."""
        return self.datasets[split]
    def get_batch_iterator(
        self,
        dataset,
        num_workers=0,
        epoch=1,
        data_buffer_size=0,
        skip_remainder_batch=False,
        **kwargs
    ):
        """Return an epoch iterator over *dataset*; each dataset item is
        already a full per-shard batch, so no further batching is done."""
        return iterators.EpochBatchIterator(
            dataset=dataset,
            collate_fn=self._collate_fn,
            num_workers=num_workers,
            epoch=epoch,
            buffer_size=data_buffer_size,
            # we don't use the batching functionality from EpochBatchIterator;
            # instead every item in *dataset* is a whole batch
            batch_sampler=[[i] for i in range(len(dataset))],
            disable_shuffling=True,
            skip_remainder_batch=skip_remainder_batch,
        )
    def _collate_fn(self, items: List[List[torch.Tensor]]):
        """Convert one dataset item (a whole batch) into a fairseq sample dict."""
        # we don't use fairseq's batching functionality, so we expect a single
        # Tensor of type List[torch.Tensor]
        assert len(items) == 1
        # item will have shape B x T (the last batch may have length < T)
        id, item = items[0]
        item = data_utils.collate_tokens(item, pad_idx=self.source_dictionary.pad())
        B, T = item.size()
        # shift item one position over and append a padding token for the target
        target = torch.nn.functional.pad(
            item[:, 1:], (0, 1, 0, 0), value=self.target_dictionary.pad()
        )
        # fairseq expects batches to have the following structure
        return {
            "id": torch.tensor([id] * item.size(0)),
            "net_input": {"src_tokens": item,},
            "target": target,
            "nsentences": item.size(0),
            "ntokens": item.numel(),
        }
    def build_dataset_for_inference(
        self, src_tokens: List[torch.Tensor], src_lengths: List[int], **kwargs
    ) -> torch.utils.data.Dataset:
        """Wrap raw token sequences in a dataset compatible with _collate_fn."""
        eos = self.source_dictionary.eos()
        dataset = TokenBlockDataset(
            src_tokens,
            src_lengths,
            block_size=None,  # ignored for "eos" break mode
            pad=self.source_dictionary.pad(),
            eos=eos,
            break_mode="eos",
        )
        class Dataset(torch.utils.data.Dataset):
            def __getitem__(self, i):
                item = dataset[i]
                if item[-1] == eos:
                    # remove eos to support generating with a prefix
                    item = item[:-1]
                return (i, [item])
            def __len__(self):
                return len(dataset)
        return Dataset()
    def inference_step(
        self, generator, models, sample, prefix_tokens=None, constraints=None
    ):
        """Run generation for *sample*; constrained generation is unsupported."""
        with torch.no_grad():
            if constraints is not None:
                raise NotImplementedError
            # SequenceGenerator doesn't use *src_tokens* directly, we need to
            # pass the *prefix_tokens* argument instead.
            if prefix_tokens is None and sample["net_input"]["src_tokens"].nelement():
                prefix_tokens = sample["net_input"]["src_tokens"]
            # begin generation with the end-of-sentence token
            bos_token = self.source_dictionary.eos()
            return generator.generate(
                models, sample, prefix_tokens=prefix_tokens, bos_token=bos_token
            )
    def eval_lm_dataloader(
        self,
        dataset,
        max_tokens: Optional[int] = 36000,
        batch_size: Optional[int] = None,
        max_positions: Optional[int] = None,
        num_shards: int = 1,
        shard_id: int = 0,
        num_workers: int = 1,
        data_buffer_size: int = 10,
        context_window: int = 0,
    ):
        """Return a dataloader for LM evaluation; the Transformer-XL memory
        mechanism replaces the usual --context-window option."""
        if context_window > 0:
            raise NotImplementedError(
                "Transformer-XL doesn't need --context-window, try "
                "--model-overrides '{\"mem_len\":42}' instead "
            )
        return self.get_batch_iterator(
            dataset=dataset,
            max_tokens=max_tokens,
            max_sentences=batch_size,
            max_positions=max_positions,
            ignore_invalid_inputs=True,
            num_shards=num_shards,
            shard_id=shard_id,
            num_workers=num_workers,
            data_buffer_size=data_buffer_size,
        ).next_epoch_itr(shuffle=False)
    @property
    def source_dictionary(self):
        # Source and target share a single dictionary in language modeling.
        return self.dictionary
    @property
    def target_dictionary(self):
        return self.dictionary
class TruncatedBPTTDataset(torch.utils.data.Dataset):
    """Presents an ordered list of items as per-shard truncated-BPTT batches.

    Each element of this dataset is a full batch for one GPU (shard): a tuple
    ``(index, [item, item, ...])`` whose list holds ``bsz_per_shard`` items
    that are contiguous in (batchified) time.
    """

    def __init__(
        self,
        data: List[torch.Tensor],  # ordered list of items
        bsz_per_shard,  # number of items processed per GPUs per forward
        shard_id,  # current GPU ID
        num_shards,  # number of GPUs
    ):
        super().__init__()
        self.data = data

        # Total number of sequences processed across all GPUs per forward pass.
        global_bsz = bsz_per_shard * num_shards

        # Lay the item indices out in ``global_bsz`` parallel streams,
        # dropping any remainder that does not divide evenly. For example,
        # with 16 items, bsz_per_shard=2 and num_shards=3 the index table is:
        #   [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11]]
        # Shard 1 then owns rows 2..3 and yields
        #   (0, [data[4], data[6]]) and (1, [data[5], data[7]]).
        all_indices = torch.arange(len(data))
        num_cols = all_indices.size(0) // global_bsz
        all_indices = all_indices.narrow(0, 0, num_cols * global_bsz)
        all_indices = all_indices.view(global_bsz, -1).contiguous()
        assert all_indices.size(0) == global_bsz

        # Keep only the rows that belong to this shard.
        start = shard_id * bsz_per_shard
        self.my_indices = all_indices[start:start + bsz_per_shard]
        assert self.my_indices.size(0) == bsz_per_shard

    def __len__(self):
        return self.my_indices.size(1)

    def __getitem__(self, i) -> Tuple[int, List[torch.Tensor]]:
        column = self.my_indices[:, i]
        return (i, [self.data[idx] for idx in column])
| pytorch/fairseq | examples/truncated_bptt/truncated_bptt_lm_task.py | Python | mit | 9,995 |
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Helper functions to access Chronicle APIs using OAuth 2.0.
Background information:
https://google-auth.readthedocs.io/en/latest/user-guide.html#service-account-private-key-files
https://developers.google.com/identity/protocols/oauth2#serviceaccount
Details about using the Google-auth library with the Requests library:
https://github.com/googleapis/google-auth-library-python/blob/master/google/auth/transport/requests.py
https://requests.readthedocs.io
"""
import argparse
import pathlib
from typing import Optional, Sequence, Union
from google.auth.transport import requests
from google.oauth2 import service_account
DEFAULT_CREDENTIALS_FILE = pathlib.Path.home() / ".chronicle_credentials.json"
AUTHORIZATION_SCOPES = ["https://www.googleapis.com/auth/chronicle-backstory"]
def initialize_http_session(
    credentials_file_path: Optional[Union[str, pathlib.Path]],
    scopes: Optional[Sequence[str]] = None) -> requests.AuthorizedSession:
  """Builds an authorized HTTP session from service-account credentials.

  Args:
    credentials_file_path: Path to the JSON key file of a Google Cloud
      Platform service account.  Falls back to
      ".chronicle_credentials.json" in the user's home directory.
    scopes: OAuth 2.0 scopes (https://oauth.net/2/scope/) to request.
      Defaults to the Chronicle API scope.

  Returns:
    HTTP session object to send authorized requests and receive responses.

  Raises:
    OSError: Failed to read the given file, e.g. not found, no read access.
    ValueError: Invalid file contents.
  """
  key_file = credentials_file_path or DEFAULT_CREDENTIALS_FILE
  credentials = service_account.Credentials.from_service_account_file(
      str(key_file), scopes=scopes or AUTHORIZATION_SCOPES)
  return requests.AuthorizedSession(credentials)
def add_argument_credentials_file(parser: argparse.ArgumentParser):
  """Adds the shared -c/--credentials_file option to *parser*."""
  help_text = f"credentials file path (default: '{DEFAULT_CREDENTIALS_FILE}')"
  parser.add_argument("-c", "--credentials_file", type=str, help=help_text)
| chronicle/api-samples-python | common/chronicle_auth.py | Python | apache-2.0 | 2,896 |
# _domain.py
#
# openipmi GUI handling for domains
#
# Author: MontaVista Software, Inc.
# Corey Minyard <[email protected]>
# [email protected]
#
# Copyright 2006 MontaVista Software Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License
# as published by the Free Software Foundation; either version 2 of
# the License, or (at your option) any later version.
#
#
# THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free
# Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#
import Tix
class PopupSelector:
    """Adapter binding one popup-menu entry to a handler and its argument.

    ``handle`` is used as the Tk menu-command callback: it marks the shared
    ``PopupDone`` flag so the popup event loop exits, then invokes the
    handler with the stored value.
    """

    def __init__(self, handler, val, pd):
        self.handler = handler
        self.val = val
        self.pd = pd

    def handle(self):
        """Flag the popup as finished and invoke the bound handler."""
        self.pd.done = True
        self.handler(self.val)
class PopupDone:
    """Mutable flag recording whether the popup menu has been dismissed."""

    def __init__(self):
        self.done = False

    def setdone(self, event):
        """Tk event callback: mark the popup as finished."""
        self.done = True
def popup(ui, event, handlers, point=None):
    """Show a transient popup menu built from *handlers* and block until done.

    ``handlers`` is a sequence of ``(label, callback)`` or
    ``(label, callback, value)`` tuples; the chosen callback is invoked with
    the value (or None).  The menu is posted at *point* (defaults to the
    triggering *event*), and this function pumps the Tk event loop until a
    selection or dismissal occurs.
    """
    menu = Tix.Menu(ui, tearoff=0);
    pd = PopupDone()
    for h in handlers:
        # PopupSelector marks pd.done and calls the handler when selected.
        if (len(h) >= 3):
            p = PopupSelector(h[1], h[2], pd)
            pass
        else:
            p = PopupSelector(h[1], None, pd)
            pass
        menu.add("command", command=p.handle, label=h[0])
        pass
    if (point == None):
        point = event
        pass
    menu.post(point.x_root, point.y_root)
    # Grab events globally so focus-loss / right-button-release anywhere
    # dismisses the menu via pd.setdone.
    menu.grab_set_global()
    menu.bind("<FocusOut>", pd.setdone)
    menu.bind("<ButtonRelease-3>", pd.setdone)
    # Manually pump Tk events until selection or dismissal sets pd.done.
    while (not pd.done):
        event.widget.tk.dooneevent()
        pass
    menu.grab_release()
    menu.destroy()
    return
| ystk/debian-openipmi | swig/python/openipmigui/gui_popup.py | Python | gpl-2.0 | 2,542 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-23 20:41
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated migration: updates the ``booking`` model's Meta options
    # (adds the custom ``show_booking`` permission and the French
    # verbose_name "Réservation").  AlterModelOptions does not change the
    # database schema, only Django-side state.

    dependencies = [
        ('booking', '0012_auto_20170204_1351'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='booking',
            options={'permissions': (('show_booking', 'Can show booking'),), 'verbose_name': 'Réservation'},
        ),
    ]
| eedf/jeito | booking/migrations/0013_auto_20170223_2141.py | Python | mit | 477 |
from ndlib.viz.bokeh.DiffusionViz import DiffusionPlot
import numpy as np
__author__ = 'Giulio Rossetti'
__license__ = "BSD-2-Clause"
__email__ = "[email protected]"
class DiffusionPrevalence(DiffusionPlot):
    """Prevalence plot: per-iteration deltas of node counts for each status."""

    def __init__(self, model, trends):
        """
        :param model: The model object
        :param trends: The computed simulation trends
        """
        # Name the class explicitly: ``super(self.__class__, self)`` recurses
        # forever as soon as this class is subclassed.
        super(DiffusionPrevalence, self).__init__(model, trends)
        self.ylabel = "#Delta Nodes"
        self.title = "Prevalence"
        self.normalized = False

    def iteration_series(self, percentile=100):
        """Aggregate per-status delta series across simulation executions.

        :param percentile: upper percentile for the confidence band; the
            lower band uses (100 - percentile)
        :return: dict mapping status id -> (top, average, bottom) numpy arrays
        """
        series = {k: [] for k in self.srev.keys()}
        presences = {k: [] for k in self.srev.keys()}

        # Collect one delta array per execution for each status.
        for t in self.trends:
            for st in t:
                for k in t[st]['status_delta']:
                    presences[k].append(np.array(t[st]['status_delta'][k]))

        # Reduce across executions: upper band, mean, lower band.
        for st in presences:
            tp = np.percentile(np.array(presences[st]), percentile, axis=0)
            bp = np.percentile(np.array(presences[st]), 100 - percentile, axis=0)
            av = np.average(np.array(presences[st]), axis=0)
            series[st] = (tp, av, bp)

        return series
from flask import abort, current_app, request
from flask.wrappers import Request
class NotifyRequest(Request):
    """
    A custom Request class, implementing extraction of zipkin headers used to trace request through cloudfoundry
    as described here: https://docs.cloudfoundry.org/concepts/http-routing.html#zipkin-headers
    """
    @property
    def request_id(self):
        # Alias: the "request id" is simply the zipkin trace id.
        return self.trace_id

    @property
    def trace_id(self):
        """
        The "trace id" (in zipkin terms) assigned to this request, if present (None otherwise)
        """
        if not hasattr(self, "_trace_id"):
            # Cache on the instance so repeated accesses don't re-read headers.
            self._trace_id = self._get_header_value(current_app.config['NOTIFY_TRACE_ID_HEADER'])
        return self._trace_id

    @property
    def span_id(self):
        """
        The "span id" (in zipkin terms) set in this request's header, if present (None otherwise)
        """
        if not hasattr(self, "_span_id"):
            # note how we don't generate an id of our own. not being supplied a span id implies that we are running in
            # an environment with no span-id-aware request router, and thus would have no intermediary to prevent the
            # propagation of our span id all the way through all our onwards requests much like trace id. and the point
            # of span id is to assign identifiers to each individual request.
            self._span_id = self._get_header_value(current_app.config['NOTIFY_SPAN_ID_HEADER'])
        return self._span_id

    @property
    def parent_span_id(self):
        """
        The "parent span id" (in zipkin terms) set in this request's header, if present (None otherwise)
        """
        if not hasattr(self, "_parent_span_id"):
            self._parent_span_id = self._get_header_value(current_app.config['NOTIFY_PARENT_SPAN_ID_HEADER'])
        return self._parent_span_id

    def _get_header_value(self, header_name):
        """
        Returns value of the given header
        """
        # Empty header values are treated the same as a missing header: None.
        if header_name in self.headers and self.headers[header_name]:
            return self.headers[header_name]
        return None
class ResponseHeaderMiddleware(object):
    """WSGI middleware echoing zipkin trace/span ids on outgoing responses.

    The ids come from the current flask request (see ``NotifyRequest``) and
    are only appended when the application has not already set them.
    """
    def __init__(self, app, trace_id_header, span_id_header):
        self.app = app
        self.trace_id_header = trace_id_header
        self.span_id_header = span_id_header

    def __call__(self, environ, start_response):
        def rewrite_response_headers(status, headers, exc_info=None):
            lower_existing_header_names = frozenset(name.lower() for name, value in headers)
            # Compare case-insensitively: the configured header names (e.g.
            # "X-B3-TraceId") are mixed-case while the set above is
            # lower-cased, so comparing them un-lowered could never match and
            # the headers were appended even when already present.
            if self.trace_id_header.lower() not in lower_existing_header_names:
                headers.append((self.trace_id_header, str(request.trace_id)))
            if self.span_id_header.lower() not in lower_existing_header_names:
                headers.append((self.span_id_header, str(request.span_id)))
            return start_response(status, headers, exc_info)

        return self.app(environ, rewrite_response_headers)
def init_app(app):
    """Install zipkin tracing support on a flask *app*.

    Sets default header names, swaps in ``NotifyRequest`` as the request
    class and wraps the WSGI app in ``ResponseHeaderMiddleware``.
    """
    for key, default in (
        ("NOTIFY_TRACE_ID_HEADER", "X-B3-TraceId"),
        ("NOTIFY_SPAN_ID_HEADER", "X-B3-SpanId"),
        ("NOTIFY_PARENT_SPAN_ID_HEADER", "X-B3-ParentSpanId"),
    ):
        app.config.setdefault(key, default)

    app.request_class = NotifyRequest
    app.wsgi_app = ResponseHeaderMiddleware(
        app.wsgi_app,
        app.config['NOTIFY_TRACE_ID_HEADER'],
        app.config['NOTIFY_SPAN_ID_HEADER'],
    )
def check_proxy_header_before_request():
    """Flask before_request hook rejecting requests that bypassed the proxy.

    Aborts with 403 when the proxy header secret check fails and
    ``CHECK_PROXY_HEADER`` enforcement is enabled; otherwise the failure is
    ignored.
    """
    secrets = [
        current_app.config.get('ROUTE_SECRET_KEY_1'),
        current_app.config.get('ROUTE_SECRET_KEY_2'),
    ]
    passed, message = _check_proxy_header_secret(request, secrets)

    if not passed and current_app.config.get('CHECK_PROXY_HEADER', False):
        current_app.logger.warning(message)
        abort(403)

    # We need to return None to continue processing the request
    # http://flask.pocoo.org/docs/0.12/api/#flask.Flask.before_request
    return None
def _check_proxy_header_secret(request, secrets, header='X-Custom-Forwarder'):
if header not in request.headers:
return False, "Header missing"
header_secret = request.headers.get(header)
if not header_secret:
return False, "Header exists but is empty"
# if there isn't any non-empty secret configured we fail closed
if not any(secrets):
return False, "Secrets are not configured"
for i, secret in enumerate(secrets):
if header_secret == secret:
return True, "Key used: {}".format(i + 1) # add 1 to make it human-compatible
return False, "Header didn't match any keys"
| alphagov/notifications-utils | notifications_utils/request_helper.py | Python | mit | 4,607 |
#
# Copyright 2015-2019, Institute for Systems Biology
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
class FeatureSearchError(Exception):
    """Root of the feature-search exception hierarchy.

    Instances carry a human-readable ``message`` which is also used as the
    string representation.
    """

    def __init__(self, message):
        self.message = message

    def __str__(self):
        return self.message
# Raised when a caller asks for a datatype that is not registered.
class InvalidDataTypeException(FeatureSearchError):
    """Exception raised when requested data type does not exist"""
# Base class for malformed-query errors (see subclasses below).
class InvalidQueryException(FeatureSearchError):
    """Exception raised when a query for a datatype is invalid"""
# Raised when a query references a search field the datatype does not have.
class InvalidFieldException(InvalidQueryException):
    """Exception raised when the requested search field does not exist for a datatype"""
# Raised when every keyword in the query is empty/blank.
class EmptyQueryException(InvalidQueryException):
    """Exception raised when a query contain only empty keywords"""
# Raised when the underlying datasource (not the query) is at fault.
class BackendException(FeatureSearchError):
    """Exception raised when feature search fails because of backend datasource"""
# Cap on the number of feature matches returned by a search — presumably
# consumed by the search implementations importing this module; not
# referenced within this module itself.
FOUND_FEATURE_LIMIT = 20
| isb-cgc/ISB-CGC-Webapp | bq_data_access/v2/feature_search/common.py | Python | apache-2.0 | 1,407 |
# -*- coding: utf-8 -*-
from rest_framework import permissions
from rest_framework import exceptions
from addons.base.models import BaseAddonSettings
from osf.models import (
AbstractNode,
Contributor,
DraftRegistration,
Institution,
Node,
NodeRelation,
OSFUser,
PreprintService,
PrivateLink,
)
from osf.utils import permissions as osf_permissions
from website.project.metadata.utils import is_prereg_admin
from api.base.utils import get_user_auth, is_deprecated
class ContributorOrPublic(permissions.BasePermission):
    """Read access for public/visible objects; write access for editors."""

    def has_object_permission(self, request, view, obj):
        # Imported locally — presumably to avoid a circular import with this
        # module; confirm before hoisting to module level.
        from api.nodes.views import NodeProvider
        # Addon settings, providers and preprints delegate permission checks
        # to the node that owns them.
        if isinstance(obj, BaseAddonSettings):
            obj = obj.owner
        if isinstance(obj, (NodeProvider, PreprintService)):
            obj = obj.node
        assert isinstance(obj, (AbstractNode, NodeRelation)), 'obj must be an Node, NodeProvider, NodeRelation, PreprintService, or AddonSettings; got {}'.format(obj)
        auth = get_user_auth(request)
        if request.method in permissions.SAFE_METHODS:
            # Reads: allowed when the object is public or visible to auth.
            return obj.is_public or obj.can_view(auth)
        else:
            # Writes: require edit permission.
            return obj.can_edit(auth)
class IsPublic(permissions.BasePermission):
    """Grant access when the node is public or visible to the requester."""

    def has_object_permission(self, request, view, obj):
        assert isinstance(obj, AbstractNode), 'obj must be an Node got {}'.format(obj)
        user_auth = get_user_auth(request)
        return obj.is_public or obj.can_view(user_auth)
class IsAdmin(permissions.BasePermission):
    """Allow access only to users holding ADMIN permission on the node."""

    def has_object_permission(self, request, view, obj):
        assert isinstance(obj, AbstractNode), 'obj must be an Node, got {}'.format(obj)
        user_auth = get_user_auth(request)
        return obj.has_permission(user_auth.user, osf_permissions.ADMIN)
class IsAdminOrReviewer(permissions.BasePermission):
    """
    Prereg admins can update draft registrations.
    """
    def has_object_permission(self, request, view, obj):
        assert isinstance(obj, (AbstractNode, DraftRegistration, PrivateLink)), 'obj must be an Node, Draft Registration, or PrivateLink, got {}'.format(obj)
        auth = get_user_auth(request)
        # Prereg admins get access for everything except DELETE.
        if request.method != 'DELETE' and is_prereg_admin(auth.user):
            return True
        # Everyone else needs ADMIN permission on the object.
        return obj.has_permission(auth.user, osf_permissions.ADMIN)
class AdminOrPublic(permissions.BasePermission):
    """Readable when public or visible; modifiable by node admins only."""

    def has_object_permission(self, request, view, obj):
        assert isinstance(obj, (AbstractNode, OSFUser, Institution, BaseAddonSettings, DraftRegistration, PrivateLink)), 'obj must be an Node, User, Institution, Draft Registration, PrivateLink, or AddonSettings; got {}'.format(obj)
        auth = get_user_auth(request)
        if request.method not in permissions.SAFE_METHODS:
            return obj.has_permission(auth.user, osf_permissions.ADMIN)
        return obj.is_public or obj.can_view(auth)
class ExcludeWithdrawals(permissions.BasePermission):
    """Deny every request targeting a retracted (withdrawn) registration."""

    def has_object_permission(self, request, view, obj):
        if isinstance(obj, Node):
            node = obj
        else:
            # Fall back to resolving the node from the URL kwargs.
            lookup_kwargs = request.parser_context['kwargs']
            node = AbstractNode.load(lookup_kwargs[view.node_lookup_url_kwarg])
        return not node.is_retracted
class ContributorDetailPermissions(permissions.BasePermission):
    """Permissions for contributor detail page."""

    def has_object_permission(self, request, view, obj):
        assert isinstance(obj, (AbstractNode, OSFUser, Contributor)), 'obj must be User, Contributor, or Node, got {}'.format(obj)
        auth = get_user_auth(request)
        context = request.parser_context['kwargs']
        node = AbstractNode.load(context[view.node_lookup_url_kwarg])
        user = OSFUser.load(context['user_id'])
        if request.method in permissions.SAFE_METHODS:
            # Reads: anyone who can see the node.
            return node.is_public or node.can_view(auth)
        elif request.method == 'DELETE':
            # Removal: node admins, or a contributor removing themselves.
            return node.has_permission(auth.user, osf_permissions.ADMIN) or auth.user == user
        else:
            # Updates: node admins only.
            return node.has_permission(auth.user, osf_permissions.ADMIN)
class ContributorOrPublicForPointers(permissions.BasePermission):
    """Permission check for node-link (pointer) detail endpoints."""

    def has_object_permission(self, request, view, obj):
        assert isinstance(obj, (AbstractNode, NodeRelation)), 'obj must be an Node or NodeRelation, got {}'.format(obj)
        auth = get_user_auth(request)
        parent_node = AbstractNode.load(request.parser_context['kwargs']['node_id'])
        pointer_node = NodeRelation.load(request.parser_context['kwargs']['node_link_id']).child
        if request.method in permissions.SAFE_METHODS:
            # Reads: either the pointed-to node is public, or the requester
            # can view both the parent and the pointed-to node.
            has_parent_auth = parent_node.can_view(auth)
            has_pointer_auth = pointer_node.can_view(auth)
            public = pointer_node.is_public
            has_auth = public or (has_parent_auth and has_pointer_auth)
            return has_auth
        else:
            # Writes: edit permission on the parent is sufficient.
            has_auth = parent_node.can_edit(auth)
            return has_auth
class ContributorOrPublicForRelationshipPointers(permissions.BasePermission):
    """Permission check for the node-links relationship endpoint."""

    def has_object_permission(self, request, view, obj):
        assert isinstance(obj, dict)
        auth = get_user_auth(request)
        parent_node = obj['self']

        if request.method in permissions.SAFE_METHODS:
            return parent_node.can_view(auth)
        if request.method == 'DELETE':
            return parent_node.can_edit(auth)

        # POST/PUT/PATCH: must be able to edit the parent, and must be able
        # to view every node being pointed at.
        if not parent_node.can_edit(auth):
            return False

        pointer_nodes = []
        for pointer in request.data.get('data', []):
            node = AbstractNode.load(pointer['id'])
            if not node or node.is_collection:
                raise exceptions.NotFound(detail='Node with id "{}" was not found'.format(pointer['id']))
            pointer_nodes.append(node)

        return all(node.can_view(auth) for node in pointer_nodes)
class RegistrationAndPermissionCheckForPointers(permissions.BasePermission):
    """Validate node-link consistency before reads/deletes on a node link."""

    def has_object_permission(self, request, view, obj):
        kwargs = request.parser_context['kwargs']
        node_link = NodeRelation.load(kwargs['node_link_id'])
        node = AbstractNode.load(kwargs[view.node_lookup_url_kwarg])
        auth = get_user_auth(request)

        # Registrations are immutable: their links cannot be deleted.
        if request.method == 'DELETE' and node.is_registration:
            raise exceptions.MethodNotAllowed(method=request.method)
        # Collections and registrations don't expose link detail views.
        if node.is_collection or node.is_registration:
            raise exceptions.NotFound
        # The link must actually belong to the node in the URL.
        if node != node_link.parent:
            raise exceptions.NotFound
        if request.method == 'DELETE' and not node.can_edit(auth):
            return False
        return True
class WriteOrPublicForRelationshipInstitutions(permissions.BasePermission):
    """Readable when public/visible; modifiable with WRITE permission."""

    def has_object_permission(self, request, view, obj):
        assert isinstance(obj, dict)
        auth = get_user_auth(request)
        node = obj['self']
        if request.method not in permissions.SAFE_METHODS:
            return node.has_permission(auth.user, osf_permissions.WRITE)
        return node.is_public or node.can_view(auth)
class ReadOnlyIfRegistration(permissions.BasePermission):
    """Makes PUT and POST forbidden for registrations."""

    def has_object_permission(self, request, view, obj):
        node = obj
        if not isinstance(node, AbstractNode):
            node = AbstractNode.load(request.parser_context['kwargs'][view.node_lookup_url_kwarg])
        assert isinstance(node, AbstractNode), 'obj must be an Node'
        if not node.is_registration:
            return True
        # Registrations only allow safe (read) methods.
        return request.method in permissions.SAFE_METHODS
class ShowIfVersion(permissions.BasePermission):
    """Hide an endpoint (404) for API versions outside a supported range."""

    def __init__(self, min_version, max_version, deprecated_message):
        super(ShowIfVersion, self).__init__()
        self.min_version = min_version
        self.max_version = max_version
        self.deprecated_message = deprecated_message

    def has_object_permission(self, request, view, obj):
        deprecated = is_deprecated(request.version, self.min_version, self.max_version)
        if deprecated:
            raise exceptions.NotFound(detail=self.deprecated_message)
        return True
class NodeLinksShowIfVersion(ShowIfVersion):
    """Node links are only exposed in API version 2.0."""

    def __init__(self):
        super(NodeLinksShowIfVersion, self).__init__(
            min_version='2.0',
            max_version='2.0',
            deprecated_message='This feature is deprecated as of version 2.1',
        )
| icereval/osf.io | api/nodes/permissions.py | Python | apache-2.0 | 8,682 |
"""Config flow for Kodi integration."""
import logging
from pykodi import CannotConnectError, InvalidAuthError, Kodi, get_kodi_connection
import voluptuous as vol
from homeassistant import config_entries, core, exceptions
from homeassistant.const import (
CONF_HOST,
CONF_NAME,
CONF_PASSWORD,
CONF_PORT,
CONF_SSL,
CONF_TIMEOUT,
CONF_USERNAME,
)
from homeassistant.core import callback
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.typing import DiscoveryInfoType, Optional
from .const import (
CONF_WS_PORT,
DEFAULT_PORT,
DEFAULT_SSL,
DEFAULT_TIMEOUT,
DEFAULT_WS_PORT,
)
from .const import DOMAIN # pylint:disable=unused-import
_LOGGER = logging.getLogger(__name__)
async def validate_http(hass: core.HomeAssistant, data):
    """Validate the user input allows us to connect over HTTP.

    Raises CannotConnect / InvalidAuth on failure.
    """
    session = async_get_clientsession(hass)
    _LOGGER.debug("Connecting to %s:%s over HTTP", data[CONF_HOST], data[CONF_PORT])
    connection = get_kodi_connection(
        data[CONF_HOST],
        data[CONF_PORT],
        None,
        data.get(CONF_USERNAME),
        data.get(CONF_PASSWORD),
        data.get(CONF_SSL),
        session=session,
    )
    try:
        # A successful ping proves both reachability and (if set) credentials.
        await Kodi(connection).ping()
    except CannotConnectError as error:
        raise CannotConnect from error
    except InvalidAuthError as error:
        raise InvalidAuth from error
async def validate_ws(hass: core.HomeAssistant, data):
    """Validate the user input allows us to connect over WS.

    A missing websocket port is not an error — WS support is optional.
    Raises WSCannotConnect on failure.
    """
    ws_port = data.get(CONF_WS_PORT)
    if not ws_port:
        return

    host = data[CONF_HOST]
    session = async_get_clientsession(hass)
    _LOGGER.debug("Connecting to %s:%s over WebSocket", host, ws_port)
    connection = get_kodi_connection(
        host,
        data[CONF_PORT],
        ws_port,
        data.get(CONF_USERNAME),
        data.get(CONF_PASSWORD),
        data.get(CONF_SSL),
        session=session,
    )
    try:
        await connection.connect()
        if not connection.connected:
            # connect() can return without raising even when it failed.
            _LOGGER.warning("Cannot connect to %s:%s over WebSocket", host, ws_port)
            raise WSCannotConnect()
        await Kodi(connection).ping()
    except CannotConnectError as error:
        raise WSCannotConnect from error
class KodiConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
    """Handle a config flow for Kodi."""

    # Bump VERSION when the stored config-entry schema changes.
    VERSION = 1
    CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_POLL

    def __init__(self):
        """Initialize flow."""
        # Connection parameters collected incrementally across flow steps.
        self._host: Optional[str] = None
        self._port: Optional[int] = DEFAULT_PORT
        self._ws_port: Optional[int] = DEFAULT_WS_PORT
        self._name: Optional[str] = None
        self._username: Optional[str] = None
        self._password: Optional[str] = None
        self._ssl: Optional[bool] = DEFAULT_SSL
        self._discovery_name: Optional[str] = None

    async def async_step_zeroconf(self, discovery_info: DiscoveryInfoType):
        """Handle zeroconf discovery."""
        self._host = discovery_info["host"]
        self._port = int(discovery_info["port"])
        # Strip the trailing ".local." from the mDNS hostname.
        self._name = discovery_info["hostname"][: -len(".local.")]
        uuid = discovery_info["properties"]["uuid"]
        self._discovery_name = discovery_info["name"]

        # Use Kodi's uuid as the unique id; if this device is already
        # configured, update its connection details and abort the flow.
        await self.async_set_unique_id(uuid)
        self._abort_if_unique_id_configured(
            updates={
                CONF_HOST: self._host,
                CONF_PORT: self._port,
                CONF_NAME: self._name,
            }
        )

        try:
            await validate_http(self.hass, self._get_data())
            await validate_ws(self.hass, self._get_data())
        except InvalidAuth:
            # Reachable but credentials required: ask the user for them.
            return await self.async_step_credentials()
        except WSCannotConnect:
            # HTTP fine but websocket failed: ask for the WS port.
            return await self.async_step_ws_port()
        except CannotConnect:
            return self.async_abort(reason="cannot_connect")
        except Exception:  # pylint: disable=broad-except
            _LOGGER.exception("Unexpected exception")
            return self.async_abort(reason="unknown")

        # pylint: disable=no-member # https://github.com/PyCQA/pylint/issues/3167
        self.context.update({"title_placeholders": {CONF_NAME: self._name}})
        return await self.async_step_discovery_confirm()

    async def async_step_discovery_confirm(self, user_input=None):
        """Handle user-confirmation of discovered node."""
        if user_input is None:
            # First pass: show the confirmation form.
            return self.async_show_form(
                step_id="discovery_confirm",
                description_placeholders={"name": self._name},
            )
        return self._create_entry()

    async def async_step_user(self, user_input=None):
        """Handle the initial step."""
        errors = {}

        if user_input is not None:
            self._host = user_input[CONF_HOST]
            self._port = user_input[CONF_PORT]
            self._ssl = user_input[CONF_SSL]

            try:
                await validate_http(self.hass, self._get_data())
                await validate_ws(self.hass, self._get_data())
            except InvalidAuth:
                return await self.async_step_credentials()
            except WSCannotConnect:
                return await self.async_step_ws_port()
            except CannotConnect:
                errors["base"] = "cannot_connect"
            except Exception:  # pylint: disable=broad-except
                _LOGGER.exception("Unexpected exception")
                errors["base"] = "unknown"
            else:
                return self._create_entry()

        return self._show_user_form(errors)

    async def async_step_credentials(self, user_input=None):
        """Handle username and password input."""
        errors = {}

        if user_input is not None:
            self._username = user_input.get(CONF_USERNAME)
            self._password = user_input.get(CONF_PASSWORD)

            try:
                await validate_http(self.hass, self._get_data())
                await validate_ws(self.hass, self._get_data())
            except InvalidAuth:
                # Wrong credentials: re-show the form with an error.
                errors["base"] = "invalid_auth"
            except WSCannotConnect:
                return await self.async_step_ws_port()
            except CannotConnect:
                errors["base"] = "cannot_connect"
            except Exception:  # pylint: disable=broad-except
                _LOGGER.exception("Unexpected exception")
                errors["base"] = "unknown"
            else:
                return self._create_entry()

        return self._show_credentials_form(errors)

    async def async_step_ws_port(self, user_input=None):
        """Handle websocket port of discovered node."""
        errors = {}

        if user_input is not None:
            self._ws_port = user_input.get(CONF_WS_PORT)

            try:
                await validate_ws(self.hass, self._get_data())
            except WSCannotConnect:
                errors["base"] = "cannot_connect"
            except Exception:  # pylint: disable=broad-except
                _LOGGER.exception("Unexpected exception")
                errors["base"] = "unknown"
            else:
                return self._create_entry()

        return self._show_ws_port_form(errors)

    async def async_step_import(self, data):
        """Handle import from YAML."""
        reason = None
        try:
            await validate_http(self.hass, data)
            await validate_ws(self.hass, data)
        except InvalidAuth:
            _LOGGER.exception("Invalid Kodi credentials")
            reason = "invalid_auth"
        except CannotConnect:
            _LOGGER.exception("Cannot connect to Kodi")
            reason = "cannot_connect"
        except Exception:  # pylint: disable=broad-except
            _LOGGER.exception("Unexpected exception")
            reason = "unknown"
        else:
            # YAML import bypasses the interactive steps entirely.
            return self.async_create_entry(title=data[CONF_NAME], data=data)

        return self.async_abort(reason=reason)

    @callback
    def _show_credentials_form(self, errors=None):
        # Pre-fill with whatever was entered previously, if anything.
        schema = vol.Schema(
            {
                vol.Optional(
                    CONF_USERNAME, description={"suggested_value": self._username}
                ): str,
                vol.Optional(
                    CONF_PASSWORD, description={"suggested_value": self._password}
                ): str,
            }
        )

        return self.async_show_form(
            step_id="credentials", data_schema=schema, errors=errors or {}
        )

    @callback
    def _show_user_form(self, errors=None):
        default_port = self._port or DEFAULT_PORT
        default_ssl = self._ssl or DEFAULT_SSL
        schema = vol.Schema(
            {
                vol.Required(CONF_HOST, default=self._host): str,
                vol.Required(CONF_PORT, default=default_port): int,
                vol.Required(CONF_SSL, default=default_ssl): bool,
            }
        )

        return self.async_show_form(
            step_id="user", data_schema=schema, errors=errors or {}
        )

    @callback
    def _show_ws_port_form(self, errors=None):
        suggestion = self._ws_port or DEFAULT_WS_PORT
        schema = vol.Schema(
            {
                vol.Optional(
                    CONF_WS_PORT, description={"suggested_value": suggestion}
                ): int
            }
        )

        return self.async_show_form(
            step_id="ws_port", data_schema=schema, errors=errors or {}
        )

    @callback
    def _create_entry(self):
        # Fall back to the host when no discovery name was provided.
        return self.async_create_entry(
            title=self._name or self._host,
            data=self._get_data(),
        )

    @callback
    def _get_data(self):
        # Snapshot of the collected parameters in config-entry form.
        data = {
            CONF_NAME: self._name,
            CONF_HOST: self._host,
            CONF_PORT: self._port,
            CONF_WS_PORT: self._ws_port,
            CONF_USERNAME: self._username,
            CONF_PASSWORD: self._password,
            CONF_SSL: self._ssl,
            CONF_TIMEOUT: DEFAULT_TIMEOUT,
        }

        return data
# Raised by validate_http when the Kodi HTTP API is unreachable.
class CannotConnect(exceptions.HomeAssistantError):
    """Error to indicate we cannot connect."""
# Raised by validate_http when Kodi rejects the supplied credentials.
class InvalidAuth(exceptions.HomeAssistantError):
    """Error to indicate there is invalid auth."""
# Raised by validate_ws when the Kodi websocket endpoint is unreachable.
class WSCannotConnect(exceptions.HomeAssistantError):
    """Error to indicate we cannot connect to websocket."""
| tchellomello/home-assistant | homeassistant/components/kodi/config_flow.py | Python | apache-2.0 | 10,504 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: sw=4 ts=4 fenc=utf-8
import re
import os
import logging
import StringIO
# URLs are always with / separators
import posixpath as path
import shutil
import time
import cssutils
from cssutils.serialize import CSSSerializer
from pylons import config
from pylons.decorators.cache import beaker_cache
from webhelpers.html.tags import javascript_link as __javascript_link
from webhelpers.html.tags import stylesheet_link as __stylesheet_link
from minwebhelpers.jsmin import JavascriptMinify
__all__ = ['javascript_link', 'stylesheet_link']
log = logging.getLogger(__name__)
beaker_kwargs = dict(key='sources',
expire='never',
type='memory')
def generate_timestamp(timestamp):
    """Return a ``'.<unix-time>'`` cache-busting suffix, or ``''``.

    The suffix is produced only when *timestamp* is truthy.
    """
    if not timestamp:
        return ''
    return '.%d' % int(time.time())
def combine_sources(sources, ext, fs_root, filename=False, timestamp=False):
    """Use utilities to combine two or more files together.

    :param sources: Paths of source files
    :param ext: Type of files
    :param fs_root: Root of file (normally public dir)
    :param filename: Filename of the combined file
    :type sources: string
    :type ext: js or css
    :type fs_root: string
    :type filename: string

    :returns: List of path to minified source
    """
    # Nothing to combine: a single source is returned untouched.
    if len(sources) < 2:
        return sources

    names = list()
    # NOTE(review): Python 2 StringIO module — this file predates Python 3.
    js_buffer = StringIO.StringIO()
    # Output directory: the common URL prefix of all sources.
    base = path.dirname(os.path.commonprefix(sources))

    for source in sources:
        # get a list of all filenames without extensions
        js_file = path.basename(source)
        js_file_name = path.splitext(js_file)[0]
        names.append(js_file_name)

        # build a master file with all contents
        full_source = path.join(fs_root, (source).lstrip('/'))
        f = open(full_source, 'r')
        try:
            js_buffer.write(f.read())
            # Separator guards against files missing a trailing newline.
            js_buffer.write('\n')
        finally:
            f.close()

    # glue a new name and generate path to it
    if filename:
        names = [filename]
    if timestamp:
        # generate_timestamp returns '.<time>'; drop the leading dot since
        # '.'.join below re-inserts it.
        names.append(generate_timestamp(timestamp)[1:])

    fname = '.'.join(names + ['COMBINED', ext])
    fpath = path.join(fs_root, (base).lstrip('/'), fname)

    # write the combined file
    f = open(fpath, 'w')
    try:
        f.write(js_buffer.getvalue())
    finally:
        f.close()

    return [path.join(base, fname)]
def minify_sources(sources, ext, fs_root='', timestamp=False):
    """Use utilities to minify javascript or css.

    :param sources: Paths of source files
    :param ext: Type of files
    :param fs_root: root of file (normally public dir)
    :type sources: string
    :type ext: js or css
    :type fs_root: string

    :returns: List of paths to minified sources
    """
    # NOTE(review): *ext* appears to be a suffix like '.min.js' / '.min.css'
    # ('js'/'css' substring checks below) — confirm against callers; if a
    # caller ever passes the plain source extension the open-for-write below
    # would truncate the source before it is read.
    if 'js' in ext:
        js_minify = JavascriptMinify()

    minified_sources = []

    for source in sources:
        # generate full path to source
        no_ext_source = path.splitext(source)[0]
        full_source = path.join(fs_root, (no_ext_source + ext).lstrip('/'))

        # generate minified source path
        # NOTE(review): the assignment above is immediately overwritten here,
        # so the first full_source computation is dead code.
        full_source = path.join(fs_root, (source).lstrip('/'))
        no_ext_full_source = path.splitext(full_source)[0]
        minified = no_ext_full_source + ext

        f_minified_source = open(minified, 'w')
        try:
            # minify js source (read stream is auto-closed inside)
            if 'js' in ext:
                js_minify.minify(open(full_source, 'r'), f_minified_source)

            # minify css source
            if 'css' in ext:
                sheet = cssutils.parseFile(full_source)
                cssutils.setSerializer(CSSUtilsMinificationSerializer())
                cssutils.ser.prefs.useMinified()
                f_minified_source.write(sheet.cssText)
        finally:
            f_minified_source.close()

        # COMBINED files already carry their timestamp in the name (added by
        # combine_sources), so only standalone files get one appended here.
        if no_ext_source.endswith('COMBINED'):
            minified_sources.append(no_ext_source + ext)
        else:
            minified_sources.append(no_ext_source + generate_timestamp(timestamp) + ext)

    return minified_sources
def base_link(ext, *sources, **options):
    """Base function that glues all logic together.

    It parses options and calls :func:`minify_sources` or :func:`combine_sources`
    if appropriate.

    :param ext: js or css helper
    :param sources: paths to your files
    :param combined: if True combines sources into one file
    :param minified: if True minifies javascript or css files
    :param beaker_kwargs: Beaker options to pass to caching decorators
    :param combined_filename: filename that will be used when combining files
    :param timestamp: append `time.time` timestamp to file, eg. test.js?t=123012343
    :param strip_prefix: prefix to be stripped from `sources` URL list
    :type ext: string
    :type sources: string
    :type combined_filename: keyword arg
    :type combined: keyword arg
    :type minified: keyword arg
    :type beaker_kwargs: dict
    :type timestamp: bool
    :type strip_prefix: string
    :returns: HTML source code

    .. versionadded:: 0.3.1
        `beaker_kwargs` parameter
    .. versionadded:: 0.3.2
        `combined_filename` parameter
    .. versionadded:: 0.3.5
        `timestamp` parameter
    .. versionadded:: 0.3.6
        `strip_prefix` parameter
    """
    filename = options.pop('combined_filename', False)
    combined = options.pop('combined', False)
    minified = options.pop('minified', False)
    timestamp = options.pop('timestamp', False)
    strip_prefix = options.pop('strip_prefix', False)
    beaker_options = options.pop('beaker_kwargs', False)
    fs_root = config.get('pylons.paths').get('static_files')
    stripped_sources = []

    if filename and not combined:
        raise ValueError("combined_filename=True specifies filename for"
                         " combined=True parameter which is not set.")

    if not (config.get('debug', False) or options.get('builtins', False)):
        # Work on a copy of the module-level defaults so per-call
        # overrides never mutate the shared ``beaker_kwargs`` dict and
        # leak into later calls (the original updated it in place).
        cache_kwargs = beaker_kwargs.copy()
        if beaker_options:
            cache_kwargs.update(beaker_options)
        if strip_prefix:
            stripped_sources = [source.replace(strip_prefix, '', 1)
                                for source in sources
                                if source.startswith(strip_prefix)]
        if stripped_sources:
            sources = stripped_sources
        if combined:
            # use beaker_cache decorator to cache the return value
            sources = beaker_cache(**cache_kwargs)\
                (combine_sources)(list(sources), ext, fs_root,
                                  filename, timestamp)
        if minified:
            # use beaker_cache decorator to cache the return value
            sources = beaker_cache(**cache_kwargs)\
                (minify_sources)(list(sources), '.min.' + ext, fs_root, timestamp)
        if stripped_sources:
            # Re-attach the prefix stripped above before rendering links.
            sources = [strip_prefix + source for source in sources]
    if 'js' in ext:
        return __javascript_link(*sources, **options)
    if 'css' in ext:
        return __stylesheet_link(*sources, **options)
def javascript_link(*sources, **options):
    """Shortcut for :func:`base_link` with the ``js`` extension.

    All positional and keyword arguments are forwarded unchanged.
    """
    return base_link('js', *sources, **options)
def stylesheet_link(*sources, **options):
    """Shortcut for :func:`base_link` with the ``css`` extension.

    All positional and keyword arguments are forwarded unchanged.
    """
    return base_link('css', *sources, **options)
class CSSUtilsMinificationSerializer(CSSSerializer):
    """CSSSerializer that additionally shortens numeric and color values.

    Removes the leading ``0`` from ``0.x`` values, drops the unit from
    zero lengths, and collapses ``#rrggbb`` colors to ``#rgb`` where
    possible.
    """
    DOT_ZERO_REGEX = re.compile(r'(?<=[^\d])0(\.\d+)')
    ZERO_PX_REGEX = re.compile(r'([^\d][0])(?:px|em|pt)')

    def do_css_CSSStyleDeclaration(self, style, separator=None):
        """Serialize *style*, shortening its ``color`` value first."""
        try:
            color = style.getPropertyValue('color')
            # The original tested ``color is not u''`` -- an identity
            # comparison with a string literal that is effectively always
            # True; truthiness alone expresses the intent.
            if color:
                color = self.change_colors(color)
                style.setProperty('color', color)
        except Exception:
            # Color shortening is best-effort; fall through to normal
            # serialization on any failure (was a bare ``except:``).
            pass
        output = CSSSerializer.do_css_CSSStyleDeclaration(self, style, separator)
        output = self.ZERO_PX_REGEX.sub(r'\1', output)
        return self.DOT_ZERO_REGEX.sub(r'\1', output)

    def change_colors(self, color):
        """Collapse ``#rrggbb`` to ``#rgb`` when each channel pair repeats."""
        if color.startswith('#') and len(color) == 7:
            if color[1] == color[2] and color[3] == color[4] and color[5] == color[6]:
                color = '#%s%s%s' % (color[1], color[3], color[5])
        return color
| jimf/MinificationWebHelpers | minwebhelpers/__init__.py | Python | bsd-3-clause | 8,370 |
import json
from .auth import tenant
import requests
class RoomClient(object):
    """Minimal client for posting notifications into a HipChat room.

    Auth token and API base URL are derived from the add-on ``tenant``.
    """

    def __init__(self, room_id=None):
        self.room_id = room_id
        self.token = tenant.get_token()
        # capabilities_url points at ".../<something>"; the API root is
        # everything before its last path segment.
        self.base_url = tenant.capabilities_url[0:tenant.capabilities_url.rfind('/')]

    def send_notification(self, message):
        """Post *message* as a room notification.

        :raises AssertionError: if HipChat does not answer 204 No Content.
        """
        resp = requests.post("%s/room/%s/notification?auth_token=%s" % (self.base_url, self.room_id, self.token),
                             headers={'content-type': 'application/json'},
                             data=json.dumps({"message": message}), timeout=10)
        # Explicit raise instead of a bare ``assert`` (which is stripped
        # under ``python -O``); AssertionError is kept so existing
        # callers catching it keep working.
        if resp.status_code != 204:
            raise AssertionError(
                'unexpected HipChat response status: %d' % resp.status_code)
| congocongo/flask-hipchat-addon | flask_hipchat_addon/clients.py | Python | apache-2.0 | 656 |
from nycodex import db
from nycodex.logging import get_logger
from nycodex.scrape import scrape_dataset, scrape_geojson
from nycodex.scrape.exceptions import SocrataError
BASE = "https://data.cityofnewyork.us/api"
logger = get_logger(__name__)
def main():
    """Drain the scrape queue, processing datasets until none remain.

    Each queue row is handled inside its own transaction; Socrata errors
    are logged and the loop moves on, anything else is logged as critical.
    """
    session = db.Session()
    while True:
        # Start every iteration with a usable logger: the original bound
        # ``log`` only inside the ``with`` block, so an exception raised
        # before that point made the except clauses fail with NameError.
        log = logger
        try:
            with db.queue.next_row_to_scrape() as (trans, dataset_id):
                if dataset_id is None:
                    break
                # TODO(alan): Use same transaction connection for this query
                # NOTE(review): ``.first()`` may return None if the row
                # vanished; the unpack would raise and be caught below.
                dataset_type, names, fields, types = session.query(
                    db.Dataset.asset_type, db.Dataset.column_names,
                    db.Dataset.column_sql_names, db.Dataset.column_types
                ).filter(db.Dataset.id == dataset_id).first()  # yapf: disable
                log = logger.bind(
                    dataset_id=dataset_id, dataset_type=dataset_type)
                log.info(f"Scraping dataset {dataset_id}")
                if dataset_type == db.AssetType.DATASET or names:
                    scrape_dataset(trans, dataset_id, names, fields, types)
                elif dataset_type == db.AssetType.MAP:
                    scrape_geojson(trans, dataset_id)
                else:
                    log.warning("Illegal dataset_type")
        except SocrataError as e:
            log.error("Failed to import dataset", exc_info=e)
        except Exception as e:
            # Typo "datset" fixed in the message below.
            log.critical(
                "Failed to import dataset with unknown exception", exc_info=e)


if __name__ == "__main__":
    main()
| alexander-yu/nycodex | scripts/socrata_raw.py | Python | apache-2.0 | 1,584 |
class Common(Exception):
    """Base class for the exception types below.

    Carries a ``status_string`` plus a ``msg`` payload and supports
    tuple-style indexing for backward compatibility: ``exc[0]`` is the
    status string, ``exc[1]`` the message.
    """

    def __init__(self, status_string='', message=''):
        if isinstance(status_string, Exception):
            # Wrapping another exception: adopt its args.  The original
            # unpacked the exception object itself (``*status_string``),
            # which raises TypeError on Python 3 where exceptions are not
            # iterable; ``.args`` is the intent.  ``msg`` is set so
            # ``__str__`` stays usable on wrapped instances.
            self.status_string = ''
            self.msg = ''
            return super(Common, self).__init__(*status_string.args)
        self.status_string = status_string
        self.msg = message

    def __getitem__(self, val):
        # (status_string, msg) tuple-style access.
        return (self.status_string, self.msg)[val]

    def __repr__(self):
        return self.__str__()

    def __str__(self):
        return '{} - {}'.format(self.status_string, self.msg)
# The subclasses below specialize ``Common`` and are mostly named after
# HTTP status reason phrases; none adds behavior of its own -- they only
# give callers distinct types to raise and catch.
class BadRequest(Common):
    pass
class Conflict(Common):
    pass
class Duplicate(Common):
    pass
class Forbidden(Common):
    pass
class InternalServerError(Common):
    pass
class BadGateway(Common):
    pass
class LicenseExceeded(Common):
    pass
class LicenseInvalid(Common):
    pass
class MethodNotAllowed(Common):
    pass
class NoContent(Common):
    # NOTE(review): ``Common`` stores its text in ``msg``; this class
    # attribute is named ``message`` -- confirm the difference is intended.
    message = ''
class NotFound(Common):
    pass
class PaymentRequired(Common):
    pass
class Unauthorized(Common):
    pass
class Unknown(Common):
    pass
class WaitUntilTimeout(Common):
    pass
class UnexpectedAWXState(Common):
    pass
class IsMigrating(Common):
    pass
# Derives from ``Exception`` directly (not ``Common``), so it does not
# carry the status-string/message pair.
class ImportExportError(Exception):
    pass
| GoogleCloudPlatform/sap-deployment-automation | third_party/github.com/ansible/awx/awxkit/awxkit/exceptions.py | Python | apache-2.0 | 1,263 |
# This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
from jug import TaskGenerator
from time import sleep
@TaskGenerator
def double(x):
    """Return twice *x*, pausing four seconds to simulate slow work."""
    sleep(4)
    return x * 2
@TaskGenerator
def add(a, b):
    """Return the sum of *a* and *b* as a jug task."""
    total = a + b
    return total
@TaskGenerator
def print_final_result(oname, value):
    """Write *value* into the file *oname* as a one-line report."""
    report = "Final result: {0}\n".format(value)
    with open(oname, 'w') as output:
        output.write(report)
# Build the task graph: double 2 twice, double 7 twice, add, report.
# (Renamed from ``input``, which shadowed the builtin of the same name.)
value = 2
y = double(value)
z = double(y)
y2 = double(7)
z2 = double(y2)
print_final_result('output.txt', add(z, z2))
| kpespinosa/BuildingMachineLearningSystemsWithPython | ch12/jugfile.py | Python | mit | 651 |
#!/usr/bin/python
# Environment check: print the interpreter version and the versions of
# the scientific-Python packages required by the tutorial linked below.
# Python version
print("http://machinelearningmastery.com/machine-learning-in-python-step-by-step/")
import sys
print('Python: {}'.format(sys.version))
# scipy
import scipy
print('scipy: {}'.format(scipy.__version__))
# numpy
import numpy
print('numpy: {}'.format(numpy.__version__))
# matplotlib
import matplotlib
print('matplotlib: {}'.format(matplotlib.__version__))
# pandas
import pandas
print('pandas: {}'.format(pandas.__version__))
# scikit-learn
import sklearn
print('sklearn: {}'.format(sklearn.__version__))
| yvlasov/ConProbIN | try-ml/check_env.py | Python | mit | 539 |
from string import Template
from robocompdsl.templates.common.templatedict import TemplateDict
STATEMACHINE_SLOT_STR = """
@QtCore.Slot()
def sm_${state_name}(self):
print(\"Error: lack sm_${state_name} in Specificworker\")
sys.exit(-1)
"""
class src_genericworker_py(TemplateDict):
    """Template dict rendering the Python ``genericworker`` source.

    Fills three template slots from the component's optional state
    machine description: the Qt signals, the QState creation/wiring
    code, and the placeholder slot methods.
    """
    def __init__(self, component):
        super(src_genericworker_py, self).__init__()
        self.component = component
        # Each entry is a chunk of generated Python source (a string).
        self['statemachine_signals'] = self.statemachine_signals()
        self['statemachine_states_creation'] = self.statemachine_states_creation()
        self['statemachine_slots_creation'] = self.statemachine_slots_creation()
    # TODO: refactor
    def statemachine_slots_creation(self):
        """Generate one placeholder ``sm_<state>`` slot per state.

        Each generated slot exits with an error, forcing the user to
        override it in SpecificWorker.  Returns '' without a machine.
        """
        result = ""
        statemachine = self.component.statemachine
        if statemachine is not None:
            codVirtuals = ""
            codcompsubclas = ""  # NOTE(review): never written below -- apparently unused
            # Top-level machine: regular, initial, and final states.
            if statemachine['machine']['contents']['states'] is not None:
                for state in statemachine['machine']['contents']['states']:
                    codVirtuals += Template(STATEMACHINE_SLOT_STR).substitute(state_name=state)
            if statemachine['machine']['contents']['initialstate'] is not None:
                codVirtuals += Template(STATEMACHINE_SLOT_STR).substitute(
                    state_name=statemachine['machine']['contents']['initialstate'])
            if statemachine['machine']['contents']['finalstate'] is not None:
                codVirtuals += Template(STATEMACHINE_SLOT_STR).substitute(
                    state_name=statemachine['machine']['contents']['finalstate'])
            # Same three categories for every nested sub-state machine.
            if statemachine['substates'] is not None:
                for substates in statemachine['substates']:
                    if substates['contents']['states'] is not None:
                        for state in substates['contents']['states']:
                            codVirtuals += Template(STATEMACHINE_SLOT_STR).substitute(state_name=state)
                    if substates['contents']['initialstate'] is not None:
                        codVirtuals += Template(STATEMACHINE_SLOT_STR).substitute(
                            state_name=substates['contents']['initialstate'])
                    if substates['contents']['finalstate'] is not None:
                        codVirtuals += Template(STATEMACHINE_SLOT_STR).substitute(
                            state_name=substates['contents']['finalstate'])
            result += "#Slots funtion State Machine\n"
            result += codVirtuals + '\n'
            result += "#-------------------------\n"
        return result
    # TODO: Refactooooor
    def statemachine_states_creation(self):
        """Generate the QStateMachine/QState construction and wiring code.

        Emits, in order: the machine and its QState/QFinalState objects,
        the per-substate QStates, then transitions, entered-signal
        connections, and initial-state assignments.
        """
        result = ""
        statemachine = self.component.statemachine
        if statemachine is not None:
            machine = statemachine['machine']['name']
            code_qstates = ""
            code_parallel_qstate = ""
            code_final_qstate = ""
            code_state_machine = "self." + machine + "= QtCore.QStateMachine()"
            # A state that is the parent of a "parallel" substate block is
            # created with QState.ParallelStates; others are plain QStates.
            if statemachine['machine']['contents']['states'] is not None:
                for state in statemachine['machine']['contents']['states']:
                    aux = "self." + state + "_state = QtCore.QState(self." + machine + ")\n"
                    if statemachine['substates'] is not None:
                        for substates in statemachine['substates']:
                            if state == substates['parent']:
                                if substates['parallel'] == "parallel":
                                    aux = "self." + state + "_state = QtCore.QState(QtCore.QState.ParallelStates, self." + machine + ")\n"
                                    break
                    if "ParallelStates" in aux:
                        code_parallel_qstate += aux
                    else:
                        code_qstates += aux
            if statemachine['machine']['contents']['initialstate'] is not None:
                state = statemachine['machine']['contents']['initialstate']
                aux = "self." + state + "_state = QtCore.QState(self." + machine + ")\n"
                if statemachine['substates'] is not None:
                    for substates in statemachine['substates']:
                        if state == substates['parent']:
                            if substates['parallel'] == "parallel":
                                aux = "self." + state + "_state = QtCore.QState(QtCore.QState.ParallelStates,self." + machine + ")\n"
                                break
                if "ParallelStates" in aux:
                    code_parallel_qstate += aux
                else:
                    code_qstates += aux
            if statemachine['machine']['contents']['finalstate'] is not None:
                state = statemachine['machine']['contents']['finalstate']
                code_final_qstate += "self." + state + "_state = QtCore.QFinalState(self." + machine + ")\n"
            result += "#State Machine\n"
            result += code_state_machine + '\n'
            result += code_qstates + '\n'
            result += code_final_qstate + '\n'
            result += code_parallel_qstate + '\n'
            # Reset the accumulators; they are reused per substate below.
            code_state_machine = ""
            code_qstates = ""
            code_parallel_qstate = ""
            code_final_qstate = ""
            if statemachine['substates'] is not None:
                for substates in statemachine['substates']:
                    if substates['contents']['states'] is not None:
                        for state in substates['contents']['states']:
                            aux = "self." + state + "_state = QtCore.QState(self." + substates[
                                'parent'] + "_state)\n"
                            for sub in statemachine['substates']:
                                if state == sub['parent']:
                                    if sub['parallel'] == "parallel":
                                        aux = "self." + state + "_state = QtCore.QState(QtCore.QState.ParallelStates, self." + \
                                              substates['parent'] + "_state)\n"
                                        break
                            if "ParallelStates" in aux:
                                code_parallel_qstate += aux
                            else:
                                code_qstates += aux
                    if substates['contents']['initialstate'] is not None:
                        aux = "self." + substates['contents'][
                            'initialstate'] + "_state = QtCore.QState(self." + substates['parent'] + "_state)\n"
                        # NOTE(review): the loop below tests and rebuilds
                        # ``aux`` from ``state``, which still holds the last
                        # value of the states loop above -- it looks like the
                        # initial-state name was intended here.  Confirm
                        # before changing.
                        for sub in statemachine['substates']:
                            if state == sub['parent']:
                                if sub['parallel'] == "parallel":
                                    aux = "self." + state + "_state = QtCore.QState(QtCore.QState.ParallelStates, self." + \
                                          substates['parent'] + "_state)\n"
                                    break
                        if "ParallelStates" in aux:
                            code_parallel_qstate += aux
                        else:
                            code_qstates += aux
                    if substates['contents']['finalstate'] is not None:
                        code_final_qstate += "self." + substates['contents'][
                            'finalstate'] + "_state = QtCore.QFinalState(self." + substates['parent'] + "_state)\n"
                    # Flush (and reset) the accumulators once per substate.
                    result += code_state_machine + '\n'
                    result += code_qstates + '\n'
                    result += code_final_qstate + '\n'
                    result += code_parallel_qstate + '\n'
                    code_state_machine = ""
                    code_qstates = ""
                    code_parallel_qstate = ""
                    code_final_qstate = ""
            result += "#------------------\n"
            code_add_transition = ""
            code_add_state = ""
            code_connect = ""
            code_set_initial_state = ""
            # addTransition calls for machine-level and substate transitions.
            if statemachine['machine']['contents']['transitions'] is not None:
                for transi in statemachine['machine']['contents']['transitions']:
                    for dest in transi['dests']:
                        code_add_transition += "self." + transi['src'] + "_state.addTransition(self.t_" + \
                                               transi['src'] + "_to_" + dest + ", self." + dest + "_state)\n"
            if statemachine['substates'] is not None:
                for substates in statemachine['substates']:
                    if substates['contents']['transitions'] is not None:
                        for transi in substates['contents']['transitions']:
                            for dest in transi['dests']:
                                code_add_transition += "self." + transi[
                                    'src'] + "_state.addTransition(self.t_" + transi[
                                    'src'] + "_to_" + dest + ", self." + dest + "_state)\n"
            # Connect every state's entered() signal to its sm_<state> slot.
            if statemachine['machine']['contents']['states'] is not None:
                for state in statemachine['machine']['contents']['states']:
                    code_connect += "self." + state + "_state.entered.connect(self.sm_" + state + ")\n"
            if statemachine['machine']['contents']['initialstate'] is not None:
                state = statemachine['machine']['contents']['initialstate']
                code_set_initial_state += "self." + statemachine['machine'][
                    'name'] + ".setInitialState(self." + state + "_state)\n"
                code_connect += "self." + state + "_state.entered.connect(self.sm_" + state + ")\n"
            if statemachine['machine']['contents']['finalstate'] is not None:
                state = statemachine['machine']['contents']['finalstate']
                code_connect += "self." + state + "_state.entered.connect(self.sm_" + state + ")\n"
            if statemachine['substates'] is not None:
                for substates in statemachine['substates']:
                    if substates['contents']['initialstate'] is not None:
                        state = substates['contents']['initialstate']
                        code_set_initial_state += "self." + substates[
                            'parent'] + "_state.setInitialState(self." + state + "_state)\n"
                        code_connect += "self." + state + "_state.entered.connect(self.sm_" + state + ")\n"
                    if substates['contents']['finalstate'] is not None:
                        state = substates['contents']['finalstate']
                        code_connect += "self." + state + "_state.entered.connect(self.sm_" + state + ")\n"
                    if substates['contents']['states'] is not None:
                        for state in substates['contents']['states']:
                            code_connect += "self." + state + "_state.entered.connect(self.sm_" + state + ")\n"
            # Default machines drive the compute loop from the Qt timer.
            if statemachine['machine']['default']:
                code_connect += "self.timer.timeout.connect(self.t_compute_to_compute)\n"
            result += "#Initialization State machine\n"
            result += code_add_transition + '\n'
            result += code_add_state + '\n'
            result += code_connect + '\n'
            result += code_set_initial_state + '\n'
            result += "#------------------\n"
        return result
    def statemachine_signals(self):
        """Generate one ``t_<src>_to_<dest>`` QtCore.Signal per transition.

        Covers machine-level and substate transitions; returns '' when
        the component declares no state machine.
        """
        result = ""
        statemachine = self.component.statemachine
        if statemachine is not None:
            codsignals = ""
            if statemachine['machine']['contents']['transitions'] is not None:
                for transi in statemachine['machine']['contents']['transitions']:
                    for dest in transi['dests']:
                        codsignals += "t_" + transi['src'] + "_to_" + dest + " = QtCore.Signal()\n"
            if statemachine['substates'] is not None:
                for substates in statemachine['substates']:
                    if substates['contents']['transitions'] is not None:
                        for transi in substates['contents']['transitions']:
                            for dest in transi['dests']:
                                codsignals += "t_" + transi['src'] + "_to_" + dest + " = QtCore.Signal()\n"
            result += "#Signals for State Machine\n"
            result += codsignals + '\n'
            result += "#-------------------------\n"
        return result
| robocomp/robocomp | tools/cli/robocompdsl/robocompdsl/templates/templatePython/plugins/statemachine/functions/src/genericworker_py.py | Python | gpl-3.0 | 12,581 |
import itertools
import sys
import random
# @include
class Name:
    """A (first, last) name pair; equality is by first name only."""

    def __init__(self, first_name, last_name):
        self.first_name = first_name
        self.last_name = last_name

    def __eq__(self, other):
        # Deliberately compares first names only: two Names sharing a
        # first name count as duplicates for this problem.
        return self.first_name == other.first_name

    def __lt__(self, other):
        # Order by first name, breaking ties with the last name.
        if self.first_name != other.first_name:
            return self.first_name < other.first_name
        return self.last_name < other.last_name
    # @exclude

    def __repr__(self):
        return '%s %s' % (self.first_name, self.last_name)
# @include
def eliminate_duplicate(A):
    """Remove duplicates from A in place; A ends up sorted."""
    A.sort()  # Equal elements become adjacent.
    keep = 1
    for idx in range(1, len(A)):
        # Copy each element that differs from the last one kept.
        if A[idx] != A[keep - 1]:
            A[keep] = A[idx]
            keep += 1
    del A[keep:]
# @exclude
def eliminate_duplicate_pythonic(A):
    """In-place dedupe: sort A, then keep one element per equal-run."""
    A.sort()
    # groupby yields the first element of each run as the key.
    A[:] = [key for key, _group in itertools.groupby(A)]
def small_test():
    """Three names, two distinct first names -> two survivors."""
    names = [Name('Foo', 'Bar'), Name('ABC', 'XYZ'), Name('Foo', 'Widget')]
    eliminate_duplicate(names)
    assert len(names) == 2
def main():
    """Randomized cross-check of the two dedupe implementations."""
    small_test()
    for _ in range(1000):
        # Problem size comes from the command line when supplied.
        size = int(sys.argv[1]) if len(sys.argv) == 2 else random.randint(0, 1000)
        data = [
            Name(str(random.randrange(size)), str(random.randrange(size)))
            for _ in range(size)
        ]
        mirror = data.copy()
        eliminate_duplicate(data)
        eliminate_duplicate_pythonic(mirror)
        # No adjacent equals may remain, and both algorithms must agree.
        assert all(x != y for x, y in zip(data, data[1:]))
        assert all(x != y for x, y in zip(mirror, mirror[1:]))
        assert data == mirror


if __name__ == '__main__':
    main()
| meisamhe/GPLshared | Programming/MPI — AMath 483 583, Spring 2013 1.0 documentation_files/eliminate_duplicate.py | Python | gpl-3.0 | 1,689 |
# Author: Jaakko Leppakangas <[email protected]>
#
# License: BSD (3-clause)
import os.path as op
import inspect
from mne.utils import run_tests_if_main
from mne.io import read_raw_nicolet
from mne.io.tests.test_raw import _test_raw_reader
# Absolute path of this source file, resolved at import time.
FILE = inspect.getfile(inspect.currentframe())
# Test fixtures live in a ``data`` directory next to this file.
base_dir = op.join(op.dirname(op.abspath(FILE)), 'data')
fname = op.join(base_dir, 'test_nicolet_raw.data')
def test_data():
    """Smoke-test the Nicolet reader through the common raw-reader checks."""
    reader_kwargs = dict(input_fname=fname, ch_type='eeg', ecg='auto',
                         eog='auto', emg='auto', misc=['PHO'])
    _test_raw_reader(read_raw_nicolet, **reader_kwargs)
# Executes the tests above when this module is run as a script.
run_tests_if_main()
| olafhauk/mne-python | mne/io/nicolet/tests/test_nicolet.py | Python | bsd-3-clause | 632 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """Auto-generated South schema migration for the ``cadastro`` app.

    Relaxes several counter and text columns on Atividade, Extensao and
    Pesquisa to allow NULL; the rollback restores non-null columns with
    default values.
    """
    def forwards(self, orm):
        """Apply the migration: make the listed columns nullable."""
        # Changing field 'Atividade.comissoes'
        db.alter_column('cadastro_atividade', 'comissoes', self.gf('django.db.models.fields.IntegerField')(null=True, max_length=2))
        # Changing field 'Extensao.estudantes_pos'
        db.alter_column('cadastro_extensao', 'estudantes_pos', self.gf('django.db.models.fields.IntegerField')(null=True, max_length=2))
        # Changing field 'Extensao.voluntarios'
        db.alter_column('cadastro_extensao', 'voluntarios', self.gf('django.db.models.fields.IntegerField')(null=True, max_length=2))
        # Changing field 'Extensao.estudantes_graduacao'
        db.alter_column('cadastro_extensao', 'estudantes_graduacao', self.gf('django.db.models.fields.IntegerField')(null=True, max_length=2))
        # Changing field 'Extensao.bolsistas_ppq'
        db.alter_column('cadastro_extensao', 'bolsistas_ppq', self.gf('django.db.models.fields.IntegerField')(null=True, max_length=2))
        # Changing field 'Extensao.bolsistas_pibex'
        db.alter_column('cadastro_extensao', 'bolsistas_pibex', self.gf('django.db.models.fields.IntegerField')(null=True, max_length=2))
        # Changing field 'Extensao.parceria_inter'
        db.alter_column('cadastro_extensao', 'parceria_inter', self.gf('django.db.models.fields.CharField')(null=True, max_length=255))
        # Changing field 'Pesquisa.estudantes_pos'
        db.alter_column('cadastro_pesquisa', 'estudantes_pos', self.gf('django.db.models.fields.IntegerField')(null=True, max_length=2))
        # Changing field 'Pesquisa.voluntarios'
        db.alter_column('cadastro_pesquisa', 'voluntarios', self.gf('django.db.models.fields.IntegerField')(null=True, max_length=2))
        # Changing field 'Pesquisa.estudantes_graduacao'
        db.alter_column('cadastro_pesquisa', 'estudantes_graduacao', self.gf('django.db.models.fields.IntegerField')(null=True, max_length=2))
        # Changing field 'Pesquisa.bolsistas_ppq'
        db.alter_column('cadastro_pesquisa', 'bolsistas_ppq', self.gf('django.db.models.fields.IntegerField')(null=True, max_length=2))
        # Changing field 'Pesquisa.bolsistas_pibic'
        db.alter_column('cadastro_pesquisa', 'bolsistas_pibic', self.gf('django.db.models.fields.IntegerField')(null=True, max_length=2))
        # Changing field 'Pesquisa.parceria_inter'
        db.alter_column('cadastro_pesquisa', 'parceria_inter', self.gf('django.db.models.fields.CharField')(null=True, max_length=255))
    def backwards(self, orm):
        """Revert the migration: restore NOT NULL columns with defaults."""
        # Changing field 'Atividade.comissoes'
        db.alter_column('cadastro_atividade', 'comissoes', self.gf('django.db.models.fields.IntegerField')(default=0))
        # Changing field 'Extensao.estudantes_pos'
        db.alter_column('cadastro_extensao', 'estudantes_pos', self.gf('django.db.models.fields.IntegerField')(max_length=2, default=0))
        # Changing field 'Extensao.voluntarios'
        db.alter_column('cadastro_extensao', 'voluntarios', self.gf('django.db.models.fields.IntegerField')(max_length=2, default=0))
        # Changing field 'Extensao.estudantes_graduacao'
        db.alter_column('cadastro_extensao', 'estudantes_graduacao', self.gf('django.db.models.fields.IntegerField')(max_length=2, default=0))
        # Changing field 'Extensao.bolsistas_ppq'
        db.alter_column('cadastro_extensao', 'bolsistas_ppq', self.gf('django.db.models.fields.IntegerField')(max_length=2, default=0))
        # Changing field 'Extensao.bolsistas_pibex'
        db.alter_column('cadastro_extensao', 'bolsistas_pibex', self.gf('django.db.models.fields.IntegerField')(max_length=2, default=0))
        # Changing field 'Extensao.parceria_inter'
        db.alter_column('cadastro_extensao', 'parceria_inter', self.gf('django.db.models.fields.CharField')(max_length=255, default=''))
        # Changing field 'Pesquisa.estudantes_pos'
        db.alter_column('cadastro_pesquisa', 'estudantes_pos', self.gf('django.db.models.fields.IntegerField')(max_length=2, default=0))
        # Changing field 'Pesquisa.voluntarios'
        db.alter_column('cadastro_pesquisa', 'voluntarios', self.gf('django.db.models.fields.IntegerField')(max_length=2, default=0))
        # Changing field 'Pesquisa.estudantes_graduacao'
        db.alter_column('cadastro_pesquisa', 'estudantes_graduacao', self.gf('django.db.models.fields.IntegerField')(max_length=2, default=0))
        # Changing field 'Pesquisa.bolsistas_ppq'
        db.alter_column('cadastro_pesquisa', 'bolsistas_ppq', self.gf('django.db.models.fields.IntegerField')(max_length=2, default=0))
        # Changing field 'Pesquisa.bolsistas_pibic'
        db.alter_column('cadastro_pesquisa', 'bolsistas_pibic', self.gf('django.db.models.fields.IntegerField')(max_length=2, default=0))
        # Changing field 'Pesquisa.parceria_inter'
        db.alter_column('cadastro_pesquisa', 'parceria_inter', self.gf('django.db.models.fields.CharField')(max_length=255, default=''))
    # Frozen ORM snapshot used by South while running this migration.
    models = {
        'cadastro.atividade': {
            'Meta': {'object_name': 'Atividade'},
            'afastamento': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'cargo': ('django.db.models.fields.CharField', [], {'blank': 'True', 'max_length': '100'}),
            'comissoes': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'max_length': '2', 'blank': 'True'}),
            'disciplinas': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cadastro.Disciplina']", 'symmetrical': 'False'}),
            'docente': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cadastro.Docente']"}),
            'extensao': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cadastro.Extensao']", 'symmetrical': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'pesquisa': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cadastro.Pesquisa']", 'symmetrical': 'False'}),
            'semestre': ('django.db.models.fields.CharField', [], {'max_length': '6'})
        },
        'cadastro.disciplina': {
            'Meta': {'object_name': 'Disciplina'},
            'cargahoraria': ('django.db.models.fields.IntegerField', [], {'max_length': '3'}),
            'codigo': ('django.db.models.fields.CharField', [], {'max_length': '7'}),
            'estudantes': ('django.db.models.fields.IntegerField', [], {'max_length': '3'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'multicampia': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'nivel': ('django.db.models.fields.CharField', [], {'max_length': '11'}),
            'tipo': ('django.db.models.fields.CharField', [], {'max_length': '11'})
        },
        'cadastro.docente': {
            'Meta': {'object_name': 'Docente'},
            'centro': ('django.db.models.fields.CharField', [], {'max_length': '11'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'matricula': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '7'}),
            'nome': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'})
        },
        'cadastro.extensao': {
            'Meta': {'object_name': 'Extensao'},
            'area': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
            'bolsistas_pibex': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'max_length': '2', 'blank': 'True'}),
            'bolsistas_ppq': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'max_length': '2', 'blank': 'True'}),
            'estudantes_graduacao': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'max_length': '2', 'blank': 'True'}),
            'estudantes_pos': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'max_length': '2', 'blank': 'True'}),
            'financiador': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'parceria': ('django.db.models.fields.CharField', [], {'blank': 'True', 'max_length': '255'}),
            'parceria_inter': ('django.db.models.fields.CharField', [], {'null': 'True', 'max_length': '255', 'blank': 'True'}),
            'voluntarios': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'max_length': '2', 'blank': 'True'})
        },
        'cadastro.pesquisa': {
            'Meta': {'object_name': 'Pesquisa'},
            'area': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
            'bolsistas_pibic': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'max_length': '2', 'blank': 'True'}),
            'bolsistas_ppq': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'max_length': '2', 'blank': 'True'}),
            'estudantes_graduacao': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'max_length': '2', 'blank': 'True'}),
            'estudantes_pos': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'max_length': '2', 'blank': 'True'}),
            'financiador': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'parceria': ('django.db.models.fields.CharField', [], {'blank': 'True', 'max_length': '255'}),
            'parceria_inter': ('django.db.models.fields.CharField', [], {'null': 'True', 'max_length': '255', 'blank': 'True'}),
            'voluntarios': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'max_length': '2', 'blank': 'True'})
        }
    }
    complete_apps = ['cadastro']
#-*- coding=utf-8 -*-
import urllib
from django.shortcuts import render, get_object_or_404
from kitabu.search.available import Clusters as ClustersSearcher, Subjects as SubjectSearcher, FindPeriod
from kitabu.search.reservations import SingleSubjectManagerReservationSearch
from lanes.models import Lane, LaneReservation
from models import Pool
from forms import PoolReservationsSearchForm, ClusterSearchForm, PeriodSearchForm
# Module-level searcher shared by the views below: finds Pool clusters
# whose Lane subjects satisfy an availability query.
cluster_searcher = ClustersSearcher(subject_model=Lane, cluster_model=Pool)
def index(request):
    """List pools matching the availability search.

    On a valid search the matching clusters are looked up and a query
    string is prepared so lane searches can reuse the same criteria.
    """
    form = ClusterSearchForm(request.GET or None)
    if not form.is_valid():
        results = None
        lane_search_query_string = ''
    else:
        cleaned = form.cleaned_data
        results = cluster_searcher.search(**cleaned)
        lane_search_query_string = urllib.urlencode({
            'start': cleaned['start'],
            'end': cleaned['end'],
            'required_size': cleaned['required_size']})
    context = {
        'form': form,
        'available_pools': results,
        'lane_search_query_string': lane_search_query_string,
    }
    return render(request, 'pools/index.html', context)
def show(request, pool_id):
    """Render the detail page for one pool (404 on unknown id)."""
    pool = get_object_or_404(Pool, pk=pool_id)
    context = {'pool': pool}
    return render(request, 'pools/show.html', context)
def availability(request, pool_id):
    """Show which lanes of a pool are available for the requested period."""
    pool = get_object_or_404(Pool, pk=pool_id)
    form = ClusterSearchForm(request.GET or None)
    searcher = SubjectSearcher(Lane, pool.lanes)
    if form.is_valid():
        available_lanes = searcher.search(**form.cleaned_data)
    else:
        available_lanes = []
    context = {
        'pool': pool,
        'form': form,
        'available_lanes': available_lanes
    }
    return render(request, 'pools/availability.html', context)
def reservations(request, pool_id):
    """List reservations for a pool, optionally filtered by the form."""
    pool = get_object_or_404(Pool, pk=pool_id)
    if request.GET:
        form = PoolReservationsSearchForm(request.GET)
    else:
        form = PoolReservationsSearchForm()
    searcher = SingleSubjectManagerReservationSearch(
        reservation_model=LaneReservation, subject_manager=pool.subjects)
    if form.is_valid():
        reservations = searcher.search(**form.cleaned_data)
    elif not request.GET:
        # No filter submitted: show every reservation in this pool.
        reservations = LaneReservation.objects.filter(subject__cluster=pool)
    else:
        # A filter was submitted but is invalid: show nothing.
        reservations = []
    context = {
        'reservations': reservations,
        'pool': pool,
        'form': form,
    }
    return render(request, 'pools/reservations.html', context)
def available_periods(request, pool_id):
    """For every lane in the pool, list the free periods matching the form."""
    pool = get_object_or_404(Pool, pk=pool_id)
    if request.GET:
        form = PeriodSearchForm(request.GET)
    else:
        form = PeriodSearchForm()
    lanes_and_periods = []
    if form.is_valid():
        for subject in pool.subjects.all():
            periods = FindPeriod().search(subject=subject, **form.cleaned_data)
            lanes_and_periods.append((subject, periods))
    # Keep only lanes that actually have free periods.
    lanes_and_periods = [pair for pair in lanes_and_periods if pair[1]]
    context = {
        'lanes_and_periods': lanes_and_periods,
        'pool': pool,
        'form': form,
    }
    return render(request, 'pools/periods.html', context)
| mbad/kitabu | example_project/pools/views.py | Python | mit | 3,155 |
#!/usr/local/bin/python3
# CGI page for the "MVC" lecture: prints the HTTP header followed by an
# HTML body assembled from the shared header/footer templates.
import cgi
# NOTE(review): CGI needs a blank line between the headers and the body;
# none is printed here explicitly, so presumably the 'header' template
# supplies it -- confirm before changing.
print("Content-type: text/html")
from lib import helpers
print(helpers.render('header', {'title': "MVC"}))
print('''
<p>MVC</p>
<p>This is the article for MVC</p>
''')
print(helpers.render('footer'))
| Secretmapper/updevcamp-session-2-dist | form/cgi-bin/lectures/mvc-4/mvc.py | Python | mit | 246 |
# Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core, workspace
from caffe2.python.test_util import TestCase
class TestAtomicOps(TestCase):
    def test_atomic_ops(self):
        """
        Test that both countdown and checksum are updated atomically by having
        countdown count from 20k to 0 from the parallel workers and updating
        the checksum with each value fetched. If operations are truly atomic,
        each value from 1 to 20k should be fetched exactly once from the
        countdown, and fed exactly once to the checksum, such that at the end
        checksum must contain the exact value of sum[i=0..20000](i).
        """
        init_net = core.Net('init')
        # One mutex per shared blob so the two counters are guarded independently.
        mutex_countdown = init_net.CreateMutex([])
        mutex_checksum = init_net.CreateMutex([])
        countdown = init_net.ConstantFill([], shape=[], value=20000,
                                          dtype=core.DataType.INT32)
        checksum = init_net.ConstantFill(
            [], shape=[], value=0, dtype=core.DataType.INT32)
        minus_one = init_net.ConstantFill(
            [], shape=[], value=-1, dtype=core.DataType.INT32)
        steps = []
        # 100 workers x 200 iterations = 20000 decrements in total.
        for i in range(0, 100):
            net = core.Net('net:%d' % i)
            # Atomically decrement the countdown, remembering the value read.
            _, fetched_count = net.AtomicFetchAdd(
                [mutex_countdown, countdown, minus_one],
                [countdown, 'fetched_count:%d' % i])
            # Atomically fold the fetched value into the checksum.
            net.AtomicFetchAdd(
                [mutex_checksum, checksum, fetched_count],
                [checksum, 'not_used'])
            steps.append(
                core.execution_step('worker:%d' % i, net, num_iter=200))
        super_step = core.execution_step(
            'parent', steps, concurrent_substeps=True)
        plan = core.Plan('plan')
        plan.AddStep(core.execution_step('init', init_net))
        plan.AddStep(super_step)
        workspace.RunPlan(plan)
        # checksum = sum[i=1..20000](i) = 20000 * 20001 / 2 = 200010000
        # assertEqual: assertEquals is a deprecated alias removed in Py3.12.
        self.assertEqual(workspace.FetchBlob(checksum), 200010000)
if __name__ == "__main__":
import unittest
unittest.main()
| davinwang/caffe2 | caffe2/python/operator_test/atomic_ops_test.py | Python | apache-2.0 | 2,877 |
import sys
import os
import hashlib
# Emit a JSON fragment describing every translator script together with its
# download URL, MD5 digest and size, for the ScreenTranslator update feed.
download_url = "https://raw.githubusercontent.com/OneMoreGres/ScreenTranslator/master"
if len(sys.argv) > 1:
    download_url = sys.argv[1]
subdir = 'translators'
# NOTE(review): basename(__file__) yields just the file name, so this resolves
# relative to the current working directory, not the script location —
# dirname(__file__) was probably intended. Left unchanged; confirm with callers.
root = os.path.abspath(os.path.basename(__file__) + '/../../..')
translators_dir = root + '/' + subdir
files = {}
# Fix: close the scandir iterator deterministically (it holds an OS handle).
with os.scandir(translators_dir) as it:
    for f in it:
        if not f.is_file() or not f.name.endswith('.js'):
            continue
        # Key on the stem up to the first dot so "name.min.js" maps to "name".
        name = f.name[:f.name.index('.')]
        files[name] = f.name
print(',"translators":{')
comma = ''
for name in sorted(files.keys()):
    file_name = files[name]
    print('  {}"{}": {{"files":['.format(comma, name))
    comma = ','
    md5 = hashlib.md5()
    size = 0
    with open(os.path.join(translators_dir, file_name), 'rb') as f:
        data = f.read()
        size = len(data)
        md5.update(data)
    print('    {{"url":"{}/{}", "path":"$translators$/{}", "md5":"{}", "size":{}}}'.format(
        download_url, subdir + '/' + file_name, file_name,
        md5.hexdigest(), size))
    print('  ]}')
print('}')
| OneMoreGres/ScreenTranslator | share/updates/translators.py | Python | mit | 1,055 |
"""
Euler's Method:
Solving Ordinary Differential Eqautions numerically
Theory:
Euler's method is a technique used in estimating the value of the gradient of
a field. Like most numerical approximations, the Euler's method has some element
of Taylor series approximation in it.
Given and ODE of the form
dy/dt + 2*y = 2 - e^(-4*t)
initial conditions:
y(0) = 0; t = 0
Find y(5)?
Using Euler's method:
Redefine the equation so that the derivative is the subject of the formula
and it has to be firt order derivative.
In the case you are dealing with higher order derivative, you define new variables.
Doing this ensures that you are only approximating systems of coupled first ODE
Redefine:
dy/dt = -2*y + 2 - e^(-4*t)
Once redefined, you are ready to use the Euler's method solve the problem
Algorithm:
Requirements:
Initial conditions
Step size
Euler formula:
y_n+1 = y_n + h * f(y_n,t_n)
"""
import math as m
import time
import matplotlib.pyplot as plt
class Euler:
    """
    Implementation of the Euler method for 1st order ODE
    """
    _fptr = None # function pointer evaluating dy/dt = f(y, t)
    _step_size = 0.1 # step size h
    _max_iter = 500 # maximum iterations before the solver quits
    _tol = 0.0001 # tolerance on |t_0 - t| used as the stopping test
    """
    Object constructor:
        fptr: function to evaluate derivative
        step_size: time increments
        max_iter: maximum iterations to perform
        tol: Error tolerance between the actual
    """
    def __init__(self, fptr, step_size = 0.1, max_iter = 500, tol = 0.0001):
        self._fptr = fptr
        self._step_size = step_size
        self._max_iter = max_iter
        self._tol = tol
    """
    mutators
    """
    def set_step_size(self,h):
        # h: new integration step size
        self._step_size = h
    def set_tol(self,tol):
        # tol: new stopping tolerance
        self._tol = tol
    def set_maxIter(self,mI):
        # mI: new iteration cap
        self._max_iter = mI;
    def execute(self, y_0, t_0, t):
        """
        method to perform the calculation
        supports only first order ODE

        y_0: initial value y(t_0); t_0: initial time; t: target time.
        Returns (y_at_last_step, iterations_performed).
        Side effect: plots the trajectory for ~2 seconds via matplotlib.
        """
        soln = [y_0]
        lIter = 0
        fig = None  # NOTE(review): never assigned again — appears unused
        for i in range(self._max_iter):
            """" Eulers formula """
            # y_{n+1} = y_n + h * f(y_n, t_n)
            y_n = y_0 + self._step_size * self._fptr(y_0,t_0)
            # Stop once the current time is within tolerance of the target.
            if(abs(t_0 - t) <= self._tol):
                break;
            soln.append(y_n)
            y_0 = y_n
            t_0 += self._step_size
            lIter = i+1
        """ Not final! Still not what we want!"""
        # Show the solution points briefly, then close the figure.
        plt.plot([i for i in range(i+2)],soln,"ro")
        plt.show(block = False)
        time.sleep(2)
        plt.close();
        return soln[-1],lIter
"""
testing
"""
def equation(y, t):
    """Right-hand side of the test ODE: dy/dt = -2*y + 2 - e^(-4*t)."""
    forcing = 2 - m.exp(-4 * t)
    return forcing - 2 * y
# Demo run: integrate dy/dt = -2*y + 2 - e^(-4*t) from t=0 with y(0)=1.
y_0 = 1
t_0 = 0
t = 5
# NOTE(review): h and mIter are defined but never passed to the solver below
# (set_step_size / set_maxIter are not called) — confirm whether intended.
h = 0.001
mIter = 50000
tol = 0.1
solver = Euler(equation)
solver.set_tol(tol)
soln = solver.execute(y_0, t_0, t)
##for i in soln:
##    print(i)
| CleverChuk/Numerical-Analysis | Python/EulerMethod.py | Python | mit | 3,187 |
# Copyright DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import atexit
from collections import deque
from functools import partial
import logging
import os
import socket
import sys
from threading import Lock, Thread, Event
import time
import weakref
import sys
from six.moves import range
try:
from weakref import WeakSet
except ImportError:
from cassandra.util import WeakSet # noqa
import asyncore
try:
import ssl
except ImportError:
ssl = None # NOQA
from cassandra.connection import Connection, ConnectionShutdown, NONBLOCKING, Timer, TimerManager
log = logging.getLogger(__name__)
_dispatcher_map = {}
def _cleanup(loop_weakref):
try:
loop = loop_weakref()
except ReferenceError:
return
loop._cleanup()
class WaitableTimer(Timer):
    """A Timer whose completion (or failure) another thread can block on.

    `finish` flags an Event when the timer fires or its callback raises;
    `wait` blocks on that Event and re-raises any captured exception in the
    waiting thread.
    """

    def __init__(self, timeout, callback):
        Timer.__init__(self, timeout, callback)
        self.callback = callback
        self.event = Event()
        self.final_exception = None

    def finish(self, time_now):
        """Run the underlying timer; set the event once it completes or fails."""
        try:
            if not Timer.finish(self, time_now):
                return False
        except Exception as exc:
            # Remember the failure so wait() can surface it to the waiter.
            self.final_exception = exc
            self.event.set()
            return True
        self.event.set()
        return True

    def wait(self, timeout=None):
        """Block until the timer has finished; re-raise any captured error."""
        self.event.wait(timeout)
        if self.final_exception:
            raise self.final_exception
class _PipeWrapper(object):
def __init__(self, fd):
self.fd = fd
def fileno(self):
return self.fd
def close(self):
os.close(self.fd)
def getsockopt(self, level, optname, buflen=None):
# act like an unerrored socket for the asyncore error handling
if level == socket.SOL_SOCKET and optname == socket.SO_ERROR and not buflen:
return 0
raise NotImplementedError()
class _AsyncoreDispatcher(asyncore.dispatcher):
    """Base dispatcher used only to wake the asyncore loop from other threads.

    Subclasses implement notify_loop()/handle_read() over some readable fd
    (pipe or UDP socket); writing to it makes the blocked select/poll return.
    """

    def __init__(self, socket):
        asyncore.dispatcher.__init__(self, map=_dispatcher_map)
        # inject after to avoid base class validation
        self.set_socket(socket)
        # True while a wake-up byte is pending and not yet drained.
        self._notified = False

    def writable(self):
        # Wake-up channel is read-only; never report writable.
        return False

    def validate(self):
        """Sanity-check that notify/drain round-trips through one loop pass."""
        assert not self._notified
        self.notify_loop()
        assert self._notified
        self.loop(0.1)
        assert not self._notified

    def loop(self, timeout):
        # Run exactly one pass of the shared asyncore loop.
        asyncore.loop(timeout=timeout, use_poll=True, map=_dispatcher_map, count=1)
class _AsyncorePipeDispatcher(_AsyncoreDispatcher):
    """Wake-up dispatcher backed by an OS pipe (default on non-Windows)."""

    def __init__(self):
        self.read_fd, self.write_fd = os.pipe()
        _AsyncoreDispatcher.__init__(self, _PipeWrapper(self.read_fd))

    def writable(self):
        return False

    def handle_read(self):
        # Drain all pending wake-up bytes; a short read (<4096) means empty.
        while len(os.read(self.read_fd, 4096)) == 4096:
            pass
        self._notified = False

    def notify_loop(self):
        # Coalesce notifications: only write one byte per pending wake-up.
        if not self._notified:
            self._notified = True
            os.write(self.write_fd, b'x')
class _AsyncoreUDPDispatcher(_AsyncoreDispatcher):
    """
    Experimental alternate dispatcher for avoiding busy wait in the asyncore loop. It is not used by default because
    it relies on local port binding.
    Port scanning is not implemented, so multiple clients on one host will collide. This address would need to be set per
    instance, or this could be specialized to scan until an address is found.
    To use::
        from cassandra.io.asyncorereactor import _AsyncoreUDPDispatcher, AsyncoreLoop
        AsyncoreLoop._loop_dispatch_class = _AsyncoreUDPDispatcher
    """
    bind_address = ('localhost', 10000)

    def __init__(self):
        # Non-blocking UDP socket bound to a fixed local port; sending an
        # empty datagram to ourselves wakes the loop.
        self._socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self._socket.bind(self.bind_address)
        self._socket.setblocking(0)
        _AsyncoreDispatcher.__init__(self, self._socket)

    def handle_read(self):
        # Drain all queued wake-up datagrams.
        try:
            d = self._socket.recvfrom(1)
            while d and d[1]:
                d = self._socket.recvfrom(1)
        except socket.error as e:
            pass
        self._notified = False

    def notify_loop(self):
        if not self._notified:
            self._notified = True
            self._socket.sendto(b'', self.bind_address)

    def loop(self, timeout):
        # use_poll=False here, unlike the pipe dispatcher.
        asyncore.loop(timeout=timeout, use_poll=False, map=_dispatcher_map, count=1)
class _BusyWaitDispatcher(object):
    """Fallback dispatcher that polls instead of being woken up.

    Used on Windows or when the pipe dispatcher fails validation; trades a
    small fixed latency (max_write_latency) for portability.
    """
    max_write_latency = 0.001
    """
    Timeout pushed down to asyncore select/poll. Dictates the amount of time it will sleep before coming back to check
    if anything is writable.
    """

    def notify_loop(self):
        # Nothing to do: the loop wakes itself every max_write_latency.
        pass

    def loop(self, timeout):
        # Avoid a hot spin when there are no registered dispatchers at all.
        if not _dispatcher_map:
            time.sleep(0.005)
        # Split the requested timeout into short polling passes.
        count = timeout // self.max_write_latency
        asyncore.loop(timeout=self.max_write_latency, use_poll=True, map=_dispatcher_map, count=count)

    def validate(self):
        pass

    def close(self):
        pass
class AsyncoreLoop(object):
    """Singleton-style event loop running asyncore plus driver timers on a
    dedicated daemon thread."""

    timer_resolution = 0.1  # used as the max interval to be in the io loop before returning to service timeouts

    # Pipe-based wake-up where pipes work; busy-wait polling on Windows.
    _loop_dispatch_class = _AsyncorePipeDispatcher if os.name != 'nt' else _BusyWaitDispatcher

    def __init__(self):
        # Remembered so a fork can be detected later (see initialize_reactor).
        self._pid = os.getpid()
        self._loop_lock = Lock()
        self._started = False
        self._shutdown = False
        self._thread = None
        self._timers = TimerManager()
        try:
            dispatcher = self._loop_dispatch_class()
            dispatcher.validate()
            log.debug("Validated loop dispatch with %s", self._loop_dispatch_class)
        except Exception:
            # NOTE(review): if the constructor itself raises, `dispatcher` is
            # unbound and dispatcher.close() raises NameError — confirm.
            log.exception("Failed validating loop dispatch with %s. Using busy wait execution instead.", self._loop_dispatch_class)
            dispatcher.close()
            dispatcher = _BusyWaitDispatcher()
        self._loop_dispatcher = dispatcher
        # Weakref so the atexit registration does not pin this loop alive.
        atexit.register(partial(_cleanup, weakref.ref(self)))

    def maybe_start(self):
        """Start the loop thread exactly once, without blocking on the lock."""
        should_start = False
        did_acquire = False
        try:
            did_acquire = self._loop_lock.acquire(False)
            if did_acquire and not self._started:
                self._started = True
                should_start = True
        finally:
            if did_acquire:
                self._loop_lock.release()
        if should_start:
            self._thread = Thread(target=self._run_loop, name="cassandra_driver_event_loop")
            self._thread.daemon = True
            self._thread.start()

    def wake_loop(self):
        # Interrupt a blocked select/poll so new work is noticed promptly.
        self._loop_dispatcher.notify_loop()

    def _run_loop(self):
        """Thread body: alternate asyncore passes with timer servicing."""
        log.debug("Starting asyncore event loop")
        with self._loop_lock:
            while not self._shutdown:
                try:
                    self._loop_dispatcher.loop(self.timer_resolution)
                    self._timers.service_timeouts()
                except Exception:
                    log.debug("Asyncore event loop stopped unexepectedly", exc_info=True)
                    break
            self._started = False
        log.debug("Asyncore event loop ended")

    def add_timer(self, timer):
        self._timers.add_timer(timer)
        # This function is called from a different thread than the event loop
        # thread, so for this call to be thread safe, we must wake up the loop
        # in case it's stuck at a select
        self.wake_loop()

    def _cleanup(self):
        """Shut the loop down, close all connections, fire remaining timers."""
        global _dispatcher_map
        self._shutdown = True
        if not self._thread:
            return
        log.debug("Waiting for event loop thread to join...")
        self._thread.join(timeout=1.0)
        if self._thread.is_alive():
            log.warning(
                "Event loop thread could not be joined, so shutdown may not be clean. "
                "Please call Cluster.shutdown() to avoid this.")
        log.debug("Event loop thread was joined")
        # Ensure all connections are closed and in-flight requests cancelled
        for conn in tuple(_dispatcher_map.values()):
            if conn is not self._loop_dispatcher:
                conn.close()
        self._timers.service_timeouts()
        # Once all the connections are closed, close the dispatcher
        self._loop_dispatcher.close()
        log.debug("Dispatchers were closed")
class AsyncoreConnection(Connection, asyncore.dispatcher):
    """
    An implementation of :class:`.Connection` that uses the ``asyncore``
    module in the Python standard library for its event loop.
    """
    _loop = None

    # Readiness flags consulted by asyncore's select/poll wrapper below.
    _writable = False
    _readable = False

    @classmethod
    def initialize_reactor(cls):
        """Create the shared AsyncoreLoop, rebuilding it after a fork."""
        if not cls._loop:
            cls._loop = AsyncoreLoop()
        else:
            current_pid = os.getpid()
            if cls._loop._pid != current_pid:
                log.debug("Detected fork, clearing and reinitializing reactor state")
                cls.handle_fork()
                cls._loop = AsyncoreLoop()

    @classmethod
    def handle_fork(cls):
        # Drop inherited dispatcher state; fds are not valid in the child.
        global _dispatcher_map
        _dispatcher_map = {}
        if cls._loop:
            cls._loop._cleanup()
            cls._loop = None

    @classmethod
    def create_timer(cls, timeout, callback):
        timer = Timer(timeout, callback)
        cls._loop.add_timer(timer)
        return timer

    def __init__(self, *args, **kwargs):
        Connection.__init__(self, *args, **kwargs)
        # Outgoing byte chunks, guarded by deque_lock (written from callers,
        # drained by the loop thread in handle_write).
        self.deque = deque()
        self.deque_lock = Lock()
        self._connect_socket()
        # start the event loop if needed
        self._loop.maybe_start()
        # Register with asyncore *on the loop thread* via a zero-delay timer,
        # then block here until that registration has happened.
        init_handler = WaitableTimer(
            timeout=0,
            callback=partial(asyncore.dispatcher.__init__,
                             self, self._socket, _dispatcher_map)
        )
        self._loop.add_timer(init_handler)
        init_handler.wait(kwargs["connect_timeout"])
        self._writable = True
        self._readable = True
        self._send_options_message()

    def close(self):
        with self.lock:
            if self.is_closed:
                return
            self.is_closed = True
        log.debug("Closing connection (%s) to %s", id(self), self.host)
        self._writable = False
        self._readable = False
        # We don't have to wait for this to be closed, we can just schedule it
        self.create_timer(0, partial(asyncore.dispatcher.close, self))
        log.debug("Closed socket to %s", self.host)
        if not self.is_defunct:
            self.error_all_requests(
                ConnectionShutdown("Connection to %s was closed" % self.host))
            #This happens when the connection is shutdown while waiting for the ReadyMessage
            if not self.connected_event.is_set():
                self.last_error = ConnectionShutdown("Connection to %s was closed" % self.host)
            # don't leave in-progress operations hanging
            self.connected_event.set()

    def handle_error(self):
        self.defunct(sys.exc_info()[1])

    def handle_close(self):
        log.debug("Connection %s closed by server", self)
        self.close()

    def handle_write(self):
        """Drain queued chunks to the socket until it would block or is empty."""
        while True:
            with self.deque_lock:
                try:
                    next_msg = self.deque.popleft()
                except IndexError:
                    self._writable = False
                    return
            try:
                sent = self.send(next_msg)
                self._readable = True
            except socket.error as err:
                if (err.args[0] in NONBLOCKING):
                    # Socket buffer full: requeue the chunk and try later.
                    with self.deque_lock:
                        self.deque.appendleft(next_msg)
                else:
                    self.defunct(err)
                return
            else:
                if sent < len(next_msg):
                    # Partial write: requeue the unsent tail, preserving order.
                    with self.deque_lock:
                        self.deque.appendleft(next_msg[sent:])
                    if sent == 0:
                        return

    def handle_read(self):
        """Pull all available bytes into the io buffer, then parse frames."""
        try:
            while True:
                buf = self.recv(self.in_buffer_size)
                self._iobuf.write(buf)
                # A short read means the kernel buffer is drained.
                if len(buf) < self.in_buffer_size:
                    break
        except socket.error as err:
            if ssl and isinstance(err, ssl.SSLError):
                if err.args[0] not in (ssl.SSL_ERROR_WANT_READ, ssl.SSL_ERROR_WANT_WRITE):
                    self.defunct(err)
                    return
            elif err.args[0] not in NONBLOCKING:
                self.defunct(err)
                return
        if self._iobuf.tell():
            self.process_io_buffer()
            # With no outstanding requests there is nothing more to read for
            # (control connections keep listening for pushed events).
            if not self._requests and not self.is_control_connection:
                self._readable = False

    def push(self, data):
        """Queue outgoing data (split into out_buffer_size chunks) and wake the loop."""
        sabs = self.out_buffer_size
        if len(data) > sabs:
            chunks = []
            for i in range(0, len(data), sabs):
                chunks.append(data[i:i + sabs])
        else:
            chunks = [data]
        with self.deque_lock:
            self.deque.extend(chunks)
            self._writable = True
        self._loop.wake_loop()

    def writable(self):
        return self._writable

    def readable(self):
        return self._readable or (self.is_control_connection and not (self.is_defunct or self.is_closed))
| thelastpickle/python-driver | cassandra/io/asyncorereactor.py | Python | apache-2.0 | 13,797 |
#!/usr/bin/env python
# -*- coding: utf8 -*-
import warnings
from agate import Table
from agate.testcase import AgateTestCase
from agate.data_types import *
class TestRename(AgateTestCase):
    """Tests for agate Table.rename: row/column renaming, dict-based partial
    renames, immutability of the source table, and slugification options."""

    def setUp(self):
        # Small 3x3 fixture shared by every test.
        self.rows = (
            (1, 4, 'a'),
            (2, 3, 'b'),
            (None, 2, 'c')
        )
        self.number_type = Number()
        self.text_type = Text()
        self.column_names = ['one', 'two', 'three']
        self.column_types = [self.number_type, self.number_type, self.text_type]

    def test_rename_row_names(self):
        # Assign fresh row names; original table must stay unnamed.
        table = Table(self.rows, self.column_names, self.column_types)
        table2 = table.rename(row_names=['a', 'b', 'c'])
        self.assertSequenceEqual(table2.row_names, ['a', 'b', 'c'])
        self.assertSequenceEqual(table2.column_names, self.column_names)
        self.assertIs(table.row_names, None)
        self.assertSequenceEqual(table.column_names, self.column_names)

    def test_rename_row_names_dict(self):
        # Dict form renames only the listed rows.
        table = Table(self.rows, self.column_names, self.column_types, row_names=['a', 'b', 'c'])
        table2 = table.rename(row_names={'b': 'd'})
        self.assertSequenceEqual(table2.row_names, ['a', 'd', 'c'])
        self.assertSequenceEqual(table2.column_names, self.column_names)
        self.assertSequenceEqual(table.row_names, ['a', 'b', 'c'])
        self.assertSequenceEqual(table.column_names, self.column_names)

    def test_rename_column_names(self):
        table = Table(self.rows, self.column_names, self.column_types)
        table2 = table.rename(column_names=['d', 'e', 'f'])
        self.assertIs(table2.row_names, None)
        self.assertSequenceEqual(table2.column_names, ['d', 'e', 'f'])
        self.assertIs(table.row_names, None)
        self.assertSequenceEqual(table.column_names, self.column_names)

    def test_rename_column_names_dict(self):
        # Dict form renames only the listed columns.
        table = Table(self.rows, self.column_names, self.column_types)
        table2 = table.rename(column_names={'two': 'second'})
        self.assertIs(table2.row_names, None)
        self.assertSequenceEqual(table2.column_names, ['one', 'second', 'three'])
        self.assertIs(table.row_names, None)
        self.assertSequenceEqual(table.column_names, self.column_names)

    def test_rename_column_names_renames_row_values(self):
        table = Table(self.rows, self.column_names, self.column_types)
        new_column_names = ['d', 'e', 'f']
        table2 = table.rename(column_names=new_column_names)
        self.assertColumnNames(table2, new_column_names)

    def test_rename_slug_columns(self) if False else None  # (placeholder removed)
| onyxfish/agate | tests/test_table/test_rename.py | Python | mit | 4,715 |
# -*- coding: utf8 -*-
import logging
class Logger:
    """Thin wrapper around :mod:`logging` that defers ``str.format``-style
    interpolation until the level is known to be enabled.

    Messages use ``{placeholder}`` syntax and are formatted with the keyword
    arguments via ``format_map``, so disabled levels pay no formatting cost.
    """
    __slots__ = ['_logger']

    def __init__(self, name):
        self._logger = logging.getLogger(name)

    def _emit(self, level, log_fn, msg, kwargs):
        # Format lazily: only interpolate when the level is actually enabled.
        if self._logger.isEnabledFor(level):
            log_fn(msg.format_map(kwargs))

    def debug(self, msg, **kwargs):
        self._emit(logging.DEBUG, self._logger.debug, msg, kwargs)

    def info(self, msg, **kwargs):
        self._emit(logging.INFO, self._logger.info, msg, kwargs)

    def warning(self, msg, **kwargs):
        self._emit(logging.WARNING, self._logger.warning, msg, kwargs)

    def error(self, msg, **kwargs):
        self._emit(logging.ERROR, self._logger.error, msg, kwargs)

    def critical(self, msg, **kwargs):
        self._emit(logging.CRITICAL, self._logger.critical, msg, kwargs)
import sqlite3
from bdkanvasuser import BDKanvasUser
from bdkvalidator import BDKValidator
class BDKSqliteValidator(BDKValidator):
    """SQLite-backed credential validator for BDKanvas users."""

    def __init__(self):
        super(BDKSqliteValidator, self).__init__()

    def validate_user(self, username, password):
        """Look up the given credentials in BDKanvas.db.

        Returns a BDKanvasUser (username, max sessions) on a match, or None
        when no user with those credentials exists.
        """
        result = None
        conn = sqlite3.connect('BDKanvas.db')
        try:
            c = conn.cursor()
            # Parameterized query: safe against SQL injection.
            c.execute('''SELECT username, maxsessions FROM users WHERE username = ? AND password = ?''', (username, password))
            udata = c.fetchone()
            if udata is not None:
                result = BDKanvasUser(udata[0], udata[1])
        finally:
            # Fix: previously the connection leaked if execute/fetch raised.
            conn.close()
        return result
| malkiah/bdkanvas | server/bdksqlitevalidator.py | Python | gpl-3.0 | 676 |
# -*- coding: utf-8 -*-
import logging
import pprint
import re
from cssselect import HTMLTranslator
import lxml.html
from lxml.html.clean import Cleaner
logger = logging.getLogger(__name__)
class Parser():
    """Default search-engine result page (SERP) parser.

    Subclasses supply the CSS selector dictionaries per search type; this base
    class drives the lxml parsing and extraction machinery.
    """
    no_results_selector = []
    effective_query_selector = []
    num_results_search_selectors = []
    page_number_selectors = []
    search_types = []

    def __init__(self, config=None, html='', query=''):
        """Create new Parser instance and parse all information.

        Args:
            config: optional parser configuration mapping.
            html: raw HTML of the SERP; if non-empty, parsing runs immediately.
            query: the search query that produced the page.
        """
        # Fix: the old signature used a mutable default (config={}), so all
        # instances created without a config shared one dict object.
        self.config = config if config is not None else {}
        self.searchtype = self.config.get('search_type', 'normal')
        assert self.searchtype in self.search_types, 'search type "{}" is not supported in {}'.format(
            self.searchtype,
            self.__class__.__name__
        )
        self.query = query
        self.html = html
        self.dom = None
        self.search_results = {}
        self.num_results_for_query = ''
        self.num_results = 0
        self.effective_query = ''
        self.page_number = -1
        self.no_results = False
        self.related_keywords = {}
        # to be set by the implementing sub classes
        self.search_engine = ''
        # short alias because we use it so extensively
        self.css_to_xpath = HTMLTranslator().css_to_xpath
        if self.html:
            self.parse()

    def parse(self, html=None):
        """Public function to start parsing the search engine results.

        Args:
            html: The raw html data to extract the SERP entries from.
        """
        if html:
            self.html = html.encode('utf-8').decode('utf-8')
        # lets do the actual parsing
        self._parse()
        # Apply subclass specific behaviour after parsing has happened
        # This is needed because different parsers need to clean/modify
        # the parsed data uniquely.
        self.after_parsing()

    def _parse_lxml(self, cleaner=None):
        """Parse self.html into self.dom with lxml.

        NOTE(review): the cleaner is applied to the *previous* self.dom and
        its result is then overwritten by document_fromstring, so the cleaned
        output is discarded — confirm whether cleaning was meant to run after
        parsing instead.
        """
        try:
            parser = lxml.html.HTMLParser(encoding='utf-8')
            if cleaner:
                self.dom = cleaner.clean_html(self.dom)
            self.dom = lxml.html.document_fromstring(self.html, parser=parser)
            self.dom.resolve_base_href()
        except Exception as e:
            # maybe wrong encoding
            logger.error(e)

    def _parse(self, cleaner=None):
        """Internal parse the dom according to the provided css selectors.

        Raises: Exception
            if no css selectors for the searchtype could be found.
        """
        self.num_results = 0
        self._parse_lxml(cleaner)
        # try to parse the number of results.
        attr_name = self.searchtype + '_search_selectors'
        selector_dict = getattr(self, attr_name, None)
        # get the appropriate css selectors for the num_results for the keyword
        num_results_selector = getattr(
            self,
            'num_results_search_selectors',
            None
        )
        self.num_results_for_query = self.first_match(
            num_results_selector,
            self.dom
        )
        if not self.num_results_for_query:
            logger.debug(''''{}: Cannot parse num_results from serp page
                         with selectors {}
                         '''.format(self.__class__.__name__, num_results_selector))
        # get the current page we are at.
        try:
            self.page_number = int(
                self.first_match(self.page_number_selectors, self.dom)
            )
        except ValueError:
            self.page_number = -1
        # let's see if the search query was shitty (no results for that query)
        self.effective_query = self.first_match(
            self.effective_query_selector,
            self.dom
        )
        if self.effective_query:
            logger.debug('''{}: There was no search hit for the search query.
                         Search engine used {} instead.
                         '''.format(self.__class__.__name__, self.effective_query))
        else:
            self.effective_query = ''
        # the element that notifies the user about no results.
        self.no_results_text = self.first_match(
            self.no_results_selector,
            self.dom
        )
        # get the stuff that is of interest in SERP pages.
        if not selector_dict and not isinstance(selector_dict, dict):
            raise Exception('''There is no such attribute: {}. No selectors found
                            '''.format(attr_name))
        for result_type, selector_class in selector_dict.items():
            self.search_results[result_type] = []
            self.related_keywords[result_type] = []
            for _, selectors in selector_class.items():
                if 'result_container' in selectors and selectors['result_container']:
                    css = '{container} {result_container}'.format(**selectors)
                else:
                    css = selectors['container']
                results = self.dom.xpath(
                    self.css_to_xpath(css)
                )
                # Everything except the container keys is a field to extract.
                to_extract = set(selectors.keys()) - {'container', 'result_container'}
                selectors_to_use = {key: selectors[key] for key in to_extract if key in selectors.keys()}
                for index, result in enumerate(results):
                    # Let's add primitive support for CSS3 pseudo selectors
                    serp_result = {}
                    # key are for example 'link', 'snippet', 'visible-url', ...
                    # selector is the selector to grab these items
                    for key, selector in selectors_to_use.items():
                        serp_result[key] = self.advanced_css(selector, result)
                    serp_result['rank'] = index + 1
                    # only add items that have not None links.
                    # Avoid duplicates. Detect them by the link.
                    # If statement below: Lazy evaluation.
                    # The more probable case first.
                    if 'link' in serp_result and serp_result['link'] and \
                            not [e for e in self.search_results[result_type]
                                 if e['link'] == serp_result['link']]:
                        self.search_results[result_type].append(serp_result)
                        self.num_results += 1
                    if 'keyword' in serp_result and serp_result['keyword']:
                        self.related_keywords[result_type].append(serp_result)

    def advanced_css(self, selector, element):
        """Evaluate the :text and ::attr(attr-name) additionally.

        Args:
            selector: A css selector.
            element: The element on which to apply the selector.

        Returns:
            The targeted element.
        """
        value = None
        if selector.endswith('::text'):
            try:
                value = element.xpath(self.css_to_xpath(selector.split('::')[0]))[0].text_content()
            except IndexError:
                pass
        else:
            match = re.search(r'::attr\((?P<attr>.*)\)$', selector)
            if match:
                attr = match.group('attr')
                try:
                    value = element.xpath(self.css_to_xpath(selector.split('::')[0]))[0].get(attr)
                except IndexError:
                    pass
            else:
                try:
                    value = element.xpath(self.css_to_xpath(selector))[0].text_content()
                except IndexError:
                    pass
        return value

    def first_match(self, selectors, element):
        """Get the first match.

        Args:
            selectors: The selectors to test for a match.
            element: The element on which to apply the selectors.

        Returns:
            The very first match or False if all selectors didn't match anything.
        """
        assert isinstance(selectors, list), 'selectors must be of type list!'
        for selector in selectors:
            if selector:
                try:
                    match = self.advanced_css(selector, element=element)
                    if match:
                        return match
                except IndexError:
                    pass
        return False

    def after_parsing(self):
        """Subclass specific behaviour after parsing happened.

        Override in subclass to add search engine specific behaviour.
        Commonly used to clean the results.
        """

    def __str__(self):
        """Return a nicely formatted overview of the results."""
        return pprint.pformat(self.search_results)

    @property
    def cleaned_html(self):
        # Try to parse the provided HTML string using lxml
        # strip all unnecessary information to save space
        cleaner = Cleaner()
        cleaner.scripts = True
        cleaner.javascript = True
        cleaner.comments = True
        cleaner.style = True
        self.dom = cleaner.clean_html(self.dom)
        assert len(self.dom), 'The html needs to be parsed to get the cleaned html'
        return lxml.html.tostring(self.dom)

    def iter_serp_items(self):
        """Yields the key and index of any item in the serp results that has a link value"""
        for key, value in self.search_results.items():
            if isinstance(value, list):
                for i, item in enumerate(value):
                    if isinstance(item, dict) and item['link']:
                        yield (key, i)
| ecoron/SerpScrap | scrapcore/parser/parser.py | Python | mit | 9,569 |
import json
from django.http import HttpResponse
from catmaid.control.stack import get_stack_info_response
from catmaid.control.dvid.models import DVIDProjectStacks
def stack_info(request, project_id=None, stack_id=None):
    """Return metadata for one DVID-backed stack as a JSON response.

    Depending on the tile_source_type, the information comes either from the
    database or straight from the tile server.
    """
    project_stacks = DVIDProjectStacks()
    stack = project_stacks.get_stack(project_id, stack_id)
    project = project_stacks.get_project(project_id)
    # Broken sections keyed by Z index, all flagged with 1.
    broken_slices = dict.fromkeys(stack.broken_slices, 1)
    info = get_stack_info_response(
        project, stack, project_stacks, {}, broken_slices)
    payload = json.dumps(info, sort_keys=True, indent=4)
    return HttpResponse(payload, content_type="application/json")
def stacks(request, project_id=None):
    """Return the project's stack menu information as JSON.

    DVID projects currently expose no menu entries, so this is always an
    empty JSON object.
    """
    empty_menu = json.dumps({}, sort_keys=True, indent=4)
    return HttpResponse(empty_menu, content_type="application/json")
| catsop/CATMAID | django/applications/catmaid/control/dvid/stack.py | Python | gpl-3.0 | 1,049 |
#!/usr/bin/env python
#
# Instrument vmlinux STP, LDP and BLR instructions to protect RA and restrict jumpping
#
# Depends on:
# 1) a modified gcc that
# - outputs 2 nop's before stp x29, x30 instructions
# - outputs 1 nop after ldp x29, x30 instructions
# 2) a kernel built using gcc command-line options to prevent allocation of registers x16, x17, and x18
#
# Copyright (c) 2015 Samsung Electronics Co., Ltd.
# Authors: James Gleeson <[email protected]>
# Wenbo Shen <[email protected]>
# Set to False to have vmlinux instrumented during kernel build.
# OFF = True
OFF = False
# If true, skip instrumenting functions in hyperdrive/resource/debug/skip.txt
# (for debugging).
# SKIP_INSTRUMENTING = True
SKIP_INSTRUMENTING = False
import argparse
import subprocess
from common import pr, log
import common
import os
import fnmatch
import re
import cPickle
import sys
import mmap
import contextlib
import binascii
import pprint
import multiprocessing
import math
import tempfile
import pipes
import StringIO
import textwrap
import bisect
import itertools
# shutil has issues being re-imported during ipython's dreload.
# Just use subprocess instead.
#
# import shutil
import debug
# NOTE: must be kept in sync with macro definitions in init/hyperdrive.S
RRX_DEFAULT = 16
RRK_DEFAULT = 17
RRS_DEFAULT = 18
# Get some useful paths based on location of this file.
sys.path.append(os.path.dirname(__file__))
RKP_CFP = os.path.abspath(os.path.join( os.path.dirname(__file__), "..", ".."))
RESOURCE = os.path.abspath(os.path.join( RKP_CFP, "resource"))
DEBUG = os.path.abspath(os.path.join( RKP_CFP, "resource", "debug"))
SCRIPTS = os.path.abspath(os.path.join( RKP_CFP, "scripts"))
SRCTREE = os.path.abspath(os.path.join( RKP_CFP, ".."))
def bitmask(start_bit, end_bit):
    """Return a mask with bits start_bit..end_bit (inclusive) set.

    e.g. start_bit = 8, end_bit = 2
      0b111111111 (2**(start_bit + 1) - 1)
      0b111111100 (2**(start_bit + 1) - 1) ^ (2**end_bit - 1)

    (The original docstring example showed only 8 ones for start_bit=8;
    2**(8+1) - 1 is a 9-bit value.)
    """
    return (2**(start_bit + 1) - 1) ^ (2**end_bit - 1)
def _zbits(x):
"""
Return the number of low bits that are zero.
e.g.
>>> _zbits(0b11000000000000000000000000000000)
30
"""
n = 0
while (x & 0x1) != 0x1 and x > 0:
x >>= 1
n += 1
return n
# Use CROSS_COMPILE provided to kernel make command.
# Sink for subprocess output we don't care about (module lifetime).
devnull = open('/dev/null', 'w')
def which(executable):
    """Return True iff `executable` is found on PATH (via the `which` command)."""
    proc = subprocess.Popen(['which', executable], stdout=devnull)
    return proc.wait() == 0
def guess_cross_compile(order):
    """
    Return the first toolchain prefix in `order` whose gcc is on PATH.

    Returns None when no candidate prefix has a usable gcc.
    """
    for candidate in order:
        if which("{prefix}gcc".format(prefix=candidate)):
            return candidate
    return None
# Toolchain prefix: honor $CROSS_COMPILE if set, else probe PATH for known prefixes.
CROSS_COMPILE_DEFAULT = os.environ.get('CROSS_COMPILE', guess_cross_compile(order=[
    "aarch64-linux-android-",
    "aarch64-linux-gnu-",
    ]))
KERNEL_ARCH = 'arm64'
# Fail fast at import time if no aarch64 cross toolchain was found.
assert CROSS_COMPILE_DEFAULT is not None
def _cross(execname):
    """
    Return the cross-prefixed tool name for `execname`
    (e.g. 'objdump' -> 'aarch64-linux-gnu-objdump').

    Raises RuntimeError if the tool is not on PATH.
    """
    tool = "{CROSS_COMPILE_DEFAULT}{execname}".format(
        CROSS_COMPILE_DEFAULT=CROSS_COMPILE_DEFAULT, execname=execname)
    if which(tool):
        return tool
    raise RuntimeError("Couldn't find {execname} on PATH\nPATH = {PATH}".format(
        execname=execname, PATH=os.environ['PATH']))
OBJDUMP = _cross("objdump")
READELF = _cross("readelf")
NM = _cross("nm")
#GDB = _cross("gdb")
GDB = None
hex_re = r'(?:[a-f0-9]+)'
virt_addr_re = re.compile(r'^(?P<virt_addr>{hex_re}):\s+'.format(hex_re=hex_re))
BL_OFFSET_MASK = 0x3ffffff
BLR_AND_RET_RN_MASK = 0b1111100000
ADRP_IMMLO_MASK = 0b1100000000000000000000000000000
ADRP_IMMHI_MASK = 0b111111111111111111100000
ADRP_RD_MASK = 0b11111
# _zbits
ADRP_IMMLO_ZBITS = _zbits(ADRP_IMMLO_MASK)
ADRP_IMMHI_ZBITS = _zbits(ADRP_IMMHI_MASK)
ADRP_RD_ZBITS = _zbits(ADRP_RD_MASK)
STP_OPC_MASK = 0b11000000000000000000000000000000
STP_ADDRESSING_MODE_MASK = 0b00111111110000000000000000000000
STP_IMM7_MASK = 0b1111111000000000000000
STP_RT2_MASK = 0b111110000000000
STP_RN_MASK = 0b1111100000
STP_RT_MASK = 0b11111
# http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.dui0489c/CIHGJHED.html
# op{type}{cond} Rt, [Rn {, #offset}] ; immediate offset
# op{type}{cond} Rt, [Rn, #offset]! ; pre-indexed
# op{type}{cond} Rt, [Rn], #offset ; post-indexed
# opD{cond} Rt, Rt2, [Rn {, #offset}] ; immediate offset, doubleword
# opD{cond} Rt, Rt2, [Rn, #offset]! ; pre-indexed, doubleword
STP_PRE_INDEXED = 0b10100110
STP_POST_INDEXED = 0b10100010
STP_IMM_OFFSET = 0b10100100
# opD{cond} Rt, Rt2, [Rn], #offset ; post-indexed, doubleword
# 00 for 32bit, 10 for 64bit
OPC_32 = 0b00
OPC_64 = 0b10
# Bits don't encode preindexed for str_imm_unsigned_preindex_insn
STR_IMM_OFFSET = 'preindexed'
STR_SIGN_UNSIGNED = 0b01
STR_SIZE_32 = 0b10
STR_SIZE_64 = 0b11
ADDIM_OPCODE_BITS = 0b10001
ADDIMM_SF_BIT_64 = 0b1
ADDIMM_SF_BIT_32 = 0b0
ADDIMM_OPCODE_MASK = bitmask(28, 24)
ADDIMM_SHIFT_MASK = bitmask(23, 22)
ADDIMM_IMM_MASK = bitmask(21, 10)
ADDIMM_RN_MASK = bitmask(9, 5)
ADDIMM_RD_MASK = bitmask(4, 0)
ADDIMM_SF_MASK = bitmask(31, 31)
DEFAULT_SKIP_ASM_FILE = os.path.join(RESOURCE, "skip_asm.txt")
DEFAULT_SKIP_BR = os.path.join(RESOURCE, "skip_br.txt")
DEFAULT_SKIP_SAVE_LR_TO_STACK = os.path.join(RESOURCE, "skip_save_lr_to_stack.txt")
DEFAULT_SKIP_STP_FILE = os.path.join(RESOURCE, "skip_stp.txt")
DEFAULT_SKIP_FILE = os.path.join(DEBUG, "skip.txt")
DEFAULT_THREADS = multiprocessing.cpu_count()
#DEFAULT_THREADS = 1
BYTES_PER_INSN = 4
REG_FP = 29
REG_LR = 30
REG_SP = 31
REG_XZR = 31
# x0, ..., x7
NUM_ARG_REGS = 8
def skip_func(func, skip, skip_asm):
    """
    Decide whether instrumentation should skip function `func`.

    Skips the jopp springboards themselves (never instrument the
    springboard), functions from skipped asm files, and functions
    explicitly skipped for debugging.
    """
    if func.startswith('jopp_springboard_'):
        return True
    return func in skip_asm or func in skip
def parse_last_insn(objdump, i, n):
    # Parsed dicts for the n objdump lines preceding line i (oldest first);
    # non-instruction lines yield None.  NOTE: xrange is Python 2 only.
    return [objdump.parse_insn(j) if objdump.is_insn(j) else None for j in xrange(i-n, i)]
def instrument(objdump, func=None, skip=set([]), skip_stp=set([]), skip_asm=set([]), skip_blr=set([]), threads=1):
    """
    Apply the ROPP/JOPP binary rewrites to vmlinux.instr through `objdump`.

    objdump -- a parsed, opened Objdump instance; all reads/writes go through it.
    func -- instrument only this function (otherwise the whole image).
    skip / skip_stp / skip_asm / skip_blr -- function-name sets exempted from,
        respectively: all instrumentation, stp-prologue rewriting, asm-file
        instrumentation, and blr springboarding.
    threads -- worker process count when instrumenting the whole image.

    Replace:
        BLR rX
    With:
        BL jopp_springboard_blr_rX
    Replace
        <assembled_c_function>:
            nop
            stp x29, x30, [sp,#-<frame>]!
            (insns)
            mov x29, sp
    With:
        <assembled_c_function>:
            eor RRX, x30, RRK
            stp x29, RRX, [sp,#-<frame>]!
            mov x29, sp
            (insns)
    Replace
        <assembled_c_function>:
            nop
            stp x29, x30, [sp,#<offset>]
            (insns)
            add x29, sp, #<offset>
    With:
        <assembled_c_function>:
            eor RRX, x30, RRK
            stp x29, RRX, [sp,#<offset>]
            add x29, sp, offset
            (insns)
    Replace:
        ldp x29, x30, ...
        nop
    With:
        ldp x29, RRX, ...
        eor x30, RRX, RRK
    """
    def __instrument(func=None, start_func=None, end_func=None, start_i=None, end_i=None,
            tid=None):
        # Worker body: instruments objdump lines [start_i, end_i] -- run inline
        # or as a multiprocessing worker (tid identifies the worker).
        def parse_insn_range(i, r):
            # Parsed insns for lines [i, r); None entries for non-insn lines.
            return [objdump.parse_insn(j) if objdump.is_insn(j) else None for j in xrange(i, r)]
        #
        # Instrumentation of function prologues.
        #
        #import pdb; pdb.set_trace()
        def instrument_stp(curfunc, func_i, i, stp_insn, new_stp_insn, add_x29_imm):
            """
            new_stp_insn(stp_insn, replaced_insn) -> new stp instruction to encode.
            """
            # Only rewrite when the modified gcc left a nop spacer just before
            # the stp (see the toolchain requirement at the top of this file).
            last_insn = parse_insn_range(i-1, i)
            if not are_nop_insns(last_insn):
                return
            # NOTE(review): this reads `insn` from the enclosing loop rather
            # than the stp_insn parameter; at every current call site they are
            # the same object -- confirm before refactoring.
            offset = insn['args']['imm']
            # The last instruction was a nop spacer.
            # Move forward until we see "add x29, sp, #<...>".
            # (we expect ... = add_x29_imm)
            #mov_j, movx29_insn = find_add_x29_x30_imm(objdump, curfunc, func_i, i)
            #assert movx29_insn['args']['imm'] == add_x29_imm
            # Time to do prologue instrumentation.
            # eor RRX, x30, RRK
            eor = eor_insn(last_insn[0],
                    reg1=objdump.RRX, reg2=REG_LR, reg3=objdump.RRK)
            objdump.write(i-1, objdump.encode_insn(eor))
            # stp x29, RRX, ...
            stp = new_stp_insn(insn, insn)
            objdump.write(i, objdump.encode_insn(stp))
            # add x29, sp, #<add_x29_imm>
            #add = add_insn({},
                    #dst_reg=REG_FP, src_reg=REG_SP, imm12=add_x29_imm)
            #objdump.write(i, objdump.encode_insn(add))
            # nop
            #nop = nop_insn(movx29_insn)
            #objdump.write(mov_j, objdump.encode_insn(nop))
        def _skip_func(func):
            # Bind the caller's skip sets for Objdump.each_insn.
            return skip_func(func, skip, skip_asm)
        last_func_i = [None]
        def each_insn():
            # Keep track of the last 2 instructions
            # (needed it for CONFIG_RKP_CFP_JOPP)
            for curfunc, func_i, i, insn, last_insns in objdump.each_insn(func=func, start_func=start_func, end_func=end_func,
                    start_i=start_i, end_i=end_i, skip_func=_skip_func, num_last_insns=1):
                yield curfunc, func_i, i, insn, last_insns
                # Remember the function of the previous insn so the loop below
                # can detect the first insn of each new function.
                last_func_i[0] = func_i
        for curfunc, func_i, i, insn, last_insns in each_insn():
            if objdump.JOPP and func_i != last_func_i[0] and are_nop_insns(ins[1] for ins in last_insns):
                # First insn of a new function with a nop spacer before it:
                # stamp the JOPP magic number into the spacer word.
                #nargs_i, nargs_insn = last_insns[0]
                magic_i, magic_insn = last_insns[0]
                #nargs = objdump.get_nargs(func_i)
                #if nargs is None:
                    # nargs ought to be defined for everything eventually.  For symbol
                    # we're not sure about yet, don't zero any argument registers.
                    #nargs = NUM_ARG_REGS
                #else:
                    # ARM64 calling standard dictates that only registers x0 ... x7 can
                    # contain function parameters.
                    #
                    # If the C function takes more than these 8 registers (e.g. it takes 9
                    # arguments), we only need to mark the first 8 as "don't clobber" (the
                    # springboard code depends on this convention).
                    #
                    #nargs = min(NUM_ARG_REGS, nargs)
                #objdump.write(nargs_i, nargs)
                objdump.write(magic_i, objdump.JOPP_MAGIC)
            if objdump.JOPP and insn['type'] == 'blr' and curfunc not in skip_blr :
                # Redirect the indirect call through the per-register springboard.
                springboard_blr = 'jopp_springboard_blr_x{register}'.format(register=insn['args']['dst_reg'])
                insn = bl_insn(insn,
                        offset=objdump.func_offset(springboard_blr) - objdump.insn_offset(i))
                objdump.write(i, objdump.encode_insn(insn))
                continue
            elif objdump.ROPP_REARRANGE_PROLOGUE and insn['type'] == 'ldp' and \
                    insn['args']['reg1'] == REG_FP and \
                    insn['args']['reg2'] == REG_LR:
                # Epilogue: only rewrite if the compiler left a trailing nop.
                forward_insn = parse_insn_range(i+1, i+2)
                if not are_nop_insns(forward_insn):
                    continue
                # stp x29, RRX, ...
                insn['args']['reg2'] = objdump.RRX
                stp = ((hexint(insn['binary']) >> 15) << 15) | \
                        (insn['args']['reg2'] << 10) | \
                        ((insn['args']['base_reg']) << 5) | \
                        (insn['args']['reg1'])
                objdump.write(i, stp)
                # eor x30, RRX, RRK
                eor = eor_insn(forward_insn[0],
                        reg1=REG_LR, reg2=objdump.RRX, reg3=objdump.RRK)
                objdump.write(i+1, objdump.encode_insn(eor))
                continue
            elif objdump.ROPP_REARRANGE_PROLOGUE and curfunc not in skip_stp and insn['type'] == 'stp' and \
                    insn['args']['reg1'] == REG_FP and \
                    insn['args']['reg2'] == REG_LR and \
                    insn['args']['imm'] > 0:
                # Prologue variant: stp with positive immediate offset.
                def stp_x29_RRX_offset(insn, replaced_insn):
                    # stp x29, RRX, [sp,#<offset>]
                    offset = insn['args']['imm']
                    return stp_insn(replaced_insn,
                            reg1=REG_FP, reg2=objdump.RRX, base_reg=REG_SP, imm=offset, mode=STP_IMM_OFFSET)
                instrument_stp(curfunc, func_i, i, insn, stp_x29_RRX_offset, insn['args']['imm'])
                continue
            elif objdump.ROPP_REARRANGE_PROLOGUE and curfunc not in skip_stp and insn['type'] == 'stp' and \
                    insn['args']['reg1'] == REG_FP and \
                    insn['args']['reg2'] == REG_LR and \
                    insn['args']['imm'] < 0:
                # Prologue variant: pre-indexed stp allocating a stack frame.
                def stp_x29_RRX_frame(insn, replaced_insn):
                    # stp x29, RRX, [sp,#-<frame>]!
                    frame = -1 * insn['args']['imm']
                    return stp_insn(replaced_insn,
                            reg1=REG_FP, reg2=objdump.RRX, base_reg=REG_SP, imm=-1 * frame, mode=STP_PRE_INDEXED)
                instrument_stp(curfunc, func_i, i, insn, stp_x29_RRX_frame, 0)
                continue
        objdump.flush()
    if threads == 1 or func:
        # Just use the current thread.
        if func:
            # Instrument a single function
            __instrument(func=func, tid=0)
        else:
            # Instrument all functions
            __instrument(start_func=objdump.funcs()[0][0], tid=0)
        return
    objdump.each_insn_parallel(__instrument, threads)
def find_add_x29_x30_imm(objdump, curfunc, func_i, i):
    """
    Find the instruction that adjusts the frame pointer in the function prologue:
        add x29, sp, #...
    Start the search from i (inside curfunc) in the objdump.
    Throw an exception if we don't find it.
    """
    mov_j = None
    movx29_insn = None
    # Scan forward from i+1 to the end of curfunc; 'raw' absent in args means
    # parse_insn recognized the line as an add-immediate.
    for j, ins in objdump.each_insn(end_func=curfunc, end_i=func_i, start_i=i+1, just_insns=True):
        if ins['type'] == 'add' and \
                'raw' not in ins['args'] and \
                ins['args']['dst_reg'] == REG_FP and \
                ins['args']['src_reg'] == REG_SP:
            mov_j = j
            movx29_insn = ins
            break
    if mov_j is None:
        raise RuntimeError("saw function prologue (nop, stp x29 x30) without mov x29 sp in {curfunc}".format(**locals()))
    return mov_j, movx29_insn
class Objdump(object):
"""
Parse a vmlinux file, and apply instruction re-writes to a copy of it (or inplace).
Makes heavy use of aarch64-linux-android-objdump output.
i index in usage below is 0-based line number in aarch64-linux-android-objdump output.
Usage:
objdump = Objdump(vmlinux_filepath)
objdump.parse()
objdump.open()
for i, insn in objdump.each_insn(func="stext"):
if insn['type'] == 'bl':
insn = bl_insn(insn, offset=32)
objdump.write(i, objdump.encode_insn(insn))
objdump.close()
See "instrument" for implementation of actual hyperdrive instrumentations.
"""
def __init__(self, vmlinux, kernel_src,
        config_file=None,
        RRK=RRK_DEFAULT, RRX=RRX_DEFAULT, RRS=RRS_DEFAULT,
        instr="{dirname}/{basename}.instr", inplace=False,
        make_copy=True, just_lines=False):
    """
    vmlinux -- path to the kernel ELF image to parse/instrument.
    kernel_src -- root of the kernel source tree (used to locate .S routines).
    config_file -- kernel .config path; drives the CONFIG_RKP_CFP_* feature flags.
    RRK / RRX / RRS -- reserved register numbers used by the instrumentation.
    instr -- output path template; {dirname}/{basename} are filled in from vmlinux.
    inplace -- if True, write instrumented instructions back into vmlinux itself.
    make_copy -- if True, stash vmlinux + .config under tmp/ (used by validation).
    just_lines -- only keep raw objdump lines (skip extra parsing).
    """
    self.vmlinux = vmlinux
    self.vmlinux_old = None
    self.kernel_src = kernel_src
    self.config_file = config_file
    self.conf = None
    self.nargs = None
    self.c_functions = set([])
    # self.lines: (line, section_idx, virt_addr) tuples -- see parse().
    self.lines = []
    # symbol name -> set of objdump line indices where it is defined
    self.func_idx = {}
    self.func_addrs = set([])
    self._funcs = None
    self.sections = None
    self.make_copy = make_copy
    self.just_lines = just_lines
    #load config flags
    self._load_config()
    self.ROPP_REARRANGE_PROLOGUE = self.is_conf_set('CONFIG_RKP_CFP_ROPP')
    self.JOPP = self.is_conf_set('CONFIG_RKP_CFP_JOPP')
    self.JOPP_MAGIC = int(self.get_conf('CONFIG_RKP_CFP_JOPP_MAGIC'), 16)
    self.RRK = RRK
    self.RRX = RRX
    self.RRS = RRS
    self.instr_copy = None
    if inplace:
        self.instr = self.vmlinux
    else:
        basename = os.path.basename(vmlinux)
        dirname = my_dirname(vmlinux)
        if dirname == '':
            dirname = '.'
        self.instr = instr.format(**locals())
def _load_config(self):
    """Parse the kernel .config (if one was supplied) into self.conf."""
    if not self.config_file:
        return
    self.conf = parse_config(self.config_file)
def parse(self):
    """
    Read and save all lines from "aarch64-linux-android-objdump -d vmlinux".
    Read and save section information from "aarch64-linux-android-objdump -x".
    Keep track of where in the objdump output functions occur.
    """
    self.sections = parse_sections(self.vmlinux)
    # Spill the (large) disassembly to a temp file rather than holding a pipe.
    fd, tmp = tempfile.mkstemp()
    os.close(fd)
    subprocess.check_call("{OBJDUMP} -d {vmlinux} > {tmp}".format(
        OBJDUMP=OBJDUMP, vmlinux=pipes.quote(self.vmlinux), tmp=pipes.quote(tmp)), shell=True)
    # NOTE: DON'T MOVE THIS.
    # We are adding to the objdump output symbols from the data section.
    symbols = parse_nm(self.vmlinux)
    for s in symbols.keys():
        sym = symbols[s]
        # 't'/'T': text (code) symbols -- record their start addresses.
        if sym[NE_TYPE] in ['t', 'T']:
            self.func_addrs.add(_int(sym[NE_ADDR]))
    """
    Find any assembly routines that appear inside the data section.
    1. List all symbols in vmlinux using nm
    2. Find all symbols in 1. that also both an "ENTRY(...)" and an "ENDPROC(...)" entry in
       a .S Linux source file
    NOTE:
    The method we use isn't guaranteed to locate all such instances, but is a fast
    approximation, and avoids having to instrument assembly .S files more than we
    already do.
    e.g.
    In particular, there could be assembly code that doesn't use ENTRY/ENDPROC;
    we rely on Linux coding standards for this.
    """
    symbols = parse_nm(self.vmlinux, symbols=parse_all_asm_functions(self.kernel_src))
    for s in sorted(symbols.keys(), key=lambda s: _int(symbols[s][NE_ADDR])):
        sym = symbols[s]
        if sym[NE_TYPE] in ['d', 'D']:
            # This symbol appears in the .data section but it's code since it's
            # declared using both ENTRY and ENDPROC assembly routine markers
            start_address = '0x' + sym[NE_ADDR]
            stop_address = '0x' + _hex(_int(sym[NE_ADDR]) + sym[NE_SIZE]*BYTES_PER_INSN)
            # Append a targeted disassembly of this data-section routine.
            subprocess.check_call(("{OBJDUMP} -D {vmlinux} "
                "--start-address={start_address} --stop-address={stop_address} >> {tmp}").format(
                    OBJDUMP=OBJDUMP, vmlinux=pipes.quote(self.vmlinux), tmp=pipes.quote(tmp),
                    start_address=start_address, stop_address=stop_address), shell=True)
    """
    Now process objdump output and extract information into self.lines:
    self.func_idx   mapping from symbol name to a set of indicies into self.lines where
                    that symbol is defined (there can be multiple places with the same symbol /
                    function name in objdump).
    self.lines is tuple of:
        1. The line itself
        2. Which section each instructions occurs in
        3. Virtual addresses of instructions
    """
    section_idx = None
    with open(tmp, 'r') as f:
        for i, line in enumerate(f):
            virt_addr = None
            m = re.search(virt_addr_re, line)
            if m:
                virt_addr = _int(m.group('virt_addr'))
            self.lines.append((line, section_idx, virt_addr))
            m = re.search(r'Disassembly of section (?P<name>.*):', line)
            if m:
                # Subsequent lines belong to this section.
                section_idx = self.sections['section_idx'][m.group('name')]
                continue
            m = re.search(common.fun_rec, line)
            if m:
                if m.group('func_name') not in self.func_idx:
                    self.func_idx[m.group('func_name')] = set()
                self.func_idx[m.group('func_name')].add(i)
                continue
    # We have all the objdump lines read, we can delete the file now.
    self._copy_to_tmp(tmp, 'objdump.txt')
    os.remove(tmp)
def _copy_to_tmp(self, from_path, to_basename):
    """
    Copy from_path into <dirname(vmlinux)>/scripts/rkp_cfp/tmp/<to_basename>,
    creating the directory if needed, and return the destination path.
    """
    dest = os.path.join(my_dirname(self.vmlinux), 'scripts/rkp_cfp/tmp', to_basename)
    subprocess.check_call(['mkdir', '-p', my_dirname(dest)])
    subprocess.check_call(['cp', from_path, dest])
    return dest
def save_instr_copy(self):
    """
    Copy vmlinux_instr to dirname(vmlinux)/tmp/vmlinux.instr
    (mostly for debugging)
    """
    # Remember where the copy went so callers can locate it later.
    self.instr_copy = self._copy_to_tmp(self.instr, 'vmlinux.instr')
def open(self):
    """
    mmap vmlinux for reading and vmlinux.instr for writing instrumented instructions.
    """
    # Seed vmlinux.instr from the pristine vmlinux unless instrumenting in place.
    if os.path.abspath(self.vmlinux) != os.path.abspath(self.instr):
        subprocess.check_call(['cp', self.vmlinux, self.instr])
    if self.make_copy:
        # copy vmlinux to tmp/vmlinux.old (needed for validate_instrumentation)
        self.vmlinux_old = self._copy_to_tmp(os.path.join(my_dirname(self.vmlinux), 'vmlinux'), 'vmlinux')
        self._copy_to_tmp(os.path.join(my_dirname(self.vmlinux), '.config'), '.config')
    # Writable mmap over the instrumented image...
    self.write_f = open(self.instr, 'r+b')
    self.write_f.flush()
    self.write_f.seek(0)
    self.write_mmap = mmap.mmap(self.write_f.fileno(), 0, access=mmap.ACCESS_WRITE)
    # ...and a read-only mmap over the original image.
    self.read_f = open(self.vmlinux, 'rb')
    self.read_f.flush()
    self.read_f.seek(0)
    self.read_mmap = mmap.mmap(self.read_f.fileno(), 0, access=mmap.ACCESS_READ)
def __getstate__(self):
    """
    For debugging.  Don't pickle non-picklable attributes.

    Returns a copy of __dict__ with the open files, mmaps and cached
    function table removed.  Uses pop(..., None) instead of del so that
    pickling also works before open() has been called -- in that state
    write_f / write_mmap / read_f / read_mmap don't exist yet and the
    original `del` raised KeyError.
    """
    d = dict(self.__dict__)
    for attr in ('write_f', 'write_mmap', 'read_f', 'read_mmap', '_funcs'):
        d.pop(attr, None)
    return d
def __setstate__(self, d):
    # Restore pickled attributes; the file/mmap handles dropped by
    # __getstate__ remain unset until open() is called again.
    self.__dict__.update(d)
def insn_offset(self, i):
    """
    Virtual address of the instruction at objdump line i
    (parsed from the line itself).
    """
    return self.parse_insn(i)['virt_addr']
def _insn_idx(self, i):
    """
    Byte offset into the vmlinux/vmlinux.instr file of the instruction at
    objdump line i: the instruction's displacement within its section plus
    the section's file offset.  Used to index the read/write mmaps.
    """
    section = self._section(i)
    return section['offset'] + (self.virt_addr(i) - section['address'])
def read(self, i, size=4):
    """
    Read a 32-bit instruction into a list of chars in big-endian.

    i -- objdump line index; size -- number of bytes to read (default 4).
    """
    idx = self._insn_idx(i)
    insn = list(self.read_mmap[idx:idx+size])
    # ARM uses little-endian.
    # Need to flip bytes around since we're reading individual chars.
    flip_endianness(insn)
    return insn
def write(self, i, insn, size=4):
    """
    Write a 32-bit instruction back to vmlinux.instr.

    insn can be a list of 4 chars or a 32-bit integer in big-endian format.
    Converts back to little-endian (ARM's binary format) before writing.

    `size` (bytes written) used to be a hard-coded local; it is now a
    keyword parameter (default unchanged) because write_raw() calls
    self.write(i, insn, size=4), which raised TypeError against the old
    two-argument signature.
    """
    idx = self._insn_idx(i)
    insn = list(byte_string(insn))
    flip_endianness(insn)
    self.write_mmap[idx:idx+size] = byte_string(insn)
def write_raw(self, i, insn):
    # Write an instruction's original bytes back unchanged.
    # NOTE(review): passes size= to write(), but write() as written takes no
    # size parameter -- this raises TypeError if ever reached; confirm intent.
    self.write(i, insn['binary'], size=4)
def read_raw(self, hexaddr, size):
    """
    Read `size` raw bytes at virtual address `hexaddr` (a hex string),
    returned as a big-endian list of chars.
    """
    section = addr_to_section(hexaddr, self.sections['sections'])
    file_off = section['offset'] + (_int(hexaddr) - section['address'])
    data = list(self.read_mmap[file_off:file_off + size])
    flip_endianness(data)
    return data
def close(self):
    """Flush pending writes, close the mmaps/files, and drop the references."""
    self.flush()
    for attr in ('write_mmap', 'write_f', 'read_mmap', 'read_f'):
        getattr(self, attr).close()
        setattr(self, attr, None)
def is_conf_set(self, var):
    """True/False for whether CONFIG `var` is 'y'; None if no .config was parsed."""
    return None if self.conf is None else self.conf.get(var) == 'y'
def get_conf(self, var):
    """Raw string value of CONFIG `var`, or None if unset / no .config parsed."""
    return None if self.conf is None else self.conf.get(var)
def flush(self):
    # Push mmap pages and buffered file writes out to vmlinux.instr.
    self.write_mmap.flush()
    self.write_f.flush()
def line(self, i):
    """
    Return the i-th (0-based) line of output from "aarch64-linux-android-objdump -d vmlinux".
    (no lines are filtered).
    """
    # self.lines entries are (line, section_idx, virt_addr) tuples.
    return self.lines[i][0]
def _section_idx(self, i):
    # Index (into self.sections['sections']) of the section containing line i.
    return self.lines[i][1]
def virt_addr(self, i):
    # Virtual address parsed from objdump line i (None for non-instruction lines).
    return self.lines[i][2]
def section(self, section_name):
    """
    Look up a parsed section record by name.

    >>> self.section('.text')
    {'address': 18446743798847711608L,
     'align': 1,
     'lma': 18446743798847711608L,
     'name': '.text',
     'number': 23,
     'offset': 15608184,
     'size': '0017aae8',
     'type': 'ALLOC'},
    """
    return self.sections['sections'][self.sections['section_idx'][section_name]]
def _section(self, i):
    # Section record for the section containing objdump line i.
    return self.sections['sections'][self._section_idx(i)]
def is_func(self, i):
    """True iff objdump line i is a function-label line."""
    return self.get_func(i) is not None
def is_func_addr(self, func_addr):
    # True iff func_addr is the start address of a known text symbol (from nm).
    return func_addr in self.func_addrs
def get_func(self, i):
    # Match object when line i is a function label (common.fun_rec), else None.
    return re.search(common.fun_rec, self.line(i))
def is_insn(self, i):
    """
    Returns True if self.line(i) is an instruction
    (i.e. not a function label line, blank line, etc.)
    """
    # Instruction lines carry a parsed virtual address and are not labels.
    return not self.is_func(i) and self.virt_addr(i) is not None
FUNC_OFFSET_RE = re.compile(r'^(?P<virt_addr>{hex_re})'.format(hex_re=hex_re))
def get_func_idx(self, func, i=None):
    """
    Return the objdump line index where `func` is defined.

    The same symbol can be defined at several places; in that case the
    caller must pass `i` to pick one (it is validated against the set).
    """
    i_set = self.func_idx[func]
    if len(i_set) != 1 and i is None:
        raise RuntimeError("{func} occurs multiple times in vmlinux, specify which line from objdump you want ({i_set})".format(**locals()))
    elif i is None:
        i = iter(i_set).next()  # Python 2 iterator protocol
    else:
        assert i in i_set
    return i
def get_func_end_idx(self, func, start_i=None):
    """
    Index of the last objdump line belonging to `func`: scan forward over
    label and instruction lines, stop at the first line that is neither.
    """
    j = self.get_func_idx(func, start_i)
    while j < len(self.lines) and (self.is_func(j) or self.is_insn(j)):
        j += 1
    return j - 1
def func_offset(self, func, i=None):
    # Virtual address of `func`, parsed from its objdump label line.
    i = self.get_func_idx(func, i)
    m = re.search(Objdump.FUNC_OFFSET_RE, self.line(i))
    return _int(m.group('virt_addr'))
PARSE_INSN_RE = re.compile((
r'(?P<virt_addr>{hex_re}):\s+'
r'(?P<hex_insn>{hex_re})\s+'
r'(?P<type>[^\s]+)\s*'
).format(hex_re=hex_re))
def parse_insn(self, i):
    """
    Parse the i-th line of objdump output into a python dict.

    Always-present keys: 'virt_addr', 'hex_insn', 'type', 'binary', 'args';
    the contents of 'args' vary with the instruction type, and instructions
    we don't decode keep their operand text under args['raw'].

    e.g.
        >>> self.line(...)
        [2802364][97 DB 48 D0] :: ffffffc0014ee5b4:       97db48d0        bl      ffffffc000bc08f4 <rmnet_vnd_exit>
        >>> self.parse_insn(2802364)
        {'args': {'offset': -9624768},
         'binary': ['\\x97', '\\xdb', 'H', '\\xd0'],
         'hex_insn': '97db48d0',
         'type': 'bl',
         'virt_addr': 18446743798853592500L}

    Fix vs. the original: a second add/mov-immediate elif with a condition
    identical to the one before 'adrp' appeared after the 'ldp' branch; it
    was unreachable (same condition tested later in the same elif chain)
    and has been removed.
    """
    line = self.line(i)
    m = re.search(Objdump.PARSE_INSN_RE, line)
    insn = m.groupdict()
    insn['virt_addr'] = _int(insn['virt_addr'])
    insn['binary'] = self.read(i)
    insn['args'] = {}
    if insn['type'] == 'bl':
        # imm26 (bits 0..25), scaled by 4.
        insn['args']['offset'] = from_twos_compl((hexint(insn['binary']) & BL_OFFSET_MASK) << 2, nbits=26 + 2)
    elif insn['type'] in set(['blr', 'ret']):
        arg = {
                'blr':'dst_reg',
                'ret':'target_reg',
                }[insn['type']]
        insn['args'][arg] = (hexint(insn['binary']) & BLR_AND_RET_RN_MASK) >> 5
    elif insn['type'] == 'stp':
        insn['args']['reg1'] = mask_shift(insn, STP_RT_MASK, 0)
        insn['args']['base_reg'] = mask_shift(insn, STP_RN_MASK, 5)
        insn['args']['reg2'] = mask_shift(insn, STP_RT2_MASK, 10)
        insn['args']['opc'] = mask_shift(insn, STP_OPC_MASK, 30)
        insn['args']['mode'] = mask_shift(insn, STP_ADDRESSING_MODE_MASK, 22)
        # imm7 is scaled by the access size.
        lsl_bits = stp_lsl_bits(insn)
        insn['args']['imm'] = from_twos_compl(
                ((hexint(insn['binary']) & STP_IMM7_MASK) >> 15) << lsl_bits,
                nbits=7 + lsl_bits)
    elif mask_shift(insn, ADDIMM_OPCODE_MASK, 24) == ADDIM_OPCODE_BITS \
            and insn['type'] in set(['add', 'mov']):
        # add/mov immediate -- objdump prints `mov xD, sp` for `add xD, sp, #0`,
        # so normalize the type to 'add'.
        insn['type'] = 'add'
        insn['args']['sf'] = mask_shift(insn, ADDIMM_SF_MASK, 31)
        insn['args']['shift'] = mask_shift(insn, ADDIMM_SHIFT_MASK, 22)
        insn['args']['imm'] = mask_shift(insn, ADDIMM_IMM_MASK, 10)
        insn['args']['src_reg'] = mask_shift(insn, ADDIMM_RN_MASK, 5)
        insn['args']['dst_reg'] = mask_shift(insn, ADDIMM_RD_MASK, 0)
        insn['args']['opcode_bits'] = mask_shift(insn, ADDIMM_OPCODE_MASK, 24)
    elif insn['type'] == 'adrp':
        immlo = mask_shift(insn, ADRP_IMMLO_MASK, ADRP_IMMLO_ZBITS)
        immhi = mask_shift(insn, ADRP_IMMHI_MASK, ADRP_IMMHI_ZBITS)
        insn['args']['dst_reg'] = mask_shift(insn, ADRP_RD_MASK, ADRP_RD_ZBITS)
        insn['args']['imm'] = from_twos_compl((immhi << (2 + 12)) | (immlo << 12), nbits=2 + 19 + 12)
    elif insn['type'] == 'ldp':
        insn['args']['reg1'] = mask_shift(insn, STP_RT_MASK, 0)
        insn['args']['base_reg'] = mask_shift(insn, STP_RN_MASK, 5)
        insn['args']['reg2'] = mask_shift(insn, STP_RT2_MASK, 10)
    else:
        insn['args']['raw'] = line[m.end():]
    return insn
def encode_insn(self, insn):
    """
    Encode a python dict representation of an instruction (see parse_insn)
    back into its 32-bit A64 binary value.

    Fixes vs. the original:
    - blr/ret: the opcode prefix was shifted `0b110101100 << 25`, producing
      a 34-bit value; the prefix occupies bits 31..23, so the shift is 23.
      `blr x1` now encodes to the architecturally correct 0xd63f0020 and
      `ret x30` to 0xd65f03c0.
    - mov/movk/movn: the hw (shift/16) field mapped shift 32 to 0b11, which
      encodes LSL #48; it is 0b10.
    - A second `elif insn['type'] == 'ret'` branch was unreachable ('ret'
      is consumed by the ['blr', 'ret'] branch above it) and was removed.

    NOTE(review): for 'ret' this reads args['dst_reg'], while parse_insn
    stores the register under 'target_reg' -- confirm callers build ret
    dicts with 'dst_reg'.

    TODO:
        stp x29, xzr, [sp,#<frame>]
        str x30, [sp,#<frame - 8>]
        add x29, sp, offset
    """
    if insn['type'] == 'eor':
        # EOR (shifted register), 64-bit: top 11 bits are 11001010000.
        upper_11_bits = 0b11001010000
        return (upper_11_bits << 21) | (insn['args']['reg3'] << 16) | (0b000000 << 10) | \
                (insn['args']['reg2'] << 5) | (insn['args']['reg1'])
    elif insn['type'] == 'ldp':
        return (0b1010100111 << 22) | (insn['args']['reg2'] << 10) | \
                (insn['args']['base_reg'] << 5) | (insn['args']['reg1'])
    elif insn['type'] in ['bl', 'b']:
        # BL: 1 0 0 1 0 1 [ imm26 ]
        # B:  0 0 0 1 0 1 [ imm26 ]
        upper_6_bits = {
                'bl':0b100101,
                'b':0b000101,
                }[insn['type']]
        # Branch range is +/-128MB.
        assert 128*1024*1024 >= insn['args']['offset'] >= -128*1024*1024
        return ( upper_6_bits << 26 ) | (to_twos_compl(insn['args']['offset'], nbits=26 + 2) >> 2)
    elif insn['type'] in ['blr', 'ret']:
        # 1 1 0 1 0 1 1 0 0 [ op ] 1 1 1 1 1 0 0 0 0 0 0 [ Rn ] 0 0 0 0 0
        # BLR: 0 1
        # RET: 1 0
        op = {
                'blr':0b01,
                'ret':0b10,
                }[insn['type']]
        assert 0 <= insn['args']['dst_reg'] <= 2**5 - 1
        # Prefix 0b110101100 fills bits 31..23 (the shift was 25 -- a bug).
        return (0b110101100 << 23) | \
                (op << 21) | \
                (0b11111000000 << 10) | \
                (insn['args']['dst_reg'] << 5)
    elif insn['type'] == 'stp':
        assert insn['args']['opc'] == OPC_64
        return (insn['args']['opc'] << 30) | \
                (insn['args']['mode'] << 22) | \
                (to_twos_compl(insn['args']['imm'] >> stp_lsl_bits(insn), nbits=7) << 15) | \
                (insn['args']['reg2'] << 10) | \
                (insn['args']['base_reg'] << 5) | \
                insn['args']['reg1']
    elif insn['type'] == 'str' and \
            insn['args']['mode'] == STR_IMM_OFFSET and \
            insn['args']['sign'] == STR_SIGN_UNSIGNED:
        assert insn['args']['imm'] >= 0
        return (insn['args']['size'] << 30) | \
                (0b111001 << 24) | \
                (insn['args']['opc'] << 22) | \
                (to_twos_compl(insn['args']['imm'] >> str_lsl_bits(insn), nbits=12) << 10) | \
                (insn['args']['base_reg'] << 5) | \
                (insn['args']['reg1'] << 0)
    elif insn['type'] == 'add' and insn['args']['opcode_bits'] == ADDIM_OPCODE_BITS:
        assert insn['args']['sf'] == ADDIMM_SF_BIT_64
        return \
                (insn['args']['sf'] << 31) | \
                (insn['args']['shift'] << 22) | \
                (insn['args']['imm'] << 10) | \
                (insn['args']['src_reg'] << 5) | \
                (insn['args']['dst_reg'] << 0) | \
                (insn['args']['opcode_bits'] << 24)
    elif insn['type'] in ['mov', 'movk', 'movn']:
        # MOVZ/MOVK/MOVN (wide immediate), 64-bit registers.
        opc = {
                'mov':0b10,
                'movk':0b11,
                'movn':0b00,
                }[insn['type']]
        hw = {
                0:0b00,
                16:0b01,
                32:0b10,  # was 0b11, which encodes LSL #48
                }[insn['args']['shift']]
        sf = 0b1 # 64-bit registers
        return (sf << 31) | \
                (opc << 29) | \
                (0b100101 << 23) | \
                (hw << 21) | \
                (insn['args']['imm16'] << 5) | \
                (insn['args']['dst_reg'])
    elif insn['type'] == 'nop':
        return 0xd503201f
    raise NotImplementedError
def _print_func(self, func_name,
        parse_insn=True):
    """
    Output objdump lines for func_name along with the internal indices (i)
    of those lines; optionally also output python dict representations of
    the instructions.  Useful for debugging / learning how this class works.

    NOTE(review): self.each_insn() yields 4-tuples by default
    (curfunc, func_i, i, insn); the 2-value unpacking below looks like it
    needs just_insns=True -- confirm before relying on this helper.

    >>> self.print_func('rmnet_exit')
    [2802357][A9 BF 7B FD] :: ffffffc0014ee598:       a9bf7bfd        stp     x29, x30, [sp,#-16]!
    [2802358][91 00 03 FD] :: ffffffc0014ee59c:       910003fd        mov     x29, sp
    ...
    Parsed instructions:
    {'args': {'raw': 'x29, x30, [sp,#-16]!'},
     'binary': ['\\xa9', '\\xbf', '{', '\\xfd'],
     'hex_insn': 'a9bf7bfd',
     'id': 2802357,
     'type': 'stp',
     'virt_addr': 18446743798853592472L}
    {'args': {'raw': 'x29, sp'},
     'binary': ['\\x91', '\\x00', '\\x03', '\\xfd'],
     'hex_insn': '910003fd',
     'id': 2802358,
     'type': 'mov',
     'virt_addr': 18446743798853592476L}
    ...
    """
    def byte_to_hex(byteStr):
        # '\xa9\xbf' -> 'A9 BF'
        return ''.join(["%02X " % ord(x) for x in byteStr]).strip()
    parsed_insns = []
    for i, insn in self.each_insn(func=func_name):
        objdump_line = self.line(i)
        hex_insn = byte_to_hex(insn['binary'])
        log("[{i}][{hex_insn}] :: {objdump_line}".format(**locals()))
    if parse_insn:
        log()
        log("Parsed instructions:")
        for i, insn in self.each_insn(func=func_name):
            insn['id'] = i
            pr(insn)
def num_funcs(self):
    # Number of distinct function symbols seen in the objdump output.
    return len(self.func_idx)
def funcs(self):
    """
    [ ("func_1", 0), ("func_2", 1), ... ]

    All (name, objdump-line-index) pairs, sorted by line index.  A symbol
    defined at several places appears once per definition site.
    """
    def __funcs():
        for func, i_set in self.func_idx.iteritems():  # Python 2 dict iteration
            for i in i_set:
                yield func, i
    funcs = list(__funcs())
    funcs.sort(key=lambda func_i: func_i[1])
    return funcs
def _idx_to_func(self, i):
    """
    Return the NAME of the function containing objdump line i.

    Binary-searches the (name, start-line) table built by funcs().

    Fix vs. the original: it returned the function's start line index
    (`return i` on an exact hit, `return idx(hi)` otherwise) even though
    its only caller, each_insn(), feeds the result to skip-set lookups
    that compare function NAMES -- so skipping silently never matched.
    It now returns the name.  TODO confirm no caller relied on the index.
    """
    if self._funcs is None:
        self._funcs = self.funcs()
    def func(j):
        return self._funcs[j][0]
    def idx(j):
        return self._funcs[j][1]
    lo = 0
    hi = len(self._funcs) - 1
    while lo <= hi:
        mi = (hi + lo) // 2  # // behaves identically for ints on py2 and py3
        if i < idx(mi):
            hi = mi - 1
        elif i > idx(mi):
            lo = mi + 1
        else:
            return func(mi)
    assert lo == hi + 1
    # i falls between two function starts; hi indexes the one containing it.
    return func(hi)
def each_func_lines(self,
        # Number of past instruction lines to yield with the current one.
        num_last_insns=None,
        with_func_i=False):
    # Yield one tuple per function label seen in the raw objdump lines:
    #   (func[, preceded by func_i if with_func_i], func_lines
    #    [, last_func_insns if num_last_insns]).
    last_insns = [] if num_last_insns is None else [None] * num_last_insns
    def is_insn(line):
        # An instruction line has a virtual address and is not a label.
        return not re.search(common.fun_rec, line) and re.search(virt_addr_re, line)
    def each_line():
        # Yield (index, raw line) and maintain the trailing-insn window.
        for i, line in enumerate(self.lines):
            line = line[0]
            yield i, line
            if not num_last_insns or not is_insn(line):
                continue
            last_insns.pop(0)
            last_insns.append(line)
    last_func_insns = list(last_insns)
    func_lines = []
    func = None
    func_i = None
    def _tuple(func_i, func, func_lines, last_func_insns):
        # Assemble the per-function yield tuple per the keyword options.
        func_tup = None
        if with_func_i:
            func_tup = (func_i, func)
        else:
            func_tup = (func,)
        if num_last_insns:
            return func_tup + (func_lines, last_func_insns)
        return func_tup + (func_lines,)
    for i, line in each_line():
        m = re.search(common.fun_rec, line)
        if m:
            # New function label: emit the previous function first.
            if func is not None:
                yield _tuple(func_i, func, func_lines, last_func_insns)
            if num_last_insns:
                last_func_insns = list(last_insns)
            func_lines = []
            func = m.group('func_name')
            func_i = i
        elif func is None:
            # Lines before the first function label are dropped.
            continue
        func_lines.append(line)
    if func is not None:
        yield _tuple(func_i, func, func_lines, last_func_insns)
def each_insn(self,
        # Instrument a single function.
        func=None,
        # Instrument a range of functions.
        start_func=None, end_func=None,
        # Start index into objdump of function (NEED this to disambiguate duplicate symbols)
        start_i=None, end_i=None,
        # If skip_func(func_name), skip it.
        skip_func=None,
        just_insns=False,
        # Don't parse instruction, just give raw objdump line.
        raw_line=False,
        # Number of past instruction lines to yield with the current one.
        num_last_insns=None,
        debug=False):
    """
    Iterate over instructions (i.e. line indices and their parsed python dicts).
    Default is entire file, but can be limited to just a function.

    Yields (i, insn) when just_insns, else (curfunc, func_i, i, insn);
    with num_last_insns set, a trailing last_insns window is appended.
    """
    if func:
        start_func = func
        end_func = func
    # Resolve the starting line index and its enclosing function.
    i = 0
    if start_func is not None:
        i = self.get_func_idx(start_func, start_i)
    elif start_i is not None:
        i = start_i
        start_func = self._idx_to_func(i)
    else:
        # The first function
        start_func, i = self.funcs()[0]
    func_i = i
    curfunc = start_func
    end = len(self.lines) - 1
    if end_func is not None:
        end = self.get_func_end_idx(end_func, end_i)
    assert not( end_func is None and end_i is not None )
    def should_skip_func(func):
        return skip_func is not None and skip_func(func)
    assert start_func is not None
    last_insns = None
    num_before_start = 0
    if num_last_insns is not None:
        last_insns = [(None, None)] * num_last_insns
        assert len(last_insns) == num_last_insns
        # Walk backwards from the start until we see num_last_insns instructions.
        # j is the index of the instruction.
        # That new starting point (i) will be that many instructions back.
        n = num_last_insns
        j = i - 1
        while n > 0 and j > 0:
            if self.is_insn(j):
                n -= 1
            j -= 1
        new_i = j + 1
        num_before_start = i - new_i + 1
        i = new_i
    def shift_insns_left(last_insns, i, to_yield):
        # Slide the trailing window: drop the oldest, append the newest.
        last_insns.pop(0)
        last_insns.append((i, to_yield))
    do_skip_func = should_skip_func(start_func)
    def _tup(i, curfunc, func_i, to_yield):
        if just_insns:
            return i, to_yield
        else:
            return curfunc, func_i, i, to_yield
    def _parse(to_yield, i):
        # Parse (or fetch raw) at most once per line.
        if to_yield is None:
            return self.line(i) if raw_line else self.parse_insn(i)
        return to_yield
    for i in xrange(i, min(end, len(self.lines) - 1) + 1):
        to_yield = None
        if num_last_insns is not None and num_before_start != 0:
            # Still warming up the trailing window: record, don't yield.
            if self.is_insn(i):
                to_yield = _parse(to_yield, i)
                shift_insns_left(last_insns, i, to_yield)
            num_before_start -= 1
            continue
        if self.is_insn(i):
            to_yield = _parse(to_yield, i)
            if not do_skip_func:
                t = _tup(i, curfunc, func_i, to_yield)
                if num_last_insns is not None:
                    yield t + (last_insns,)
                else:
                    yield t
            # Window is updated even inside skipped functions.
            if num_last_insns is not None:
                shift_insns_left(last_insns, i, to_yield)
        else:
            m = self.get_func(i)
            if m:
                # Entered a new function: update skip decision.
                curfunc = m.group('func_name')
                func_i = i
                do_skip_func = should_skip_func(curfunc)
def each_insn_parallel(self, each_insn, threads=1, **kwargs):
    """
    each_insn(start_func=None, end_func=None, start_i=None, end_i=None)

    Partition the sorted function list into `threads` contiguous chunks
    and run `each_insn` over each chunk in its own process (or inline
    when threads == 1).
    """
    # Spawn a bunch of threads to instrument in parallel.
    procs = []
    i = 0
    funcs = self.funcs()
    chunk = int(math.ceil(len(funcs)/float(threads)))
    for n in xrange(threads):
        start_func_idx = i
        end_func_idx = min(i+chunk-1, len(funcs)-1)
        start_i = funcs[start_func_idx][1]
        end_i = funcs[end_func_idx][1]
        kwargs.update({
            'start_func':funcs[start_func_idx][0],
            'end_func':funcs[end_func_idx][0],
            'start_i':start_i,
            'end_i':end_i,
            'tid':n,
            })
        if threads == 1:
            each_insn(**kwargs)
            return
        proc = multiprocessing.Process(target=each_insn, kwargs=kwargs)
        # pr(kwargs)
        # log("{start_i} {end_i} [start_i, end_i]".format(**locals()))
        i = end_func_idx + 1
        proc.start()
        procs.append(proc)
    for proc in procs:
        proc.join()
def each_procline(proc):
    """Yield each stdout line of a subprocess.Popen(...), right-stripped."""
    # readline() returns '' only at EOF, so the two-argument iter() form
    # terminates exactly where the original while/break loop did.
    for raw in iter(proc.stdout.readline, ''):
        yield raw.rstrip()
"""
Replace an instruction with a new one.
These functions modify the python dict's returned by Objdump.each_insn.
Instructions can then be written to vmlinux.instr using Objdump.write(i, Objdump.encode_insn(insn)).
"""
def bl_insn(insn, offset):
    """Rewrite *insn* into a BL (branch-and-link) with PC-relative *offset*."""
    return _jmp_offset_insn(insn, typ='bl', offset=offset)
def b_insn(insn, offset):
    """Rewrite *insn* into an unconditional B with PC-relative *offset*."""
    return _jmp_offset_insn(insn, typ='b', offset=offset)
def _jmp_offset_insn(insn, typ, offset):
insn['type'] = typ
insn['args'] = { 'offset': offset }
return insn
def _mov_insn(insn, typ, dst_reg, imm16, shift):
insn['type'] = typ
insn['args'] = { 'dst_reg': dst_reg, 'imm16': imm16, 'shift': shift }
return insn
def mov_insn(insn, dst_reg, imm16, shift=0):
    """Rewrite *insn* into MOV <dst_reg>, #imm16 (optionally shifted)."""
    return _mov_insn(insn, typ='mov', dst_reg=dst_reg, imm16=imm16, shift=shift)
def movk_insn(insn, dst_reg, imm16, shift=0):
    """Rewrite *insn* into MOVK <dst_reg>, #imm16 (keep other bits)."""
    return _mov_insn(insn, typ='movk', dst_reg=dst_reg, imm16=imm16, shift=shift)
def movn_insn(insn, dst_reg, imm16, shift=0):
    """Rewrite *insn* into MOVN <dst_reg>, #imm16 (move inverted)."""
    return _mov_insn(insn, typ='movn', dst_reg=dst_reg, imm16=imm16, shift=shift)
def movreg_insn(insn, dst_reg, src_reg):
    """Register-to-register move, encoded as ADD <dst_reg>, <src_reg>, #0."""
    return add_insn(insn, dst_reg=dst_reg, src_reg=src_reg, imm12=0)
def add_insn(insn, dst_reg, src_reg, imm12):
    """
    Rewrite *insn* into ADD <dst_reg>, <src_reg>, #imm12 (64-bit, shift 0).

    ADDIMM_SF_BIT_64 / ADDIM_OPCODE_BITS are module-level encoding
    constants for the A64 ADD (immediate) form.
    """
    insn['type'] = 'add'
    insn['args'] = dict(
        sf=ADDIMM_SF_BIT_64,
        shift=0,
        imm=imm12,
        src_reg=src_reg,
        dst_reg=dst_reg,
        opcode_bits=ADDIM_OPCODE_BITS,
    )
    return insn
def nop_insn(insn):
    """Turn *insn* into a NOP (no operands) and return it."""
    insn.update(type='nop', args={})
    return insn
def eor_insn(insn, reg1, reg2, reg3):
    """Turn *insn* into EOR <reg1>, <reg2>, <reg3> (bitwise XOR) and return it."""
    insn['type'] = 'eor'
    insn['args'] = dict(reg1=reg1, reg2=reg2, reg3=reg3)
    return insn
def str_imm_unsigned_preindex_insn(insn, reg1, base_reg, imm):
    """
    Rewrite *insn* into STR (immediate, unsigned offset):

        STR <reg1>, [<base_reg>, #imm]

    64-bit access (size=11), mode=01, sign=01 per the A64 encoding, e.g.

        ffffffc000097a8c: f9001fbe  str x30, [x29,#56]
    """
    insn['type'] = 'str'
    insn['args'] = dict(
        mode=STR_IMM_OFFSET,
        size=STR_SIZE_64,
        sign=STR_SIGN_UNSIGNED,
        reg1=reg1,
        base_reg=base_reg,
        # 0b00 selects the plain STR (immediate) form
        opc=0b00,
        imm=imm,
    )
    return insn
def stp_insn(insn, reg1, reg2, base_reg, imm, mode):
    """Rewrite *insn* into STP <reg1>, <reg2>, [<base_reg>, #imm] with addressing *mode* (64-bit opc)."""
    insn['type'] = 'stp'
    insn['args'] = dict(
        reg1=reg1,
        reg2=reg2,
        base_reg=base_reg,
        opc=OPC_64,
        mode=mode,
        imm=imm,
    )
    return insn
def print_bl_offsets(objdump):
    """Log every BL instruction: its listing line, raw offset bits, and decoded offset."""
    for curfunc, func_i, i, insn in objdump.each_insn():
        if insn['type'] == 'bl':
            log(objdump.line(i))
            # Raw imm26 field vs. the sign-extended, scaled byte offset.
            log("    instr-bits = 0x{offset:x}".format(offset=hexint(insn['binary']) & BL_OFFSET_MASK))
            log("    actual-offset = {offset}".format(offset=insn['args']['offset']))
# For tests, limit instrumentation to a known function in the source code that we can
# trigger once the kernel is up and running.
DEFAULT_TEST_FUNC = 'tima_read'
def test_instrument(objdump):
    """
    Smoke-test instrumentation: inside DEFAULT_TEST_FUNC, retarget the first
    BL so it calls print_something instead of its original destination, then
    stop.  Triggering DEFAULT_TEST_FUNC on a running kernel then verifies the
    rewrite took effect.

    Outline:
    1. read the ELF header to find where the text section maps in vmlinux
    2. mmap the text section
    3. map objdump listing lines onto mmap'ed text words
    4. find DEFAULT_TEST_FUNC and its first BL instruction
    5. retarget that BL at print_something and write it back
    """
    for curfunc, func_i, i, insn in objdump.each_insn(func=DEFAULT_TEST_FUNC):
        if insn['type'] == 'bl':
            # PC-relative: offset = destination offset - this instruction's offset.
            insn['args']['offset'] = objdump.func_offset('print_something') - objdump.insn_offset(i)
            objdump.write(i, objdump.encode_insn(insn))
            break
class GDBError(Exception):
    """Raised when GDB cannot service a request (e.g. object has no debugging symbols)."""
    pass
def list_symbol_locations(hex_func_pairs, obj, gdb=GDB):
"""
Use GDB to figure out where a symbol is in the source.
symbols can be hex addresses or symbol names.
"""
fd, tmp = tempfile.mkstemp()
os.close(fd)
gdbcmds = open(tmp)
if os.path.exists('.gdbinit') and os.getcwd() != os.path.expandvars('$HOME'):
raise RuntimeError("WARNING: .gdbinit is in this directory, run from another place to avoid sourcing it.")
with open(tmp, 'w') as gdbcmds:
def write(string, **kwargs):
gdbcmds.write(textwrap.dedent(string.format(**kwargs)))
write("""
set pagination off
set listsize 1
""")
# This is slow...
# info function ^{func}$
for hexaddr, func in hex_func_pairs:
write("""
list *(0x{hexaddr})
echo > SYMBOL\\n
""", hexaddr=strip_hex_prefix(hexaddr), func=func)
write("quit")
# 0x28ca40 is in set_in_fips_err (crypto/testmgr.c:163).
skip_until_match = r'^0x[0-9a-f]+ is in '
matched = [False]
def should_skip(line):
if matched[0] or re.search(skip_until_match, line):
matched[0] = True
return not matched[0]
def should_error(line):
return re.search(r'Reading symbols from .*\(no debugging symbols found\)', line)
proc = subprocess.Popen([gdb, obj, '-x', tmp],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
def _tuple(m):
symbol = strip_hex_prefix(m.group('symbol'))
return symbol, m.group('filepath'), int(m.group('lineno'))
tup = (None, None, None)
possible_files = set([])
for line in iter(proc.stdout.readline, ''):
if should_error(line):
raise GDBError("ERROR: " + line)
elif should_skip(line):
continue
elif re.search(r'^> SYMBOL', line):
list_sym, list_path, list_lineno = tup
path = None
if (list_path is None or list_path.endswith('.h')) and len(possible_files) == 1:
path = iter(possible_files).next()
else:
path = list_path
yield list_sym, path, list_lineno
tup = (None, None, None)
possible_files = set([])
continue
# File net/sunrpc/cache.c:
m = re.search(r'^File (?P<filepath>[^:]+):$', line)
if m:
possible_files.add(m.group('filepath'))
# print line
# 0xffffffc000bf7f20 is in boot_alloc_snapshot (kernel/trace/trace.c:208).
m = re.search(r'^(?P<symbol>[^\s]+) is in [^\s]+ \((?P<filepath>.*):(?P<lineno>\d+)\)\.', line)
if m:
tup = _tuple(m)
continue
# 0xffffffc000080000 is at arch/arm64/kernel/head.bl.S:129.
# 129 add x13, x18, #0x16
m = re.search(r'^(?P<symbol>.*) is at (?P<filepath>.*):(?P<lineno>\d+)\.', line)
if m:
tup = _tuple(m)
continue
NM_RE = re.compile(r'(?P<addr>.{16}) (?P<symbol_type>.) (?P<symbol>.*)')
NE_TYPE = 0
NE_ADDR = 1
NE_SIZE = 2
def parse_nm(vmlinux, symbols=None):
"""
MAJOR TODO:
Must handle functions (symbols) that occur more than once!
e.g.
add_dirent_to_buf is a static function defined in both:
- fs/ext3/namei.c
- fs/ext4/namei.c
ffffffc0000935b0 T cpu_resume_mmu
ffffffc0000935c0 t cpu_resume_after_mmu
...
ffffffc0000935f0 D cpu_resume
...
ffffffc000093680 T __cpu_suspend_save
...
ffffffc000c60000 B _end
U el
U lr
{
'cpu_resume':('D', 'ffffffc0000935f0', 36)
}
"""
proc = subprocess.Popen(["{NM} {vmlinux} | sort".format(NM=NM, vmlinux=vmlinux)], shell=True, stdout=subprocess.PIPE)
f = each_procline(proc)
nm = {}
last_symbol = None
last_name = None
for line in f:
m = re.search(NM_RE, line)
if m:
if last_symbol is not None and ( symbols is None or last_name in symbols ):
last_symbol[NE_SIZE] = ( _int(m.group('addr')) - _int(last_symbol[NE_ADDR]) ) / BYTES_PER_INSN \
if \
re.match(hex_re, last_symbol[NE_ADDR]) and \
re.match(hex_re, m.group('addr')) \
else None
last_symbol = [m.group('symbol_type'), m.group('addr'), None]
last_name = m.group('symbol')
if symbols is None or m.group('symbol') in symbols:
nm[m.group('symbol')] = last_symbol
return nm
def parse_plain_labels(asm_file):
    """Collect every plain assembler label ("name:") defined in *asm_file*."""
    # Build the pattern once instead of re-formatting it per line.
    pattern = r'^\s*(?:(?P<label>{ident_re}):)'.format(ident_re=common.ident_re)
    found = set()
    for text in each_line(asm_file):
        match = re.search(pattern, text)
        if match:
            found.add(match.group('label'))
    return found
def parse_asm(asm_file,
# If true, just parse functions
functions=None,
# If true, just parse nargs
nargs=None):
"""
Find all "functions" in an assembly file.
We consider a function to be something like:
ENTRY(func):
...
ENDPROC(func)
NOTE: we don't consider this a function:
func:
...
"""
endproc = set([])
entry = set([])
func_entry = set([])
vector_entry = set([])
narg = {}
if not functions and not nargs:
functions = True
nargs = True
def add_narg(m):
symbol, n = re.split(r', ', m.group('symbol'))
if nargs:
narg[symbol] = (asm_file, int(n))
return symbol
def asm_pattern(macroname):
return r'^\s*' + macroname + r'\((?P<symbol>[^)\\]+)\)'
for line in each_line(asm_file):
m = re.search(asm_pattern('FUNC_ENTRY'), line)
if m:
symbol = add_narg(m)
func_entry.add(symbol)
continue
m = re.search(asm_pattern('FUNC_NARGS'), line)
if m:
add_narg(m)
continue
m = re.search(asm_pattern('ENTRY'), line)
if m:
entry.add(m.group('symbol'))
continue
m = re.search(asm_pattern('VECTOR_ENTRY'), line)
if m:
vector_entry.add(m.group('symbol'))
continue
m = re.search(asm_pattern('ENDPROC'), line)
if m:
endproc.add(m.group('symbol'))
continue
funcs = endproc.intersection(
(entry.union(func_entry)).difference(
vector_entry))
if functions and not nargs:
return funcs
elif not functions and nargs:
return narg
return funcs, narg
def kernel_files(root, glob):
    """Yield paths under *root* whose basename matches the fnmatch pattern *glob*."""
    # Distinct loop names avoid shadowing the 'root' parameter during the walk.
    for dirpath, _dirnames, filenames in os.walk(root):
        for name in fnmatch.filter(filenames, glob):
            yield os.path.join(dirpath, name)
def asm_files(kernel_src):
    """Yield every *.S assembly file under arch/<KERNEL_ARCH> of the kernel tree."""
    arch_dir = os.path.join(kernel_src, 'arch', KERNEL_ARCH)
    return kernel_files(arch_dir, '*.S')
def nargs_files(kernel_src):
    """Yield every generated *.o.nargs file anywhere under the kernel tree."""
    pattern = '*.o.nargs'
    return kernel_files(kernel_src, pattern)
def parse_all_asm_functions(kernel_src):
    """Union of assembly-defined function names over every arch *.S file."""
    found = set()
    for path in asm_files(kernel_src):
        found.update(parse_asm(path, functions=True))
    return found
def parse_all_asm_labels(kernel_src):
    """Union of plain assembler labels over every arch *.S file."""
    found = set()
    for path in asm_files(kernel_src):
        found.update(parse_plain_labels(path))
    return found
def offset_to_section(offset, sections):
    """Return the section dict whose file-offset range contains *offset* (None if no match)."""
    for candidate in sections:
        lo = candidate['offset']
        if lo <= offset < lo + candidate['size']:
            return candidate
    return None
def addr_to_section(hexaddr, sections):
    """Return the section dict whose virtual-address range contains hex string *hexaddr* (None if no match)."""
    target = _int(hexaddr)
    for candidate in sections:
        base = candidate['address']
        if base <= target < base + candidate['size']:
            return candidate
    return None
def parse_sections(vmlinux):
"""
[Nr] Name Type Address Offset
Size EntSize Flags Link Info Align
[ 0] NULL 0000000000000000 00000000
0000000000000000 0000000000000000 0 0 0
[ 1] .head.text PROGBITS ffffffc000205000 00005000
0000000000000500 0000000000000000 AX 0 0 64
{
'name': '.head.text',
'size': 0,
'type': PROGBITS,
...
}
"""
proc = subprocess.Popen([OBJDUMP, '--section-headers', vmlinux], stdout=subprocess.PIPE)
f = each_procline(proc)
d = {
'sections': [],
'section_idx': {},
}
it = iter(f)
section_idx = 0
while True:
try:
line = it.next()
except StopIteration:
break
m = re.search(r'^Sections:', line)
if m:
# first section
it.next()
continue
m = re.search((
# [Nr] Name Type Address Offset
r'^\s*(?P<number>\d+)'
r'\s+(?P<name>[^\s]*)'
r'\s+(?P<size>{hex_re})'
r'\s+(?P<address>{hex_re})'
r'\s+(?P<lma>{hex_re})'
r'\s+(?P<offset>{hex_re})'
r'\s+(?P<align>[^\s]+)'
).format(hex_re=hex_re), line)
if m:
section = {}
d['section_idx'][m.group('name')] = int(m.group('number'))
def parse_power(x):
m = re.match(r'(?P<base>\d+)\*\*(?P<exponent>\d+)', x)
return int(m.group('base'))**int(m.group('exponent'))
section.update(coerce(m.groupdict(), [
[_int, ['size', 'address', 'offset', 'lma']],
[int, ['number']],
[parse_power, ['align']]]))
line = it.next()
# CONTENTS, ALLOC, LOAD, READONLY, CODE
m = re.search((
r'\s+(?P<type>.*)'
).format(hex_re=hex_re), line)
section.update(m.groupdict())
d['sections'].append(section)
return d
def coerce(dic, funcs, default=lambda x: x):
    """
    Convert the values of *dic* in place and return it.

    funcs is a list of [converter, [field, ...]] pairs; each named field's
    value is passed through its converter.  Fields not named anywhere are
    passed through *default*.

    Fix: the original accepted *default* but never used it (unmapped fields
    were silently skipped).  Applying it is backward-compatible because the
    default is the identity function.
    """
    field_to_func = {}
    for converter, fields in funcs:
        for field in fields:
            field_to_func[field] = converter
    for field in list(dic.keys()):
        dic[field] = field_to_func.get(field, default)(dic[field])
    return dic
BYTES_PER_INSN = 4
def insn_addr():
    """
    Virtual address of the instruction on the current line: the insertion
    base address plus BYTES_PER_INSN for each preceding line.

    Fix: the original wrapped this in a try/except that only re-raised
    (using Python-2-only ``except Exception, e`` syntax); the no-op handler
    is dropped so exceptions propagate with their original traceback.
    """
    return _int(env['insertion_addr']) + (env['line_number'] - 1)*BYTES_PER_INSN
def _int(hex_string):
"""
Convert a string of hex characters into an integer
>>> _int("ffffffc000206028")
18446743798833766440L
"""
return int(hex_string, 16)
def _hex(integer):
return re.sub('^0x', '', hex(integer)).rstrip('L')
def strip_hex_prefix(string):
    """Drop a single leading '0x' from *string*, if present."""
    return string[2:] if string.startswith('0x') else string
def main():
if OFF:
return
parser = argparse.ArgumentParser("Instrument vmlinux to protect against ROP attacks")
parser.add_argument("--vmlinux", required=True,
help="vmlinux file to run objdump on")
parser.add_argument("--config",
help="kernel .config file; default = .config in location of vmlinux if it exists")
args = parser.parse_known_args()[0]
config_file = None
if args.config:
config_file = args.config
if not args.config:
config_file = guess_config_file(args.vmlinux)
if config_file is None:
parser.error("Cannot find kernel .config file in directory of vmlinux; specify one with --config")
# Some default arguments depend on configuration options.
conf = parse_config(config_file)
parser.add_argument("--threads", type=int, default=DEFAULT_THREADS,
help="Number of threads to instrument with (default = # of CPUs on machine)")
parser.add_argument("--instrument-func",
help=\
"We will instrument the first BL instruction inside this function. " \
"We calculate B offsets based on this")
parser.add_argument("--vmlinux-dump",
help="vmlinux objdump")
parser.add_argument("--insertion-addr", default='0xffffffc002000000',
help="virtual address that we insert the code at (should follow text)")
parser.add_argument("--print-insertion-addr", action='store_true')
parser.add_argument("--test-instrument", action='store_true')
parser.add_argument("--test-func", action='store_true',
help="Only instrument the {DEFAULT_TEST_FUNC} function".format(
DEFAULT_TEST_FUNC=DEFAULT_TEST_FUNC))
_reg_note = " (NOTE: must be kept in sync with macro definitions in init/hyperdrive.S)"
parser.add_argument("--RRK", default=RRK_DEFAULT,
help="ARM64 register reserved for storing the return-address key" + _reg_note)
parser.add_argument("--RRX", default=RRX_DEFAULT,
help="ARM64 register reserved for scratch during BL/BLR instrumentation" + _reg_note)
parser.add_argument("--RRS", default=RRS_DEFAULT,
help="ARM64 register reserved for storing the springboard base address" + _reg_note)
parser.add_argument("--debug", action='store_true')
parser.add_argument("--inplace", action='store_true',
help="instrument the vmlinux file inplace")
parser.add_argument("--log",
help="in addition to standard out, write any output to this file (default: kernel-src/scripts/rkp_cfp/cfp_log.txt)")
parser.add_argument("--kernel-src",
help="kernel source top directory; default = directory containing vmlinux")
parser.add_argument("--print-bl-offsets", action='store_true')
args = parser.parse_known_args()[0]
if args.print_insertion_addr:
log(args.insertion_addr)
sys.exit(0)
args = parser.parse_args()
if not os.path.exists(args.vmlinux):
parser.error("--vmlinux ({vmlinux}) doesn't exist".format(vmlinux=args.vmlinux))
kernel_src = args.kernel_src if args.kernel_src else guess_kernel_src(args.vmlinux)
if kernel_src is None:
parser.error("Need top directory of kernel source for --kernel-src")
common.LOG = common.Log('cfp_log.txt')
with common.LOG:
test_func = None
if args.test_func:
test_func = DEFAULT_TEST_FUNC
def _load_objdump():
return contextlib.closing(load_and_cache_objdump(args.vmlinux, debug=args.debug,
kernel_src=kernel_src, config_file=config_file, RRK=args.RRK, RRX=args.RRX, RRS=args.RRS, inplace=args.inplace))
if args.test_instrument:
with _load_objdump() as objdump:
test_instrument(objdump)
return
if args.print_bl_offsets:
with _load_objdump() as objdump:
print_bl_offsets(objdump)
return
# instrument and validate
with _load_objdump() as objdump:
instrument(objdump, func=test_func, skip=common.skip, skip_stp=common.skip_stp,
skip_asm=common.skip_asm, skip_blr=common.skip_blr, threads=args.threads)
objdump.save_instr_copy()
return
def each_line(fname):
    """Yield the lines of *fname* with trailing whitespace removed."""
    with open(fname) as handle:
        for raw in handle:
            yield raw.rstrip()
def each_func_lines(objdump_path):
    """
    Yield (function_name, lines) for every disassembled function in an
    objdump listing; each function's header line is included in its lines.

    Fix: the original yielded a spurious (None, []) pair when the first
    function header was encountered, before any function had been named.
    """
    lines = []
    func = None
    for line in each_line(objdump_path):
        m = re.search(common.fun_rec, line)
        if m:
            if func is not None:
                yield func, lines
            lines = []
            func = m.group('func_name')
        elif func is None:
            # Preamble before the first function header: ignore.
            continue
        lines.append(line)
    if func is not None:
        yield func, lines
def guess_kernel_src(vmlinux):
    """Guess the kernel source tree as the directory holding vmlinux (None if absent)."""
    candidate = my_dirname(vmlinux)
    return candidate if os.path.exists(candidate) else None
def guess_config_file(vmlinux):
    """Return <dir-of-vmlinux>/.config if it exists, else None."""
    candidate = os.path.join(my_dirname(vmlinux), '.config')
    return candidate if os.path.exists(candidate) else None
def parse_config(config_file):
    """
    Parse a kernel .config into a {VARIABLE: value} dict.

    Only NAME=value lines are kept; comments and "... is not set" lines do
    not match.  (CONFIG_ variables are not exported to this script's
    environment even under Kbuild, so the file must be read directly.)
    """
    pattern = re.compile(r'^\s*(?P<var>[A-Z0-9_]+)=(?P<value>[^\s#]+)')
    matches = (pattern.search(line) for line in each_line(config_file))
    return dict((m.group('var'), m.group('value')) for m in matches if m)
def first_n(xs, n):
    """
    Yield at most the first *n* items of iterable *xs*.

    Fix: the original created an unused iterator (``it = iter(xs)``) and
    kept a manual counter; enumerate replaces both.  (Equivalent to
    itertools.islice(xs, n).)
    """
    for i, x in enumerate(xs):
        if i == n:
            break
        yield x
def parse_skip(skip_file):
    """
    Read a skip-list file into a set of entries.

    One entry per line; '#' starts a comment and blank or comment-only
    lines are ignored, e.g.::

        # this is a comment
        1st_entry  # another comment
        2nd_entry
    """
    entries = set()
    with open(skip_file, 'r') as handle:
        for line in handle:
            m = re.search(r'^\s*(?P<entry>[^\s#]+)', line)
            if m:
                entries.add(m.group('entry'))
    return entries
DEFAULT_PICKLE_FNAME="{dirname}/.{basename}.pickle"
def pickle_name(fname, pickle_fname=DEFAULT_PICKLE_FNAME):
    """Build the cache-pickle path for *fname* from the *pickle_fname* template."""
    return pickle_fname.format(fname=fname,
                               basename=os.path.basename(fname),
                               dirname=my_dirname(fname))
def load_and_cache_objdump(vmlinux, debug=False, use_stale_pickle=False, pickle_fname=DEFAULT_PICKLE_FNAME,
*objdump_args, **objdump_kwargs):
"""
Parse vmlinux into an Objdump.
If debug mode, load it from a cached python pickle file to speed things up.
"""
def should_use_pickle(pickle_file, vmlinux):
return os.path.exists(pickle_file) and (
# Pickle is newer than vmlinux and this script
(os.path.getmtime(pickle_file) > os.path.getmtime(vmlinux) or use_stale_pickle) and
(os.path.getmtime(pickle_file) > os.path.getmtime(__file__) or debug)
)
def should_save_pickle():
return debug
objdump = None
#needs_update = True
#pickle_file = pickle_name(vmlinux)
#if should_use_pickle(pickle_file, vmlinux):
#with open(pickle_file, 'rb') as f:
#objdump = cPickle.load(f)
#objdump.open()
#needs_update = False
#else:
objdump = Objdump(vmlinux, *objdump_args, **objdump_kwargs)
objdump.parse()
objdump.open()
## Update the pickle file.
#if needs_update and should_save_pickle():
#with open(pickle_file, 'wb') as f:
#cPickle.dump(objdump, f)
return objdump
def flip_endianness(word):
    """Reverse a 4-element mutable byte sequence in place (0<->3, 1<->2)."""
    assert len(word) == 4
    word[0], word[3] = word[3], word[0]
    word[1], word[2] = word[2], word[1]
def from_twos_compl(x, nbits):
    """Interpret the nbits-wide two's-complement bit pattern *x* as a signed int."""
    mask = (1 << nbits) - 1
    # Caller must pass a value that already fits in nbits.
    assert x == x & mask
    if x & (1 << (nbits - 1)):
        # Sign bit set: invert, add one, negate.
        return -((x ^ mask) + 1)
    return x
def to_twos_compl(x, nbits):
    """Encode signed int *x* as an nbits-wide two's-complement bit pattern."""
    mask = (1 << nbits) - 1
    if x < 0:
        encoded = ((-x) - 1) ^ mask
        assert encoded == encoded & mask
        return encoded
    return x
def byte_string(xs):
    # Normalise xs into a raw byte string:
    #   list of chars -> joined string
    #   int/long      -> 4 big-endian bytes
    #   anything else -> returned unchanged
    # NOTE(review): 'long', chr()-bytes and xrange make this Python-2-only;
    # under Python 3 the numeric branch would raise NameError.
    if type(xs) == list:
        return ''.join(xs)
    elif type(xs) in [int, long]:
        return ''.join([chr((xs >> 8*i) & 0xff) for i in xrange(3, -1, 0-1)])
    return xs
def hexint(b):
    """Interpret *b* (bytes / char list / int -- see byte_string) as a big-endian integer."""
    raw = byte_string(b)
    return int(binascii.hexlify(raw), 16)
def mask_shift(insn, mask, shift):
    """Extract the bit field (raw & mask) >> shift from the instruction's bytes."""
    word = hexint(insn['binary'])
    return (word & mask) >> shift
def mask(insn, mask):
    """Return the instruction's raw word AND-ed with *mask*."""
    word = hexint(insn['binary'])
    return word & mask
def my_dirname(fname):
    """Like os.path.dirname, but returns '.' for files in the current directory."""
    return os.path.dirname(fname) or '.'
def are_nop_insns(insns):
    """True iff every entry is a parsed instruction dict of type 'nop' (vacuously true for [])."""
    for ins in insns:
        if ins is None or ins['type'] != 'nop':
            return False
    return True
def stp_lsl_bits(insn):
    """Shift amount implied by an STP's opc field: 2 + (opc >> 1)."""
    opc = insn['args']['opc']
    return 2 + (opc >> 1)
def str_lsl_bits(insn):
    """
    Shift applied to a STR (immediate) offset.

    Per the ARMv8 manual the imm12 field is scaled by the access size:
        integer scale = UInt(size)
        bits(64) offset = LSL(ZeroExtend(imm12, 64), scale)
    """
    args = insn['args']
    return args['size']
def shuffle_insns(objdump, mov_j, i):
    """
    Shift the instructions in slots (i .. mov_j-1) down by one slot, copying
    from high index to low so nothing is clobbered, to make room for the
    instrumented prologue sequence.
    """
    # This doesn't work on the emulator. oh well.
    # Only reason i can think of would be that there are branch instructions
    # between stp and add... But then it would branch over adjusting x29...
    #
    # We CANNOT guarantee that "add x29, sp, offset" immediately follows
    # "stp x29, RRX, ...". We need to shuffle instructions down like so:
    #
    #   nop                          -> eor RRX, x30, RRK
    #   stp x29, x30, [sp,#<offset>] -> stp x29, RRX, [sp,#<offset>]
    #   insns[0]                     -> add x29, sp, #<offset>
    #   ...                          -> insns[0]
    #   insns[n]                     -> ...
    #   add x29, sp, #<offset>       -> insns[n]
    for j in xrange(mov_j, i+1, -1):
        objdump.write(j, objdump.read(j-1))
def _p(path):
return os.path.expandvars(path)
if common.run_from_ipython():
"""
Iterative development is done using ipython REPL.
This code only runs when importing this module from ipython.
We use this to conveniently define variables and load a sample vmlinux file.
Set sample_vmlinux_file to a vmlinux file on your path.
Instrumentation will be created in a copy of that file (with a .instr suffix).
==== How to test ====
>>> ... # means to type this at the ipython terminal prompt
# To reload your code after making changes, do:
change the DEFAULT_THREADS to 1 before debugging
>>> import instrument; dreload(instrument)
# To instrument vmlinux, do:
>>> instrument._instrument()
"""
# Define some useful stuff for debugging via ipython.
# Set this to a vmlinux file we want to copy then instrument.
sample_vmlinux_file = _p("./vmlinux")
sample_kernel_src = _p("$k6")
sample_config_file = guess_config_file(sample_vmlinux_file)
# Set this to True if you changed data members in Objdump
# reload_and_save_pickle = True
reload_and_save_pickle = False
num_threads = DEFAULT_THREADS
# num_threads = 1
#import pdb; pdb.set_trace()
o = load_and_cache_objdump(sample_vmlinux_file, debug=True,
kernel_src=sample_kernel_src, config_file=sample_config_file)
print "in function common.run_from_ipython()"
common.LOG = common.Log('cfp_log.txt')
log("first line, for test")
def _instrument(func=None, skip=common.skip, validate=True, threads=num_threads):
instrument(o, func=func, skip=common.skip, skip_stp=common.skip_stp, skip_asm=common.skip_asm, threads=threads)
o.flush()
if validate and (sample_config_file) and func is None:
return debug.validate_instrumentation(o, common.skip, common.skip_stp, \
common.skip_asm, common.skip_save_lr_to_stack, common.skip_br, threads=num_threads)
if __name__ == '__main__':
main()
| Fevax/android_kernel_samsung_universal8890-N | scripts/rkp_cfp/instrument.py | Python | gpl-2.0 | 72,148 |
from pyramid.httpexceptions import HTTPNotFound
from pyramid.response import FileResponse
from ..env import env
from ..resource import resource_factory
def preview(resource, request):
    """Serve the resource's social preview image as PNG; 404 when none is configured."""
    social = resource.social
    if social is None or social.preview_fileobj is None:
        raise HTTPNotFound()
    path = env.file_storage.filename(social.preview_fileobj)
    return FileResponse(path, content_type='image/png', request=request)
def setup_pyramid(comp, config):
    """Register the /api/resource/{id}/preview.png route and its GET view."""
    route = config.add_route(
        'resource.preview', '/api/resource/{id}/preview.png',
        factory=resource_factory)
    route.add_view(preview, request_method='GET')
| nextgis/nextgisweb | nextgisweb/social/api.py | Python | gpl-3.0 | 633 |
# -*- coding: utf-8 -*-
from magpy.stream import *
#from .stream import *
class MagPyFlag(object):
"""
DESCRIPTION:
A list object with flagging information
The list should contain information according to FLAGKEYS
Flag ID's are listed version specific in FLAGID dictionary.
Some methods support a options dictionary
APPLICTAION:
Initiate flags with >>> flag = MagPyFlag().
EXAMPLES:
METHODS OVERVIEW:
----------------------------
- flag.delete(self, searchdict, combine): # e.g. flag.delete({'sensorid':'xxx','flagid':1 },combine='and')
- flag.extend(self, otherflaglist):
- flag.union(self, options):
- flag.clean(self, options):
- flag.get(self, searchdict):
- flag.put(self, newflag):
- flag.save(self, path):
- flag.sort(self):
- flag.modify(self):
- flag.stats(self):
- load(self, options): # load function supports import of old versions
"""
def __init__(self, flaglist=None):
if flaglist is None:
flaglist = []
self.flaglist = flaglist
self.FLAGKEYS = ['starttime', # datetime
'endtime', # datetime
'components', # list like [1,2,3] ref. to columns, or ['x','y'] ref. to keys
'id', # integer number
'comment', # text without special characters (utf-8)
'sensorid', # text without special characters (utf-8)
'modificationtime', # datetime
'flagversion'] # string like 1.0
self.FLAGID = {'0.4' : { 0: 'normal data',
1: 'automatically flagged for removal',
2: 'observer decision: keep data',
3: 'observer decision: remove data',
4: 'special flag: define in comment'
},
'1.0' : { 0: 'normal data',
1: 'automatically flagged to keep',
2: 'automatically flagged for removal',
3: 'observer decision: keep data',
4: 'observer decision: remove data',
5: 'special flag: define in comment'
}
}
# ------------------------------------------------------------------------
# Flag methods in alphabetical order
# ------------------------------------------------------------------------
def copy(self):
"""
DESCRIPTION
copy data into a new flaglist
"""
flaglist = MagPyFlag()
if not type(self) == list:
return self
for el in self:
flaglist.append(el)
return flaglist
def put(self, flags):
"""
DESCRIPTION
add data into a flaglist
"""
flaglist = self.copy()
# Get dimensions of flags
if not type(flags) == list:
return self
if not type(flags[0]) == list:
# Single line
flagadd = [flags]
for flagline in flagadd:
if len(flagline) == 8 and flagline[7] in FLAGID:
flaglist.append(flagline)
return flaglist
def get(self, searchdict, combine='and'):
"""
DESCRIPTION:
extract data from flaglist
EXAMPLE:
newflaglist = flaglist.get({'comment':'lightning'})
"""
extractedflaglist = MagPyFlags()
for idx,searchcrit in enumerate(searchdict):
if combine == 'and' and idx >= 1:
flaglist = extractedflaglist
elif combine == 'and' and idx == 0:
flaglist = self.copy()
else: # or
flaglist = self.copy()
print (searchcrit, searchdict[searchcrit])
pos = self.FLAGKEYS.index('comment')
fl = [el for el in flaglist if searchdict[searchcrit] in el[pos]]
extractedflaglist.put(fl)
return extractedflaglist
def consecutive_check(flaglist, sr=1, overlap=True, singular=False, remove=False, critamount=20, flagids=None, debug=False):
"""
DESCRIPTION:
Method to inspect a flaglist and check for consecutive elements
PARAMETER:
sr (float) : [sec] Sampling rate of underlying flagged data sequence
critamount (int) : Amount of maximum allowed consecutive (to be used when removing consecutive data)
result (BOOL) : True will replace consecutive data with a new flag, False will remove consecutive data from flaglist
overlap (BOOL) : if True than overlapping flags will also be combined, comments from last modification will be used
singular (BOOL) : if True than only single time stamp flags will be investigated (should be spikes)
INPUT:
flaglist with line like
[datetime.datetime(2016, 4, 13, 16, 54, 40, 32004), datetime.datetime(2016, 4, 13, 16, 54, 40, 32004), 't2', 3,
'spike and woodwork', 'LEMI036_1_0002', datetime.datetime(2016, 4, 28, 15, 25, 41, 894402)]
OUTPUT:
flaglist
"""
if flagids:
if isinstance(flagids, list):
uniqueids = flagids
elif isinstance(flagids, int):
uniqueids = [flagids]
else:
uniqueids = [0,1,2,3,4]
else:
uniqueids = [0,1,2,3,4]
if not len(flaglist) > 0:
return flaglist
# Ideally flaglist is a list of dictionaries:
# each dictionary consists of starttime, endtime, components, flagid, comment, sensorid, modificationdate
flagdict = [{"starttime" : el[0], "endtime" : el[1], "components" : el[2].split(','), "flagid" : el[3], "comment" : el[4], "sensorid" : el[5], "modificationdate" : el[6]} for el in flaglist]
## Firstly extract all flagging IDs from flaglst
if len(flaglist[0]) > 6:
ids = [el[5] for el in flaglist]
uniquenames = list(set(ids))
else:
print ("Found an old flaglist type - aborting")
return flaglist
newflaglist = []
for name in uniquenames:
cflaglist = [el for el in flaglist if el[5] == name]
# if singular, extract flags with identical start and endtime
if singular:
nonsingularflaglist = [el for el in flaglist if el[0] != el[1]]
testlist = [el for el in flaglist if el[0] == el[1]]
newflaglist.extend(nonsingularflaglist)
else:
testlist = cflaglist
#if debug:
# print (name, len(testlist))
# extract possible components
#uniquecomponents = list(set([item for sublist in [el[2].split(',') for el in testlist] for item in sublist]))
# better use componentgroups
uniquecomponents = list(set([el[2] for el in testlist]))
if debug:
print (" - Components", uniquecomponents)
for unid in uniqueids:
idlist = [el for el in testlist if el[3] == unid]
for comp in uniquecomponents:
complist = [el for el in idlist if comp == el[2]]
if debug:
print (" - Inputs for component {} with flagID {}: {}".format(comp,unid,len(complist)))
idxtmp = 0
testcnt = 0
while idxtmp < len(complist):
complist = complist[idxtmp:]
extendedcomplist = []
for idx,line in enumerate(complist):
tdiff = (line[1]-line[0]).total_seconds()
if tdiff > sr-(0.05*sr):
# add steps
firstt = line[0]
lastt = line[1]
steps = int(np.ceil(tdiff/float(sr)))
for step in np.arange(steps):
val0 = firstt+timedelta(seconds=int(step)*sr)
extendedcomplist.append([val0,val0,line[2],line[3],line[4],line[5],line[6]])
extendedcomplist.append([lastt,lastt,line[2],line[3],line[4],line[5],line[6]])
else:
extendedcomplist.append(line)
if len(extendedcomplist) > 500000:
idxtmp = idx+1
break
idxtmp = idx+1
if debug:
print (" -> Individual time stamps: {}".format(len(extendedcomplist)))
if overlap:
if debug:
print (" -> removing overlaps")
# Now sort the extendedlist according to modification date
extendedcomplist.sort(key=lambda x: x[-1], reverse=True)
#print (extendedcomplist)
# Now remove all overlapping data
seen = set()
new1list = []
for item in extendedcomplist:
ti = item[0]
if item[0] not in seen:
new1list.append(item)
seen.add(ti)
extendedcomplist = new1list
if debug:
print (" -> After overlap removal - time stamps: {}".format(len(extendedcomplist)))
# now combine all subsequent time steps below sr to single inputs again
extendedcomplist.sort(key=lambda x: x[0])
new2list = []
startt = None
endt = None
tmem = None
for idx,line in enumerate(extendedcomplist):
if idx < len(extendedcomplist)-1:
t0 = line[0]
t1 = extendedcomplist[idx+1][0]
tdiff = (t1-t0).total_seconds()
if tdiff <= sr+(0.05*sr):
if not tmem:
tmem = t0
endt = None
else:
startt = t0
if tmem:
startt = tmem
endt = t0
else:
t0 = line[0]
startt = t0
if tmem:
startt = tmem
endt = t0
if startt and endt:
# add new line
if not remove:
new2list.append([startt,endt,line[2],line[3],line[4],line[5],line[6]])
newflaglist.append([startt,endt,line[2],line[3],line[4],line[5],line[6]])
else:
if unid == 1 and (endt-startt).total_seconds()/float(sr) >= critamount:
# do not add subsequent automatic flags
pass
else:
new2list.append([startt,endt,line[2],line[3],line[4],line[5],line[6]])
newflaglist.append([startt,endt,line[2],line[3],line[4],line[5],line[6]])
tmem = None
if debug:
print (" -> After recombination: {}".format(len(new2list)))
#print (unid, len(newflaglist))
return newflaglist
def load(self, path, sensorid=None, begin=None, end=None, format='json'):
"""
DEFINITION:
Load list e.g. flaglist from file using pickle.
PARAMETERS:
Variables:
- path: (str) Path to data files in form:
- begin: (datetime)
- end: (datetime)
RETURNS:
- MagPyFlag obsject (e.g. flaglist)
EXAMPLE:
>>> import magpy.flags as flags
>>> flaglist = flags.load('/my/path/myfile.pkl')
"""
flaglist = MagPyFlag()
if not path:
return flaglist
if not os.path.isfile(path):
return flaglist
if not format in ['json','pkl']:
return flaglist
if format == 'json':
import json
print ("Reading a json style flaglist...")
def dateparser(dct):
# Convert dates in dictionary to datetime objects
for (key,value) in dct.items():
for i,line in enumerate(value):
for j,elem in enumerate(line):
if str(elem).count('-') + str(elem).count(':') == 4:
try:
try:
value[i][j] = datetime.strptime(elem,"%Y-%m-%d %H:%M:%S.%f")
except:
value[i][j] = datetime.strptime(elem,"%Y-%m-%d %H:%M:%S")
except:
pass
dct[key] = value
return dct
if os.path.isfile(path):
with open(path,'r') as file:
mydic = json.load(file,object_hook=dateparser)
if sensorid:
mylist = mydic.get(sensorid,'')
do = [el.insert(5,sensorid) for el in mylist]
else:
mylist = []
for s in mydic:
ml = mydic[s]
do = [el.insert(5,s) for el in ml]
mylist.extend(mydic[s])
if begin:
mylist = [el for el in mylist if el[1] > begin]
if end:
mylist = [el for el in mylist if el[0] < end]
return MagPyFlag(mylist)
else:
print ("Flagfile not yet existing ...")
return []
elif format == 'pkl':
try:
from pickle import load as pklload
mylist = pklload(open(path,"rb"))
print("loadflags: list {a} successfully loaded, found {b} inputs".format(a=path,b=len(mylist)))
if sensorid:
print(" - extracting data for sensor {}".format(sensorid))
mylist = [el for el in mylist if el[5] == sensorid]
if begin:
mylist = [el for el in mylist if el[1] > begin]
if end:
mylist = [el for el in mylist if el[0] < end]
#print(" -> remaining flags: {b}".format(b=len(mylist)))
return MagPyFlag(mylist)
except:
return []
| geomagpy/magpy | magpy/core/flagging.py | Python | bsd-3-clause | 15,087 |
"""
Copyright 2008, 2009 Free Software Foundation, Inc.
This file is part of GNU Radio
SPDX-License-Identifier: GPL-2.0-or-later
"""
from __future__ import absolute_import, print_function
import sys
import os
from .Config import Config
from . import canvas
from ..core.platform import Platform as CorePlatform
from ..core.utils.backports import ChainMap
class Platform(CorePlatform):
    """GUI platform: the core platform extended with canvas-aware block/port/param
    factories and handling of the GUI preferences file."""

    def __init__(self, *args, **kwargs):
        CorePlatform.__init__(self, *args, **kwargs)
        # Make sure the directory holding the GUI prefs file exists.
        prefs_dir = os.path.dirname(self.config.gui_prefs_file)
        if not os.path.exists(prefs_dir):
            os.mkdir(prefs_dir)
        self._move_old_pref_file()

    def get_prefs_file(self):
        """Return the path of the GUI preferences file."""
        return self.config.gui_prefs_file

    def _move_old_pref_file(self):
        """Migrate a legacy prefs file (default ~/.grc) to the new location."""
        new_path = self.config.gui_prefs_file
        legacy_path = os.environ.get(
            'GRC_PREFS_PATH', os.path.expanduser('~/.grc'))
        if new_path == legacy_path:
            return  # prefs file overridden with env var
        if not os.path.exists(legacy_path) or os.path.exists(new_path):
            return  # nothing to migrate, or the target already exists
        try:
            import shutil
            shutil.move(legacy_path, new_path)
        except Exception as e:
            print(e, file=sys.stderr)

    ##############################################
    # Factories
    ##############################################
    Config = Config
    FlowGraph = canvas.FlowGraph
    Connection = canvas.Connection

    def new_block_class(self, **data):
        """Build a core block class and wrap it with the GUI canvas base."""
        core_cls = CorePlatform.new_block_class(self, **data)
        return canvas.Block.make_cls_with_base(core_cls)

    block_classes_build_in = {key: canvas.Block.make_cls_with_base(cls)
                              for key, cls in CorePlatform.block_classes_build_in.items()}
    block_classes = ChainMap({}, block_classes_build_in)
    port_classes = {key: canvas.Port.make_cls_with_base(cls)
                    for key, cls in CorePlatform.port_classes.items()}
    param_classes = {key: canvas.Param.make_cls_with_base(cls)
                     for key, cls in CorePlatform.param_classes.items()}
| TheWylieStCoyote/gnuradio | grc/gui/Platform.py | Python | gpl-3.0 | 2,219 |
#!/usr/bin/python
"""Convert a file's line endings from Windows (CRLF) to Unix (LF) in place."""
import sys

# Bytes literals: the file is opened in binary mode, so the replacement must
# operate on bytes. (The original str literals raised TypeError on Python 3.)
WINDOWS_LINE_ENDING = b'\r\n'
LINUX_LINE_ENDING = b'\n'


def dos2unix(filename):
    """Rewrite *filename* in place, replacing every CRLF with LF."""
    with open(filename, 'rb') as f:
        content = f.read()
    with open(filename, 'wb') as f:
        f.write(content.replace(WINDOWS_LINE_ENDING, LINUX_LINE_ENDING))


def main(argv):
    """Entry point: expects exactly one argument, the target file.

    Returns a process exit code (1 on bad usage, 0 on success).
    """
    if len(argv) != 2:
        return 1
    dos2unix(argv[1])
    return 0


if __name__ == '__main__':
    sys.exit(main(sys.argv))
"""
.. _ex-spoc-cmc:
====================================
Continuous Target Decoding with SPoC
====================================
Source Power Comodulation (SPoC) :footcite:`DahneEtAl2014` allows to identify
the composition of
orthogonal spatial filters that maximally correlate with a continuous target.
SPoC can be seen as an extension of the CSP for continuous variables.
Here, SPoC is applied to decode the (continuous) fluctuation of an
electromyogram from MEG beta activity using data from
`Cortico-Muscular Coherence example of FieldTrip
<http://www.fieldtriptoolbox.org/tutorial/coherence>`_
"""
# Author: Alexandre Barachant <[email protected]>
# Jean-Remi King <[email protected]>
#
# License: BSD (3-clause)
import matplotlib.pyplot as plt
import mne
from mne import Epochs
from mne.decoding import SPoC
from mne.datasets.fieldtrip_cmc import data_path
from sklearn.pipeline import make_pipeline
from sklearn.linear_model import Ridge
from sklearn.model_selection import KFold, cross_val_predict
# Define parameters
fname = data_path() + '/SubjectCMC.ds'
raw = mne.io.read_raw_ctf(fname)
raw.crop(50., 250.) # crop for memory purposes
# Filter muscular activity to only keep high frequencies
emg = raw.copy().pick_channels(['EMGlft']).load_data()
emg.filter(20., None, fir_design='firwin')
# Filter MEG data to focus on beta band
raw.pick_types(meg=True, ref_meg=True, eeg=False, eog=False).load_data()
raw.filter(15., 30., fir_design='firwin')
# Build epochs as sliding windows over the continuous raw file
# (one event every 0.25 s while each epoch is 1.5 s long, so windows overlap)
events = mne.make_fixed_length_events(raw, id=1, duration=.250)
# Epoch length is 1.5 second; decim=8 reduces the sampling rate of the MEG
# epochs to limit memory use (see mne.Epochs)
meg_epochs = Epochs(raw, events, tmin=0., tmax=1.500, baseline=None,
                    detrend=1, decim=8)
emg_epochs = Epochs(emg, events, tmin=0., tmax=1.500, baseline=None)
# Prepare classification
X = meg_epochs.get_data()
y = emg_epochs.get_data().var(axis=2)[:, 0] # target is EMG power
# Classification pipeline with SPoC spatial filtering and Ridge Regression
spoc = SPoC(n_components=2, log=True, reg='oas', rank='full')
clf = make_pipeline(spoc, Ridge())
# Define a two fold cross-validation
cv = KFold(n_splits=2, shuffle=False)
# Run cross validation: predict the EMG power of each epoch from held-out MEG data
y_preds = cross_val_predict(clf, X, y, cv=cv)
# Plot the True EMG power and the EMG power predicted from MEG data
fig, ax = plt.subplots(1, 1, figsize=[10, 4])
# Epoch onset times, relative to the start of the (cropped) recording
times = raw.times[meg_epochs.events[:, 0] - raw.first_samp]
ax.plot(times, y_preds, color='b', label='Predicted EMG')
ax.plot(times, y, color='r', label='True EMG')
ax.set_xlabel('Time (s)')
ax.set_ylabel('EMG Power')
ax.set_title('SPoC MEG Predictions')
plt.legend()
mne.viz.tight_layout()
plt.show()
##############################################################################
# Plot the contributions to the detected components (i.e., the forward model)
spoc.fit(X, y)
spoc.plot_patterns(meg_epochs.info)
##############################################################################
# References
# ----------
# .. footbibliography::
| rkmaddox/mne-python | examples/decoding/decoding_spoc_CMC.py | Python | bsd-3-clause | 3,025 |
import re
from datetime import datetime # pylint: disable=E0611
from django.contrib.auth.models import User
from django.test import RequestFactory
from django.urls import reverse
from pytz import utc
from oioioi.base.tests import TestCase, fake_time
from oioioi.contests.models import Contest, Submission
from oioioi.disqualification.models import Disqualification
def _disqualify_contestwide():
    """Create a contest-wide (not submission-bound) disqualification for test_user."""
    Disqualification(
        user=User.objects.get(username="test_user"),
        contest=Contest.objects.get(),
        title="I cannot tell!",
        content="Suffice to say, is one of the words the Knights of Ni cannot hear!",
    ).save()
class TestContestController(TestCase):
    """Exercise the disqualification hooks of the contest controller.

    The fixtures provide two submissions for test_user: id=1 carries a
    disqualification ("test_submission_disqualification"), id=2 does not.
    """

    fixtures = [
        "test_contest",
        "test_users",
        "test_submission",
        "test_full_package",
        "test_problem_instance",
        "test_another_submission",
        "test_submission_disqualification",
    ]

    def _get_fake_request(self, user, contest):
        """Return a zero-argument factory that builds minimal fake requests
        carrying the given user, contest and a fixed timestamp."""
        def wrapped():
            fake_request = RequestFactory().request()
            fake_request.user = user
            fake_request.contest = contest
            fake_request.timestamp = datetime(2013, 1, 1, tzinfo=utc)
            return fake_request
        return wrapped

    def test_disqualified(self):
        """A user with a disqualified submission is reported as disqualified,
        but only within the contest the disqualification belongs to."""
        user = User.objects.get(username="test_user")
        contest = Contest.objects.get()
        controller = contest.controller
        submission = Submission.objects.get(id=1)  # disqualified per fixtures
        submission_ok = Submission.objects.get(id=2)  # not disqualified
        fake_request = self._get_fake_request(user, contest)
        self.assertTrue(controller.is_submission_disqualified(submission))
        self.assertFalse(controller.is_submission_disqualified(submission_ok))
        self.assertTrue(controller.has_disqualification_history(submission))
        self.assertFalse(controller.has_disqualification_history(submission_ok))
        self.assertTrue(
            controller.is_any_submission_to_problem_disqualified(
                user, submission.problem_instance
            )
        )
        self.assertTrue(controller.is_user_disqualified(fake_request(), user))
        self.assertTrue(
            controller.user_has_disqualification_history(fake_request(), user)
        )
        self.assertTrue(controller.results_visible(fake_request(), submission))
        # submission_ok is a submission to the same problem
        self.assertTrue(controller.results_visible(fake_request(), submission_ok))
        self.assertNotIn(
            user, controller.exclude_disqualified_users(User.objects.all())
        )
        # The disqualification must not leak into an unrelated contest.
        other_contest = Contest(
            name="finding_another_shrubbery",
            controller_name=contest.controller_name,
            creation_date=contest.creation_date,
        )
        other_contest.save()
        other_fake_request = self._get_fake_request(user, other_contest)
        self.assertFalse(
            other_contest.controller.is_user_disqualified(other_fake_request(), user)
        )
        self.assertFalse(
            other_contest.controller.user_has_disqualification_history(
                other_fake_request(), user
            )
        )

    def test_not_disqualified(self):
        """A user with only clean submissions is never reported as disqualified."""
        user = User.objects.get(username="test_user2")
        contest = Contest.objects.get()
        controller = contest.controller
        # Reassign the clean submission (id=2) to test_user2.
        submission = Submission.objects.get(id=2)
        submission.user = user
        submission.save()
        fake_request = self._get_fake_request(user, contest)
        self.assertFalse(controller.is_submission_disqualified(submission))
        self.assertFalse(controller.has_disqualification_history(submission))
        self.assertFalse(
            controller.is_any_submission_to_problem_disqualified(
                user, submission.problem_instance
            )
        )
        self.assertFalse(controller.is_user_disqualified(fake_request(), user))
        self.assertTrue(controller.results_visible(fake_request(), submission))
        self.assertIn(user, controller.exclude_disqualified_users(User.objects.all()))

    def test_disqualified_contestwide(self):
        """A contest-wide disqualification flags the user but none of the
        individual submissions."""
        # Replace the fixture (submission-bound) entries with one
        # contest-wide disqualification.
        Disqualification.objects.all().delete()
        _disqualify_contestwide()
        user = User.objects.get(username="test_user")
        contest = Contest.objects.get()
        controller = contest.controller
        submission = Submission.objects.get(id=1)
        fake_request = self._get_fake_request(user, contest)
        # No submission-level disqualification remains...
        self.assertFalse(controller.is_submission_disqualified(submission))
        self.assertFalse(controller.has_disqualification_history(submission))
        self.assertFalse(
            controller.is_any_submission_to_problem_disqualified(
                user, submission.problem_instance
            )
        )
        # ...yet the user as a whole is still disqualified.
        self.assertTrue(controller.is_user_disqualified(fake_request(), user))
        self.assertTrue(
            controller.user_has_disqualification_history(fake_request(), user)
        )
        self.assertTrue(controller.results_visible(fake_request(), submission))
        self.assertNotIn(
            user, controller.exclude_disqualified_users(User.objects.all())
        )
class TestViewsMixin(object):
@staticmethod
def remove_whitespaces(response):
return re.sub(r'\s*', '', response.content.decode('utf-8'))
def _assert_disqualification_box(self, response_callback):
raise NotImplementedError
def _assert_submission(self, submission_id, disqualified):
self.assertTrue(self.client.login(username="test_user"))
submission = Submission.objects.get(id=submission_id)
with fake_time(datetime(2015, 1, 1, tzinfo=utc)):
response = self.client.get(
reverse(
"submission",
kwargs={
"submission_id": submission.id,
"contest_id": Contest.objects.get().id,
},
)
)
disqualification_strings = ["Disqualification", "Ni in code", "ninininini"]
for s in disqualification_strings:
if disqualified:
self.assertContains(response, s)
else:
self.assertNotContains(response, s)
self.assertIn(
">" + str(submission.score) + "<", self.remove_whitespaces(response)
)
self.assertContains(response, "Submission " + str(submission.id))
def test_dashboard(self):
self.assertTrue(self.client.login(username="test_user"))
response_cb = lambda: self.client.get(
reverse("contest_dashboard", kwargs=self.contest_kwargs), follow=True
)
self._assert_disqualification_box(response_cb)
def test_my_submissions(self):
self.assertTrue(self.client.login(username="test_user"))
response_cb = lambda: self.client.get(
reverse("my_submissions", kwargs=self.contest_kwargs)
)
self._assert_disqualification_box(response_cb)
def test_user_info_page(self):
self.assertTrue(self.client.login(username='test_admin'))
user = User.objects.get(username="test_user")
contest = Contest.objects.get()
response_callback = lambda: self.client.get(
reverse('user_info', kwargs={'contest_id': contest.id, 'user_id': user.id})
)
self._assert_disqualification_box(response_callback)
class TestViewsProgramSubmissions(TestCase, TestViewsMixin):
    """View tests with program submissions (scores 34 and 42 per the asserts)."""
    fixtures = [
        "test_contest",
        "test_users",
        "test_full_package",
        "test_problem_instance",
        "test_submission",
        "test_another_submission",
        "test_submission_disqualification",
    ]
    def setUp(self):
        # All tested views are scoped to the single fixture contest.
        self.contest_kwargs = {"contest_id": Contest.objects.get().id}
    def _assert_disqualification_box(self, response_callback):
        """Check the disqualification box through three states: submission-level
        only, submission-level plus contest-wide, then contest-wide only."""
        # 1) Submission-level disqualification from the fixtures.
        with fake_time(datetime(2015, 1, 1, tzinfo=utc)):
            response = response_callback()
        self.assertContains(response, "Ni in code")
        self.assertContains(response, "ninininini")
        self.assertContains(response, "Score")
        self.assertIn(">34<", self.remove_whitespaces(response))
        self.assertIn(">42<", self.remove_whitespaces(response))
        # 2) Add a contest-wide disqualification on top.
        _disqualify_contestwide()
        with fake_time(datetime(2015, 1, 1, tzinfo=utc)):
            response = response_callback()
        self.assertContains(response, "Ni in code")
        self.assertContains(response, "I cannot tell")
        self.assertContains(response, "Knights of Ni")
        self.assertIn(">34<", self.remove_whitespaces(response))
        self.assertIn(">42<", self.remove_whitespaces(response))
        # 3) Drop the submission-bound ones; only the contest-wide text remains.
        Disqualification.objects.filter(submission__isnull=False).delete()
        with fake_time(datetime(2015, 1, 1, tzinfo=utc)):
            response = response_callback()
        self.assertNotContains(response, "Ni in code")
        self.assertContains(response, "I cannot tell")
        self.assertContains(response, "Knights of Ni")
        self.assertIn(">34<", self.remove_whitespaces(response))
    def test_ranking(self):
        """Admins see disqualified users in the ranking; contestants do not."""
        contest = Contest.objects.get()
        url = reverse("default_ranking", kwargs={"contest_id": contest.id})
        self.assertTrue(self.client.login(username="test_admin"))
        with fake_time(datetime(2015, 1, 1, tzinfo=utc)):
            response = self.client.get(url)
        self.assertContains(response, "Test User")
        self.assertContains(response, "disqualified")
        self.assertTrue(self.client.login(username="test_user"))
        with fake_time(datetime(2015, 1, 1, tzinfo=utc)):
            response = self.client.get(url)
        self.assertNotContains(response, "Test User")
    def test_ranking_csv(self):
        """The CSV export includes a 'Disqualified' column for admins."""
        contest = Contest.objects.get()
        url = reverse("ranking_csv", kwargs={"contest_id": contest.id, "key": "c"})
        self.assertTrue(self.client.login(username="test_admin"))
        with fake_time(datetime(2015, 1, 1, tzinfo=utc)):
            response = self.client.get(url)
        self.assertContains(response, "Test")
        self.assertContains(response, "Disqualified")
        self.assertContains(response, "Yes")
        self.assertContains(response, str(Submission.objects.get(id=1).score))
    def test_submission(self):
        # id=1 is disqualified per the fixtures; id=2 is clean.
        self._assert_submission(1, True)
        self._assert_submission(2, False)
class TestViewsQuizSubmission(TestCase, TestViewsMixin):
    """View tests with a quiz submission (score 50 per the assert below)."""
    fixtures = [
        "test_contest",
        "test_users",
        "test_quiz_problem",
        "test_problem_instance",
        "test_quiz_submission",
        "test_submission_disqualification",
    ]
    def setUp(self):
        # All tested views are scoped to the single fixture contest.
        self.contest_kwargs = {"contest_id": Contest.objects.get().id}
    def _assert_disqualification_box(self, response_callback):
        """Check the disqualification box for the fixture quiz submission."""
        with fake_time(datetime(2015, 1, 1, tzinfo=utc)):
            response = response_callback()
        self.assertContains(response, "Ni in code")
        self.assertContains(response, "ninininini")
        self.assertContains(response, "Score")
        self.assertIn(">50<", self.remove_whitespaces(response))
    def test_submission(self):
        # id=1 is disqualified per the fixtures.
        self._assert_submission(1, True)
| sio2project/oioioi | oioioi/disqualification/tests.py | Python | gpl-3.0 | 11,387 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.