code stringlengths 3 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1
value | license stringclasses 15
values | size int64 3 1.05M |
|---|---|---|---|---|---|
from __future__ import unicode_literals
from django.db import models
from django.utils import six
from account.conf import settings
# Flag recording whether South (the legacy Django schema-migration tool)
# is importable; used below to conditionally register introspection rules.
HAS_SOUTH = True
try:
    from south.modelsinspector import add_introspection_rules
except ImportError:
    HAS_SOUTH = False
class TimeZoneField(six.with_metaclass(models.SubfieldBase, models.CharField)):
    """A CharField storing a timezone name.

    Choices default to ``settings.ACCOUNT_TIMEZONES`` and the field is
    blank-able with an empty-string default; any of the defaults can be
    overridden by the caller's keyword arguments.
    """

    def __init__(self, *args, **kwargs):
        defaults = {
            "max_length": 100,
            "default": "",
            "choices": settings.ACCOUNT_TIMEZONES,
            "blank": True,
        }
        # Caller-supplied kwargs take precedence over the defaults.
        defaults.update(kwargs)
        # No `return` here: __init__ must return None (the original
        # `return super(...).__init__(...)` only worked because __init__
        # itself returns None).
        super(TimeZoneField, self).__init__(*args, **defaults)
# Tell South how to introspect the custom field (no extra rules needed).
if HAS_SOUTH:
    # Raw string: in a plain string "\." is an invalid escape sequence
    # (DeprecationWarning today, SyntaxError in future Python versions).
    add_introspection_rules([], [r"^account\.fields\.TimeZoneField"])
| Amechi101/indieapp | account/fields.py | Python | mit | 739 |
#!/usr/bin/env python3
import timeit
import multisecret.MultiSecretRoyAdhikari as RA
import multisecret.MultiSecretLinYeh as LY
import multisecret.MultiSecretHerranzRuizSaez as HRS
# Toggle between the HRS combine path (1) and the RA/LY-style path (0).
TEST_HRS = 1

if __name__ == "__main__":
    # Measure time performance of multi-secret sharing algorithms.

    # NIST P-256 prime (2**256 - 2**224 + 2**192 + 2**96 - 1).
    prime = 2 ** 256 - 2 ** 224 + 2 ** 192 + 2 ** 96 - 1
    # prime = 15487469

    # Multi-secret sharing parameters.
    secrets = [7, 9, 41, 15002900, 313, 501]
    n_participants = 8

    # Hardcoded test configurations: one authorized group containing all
    # participants, per secret.  Fresh inner lists per secret so the dealer
    # cannot accidentally share structure between secrets.
    if n_participants == 8:
        access_structures = [[[1, 2, 3, 4, 5, 6, 7, 8]] for _ in secrets]
    elif n_participants == 3:
        access_structures = [[[1, 2, 3]] for _ in secrets]
    else:
        # A bare `raise` outside an except block raises a confusing
        # RuntimeError ("No active exception to re-raise"); raise a
        # meaningful error instead.
        raise ValueError(
            'No hardcoded access structure for %d participants' % n_participants)

    #
    # Performance test: time dealer setup + share computation (HRS scheme).
    #
    start = timeit.default_timer()
    dealer = HRS.Dealer(prime, n_participants, secrets, access_structures)
    pseudo_shares = dealer.split_secret_keys()
    end = timeit.default_timer()

    if not TEST_HRS:
        # RA/LY-style combination of the first secret for its first group.
        secret_num = 0
        group_num = 0
        combined = dealer.combine_secret(secret_num, group_num,
                                         pseudo_shares[secret_num][group_num])
    else:
        # HRS: collect each user's key share for secret 0 and combine them.
        shares_for_secret_0 = []
        for user in range(1, n_participants + 1):
            shares_for_secret_0.append(dealer.get_user_key_share(user)[0])
        secret0 = dealer.combine_secret_key(0, shares_for_secret_0)

    print('Time for {} users and {} secrets with prime {}: {} seconds:'.format(
        n_participants, len(secrets), prime, end - start))

    # Combine first secret for its first access group
    # assert combined == secrets[secret_num]
    # print('Combined secret: ', combined)
| Qbicz/multi-secret-sharing | python/time-performance.py | Python | mit | 2,062 |
import Adafruit_BBIO.PWM as PWM
import time
pin = "P8_13"

# PWM.start(channel, duty, freq=2000, polarity=0)
# duty values are valid 0 (off) to 100 (on)
PWM.start(pin, 50)
# PWM.set_duty_cycle(pin, 25.5)
# PWM.set_frequency(pin, 10)

# Sweep the duty cycle from 0% to 99% in 1% steps, 0.1 s per step.
for i in range(0, 100):
    # print() with a single argument behaves identically on Python 2 and 3;
    # the original `print i` statement was Python-2-only syntax.
    print(i)
    PWM.set_duty_cycle(pin, i)
    time.sleep(0.1)

# Hold the final duty cycle for 5 s, then stop and release the PWM pin.
time.sleep(5)
PWM.stop(pin)
PWM.cleanup()

# set polarity to 1 on start:
# PWM.start(pin, 50, 2000, 1)
| reiser4/amc | test/test-pwm.py | Python | gpl-2.0 | 410 |
#!/bin/python
import sys
def getSumOfAP(n, max):
    """Return the sum of positive multiples of ``n`` strictly below ``max``.

    Uses the arithmetic-progression closed form: there are
    size = (max - 1) // n terms, and their sum is n * size * (size + 1) / 2.
    (The parameter name ``max`` shadows the builtin but is kept for
    backward compatibility with keyword callers.)
    """
    size = (max - 1) // n
    # Floor division keeps the result an int on Python 3; no precision is
    # lost because size * (size + 1) is always even.
    return size * (n + size * n) // 2
def getSumOfMultiples(n):
    """Sum of all numbers below ``n`` divisible by 3 or 5.

    Inclusion-exclusion: count multiples of 3 and of 5, then subtract the
    multiples of 15 that were counted twice.
    """
    return getSumOfAP(3, n) + getSumOfAP(5, n) - getSumOfAP(15, n)
def main():
    """Read a test-case count, then one ``n`` per line; print the sum of
    multiples of 3 or 5 below ``n`` for each (Project Euler problem 1).

    Note: this file is Python 2 (raw_input/xrange), kept as such.
    """
    numInputs = int(raw_input().strip())
    for idx in xrange(numInputs):
        n = int(raw_input().strip())
        # Bug fix: the original called getSumOfAP(n), which is missing a
        # required argument (TypeError) and is the wrong function for the
        # problem; the answer is the inclusion-exclusion sum.
        ans = getSumOfMultiples(n)
        print(ans)
# Script entry point.
if __name__ == '__main__':
    main()
| pavithranrao/projectEuler | projectEulerPython/problem001.py | Python | mit | 429 |
"""Config flow for the SolarEdge platform."""
from requests.exceptions import ConnectTimeout, HTTPError
import solaredge
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.const import CONF_API_KEY, CONF_NAME
from homeassistant.core import HomeAssistant, callback
from homeassistant.util import slugify
from .const import CONF_SITE_ID, DEFAULT_NAME, DOMAIN
@callback
def solaredge_entries(hass: HomeAssistant):
    """Return the site_ids for the domain."""
    return {
        entry.data[CONF_SITE_ID]
        for entry in hass.config_entries.async_entries(DOMAIN)
    }
class SolarEdgeConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
    """Handle a config flow."""

    VERSION = 1
    CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_POLL

    def __init__(self) -> None:
        """Initialize the config flow."""
        # Maps form field name -> error key displayed on validation failure.
        self._errors = {}

    def _site_in_configuration_exists(self, site_id) -> bool:
        """Return True if site_id exists in configuration."""
        if site_id in solaredge_entries(self.hass):
            return True
        return False

    def _check_site(self, site_id, api_key) -> bool:
        """Check if we can connect to the soleredge api service."""
        # Blocking HTTP call; invoked via the executor in async_step_user.
        api = solaredge.Solaredge(api_key)
        try:
            response = api.get_details(site_id)
        except (ConnectTimeout, HTTPError):
            self._errors[CONF_SITE_ID] = "could_not_connect"
            return False
        try:
            # Only sites the API reports as "active" are accepted.
            if response["details"]["status"].lower() != "active":
                self._errors[CONF_SITE_ID] = "site_not_active"
                return False
        except KeyError:
            # Response payload did not have the expected shape.
            self._errors[CONF_SITE_ID] = "api_failure"
            return False
        return True

    async def async_step_user(self, user_input=None):
        """Step when user intializes a integration."""
        self._errors = {}
        if user_input is not None:
            name = slugify(user_input.get(CONF_NAME, DEFAULT_NAME))
            if self._site_in_configuration_exists(user_input[CONF_SITE_ID]):
                self._errors[CONF_SITE_ID] = "site_exists"
            else:
                site = user_input[CONF_SITE_ID]
                api = user_input[CONF_API_KEY]
                # _check_site does blocking I/O, so run it in the executor.
                can_connect = await self.hass.async_add_executor_job(
                    self._check_site, site, api
                )
                if can_connect:
                    return self.async_create_entry(
                        title=name, data={CONF_SITE_ID: site, CONF_API_KEY: api}
                    )
                # On failure, fall through and re-show the form with errors.
        else:
            # First display: pre-fill the form with defaults.
            user_input = {}
            user_input[CONF_NAME] = DEFAULT_NAME
            user_input[CONF_SITE_ID] = ""
            user_input[CONF_API_KEY] = ""
        return self.async_show_form(
            step_id="user",
            data_schema=vol.Schema(
                {
                    vol.Required(
                        CONF_NAME, default=user_input.get(CONF_NAME, DEFAULT_NAME)
                    ): str,
                    vol.Required(CONF_SITE_ID, default=user_input[CONF_SITE_ID]): str,
                    vol.Required(CONF_API_KEY, default=user_input[CONF_API_KEY]): str,
                }
            ),
            errors=self._errors,
        )

    async def async_step_import(self, user_input=None):
        """Import a config entry."""
        # Abort (rather than show form errors) when the site is already
        # configured; import flows have no user to correct the input.
        if self._site_in_configuration_exists(user_input[CONF_SITE_ID]):
            return self.async_abort(reason="site_exists")
        return await self.async_step_user(user_input)
| leppa/home-assistant | homeassistant/components/solaredge/config_flow.py | Python | apache-2.0 | 3,551 |
from unittest import TestCase
import json.encoder
# (input, expected output) pairs for encode_basestring_ascii.
CASES = [
    (u'/\\"\ucafe\ubabe\uab98\ufcde\ubcda\uef4a\x08\x0c\n\r\t`1~!@#$%^&*()_+-=[]{}|;:\',./<>',
     '"/\\\\\\"\\ucafe\\ubabe\\uab98\\ufcde\\ubcda\\uef4a\\b\\f\\n\\r\\t`1~!@#$%^&*()_+-=[]{}|;:\',./<>"'),
    (u'\u0123\u4567\u89ab\ucdef\uabcd\uef4a', '"\\u0123\\u4567\\u89ab\\ucdef\\uabcd\\uef4a"'),
    (u'controls', '"controls"'),
    # NOTE(review): this expected value disagrees with the identical input
    # further down (which expects '"\\b\\f\\n\\r\\t"') -- confirm which is
    # intended before relying on this case.
    (u'\x08\x0c\n\r\t', '"\\x08\\x0c\\n\\r\\t"'),
    (u'{"object with 1 member":["array with 1 elements"]}',
     '"{\\"object with 1 member\\":[\\"array with 1 elements\\"]}"'),
    (u' s p a c e d ', '" s p a c e d "'),
    # Fixed: a \U escape requires exactly 8 hex digits; the original
    # u'\U001d120' (7 digits) is a SyntaxError.  U+1D120 (MUSICAL SYMBOL
    # G CLEF) encodes to the surrogate pair \ud834\udd20, matching the
    # expected output.
    (u'\U0001d120', '"\\ud834\\udd20"'),
    (u'\u03b1\u03a9', '"\\u03b1\\u03a9"'),
    ('\xce\xb1\xce\xa9', '"\\u03b1\\u03a9"'),
    (u'\u03b1\u03a9', '"\\u03b1\\u03a9"'),
    ('\xce\xb1\xce\xa9', '"\\u03b1\\u03a9"'),
    (u'\u03b1\u03a9', '"\\u03b1\\u03a9"'),
    (u'\u03b1\u03a9', '"\\u03b1\\u03a9"'),
    (u"`1~!@#$%^&*()_+-={':[,]}|;,</>?", '"`1~!@#$%^&*()_+-={\':[,]}|;,</>?"'),
    (u'\x08\x0c\n\r\t', '"\\b\\f\\n\\r\\t"'),
    (u'\u0123\u4567\u89ab\ucdef\uabcd\uef4a', '"\\u0123\\u4567\\u89ab\\ucdef\\uabcd\\uef4a"'),
]
class TestEncodeBaseStringAscii(TestCase):
    """Run the shared CASES table against both the pure-Python and the
    C-accelerated implementations of json.encoder's encode_basestring_ascii.
    """

    def test_py_encode_basestring_ascii(self):
        self._test_encode_basestring_ascii(json.encoder.py_encode_basestring_ascii)

    def test_c_encode_basestring_ascii(self):
        # The C accelerator may be absent in this interpreter build.
        if not json.encoder.c_encode_basestring_ascii:
            return
        self._test_encode_basestring_ascii(json.encoder.c_encode_basestring_ascii)

    def _test_encode_basestring_ascii(self, encode_basestring_ascii):
        """Assert every (input, expected) pair in CASES for the given encoder."""
        fname = encode_basestring_ascii.__name__
        for input_string, expect in CASES:
            result = encode_basestring_ascii(input_string)
            # assertEqual, not assertEquals: the latter is a deprecated
            # alias that was removed in Python 3.12.
            self.assertEqual(result, expect,
                             '{0!r} != {1!r} for {2}({3!r})'.format(
                                 result, expect, fname, input_string))
| billygoo/dev-365 | python/just_coding/Lib/json/tests/test_encode_basestring_ascii.py | Python | gpl-2.0 | 1,934 |
# $HeadURL: $
''' LogPolicyResultAction
'''
from DIRAC import S_OK, S_ERROR
from DIRAC.ResourceStatusSystem.PolicySystem.Actions.BaseAction import BaseAction
from DIRAC.ResourceStatusSystem.Utilities import Utils
ResourceManagementClient = getattr(Utils.voimport( 'DIRAC.ResourceStatusSystem.Client.ResourceManagementClient' ),'ResourceManagementClient')
__RCSID__ = '$Id: $'
class LogPolicyResultAction(BaseAction):
    """Action that writes one database record per policy result contained
    in the singlePolicyResults list.
    """

    def __init__(self, name, decisionParams, enforcementResult, singlePolicyResults,
                 clients=None):
        super(LogPolicyResultAction, self).__init__(name, decisionParams, enforcementResult,
                                                    singlePolicyResults, clients)
        # Reuse an injected client when one is provided, otherwise build our own.
        if clients is not None and 'ResourceManagementClient' in clients:
            self.rmClient = clients['ResourceManagementClient']
        else:
            self.rmClient = ResourceManagementClient()

    def run(self):
        """Validate the decision parameters and store every single policy
        result via addOrModifyPolicyResult.
        """
        element = self.decisionParams['element']
        if element is None:
            return S_ERROR('element should not be None')

        name = self.decisionParams['name']
        if name is None:
            return S_ERROR('name should not be None')

        statusType = self.decisionParams['statusType']
        if statusType is None:
            return S_ERROR('statusType should not be None')

        for policyResult in self.singlePolicyResults:

            status = policyResult['Status']
            if status is None:
                return S_ERROR('status should not be None')

            reason = policyResult['Reason']
            if reason is None:
                return S_ERROR('reason should not be None')

            policyName = policyResult['Policy']['name']
            if policyName is None:
                return S_ERROR('policyName should not be None')

            # Truncate reason so it fits in the database column.
            if len(reason) > 508:
                reason = reason[:508] + '..'

            polUpdateRes = self.rmClient.addOrModifyPolicyResult(element=element,
                                                                 name=name,
                                                                 policyName=policyName,
                                                                 statusType=statusType,
                                                                 status=status,
                                                                 reason=reason)
            if not polUpdateRes['OK']:
                return polUpdateRes

        return S_OK()
################################################################################
#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF
| arrabito/DIRAC | ResourceStatusSystem/PolicySystem/Actions/LogPolicyResultAction.py | Python | gpl-3.0 | 2,914 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-06-06 14:17
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import modelcluster.fields
import wagtail.wagtailcore.fields
import wagtail.wagtailembeds.blocks
import wagtail.wagtailimages.blocks
import wiki.blocks
import wiki.fields
class Migration(migrations.Migration):
    # Initial schema for the wiki app: the page types (BasicWikiPage,
    # MarkdownPage, WikiPage, WikiSectionPage, RedirectWithBlurbPage) plus
    # their orderable related-link child models.  Auto-generated by
    # Django 1.11 -- do not hand-edit; regenerate instead.

    initial = True

    dependencies = [
        ('wagtailcore', '0033_remove_golive_expiry_help_text'),
        ('core', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='BasicWikiPage',
            fields=[
                ('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
                ('repeat_in_subnav', models.BooleanField(default=False, help_text="If checked, a link to this page will be repeated alongside it's direct children when displaying a sub-navigation for this page.", verbose_name='repeat in sub-navigation')),
                ('repeated_item_text', models.CharField(blank=True, help_text="e.g. 'Section home' or 'Overview'. If left blank, the page title will be used.", max_length=255, verbose_name='repeated item link text')),
                ('blurb', models.TextField(help_text=b'Text visible on section pages.', max_length=300)),
                ('content', wiki.fields.MarkdownField(help_text=b"\n Standard MarkDown should all work, but also see:\n 'python markdown' as that is the base library used for rendering,\n and see 'pymdown-extensions' for some other included extensions\n to 'python markdown'.\n ")),
            ],
            options={
                'abstract': False,
            },
            bases=('wagtailcore.page',),
        ),
        migrations.CreateModel(
            name='BasicWikiPageRelatedLink',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('sort_order', models.IntegerField(blank=True, editable=False, null=True)),
                ('link_external', models.URLField(blank=True, verbose_name=b'External link')),
                ('title', models.CharField(help_text=b'Link title', max_length=255)),
            ],
            options={
                'ordering': ['sort_order'],
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='MarkdownPage',
            fields=[
                ('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
                ('repeat_in_subnav', models.BooleanField(default=False, help_text="If checked, a link to this page will be repeated alongside it's direct children when displaying a sub-navigation for this page.", verbose_name='repeat in sub-navigation')),
                ('repeated_item_text', models.CharField(blank=True, help_text="e.g. 'Section home' or 'Overview'. If left blank, the page title will be used.", max_length=255, verbose_name='repeated item link text')),
                ('content', wagtail.wagtailcore.fields.StreamField([(b'markdown', wiki.blocks.MarkdownBlock(icon=b'code')), (b'image', wagtail.wagtailimages.blocks.ImageChooserBlock()), (b'video', wagtail.wagtailembeds.blocks.EmbedBlock())])),
            ],
            options={
                'abstract': False,
            },
            bases=('wagtailcore.page',),
        ),
        migrations.CreateModel(
            name='MarkdownPageRelatedLink',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('sort_order', models.IntegerField(blank=True, editable=False, null=True)),
                ('link_external', models.URLField(blank=True, verbose_name=b'External link')),
                ('title', models.CharField(help_text=b'Link title', max_length=255)),
            ],
            options={
                'ordering': ['sort_order'],
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='RedirectWithBlurbPage',
            fields=[
                ('redirectpage_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='core.RedirectPage')),
                ('blurb', models.TextField(help_text=b'Text visible on section pages.', max_length=300)),
            ],
            options={
                'abstract': False,
            },
            bases=('core.redirectpage',),
        ),
        migrations.CreateModel(
            name='WikiPage',
            fields=[
                ('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
                ('repeat_in_subnav', models.BooleanField(default=False, help_text="If checked, a link to this page will be repeated alongside it's direct children when displaying a sub-navigation for this page.", verbose_name='repeat in sub-navigation')),
                ('repeated_item_text', models.CharField(blank=True, help_text="e.g. 'Section home' or 'Overview'. If left blank, the page title will be used.", max_length=255, verbose_name='repeated item link text')),
                ('blurb', models.TextField(help_text=b'Text visible on section pages.', max_length=300)),
                ('content', wagtail.wagtailcore.fields.StreamField([(b'markdown', wiki.blocks.MarkdownBlock(icon=b'code')), (b'image', wagtail.wagtailimages.blocks.ImageChooserBlock()), (b'video', wagtail.wagtailembeds.blocks.EmbedBlock())])),
            ],
            options={
                'abstract': False,
            },
            bases=('wagtailcore.page',),
        ),
        migrations.CreateModel(
            name='WikiPageRelatedLink',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('sort_order', models.IntegerField(blank=True, editable=False, null=True)),
                ('link_external', models.URLField(blank=True, verbose_name=b'External link')),
                ('title', models.CharField(help_text=b'Link title', max_length=255)),
            ],
            options={
                'ordering': ['sort_order'],
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='WikiSectionPage',
            fields=[
                ('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
                ('repeat_in_subnav', models.BooleanField(default=False, help_text="If checked, a link to this page will be repeated alongside it's direct children when displaying a sub-navigation for this page.", verbose_name='repeat in sub-navigation')),
                ('repeated_item_text', models.CharField(blank=True, help_text="e.g. 'Section home' or 'Overview'. If left blank, the page title will be used.", max_length=255, verbose_name='repeated item link text')),
                ('blurb', models.TextField(help_text=b'Text visible on section pages.', max_length=300)),
                ('content', wiki.fields.MarkdownField(help_text=b"\n Standard MarkDown should all work, but also see:\n 'python markdown' as that is the base library used for rendering,\n and see 'pymdown-extensions' for some other included extensions\n to 'python markdown'.\n ")),
            ],
            options={
                'abstract': False,
            },
            bases=('wagtailcore.page',),
        ),
        # Foreign keys added after model creation to avoid ordering issues.
        migrations.AddField(
            model_name='wikipagerelatedlink',
            name='link_page',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailcore.Page'),
        ),
        migrations.AddField(
            model_name='wikipagerelatedlink',
            name='page',
            field=modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='related_links', to='wiki.WikiPage'),
        ),
        migrations.AddField(
            model_name='markdownpagerelatedlink',
            name='link_page',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailcore.Page'),
        ),
        migrations.AddField(
            model_name='markdownpagerelatedlink',
            name='page',
            field=modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='related_links', to='wiki.MarkdownPage'),
        ),
        migrations.AddField(
            model_name='basicwikipagerelatedlink',
            name='link_page',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailcore.Page'),
        ),
        migrations.AddField(
            model_name='basicwikipagerelatedlink',
            name='page',
            field=modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='related_links', to='wiki.BasicWikiPage'),
        ),
    ]
| Uncaught-Exceptions/minedash | wiki/migrations/0001_initial.py | Python | gpl-2.0 | 9,625 |
###############################################################################
# Name: misc/gdb/print.py
# Purpose: pretty-printers for wx data structures: this file is meant to
# be sourced from gdb using "source -p" (or, better, autoloaded
# in the future...)
# Author: Vadim Zeitlin
# Created: 2009-01-04
# Copyright: (c) 2009 Vadim Zeitlin
# Licence: wxWindows licence
###############################################################################
# Define wxFooPrinter class implementing (at least) to_string() method for each
# wxFoo class we want to pretty print. Then just add wxFoo to the types array
# in wxLookupFunction at the bottom of this file.
import datetime
# shamelessly stolen from std::string example
class wxStringPrinter:
    """Pretty-print a wxString by digging out the libstdc++ string buffer."""

    def __init__(self, val):
        self.val = val

    def to_string(self):
        impl = self.val['m_impl']
        return impl['_M_dataplus']['_M_p']

    def display_hint(self):
        # Tells gdb to quote/escape the value like a string.
        return 'string'
class wxDateTimePrinter:
    """Pretty-print a wxDateTime as an ISO-8601-like timestamp."""

    def __init__(self, val):
        self.val = val

    def to_string(self):
        # A value of type wxLongLong can't be used in Python arithmetic
        # expressions directly, so convert it to a plain long long first.
        msec = self.val['m_time'].cast(gdb.lookup_type('long long'))
        # 0x8000000000000000 is the sentinel for an invalid wxDateTime.
        if msec == 0x8000000000000000:
            return 'NONE'
        seconds = int(msec / 1000)
        # NOTE(review): fromtimestamp uses the local timezone -- confirm
        # that is the intended display for debugger output.
        return datetime.datetime.fromtimestamp(seconds).isoformat(' ')
class wxFileNamePrinter:
    """Pretty-print a wxFileName by asking the inferior for its full path."""

    def __init__(self, val):
        self.val = val

    def to_string(self):
        # Calling GetFullPath() in the inferior is simpler than rebuilding
        # the path from m_dirs/m_name/m_ext here, but it requires a live
        # process, so it will not work when debugging from a core file
        # alone.  Rewrite using the member fields if that ever matters.
        expr = '((wxFileName*)%s)->GetFullPath(0)' % self.val.address
        return gdb.parse_and_eval(expr)
class wxXYPrinterBase:
    """Shared base caching the x/y members of point-like wx structs."""

    def __init__(self, val):
        self.x, self.y = val['x'], val['y']
class wxPointPrinter(wxXYPrinterBase):
    # Render a wxPoint as "(x, y)".  %d is kept deliberately: it coerces
    # gdb.Value integers correctly.
    def to_string(self):
        return '(%d, %d)' % (self.x, self.y)
class wxSizePrinter(wxXYPrinterBase):
    # Render a wxSize as "width*height" (x/y hold width/height here).
    def to_string(self):
        return '%d*%d' % (self.x, self.y)
class wxRectPrinter(wxXYPrinterBase):
    # Render a wxRect as "(x, y) width*height".
    def __init__(self, val):
        wxXYPrinterBase.__init__(self, val)
        self.width = val['width']
        self.height = val['height']

    def to_string(self):
        return '(%d, %d) %d*%d' % (self.x, self.y, self.width, self.height)
# The function gdb calls to find a pretty-printer for a value.
def wxLookupFunction(val):
    """Return a printer instance for supported wx types, else None."""
    # A linear scan is fine for this handful of types; switch to a dict of
    # type-name -> printer class if the list ever grows.
    for tag in ('wxString', 'wxDateTime', 'wxFileName',
                'wxPoint', 'wxSize', 'wxRect'):
        if val.type.tag == tag:
            # Resolve the printer class by naming convention: <tag>Printer.
            return globals()[tag + 'Printer'](val)
    return None
gdb.pretty_printers.append(wxLookupFunction)
| adouble42/nemesis-current | wxWidgets-3.1.0/misc/gdb/print.py | Python | bsd-2-clause | 3,484 |
import cv2
import numpy as np
import pascal
from keras import backend as K
# Dataset sizes and label cardinality for the PASCAL VOC loaders below.
nb_train_samples = 3000 # 3000 training samples
nb_valid_samples = 100 # 100 validation samples
num_classes = 20
def load_pascal_data(version="VOC2007"):
    """Load the PASCAL VOC train/validation split for *version*.

    Returns ``(X_train, Y_train, X_valid, Y_valid)`` exactly as supplied
    by ``pascal.load_data`` -- no preprocessing is applied here.

    A previous revision (left disabled in the original file) additionally
    swapped RGB->BGR and subtracted the ImageNet mean pixel for Theano,
    resized images according to the Keras image dim ordering, and one-hot
    encoded the labels; re-introduce those steps here if they are needed.
    """
    (X_train, Y_train), (X_valid, Y_valid) = pascal.load_data(version)
    return X_train, Y_train, X_valid, Y_valid
| whoisever/vgg16_finetune_mutli_label | load_pascal.py | Python | mit | 1,652 |
"""
WSGI config for quizshowdown project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/
"""
import os

# The settings module must be configured *before* get_wsgi_application()
# pulls in the Django machinery, so keep this statement ahead of the import.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "quizshowdown.settings")

from django.core.wsgi import get_wsgi_application

# WSGI callable used by the application server.
application = get_wsgi_application()
| thoreg/quiz | quizshowdown/quizshowdown/wsgi.py | Python | mit | 399 |
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import wraps
from flask import Response
client_auth = None
def init_app(app):
    """No-op initializer: the deny-all backend needs no application setup."""
    return None
def requires_authentication(function):
    """Decorator that rejects every request with HTTP 403 Forbidden."""
    @wraps(function)
    def denied(*args, **kwargs):
        # The wrapped view is never invoked -- all access is denied.
        return Response("Forbidden", 403)
    return denied
| KL-WLCR/incubator-airflow | airflow/api/auth/backend/deny_all.py | Python | apache-2.0 | 834 |
# coding=utf-8
"""
The Click Detail Report Members API endpoint
Documentation: http://developer.mailchimp.com/documentation/mailchimp/reference/reports/click-details/members/
Schema: https://api.mailchimp.com/schema/3.0/Reports/ClickDetails/Members/Instance.json
"""
from __future__ import unicode_literals
from mailchimp3.baseapi import BaseApi
from mailchimp3.helpers import check_subscriber_hash
class ReportClickDetailMembers(BaseApi):
    """
    Get information about specific subscribers who clicked on links in a
    campaign.
    """

    def __init__(self, *args, **kwargs):
        """
        Initialize the endpoint
        """
        super(ReportClickDetailMembers, self).__init__(*args, **kwargs)
        self.endpoint = 'reports'
        # Ids of the most recently accessed campaign/link/subscriber; set
        # by all()/get() so the instance records what was last queried.
        self.campaign_id = None
        self.link_id = None
        self.subscriber_hash = None

    def all(self, campaign_id, link_id, get_all=False, **queryparams):
        """
        Get information about list members who clicked on a specific link in a
        campaign.

        :param campaign_id: The unique id for the campaign.
        :type campaign_id: :py:class:`str`
        :param link_id: The id for the link.
        :type link_id: :py:class:`str`
        :param get_all: Should the query get all results
        :type get_all: :py:class:`bool`
        :param queryparams: The query string parameters
            queryparams['fields'] = []
            queryparams['exclude_fields'] = []
            queryparams['count'] = integer
            queryparams['offset'] = integer
        """
        self.campaign_id = campaign_id
        self.link_id = link_id
        self.subscriber_hash = None
        if get_all:
            # _iterate pages through the API until every result is fetched.
            return self._iterate(url=self._build_path(campaign_id, 'click-details', link_id, 'members'), **queryparams)
        else:
            return self._mc_client._get(
                url=self._build_path(campaign_id, 'click-details', link_id, 'members'),
                **queryparams
            )

    def get(self, campaign_id, link_id, subscriber_hash, **queryparams):
        """
        Get information about a specific subscriber who clicked a link in a
        specific campaign.

        :param campaign_id: The unique id for the campaign.
        :type campaign_id: :py:class:`str`
        :param link_id: The id for the link.
        :type link_id: :py:class:`str`
        :param subscriber_hash: The MD5 hash of the lowercase version of the
          list member’s email address.
        :type subscriber_hash: :py:class:`str`
        :param queryparams: The query string parameters
            queryparams['fields'] = []
            queryparams['exclude_fields'] = []
        """
        # Normalizes the argument: an email address is converted to the MD5
        # hash the API expects.
        subscriber_hash = check_subscriber_hash(subscriber_hash)
        self.campaign_id = campaign_id
        self.link_id = link_id
        self.subscriber_hash = subscriber_hash
        return self._mc_client._get(
            url=self._build_path(campaign_id, 'click-details', link_id, 'members', subscriber_hash),
            **queryparams
        )
| charlesthk/python-mailchimp | mailchimp3/entities/reportclickdetailmembers.py | Python | mit | 3,016 |
import json
import pathlib
import sys
import boto3
dist_folder = pathlib.Path.cwd() / 'dist'

try:
    # Pick the first wheel found in ./dist (assumes a single-wheel build).
    f = next(dist_folder.glob('*.whl'))
except StopIteration:
    print("No .whl files found in ./dist!")
    # NOTE(review): exits with status 0 even though nothing was uploaded --
    # confirm whether CI should fail here (sys.exit(1)) instead.
    sys.exit()

print("Uploading", f.name)
s3 = boto3.client('s3')
# Publish the wheel publicly under nightly/dist/.
s3.upload_file(str(f), 'releases.wagtail.io', 'nightly/dist/' + f.name, ExtraArgs={'ACL': 'public-read'})

print("Updating latest.json")
# latest.json points consumers at the URL of the newest nightly wheel.
boto3.resource('s3').Object('releases.wagtail.io', 'nightly/latest.json').put(
    ACL='public-read',
    Body=json.dumps({
        "url": 'https://releases.wagtail.io/nightly/dist/' + f.name,
    })
)
| kaedroho/wagtail | scripts/nightly/upload.py | Python | bsd-3-clause | 615 |
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Provides generic XML views
This modules defines several basic views for serializing
data to XML. Submodels that have already been serialized
as XML may have their string values marked with `__is_xml__
= True` using :class:`openstack.common.report.utils.StringWithAttrs`
(each of the classes within this module does this automatically,
and non-naive serializers check for this attribute and handle
such strings specially)
"""
import collections as col
import copy
import xml.etree.ElementTree as ET
import six
import nova.openstack.common.report.utils as utils
class KeyValueView(object):
    """A Key-Value XML View

    This view performs advanced serialization of a data model
    into XML. It first deserializes any values marked as XML so
    that they can be properly reserialized later. It then follows
    the following rules to perform serialization:

    key : text/xml
        The tag name is the key name, and the contents are the text or xml
    key : Sequence
        A wrapper tag is created with the key name, and each item is placed
        in an 'item' tag
    key : Mapping
        A wrapper tag is created with the key name, and the serialize is called
        on each key-value pair (such that each key gets its own tag)

    :param str wrapper_name: the name of the top-level element
    """

    def __init__(self, wrapper_name="model"):
        self.wrapper_name = wrapper_name

    def __call__(self, model):
        # The abstract base classes moved to collections.abc in Python 3.3
        # and the old `collections.Mapping`/`collections.Sequence` aliases
        # were removed in Python 3.10; resolve them lazily so Python 2
        # keeps working too.
        try:
            from collections import abc as col_abc
        except ImportError:  # Python 2
            col_abc = col

        # Re-parse submodels that were already serialized as XML so they can
        # be embedded as real elements instead of escaped text.
        cpy = copy.deepcopy(model)
        for key, valstr in model.items():
            if getattr(valstr, '__is_xml__', False):
                cpy[key] = ET.fromstring(valstr)

        def serialize(rootmodel, rootkeyname):
            # Recursively convert rootmodel into an Element named rootkeyname.
            res = ET.Element(rootkeyname)

            if isinstance(rootmodel, col_abc.Mapping):
                for key in rootmodel:
                    res.append(serialize(rootmodel[key], key))
            elif (isinstance(rootmodel, col_abc.Sequence)
                    and not isinstance(rootmodel, six.string_types)):
                for val in rootmodel:
                    res.append(serialize(val, 'item'))
            elif ET.iselement(rootmodel):
                res.append(rootmodel)
            else:
                res.text = str(rootmodel)

            return res

        # Mark the result as already-XML so enclosing views embed it verbatim.
        res = utils.StringWithAttrs(ET.tostring(serialize(cpy,
                                                          self.wrapper_name)))
        res.__is_xml__ = True
        return res
| ChinaMassClouds/copenstack-server | openstack/src/nova-2014.2/nova/openstack/common/report/views/xml/generic.py | Python | gpl-2.0 | 3,115 |
# Copyright 2014 Novo Nordisk Foundation Center for Biosustainability, DTU.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module provides reference models for a number of supported host organisms (currently Escherichia coli and
Sacharomyces cerevisiae)."""
from __future__ import absolute_import, print_function
import os
from functools import partial
from lazy_object_proxy import Proxy
import cameo
from cameo import load_model
from cameo import util
__all__ = ['hosts']
MODEL_DIRECTORY = os.path.join(os.path.join(cameo.__path__[0]), 'models/json')
class Host(object):
    """A reference host organism with lazily-loaded metabolic models.

    Each entry of *models* is loaded on first access (via a lazy Proxy) and
    annotated with the corresponding biomass reaction id and carbon source
    exchange reaction id.
    """

    def __init__(self, name='', models=None, biomass=None, carbon_sources=None):
        models = models or []
        biomass = biomass or []
        carbon_sources = carbon_sources or []
        self.name = name
        self.models = util.IntelliContainer()
        for model_id, biomass_rxn, carbon_source in zip(models, biomass, carbon_sources):
            # Bind the per-model values as default arguments: the Proxy only
            # calls lazy_model_init *after* this loop has finished, so
            # closing over the loop variables directly (as the original code
            # did) would give every model the biomass/carbon source of the
            # final iteration (late-binding closure bug).
            def lazy_model_init(path, biomass_rxn=biomass_rxn,
                                carbon_source=carbon_source):
                model = load_model(path)
                setattr(model, "biomass", biomass_rxn)
                setattr(model, "carbon_source", carbon_source)
                return model

            model = Proxy(partial(lazy_model_init,
                                  os.path.join(MODEL_DIRECTORY, model_id + '.json')))
            self.models[model_id] = model

    def __str__(self):
        return self.name
class Hosts(object):
    """Collection of Host objects, attribute-accessible by host id."""

    def __init__(self, host_spec, aliases=None):
        """Build one Host per entry of *host_spec*.

        *host_spec* maps a host id to the keyword arguments of ``Host``;
        *aliases* is an optional list of ``(existing_id, alias)`` pairs that
        expose an already registered host under an extra attribute name.
        """
        self._host_spec = host_spec
        self._hosts = []
        for identifier, spec in self._host_spec.items():
            organism = Host(**spec)
            self._hosts.append(organism)
            setattr(self, identifier, organism)
        if aliases and isinstance(aliases, list):
            for original, alias in aliases:
                setattr(self, alias, getattr(self, original))

    def __iter__(self):
        return iter(self._hosts)

    def __dir__(self):
        return list(self._host_spec.keys())
# Specification of the supported host organisms: for each host, the model
# ids to load plus the matching biomass reaction and default carbon-source
# exchange reaction (parallel tuples, consumed pairwise by Host.__init__).
HOST_SPECS = {
    # 'iAF1260', 'iJO1366', 'EcoliCore'
    'ecoli': {
        'name': 'Escherichia coli',
        'models': ('iJO1366',),
        'biomass': ('BIOMASS_Ec_iJO1366_core_53p95M',),
        'carbon_sources': ('EX_glc__D_e',)
    },
    # 'iND750',
    'scerevisiae': {
        'name': 'Saccharomyces cerevisiae',
        'models': ('iMM904',),
        'biomass': ('BIOMASS_SC5_notrace',),
        'carbon_sources': ('EX_glc__D_e',)
    }
}
# Public entry point; 'yeast' is an alias for 'scerevisiae'.
hosts = Hosts(HOST_SPECS, aliases=[('scerevisiae', 'yeast')])
| biosustain/cameo | cameo/api/hosts.py | Python | apache-2.0 | 2,939 |
# -*- coding: utf-8 -*-
"""
sphinx.transforms
~~~~~~~~~~~~~~~~~
Docutils transforms used by Sphinx when reading documents.
:copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from docutils import nodes
from docutils.transforms import Transform
from docutils.transforms.parts import ContentsFilter
from sphinx import addnodes
from sphinx.locale import _
from sphinx.util.i18n import format_date
from sphinx.util.nodes import apply_source_workaround
# Substitution names for which Sphinx supplies a value from the config
# when the document does not define them itself.
default_substitutions = set([
    'version',
    'release',
    'today',
])
class DefaultSubstitutions(Transform):
    """Fill in the builtin |version|/|release|/|today| substitutions,
    unless the document defines them itself."""
    # run before the default Substitutions
    default_priority = 210

    def apply(self):
        env = self.document.settings.env
        config = env.config
        # only handle those not otherwise defined in the document
        undefined = default_substitutions - set(self.document.substitution_defs)
        for ref in self.document.traverse(nodes.substitution_reference):
            name = ref['refname']
            if name not in undefined:
                continue
            text = config[name]
            if name == 'today' and not text:
                # special handling: 'today_fmt' can hold a strftime format
                text = format_date(config.today_fmt or _('%b %d, %Y'),
                                   language=config.language, warn=env.warn)
            ref.replace_self(nodes.Text(text, text))
class MoveModuleTargets(Transform):
    """Move a module target that is the first thing in a section up into
    the section node itself.

    XXX Python specific
    """
    default_priority = 210

    def apply(self):
        for target in self.document.traverse(nodes.target):
            if not target['ids']:
                continue
            parent = target.parent
            # index 0 of a section is its title node, so the target is
            # "first" in the section body when it sits at index 1
            if ('ismod' in target and
                    parent.__class__ is nodes.section and
                    parent.index(target) == 1):
                parent['ids'][0:0] = target['ids']
                parent.remove(target)
class HandleCodeBlocks(Transform):
    """Code-block related doctree transformations."""
    default_priority = 210

    def apply(self):
        # a blockquote that consists solely of doctest blocks is unwrapped,
        # leaving the doctest blocks at the blockquote's position
        for quote in self.document.traverse(nodes.block_quote):
            if all(isinstance(child, nodes.doctest_block)
                   for child in quote.children):
                quote.replace_self(quote.children)
        # NOTE: combining successive doctest blocks into a single block was
        # considered here but is deliberately not implemented.
class AutoNumbering(Transform):
    """Register IDs of tables, figures and literal_blocks to assign numbers."""
    default_priority = 210

    def apply(self):
        std = self.document.settings.env.domains['std']
        for node in self.document.traverse(nodes.Element):
            if (std.is_enumerable_node(node) and
                    std.get_numfig_title(node) is not None):
                self.document.note_implicit_target(node)
class SortIds(Transform):
    """Sort section IDs so that the generated "id[0-9]+" one comes last."""
    default_priority = 261

    def apply(self):
        for section in self.document.traverse(nodes.section):
            ids = section['ids']
            if len(ids) > 1 and ids[0].startswith('id'):
                section['ids'] = ids[1:] + [ids[0]]
class CitationReferences(Transform):
    """Replace citation references by pending_xref nodes before the default
    docutils transform tries to resolve them."""
    default_priority = 619

    def apply(self):
        for citnode in self.document.traverse(nodes.citation_reference):
            label = citnode.astext()
            refnode = addnodes.pending_xref(label, refdomain='std',
                                            reftype='citation',
                                            reftarget=label, refwarn=True,
                                            ids=citnode["ids"])
            # keep location info for warnings; fall back to the parent's
            refnode.source = citnode.source or citnode.parent.source
            refnode.line = citnode.line or citnode.parent.line
            refnode += nodes.Text('[' + label + ']')
            citnode.parent.replace(citnode, refnode)
# Node classes that may additionally be marked translatable, keyed by the
# names accepted in the 'gettext_additional_targets' config value.
TRANSLATABLE_NODES = {
    'literal-block': nodes.literal_block,
    'doctest-block': nodes.doctest_block,
    'raw': nodes.raw,
    'index': addnodes.index,
    'image': nodes.image,
}
class ApplySourceWorkaround(Transform):
    """Update source and rawsource attributes (docutils workaround)."""
    default_priority = 10

    def apply(self):
        affected = (n for n in self.document.traverse()
                    if isinstance(n, (nodes.TextElement, nodes.image)))
        for node in affected:
            apply_source_workaround(node)
class AutoIndexUpgrader(Transform):
    """Detect old style (4 column) index entries and upgrade them to the
    new 5 column style by appending a key of None."""
    default_priority = 210

    def apply(self):
        env = self.document.settings.env
        for node in self.document.traverse(addnodes.index):
            entries = node.get('entries')
            if not entries or not any(len(entry) == 4 for entry in entries):
                continue
            env.warn_node('4 column based index found. '
                          'It might be a bug of extensions you use: %r'
                          % entries, node)
            node['entries'] = [entry + (None,) if len(entry) == 4 else entry
                               for entry in entries]
class ExtraTranslatableNodes(Transform):
    """Mark additional node types as translatable, as requested by the
    'gettext_additional_targets' config value."""
    default_priority = 10

    def apply(self):
        wanted = self.document.settings.env.config.gettext_additional_targets
        node_classes = tuple(cls for name, cls in TRANSLATABLE_NODES.items()
                             if name in wanted)
        if not node_classes:
            return
        for node in self.document.traverse(
                lambda n: isinstance(n, node_classes)):
            node['translatable'] = True
class FilterSystemMessages(Transform):
    """Filter low-severity system messages from a doctree."""
    default_priority = 999

    def apply(self):
        env = self.document.settings.env
        # messages below this level are dropped
        filterlevel = 2 if env.config.keep_warnings else 5
        for msg in self.document.traverse(nodes.system_message):
            if msg['level'] < filterlevel:
                env.app.debug('%s [filtered system message]', msg.astext())
                msg.parent.remove(msg)
class SphinxContentsFilter(ContentsFilter):
    """Used with BuildEnvironment.add_toc_from() to discard cross-file links
    within table-of-contents link nodes."""

    def visit_pending_xref(self, node):
        # keep only the literal text of the cross-reference
        label = node.astext()
        self.parent.append(nodes.literal(label, label))
        raise nodes.SkipNode

    def visit_image(self, node):
        # images never appear in the ToC
        raise nodes.SkipNode
| axbaretto/beam | sdks/python/.tox/docs/lib/python2.7/site-packages/sphinx/transforms/__init__.py | Python | apache-2.0 | 7,389 |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: add the boolean 'read_only' column
    (default False) to the 'physical_instance' table."""

    def forwards(self, orm):
        # Adding field 'Instance.read_only'
        db.add_column(u'physical_instance', 'read_only',
                      self.gf('django.db.models.fields.BooleanField')(default=False),
                      keep_default=False)

    def backwards(self, orm):
        # Deleting field 'Instance.read_only'
        db.delete_column(u'physical_instance', 'read_only')

    # Frozen ORM state (auto-generated by South); used to build the 'orm'
    # object passed to forwards()/backwards(). Do not edit by hand.
    models = {
        u'physical.databaseinfra': {
            'Meta': {'object_name': 'DatabaseInfra'},
            'capacity': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'database_key': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'disk_offering': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databaseinfras'", 'null': 'True', 'on_delete': 'models.PROTECT', 'to': u"orm['physical.DiskOffering']"}),
            'endpoint': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'endpoint_dns': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'engine': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databaseinfras'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.Engine']"}),
            'environment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databaseinfras'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.Environment']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_vm_created': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
            'name_prefix': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
            'name_stamp': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '406', 'blank': 'True'}),
            'per_database_size_mbytes': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'plan': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databaseinfras'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.Plan']"}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'user': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'})
        },
        u'physical.diskoffering': {
            'Meta': {'object_name': 'DiskOffering'},
            'available_size_kb': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
            'size_kb': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
        },
        u'physical.engine': {
            'Meta': {'ordering': "(u'engine_type__name', u'version')", 'unique_together': "((u'version', u'engine_type'),)", 'object_name': 'Engine'},
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'engine_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'engines'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.EngineType']"}),
            'engine_upgrade_option': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'backwards_engine'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['physical.Engine']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'path': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'template_name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'user_data_script': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'version': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'physical.enginetype': {
            'Meta': {'ordering': "(u'name',)", 'object_name': 'EngineType'},
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_in_memory': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
        },
        u'physical.environment': {
            'Meta': {'object_name': 'Environment'},
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
        },
        u'physical.host': {
            'Meta': {'object_name': 'Host'},
            'address': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'future_host': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['physical.Host']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
            'hostname': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'monitor_url': ('django.db.models.fields.URLField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
            'os_description': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
        },
        u'physical.instance': {
            'Meta': {'unique_together': "((u'address', u'port'),)", 'object_name': 'Instance'},
            'address': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'databaseinfra': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'instances'", 'to': u"orm['physical.DatabaseInfra']"}),
            'dns': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'future_instance': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['physical.Instance']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
            'hostname': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'instances'", 'to': u"orm['physical.Host']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'instance_type': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'port': ('django.db.models.fields.IntegerField', [], {}),
            'read_only': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
        },
        u'physical.plan': {
            'Meta': {'object_name': 'Plan'},
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'disk_offering': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'plans'", 'null': 'True', 'on_delete': 'models.PROTECT', 'to': u"orm['physical.DiskOffering']"}),
            'engine': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'plans'", 'to': u"orm['physical.Engine']"}),
            'engine_equivalent_plan': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'backwards_plan'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['physical.Plan']"}),
            'environments': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "u'plans'", 'symmetrical': 'False', 'to': u"orm['physical.Environment']"}),
            'flipperfox_equivalent_plan': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'flipperfox_migration_plan'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['physical.Plan']"}),
            'has_persistence': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_ha': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'max_db_size': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
            'provider': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'replication_topology': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'replication_topology'", 'null': 'True', 'to': u"orm['physical.ReplicationTopology']"}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
        },
        u'physical.planattribute': {
            'Meta': {'object_name': 'PlanAttribute'},
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'plan': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'plan_attributes'", 'to': u"orm['physical.Plan']"}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        u'physical.replicationtopology': {
            'Meta': {'object_name': 'ReplicationTopology'},
            'class_path': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'details': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'engine': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "u'replication_topologies'", 'symmetrical': 'False', 'to': u"orm['physical.Engine']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
        }
    }
complete_apps = ['physical'] | globocom/database-as-a-service | dbaas/physical/migrations/0035_auto__add_field_instance_read_only.py | Python | bsd-3-clause | 12,823 |
import imp
import os
import logging
from phoneslack.triggers import *
from phoneslack.actions import *
from phoneslack.actions.manager import MessageManager
from threading import Thread
from Queue import Queue
import sys
from ConfigParser import SafeConfigParser as ConfigParser
from traceback import print_exc
__all__ =["actions", "triggers"]
def main():
    # Entry point (Python 2): read the config file named on the command
    # line, start every trigger thread named in its [SlackPhone] section,
    # build the action "Sender" objects, and hand both to a MessageManager
    # that pumps events from the trigger queue into the actions.
    if len(sys.argv) > 1:
        confFile = sys.argv[-1]
        cf = ConfigParser()
        cf.read(confFile)
    else:
        print "Usage: sniff.py <conffile>"
        exit()
    # events produced by trigger threads, consumed by the manager
    eventQueue = Queue()
    triggers = cf.get("SlackPhone", "triggers").split(",")
    triggerList= list()
    for t in triggers:
        for i in dir(globals()[t]):
            try:
                x = getattr( globals()[t], i)
                # start every Thread subclass defined in the trigger module
                if issubclass(x, Thread) and x!=Thread:
                    obj = x( cf, eventQueue)
                    triggerList.append(obj)
                    obj.start()
            except Exception, e:
                # NOTE(review): this silences the TypeError issubclass()
                # raises for non-class attributes, but it also hides real
                # errors from a trigger's constructor/start(); consider
                # narrowing to TypeError.
                pass
    actionList = list()
    actions = cf.get("SlackPhone", "actions").split(",")
    for a in actions:
        for i in dir( globals()[a] ):
            x = getattr( globals()[a], i)
            # every attribute whose name contains "Sender" becomes an action
            if "Sender" in str(i):
                obj = x(cf)
                actionList.append(obj)
    manager = MessageManager( cf, eventQueue, *actionList)
    manager.start()
    manager.join()
| robscetury/phoneslack | lib/phoneslack/__init__.py | Python | mit | 1,408 |
# -*- coding: utf-8 -*-
# Copyright 2012 Managed I.T.
#
# Author: Kiall Mac Innes <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import random
import mock
import testtools
from testtools.matchers import GreaterThan
from oslo_config import cfg
from oslo_log import log as logging
from oslo_db import exception as db_exception
from oslo_messaging.notify import notifier
from designate import exceptions
from designate import objects
from designate.tests.test_central import CentralTestCase
LOG = logging.getLogger(__name__)
class CentralServiceTest(CentralTestCase):
def test_stop(self):
# Test stopping the service
self.central_service.stop()
def test_start_with_tlds(self):
# Stop Service
self.central_service.stop()
list = objects.TldList()
list.append(objects.Tld(name='com.'))
with mock.patch.object(self.central_service.storage, 'find_tlds',
return_value=list):
self.central_service.start()
self.assertTrue(self.central_service.check_for_tlds)
def test_is_valid_domain_name(self):
self.config(max_domain_name_len=10,
group='service:central')
context = self.get_context()
self.central_service._is_valid_domain_name(context, 'valid.org.')
with testtools.ExpectedException(exceptions.InvalidDomainName):
self.central_service._is_valid_domain_name(context, 'example.org.')
with testtools.ExpectedException(exceptions.InvalidDomainName):
self.central_service._is_valid_domain_name(context, 'example.tld.')
with testtools.ExpectedException(exceptions.InvalidDomainName):
self.central_service._is_valid_domain_name(context, 'tld.')
def test_is_valid_domain_name_with_tlds(self):
# Stop Service
self.central_service.stop()
list = objects.TldList()
list.append(objects.Tld(name='com'))
list.append(objects.Tld(name='biz'))
list.append(objects.Tld(name='z'))
with mock.patch.object(self.central_service.storage, 'find_tlds',
return_value=list):
self.central_service.start()
context = self.get_context()
with mock.patch.object(self.central_service.storage, 'find_tld',
return_value=objects.Tld(name='biz')):
with testtools.ExpectedException(exceptions.InvalidDomainName):
self.central_service._is_valid_domain_name(context, 'biz.')
def test_is_valid_recordset_name(self):
self.config(max_recordset_name_len=18,
group='service:central')
context = self.get_context()
domain = self.create_domain(name='example.org.')
self.central_service._is_valid_recordset_name(
context, domain, 'valid.example.org.')
with testtools.ExpectedException(exceptions.InvalidRecordSetName):
self.central_service._is_valid_recordset_name(
context, domain, 'toolong.example.org.')
with testtools.ExpectedException(ValueError):
self.central_service._is_valid_recordset_name(
context, domain, 'invalidtld.example.org')
with testtools.ExpectedException(exceptions.InvalidRecordSetLocation):
self.central_service._is_valid_recordset_name(
context, domain, 'a.example.COM.')
def test_is_blacklisted_domain_name(self):
# Create blacklisted zones with specific names
self.create_blacklist(pattern='example.org.')
self.create_blacklist(pattern='example.net.')
self.create_blacklist(pattern='^blacklisted.org.$')
self.create_blacklist(pattern='com.$')
# Set the policy to reject the authz
self.policy({'use_blacklisted_domain': '!'})
context = self.get_context()
result = self.central_service._is_blacklisted_domain_name(
context, 'org.')
self.assertFalse(result)
# Subdomains should not be allowed from a blacklisted domain
result = self.central_service._is_blacklisted_domain_name(
context, 'www.example.org.')
self.assertTrue(result)
result = self.central_service._is_blacklisted_domain_name(
context, 'example.org.')
self.assertTrue(result)
# Check for blacklisted domains containing regexps
result = self.central_service._is_blacklisted_domain_name(
context, 'example.net.')
self.assertTrue(result)
result = self.central_service._is_blacklisted_domain_name(
context, 'example.com.')
self.assertTrue(result)
result = self.central_service._is_blacklisted_domain_name(
context, 'blacklisted.org.')
self.assertTrue(result)
def test_is_subdomain(self):
context = self.get_context()
# Create a domain (using the specified domain name)
domain = self.create_domain(name='example.org.')
result = self.central_service._is_subdomain(
context, 'org.', domain.pool_id)
self.assertFalse(result)
result = self.central_service._is_subdomain(
context, 'www.example.net.', domain.pool_id)
self.assertFalse(result)
result = self.central_service._is_subdomain(
context, 'example.org.', domain.pool_id)
self.assertFalse(result)
result = self.central_service._is_subdomain(
context, 'www.example.org.', domain.pool_id)
self.assertTrue(result)
def test_is_superdomain(self):
context = self.get_context()
# Create a domain (using the specified domain name)
domain = self.create_domain(name='example.org.')
LOG.debug("Testing 'org.'")
result = self.central_service._is_superdomain(
context, 'org.', domain.pool_id)
self.assertTrue(result)
LOG.debug("Testing 'www.example.net.'")
result = self.central_service._is_superdomain(
context, 'www.example.net.', domain.pool_id)
self.assertFalse(result)
LOG.debug("Testing 'www.example.org.'")
result = self.central_service._is_superdomain(
context, 'www.example.org.', domain.pool_id)
self.assertFalse(result)
def test_is_valid_recordset_placement_subdomain(self):
context = self.get_context()
# Create a domain (using the specified domain name)
domain = self.create_domain(name='example.org.')
sub_domain = self.create_domain(name='sub.example.org.')
def _fail(domain_, name):
with testtools.ExpectedException(
exceptions.InvalidRecordSetLocation):
self.central_service._is_valid_recordset_placement_subdomain(
context, domain_, name)
def _ok(domain_, name):
self.central_service._is_valid_recordset_placement_subdomain(
context, domain_, name)
_fail(domain, 'record.sub.example.org.')
_fail(domain, 'sub.example.org.')
_ok(domain, 'example.org.')
_ok(domain, 'record.example.org.')
_ok(sub_domain, 'record.example.org.')
def test_is_valid_ttl(self):
self.policy({'use_low_ttl': '!'})
self.config(min_ttl=100,
group='service:central')
context = self.get_context()
values = self.get_domain_fixture(fixture=1)
values['ttl'] = 0
with testtools.ExpectedException(exceptions.InvalidTTL):
self.central_service._is_valid_ttl(
context, values['ttl'])
# TLD Tests
def test_create_tld(self):
# Create a TLD with one label
tld = self.create_tld(fixture=0)
# Ensure all values have been set correctly
self.assertIsNotNone(tld['id'])
self.assertEqual(tld['name'], self.get_tld_fixture(fixture=0)['name'])
# Create a TLD with more than one label
tld = self.create_tld(fixture=1)
# Ensure all values have been set correctly
self.assertIsNotNone(tld['id'])
self.assertEqual(tld['name'], self.get_tld_fixture(fixture=1)['name'])
def test_find_tlds(self):
# Ensure we have no tlds to start with.
tlds = self.central_service.find_tlds(self.admin_context)
self.assertEqual(len(tlds), 0)
# Create a single tld
self.create_tld(fixture=0)
# Ensure we can retrieve the newly created tld
tlds = self.central_service.find_tlds(self.admin_context)
self.assertEqual(len(tlds), 1)
self.assertEqual(tlds[0]['name'],
self.get_tld_fixture(fixture=0)['name'])
# Create a second tld
self.create_tld(fixture=1)
# Ensure we can retrieve both tlds
tlds = self.central_service.find_tlds(self.admin_context)
self.assertEqual(len(tlds), 2)
self.assertEqual(tlds[0]['name'],
self.get_tld_fixture(fixture=0)['name'])
self.assertEqual(tlds[1]['name'],
self.get_tld_fixture(fixture=1)['name'])
def test_get_tld(self):
# Create a tld
tld_name = 'ns%d.co.uk' % random.randint(10, 1000)
expected_tld = self.create_tld(name=tld_name)
# Retrieve it, and ensure it's the same
tld = self.central_service.get_tld(
self.admin_context, expected_tld['id'])
self.assertEqual(tld['id'], expected_tld['id'])
self.assertEqual(tld['name'], expected_tld['name'])
def test_update_tld(self):
# Create a tld
tld = self.create_tld(name='org.')
# Update the Object
tld.name = 'net.'
# Perform the update
self.central_service.update_tld(self.admin_context, tld)
# Fetch the tld again
tld = self.central_service.get_tld(self.admin_context, tld.id)
# Ensure the tld was updated correctly
self.assertEqual('net.', tld.name)
def test_delete_tld(self):
# Create a tld
tld = self.create_tld(fixture=0)
# Delete the tld
self.central_service.delete_tld(self.admin_context, tld['id'])
# Fetch the tld again, ensuring an exception is raised
self.assertRaises(
exceptions.TldNotFound,
self.central_service.get_tld,
self.admin_context, tld['id'])
# TsigKey Tests
def test_create_tsigkey(self):
values = self.get_tsigkey_fixture(fixture=0)
# Create a tsigkey
tsigkey = self.central_service.create_tsigkey(
self.admin_context, tsigkey=objects.TsigKey.from_dict(values))
# Ensure all values have been set correctly
self.assertIsNotNone(tsigkey['id'])
self.assertEqual(tsigkey['name'], values['name'])
self.assertEqual(tsigkey['algorithm'], values['algorithm'])
self.assertEqual(tsigkey['secret'], values['secret'])
def test_find_tsigkeys(self):
# Ensure we have no tsigkeys to start with.
tsigkeys = self.central_service.find_tsigkeys(self.admin_context)
self.assertEqual(len(tsigkeys), 0)
# Create a single tsigkey (using default values)
tsigkey_one = self.create_tsigkey()
# Ensure we can retrieve the newly created tsigkey
tsigkeys = self.central_service.find_tsigkeys(self.admin_context)
self.assertEqual(len(tsigkeys), 1)
self.assertEqual(tsigkeys[0]['name'], tsigkey_one['name'])
# Create a second tsigkey
tsigkey_two = self.create_tsigkey(fixture=1)
# Ensure we can retrieve both tsigkeys
tsigkeys = self.central_service.find_tsigkeys(self.admin_context)
self.assertEqual(len(tsigkeys), 2)
self.assertEqual(tsigkeys[0]['name'], tsigkey_one['name'])
self.assertEqual(tsigkeys[1]['name'], tsigkey_two['name'])
def test_get_tsigkey(self):
# Create a tsigkey
expected = self.create_tsigkey()
# Retrieve it, and ensure it's the same
tsigkey = self.central_service.get_tsigkey(
self.admin_context, expected['id'])
self.assertEqual(tsigkey['id'], expected['id'])
self.assertEqual(tsigkey['name'], expected['name'])
self.assertEqual(tsigkey['algorithm'], expected['algorithm'])
self.assertEqual(tsigkey['secret'], expected['secret'])
def test_update_tsigkey(self):
# Create a tsigkey
tsigkey = self.create_tsigkey(name='test-key')
# Update the Object
tsigkey.name = 'test-key-updated'
# Perform the update
self.central_service.update_tsigkey(self.admin_context, tsigkey)
# Fetch the tsigkey again
tsigkey = self.central_service.get_tsigkey(
self.admin_context, tsigkey.id)
# Ensure the new value took
self.assertEqual('test-key-updated', tsigkey.name)
def test_delete_tsigkey(self):
# Create a tsigkey
tsigkey = self.create_tsigkey()
# Delete the tsigkey
self.central_service.delete_tsigkey(self.admin_context, tsigkey['id'])
# Fetch the tsigkey again, ensuring an exception is raised
with testtools.ExpectedException(exceptions.TsigKeyNotFound):
self.central_service.get_tsigkey(self.admin_context, tsigkey['id'])
# Tenant Tests
def test_count_tenants(self):
admin_context = self.get_admin_context()
admin_context.all_tenants = True
tenant_one_context = self.get_context(tenant=1)
tenant_two_context = self.get_context(tenant=2)
# in the beginning, there should be nothing
tenants = self.central_service.count_tenants(admin_context)
self.assertEqual(tenants, 0)
# Explicitly set a tenant_id
self.create_domain(fixture=0, context=tenant_one_context)
self.create_domain(fixture=1, context=tenant_two_context)
tenants = self.central_service.count_tenants(admin_context)
self.assertEqual(tenants, 2)
    def test_count_tenants_policy_check(self):
        """count_tenants must be forbidden when policy rejects it."""
        # Set the policy to reject the authz
        self.policy({'count_tenants': '!'})
        with testtools.ExpectedException(exceptions.Forbidden):
            self.central_service.count_tenants(self.get_context())
# Domain Tests
    @mock.patch.object(notifier.Notifier, "info")
    def _test_create_domain(self, values, mock_notifier):
        """Create a zone from *values*; verify its fields, the NS recordset
        and the emitted notification. Returns the created domain so callers
        can make further assertions."""
        # Reset the mock to avoid the calls from the create_nameserver() call
        mock_notifier.reset_mock()
        # Create a domain
        domain = self.central_service.create_domain(
            self.admin_context, domain=objects.Domain.from_dict(values))
        # Ensure all values have been set correctly
        self.assertIsNotNone(domain['id'])
        self.assertEqual(domain['name'], values['name'])
        self.assertEqual(domain['email'], values['email'])
        self.assertIn('status', domain)
        # exactly one notification was emitted for the create
        self.assertEqual(mock_notifier.call_count, 1)
        # Ensure the correct NS Records are in place
        pool = self.central_service.get_pool(
            self.admin_context, domain.pool_id)
        ns_recordset = self.central_service.find_recordset(
            self.admin_context,
            criterion={'domain_id': domain.id, 'type': "NS"})
        self.assertIsNotNone(ns_recordset.id)
        self.assertEqual(ns_recordset.type, 'NS')
        self.assertIsNotNone(ns_recordset.records)
        # the NS records must mirror the pool's nameservers
        self.assertEqual(set([n.hostname for n in pool.ns_records]),
                         set([n.data for n in ns_recordset.records]))
        mock_notifier.assert_called_once_with(
            self.admin_context, 'dns.domain.create', domain)
        return domain
def test_create_domain_duplicate_different_pools(self):
fixture = self.get_domain_fixture()
# Create first domain that's placed in default pool
self.create_domain(**fixture)
# Create a secondary pool
second_pool = self.create_pool()
fixture["pool_id"] = second_pool.id
self.create_domain(**fixture)
def test_create_domain_over_tld(self):
values = dict(
name='example.com.',
email='[email protected]',
type='PRIMARY'
)
self._test_create_domain(values)
def test_idn_create_domain_over_tld(self):
values = dict(
name='xn--3e0b707e'
)
# Create the appropriate TLD
self.central_service.create_tld(
self.admin_context, objects.Tld.from_dict(values))
# Test creation of a domain in 한국 (kr)
values = dict(
name='example.xn--3e0b707e.',
email='[email protected]',
type='PRIMARY'
)
self._test_create_domain(values)
def test_create_domain_over_quota(self):
self.config(quota_domains=1)
self.create_domain()
with testtools.ExpectedException(exceptions.OverQuota):
self.create_domain()
def test_create_subdomain(self):
# Create the Parent Domain using fixture 0
parent_domain = self.create_domain(fixture=0)
# Prepare values for the subdomain using fixture 1 as a base
values = self.get_domain_fixture(fixture=1)
values['name'] = 'www.%s' % parent_domain['name']
# Create the subdomain
domain = self.central_service.create_domain(
self.admin_context, objects.Domain.from_dict(values))
# Ensure all values have been set correctly
self.assertIsNotNone(domain['id'])
self.assertEqual(domain['parent_domain_id'], parent_domain['id'])
def test_create_subdomain_different_pools(self):
fixture = self.get_domain_fixture()
# Create first domain that's placed in default pool
self.create_domain(**fixture)
# Create a secondary pool
second_pool = self.create_pool()
fixture["pool_id"] = second_pool.id
fixture["name"] = "sub.%s" % fixture["name"]
subdomain = self.create_domain(**fixture)
self.assertIsNone(subdomain.parent_domain_id)
def test_create_superdomain(self):
# Prepare values for the domain and subdomain
# using fixture 1 as a base
domain_values = self.get_domain_fixture(fixture=1)
subdomain_values = copy.deepcopy(domain_values)
subdomain_values['name'] = 'www.%s' % domain_values['name']
subdomain_values['context'] = self.admin_context
LOG.debug("domain_values: {0}".format(domain_values))
LOG.debug("subdomain_values: {0}".format(subdomain_values))
# Create the subdomain
subdomain = self.create_domain(**subdomain_values)
# Create the Parent Domain using fixture 1
parent_domain = self.central_service.create_domain(
self.admin_context, objects.Domain.from_dict(domain_values))
# Get updated subdomain values
subdomain = self.central_service.get_domain(self.admin_context,
subdomain.id)
# Ensure all values have been set correctly
self.assertIsNotNone(parent_domain['id'])
self.assertEqual(subdomain['parent_domain_id'], parent_domain['id'])
def test_create_subdomain_failure(self):
context = self.get_admin_context()
# Explicitly set a tenant_id
context.tenant = '1'
# Create the Parent Domain using fixture 0
parent_domain = self.create_domain(fixture=0, context=context)
context = self.get_admin_context()
# Explicitly use a different tenant_id
context.tenant = '2'
# Prepare values for the subdomain using fixture 1 as a base
values = self.get_domain_fixture(fixture=1)
values['name'] = 'www.%s' % parent_domain['name']
# Attempt to create the subdomain
with testtools.ExpectedException(exceptions.Forbidden):
self.central_service.create_domain(
context, objects.Domain.from_dict(values))
def test_create_superdomain_failure(self):
context = self.get_admin_context()
# Explicitly set a tenant_id
context.tenant = '1'
# Set up domain and subdomain values
domain_values = self.get_domain_fixture(fixture=1)
domain_name = domain_values['name']
subdomain_values = copy.deepcopy(domain_values)
subdomain_values['name'] = 'www.%s' % domain_name
subdomain_values['context'] = context
# Create sub domain
self.create_domain(**subdomain_values)
context = self.get_admin_context()
# Explicitly use a different tenant_id
context.tenant = '2'
# Attempt to create the domain
with testtools.ExpectedException(exceptions.Forbidden):
self.central_service.create_domain(
context, objects.Domain.from_dict(domain_values))
def test_create_blacklisted_domain_success(self):
# Create blacklisted zone using default values
self.create_blacklist()
# Set the policy to accept the authz
self.policy({'use_blacklisted_domain': '@'})
values = dict(
name='blacklisted.com.',
email='[email protected]'
)
# Create a zone that is blacklisted
domain = self.central_service.create_domain(
self.admin_context, objects.Domain.from_dict(values))
# Ensure all values have been set correctly
self.assertIsNotNone(domain['id'])
self.assertEqual(domain['name'], values['name'])
self.assertEqual(domain['email'], values['email'])
def test_create_blacklisted_domain_fail(self):
self.create_blacklist()
# Set the policy to reject the authz
self.policy({'use_blacklisted_domain': '!'})
values = dict(
name='blacklisted.com.',
email='[email protected]'
)
with testtools.ExpectedException(exceptions.InvalidDomainName):
# Create a domain
self.central_service.create_domain(
self.admin_context, objects.Domain.from_dict(values))
def _test_create_domain_fail(self, values, exception):
with testtools.ExpectedException(exception):
# Create an invalid domain
self.central_service.create_domain(
self.admin_context, objects.Domain.from_dict(values))
def test_create_domain_invalid_tld_fail(self):
# add a tld for com
self.create_tld(fixture=0)
values = dict(
name='example.com.',
email='[email protected]'
)
# Create a valid domain
self.central_service.create_domain(
self.admin_context, objects.Domain.from_dict(values))
values = dict(
name='example.net.',
email='[email protected]'
)
# There is no TLD for net so it should fail
with testtools.ExpectedException(exceptions.InvalidDomainName):
# Create an invalid domain
self.central_service.create_domain(
self.admin_context, objects.Domain.from_dict(values))
def test_create_domain_invalid_ttl_fail(self):
self.policy({'use_low_ttl': '!'})
self.config(min_ttl=100,
group='service:central')
context = self.get_context()
values = self.get_domain_fixture(fixture=1)
values['ttl'] = 0
with testtools.ExpectedException(exceptions.InvalidTTL):
self.central_service.create_domain(
context, objects.Domain.from_dict(values))
def test_create_domain_no_min_ttl(self):
self.policy({'use_low_ttl': '!'})
self.config(min_ttl=None,
group='service:central')
values = self.get_domain_fixture(fixture=1)
values['ttl'] = -100
# Create domain with random TTL
domain = self.central_service.create_domain(
self.admin_context, objects.Domain.from_dict(values))
# Ensure all values have been set correctly
self.assertEqual(domain['ttl'], values['ttl'])
def test_find_domains(self):
# Ensure we have no domains to start with.
domains = self.central_service.find_domains(self.admin_context)
self.assertEqual(len(domains), 0)
# Create a single domain (using default values)
self.create_domain()
# Ensure we can retrieve the newly created domain
domains = self.central_service.find_domains(self.admin_context)
self.assertEqual(len(domains), 1)
self.assertEqual(domains[0]['name'], 'example.com.')
# Create a second domain
self.create_domain(name='example.net.')
# Ensure we can retrieve both domain
domains = self.central_service.find_domains(self.admin_context)
self.assertEqual(len(domains), 2)
self.assertEqual(domains[0]['name'], 'example.com.')
self.assertEqual(domains[1]['name'], 'example.net.')
def test_find_domains_criteria(self):
# Create a domain
domain_name = '%d.example.com.' % random.randint(10, 1000)
expected_domain = self.create_domain(name=domain_name)
# Retrieve it, and ensure it's the same
criterion = {'name': domain_name}
domains = self.central_service.find_domains(
self.admin_context, criterion)
self.assertEqual(domains[0]['id'], expected_domain['id'])
self.assertEqual(domains[0]['name'], expected_domain['name'])
self.assertEqual(domains[0]['email'], expected_domain['email'])
def test_find_domains_tenant_restrictions(self):
admin_context = self.get_admin_context()
admin_context.all_tenants = True
tenant_one_context = self.get_context(tenant=1)
tenant_two_context = self.get_context(tenant=2)
# Ensure we have no domains to start with.
domains = self.central_service.find_domains(admin_context)
self.assertEqual(len(domains), 0)
# Create a single domain (using default values)
domain = self.create_domain(context=tenant_one_context)
# Ensure admins can retrieve the newly created domain
domains = self.central_service.find_domains(admin_context)
self.assertEqual(len(domains), 1)
self.assertEqual(domains[0]['name'], domain['name'])
# Ensure tenant=1 can retrieve the newly created domain
domains = self.central_service.find_domains(tenant_one_context)
self.assertEqual(len(domains), 1)
self.assertEqual(domains[0]['name'], domain['name'])
# Ensure tenant=2 can NOT retrieve the newly created domain
domains = self.central_service.find_domains(tenant_two_context)
self.assertEqual(len(domains), 0)
def test_get_domain(self):
# Create a domain
domain_name = '%d.example.com.' % random.randint(10, 1000)
expected_domain = self.create_domain(name=domain_name)
# Retrieve it, and ensure it's the same
domain = self.central_service.get_domain(
self.admin_context, expected_domain['id'])
self.assertEqual(domain['id'], expected_domain['id'])
self.assertEqual(domain['name'], expected_domain['name'])
self.assertEqual(domain['email'], expected_domain['email'])
def test_get_domain_servers(self):
# Create a domain
domain = self.create_domain()
# Retrieve the servers list
servers = self.central_service.get_domain_servers(
self.admin_context, domain['id'])
self.assertTrue(len(servers) > 0)
def test_find_domain(self):
# Create a domain
domain_name = '%d.example.com.' % random.randint(10, 1000)
expected_domain = self.create_domain(name=domain_name)
# Retrieve it, and ensure it's the same
criterion = {'name': domain_name}
domain = self.central_service.find_domain(
self.admin_context, criterion)
self.assertEqual(domain['id'], expected_domain['id'])
self.assertEqual(domain['name'], expected_domain['name'])
self.assertEqual(domain['email'], expected_domain['email'])
self.assertIn('status', domain)
@mock.patch.object(notifier.Notifier, "info")
def test_update_domain(self, mock_notifier):
# Create a domain
domain = self.create_domain(email='[email protected]')
original_serial = domain.serial
# Update the object
domain.email = '[email protected]'
# Reset the mock to avoid the calls from the create_domain() call
mock_notifier.reset_mock()
# Perform the update
self.central_service.update_domain(self.admin_context, domain)
# Fetch the domain again
domain = self.central_service.get_domain(
self.admin_context, domain.id)
# Ensure the domain was updated correctly
self.assertTrue(domain.serial > original_serial)
self.assertEqual('[email protected]', domain.email)
self.assertEqual(mock_notifier.call_count, 1)
# Check that the object used in the notify is a Domain and the id
# matches up
notified_domain = mock_notifier.call_args[0][-1]
self.assertIsInstance(notified_domain, objects.Domain)
self.assertEqual(domain.id, notified_domain.id)
mock_notifier.assert_called_once_with(
self.admin_context, 'dns.domain.update', mock.ANY)
def test_update_domain_without_id(self):
# Create a domain
domain = self.create_domain(email='[email protected]')
# Update the object
domain.email = '[email protected]'
domain.id = None
# Perform the update
with testtools.ExpectedException(Exception):
self.central_service.update_domain(self.admin_context, domain)
def test_update_domain_without_incrementing_serial(self):
# Create a domain
domain = self.create_domain(email='[email protected]')
original_serial = domain.serial
# Update the object
domain.email = '[email protected]'
# Perform the update
self.central_service.update_domain(
self.admin_context, domain, increment_serial=False)
# Fetch the domain again
domain = self.central_service.get_domain(self.admin_context, domain.id)
# Ensure the domain was updated correctly
self.assertEqual(original_serial, domain.serial)
self.assertEqual('[email protected]', domain.email)
def test_update_domain_name_fail(self):
# Create a domain
domain = self.create_domain(name='example.org.')
# Update the Object
domain.name = 'example.net.'
# Perform the update
with testtools.ExpectedException(exceptions.BadRequest):
self.central_service.update_domain(self.admin_context, domain)
def test_update_domain_deadlock_retry(self):
# Create a domain
domain = self.create_domain(name='example.org.')
original_serial = domain.serial
# Update the Object
domain.email = '[email protected]'
# Due to Python's scoping of i - we need to make it a mutable type
# for the counter to work.. In Py3, we can use the nonlocal keyword.
i = [False]
def fail_once_then_pass():
if i[0] is True:
return self.central_service.storage.session.commit()
else:
i[0] = True
raise db_exception.DBDeadlock()
with mock.patch.object(self.central_service.storage, 'commit',
side_effect=fail_once_then_pass):
# Perform the update
domain = self.central_service.update_domain(
self.admin_context, domain)
# Ensure i[0] is True, indicating the side_effect code above was
# triggered
self.assertTrue(i[0])
# Ensure the domain was updated correctly
self.assertTrue(domain.serial > original_serial)
self.assertEqual('[email protected]', domain.email)
@mock.patch.object(notifier.Notifier, "info")
def test_delete_domain(self, mock_notifier):
# Create a domain
domain = self.create_domain()
mock_notifier.reset_mock()
# Delete the domain
self.central_service.delete_domain(self.admin_context, domain['id'])
# Fetch the domain
deleted_domain = self.central_service.get_domain(
self.admin_context, domain['id'])
# Ensure the domain is marked for deletion
self.assertEqual(deleted_domain.id, domain.id)
self.assertEqual(deleted_domain.name, domain.name)
self.assertEqual(deleted_domain.email, domain.email)
self.assertEqual(deleted_domain.status, 'PENDING')
self.assertEqual(deleted_domain.tenant_id, domain.tenant_id)
self.assertEqual(deleted_domain.parent_domain_id,
domain.parent_domain_id)
self.assertEqual(deleted_domain.action, 'DELETE')
self.assertEqual(deleted_domain.serial, domain.serial)
self.assertEqual(deleted_domain.pool_id, domain.pool_id)
self.assertEqual(mock_notifier.call_count, 1)
# Check that the object used in the notify is a Domain and the id
# matches up
notified_domain = mock_notifier.call_args[0][-1]
self.assertIsInstance(notified_domain, objects.Domain)
self.assertEqual(deleted_domain.id, notified_domain.id)
mock_notifier.assert_called_once_with(
self.admin_context, 'dns.domain.delete', mock.ANY)
def test_delete_parent_domain(self):
# Create the Parent Domain using fixture 0
parent_domain = self.create_domain(fixture=0)
# Create the subdomain
self.create_domain(fixture=1, name='www.%s' % parent_domain['name'])
# Attempt to delete the parent domain
with testtools.ExpectedException(exceptions.DomainHasSubdomain):
self.central_service.delete_domain(
self.admin_context, parent_domain['id'])
def test_count_domains(self):
# in the beginning, there should be nothing
domains = self.central_service.count_domains(self.admin_context)
self.assertEqual(domains, 0)
# Create a single domain
self.create_domain()
# count 'em up
domains = self.central_service.count_domains(self.admin_context)
# well, did we get 1?
self.assertEqual(domains, 1)
def test_count_domains_policy_check(self):
# Set the policy to reject the authz
self.policy({'count_domains': '!'})
with testtools.ExpectedException(exceptions.Forbidden):
self.central_service.count_domains(self.get_context())
def test_touch_domain(self):
# Create a domain
expected_domain = self.create_domain()
# Touch the domain
self.central_service.touch_domain(
self.admin_context, expected_domain['id'])
# Fetch the domain again
domain = self.central_service.get_domain(
self.admin_context, expected_domain['id'])
# Ensure the serial was incremented
self.assertTrue(domain['serial'] > expected_domain['serial'])
def test_xfr_domain(self):
# Create a domain
fixture = self.get_domain_fixture('SECONDARY', 0)
fixture['email'] = cfg.CONF['service:central'].managed_resource_email
fixture['attributes'] = [{"key": "master", "value": "10.0.0.10"}]
# Create a zone
secondary = self.create_domain(**fixture)
self.central_service.xfr_domain(self.admin_context, secondary.id)
def test_xfr_domain_invalid_type(self):
domain = self.create_domain()
with testtools.ExpectedException(exceptions.BadRequest):
self.central_service.xfr_domain(self.admin_context, domain.id)
# RecordSet Tests
def test_create_recordset(self):
domain = self.create_domain()
original_serial = domain.serial
# Create the Object
recordset = objects.RecordSet(name='www.%s' % domain.name, type='A')
# Persist the Object
recordset = self.central_service.create_recordset(
self.admin_context, domain.id, recordset=recordset)
# Get the zone again to check if serial increased
updated_domain = self.central_service.get_domain(self.admin_context,
domain.id)
new_serial = updated_domain.serial
# Ensure all values have been set correctly
self.assertIsNotNone(recordset.id)
self.assertEqual('www.%s' % domain.name, recordset.name)
self.assertEqual('A', recordset.type)
self.assertIsNotNone(recordset.records)
# The serial number does not get updated is there are no records
# in the recordset
self.assertEqual(new_serial, original_serial)
def test_create_recordset_with_records(self):
domain = self.create_domain()
original_serial = domain.serial
# Create the Object
recordset = objects.RecordSet(
name='www.%s' % domain.name,
type='A',
records=objects.RecordList(objects=[
objects.Record(data='192.3.3.15'),
objects.Record(data='192.3.3.16'),
])
)
# Persist the Object
recordset = self.central_service.create_recordset(
self.admin_context, domain.id, recordset=recordset)
# Get updated serial number
updated_zone = self.central_service.get_domain(self.admin_context,
domain.id)
new_serial = updated_zone.serial
# Ensure all values have been set correctly
self.assertIsNotNone(recordset.records)
self.assertEqual(2, len(recordset.records))
self.assertIsNotNone(recordset.records[0].id)
self.assertIsNotNone(recordset.records[1].id)
self.assertThat(new_serial, GreaterThan(original_serial))
# def test_create_recordset_over_quota(self):
# self.config(quota_domain_recordsets=1)
# domain = self.create_domain()
# self.create_recordset(domain)
# with testtools.ExpectedException(exceptions.OverQuota):
# self.create_recordset(domain)
def test_create_invalid_recordset_location_cname_at_apex(self):
domain = self.create_domain()
values = dict(
name=domain['name'],
type='CNAME'
)
# Attempt to create a CNAME record at the apex
with testtools.ExpectedException(exceptions.InvalidRecordSetLocation):
self.central_service.create_recordset(
self.admin_context,
domain['id'],
recordset=objects.RecordSet.from_dict(values))
def test_create_invalid_recordset_location_cname_sharing(self):
domain = self.create_domain()
expected = self.create_recordset(domain)
values = dict(
name=expected['name'],
type='CNAME'
)
# Attempt to create a CNAME record alongside another record
with testtools.ExpectedException(exceptions.InvalidRecordSetLocation):
self.central_service.create_recordset(
self.admin_context,
domain['id'],
recordset=objects.RecordSet.from_dict(values))
def test_create_invalid_recordset_location_wrong_domain(self):
domain = self.create_domain()
other_domain = self.create_domain(fixture=1)
values = dict(
name=other_domain['name'],
type='A'
)
# Attempt to create a record in the incorrect domain
with testtools.ExpectedException(exceptions.InvalidRecordSetLocation):
self.central_service.create_recordset(
self.admin_context,
domain['id'],
recordset=objects.RecordSet.from_dict(values))
def test_create_invalid_recordset_ttl(self):
self.policy({'use_low_ttl': '!'})
self.config(min_ttl=100,
group='service:central')
domain = self.create_domain()
values = dict(
name='www.%s' % domain['name'],
type='A',
ttl=10
)
# Attempt to create a A record under the TTL
with testtools.ExpectedException(exceptions.InvalidTTL):
self.central_service.create_recordset(
self.admin_context,
domain['id'],
recordset=objects.RecordSet.from_dict(values))
def test_create_recordset_no_min_ttl(self):
self.policy({'use_low_ttl': '!'})
self.config(min_ttl=None,
group='service:central')
domain = self.create_domain()
values = dict(
name='www.%s' % domain['name'],
type='A',
ttl=10
)
recordset = self.central_service.create_recordset(
self.admin_context,
domain['id'],
recordset=objects.RecordSet.from_dict(values))
self.assertEqual(recordset['ttl'], values['ttl'])
def test_get_recordset(self):
domain = self.create_domain()
# Create a recordset
expected = self.create_recordset(domain)
# Retrieve it, and ensure it's the same
recordset = self.central_service.get_recordset(
self.admin_context, domain['id'], expected['id'])
self.assertEqual(recordset['id'], expected['id'])
self.assertEqual(recordset['name'], expected['name'])
self.assertEqual(recordset['type'], expected['type'])
def test_get_recordset_with_records(self):
domain = self.create_domain()
# Create a recordset and two records
recordset = self.create_recordset(domain)
self.create_record(domain, recordset)
self.create_record(domain, recordset, fixture=1)
# Retrieve it, and ensure it's the same
recordset = self.central_service.get_recordset(
self.admin_context, domain.id, recordset.id)
self.assertEqual(2, len(recordset.records))
self.assertIsNotNone(recordset.records[0].id)
self.assertIsNotNone(recordset.records[1].id)
def test_get_recordset_incorrect_domain_id(self):
domain = self.create_domain()
other_domain = self.create_domain(fixture=1)
# Create a recordset
expected = self.create_recordset(domain)
# Ensure we get a 404 if we use the incorrect domain_id
with testtools.ExpectedException(exceptions.RecordSetNotFound):
self.central_service.get_recordset(
self.admin_context, other_domain['id'], expected['id'])
def test_find_recordsets(self):
domain = self.create_domain()
criterion = {'domain_id': domain['id']}
# Ensure we have two recordsets to start with as SOA & NS
# recordsets are created automatically
recordsets = self.central_service.find_recordsets(
self.admin_context, criterion)
self.assertEqual(len(recordsets), 2)
# Create a single recordset (using default values)
self.create_recordset(domain, name='www.%s' % domain['name'])
# Ensure we can retrieve the newly created recordset
recordsets = self.central_service.find_recordsets(
self.admin_context, criterion)
self.assertEqual(len(recordsets), 3)
self.assertEqual(recordsets[2]['name'], 'www.%s' % domain['name'])
# Create a second recordset
self.create_recordset(domain, name='mail.%s' % domain['name'])
# Ensure we can retrieve both recordsets
recordsets = self.central_service.find_recordsets(
self.admin_context, criterion)
self.assertEqual(len(recordsets), 4)
self.assertEqual(recordsets[2]['name'], 'www.%s' % domain['name'])
self.assertEqual(recordsets[3]['name'], 'mail.%s' % domain['name'])
def test_find_recordset(self):
domain = self.create_domain()
# Create a recordset
expected = self.create_recordset(domain)
# Retrieve it, and ensure it's the same
criterion = {'domain_id': domain['id'], 'name': expected['name']}
recordset = self.central_service.find_recordset(
self.admin_context, criterion)
self.assertEqual(recordset['id'], expected['id'])
self.assertEqual(recordset['name'], expected['name'])
def test_find_recordset_with_records(self):
domain = self.create_domain()
# Create a recordset
recordset = self.create_recordset(domain)
self.create_record(domain, recordset)
self.create_record(domain, recordset, fixture=1)
# Retrieve it, and ensure it's the same
criterion = {'domain_id': domain.id, 'name': recordset.name}
recordset = self.central_service.find_recordset(
self.admin_context, criterion)
self.assertEqual(2, len(recordset.records))
self.assertIsNotNone(recordset.records[0].id)
self.assertIsNotNone(recordset.records[1].id)
def test_update_recordset(self):
# Create a domain
domain = self.create_domain()
original_serial = domain.serial
# Create a recordset
recordset = self.create_recordset(domain)
# Update the recordset
recordset.ttl = 1800
# Perform the update
self.central_service.update_recordset(self.admin_context, recordset)
# Get domain again to verify that serial number was updated
updated_domain = self.central_service.get_domain(self.admin_context,
domain.id)
new_serial = updated_domain.serial
# Fetch the resource again
recordset = self.central_service.get_recordset(
self.admin_context, recordset.domain_id, recordset.id)
# Ensure the new value took
self.assertEqual(recordset.ttl, 1800)
self.assertThat(new_serial, GreaterThan(original_serial))
    def test_update_recordset_deadlock_retry(self):
        """Ensure a DBDeadlock raised on commit is retried transparently.

        The storage ``commit`` is patched to raise ``DBDeadlock`` on the
        first call and succeed on the second; the update must still land.
        """
        # Create a domain
        domain = self.create_domain()
        # Create a recordset
        recordset = self.create_recordset(domain)
        # Update the recordset
        recordset.ttl = 1800
        # Due to Python's scoping of i - we need to make it a mutable type
        # for the counter to work.. In Py3, we can use the nonlocal keyword.
        i = [False]

        def fail_once_then_pass():
            # First invocation: flag and deadlock; later ones: real commit.
            if i[0] is True:
                return self.central_service.storage.session.commit()
            else:
                i[0] = True
                raise db_exception.DBDeadlock()

        with mock.patch.object(self.central_service.storage, 'commit',
                               side_effect=fail_once_then_pass):
            # Perform the update
            recordset = self.central_service.update_recordset(
                self.admin_context, recordset)
        # Ensure i[0] is True, indicating the side_effect code above was
        # triggered
        self.assertTrue(i[0])
        # Ensure the recordset was updated correctly
        self.assertEqual(1800, recordset.ttl)
def test_update_recordset_with_record_create(self):
# Create a domain
domain = self.create_domain()
# Create a recordset
recordset = self.create_recordset(domain)
# Append two new Records
recordset.records.append(objects.Record(data='192.0.2.1'))
recordset.records.append(objects.Record(data='192.0.2.2'))
# Perform the update
self.central_service.update_recordset(self.admin_context, recordset)
# Fetch the RecordSet again
recordset = self.central_service.get_recordset(
self.admin_context, domain.id, recordset.id)
# Ensure two Records are attached to the RecordSet correctly
self.assertEqual(2, len(recordset.records))
self.assertIsNotNone(recordset.records[0].id)
self.assertIsNotNone(recordset.records[1].id)
def test_update_recordset_with_record_delete(self):
# Create a domain
domain = self.create_domain()
original_serial = domain.serial
# Create a recordset and two records
recordset = self.create_recordset(domain)
self.create_record(domain, recordset)
self.create_record(domain, recordset, fixture=1)
# Append two new Records
recordset.records.append(objects.Record(data='192.0.2.1'))
recordset.records.append(objects.Record(data='192.0.2.2'))
# Remove one of the Records
recordset.records.pop(0)
# Perform the update
self.central_service.update_recordset(self.admin_context, recordset)
# Fetch the RecordSet again
recordset = self.central_service.get_recordset(
self.admin_context, domain.id, recordset.id)
# Fetch the Domain again
updated_domain = self.central_service.get_domain(self.admin_context,
domain.id)
new_serial = updated_domain.serial
# Ensure two Records are attached to the RecordSet correctly
self.assertEqual(1, len(recordset.records))
self.assertIsNotNone(recordset.records[0].id)
self.assertThat(new_serial, GreaterThan(original_serial))
def test_update_recordset_with_record_update(self):
# Create a domain
domain = self.create_domain()
# Create a recordset and two records
recordset = self.create_recordset(domain, 'A')
self.create_record(domain, recordset)
self.create_record(domain, recordset, fixture=1)
# Fetch the RecordSet again
recordset = self.central_service.get_recordset(
self.admin_context, domain.id, recordset.id)
# Update one of the Records
updated_record_id = recordset.records[0].id
recordset.records[0].data = '192.0.2.255'
# Perform the update
self.central_service.update_recordset(self.admin_context, recordset)
# Fetch the RecordSet again
recordset = self.central_service.get_recordset(
self.admin_context, domain.id, recordset.id)
# Ensure the Record has been updated
for record in recordset.records:
if record.id != updated_record_id:
continue
self.assertEqual('192.0.2.255', record.data)
return # Exits this test early as we succeeded
raise Exception('Updated record not found')
def test_update_recordset_without_incrementing_serial(self):
domain = self.create_domain()
# Create a recordset
recordset = self.create_recordset(domain)
# Fetch the domain so we have the latest serial number
domain_before = self.central_service.get_domain(
self.admin_context, domain.id)
# Update the recordset
recordset.ttl = 1800
# Perform the update
self.central_service.update_recordset(
self.admin_context, recordset, increment_serial=False)
# Fetch the resource again
recordset = self.central_service.get_recordset(
self.admin_context, recordset.domain_id, recordset.id)
# Ensure the recordset was updated correctly
self.assertEqual(recordset.ttl, 1800)
# Ensure the domains serial number was not updated
domain_after = self.central_service.get_domain(
self.admin_context, domain.id)
self.assertEqual(domain_before.serial, domain_after.serial)
def test_update_recordset_immutable_domain_id(self):
domain = self.create_domain()
other_domain = self.create_domain(fixture=1)
# Create a recordset
recordset = self.create_recordset(domain)
# Update the recordset
recordset.ttl = 1800
recordset.domain_id = other_domain.id
# Ensure we get a BadRequest if we change the domain_id
with testtools.ExpectedException(exceptions.BadRequest):
self.central_service.update_recordset(
self.admin_context, recordset)
def test_update_recordset_immutable_tenant_id(self):
domain = self.create_domain()
# Create a recordset
recordset = self.create_recordset(domain)
# Update the recordset
recordset.ttl = 1800
recordset.tenant_id = 'other-tenant'
# Ensure we get a BadRequest if we change the domain_id
with testtools.ExpectedException(exceptions.BadRequest):
self.central_service.update_recordset(
self.admin_context, recordset)
def test_update_recordset_immutable_type(self):
domain = self.create_domain()
# Create a recordset
recordset = self.create_recordset(domain)
cname_recordset = self.create_recordset(domain, type='CNAME')
# Update the recordset
recordset.ttl = 1800
recordset.type = cname_recordset.type
# Ensure we get a BadRequest if we change the domain_id
with testtools.ExpectedException(exceptions.BadRequest):
self.central_service.update_recordset(
self.admin_context, recordset)
def test_delete_recordset(self):
domain = self.create_domain()
original_serial = domain.serial
# Create a recordset
recordset = self.create_recordset(domain)
# Delete the recordset
self.central_service.delete_recordset(
self.admin_context, domain['id'], recordset['id'])
# Fetch the recordset again, ensuring an exception is raised
with testtools.ExpectedException(exceptions.RecordSetNotFound):
self.central_service.get_recordset(
self.admin_context, domain['id'], recordset['id'])
# Fetch the domain again to verify serial number increased
updated_domain = self.central_service.get_domain(self.admin_context,
domain.id)
new_serial = updated_domain.serial
self.assertThat(new_serial, GreaterThan(original_serial))
def test_delete_recordset_without_incrementing_serial(self):
    """increment_serial=False deletes without touching the zone serial."""
    domain = self.create_domain()
    # Create a recordset
    recordset = self.create_recordset(domain)
    # Fetch the domain so we have the latest serial number
    domain_before = self.central_service.get_domain(
        self.admin_context, domain['id'])
    # Delete the recordset
    self.central_service.delete_recordset(
        self.admin_context, domain['id'], recordset['id'],
        increment_serial=False)
    # Fetch the record again, ensuring an exception is raised
    with testtools.ExpectedException(exceptions.RecordSetNotFound):
        self.central_service.get_recordset(
            self.admin_context, domain['id'], recordset['id'])
    # Ensure the domains serial number was not updated
    domain_after = self.central_service.get_domain(
        self.admin_context, domain['id'])
    self.assertEqual(domain_before['serial'], domain_after['serial'])
def test_delete_recordset_incorrect_domain_id(self):
    """Deleting via the wrong zone id raises RecordSetNotFound."""
    zone = self.create_domain()
    unrelated_zone = self.create_domain(fixture=1)
    rs = self.create_recordset(zone)
    # The recordset does not belong to unrelated_zone, so expect a 404
    with testtools.ExpectedException(exceptions.RecordSetNotFound):
        self.central_service.delete_recordset(
            self.admin_context, unrelated_zone['id'], rs['id'])
def test_count_recordsets(self):
    """count_recordsets counts user recordsets plus the automatic SOA/NS."""
    # in the beginning, there should be nothing
    recordsets = self.central_service.count_recordsets(self.admin_context)
    self.assertEqual(recordsets, 0)
    # Create a domain to put our recordset in
    domain = self.create_domain()
    # Create a recordset
    self.create_recordset(domain)
    # We should have 1 recordset now, plus SOA & NS recordsets
    recordsets = self.central_service.count_recordsets(self.admin_context)
    self.assertEqual(recordsets, 3)
def test_count_recordsets_policy_check(self):
    """count_recordsets is forbidden when policy rejects the caller."""
    # Deny the count_recordsets action for everyone
    self.policy({'count_recordsets': '!'})
    with testtools.ExpectedException(exceptions.Forbidden):
        self.central_service.count_recordsets(self.get_context())
# Record Tests
def test_create_record(self):
    """create_record stores the record and returns id, data and status."""
    domain = self.create_domain()
    recordset = self.create_recordset(domain, type='A')
    values = dict(
        data='127.0.0.1'
    )
    # Create a record
    record = self.central_service.create_record(
        self.admin_context, domain['id'], recordset['id'],
        objects.Record.from_dict(values))
    # Ensure all values have been set correctly
    self.assertIsNotNone(record['id'])
    self.assertEqual(record['data'], values['data'])
    self.assertIn('status', record)
def test_create_record_over_quota(self):
    """Creating records beyond quota_domain_records raises OverQuota."""
    self.config(quota_domain_records=3)
    # The zone's automatic SOA & NS records already consume quota
    zone = self.create_domain()
    rs = self.create_recordset(zone)
    # This record fills the quota of 3...
    self.create_record(zone, rs)
    # ...so the next create must be rejected
    with testtools.ExpectedException(exceptions.OverQuota):
        self.create_record(zone, rs)
def test_create_record_without_incrementing_serial(self):
    """increment_serial=False creates without touching the zone serial."""
    domain = self.create_domain()
    recordset = self.create_recordset(domain, type='A')
    values = dict(
        data='127.0.0.1'
    )
    # Create a record
    self.central_service.create_record(
        self.admin_context, domain['id'], recordset['id'],
        objects.Record.from_dict(values),
        increment_serial=False)
    # Ensure the domains serial number was not updated
    updated_domain = self.central_service.get_domain(
        self.admin_context, domain['id'])
    self.assertEqual(domain['serial'], updated_domain['serial'])
def test_get_record(self):
    """get_record returns the previously created record unchanged."""
    domain = self.create_domain()
    recordset = self.create_recordset(domain)
    # Create a record
    expected = self.create_record(domain, recordset)
    # Retrieve it, and ensure it's the same
    record = self.central_service.get_record(
        self.admin_context, domain['id'], recordset['id'], expected['id'])
    self.assertEqual(record['id'], expected['id'])
    self.assertEqual(record['data'], expected['data'])
    self.assertIn('status', record)
def test_get_record_incorrect_domain_id(self):
    """Fetching a record through the wrong zone raises RecordNotFound."""
    zone = self.create_domain()
    rs = self.create_recordset(zone)
    unrelated_zone = self.create_domain(fixture=1)
    made = self.create_record(zone, rs)
    # The record lives in zone, not unrelated_zone, so expect a 404
    with testtools.ExpectedException(exceptions.RecordNotFound):
        self.central_service.get_record(
            self.admin_context, unrelated_zone['id'], rs['id'],
            made['id'])
def test_get_record_incorrect_recordset_id(self):
    """Fetching a record via the wrong recordset raises RecordNotFound."""
    zone = self.create_domain()
    rs = self.create_recordset(zone)
    unrelated_rs = self.create_recordset(zone, fixture=1)
    made = self.create_record(zone, rs)
    # The record belongs to rs, not unrelated_rs, so expect a 404
    with testtools.ExpectedException(exceptions.RecordNotFound):
        self.central_service.get_record(
            self.admin_context, zone['id'], unrelated_rs['id'],
            made['id'])
def test_find_records(self):
    """find_records returns every record matching the criterion."""
    domain = self.create_domain()
    recordset = self.create_recordset(domain)
    criterion = {
        'domain_id': domain['id'],
        'recordset_id': recordset['id']
    }
    # Ensure we have no records to start with.
    records = self.central_service.find_records(
        self.admin_context, criterion)
    self.assertEqual(len(records), 0)
    # Create a single record (using default values)
    expected_one = self.create_record(domain, recordset)
    # Ensure we can retrieve the newly created record
    records = self.central_service.find_records(
        self.admin_context, criterion)
    self.assertEqual(len(records), 1)
    self.assertEqual(records[0]['data'], expected_one['data'])
    # Create a second record
    expected_two = self.create_record(domain, recordset, fixture=1)
    # Ensure we can retrieve both records
    records = self.central_service.find_records(
        self.admin_context, criterion)
    self.assertEqual(len(records), 2)
    self.assertEqual(records[0]['data'], expected_one['data'])
    self.assertEqual(records[1]['data'], expected_two['data'])
def test_find_record(self):
    """find_record returns the single record matching the criterion."""
    domain = self.create_domain()
    recordset = self.create_recordset(domain)
    # Create a record
    expected = self.create_record(domain, recordset)
    # Retrieve it, and ensure it's the same
    criterion = {
        'domain_id': domain['id'],
        'recordset_id': recordset['id'],
        'data': expected['data']
    }
    record = self.central_service.find_record(
        self.admin_context, criterion)
    self.assertEqual(record['id'], expected['id'])
    self.assertEqual(record['data'], expected['data'])
    self.assertIn('status', record)
def test_update_record(self):
    """update_record persists a changed data value."""
    zone = self.create_domain()
    rs = self.create_recordset(zone, 'A')
    rec = self.create_record(zone, rs)
    # Change the address and push the update
    rec.data = '192.0.2.255'
    self.central_service.update_record(self.admin_context, rec)
    # Re-read the record and confirm the new value was stored
    refreshed = self.central_service.get_record(
        self.admin_context, rec.domain_id, rec.recordset_id,
        rec.id)
    self.assertEqual('192.0.2.255', refreshed.data)
def test_update_record_without_incrementing_serial(self):
    """increment_serial=False updates without touching the zone serial."""
    domain = self.create_domain()
    recordset = self.create_recordset(domain, 'A')
    # Create a record
    record = self.create_record(domain, recordset)
    # Fetch the domain so we have the latest serial number
    domain_before = self.central_service.get_domain(
        self.admin_context, domain.id)
    # Update the Object
    record.data = '192.0.2.255'
    # Perform the update
    self.central_service.update_record(
        self.admin_context, record, increment_serial=False)
    # Fetch the resource again
    record = self.central_service.get_record(
        self.admin_context, record.domain_id, record.recordset_id,
        record.id)
    # Ensure the new value took
    self.assertEqual('192.0.2.255', record.data)
    # Ensure the domains serial number was not updated
    domain_after = self.central_service.get_domain(
        self.admin_context, domain.id)
    self.assertEqual(domain_before.serial, domain_after.serial)
def test_update_record_immutable_domain_id(self):
    """A record's domain_id cannot be changed after creation."""
    zone = self.create_domain()
    rs = self.create_recordset(zone)
    unrelated_zone = self.create_domain(fixture=1)
    rec = self.create_record(zone, rs)
    # Attempt to re-parent the record into another zone
    rec.domain_id = unrelated_zone.id
    # The service must refuse the zone change
    with testtools.ExpectedException(exceptions.BadRequest):
        self.central_service.update_record(self.admin_context, rec)
def test_update_record_immutable_recordset_id(self):
    """A record's recordset_id cannot be changed after creation."""
    zone = self.create_domain()
    rs = self.create_recordset(zone)
    unrelated_rs = self.create_recordset(zone, fixture=1)
    rec = self.create_record(zone, rs)
    # Attempt to move the record to another recordset
    rec.recordset_id = unrelated_rs.id
    # The service must refuse the recordset change
    with testtools.ExpectedException(exceptions.BadRequest):
        self.central_service.update_record(self.admin_context, rec)
def test_delete_record(self):
    """Deleting a record soft-deletes it (action DELETE, status PENDING)
    and increments the zone serial."""
    domain = self.create_domain()
    recordset = self.create_recordset(domain)
    # Create a record
    record = self.create_record(domain, recordset)
    # Fetch the domain serial number
    domain_serial = self.central_service.get_domain(
        self.admin_context, domain['id']).serial
    # Delete the record
    self.central_service.delete_record(
        self.admin_context, domain['id'], recordset['id'], record['id'])
    # Ensure the domain serial number was updated
    new_domain_serial = self.central_service.get_domain(
        self.admin_context, domain['id']).serial
    self.assertNotEqual(new_domain_serial, domain_serial)
    # Fetch the record - it is still readable while the delete is pending
    deleted_record = self.central_service.get_record(
        self.admin_context, domain['id'], recordset['id'],
        record['id'])
    # Ensure the record is marked for deletion
    self.assertEqual(deleted_record.id, record.id)
    self.assertEqual(deleted_record.data, record.data)
    self.assertEqual(deleted_record.domain_id, record.domain_id)
    self.assertEqual(deleted_record.status, 'PENDING')
    self.assertEqual(deleted_record.tenant_id, record.tenant_id)
    self.assertEqual(deleted_record.recordset_id, record.recordset_id)
    self.assertEqual(deleted_record.action, 'DELETE')
    self.assertEqual(deleted_record.serial, new_domain_serial)
def test_delete_record_without_incrementing_serial(self):
    """increment_serial=False soft-deletes without touching the serial."""
    domain = self.create_domain()
    recordset = self.create_recordset(domain)
    # Create a record
    record = self.create_record(domain, recordset)
    # Fetch the domain serial number
    domain_serial = self.central_service.get_domain(
        self.admin_context, domain['id']).serial
    # Delete the record
    self.central_service.delete_record(
        self.admin_context, domain['id'], recordset['id'], record['id'],
        increment_serial=False)
    # Ensure the domains serial number was not updated
    new_domain_serial = self.central_service.get_domain(
        self.admin_context, domain['id'])['serial']
    self.assertEqual(new_domain_serial, domain_serial)
    # Fetch the record - it is still readable while the delete is pending
    deleted_record = self.central_service.get_record(
        self.admin_context, domain['id'], recordset['id'],
        record['id'])
    # Ensure the record is marked for deletion
    self.assertEqual(deleted_record.id, record.id)
    self.assertEqual(deleted_record.data, record.data)
    self.assertEqual(deleted_record.domain_id, record.domain_id)
    self.assertEqual(deleted_record.status, 'PENDING')
    self.assertEqual(deleted_record.tenant_id, record.tenant_id)
    self.assertEqual(deleted_record.recordset_id, record.recordset_id)
    self.assertEqual(deleted_record.action, 'DELETE')
    self.assertEqual(deleted_record.serial, new_domain_serial)
def test_delete_record_incorrect_domain_id(self):
    """Deleting a record through the wrong zone raises RecordNotFound."""
    zone = self.create_domain()
    rs = self.create_recordset(zone)
    unrelated_zone = self.create_domain(fixture=1)
    rec = self.create_record(zone, rs)
    # The record lives in zone, not unrelated_zone, so expect a 404
    with testtools.ExpectedException(exceptions.RecordNotFound):
        self.central_service.delete_record(
            self.admin_context, unrelated_zone['id'], rs['id'],
            rec['id'])
def test_delete_record_incorrect_recordset_id(self):
    """Deleting a record via the wrong recordset raises RecordNotFound."""
    zone = self.create_domain()
    rs = self.create_recordset(zone)
    unrelated_rs = self.create_recordset(zone, fixture=1)
    rec = self.create_record(zone, rs)
    # The record belongs to rs, not unrelated_rs, so expect a 404
    with testtools.ExpectedException(exceptions.RecordNotFound):
        self.central_service.delete_record(
            self.admin_context, zone['id'], unrelated_rs['id'],
            rec['id'])
def test_count_records(self):
    """count_records counts user records plus the automatic SOA/NS."""
    # in the beginning, there should be nothing
    records = self.central_service.count_records(self.admin_context)
    self.assertEqual(records, 0)
    # Create a domain and recordset to put our record in
    domain = self.create_domain()
    recordset = self.create_recordset(domain)
    # Create a record
    self.create_record(domain, recordset)
    # we should have 1 record now, plus SOA & NS records
    records = self.central_service.count_records(self.admin_context)
    self.assertEqual(records, 3)
def test_count_records_policy_check(self):
    """count_records is forbidden when policy rejects the caller."""
    # Deny the count_records action for everyone
    self.policy({'count_records': '!'})
    with testtools.ExpectedException(exceptions.Forbidden):
        self.central_service.count_records(self.get_context())
def test_get_floatingip_no_record(self):
    """A FIP with no PTR set is returned with ptrdname of None."""
    context = self.get_context(tenant='a')
    fip = self.network_api.fake.allocate_floatingip(context.tenant)
    ptr = self.central_service.get_floatingip(
        context, fip['region'], fip['id'])
    # Region / id / address are echoed back; no PTR exists yet
    self.assertEqual(fip['region'], ptr['region'])
    self.assertEqual(fip['id'], ptr['id'])
    self.assertEqual(fip['address'], ptr['address'])
    self.assertIsNone(ptr['ptrdname'])
def test_get_floatingip_with_record(self):
    """A FIP whose PTR has been set is returned with that PTR data.

    get_floatingip must return the same representation that
    update_floatingip produced when the record was set.
    """
    context = self.get_context(tenant='a')
    fixture = self.get_ptr_fixture()
    fip = self.network_api.fake.allocate_floatingip(context.tenant)
    # Setting the PTR yields the expected FIP representation
    expected = self.central_service.update_floatingip(
        context, fip['region'], fip['id'], fixture)
    actual = self.central_service.get_floatingip(
        context, fip['region'], fip['id'])
    # Fix: the original asserted this same equality twice in a row;
    # the duplicate added no coverage and has been removed.
    self.assertEqual(expected, actual)
def test_get_floatingip_not_allocated(self):
    """Fetching a FIP after deallocation raises NotFound."""
    context = self.get_context(tenant='a')
    fip = self.network_api.fake.allocate_floatingip(context.tenant)
    # Give the FIP back before asking for it
    self.network_api.fake.deallocate_floatingip(fip['id'])
    with testtools.ExpectedException(exceptions.NotFound):
        self.central_service.get_floatingip(
            context, fip['region'], fip['id'])
def test_get_floatingip_deallocated_and_invalidate(self):
    """A deallocated FIP's stale PTR record is invalidated only once the
    FIP is re-allocated to another tenant and that tenant reads it."""
    context_a = self.get_context(tenant='a')
    elevated_a = context_a.elevated()
    elevated_a.all_tenants = True
    context_b = self.get_context(tenant='b')
    fixture = self.get_ptr_fixture()
    # First allocate and create a FIP as tenant a
    fip = self.network_api.fake.allocate_floatingip(context_a.tenant)
    self.central_service.update_floatingip(
        context_a, fip['region'], fip['id'], fixture)
    criterion = {
        'managed_resource_id': fip['id'],
        'managed_tenant_id': context_a.tenant}
    domain_id = self.central_service.find_record(
        elevated_a, criterion).domain_id
    # Simulate the update on the backend
    domain_serial = self.central_service.get_domain(
        elevated_a, domain_id).serial
    self.central_service.update_status(
        elevated_a, domain_id, "SUCCESS", domain_serial)
    self.network_api.fake.deallocate_floatingip(fip['id'])
    # Tenant a no longer owns the FIP, so the lookup 404s
    with testtools.ExpectedException(exceptions.NotFound):
        self.central_service.get_floatingip(
            context_a, fip['region'], fip['id'])
    # Ensure that the record is still in DB (No invalidation)
    self.central_service.find_record(elevated_a, criterion)
    # Now give the fip id to tenant 'b' and see that it get's deleted
    self.network_api.fake.allocate_floatingip(
        context_b.tenant, fip['id'])
    # There should be a fip returned with ptrdname of None
    fip_ptr = self.central_service.get_floatingip(
        context_b, fip['region'], fip['id'])
    self.assertEqual(None, fip_ptr['ptrdname'])
    # Simulate the invalidation on the backend
    domain_serial = self.central_service.get_domain(
        elevated_a, domain_id).serial
    self.central_service.update_status(
        elevated_a, domain_id, "SUCCESS", domain_serial)
    # Ensure that the old record for tenant a for the fip now owned by
    # tenant b is gone
    with testtools.ExpectedException(exceptions.RecordNotFound):
        self.central_service.find_record(elevated_a, criterion)
def test_list_floatingips_no_allocations(self):
    """Listing FIPs with none allocated yields an empty list."""
    ctxt = self.get_context(tenant='a')
    listed = self.central_service.list_floatingips(ctxt)
    self.assertEqual(0, len(listed))
def test_list_floatingips_no_record(self):
    """An allocated FIP with no PTR is listed with ptrdname of None."""
    context = self.get_context(tenant='a')
    fip = self.network_api.fake.allocate_floatingip(context.tenant)
    fips = self.central_service.list_floatingips(context)
    # Exactly the one allocated FIP, echoed back without PTR data
    self.assertEqual(1, len(fips))
    self.assertEqual(None, fips[0]['ptrdname'])
    self.assertEqual(fip['id'], fips[0]['id'])
    self.assertEqual(fip['region'], fips[0]['region'])
    self.assertEqual(fip['address'], fips[0]['address'])
    self.assertEqual(None, fips[0]['description'])
def test_list_floatingips_with_record(self):
    """An allocated FIP with a PTR set is listed with that PTR data."""
    context = self.get_context(tenant='a')
    fixture = self.get_ptr_fixture()
    fip = self.network_api.fake.allocate_floatingip(context.tenant)
    # Set the PTR and keep the returned representation for comparison
    fip_ptr = self.central_service.update_floatingip(
        context, fip['region'], fip['id'], fixture)
    fips = self.central_service.list_floatingips(context)
    self.assertEqual(1, len(fips))
    self.assertEqual(fip_ptr['ptrdname'], fips[0]['ptrdname'])
    self.assertEqual(fip_ptr['id'], fips[0]['id'])
    self.assertEqual(fip_ptr['region'], fips[0]['region'])
    self.assertEqual(fip_ptr['address'], fips[0]['address'])
    self.assertEqual(fip_ptr['description'], fips[0]['description'])
def test_list_floatingips_deallocated_and_invalidate(self):
    """Listing after deallocation hides the FIP; the stale PTR record is
    invalidated once the FIP is re-allocated to another tenant."""
    context_a = self.get_context(tenant='a')
    elevated_a = context_a.elevated()
    elevated_a.all_tenants = True
    context_b = self.get_context(tenant='b')
    fixture = self.get_ptr_fixture()
    # First allocate and create a FIP as tenant a
    fip = self.network_api.fake.allocate_floatingip(context_a.tenant)
    self.central_service.update_floatingip(
        context_a, fip['region'], fip['id'], fixture)
    criterion = {
        'managed_resource_id': fip['id'],
        'managed_tenant_id': context_a.tenant}
    domain_id = self.central_service.find_record(
        elevated_a, criterion).domain_id
    # Simulate the update on the backend
    domain_serial = self.central_service.get_domain(
        elevated_a, domain_id).serial
    self.central_service.update_status(
        elevated_a, domain_id, "SUCCESS", domain_serial)
    self.network_api.fake.deallocate_floatingip(fip['id'])
    fips = self.central_service.list_floatingips(context_a)
    # Fix: was a bare `assert(...)`, which is stripped under `python -O`
    # and gives no diagnostic on failure; use the unittest assertion.
    self.assertEqual(0, len(fips))
    # Ensure that the record is still in DB (No invalidation)
    self.central_service.find_record(elevated_a, criterion)
    # Now give the fip id to tenant 'b' and see that it get's deleted
    self.network_api.fake.allocate_floatingip(
        context_b.tenant, fip['id'])
    # There should be a fip returned with ptrdname of None
    fips = self.central_service.list_floatingips(context_b)
    self.assertEqual(1, len(fips))
    self.assertEqual(None, fips[0]['ptrdname'])
    # Simulate the invalidation on the backend
    domain_serial = self.central_service.get_domain(
        elevated_a, domain_id).serial
    self.central_service.update_status(
        elevated_a, domain_id, "SUCCESS", domain_serial)
    # Ensure that the old record for tenant a for the fip now owned by
    # tenant b is gone
    with testtools.ExpectedException(exceptions.RecordNotFound):
        self.central_service.find_record(elevated_a, criterion)
def test_set_floatingip(self):
    """update_floatingip sets the PTR and returns the populated FIP."""
    context = self.get_context(tenant='a')
    fixture = self.get_ptr_fixture()
    fip = self.network_api.fake.allocate_floatingip(context.tenant)
    fip_ptr = self.central_service.update_floatingip(
        context, fip['region'], fip['id'], fixture)
    # The returned FIP carries the requested ptrdname and a default ttl
    self.assertEqual(fixture['ptrdname'], fip_ptr['ptrdname'])
    self.assertEqual(fip['address'], fip_ptr['address'])
    self.assertEqual(None, fip_ptr['description'])
    self.assertIsNotNone(fip_ptr['ttl'])
def test_set_floatingip_no_managed_resource_tenant_id(self):
    """The managed PTR zone is owned by the all-zeros default tenant."""
    context = self.get_context(tenant='a')
    fixture = self.get_ptr_fixture()
    fip = self.network_api.fake.allocate_floatingip(context.tenant)
    self.central_service.update_floatingip(
        context, fip['region'], fip['id'], fixture)
    tenant_id = "00000000-0000-0000-0000-000000000000"
    elevated_context = context.elevated()
    elevated_context.all_tenants = True
    # The domain created should have the default 0's uuid as owner
    domain = self.central_service.find_domain(
        elevated_context,
        {"tenant_id": tenant_id})
    self.assertEqual(tenant_id, domain.tenant_id)
def test_set_floatingip_removes_old_record(self):
    """Re-setting a FIP's PTR (same or new tenant) never leaves more than
    one managed record for that FIP."""
    context_a = self.get_context(tenant='a')
    elevated_a = context_a.elevated()
    elevated_a.all_tenants = True
    context_b = self.get_context(tenant='b')
    fixture = self.get_ptr_fixture()
    # Test that re-setting as tenant a an already set floatingip leaves
    # only 1 record
    fip = self.network_api.fake.allocate_floatingip(context_a.tenant)
    self.central_service.update_floatingip(
        context_a, fip['region'], fip['id'], fixture)
    criterion = {
        'managed_resource_id': fip['id'],
        'managed_tenant_id': context_a.tenant}
    domain_id = self.central_service.find_record(
        elevated_a, criterion).domain_id
    fixture2 = self.get_ptr_fixture(fixture=1)
    self.central_service.update_floatingip(
        context_a, fip['region'], fip['id'], fixture2)
    # Simulate the update on the backend
    domain_serial = self.central_service.get_domain(
        elevated_a, domain_id).serial
    self.central_service.update_status(
        elevated_a, domain_id, "SUCCESS", domain_serial)
    count = self.central_service.count_records(
        elevated_a, {'managed_resource_id': fip['id']})
    self.assertEqual(1, count)
    self.network_api.fake.deallocate_floatingip(fip['id'])
    # Now test that tenant b allocating the same fip and setting a ptr
    # deletes any records
    fip = self.network_api.fake.allocate_floatingip(
        context_b.tenant, fip['id'])
    self.central_service.update_floatingip(
        context_b, fip['region'], fip['id'], fixture)
    # Simulate the update on the backend
    domain_serial = self.central_service.get_domain(
        elevated_a, domain_id).serial
    self.central_service.update_status(
        elevated_a, domain_id, "SUCCESS", domain_serial)
    count = self.central_service.count_records(
        elevated_a, {'managed_resource_id': fip['id']})
    self.assertEqual(1, count)
def test_set_floatingip_not_allocated(self):
    """Setting a PTR on a deallocated FIP raises NotFound."""
    context = self.get_context(tenant='a')
    fixture = self.get_ptr_fixture()
    fip = self.network_api.fake.allocate_floatingip(context.tenant)
    # Give the FIP back before trying to assign a PTR to it
    self.network_api.fake.deallocate_floatingip(fip['id'])
    # Assigning a de-allocated (or not-owned) FIP must fail
    with testtools.ExpectedException(exceptions.NotFound):
        fixture = self.central_service.update_floatingip(
            context, fip['region'], fip['id'], fixture)
def test_unset_floatingip(self):
    """Setting ptrdname to None clears a previously set PTR."""
    context = self.get_context(tenant='a')
    fixture = self.get_ptr_fixture()
    fip = self.network_api.fake.allocate_floatingip(context.tenant)
    # First set the PTR and verify the returned values
    fip_ptr = self.central_service.update_floatingip(
        context, fip['region'], fip['id'], fixture)
    self.assertEqual(fixture['ptrdname'], fip_ptr['ptrdname'])
    self.assertEqual(fip['address'], fip_ptr['address'])
    self.assertEqual(None, fip_ptr['description'])
    self.assertIsNotNone(fip_ptr['ttl'])
    # Then unset it by sending ptrdname=None
    self.central_service.update_floatingip(
        context, fip['region'], fip['id'],
        objects.FloatingIP().from_dict({'ptrdname': None}))
    # The FIP must still be fetchable after the unset
    self.central_service.get_floatingip(
        context, fip['region'], fip['id'])
# Blacklist Tests
def test_create_blacklist(self):
    """create_blacklist stores the pattern and description."""
    values = self.get_blacklist_fixture(fixture=0)
    blacklist = self.create_blacklist(fixture=0)
    # Verify all values have been set correctly
    self.assertIsNotNone(blacklist['id'])
    self.assertEqual(blacklist['pattern'], values['pattern'])
    self.assertEqual(blacklist['description'], values['description'])
def test_get_blacklist(self):
    """get_blacklist returns the previously created entry unchanged."""
    # Create a blacklisted zone
    expected = self.create_blacklist(fixture=0)
    # Retrieve it, and verify it is the same
    blacklist = self.central_service.get_blacklist(
        self.admin_context, expected['id'])
    self.assertEqual(blacklist['id'], expected['id'])
    self.assertEqual(blacklist['pattern'], expected['pattern'])
    self.assertEqual(blacklist['description'], expected['description'])
def test_find_blacklists(self):
    """find_blacklists returns all entries, in creation order."""
    # Verify there are no blacklisted zones to start with
    blacklists = self.central_service.find_blacklists(
        self.admin_context)
    self.assertEqual(len(blacklists), 0)
    # Create a single blacklisted zone
    self.create_blacklist()
    # Verify we can retrieve the newly created blacklist
    blacklists = self.central_service.find_blacklists(
        self.admin_context)
    values1 = self.get_blacklist_fixture(fixture=0)
    self.assertEqual(len(blacklists), 1)
    self.assertEqual(blacklists[0]['pattern'], values1['pattern'])
    # Create a second blacklisted zone
    self.create_blacklist(fixture=1)
    # Verify we can retrieve both blacklisted zones
    blacklists = self.central_service.find_blacklists(
        self.admin_context)
    values2 = self.get_blacklist_fixture(fixture=1)
    self.assertEqual(len(blacklists), 2)
    self.assertEqual(blacklists[0]['pattern'], values1['pattern'])
    self.assertEqual(blacklists[1]['pattern'], values2['pattern'])
def test_find_blacklist(self):
    """find_blacklist retrieves a single entry by criterion."""
    created = self.create_blacklist(fixture=0)
    # Look it up by id and compare the stored fields
    found = self.central_service.find_blacklist(
        self.admin_context, {'id': created['id']})
    self.assertEqual(created['pattern'], found['pattern'])
    self.assertEqual(created['description'], found['description'])
def test_update_blacklist(self):
    """update_blacklist persists a changed description."""
    entry = self.create_blacklist(fixture=0)
    # Change the description and push the update
    entry.description = "New Comment"
    self.central_service.update_blacklist(self.admin_context, entry)
    # Re-read the entry and confirm the change was stored
    refreshed = self.central_service.get_blacklist(self.admin_context,
                                                   entry.id)
    self.assertEqual("New Comment", refreshed.description)
def test_delete_blacklist(self):
    """Deleting a blacklist entry makes it unfetchable."""
    entry = self.create_blacklist()
    self.central_service.delete_blacklist(self.admin_context,
                                          entry['id'])
    # Fetching the deleted entry must now raise
    with testtools.ExpectedException(exceptions.BlacklistNotFound):
        self.central_service.get_blacklist(self.admin_context,
                                           entry['id'])
# SOA recordset tests
def test_create_SOA(self):
    """Creating a zone auto-creates an SOA recordset mirroring the zone's
    email, serial, refresh, retry, expire and minimum values."""
    # A SOA record should automatically be created each time
    # a zone is created
    # Create a zone
    zone = self.create_domain(name='example3.org.')
    # Retrieve SOA
    criterion = {'domain_id': zone['id'], 'type': 'SOA'}
    soa = self.central_service.find_recordset(self.admin_context,
                                              criterion)
    # Split out the various soa values
    soa_record_values = soa.records[0].data.split()
    # SOA rname is the zone email with '@' -> '.' and a trailing dot
    zone_email = zone['email'].replace("@", ".")
    zone_email += (".")
    # Ensure all values have been set correctly
    self.assertIsNotNone(soa.id)
    self.assertEqual('SOA', soa.type)
    self.assertIsNotNone(soa.records)
    self.assertEqual(int(soa_record_values[2]), zone['serial'])
    self.assertEqual(soa_record_values[1], zone_email)
    self.assertEqual(int(soa_record_values[3]), zone['refresh'])
    self.assertEqual(int(soa_record_values[4]), zone['retry'])
    self.assertEqual(int(soa_record_values[5]), zone['expire'])
    self.assertEqual(int(soa_record_values[6]), zone['minimum'])
def test_update_soa(self):
    """Updating a zone keeps the SOA record's serial in sync."""
    # Anytime the zone's serial number is incremented
    # the SOA recordset should automatically be updated
    zone = self.create_domain(email='[email protected]')
    # Update the object
    zone.email = '[email protected]'
    # Perform the update
    self.central_service.update_domain(self.admin_context, zone)
    # Fetch the domain again
    updated_zone = self.central_service.get_domain(self.admin_context,
                                                   zone.id)
    # Retrieve SOA
    criterion = {'domain_id': zone['id'], 'type': 'SOA'}
    soa = self.central_service.find_recordset(self.admin_context,
                                              criterion)
    # Split out the various soa values
    soa_record_values = soa.records[0].data.split()
    # Field 2 of the SOA rdata is the serial; it must match the zone's
    self.assertEqual(int(soa_record_values[2]), updated_zone['serial'])
# Pool Tests
def test_create_pool(self):
    """create_pool stores the pool with its attributes and ns_records."""
    # Get the values
    values = self.get_pool_fixture(fixture=0)
    # Create the pool using the values
    pool = self.central_service.create_pool(
        self.admin_context, objects.Pool.from_dict(values))
    # Verify that all the values were set correctly
    self.assertIsNotNone(pool['id'])
    self.assertIsNotNone(pool['created_at'])
    self.assertIsNotNone(pool['version'])
    self.assertIsNotNone(pool['tenant_id'])
    self.assertIsNone(pool['updated_at'])
    self.assertIsNotNone(pool['attributes'])
    self.assertIsNotNone(pool['ns_records'])
    self.assertEqual(pool['name'], values['name'])
    # Compare the actual values of attributes and ns_records
    # (to_primitive() exposes the raw field dict for comparison)
    for k in range(0, len(values['attributes'])):
        self.assertDictContainsSubset(
            values['attributes'][k],
            pool['attributes'][k].to_primitive()['designate_object.data']
        )
    for k in range(0, len(values['ns_records'])):
        self.assertDictContainsSubset(
            values['ns_records'][k],
            pool['ns_records'][k].to_primitive()['designate_object.data'])
def test_get_pool(self):
    """get_pool returns the previously created pool unchanged."""
    # Create a server pool
    expected = self.create_pool(fixture=0)
    # GET the pool and verify it is the same
    pool = self.central_service.get_pool(self.admin_context,
                                         expected['id'])
    self.assertEqual(pool['id'], expected['id'])
    self.assertEqual(pool['created_at'], expected['created_at'])
    self.assertEqual(pool['version'], expected['version'])
    self.assertEqual(pool['tenant_id'], expected['tenant_id'])
    self.assertEqual(pool['name'], expected['name'])
    # Compare the actual values of attributes and ns_records
    for k in range(0, len(expected['attributes'])):
        self.assertEqual(
            pool['attributes'][k].to_primitive()['designate_object.data'],
            expected['attributes'][k].to_primitive()
            ['designate_object.data'])
    for k in range(0, len(expected['ns_records'])):
        self.assertEqual(
            pool['ns_records'][k].to_primitive()['designate_object.data'],
            expected['ns_records'][k].to_primitive()
            ['designate_object.data'])
def test_find_pools(self):
    """find_pools lists the default pool plus any created pools."""
    # Verify no pools exist, except for default pool
    pools = self.central_service.find_pools(self.admin_context)
    self.assertEqual(len(pools), 1)
    # Create a pool
    self.create_pool(fixture=0)
    # Verify we can find the newly created pool
    pools = self.central_service.find_pools(self.admin_context)
    values = self.get_pool_fixture(fixture=0)
    self.assertEqual(len(pools), 2)
    # pools[0] is the default pool; ours is at index 1
    self.assertEqual(pools[1]['name'], values['name'])
    # Compare the actual values of attributes and ns_records
    expected_attributes = values['attributes'][0]
    actual_attributes = \
        pools[1]['attributes'][0].to_primitive()['designate_object.data']
    for k in expected_attributes:
        self.assertEqual(actual_attributes[k], expected_attributes[k])
    expected_ns_records = values['ns_records'][0]
    actual_ns_records = \
        pools[1]['ns_records'][0].to_primitive()['designate_object.data']
    for k in expected_ns_records:
        self.assertEqual(actual_ns_records[k], expected_ns_records[k])
def test_find_pool(self):
    """find_pool retrieves a single pool by criterion."""
    # Create a server pool
    expected = self.create_pool(fixture=0)
    # Find the created pool
    pool = self.central_service.find_pool(self.admin_context,
                                          {'id': expected['id']})
    self.assertEqual(pool['name'], expected['name'])
    # Compare the actual values of attributes and ns_records
    for k in range(0, len(expected['attributes'])):
        self.assertEqual(
            pool['attributes'][k].to_primitive()['designate_object.data'],
            expected['attributes'][k].to_primitive()
            ['designate_object.data'])
    for k in range(0, len(expected['ns_records'])):
        self.assertEqual(
            pool['ns_records'][k].to_primitive()['designate_object.data'],
            expected['ns_records'][k].to_primitive()
            ['designate_object.data'])
def test_update_pool(self):
    """update_pool persists a changed description."""
    pool = self.create_pool(fixture=0)
    # Change the description and push the update
    pool.description = 'New Comment'
    self.central_service.update_pool(self.admin_context, pool)
    # Re-read the pool and confirm the change was stored
    refreshed = self.central_service.get_pool(self.admin_context, pool.id)
    self.assertEqual("New Comment", refreshed.description)
def test_update_pool_add_ns_record(self):
    """Adding an ns_record to a pool propagates into its zones' NS sets."""
    # Create a server pool and domain
    pool = self.create_pool(fixture=0)
    domain = self.create_domain(pool_id=pool.id)
    ns_record_count = len(pool.ns_records)
    new_ns_record = objects.PoolNsRecord(
        priority=10,
        hostname='ns-new.example.org.')
    # Update and save the pool
    pool.ns_records.append(new_ns_record)
    self.central_service.update_pool(self.admin_context, pool)
    # Fetch the pool
    pool = self.central_service.get_pool(self.admin_context, pool.id)
    # Verify that the pool was updated correctly
    self.assertEqual(ns_record_count + 1, len(pool.ns_records))
    self.assertIn(new_ns_record.hostname,
                  [n.hostname for n in pool.ns_records])
    # Fetch the domains NS recordset
    ns_recordset = self.central_service.find_recordset(
        self.admin_context,
        criterion={'domain_id': domain.id, 'type': "NS"})
    # Verify that the domain's NS records were updated correctly
    self.assertEqual(set([n.hostname for n in pool.ns_records]),
                     set([n.data for n in ns_recordset.records]))
    def test_update_pool_remove_ns_record(self):
        """Removing an NS record from a pool must propagate to the NS
        recordsets of the pool's existing domains."""
        # Create a server pool and domain
        pool = self.create_pool(fixture=0)
        domain = self.create_domain(pool_id=pool.id)
        ns_record_count = len(pool.ns_records)
        # Update and save the pool
        removed_ns_record = pool.ns_records.pop(-1)
        self.central_service.update_pool(self.admin_context, pool)
        # Fetch the pool
        pool = self.central_service.get_pool(self.admin_context, pool.id)
        # Verify that the pool was updated correctly
        self.assertEqual(ns_record_count - 1, len(pool.ns_records))
        self.assertNotIn(removed_ns_record.hostname,
                         [n.hostname for n in pool.ns_records])
        # Fetch the domains NS recordset
        ns_recordset = self.central_service.find_recordset(
            self.admin_context,
            criterion={'domain_id': domain.id, 'type': "NS"})
        # Verify that the domain's NS records were updated correctly
        self.assertEqual(set([n.hostname for n in pool.ns_records]),
                         set([n.data for n in ns_recordset.records]))
def test_delete_pool(self):
# Create a server pool
pool = self.create_pool()
# Delete the pool
self.central_service.delete_pool(self.admin_context, pool['id'])
# Verify that the pool has been deleted
with testtools.ExpectedException(exceptions.PoolNotFound):
self.central_service.get_pool(self.admin_context, pool['id'])
    def test_update_status_delete_domain(self):
        """A deleted domain is purged once the backend reports SUCCESS
        via update_status()."""
        # Create a domain
        domain = self.create_domain()
        # Delete the domain
        self.central_service.delete_domain(self.admin_context, domain['id'])
        # Simulate the domain having been deleted on the backend
        domain_serial = self.central_service.get_domain(
            self.admin_context, domain['id']).serial
        self.central_service.update_status(
            self.admin_context, domain['id'], "SUCCESS", domain_serial)
        # Fetch the domain again, ensuring an exception is raised
        with testtools.ExpectedException(exceptions.DomainNotFound):
            self.central_service.get_domain(self.admin_context, domain['id'])
    def test_update_status_delete_last_record(self):
        """Deleting a recordset's last record is finalised once the
        backend reports SUCCESS via update_status()."""
        domain = self.create_domain()
        recordset = self.create_recordset(domain)
        # Create a record
        record = self.create_record(domain, recordset)
        # Delete the record
        self.central_service.delete_record(
            self.admin_context, domain['id'], recordset['id'], record['id'])
        # Simulate the record having been deleted on the backend
        domain_serial = self.central_service.get_domain(
            self.admin_context, domain['id']).serial
        self.central_service.update_status(
            self.admin_context, domain['id'], "SUCCESS", domain_serial)
        # Fetch the record again, ensuring an exception is raised
        with testtools.ExpectedException(exceptions.RecordSetNotFound):
            self.central_service.get_record(
                self.admin_context, domain['id'], recordset['id'],
                record['id'])
def test_update_status_delete_last_record_without_incrementing_serial(
self):
domain = self.create_domain()
recordset = self.create_recordset(domain)
# Create a record
record = self.create_record(domain, recordset)
# Fetch the domain serial number
domain_serial = self.central_service.get_domain(
self.admin_context, domain['id']).serial
# Delete the record
self.central_service.delete_record(
self.admin_context, domain['id'], recordset['id'], record['id'],
increment_serial=False)
# Simulate the record having been deleted on the backend
domain_serial = self.central_service.get_domain(
self.admin_context, domain['id']).serial
self.central_service.update_status(
self.admin_context, domain['id'], "SUCCESS", domain_serial)
# Fetch the record again, ensuring an exception is raised
with testtools.ExpectedException(exceptions.RecordSetNotFound):
self.central_service.get_record(
self.admin_context, domain['id'], recordset['id'],
record['id'])
# Ensure the domains serial number was not updated
new_domain_serial = self.central_service.get_domain(
self.admin_context, domain['id']).serial
self.assertEqual(new_domain_serial, domain_serial)
    def test_create_zone_transfer_request(self):
        """A new transfer request gets an id, tenant, key and domain."""
        domain = self.create_domain()
        zone_transfer_request = self.create_zone_transfer_request(domain)
        # Verify all values have been set correctly
        self.assertIsNotNone(zone_transfer_request.id)
        self.assertIsNotNone(zone_transfer_request.tenant_id)
        self.assertIsNotNone(zone_transfer_request.key)
        self.assertEqual(zone_transfer_request.domain_id, domain.id)
    def test_create_zone_transfer_request_duplicate(self):
        """A second transfer request for the same domain is rejected."""
        domain = self.create_domain()
        self.create_zone_transfer_request(domain)
        with testtools.ExpectedException(
                exceptions.DuplicateZoneTransferRequest):
            self.create_zone_transfer_request(domain)
    def test_create_scoped_zone_transfer_request(self):
        """A scoped transfer request records the intended target tenant
        from the fixture."""
        domain = self.create_domain()
        values = self.get_zone_transfer_request_fixture(fixture=1)
        zone_transfer_request = self.create_zone_transfer_request(domain,
                                                                  fixture=1)
        # Verify all values have been set correctly
        self.assertIsNotNone(zone_transfer_request.id)
        self.assertIsNotNone(zone_transfer_request.tenant_id)
        self.assertEqual(zone_transfer_request.domain_id, domain.id)
        self.assertIsNotNone(zone_transfer_request.key)
        self.assertEqual(
            zone_transfer_request.target_tenant_id,
            values['target_tenant_id'])
def test_get_zone_transfer_request(self):
domain = self.create_domain()
zt_request = self.create_zone_transfer_request(domain,
fixture=1)
retrived_zt = self.central_service.get_zone_transfer_request(
self.admin_context,
zt_request.id)
self.assertEqual(zt_request.domain_id, retrived_zt.domain_id)
self.assertEqual(zt_request.key, retrived_zt.key)
    def test_get_zone_transfer_request_scoped(self):
        """Only the owning tenant and the target tenant may read a scoped
        transfer request; any other tenant gets Forbidden."""
        tenant_1_context = self.get_context(tenant=1)
        tenant_2_context = self.get_context(tenant=2)
        tenant_3_context = self.get_context(tenant=3)
        domain = self.create_domain(context=tenant_1_context)
        zt_request = self.create_zone_transfer_request(
            domain,
            context=tenant_1_context,
            target_tenant_id=2)
        self.central_service.get_zone_transfer_request(
            tenant_2_context, zt_request.id)
        self.central_service.get_zone_transfer_request(
            tenant_1_context, zt_request.id)
        with testtools.ExpectedException(exceptions.Forbidden):
            self.central_service.get_zone_transfer_request(
                tenant_3_context, zt_request.id)
def test_update_zone_transfer_request(self):
domain = self.create_domain()
zone_transfer_request = self.create_zone_transfer_request(domain)
zone_transfer_request.description = 'TEST'
self.central_service.update_zone_transfer_request(
self.admin_context, zone_transfer_request)
# Verify all values have been set correctly
self.assertIsNotNone(zone_transfer_request.id)
self.assertIsNotNone(zone_transfer_request.tenant_id)
self.assertIsNotNone(zone_transfer_request.key)
self.assertEqual(zone_transfer_request.description, 'TEST')
def test_delete_zone_transfer_request(self):
domain = self.create_domain()
zone_transfer_request = self.create_zone_transfer_request(domain)
self.central_service.delete_zone_transfer_request(
self.admin_context, zone_transfer_request.id)
with testtools.ExpectedException(
exceptions.ZoneTransferRequestNotFound):
self.central_service.get_zone_transfer_request(
self.admin_context,
zone_transfer_request.id)
    def test_create_zone_transfer_accept(self):
        """Accepting a transfer moves the domain, recordset and record to
        the accepting tenant and marks both sides COMPLETE."""
        tenant_1_context = self.get_context(tenant=1)
        tenant_2_context = self.get_context(tenant=2)
        admin_context = self.get_admin_context()
        admin_context.all_tenants = True
        domain = self.create_domain(context=tenant_1_context)
        recordset = self.create_recordset(domain, context=tenant_1_context)
        record = self.create_record(
            domain, recordset, context=tenant_1_context)
        zone_transfer_request = self.create_zone_transfer_request(
            domain, context=tenant_1_context)
        zone_transfer_accept = objects.ZoneTransferAccept()
        zone_transfer_accept.zone_transfer_request_id =\
            zone_transfer_request.id
        zone_transfer_accept.key = zone_transfer_request.key
        zone_transfer_accept.domain_id = domain.id
        zone_transfer_accept = \
            self.central_service.create_zone_transfer_accept(
                tenant_2_context, zone_transfer_accept)
        result = {}
        result['domain'] = self.central_service.get_domain(
            admin_context, domain.id)
        result['recordset'] = self.central_service.get_recordset(
            admin_context, domain.id, recordset.id)
        result['record'] = self.central_service.get_record(
            admin_context, domain.id, recordset.id, record.id)
        result['zt_accept'] = self.central_service.get_zone_transfer_accept(
            admin_context, zone_transfer_accept.id)
        result['zt_request'] = self.central_service.get_zone_transfer_request(
            admin_context, zone_transfer_request.id)
        self.assertEqual(
            result['domain'].tenant_id, str(tenant_2_context.tenant))
        self.assertEqual(
            result['recordset'].tenant_id, str(tenant_2_context.tenant))
        self.assertEqual(
            result['record'].tenant_id, str(tenant_2_context.tenant))
        self.assertEqual(
            result['zt_accept'].status, 'COMPLETE')
        self.assertEqual(
            result['zt_request'].status, 'COMPLETE')
    def test_create_zone_transfer_accept_scoped(self):
        """Accepting a transfer scoped to the accepting tenant moves all
        ownership and marks both the accept and the request COMPLETE."""
        tenant_1_context = self.get_context(tenant=1)
        tenant_2_context = self.get_context(tenant=2)
        admin_context = self.get_admin_context()
        admin_context.all_tenants = True
        domain = self.create_domain(context=tenant_1_context)
        recordset = self.create_recordset(domain, context=tenant_1_context)
        record = self.create_record(
            domain, recordset, context=tenant_1_context)
        zone_transfer_request = self.create_zone_transfer_request(
            domain,
            context=tenant_1_context,
            target_tenant_id='2')
        zone_transfer_accept = objects.ZoneTransferAccept()
        zone_transfer_accept.zone_transfer_request_id =\
            zone_transfer_request.id
        zone_transfer_accept.key = zone_transfer_request.key
        zone_transfer_accept.domain_id = domain.id
        zone_transfer_accept = \
            self.central_service.create_zone_transfer_accept(
                tenant_2_context, zone_transfer_accept)
        result = {}
        result['domain'] = self.central_service.get_domain(
            admin_context, domain.id)
        result['recordset'] = self.central_service.get_recordset(
            admin_context, domain.id, recordset.id)
        result['record'] = self.central_service.get_record(
            admin_context, domain.id, recordset.id, record.id)
        result['zt_accept'] = self.central_service.get_zone_transfer_accept(
            admin_context, zone_transfer_accept.id)
        result['zt_request'] = self.central_service.get_zone_transfer_request(
            admin_context, zone_transfer_request.id)
        self.assertEqual(
            result['domain'].tenant_id, str(tenant_2_context.tenant))
        self.assertEqual(
            result['recordset'].tenant_id, str(tenant_2_context.tenant))
        self.assertEqual(
            result['record'].tenant_id, str(tenant_2_context.tenant))
        self.assertEqual(
            result['zt_accept'].status, 'COMPLETE')
        self.assertEqual(
            result['zt_request'].status, 'COMPLETE')
    def test_create_zone_transfer_accept_failed_key(self):
        """An accept with the wrong key raises IncorrectZoneTransferKey."""
        tenant_1_context = self.get_context(tenant=1)
        tenant_2_context = self.get_context(tenant=2)
        admin_context = self.get_admin_context()
        admin_context.all_tenants = True
        domain = self.create_domain(context=tenant_1_context)
        zone_transfer_request = self.create_zone_transfer_request(
            domain,
            context=tenant_1_context,
            target_tenant_id=2)
        zone_transfer_accept = objects.ZoneTransferAccept()
        zone_transfer_accept.zone_transfer_request_id =\
            zone_transfer_request.id
        zone_transfer_accept.key = 'WRONG KEY'
        zone_transfer_accept.domain_id = domain.id
        with testtools.ExpectedException(exceptions.IncorrectZoneTransferKey):
            zone_transfer_accept = \
                self.central_service.create_zone_transfer_accept(
                    tenant_2_context, zone_transfer_accept)
    def test_create_zone_tarnsfer_accept_out_of_tenant_scope(self):
        """A tenant outside the request's target scope gets Forbidden.

        NOTE: 'tarnsfer' in the method name is a long-standing typo; it
        is kept because renaming would change the test's identity for
        test selectors.
        """
        tenant_1_context = self.get_context(tenant=1)
        tenant_3_context = self.get_context(tenant=3)
        admin_context = self.get_admin_context()
        admin_context.all_tenants = True
        domain = self.create_domain(context=tenant_1_context)
        zone_transfer_request = self.create_zone_transfer_request(
            domain,
            context=tenant_1_context,
            target_tenant_id=2)
        zone_transfer_accept = objects.ZoneTransferAccept()
        zone_transfer_accept.zone_transfer_request_id =\
            zone_transfer_request.id
        zone_transfer_accept.key = zone_transfer_request.key
        zone_transfer_accept.domain_id = domain.id
        with testtools.ExpectedException(exceptions.Forbidden):
            zone_transfer_accept = \
                self.central_service.create_zone_transfer_accept(
                    tenant_3_context, zone_transfer_accept)
# Zone Import Tests
def test_create_zone_import(self):
# Create a Zone Import
context = self.get_context()
request_body = self.get_zonefile_fixture()
zone_import = self.central_service.create_zone_import(context,
request_body)
# Ensure all values have been set correctly
self.assertIsNotNone(zone_import['id'])
self.assertEqual(zone_import.status, 'PENDING')
self.assertEqual(zone_import.message, None)
self.assertEqual(zone_import.domain_id, None)
self.wait_for_import(zone_import.id)
    def test_find_zone_imports(self):
        """find_zone_imports returns every created import, and each
        import eventually reaches COMPLETE."""
        context = self.get_context()
        # Ensure we have no zone_imports to start with.
        zone_imports = self.central_service.find_zone_imports(
            self.admin_context)
        self.assertEqual(len(zone_imports), 0)
        # Create a single zone_import
        request_body = self.get_zonefile_fixture()
        self.central_service.create_zone_import(context, request_body)
        # Ensure we can retrieve the newly created zone_import
        zone_imports = self.central_service.find_zone_imports(
            self.admin_context)
        self.assertEqual(len(zone_imports), 1)
        # Create a second zone_import
        request_body = self.get_zonefile_fixture(variant="two")
        zone_import = self.central_service.create_zone_import(context,
                                                              request_body)
        # Wait for the imports to complete
        self.wait_for_import(zone_import.id)
        # Ensure we can retrieve both zone_imports
        zone_imports = self.central_service.find_zone_imports(
            self.admin_context)
        self.assertEqual(len(zone_imports), 2)
        self.assertEqual(zone_imports[0].status, 'COMPLETE')
        self.assertEqual(zone_imports[1].status, 'COMPLETE')
def test_get_zone_import(self):
# Create a Zone Import
context = self.get_context()
request_body = self.get_zonefile_fixture()
zone_import = self.central_service.create_zone_import(
context, request_body)
# Wait for the import to complete
# time.sleep(1)
self.wait_for_import(zone_import.id)
# Retrieve it, and ensure it's the same
zone_import = self.central_service.get_zone_import(
self.admin_context, zone_import.id)
self.assertEqual(zone_import['id'], zone_import.id)
self.assertEqual(zone_import['status'], zone_import.status)
self.assertEqual('COMPLETE', zone_import.status)
    def test_update_zone_import(self):
        """An updated zone import message must persist."""
        # Create a Zone Import
        context = self.get_context()
        request_body = self.get_zonefile_fixture()
        zone_import = self.central_service.create_zone_import(
            context, request_body)
        self.wait_for_import(zone_import.id)
        # Update the Object
        zone_import.message = 'test message'
        # Perform the update
        zone_import = self.central_service.update_zone_import(
            self.admin_context, zone_import)
        # Fetch the zone_import again
        zone_import = self.central_service.get_zone_import(context,
                                                           zone_import.id)
        # Ensure the zone_import was updated correctly
        self.assertEqual('test message', zone_import.message)
    def test_delete_zone_import(self):
        """A deleted zone import must no longer be retrievable."""
        # Create a Zone Import
        context = self.get_context()
        request_body = self.get_zonefile_fixture()
        zone_import = self.central_service.create_zone_import(
            context, request_body)
        self.wait_for_import(zone_import.id)
        # Delete the zone_import
        self.central_service.delete_zone_import(context,
                                                zone_import['id'])
        # Fetch the zone_import again, ensuring an exception is raised
        self.assertRaises(
            exceptions.ZoneTaskNotFound,
            self.central_service.get_zone_import,
            context, zone_import['id'])
| kiall/designate-py3 | designate/tests/test_central/test_service.py | Python | apache-2.0 | 113,441 |
"""empty message
Revision ID: 2345bfa569f
Revises: 202f38341bd
Create Date: 2015-11-22 20:49:52.248358
"""
# revision identifiers, used by Alembic.
revision = '2345bfa569f'
down_revision = '202f38341bd'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Apply the schema change.

    Adds the nullable boolean column ``rules.turn_motion_on`` and
    relaxes ``rules.turn_on`` to allow NULL.
    """
    ### commands auto generated by Alembic - please adjust! ###
    op.add_column('rules', sa.Column('turn_motion_on', sa.Boolean(), nullable=True))
    op.alter_column('rules', 'turn_on',
               existing_type=sa.BOOLEAN(),
               nullable=True)
    ### end Alembic commands ###
def downgrade():
    """Revert the schema change.

    Makes ``rules.turn_on`` NOT NULL again and drops the
    ``rules.turn_motion_on`` column. NOTE(review): restoring NOT NULL
    will fail if NULL values were written while upgraded.
    """
    ### commands auto generated by Alembic - please adjust! ###
    op.alter_column('rules', 'turn_on',
               existing_type=sa.BOOLEAN(),
               nullable=False)
    op.drop_column('rules', 'turn_motion_on')
    ### end Alembic commands ###
| DanCardin/nosferatu | migrations/versions/2345bfa569f_.py | Python | apache-2.0 | 842 |
# -*- coding: utf-8 -*-
"""
These the test the public routines exposed in types/common.py
related to inference and not otherwise tested in types/test_common.py
"""
import collections
from datetime import date, datetime, time, timedelta
from decimal import Decimal
from fractions import Fraction
from numbers import Number
import re
from warnings import catch_warnings, simplefilter
import numpy as np
import pytest
import pytz
from pandas._libs import iNaT, lib, missing as libmissing
from pandas.compat import PY2, StringIO, lrange, u
import pandas.util._test_decorators as td
from pandas.core.dtypes import inference
from pandas.core.dtypes.common import (
ensure_categorical, ensure_int32, is_bool, is_datetime64_any_dtype,
is_datetime64_dtype, is_datetime64_ns_dtype, is_datetime64tz_dtype,
is_float, is_integer, is_number, is_scalar, is_scipy_sparse,
is_timedelta64_dtype, is_timedelta64_ns_dtype)
import pandas as pd
from pandas import (
Categorical, DataFrame, DateOffset, DatetimeIndex, Index, Interval, Panel,
Period, Series, Timedelta, TimedeltaIndex, Timestamp, compat, isna)
from pandas.util import testing as tm
@pytest.fixture(params=[True, False], ids=str)
def coerce(request):
    # Boolean fixture: run each test once with coercion and once without.
    return request.param
# collect all objects to be tested for list-like-ness; use tuples of objects,
# whether they are list-like or not (special casing for sets), and their ID
# NOTE: the string sentinel 'set' (rather than True/False) marks entries
# whose expected result depends on is_list_like's ``allow_sets`` flag.
ll_params = [
    ([1], True, 'list'), # noqa: E241
    ([], True, 'list-empty'), # noqa: E241
    ((1, ), True, 'tuple'), # noqa: E241
    (tuple(), True, 'tuple-empty'), # noqa: E241
    ({'a': 1}, True, 'dict'), # noqa: E241
    (dict(), True, 'dict-empty'), # noqa: E241
    ({'a', 1}, 'set', 'set'), # noqa: E241
    (set(), 'set', 'set-empty'), # noqa: E241
    (frozenset({'a', 1}), 'set', 'frozenset'), # noqa: E241
    (frozenset(), 'set', 'frozenset-empty'), # noqa: E241
    (iter([1, 2]), True, 'iterator'), # noqa: E241
    (iter([]), True, 'iterator-empty'), # noqa: E241
    ((x for x in [1, 2]), True, 'generator'), # noqa: E241
    ((x for x in []), True, 'generator-empty'), # noqa: E241
    (Series([1]), True, 'Series'), # noqa: E241
    (Series([]), True, 'Series-empty'), # noqa: E241
    (Series(['a']).str, True, 'StringMethods'), # noqa: E241
    (Series([], dtype='O').str, True, 'StringMethods-empty'), # noqa: E241
    (Index([1]), True, 'Index'), # noqa: E241
    (Index([]), True, 'Index-empty'), # noqa: E241
    (DataFrame([[1]]), True, 'DataFrame'), # noqa: E241
    (DataFrame(), True, 'DataFrame-empty'), # noqa: E241
    (np.ndarray((2,) * 1), True, 'ndarray-1d'), # noqa: E241
    (np.array([]), True, 'ndarray-1d-empty'), # noqa: E241
    (np.ndarray((2,) * 2), True, 'ndarray-2d'), # noqa: E241
    (np.array([[]]), True, 'ndarray-2d-empty'), # noqa: E241
    (np.ndarray((2,) * 3), True, 'ndarray-3d'), # noqa: E241
    (np.array([[[]]]), True, 'ndarray-3d-empty'), # noqa: E241
    (np.ndarray((2,) * 4), True, 'ndarray-4d'), # noqa: E241
    (np.array([[[[]]]]), True, 'ndarray-4d-empty'), # noqa: E241
    (np.array(2), False, 'ndarray-0d'), # noqa: E241
    (1, False, 'int'), # noqa: E241
    (b'123', False, 'bytes'), # noqa: E241
    (b'', False, 'bytes-empty'), # noqa: E241
    ('123', False, 'string'), # noqa: E241
    ('', False, 'string-empty'), # noqa: E241
    (str, False, 'string-type'), # noqa: E241
    (object(), False, 'object'), # noqa: E241
    (np.nan, False, 'NaN'), # noqa: E241
    (None, False, 'None') # noqa: E241
]
objs, expected, ids = zip(*ll_params)
@pytest.fixture(params=zip(objs, expected), ids=ids)
def maybe_list_like(request):
    # Yields (object, expected) pairs; ``expected`` may also be the
    # sentinel string 'set' for set-like inputs.
    return request.param
def test_is_list_like(maybe_list_like):
obj, expected = maybe_list_like
expected = True if expected == 'set' else expected
assert inference.is_list_like(obj) == expected
def test_is_list_like_disallow_sets(maybe_list_like):
obj, expected = maybe_list_like
expected = False if expected == 'set' else expected
assert inference.is_list_like(obj, allow_sets=False) == expected
def test_is_sequence():
    """Tuples/lists are sequences; strings, types and getitem-only
    objects are not."""
    is_seq = inference.is_sequence
    assert (is_seq((1, 2)))
    assert (is_seq([1, 2]))
    assert (not is_seq("abcd"))
    assert (not is_seq(u("abcd")))
    assert (not is_seq(np.int64))

    class A(object):
        # BUG FIX: __getitem__ must accept the key being looked up; the
        # original signature omitted it, so item access on A() would
        # raise TypeError instead of exercising the getitem protocol.
        def __getitem__(self, key):
            return 1

    assert (not is_seq(A()))
def test_is_array_like():
    """Array-likes are list-likes that also expose a ``dtype``."""
    class WithDtype(list):
        dtype = "special"

    positives = [Series([]), Series([1, 2]), np.array(["a", "b"]),
                 Index(["2016-01-01"]), WithDtype()]
    for candidate in positives:
        assert inference.is_array_like(candidate)

    # Plain containers, scalars and strings have no dtype attribute.
    negatives = [[1, 2, 3], tuple(), "foo", 123]
    for candidate in negatives:
        assert not inference.is_array_like(candidate)
@pytest.mark.parametrize('inner', [
    [], [1], (1, ), (1, 2), {'a': 1}, {1, 'a'}, Series([1]),
    Series([]), Series(['a']).str, (x for x in range(5))
])
@pytest.mark.parametrize('outer', [
    list, Series, np.array, tuple
])
def test_is_nested_list_like_passes(inner, outer):
    """Any container whose elements are list-like is nested-list-like."""
    nested = outer([inner for _ in range(5)])
    assert inference.is_list_like(nested)
@pytest.mark.parametrize('obj', [
    'abc', [], [1], (1,), ['a'], 'a', {'a'},
    [1, 2, 3], Series([1]), DataFrame({"A": [1]}),
    ([1, 2] for _ in range(5)),
])
def test_is_nested_list_like_fails(obj):
    """Flat containers, scalars and strings are not nested-list-like."""
    verdict = inference.is_nested_list_like(obj)
    assert not verdict
@pytest.mark.parametrize(
    "ll", [{}, {'A': 1}, Series([1])])
def test_is_dict_like_passes(ll):
    """Mappings and Series behave dict-like."""
    verdict = inference.is_dict_like(ll)
    assert verdict
@pytest.mark.parametrize(
    "ll", ['1', 1, [1, 2], (1, 2), range(2), Index([1])])
def test_is_dict_like_fails(ll):
    """Scalars, sequences and Index are not dict-like."""
    verdict = inference.is_dict_like(ll)
    assert not verdict
@pytest.mark.parametrize("has_keys", [True, False])
@pytest.mark.parametrize("has_getitem", [True, False])
@pytest.mark.parametrize("has_contains", [True, False])
def test_is_dict_like_duck_type(has_keys, has_getitem, has_contains):
class DictLike(object):
def __init__(self, d):
self.d = d
if has_keys:
def keys(self):
return self.d.keys()
if has_getitem:
def __getitem__(self, key):
return self.d.__getitem__(key)
if has_contains:
def __contains__(self, key):
return self.d.__contains__(key)
d = DictLike({1: 2})
result = inference.is_dict_like(d)
expected = has_keys and has_getitem and has_contains
assert result is expected
def test_is_file_like():
    """File-likes require a read or write attribute plus ``__iter__``
    (GH 16530)."""
    class MockFile(object):
        pass
    is_file = inference.is_file_like
    data = StringIO("data")
    assert is_file(data)
    # No read / write attributes
    # No iterator attributes
    m = MockFile()
    assert not is_file(m)
    MockFile.write = lambda self: 0
    # Write attribute but not an iterator
    m = MockFile()
    assert not is_file(m)
    # gh-16530: Valid iterator just means we have the
    # __iter__ attribute for our purposes.
    MockFile.__iter__ = lambda self: self
    # Valid write-only file
    m = MockFile()
    assert is_file(m)
    del MockFile.write
    MockFile.read = lambda self: 0
    # Valid read-only file
    m = MockFile()
    assert is_file(m)
    # Iterator but no read / write attributes
    data = [1, 2, 3]
    assert not is_file(data)
@pytest.mark.parametrize(
    "ll", [collections.namedtuple('Test', list('abc'))(1, 2, 3)])
def test_is_names_tuple_passes(ll):
    """namedtuple instances are recognised as named tuples."""
    verdict = inference.is_named_tuple(ll)
    assert verdict
@pytest.mark.parametrize(
    "ll", [(1, 2, 3), 'a', Series({'pi': 3.14})])
def test_is_names_tuple_fails(ll):
    """Plain tuples, strings and Series are not named tuples."""
    verdict = inference.is_named_tuple(ll)
    assert not verdict
def test_is_hashable():
    """is_hashable must actually attempt hash() -- the mere presence of
    ``__hash__`` is not enough (e.g. np.ndarray, or classes whose
    ``__hash__`` raises)."""
    # all new-style classes are hashable by default
    class HashableClass(object):
        pass
    class UnhashableClass1(object):
        __hash__ = None
    class UnhashableClass2(object):
        def __hash__(self):
            raise TypeError("Not hashable")
    hashable = (1,
                3.14,
                np.float64(3.14),
                'a',
                tuple(),
                (1, ),
                HashableClass(), )
    not_hashable = ([], UnhashableClass1(), )
    abc_hashable_not_really_hashable = (([], ), UnhashableClass2(), )
    for i in hashable:
        assert inference.is_hashable(i)
    for i in not_hashable:
        assert not inference.is_hashable(i)
    for i in abc_hashable_not_really_hashable:
        assert not inference.is_hashable(i)
    # numpy.array is no longer collections.Hashable as of
    # https://github.com/numpy/numpy/pull/5326, just test
    # is_hashable()
    assert not inference.is_hashable(np.array([]))
    # old-style classes in Python 2 don't appear hashable to
    # collections.Hashable but also seem to support hash() by default
    if PY2:
        class OldStyleClass():
            pass
        c = OldStyleClass()
        assert not isinstance(c, compat.Hashable)
        assert inference.is_hashable(c)
        hash(c) # this will not raise
@pytest.mark.parametrize(
    "ll", [re.compile('ad')])
def test_is_re_passes(ll):
    """Compiled patterns are recognised as regexes."""
    verdict = inference.is_re(ll)
    assert verdict
@pytest.mark.parametrize(
    "ll", ['x', 2, 3, object()])
def test_is_re_fails(ll):
    """Strings, numbers and plain objects are not compiled regexes."""
    verdict = inference.is_re(ll)
    assert not verdict
@pytest.mark.parametrize(
    "ll", [r'a', u('x'),
           r'asdf',
           re.compile('adsf'),
           u(r'\u2233\s*'),
           re.compile(r'')])
def test_is_recompilable_passes(ll):
    # Strings and already-compiled patterns can be fed to re.compile.
    assert inference.is_re_compilable(ll)
@pytest.mark.parametrize(
    "ll", [1, [], object()])
def test_is_recompilable_fails(ll):
    """Objects that re.compile rejects are not re-compilable."""
    verdict = inference.is_re_compilable(ll)
    assert not verdict
class TestInference(object):
    """Tests for the C-level conversion helpers ``maybe_convert_numeric``
    and ``maybe_convert_objects`` plus the scalar infinity checks."""
    def test_infer_dtype_bytes(self):
        """Byte strings infer 'bytes' on PY3 ('string' on PY2)."""
        compare = 'string' if PY2 else 'bytes'
        # string array of bytes
        arr = np.array(list('abc'), dtype='S1')
        assert lib.infer_dtype(arr, skipna=True) == compare
        # object array of bytes
        arr = arr.astype(object)
        assert lib.infer_dtype(arr, skipna=True) == compare
        # object array of bytes with missing values
        assert lib.infer_dtype([b'a', np.nan, b'c'], skipna=True) == compare
    def test_isinf_scalar(self):
        """The +/-inf scalar detectors match only their own sign (GH 11352)."""
        # GH 11352
        assert libmissing.isposinf_scalar(float('inf'))
        assert libmissing.isposinf_scalar(np.inf)
        assert not libmissing.isposinf_scalar(-np.inf)
        assert not libmissing.isposinf_scalar(1)
        assert not libmissing.isposinf_scalar('a')
        assert libmissing.isneginf_scalar(float('-inf'))
        assert libmissing.isneginf_scalar(-np.inf)
        assert not libmissing.isneginf_scalar(np.inf)
        assert not libmissing.isneginf_scalar(1)
        assert not libmissing.isneginf_scalar('a')
    def test_maybe_convert_numeric_infinities(self):
        """All case/sign variants of 'inf' parse to +/-np.inf; garbage
        prefixes raise ValueError (gh-13274)."""
        # see gh-13274
        infinities = ['inf', 'inF', 'iNf', 'Inf',
                      'iNF', 'InF', 'INf', 'INF']
        na_values = {'', 'NULL', 'nan'}
        pos = np.array(['inf'], dtype=np.float64)
        neg = np.array(['-inf'], dtype=np.float64)
        msg = "Unable to parse string"
        for infinity in infinities:
            for maybe_int in (True, False):
                out = lib.maybe_convert_numeric(
                    np.array([infinity], dtype=object),
                    na_values, maybe_int)
                tm.assert_numpy_array_equal(out, pos)
                out = lib.maybe_convert_numeric(
                    np.array(['-' + infinity], dtype=object),
                    na_values, maybe_int)
                tm.assert_numpy_array_equal(out, neg)
                out = lib.maybe_convert_numeric(
                    np.array([u(infinity)], dtype=object),
                    na_values, maybe_int)
                tm.assert_numpy_array_equal(out, pos)
                out = lib.maybe_convert_numeric(
                    np.array(['+' + infinity], dtype=object),
                    na_values, maybe_int)
                tm.assert_numpy_array_equal(out, pos)
                # too many characters
                with pytest.raises(ValueError, match=msg):
                    lib.maybe_convert_numeric(
                        np.array(['foo_' + infinity], dtype=object),
                        na_values, maybe_int)
    def test_maybe_convert_numeric_post_floatify_nan(self, coerce):
        """na_values are still honoured after values are floatified
        (gh-13314): -999.000 parses to -999.0 which is in nan_values."""
        # see gh-13314
        data = np.array(['1.200', '-999.000', '4.500'], dtype=object)
        expected = np.array([1.2, np.nan, 4.5], dtype=np.float64)
        nan_values = {-999, -999.0}
        out = lib.maybe_convert_numeric(data, nan_values, coerce)
        tm.assert_numpy_array_equal(out, expected)
    def test_convert_infs(self):
        """String infinities convert to a float64 result."""
        arr = np.array(['inf', 'inf', 'inf'], dtype='O')
        result = lib.maybe_convert_numeric(arr, set(), False)
        assert result.dtype == np.float64
        arr = np.array(['-inf', '-inf', '-inf'], dtype='O')
        result = lib.maybe_convert_numeric(arr, set(), False)
        assert result.dtype == np.float64
    def test_scientific_no_exponent(self):
        """Scientific notation missing the exponent digits coerces to
        NaN (PR 12215)."""
        # See PR 12215
        arr = np.array(['42E', '2E', '99e', '6e'], dtype='O')
        result = lib.maybe_convert_numeric(arr, set(), False, True)
        assert np.all(np.isnan(result))
    def test_convert_non_hashable(self):
        """Non-hashable entries coerce to NaN rather than raising (GH13324)."""
        # GH13324
        # make sure that we are handing non-hashables
        arr = np.array([[10.0, 2], 1.0, 'apple'])
        result = lib.maybe_convert_numeric(arr, set(), False, True)
        tm.assert_numpy_array_equal(result, np.array([np.nan, 1.0, np.nan]))
    def test_convert_numeric_uint64(self):
        """Values above int64 range convert to uint64 whether given as
        int, str or np.uint64."""
        arr = np.array([2**63], dtype=object)
        exp = np.array([2**63], dtype=np.uint64)
        tm.assert_numpy_array_equal(lib.maybe_convert_numeric(arr, set()), exp)
        arr = np.array([str(2**63)], dtype=object)
        exp = np.array([2**63], dtype=np.uint64)
        tm.assert_numpy_array_equal(lib.maybe_convert_numeric(arr, set()), exp)
        arr = np.array([np.uint64(2**63)], dtype=object)
        exp = np.array([2**63], dtype=np.uint64)
        tm.assert_numpy_array_equal(lib.maybe_convert_numeric(arr, set()), exp)
    @pytest.mark.parametrize("arr", [
        np.array([2**63, np.nan], dtype=object),
        np.array([str(2**63), np.nan], dtype=object),
        np.array([np.nan, 2**63], dtype=object),
        np.array([np.nan, str(2**63)], dtype=object)])
    def test_convert_numeric_uint64_nan(self, coerce, arr):
        """uint64 values mixed with NaN floatify only when coercing."""
        expected = arr.astype(float) if coerce else arr.copy()
        result = lib.maybe_convert_numeric(arr, set(),
                                           coerce_numeric=coerce)
        tm.assert_almost_equal(result, expected)
    def test_convert_numeric_uint64_nan_values(self, coerce):
        """uint64 entries listed in na_values become NaN when coercing."""
        arr = np.array([2**63, 2**63 + 1], dtype=object)
        na_values = {2**63}
        expected = (np.array([np.nan, 2**63 + 1], dtype=float)
                    if coerce else arr.copy())
        result = lib.maybe_convert_numeric(arr, na_values,
                                           coerce_numeric=coerce)
        tm.assert_almost_equal(result, expected)
    @pytest.mark.parametrize("case", [
        np.array([2**63, -1], dtype=object),
        np.array([str(2**63), -1], dtype=object),
        np.array([str(2**63), str(-1)], dtype=object),
        np.array([-1, 2**63], dtype=object),
        np.array([-1, str(2**63)], dtype=object),
        np.array([str(-1), str(2**63)], dtype=object)])
    def test_convert_numeric_int64_uint64(self, case, coerce):
        """Mixing uint64-range and negative values floatifies only when
        coercing -- there is no common integer dtype."""
        expected = case.astype(float) if coerce else case.copy()
        result = lib.maybe_convert_numeric(case, set(), coerce_numeric=coerce)
        tm.assert_almost_equal(result, expected)
    @pytest.mark.parametrize("value", [-2**63 - 1, 2**64])
    def test_convert_int_overflow(self, value):
        """Ints outside both int64 and uint64 range stay as objects
        (gh-18584)."""
        # see gh-18584
        arr = np.array([value], dtype=object)
        result = lib.maybe_convert_objects(arr)
        tm.assert_numpy_array_equal(arr, result)
    def test_maybe_convert_objects_uint64(self):
        """maybe_convert_objects handles uint64 and keeps mixed
        uint64/negative arrays as objects (gh-4471)."""
        # see gh-4471
        arr = np.array([2**63], dtype=object)
        exp = np.array([2**63], dtype=np.uint64)
        tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)
        # NumPy bug: can't compare uint64 to int64, as that
        # results in both casting to float64, so we should
        # make sure that this function is robust against it
        arr = np.array([np.uint64(2**63)], dtype=object)
        exp = np.array([2**63], dtype=np.uint64)
        tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)
        arr = np.array([2, -1], dtype=object)
        exp = np.array([2, -1], dtype=np.int64)
        tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)
        arr = np.array([2**63, -1], dtype=object)
        exp = np.array([2**63, -1], dtype=object)
        tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)
    def test_mixed_dtypes_remain_object_array(self):
        """tz-aware datetime mixed with int stays an object array (GH14956)."""
        # GH14956
        array = np.array([datetime(2015, 1, 1, tzinfo=pytz.utc), 1],
                         dtype=object)
        result = lib.maybe_convert_objects(array, convert_datetime=1)
        tm.assert_numpy_array_equal(result, array)
class TestTypeInference(object):
    # Dummy class used for testing with Python objects
    class Dummy():
        # Intentionally empty: a plain object with no special protocols.
        pass
    def test_inferred_dtype_fixture(self, any_skipna_inferred_dtype):
        """Sanity-check the shared fixture: its declared inferred dtype
        must match what ``lib.infer_dtype`` actually reports."""
        # see pandas/conftest.py
        inferred_dtype, values = any_skipna_inferred_dtype
        # make sure the inferred dtype of the fixture is as requested
        assert inferred_dtype == lib.infer_dtype(values, skipna=True)
@pytest.mark.parametrize('skipna', [True, False])
def test_length_zero(self, skipna):
result = lib.infer_dtype(np.array([], dtype='i4'), skipna=skipna)
assert result == 'integer'
result = lib.infer_dtype([], skipna=skipna)
assert result == 'empty'
# GH 18004
arr = np.array([np.array([], dtype=object),
np.array([], dtype=object)])
result = lib.infer_dtype(arr, skipna=skipna)
assert result == 'empty'
def test_integers(self):
arr = np.array([1, 2, 3, np.int64(4), np.int32(5)], dtype='O')
result = lib.infer_dtype(arr, skipna=True)
assert result == 'integer'
arr = np.array([1, 2, 3, np.int64(4), np.int32(5), 'foo'], dtype='O')
result = lib.infer_dtype(arr, skipna=True)
assert result == 'mixed-integer'
arr = np.array([1, 2, 3, 4, 5], dtype='i4')
result = lib.infer_dtype(arr, skipna=True)
assert result == 'integer'
def test_deprecation(self):
# GH 24050
arr = np.array([1, 2, 3], dtype=object)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = lib.infer_dtype(arr) # default: skipna=None -> warn
assert result == 'integer'
def test_bools(self):
arr = np.array([True, False, True, True, True], dtype='O')
result = lib.infer_dtype(arr, skipna=True)
assert result == 'boolean'
arr = np.array([np.bool_(True), np.bool_(False)], dtype='O')
result = lib.infer_dtype(arr, skipna=True)
assert result == 'boolean'
arr = np.array([True, False, True, 'foo'], dtype='O')
result = lib.infer_dtype(arr, skipna=True)
assert result == 'mixed'
arr = np.array([True, False, True], dtype=bool)
result = lib.infer_dtype(arr, skipna=True)
assert result == 'boolean'
arr = np.array([True, np.nan, False], dtype='O')
result = lib.infer_dtype(arr, skipna=True)
assert result == 'boolean'
result = lib.infer_dtype(arr, skipna=False)
assert result == 'mixed'
def test_floats(self):
arr = np.array([1., 2., 3., np.float64(4), np.float32(5)], dtype='O')
result = lib.infer_dtype(arr, skipna=True)
assert result == 'floating'
arr = np.array([1, 2, 3, np.float64(4), np.float32(5), 'foo'],
dtype='O')
result = lib.infer_dtype(arr, skipna=True)
assert result == 'mixed-integer'
arr = np.array([1, 2, 3, 4, 5], dtype='f4')
result = lib.infer_dtype(arr, skipna=True)
assert result == 'floating'
arr = np.array([1, 2, 3, 4, 5], dtype='f8')
result = lib.infer_dtype(arr, skipna=True)
assert result == 'floating'
def test_decimals(self):
# GH15690
arr = np.array([Decimal(1), Decimal(2), Decimal(3)])
result = lib.infer_dtype(arr, skipna=True)
assert result == 'decimal'
arr = np.array([1.0, 2.0, Decimal(3)])
result = lib.infer_dtype(arr, skipna=True)
assert result == 'mixed'
arr = np.array([Decimal(1), Decimal('NaN'), Decimal(3)])
result = lib.infer_dtype(arr, skipna=True)
assert result == 'decimal'
arr = np.array([Decimal(1), np.nan, Decimal(3)], dtype='O')
result = lib.infer_dtype(arr, skipna=True)
assert result == 'decimal'
def test_string(self):
pass
def test_unicode(self):
arr = [u'a', np.nan, u'c']
result = lib.infer_dtype(arr, skipna=False)
assert result == 'mixed'
arr = [u'a', np.nan, u'c']
result = lib.infer_dtype(arr, skipna=True)
expected = 'unicode' if PY2 else 'string'
assert result == expected
@pytest.mark.parametrize('dtype, missing, skipna, expected', [
(float, np.nan, False, 'floating'),
(float, np.nan, True, 'floating'),
(object, np.nan, False, 'floating'),
(object, np.nan, True, 'empty'),
(object, None, False, 'mixed'),
(object, None, True, 'empty')
])
@pytest.mark.parametrize('box', [pd.Series, np.array])
def test_object_empty(self, box, missing, dtype, skipna, expected):
# GH 23421
arr = box([missing, missing], dtype=dtype)
result = lib.infer_dtype(arr, skipna=skipna)
assert result == expected
def test_datetime(self):
dates = [datetime(2012, 1, x) for x in range(1, 20)]
index = Index(dates)
assert index.inferred_type == 'datetime64'
def test_infer_dtype_datetime(self):
arr = np.array([Timestamp('2011-01-01'),
Timestamp('2011-01-02')])
assert lib.infer_dtype(arr, skipna=True) == 'datetime'
arr = np.array([np.datetime64('2011-01-01'),
np.datetime64('2011-01-01')], dtype=object)
assert lib.infer_dtype(arr, skipna=True) == 'datetime64'
arr = np.array([datetime(2011, 1, 1), datetime(2012, 2, 1)])
assert lib.infer_dtype(arr, skipna=True) == 'datetime'
# starts with nan
for n in [pd.NaT, np.nan]:
arr = np.array([n, pd.Timestamp('2011-01-02')])
assert lib.infer_dtype(arr, skipna=True) == 'datetime'
arr = np.array([n, np.datetime64('2011-01-02')])
assert lib.infer_dtype(arr, skipna=True) == 'datetime64'
arr = np.array([n, datetime(2011, 1, 1)])
assert lib.infer_dtype(arr, skipna=True) == 'datetime'
arr = np.array([n, pd.Timestamp('2011-01-02'), n])
assert lib.infer_dtype(arr, skipna=True) == 'datetime'
arr = np.array([n, np.datetime64('2011-01-02'), n])
assert lib.infer_dtype(arr, skipna=True) == 'datetime64'
arr = np.array([n, datetime(2011, 1, 1), n])
assert lib.infer_dtype(arr, skipna=True) == 'datetime'
# different type of nat
arr = np.array([np.timedelta64('nat'),
np.datetime64('2011-01-02')], dtype=object)
assert lib.infer_dtype(arr, skipna=False) == 'mixed'
arr = np.array([np.datetime64('2011-01-02'),
np.timedelta64('nat')], dtype=object)
assert lib.infer_dtype(arr, skipna=False) == 'mixed'
# mixed datetime
arr = np.array([datetime(2011, 1, 1),
pd.Timestamp('2011-01-02')])
assert lib.infer_dtype(arr, skipna=True) == 'datetime'
# should be datetime?
arr = np.array([np.datetime64('2011-01-01'),
pd.Timestamp('2011-01-02')])
assert lib.infer_dtype(arr, skipna=True) == 'mixed'
arr = np.array([pd.Timestamp('2011-01-02'),
np.datetime64('2011-01-01')])
assert lib.infer_dtype(arr, skipna=True) == 'mixed'
arr = np.array([np.nan, pd.Timestamp('2011-01-02'), 1])
assert lib.infer_dtype(arr, skipna=True) == 'mixed-integer'
arr = np.array([np.nan, pd.Timestamp('2011-01-02'), 1.1])
assert lib.infer_dtype(arr, skipna=True) == 'mixed'
arr = np.array([np.nan, '2011-01-01', pd.Timestamp('2011-01-02')])
assert lib.infer_dtype(arr, skipna=True) == 'mixed'
def test_infer_dtype_timedelta(self):
arr = np.array([pd.Timedelta('1 days'),
pd.Timedelta('2 days')])
assert lib.infer_dtype(arr, skipna=True) == 'timedelta'
arr = np.array([np.timedelta64(1, 'D'),
np.timedelta64(2, 'D')], dtype=object)
assert lib.infer_dtype(arr, skipna=True) == 'timedelta'
arr = np.array([timedelta(1), timedelta(2)])
assert lib.infer_dtype(arr, skipna=True) == 'timedelta'
# starts with nan
for n in [pd.NaT, np.nan]:
arr = np.array([n, Timedelta('1 days')])
assert lib.infer_dtype(arr, skipna=True) == 'timedelta'
arr = np.array([n, np.timedelta64(1, 'D')])
assert lib.infer_dtype(arr, skipna=True) == 'timedelta'
arr = np.array([n, timedelta(1)])
assert lib.infer_dtype(arr, skipna=True) == 'timedelta'
arr = np.array([n, pd.Timedelta('1 days'), n])
assert lib.infer_dtype(arr, skipna=True) == 'timedelta'
arr = np.array([n, np.timedelta64(1, 'D'), n])
assert lib.infer_dtype(arr, skipna=True) == 'timedelta'
arr = np.array([n, timedelta(1), n])
assert lib.infer_dtype(arr, skipna=True) == 'timedelta'
# different type of nat
arr = np.array([np.datetime64('nat'), np.timedelta64(1, 'D')],
dtype=object)
assert lib.infer_dtype(arr, skipna=False) == 'mixed'
arr = np.array([np.timedelta64(1, 'D'), np.datetime64('nat')],
dtype=object)
assert lib.infer_dtype(arr, skipna=False) == 'mixed'
def test_infer_dtype_period(self):
# GH 13664
arr = np.array([pd.Period('2011-01', freq='D'),
pd.Period('2011-02', freq='D')])
assert lib.infer_dtype(arr, skipna=True) == 'period'
arr = np.array([pd.Period('2011-01', freq='D'),
pd.Period('2011-02', freq='M')])
assert lib.infer_dtype(arr, skipna=True) == 'period'
# starts with nan
for n in [pd.NaT, np.nan]:
arr = np.array([n, pd.Period('2011-01', freq='D')])
assert lib.infer_dtype(arr, skipna=True) == 'period'
arr = np.array([n, pd.Period('2011-01', freq='D'), n])
assert lib.infer_dtype(arr, skipna=True) == 'period'
# different type of nat
arr = np.array([np.datetime64('nat'), pd.Period('2011-01', freq='M')],
dtype=object)
assert lib.infer_dtype(arr, skipna=False) == 'mixed'
arr = np.array([pd.Period('2011-01', freq='M'), np.datetime64('nat')],
dtype=object)
assert lib.infer_dtype(arr, skipna=False) == 'mixed'
@pytest.mark.parametrize(
"data",
[
[datetime(2017, 6, 12, 19, 30), datetime(2017, 3, 11, 1, 15)],
[Timestamp("20170612"), Timestamp("20170311")],
[Timestamp("20170612", tz='US/Eastern'),
Timestamp("20170311", tz='US/Eastern')],
[date(2017, 6, 12),
Timestamp("20170311", tz='US/Eastern')],
[np.datetime64("2017-06-12"), np.datetime64("2017-03-11")],
[np.datetime64("2017-06-12"), datetime(2017, 3, 11, 1, 15)]
]
)
def test_infer_datetimelike_array_datetime(self, data):
assert lib.infer_datetimelike_array(data) == "datetime"
@pytest.mark.parametrize(
"data",
[
[timedelta(2017, 6, 12), timedelta(2017, 3, 11)],
[timedelta(2017, 6, 12), date(2017, 3, 11)],
[np.timedelta64(2017, "D"), np.timedelta64(6, "s")],
[np.timedelta64(2017, "D"), timedelta(2017, 3, 11)]
]
)
def test_infer_datetimelike_array_timedelta(self, data):
assert lib.infer_datetimelike_array(data) == "timedelta"
def test_infer_datetimelike_array_date(self):
arr = [date(2017, 6, 12), date(2017, 3, 11)]
assert lib.infer_datetimelike_array(arr) == "date"
@pytest.mark.parametrize(
"data",
[
["2017-06-12", "2017-03-11"],
[20170612, 20170311],
[20170612.5, 20170311.8],
[Dummy(), Dummy()],
[Timestamp("20170612"), Timestamp("20170311", tz='US/Eastern')],
[Timestamp("20170612"), 20170311],
[timedelta(2017, 6, 12), Timestamp("20170311", tz='US/Eastern')]
]
)
def test_infer_datetimelike_array_mixed(self, data):
assert lib.infer_datetimelike_array(data) == "mixed"
@pytest.mark.parametrize(
"first, expected",
[
[[None], "mixed"],
[[np.nan], "mixed"],
[[pd.NaT], "nat"],
[[datetime(2017, 6, 12, 19, 30), pd.NaT], "datetime"],
[[np.datetime64("2017-06-12"), pd.NaT], "datetime"],
[[date(2017, 6, 12), pd.NaT], "date"],
[[timedelta(2017, 6, 12), pd.NaT], "timedelta"],
[[np.timedelta64(2017, "D"), pd.NaT], "timedelta"]
]
)
@pytest.mark.parametrize("second", [None, np.nan])
def test_infer_datetimelike_array_nan_nat_like(self, first, second,
expected):
first.append(second)
assert lib.infer_datetimelike_array(first) == expected
def test_infer_dtype_all_nan_nat_like(self):
arr = np.array([np.nan, np.nan])
assert lib.infer_dtype(arr, skipna=True) == 'floating'
# nan and None mix are result in mixed
arr = np.array([np.nan, np.nan, None])
assert lib.infer_dtype(arr, skipna=True) == 'empty'
assert lib.infer_dtype(arr, skipna=False) == 'mixed'
arr = np.array([None, np.nan, np.nan])
assert lib.infer_dtype(arr, skipna=True) == 'empty'
assert lib.infer_dtype(arr, skipna=False) == 'mixed'
# pd.NaT
arr = np.array([pd.NaT])
assert lib.infer_dtype(arr, skipna=False) == 'datetime'
arr = np.array([pd.NaT, np.nan])
assert lib.infer_dtype(arr, skipna=False) == 'datetime'
arr = np.array([np.nan, pd.NaT])
assert lib.infer_dtype(arr, skipna=False) == 'datetime'
arr = np.array([np.nan, pd.NaT, np.nan])
assert lib.infer_dtype(arr, skipna=False) == 'datetime'
arr = np.array([None, pd.NaT, None])
assert lib.infer_dtype(arr, skipna=False) == 'datetime'
# np.datetime64(nat)
arr = np.array([np.datetime64('nat')])
assert lib.infer_dtype(arr, skipna=False) == 'datetime64'
for n in [np.nan, pd.NaT, None]:
arr = np.array([n, np.datetime64('nat'), n])
assert lib.infer_dtype(arr, skipna=False) == 'datetime64'
arr = np.array([pd.NaT, n, np.datetime64('nat'), n])
assert lib.infer_dtype(arr, skipna=False) == 'datetime64'
arr = np.array([np.timedelta64('nat')], dtype=object)
assert lib.infer_dtype(arr, skipna=False) == 'timedelta'
for n in [np.nan, pd.NaT, None]:
arr = np.array([n, np.timedelta64('nat'), n])
assert lib.infer_dtype(arr, skipna=False) == 'timedelta'
arr = np.array([pd.NaT, n, np.timedelta64('nat'), n])
assert lib.infer_dtype(arr, skipna=False) == 'timedelta'
# datetime / timedelta mixed
arr = np.array([pd.NaT, np.datetime64('nat'),
np.timedelta64('nat'), np.nan])
assert lib.infer_dtype(arr, skipna=False) == 'mixed'
arr = np.array([np.timedelta64('nat'), np.datetime64('nat')],
dtype=object)
assert lib.infer_dtype(arr, skipna=False) == 'mixed'
def test_is_datetimelike_array_all_nan_nat_like(self):
arr = np.array([np.nan, pd.NaT, np.datetime64('nat')])
assert lib.is_datetime_array(arr)
assert lib.is_datetime64_array(arr)
assert not lib.is_timedelta_or_timedelta64_array(arr)
arr = np.array([np.nan, pd.NaT, np.timedelta64('nat')])
assert not lib.is_datetime_array(arr)
assert not lib.is_datetime64_array(arr)
assert lib.is_timedelta_or_timedelta64_array(arr)
arr = np.array([np.nan, pd.NaT, np.datetime64('nat'),
np.timedelta64('nat')])
assert not lib.is_datetime_array(arr)
assert not lib.is_datetime64_array(arr)
assert not lib.is_timedelta_or_timedelta64_array(arr)
arr = np.array([np.nan, pd.NaT])
assert lib.is_datetime_array(arr)
assert lib.is_datetime64_array(arr)
assert lib.is_timedelta_or_timedelta64_array(arr)
arr = np.array([np.nan, np.nan], dtype=object)
assert not lib.is_datetime_array(arr)
assert not lib.is_datetime64_array(arr)
assert not lib.is_timedelta_or_timedelta64_array(arr)
assert lib.is_datetime_with_singletz_array(
np.array([pd.Timestamp('20130101', tz='US/Eastern'),
pd.Timestamp('20130102', tz='US/Eastern')],
dtype=object))
assert not lib.is_datetime_with_singletz_array(
np.array([pd.Timestamp('20130101', tz='US/Eastern'),
pd.Timestamp('20130102', tz='CET')],
dtype=object))
@pytest.mark.parametrize(
"func",
[
'is_datetime_array',
'is_datetime64_array',
'is_bool_array',
'is_timedelta_or_timedelta64_array',
'is_date_array',
'is_time_array',
'is_interval_array',
'is_period_array'])
def test_other_dtypes_for_array(self, func):
func = getattr(lib, func)
arr = np.array(['foo', 'bar'])
assert not func(arr)
arr = np.array([1, 2])
assert not func(arr)
def test_date(self):
dates = [date(2012, 1, day) for day in range(1, 20)]
index = Index(dates)
assert index.inferred_type == 'date'
dates = [date(2012, 1, day) for day in range(1, 20)] + [np.nan]
result = lib.infer_dtype(dates, skipna=False)
assert result == 'mixed'
result = lib.infer_dtype(dates, skipna=True)
assert result == 'date'
def test_is_numeric_array(self):
assert lib.is_float_array(np.array([1, 2.0]))
assert lib.is_float_array(np.array([1, 2.0, np.nan]))
assert not lib.is_float_array(np.array([1, 2]))
assert lib.is_integer_array(np.array([1, 2]))
assert not lib.is_integer_array(np.array([1, 2.0]))
def test_is_string_array(self):
assert lib.is_string_array(np.array(['foo', 'bar']))
assert not lib.is_string_array(
np.array(['foo', 'bar', np.nan], dtype=object), skipna=False)
assert lib.is_string_array(
np.array(['foo', 'bar', np.nan], dtype=object), skipna=True)
assert not lib.is_string_array(np.array([1, 2]))
def test_to_object_array_tuples(self):
r = (5, 6)
values = [r]
result = lib.to_object_array_tuples(values)
try:
# make sure record array works
from collections import namedtuple
record = namedtuple('record', 'x y')
r = record(5, 6)
values = [r]
result = lib.to_object_array_tuples(values) # noqa
except ImportError:
pass
def test_object(self):
# GH 7431
# cannot infer more than this as only a single element
arr = np.array([None], dtype='O')
result = lib.infer_dtype(arr, skipna=False)
assert result == 'mixed'
result = lib.infer_dtype(arr, skipna=True)
assert result == 'empty'
def test_to_object_array_width(self):
# see gh-13320
rows = [[1, 2, 3], [4, 5, 6]]
expected = np.array(rows, dtype=object)
out = lib.to_object_array(rows)
tm.assert_numpy_array_equal(out, expected)
expected = np.array(rows, dtype=object)
out = lib.to_object_array(rows, min_width=1)
tm.assert_numpy_array_equal(out, expected)
expected = np.array([[1, 2, 3, None, None],
[4, 5, 6, None, None]], dtype=object)
out = lib.to_object_array(rows, min_width=5)
tm.assert_numpy_array_equal(out, expected)
def test_is_period(self):
assert lib.is_period(pd.Period('2011-01', freq='M'))
assert not lib.is_period(pd.PeriodIndex(['2011-01'], freq='M'))
assert not lib.is_period(pd.Timestamp('2011-01'))
assert not lib.is_period(1)
assert not lib.is_period(np.nan)
def test_categorical(self):
# GH 8974
from pandas import Categorical, Series
arr = Categorical(list('abc'))
result = lib.infer_dtype(arr, skipna=True)
assert result == 'categorical'
result = lib.infer_dtype(Series(arr), skipna=True)
assert result == 'categorical'
arr = Categorical(list('abc'), categories=['cegfab'], ordered=True)
result = lib.infer_dtype(arr, skipna=True)
assert result == 'categorical'
result = lib.infer_dtype(Series(arr), skipna=True)
assert result == 'categorical'
class TestNumberScalar(object):
    """Scalar predicate checks: ``is_number`` / ``is_bool`` /
    ``is_integer`` / ``is_float`` plus datetime64/timedelta64 dtype
    predicates on strings, indexes and dtype objects.
    """
    def test_is_number(self):
        assert is_number(True)
        assert is_number(1)
        assert is_number(1.1)
        assert is_number(1 + 3j)
        assert is_number(np.bool(False))
        assert is_number(np.int64(1))
        assert is_number(np.float64(1.1))
        assert is_number(np.complex128(1 + 3j))
        assert is_number(np.nan)
        assert not is_number(None)
        assert not is_number('x')
        assert not is_number(datetime(2011, 1, 1))
        assert not is_number(np.datetime64('2011-01-01'))
        assert not is_number(Timestamp('2011-01-01'))
        assert not is_number(Timestamp('2011-01-01', tz='US/Eastern'))
        assert not is_number(timedelta(1000))
        assert not is_number(Timedelta('1 days'))
        # questionable
        assert not is_number(np.bool_(False))
        assert is_number(np.timedelta64(1, 'D'))
    def test_is_bool(self):
        assert is_bool(True)
        assert is_bool(np.bool(False))
        assert is_bool(np.bool_(False))
        assert not is_bool(1)
        assert not is_bool(1.1)
        assert not is_bool(1 + 3j)
        assert not is_bool(np.int64(1))
        assert not is_bool(np.float64(1.1))
        assert not is_bool(np.complex128(1 + 3j))
        assert not is_bool(np.nan)
        assert not is_bool(None)
        assert not is_bool('x')
        assert not is_bool(datetime(2011, 1, 1))
        assert not is_bool(np.datetime64('2011-01-01'))
        assert not is_bool(Timestamp('2011-01-01'))
        assert not is_bool(Timestamp('2011-01-01', tz='US/Eastern'))
        assert not is_bool(timedelta(1000))
        assert not is_bool(np.timedelta64(1, 'D'))
        assert not is_bool(Timedelta('1 days'))
    def test_is_integer(self):
        assert is_integer(1)
        assert is_integer(np.int64(1))
        # bool is a subclass of int but is deliberately excluded
        assert not is_integer(True)
        assert not is_integer(1.1)
        assert not is_integer(1 + 3j)
        assert not is_integer(np.bool(False))
        assert not is_integer(np.bool_(False))
        assert not is_integer(np.float64(1.1))
        assert not is_integer(np.complex128(1 + 3j))
        assert not is_integer(np.nan)
        assert not is_integer(None)
        assert not is_integer('x')
        assert not is_integer(datetime(2011, 1, 1))
        assert not is_integer(np.datetime64('2011-01-01'))
        assert not is_integer(Timestamp('2011-01-01'))
        assert not is_integer(Timestamp('2011-01-01', tz='US/Eastern'))
        assert not is_integer(timedelta(1000))
        assert not is_integer(Timedelta('1 days'))
        # questionable
        assert is_integer(np.timedelta64(1, 'D'))
    def test_is_float(self):
        assert is_float(1.1)
        assert is_float(np.float64(1.1))
        assert is_float(np.nan)
        assert not is_float(True)
        assert not is_float(1)
        assert not is_float(1 + 3j)
        assert not is_float(np.bool(False))
        assert not is_float(np.bool_(False))
        assert not is_float(np.int64(1))
        assert not is_float(np.complex128(1 + 3j))
        assert not is_float(None)
        assert not is_float('x')
        assert not is_float(datetime(2011, 1, 1))
        assert not is_float(np.datetime64('2011-01-01'))
        assert not is_float(Timestamp('2011-01-01'))
        assert not is_float(Timestamp('2011-01-01', tz='US/Eastern'))
        assert not is_float(timedelta(1000))
        assert not is_float(np.timedelta64(1, 'D'))
        assert not is_float(Timedelta('1 days'))
    def test_is_datetime_dtypes(self):
        ts = pd.date_range('20130101', periods=3)
        tsa = pd.date_range('20130101', periods=3, tz='US/Eastern')
        assert is_datetime64_dtype('datetime64')
        assert is_datetime64_dtype('datetime64[ns]')
        assert is_datetime64_dtype(ts)
        # tz-aware is NOT plain datetime64
        assert not is_datetime64_dtype(tsa)
        assert not is_datetime64_ns_dtype('datetime64')
        assert is_datetime64_ns_dtype('datetime64[ns]')
        assert is_datetime64_ns_dtype(ts)
        assert is_datetime64_ns_dtype(tsa)
        assert is_datetime64_any_dtype('datetime64')
        assert is_datetime64_any_dtype('datetime64[ns]')
        assert is_datetime64_any_dtype(ts)
        assert is_datetime64_any_dtype(tsa)
        assert not is_datetime64tz_dtype('datetime64')
        assert not is_datetime64tz_dtype('datetime64[ns]')
        assert not is_datetime64tz_dtype(ts)
        assert is_datetime64tz_dtype(tsa)
        # tz-aware dtype strings count as tz, ns and "any", but not plain
        for tz in ['US/Eastern', 'UTC']:
            dtype = 'datetime64[ns, {}]'.format(tz)
            assert not is_datetime64_dtype(dtype)
            assert is_datetime64tz_dtype(dtype)
            assert is_datetime64_ns_dtype(dtype)
            assert is_datetime64_any_dtype(dtype)
    def test_is_timedelta(self):
        assert is_timedelta64_dtype('timedelta64')
        assert is_timedelta64_dtype('timedelta64[ns]')
        assert not is_timedelta64_ns_dtype('timedelta64')
        assert is_timedelta64_ns_dtype('timedelta64[ns]')
        tdi = TimedeltaIndex([1e14, 2e14], dtype='timedelta64[ns]')
        assert is_timedelta64_dtype(tdi)
        assert is_timedelta64_ns_dtype(tdi)
        assert is_timedelta64_ns_dtype(tdi.astype('timedelta64[ns]'))
        # Conversion to Int64Index:
        assert not is_timedelta64_ns_dtype(tdi.astype('timedelta64'))
        assert not is_timedelta64_ns_dtype(tdi.astype('timedelta64[h]'))
class TestIsScalar(object):
    """``is_scalar`` must accept builtin, numpy and pandas scalars and
    reject containers, zero-dim arrays and pandas data structures.
    """
    def test_is_scalar_builtin_scalars(self):
        assert is_scalar(None)
        assert is_scalar(True)
        assert is_scalar(False)
        assert is_scalar(Number())
        assert is_scalar(Fraction())
        assert is_scalar(0.)
        assert is_scalar(np.nan)
        assert is_scalar('foobar')
        assert is_scalar(b'foobar')
        assert is_scalar(u('efoobar'))
        assert is_scalar(datetime(2014, 1, 1))
        assert is_scalar(date(2014, 1, 1))
        assert is_scalar(time(12, 0))
        assert is_scalar(timedelta(hours=1))
        assert is_scalar(pd.NaT)
    def test_is_scalar_builtin_nonscalars(self):
        assert not is_scalar({})
        assert not is_scalar([])
        assert not is_scalar([1])
        assert not is_scalar(())
        assert not is_scalar((1, ))
        assert not is_scalar(slice(None))
        assert not is_scalar(Ellipsis)
    def test_is_scalar_numpy_array_scalars(self):
        assert is_scalar(np.int64(1))
        assert is_scalar(np.float64(1.))
        assert is_scalar(np.int32(1))
        assert is_scalar(np.object_('foobar'))
        assert is_scalar(np.str_('foobar'))
        assert is_scalar(np.unicode_(u('foobar')))
        assert is_scalar(np.bytes_(b'foobar'))
        assert is_scalar(np.datetime64('2014-01-01'))
        assert is_scalar(np.timedelta64(1, 'h'))
    def test_is_scalar_numpy_zerodim_arrays(self):
        # zero-dim arrays are not scalars until unboxed by item_from_zerodim
        for zerodim in [np.array(1), np.array('foobar'),
                        np.array(np.datetime64('2014-01-01')),
                        np.array(np.timedelta64(1, 'h')),
                        np.array(np.datetime64('NaT'))]:
            assert not is_scalar(zerodim)
            assert is_scalar(lib.item_from_zerodim(zerodim))
    @pytest.mark.filterwarnings("ignore::PendingDeprecationWarning")
    def test_is_scalar_numpy_arrays(self):
        assert not is_scalar(np.array([]))
        assert not is_scalar(np.array([[]]))
        assert not is_scalar(np.matrix('1; 2'))
    def test_is_scalar_pandas_scalars(self):
        assert is_scalar(Timestamp('2014-01-01'))
        assert is_scalar(Timedelta(hours=1))
        assert is_scalar(Period('2014-01-01'))
        assert is_scalar(Interval(left=0, right=1))
        assert is_scalar(DateOffset(days=1))
    def test_is_scalar_pandas_containers(self):
        assert not is_scalar(Series())
        assert not is_scalar(Series([1]))
        assert not is_scalar(DataFrame())
        assert not is_scalar(DataFrame([[1]]))
        # Panel is deprecated; silence its FutureWarning for the check
        with catch_warnings(record=True):
            simplefilter("ignore", FutureWarning)
            assert not is_scalar(Panel())
            assert not is_scalar(Panel([[[1]]]))
        assert not is_scalar(Index([]))
        assert not is_scalar(Index([1]))
def test_datetimeindex_from_empty_datetime64_array():
    """An empty datetime64 array of any resolution yields an empty index."""
    units = ('ms', 'us', 'ns')
    for resolution in units:
        empty_values = np.array([], dtype='datetime64[%s]' % resolution)
        index = DatetimeIndex(empty_values)
        assert len(index) == 0
def test_nan_to_nat_conversions():
    # setting float NaN into a datetime64 column must surface as NaT
    df = DataFrame(dict({
        'A': np.asarray(
            lrange(10), dtype='float64'),
        'B': Timestamp('20010101')
    }))
    df.iloc[3:6, :] = np.nan
    result = df.loc[4, 'B'].value
    # the stored value is iNaT, the int64 sentinel backing NaT
    assert (result == iNaT)
    s = df['B'].copy()
    # NOTE(review): reaches into the private block manager (_data.setitem);
    # relies on internal setitem semantics - confirm against the pandas
    # version in use before touching this.
    s._data = s._data.setitem(indexer=tuple([slice(8, 9)]), value=np.nan)
    assert (isna(s[8]))
    assert (s[8].value == np.datetime64('NaT').astype(np.int64))
@td.skip_if_no_scipy
@pytest.mark.filterwarnings("ignore::PendingDeprecationWarning")
def test_is_scipy_sparse(spmatrix): # noqa: F811
    # a scipy sparse matrix is recognised; a plain ndarray is not
    assert is_scipy_sparse(spmatrix([[0, 1]]))
    assert not is_scipy_sparse(np.array([1]))
def test_ensure_int32():
    """ensure_int32 must yield int32 output for int32 and int64 input alike."""
    for source_dtype in (np.int32, np.int64):
        data = np.arange(10, dtype=source_dtype)
        coerced = ensure_int32(data)
        assert coerced.dtype == np.int32
def test_ensure_categorical():
    # an ndarray is coerced to a category-dtype Categorical ...
    values = np.arange(10, dtype=np.int32)
    result = ensure_categorical(values)
    assert (result.dtype == 'category')
    # ... while an existing Categorical passes through unchanged
    values = Categorical(values)
    result = ensure_categorical(values)
    tm.assert_categorical_equal(result, values)
| GuessWhoSamFoo/pandas | pandas/tests/dtypes/test_inference.py | Python | bsd-3-clause | 48,913 |
# Copyright (c) 2014 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from taskflow import task
class BaseCloudTask(task.Task):
    """Base for taskflow tasks bound to a single cloud.

    Stores the given cloud object so subclasses can use ``self.cloud``.
    """
    def __init__(self, cloud, *args, **kwargs):
        # initialise taskflow's Task first, then attach the cloud handle
        super(BaseCloudTask, self).__init__(*args, **kwargs)
        self.cloud = cloud
class BaseCloudsTask(task.Task):
    """Base for taskflow tasks that span a source and a destination cloud.

    Stores both cloud objects as ``self.src_cloud`` / ``self.dst_cloud``.
    """
    def __init__(self, src_cloud, dst_cloud, *args, **kwargs):
        super(BaseCloudsTask, self).__init__(*args, **kwargs)
        self.src_cloud = src_cloud
        self.dst_cloud = dst_cloud
| Mirantis/pumphouse | pumphouse/task.py | Python | apache-2.0 | 1,003 |
# Package metadata constants (standard dunder-metadata convention).
__title__ = "betfairlightweight"
__description__ = "Lightweight python wrapper for Betfair API-NG"
__url__ = "https://github.com/liampauling/betfair"
__version__ = "2.16.0"
__author__ = "Liam Pauling"
__license__ = "MIT"
| liampauling/betfair | betfairlightweight/__version__.py | Python | mit | 221 |
# Module authorship metadata.
__author__ = "William Clyde"
__copyright__ = "Copyright 2016, William Clyde"
__license__ = "MIT"
| BillClyde/safenetfs | safenet/__init__.py | Python | mit | 97 |
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob.exc
from cinder.api import extensions
from cinder.api.openstack import wsgi
from cinder.api.v2 import volumes
from cinder.openstack.common.gettextutils import _
from cinder.openstack.common import log as logging
LOG = logging.getLogger(__name__)
class SchedulerHintsController(wsgi.Controller):
    """Injects OS-SCH-HNT:scheduler_hints from the request body into the
    volume dict before the core volume-create handler runs.
    """
    @staticmethod
    def _extract_scheduler_hints(body):
        """Return the scheduler hints dict found in *body*.

        :param body: deserialized request body (a dict)
        :returns: dict of hints ({} when the attribute is absent)
        :raises webob.exc.HTTPBadRequest: when the attribute is present
            but is not a valid mapping.
        """
        hints = {}
        attr = '%s:scheduler_hints' % Scheduler_hints.alias
        try:
            if attr in body:
                hints.update(body[attr])
        # dict.update raises ValueError for malformed key/value sequences
        # and TypeError for non-iterables (e.g. an int or None); both mean
        # the client sent bad data, so answer 400 instead of letting a
        # TypeError escape as a 500.
        except (ValueError, TypeError):
            msg = _("Malformed scheduler_hints attribute")
            raise webob.exc.HTTPBadRequest(explanation=msg)
        return hints
    @wsgi.extends
    def create(self, req, body):
        # wsgi.extends turns this generator into a pre-processing hook:
        # graft the hints onto the volume dict, then yield to the core
        # create handler.
        hints = self._extract_scheduler_hints(body)
        if 'volume' in body:
            body['volume']['scheduler_hints'] = hints
        yield
class Scheduler_hints(extensions.ExtensionDescriptor):
    """Pass arbitrary key/value pairs to the scheduler."""
    name = "SchedulerHints"
    alias = "OS-SCH-HNT"
    namespace = volumes.SCHEDULER_HINTS_NAMESPACE
    updated = "2013-04-18T00:00:00+00:00"
    def get_controller_extensions(self):
        # Attach the hints controller to the core "volumes" resource.
        hints_controller = SchedulerHintsController()
        extension = extensions.ControllerExtension(
            self, 'volumes', hints_controller)
        return [extension]
| github-borat/cinder | cinder/api/contrib/scheduler_hints.py | Python | apache-2.0 | 1,964 |
import pytest
from django.urls import reverse
from gamification.models import CourseGamificationEvent, \
MediaGamificationEvent, \
ActivityGamificationEvent
from oppia.test import OppiaTestCase
from oppia.models import Course, CoursePublishingLog, Quiz, Activity, Question
from zipfile import BadZipfile
from quiz.models import QuizProps, QuestionProps
class CourseUploadTest(OppiaTestCase):
fixtures = ['tests/test_user.json',
'tests/test_oppia.json',
'tests/test_quiz.json',
'tests/test_permissions.json',
'tests/test_course_permissions.json']
file_root = './oppia/fixtures/reference_files/'
course_file_path = file_root + 'ncd1_test_course.zip'
media_file_path = file_root + 'sample_video.m4v'
empty_section_course = file_root + 'test_course_empty_section.zip'
no_module_xml = file_root + 'test_course_no_module_xml.zip'
corrupt_course_zip = file_root + 'corrupt_course.zip'
course_no_sub_dir = file_root + 'test_course_no_sub_dir.zip'
course_old_version = file_root + 'ncd1_old_course.zip'
course_no_activities = file_root + 'test_course_no_activities.zip'
course_with_custom_points = file_root + 'ref-1.zip'
course_with_copied_activities = file_root + 'ref-1-copy.zip'
course_with_custom_points_updated = file_root + 'ref-1-updated.zip'
course_with_quizprops = file_root + 'quizprops_course.zip'
course_with_updated_quizprops = file_root + 'quizprops_course_updated.zip'
    @pytest.mark.xfail(reason="works on local but not on github workflows")
    def test_upload_template(self):
        # a valid course zip is accepted and redirects to upload step 2
        with open(self.course_file_path, 'rb') as course_file:
            self.client.force_login(self.admin_user)
            response = self.client.post(reverse('oppia:upload'),
                                        {'course_file': course_file})
            # should be redirected to the update step 2 form
            self.assertRedirects(response,
                                 reverse('oppia:upload_step2', args=[2]),
                                 302,
                                 200)
    @pytest.mark.xfail(reason="works on local but not on github workflows")
    def test_upload_with_empty_sections(self):
        # empty sections are tolerated; the upload still reaches step 2
        with open(self.empty_section_course, 'rb') as course_file:
            self.client.force_login(self.admin_user)
            response = self.client.post(reverse('oppia:upload'),
                                        {'course_file': course_file})
            course = Course.objects.latest('created_date')
            # should be redirected to the update step 2 form
            self.assertRedirects(response,
                                 reverse('oppia:upload_step2',
                                         args=[course.id]),
                                 302,
                                 200)
    @pytest.mark.xfail(reason="works on local but not on github workflows")
    def test_upload_no_module_xml(self):
        # a zip without module.xml is rejected and logged as "no_module_xml"
        with open(self.no_module_xml, 'rb') as course_file:
            self.client.force_login(self.admin_user)
            response = self.client.post(reverse('oppia:upload'),
                                        {'course_file': course_file})
        self.assertEqual(200, response.status_code)
        course_log = CoursePublishingLog.objects.latest('log_date')
        self.assertEqual("no_module_xml", course_log.action)
    @pytest.mark.xfail(reason="works on local but not on github workflows")
    def test_corrupt_course(self):
        # a corrupt zip is rejected and logged as "invalid_zip"
        with open(self.corrupt_course_zip, 'rb') as course_file:
            self.client.force_login(self.admin_user)
            response = self.client.post(reverse('oppia:upload'),
                                        {'course_file': course_file})
        self.assertEqual(200, response.status_code)
        # NOTE(review): assertRaises called without a callable or context
        # manager does not actually assert anything here.
        self.assertRaises(BadZipfile)
        course_log = CoursePublishingLog.objects.latest('log_date')
        self.assertEqual("invalid_zip", course_log.action)
    @pytest.mark.xfail(reason="works on local but not on github workflows")
    def test_no_sub_dir(self):
        # a zip missing the expected course sub-directory is invalid
        with open(self.course_no_sub_dir, 'rb') as course_file:
            self.client.force_login(self.admin_user)
            response = self.client.post(reverse('oppia:upload'),
                                        {'course_file': course_file})
        self.assertEqual(200, response.status_code)
        course_log = CoursePublishingLog.objects.latest('log_date')
        self.assertEqual("invalid_zip", course_log.action)
    @pytest.mark.xfail(reason="works on local but not on github workflows")
    def test_newer_version_exists(self):
        # uploading an older version of an existing course is refused
        with open(self.course_old_version, 'rb') as course_file:
            self.client.force_login(self.admin_user)
            response = self.client.post(reverse('oppia:upload'),
                                        {'course_file': course_file})
        self.assertEqual(200, response.status_code)
        course_log = CoursePublishingLog.objects.latest('log_date')
        self.assertEqual("newer_version_exists", course_log.action)
    @pytest.mark.xfail(reason="works on local but not on github workflows")
    def test_course_no_activities(self):
        # a course with no activities is rejected and logged accordingly
        with open(self.course_no_activities, 'rb') as course_file:
            self.client.force_login(self.admin_user)
            response = self.client.post(reverse('oppia:upload'),
                                        {'course_file': course_file})
        self.assertEqual(200, response.status_code)
        course_log = CoursePublishingLog.objects.latest('log_date')
        self.assertEqual("no_activities", course_log.action)
    @pytest.mark.xfail(reason="works on local but not on github workflows")
    def test_course_with_custom_points(self):
        # uploading ref-1.zip should create 10 course-level, 4 media-level
        # and 1 activity-level gamification events
        course_game_events_start = CourseGamificationEvent. \
            objects.all().count()
        media_game_events_start = MediaGamificationEvent. \
            objects.all().count()
        activity_game_events_start = ActivityGamificationEvent. \
            objects.all().count()
        with open(self.course_with_custom_points, 'rb') as course_file:
            self.client.force_login(self.admin_user)
            response = self.client.post(reverse('oppia:upload'),
                                        {'course_file': course_file})
        course = Course.objects.latest('created_date')
        self.assertRedirects(response,
                             reverse('oppia:upload_step2',
                                     args=[course.id]),
                             302,
                             200)
        course_game_events_end = CourseGamificationEvent.objects.all().count()
        self.assertEqual(course_game_events_start+10, course_game_events_end)
        media_game_events_end = MediaGamificationEvent.objects.all().count()
        self.assertEqual(media_game_events_start+4, media_game_events_end)
        activity_game_events_end = ActivityGamificationEvent. \
            objects.all().count()
        self.assertEqual(activity_game_events_start+1,
                         activity_game_events_end)
    @pytest.mark.xfail(reason="works on local but not on github workflows")
    def test_course_with_custom_points_updated(self):
        """Re-uploading an updated package must not duplicate the
        gamification events created by the first upload."""
        # first upload creates the course and its gamification events
        with open(self.course_with_custom_points, 'rb') as course_file:
            self.client.force_login(self.admin_user)
            response = self.client.post(reverse('oppia:upload'),
                                        {'course_file': course_file})
            course = Course.objects.latest('created_date')
            self.assertRedirects(response,
                                 reverse('oppia:upload_step2',
                                         args=[course.id]),
                                 302,
                                 200)
        # snapshot counts AFTER the first upload; the second upload should
        # leave them unchanged
        course_game_events_start = CourseGamificationEvent. \
            objects.all().count()
        media_game_events_start = MediaGamificationEvent. \
            objects.all().count()
        activity_game_events_start = ActivityGamificationEvent. \
            objects.all().count()
        # reset course version no to avoid issue with newer version being
        # reported in the test
        update_course = Course.objects.get(shortname='ref-1')
        update_course.version = 0
        update_course.save()
        with open(self.course_with_custom_points_updated, 'rb') as course_file:
            self.client.force_login(self.admin_user)
            response = self.client.post(reverse('oppia:upload'),
                                        {'course_file': course_file})
            course = Course.objects.latest('created_date')
            self.assertRedirects(response,
                                 reverse('oppia:upload_step2',
                                         args=[course.id]),
                                 302,
                                 200)
        # counts unchanged: the update replaced events instead of adding
        course_game_events_end = CourseGamificationEvent.objects.all().count()
        self.assertEqual(course_game_events_start, course_game_events_end)
        media_game_events_end = MediaGamificationEvent.objects.all().count()
        self.assertEqual(media_game_events_start, media_game_events_end)
        activity_game_events_end = ActivityGamificationEvent. \
            objects.all().count()
        self.assertEqual(activity_game_events_start, activity_game_events_end)
    @pytest.mark.xfail(reason="works on local but not on github workflows")
    def test_update_quizprops(self):
        """Re-uploading a course updates quiz/question props in place
        rather than creating duplicate quizzes or properties."""
        self.client.force_login(self.admin_user)
        with open(self.course_with_quizprops, 'rb') as course_file:
            response = self.client.post(reverse('oppia:upload'), {'course_file': course_file})
        course = Course.objects.get(shortname='quizprops_course')
        self.assertRedirects(response,
                             reverse('oppia:upload_step2',
                                     args=[course.id]),
                             302,
                             200)
        # collect the quizzes referenced by the course's quiz activities
        current_quizzes = Activity.objects.filter(section__course=course, type=Activity.QUIZ).values_list('digest', flat=True)
        quizzes = Quiz.objects.filter(quizprops__name='digest', quizprops__value__in=current_quizzes)
        quiz_questions = Question.objects.filter(quizquestion__quiz__in=quizzes)
        quiz_props = QuizProps.objects.filter(quiz__in=quizzes)
        question_props = QuestionProps.objects.filter(question__in=quiz_questions)
        # baseline counts from the first upload
        self.assertEqual(1, quizzes.count())
        self.assertEqual(2, quiz_questions.count())
        self.assertEqual(8, quiz_props.count())
        self.assertEqual(4, question_props.count())
        self.assertEqual(QuizProps.objects.filter(name='moodle_quiz_id', quiz=quizzes.first()).first().value, '43504')
        # Lower the version so that we can upload a new one regardless of the current date
        course.version = 100
        course.save()
        with open(self.course_with_updated_quizprops, 'rb') as course_file:
            response = self.client.post(reverse('oppia:upload'), {'course_file': course_file})
        course = Course.objects.get(shortname='quizprops_course')
        current_quizzes = Activity.objects.filter(section__course=course, type=Activity.QUIZ).values_list('digest',
                                                                                                         flat=True)
        quizzes = Quiz.objects.filter(quizprops__name='digest', quizprops__value__in=current_quizzes)
        quiz_questions = Question.objects.filter(quizquestion__quiz__in=quizzes)
        quiz_props = QuizProps.objects.filter(quiz__in=quizzes)
        question_props = QuestionProps.objects.filter(question__in=quiz_questions)
        # Assert that no new quizzes or props were created, only updated
        self.assertEqual(1, quizzes.count())
        self.assertEqual(2, quiz_questions.count())
        self.assertEqual(8, quiz_props.count())
        self.assertEqual(5, question_props.count()) # Additional question prop added
        self.assertEqual(QuizProps.objects.filter(name='moodle_quiz_id', quiz=quizzes.first()).first().value,
                         '43505') # property updated
    @pytest.mark.xfail(reason="works on local but not on github workflows")
    def test_course_with_repeated_activities(self):
        """A copied course reusing the same activities must not disturb the
        activity count of the original course."""
        with open(self.course_with_custom_points, 'rb') as course_file:
            self.client.force_login(self.admin_user)
            response = self.client.post(reverse('oppia:upload'),
                                        {'course_file': course_file})
            course = Course.objects.latest('created_date')
            self.assertRedirects(response,
                                 reverse('oppia:upload_step2',
                                         args=[course.id]), 302, 200)
        course_activities = Activity.objects.filter(section__course__shortname='ref-1').count()
        self.assertEqual(course_activities, 5)
        # upload a copy of the course under a different shortname
        with open(self.course_with_copied_activities, 'rb') as course_file:
            self.client.force_login(self.admin_user)
            response = self.client.post(reverse('oppia:upload'),
                                        {'course_file': course_file})
            course = Course.objects.latest('created_date')
            self.assertRedirects(response,
                                 reverse('oppia:upload_step2',
                                         args=[course.id]), 302, 200)
        # both the original and the copy keep their own 5 activities
        course_activities = Activity.objects.filter(section__course__shortname='ref-1').count()
        new_course_activities = Activity.objects.filter(section__course__shortname='ref-1-copy').count()
        self.assertEqual(new_course_activities, 5)
        self.assertEqual(course_activities, 5)
| DigitalCampus/django-oppia | tests/test_course_upload.py | Python | gpl-3.0 | 14,057 |
import numpy as np
import pytest
from ogusa import demographics
def test_get_pop_objs():
    """
    Test that omega_SS and the last period of omega_path_S are
    close to each other.
    """
    E = 20
    S = 80
    T = int(round(4.0 * S))
    start_year = 2018
    (omega, g_n_ss, omega_SS, surv_rate, rho, g_n_vector, imm_rates,
     omega_S_preTP) = demographics.get_pop_objs(E, S, T, 1, 100,
                                                start_year, False)
    # the end of the transition path should have converged to the
    # steady-state population distribution
    assert (np.allclose(omega_SS, omega[-1, :]))
def test_pop_smooth():
    """
    Test that population growth rates evolve smoothly.
    """
    E = 20
    S = 80
    T = int(round(4.0 * S))
    start_year = 2018
    (omega, g_n_ss, omega_SS, surv_rate, rho, g_n_vector, imm_rates,
     omega_S_preTP) = demographics.get_pop_objs(E, S, T, 1, 100,
                                                start_year, False)
    # NOTE(review): np.any() passes if even ONE adjacent difference is
    # small, which makes these assertions nearly vacuous; asserting the
    # whole path is smooth presumably needs np.all() -- confirm intent.
    assert (np.any(np.absolute(omega[:-1, :] - omega[1:, :]) < 0.0001))
    assert (np.any(np.absolute(g_n_vector[:-1] - g_n_vector[1:]) < 0.0001))
def test_imm_smooth():
    """
    Test that immigration rates evolve smoothly.
    """
    E = 20
    S = 80
    T = int(round(4.0 * S))
    start_year = 2018
    (omega, g_n_ss, omega_SS, surv_rate, rho, g_n_vector, imm_rates,
     omega_S_preTP) = demographics.get_pop_objs(E, S, T, 1, 100,
                                                start_year, False)
    # NOTE(review): as in test_pop_smooth, np.any() makes this assertion
    # nearly vacuous; np.all() is presumably intended -- confirm.
    assert (np.any(np.absolute(imm_rates[:-1, :] - imm_rates[1:, :]) <
                   0.0001))
def test_get_fert():
    """Fertility rates from data have one entry per model period."""
    num_periods = 100
    rates = demographics.get_fert(num_periods, 0, 100, graph=False)
    assert rates.shape[0] == num_periods
def test_get_mort():
    '''
    Test of function to get mortality rates from data
    '''
    S = 100
    # get_mort returns (mortality rates by age, infant mortality rate)
    mort_rates, infmort_rate = demographics.get_mort(
        S, 0, 100, graph=False)
    assert (mort_rates.shape[0] == S)
def test_infant_mort():
    '''
    Test of function to get mortality rates from data
    '''
    mort_rates, infmort_rate = demographics.get_mort(
        100, 0, 100, graph=False)
    # check that infant mortality equals rate hardcoded into
    # demographics.py
    # NOTE(review): brittle -- breaks whenever the source data constant
    # changes; consider importing the constant instead of duplicating it.
    assert (infmort_rate == 0.00587)
def test_pop_rebin():
'''
Test of population rebin function
'''
curr_pop_dist = np.array([0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8])
totpers_new = 5
rebinned_data = demographics.pop_rebin(curr_pop_dist, totpers_new)
assert (rebinned_data.shape[0] == totpers_new)
def test_get_imm_resid():
    '''
    Test of function to solve for immigration rates from population data
    '''
    S = 100
    imm_rates = demographics.get_imm_resid(S, 0, 100)
    # one residual immigration rate per model age group
    assert (imm_rates.shape[0] == S)
| OpenSourcePolicyCenter/dynamic | ogusa/tests/test_demographics.py | Python | mit | 2,783 |
# Point Django at the AppConfig to use for this app.
# NOTE(review): default_app_config is deprecated since Django 3.2 (apps with
# a single AppConfig are auto-discovered) -- confirm target Django version.
default_app_config = 'cms_articles.import_wordpress.apps.CmsArticlesImportWordpressConfig'
| misli/django-cms-articles | cms_articles/import_wordpress/__init__.py | Python | bsd-3-clause | 91 |
#!/usr/bin/env python
from __future__ import print_function
import sys
from os import environ
from os.path import dirname, join, pardir, abspath, exists
import subprocess
import nose
def fetch_es_repo():
    """Locate (and optionally sync) the elasticsearch source repo so the
    yaml REST tests can run; sets the TEST_ES_YAML_DIR env variable.

    Side effects: mutates os.environ and may run git commands that reset
    the elasticsearch checkout to the running server's build hash.
    """
    # user is manually setting YAML dir, don't tamper with it
    if 'TEST_ES_YAML_DIR' in environ:
        return
    repo_path = environ.get(
        'TEST_ES_REPO',
        abspath(join(dirname(__file__), pardir, pardir, 'elasticsearch'))
    )
    # no repo
    if not exists(repo_path) or not exists(join(repo_path, '.git')):
        print('No elasticsearch repo found...')
        # set YAML DIR to empty to skip yaml tests
        environ['TEST_ES_YAML_DIR'] = ''
        return
    # set YAML test dir
    environ['TEST_ES_YAML_DIR'] = join(repo_path, 'rest-api-spec', 'src', 'main', 'resources', 'rest-api-spec', 'test')
    # fetching of yaml tests disabled, we'll run with what's there
    if environ.get('TEST_ES_NOFETCH', False):
        return
    # imported lazily: only needed when we actually talk to a server
    from test_elasticsearch.test_server import get_client
    from test_elasticsearch.test_cases import SkipTest
    # find out the sha of the running es
    try:
        es = get_client()
        sha = es.info()['version']['build_hash']
    except (SkipTest, KeyError):
        print('No running elasticsearch >1.X server...')
        return
    # fetch new commits to be sure...
    print('Fetching elasticsearch repo...')
    subprocess.check_call('cd %s && git fetch https://github.com/elasticsearch/elasticsearch.git' % repo_path, shell=True)
    # reset to the version from info()
    subprocess.check_call('cd %s && git reset --hard %s' % (repo_path, sha), shell=True)
def run_all(argv=None):
    """Entry point: sync the es repo, then run the test suite via nose.

    :param argv: nose argv; defaults to a coverage-enabled configuration.
    """
    # NOTE(review): sys.exitfunc is Python 2 only (removed in Python 3;
    # use atexit.register instead) -- confirm the supported interpreter.
    sys.exitfunc = lambda: sys.stderr.write('Shutting down....\n')
    # fetch yaml tests
    fetch_es_repo()
    # always insert coverage when running tests
    if argv is None:
        argv = [
            'nosetests', '--with-xunit',
            '--with-xcoverage', '--cover-package=elasticsearch', '--cover-erase',
            '--logging-filter=elasticsearch', '--logging-level=DEBUG',
            '--verbose', '--with-id',
        ]
    nose.run_exit(
        argv=argv,
        defaultTest=abspath(dirname(__file__))
    )


if __name__ == '__main__':
    run_all(sys.argv)
| brunobell/elasticsearch-py | test_elasticsearch/run_tests.py | Python | apache-2.0 | 2,255 |
"""SCons.Tool.fortran
Tool-specific initialization for a generic Posix f77/f90 Fortran compiler.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/fortran.py 2014/03/02 14:18:15 garyo"
import re
import SCons.Action
import SCons.Defaults
import SCons.Scanner.Fortran
import SCons.Tool
import SCons.Util
from SCons.Tool.FortranCommon import add_all_to_env, add_fortran_to_env
# Candidate generic Fortran compiler executables, in order of preference.
compilers = ['f95', 'f90', 'f77']


def generate(env):
    """Add Builders and construction variables for Fortran to *env*."""
    add_all_to_env(env)
    add_fortran_to_env(env)
    # Fall back to plain 'f77' when none of the candidates is detected.
    detected = env.Detect(compilers)
    fc = detected if detected else 'f77'
    env['SHFORTRAN'] = fc
    env['FORTRAN'] = fc


def exists(env):
    """Return a truthy compiler name if any Fortran compiler is available."""
    return env.Detect(compilers)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| unigent/OpenWrt-Firefly-SDK | staging_dir/host/lib/scons-2.3.1/SCons/Tool/fortran.py | Python | gpl-2.0 | 2,068 |
from yowsup.layers.protocol_messages.protocolentities.attributes.attributes_downloadablemedia import \
DownloadableMediaMessageAttributes
class AudioAttributes(object):
    """Attributes of an audio message: playback duration, push-to-talk flag,
    optional streaming sidecar, plus the downloadable-media attributes."""

    def __init__(self, downloadablemedia_attributes, seconds, ptt, streaming_sidecar=None):
        """
        :param downloadablemedia_attributes: DownloadableMediaMessageAttributes
        :param seconds: duration of audio playback in seconds (int)
        :param ptt: True when this is a push-to-talk audio message
        :param streaming_sidecar: optional sidecar bytes for streaming
        """
        self._downloadablemedia_attributes = downloadablemedia_attributes
        self._seconds = seconds
        self._ptt = ptt
        self._streaming_sidecar = streaming_sidecar

    def __str__(self):
        # Render only the attributes that are set; sidecar bytes are elided.
        rendered = []
        if self._seconds is not None:
            rendered.append("seconds=%s" % self._seconds)
        if self._ptt is not None:
            rendered.append("ptt=%s" % self._ptt)
        if self._streaming_sidecar is not None:
            rendered.append("streaming_sidecar=%s" % "[binary data]")
        rendered.append("downloadable=%s" % self._downloadablemedia_attributes)
        return "[%s]" % " ".join(rendered)

    @property
    def downloadablemedia_attributes(self):
        return self._downloadablemedia_attributes

    @downloadablemedia_attributes.setter
    def downloadablemedia_attributes(self, new_value):
        self._downloadablemedia_attributes = new_value

    @property
    def seconds(self):
        return self._seconds

    @seconds.setter
    def seconds(self, new_value):
        self._seconds = new_value

    @property
    def ptt(self):
        return self._ptt

    @ptt.setter
    def ptt(self, new_value):
        self._ptt = new_value

    @property
    def streaming_sidecar(self):
        return self._streaming_sidecar

    @streaming_sidecar.setter
    def streaming_sidecar(self, new_value):
        self._streaming_sidecar = new_value
| tgalal/yowsup | yowsup/layers/protocol_messages/protocolentities/attributes/attributes_audio.py | Python | gpl-3.0 | 1,964 |
# -*- coding: utf-8 -*-
# czat/views.py
from django.shortcuts import render
# from django.http import HttpResponse
def index(request):
    """Application home page (renders the czat/index.html template)."""
    # Earlier plain-text variant, kept for reference:
    # return HttpResponse("Witaj w aplikacji Czat!")
    return render(request, 'czat/index.html')
| koduj-z-klasa/python101 | docs/webdjango/czat1/views_z2.py | Python | mit | 274 |
"""Modularity matrix of graphs.
"""
import networkx as nx
from networkx.utils import not_implemented_for
__all__ = ["modularity_matrix", "directed_modularity_matrix"]
@not_implemented_for("directed")
@not_implemented_for("multigraph")
def modularity_matrix(G, nodelist=None, weight=None):
    r"""Returns the modularity matrix of G.

    The modularity matrix is the matrix B = A - <A>, where A is the adjacency
    matrix and <A> is the average adjacency matrix, assuming that the graph
    is described by the configuration model.

    More specifically, the element B_ij of B is defined as

    .. math::
        A_{ij} - {k_i k_j \over 2 m}

    where k_i is the degree of node i, and where m is the number of edges
    in the graph. When weight is set to a name of an attribute edge, Aij, k_i,
    k_j and m are computed using its value.

    Parameters
    ----------
    G : Graph
        A NetworkX graph

    nodelist : list, optional
        The rows and columns are ordered according to the nodes in nodelist.
        If nodelist is None, then the ordering is produced by G.nodes().

    weight : string or None, optional (default=None)
        The edge attribute that holds the numerical value used for
        the edge weight. If None then all edge weights are 1.

    Returns
    -------
    B : Numpy matrix
        The modularity matrix of G.

    Examples
    --------
    >>> k =[3, 2, 2, 1, 0]
    >>> G = nx.havel_hakimi_graph(k)
    >>> B = nx.modularity_matrix(G)

    See Also
    --------
    to_numpy_array
    modularity_spectrum
    adjacency_matrix
    directed_modularity_matrix

    References
    ----------
    .. [1] M. E. J. Newman, "Modularity and community structure in networks",
       Proc. Natl. Acad. Sci. USA, vol. 103, pp. 8577-8582, 2006.
    """
    if nodelist is None:
        nodelist = list(G)
    # NOTE(review): to_scipy_sparse_matrix was renamed/removed in newer
    # NetworkX (to_scipy_sparse_array) -- confirm the targeted version.
    A = nx.to_scipy_sparse_matrix(G, nodelist=nodelist, weight=weight, format="csr")
    # k: degree (column) vector; scipy's sparse .sum returns a numpy matrix
    # here, so the `*` below is matrix multiplication (an outer product).
    k = A.sum(axis=1)
    m = k.sum() * 0.5
    # Expected adjacency matrix
    X = k * k.transpose() / (2 * m)
    return A - X
@not_implemented_for("undirected")
@not_implemented_for("multigraph")
def directed_modularity_matrix(G, nodelist=None, weight=None):
    """Returns the directed modularity matrix of G.

    The modularity matrix is the matrix B = A - <A>, where A is the adjacency
    matrix and <A> is the expected adjacency matrix, assuming that the graph
    is described by the configuration model.

    More specifically, the element B_ij of B is defined as

    .. math::
        B_{ij} = A_{ij} - k_i^{out} k_j^{in} / m

    where :math:`k_i^{in}` is the in degree of node i, and :math:`k_j^{out}` is the out degree
    of node j, with m the number of edges in the graph. When weight is set
    to a name of an attribute edge, Aij, k_i, k_j and m are computed using
    its value.

    Parameters
    ----------
    G : DiGraph
        A NetworkX DiGraph

    nodelist : list, optional
        The rows and columns are ordered according to the nodes in nodelist.
        If nodelist is None, then the ordering is produced by G.nodes().

    weight : string or None, optional (default=None)
        The edge attribute that holds the numerical value used for
        the edge weight. If None then all edge weights are 1.

    Returns
    -------
    B : Numpy matrix
        The modularity matrix of G.

    Examples
    --------
    >>> G = nx.DiGraph()
    >>> G.add_edges_from(((1,2), (1,3), (3,1), (3,2), (3,5), (4,5), (4,6),
    ...                   (5,4), (5,6), (6,4)))
    >>> B = nx.directed_modularity_matrix(G)

    Notes
    -----
    NetworkX defines the element A_ij of the adjacency matrix as 1 if there
    is a link going from node i to node j. Leicht and Newman use the opposite
    definition. This explains the different expression for B_ij.

    See Also
    --------
    to_numpy_array
    modularity_spectrum
    adjacency_matrix
    modularity_matrix

    References
    ----------
    .. [1] E. A. Leicht, M. E. J. Newman,
       "Community structure in directed networks",
       Phys. Rev Lett., vol. 100, no. 11, p. 118703, 2008.
    """
    if nodelist is None:
        nodelist = list(G)
    A = nx.to_scipy_sparse_matrix(G, nodelist=nodelist, weight=weight, format="csr")
    # in/out degree vectors; as numpy matrices, so k_out * k_in below is a
    # matrix (outer) product yielding the expected-edge-count matrix.
    k_in = A.sum(axis=0)
    k_out = A.sum(axis=1)
    m = k_in.sum()
    # Expected adjacency matrix
    X = k_out * k_in / m
    return A - X
| SpaceGroupUCL/qgisSpaceSyntaxToolkit | esstoolkit/external/networkx/linalg/modularitymatrix.py | Python | gpl-3.0 | 4,394 |
"""
Builds sumatra and uploads results to s3 for easy analysis, viewable at:
http://kjkpub.s3.amazonaws.com/sumatrapdf/buildbot/index.html
"""
import sys
import os
# assumes is being run as ./scripts/buildbot.py
# Make tools/efi importable (it provides the efiparse module used below).
efi_scripts_dir = os.path.join("tools", "efi")
sys.path.append(efi_scripts_dir)
import shutil
import time
import datetime
import cPickle
import traceback
import s3
import util
import util2
import efiparse
import build
from util import file_remove_try_hard, run_cmd_throw
from util import parse_svnlog_out, Serializable, create_dir
from util import load_config, run_cmd, strip_empty_lines
from util import verify_path_exists, verify_started_in_right_directory
from buildbot_html import gen_analyze_html, build_index_html, rebuild_trans_src_path_cache
from buildbot_html import build_sizes_json, g_first_analyze_build
import runtests
"""
TODO:
- diff for symbols in html format
- upload efi html diff as part of buildbot
MAYBE:
- aggressive optimization cause symbol churn which makes reading efi output
hard. One option would be to run efi on an executable compiled with less
aggressive optimization. Another would be to post-process the result
and use heuristic to suppress bogus changes
"""
class Stats(Serializable):
    """Per-build statistics (analyze warning counts, binary sizes, failure
    state), persisted via Serializable and uploaded to s3 as stats.txt."""

    # default values; *_size fields are in bytes, counts come from /analyze
    fields = {
        "analyze_sumatra_warnings_count": 0,
        "analyze_mupdf_warnings_count": 0,
        "analyze_ext_warnings_count": 0,
        "rel_sumatrapdf_exe_size": 0,
        "rel_sumatrapdf_no_mupdf_exe_size": 0,
        "rel_installer_exe_size": 0,
        "rel_libmupdf_dll_size": 0,
        "rel_nppdfviewer_dll_size": 0,
        "rel_pdffilter_dll_size": 0,
        "rel_pdfpreview_dll_size": 0,
        "rel_failed": False,
        "rel_build_log": "",
        "analyze_out": "",
    }
    # large, log-like fields kept in memory only, never written to disk
    fields_no_serialize = ["rel_build_log", "analyze_out"]

    def __init__(self, read_from_file=None):
        Serializable.__init__(self, Stats.fields,
                              Stats.fields_no_serialize, read_from_file)
def file_size(p):
    """Return the size of file *p* in bytes."""
    return os.stat(p).st_size
def str2bool(s):
    """Parse "true"/"1" or "false"/"0" (case-insensitive) into a bool.

    Raises:
        ValueError: for any other input. The original used a bare
        assert(False), which is stripped under ``python -O`` and would
        silently return None.
    """
    normalized = s.lower()
    if normalized in ("true", "1"):
        return True
    if normalized in ("false", "0"):
        return False
    raise ValueError("cannot parse %r as a bool" % (s,))
# minimum gap between consecutive pre-release builds
TIME_BETWEEN_PRE_RELEASE_BUILDS_IN_SECS = 60 * 60 * 8 # 8hrs
g_time_of_last_build = None

# all buildbot caches live outside the source tree, next to the checkout
g_cache_dir = create_dir(
    os.path.realpath(os.path.join("..", "sumatrapdfcache", "buildbot")))
g_stats_cache_dir = create_dir(os.path.join(g_cache_dir, "stats"))
g_logs_cache_dir = create_dir(os.path.join(g_cache_dir, "logs"))


def get_cache_dir():
    """Root of the buildbot cache directory."""
    return g_cache_dir


def get_stats_cache_dir():
    """Directory caching per-revision stats.txt files."""
    return g_stats_cache_dir


def get_logs_cache_dir():
    """Directory caching per-revision build/analyze/efi logs."""
    return g_logs_cache_dir
@util2.memoize
def cert_path():
    """Absolute path of the signing certificate next to this script;
    raises (via verify_path_exists) when it is missing."""
    scripts_dir = os.path.realpath(os.path.dirname(__file__))
    # note: local name shadows the function name; harmless but confusing
    cert_path = os.path.join(scripts_dir, "cert.pfx")
    return verify_path_exists(cert_path)


def logs_efi_out_path(ver):
    """Path of the cached, bz2-compressed efi output for revision *ver*."""
    return os.path.join(get_logs_cache_dir(), str(ver) + "_efi.txt.bz2")
# logs are only kept for potential troubleshooting and they're quite big,
# so we delete old files (we keep logs for the last $to_keep revisions)
def delete_old_logs(to_keep=10):
    """Delete cached log files for all but the *to_keep* newest revisions.

    Log files are named "<revision>_<suffix>". Files whose name does not
    start with an integer revision are ignored (the original raised
    ValueError on such stray files and deleted nothing).
    """
    ver_of = {}
    for name in os.listdir(get_logs_cache_dir()):
        try:
            ver_of[name] = int(name.split("_")[0])
        except ValueError:
            continue  # not a per-revision log file; leave it alone
    versions = sorted(set(ver_of.values()), reverse=True)
    if len(versions) <= to_keep:
        return
    to_delete = set(versions[to_keep:])
    for name, ver in ver_of.items():
        if ver in to_delete:
            os.remove(os.path.join(get_logs_cache_dir(), name))
# return Stats object or None if we don't have it for this version
def stats_for_ver(ver):
    """Load build stats for revision *ver*, downloading stats.txt from s3
    into the local cache on first access; None when s3 has none either."""
    local_path = os.path.join(get_stats_cache_dir(), ver + ".txt")
    if not os.path.exists(local_path):
        s3_path = "sumatrapdf/buildbot/%s/stats.txt" % ver
        if not s3.exists(s3_path):
            return None
        s3.download_to_file(s3_path, local_path)
        assert(os.path.exists(local_path))
    return Stats(local_path)
def previous_successful_build_ver(ver):
    """Walk backwards from *ver* to the newest earlier build that succeeded.

    Returns 0 when no earlier build has recorded stats.
    """
    candidate = int(ver) - 1
    while True:
        candidate_stats = stats_for_ver(str(candidate))
        if candidate_stats is None:
            return 0
        if not candidate_stats.rel_failed:
            return candidate
        candidate -= 1
# We cache results of running svn log in a dict mapping
# version to string returned by svn log
g_svn_log_per_ver = None


def load_svn_log_data():
    """Load the pickled version -> svn-log cache from disk.

    Returns an empty dict when the cache file is missing; a corrupt cache
    file is deleted and an empty dict returned.
    """
    try:
        path = os.path.join(get_cache_dir(), "snv_log.dat")
        fo = open(path, "rb")
    except IOError:
        # it's ok if doesn't exist
        return {}
    try:
        res = cPickle.load(fo)
        fo.close()
        return res
    except:
        # NOTE(review): bare except also swallows KeyboardInterrupt etc.;
        # consider narrowing to "except Exception".
        fo.close()
        file_remove_try_hard(path)
        return {}
def save_svn_log_data(data):
    """Persist the version -> svn-log cache to disk in pickle format.

    Uses a with-statement so the file handle is closed even when
    cPickle.dump raises (the original leaked the handle on failure).
    The "snv_log.dat" typo in the file name is kept for compatibility
    with existing caches.
    """
    cache_file = os.path.join(get_cache_dir(), "snv_log.dat")
    with open(cache_file, "wb") as fo:
        cPickle.dump(data, fo, protocol=cPickle.HIGHEST_PROTOCOL)
def checkin_comment_for_ver(ver):
    """Return the commit message for svn revision *ver*.

    Results are memoized in g_svn_log_per_ver and persisted to disk;
    revisions that didn't change source code yield a placeholder string.
    """
    global g_svn_log_per_ver
    ver = str(ver)
    if g_svn_log_per_ver is None:
        g_svn_log_per_ver = load_svn_log_data()
    if ver not in g_svn_log_per_ver:
        # TODO: retry few times to make it robust against temporary network
        # failures
        (out, err) = run_cmd_throw("svn", "log", "-r%s" % ver, "-v")
        g_svn_log_per_ver[ver] = out
        save_svn_log_data(g_svn_log_per_ver)
    s = g_svn_log_per_ver[ver]
    res = parse_svnlog_out(s)
    if res is None:
        return "not a source code change"
    return res[1]
# return true if we already have results for a given build number in s3
def has_already_been_built(ver):
    """Check s3 for artifacts proving revision *ver* was already built."""
    s3_dir = "sumatrapdf/buildbot/"
    expected = (s3_dir + ver + "/analyze.html",
                s3_dir + ver + "/release_build_log.txt")
    return any(k.name in expected for k in s3.list(s3_dir))
def verify_efi_present():
    """Exit the process unless a working efi.exe is on the PATH."""
    try:
        (out, err, errcode) = util.run_cmd("efi.exe")
    except:
        # running it failed entirely (not installed / not on PATH)
        print("Must have efi.exe in the %PATH%!!!")
        sys.exit(1)
    # sanity check: a real efi.exe prints its usage when run with no args
    if "Usage:" not in out:
        print("efi.exe created unexpected output:\n%s" % out)
        sys.exit(1)
def file_size_in_obj(file_name, defSize=None):
    """Size of obj-rel/<file_name>; falls back to defSize when the file
    is absent and a default was given."""
    path = os.path.join("obj-rel", file_name)
    if defSize is not None and not os.path.exists(path):
        return defSize
    return file_size(path)
def clean_release():
    """Remove all release-build outputs so the next build starts fresh."""
    shutil.rmtree("obj-rel", ignore_errors=True)
    shutil.rmtree("vs-premake", ignore_errors=True)
    shutil.rmtree(os.path.join("mupdf", "generated"), ignore_errors=True)
def build_release(stats, ver):
    """Run the release nmake build for revision *ver*, recording the build
    log, failure flag and resulting binary sizes in *stats*."""
    config = "CFG=rel"
    # NOTE(review): obj_dir is unused here; clean_release() hard-codes paths
    obj_dir = "obj-rel"
    extcflags = "EXTCFLAGS=-DSVN_PRE_RELEASE_VER=%s" % ver
    platform = "PLATFORM=X86"
    clean_release()
    (out, err, errcode) = run_cmd("nmake", "-f", "makefile.msvc",
                                  config, extcflags, platform,
                                  "all_sumatrapdf")
    log_path = os.path.join(get_logs_cache_dir(), ver + "_rel_log.txt")
    build_log = out + "\n====STDERR:\n" + err
    build_log = strip_empty_lines(build_log)
    open(log_path, "w").write(build_log)
    stats.rel_build_log = ""
    stats.rel_failed = False
    if errcode != 0:
        # keep the log in memory so the caller can upload it to s3
        stats.rel_build_log = build_log
        stats.rel_failed = True
        return
    stats.rel_sumatrapdf_exe_size = file_size_in_obj("SumatraPDF.exe")
    stats.rel_sumatrapdf_no_mupdf_exe_size = file_size_in_obj(
        "SumatraPDF-no-MuPDF.exe")
    stats.rel_libmupdf_dll_size = file_size_in_obj("libmupdf.dll")
    stats.rel_nppdfviewer_dll_size = file_size_in_obj("npPdfViewer.dll", 0)
    stats.rel_pdffilter_dll_size = file_size_in_obj("PdfFilter.dll")
    stats.rel_pdfpreview_dll_size = file_size_in_obj("PdfPreview.dll")
    stats.rel_installer_exe_size = file_size_in_obj("Installer.exe")
def build_analyze(stats, ver):
    """Run the /analyze nmake build for revision *ver*, storing the raw
    compiler output in stats.analyze_out and in the logs cache."""
    config = "CFG=rel"
    obj_dir = "obj-rel"
    extcflags = "EXTCFLAGS=-DSVN_PRE_RELEASE_VER=%s" % ver
    platform = "PLATFORM=X86"
    # start clean so analyze results cover all files
    shutil.rmtree(obj_dir, ignore_errors=True)
    shutil.rmtree(os.path.join("mupdf", "generated"), ignore_errors=True)
    (out, err, errcode) = run_cmd("nmake", "-f", "makefile.msvc",
                                  "WITH_ANALYZE=yes", config, extcflags, platform, "all_sumatrapdf")
    stats.analyze_out = out
    log_path = os.path.join(get_logs_cache_dir(), ver + "_analyze_log.txt")
    s = out + "\n====STDERR:\n" + err
    open(log_path, "w").write(strip_empty_lines(s))
def svn_update_to_ver(ver):
    """svn-update the working copy to *ver* and refresh the translation
    source-path cache that depends on the checked-out files."""
    run_cmd_throw("svn", "update", "-r" + ver)
    rebuild_trans_src_path_cache()
# runs efi.exe on obj-rel/SumatraPDF.exe, stores the data in obj-rel/efi.txt.bz2
# and uploads to s3 as efi.txt.bz2
def build_and_upload_efi_out(ver):
    """Generate efi (executable fingerprint) data for the freshly built
    SumatraPDF.exe, upload it to s3 and keep a copy in the logs cache."""
    obj_dir = "obj-rel"
    s3dir = "sumatrapdf/buildbot/%s/" % ver
    os.chdir(obj_dir)
    util.run_cmd_throw("efi", "SumatraPDF.exe", ">efi.txt")
    util.bz_file_compress("efi.txt", "efi.txt.bz2")
    s3.upload_file_public("efi.txt.bz2", s3dir + "efi.txt.bz2", silent=True)
    shutil.copyfile("efi.txt.bz2", logs_efi_out_path(ver))
    # NOTE(review): if anything above raises, we never chdir back
    os.chdir("..")
def get_efi_out(ver):
    """Return the local path of the cached efi output for *ver*, or None.

    We rely on the build having run on this machine, so the result should
    still be in the logs cache; no attempt is made to download it from s3.
    """
    candidate = logs_efi_out_path(str(ver))
    if not os.path.exists(candidate):
        return None
    return candidate
def _efi_syms_section(lines, title, syms, size_of, max_items):
    """Append one "<title>:" section listing *syms* (optionally truncated
    to max_items) to *lines*; skipped entirely when *syms* is empty."""
    if len(syms) == 0:
        return
    lines.append("\n%s:" % title)
    if max_items != -1:
        syms = syms[:max_items]
    for sym in syms:
        lines.append("%4d : %s" % (size_of(sym), sym.full_name()))


def efi_diff_as_txt(diff, max=-1):
    """Format an efi diff as text: added/removed/changed symbols, each
    section sorted by (size-)delta, largest first.

    :param diff: object with .added/.removed/.changed symbol lists; the
        lists are sorted in place (as the original did)
    :param max: per-section symbol limit, -1 for no limit (the parameter
        name shadows the builtin but is kept for interface compatibility)
    """
    lines = []
    diff.added.sort(key=lambda sym: sym.size, reverse=True)
    diff.removed.sort(key=lambda sym: sym.size, reverse=True)
    diff.changed.sort(key=lambda sym: sym.size_diff, reverse=True)
    # the three sections only differ by title and size accessor, so the
    # copy-pasted loops (with dead commented-out lines) were factored out
    _efi_syms_section(lines, "Added symbols", diff.added,
                      lambda sym: sym.size, max)
    _efi_syms_section(lines, "Removed symbols", diff.removed,
                      lambda sym: sym.size, max)
    _efi_syms_section(lines, "Changed symbols", diff.changed,
                      lambda sym: sym.size_diff, max)
    return "\n".join(lines)
# builds efi diff between this version and previous succesful version
# and uploads as efi_diff.txt
def build_and_upload_efi_txt_diff(ver):
    """Diff efi output of *ver* against the previous successful build and
    upload the textual diff to s3; silently skips when either side's
    efi data is unavailable."""
    prev_ver = previous_successful_build_ver(ver)
    if 0 == prev_ver:
        return
    efi_path_curr = get_efi_out(ver)
    if not efi_path_curr:
        print("didn't find efi output for %s" % str(ver))
        return
    efi_path_prev = get_efi_out(prev_ver)
    if not efi_path_prev:
        print("didn't find efi output for %s" % str(prev_ver))
        return
    # strip object-file path prefixes so symbols compare across trees
    obj_file_splitters = ["obj-rel\\", "INTEL\\"]
    efi1 = efiparse.parse_file(efi_path_prev, obj_file_splitters)
    efi2 = efiparse.parse_file(efi_path_curr, obj_file_splitters)
    diff = efiparse.diff(efi1, efi2)
    s = str(diff)
    s = s + "\n" + efi_diff_as_txt(diff)
    s3dir = "sumatrapdf/buildbot/%s/" % str(ver)
    s3.upload_data_public_with_content_type(
        s, s3dir + "efi_diff.txt", silent=True)
# TODO: maybe add debug build and 64bit release?
# skip_release is just for testing
def build_version(ver, skip_release=False):
    """Build revision *ver* end to end: release build, /analyze build, efi
    output and diff, stats, index page -- uploading everything to s3 --
    then run the test suite.

    A failed release build short-circuits analyze/efi/tests and sends a
    notification e-mail instead.
    """
    print("Building version %s" % ver)
    clean_release()
    # a hack: checkin_comment_for_ver() might call svn log, which doesn't like
    # unversioned directories (like obj-rel or vs-premake), so we call it here,
    # after clean, to cache the result
    checkin_comment_for_ver(ver)
    svn_update_to_ver(ver)
    s3dir = "sumatrapdf/buildbot/%s/" % ver
    stats = Stats()
    # only run /analyze on newer builds since we didn't have the necessary
    # makefile logic before
    run_analyze = int(ver) >= g_first_analyze_build
    if not skip_release:
        start_time = datetime.datetime.now()
        build_release(stats, ver)
        dur = datetime.datetime.now() - start_time
        print("%s for release build" % str(dur))
        if stats.rel_failed:
            # don't bother running analyze if release failed
            run_analyze = False
            s3.upload_data_public_with_content_type(
                stats.rel_build_log, s3dir + "release_build_log.txt", silent=True)
    if not stats.rel_failed:
        build_and_upload_efi_out(ver)
    if run_analyze:
        start_time = datetime.datetime.now()
        build_analyze(stats, ver)
        dur = datetime.datetime.now() - start_time
        print("%s for analyze build" % str(dur))
        html = gen_analyze_html(stats, ver)
        p = os.path.join(get_logs_cache_dir(), "%s_analyze.html" % str(ver))
        open(p, "w").write(html)
        s3.upload_data_public_with_content_type(
            html, s3dir + "analyze.html", silent=True)
    if not stats.rel_failed:
        build_and_upload_efi_txt_diff(ver)
    # TODO: it appears we might throw an exception after uploading analyze.html but
    # before/during uploading stats.txt. Would have to implement transactional
    # multi-upload to be robust against that, so will just let it be
    stats_txt = stats.to_s()
    s3.upload_data_public_with_content_type(
        stats_txt, s3dir + "stats.txt", silent=True)
    html = build_index_html(stats_for_ver, checkin_comment_for_ver)
    s3.upload_data_public_with_content_type(
        html, "sumatrapdf/buildbot/index.html", silent=True)
    json_s = build_sizes_json(get_stats_cache_dir, stats_for_ver)
    s3.upload_data_public_with_content_type(
        json_s, "sumatrapdf/buildbot/sizes.js", silent=True)
    if stats.rel_failed:
        email_build_failed(ver)
        return # don't run tests if build fails
    # NOTE(review): prefer "err is not None" over "err != None"
    err = runtests.run_tests()
    if err != None:
        s3.upload_data_public_with_content_type(
            err, s3dir + "tests_error.txt", silent=True)
        email_tests_failed(ver, err)
        print("Tests failed. Error message:\n" + err)
    else:
        print("Tests passed!")
def test_build_html_index():
    """Manual debugging helper: generate index.html locally, then exit.
    Not part of the normal buildbot flow (note the unconditional exit)."""
    print("test_build_html_index()")
    html = build_index_html(stats_for_ver, checkin_comment_for_ver)
    print("after build_index_html()")
    import codecs
    codecs.open("index.html", "w", "utf8").write(html)
    print("after write")
    sys.exit(1)
g_email_to = ["[email protected]", "[email protected]"]
def email_tests_failed(ver, err):
    """E-mail the notifier list that the test suite failed for build *ver*."""
    s3_url_start = "http://kjkpub.s3.amazonaws.com/sumatrapdf/buildbot/"
    conf = load_config()
    if not conf.HasNotifierEmail():
        print("email_tests_failed() not ran because not c.HasNotifierEmail()")
        return
    sender, senderpwd = conf.GetNotifierEmailAndPwdMustExist()
    subject = "SumatraPDF tests failed for build %s" % str(ver)
    checkin_url = "https://code.google.com/p/sumatrapdf/source/detail?r=%s" % str(ver)
    log_url = s3_url_start + str(ver) + "/tests_error.txt"
    buildbot_index_url = s3_url_start + "index.html"
    # each section is followed by a blank line, exactly as before
    parts = ("Checkin: %s" % checkin_url,
             "Build log: %s" % log_url,
             "Buildbot: %s" % buildbot_index_url,
             "Error: %s" % err)
    body = "".join(p + "\n\n" for p in parts)
    util.sendmail(sender, senderpwd, g_email_to, subject, body)
def email_build_failed(ver):
    """E-mail the notifier list that the release build failed for build *ver*."""
    s3_url_start = "http://kjkpub.s3.amazonaws.com/sumatrapdf/buildbot/"
    conf = load_config()
    if not conf.HasNotifierEmail():
        return
    sender, senderpwd = conf.GetNotifierEmailAndPwdMustExist()
    subject = "SumatraPDF build %s failed" % str(ver)
    checkin_url = "https://code.google.com/p/sumatrapdf/source/detail?r=%s" % str(ver)
    build_log_url = s3_url_start + str(ver) + "/release_build_log.txt"
    buildbot_index_url = s3_url_start + "index.html"
    # each section is followed by a blank line, exactly as before
    parts = ("Checkin: %s" % checkin_url,
             "Build log: %s" % build_log_url,
             "Buildbot: %s" % buildbot_index_url)
    body = "".join(p + "\n\n" for p in parts)
    util.sendmail(sender, senderpwd, g_email_to, subject, body)
# for testing
def build_curr(force=False):
    """Build the locally checked-out revision.

    Skips the build if that revision was already built, unless *force*.
    """
    local_ver, latest_ver = util.get_svn_versions()
    print("local ver: %s, latest ver: %s" % (local_ver, latest_ver))
    already_built = has_already_been_built(local_ver)
    if not already_built or force:
        build_version(local_ver)
    else:
        print("We have already built revision %s" % local_ver)
def build_version_retry(ver, try_count=2):
    """Build *ver*, retrying transient failures up to *try_count* times.

    It can happen that we get a valid but intermittent exception, e.g. an
    svn command failing due to a server hiccup. In that case we retry after
    waiting 1 minute in between, but only up to *try_count* attempts.
    AssertionError comes from our own code and is never retried.
    """
    while True:
        try:
            build_version(ver)
            # success: return here. The original `return` sat after the
            # except block at loop-body level, so it also ran after a
            # *handled* exception and silently skipped the retry.
            return
        except Exception as e:
            # rethrow assert() exceptions, they come from our code
            # and we should stop
            if isinstance(e, AssertionError):
                print("assert happened:")
                print(str(e))
                traceback.print_exc()
                raise e
            print(str(e))
            traceback.print_exc()
            try_count -= 1
            if 0 == try_count:
                raise
            time.sleep(60)
def buildbot_loop():
    """Main buildbot loop: poll svn, build every new revision, and kick off
    a pre-release build after a quiet period with no checkins."""
    global g_time_of_last_build
    while True:
        # util.get_svn_versions() might throw an exception due to
        # temporary network problems, so retry
        try:
            (local_ver, latest_ver) = util.get_svn_versions()
        except:
            print("get_svn_versions() threw an exception")
            time.sleep(120)
            continue
        print("local ver: %s, latest ver: %s" % (local_ver, latest_ver))
        # build every revision between the local checkout and svn HEAD
        revs_built = 0
        while int(local_ver) <= int(latest_ver):
            if not has_already_been_built(local_ver):
                build_version_retry(local_ver)
                revs_built += 1
            else:
                print("We have already built revision %s" % local_ver)
            local_ver = str(int(local_ver) + 1)
        delete_old_logs()
        # don't sleep if we built something in this cycle. a new checkin might
        # have happened while we were working
        if revs_built > 0:
            g_time_of_last_build = datetime.datetime.now()
            continue
        # no new checkins: maybe it's time for a pre-release build
        secs_until_prerelease = None
        if g_time_of_last_build is not None:
            td = datetime.datetime.now() - g_time_of_last_build
            secs_until_prerelease = TIME_BETWEEN_PRE_RELEASE_BUILDS_IN_SECS - \
                int(td.total_seconds())
            if secs_until_prerelease < 0:
                build_pre_release()
                g_time_of_last_build = None
        print("Sleeping for 15 minutes, %s seconds until pre-release" %
              str(secs_until_prerelease))
        time.sleep(60 * 15) # 15 mins
def ignore_pre_release_build_error(s):
    """Return True for pre-release failures that should not stop the buildbot.

    It's possible we did a pre-release build outside of buildbot, in which
    case the upload fails with "already exists in s3"; that isn't fatal.
    """
    return "already exists in s3" in s
def build_pre_release():
    """Run a pre-release build, tolerating 'already uploaded' failures."""
    try:
        dst = os.path.join("scripts", "cert.pfx")
        if not os.path.exists(dst):
            shutil.copyfile(cert_path(), dst)
        print("Building pre-release")
        build.build_pre_release()
    except BaseException as e:
        msg = str(e)
        print(msg)
        # a bit of a hack: not every kind of failure should stop the buildbot
        if ignore_pre_release_build_error(msg):
            return
        traceback.print_exc()
        raise
def test_email_tests_failed():
    """Manual smoke test of the tests-failed notification e-mail; exits."""
    email_tests_failed("200", "hello")
    sys.exit(1)
def main():
    """Validate the environment, switch to the buildbot source tree and
    enter the build loop (never returns under normal operation)."""
    cert_path() # early check and ensures value is memoized
    verify_efi_present()
    verify_started_in_right_directory()
    # to avoid problems, we build a separate source tree, just for the buildbot
    src_path = os.path.join("..", "sumatrapdf_buildbot")
    verify_path_exists(src_path)
    conf = load_config()
    s3.set_secrets(conf.aws_access, conf.aws_secret)
    s3.set_bucket("kjkpub")
    os.chdir(src_path)
    # manual-test hooks, normally disabled:
    # test_email_tests_failed()
    #build_version("8190", skip_release=True)
    # test_build_html_index()
    # build_sizes_json()
    # build_curr(force=True)
    # TODO: add a try/catch and e-mail if failed for unexpected reasons
    buildbot_loop()
# script entry point
if __name__ == "__main__":
    main()
| ibb-zimmers/betsynetpdf | sumatrapdf/scripts/buildbot.py | Python | gpl-3.0 | 21,187 |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
import warnings
from distutils.version import LooseVersion
import pytest
import numpy as np
from astropy import __minimum_asdf_version__
asdf = pytest.importorskip('asdf', minversion=__minimum_asdf_version__)
from asdf import util
from asdf.tests import helpers
from asdf import AsdfFile
import asdf
import astropy.units as u
from astropy.modeling.core import fix_inputs
from astropy.modeling import models as astmodels
def custom_and_analytical_inverse():
    """Build a two-input compound model whose inverse is assigned by hand."""
    forward = astmodels.Polynomial1D(1) & astmodels.Polynomial1D(1)
    backward = astmodels.Polynomial1D(1) & astmodels.Polynomial1D(1)
    forward.inverse = backward
    return forward
# Representative model instances fed to test_single_model below; every entry
# must round-trip through the ASDF transform schemas.
test_models = [
    astmodels.Identity(2), astmodels.Polynomial1D(2, c0=1, c1=2, c2=3),
    astmodels.Polynomial2D(1, c0_0=1, c0_1=2, c1_0=3),
    astmodels.Shift(2.),
    astmodels.Hermite1D(2, c0=2, c1=3, c2=0.5),
    astmodels.Legendre1D(2, c0=2, c1=3, c2=0.5),
    astmodels.Chebyshev1D(2, c0=2, c1=3, c2=0.5),
    astmodels.Chebyshev2D(1, 1, c0_0=1, c0_1=2, c1_0=3),
    astmodels.Legendre2D(1, 1, c0_0=1, c0_1=2, c1_0=3),
    astmodels.Hermite2D(1, 1, c0_0=1, c0_1=2, c1_0=3),
    astmodels.Scale(3.4), astmodels.RotateNative2Celestial(5.63, -72.5, 180),
    astmodels.Multiply(3), astmodels.Multiply(10*u.m),
    astmodels.RotateCelestial2Native(5.63, -72.5, 180),
    astmodels.EulerAngleRotation(23, 14, 2.3, axes_order='xzx'),
    astmodels.Mapping((0, 1), n_inputs=3),
    astmodels.Shift(2.*u.deg),
    astmodels.Scale(3.4*u.deg),
    astmodels.RotateNative2Celestial(5.63*u.deg, -72.5*u.deg, 180*u.deg),
    astmodels.RotateCelestial2Native(5.63*u.deg, -72.5*u.deg, 180*u.deg),
    astmodels.RotationSequence3D([1.2, 2.3, 3.4, .3], 'xyzx'),
    astmodels.SphericalRotationSequence([1.2, 2.3, 3.4, .3], 'xyzy'),
    custom_and_analytical_inverse(),
]
# Instantiate every model exported by astropy.modeling.math (with default
# parameters) and add them to the round-trip test set as well.
math_models = []
for kl in astmodels.math.__all__:
    klass = getattr(astmodels.math, kl)
    math_models.append(klass())
test_models.extend(math_models)
def test_transforms_compound(tmpdir):
    """A nested compound model survives an ASDF round trip.

    Note: '&' and '+' bind tighter than '|', so the expression groups as
    ((Shift & Shift) | Sky2Pix_TAN | Rotation2D | (Affine + Rotation2D)).
    """
    tree = {
        'compound':
            astmodels.Shift(1) & astmodels.Shift(2) |
            astmodels.Sky2Pix_TAN() |
            astmodels.Rotation2D() |
            astmodels.AffineTransformation2D([[2, 0], [0, 2]], [42, 32]) +
            astmodels.Rotation2D(32)
    }
    helpers.assert_roundtrip_tree(tree, tmpdir)
def test_inverse_transforms(tmpdir):
    """A manually assigned inverse must survive an ASDF round trip."""
    rotation = astmodels.Rotation2D(32)
    rotation.inverse = astmodels.Rotation2D(45)
    real_rotation = astmodels.Rotation2D(32)
    tree = {'rotation': rotation, 'real_rotation': real_rotation}

    def verify(ff):
        assert ff.tree['rotation'].inverse.angle == 45

    helpers.assert_roundtrip_tree(tree, tmpdir, asdf_check_func=verify)
@pytest.mark.parametrize(('model'), test_models)
def test_single_model(tmpdir, model):
    """Each model in test_models survives an ASDF round trip on its own."""
    with warnings.catch_warnings():
        # Some schema files are missing from asdf<=2.4.2 which causes warnings
        if LooseVersion(asdf.__version__) <= '2.4.2':
            warnings.filterwarnings('ignore', 'Unable to locate schema file')
        tree = {'single_model': model}
        helpers.assert_roundtrip_tree(tree, tmpdir)
def test_name(tmpdir):
    """A model's .name attribute round-trips through ASDF."""
    tree = {'rot': astmodels.Rotation2D(23, name='foo')}

    def verify(ff):
        assert ff.tree['rot'].name == 'foo'

    helpers.assert_roundtrip_tree(tree, tmpdir, asdf_check_func=verify)
def test_zenithal_with_arguments(tmpdir):
    """Sky2Pix_AZP with explicit parameters round-trips through ASDF."""
    model = astmodels.Sky2Pix_AZP(0.5, 0.3)
    helpers.assert_roundtrip_tree({'azp': model}, tmpdir)
def test_naming_of_compound_model(tmpdir):
    """Regression test for issue #87: a renamed compound model keeps its name."""
    model = (astmodels.Shift(1) | astmodels.Scale(2)).rename('compound_model')

    def asdf_check(ff):
        assert ff.tree['model'].name == 'compound_model'

    helpers.assert_roundtrip_tree({'model': model}, tmpdir,
                                  asdf_check_func=asdf_check)
def test_generic_projections(tmpdir):
    """Every generic Sky2Pix/Pix2Sky projection pair round-trips."""
    from astropy.io.misc.asdf.tags.transform import projections

    # the registry tag names themselves are not needed here, only the values
    for name, params, version in projections._generic_projections.values():
        tree = {
            'forward': util.resolve_name(
                'astropy.modeling.projections.Sky2Pix_%s' % name)(),
            'backward': util.resolve_name(
                'astropy.modeling.projections.Pix2Sky_%s' % name)(),
        }
        helpers.assert_roundtrip_tree(tree, tmpdir)
def test_tabular_model(tmpdir):
    """Tabular1D and Tabular2D models survive an ASDF round trip."""
    pts = np.arange(0, 5)
    lut = [1., 10, 2, 45, -3]
    helpers.assert_roundtrip_tree(
        {'model': astmodels.Tabular1D(points=pts, lookup_table=lut)}, tmpdir)

    table = np.array([[3., 0., 0.],
                      [0., 2., 0.],
                      [0., 0., 0.]])
    pts2d = ([1, 2, 3], [1, 2, 3])
    model2d = astmodels.Tabular2D(pts2d, lookup_table=table,
                                  bounds_error=False, fill_value=None,
                                  method='nearest')
    helpers.assert_roundtrip_tree({'model': model2d}, tmpdir)
def test_bounding_box(tmpdir):
    """A model's bounding_box attribute round-trips through ASDF."""
    model = astmodels.Shift(1) & astmodels.Shift(2)
    model.bounding_box = ((1, 3), (2, 4))
    helpers.assert_roundtrip_tree({'model': model}, tmpdir)
def test_linear1d(tmpdir):
    """Linear1D with default parameters round-trips through ASDF."""
    helpers.assert_roundtrip_tree({'model': astmodels.Linear1D()}, tmpdir)
def test_linear1d_quantity(tmpdir):
    """Linear1D with Quantity slope and intercept round-trips."""
    model = astmodels.Linear1D(1 * u.nm, 1 * (u.nm / u.pixel))
    helpers.assert_roundtrip_tree({'model': model}, tmpdir)
def test_tabular_model_units(tmpdir):
    """Tabular models with units on points and lookup table round-trip."""
    pts = np.arange(0, 5) * u.pix
    lut = [1., 10, 2, 45, -3] * u.nm
    model = astmodels.Tabular1D(points=pts, lookup_table=lut)
    helpers.assert_roundtrip_tree({'model': model}, tmpdir)

    table = np.array([[3., 0., 0.],
                      [0., 2., 0.],
                      [0., 0., 0.]]) * u.nm
    pts2d = ([1, 2, 3], [1, 2, 3]) * u.pix
    model2d = astmodels.Tabular2D(pts2d, lookup_table=table,
                                  bounds_error=False, fill_value=None,
                                  method='nearest')
    helpers.assert_roundtrip_tree({'model': model2d}, tmpdir)
def test_fix_inputs(tmpdir):
    """fix_inputs() compound models round-trip (keyed by name and by index)."""
    with warnings.catch_warnings():
        # Some schema files are missing from asdf<=2.4.2 which causes warnings
        if LooseVersion(asdf.__version__) <= '2.4.2':
            warnings.filterwarnings('ignore', 'Unable to locate schema file')

        base = astmodels.Pix2Sky_TAN() | astmodels.Rotation2D()
        tree = {'compound': fix_inputs(base, {'x': 45}),
                'compound1': fix_inputs(base, {0: 45})}
        helpers.assert_roundtrip_tree(tree, tmpdir)
def test_fix_inputs_type():
    """fix_inputs() and '&' must reject operands that are not models."""
    with pytest.raises(TypeError):
        tree = {
            'compound': fix_inputs(3, {'x': 45})
        }
        # NOTE(review): unreachable - fix_inputs raises above. If it ever ran,
        # `tmpdir` would raise NameError since it is not a parameter here.
        helpers.assert_roundtrip_tree(tree, tmpdir)
    with pytest.raises(AttributeError):
        tree = {
            'compound': astmodels.Pix2Sky_TAN() & {'x': 45}
        }
        # NOTE(review): likewise unreachable; '&' raises AttributeError first
        helpers.assert_roundtrip_tree(tree, tmpdir)
# Compound model with a hand-assigned inverse, embedded in larger models below.
comp_model = custom_and_analytical_inverse()
@pytest.mark.parametrize(('model'), [astmodels.Shift(1) & astmodels.Shift(2) | comp_model,
                                     comp_model | astmodels.Shift(1) & astmodels.Shift(2),
                                     astmodels.Shift(1) & comp_model,
                                     comp_model & astmodels.Shift(1)
                                     ])
def test_custom_and_analytical(model, tmpdir):
    """A custom inverse survives when nested inside larger compound models."""
    fa = AsdfFile()
    fa.tree['model'] = model
    file_path = str(tmpdir.join('custom_and_analytical_inverse.asdf'))
    fa.write_to(file_path)
    with asdf.open(file_path) as f:
        assert f.tree['model'].inverse is not None
| stargaser/astropy | astropy/io/misc/asdf/tags/transform/tests/test_transform.py | Python | bsd-3-clause | 7,934 |
# Copyright 2012, 2013 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
""":class:`TimestampedModel` tests."""
from __future__ import (
absolute_import,
print_function,
unicode_literals,
)
str = None
__metaclass__ = type
__all__ = []
from datetime import datetime
from django.db import transaction
from maasserver.models.timestampedmodel import now
from maasserver.testing.factory import factory
from maasserver.testing.testcase import MAASServerTestCase
from maasserver.tests.models import TimestampedModelTestModel
from maastesting.djangotestcase import (
TestModelMixin,
TransactionTestCase,
)
class TimestampedModelTest(TestModelMixin, MAASServerTestCase):
    """Tests for `TimestampedModel`'s created/updated bookkeeping."""

    app = 'maasserver.tests'

    def test_created_populated_when_object_saved(self):
        instance = TimestampedModelTestModel()
        instance.save()
        self.assertIsNotNone(instance.created)

    def test_updated_populated_when_object_saved(self):
        instance = TimestampedModelTestModel()
        instance.save()
        self.assertIsNotNone(instance.updated)

    def test_updated_and_created_are_the_same_after_first_save(self):
        instance = TimestampedModelTestModel()
        instance.save()
        self.assertEqual(instance.created, instance.updated)

    def test_created_not_modified_by_subsequent_calls_to_save(self):
        instance = TimestampedModelTestModel()
        instance.save()
        original_created = instance.created
        instance.save()
        self.assertEqual(original_created, instance.created)
class TimestampedModelTransactionalTest(TestModelMixin, TransactionTestCase):
    """Transactional tests for `TimestampedModel` timestamps."""

    app = 'maasserver.tests'

    def test_created_bracketed_by_before_and_after_time(self):
        lower_bound = now()
        instance = TimestampedModelTestModel()
        instance.save()
        transaction.commit()
        upper_bound = now()
        self.assertLessEqual(lower_bound, instance.created)
        self.assertGreaterEqual(upper_bound, instance.created)

    def test_updated_is_updated_when_object_saved(self):
        instance = TimestampedModelTestModel()
        instance.save()
        first_updated = instance.updated
        transaction.commit()
        instance.save()
        self.assertLessEqual(first_updated, instance.updated)
class UtilitiesTest(MAASServerTestCase):
    """Tests for the module-level `now()` helper."""

    def test_now_returns_datetime(self):
        self.assertIsInstance(now(), datetime)

    def test_now_returns_same_datetime_inside_transaction(self):
        first_value = now()
        self.assertEqual(first_value, now())
class UtilitiesTransactionalTest(TransactionTestCase):
    """Transactional tests for the `now()` helper."""

    def test_now_returns_transaction_time(self):
        before = now()
        # Perform a write database operation.
        factory.make_node()
        transaction.commit()
        self.assertLessEqual(before, now())
| cloudbase/maas | src/maasserver/models/tests/test_timestampedmodel.py | Python | agpl-3.0 | 2,803 |
from django.contrib import admin
from charcoallog.investments.models import NewInvestment, NewInvestmentDetails
class NewInvestmentModelAdmin(admin.ModelAdmin):
    """Admin for investments that have no NewInvestmentDetails row."""

    list_display = ('user_name', 'date', 'money', 'kind', 'tx_op', 'brokerage')
    readonly_fields = ('user_name',)
    search_fields = ('date',)
    date_hierarchy = 'date'

    def get_queryset(self, request):
        # restrict the listing to investments without a detail record
        base_qs = super().get_queryset(request)
        return base_qs.filter(newinvestmentdetails=None)
# Expose NewInvestment in the Django admin via the customized ModelAdmin.
admin.site.register(NewInvestment, NewInvestmentModelAdmin)
class NewInvestmentDetailsModelAdmin(admin.ModelAdmin):
    """Admin for investments together with their detail fields."""

    list_display = (
        'user_name', 'date', 'money', 'kind', 'tx_op', 'brokerage',
        'which_target', 'segment', 'tx_or_price', 'quant',
    )
    readonly_fields = ('user_name',)
    search_fields = ('date',)
    date_hierarchy = 'date'
# Expose NewInvestmentDetails in the Django admin.
admin.site.register(NewInvestmentDetails, NewInvestmentDetailsModelAdmin)
| hpfn/charcoallog | charcoallog/investments/admin.py | Python | gpl-3.0 | 915 |
#!/usr/bin/env python
"""Tests for HTTP API."""
import json
from grr.gui import api_aff4_object_renderers
from grr.gui import api_call_renderers
from grr.gui import http_api
from grr.lib import flags
from grr.lib import registry
from grr.lib import test_lib
from grr.lib import utils
from grr.lib.rdfvalues import structs as rdf_structs
from grr.proto import tests_pb2
class SampleGetRendererArgs(rdf_structs.RDFProtoStruct):
  """RDF wrapper for the SampleGetRendererArgs protobuf."""
  protobuf = tests_pb2.SampleGetRendererArgs
class SampleGetRenderer(api_call_renderers.ApiCallRenderer):
  """Test renderer echoing back the GET method, path and 'foo' value."""

  args_type = SampleGetRendererArgs

  def Render(self, args, token=None):
    return dict(method="GET", path=args.path, foo=args.foo)
class SampleGetRendererWithAdditionalArgsArgs(rdf_structs.RDFProtoStruct):
  """RDF wrapper for the SampleGetRendererWithAdditionalArgsArgs protobuf."""
  protobuf = tests_pb2.SampleGetRendererWithAdditionalArgsArgs
class SampleGetRendererWithAdditionalArgs(api_call_renderers.ApiCallRenderer):
  """Test renderer that also echoes any parsed additional arguments."""

  args_type = SampleGetRendererWithAdditionalArgsArgs

  additional_args_types = {
      "AFF4Object": api_aff4_object_renderers.ApiAFF4ObjectRendererArgs,
      "RDFValueCollection": (api_aff4_object_renderers.
                             ApiRDFValueCollectionRendererArgs)
  }

  def Render(self, args, token=None):
    result = dict(method="GET", path=args.path, foo=args.foo)
    if args.additional_args:
      # stringify each additional argument for the JSON response
      result["additional_args"] = [str(arg) for arg in args.additional_args]
    return result
class TestHttpRoutingInit(registry.InitHook):
  """Init hook that registers HTTP routes for the sample test renderers."""
  def RunOnce(self):
    # both routes capture the remainder of the URL in the 'path' argument
    http_api.RegisterHttpRouteHandler(
        "GET", "/test_sample/<path:path>", SampleGetRenderer)
    http_api.RegisterHttpRouteHandler(
        "GET", "/test_sample_with_additional_args/<path:path>",
        SampleGetRendererWithAdditionalArgs)
class RenderHttpResponseTest(test_lib.GRRBaseTest):
  """Test for api_call_renderers.RenderHttpResponse logic."""
  def _CreateRequest(self, method, path, query_parameters=None):
    """Build a minimal fake HTTP request object for routing tests."""
    if not query_parameters:
      query_parameters = {}
    request = utils.DataObject()
    request.method = method
    request.path = path
    request.scheme = "http"
    request.environ = {
        "SERVER_NAME": "foo.bar",
        "SERVER_PORT": 1234
    }
    request.user = "test"
    if method == "GET":
      request.GET = query_parameters
    request.META = {}
    return request
  def _RenderResponse(self, request):
    """Render the request; strip the ")]}'\\n" JSON guard prefix if present."""
    response = http_api.RenderHttpResponse(request)
    if response.content.startswith(")]}'\n"):
      response.content = response.content[5:]
    return response
  def testReturnsRendererMatchingUrlAndMethod(self):
    renderer, _ = http_api.GetRendererForHttpRequest(
        self._CreateRequest("GET", "/test_sample/some/path"))
    self.assertTrue(isinstance(renderer, SampleGetRenderer))
  def testPathParamsAreReturnedWithMatchingRenderer(self):
    _, path_params = http_api.GetRendererForHttpRequest(
        self._CreateRequest("GET", "/test_sample/some/path"))
    self.assertEqual(path_params, {"path": "some/path"})
  def testRaisesIfNoRendererMatchesUrl(self):
    self.assertRaises(api_call_renderers.ApiCallRendererNotFoundError,
                      http_api.GetRendererForHttpRequest,
                      self._CreateRequest("GET",
                                          "/some/missing/path"))
  def testRendersGetRendererCorrectly(self):
    response = self._RenderResponse(
        self._CreateRequest("GET", "/test_sample/some/path"))
    self.assertEqual(
        json.loads(response.content),
        {"method": "GET",
         "path": "some/path",
         "foo": ""})
    self.assertEqual(response.status_code, 200)
  def testQueryParamsArePassedIntoRendererArgs(self):
    response = self._RenderResponse(
        self._CreateRequest("GET", "/test_sample/some/path",
                            query_parameters={"foo": "bar"}))
    self.assertEqual(
        json.loads(response.content),
        {"method": "GET",
         "path": "some/path",
         "foo": "bar"})
  def testRouteArgumentTakesPrecedenceOverQueryParams(self):
    response = self._RenderResponse(
        self._CreateRequest("GET", "/test_sample/some/path",
                            query_parameters={"path": "foobar"}))
    self.assertEqual(
        json.loads(response.content),
        {"method": "GET",
         "path": "some/path",
         "foo": ""})
  def testAdditionalArgumentsAreParsedCorrectly(self):
    additional_args = http_api.FillAdditionalArgsFromRequest(
        {
            "AFF4Object.limit_lists": "10",
            "RDFValueCollection.with_total_count": "1"
        }, {
            "AFF4Object": api_aff4_object_renderers.ApiAFF4ObjectRendererArgs,
            "RDFValueCollection":
            api_aff4_object_renderers.ApiRDFValueCollectionRendererArgs
        })
    # sort so assertions below are independent of dict iteration order
    additional_args = sorted(additional_args, key=lambda x: x.name)
    self.assertListEqual(
        [x.name for x in additional_args],
        ["AFF4Object", "RDFValueCollection"])
    self.assertListEqual(
        [x.type for x in additional_args],
        ["ApiAFF4ObjectRendererArgs", "ApiRDFValueCollectionRendererArgs"])
    self.assertListEqual(
        [x.args for x in additional_args],
        [api_aff4_object_renderers.ApiAFF4ObjectRendererArgs(limit_lists=10),
         api_aff4_object_renderers.ApiRDFValueCollectionRendererArgs(
             with_total_count=True)])
  def testAdditionalArgumentsAreFoundAndPassedToTheRenderer(self):
    response = self._RenderResponse(
        self._CreateRequest("GET",
                            "/test_sample_with_additional_args/some/path",
                            query_parameters={"foo": "42"}))
    self.assertEqual(
        json.loads(response.content),
        {"method": "GET",
         "path": "some/path",
         "foo": "42"})
def main(argv):
  """Test entry point; delegates to the GRR test runner."""
  test_lib.main(argv)
# script entry point
if __name__ == "__main__":
  flags.StartMain(main)
| pchaigno/grr | gui/http_api_test.py | Python | apache-2.0 | 6,014 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-07-11 12:30
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django makemigrations; do not hand-edit an applied
    # migration. Makes both self-referential Contribution foreign keys
    # nullable with on_delete=CASCADE.
    dependencies = [
        ('contributions', '0006_auto_20170711_1302'),
    ]
    operations = [
        migrations.AlterField(
            model_name='contribution',
            name='attached_to',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='attachments', to='contributions.Contribution'),
        ),
        migrations.AlterField(
            model_name='contribution',
            name='in_reply_to',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='replies', to='contributions.Contribution'),
        ),
    ]
| stadtgestalten/stadtgestalten | grouprise/features/contributions/migrations/0007_auto_20170711_1430.py | Python | agpl-3.0 | 867 |
import os
import traceback
from time import time, gmtime, strftime
from datetime import date
from commands import getstatusoutput, getoutput
from shutil import copy2
from PilotErrors import PilotErrors
from pUtil import tolog, readpar, timeStamp, getBatchSystemJobID, getCPUmodel, PFCxml, updateMetadata, addSkippedToPFC,\
makeHTTPUpdate, tailPilotErrorDiag, isLogfileCopied, updateJobState, updateXMLWithSURLs, getMetadata, toPandaLogger,\
getSiteInformation, getExperiment, readStringFromFile, merge_dictionaries, updateXMLWithEndpoints, isAnalysisJob
from JobState import JobState
from FileStateClient import getFilesOfState
from FileHandling import getOSTransferDictionaryFilename, getOSTransferDictionary, getHighestPriorityError
class PandaServerClient:
"""
Client to the Panda Server
Methods for communicating with the Panda Server
"""
# private data members
__errorString = "!!WARNING!!1992!! %s" # default error string
__error = PilotErrors() # PilotErrors object
__pilot_version_tag = ""
__pilot_initdir = ""
__jobSchedulerId = ""
__pilotId = ""
__updateServer = True
__jobrec = False
__pshttpurl = ""
    def __init__(self, pilot_version="", pilot_version_tag="", pilot_initdir="", jobSchedulerId=None, pilotId=None, updateServer=True, jobrec=False, pshttpurl=""):
        """ Default initialization.

        :param pilot_version: pilot version string (reported in pilotID)
        :param pilot_version_tag: pilot version tag (reported in pilotID)
        :param pilot_initdir: directory the pilot was started in
        :param jobSchedulerId: scheduler id reported as node['schedulerID']
        :param pilotId: pilot id reported as node['pilotID']
        :param updateServer: presumably suppresses server updates when False - not used in this chunk, verify
        :param jobrec: job recovery mode flag
        :param pshttpurl: Panda server URL - not used in this chunk, verify
        """
        self.__pilot_version_tag = pilot_version_tag
        self.__pilot_initdir = pilot_initdir
        self.__jobSchedulerId = jobSchedulerId
        self.__pilotId = pilotId
        self.__updateServer = updateServer
        self.__jobrec = jobrec
        self.__pshttpurl = pshttpurl
        self.__pilot_version = pilot_version
def getNodeStructureFromFile(self, workDir, jobId):
""" get the node structure from the Job State file """
JS = JobState()
_node = None
# open the job state file
tolog("workDir: %s" % (workDir))
tolog("jobId: %s" % (jobId))
filename = JS.getFilename(workDir, jobId)
tolog("filename: %s" % (filename))
if os.path.exists(filename):
# load the objects
if JS.get(filename):
# decode the job state info
_job, _site, _node, _recoveryAttempt = JS.decode()
else:
tolog("JS.decode() failed to load objects")
else:
tolog("%s does not exist" % (filename))
return _node
def copyNodeStruct4NG(self, node):
""" store the node structure for ARC """
from pickle import dump
try:
_fname = "%s/panda_node_struct.pickle" % os.getcwd()
fp = open(_fname, "w")
except Exception, e:
tolog("!!WARNING!!2999!! Could not store panda node structure: %s" % str(e))
else:
try:
dump(node, fp)
fp.close()
except Exception, e:
tolog("!!WARNING!!2999!! Could not dump panda node structure: %s" % str(e))
else:
tolog("Stored panda node structure at: %s" % (_fname))
tolog("node : %s" % (str(node)))
try:
copy2(_fname, self.__pilot_initdir)
except Exception, e:
tolog("!!WARNING!!2999!! Could not copy panda node structure to init dir: %s" % str(e))
else:
tolog("Copied panda node structure (%s) to init dir: %s" % (_fname, self.__pilot_initdir))
def jobMetric(self, key="", value=""):
""" Add 'key'='value' to the jobMetrics """
# Use this method to avoid missing the separating space between key-value pairs in the job metrics
if key != "" and value != "":
# Add a space at the end since there might be several key values added
jobMetric = "%s=%s " % (key, value)
else:
jobMetric = ""
return jobMetric
    def getJobMetrics(self, job, site, workerNode):
        """ Return a properly formatted job metrics string.

        Builds a space-separated 'key=value' string (coreCount, event
        counts, Yoda/HPC metrics, stage-out and object store info, worker
        node features) and truncates it to the server's 500 char limit.
        """
        # style: Number of events read | Number of events written | vmPeak maximum | vmPeak average | RSS average | JEM activation
        # format: nEvents=<int> nEventsW=<int> vmPeakMax=<int> vmPeakMean=<int> RSSMean=<int> JEM=<string>
        # hs06=<float> shutdownTime=<int> cpuFactor=<float> cpuLimit=<float> diskLimit=<float> jobStart=<int> memLimit=<int> runLimit=<float>
        # get the experiment object
        thisExperiment = getExperiment(job.experiment)
        # determine the core count; ATHENA_PROC_NUMBER (if set) wins
        if "HPC_HPC" in readpar('catchall'):
            if job.coreCount is None:
                job.coreCount = 0
        else:
            if job.coreCount:
                # Always use the ATHENA_PROC_NUMBER first, if set
                if os.environ.has_key('ATHENA_PROC_NUMBER'):
                    try:
                        job.coreCount = int(os.environ['ATHENA_PROC_NUMBER'])
                    except Exception, e:
                        tolog("ATHENA_PROC_NUMBER is not properly set: %s (will use existing job.coreCount value)" % (e))
            else:
                try:
                    job.coreCount = int(os.environ['ATHENA_PROC_NUMBER'])
                except:
                    tolog("env ATHENA_PROC_NUMBER is not set. corecount is not set")
        coreCount = job.coreCount
        jobMetrics = ""
        if coreCount is not None and coreCount != "NULL" and coreCount != 'null':
            jobMetrics += self.jobMetric(key="coreCount", value=coreCount)
        if job.nEvents > 0:
            jobMetrics += self.jobMetric(key="nEvents", value=job.nEvents)
        if job.nEventsW > 0:
            jobMetrics += self.jobMetric(key="nEventsW", value=job.nEventsW)
        if job.external_stageout_time:
            jobMetrics += self.jobMetric(key="ExStageoutTime", value=job.external_stageout_time)
        # hpc status
        #if job.mode:
        #    jobMetrics += self.jobMetric(key="mode", value=job.mode)
        #if job.hpcStatus:
        #    jobMetrics += self.jobMetric(key="HPCStatus", value=job.hpcStatus)
        if job.yodaJobMetrics:
            # start/end times are epoch values; format them for the server,
            # and skip min*/max* keys entirely
            for key in job.yodaJobMetrics:
                if key == 'startTime' or key == 'endTime':
                    value = strftime("%Y-%m-%d %H:%M:%S", gmtime(job.yodaJobMetrics[key]))
                    jobMetrics += self.jobMetric(key=key, value=value)
                elif key.startswith("min") or key.startswith("max"):
                    pass
                else:
                    jobMetrics += self.jobMetric(key=key, value=job.yodaJobMetrics[key])
        #if job.HPCJobId:
        #    jobMetrics += self.jobMetric(key="HPCJobId", value=job.HPCJobId)
        # eventservice zip file
        if job.outputZipName and job.outputZipBucketID:
            jobMetrics += self.jobMetric(key="outputZipName", value=os.path.basename(job.outputZipName))
            jobMetrics += self.jobMetric(key="outputZipBucketID", value=job.outputZipBucketID)
        # report alternative stage-out in case alt SE method was used
        # (but not in job recovery mode)
        recovery_mode = False
        if job.filesAltStageOut > 0 and not recovery_mode:
            #_jobMetrics = ""
            #_jobMetrics += " filesAltStageOut=%d" % (job.filesAltStageOut)
            #_jobMetrics += " filesNormalStageOut=%d" % (job.filesNormalStageOut)
            #tolog("Could have reported: %s" % (_jobMetrics))
            # Report which output files were moved to an alternative SE
            filenames = getFilesOfState(site.workdir, job.jobId, state="alt_transferred")
            if filenames != "":
                jobMetrics += self.jobMetric(key="altTransferred", value=filenames)
        # report on which OS bucket the log was written to, if any
        if job.logBucketID != -1:
            jobMetrics += self.jobMetric(key="logBucketID", value=job.logBucketID)
        # only add the JEM bit if explicitly set to YES, otherwise assumed to be NO
        if job.JEM == "YES":
            jobMetrics += self.jobMetric(key="JEM", value=1)
            # old format: jobMetrics += " JEM=%s" % (job.JEM)
        if job.dbTime != "":
            jobMetrics += self.jobMetric(key="dbTime", value=job.dbTime)
        if job.dbData != "":
            jobMetrics += self.jobMetric(key="dbData", value=job.dbData)
        # machine and job features, max disk space used by the payload
        jobMetrics += workerNode.addToJobMetrics(job.result[0], self.__pilot_initdir, job.jobId)
        si = getSiteInformation(job.experiment)
        _jobMetrics = ""
        # report any OS transfers
        #message = self.getOSJobMetrics()
        #if message != "":
        #    _jobMetrics = self.jobMetric(key="OS", value=message)
        #    tolog("Could have added: %s to job metrics" % (_jobMetrics))
        # correct for potential initial and trailing space
        jobMetrics = jobMetrics.lstrip().rstrip()
        if jobMetrics != "":
            tolog('Job metrics=\"%s\"' % (jobMetrics))
        else:
            tolog("No job metrics (all values are zero)")
        # is jobMetrics within allowed size?
        if len(jobMetrics) > 500:
            tolog("!!WARNING!!2223!! jobMetrics out of size (%d)" % (len(jobMetrics)))
            # try to reduce the field size and remove the last entry which might be cut
            jobMetrics = jobMetrics[:500]
            jobMetrics = " ".join(jobMetrics.split(" ")[:-1])
            tolog("jobMetrics has been reduced to: %s" % (jobMetrics))
        return jobMetrics
    # deprecated
    def getOSJobMetrics(self):
        """ Generate the objectstore jobMetrics message.

        Reads the OS transfer dictionary written in the pilot init dir and
        reports the first OS name with its bucket endpoints; returns an
        empty string when nothing was transferred.
        """
        # Message format:
        # OS=<os_name_0>:<os_bucket_endpoint_0>:<os_bucket_endpoint_1>: ..
        # Example:
        # os_name = BNL_OS_0, os_bucket_name = atlas_eventservice_F0 or atlas_logs_3D (where F0 and 3D are examples of file name hashes)
        # -> OS=BNL_OS_0;atlas_eventservice_F0:atlas_logs_3D
        # (note: at least one os_bucket_endpoint will be included in a message, but not necessarily both of them and order is random)
        message = ""
        # Locate the OS transfer dictionary
        filename = getOSTransferDictionaryFilename()
        path = os.path.join(self.__pilot_initdir, filename)
        if os.path.exists(path):
            # Which OS's were used?
            os_names_dictionary = getOSTransferDictionary(path)
            if os_names_dictionary != {}:
                message = ""
                os_names = os_names_dictionary.keys()
                # Note: there should only be one os_name
                if len(os_names) > 1:
                    tolog("!!WARNING!!2345!! Can only report one ddm endpoint (will use first only): %s" % (os_names_dictionary))
                # Which buckets were written to?
                for os_name in os_names_dictionary.keys():
                    message += os_name + ";"
                    bucket_list = os_names_dictionary[os_name]
                    for os_bucket_endpoint in bucket_list:
                        message += os_bucket_endpoint + ":"
                    # Remove the last ':'
                    message = message[:-1]
                    # Ignore any other os_names - there should only be one and we can only report one
                    break
            else:
                tolog("!!WARNING!!3335!! No OS transfers were found in: %s" % (filename))
        else:
            tolog("OS transfer dictionary does not exist, will not report OS transfers in jobMetrics (%s)" % (path))
        return message
def getNodeStructure(self, job, site, workerNode, spaceReport=False, log=None):
    """ define the node structure expected by the server

    Builds the dictionary POSTed to the PanDA job dispatcher for a
    heartbeat/final update: identity fields (node, workdir, site, job id),
    job state and timestamp, pilot/batch ids, high-priority error codes
    and diagnostics, event count, CPU accounting and the jobMetrics string.

    NOTE(review): the spaceReport argument is accepted but not referenced
    in this method body - presumably kept for API compatibility; confirm
    against callers before removing.

    :param job: job object; its result/pilotErrorDiag fields may be MUTATED here
    :param site: site object (sitename used in jobMetrics call)
    :param workerNode: worker node object (nodename is reported)
    :param log: optional log extracts, attached for failed/holding jobs
    :return: node dictionary ready to be sent to the server
    """
    node = {}
    # basic identity and state of this update
    node['node'] = workerNode.nodename
    node['workdir'] = job.workdir
    node['siteName'] = site.sitename
    node['jobId'] = job.jobId
    node['state'] = job.result[0]
    node['timestamp'] = timeStamp()
    if job.attemptNr > -1:
        node['attemptNr'] = job.attemptNr
    if self.__jobSchedulerId:
        node['schedulerID'] = self.__jobSchedulerId
    if self.__pilotId:
        # tag the pilotID string with the state of the new site-mover workflow
        use_newmover = str(readpar('use_newmover')).lower() in ["1", "true"]
        use_newmover_tag = 'NEWMOVER-%s' % ('ON' if use_newmover else 'OFF')
        tolog("Checking if new site movers workflow is enabled: use_newmover=%s" % use_newmover)
        # report the batch system job id, if available
        batchSystemType, _id = getBatchSystemJobID()
        if batchSystemType:
            tolog("Batch system: %s" % batchSystemType)
            tolog("Batch system job ID: %s" % _id)
            node['pilotID'] = "%s|%s|%s|%s|%s" % (self.__pilotId, use_newmover_tag, batchSystemType, self.__pilot_version_tag, self.__pilot_version)
            node['batchID'] = _id
            tolog("Will send batchID: %s and pilotID: %s" % (node['batchID'], node['pilotID']))
        else:
            tolog("Batch system type was not identified (will not be reported)")
            node['pilotID'] = "%s|%s|%s|%s" % (self.__pilotId, use_newmover_tag, self.__pilot_version_tag, self.__pilot_version)
            tolog("Will send pilotID: %s" % node['pilotID'])
    tolog("pilotId: %s" % str(self.__pilotId))
    # attach log extracts for problematic jobs
    if log and (job.result[0] == 'failed' or job.result[0] == 'holding' or "outbound connections" in log):
        node['pilotLog'] = log
    # add the startTime if the file exists
    _filename = 'START_TIME_%s' % (job.jobId)
    _path = os.path.join(self.__pilot_initdir, _filename)
    if os.path.exists(_path):
        startTime = readStringFromFile(_path)
        node['startTime'] = startTime
    # Yoda job metrics (if present) override the start/end times
    if job.yodaJobMetrics:
        if 'startTime' in job.yodaJobMetrics and job.yodaJobMetrics['startTime']:
            node['startTime'] = strftime("%Y-%m-%d %H:%M:%S", gmtime(job.yodaJobMetrics['startTime']))
            #job.yodaJobMetrics['startTime'] = node['startTime']
        if 'endTime' in job.yodaJobMetrics and job.yodaJobMetrics['endTime']:
            node['endTime'] = strftime("%Y-%m-%d %H:%M:%S", gmtime(job.yodaJobMetrics['endTime']))
            #job.yodaJobMetrics['endTime'] = node['endTime']
    # build the jobMetrics
    node['jobMetrics'] = self.getJobMetrics(job, site, workerNode)
    # for hpc status; note that job.subStatus (when set) takes precedence
    # over job.hpcStatus since it is assigned afterwards
    if job.hpcStatus:
        node['jobSubStatus'] = job.hpcStatus
    tolog("jobSubStatus: %s" % job.subStatus)
    if job.subStatus:
        node['jobSubStatus'] = job.subStatus
    if job.coreCount and job.coreCount != 'null' and job.coreCount != 'NULL':
        node['coreCount'] = job.coreCount
    if job.HPCJobId:
        node['batchID'] = job.HPCJobId
    # check to see if there were any high priority errors reported
    errorInfo = getHighestPriorityError(job.jobId, self.__pilot_initdir)
    if errorInfo != {}:
        try:
            pilotErrorCode = errorInfo['pilotErrorCode']
            pilotErrorDiag = errorInfo['pilotErrorDiag']
        except Exception, e:
            tolog("!!WARNING!!2323!! Exception caught: %s" % (e))
        else:
            # Overwrite any existing errors
            if pilotErrorCode == 0 and job.result[2] != 0:
                tolog('Encountered bad high priority error code %d (will not overwrite error code %d)' % (pilotErrorCode, job.result[2]))
            else:
                if job.result[2] != 0:
                    tolog("Encountered high priority error code %d (will overwrite error code %d)" % (pilotErrorCode, job.result[2]))
                else:
                    tolog("Encountered high priority error code %d" % (pilotErrorCode))
                job.result[2] = pilotErrorCode
                job.pilotErrorDiag = pilotErrorDiag
    else:
        tolog("Did not find any reported high priority errors")
    # send pilotErrorDiag for finished, failed and holding jobs
    if job.result[0] == 'finished' or job.result[0] == 'failed' or job.result[0] == 'holding':
        # get the pilot error diag from the right source
        if job.pilotErrorDiag:
            if job.pilotErrorDiag == "":
                node['pilotErrorDiag'] = tailPilotErrorDiag(self.__error.getPilotErrorDiag(job.result[2]))
                job.pilotErrorDiag = node['pilotErrorDiag']
                tolog("Empty pilotErrorDiag set to: %s" % (job.pilotErrorDiag))
            elif job.pilotErrorDiag.upper().find("<HTML>") >= 0:
                # html in the diag usually means a raw server/proxy reply leaked in
                tolog("Found html in pilotErrorDiag: %s" % (job.pilotErrorDiag))
                node['pilotErrorDiag'] = self.__error.getPilotErrorDiag(job.result[2])
                job.pilotErrorDiag = node['pilotErrorDiag']
                tolog("Updated pilotErrorDiag: %s" % (job.pilotErrorDiag))
            else:
                # truncate if necessary
                if len(job.pilotErrorDiag) > 250:
                    tolog("pilotErrorDiag will be truncated to size 250")
                    tolog("Original pilotErrorDiag message: %s" % (job.pilotErrorDiag))
                    job.pilotErrorDiag = job.pilotErrorDiag[:250]
                # set the pilotErrorDiag, but only the last 256 characters
                node['pilotErrorDiag'] = tailPilotErrorDiag(job.pilotErrorDiag)
        else:
            # set the pilotErrorDiag, but only the last 256 characters
            job.pilotErrorDiag = self.__error.getPilotErrorDiag(job.result[2])
            node['pilotErrorDiag'] = tailPilotErrorDiag(job.pilotErrorDiag)
            tolog("Updated pilotErrorDiag from None: %s" % (job.pilotErrorDiag))
    # get the number of events, should report in heartbeat in case of preempted.
    if job.nEvents != 0:
        node['nEvents'] = job.nEvents
        tolog("Total number of processed events: %d (read)" % (job.nEvents))
    else:
        tolog("Payload/TRF did not report the number of read events")
    try:
        # report CPUTime and CPUunit at the end of the job
        try:
            constime = int(job.cpuConsumptionTime)
        except:
            constime = None
        if constime:
            if constime < 10**9:
                node['cpuConsumptionTime'] = job.cpuConsumptionTime
            else:
                # sanity cap: anything >= 10^9 s is considered bogus
                tolog("!!WARNING!!2222!! Unrealistic cpuConsumptionTime: %s (reset to -1)" % job.cpuConsumptionTime)
                node['cpuConsumptionTime'] = "-1"
    except:
        tolog("Failed to get cpu time: %s" % traceback.format_exc())
    try:
        node['cpuConsumptionUnit'] = job.cpuConsumptionUnit + "+" + getCPUmodel()
    except:
        node['cpuConsumptionUnit'] = '?'
    node['cpuConversionFactor'] = job.cpuConversionFactor
    if job.result[0] == 'finished' or job.result[0] == 'failed':
        # make sure there is no mismatch between the transformation error codes (when both are reported)
        # send transformation errors depending on what is available
        if job.exeErrorDiag != "":
            node['exeErrorCode'] = job.exeErrorCode
            node['exeErrorDiag'] = job.exeErrorDiag
            # verify that exeErrorCode is set, if not, use the info in result[1]
            if job.exeErrorCode == 0:
                tolog("WARNING: job.exeErrorDiag is set but not job.exeErrorCode: setting it to: %d" % (job.result[1]))
                job.exeErrorCode = job.result[1]
                node['exeErrorCode'] = job.exeErrorCode
        else:
            node['transExitCode'] = job.result[1]
        if (job.result[0] == 'failed') and (job.exeErrorCode != 0) and (job.result[1] != job.exeErrorCode):
            if log:
                mismatch = "MISMATCH | Trf error code mismatch: exeErrorCode = %d, transExitCode = %d" %\
                           (job.exeErrorCode, job.result[1])
                if node.has_key('pilotLog'):
                    node['pilotLog'] = mismatch + node['pilotLog']
                else:
                    tolog("!!WARNING!!1300!! Could not write mismatch error to log extracts: %s" % mismatch)
        # check if Pilot-controlled resubmission is required:
        analyJob = isAnalysisJob(job.trf.split(",")[0])
        if (job.result[0] == "failed" and analyJob):
            pilotExitCode = job.result[2]
            error = PilotErrors()
            if (error.isPilotResubmissionErrorCode(pilotExitCode) or job.isPilotResubmissionRequired):
                # negate PilotError, ensure it's negative
                job.result[2] = -abs(pilotExitCode)
                tolog("(Negated error code)")
            else:
                tolog("(No need to negate error code)")
        node['pilotErrorCode'] = job.result[2]
        tolog("Pilot error code: %d" % (node['pilotErrorCode']))
        # report specific time measures
        # node['pilotTiming'] = "getJob=%s setup=%s stageIn=%s payload=%s stageOut=%s" % (job.timeGetJob, job.timeSetup, job.timeStageIn, job.timeExe, job.timeStageOut)
        node['pilotTiming'] = "%s|%s|%s|%s|%s" % (job.timeGetJob, job.timeStageIn, job.timeExe, job.timeStageOut, job.timeSetup)
    elif job.result[0] == 'holding':
        node['exeErrorCode'] = job.result[2]
        node['exeErrorDiag'] = self.__error.getPilotErrorDiag(job.result[2])
    else:
        node['cpuConsumptionUnit'] = getCPUmodel()
    # Add the utility info if it is available
    thisExperiment = getExperiment(job.experiment)
    if thisExperiment.shouldExecuteUtility():
        utility_node = thisExperiment.getUtilityInfo(job.workdir, self.__pilot_initdir, allowTxtFile=True)
        node = merge_dictionaries(node, utility_node)
    return node
def getXML(self, job, sitename, workdir, xmlstr=None, jr=False):
    """ Get the metadata xml

    Returns the metadata XML string to attach to the server update:
    - holding jobs (except on CERNVM) get no xml (sent later by job recovery)
    - failed jobs get an xml describing only the log file, generated here,
      provided the log file was actually transferred
    - otherwise the xmlstr prepared earlier (in postJobTask) is used as-is
    Side effect: removes the local log file for finished/failed jobs
    (except under Nordugrid).

    :param xmlstr: pre-built metadata xml for all files (finished jobs)
    :param jr: True when called from job recovery
    :return: the xml string ('' when nothing should be sent)
    """
    node_xml = ""
    tolog("getXML called")
    # for backwards compatibility
    try:
        experiment = job.experiment
    except:
        experiment = "unknown"
    # do not send xml for state 'holding' (will be sent by a later pilot during job recovery)
    if job.result[0] == 'holding' and sitename != "CERNVM":
        pass
    else:
        # only create and send log xml if the log was transferred
        if job.result[0] == 'failed' and isLogfileCopied(workdir, job.jobId):
            # generate the xml string for log file
            # at this time the job.workdir might have been removed (because this function can be called
            # after the removal of workdir is done), so we make a new dir
            xmldir = "%s/XML4PandaJob_%s" % (workdir, job.jobId)
            # group rw permission added as requested by LYON
            ec, rv = getstatusoutput("mkdir -m g+rw %s" % (xmldir))
            if ec != 0:
                tolog("!!WARNING!!1300!! Could not create xmldir from updatePandaServer: %d, %s (resetting to site workdir)" % (ec, rv))
                cmd = "ls -l %s" % (xmldir)
                out = getoutput(cmd)
                tolog("%s \n%s" % (cmd, out))
                xmldir = workdir
            # the log file lives in the pilot init dir on Nordugrid
            if os.environ.has_key('Nordugrid_pilot'):
                fname = os.path.join(self.__pilot_initdir, job.logFile)
            else:
                fname = os.path.join(workdir, job.logFile)
            if os.path.exists(fname):
                fnamelog = "%s/logfile.xml" % (xmldir)
                guids_status = PFCxml(experiment, fnamelog, fntag="lfn", alog=job.logFile, alogguid=job.tarFileGuid, jr=jr, logToOS=job.putLogToOS)
                from SiteMover import SiteMover
                ec, pilotErrorDiag, _fsize, _checksum = SiteMover.getLocalFileInfo(fname, csumtype="adler32")
                if ec != 0:
                    tolog("!!WARNING!!1300!! getLocalFileInfo failed: (%d, %s, %s)" % (ec, str(_fsize), str(_checksum)))
                    tolog("!!WARNING!!1300!! Can not set XML (will not be sent to server)")
                    node_xml = ''
                else:
                    # record size/checksum of the log tarball in the metadata
                    ec, _strXML = updateMetadata(fnamelog, _fsize, _checksum)
                    if ec == 0:
                        tolog("Added (%s, %s) to metadata file (%s)" % (_fsize, _checksum, fnamelog))
                    else:
                        tolog("!!WARNING!!1300!! Could not add (%s, %s) to metadata file (%s). XML will be incomplete: %d" %\
                              (_fsize, _checksum, fnamelog, ec))
                    # add skipped file info
                    _skippedfname = os.path.join(workdir, "skipped.xml")
                    if os.path.exists(_skippedfname):
                        ec = addSkippedToPFC(fnamelog, _skippedfname)
                    # read the generated log metadata back into a string
                    try:
                        f = open(fnamelog)
                    except Exception,e:
                        tolog("!!WARNING!!1300!! Exception caught: Can not open the file %s: %s (will not send XML)" %\
                              (fnamelog, str(e)))
                        node_xml = ''
                    else:
                        node_xml = ''
                        for line in f:
                            node_xml += line
                        f.close()
                    # transfer logfile.xml to pilot init dir for Nordugrid
                    if os.environ.has_key('Nordugrid_pilot'):
                        try:
                            copy2(fnamelog, self.__pilot_initdir)
                        except Exception, e:
                            tolog("!!WARNING!!1600!! Exception caught: Could not copy NG log metadata file to init dir: %s" % str(e))
                        else:
                            tolog("Successfully copied NG log metadata file to pilot init dir: %s" % (self.__pilot_initdir))
            else: # log file does not exist anymore
                if isLogfileCopied(workdir, job.jobId):
                    tolog("Log file has already been copied and removed")
                    if not os.environ.has_key('Nordugrid_pilot'):
                        # only send xml with log info if the log has been transferred
                        if xmlstr:
                            node_xml = xmlstr
                            tolog("Found xml anyway (stored since before)")
                        else:
                            node_xml = ''
                            tolog("!!WARNING!!1300!! XML not found, nothing to send to server")
                else:
                    tolog("!!WARNING!!1300!! File %s does not exist and transfer lockfile not found (job from old pilot?)" % (fname))
                    node_xml = ''
        elif xmlstr:
            # xmlstr was set in postJobTask for all files
            tolog("XML string set")
            _skippedfname = os.path.join(workdir, "skipped.xml")
            fname = "%s/metadata-%s.xml" % (workdir, job.jobId)
            if os.path.exists(fname):
                if os.path.exists(_skippedfname):
                    # add the skipped file info if needed
                    ec = addSkippedToPFC(fname, _skippedfname)
                # transfer metadata to pilot init dir for Nordugrid
                if os.environ.has_key('Nordugrid_pilot'):
                    try:
                        copy2(fname, self.__pilot_initdir)
                    except Exception, e:
                        tolog("!!WARNING!!1600!! Exception caught: Could not copy metadata file to init dir for NG: %s" % str(e))
                    else:
                        tolog("Successfully copied metadata file to pilot init dir for NG: %s" % (self.__pilot_initdir))
            else:
                tolog("Warning: Metadata does not exist: %s" % (fname))
            tolog("Will send XML")
            node_xml = xmlstr
    # we don't need the job's log file anymore, delete it (except for NG)
    if (job.result[0] == 'failed' or job.result[0] == 'finished') and not os.environ.has_key('Nordugrid_pilot'):
        try:
            os.system("rm -rf %s/%s" % (workdir, job.logFile))
        except OSError:
            tolog("!!WARNING!!1300!! Could not remove %s" % (job.logFile))
        else:
            tolog("Removed log file")
    return node_xml
def updateOutputFilesXMLWithSURLs4NG(self, experiment, siteWorkdir, jobId, outputFilesXML):
    """ Update the OutputFiles.xml file with SURLs

    Reads the OutputFiles.xml in siteWorkdir, runs it through
    updateXMLWithSURLs() (format='NG'), and writes the result back in place.

    :return: True when the file was successfully rewritten, False otherwise
    """
    status = False
    # open and read back the OutputFiles.xml file
    _filename = os.path.join(siteWorkdir, outputFilesXML)
    if os.path.exists(_filename):
        try:
            f = open(_filename, "r")
        except Exception, e:
            tolog("!!WARNING!!1990!! Could not open file %s: %s" % (_filename, e))
        else:
            # get the metadata
            xmlIN = f.read()
            f.close()
            # update the XML
            xmlOUT = updateXMLWithSURLs(experiment, xmlIN, siteWorkdir, jobId, self.__jobrec, format='NG')
            # write the XML
            try:
                f = open(_filename, "w")
            except OSError, e:
                tolog("!!WARNING!!1990!! Could not open file %s: %s" % (_filename, e))
            else:
                # write the XML and close the file
                f.write(xmlOUT)
                f.close()
                tolog("Final XML for Nordugrid / CERNVM:\n%s" % (xmlOUT))
                status = True
    else:
        tolog("!!WARNING!!1888!! Metadata file does not exist: %s" % (_filename))
    return status
def getDateDirs(self):
    """ Return a directory path component based on the current date.

    The string is dash-separated, e.g. '2014-09-22' (the old comment
    claimed '2014/09/22', which did not match the actual format).

    :return: 'YYYY-MM-DD' string for today
    """
    # Fetch the date once so year/month/day cannot disagree if the call
    # happens to straddle midnight (the old code called date.today() three times).
    today = date.today()
    return today.strftime("%Y-%m-%d")
def tryint(self, x):
    """ Used by numbered string comparison (to protect against unexpected letters in version number)

    Returns int(x) when x parses as an integer, otherwise x unchanged,
    so mixed version fragments like 'rc' survive tuple comparison.
    """
    try:
        return int(x)
    except ValueError:
        return x
def splittedname(self, s):
    """ Used by numbered string comparison

    Splits s into a tuple of alternating non-numeric and numeric chunks,
    with the numeric chunks converted to int via self.tryint().
    Can also be used for sorting:
    > names = ['YT4.11', '4.3', 'YT4.2', '4.10', 'PT2.19', 'PT2.9']
    > sorted(names, key=splittedname)
    ['4.3', '4.10', 'PT2.9', 'PT2.19', 'YT4.2', 'YT4.11']
    """
    from re import split
    chunks = split('([0-9]+)', s)
    return tuple(map(self.tryint, chunks))
def isAGreaterOrEqualToB(self, A, B):
    """ Is numbered string A >= B?

    Compares two version-like strings chunk-wise (numeric parts compared
    as integers), e.g. isAGreaterOrEqualToB("1.2.3", "2.2.2") -> False.
    """
    left = self.splittedname(A)
    right = self.splittedname(B)
    return not (left < right)
def getPayloadMetadataFilename(self, workdir, jobId, altloc=""):
    """ Return a proper path for the payload metadata

    Prefers jobReport.json (in workdir, falling back to altloc) when its
    'reportVersion' is >= 1.0.0; otherwise falls back to the legacy
    metadata-<jobId>.xml.PAYLOAD file. As a last resort the file is looked
    up in the pilot init dir.

    :param altloc: alternative directory to search for jobReport.json
    :return: path to the metadata file (may not exist; a warning is logged)
    """
    filenamePayloadMetadata = ""
    # Primarily use the jobReport.json if its' version is >= 1.0.0
    _filename = os.path.join(workdir, "jobReport.json")
    if not os.path.exists(_filename) and altloc != "":
        _filename = os.path.join(altloc, "jobReport.json")
        tolog("Trying alternative location: %s" % (_filename))
    if os.path.exists(_filename):
        # Now check the version
        try:
            f = open(_filename, 'r')
        except Exception, e:
            tolog("!!WARNING!!2233!! Could not open %s: %s" % (_filename, e))
        else:
            # Now verify that the version is at least 1.0.0
            # NOTE(review): f is never explicitly closed on these paths
            from json import load
            try:
                jobReport_dict = load(f)
                version = jobReport_dict['reportVersion']
            except Exception, e:
                filenamePayloadMetadata = "%s/metadata-%s.xml.PAYLOAD" % (workdir, jobId)
                tolog("reportVersion not found in jobReport, using default metadata XML file")
            else:
                v = '1.0.0'
                if self.isAGreaterOrEqualToB(version, v):
                    tolog("Will send metadata file %s since version %s is >= %s" % (_filename, version, v))
                    filenamePayloadMetadata = _filename
                else:
                    filenamePayloadMetadata = "%s/metadata-%s.xml.PAYLOAD" % (workdir, jobId)
                    tolog('Metadata version in file %s is too old (%s < %s), will send old XML file %s' % \
                          (os.path.basename(_filename), version, v, os.path.basename(filenamePayloadMetadata)))
    else:
        # Use default metadata file
        tolog("Did not find %s" % (_filename))
        filenamePayloadMetadata = "%s/metadata-%s.xml.PAYLOAD" % (workdir, jobId)
    # Make sure the metadata file actually exists
    if os.path.exists(filenamePayloadMetadata):
        tolog("Verified existance of metadata file: %s" % (filenamePayloadMetadata))
    else:
        tolog("WARNING: metadata file does not exist: %s" % (filenamePayloadMetadata))
        tolog("Looking for it in the pilot init dir..")
        fname = os.path.basename(filenamePayloadMetadata)
        path = os.path.join(self.__pilot_initdir, fname)
        if os.path.exists(path):
            filenamePayloadMetadata = path
            tolog("Verified existance of metadata file: %s" % (filenamePayloadMetadata))
    return filenamePayloadMetadata
def updatePandaServer(self, job, site, workerNode, port, xmlstr=None, spaceReport=False, log=None, ra=0, jr=False, useCoPilot=False, stdout_tail="", stdout_path="", additionalMetadata=None):
"""
Update the job status with the jobdispatcher web server.
State is a tuple of (jobId, ["jobstatus", transExitCode, pilotErrorCode], timestamp)
log = log extracts
xmlstr is set in postJobTask for finished jobs (all files). Failed jobs will only send xml for log (created in this function)
jr = job recovery mode
"""
tolog("Updating job status in updatePandaServer(): PandaId=%s, result=%s, time=%s" % (job.getState()))
# set any holding job to failed for sites that do not use job recovery (e.g. sites with LSF, that immediately
# removes any work directory after the LSF job finishes which of course makes job recovery impossible)
if not self.__jobrec:
if job.result[0] == 'holding' and site.sitename != "CERNVM":
job.result[0] = 'failed'
tolog("This site does not support job recovery: HOLDING state reset to FAILED")
# note: any changed job state above will be lost for fake server updates, does it matter?
# get the node structure expected by the server
node = self.getNodeStructure(job, site, workerNode, spaceReport=spaceReport, log=log)
# skip the server update (e.g. on NG)
if not self.__updateServer:
tolog("(fake server update)")
return 0, node
# get the xml
node['xml'] = self.getXML(job, site.sitename, site.workdir, xmlstr=xmlstr, jr=jr)
# stdout tail in case job.debug == 'true'
if job.debug.lower() == "true" and stdout_tail != "":
# protection for potentially large tails
stdout_tail = stdout_tail[-2048:]
node['stdout'] = stdout_tail
tolog("Will send stdout tail:\n%s (length = %d)" % (stdout_tail, len(stdout_tail)))
# also send the full stdout to a text indexer if required
if stdout_path != "":
if "stdout_to_text_indexer" in readpar('catchall') and os.path.exists(stdout_path):
tolog("Will send payload stdout to text indexer")
# get the user name, which we will use to create a proper filename
from SiteMover import SiteMover
s = SiteMover()
username = s.extractUsername(job.prodUserID)
# get setup path for xrdcp
try:
si = getSiteInformation(job.experiment)
setup_path = si.getLocalROOTSetup()
filename = "PanDA_payload_stdout-%s.txt" % (job.jobId)
dateDirs = self.getDateDirs()
remotePath = os.path.join(os.path.join(username, dateDirs), filename)
url = "root://faxbox.mwt2.org//group/logs/pilot/%s" % (remotePath)
cmd = "%sxrdcp -f %s %s" % (setup_path, stdout_path, url)
tolog("Executing command: %s" % (cmd))
rc, rs = getstatusoutput(cmd)
tolog("rc=%d, rs=%s" % (rc, rs))
except Exception, e:
tolog("!!WARNING!!3322!! Failed with text indexer: %s" % (e))
else:
tolog("stdout_path not set")
else:
if job.debug.lower() != "true":
tolog("Stdout tail will not be sent (debug=False)")
elif stdout_tail == "":
tolog("Stdout tail will not be sent (no stdout tail)")
else:
tolog("Stdout tail will not be sent (debug=%s, stdout_tail=\'%s\')" % (str(job.debug), stdout_tail))
# PN fake lostheartbeat
# if job.result[0] == "finished":
# node['state'] = "holding"
# node['xml'] = ""
# read back node['xml'] from jobState file for CERNVM
sendXML = True
if site.sitename == "CERNVM":
_node = self.getNodeStructureFromFile(site.workdir, job.jobId)
if _node:
if _node.has_key('xml'):
if _node['xml'] != "":
node['xml'] = _node['xml']
tolog("Read back metadata xml from job state file (length: %d)" % len(node['xml']))
else:
tolog("No metadata xml present in current job state file (1 - pilot should not send xml at this time)")
sendXML = False
else:
tolog("No xml key in node structure")
sendXML = False
else:
tolog("No metadata xml present in current job state file (2 - pilot should not send xml at this time)")
sendXML = False
# change the state to holding for initial CERNVM job
if not sendXML and (job.result[0] == "finished" or job.result[0] == "failed"):
# only set the holding state if the Co-Pilot is used
if useCoPilot:
job.result[0] = "holding"
node['state'] = "holding"
# update job state file
_retjs = updateJobState(job, site, node, recoveryAttempt=ra)
# is it the final update?
if job.result[0] == 'finished' or job.result[0] == 'failed' or job.result[0] == 'holding':
final = True
else:
final = False
# send the original xml/json if it exists (end of production job, ignore for event service job)
filenamePayloadMetadata = self.getPayloadMetadataFilename(site.workdir, job.jobId, altloc=job.workdir)
payloadXMLProblem = False
# backward compatibility
try:
eventService = job.eventService
except:
eventService = False
if os.path.exists(filenamePayloadMetadata) and final:
# get the metadata created by the payload
payloadXML = getMetadata(site.workdir, job.jobId, athena=True, altpath=filenamePayloadMetadata)
# add the metadata to the node
if payloadXML != "" and payloadXML != None:
tolog("Adding payload metadata of size %d to node dictionary (\'metaData\' field):\n%s" % (len(payloadXML), payloadXML))
node['metaData'] = payloadXML
else:
pilotErrorDiag = "Empty Athena metadata in file: %s" % (filenamePayloadMetadata)
payloadXMLProblem = True
else:
# athena XML should exist at the end of the job
analyJob = isAnalysisJob(job.trf.split(",")[0])
if job.result[0] == 'finished' and 'Install' not in site.sitename and not analyJob and 'DDM' not in site.sitename and 'test' not in site.sitename and job.prodSourceLabel != "install" and not eventService:
pilotErrorDiag = "Metadata does not exist: %s" % (filenamePayloadMetadata)
payloadXMLProblem = True
# fail the job if there was a problem with the athena metadata
# remove the comments below if a certain trf and release should be excluded from sending metadata
# trf_exclusions = ['merge_trf.py']
# release_exclusions = ['14.5.2.4']
# jobAtlasRelease = getAtlasRelease(job.release)
# if payloadXMLProblem and job.trf.split(",")[-1] not in trf_exclusions and jobAtlasRelease[-1] not in release_exclusions:
if payloadXMLProblem:
if job.trf == 'Archive_tf.py' or job.trf == 'Dummy_tf.py':
tolog("Metadata does not exist because the job is an archive/dummy job")
else:
tolog("!!FAILED!!1300!! %s" % (pilotErrorDiag))
job.result[0] = "failed"
job.result[2] = self.__error.ERR_NOPAYLOADMETADATA
if node.has_key('pilotLog'):
node['pilotLog'] += "!!FAILED!!1300!! %s" % (pilotErrorDiag)
else:
node['pilotLog'] = "!!FAILED!!1300!! %s" % (pilotErrorDiag)
node['pilotErrorCode'] = job.result[2]
node['state'] = job.result[0]
# for backward compatibility
try:
experiment = job.experiment
except:
experiment = "unknown"
# do not make the update if Nordugrid (leave for ARC to do)
if os.environ.has_key('Nordugrid_pilot'):
if final:
# update xml with SURLs stored in special SURL dictionary file
if self.updateOutputFilesXMLWithSURLs4NG(experiment, site.workdir, job.jobId, job.outputFilesXML):
tolog("Successfully added SURLs to %s" % (job.outputFilesXML))
# update xml with SURLs stored in special SURL dictionary file
if node.has_key('xml'):
tolog("Updating node structure XML with SURLs")
node['xml'] = updateXMLWithSURLs(experiment, node['xml'], site.workdir, job.jobId, self.__jobrec) # do not use format 'NG' here
# was the log file transferred to an OS? check in the OS transfer dictionary
tolog("job.logBucketID: %s" % job.logBucketID)
if job.logBucketID != -1:
# get the corresponding ddm endpoint
si = getSiteInformation(experiment)
os_ddmendpoint = si.getObjectstoreDDMEndpointFromBucketID(job.logBucketID)
node['xml'] = updateXMLWithEndpoints(node['xml'], [job.logFile], [os_ddmendpoint])
else:
node['xml'] = updateXMLWithEndpoints(node['xml'], [job.logFile], [None])
tolog("Updated XML:\n%s" % (node['xml']))
else:
tolog("WARNING: Found no xml entry in the node structure")
# store final node structure in pilot_initdir (will be sent to server by ARC control tower)
self.copyNodeStruct4NG(node)
tolog("Leaving the final update for the control tower")
return 0, node
# do not send xml if there was a put error during the log transfer
_xml = None
if final and node.has_key('xml'):
# is the call to updateXMLWithSURLs() useless? already done in JobLog?
# update xml with SURLs stored in special SURL dictionary file
tolog("Updating node structure XML with SURLs")
node['xml'] = updateXMLWithSURLs(experiment, node['xml'], site.workdir, job.jobId, self.__jobrec)
# was the log file transferred to an OS? check in the OS transfer dictionary
tolog("job.logBucketID: %s" % job.logBucketID)
if job.logBucketID != -1:
# get the corresponding ddm endpoint
si = getSiteInformation(experiment)
os_ddmendpoint = si.getObjectstoreDDMEndpointFromBucketID(job.logBucketID)
node['xml'] = updateXMLWithEndpoints(node['xml'], [job.logFile], [os_ddmendpoint])
else:
node['xml'] = updateXMLWithEndpoints(node['xml'], [job.logFile], [None])
tolog("Updated XML:\n%s" % (node['xml']))
_xml = node['xml']
if not isLogfileCopied(site.workdir, job.jobId):
tolog("Pilot will not send xml about output files since log was not transferred")
node['xml'] = ""
# should XML be sent at this time?
if not sendXML:
tolog("Metadata xml will not be sent")
if node.has_key('xml'):
if node['xml'] != "":
_xml = node['xml']
node['xml'] = ""
# add experiment specific metadata
if final and additionalMetadata != None:
tolog("Adding additionalMetadata to node")
if 'metaData' in node:
node['metaData'] += additionalMetadata
else:
node['metaData'] = additionalMetadata
# make the PandaLogger update at the final job update
if final:
# do not send FAX info for overflow jobs (transferType=fax), only for failover jobs
if job.filesWithFAX > 0 and job.transferType.lower() != "fax":
tolog("Sending PandaLogger update")
params = {}
params['pid'] = job.jobId
params['line'] = 0 # this is mandatory part of API, has to be present
params['type'] = 'FAXrecovery'
params['message'] = '"WithFAX":' + str(job.filesWithFAX) +\
',"WithoutFAX":' + str(job.filesWithoutFAX) +\
',"bytesWithFAX":' + str(job.bytesWithFAX) +\
',"bytesWithoutFAX":' + str(job.bytesWithoutFAX) +\
',"timeToCopy":' + job.timeStageIn
toPandaLogger(params)
# make the actual update, repeatedly if necessary (for the final update)
#ret = makeHTTPUpdate(job.result[0], node, port, url=self.__pshttpurl, path=self.__pilot_initdir)
if job.workdir.endswith("/"):
job.workdir = job.workdir[:-1]
ret = makeHTTPUpdate(job.result[0], node, port, url=self.__pshttpurl, path=os.path.dirname(job.workdir))
if not ret[2]: # data is None for a failed update attempt
tolog("makeHTTPUpdate returned: %s" % str(ret))
return 1, None
tolog("ret = %s" % str(ret))
data = ret[1]
tolog("data = %s" % str(data))
if data.has_key("command"):
job.action = data['command']
try:
awk = data['StatusCode']
except:
tolog("!!WARNING!!1300!! Having problem updating job status, set the awk to 1 for now, and continue...")
awk = "1"
else:
tolog("jobDispatcher acknowledged with %s" % (awk))
# need to have a return code so subprocess knows if update goes ok or not
ecode = int(awk) # use the awk code from jobdispatcher as the exit code
# PN fake lostheartbeat
# if job.result[0] == "finished":
# ecode = 1
# reset xml in case it was overwritten above for failed log transfers
if final and node.has_key('xml'):
node['xml'] = _xml
# if final update, now it's safe to remove any lingering memory output files from the init dir
if final:
try:
filename = os.path.join(self.__pilot_initdir, "memory_monitor*")
tolog("Will remove any lingering %s files from the init directory" % (filename))
os.system("rm -rf %s" % (filename))
except Exception, e:
tolog("!!WARNING!!4343!! Failed to remove %s: %s" % (filename), e)
return ecode, node # ecode=0 : update OK, otherwise something wrong
| mlassnig/pilot | PandaServerClient.py | Python | apache-2.0 | 49,522 |
"""Script to generate reports on translator classes from Doxygen sources.
The main purpose of the script is to extract the information from sources
related to internationalization (the translator classes). It uses the
information to generate documentation (language.doc,
translator_report.txt) from templates (language.tpl, maintainers.txt).
Simply run the script without parameters to get the reports and
documentation for all supported languages. If you want to generate the
translator report only for some languages, pass their codes as arguments
to the script. In that case, the language.doc will not be generated.
Example:
python translator.py en nl cz
Originally, the script was written in Perl and was known as translator.pl.
The last Perl version was dated 2002/05/21 (plus some later corrections)
$Id: translator.py 744 2010-10-09 08:04:33Z dimitri $
Petr Prikryl ([email protected])
History:
--------
2002/05/21 - This was the last Perl version.
2003/05/16 - List of language marks can be passed as arguments.
2004/01/24 - Total reimplementation started: classes TrManager, and Transl.
2004/02/05 - First version that produces translator report. No language.doc yet.
2004/02/10 - First fully functional version that generates both the translator
report and the documentation. It is a bit slower than the
Perl version, but is much less tricky and much more flexible.
It also solves some problems that were not solved by the Perl
version. The translator report content should be more useful
for developers.
2004/02/11 - Some tuning-up to provide more useful information.
2004/04/16 - Added new tokens to the tokenizer (to remove some warnings).
2004/05/25 - Added from __future__ import generators not to force Python 2.3.
2004/06/03 - Removed dependency on textwrap module.
2004/07/07 - Fixed the bug in the fill() function.
2004/07/21 - Better e-mail mangling for HTML part of language.doc.
- Plural not used for reporting a single missing method.
- Removal of not used translator adapters is suggested only
when the report is not restricted to selected languages
explicitly via script arguments.
2004/07/26 - Better reporting of not-needed adapters.
2004/10/04 - Reporting of not called translator methods added.
2004/10/05 - Modified to check only doxygen/src sources for the previous report.
2005/02/28 - Slight modification to generate "mailto.txt" auxiliary file.
2005/08/15 - Doxygen's root directory determined primarily from DOXYGEN
environment variable. When not found, then relatively to the script.
2007/03/20 - The "translate me!" searched in comments and reported if found.
2008/06/09 - Warning when the MAX_DOT_GRAPH_HEIGHT is still part of trLegendDocs().
2009/05/09 - Changed HTML output to fit it with XHTML DTD
2009/09/02 - Added percentage info to the report (implemented / to be implemented).
2010/02/09 - Added checking/suggestion 'Reimplementation using UTF-8 suggested.
2010/03/03 - Added [unreachable] prefix used in maintainers.txt.
2010/05/28 - BOM skipped; minor code cleaning.
2010/05/31 - e-mail mangled already in maintainers.txt
2010/08/20 - maintainers.txt to UTF-8, related processing of unicode strings
- [any mark] introduced instead of [unreachable] only
- marks highlighted in HTML
2010/08/30 - Highlighting in what will be the table in langhowto.html modified.
2010/09/27 - The underscore in \latexonly part of the generated language.doc
was prefixed by backslash (was LaTeX related error).
2013/02/19 - Better diagnostics when translator_xx.h is too crippled.
"""
from __future__ import generators
import codecs
import os
import re
import sys
def fill(s):
    """Collapse all whitespace in *s* and re-wrap the text into lines of
    at most 70 characters, returned as one newline-joined string.

    Runs of whitespace (including newlines and tabs) are normalized to a
    single space before wrapping, so the input's original layout is
    discarded. Returns the empty string for whitespace-only input."""
    # Normalize: every whitespace run becomes one space, then split into words.
    words = re.sub(r'\s+', ' ', s).strip().split()
    if not words:
        return ''
    # Greedily pack words into lines; a word is appended only while the
    # running line plus the word stays under the 70-character budget.
    wrapped = []
    current = words[0]  # no separating space before the first word
    for word in words[1:]:
        if len(current) + len(word) < 70:
            current += ' ' + word
        else:
            wrapped.append(current)  # line is full, start a new one
            current = word
    wrapped.append(current)  # flush the final (possibly short) line
    return '\n'.join(wrapped)
# The following function dedent() is the verbatim copy from the textwrap.py
# module. The textwrap.py was introduced in Python 2.3. To make this script
# working also in older Python versions, I have decided to copy it.
# Notice that the textwrap.py is copyrighted:
#
# Copyright (C) 1999-2001 Gregory P. Ward.
# Copyright (C) 2002, 2003 Python Software Foundation.
# Written by Greg Ward <[email protected]>
#
# The explicit permission to use the code here was sent by Guido van Rossum
# (4th June, 2004).
#
def dedent(text):
    """dedent(text : string) -> string

    Remove the largest amount of leading whitespace that is common to
    every line of `text`. Tabs are expanded first; lines consisting only
    of whitespace are ignored when computing the common margin (but are
    still shortened by it). Useful to left-align triple-quoted strings
    that were indented to match the surrounding source code.
    """
    src_lines = text.expandtabs().split('\n')
    # The margin is the smallest indent over all lines with visible content.
    indents = [len(ln) - len(ln.lstrip()) for ln in src_lines if ln.lstrip()]
    margin = min(indents) if indents else None
    # margin of None (no content lines) or 0 means there is nothing to strip.
    if margin:
        src_lines = [ln[margin:] for ln in src_lines]
    return '\n'.join(src_lines)
class Transl:
    """One instance is build for each translator.
    The abbreviation of the source file--part after 'translator_'--is used as
    the identification of the object. The empty string is used for the
    abstract Translator class from translator.h. The other information is
    extracted from inside the source file."""
    def __init__(self, fname, manager):
        """Bind to the manager and initialize.

        fname is the path to the translator source file (string); manager is
        the TrManager object that owns this instance."""
        # Store the filename and the reference to the manager object.
        self.fname = fname
        self.manager = manager
        # The instance is responsible for loading the source file, so it checks
        # for its existence and quits if something goes wrong.
        if not os.path.isfile(fname):
            sys.stderr.write("\a\nFile '%s' not found!\n" % fname)
            sys.exit(1)
        # Initialize the other collected information.
        self.classId = None         # identifier of the class, like 'TranslatorCz'
        self.baseClassId = None     # identifier of the base class
        self.readableStatus = None  # 'up-to-date', '1.2.3', '1.3', etc.
        self.status = None          # '', '1.2.03', '1.3.00', etc.
        self.lang = None            # like 'Brasilian'
        self.langReadable = None    # like 'Brasilian Portuguese'
        self.note = None            # like 'should be cleaned up'
        self.prototypeDic = {}      # uniPrototype -> prototype
        self.translateMeText = 'translate me!'
        self.translateMeFlag = False  # comments with "translate me!" found
        self.txtMAX_DOT_GRAPH_HEIGHT_flag = False  # found in string in trLegendDocs()
        self.obsoleteMethods = None   # list of prototypes to be removed
        self.missingMethods = None    # list of prototypes to be implemented
        self.implementedMethods = None  # list of implemented required methods
        self.adaptMinClass = None     # The newest adapter class that can be used
        self.isDecodedTranslator = None  # Flag related to internal usage of UTF-8
def __tokenGenerator(self):
"""Generator that reads the file and yields tokens as 4-tuples.
The tokens have the form (tokenId, tokenString, lineNo). The
last returned token has the form ('eof', None, None). When trying
to access next token afer that, the exception would be raised."""
# Set the dictionary for recognizing tokenId for keywords, separators
# and the similar categories. The key is the string to be recognized,
# the value says its token identification.
tokenDic = { 'class': 'class',
'const': 'const',
'public': 'public',
'protected': 'protected',
'private': 'private',
'static': 'static',
'virtual': 'virtual',
':': 'colon',
';': 'semic',
',': 'comma',
'[': 'lsqbra',
']': 'rsqbra',
'(': 'lpar',
')': 'rpar',
'{': 'lcurly',
'}': 'rcurly',
'=': 'assign',
'*': 'star',
'&': 'amp',
'+': 'plus',
'-': 'minus',
'!': 'excl',
'?': 'qmark',
'<': 'lt',
'>': 'gt',
"'": 'quot',
'"': 'dquot',
'.': 'dot',
'%': 'perc',
'~': 'tilde',
'^': 'caret',
}
# Regular expression for recognizing identifiers.
rexId = re.compile(r'^[a-zA-Z]\w*$')
# Open the file for reading and extracting tokens until the eof.
# Initialize the finite automaton.
f = open(self.fname)
lineNo = 0
line = '' # init -- see the pos initialization below
linelen = 0 # init
pos = 100 # init -- pos after the end of line
status = 0
tokenId = None # init
tokenStr = '' # init -- the characters will be appended.
tokenLineNo = 0
while status != 777:
# Get the next character. Read next line first, if necessary.
if pos < linelen:
c = line[pos]
else:
lineNo += 1
line = f.readline()
if line.startswith('\xef\xbb\xbf'):
line = line[3:] # skip the BOM
linelen = len(line)
pos = 0
if line == '': # eof
status = 777
else:
c = line[pos]
# Consume the character based on the status
if status == 0: # basic status
# This is the initial status. If tokenId is set, yield the
# token here and only here (except when eof is found).
# Initialize the token variables after the yield.
if tokenId:
# If it is an unknown item, it can still be recognized
# here. Keywords and separators are the example.
if tokenId == 'unknown':
if tokenDic.has_key(tokenStr):
tokenId = tokenDic[tokenStr]
elif tokenStr.isdigit():
tokenId = 'num'
elif rexId.match(tokenStr):
tokenId = 'id'
else:
msg = '\aWarning: unknown token "' + tokenStr + '"'
msg += '\tfound on line %d' % tokenLineNo
msg += ' in "' + self.fname + '".\n'
sys.stderr.write(msg)
yield (tokenId, tokenStr, tokenLineNo)
# If it is a comment that contains the self.translateMeText
# string, set the flag -- the situation will be reported.
if tokenId == 'comment' and tokenStr.find(self.translateMeText) >= 0:
self.translateMeFlag = True
tokenId = None
tokenStr = ''
tokenLineNo = 0
# Now process the character. When we just skip it (spaces),
# stay in this status. All characters that will be part of
# some token cause moving to the specific status. And only
# when moving to the status == 0 (or the final state 777),
# the token is yielded. With respect to that the automaton
# behaves as Moore's one (output bound to status). When
# collecting tokens, the automaton is the Mealy's one
# (actions bound to transitions).
if c.isspace():
pass # just skip whitespace characters
elif c == '/': # Possibly comment starts here, but
tokenId = 'unknown' # it could be only a slash in code.
tokenStr = c
tokenLineNo = lineNo
status = 1
elif c == '#':
tokenId = 'preproc' # preprocessor directive
tokenStr = c
tokenLineNo = lineNo
status = 5
elif c == '"': # string starts here
tokenId = 'string'
tokenStr = c
tokenLineNo = lineNo
status = 6
elif c == "'": # char literal starts here
tokenId = 'charlit'
tokenStr = c
tokenLineNo = lineNo
status = 8
elif tokenDic.has_key(c): # known one-char token
tokenId = tokenDic[c]
tokenStr = c
tokenLineNo = lineNo
# stay in this state to yield token immediately
else:
tokenId = 'unknown' # totally unknown
tokenStr = c
tokenLineNo = lineNo
status = 333
pos += 1 # move position in any case
elif status == 1: # possibly a comment
if c == '/': # ... definitely the C++ comment
tokenId = 'comment'
tokenStr += c
pos += 1
status = 2
elif c == '*': # ... definitely the C comment
tokenId = 'comment'
tokenStr += c
pos += 1
status = 3
else:
status = 0 # unrecognized, don't move pos
elif status == 2: # inside the C++ comment
if c == '\n': # the end of C++ comment
status = 0 # yield the token
else:
tokenStr += c # collect the C++ comment
pos += 1
elif status == 3: # inside the C comment
if c == '*': # possibly the end of the C comment
tokenStr += c
status = 4
else:
tokenStr += c # collect the C comment
pos += 1
elif status == 4: # possibly the end of the C comment
if c == '/': # definitely the end of the C comment
tokenStr += c
status = 0 # yield the token
elif c == '*': # more stars inside the comment
tokenStr += c
else:
tokenStr += c # this cannot be the end of comment
status = 3
pos += 1
elif status == 5: # inside the preprocessor directive
if c == '\n': # the end of the preproc. command
status = 0 # yield the token
else:
tokenStr += c # collect the preproc
pos += 1
elif status == 6: # inside the string
if c == '\\': # escaped char inside the string
tokenStr += c
status = 7
elif c == '"': # end of the string
tokenStr += c
status = 0
else:
tokenStr += c # collect the chars of the string
pos += 1
elif status == 7: # escaped char inside the string
tokenStr += c # collect the char of the string
status = 6
pos += 1
elif status == 8: # inside the char literal
tokenStr += c # collect the char of the literal
status = 9
pos += 1
elif status == 9: # end of char literal expected
if c == "'": # ... and found
tokenStr += c
status = 0
pos += 1
else:
tokenId = 'error' # end of literal was expected
tokenStr += c
status = 0
elif status == 333: # start of the unknown token
if c.isspace():
pos += 1
status = 0 # tokenId may be determined later
elif tokenDic.has_key(c): # separator, don't move pos
status = 0
else:
tokenStr += c # collect
pos += 1
# We should have finished in the final status. If some token
# have been extracted, yield it first.
assert(status == 777)
if tokenId:
yield (tokenId, tokenStr, tokenLineNo)
tokenId = None
tokenStr = ''
tokenLineNo = 0
# The file content is processed. Close the file. Then always yield
# the eof token.
f.close()
yield ('eof', None, None)
def __collectClassInfo(self, tokenIterator):
"""Collect the information about the class and base class.
The tokens including the opening left curly brace of the class are
consumed."""
status = 0 # initial state
while status != 777: # final state
# Always assume that the previous tokens were processed. Get
# the next one.
tokenId, tokenStr, tokenLineNo = tokenIterator.next()
# Process the token and never return back.
if status == 0: # waiting for the 'class' keyword.
if tokenId == 'class':
status = 1
elif status == 1: # expecting the class identification
if tokenId == 'id':
self.classId = tokenStr
status = 2
else:
self.__unexpectedToken(status, tokenId, tokenLineNo)
elif status == 2: # expecting the curly brace or base class info
if tokenId == 'lcurly':
status = 777 # correctly finished
elif tokenId == 'colon':
status = 3
else:
self.__unexpectedToken(status, tokenId, tokenLineNo)
elif status == 3: # expecting the 'public' in front of base class id
if tokenId == 'public':
status = 4
else:
self.__unexpectedToken(status, tokenId, tokenLineNo)
elif status == 4: # expecting the base class id
if tokenId == 'id':
self.baseClassId = tokenStr
status = 5
else:
self.__unexpectedToken(status, tokenId, tokenLineNo)
elif status == 5: # expecting the curly brace and quitting
if tokenId == 'lcurly':
status = 777 # correctly finished
elif tokenId == 'comment':
pass
else:
self.__unexpectedToken(status, tokenId, tokenLineNo)
# Extract the status of the TranslatorXxxx class. The readable form
# will be used in reports the status form is a string that can be
# compared lexically (unified length, padding with zeros, etc.).
if self.baseClassId:
lst = self.baseClassId.split('_')
if lst[0] == 'Translator':
self.readableStatus = 'up-to-date'
self.status = ''
elif lst[0] == 'TranslatorAdapter':
self.status = lst[1] + '.' + lst[2]
self.readableStatus = self.status
if len(lst) > 3: # add the last part of the number
self.status += '.' + ('%02d' % int(lst[3]))
self.readableStatus += '.' + lst[3]
else:
self.status += '.00'
elif lst[0] == 'TranslatorEnglish':
# Obsolete or Based on English.
if self.classId[-2:] == 'En':
self.readableStatus = 'English based'
self.status = 'En'
else:
self.readableStatus = 'obsolete'
self.status = '0.0.00'
# Check whether status was set, or set 'strange'.
if self.status == None:
self.status = 'strange'
if not self.readableStatus:
self.readableStatus = 'strange'
# Extract the name of the language and the readable form.
self.lang = self.classId[10:] # without 'Translator'
if self.lang == 'Brazilian':
self.langReadable = 'Brazilian Portuguese'
elif self.lang == 'Chinesetraditional':
self.langReadable = 'Chinese Traditional'
else:
self.langReadable = self.lang
def __unexpectedToken(self, status, tokenId, tokenLineNo):
"""Reports unexpected token and quits with exit code 1."""
import inspect
calledFrom = inspect.stack()[1][3]
msg = "\a\nUnexpected token '%s' on the line %d in '%s'.\n"
msg = msg % (tokenId, tokenLineNo, self.fname)
msg += 'status = %d in %s()\n' % (status, calledFrom)
sys.stderr.write(msg)
sys.exit(1)
def collectPureVirtualPrototypes(self):
"""Returns dictionary 'unified prototype' -> 'full prototype'.
The method is expected to be called only for the translator.h. It
extracts only the pure virtual method and build the dictionary where
key is the unified prototype without argument identifiers."""
# Prepare empty dictionary that will be returned.
resultDic = {}
# Start the token generator which parses the class source file.
tokenIterator = self.__tokenGenerator()
# Collect the class and the base class identifiers.
self.__collectClassInfo(tokenIterator)
assert(self.classId == 'Translator')
# Let's collect readable form of the public virtual pure method
# prototypes in the readable form -- as defined in translator.h.
# Let's collect also unified form of the same prototype that omits
# everything that can be omitted, namely 'virtual' and argument
# identifiers.
prototype = '' # readable prototype (with everything)
uniPrototype = '' # unified prototype (without arg. identifiers)
# Collect the pure virtual method prototypes. Stop on the closing
# curly brace followed by the semicolon (end of class).
status = 0
curlyCnt = 0 # counter for the level of curly braces
# Loop until the final state 777 is reached. The errors are processed
# immediately. In this implementation, it always quits the application.
while status != 777:
# Get the next token.
tokenId, tokenStr, tokenLineNo = tokenIterator.next()
if status == 0: # waiting for 'public:'
if tokenId == 'public':
status = 1
elif status == 1: # colon after the 'public'
if tokenId == 'colon':
status = 2
else:
self.__unexpectedToken(status, tokenId, tokenLineNo)
elif status == 2: # waiting for 'virtual'
if tokenId == 'virtual':
prototype = tokenStr # but not to unified prototype
status = 3
elif tokenId == 'comment':
pass
elif tokenId == 'rcurly':
status = 11 # expected end of class
else:
self.__unexpectedToken(status, tokenId, tokenLineNo)
elif status == 3: # return type of the method expected
if tokenId == 'id':
prototype += ' ' + tokenStr
uniPrototype = tokenStr # start collecting the unified prototype
status = 4
elif tokenId == 'tilde':
status = 4
else:
self.__unexpectedToken(status, tokenId, tokenLineNo)
elif status == 4: # method identifier expected
if tokenId == 'id':
prototype += ' ' + tokenStr
uniPrototype += ' ' + tokenStr
status = 5
else:
self.__unexpectedToken(status, tokenId, tokenLineNo)
elif status == 5: # left bracket of the argument list expected
if tokenId == 'lpar':
prototype += tokenStr
uniPrototype += tokenStr
status = 6
else:
self.__unexpectedToken(status, tokenId, tokenLineNo)
elif status == 6: # collecting arguments of the method
if tokenId == 'rpar':
prototype += tokenStr
uniPrototype += tokenStr
status = 7
elif tokenId == 'const':
prototype += tokenStr
uniPrototype += tokenStr
status = 12
elif tokenId == 'id': # type identifier
prototype += tokenStr
uniPrototype += tokenStr
status = 13
else:
self.__unexpectedToken(status, tokenId, tokenLineNo)
elif status == 7: # assignment expected or left curly brace
if tokenId == 'assign':
status = 8
elif tokenId == 'lcurly':
curlyCnt = 1 # method body entered
status = 10
else:
self.__unexpectedToken(status, tokenId, tokenLineNo)
elif status == 8: # zero expected
if tokenId == 'num' and tokenStr == '0':
status = 9
else:
self.__unexpectedToken(status, tokenId, tokenLineNo)
elif status == 9: # after semicolon, produce the dic item
if tokenId == 'semic':
assert(not resultDic.has_key(uniPrototype))
resultDic[uniPrototype] = prototype
status = 2
else:
self.__unexpectedToken(status, tokenId, tokenLineNo)
elif status == 10: # consuming the body of the method
if tokenId == 'rcurly':
curlyCnt -= 1
if curlyCnt == 0:
status = 2 # body consumed
elif tokenId == 'lcurly':
curlyCnt += 1
elif status == 11: # probably the end of class
if tokenId == 'semic':
status = 777
else:
self.__unexpectedToken(status, tokenId, tokenLineNo)
elif status == 12: # type id for argument expected
if tokenId == 'id':
prototype += ' ' + tokenStr
uniPrototype += ' ' + tokenStr
status = 13
else:
self.__unexpectedToken(status, tokenId, tokenLineNo)
elif status == 13: # namespace qualification or * or & expected
if tokenId == 'colon': # was namespace id
prototype += tokenStr
uniPrototype += tokenStr
status = 14
elif tokenId == 'star' or tokenId == 'amp': # pointer or reference
prototype += ' ' + tokenStr
uniPrototype += ' ' + tokenStr
status = 16
elif tokenId == 'id': # argument identifier
prototype += ' ' + tokenStr
# don't put this into unified prototype
status = 17
else:
self.__unexpectedToken(status, tokenId, tokenLineNo)
elif status == 14: # second colon for namespace:: expected
if tokenId == 'colon':
prototype += tokenStr
uniPrototype += tokenStr
status = 15
else:
self.__unexpectedToken(status, tokenId, tokenLineNo)
elif status == 15: # type after namespace:: expected
if tokenId == 'id':
prototype += tokenStr
uniPrototype += tokenStr
status = 13
else:
self.__unexpectedToken(status, tokenId, tokenLineNo)
elif status == 16: # argument identifier expected
if tokenId == 'id':
prototype += ' ' + tokenStr
# don't put this into unified prototype
status = 17
else:
self.__unexpectedToken(status, tokenId, tokenLineNo)
elif status == 17: # comma or ')' after argument identifier expected
if tokenId == 'comma':
prototype += ', '
uniPrototype += ', '
status = 6
elif tokenId == 'rpar':
prototype += tokenStr
uniPrototype += tokenStr
status = 7
else:
self.__unexpectedToken(status, tokenId, tokenLineNo)
# Eat the rest of the source to cause closing the file.
while tokenId != 'eof':
tokenId, tokenStr, tokenLineNo = tokenIterator.next()
# Return the resulting dictionary with 'uniPrototype -> prototype'.
return resultDic
def __collectPublicMethodPrototypes(self, tokenIterator):
"""Collects prototypes of public methods and fills self.prototypeDic.
The dictionary is filled by items: uniPrototype -> prototype.
The method is expected to be called only for TranslatorXxxx classes,
i.e. for the classes that implement translation to some language.
It assumes that the openning curly brace of the class was already
consumed. The source is consumed until the end of the class.
The caller should consume the source until the eof to cause closing
the source file."""
assert(self.classId != 'Translator')
assert(self.baseClassId != None)
# The following finite automaton slightly differs from the one
# inside self.collectPureVirtualPrototypes(). It produces the
# dictionary item just after consuming the body of the method
# (transition from from state 10 to state 2). It also does not allow
# definitions of public pure virtual methods, except for
# TranslatorAdapterBase (states 8 and 9). Argument identifier inside
# method argument lists can be omitted or commented.
#
# Let's collect readable form of all public method prototypes in
# the readable form -- as defined in the source file.
# Let's collect also unified form of the same prototype that omits
# everything that can be omitted, namely 'virtual' and argument
# identifiers.
prototype = '' # readable prototype (with everything)
uniPrototype = '' # unified prototype (without arg. identifiers)
warning = '' # warning message -- if something special detected
methodId = None # processed method id
# Collect the method prototypes. Stop on the closing
# curly brace followed by the semicolon (end of class).
status = 0
curlyCnt = 0 # counter for the level of curly braces
# Loop until the final state 777 is reached. The errors are processed
# immediately. In this implementation, it always quits the application.
while status != 777:
# Get the next token.
tokenId, tokenStr, tokenLineNo = tokenIterator.next()
if status == 0: # waiting for 'public:'
if tokenId == 'public':
status = 1
elif tokenId == 'eof': # non-public things until the eof
status = 777
elif status == 1: # colon after the 'public'
if tokenId == 'colon':
status = 2
else:
self.__unexpectedToken(status, tokenId, tokenLineNo)
elif status == 2: # waiting for 'virtual' (can be omitted)
if tokenId == 'virtual':
prototype = tokenStr # but not to unified prototype
status = 3
elif tokenId == 'id': # 'virtual' was omitted
prototype = tokenStr
uniPrototype = tokenStr # start collecting the unified prototype
status = 4
elif tokenId == 'comment':
pass
elif tokenId == 'protected' or tokenId == 'private':
status = 0
elif tokenId == 'rcurly':
status = 11 # expected end of class
else:
self.__unexpectedToken(status, tokenId, tokenLineNo)
elif status == 3: # return type of the method expected
if tokenId == 'id':
prototype += ' ' + tokenStr
uniPrototype = tokenStr # start collecting the unified prototype
status = 4
else:
self.__unexpectedToken(status, tokenId, tokenLineNo)
elif status == 4: # method identifier expected
if tokenId == 'id':
prototype += ' ' + tokenStr
uniPrototype += ' ' + tokenStr
methodId = tokenStr # for reporting
status = 5
else:
self.__unexpectedToken(status, tokenId, tokenLineNo)
elif status == 5: # left bracket of the argument list expected
if tokenId == 'lpar':
prototype += tokenStr
uniPrototype += tokenStr
status = 6
else:
self.__unexpectedToken(status, tokenId, tokenLineNo)
elif status == 6: # collecting arguments of the method
if tokenId == 'rpar':
prototype += tokenStr
uniPrototype += tokenStr
status = 7
elif tokenId == 'const':
prototype += tokenStr
uniPrototype += tokenStr
status = 12
elif tokenId == 'id': # type identifier
prototype += tokenStr
uniPrototype += tokenStr
status = 13
else:
self.__unexpectedToken(status, tokenId, tokenLineNo)
elif status == 7: # left curly brace expected
if tokenId == 'lcurly':
curlyCnt = 1 # method body entered
status = 10
elif tokenId == 'comment':
pass
elif tokenId == 'assign': # allowed only for TranslatorAdapterBase
assert(self.classId == 'TranslatorAdapterBase')
status = 8
else:
self.__unexpectedToken(status, tokenId, tokenLineNo)
elif status == 8: # zero expected (TranslatorAdapterBase)
assert(self.classId == 'TranslatorAdapterBase')
if tokenId == 'num' and tokenStr == '0':
status = 9
else:
self.__unexpectedToken(status, tokenId, tokenLineNo)
elif status == 9: # after semicolon (TranslatorAdapterBase)
assert(self.classId == 'TranslatorAdapterBase')
if tokenId == 'semic':
status = 2
else:
self.__unexpectedToken(status, tokenId, tokenLineNo)
elif status == 10: # consuming the body of the method, then dic item
if tokenId == 'rcurly':
curlyCnt -= 1
if curlyCnt == 0:
# Check for possible copy/paste error when name
# of the method was not corrected (i.e. the same
# name already exists).
if uniPrototype in self.prototypeDic:
msg = "'%s' prototype found again (duplicity)\n"
msg += "in '%s'.\n" % self.fname
msg = msg % uniPrototype
sys.stderr.write(msg)
assert False
assert(not self.prototypeDic.has_key(uniPrototype))
# Insert new dictionary item.
self.prototypeDic[uniPrototype] = prototype
status = 2 # body consumed
methodId = None # outside of any method
elif tokenId == 'lcurly':
curlyCnt += 1
# Warn in special case.
elif methodId == 'trLegendDocs' and tokenId == 'string' \
and tokenStr.find('MAX_DOT_GRAPH_HEIGHT') >= 0:
self.txtMAX_DOT_GRAPH_HEIGHT_flag = True
elif status == 11: # probably the end of class
if tokenId == 'semic':
status = 777
else:
self.__unexpectedToken(status, tokenId, tokenLineNo)
elif status == 12: # type id for argument expected
if tokenId == 'id':
prototype += ' ' + tokenStr
uniPrototype += ' ' + tokenStr
status = 13
else:
self.__unexpectedToken(status, tokenId, tokenLineNo)
elif status == 13: # :: or * or & or id or ) expected
if tokenId == 'colon': # was namespace id
prototype += tokenStr
uniPrototype += tokenStr
status = 14
elif tokenId == 'star' or tokenId == 'amp': # pointer or reference
prototype += ' ' + tokenStr
uniPrototype += ' ' + tokenStr
status = 16
elif tokenId == 'id': # argument identifier
prototype += ' ' + tokenStr
# don't put this into unified prototype
status = 17
elif tokenId == 'comment': # probably commented-out identifier
prototype += tokenStr
elif tokenId == 'rpar':
prototype += tokenStr
uniPrototype += tokenStr
status = 7
elif tokenId == 'comma':
prototype += ', '
uniPrototype += ', '
status = 6
else:
self.__unexpectedToken(status, tokenId, tokenLineNo)
elif status == 14: # second colon for namespace:: expected
if tokenId == 'colon':
prototype += tokenStr
uniPrototype += tokenStr
status = 15
else:
self.__unexpectedToken(status, tokenId, tokenLineNo)
elif status == 15: # type after namespace:: expected
if tokenId == 'id':
prototype += tokenStr
uniPrototype += tokenStr
status = 13
else:
self.__unexpectedToken(status, tokenId, tokenLineNo)
elif status == 16: # argument identifier or ) expected
if tokenId == 'id':
prototype += ' ' + tokenStr
# don't put this into unified prototype
status = 17
elif tokenId == 'rpar':
prototype += tokenStr
uniPrototype += tokenStr
status = 7
elif tokenId == 'comment':
prototype += tokenStr
else:
self.__unexpectedToken(status, tokenId, tokenLineNo)
elif status == 17: # comma or ')' after argument identifier expected
if tokenId == 'comma':
prototype += ', '
uniPrototype += ', '
status = 6
elif tokenId == 'rpar':
prototype += tokenStr
uniPrototype += tokenStr
status = 7
else:
self.__unexpectedToken(status, tokenId, tokenLineNo)
def collectAdapterPrototypes(self):
"""Returns the dictionary of prototypes implemented by adapters.
It is created to process the translator_adapter.h. The returned
dictionary has the form: unifiedPrototype -> (version, classId)
thus by looking for the prototype, we get the information what is
the newest (least adapting) adapter that is sufficient for
implementing the method."""
# Start the token generator which parses the class source file.
assert(os.path.split(self.fname)[1] == 'translator_adapter.h')
tokenIterator = self.__tokenGenerator()
# Get the references to the involved dictionaries.
reqDic = self.manager.requiredMethodsDic
# Create the empty dictionary that will be returned.
adaptDic = {}
# Loop through the source of the adapter file until no other adapter
# class is found.
while True:
try:
# Collect the class and the base class identifiers.
self.__collectClassInfo(tokenIterator)
# Extract the comparable version of the adapter class.
# Note: The self.status as set by self.__collectClassInfo()
# contains similar version, but is related to the base class,
# not to the class itself.
lst = self.classId.split('_')
version = ''
if lst[0] == 'TranslatorAdapter': # TranslatorAdapterBase otherwise
version = lst[1] + '.' + lst[2]
if len(lst) > 3: # add the last part of the number
version += '.' + ('%02d' % int(lst[3]))
else:
version += '.00'
# Collect the prototypes of implemented public methods.
self.__collectPublicMethodPrototypes(tokenIterator)
# For the required methods, update the dictionary of methods
# implemented by the adapter.
for protoUni in self.prototypeDic:
if reqDic.has_key(protoUni):
# This required method will be marked as implemented
# by this adapter class. This implementation assumes
# that newer adapters do not reimplement any required
# methods already implemented by older adapters.
assert(not adaptDic.has_key(protoUni))
adaptDic[protoUni] = (version, self.classId)
# Clear the dictionary object and the information related
# to the class as the next adapter class is to be processed.
self.prototypeDic.clear()
self.classId = None
self.baseClassId = None
except StopIteration:
break
# Return the result dictionary.
return adaptDic
def processing(self):
"""Processing of the source file -- only for TranslatorXxxx classes."""
# Start the token generator which parses the class source file.
tokenIterator = self.__tokenGenerator()
# Collect the class and the base class identifiers.
self.__collectClassInfo(tokenIterator)
assert(self.classId != 'Translator')
assert(self.classId[:17] != 'TranslatorAdapter')
# Collect the prototypes of implemented public methods.
self.__collectPublicMethodPrototypes(tokenIterator)
# Eat the rest of the source to cause closing the file.
while True:
try:
t = tokenIterator.next()
except StopIteration:
break
# Shorthands for the used dictionaries.
reqDic = self.manager.requiredMethodsDic
adaptDic = self.manager.adaptMethodsDic
myDic = self.prototypeDic
# Build the list of obsolete methods.
self.obsoleteMethods = []
for p in myDic:
if not reqDic.has_key(p):
self.obsoleteMethods.append(p)
# Build the list of missing methods and the list of implemented
# required methods.
self.missingMethods = []
self.implementedMethods = []
for p in reqDic:
if myDic.has_key(p):
self.implementedMethods.append(p)
else:
self.missingMethods.append(p)
# Set the least important note first if the translator is decoded.
# If yes, then it means that the implementation should be switched
# to UTF-8 later (suggestion).
self.isDecodedTranslator = self.classId in self.manager.decodedTranslators
if self.isDecodedTranslator:
self.note = 'Reimplementation using UTF-8 suggested.'
# Check whether adapter must be used or suggest the newest one.
# Change the status and set the note accordingly.
if self.baseClassId != 'Translator':
if not self.missingMethods:
self.note = 'Change the base class to Translator.'
self.status = ''
self.readableStatus = 'up-to-date'
elif self.baseClassId != 'TranslatorEnglish':
# The translator uses some of the adapters.
# Look at the missing methods and check what adapter
# implements them. Remember the one with the lowest version.
adaptMinVersion = '9.9.99'
adaptMinClass = 'TranslatorAdapter_9_9_99'
for uniProto in self.missingMethods:
if adaptDic.has_key(uniProto):
version, cls = adaptDic[uniProto]
if version < adaptMinVersion:
adaptMinVersion = version
adaptMinClass = cls
# Test against the current status -- preserve the self.status.
# Possibly, the translator implements enough methods to
# use some newer adapter.
status = self.status
# If the version of the used adapter is smaller than
# the required, set the note and update the status as if
# the newer adapter was used.
if adaptMinVersion > status:
self.note = 'Change the base class to %s.' % adaptMinClass
self.status = adaptMinVersion
self.adaptMinClass = adaptMinClass
self.readableStatus = adaptMinVersion # simplified
# If everything seems OK, some explicit warning flags still could
# be set.
if not self.note and self.status == '' and \
(self.translateMeFlag or self.txtMAX_DOT_GRAPH_HEIGHT_flag):
self.note = ''
if self.translateMeFlag:
self.note += 'The "%s" found in a comment.' % self.translateMeText
if self.note != '':
self.note += '\n\t\t'
if self.txtMAX_DOT_GRAPH_HEIGHT_flag:
self.note += 'The MAX_DOT_GRAPH_HEIGHT found in trLegendDocs()'
# If everything seems OK, but there are obsolete methods, set
# the note to clean-up source. This note will be used only when
# the previous code did not set another note (priority).
if not self.note and self.status == '' and self.obsoleteMethods:
self.note = 'Remove the obsolete methods (never used).'
def report(self, fout):
"""Returns the report part for the source as a multiline string.
No output for up-to-date translators without problem."""
# If there is nothing to report, return immediately.
if self.status == '' and not self.note:
return
# Report the number of not implemented methods.
fout.write('\n\n\n')
fout.write(self.classId + ' (' + self.baseClassId + ')')
percentImplemented = 100 # init
allNum = len(self.manager.requiredMethodsDic)
if self.missingMethods:
num = len(self.missingMethods)
percentImplemented = 100 * (allNum - num) / allNum
fout.write(' %d' % num)
fout.write(' method')
if num > 1:
fout.write('s')
fout.write(' to implement (%d %%)' % (100 * num / allNum))
fout.write('\n' + '-' * len(self.classId))
# Write the info about the implemented required methods.
fout.write('\n\n Implements %d' % len(self.implementedMethods))
fout.write(' of the required methods (%d %%).' % percentImplemented)
# Report the missing method, but only when it is not English-based
# translator.
if self.missingMethods and self.status != 'En':
fout.write('\n\n Missing methods (should be implemented):\n')
reqDic = self.manager.requiredMethodsDic
for p in self.missingMethods:
fout.write('\n ' + reqDic[p])
# Always report obsolete methods.
if self.obsoleteMethods:
fout.write('\n\n Obsolete methods (should be removed, never used):\n')
myDic = self.prototypeDic
for p in self.obsoleteMethods:
fout.write('\n ' + myDic[p])
# For English-based translator, report the implemented methods.
if self.status == 'En' and self.implementedMethods:
fout.write('\n\n This English-based translator implements ')
fout.write('the following methods:\n')
reqDic = self.manager.requiredMethodsDic
for p in self.implementedMethods:
fout.write('\n ' + reqDic[p])
def getmtime(self):
"""Returns the last modification time of the source file."""
assert(os.path.isfile(self.fname))
return os.path.getmtime(self.fname)
class TrManager:
"""Collects basic info and builds subordinate Transl objects."""
def __init__(self):
"""Determines paths, creates and initializes structures.
The arguments of the script may explicitly say what languages should
be processed. Write the two letter identifications that are used
for composing the source filenames, so...
python translator.py cz
this will process only translator_cz.h source.
"""
# Determine the path to the script and its name.
self.script = os.path.abspath(sys.argv[0])
self.script_path, self.script_name = os.path.split(self.script)
self.script_path = os.path.abspath(self.script_path)
# Determine the absolute path to the Doxygen's root subdirectory.
# If DOXYGEN environment variable is not found, the directory is
# determined from the path of the script.
doxy_default = os.path.join(self.script_path, '..')
self.doxy_path = os.path.abspath(os.getenv('DOXYGEN', doxy_default))
# Get the explicit arguments of the script.
self.script_argLst = sys.argv[1:]
# Build the path names based on the Doxygen's root knowledge.
self.doc_path = os.path.join(self.doxy_path, 'doc')
self.src_path = os.path.join(self.doxy_path, 'src')
# Create the empty dictionary for Transl object identitied by the
# class identifier of the translator.
self.__translDic = {}
# Create the None dictionary of required methods. The key is the
# unified prototype, the value is the full prototype. Set inside
# the self.__build().
self.requiredMethodsDic = None
# Create the empty dictionary that says what method is implemented
# by what adapter.
self.adaptMethodsDic = {}
# The last modification time will capture the modification of this
# script, of the translator.h, of the translator_adapter.h (see the
# self.__build() for the last two) of all the translator_xx.h files
# and of the template for generating the documentation. So, this
# time can be compared with modification time of the generated
# documentation to decide, whether the doc should be re-generated.
self.lastModificationTime = os.path.getmtime(self.script)
# Set the names of the translator report text file, of the template
# for generating "Internationalization" document, for the generated
# file itself, and for the maintainers list.
self.translatorReportFileName = 'translator_report.txt'
self.maintainersFileName = 'maintainers.txt'
self.languageTplFileName = 'language.tpl'
self.languageDocFileName = 'language.doc'
# The information about the maintainers will be stored
# in the dictionary with the following name.
self.__maintainersDic = None
# Define the other used structures and variables for information.
self.langLst = None # including English based
self.supportedLangReadableStr = None # coupled En-based as a note
self.numLang = None # excluding coupled En-based
self.doxVersion = None # Doxygen version
# Capture the knowledge about translators that are not implemented
# to use UTF-8 internally.
self.decodedTranslators = self.getDecodedTranslators()
# Build objects where each one is responsible for one translator.
self.__build()
def getDecodedTranslators(self):
"""Parses language.cpp to find what translators do not use UTF-8 yet"""
decodedTranslators = []
# Regular expression to detect the lines like
# theTranslator=new TranslatorDecoder(new TranslatorSwedish);
rex = re.compile(r'^\s*theTranslator\s*=\s*new\s+.*$')
# Regular expression to get the (optional) TranslatorDecoder and TranslatorXXX
rex2 = re.compile(r'\bTranslator\w+')
# Parse the lines in the specific source code.
f = open(os.path.join(self.src_path, 'language.cpp'), 'rU')
for line in f:
if rex.match(line):
lst = rex2.findall(line)
if lst[0] == 'TranslatorDecoder':
decodedTranslators.append(lst[1])
f.close()
# Display warning when all translator implementations were converted
# to UTF-8.
if len(decodedTranslators) == 0:
print 'This script should be updated. All translators do use UTF-8'
print 'internally. The TranslatorDecoder adapter should be removed'
print 'from the code and its usage should not be checked any more.'
return decodedTranslators
def __build(self):
"""Find the translator files and build the objects for translators."""
# The translator.h must exist (the Transl object will check it),
# create the object for it and let it build the dictionary of
# required methods.
tr = Transl(os.path.join(self.src_path, 'translator.h'), self)
self.requiredMethodsDic = tr.collectPureVirtualPrototypes()
tim = tr.getmtime()
if tim > self.lastModificationTime:
self.lastModificationTime = tim
# The translator_adapter.h must exist (the Transl object will check it),
# create the object for it and store the reference in the dictionary.
tr = Transl(os.path.join(self.src_path, 'translator_adapter.h'), self)
self.adaptMethodsDic = tr.collectAdapterPrototypes()
tim = tr.getmtime()
if tim > self.lastModificationTime:
self.lastModificationTime = tim
# Create the list of the filenames with language translator sources.
# If the explicit arguments of the script were typed, process only
# those files.
if self.script_argLst:
lst = ['translator_' + x + '.h' for x in self.script_argLst]
for fname in lst:
if not os.path.isfile(os.path.join(self.src_path, fname)):
sys.stderr.write("\a\nFile '%s' not found!\n" % fname)
sys.exit(1)
else:
lst = os.listdir(self.src_path)
lst = filter(lambda x: x[:11] == 'translator_'
and x[-2:] == '.h'
and x != 'translator_adapter.h', lst)
# Build the object for the translator_xx.h files, and process the
# content of the file. Then insert the object to the dictionary
# accessed via classId.
for fname in lst:
fullname = os.path.join(self.src_path, fname)
tr = Transl(fullname, self)
tr.processing()
assert(tr.classId != 'Translator')
self.__translDic[tr.classId] = tr
# Extract the global information of the processed info.
self.__extractProcessedInfo()
def __extractProcessedInfo(self):
"""Build lists and strings of the processed info."""
# Build the auxiliary list with strings compound of the status,
# readable form of the language, and classId.
statLst = []
for obj in self.__translDic.values():
assert(obj.classId != 'Translator')
s = obj.status + '|' + obj.langReadable + '|' + obj.classId
statLst.append(s)
# Sort the list and extract the object identifiers (classId's) for
# the up-to-date translators and English-based translators.
statLst.sort()
self.upToDateIdLst = [x.split('|')[2] for x in statLst if x[0] == '|']
self.EnBasedIdLst = [x.split('|')[2] for x in statLst if x[:2] == 'En']
# Reverse the list and extract the TranslatorAdapter based translators.
statLst.reverse()
self.adaptIdLst = [x.split('|')[2] for x in statLst if x[0].isdigit()]
# Build the list of tuples that contain (langReadable, obj).
# Sort it by readable name.
self.langLst = []
for obj in self.__translDic.values():
self.langLst.append((obj.langReadable, obj))
self.langLst.sort(lambda a, b: cmp(a[0], b[0]))
# Create the list with readable language names. If the language has
# also the English-based version, modify the item by appending
# the note. Number of the supported languages is equal to the length
# of the list.
langReadableLst = []
for name, obj in self.langLst:
if obj.status == 'En': continue
# Append the 'En' to the classId to possibly obtain the classId
# of the English-based object. If the object exists, modify the
# name for the readable list of supported languages.
classIdEn = obj.classId + 'En'
if self.__translDic.has_key(classIdEn):
name += ' (+En)'
# Append the result name of the language, possibly with note.
langReadableLst.append(name)
# Create the multiline string of readable language names,
# with punctuation, wrapped to paragraph.
if len(langReadableLst) == 1:
s = langReadableLst[0]
elif len(langReadableLst) == 2:
s = ' and '.join(langReadableLst)
else:
s = ', '.join(langReadableLst[:-1]) + ', and '
s += langReadableLst[-1]
self.supportedLangReadableStr = fill(s + '.')
# Find the number of the supported languages. The English based
# languages are not counted if the non-English based also exists.
self.numLang = len(self.langLst)
for name, obj in self.langLst:
if obj.status == 'En':
classId = obj.classId[:-2]
if self.__translDic.has_key(classId):
self.numLang -= 1 # the couple will be counted as one
# Extract the version of Doxygen.
f = open(os.path.join(self.doxy_path, 'VERSION'))
self.doxVersion = f.readline().strip()
f.close()
# Update the last modification time.
for tr in self.__translDic.values():
tim = tr.getmtime()
if tim > self.lastModificationTime:
self.lastModificationTime = tim
def __getNoTrSourceFilesLst(self):
"""Returns the list of sources to be checked.
All .cpp files and also .h files that do not declare or define
the translator methods are included in the list. The file names
are searched in doxygen/src directory.
"""
files = []
for item in os.listdir(self.src_path):
# Split the bare name to get the extension.
name, ext = os.path.splitext(item)
ext = ext.lower()
# Include only .cpp and .h files (case independent) and exclude
# the files where the checked identifiers are defined.
if ext == '.cpp' or (ext == '.h' and name.find('translator') == -1):
fname = os.path.join(self.src_path, item)
assert os.path.isfile(fname) # assumes no directory with the ext
files.append(fname) # full name
return files
def __removeUsedInFiles(self, fname, dic):
"""Removes items for method identifiers that are found in fname.
The method reads the content of the file as one string and searches
for all identifiers from dic. The identifiers that were found in
the file are removed from the dictionary.
Note: If more files is to be checked, the files where most items are
probably used should be checked first and the resulting reduced
dictionary should be used for checking the next files (speed up).
"""
lst_in = dic.keys() # identifiers to be searched for
# Read content of the file as one string.
assert os.path.isfile(fname)
f = open(fname)
cont = f.read()
f.close()
# Remove the items for identifiers that were found in the file.
while lst_in:
item = lst_in.pop(0)
if cont.find(item) != -1:
del dic[item]
def __checkForNotUsedTrMethods(self):
"""Returns the dictionary of not used translator methods.
The method can be called only after self.requiredMethodsDic has been
built. The stripped prototypes are the values, the method identifiers
are the keys.
"""
# Build the dictionary of the required method prototypes with
# method identifiers used as keys.
trdic = {}
for prototype in self.requiredMethodsDic.keys():
ri = prototype.split('(')[0]
identifier = ri.split()[1].strip()
trdic[identifier] = prototype
# Build the list of source files where translator method identifiers
# can be used.
files = self.__getNoTrSourceFilesLst()
# Loop through the files and reduce the dictionary of id -> proto.
for fname in files:
self.__removeUsedInFiles(fname, trdic)
# Return the dictionary of not used translator methods.
return trdic
def __emails(self, classId):
"""Returns the list of maintainer emails.
The method returns the list of e-mail adresses for the translator
class, but only the addresses that were not marked as [xxx]."""
lst = []
for m in self.__maintainersDic[classId]:
if not m[1].startswith('['):
email = m[1]
email = email.replace(' at ', '@') # Unmangle the mangled e-mail
email = email.replace(' dot ', '.')
lst.append(email)
return lst
def generateTranslatorReport(self):
"""Generates the translator report."""
output = os.path.join(self.doc_path, self.translatorReportFileName)
# Open the textual report file for the output.
f = open(output, 'w')
# Output the information about the version.
f.write('(' + self.doxVersion + ')\n\n')
# Output the information about the number of the supported languages
# and the list of the languages, or only the note about the explicitly
# given languages to process.
if self.script_argLst:
f.write('The report was generated for the following, explicitly')
f.write(' identified languages:\n\n')
f.write(self.supportedLangReadableStr + '\n\n')
else:
f.write('Doxygen supports the following ')
f.write(str(self.numLang))
f.write(' languages (sorted alphabetically):\n\n')
f.write(self.supportedLangReadableStr + '\n\n')
# Write the summary about the status of language translators (how
# many translators) are up-to-date, etc.
s = 'Of them, %d translators are up-to-date, ' % len(self.upToDateIdLst)
s += '%d translators are based on some adapter class, ' % len(self.adaptIdLst)
s += 'and %d are English based.' % len(self.EnBasedIdLst)
f.write(fill(s) + '\n\n')
# The e-mail addresses of the maintainers will be collected to
# the auxiliary file in the order of translator classes listed
# in the translator report.
fmail = open('mailto.txt', 'w')
# Write the list of up-to-date translator classes.
if self.upToDateIdLst:
s = '''The following translator classes are up-to-date (sorted
alphabetically). This means that they derive from the
Translator class and they implement all %d of the required
methods. Anyway, there still may be some details listed even
for them:'''
s = s % len(self.requiredMethodsDic)
f.write('-' * 70 + '\n')
f.write(fill(s) + '\n\n')
mailtoLst = []
for x in self.upToDateIdLst:
obj = self.__translDic[x]
f.write(' ' + obj.classId)
if obj.note:
f.write(' -- ' + obj.note)
f.write('\n')
mailtoLst.extend(self.__emails(obj.classId))
fmail.write('up-to-date\n')
fmail.write('; '.join(mailtoLst))
# Write the list of the adapter based classes. The very obsolete
# translators that derive from TranslatorEnglish are included.
if self.adaptIdLst:
s = '''The following translator classes need some maintenance
(the most obsolete at the end). The other info shows the
estimation of Doxygen version when the class was last
updated and number of methods that must be implemented to
become up-to-date:'''
f.write('\n' + '-' * 70 + '\n')
f.write(fill(s) + '\n\n')
# Find also whether some adapter classes may be removed.
adaptMinVersion = '9.9.99'
mailtoLst = []
numRequired = len(self.requiredMethodsDic)
for x in self.adaptIdLst:
obj = self.__translDic[x]
f.write(' %-30s' % obj.classId)
f.write(' %-6s' % obj.readableStatus)
numimpl = len(obj.missingMethods)
pluralS = ''
if numimpl > 1: pluralS = 's'
percent = 100 * numimpl / numRequired
f.write('\t%2d method%s to implement (%d %%)' % (
numimpl, pluralS, percent))
if obj.note:
f.write('\n\tNote: ' + obj.note + '\n')
f.write('\n')
mailtoLst.extend(self.__emails(obj.classId)) # to maintainer
# Check the level of required adapter classes.
if obj.status != '0.0.00' and obj.status < adaptMinVersion:
adaptMinVersion = obj.status
fmail.write('\n\ntranslator based\n')
fmail.write('; '.join(mailtoLst))
# Set the note if some old translator adapters are not needed
# any more. Do it only when the script is called without arguments,
# i.e. all languages were checked against the needed translator
# adapters.
if not self.script_argLst:
to_remove = {}
for version, adaptClassId in self.adaptMethodsDic.values():
if version < adaptMinVersion:
to_remove[adaptClassId] = True
if to_remove:
lst = to_remove.keys()
lst.sort()
plural = len(lst) > 1
note = 'Note: The adapter class'
if plural: note += 'es'
note += ' ' + ', '.join(lst)
if not plural:
note += ' is'
else:
note += ' are'
note += ' not used and can be removed.'
f.write('\n' + fill(note) + '\n')
# Write the list of the English-based classes.
if self.EnBasedIdLst:
s = '''The following translator classes derive directly from the
TranslatorEnglish. The class identifier has the suffix 'En'
that says that this is intentional. Usually, there is also
a non-English based version of the translator for
the language:'''
f.write('\n' + '-' * 70 + '\n')
f.write(fill(s) + '\n\n')
for x in self.EnBasedIdLst:
obj = self.__translDic[x]
f.write(' ' + obj.classId)
f.write('\timplements %d methods' % len(obj.implementedMethods))
if obj.note:
f.write(' -- ' + obj.note)
f.write('\n')
# Check for not used translator methods and generate warning if found.
# The check is rather time consuming, so it is not done when report
# is restricted to explicitly given language identifiers.
if not self.script_argLst:
dic = self.__checkForNotUsedTrMethods()
if dic:
s = '''WARNING: The following translator methods are declared
in the Translator class but their identifiers do not appear
in source files. The situation should be checked. The .cpp
files and .h files excluding the '*translator*' files
in doxygen/src directory were simply searched for occurrence
of the method identifiers:'''
f.write('\n' + '=' * 70 + '\n')
f.write(fill(s) + '\n\n')
keys = dic.keys()
keys.sort()
for key in keys:
f.write(' ' + dic[key] + '\n')
f.write('\n')
# Write the details for the translators.
f.write('\n' + '=' * 70)
f.write('\nDetails for translators (classes sorted alphabetically):\n')
cls = self.__translDic.keys()
cls.sort()
for c in cls:
obj = self.__translDic[c]
assert(obj.classId != 'Translator')
obj.report(f)
# Close the report file and the auxiliary file with e-mails.
f.close()
fmail.close()
def __loadMaintainers(self):
"""Load and process the file with the maintainers.
Fills the dictionary classId -> [(name, e-mail), ...]."""
fname = os.path.join(self.doc_path, self.maintainersFileName)
# Include the maintainers file to the group of files checked with
# respect to the modification time.
tim = os.path.getmtime(fname)
if tim > self.lastModificationTime:
self.lastModificationTime = tim
# Process the content of the maintainers file.
f = codecs.open(fname, 'r', 'utf-8')
inside = False # inside the record for the language
lineReady = True
classId = None
maintainersLst = None
self.__maintainersDic = {}
while lineReady:
line = f.readline() # next line
lineReady = line != '' # when eof, then line == ''
line = line.strip() # eof should also behave as separator
if line != u'' and line[0] == u'%': # skip the comment line
continue
if not inside: # if outside of the record
if line != u'': # should be language identifier
classId = line
maintainersLst = []
inside = True
# Otherwise skip empty line that do not act as separator.
else: # if inside the record
if line == u'': # separator found
inside = False
else:
# If it is the first maintainer, create the empty list.
if not self.__maintainersDic.has_key(classId):
self.__maintainersDic[classId] = []
# Split the information about the maintainer and append
# the tuple. The address may be prefixed '[unreachable]'
# or whatever '[xxx]'. This will be processed later.
lst = line.split(u':', 1)
assert(len(lst) == 2)
t = (lst[0].strip(), lst[1].strip())
self.__maintainersDic[classId].append(t)
f.close()
def generateLanguageDoc(self):
"""Checks the modtime of files and generates language.doc."""
self.__loadMaintainers()
# Check the last modification time of the template file. It is the
# last file from the group that decide whether the documentation
# should or should not be generated.
fTplName = os.path.join(self.doc_path, self.languageTplFileName)
tim = os.path.getmtime(fTplName)
if tim > self.lastModificationTime:
self.lastModificationTime = tim
# If the generated documentation exists and is newer than any of
# the source files from the group, do not generate it and quit
# quietly.
fDocName = os.path.join(self.doc_path, self.languageDocFileName)
if os.path.isfile(fDocName):
if os.path.getmtime(fDocName) > self.lastModificationTime:
return
# The document or does not exist or is older than some of the
# sources. It must be generated again.
#
# Read the template of the documentation, and remove the first
# attention lines.
f = codecs.open(fTplName, 'r', 'utf-8')
doctpl = f.read()
f.close()
pos = doctpl.find(u'/***')
assert pos != -1
doctpl = doctpl[pos:]
# Fill the tplDic by symbols that will be inserted into the
# document template.
tplDic = {}
s = u'Do not edit this file. It was generated by the %s script.' % self.script_name
tplDic['editnote'] = s
tplDic['doxVersion'] = self.doxVersion
tplDic['supportedLangReadableStr'] = self.supportedLangReadableStr
tplDic['translatorReportFileName'] = self.translatorReportFileName
ahref = u'<a href="../doc/' + self.translatorReportFileName
ahref += u'"\n><code>doxygen/doc/' + self.translatorReportFileName
ahref += u'</code></a>'
tplDic['translatorReportLink'] = ahref
tplDic['numLangStr'] = str(self.numLang)
# Define templates for HTML table parts of the documentation.
htmlTableTpl = u'''\
\\htmlonly
<table align="center" cellspacing="0" cellpadding="0" border="0">
<tr bgcolor="#000000">
<td>
<table cellspacing="1" cellpadding="2" border="0">
<tr bgcolor="#4040c0">
<td ><b><font size="+1" color="#ffffff"> Language </font></b></td>
<td ><b><font size="+1" color="#ffffff"> Maintainer </font></b></td>
<td ><b><font size="+1" color="#ffffff"> Contact address </font>
<font size="-2" color="#ffffff">(replace the at and dot)</font></b></td>
<td ><b><font size="+1" color="#ffffff"> Status </font></b></td>
</tr>
<!-- table content begin -->
%s
<!-- table content end -->
</table>
</td>
</tr>
</table>
\\endhtmlonly
'''
htmlTableTpl = dedent(htmlTableTpl)
htmlTrTpl = u'\n <tr bgcolor="#ffffff">%s\n </tr>'
htmlTdTpl = u'\n <td>%s</td>'
# Loop through transl objects in the order of sorted readable names
# and add generate the content of the HTML table.
trlst = []
for name, obj in self.langLst:
# Fill the table data elements for one row. The first element
# contains the readable name of the language.
lst = [ htmlTdTpl % obj.langReadable ]
# The next two elements contain the list of maintainers
# and the list of their mangled e-mails. For English-based
# translators that are coupled with the non-English based,
# insert the 'see' note.
mm = None # init -- maintainer
ee = None # init -- e-mail address
if obj.status == 'En':
# Check whether there is the coupled non-English.
classId = obj.classId[:-2]
if classId in self.__translDic:
lang = self.__translDic[classId].langReadable
mm = u'see the %s language' % lang
ee = u' '
if not mm and obj.classId in self.__maintainersDic:
# Build a string of names separated by the HTML break element.
# Special notes used instead of names are highlighted.
lm = []
for maintainer in self.__maintainersDic[obj.classId]:
name = maintainer[0]
if name.startswith(u'--'):
name = u'<span style="color: red; background-color: yellow">'\
+ name + u'</span>'
lm.append(name)
mm = u'<br/>'.join(lm)
# The marked adresses (they start with the mark '[unreachable]',
# '[resigned]', whatever '[xxx]') will not be displayed at all.
# Only the mark will be used instead.
rexMark = re.compile(ur'(?P<mark>\[.*?\])')
le = []
for maintainer in self.__maintainersDic[obj.classId]:
address = maintainer[1]
m = rexMark.search(address)
if m is not None:
address = u'<span style="color: brown">'\
+ m.group(u'mark') + u'</span>'
le.append(address)
ee = u'<br/>'.join(le)
# Append the maintainer and e-mail elements.
lst.append(htmlTdTpl % mm)
lst.append(htmlTdTpl % ee)
# The last element contains the readable form of the status.
lst.append(htmlTdTpl % obj.readableStatus)
# Join the table data to one table row.
trlst.append(htmlTrTpl % (''.join(lst)))
# Join the table rows and insert into the template.
htmlTable = htmlTableTpl % (''.join(trlst))
# Define templates for LaTeX table parts of the documentation.
latexTableTpl = ur'''
\latexonly
\footnotesize
\begin{longtable}{|l|l|l|l|}
\hline
{\bf Language} & {\bf Maintainer} & {\bf Contact address} & {\bf Status} \\
\hline
%s
\hline
\end{longtable}
\normalsize
\endlatexonly
'''
latexTableTpl = dedent(latexTableTpl)
latexLineTpl = u'\n' + r' %s & %s & {\tt\tiny %s} & %s \\'
# Loop through transl objects in the order of sorted readable names
# and add generate the content of the LaTeX table.
trlst = []
for name, obj in self.langLst:
# For LaTeX, more maintainers for the same language are
# placed on separate rows in the table. The line separator
# in the table is placed explicitly above the first
# maintainer. Prepare the arguments for the LaTeX row template.
maintainers = []
if self.__maintainersDic.has_key(obj.classId):
maintainers = self.__maintainersDic[obj.classId]
lang = obj.langReadable
maintainer = None # init
email = None # init
if obj.status == 'En':
# Check whether there is the coupled non-English.
classId = obj.classId[:-2]
if classId in self.__translDic:
langNE = self.__translDic[classId].langReadable
maintainer = u'see the %s language' % langNE
email = u'~'
if not maintainer and (obj.classId in self.__maintainersDic):
lm = [ m[0] for m in self.__maintainersDic[obj.classId] ]
maintainer = maintainers[0][0]
email = maintainers[0][1]
status = obj.readableStatus
# Use the template to produce the line of the table and insert
# the hline plus the constructed line into the table content.
# The underscore character must be escaped.
trlst.append(u'\n \\hline')
s = latexLineTpl % (lang, maintainer, email, status)
s = s.replace(u'_', u'\\_')
trlst.append(s)
# List the other maintainers for the language. Do not set
# lang and status for them.
lang = u'~'
status = u'~'
for m in maintainers[1:]:
maintainer = m[0]
email = m[1]
s = latexLineTpl % (lang, maintainer, email, status)
s = s.replace(u'_', u'\\_')
trlst.append(s)
# Join the table lines and insert into the template.
latexTable = latexTableTpl % (u''.join(trlst))
# Put the HTML and LaTeX parts together and define the dic item.
tplDic['informationTable'] = htmlTable + u'\n' + latexTable
# Insert the symbols into the document template and write it down.
f = codecs.open(fDocName, 'w', 'utf-8')
f.write(doctpl % tplDic)
f.close()
if __name__ == '__main__':
# Create the manager, build the transl objects, and parse the related
# sources.
trMan = TrManager()
# Generate the language.doc.
trMan.generateLanguageDoc()
# Generate the translator report.
trMan.generateTranslatorReport()
| TextusData/Mover | thirdparty/doxygen-1.8.4/doc/translator.py | Python | gpl-3.0 | 86,697 |
import hashlib
import logging
from datetime import datetime
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy import func, Column, Integer, String, DateTime
from sqlalchemy.exc import SQLAlchemyError
from political_data import PoliticalData
data = PoliticalData()
db = SQLAlchemy()
class Call(db.Model):
__tablename__ = 'calls'
id = Column(Integer, primary_key=True)
timestamp = Column(DateTime)
campaign_id = Column(String(32))
source_id = Column(String(64))
ak_id = Column(String(64))
member_id = Column(String(10)) # congress member sunlight identifier
target_name = Column(String(64))
target_number = Column(String(64))
# user attributes
user_id = Column(String(64)) # hashed phone number
zipcode = Column(String(5))
areacode = Column(String(3)) # first 3 digits of phone number
exchange = Column(String(3)) # next 3 digits of phone number
# twilio attributes
call_id = Column(String(40)) # twilio call ID
status = Column(String(25)) # twilio call status
duration = Column(Integer) # twilio call time in seconds
@classmethod
def hash_phone(cls, number):
"""
Takes a phone number and returns a 64 character string
"""
return hashlib.sha256(number).hexdigest()
def __init__(self, campaign_id, member_id, config=None, zipcode=None, phone_number=None,
call_id=None, source_id=None, ak_id=None, status='unknown', duration=0, target_name=None, target_number=None):
self.timestamp = datetime.now()
self.status = status
self.duration = duration
self.campaign_id = campaign_id
self.member_id = member_id
self.call_id = call_id
self.ak_id = ak_id
self.source_id = source_id
self.target_name = target_name
self.target_number = target_number
# Salted hash
if phone_number:
phone_number = phone_number.replace('-', '').replace('.', '')
self.user_id = self.hash_phone(phone_number + config['LOGGING_USER_ID_SALT'])
self.zipcode = zipcode
def __repr__(self):
return '<Call {}-{}-xxxx to {}>'.format(
self.areacode, self.exchange, self.member_id)
def get_current_phase(params, campaign):
return campaign.get('phases')[int(params['phase'])]
def log_call(params, campaign, config, request):
try:
i = int(params['callIndex'])
phase = get_current_phase(params, campaign)
if phase.get('method') == 'specific':
target_i = int(params['targetOrder'][i])
target = phase.get('targets')[target_i]
target_name = target['name']
target_number = target['number']
else:
member = [l for l in data.legislators
if l['bioguide_id'] == params['repIds'][i]][0]
target_number = member['phone']
target_name = unicode("{} {}".format(
member['firstname'], member['lastname']), 'utf8')
member_id = '?'
if params['repIds']:
member_id = params['repIds'][i]
elif params['targetOrder']:
member_id = params['targetOrder'][i]
kwds = {
'campaign_id': campaign['id'],
'config': config,
'member_id': member_id,
'ak_id': params['ak_id'],
'source_id': params['source_id'],
'zipcode': params['zipcode'],
'phone_number': params['userPhone'],
'call_id': request.values.get('CallSid', None),
'status': request.values.get('DialCallStatus', 'unknown'),
'duration': request.values.get('DialCallDuration', 0),
'target_name': target_name,
'target_number': target_number
}
db.session.add(Call(**kwds))
db.session.commit()
except SQLAlchemyError:
logging.error('Failed to log call:', exc_info=True)
def call_count(campaign_id):
try:
return (db.session.query(func.Count(Call.zipcode))
.filter(Call.campaign_id == campaign_id).all())[0][0]
except SQLAlchemyError:
logging.error('Failed to get call_count:', exc_info=True)
return 0
def aggregate_stats(campaign_id):
zipcodes = (db.session.query(Call.zipcode, func.Count(Call.zipcode))
.filter(Call.campaign_id == campaign_id)
.group_by(Call.zipcode).all())
reps = (db.session.query(Call.member_id, func.Count(Call.member_id))
.filter(Call.campaign_id == campaign_id)
.group_by(Call.member_id).all())
return {
'campaign': campaign_id,
'calls': {
'zipcodes': dict(tuple(z) for z in zipcodes),
'reps': dict(tuple(r) for r in reps)
}
}
| credo-action/call-congress-for-credo | models.py | Python | agpl-3.0 | 4,827 |
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Get rietveld stats about the review you done, or forgot to do.
Example:
- my_reviews.py -r [email protected] -Q for stats for last quarter.
"""
import datetime
import math
import optparse
import os
import sys
import rietveld
def username(email):
"""Keeps the username of an email address."""
return email.split('@', 1)[0]
def to_datetime(string):
"""Load UTC time as a string into a datetime object."""
try:
# Format is 2011-07-05 01:26:12.084316
return datetime.datetime.strptime(
string.split('.', 1)[0], '%Y-%m-%d %H:%M:%S')
except ValueError:
return datetime.datetime.strptime(string, '%Y-%m-%d')
def to_time(seconds):
"""Convert a number of seconds into human readable compact string."""
prefix = ''
if seconds < 0:
prefix = '-'
seconds *= -1
minutes = math.floor(seconds / 60)
seconds -= minutes * 60
hours = math.floor(minutes / 60)
minutes -= hours * 60
days = math.floor(hours / 24)
hours -= days * 24
out = []
if days > 0:
out.append('%dd' % days)
if hours > 0 or days > 0:
out.append('%02dh' % hours)
if minutes > 0 or hours > 0 or days > 0:
out.append('%02dm' % minutes)
if seconds > 0 and not out:
# Skip seconds unless there's only seconds.
out.append('%02ds' % seconds)
return prefix + ''.join(out)
class Stats(object):
  """Accumulates per-reviewer code review statistics.

  Counters are filled in by the _process_issue_* helpers; the derived
  percentages and day count are computed by finalize().
  """
  def __init__(self):
    self.total = 0
    self.actually_reviewed = 0
    self.latencies = []
    self.lgtms = 0
    self.multiple_lgtms = 0
    self.drive_by = 0
    self.not_requested = 0
    self.self_review = 0
    self.percent_lgtm = 0.
    self.percent_drive_by = 0.
    self.percent_not_requested = 0.
    self.days = 0

  @property
  def average_latency(self):
    """Mean recorded latency in seconds; 0 when nothing was recorded."""
    if not self.latencies:
      return 0
    return sum(self.latencies) / float(len(self.latencies))

  @property
  def median_latency(self):
    """Median recorded latency in seconds; 0 when nothing was recorded."""
    if not self.latencies:
      return 0
    length = len(self.latencies)
    latencies = sorted(self.latencies)
    # Use floor division: on python 3, "length / 2" is a float and breaks
    # list indexing (on python 2, // and / are identical for ints).
    if (length & 1) == 0:
      return (latencies[length // 2] + latencies[length // 2 - 1]) / 2.
    else:
      return latencies[length // 2]

  @property
  def percent_done(self):
    """Percentage of received review requests that were actually reviewed."""
    if not self.total:
      return 0
    return self.actually_reviewed * 100. / self.total

  @property
  def review_per_day(self):
    """Average number of review requests received per day."""
    if not self.days:
      return 0
    return self.total * 1. / self.days

  @property
  def review_done_per_day(self):
    """Average number of reviews actually done per day."""
    if not self.days:
      return 0
    return self.actually_reviewed * 1. / self.days

  def finalize(self, first_day, last_day):
    """Computes the derived percentages and the day count.

    first_day and last_day are 'YYYY-MM-DD' strings, or both None when
    there were no issues at all.
    """
    if self.actually_reviewed:
      assert self.actually_reviewed > 0
      self.percent_lgtm = (self.lgtms * 100. / self.actually_reviewed)
      self.percent_drive_by = (self.drive_by * 100. / self.actually_reviewed)
      self.percent_not_requested = (
          self.not_requested * 100. / self.actually_reviewed)
    assert bool(first_day) == bool(last_day)
    if first_day and last_day:
      assert first_day < last_day
      self.days = (to_datetime(last_day) - to_datetime(first_day)).days + 1
      assert self.days > 0
def _process_issue_lgtms(issue, reviewer, stats):
"""Calculates LGTMs stats."""
stats.actually_reviewed += 1
reviewer_lgtms = len([
msg for msg in issue['messages']
if msg['approval'] and msg['sender'] == reviewer])
if reviewer_lgtms > 1:
stats.multiple_lgtms += 1
return ' X '
if reviewer_lgtms:
stats.lgtms += 1
return ' x '
else:
return ' o '
def _process_issue_latency(issue, reviewer, stats):
  """Calculates latency for an issue that was actually reviewed.

  Returns the human readable string shown in the latency column and
  updates |stats| as a side effect. Assumes _process_issue() already ran,
  so message senders are bare usernames and dates are datetime objects.
  """
  from_owner = [
      msg for msg in issue['messages'] if msg['sender'] == issue['owner_email']
  ]
  if not from_owner:
    # Probably requested by email.
    stats.not_requested += 1
    return '<no rqst sent>'
  first_msg_from_owner = None
  latency = None
  received = False
  # Walk the messages chronologically, tracking the owner's first message
  # and whether the reviewer was ever on the recipients list.
  for index, msg in enumerate(issue['messages']):
    if not first_msg_from_owner and msg['sender'] == issue['owner_email']:
      first_msg_from_owner = msg
    if index and not received and msg['sender'] == reviewer:
      # Not first email, reviewer never received one, reviewer sent a message:
      # the reviewer jumped in without being asked.
      stats.drive_by += 1
      return '<drive-by>'
    received |= reviewer in msg['recipients']
    if first_msg_from_owner and msg['sender'] == reviewer:
      # Latency is measured from the owner's first message to the
      # reviewer's first message after it.
      delta = msg['date'] - first_msg_from_owner['date']
      latency = delta.seconds + delta.days * 24 * 3600
      break
  if latency is None:
    stats.not_requested += 1
    return '<no rqst sent>'
  if latency > 0:
    stats.latencies.append(latency)
  else:
    # A zero (same-second) latency is not recorded as a latency sample;
    # it is counted as not requested, but the time string is still shown.
    stats.not_requested += 1
  return to_time(latency)
def _process_issue(issue):
  """Preprocesses the issue to simplify the remaining code."""
  # Strip every email address down to its bare username.
  issue['owner_email'] = username(issue['owner_email'])
  reviewers = set(username(r) for r in issue['reviewers'])
  # By default, hide commit-bot.
  reviewers.discard('commit-bot')
  issue['reviewers'] = reviewers
  for msg in issue['messages']:
    msg['sender'] = username(msg['sender'])
    msg['recipients'] = [username(r) for r in msg['recipients']]
    # Convert all times to datetime instances.
    msg['date'] = to_datetime(msg['date'])
  # Make sure messages are processed in chronological order.
  issue['messages'].sort(key=lambda m: m['date'])
def print_issue(issue, reviewer, stats):
  """Processes an issue and prints one line of stats about it."""
  stats.total += 1
  _process_issue(issue)
  if issue['owner_email'] == reviewer:
    # The reviewer is the author; latency and LGTM stats do not apply.
    stats.self_review += 1
    latency = '<self review>'
    reviewed = ''
  elif any(msg['sender'] == reviewer for msg in issue['messages']):
    reviewed = _process_issue_lgtms(issue, reviewer, stats)
    latency = _process_issue_latency(issue, reviewer, stats)
  else:
    # Reviewer was listed on the issue but never commented.
    latency = 'N/A'
    reviewed = ''
  # More information is available, print issue.keys() to see them.
  print '%7d %10s %3s %14s %-15s %s' % (
      issue['issue'],
      issue['created'][:10],
      reviewed,
      latency,
      issue['owner_email'],
      ', '.join(sorted(issue['reviewers'])))
def print_reviews(reviewer, created_after, created_before, instance_url):
  """Prints issues |reviewer| received and potentially reviewed.

  Queries the Rietveld instance for every issue in the date range, prints
  one line per issue, then a summary of the accumulated statistics.
  """
  remote = rietveld.Rietveld(instance_url, None, None)
  # The stats we gather. Feel free to send me a CL to get more stats.
  stats = Stats()
  # Column sizes need to match print_issue() output.
  print >> sys.stderr, (
      'Issue Creation Did Latency Owner Reviewers')
  # See def search() in rietveld.py to see all the filters you can use.
  issues = []
  for issue in remote.search(
      reviewer=reviewer,
      created_after=created_after,
      created_before=created_before,
      with_messages=True):
    issues.append(issue)
    print_issue(issue, username(reviewer), stats)
  # first_day/last_day bound the actual issues seen, not the query range.
  issues.sort(key=lambda x: x['created'])
  first_day = None
  last_day = None
  if issues:
    first_day = issues[0]['created'][:10]
    last_day = issues[-1]['created'][:10]
  stats.finalize(first_day, last_day)
  print >> sys.stderr, (
      '%s reviewed %d issues out of %d (%1.1f%%). %d were self-review.' %
      (reviewer, stats.actually_reviewed, stats.total, stats.percent_done,
       stats.self_review))
  print >> sys.stderr, (
      '%4.1f review request/day during %3d days (%4.1f r/d done).' % (
          stats.review_per_day, stats.days, stats.review_done_per_day))
  print >> sys.stderr, (
      '%4d were drive-bys (%5.1f%% of reviews done).' % (
          stats.drive_by, stats.percent_drive_by))
  print >> sys.stderr, (
      '%4d were requested over IM or irc (%5.1f%% of reviews done).' % (
          stats.not_requested, stats.percent_not_requested))
  print >> sys.stderr, (
      ('%4d issues LGTM\'d (%5.1f%% of reviews done),'
       ' gave multiple LGTMs on %d issues.') % (
          stats.lgtms, stats.percent_lgtm, stats.multiple_lgtms))
  print >> sys.stderr, (
      'Average latency from request to first comment is %s.' %
      to_time(stats.average_latency))
  print >> sys.stderr, (
      'Median latency from request to first comment is %s.' %
      to_time(stats.median_latency))
def print_count(reviewer, created_after, created_before, instance_url):
  """Prints the number of issues |reviewer| was a reviewer on in the range."""
  remote = rietveld.Rietveld(instance_url, None, None)
  # keys_only asks the server for issue ids only, which is much cheaper
  # than fetching full issue payloads just to count them.
  print len(list(remote.search(
      reviewer=reviewer,
      created_after=created_after,
      created_before=created_before,
      keys_only=True)))
def get_previous_quarter(today):
  """There are four quarters, 01-03, 04-06, 07-09, 10-12.

  If today is in the last month of a quarter, assume it's the current quarter
  that is requested. Returns (begin, end) as 'YYYY-MM-01' strings.
  """
  end_year = today.year
  # Month of the quarter boundary that ends the reported range. The last
  # month of a quarter maps to the *next* boundary (e.g. March -> April),
  # which is what makes the current quarter be reported in that case.
  end_month = today.month - (today.month % 3) + 1
  if end_month > 12:
    end_year, end_month = end_year + 1, end_month - 12
  elif end_month <= 0:
    end_year, end_month = end_year - 1, end_month + 12
  # The range starts exactly one quarter (three months) earlier.
  begin_year, begin_month = end_year, end_month - 3
  if begin_month <= 0:
    begin_year, begin_month = begin_year - 1, begin_month + 12
  return ('%d-%02d-01' % (begin_year, begin_month),
          '%d-%02d-01' % (end_year, end_month))
def main():
  """Parses flags, then dispatches to print_count() or print_reviews()."""
  # Silence upload.py.
  rietveld.upload.verbosity = 0
  today = datetime.date.today()
  begin, end = get_previous_quarter(today)
  # The module docstring becomes the --help description.
  parser = optparse.OptionParser(description=sys.modules[__name__].__doc__)
  parser.add_option(
      '--count', action='store_true',
      help='Just count instead of printing individual issues')
  parser.add_option(
      '-r', '--reviewer', metavar='<email>',
      default=os.environ.get('EMAIL_ADDRESS'),
      help='Filter on issue reviewer, default=%default')
  parser.add_option(
      '-b', '--begin', metavar='<date>',
      help='Filter issues created after the date')
  parser.add_option(
      '-e', '--end', metavar='<date>',
      help='Filter issues created before the date')
  parser.add_option(
      '-Q', '--last_quarter', action='store_true',
      help='Use last quarter\'s dates, e.g. %s to %s' % (
        begin, end))
  parser.add_option(
      '-i', '--instance_url', metavar='<host>',
      default='http://codereview.chromium.org',
      help='Host to use, default is %default')
  # Remove description formatting
  parser.format_description = (
      lambda _: parser.description) # pylint: disable=E1101
  options, args = parser.parse_args()
  if args:
    parser.error('Args unsupported')
  if not options.reviewer:
    parser.error('$EMAIL_ADDRESS is not set, please use -r')
  print >> sys.stderr, 'Searching for reviews by %s' % options.reviewer
  if options.last_quarter:
    # -Q overrides any explicit -b/-e values.
    options.begin = begin
    options.end = end
    print >> sys.stderr, 'Using range %s to %s' % (
        options.begin, options.end)
  if options.count:
    print_count(
        options.reviewer,
        options.begin,
        options.end,
        options.instance_url)
  else:
    print_reviews(
        options.reviewer,
        options.begin,
        options.end,
        options.instance_url)
  return 0
# Propagate main()'s return value as the process exit code.
if __name__ == '__main__':
  sys.exit(main())
| coreos/depot_tools | my_reviews.py | Python | bsd-3-clause | 11,079 |
################################
# These variables are overwritten by Zenoss when the ZenPack is exported
# or saved. Do not modify them directly here.
# NB: PACKAGES is deprecated
NAME = "ZenPacks.example.Techniques"
VERSION = "1.4.1"
AUTHOR = "Chet Luther"
LICENSE = ""
NAMESPACE_PACKAGES = ['ZenPacks', 'ZenPacks.example']
PACKAGES = ['ZenPacks', 'ZenPacks.example', 'ZenPacks.example.Techniques']
INSTALL_REQUIRES = []  # other ZenPacks / python modules this pack depends on
COMPAT_ZENOSS_VERS = ">=2.3"  # Zenoss versions this ZenPack works with
PREV_ZENPACK_NAME = ""  # set when this ZenPack was renamed (see setup() below)
# STOP_REPLACEMENTS
################################
# Zenoss will not overwrite any changes you make below here.
from setuptools import setup, find_packages
setup(
    # This ZenPack metadata should usually be edited with the Zenoss
    # ZenPack edit page. Whenever the edit page is submitted it will
    # overwrite the values below (the ones it knows about) with new values.
    name = NAME,
    version = VERSION,
    author = AUTHOR,
    license = LICENSE,
    # This is the version spec which indicates what versions of Zenoss
    # this ZenPack is compatible with
    compatZenossVers = COMPAT_ZENOSS_VERS,
    # previousZenPackName is a facility for telling Zenoss that the name
    # of this ZenPack has changed. If no ZenPack with the current name is
    # installed then a zenpack of this name if installed will be upgraded.
    prevZenPackName = PREV_ZENPACK_NAME,
    # Indicate to setuptools which namespace packages the zenpack
    # participates in
    namespace_packages = NAMESPACE_PACKAGES,
    # Tell setuptools what packages this zenpack provides.
    packages = find_packages(),
    # Tell setuptools to figure out for itself which files to include
    # in the binary egg when it is built.
    include_package_data = True,
    # The MANIFEST.in file is the recommended way of including additional files
    # in your ZenPack. package_data is another.
    #package_data = {}
    # Ship the copyright and license files alongside the package.
    package_data = {
        '':['../COPYRIGHT.txt','../LICENSE.txt'],
    },
    # Indicate dependencies on other python modules or ZenPacks. This line
    # is modified by zenoss when the ZenPack edit page is submitted. Zenoss
    # tries to put add/delete the names it manages at the beginning of this
    # list, so any manual additions should be added to the end. Things will
    # go poorly if this line is broken into multiple lines or modified to
    # dramatically.
    install_requires = INSTALL_REQUIRES,
    # Every ZenPack egg must define exactly one zenoss.zenpacks entry point
    # of this form.
    entry_points = {
        'zenoss.zenpacks': '%s = %s' % (NAME, NAME),
    },
    # All ZenPack eggs must be installed in unzipped form.
    zip_safe = False,
)
| anksp21/Community-Zenpacks | ZenPacks.example.Techniques/setup.py | Python | gpl-2.0 | 2,673 |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Compute API documentation build configuration file
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['openstackdocstheme']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Compute API Guide'
bug_tag = u'api-guide'
repository_name = 'openstack/nova'
bug_project = 'nova'
# Must set this variable to include year, month, day, hours, and minutes.
# NOTE(review): html_last_updated_fmt is assigned again, with the same
# value, in the HTML output section later in this file; one of the two
# assignments is redundant.
html_last_updated_fmt = '%Y-%m-%d %H:%M'
copyright = u'2015, OpenStack contributors'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2.1.0'
# The full version, including alpha/beta/rc tags.
release = '2.1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'openstackdocs'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = [openstackdocstheme.get_html_theme_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# NOTE(review): duplicates the identical assignment made in the general
# configuration section earlier in this file; harmless, but only one is
# needed.
html_last_updated_fmt = '%Y-%m-%d %H:%M'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'compute-api-guide'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    # 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
    ('index', 'ComputeAPI.tex', u'Compute API Documentation',
     u'OpenStack contributors', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'computeapi', u'Compute API Documentation',
     [u'OpenStack contributors'], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
    ('index', 'ComputeAPIGuide', u'Compute API Guide',
     u'OpenStack contributors', 'APIGuide',
     'This guide teaches OpenStack Compute service users concepts about '
     'managing resources in an OpenStack cloud with the Compute API.',
     'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
# -- Options for Internationalization output ------------------------------
locale_dirs = ['locale/']
# -- Options for PDF output --------------------------------------------------
# NOTE(review): pdf_documents is presumably consumed by the rst2pdf builder
# when it is enabled — confirm that builder is still in use.
pdf_documents = [
    ('index', u'ComputeAPIGuide', u'Compute API Guide', u'OpenStack '
     'contributors')
]
# -- Options for openstackdocstheme -------------------------------------------
openstack_projects = [
    'nova',
]
| gooddata/openstack-nova | api-guide/source/conf.py | Python | apache-2.0 | 9,312 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import re
import time
from openerp import api, fields as fields2
from openerp import tools
from openerp.osv import fields, osv
from openerp.tools import float_round, float_is_zero, float_compare
from openerp.tools.translate import _
CURRENCY_DISPLAY_PATTERN = re.compile(r'(\w+)\s*(?:\((.*)\))?')
class res_currency(osv.osv):
    """Currency model: ISO code, symbol, rounding rules and exchange rates."""

    def _current_rate(self, cr, uid, ids, name, arg, context=None):
        """Function-field getter for 'rate'; raises when no rate is defined."""
        return self._get_current_rate(cr, uid, ids, context=context)

    def _current_rate_silent(self, cr, uid, ids, name, arg, context=None):
        """Function-field getter for 'rate_silent'; returns 0 instead of raising."""
        return self._get_current_rate(cr, uid, ids, raise_on_no_rate=False, context=context)

    def _get_current_rate(self, cr, uid, ids, raise_on_no_rate=True, context=None):
        """Return {currency_id: rate}, using the most recent rate dated on or
        before context['date'] (today by default).

        :param raise_on_no_rate: when True, a missing rate raises; when
                                 False, 0 is returned for that currency.
        """
        if context is None:
            context = {}
        res = {}
        date = context.get('date') or time.strftime('%Y-%m-%d')
        for id in ids:
            cr.execute('SELECT rate FROM res_currency_rate '
                       'WHERE currency_id = %s '
                       'AND name <= %s '
                       'ORDER BY name desc LIMIT 1',
                       (id, date))
            if cr.rowcount:
                res[id] = cr.fetchone()[0]
            elif not raise_on_no_rate:
                res[id] = 0
            else:
                currency = self.browse(cr, uid, id, context=context)
                # Interpolate *after* translating: _() must receive the
                # untranslated template so the translation lookup can
                # succeed. The previous code formatted the string first,
                # which defeated translation (output is unchanged when no
                # translation exists).
                raise osv.except_osv(_('Error!'), _("No currency rate associated for currency '%s' for the given period") % currency.name)
        return res

    _name = "res.currency"
    _description = "Currency"
    _columns = {
        # Note: 'code' column was removed as of v6.0, the 'name' should now hold the ISO code.
        'name': fields.char('Currency', size=3, required=True, help="Currency Code (ISO 4217)"),
        'symbol': fields.char('Symbol', size=4, help="Currency sign, to be used when printing amounts."),
        'rate': fields.function(_current_rate, string='Current Rate', digits=(12,6),
            help='The rate of the currency to the currency of rate 1.'),
        # Do not use for computation ! Same as rate field with silent failing
        'rate_silent': fields.function(_current_rate_silent, string='Current Rate', digits=(12,6),
            help='The rate of the currency to the currency of rate 1 (0 if no rate defined).'),
        'rate_ids': fields.one2many('res.currency.rate', 'currency_id', 'Rates'),
        'accuracy': fields.integer('Computational Accuracy'),
        'rounding': fields.float('Rounding Factor', digits=(12,6)),
        'active': fields.boolean('Active'),
        'company_id':fields.many2one('res.company', 'Company'),
        'base': fields.boolean('Base'),
        'position': fields.selection([('after','After Amount'),('before','Before Amount')], 'Symbol Position', help="Determines where the currency symbol should be placed after or before the amount.")
    }
    _defaults = {
        'active': 1,
        'position' : 'after',
        'rounding': 0.01,
        'accuracy': 4,
        'company_id': False,
    }
    _sql_constraints = [
        # this constraint does not cover all cases due to SQL NULL handling for company_id,
        # so it is complemented with a unique index (see below). The constraint and index
        # share the same prefix so that IntegrityError triggered by the index will be caught
        # and reported to the user with the constraint's error message.
        ('unique_name_company_id', 'unique (name, company_id)', 'The currency code must be unique per company!'),
    ]
    _order = "name"

    def init(self, cr):
        """Create the functional unique index complementing the SQL constraint."""
        # CONSTRAINT/UNIQUE INDEX on (name,company_id)
        # /!\ The unique constraint 'unique_name_company_id' is not sufficient, because SQL92
        # only support field names in constraint definitions, and we need a function here:
        # we need to special-case company_id to treat all NULL company_id as equal, otherwise
        # we would allow duplicate "global" currencies (all having company_id == NULL)
        cr.execute("""SELECT indexname FROM pg_indexes WHERE indexname = 'res_currency_unique_name_company_id_idx'""")
        if not cr.fetchone():
            cr.execute("""CREATE UNIQUE INDEX res_currency_unique_name_company_id_idx
                          ON res_currency
                          (name, (COALESCE(company_id,-1)))""")

    # Date of the most recent rate (rate_ids is ordered newest first by the
    # res.currency.rate model's _order).
    date = fields2.Date(compute='compute_date')

    @api.one
    @api.depends('rate_ids.name')
    def compute_date(self):
        """Compute 'date' as the date of the newest rate record (if any)."""
        self.date = self.rate_ids[:1].name

    def name_search(self, cr, user, name='', args=None, operator='ilike', context=None, limit=100):
        """Extend name_search so display names like "EUR (€)" still match
        by falling back to the bare code extracted from the pattern."""
        if not args:
            args = []
        results = super(res_currency,self)\
            .name_search(cr, user, name, args, operator=operator, context=context, limit=limit)
        if not results:
            name_match = CURRENCY_DISPLAY_PATTERN.match(name)
            if name_match:
                results = super(res_currency,self)\
                    .name_search(cr, user, name_match.group(1), args, operator=operator, context=context, limit=limit)
        return results

    def name_get(self, cr, uid, ids, context=None):
        """Display each currency by its ISO code ('name')."""
        if not ids:
            return []
        if isinstance(ids, (int, long)):
            ids = [ids]
        reads = self.read(cr, uid, ids, ['name','symbol'], context=context, load='_classic_write')
        return [(x['id'], tools.ustr(x['name'])) for x in reads]

    @api.v8
    def round(self, amount):
        """ Return `amount` rounded according to currency `self`. """
        return float_round(amount, precision_rounding=self.rounding)

    @api.v7
    def round(self, cr, uid, currency, amount):
        """Return ``amount`` rounded according to ``currency``'s
        rounding rules.

        :param Record currency: currency for which we are rounding
        :param float amount: the amount to round
        :return: rounded float
        """
        return float_round(amount, precision_rounding=currency.rounding)

    @api.v8
    def compare_amounts(self, amount1, amount2):
        """ Compare `amount1` and `amount2` after rounding them according to
        `self`'s precision. An amount is considered lower/greater than
        another amount if their rounded value is different. This is not the
        same as having a non-zero difference!

        For example 1.432 and 1.431 are equal at 2 digits precision, so this
        method would return 0. However 0.006 and 0.002 are considered
        different (returns 1) because they respectively round to 0.01 and
        0.0, even though 0.006-0.002 = 0.004 which would be considered zero
        at 2 digits precision.
        """
        return float_compare(amount1, amount2, precision_rounding=self.rounding)

    @api.v7
    def compare_amounts(self, cr, uid, currency, amount1, amount2):
        """Compare ``amount1`` and ``amount2`` after rounding them according to the
        given currency's precision..
        An amount is considered lower/greater than another amount if their rounded
        value is different. This is not the same as having a non-zero difference!
        For example 1.432 and 1.431 are equal at 2 digits precision,
        so this method would return 0.
        However 0.006 and 0.002 are considered different (returns 1) because
        they respectively round to 0.01 and 0.0, even though
        0.006-0.002 = 0.004 which would be considered zero at 2 digits precision.

        :param Record currency: currency for which we are rounding
        :param float amount1: first amount to compare
        :param float amount2: second amount to compare
        :return: (resp.) -1, 0 or 1, if ``amount1`` is (resp.) lower than,
                 equal to, or greater than ``amount2``, according to
                 ``currency``'s rounding.
        """
        return float_compare(amount1, amount2, precision_rounding=currency.rounding)

    @api.v8
    def is_zero(self, amount):
        """ Return true if `amount` is small enough to be treated as zero
        according to currency `self`'s rounding rules.

        Warning: ``is_zero(amount1-amount2)`` is not always equivalent to
        ``compare_amounts(amount1,amount2) == 0``, as the former will round
        after computing the difference, while the latter will round before,
        giving different results, e.g., 0.006 and 0.002 at 2 digits precision.
        """
        return float_is_zero(amount, precision_rounding=self.rounding)

    @api.v7
    def is_zero(self, cr, uid, currency, amount):
        """Returns true if ``amount`` is small enough to be treated as
        zero according to ``currency``'s rounding rules.

        Warning: ``is_zero(amount1-amount2)`` is not always equivalent to
        ``compare_amounts(amount1,amount2) == 0``, as the former will round after
        computing the difference, while the latter will round before, giving
        different results for e.g. 0.006 and 0.002 at 2 digits precision.

        :param Record currency: currency for which we are rounding
        :param float amount: amount to compare with currency's zero
        """
        return float_is_zero(amount, precision_rounding=currency.rounding)

    def _get_conversion_rate(self, cr, uid, from_currency, to_currency, context=None):
        """Return the multiplicative rate converting from_currency amounts
        into to_currency amounts; raises when either rate is missing (0)."""
        if context is None:
            context = {}
        ctx = context.copy()
        from_currency = self.browse(cr, uid, from_currency.id, context=ctx)
        to_currency = self.browse(cr, uid, to_currency.id, context=ctx)
        if from_currency.rate == 0 or to_currency.rate == 0:
            date = context.get('date', time.strftime('%Y-%m-%d'))
            if from_currency.rate == 0:
                currency_symbol = from_currency.symbol
            else:
                currency_symbol = to_currency.symbol
            raise osv.except_osv(_('Error'), _('No rate found \n' \
                    'for the currency: %s \n' \
                    'at the date: %s') % (currency_symbol, date))
        return to_currency.rate/from_currency.rate

    def _compute(self, cr, uid, from_currency, to_currency, from_amount, round=True, context=None):
        """Convert from_amount between two browse records, optionally
        rounding the result to the target currency's precision."""
        if (to_currency.id == from_currency.id):
            if round:
                return self.round(cr, uid, to_currency, from_amount)
            else:
                return from_amount
        else:
            rate = self._get_conversion_rate(cr, uid, from_currency, to_currency, context=context)
            if round:
                return self.round(cr, uid, to_currency, from_amount * rate)
            else:
                return from_amount * rate

    @api.v7
    def compute(self, cr, uid, from_currency_id, to_currency_id, from_amount,
                round=True, context=None):
        """Convert ``from_amount`` between two currencies given by id; a
        missing id on either side defaults to the other one."""
        context = context or {}
        if not from_currency_id:
            from_currency_id = to_currency_id
        if not to_currency_id:
            to_currency_id = from_currency_id
        xc = self.browse(cr, uid, [from_currency_id,to_currency_id], context=context)
        from_currency = (xc[0].id == from_currency_id and xc[0]) or xc[1]
        to_currency = (xc[0].id == to_currency_id and xc[0]) or xc[1]
        return self._compute(cr, uid, from_currency, to_currency, from_amount, round, context)

    @api.v8
    def compute(self, from_amount, to_currency, round=True):
        """ Convert `from_amount` from currency `self` to `to_currency`. """
        assert self, "compute from unknown currency"
        assert to_currency, "compute to unknown currency"
        # apply conversion rate
        if self == to_currency:
            to_amount = from_amount
        else:
            to_amount = from_amount * self._get_conversion_rate(self, to_currency)
        # apply rounding
        return to_currency.round(to_amount) if round else to_amount
class res_currency_rate(osv.osv):
    _name = "res.currency.rate"
    _description = "Currency Rate"
    _columns = {
        # 'name' is the timestamp of the rate, following the OpenERP
        # convention of using 'name' as the record's display field.
        'name': fields.datetime('Date', required=True, select=True),
        'rate': fields.float('Rate', digits=(12, 6), help='The rate of the currency to the currency of rate 1'),
        'currency_id': fields.many2one('res.currency', 'Currency', readonly=True),
    }
    _defaults = {
        # New rates default to the start of the current day.
        'name': lambda *a: time.strftime('%Y-%m-%d 00:00:00'),
    }
    # Newest rate first: res.currency's rate lookup relies on this ordering.
    _order = "name desc"
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| fdvarela/odoo8 | openerp/addons/base/res/res_currency.py | Python | agpl-3.0 | 13,459 |
# encoding: UTF-8
from ctaBase import *
from ctaTemplate import CtaTemplate
import talib
import numpy as np
import math
import copy
from datetime import datetime
########################################################################
class Tmm2agStrategy(CtaTemplate):
    """CTA strategy working on bars stored in the 5-minute bar database."""
    className = 'Tmm2agStrategy'
    author = u'用Python的交易员'
    barDbName = MINUTE_5_DB_NAME
    # Strategy parameters
    initDays = 10           # number of days of history used for initialization
    fixedSize = 1           # lot size used when opening a position
    # Strategy variables
    bar = None              # current bar (K-line) object
    m5bar = None
    barMinute = EMPTY_STRING    # minute of the bar currently being built
    bufferSize = 200        # amount of data to keep buffered
    bufferCount = 0         # count of data already buffered
    UporDownCount = 0       # count of consecutive ups or downs
    highArray = np.zeros(bufferSize)    # array of bar highs
    lowArray = np.zeros(bufferSize)     # array of bar lows
    closeArray = np.zeros(bufferSize)   # array of bar closes
    openArray = np.zeros(bufferSize)    # array of bar opens
    # Tmm bars
    H1Array = np.zeros(bufferSize)      # array of Tmm bar highs
    L1Array = np.zeros(bufferSize)      # array of Tmm bar lows
    C1Array = np.zeros(bufferSize)      # array of Tmm bar closes
    O1Array = np.zeros(bufferSize)      # array of Tmm bar opens
    UPorDOWNArray = np.zeros(bufferSize)
    H1Value = 0
    L1Value = 0
    C1Value = 0
    O1Value = 0
    UPorDOWNValue = 0
    orderList = []          # list of submitted order ids
    # List of parameter names
    paramList = ['name',
                 'className',
                 'author',
                 'vtSymbol']
    # List of variable names
    varList = ['inited',
               'trading',
               'pos',
               'ydPos',
               'lastEntryPrice',
               'H1Value',
               'L1Value',
               'O1Value',
               'C1Value',
               'UPorDOWNValue']
#----------------------------------------------------------------------
def __init__(self, ctaEngine, setting):
"""Constructor"""
super(Tmm2agStrategy, self).__init__(ctaEngine, setting)
# 注意策略类中的可变对象属性(通常是list和dict等),在策略初始化时需要重新创建,
# 否则会出现多个策略实例之间数据共享的情况,有可能导致潜在的策略逻辑错误风险,
# 策略类中的这些可变对象属性可以选择不写,全都放在__init__下面,写主要是为了阅读
# 策略时方便(更多是个编程习惯的选择)
self.isPrePosHaved = False
self.isAlreadyTraded = False
#----------------------------------------------------------------------
def onInit(self):
"""初始化策略(必须由用户继承实现)"""
self.writeCtaLog(u'%s策略初始化' %self.name)
# 载入历史数据,并采用回放计算的方式初始化策略数值
initData = self.loadBar(self.initDays)
for bar in initData:
self.onBar(bar)
self.putEvent()
#----------------------------------------------------------------------
def onStart(self):
"""启动策略(必须由用户继承实现)"""
self.writeCtaLog(u'%s策略启动' %self.name)
self.putEvent()
#----------------------------------------------------------------------
def onStop(self):
"""停止策略(必须由用户继承实现)"""
self.writeCtaLog(u'%s策略停止' %self.name)
self.putEvent()
#----------------------------------------------------------------------
def onTick(self, tick):
"""收到行情TICK推送(必须由用户继承实现)"""
# 计算K线
tickMinute = tick.datetime.minute
if tickMinute != self.barMinute:
if self.bar:
self.bar.datetime = tick.datetime.replace(second=0, microsecond=0)
self.bar.date = tick.date
self.bar.time = tick.time
# self.writeCtaLog(u'记录分钟线数据%s,时间:%s, O:%s, H:%s, L:%s, C:%s'
# % (self.bar.vtSymbol, self.bar.time, self.bar.open, self.bar.high,
#
# self.bar.low, self.bar.close))
if self.barInTime(tick):
self.procecssBar(self.bar)
bar = CtaBarData()
bar.vtSymbol = tick.vtSymbol
bar.symbol = tick.symbol
bar.exchange = tick.exchange
bar.open = tick.lastPrice
bar.high = tick.lastPrice
bar.low = tick.lastPrice
bar.close = tick.lastPrice
bar.date = tick.date
bar.time = tick.time
bar.datetime = tick.datetime # K线的时间设为第一个Tick的时间
self.bar = bar # 这种写法为了减少一层访问,加快速度
self.barMinute = tickMinute # 更新当前的分钟
else: # 否则继续累加新的K线
bar = self.bar # 写法同样为了加快速度
bar.high = max(bar.high, tick.lastPrice)
bar.low = min(bar.low, tick.lastPrice)
bar.close = tick.lastPrice
#----------------------------------------------------------------------
def procecssBar(self,bar):
if not self.m5bar or bar.datetime.minute % 5 == 1:
m5bar = CtaBarData()
m5bar.vtSymbol = bar.vtSymbol
m5bar.symbol = bar.vtSymbol
m5bar.exchange = bar.exchange
m5bar.open = bar.open
m5bar.high = bar.high
m5bar.low = bar.low
m5bar.close = bar.close
m5bar.date = bar.date
m5bar.time = bar.time
m5bar.datetime = bar.datetime
m5bar.volume = bar.volume
m5bar.openInterest = bar.openInterest
self.m5bar = m5bar
else:
m5bar = self.m5bar
m5bar.high = max(m5bar.high, bar.high)
m5bar.low = min(m5bar.low, bar.low)
m5bar.close = bar.close
m5bar.volume = m5bar.volume + bar.volume
m5bar.openInterest = bar.openInterest
if bar.datetime.minute % 5 == 0:
newBar = copy.copy(m5bar)
newBar.datetime = bar.datetime.replace(second=0,microsecond=0)
newBar.date = bar.date
newBar.time = bar.time
self.onBar(newBar)
# self.writeCtaLog(u'记录3分钟线数据%s,时间:%s, O:%s, H:%s, L:%s, C:%s'
# % (newBar.vtSymbol, newBar.time, newBar.open, newBar.high,
# newBar.low, newBar.close))
#----------------------------------------------------------------------
def onBar(self, bar):
"""收到Bar推送(必须由用户继承实现)"""
# 撤销之前发出的尚未成交的委托(包括限价单和停止单)
for orderID in self.orderList:
self.cancelOrder(orderID)
self.orderList = []
# 保存K线数据
self.closeArray[0:self.bufferSize-1] = self.closeArray[1:self.bufferSize]
self.highArray[0:self.bufferSize-1] = self.highArray[1:self.bufferSize]
self.lowArray[0:self.bufferSize-1] = self.lowArray[1:self.bufferSize]
self.openArray[0:self.bufferSize - 1] = self.openArray[1:self.bufferSize]
self.C1Array[0:self.bufferSize - 1] = self.C1Array[1:self.bufferSize]
self.H1Array[0:self.bufferSize - 1] = self.H1Array[1:self.bufferSize]
self.L1Array[0:self.bufferSize - 1] = self.L1Array[1:self.bufferSize]
self.O1Array[0:self.bufferSize - 1] = self.O1Array[1:self.bufferSize]
self.UPorDOWNArray[0:self.bufferSize - 1] = self.UPorDOWNArray[1:self.bufferSize]
self.closeArray[-1] = bar.close
self.highArray[-1] = bar.high
self.lowArray[-1] = bar.low
self.openArray[-1] = bar.open
self.bufferCount += 1
if self.bufferCount == 1: #第一天特殊处理
if self.closeArray[-1] >= self.openArray[-1]: #上涨
self.O1Array[-1] = self.openArray[-1]
self.L1Array[-1] = self.openArray[-1]
self.H1Array[-1] = self.closeArray[-1]
self.C1Array[-1] = self.closeArray[-1]
self.UPorDOWNArray[-1] = 1
else: #下跌
self.O1Array[-1] = self.openArray[-1]
self.H1Array[-1] = self.openArray[-1]
self.L1Array[-1] = self.closeArray[-1]
self.C1Array[-1] = self.closeArray[-1]
self.UPorDOWNArray[-1] = 0
self.UporDownCount = 1
return
if self.bufferCount > 1:
if self.UPorDOWNArray[-2] == 1: #昨天是上涨
if self.closeArray[-1] > self.H1Array[-2]: #第一种情况,上涨:今天的收盘价超过前一个柱子的最高点
self.O1Array[-1] = self.H1Array[-2]
self.L1Array[-1] = self.H1Array[-2]
self.H1Array[-1] = self.closeArray[-1]
self.C1Array[-1] = self.closeArray[-1]
self.UPorDOWNArray[-1] = 1 #表示上涨
self.UporDownCount += 1
if self.closeArray[-1] < self.L1Array[-2]: #第二种情况,下跌:今天的收盘价,下跌超过前三个柱子的最低价
#低过前一个柱子的最低价,才开始计算
hh = self.L1Array[-2]
n = 1
if self.bufferCount > 2:
for a in range(3,self.bufferSize):
if self.UPorDOWNArray[-a] == 0:
break
if self.H1Array[-a] != self.H1Array[-a + 1] and self.H1Array[-a] != 0:
n = n + 1
hh = self.L1Array[-a]
if n == 3:
break
if self.closeArray[-1] < hh:
self.O1Array[-1] = self.L1Array[-2]
self.H1Array[-1] = self.L1Array[-2]
self.L1Array[-1] = self.closeArray[-1]
self.C1Array[-1] = self.closeArray[-1]
self.UPorDOWNArray[-1] = 0
self.UporDownCount = 1
if self.UPorDOWNArray[-2] == 0: #昨天是下跌
if self.closeArray[-1] < self.L1Array[-2]: #第一种情况,下跌:今天的收盘价超过前一个柱子的最低点
self.O1Array[-1] = self.L1Array[-2]
self.H1Array[-1] = self.L1Array[-2]
self.L1Array[-1] = self.closeArray[-1]
self.C1Array[-1] = self.closeArray[-1]
self.UPorDOWNArray[-1] = 0 #表示下跌
self.UporDownCount += 1
if self.closeArray[-1] > self.H1Array[-2]: #第二种情况,上涨:今天的收盘价,下跌超过前三个柱子的最高价
#高过前一个柱子的最高价,才开始计算
hh = self.H1Array[-2]
n = 1
if self.bufferCount > 2:
for a in range(3,self.bufferSize):
if self.UPorDOWNArray[-a] == 1:
break
if self.H1Array[-a] != self.H1Array[-a + 1] and self.H1Array[-a] != 0:
n = n + 1
hh = self.H1Array[-a]
if n == 3:
break
if self.closeArray[-1] > hh:
self.O1Array[-1] = self.H1Array[-2]
self.L1Array[-1] = self.H1Array[-2]
self.H1Array[-1] = self.closeArray[-1]
self.C1Array[-1] = self.closeArray[-1]
self.UPorDOWNArray[-1] = 1
self.UporDownCount = 1
self.O1Value = self.O1Array[-1]
self.L1Value = self.L1Array[-1]
self.H1Value = self.H1Array[-1]
self.C1Value = self.C1Array[-1]
self.UPorDOWNValue = self.UPorDOWNArray[-1]
self.writeCtaLog(u'记录5分钟线数据%s,时间:%s, O:%s, H:%s, L:%s, C:%s, PD:%s, Count:%s'
% (bar.vtSymbol, bar.time,self.O1Value, self.H1Value,
self.L1Value, self.C1Value, self.UPorDOWNValue, self.UporDownCount))
# 判断是否要进行交易
# 当前无仓位
if self.pos == 0:
#self.intraTradeHigh = bar.high
#self.intraTradeLow = bar.low
# 当前K线上涨前一K线下跌买入开仓
if self.UPorDOWNArray[-1] == 1 and self.UPorDOWNArray[-2] == 0:
# 这里为了保证成交,选择超价5个整指数点下单
orderID = self.buy(bar.close + 5, self.fixedSize * 2)
self.orderList.append(orderID)
self.lastEntryPrice = bar.close
# 当前K线下跌前一K线上涨卖出开仓
elif self.UPorDOWNArray[-1] == 0 and self.UPorDOWNArray[-2] == 1:
orderID = self.short(bar.close - 5, self.fixedSize * 2)
self.orderList.append(orderID)
self.lastEntryPrice = bar.close
# 持有多头仓位
elif self.pos > 0:
# 为上涨,上涨计数为4时加仓
if self.UPorDOWNValue == 1 and self.UporDownCount >= 6 and self.pos == 2 * self.fixedSize:
orderID = self.sell(bar.close - 5, self.fixedSize)
self.orderList.append(orderID)
if self.UPorDOWNValue == 1 and self.UporDownCount >= 4:
orderID = self.sell(self.lastEntryPrice, abs(self.pos), True)
self.orderList.append(orderID)
else:
orderID = self.sell(self.lastEntryPrice - 20, abs(self.pos), True)
self.orderList.append(orderID)
# 当前K线下跌前一K线上涨卖出开仓
if self.UPorDOWNArray[-1] == 0:
orderID = self.sell(bar.close - 5, abs(self.pos))
self.orderList.append(orderID)
orderID = self.short(bar.close - 5, self.fixedSize * 2)
self.orderList.append(orderID)
self.lastEntryPrice = bar.close
# 持有空头仓位
elif self.pos < 0:
# 为下跌,下跌计数为4时加仓
if self.UPorDOWNValue == 0 and self.UporDownCount >= 6 and self.pos == -2 * self.fixedSize:
orderID = self.cover(bar.close + 5, self.fixedSize)
self.orderList.append(orderID)
if self.UPorDOWNValue == 0 and self.UporDownCount >= 4:
orderID = self.cover(self.lastEntryPrice, abs(self.pos), True)
self.orderList.append(orderID)
else:
orderID = self.cover(self.lastEntryPrice + 20, abs(self.pos), True)
self.orderList.append(orderID)
# 当前K线上涨前一K线下跌买入开仓
if self.UPorDOWNArray[-1] == 1:
# 这里为了保证成交,选择超价5个整指数点下单
orderID = self.cover(bar.close + 5, abs(self.pos))
self.orderList.append(orderID)
orderID = self.buy(bar.close + 5, self.fixedSize * 2)
self.orderList.append(orderID)
self.lastEntryPrice = bar.close
# 发出状态更新事件
self.putEvent()
#----------------------------------------------------------------------
def onOrder(self, order):
"""收到委托变化推送(必须由用户继承实现)"""
pass
#----------------------------------------------------------------------
def onTrade(self, trade):
pass
#-----------------------------------------------------------------------
def onTimer(self):
pass
if __name__ == '__main__':
    # Allow running a backtest by double-clicking / executing this file.
    # PyQt4 is imported so that matplotlib binds to PyQt4 rather than
    # PySide, preventing an initialisation error.
    from ctaBacktesting import *
    from PyQt4 import QtCore, QtGui

    # Create the backtesting engine.
    engine = BacktestingEngine()

    # Use bar-level (K-line) backtesting mode.
    engine.setBacktestingMode(engine.BAR_MODE)

    # Start date of the historical data window.
    engine.setStartDate('20161010')

    # Product parameters.
    engine.setSlippage(0.2)     # one tick of slippage
    engine.setRate(0.3 / 10000) # commission: 0.3 per 10,000 (0.003%)
    engine.setSize(15)          # contract multiplier

    # Historical database / collection to read bars from.
    engine.setDatabase(MINUTE_DB_NAME, 'ag1612')

    ## (disabled) single-run backtest:
    # d = {'atrLength': 11}
    # engine.initStrategy(AtrRsiStrategy, d)
    ##engine.runBacktesting()
    ##engine.showBacktestingResult()

    # Parameter-optimisation run.
    setting = OptimizationSetting()                 # optimisation task settings
    setting.setOptimizeTarget('capital')            # rank results by net profit
    setting.addParameter('atrLength', 11, 20, 1)    # sweep atrLength: 11..20 step 1
    setting.addParameter('atrMaLength', 20, 30, 5)  # sweep atrMaLength: 20..30 step 5

    # Timing reference: i7-3770 @ 3.4 GHz, 8 threads, 16 GB RAM, Windows 7
    # Professional, with other programs running -- indicative only.
    import time
    start = time.time()

    # Single-process optimisation (reference: ~359 s):
    # engine.runOptimization(AtrRsiStrategy, setting)

    # Multi-process optimisation (reference: ~89 s).
    # NOTE(review): AtrRsiStrategy is neither defined nor imported in this
    # module (leftover from a copied template?) -- this line will raise
    # NameError at runtime.  It presumably should optimise Tmm2agStrategy
    # with parameters that strategy actually declares; verify and fix.
    engine.runParallelOptimization(AtrRsiStrategy, setting)

    print u'耗时:%s' % (time.time() - start)
| mumuwoyou/vnpy-dev | vn.trader/ctaStrategy/strategy/strategyTmm2ag.py | Python | mit | 18,603 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the nullable ``wgs_group`` char column to StrainSource.

    NOTE(review): the verbose_name reads "Whole Gnome Sequence Group",
    almost certainly a typo for "Genome".  It is deliberately left
    unchanged here: the same string is presumably mirrored in the model
    definition, and editing only this applied migration would make
    ``makemigrations`` detect a model/migration mismatch.  Fix it in the
    model and generate a follow-up migration instead.
    """

    dependencies = [
        ('mutations', '0005_mutation_predictor'),
    ]

    operations = [
        migrations.AddField(
            model_name='strainsource',
            name='wgs_group',
            field=models.CharField(max_length=10, null=True, verbose_name=b'Whole Gnome Sequence Group', blank=True),
        ),
    ]
| IQSS/gentb-site | apps/mutations/migrations/0006_strainsource_wgs_group.py | Python | agpl-3.0 | 478 |
#!/usr/bin/env python
"""
Simple wrapper around the ipinfo.io IP geolocation API.
"""
import json
import subprocess as sp
class IPLookupError(Exception):
    """Raised when ipinfo.io rejects the supplied IP address or parameter."""
class IPLookup(object):
    """Thin wrapper around the ipinfo.io IP-geolocation REST API.

    Shells out to ``curl`` (as the original implementation did), but the
    command is passed as an argument list with ``shell=False`` so that a
    hostile ``ip_address`` or ``param`` value cannot inject shell
    commands -- the previous string-built ``shell=True`` invocation was
    exploitable.
    """

    def __init__(self):
        pass

    def lookup(self, ip_address, param=None):
        """Query ipinfo.io for *ip_address*.

        Returns a dictionary with all available info, or a plain string
        when a specific *param* (e.g. ``'city'``, ``'country'``) is
        requested.  A field the API reports as ``null`` is returned as
        None.

        Raises:
            IPLookupError: if the API rejects the IP address or the
                parameter (plain-text error responses from the service).
        """
        if param:
            url = 'ipinfo.io/{}/{}'.format(ip_address, param)
        else:
            url = 'ipinfo.io/{}'.format(ip_address)
        # Argument list + implicit shell=False: no shell metacharacter
        # expansion of user-supplied input.
        cmd_obj = sp.Popen(['curl', '-s', url], stdout=sp.PIPE)
        cmd_out = cmd_obj.communicate()[0]
        # On Python 3 Popen returns bytes; decode so the string
        # comparisons below work (on Python 2 bytes *is* str, so this
        # branch is skipped and behavior is unchanged).
        if not isinstance(cmd_out, str):
            cmd_out = cmd_out.decode('utf-8')
        cmd_out = cmd_out.strip()
        if cmd_out == 'null':
            cmd_out = None

        # Error handling based on the API's plain-text error messages.
        if cmd_out == 'Please provide a valid IP address':
            raise IPLookupError("Invalid IP address")
        elif cmd_out == 'undefined':
            raise IPLookupError("Invalid parameter")

        if param:
            return cmd_out
        else:
            return json.loads(cmd_out)
| mossberg/pyipinfoio | pyipinfoio/pyipinfoio.py | Python | mit | 1,120 |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test for the internal ops used by tfdbg v2."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
from tensorflow.core.protobuf import debug_event_pb2
from tensorflow.python.compat import compat
from tensorflow.python.debug.lib import debug_events_reader
from tensorflow.python.debug.lib import debug_events_writer
from tensorflow.python.debug.lib import dumping_callback_test_lib
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_debug_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import googletest
class DebugIdentityV2OpTest(dumping_callback_test_lib.DumpingCallbackTestBase):
  """Tests for DebugIdentityV2Op: when DebugEventsWriter is initialized.

  DebugEventsWriter being initialized prior to DebugIdentityV2 ops being invoked
  for the first time is the typical case (e.g., tfdbg2 running on a local
  machine with only local devices.)
  """

  def setUp(self):
    """Create a writer with a deliberately small circular buffer."""
    super(DebugIdentityV2OpTest, self).setUp()
    # Testing using a small circular-buffer size.
    self.circular_buffer_size = 4
    self.tfdbg_run_id = "test_tfdbg_run"
    self.writer = debug_events_writer.DebugEventsWriter(
        self.dump_root, self.tfdbg_run_id, self.circular_buffer_size)

  def tearDown(self):
    self.writer.Close()
    super(DebugIdentityV2OpTest, self).tearDown()

  @test_util.run_in_graph_and_eager_modes
  def testSingleTensorFullTensorDebugModeWithCircularBufferBehavior(self):
    """FULL_TENSOR traces: only the last buffer-full survives a flush."""

    @def_function.function
    def write_debug_trace(x):
      # Emit FULL_TENSOR traces for both the square and the sqrt of x,
      # under two distinct context ids.
      square = math_ops.square(x)
      gen_debug_ops.debug_identity_v2(
          square,
          tfdbg_context_id="deadbeaf",
          op_name="Square",
          output_slot=0,
          tensor_debug_mode=debug_event_pb2.TensorDebugMode.FULL_TENSOR,
          debug_urls=["file://%s" % self.dump_root])

      sqrt = math_ops.sqrt(x)
      gen_debug_ops.debug_identity_v2(
          sqrt,
          tfdbg_context_id="beafdead",
          op_name="Sqrt",
          output_slot=0,
          tensor_debug_mode=debug_event_pb2.TensorDebugMode.FULL_TENSOR,
          debug_urls=["file://%s" % self.dump_root])
      return square + sqrt

    x = np.array([3.0, 4.0])
    # Only the graph-execution trace of the last iteration should be written
    # to self.dump_root.
    for _ in range(self.circular_buffer_size // 2 + 1):
      self.assertAllClose(
          write_debug_trace(x), [9.0 + np.sqrt(3.0), 16.0 + 2.0])

    with debug_events_reader.DebugEventsReader(self.dump_root) as reader:
      # Check that the .metadata DebugEvents data file has been created, even
      # before FlushExecutionFiles() is called.
      self.assertGreater(reader.starting_wall_time(), 0)
      self.assertTrue(reader.tensorflow_version())
      self.assertTrue(reader.tfdbg_file_version().startswith("debug.Event"))

      graph_trace_iter = reader.graph_execution_traces_iterators()[0]
      # Before FlushExecutionFiles() is called, the .graph_execution_traces
      # file ought to be empty.
      with self.assertRaises(StopIteration):
        next(graph_trace_iter)

      # Flush the circular buffer.
      self.writer.FlushExecutionFiles()
      graph_trace_iter = reader.graph_execution_traces_iterators()[0]

      # The circular buffer has a size of 4. So only the data from the
      # last two iterations should have been written to self.dump_root.
      for _ in range(2):
        debug_event = next(graph_trace_iter).debug_event
        self.assertGreater(debug_event.wall_time, 0)
        trace = debug_event.graph_execution_trace
        self.assertEqual(trace.tfdbg_context_id, "deadbeaf")
        self.assertEqual(trace.op_name, "Square")
        self.assertEqual(trace.output_slot, 0)
        self.assertEqual(trace.tensor_debug_mode,
                         debug_event_pb2.TensorDebugMode.FULL_TENSOR)
        tensor_value = tensor_util.MakeNdarray(trace.tensor_proto)
        self.assertAllClose(tensor_value, [9.0, 16.0])

        debug_event = next(graph_trace_iter).debug_event
        self.assertGreater(debug_event.wall_time, 0)
        trace = debug_event.graph_execution_trace
        self.assertEqual(trace.tfdbg_context_id, "beafdead")
        self.assertEqual(trace.op_name, "Sqrt")
        self.assertEqual(trace.output_slot, 0)
        self.assertEqual(trace.tensor_debug_mode,
                         debug_event_pb2.TensorDebugMode.FULL_TENSOR)
        tensor_value = tensor_util.MakeNdarray(trace.tensor_proto)
        self.assertAllClose(tensor_value, [np.sqrt(3.0), 2.0])

      # Only the graph-execution trace of the last iteration should be written
      # to self.dump_root.
      with self.assertRaises(StopIteration):
        next(graph_trace_iter)

  @test_util.run_in_graph_and_eager_modes
  def testControlFlow(self):
    """Traces inside a while loop record per-iteration values, subject to
    the circular buffer."""

    @def_function.function
    def collatz(x):
      # Count the Collatz steps of x, tracing x on every iteration.
      counter = constant_op.constant(0, dtype=dtypes.int32)
      while math_ops.greater(x, 1):
        counter = counter + 1
        gen_debug_ops.debug_identity_v2(
            x,
            tfdbg_context_id="deadbeaf",
            op_name="x",
            output_slot=0,
            tensor_debug_mode=debug_event_pb2.TensorDebugMode.FULL_TENSOR,
            debug_urls=["file://%s" % self.dump_root])
        if math_ops.equal(x % 2, 0):
          x = math_ops.div(x, 2)
        else:
          x = x * 3 + 1
      return counter

    x = constant_op.constant(10, dtype=dtypes.int32)
    self.evaluate(collatz(x))

    self.writer.FlushExecutionFiles()
    with debug_events_reader.DebugEventsReader(self.dump_root) as reader:
      graph_trace_iter = reader.graph_execution_traces_iterators()[0]
      try:
        x_values = []
        timestamp = 0
        while True:
          debug_event = next(graph_trace_iter).debug_event
          # Wall times must be strictly increasing across traces.
          self.assertGreater(debug_event.wall_time, timestamp)
          timestamp = debug_event.wall_time
          trace = debug_event.graph_execution_trace
          self.assertEqual(trace.tfdbg_context_id, "deadbeaf")
          self.assertEqual(trace.op_name, "x")
          self.assertEqual(trace.output_slot, 0)
          self.assertEqual(trace.tensor_debug_mode,
                           debug_event_pb2.TensorDebugMode.FULL_TENSOR)
          x_values.append(int(tensor_util.MakeNdarray(trace.tensor_proto)))
      except StopIteration:
        pass

      # Due to the circular buffer, only the last 4 iterations of
      # [10, 5, 16, 8, 4, 2] should have been written.
      self.assertAllEqual(x_values, [16, 8, 4, 2])

  @test_util.run_in_graph_and_eager_modes
  def testTwoDumpRoots(self):
    """A single DebugIdentityV2 op can fan out to two debug URLs."""
    another_dump_root = os.path.join(self.dump_root, "another")
    another_debug_url = "file://%s" % another_dump_root
    another_writer = debug_events_writer.DebugEventsWriter(
        another_dump_root, "test_tfdbg_run")

    @def_function.function
    def write_debug_trace(x):
      # DebugIdentityV2 is a stateful op. It ought to be included by auto
      # control dependency.
      square = math_ops.square(x)
      gen_debug_ops.debug_identity_v2(
          square,
          tfdbg_context_id="deadbeaf",
          tensor_debug_mode=debug_event_pb2.TensorDebugMode.FULL_TENSOR,
          debug_urls=["file://%s" % self.dump_root, another_debug_url])
      return square + 1.0

    x = np.array([3.0, 4.0])
    self.assertAllClose(write_debug_trace(x), np.array([10.0, 17.0]))
    self.writer.FlushExecutionFiles()
    another_writer.FlushExecutionFiles()
    another_writer.Close()

    # Both dump roots must contain the identical trace.
    for debug_root in (self.dump_root, another_dump_root):
      with debug_events_reader.DebugEventsReader(debug_root) as reader:
        graph_trace_iter = reader.graph_execution_traces_iterators()[0]

        debug_event = next(graph_trace_iter).debug_event
        trace = debug_event.graph_execution_trace
        self.assertEqual(trace.tfdbg_context_id, "deadbeaf")
        # op_name was not passed to the op, hence empty here.
        self.assertEqual(trace.op_name, "")
        self.assertEqual(trace.tensor_debug_mode,
                         debug_event_pb2.TensorDebugMode.FULL_TENSOR)
        tensor_value = tensor_util.MakeNdarray(trace.tensor_proto)
        self.assertAllClose(tensor_value, [9.0, 16.0])

        with self.assertRaises(StopIteration):
          next(graph_trace_iter)
class DebugIdentityV2OpUninitializedWriterTest(
    dumping_callback_test_lib.DumpingCallbackTestBase):
  """Tests for DebugIdentityV2Op: when DebugEventsWriter is not initialized.

  This case can occur when DebugIdentityV2Ops are running on a remote
  TensorFlow server (e.g., a TPU worker).
  """

  @test_util.run_in_graph_and_eager_modes
  def testInvokingDebugIdentityV2OpBeforeCreatingDebugEventsWriterWorks(self):
    """DebugIdentityV2 invoked before any writer exists still records
    traces, honoring the op-level circular_buffer_size attribute."""
    if not compat.forward_compatible(2020, 6, 24):
      self.skipTest("Functionality currently not supported.")
    circular_buffer_size = 3

    @def_function.function
    def write_debug_trace(x):
      # DebugIdentityV2 is a stateful op. It ought to be included by auto
      # control dependency.
      square = math_ops.square(x)
      gen_debug_ops.debug_identity_v2(
          square,
          tfdbg_context_id="deadbeaf",
          op_name="Square",
          output_slot=0,
          tensor_debug_mode=debug_event_pb2.TensorDebugMode.FULL_TENSOR,
          debug_urls=["file://%s" % self.dump_root],
          circular_buffer_size=circular_buffer_size)
      return square

    # The DebugIdentityV2 ops are invokes *before* a DebugEventsWriter at the
    # same dump root is created.
    for i in range(circular_buffer_size * 2):
      self.assertAllClose(
          write_debug_trace(np.array([i]).astype(np.float32)), [i**2.0])
    writer = debug_events_writer.DebugEventsWriter(self.dump_root,
                                                   "test_tfdbg_run",
                                                   circular_buffer_size)
    writer.FlushNonExecutionFiles()
    writer.FlushExecutionFiles()

    with debug_events_reader.DebugEventsReader(self.dump_root) as reader:
      graph_trace_iter = reader.graph_execution_traces_iterators()[0]
      graph_execution_traces = []
      while True:
        try:
          graph_execution_traces.append(
              next(graph_trace_iter).debug_event.graph_execution_trace)
        except StopIteration:
          break
      # Only the last `circular_buffer_size` iterations survive.
      self.assertLen(graph_execution_traces, circular_buffer_size)
      for i in range(circular_buffer_size):
        self.assertAllClose(
            tensor_util.MakeNdarray(graph_execution_traces[i].tensor_proto),
            [(i + circular_buffer_size)**2.0])
class DebugNumericSummaryV2Test(test_util.TensorFlowTestCase):
@test_util.run_in_graph_and_eager_modes
def testDebugNumericSummaryV2OpReduceInfNanThreeSlots(self):
def debug_summary(x):
return self.evaluate(gen_debug_ops.debug_numeric_summary_v2(
x, tensor_debug_mode=(
debug_event_pb2.TensorDebugMode.REDUCE_INF_NAN_THREE_SLOTS)))
self.assertAllEqual(
debug_summary(constant_op.constant([])), [0.0, 0.0, 0.0])
self.assertAllEqual(
debug_summary(constant_op.constant(42.0)), [0.0, 0.0, 0.0])
self.assertAllEqual(
debug_summary(constant_op.constant([3.0, 4.0])), [0.0, 0.0, 0.0])
self.assertAllEqual(
debug_summary(constant_op.constant(np.array([3.0, -np.inf]))),
[-np.inf, 0.0, 0.0])
self.assertAllEqual(
debug_summary(constant_op.constant(np.array([[0, 0], [np.nan, 0]]))),
[0.0, 0.0, np.nan])
self.assertAllEqual(
debug_summary(
constant_op.constant(np.array([[0, 0], [np.nan, np.inf]]))),
[0.0, np.inf, np.nan])
self.assertAllEqual(
debug_summary(
constant_op.constant(np.array([[0, np.inf], [np.nan, -np.inf]]))),
[-np.inf, np.inf, np.nan])
x = np.zeros([100, 100], dtype=np.float16)
x[32, 47] = np.nan
self.assertAllEqual(
debug_summary(constant_op.constant(x)), [0.0, 0.0, np.nan])
x = np.zeros([97, 97], dtype=np.float32)
x[50, 83] = -np.inf
self.assertAllEqual(
debug_summary(constant_op.constant(x)), [-np.inf, 0.0, 0.0])
x[1, 41] = np.nan
self.assertAllEqual(
debug_summary(constant_op.constant(x)), [-np.inf, 0.0, np.nan])
x = np.zeros([9701], dtype=np.float64)
x[9700] = np.nan
self.assertAllEqual(
debug_summary(constant_op.constant(x)), [0.0, 0.0, np.nan])
@test_util.run_in_graph_and_eager_modes
def testDebugNumericSummaryV2OpLargeTensorIDError(self):
modes = [
debug_event_pb2.TensorDebugMode.CURT_HEALTH,
debug_event_pb2.TensorDebugMode.CONCISE_HEALTH,
debug_event_pb2.TensorDebugMode.SHAPE,
]
# Maximum allowed tensor_id
tensor_id = np.power(2, 53, dtype=np.int64)
for mode in modes:
self.evaluate(
gen_debug_ops.debug_numeric_summary_v2(
constant_op.constant(42.0),
tensor_debug_mode=mode,
tensor_id=tensor_id,
output_dtype=dtypes.float64))
# Incrementing by one should error
tensor_id += 1
for mode in modes:
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(
gen_debug_ops.debug_numeric_summary_v2(
constant_op.constant(42.0),
tensor_debug_mode=mode,
tensor_id=tensor_id,
output_dtype=dtypes.float64))
@test_util.run_in_graph_and_eager_modes
def testDebugNumericSummaryV2OpCurtHealthValuesSmall(self):
def debug_summary(x):
return self.evaluate(
gen_debug_ops.debug_numeric_summary_v2(
x,
tensor_debug_mode=(debug_event_pb2.TensorDebugMode.CURT_HEALTH),
tensor_id=x._id,
output_dtype=dtypes.float64)), x._id
tensor, tensor_id = debug_summary(constant_op.constant([]))
self.assertAllEqual(tensor, [tensor_id, 0.0])
tensor, tensor_id = debug_summary(constant_op.constant(42.0))
self.assertAllEqual(tensor, [tensor_id, 0.0])
tensor, tensor_id = debug_summary(constant_op.constant([3.0, 4.0]))
self.assertAllEqual(tensor, [tensor_id, 0.0])
tensor, tensor_id = debug_summary(
constant_op.constant(np.array([3.0, -np.inf])))
self.assertAllEqual(tensor, [tensor_id, 1.0])
tensor, tensor_id = debug_summary(
constant_op.constant(np.array([[0, 0], [np.nan, 0]])))
self.assertAllEqual(tensor, [tensor_id, 1.0])
tensor, tensor_id = debug_summary(
constant_op.constant(np.array([[0, 0], [np.nan, np.inf]])))
self.assertAllEqual(tensor, [tensor_id, 1.0])
tensor, tensor_id = debug_summary(
constant_op.constant(np.array([[0, np.inf], [np.nan, -np.inf]])))
self.assertAllEqual(tensor, [tensor_id, 1.0])
@test_util.run_in_graph_and_eager_modes
def testDebugNumericSummaryV2OpCurtHealthValuesLarge(self):
def debug_summary(x):
return self.evaluate(
gen_debug_ops.debug_numeric_summary_v2(
x,
tensor_debug_mode=(debug_event_pb2.TensorDebugMode.CURT_HEALTH),
tensor_id=x._id,
output_dtype=dtypes.float64)), x._id
x = np.zeros([100, 100], dtype=np.float16)
x[32, 47] = np.nan
tensor, tensor_id = debug_summary(constant_op.constant(x))
self.assertAllEqual(tensor, [tensor_id, 1.0])
x = np.zeros([97, 97], dtype=np.float32)
x[50, 83] = -np.inf
tensor, tensor_id = debug_summary(constant_op.constant(x))
self.assertAllEqual(tensor, [tensor_id, 1.0])
x[1, 41] = np.nan
tensor, tensor_id = debug_summary(constant_op.constant(x))
self.assertAllEqual(tensor, [tensor_id, 1.0])
x = np.zeros([9701], dtype=np.float64)
x[9700] = np.nan
tensor, tensor_id = debug_summary(constant_op.constant(x))
self.assertAllEqual(tensor, [tensor_id, 1.0])
@test_util.run_in_graph_and_eager_modes
def testDebugNumericSummaryV2OpCurtHealthConsistency(self):
def debug_summary(x):
return self.evaluate(
gen_debug_ops.debug_numeric_summary_v2(
x,
tensor_debug_mode=(debug_event_pb2.TensorDebugMode.CURT_HEALTH),
tensor_id=x._id,
output_dtype=dtypes.float64)), x._id
x = np.zeros([100, 100], dtype=np.float16)
x[43, 99] = np.nan
c = constant_op.constant(x)
tensor_1, tensor_id_1 = debug_summary(c)
tensor_2, tensor_id_2 = debug_summary(c)
self.assertAllEqual(tensor_1, tensor_2)
self.assertEqual(tensor_id_1, tensor_id_2)
x = np.zeros([100, 100, 50], dtype=np.float64)
x[0, 0, 1] = np.inf
c = constant_op.constant(x)
tensor_1, tensor_id_1 = debug_summary(c)
tensor_2, tensor_id_2 = debug_summary(c)
self.assertAllEqual(tensor_1, tensor_2)
self.assertEqual(tensor_id_1, tensor_id_2)
c = constant_op.constant(np.ones((100, 200), np.double))
tensor_1, tensor_id_1 = debug_summary(c)
tensor_2, tensor_id_2 = debug_summary(c)
self.assertAllEqual(tensor_1, tensor_2)
self.assertEqual(tensor_id_1, tensor_id_2)
@test_util.run_in_graph_and_eager_modes
def testDebugNumericSummaryV2OpConciseHealthSmall(self):
def debug_summary(x):
return self.evaluate(
gen_debug_ops.debug_numeric_summary_v2(
x,
tensor_debug_mode=(
debug_event_pb2.TensorDebugMode.CONCISE_HEALTH),
tensor_id=x._id,
output_dtype=dtypes.float64)), x._id
tensor, tensor_id = debug_summary(constant_op.constant([]))
self.assertAllEqual(tensor, [tensor_id, 0.0, 0.0, 0.0, 0.0])
tensor, tensor_id = debug_summary(constant_op.constant(42.0))
self.assertAllEqual(tensor, [tensor_id, 1.0, 0.0, 0.0, 0.0])
tensor, tensor_id = debug_summary(constant_op.constant([3.0, 4.0]))
self.assertAllEqual(tensor, [tensor_id, 2.0, 0.0, 0.0, 0.0])
tensor, tensor_id = debug_summary(
constant_op.constant(np.array([3.0, -np.inf])))
self.assertAllEqual(tensor, [tensor_id, 2.0, 1.0, 0.0, 0.0])
tensor, tensor_id = debug_summary(
constant_op.constant(np.array([[0, 0], [np.nan, 0]])))
self.assertAllEqual(tensor, [tensor_id, 4.0, 0.0, 0.0, 1.0])
tensor, tensor_id = debug_summary(
constant_op.constant(np.array([[0, 0], [np.nan, np.inf]])))
self.assertAllEqual(tensor, [tensor_id, 4.0, 0.0, 1.0, 1.0])
tensor, tensor_id = debug_summary(
constant_op.constant(np.array([[0, np.inf], [np.nan, -np.inf]])))
self.assertAllEqual(tensor, [tensor_id, 4.0, 1.0, 1.0, 1.0])
@test_util.run_in_graph_and_eager_modes
def testDebugNumericSummaryV2OpConciseHealthLarge(self):
def debug_summary(x):
return self.evaluate(
gen_debug_ops.debug_numeric_summary_v2(
x,
tensor_debug_mode=(
debug_event_pb2.TensorDebugMode.CONCISE_HEALTH),
tensor_id=x._id,
output_dtype=dtypes.float64)), x._id
x = np.zeros([100, 100], dtype=np.float16)
x[32, :] = np.nan
tensor, tensor_id = debug_summary(constant_op.constant(x))
self.assertAllEqual(tensor, [tensor_id, 10000.0, 0.0, 0.0, 100.0])
x = np.zeros([97, 97], dtype=np.float32)
x[50, 83:85] = -np.inf
tensor, tensor_id = debug_summary(constant_op.constant(x))
self.assertAllEqual(tensor, [tensor_id, 97 * 97, 2.0, 0.0, 0.0])
x[1:9, 41] = np.nan
tensor, tensor_id = debug_summary(constant_op.constant(x))
self.assertAllEqual(tensor, [tensor_id, 97 * 97, 2.0, 0.0, 8.0])
x = np.zeros([9701], dtype=np.float64)
x[9700] = np.nan
tensor, tensor_id = debug_summary(constant_op.constant(x))
self.assertAllEqual(tensor, [tensor_id, 9701, 0.0, 0.0, 1.0])
@test_util.run_in_graph_and_eager_modes
def testDebugNumericSummaryV2OpConciseHealthConsistency(self):
def debug_summary(x):
return self.evaluate(
gen_debug_ops.debug_numeric_summary_v2(
x,
tensor_debug_mode=(
debug_event_pb2.TensorDebugMode.CONCISE_HEALTH),
tensor_id=x._id,
output_dtype=dtypes.float64)), x._id
# Assert the same op is returns a consistent value
x = np.zeros([100, 100], dtype=np.float16)
x[3, 4] = -np.inf
c = constant_op.constant(x)
tensor_1, tensor_id_1 = debug_summary(c)
tensor_2, tensor_id_2 = debug_summary(c)
self.assertAllEqual(tensor_1, tensor_2)
self.assertEqual(tensor_id_1, tensor_id_2)
c = constant_op.constant(np.ones((100, 200), np.double))
tensor_1, tensor_id_1 = debug_summary(c)
tensor_2, tensor_id_2 = debug_summary(c)
self.assertAllEqual(tensor_1, tensor_2)
self.assertEqual(tensor_id_1, tensor_id_2)
@test_util.run_in_graph_and_eager_modes
def testDebugNumericSummaryV2OpShapeEmpty(self):
def debug_summary(x):
return self.evaluate(
gen_debug_ops.debug_numeric_summary_v2(
x,
tensor_debug_mode=(debug_event_pb2.TensorDebugMode.SHAPE),
tensor_id=x._id,
output_dtype=dtypes.float64)), x._id
tensor, tensor_id = debug_summary(constant_op.constant(0.0))
self.assertAllEqual(
tensor, [tensor_id, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
@test_util.run_in_graph_and_eager_modes
def testDebugNumericSummaryV2OpShapeSmall(self):
def debug_summary(x):
return self.evaluate(
gen_debug_ops.debug_numeric_summary_v2(
x,
tensor_debug_mode=(debug_event_pb2.TensorDebugMode.SHAPE),
tensor_id=x._id,
output_dtype=dtypes.float64)), x._id
x = np.zeros([3, 4], dtype=np.float32)
tensor, tensor_id = debug_summary(constant_op.constant(x))
self.assertAllEqual(
tensor, [tensor_id, 1.0, 2.0, 12.0, 3.0, 4.0, 0.0, 0.0, 0.0, 0.0])
x = np.ones([1, 2, 3, 4, 5, 6], dtype=np.float16)
x[0, 1, 2, 2, 2, 2] = np.nan
tensor, tensor_id = debug_summary(constant_op.constant(x))
self.assertAllEqual(
tensor,
[tensor_id, 19, 6.0, 2 * 3 * 4 * 5 * 6, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
x = np.zeros([2], dtype=np.float32)
tensor, tensor_id = debug_summary(constant_op.constant(x))
self.assertAllEqual(
tensor, [tensor_id, 1.0, 1.0, 2.0, 2.0, 0.0, 0.0, 0.0, 0.0, 0.0])
tensor, tensor_id = debug_summary(constant_op.constant([]))
self.assertAllEqual(
tensor, [tensor_id, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
  @test_util.run_in_graph_and_eager_modes
  def testDebugNumericSummaryV2OpShapeLarge(self):
    """SHAPE mode on a rank-7 tensor: only the trailing 6 dims fit."""

    def debug_summary(x):
      # Run DebugNumericSummaryV2 in SHAPE mode; returns (summary, tensor id).
      return self.evaluate(
          gen_debug_ops.debug_numeric_summary_v2(
              x,
              tensor_debug_mode=(debug_event_pb2.TensorDebugMode.SHAPE),
              tensor_id=x._id,
              output_dtype=dtypes.float64)), x._id

    x = np.ones([1, 2, 3, 4, 5, 6, 7], dtype=np.double)
    tensor, tensor_id = debug_summary(constant_op.constant(x))
    # Rank is 7 but only 6 dim slots exist, so the leading dim (1) is
    # dropped and the last six dims (2..7) are reported.  2.0 == double enum.
    self.assertAllEqual(tensor, [
        tensor_id, 2.0, 7.0, 2 * 3 * 4 * 5 * 6 * 7, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0
    ])
  @test_util.run_in_graph_and_eager_modes
  def testDebugNumericSummaryV2OpFullHealthSmall(self):
    """FULL_HEALTH mode on small tensors with finite/-inf/+inf/nan mixes."""

    def debug_summary(x):
      # Run DebugNumericSummaryV2 in FULL_HEALTH mode; returns (summary, id).
      return self.evaluate(
          gen_debug_ops.debug_numeric_summary_v2(
              x,
              tensor_debug_mode=(debug_event_pb2.TensorDebugMode.FULL_HEALTH),
              tensor_id=x._id,
              output_dtype=dtypes.float64)), x._id

    # Layout: [id, -1, dtype enum, rank, size,
    #          #-inf, #+inf, #nan, #negative, #zero, #positive].
    # The second slot is always -1 in these tests (presumably a device-id
    # placeholder — confirm against the op's documentation).
    tensor, tensor_id = debug_summary(constant_op.constant([]))
    expected = [tensor_id, -1, 1, 1, 0, 0, 0, 0, 0, 0, 0]
    self.assertAllEqual(tensor, expected)
    tensor, tensor_id = debug_summary(constant_op.constant(42.0))
    expected = [tensor_id, -1, 1, 0, 1, 0, 0, 0, 0, 0, 1]
    self.assertAllEqual(tensor, expected)
    tensor, tensor_id = debug_summary(constant_op.constant([3.0, 4.0]))
    expected = [tensor_id, -1, 1, 1, 2, 0, 0, 0, 0, 0, 2]
    self.assertAllEqual(tensor, expected)
    tensor, tensor_id = debug_summary(
        constant_op.constant(np.array([3, -np.inf], dtype=np.float32)))
    expected = [tensor_id, -1, 1, 1, 2, 1, 0, 0, 0, 0, 1]
    self.assertAllEqual(tensor, expected)
    tensor, tensor_id = debug_summary(
        constant_op.constant(np.array([[0, 0], [np.nan, 0]], dtype=np.float64)))
    expected = [tensor_id, -1, 2, 2, 4, 0, 0, 1, 0, 3, 0]
    self.assertAllEqual(tensor, expected)
    tensor, tensor_id = debug_summary(
        constant_op.constant(
            np.array([[0, 0], [np.nan, np.inf]], dtype=np.float16)))
    expected = [tensor_id, -1, 19, 2, 4, 0, 1, 1, 0, 2, 0]
    self.assertAllEqual(tensor, expected)
    tensor, tensor_id = debug_summary(
        constant_op.constant(
            np.array([[0, np.inf], [np.nan, -np.inf]], dtype=np.float32)))
    expected = [tensor_id, -1, 1, 2, 4, 1, 1, 1, 0, 1, 0]
    self.assertAllEqual(tensor, expected)
  @test_util.run_in_graph_and_eager_modes
  def testDebugNumericSummaryV2OpFullHealthLarge(self):
    """FULL_HEALTH mode on large tensors, checked against a NumPy oracle."""

    def debug_summary(x):
      # Run DebugNumericSummaryV2 in FULL_HEALTH mode; returns (summary, id).
      return self.evaluate(
          gen_debug_ops.debug_numeric_summary_v2(
              x,
              tensor_debug_mode=(debug_event_pb2.TensorDebugMode.FULL_HEALTH),
              tensor_id=x._id,
              output_dtype=dtypes.float64)), x._id

    def tensor_counts(arr):
      # NumPy reference for the tail of the FULL_HEALTH layout:
      # [rank, size, #-inf, #+inf, #nan, #negative, #zero, #positive].
      counts = [len(np.shape(arr)), np.size(arr), 0, 0, 0, 0, 0, 0]
      for n in np.ravel(arr):
        if np.isneginf(n):
          counts[2] += 1
        elif np.isposinf(n):
          counts[3] += 1
        elif np.isnan(n):
          counts[4] += 1
        elif n < 0.:
          counts[5] += 1
        elif n == 0.:
          counts[6] += 1
        else:
          counts[7] += 1
      return counts

    # float16 tensor (dtype enum 19) with scattered nan/inf/neg values.
    x = np.zeros([50, 50], dtype=np.float16)
    x[32, 47] = np.nan
    x[0:4, 3] = np.inf
    x[40:50, 40:50] = 10
    x[3, 20] = -10
    tensor, tensor_id = debug_summary(constant_op.constant(x))
    expected = [tensor_id, -1, 19] + tensor_counts(x)
    self.assertAllEqual(tensor, expected)
    # float32 tensor (dtype enum 1) dominated by +inf.
    x = np.ones([25, 25, 50], dtype=np.float32) * np.inf
    x[:, :, 1] = np.nan
    x[:, :, 2] = -np.inf
    x[:, :, 3] = -1
    x[:, :, 4] = 0
    x[:, :, 5] = 1
    tensor, tensor_id = debug_summary(constant_op.constant(x))
    expected = [tensor_id, -1, 1] + tensor_counts(x)
    self.assertAllEqual(tensor, expected)
    x[0, 0, 0] = np.nan
    tensor, tensor_id = debug_summary(constant_op.constant(x))
    expected = [tensor_id, -1, 1,] + tensor_counts(x)
    self.assertAllEqual(tensor, expected)
    # float64 tensor (dtype enum 2) with a single trailing nan.
    x = np.zeros([9701], dtype=np.float64)
    x[9700] = np.nan
    tensor, tensor_id = debug_summary(constant_op.constant(x))
    expected = [tensor_id, -1, 2] + tensor_counts(x)
    self.assertAllEqual(tensor, expected)
  @test_util.run_in_graph_and_eager_modes
  def testDebugNumericSummaryV2OpFullHealthConsistency(self):
    """Summarizing the same tensor twice must give identical results."""

    def debug_summary(x):
      # Run DebugNumericSummaryV2 in FULL_HEALTH mode; returns (summary, id).
      return self.evaluate(
          gen_debug_ops.debug_numeric_summary_v2(
              x,
              tensor_debug_mode=(debug_event_pb2.TensorDebugMode.FULL_HEALTH),
              tensor_id=x._id,
              output_dtype=dtypes.float64)), x._id

    # Assert that running the op twice on the same constant returns the
    # same summary and the same tensor id.
    x = np.zeros([100, 100], dtype=np.float16)
    x[32, 47] = np.nan
    x[0:4, 3] = np.inf
    x[90:100, 90:100] = 10
    x[3, 20] = -10
    c = constant_op.constant(x)
    tensor_1, tensor_id_1 = debug_summary(c)
    tensor_2, tensor_id_2 = debug_summary(c)
    self.assertAllEqual(tensor_1, tensor_2)
    self.assertEqual(tensor_id_1, tensor_id_2)
    # Repeat with a larger rank-4 double tensor.
    x = np.ones((100, 200, 3, 10), np.double)
    x[1, 30, 2] = 10
    x[5, :, 0, 1] = np.nan
    x[90:100, 150, :, :] = np.inf
    c = constant_op.constant(x)
    tensor_1, tensor_id_1 = debug_summary(c)
    tensor_2, tensor_id_2 = debug_summary(c)
    self.assertAllEqual(tensor_1, tensor_2)
    self.assertEqual(tensor_id_1, tensor_id_2)
  def testCheckNumericsV2OpNegativeAndPositiveInf(self):
    """Test that CheckNumericsV2 op distinguishes negative and positive infs."""
    with self.session(graph=ops.Graph()):
      # -1/0 -> -Inf and 1/0 -> +Inf; the error must mention both signs.
      t1 = constant_op.constant([-1.0, 1.0])
      t2 = constant_op.constant([0.0, 0.0])
      with self.assertRaisesRegex(
          errors.InvalidArgumentError,
          r"pass through test.*had -Inf and \+Inf values"):
        self.evaluate(
            array_ops.check_numerics_v2(t1 / t2, message="pass through test"))
  def testCheckNumericsV2OpNegativeAndPositiveInfAndNaN(self):
    """CheckNumericsV2 op distinguishes - & + infs when nan is present."""
    with self.session(graph=ops.Graph()):
      # -1/0 -> -Inf, 1/0 -> +Inf, 0/0 -> NaN; all three must be reported.
      t1 = constant_op.constant([-1.0, 1.0, 0.0])
      t2 = constant_op.constant([0.0, 0.0, 0.0])
      with self.assertRaisesRegex(
          errors.InvalidArgumentError,
          r"pass through test.*had -Inf, \+Inf, and NaN values"):
        self.evaluate(
            array_ops.check_numerics_v2(t1 / t2, message="pass through test"))
  def testCheckNumericsV2PositiveInfAndNaN(self):
    """Test that CheckNumericsV2 op shows sign of inf when nan is present."""
    with self.session(graph=ops.Graph()):
      # 0/0 -> NaN and 1/0 -> +Inf; the message must carry the inf's sign.
      t1 = constant_op.constant([0.0, 1.0])
      t2 = constant_op.constant([0.0, 0.0])
      with self.assertRaisesRegex(
          errors.InvalidArgumentError,
          r"pass through test.*had \+Inf and NaN values"):
        self.evaluate(
            array_ops.check_numerics_v2(t1 / t2, message="pass through test"))
if __name__ == "__main__":
  # Eager execution must be enabled before the test runner takes over.
  ops.enable_eager_execution()
  googletest.main()
| annarev/tensorflow | tensorflow/python/debug/lib/debug_v2_ops_test.py | Python | apache-2.0 | 30,444 |
'''
module for loading/saving waypoints
'''
import mavutil, time, copy
import logging
import mavutil
try:
from google.protobuf import text_format
import mission_pb2
HAVE_PROTOBUF = True
except ImportError:
HAVE_PROTOBUF = False
class MAVWPError(Exception):
    '''Error raised for MAVLink waypoint handling problems.'''

    def __init__(self, msg):
        super(MAVWPError, self).__init__(msg)
        # Kept as an attribute for callers that read .message directly.
        self.message = msg
class MAVWPLoader(object):
    '''MAVLink waypoint loader.

    Keeps an ordered list of mission items and reads/writes them in the
    QGC WPL 100/110 text formats and the protobuf "QGC WPL PB 110" format.
    '''

    def __init__(self, target_system=0, target_component=0):
        self.wpoints = []
        self.target_system = target_system
        self.target_component = target_component
        # Timestamp of the last mutation; callers poll this to detect changes.
        self.last_change = time.time()

    def count(self):
        '''return number of waypoints'''
        return len(self.wpoints)

    def wp(self, i):
        '''return a waypoint'''
        return self.wpoints[i]

    def add(self, w, comment=''):
        '''add a waypoint (a shallow copy is stored; seq is renumbered)'''
        w = copy.copy(w)
        if comment:
            w.comment = comment
        w.seq = self.count()
        self.wpoints.append(w)
        self.last_change = time.time()

    def add_latlonalt(self, lat, lon, altitude):
        '''add a NAV_WAYPOINT via latitude/longitude/altitude'''
        p = mavutil.mavlink.MAVLink_mission_item_message(self.target_system,
                                                         self.target_component,
                                                         0,
                                                         mavutil.mavlink.MAV_FRAME_GLOBAL_RELATIVE_ALT,
                                                         mavutil.mavlink.MAV_CMD_NAV_WAYPOINT,
                                                         0, 0, 0, 0, 0, 0,
                                                         lat, lon, altitude)
        self.add(p)

    def set(self, w, idx):
        '''set waypoint idx; appending is allowed when idx == count()'''
        w.seq = idx
        if w.seq == self.count():
            return self.add(w)
        if self.count() <= idx:
            raise MAVWPError('adding waypoint at idx=%u past end of list (count=%u)' % (idx, self.count()))
        self.wpoints[idx] = w
        self.last_change = time.time()

    def remove(self, w):
        '''remove a waypoint'''
        self.wpoints.remove(w)
        self.last_change = time.time()

    def clear(self):
        '''clear waypoint list'''
        self.wpoints = []
        self.last_change = time.time()

    def _read_waypoints_v100(self, file):
        '''read a version 100 waypoint file (legacy numeric action codes)'''
        # Map old v100 action codes onto current MAV_CMD values.
        cmdmap = {
            2 : mavutil.mavlink.MAV_CMD_NAV_TAKEOFF,
            3 : mavutil.mavlink.MAV_CMD_NAV_RETURN_TO_LAUNCH,
            4 : mavutil.mavlink.MAV_CMD_NAV_LAND,
            24: mavutil.mavlink.MAV_CMD_NAV_TAKEOFF,
            26: mavutil.mavlink.MAV_CMD_NAV_LAND,
            25: mavutil.mavlink.MAV_CMD_NAV_WAYPOINT ,
            27: mavutil.mavlink.MAV_CMD_NAV_LOITER_UNLIM
            }
        comment = ''
        for line in file:
            if line.startswith('#'):
                # '#' lines are comments attached to the following waypoint.
                comment = line[1:].lstrip()
                continue
            line = line.strip()
            if not line:
                continue
            a = line.split()
            if len(a) != 13:
                raise MAVWPError("invalid waypoint line with %u values" % len(a))
            if mavutil.mavlink10():
                fn = mavutil.mavlink.MAVLink_mission_item_message
            else:
                fn = mavutil.mavlink.MAVLink_waypoint_message
            # NOTE: the v100 column order differs from v110 (see indices).
            w = fn(self.target_system, self.target_component,
                   int(a[0]),    # seq
                   int(a[1]),    # frame
                   int(a[2]),    # action
                   int(a[7]),    # current
                   int(a[12]),   # autocontinue
                   float(a[5]),  # param1,
                   float(a[6]),  # param2,
                   float(a[3]),  # param3
                   float(a[4]),  # param4
                   float(a[9]),  # x, latitude
                   float(a[8]),  # y, longitude
                   float(a[10])  # z
                   )
            if w.command not in cmdmap:
                raise MAVWPError("Unknown v100 waypoint action %u" % w.command)
            w.command = cmdmap[w.command]
            self.add(w, comment)
            comment = ''

    def _read_waypoints_v110(self, file):
        '''read a version 110 waypoint file'''
        comment = ''
        for line in file:
            if line.startswith('#'):
                comment = line[1:].lstrip()
                continue
            line = line.strip()
            if not line:
                continue
            a = line.split()
            if len(a) != 12:
                raise MAVWPError("invalid waypoint line with %u values" % len(a))
            if mavutil.mavlink10():
                fn = mavutil.mavlink.MAVLink_mission_item_message
            else:
                fn = mavutil.mavlink.MAVLink_waypoint_message
            w = fn(self.target_system, self.target_component,
                   int(a[0]),    # seq
                   int(a[2]),    # frame
                   int(a[3]),    # command
                   int(a[1]),    # current
                   int(a[11]),   # autocontinue
                   float(a[4]),  # param1,
                   float(a[5]),  # param2,
                   float(a[6]),  # param3
                   float(a[7]),  # param4
                   float(a[8]),  # x (latitude)
                   float(a[9]),  # y (longitude)
                   float(a[10])  # z (altitude)
                   )
            if w.command == 0 and w.seq == 0 and self.count() == 0:
                # special handling for Mission Planner created home wp
                w.command = mavutil.mavlink.MAV_CMD_NAV_WAYPOINT
            self.add(w, comment)
            comment = ''

    def _read_waypoints_pb_110(self, file):
        '''read a protobuf-format "QGC WPL PB 110" mission file'''
        if not HAVE_PROTOBUF:
            raise MAVWPError(
                'Cannot read mission file in protobuf format without protobuf '
                'library. Try "easy_install protobuf".')
        explicit_seq = False
        warned_seq = False
        mission = mission_pb2.Mission()
        text_format.Merge(file.read(), mission)
        defaults = mission_pb2.Waypoint()
        # Set defaults (may be overridden in file).
        defaults.current = False
        defaults.autocontinue = True
        defaults.param1 = 0.0
        defaults.param2 = 0.0
        defaults.param3 = 0.0
        defaults.param4 = 0.0
        defaults.x = 0.0
        defaults.y = 0.0
        defaults.z = 0.0
        # Use defaults specified in mission file, if there are any.
        if mission.defaults:
            defaults.MergeFrom(mission.defaults)
        for seq, waypoint in enumerate(mission.waypoint):
            # Consecutive sequence numbers are automatically assigned
            # UNLESS the mission file specifies sequence numbers of
            # its own.
            if waypoint.seq:
                explicit_seq = True
            else:
                if explicit_seq and not warned_seq:
                    # logging.warn() is deprecated; warning() is the
                    # supported spelling.
                    logging.warning(
                        'Waypoint file %s: mixes explicit and implicit '
                        'sequence numbers' % (file,))
                    warned_seq = True
            # The first command has current=True, the rest have current=False.
            if seq > 0:
                current = defaults.current
            else:
                current = True
            w = mavutil.mavlink.MAVLink_mission_item_message(
                self.target_system, self.target_component,
                waypoint.seq or seq,
                waypoint.frame,
                waypoint.command,
                waypoint.current or current,
                waypoint.autocontinue or defaults.autocontinue,
                waypoint.param1 or defaults.param1,
                waypoint.param2 or defaults.param2,
                waypoint.param3 or defaults.param3,
                waypoint.param4 or defaults.param4,
                waypoint.x or defaults.x,
                waypoint.y or defaults.y,
                waypoint.z or defaults.z)
            self.add(w)

    def load(self, filename):
        '''load waypoints from a file.
        returns number of waypoints loaded'''
        # 'with' guarantees the handle is closed even if a reader raises
        # (the old open()/close() pair leaked it on parse errors).
        with open(filename, mode='r') as f:
            version_line = f.readline().strip()
            if version_line == "QGC WPL 100":
                readfn = self._read_waypoints_v100
            elif version_line == "QGC WPL 110":
                readfn = self._read_waypoints_v110
            elif version_line == "QGC WPL PB 110":
                readfn = self._read_waypoints_pb_110
            else:
                raise MAVWPError("Unsupported waypoint format '%s'" % version_line)
            self.clear()
            readfn(f)
        return len(self.wpoints)

    def save_as_pb(self, filename):
        '''save waypoints to a protobuf-format mission file'''
        if not HAVE_PROTOBUF:
            # Mirror the guard used by the protobuf reader.
            raise MAVWPError(
                'Cannot write mission file in protobuf format without protobuf '
                'library. Try "easy_install protobuf".')
        mission = mission_pb2.Mission()
        for w in self.wpoints:
            waypoint = mission.waypoint.add()
            waypoint.command = w.command
            waypoint.frame = w.frame
            waypoint.seq = w.seq
            waypoint.current = w.current
            waypoint.autocontinue = w.autocontinue
            waypoint.param1 = w.param1
            waypoint.param2 = w.param2
            waypoint.param3 = w.param3
            waypoint.param4 = w.param4
            waypoint.x = w.x
            waypoint.y = w.y
            waypoint.z = w.z
        with open(filename, 'w') as f:
            f.write('QGC WPL PB 110\n')
            f.write(text_format.MessageToString(mission))

    def save(self, filename):
        '''save waypoints to a QGC WPL 110 text file'''
        with open(filename, mode='w') as f:
            f.write("QGC WPL 110\n")
            for w in self.wpoints:
                if getattr(w, 'comment', None):
                    f.write("# %s\n" % w.comment)
                f.write("%u\t%u\t%u\t%u\t%f\t%f\t%f\t%f\t%f\t%f\t%f\t%u\n" % (
                    w.seq, w.current, w.frame, w.command,
                    w.param1, w.param2, w.param3, w.param4,
                    w.x, w.y, w.z, w.autocontinue))

    def polygon(self, done=None):
        '''return a polygon for the waypoints, following DO_JUMPs'''
        points = []
        if done is None:
            done = set()
        idx = 0
        # find first point not done yet
        while idx < self.count():
            if idx not in done:
                break
            idx += 1
        while idx < self.count():
            w = self.wp(idx)
            if idx in done:
                # Closed the loop back onto an already-visited point.
                if w.x != 0 or w.y != 0:
                    points.append((w.x, w.y))
                break
            done.add(idx)
            if w.command == mavutil.mavlink.MAV_CMD_DO_JUMP:
                # Follow the jump target (param1 holds the target index).
                idx = int(w.param1)
                w = self.wp(idx)
                if w.x != 0 or w.y != 0:
                    points.append((w.x, w.y))
                continue
            idx += 1
            # Only position-carrying navigation commands contribute vertices.
            if (w.x != 0 or w.y != 0) and w.command in [mavutil.mavlink.MAV_CMD_NAV_WAYPOINT,
                                                        mavutil.mavlink.MAV_CMD_NAV_LOITER_UNLIM,
                                                        mavutil.mavlink.MAV_CMD_NAV_LOITER_TURNS,
                                                        mavutil.mavlink.MAV_CMD_NAV_LOITER_TIME,
                                                        mavutil.mavlink.MAV_CMD_NAV_LAND,
                                                        mavutil.mavlink.MAV_CMD_NAV_TAKEOFF]:
                points.append((w.x, w.y))
        return points

    def polygon_list(self):
        '''return a list of polygons for the waypoints'''
        done = set()
        ret = []
        # Each polygon() call marks at least one more index as done,
        # so this loop terminates.
        while len(done) != self.count():
            p = self.polygon(done)
            if len(p) > 0:
                ret.append(p)
        return ret
class MAVFenceError(Exception):
    '''Error raised for MAVLink geo-fence handling problems.'''

    def __init__(self, msg):
        super(MAVFenceError, self).__init__(msg)
        # Kept as an attribute for callers that read .message directly.
        self.message = msg
class MAVFenceLoader(object):
    '''MAVLink geo-fence loader.

    Keeps an ordered list of fence points and reads/writes them as
    whitespace-separated "lat lon" text lines.
    '''

    def __init__(self, target_system=0, target_component=0):
        self.points = []
        self.target_system = target_system
        self.target_component = target_component
        # Timestamp of the last mutation; callers poll this to detect changes.
        self.last_change = time.time()

    def count(self):
        '''return number of points'''
        return len(self.points)

    def point(self, i):
        '''return a point'''
        return self.points[i]

    def add(self, p):
        '''add a point'''
        self.points.append(p)
        self.last_change = time.time()
        # Keep every point's 'count' field in sync with the list length.
        for i in range(self.count()):
            self.points[i].count = self.count()

    def add_latlon(self, lat, lon):
        '''add a point via latitude/longitude'''
        p = mavutil.mavlink.MAVLink_fence_point_message(self.target_system, self.target_component,
                                                        self.count(), 0, lat, lon)
        self.add(p)

    def clear(self):
        '''clear point list'''
        self.points = []
        self.last_change = time.time()

    def load(self, filename):
        '''load points from a file.
        returns number of points loaded'''
        # 'with' guarantees the handle is closed even if a parse error is
        # raised (the old open()/close() pair leaked it in that case).
        with open(filename, mode='r') as f:
            self.clear()
            for line in f:
                if line.startswith('#'):
                    continue
                line = line.strip()
                if not line:
                    continue
                a = line.split()
                if len(a) != 2:
                    raise MAVFenceError("invalid fence point line: %s" % line)
                self.add_latlon(float(a[0]), float(a[1]))
        return len(self.points)

    def save(self, filename):
        '''save fence points to a file'''
        with open(filename, mode='w') as f:
            for p in self.points:
                f.write("%f\t%f\n" % (p.lat, p.lng))

    def polygon(self):
        '''return a polygon for the fence'''
        # The first point is skipped (presumably the fence return point —
        # confirm against the fence protocol documentation).
        points = []
        for fp in self.points[1:]:
            points.append((fp.lat, fp.lng))
        return points
| owenson/ardupilot-sdk-python | pymavlink/mavwp.py | Python | lgpl-3.0 | 14,030 |
from __future__ import print_function
import sys, os
sys.path.insert(1, os.path.join("..","..",".."))
import h2o
from tests import pyunit_utils
from h2o.estimators.deepwater import H2ODeepWaterEstimator
def cnn(num_classes):
    """Build a LeNet-style MXNet CNN symbol with `num_classes` softmax outputs.

    NOTE(review): the original code also created mx.symbol.Dropout nodes
    (p=0.1 on the input, p=0.5 after each fully-connected layer) but never
    connected their outputs to the rest of the graph, so they had no effect.
    The dead nodes are removed here; the network wiring is unchanged.  If
    dropout regularization was intended, feed the Dropout outputs into the
    subsequent layers instead.
    """
    import mxnet as mx
    data = mx.symbol.Variable('data')
    # first conv block: 5x5 conv -> ReLU -> 3x3/2 max-pool
    conv1 = mx.symbol.Convolution(data=data, kernel=(5,5), num_filter=50)
    tanh1 = mx.symbol.Activation(data=conv1, act_type="relu")
    pool1 = mx.symbol.Pooling(data=tanh1, pool_type="max", kernel=(3,3), stride=(2,2))
    # second conv block
    conv2 = mx.symbol.Convolution(data=pool1, kernel=(5,5), num_filter=100)
    tanh2 = mx.symbol.Activation(data=conv2, act_type="relu")
    pool2 = mx.symbol.Pooling(data=tanh2, pool_type="max", kernel=(3,3), stride=(2,2))
    # first fully-connected layer
    flatten1 = mx.symbol.Flatten(data=pool2)
    fc1 = mx.symbol.FullyConnected(data=flatten1, num_hidden=1024)
    relu3 = mx.symbol.Activation(data=fc1, act_type="relu")
    # second fully-connected layer
    flatten2 = mx.symbol.Flatten(data=relu3)
    fc2 = mx.symbol.FullyConnected(data=flatten2, num_hidden=1024)
    relu4 = mx.symbol.Activation(data=fc2, act_type="relu")
    # output layer + softmax loss
    fc3 = mx.symbol.FullyConnected(data=relu4, num_hidden=num_classes)
    return mx.symbol.SoftmaxOutput(data=fc3, name='softmax')
def deepwater_custom_cnn_mnist():
    """Train a user-defined MXNet CNN on MNIST through H2O DeepWater.

    Returns early (skipping the test) when DeepWater is not available in
    this H2O build.  Asserts the validation mean per-class error is < 10%.
    """
    if not H2ODeepWaterEstimator.available(): return

    train = h2o.import_file(pyunit_utils.locate("bigdata/laptop/mnist/train.csv.gz"))
    test = h2o.import_file(pyunit_utils.locate("bigdata/laptop/mnist/test.csv.gz"))

    # Columns 0-783 are pixel values; column 784 is the digit label.
    predictors = list(range(0,784))
    resp = 784
    train[resp] = train[resp].asfactor()
    test[resp] = test[resp].asfactor()
    nclasses = train[resp].nlevels()[0]

    print("Creating the cnn model architecture from scratch using the MXNet Python API")
    # Serialize the symbol graph so H2O can load it as a 'user' network.
    cnn(nclasses).save("/tmp/symbol_custom-py.json")

    print("Importing the cnn model architecture for training in H2O")
    model = H2ODeepWaterEstimator(epochs=100, learning_rate=1e-3, mini_batch_size=64,
                                  network='user', network_definition_file="/tmp/symbol_custom-py.json",
                                  image_shape=[28,28], channels=1)
    model.train(x=predictors,y=resp, training_frame=train, validation_frame=test)
    model.show()
    error = model.model_performance(valid=True).mean_per_class_error()
    assert error < 0.1, "mean classification error on validation set is too high : " + str(error)
if __name__ == "__main__":
    # Standalone invocation: pyunit_utils manages the H2O connection.
    pyunit_utils.standalone_test(deepwater_custom_cnn_mnist)
else:
    # Imported by the pyunit test runner: run the test directly.
    deepwater_custom_cnn_mnist()
| mathemage/h2o-3 | h2o-py/tests/testdir_algos/deepwater/pyunit_custom_cnn_mnist_deepwater.py | Python | apache-2.0 | 2,751 |
from yapsy.IPlugin import IPlugin
from logbook.Importer import Plugin
from messages import TimeSeriesData,TimeSeriesMetaData,LogMetaData,UIData,TimeSeries
from sqlalchemy import *
import logging
from tools.profiling import timing
from PyQt5.QtWidgets import QLabel, QFormLayout, QLineEdit
#from PyQt5 import QtCore, QtGui, QtWidgets
class Running(IPlugin,Plugin):
    '''Logbook plugin for running activities: imports FIT "record" messages
    into a SQLite logbook and produces time-series and summary data for the UI.'''

    def __init__(self,log_name=None,metadata=None):
        # Capabilities advertised to the plugin framework.
        self._actions=['import']
        self._type=['running']
        self.logging = logging.getLogger(__name__)
        self._filename = log_name
        if metadata:
            self._metadata = LogMetaData(file_hash=metadata.file_hash,
                                         date=metadata.creation_date,
                                         name=metadata.event_name,
                                         maintype=metadata.event_type,
                                         subtype=metadata.event_subtype
                                         )
        # Zero-valued summary rows shown in the UI before any data is loaded.
        self._formdata = []
        self._formdata.append(TimeSeriesMetaData("Total Length",0,"m"))
        self._formdata.append(TimeSeriesMetaData("Time per 100m","%.1f" %0,"s"))
        self._formdata.append(TimeSeriesMetaData("average speed","%.1f" %0,"m/s"))
        self._formdata.append(TimeSeriesMetaData("Total calories",0,"kcal"))
        self._formdata.append(TimeSeriesMetaData("Event duration","%.1f" %0,"min"))

    @timing
    def open_logbook(self,filename):
        '''Open the SQLite logbook and ensure the event_running table exists.'''
        self._filename = filename
        self._alchemy_logbook = create_engine('sqlite:///'+self._filename)
        _metadata = MetaData(bind=self._alchemy_logbook)
        # The 'file' table must already exist in the logbook (autoload).
        self.file_table = Table('file', _metadata, autoload=True)
        self.running_table = Table("event_running",_metadata,
                                   Column('event_running_id',Integer,primary_key=True),
                                   Column('f_id',Integer,ForeignKey("file.file_id"), nullable=False),
                                   Column('timestamp',DateTime),
                                   Column('cadence',Integer),
                                   Column('distance',Integer),
                                   Column('enhanced_speed',Float),
                                   Column('heart_rate',Integer),
                                   Column('position_lat',Float),
                                   Column('position_long',Float)
                                   )
        # checkfirst avoids an error when the table already exists.
        self.running_table.create(checkfirst=True)

    @timing
    def import_fit(self,fitfile=None):
        '''Copy all "record" messages of *fitfile* into event_running, keyed
        to the file row whose hash matches the FIT file's digest.'''
        stmt = self.file_table.select(self.file_table.c.file_hash==fitfile.digest)
        row = stmt.execute().fetchone()
        file_id = row.file_id
        for record in fitfile.get_messages(["record"]):
            # Collect the channels of interest; missing ones stay None.
            timestamp = None
            cadence = None
            distance = None
            enhanced_speed = None
            heart_rate = None
            lat = None
            lon = None
            data = []
            for record_data in record:
                if record_data.name == "timestamp":
                    timestamp = record_data.value
                if record_data.name =="cadence":
                    cadence = record_data.value
                if record_data.name == "distance":
                    distance = record_data.value
                if record_data.name == "enhanced_speed":
                    enhanced_speed = record_data.value
                if record_data.name == "heart_rate":
                    heart_rate = record_data.value
                if record_data.name == "position_lat":
                    # Convert from semicircles (2**31 per 180 degrees) to degrees.
                    lat = record_data.value*(180.0/2**31)
                if record_data.name == "position_long":
                    lon = record_data.value*(180.0/2**31)
            # NOTE(review): one INSERT per record message; batching the rows
            # across records would be faster — confirm before changing.
            data.append({'f_id':file_id,'timestamp':timestamp,
                         'cadence':cadence,'distance':distance,
                         'enhanced_speed':enhanced_speed,'heart_rate':heart_rate,
                         'position_lat':lat,'position_long':lon})
            self._alchemy_logbook.execute(self.running_table.insert(),data)

    @timing
    def get_data(self,filehash):
        '''Build per-sample time series (cadence, distance delta, heart rate,
        speed) for the event stored under *filehash*; returns a TimeSeries
        or None when no usable rows exist.'''
        s = self.running_table.join(self.file_table).\
            select().where(self.file_table.c.file_hash==filehash)
        cadence = TimeSeriesData(name="cadence" ,labels=[],data=[],unit='rpm',xlabel="duration(min)")
        distance = TimeSeriesData(name="distance" ,labels=[],data=[],unit='m',xlabel="duration(min)")
        heart_rate = TimeSeriesData(name="heart_rate",labels=[],data=[],unit="bpm",xlabel="duration(min)")
        speed = TimeSeriesData(name="speed" ,labels=[],data=[],unit="m/s",xlabel="duration(min)")
        rows = 0
        abs_len = 0
        last_ts = 0
        row = None
        for row in self._alchemy_logbook.execute(s):
            # Skip rows with any missing channel so all series stay aligned.
            if row.cadence and row.distance and row.enhanced_speed and row.heart_rate:
                rows = rows + 1
                if last_ts == 0:
                    last_ts = row.timestamp
                # Minutes elapsed since the first accepted sample.
                ts = ((row.timestamp-last_ts).seconds/60)
                cadence.data.append(row.cadence)
                cadence.labels.append(ts)
                # Store the distance increment since the previous sample.
                distance.data.append(row.distance-abs_len)
                abs_len = row.distance
                distance.labels.append(ts)
                heart_rate.data.append(row.heart_rate)
                heart_rate.labels.append(ts)
                speed.data.append(row.enhanced_speed)
                speed.labels.append(ts)
        if row:
            data = [cadence,distance,heart_rate,speed]
            # NOTE(review): all summary values except "Total Length" are
            # hardcoded placeholders (1), not computed from the data above —
            # confirm whether real aggregates were intended here.
            formdata = []
            formdata.append(TimeSeriesMetaData("Total Length",row.distance,"m"))
            formdata.append(TimeSeriesMetaData("Time per 100m","%.1f" %1,"s"))
            formdata.append(TimeSeriesMetaData("average speed","%.1f" %(1/1),"m/s"))
            formdata.append(TimeSeriesMetaData("Total calories",1,"kcal"))
            formdata.append(TimeSeriesMetaData("Event duration","%.1f" %(1),"min"))
            return TimeSeries(data=data,metadata=formdata)

    @property
    def ui(self):
        '''Qt form layout with one (label, line-edit) row per summary value.'''
        layout = QFormLayout()
        labels=[]
        fields=[]
        if self._formdata:
            for i in range(len(self._formdata)):
                labels.append(QLabel(self._formdata[i].name+" ("+self._formdata[i].unit+")"))
                fields.append(QLineEdit(str(self._formdata[i].value)))
                layout.addRow(labels[-1],
                              fields[-1]
                              )
        return UIData(ui=layout,labels=labels,fields=fields)
| romses/FitView | logbook/Importer/running.py | Python | bsd-3-clause | 6,955 |
###############################################################################
#
# Tests for XlsxWriter.
#
# SPDX-License-Identifier: BSD-2-Clause
# Copyright (c), 2013-2022, John McNamara, [email protected]
#
from ..excel_comparison_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
    """
    Test file created by XlsxWriter against a file created by Excel.
    """

    def setUp(self):
        self.set_filename('rich_string08.xlsx')

    def test_create_file(self):
        """Write a rich string whose final argument is a cell-level format."""

        workbook = Workbook(self.got_filename)
        worksheet = workbook.add_worksheet()

        bold = workbook.add_format({'bold': 1})
        italic = workbook.add_format({'italic': 1})
        # Renamed from 'format' to avoid shadowing the builtin of that name.
        centered = workbook.add_format({'align': 'center'})

        worksheet.write('A1', 'Foo', bold)
        worksheet.write('A2', 'Bar', italic)
        worksheet.write_rich_string('A3', 'ab', bold, 'cd', 'efg', centered)

        workbook.close()

        self.assertExcelEqual()
| jmcnamara/XlsxWriter | xlsxwriter/test/comparison/test_rich_string08.py | Python | bsd-2-clause | 1,080 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
import multiprocessing
import os
import pathlib
import random
import socket
import sys
import threading
import unittest
from datetime import datetime, timedelta
from tempfile import TemporaryDirectory
from textwrap import dedent
from unittest import mock
from unittest.mock import MagicMock, PropertyMock
import pytest
from freezegun import freeze_time
from airflow.configuration import conf
from airflow.dag_processing.manager import (
DagFileProcessorAgent,
DagFileProcessorManager,
DagFileStat,
DagParsingSignal,
DagParsingStat,
)
from airflow.dag_processing.processor import DagFileProcessorProcess
from airflow.jobs.local_task_job import LocalTaskJob as LJ
from airflow.models import DagBag, DagModel, TaskInstance as TI, errors
from airflow.models.dagcode import DagCode
from airflow.models.serialized_dag import SerializedDagModel
from airflow.models.taskinstance import SimpleTaskInstance
from airflow.utils import timezone
from airflow.utils.callback_requests import CallbackRequest, TaskCallbackRequest
from airflow.utils.net import get_hostname
from airflow.utils.session import create_session
from airflow.utils.state import DagRunState, State
from airflow.utils.types import DagRunType
from tests.core.test_logging_config import SETTINGS_FILE_VALID, settings_context
from tests.models import TEST_DAGS_FOLDER
from tests.test_utils.config import conf_vars
from tests.test_utils.db import clear_db_dags, clear_db_runs, clear_db_serialized_dags
TEST_DAG_FOLDER = pathlib.Path(__file__).parent.parent / 'dags'
DEFAULT_DATE = timezone.datetime(2016, 1, 1)
class FakeDagFileProcessorRunner(DagFileProcessorProcess):
    """Stand-in processor: launches nothing, parses nothing, and always
    reports itself done with a fixed (0, 0) result."""

    def __init__(self, file_path, pickle_dags, dag_ids, callbacks):
        super().__init__(file_path, pickle_dags, dag_ids, callbacks)
        # waitable_handle must be a genuinely selectable object, so hand
        # out the read end of a real pipe that already has data queued.
        read_end, write_end = multiprocessing.Pipe(duplex=False)
        write_end.send('abc')
        write_end.close()
        self._waitable_handle = read_end
        self._result = 0, 0

    def start(self):
        """No-op: no subprocess is ever started."""

    @property
    def start_time(self):
        return DEFAULT_DATE

    @property
    def pid(self):
        return 1234

    @property
    def done(self):
        return True

    @property
    def result(self):
        return self._result

    @property
    def waitable_handle(self):
        return self._waitable_handle

    @staticmethod
    def _create_process(file_path, callback_requests, dag_ids, pickle_dags):
        # Note the argument-order translation between this factory signature
        # and the constructor's (pickle_dags/dag_ids/callbacks ordering).
        return FakeDagFileProcessorRunner(
            file_path,
            pickle_dags,
            dag_ids,
            callback_requests,
        )
class TestDagFileProcessorManager:
    def setup_method(self):
        """Reset DB state before each test so runs are isolated."""
        clear_db_runs()
        clear_db_serialized_dags()
        clear_db_dags()
    def teardown_class(self):
        """Leave a clean DB behind after the whole class has run."""
        clear_db_runs()
        clear_db_serialized_dags()
        clear_db_dags()
    def run_processor_manager_one_loop(self, manager, parent_pipe):
        """Drive *manager* until it reports a finished DagParsingStat.

        Returns every non-stat object (e.g. callback requests) received
        over *parent_pipe* while the manager was looping.
        """
        if not manager._async_mode:
            # In sync mode the agent must explicitly request a single run.
            parent_pipe.send(DagParsingSignal.AGENT_RUN_ONCE)
        results = []
        while True:
            manager._run_parsing_loop()
            while parent_pipe.poll(timeout=0.01):
                obj = parent_pipe.recv()
                if not isinstance(obj, DagParsingStat):
                    results.append(obj)
                elif obj.done:
                    return results
            raise RuntimeError("Shouldn't get here - nothing to read, but manager not finished!")
    @conf_vars({('core', 'load_examples'): 'False'})
    def test_remove_file_clears_import_error(self, tmpdir):
        """Import errors for a broken DAG file vanish once the file is removed."""
        filename_to_parse = tmpdir / 'temp_dag.py'

        # Generate original import error
        with open(filename_to_parse, 'w') as file_to_parse:
            file_to_parse.writelines('an invalid airflow DAG')

        child_pipe, parent_pipe = multiprocessing.Pipe()
        # async mode is only exercised when not running on sqlite.
        async_mode = 'sqlite' not in conf.get('core', 'sql_alchemy_conn')

        manager = DagFileProcessorManager(
            dag_directory=tmpdir,
            max_runs=1,
            processor_timeout=timedelta.max,
            signal_conn=child_pipe,
            dag_ids=[],
            pickle_dags=False,
            async_mode=async_mode,
        )

        with create_session() as session:
            self.run_processor_manager_one_loop(manager, parent_pipe)
            import_errors = session.query(errors.ImportError).all()
            assert len(import_errors) == 1

            filename_to_parse.remove()

            # Rerun the scheduler once the dag file has been removed
            self.run_processor_manager_one_loop(manager, parent_pipe)
            import_errors = session.query(errors.ImportError).all()
            assert len(import_errors) == 0
            session.rollback()

        child_pipe.close()
        parent_pipe.close()
    @conf_vars({('core', 'load_examples'): 'False'})
    def test_max_runs_when_no_files(self):
        """A manager over an empty DAG folder completes max_runs without hanging."""
        child_pipe, parent_pipe = multiprocessing.Pipe()

        with TemporaryDirectory(prefix="empty-airflow-dags-") as dags_folder:
            # async mode is only exercised when not running on sqlite.
            async_mode = 'sqlite' not in conf.get('core', 'sql_alchemy_conn')
            manager = DagFileProcessorManager(
                dag_directory=dags_folder,
                max_runs=1,
                processor_timeout=timedelta.max,
                signal_conn=child_pipe,
                dag_ids=[],
                pickle_dags=False,
                async_mode=async_mode,
            )

            self.run_processor_manager_one_loop(manager, parent_pipe)
        child_pipe.close()
        parent_pipe.close()
    @pytest.mark.backend("mysql", "postgres")
    def test_start_new_processes_with_same_filepath(self):
        """
        Test that when a processor already exist with a filepath, a new processor won't be created
        with that filepath. The filepath will just be removed from the list.
        """
        manager = DagFileProcessorManager(
            dag_directory='directory',
            max_runs=1,
            processor_timeout=timedelta.max,
            signal_conn=MagicMock(),
            dag_ids=[],
            pickle_dags=False,
            async_mode=True,
        )

        file_1 = 'file_1.py'
        file_2 = 'file_2.py'
        file_3 = 'file_3.py'
        manager._file_path_queue = [file_1, file_2, file_3]

        # Mock that only one processor exists. This processor runs with 'file_1'
        manager._processors[file_1] = MagicMock()
        # Start New Processes
        manager.start_new_processes()

        # Because of the config: '[scheduler] parsing_processes = 2'
        # verify that only one extra process is created
        # and since a processor with 'file_1' already exists,
        # even though it is first in '_file_path_queue'
        # a new processor is created with 'file_2' and not 'file_1'.

        # file_1's stale entry remains; file_2 got the new slot; file_3 waits.
        assert file_1 in manager._processors.keys()
        assert file_2 in manager._processors.keys()
        assert [file_3] == manager._file_path_queue
    def test_set_file_paths_when_processor_file_path_not_in_new_file_paths(self):
        """Processors whose file is no longer listed are terminated and dropped."""
        manager = DagFileProcessorManager(
            dag_directory='directory',
            max_runs=1,
            processor_timeout=timedelta.max,
            signal_conn=MagicMock(),
            dag_ids=[],
            pickle_dags=False,
            async_mode=True,
        )

        mock_processor = MagicMock()
        # stop() raising mimics the real processor; terminate() must be used.
        mock_processor.stop.side_effect = AttributeError('DagFileProcessor object has no attribute stop')
        mock_processor.terminate.side_effect = None

        manager._processors['missing_file.txt'] = mock_processor
        manager._file_stats['missing_file.txt'] = DagFileStat(0, 0, None, None, 0)

        manager.set_file_paths(['abc.txt'])
        assert manager._processors == {}
    def test_set_file_paths_when_processor_file_path_is_in_new_file_paths(self):
        """Processors whose file is still listed are kept untouched."""
        manager = DagFileProcessorManager(
            dag_directory='directory',
            max_runs=1,
            processor_timeout=timedelta.max,
            signal_conn=MagicMock(),
            dag_ids=[],
            pickle_dags=False,
            async_mode=True,
        )

        mock_processor = MagicMock()
        # stop() raising mimics the real processor; terminate() must be used.
        mock_processor.stop.side_effect = AttributeError('DagFileProcessor object has no attribute stop')
        mock_processor.terminate.side_effect = None

        manager._processors['abc.txt'] = mock_processor

        manager.set_file_paths(['abc.txt'])
        assert manager._processors == {'abc.txt': mock_processor}
@conf_vars({("scheduler", "file_parsing_sort_mode"): "alphabetical"})
@mock.patch("zipfile.is_zipfile", return_value=True)
@mock.patch("airflow.utils.file.might_contain_dag", return_value=True)
@mock.patch("airflow.utils.file.find_path_from_directory", return_value=True)
@mock.patch("airflow.utils.file.os.path.isfile", return_value=True)
def test_file_paths_in_queue_sorted_alphabetically(
self, mock_isfile, mock_find_path, mock_might_contain_dag, mock_zipfile
):
"""Test dag files are sorted alphabetically"""
dag_files = ["file_3.py", "file_2.py", "file_4.py", "file_1.py"]
mock_find_path.return_value = dag_files
manager = DagFileProcessorManager(
dag_directory='directory',
max_runs=1,
processor_timeout=timedelta.max,
signal_conn=MagicMock(),
dag_ids=[],
pickle_dags=False,
async_mode=True,
)
manager.set_file_paths(dag_files)
assert manager._file_path_queue == []
manager.prepare_file_path_queue()
assert manager._file_path_queue == ['file_1.py', 'file_2.py', 'file_3.py', 'file_4.py']
@conf_vars({("scheduler", "file_parsing_sort_mode"): "random_seeded_by_host"})
@mock.patch("zipfile.is_zipfile", return_value=True)
@mock.patch("airflow.utils.file.might_contain_dag", return_value=True)
@mock.patch("airflow.utils.file.find_path_from_directory", return_value=True)
@mock.patch("airflow.utils.file.os.path.isfile", return_value=True)
def test_file_paths_in_queue_sorted_random_seeded_by_host(
self, mock_isfile, mock_find_path, mock_might_contain_dag, mock_zipfile
):
"""Test files are randomly sorted and seeded by host name"""
dag_files = ["file_3.py", "file_2.py", "file_4.py", "file_1.py"]
mock_find_path.return_value = dag_files
manager = DagFileProcessorManager(
dag_directory='directory',
max_runs=1,
processor_timeout=timedelta.max,
signal_conn=MagicMock(),
dag_ids=[],
pickle_dags=False,
async_mode=True,
)
manager.set_file_paths(dag_files)
assert manager._file_path_queue == []
manager.prepare_file_path_queue()
expected_order = dag_files
random.Random(get_hostname()).shuffle(expected_order)
assert manager._file_path_queue == expected_order
# Verify running it again produces same order
manager._file_paths = []
manager.prepare_file_path_queue()
assert manager._file_path_queue == expected_order
@conf_vars({("scheduler", "file_parsing_sort_mode"): "modified_time"})
@mock.patch("zipfile.is_zipfile", return_value=True)
@mock.patch("airflow.utils.file.might_contain_dag", return_value=True)
@mock.patch("airflow.utils.file.find_path_from_directory", return_value=True)
@mock.patch("airflow.utils.file.os.path.isfile", return_value=True)
@mock.patch("airflow.utils.file.os.path.getmtime")
def test_file_paths_in_queue_sorted_by_modified_time(
self, mock_getmtime, mock_isfile, mock_find_path, mock_might_contain_dag, mock_zipfile
):
"""Test files are sorted by modified time"""
paths_with_mtime = {"file_3.py": 3.0, "file_2.py": 2.0, "file_4.py": 5.0, "file_1.py": 4.0}
dag_files = list(paths_with_mtime.keys())
mock_getmtime.side_effect = list(paths_with_mtime.values())
mock_find_path.return_value = dag_files
manager = DagFileProcessorManager(
dag_directory='directory',
max_runs=1,
processor_timeout=timedelta.max,
signal_conn=MagicMock(),
dag_ids=[],
pickle_dags=False,
async_mode=True,
)
manager.set_file_paths(dag_files)
assert manager._file_path_queue == []
manager.prepare_file_path_queue()
assert manager._file_path_queue == ['file_4.py', 'file_1.py', 'file_3.py', 'file_2.py']
@conf_vars({("scheduler", "file_parsing_sort_mode"): "modified_time"})
@mock.patch("zipfile.is_zipfile", return_value=True)
@mock.patch("airflow.utils.file.might_contain_dag", return_value=True)
@mock.patch("airflow.utils.file.find_path_from_directory", return_value=True)
@mock.patch("airflow.utils.file.os.path.isfile", return_value=True)
@mock.patch("airflow.utils.file.os.path.getmtime")
def test_file_paths_in_queue_excludes_missing_file(
self, mock_getmtime, mock_isfile, mock_find_path, mock_might_contain_dag, mock_zipfile
):
"""Check that a file is not enqueued for processing if it has been deleted"""
dag_files = ["file_3.py", "file_2.py", "file_4.py"]
mock_getmtime.side_effect = [1.0, 2.0, FileNotFoundError()]
mock_find_path.return_value = dag_files
manager = DagFileProcessorManager(
dag_directory='directory',
max_runs=1,
processor_timeout=timedelta.max,
signal_conn=MagicMock(),
dag_ids=[],
pickle_dags=False,
async_mode=True,
)
manager.set_file_paths(dag_files)
manager.prepare_file_path_queue()
assert manager._file_path_queue == ['file_2.py', 'file_3.py']
@conf_vars({("scheduler", "file_parsing_sort_mode"): "modified_time"})
@mock.patch("zipfile.is_zipfile", return_value=True)
@mock.patch("airflow.utils.file.might_contain_dag", return_value=True)
@mock.patch("airflow.utils.file.find_path_from_directory", return_value=True)
@mock.patch("airflow.utils.file.os.path.isfile", return_value=True)
@mock.patch("airflow.utils.file.os.path.getmtime")
def test_recently_modified_file_is_parsed_with_mtime_mode(
self, mock_getmtime, mock_isfile, mock_find_path, mock_might_contain_dag, mock_zipfile
):
"""
Test recently updated files are processed even if min_file_process_interval is not reached
"""
freezed_base_time = timezone.datetime(2020, 1, 5, 0, 0, 0)
initial_file_1_mtime = (freezed_base_time - timedelta(minutes=5)).timestamp()
dag_files = ["file_1.py"]
mock_getmtime.side_effect = [initial_file_1_mtime]
mock_find_path.return_value = dag_files
manager = DagFileProcessorManager(
dag_directory='directory',
max_runs=3,
processor_timeout=timedelta.max,
signal_conn=MagicMock(),
dag_ids=[],
pickle_dags=False,
async_mode=True,
)
# let's say the DAG was just parsed 2 seconds before the Freezed time
last_finish_time = freezed_base_time - timedelta(seconds=10)
manager._file_stats = {
"file_1.py": DagFileStat(1, 0, last_finish_time, 1.0, 1),
}
with freeze_time(freezed_base_time):
manager.set_file_paths(dag_files)
assert manager._file_path_queue == []
# File Path Queue will be empty as the "modified time" < "last finish time"
manager.prepare_file_path_queue()
assert manager._file_path_queue == []
# Simulate the DAG modification by using modified_time which is greater
# than the last_parse_time but still less than now - min_file_process_interval
file_1_new_mtime = freezed_base_time - timedelta(seconds=5)
file_1_new_mtime_ts = file_1_new_mtime.timestamp()
with freeze_time(freezed_base_time):
manager.set_file_paths(dag_files)
assert manager._file_path_queue == []
# File Path Queue will be empty as the "modified time" < "last finish time"
mock_getmtime.side_effect = [file_1_new_mtime_ts]
manager.prepare_file_path_queue()
# Check that file is added to the queue even though file was just recently passed
assert manager._file_path_queue == ["file_1.py"]
assert last_finish_time < file_1_new_mtime
assert (
manager._file_process_interval
> (freezed_base_time - manager.get_last_finish_time("file_1.py")).total_seconds()
)
def test_find_zombies(self):
    """A RUNNING TI whose LocalTaskJob is SHUTDOWN is reported as a zombie callback."""
    manager = DagFileProcessorManager(
        dag_directory='directory',
        max_runs=1,
        processor_timeout=timedelta.max,
        signal_conn=MagicMock(),
        dag_ids=[],
        pickle_dags=False,
        async_mode=True,
    )
    dagbag = DagBag(TEST_DAG_FOLDER, read_dags_from_db=False)
    with create_session() as session:
        session.query(LJ).delete()
        dag = dagbag.get_dag('example_branch_operator')
        dag.sync_to_db()
        task = dag.get_task(task_id='run_this_first')
        dag_run = dag.create_dagrun(
            state=DagRunState.RUNNING,
            execution_date=DEFAULT_DATE,
            run_type=DagRunType.SCHEDULED,
            session=session,
        )
        # Zombie fixture: task instance still RUNNING, its job SHUTDOWN.
        ti = TI(task, run_id=dag_run.run_id, state=State.RUNNING)
        local_job = LJ(ti)
        local_job.state = State.SHUTDOWN
        session.add(local_job)
        session.flush()
        ti.job_id = local_job.id
        session.add(ti)
        session.flush()
        # Backdate the last sweep so _find_zombies() actually runs a query now.
        manager._last_zombie_query_time = timezone.utcnow() - timedelta(
            seconds=manager._zombie_threshold_secs + 1
        )
        manager._find_zombies()
        requests = manager._callback_to_execute[dag.fileloc]
        assert 1 == len(requests)
        assert requests[0].full_filepath == dag.fileloc
        assert requests[0].msg == f"Detected {ti} as zombie"
        assert requests[0].is_failure_callback is True
        assert isinstance(requests[0].simple_task_instance, SimpleTaskInstance)
        assert ti.dag_id == requests[0].simple_task_instance.dag_id
        assert ti.task_id == requests[0].simple_task_instance.task_id
        assert ti.run_id == requests[0].simple_task_instance.run_id
        # Clean up so later tests see an empty TI/LocalJob table.
        session.query(TI).delete()
        session.query(LJ).delete()
@mock.patch('airflow.dag_processing.manager.DagFileProcessorProcess')
def test_handle_failure_callback_with_zombies_are_correctly_passed_to_dag_file_processor(
    self, mock_processor
):
    """
    Check that the same set of failure callback with zombies are passed to the dag
    file processors until the next zombie detection logic is invoked.
    """
    test_dag_path = TEST_DAG_FOLDER / 'test_example_bash_operator.py'
    with conf_vars({('scheduler', 'parsing_processes'): '1', ('core', 'load_examples'): 'False'}):
        dagbag = DagBag(test_dag_path, read_dags_from_db=False)
        with create_session() as session:
            session.query(LJ).delete()
            dag = dagbag.get_dag('test_example_bash_operator')
            dag.sync_to_db()
            dag_run = dag.create_dagrun(
                state=DagRunState.RUNNING,
                execution_date=DEFAULT_DATE,
                run_type=DagRunType.SCHEDULED,
                session=session,
            )
            task = dag.get_task(task_id='run_this_last')
            # Zombie fixture: RUNNING TI whose LocalTaskJob is SHUTDOWN.
            ti = TI(task, run_id=dag_run.run_id, state=State.RUNNING)
            local_job = LJ(ti)
            local_job.state = State.SHUTDOWN
            session.add(local_job)
            session.flush()
            # TODO: If there was an actual Relationship between TI and Job
            # we wouldn't need this extra commit
            session.add(ti)
            ti.job_id = local_job.id
            session.flush()
            expected_failure_callback_requests = [
                TaskCallbackRequest(
                    full_filepath=dag.fileloc,
                    simple_task_instance=SimpleTaskInstance(ti),
                    msg="Message",
                )
            ]
        test_dag_path = TEST_DAG_FOLDER / 'test_example_bash_operator.py'
        child_pipe, parent_pipe = multiprocessing.Pipe()
        async_mode = 'sqlite' not in conf.get('core', 'sql_alchemy_conn')
        # Capture every processor the manager spawns so the callbacks actually
        # handed to it can be inspected afterwards.
        fake_processors = []

        def fake_processor_(*args, **kwargs):
            nonlocal fake_processors
            processor = FakeDagFileProcessorRunner._create_process(*args, **kwargs)
            fake_processors.append(processor)
            return processor

        mock_processor.side_effect = fake_processor_
        manager = DagFileProcessorManager(
            dag_directory=test_dag_path,
            max_runs=1,
            processor_timeout=timedelta.max,
            signal_conn=child_pipe,
            dag_ids=[],
            pickle_dags=False,
            async_mode=async_mode,
        )
        self.run_processor_manager_one_loop(manager, parent_pipe)
        if async_mode:
            # Once for initial parse, and then again for the add_callback_to_queue
            assert len(fake_processors) == 2
            assert fake_processors[0]._file_path == str(test_dag_path)
            assert fake_processors[0]._callback_requests == []
        else:
            assert len(fake_processors) == 1
        assert fake_processors[-1]._file_path == str(test_dag_path)
        callback_requests = fake_processors[-1]._callback_requests
        # Compare by TI key: the zombie detected above must be among the
        # callbacks delivered to the last processor.
        assert {zombie.simple_task_instance.key for zombie in expected_failure_callback_requests} == {
            result.simple_task_instance.key for result in callback_requests
        }
        child_pipe.close()
        parent_pipe.close()
@mock.patch("airflow.dag_processing.processor.DagFileProcessorProcess.pid", new_callable=PropertyMock)
@mock.patch("airflow.dag_processing.processor.DagFileProcessorProcess.kill")
def test_kill_timed_out_processors_kill(self, mock_kill, mock_pid):
mock_pid.return_value = 1234
manager = DagFileProcessorManager(
dag_directory='directory',
max_runs=1,
processor_timeout=timedelta(seconds=5),
signal_conn=MagicMock(),
dag_ids=[],
pickle_dags=False,
async_mode=True,
)
processor = DagFileProcessorProcess('abc.txt', False, [], [])
processor._start_time = timezone.make_aware(datetime.min)
manager._processors = {'abc.txt': processor}
manager._kill_timed_out_processors()
mock_kill.assert_called_once_with()
@mock.patch("airflow.dag_processing.processor.DagFileProcessorProcess.pid", new_callable=PropertyMock)
@mock.patch("airflow.dag_processing.processor.DagFileProcessorProcess")
def test_kill_timed_out_processors_no_kill(self, mock_dag_file_processor, mock_pid):
mock_pid.return_value = 1234
manager = DagFileProcessorManager(
dag_directory='directory',
max_runs=1,
processor_timeout=timedelta(seconds=5),
signal_conn=MagicMock(),
dag_ids=[],
pickle_dags=False,
async_mode=True,
)
processor = DagFileProcessorProcess('abc.txt', False, [], [])
processor._start_time = timezone.make_aware(datetime.max)
manager._processors = {'abc.txt': processor}
manager._kill_timed_out_processors()
mock_dag_file_processor.kill.assert_not_called()
@conf_vars({('core', 'load_examples'): 'False'})
@pytest.mark.execution_timeout(10)
def test_dag_with_system_exit(self):
    """
    Test to check that a DAG with a system.exit() doesn't break the scheduler.
    """
    dag_id = 'exit_test_dag'
    dag_directory = TEST_DAG_FOLDER.parent / 'dags_with_system_exit'
    # Delete the one valid DAG/SerializedDAG, and check that it gets re-created
    clear_db_dags()
    clear_db_serialized_dags()
    child_pipe, parent_pipe = multiprocessing.Pipe()
    manager = DagFileProcessorManager(
        dag_directory=dag_directory,
        dag_ids=[],
        max_runs=1,
        processor_timeout=timedelta(seconds=5),
        signal_conn=child_pipe,
        pickle_dags=False,
        async_mode=True,
    )
    manager._run_parsing_loop()
    # Drain the signal pipe until the parsing stat reports completion.
    result = None
    while parent_pipe.poll(timeout=None):
        result = parent_pipe.recv()
        if isinstance(result, DagParsingStat) and result.done:
            break
    # Three files in folder should be processed
    assert sum(stat.run_count for stat in manager._file_stats.values()) == 3
    with create_session() as session:
        # The valid DAG was re-created despite the sibling file calling sys.exit().
        assert session.query(DagModel).get(dag_id) is not None
@conf_vars({('core', 'load_examples'): 'False'})
@pytest.mark.backend("mysql", "postgres")
@pytest.mark.execution_timeout(30)
@mock.patch('airflow.dag_processing.manager.DagFileProcessorProcess')
def test_pipe_full_deadlock(self, mock_processor):
    """Manager must not deadlock when the scheduler<->manager pipe buffer is full."""
    dag_filepath = TEST_DAG_FOLDER / "test_scheduler_dags.py"
    child_pipe, parent_pipe = multiprocessing.Pipe()
    # Shrink the buffers to exacerbate the problem!
    for fd in (parent_pipe.fileno(),):
        sock = socket.socket(fileno=fd)
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, 1024)
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 1024)
        sock.detach()  # keep the fd alive; only the socket wrapper is dropped
    exit_event = threading.Event()
    # To test this behaviour we need something that continually fills the
    # parent pipe's buffer (and keeps it full).
    def keep_pipe_full(pipe, exit_event):
        n = 0  # number of requests successfully sent, for the debug log
        while True:
            if exit_event.is_set():
                break
            req = CallbackRequest(str(dag_filepath))
            try:
                logging.debug("Sending CallbackRequests %d", n + 1)
                pipe.send(req)
            except TypeError:
                # This is actually the error you get when the parent pipe
                # is closed! Nicely handled, eh?
                break
            except OSError:
                break
            n += 1
        logging.debug("  Sent %d CallbackRequests", n)
    thread = threading.Thread(target=keep_pipe_full, args=(parent_pipe, exit_event))
    # Capture the processors the manager spawns (none of them really fork).
    fake_processors = []
    def fake_processor_(*args, **kwargs):
        nonlocal fake_processors
        processor = FakeDagFileProcessorRunner._create_process(*args, **kwargs)
        fake_processors.append(processor)
        return processor
    mock_processor.side_effect = fake_processor_
    manager = DagFileProcessorManager(
        dag_directory=dag_filepath,
        dag_ids=[],
        # A reasonable large number to ensure that we trigger the deadlock
        max_runs=100,
        processor_timeout=timedelta(seconds=5),
        signal_conn=child_pipe,
        pickle_dags=False,
        async_mode=True,
    )
    try:
        thread.start()
        # If this completes without hanging, then the test is good!
        manager._run_parsing_loop()
        exit_event.set()
    finally:
        logging.info("Closing pipes")
        parent_pipe.close()
        child_pipe.close()
    thread.join(timeout=1.0)
@conf_vars({('core', 'load_examples'): 'False'})
@mock.patch('airflow.dag_processing.manager.Stats.timing')
def test_send_file_processing_statsd_timing(self, statsd_timing_mock, tmpdir):
    """Each parsed file reports its last parse duration via Stats.timing."""
    filename_to_parse = tmpdir / 'temp_dag.py'
    dag_code = dedent(
        """
    from airflow import DAG
    dag = DAG(dag_id='temp_dag', schedule_interval='0 0 * * *')
    """
    )
    with open(filename_to_parse, 'w') as file_to_parse:
        file_to_parse.writelines(dag_code)
    child_pipe, parent_pipe = multiprocessing.Pipe()
    async_mode = 'sqlite' not in conf.get('core', 'sql_alchemy_conn')
    manager = DagFileProcessorManager(
        dag_directory=tmpdir,
        max_runs=1,
        processor_timeout=timedelta.max,
        signal_conn=child_pipe,
        dag_ids=[],
        pickle_dags=False,
        async_mode=async_mode,
    )
    self.run_processor_manager_one_loop(manager, parent_pipe)
    last_runtime = manager.get_last_runtime(manager.file_paths[0])
    child_pipe.close()
    parent_pipe.close()
    # Metric name is derived from the file stem ('temp_dag').
    statsd_timing_mock.assert_called_with('dag_processing.last_duration.temp_dag', last_runtime)
def test_refresh_dags_dir_doesnt_delete_zipped_dags(self, tmpdir):
    """Test DagFileProcessorManager._refresh_dag_dir method"""
    manager = DagFileProcessorManager(
        dag_directory=TEST_DAG_FOLDER,
        max_runs=1,
        processor_timeout=timedelta.max,
        signal_conn=MagicMock(),
        dag_ids=[],
        pickle_dags=False,
        async_mode=True,
    )
    dagbag = DagBag(dag_folder=tmpdir, include_examples=False)
    zipped_dag_path = os.path.join(TEST_DAGS_FOLDER, "test_zip.zip")
    dagbag.process_file(zipped_dag_path)
    dag = dagbag.get_dag("test_zip_dag")
    dag.sync_to_db()
    SerializedDagModel.write_dag(dag)
    # Backdate the last refresh so _refresh_dag_dir() really performs a sweep.
    manager.last_dag_dir_refresh_time = timezone.utcnow() - timedelta(minutes=10)
    manager._refresh_dag_dir()
    # Assert dag not deleted in SDM
    assert SerializedDagModel.has_dag('test_zip_dag')
    # assert code not deleted
    assert DagCode.has_dag(dag.fileloc)
class TestDagFileProcessorAgent(unittest.TestCase):
    """Lifecycle tests for DagFileProcessorAgent: process launch, logging setup, single parse."""

    def setUp(self):
        # Make sure that the configure_logging is not cached
        self.old_modules = dict(sys.modules)

    def tearDown(self):
        # Remove any new modules imported during the test run. This lets us
        # import the same source files for more than one test.
        remove_list = []
        for mod in sys.modules:
            if mod not in self.old_modules:
                remove_list.append(mod)
        for mod in remove_list:
            del sys.modules[mod]

    @staticmethod
    def _processor_factory(file_path, zombies, dag_ids, pickle_dags):
        # Adapter with the (file_path, zombies, dag_ids, pickle_dags) factory
        # signature expected by the agent; note the argument reordering.
        return DagFileProcessorProcess(file_path, pickle_dags, dag_ids, zombies)

    def test_reload_module(self):
        """
        Configure the context to have logging.logging_config_class set to a fake logging
        class path, thus when reloading logging module the airflow.processor_manager
        logger should not be configured.
        """
        with settings_context(SETTINGS_FILE_VALID):
            # Launch a process through DagFileProcessorAgent, which will try
            # reload the logging module.
            test_dag_path = TEST_DAG_FOLDER / 'test_scheduler_dags.py'
            async_mode = 'sqlite' not in conf.get('core', 'sql_alchemy_conn')
            log_file_loc = conf.get('logging', 'DAG_PROCESSOR_MANAGER_LOG_LOCATION')
            try:
                os.remove(log_file_loc)
            except OSError:
                pass
            # Starting dag processing with 0 max_runs to avoid redundant operations.
            processor_agent = DagFileProcessorAgent(test_dag_path, 0, timedelta.max, [], False, async_mode)
            processor_agent.start()
            if not async_mode:
                processor_agent.run_single_parsing_loop()
            processor_agent._process.join()
            # Since we are reloading logging config not creating this file,
            # we should expect it to be nonexistent.
            assert not os.path.isfile(log_file_loc)

    @conf_vars({('core', 'load_examples'): 'False'})
    def test_parse_once(self):
        """A max_runs=1 agent parses the folder exactly once and serializes both DAGs."""
        clear_db_serialized_dags()
        clear_db_dags()
        test_dag_path = TEST_DAG_FOLDER / 'test_scheduler_dags.py'
        async_mode = 'sqlite' not in conf.get('core', 'sql_alchemy_conn')
        processor_agent = DagFileProcessorAgent(test_dag_path, 1, timedelta.max, [], False, async_mode)
        processor_agent.start()
        if not async_mode:
            processor_agent.run_single_parsing_loop()
        # Heartbeat until the single parsing run is reported done.
        while not processor_agent.done:
            if not async_mode:
                processor_agent.wait_until_finished()
            processor_agent.heartbeat()
        assert processor_agent.all_files_processed
        assert processor_agent.done
        with create_session() as session:
            dag_ids = session.query(DagModel.dag_id).order_by("dag_id").all()
            assert dag_ids == [('test_start_date_scheduling',), ('test_task_start_date_scheduling',)]
            dag_ids = session.query(SerializedDagModel.dag_id).order_by("dag_id").all()
            assert dag_ids == [('test_start_date_scheduling',), ('test_task_start_date_scheduling',)]

    def test_launch_process(self):
        """Starting the agent creates the DAG-processor-manager log file."""
        test_dag_path = TEST_DAG_FOLDER / 'test_scheduler_dags.py'
        async_mode = 'sqlite' not in conf.get('core', 'sql_alchemy_conn')
        log_file_loc = conf.get('logging', 'DAG_PROCESSOR_MANAGER_LOG_LOCATION')
        try:
            os.remove(log_file_loc)
        except OSError:
            pass
        # Starting dag processing with 0 max_runs to avoid redundant operations.
        processor_agent = DagFileProcessorAgent(test_dag_path, 0, timedelta.max, [], False, async_mode)
        processor_agent.start()
        if not async_mode:
            processor_agent.run_single_parsing_loop()
        processor_agent._process.join()
        assert os.path.isfile(log_file_loc)
| apache/incubator-airflow | tests/dag_processing/test_manager.py | Python | apache-2.0 | 35,343 |
from Tkinter import *
import ttk
def calculate(*args):
    """Convert the value in the `feet` entry to meters, rounded to 4 decimals.

    Bound to the Calculate button and the <Return> key; extra event args
    are ignored. Invalid (non-numeric) input leaves the previous result
    on screen and reports the error.
    """
    try:
        value = float(feet.get())
        # int(x * 10000 + 0.5) / 10000 rounds to 4 decimal places; the
        # original dropped the int(), so the +0.5 merely added 0.00005.
        meters.set(int(0.3048 * value * 10000.0 + 0.5) / 10000.0)
    except ValueError:
        # Works as a statement in Python 2 and a call in Python 3.
        print("Error occurred.")
# --- UI construction (runs at import time) ---
root = Tk()
root.title("Feet to Meters")
# Outer padded frame that stretches with the window.
mainframe = ttk.Frame(root, padding="3 3 12 12")
mainframe.grid(column=0, row=0, sticky=(N, W, E, S))
mainframe.columnconfigure(0, weight=1)
mainframe.rowconfigure(0, weight=1)
# Tk variables that link the entry/label widgets to calculate().
feet = StringVar()
meters = StringVar()
feet_entry = ttk.Entry(mainframe, width=7, textvariable=feet)
feet_entry.grid(column=2, row=1, sticky=(W, E))
ttk.Label(mainframe, textvariable=meters).grid(column=2, row=2, sticky=(W, E))
ttk.Button(mainframe, text="Calculate", command=calculate).grid(column=3, row=3, sticky=W)
ttk.Label(mainframe, text="feet").grid(column=3, row=1, sticky=W)
ttk.Label(mainframe, text="is equivalent to").grid(column=1, row=2, sticky=E)
ttk.Label(mainframe, text="meters").grid(column=3, row=2, sticky=W)
# Uniform padding for every child widget.
for child in mainframe.winfo_children(): child.grid_configure(padx=5, pady=5)
feet_entry.focus()
root.bind('<Return>', calculate)  # Enter key triggers the conversion too
root.mainloop()
| erickmusembi/Robot-Project | Robot Project/tests/foot to meters.py | Python | mit | 1,123 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (C) 2009-2010 Nicolas P. Rougier
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
# -----------------------------------------------------------------------------
''' Various shaders for texture processing.
Shaders are used to program the graphics processing unit (GPU) programmable
rendering pipeline, which has mostly superseded the fixed-function pipeline
that allowed only common geometry transformation and pixel shading
functions; with shaders, customized effects can be used.
Vertex shaders are run once for each vertex given to the graphics
processor. The purpose is to transform each vertex's 3D position in virtual
space to the 2D coordinate at which it appears on the screen (as well as a
depth value for the Z-buffer). Vertex shaders can manipulate properties such
as position, color, and texture coordinate, but cannot create new
vertices. The output of the vertex shader goes to the next stage in the
pipeline, which is either a geometry shader if present or the rasterizer
otherwise.
Geometry shaders can add and remove vertices from a mesh. Geometry shaders
can be used to generate geometry procedurally or to add volumetric detail to
existing meshes that would be too costly to process on the CPU. If geometry
shaders are being used, the output is then sent to the rasterizer.
Pixel shaders, also known as fragment shaders, calculate the color of
individual pixels. The input to this stage comes from the rasterizer, which
fills in the polygons being sent through the graphics pipeline. Pixel
shaders are typically used for scene lighting and related effects such as
bump mapping and color toning. (Direct3D uses the term "pixel shader,"
while OpenGL uses the term "fragment shader." The latter is arguably more
correct, as there is not a one-to-one relationship between calls to the
pixel shader and pixels on the screen. The most common reason for this is
that pixel shaders are often called many times per pixel for every object
that is in the corresponding space, even if it is occluded; the Z-buffer
sorts this out later.)
'''
from shader import Shader
from nearest import Nearest
from bilinear import Bilinear
from bicubic import Bicubic
| davidcox/glumpy | glumpy/shader/__init__.py | Python | bsd-3-clause | 2,497 |
import os
import re
import time
import urllib
import urllib.request

import requests
from bs4 import BeautifulSoup
from selenium import webdriver
class Taolvlang(object):
    """Scrapes model photos from the Taobao 'tstar' gallery.

    driver:    a selenium webdriver instance (PhantomJS in this script)
    homePage:  URL of the gallery listing page
    outputDir: directory under which one folder per model is created
    """

    def __init__(self, driver, homePage, outputDir):
        self.driver = driver
        self.homePage = homePage
        self.outputDir = outputDir

    def get_detail_imgs(self, detail_url, dir_name):
        """Download every photo on one model's detail page into dir_name."""
        num = 0  # running counter; used as the image file name
        self.driver.get(detail_url)
        # Scroll to the bottom so lazily loaded images are fetched.
        js = "var q=document.documentElement.scrollTop=10000"
        self.driver.execute_script(js)
        # BUG FIX: was the module-level `driver` global instead of self.driver.
        bs = BeautifulSoup(self.driver.page_source, "html5lib")
        allImage = bs.findAll("img", {"src": re.compile("^\/\/img\.alicdn\.com\/imgextra\/.*.jpg$")})
        for image in allImage:
            img_url = image["src"]
            if not img_url.startswith("http:"):
                img_url = "http:" + img_url  # scheme-relative URL -> absolute
            num = num + 1
            r = requests.get(img_url)
            # Skip images we already downloaded in a previous run.
            if not os.path.exists("%s/%d.jpg" % (dir_name, num)):
                with open("%s/%d.jpg" % (dir_name, num), "wb") as pic:
                    pic.write(r.content)

    def get_all_data(self):
        """Walk the gallery page; download cover and detail photos per model."""
        # BUG FIX: used the module-level `homePage` global; use the ctor arg.
        self.driver.get(self.homePage)
        js = "var q=document.documentElement.scrollTop=10000"
        self.driver.execute_script(js)
        time.sleep(3)  # give dynamically loaded content time to arrive
        self.driver.get_screenshot_as_file("1.jpg")
        bs = BeautifulSoup(self.driver.page_source, "html5lib")
        allItem = bs.findAll(class_="item")
        for item in allItem:
            detail_url = item.find(class_="item-link")["href"]
            header_img_url = item.find("img")["src"]
            # One folder per model: "<name>_<city>" under the output directory.
            # BUG FIX: used the module-level `outputDir` global here.
            dir_name = self.outputDir + "%s_%s" % (
                item.find(class_="name").get_text(),
                item.find(class_="city").get_text(),
            )
            if not os.path.exists(dir_name):
                os.makedirs(dir_name)
            if not detail_url.startswith("http:"):
                detail_url = "http:" + detail_url
            if not header_img_url.startswith("http:"):
                header_img_url = "http:" + header_img_url
            print("detail_url=%s" % detail_url)
            print("header_img_url=%s" % header_img_url)
            # Save the cover image as 0.jpg.
            # BUG FIX: dir_name already contains outputDir; the original
            # prefixed it a second time, producing a path that never exists.
            if not os.path.exists("%s/0.jpg" % dir_name):
                urllib.request.urlretrieve(header_img_url, "%s/0.jpg" % dir_name)
            # Fetch the photos from the model's detail page.
            self.get_detail_imgs(detail_url, dir_name)
# Path to the local headless browser binary (PhantomJS).
browserPath = "phantomjs.exe"
# Gallery landing page to scrape.
homePage = 'https://mm.taobao.com/search_tstar_model.htm?'
# Directory that will receive one sub-folder per model.
outputDir = "/photos/"
driver = webdriver.PhantomJS(executable_path = browserPath)
# Instantiate the scraper and fetch everything.
taoObj = Taolvlang(driver,homePage,outputDir)
taoObj.get_all_data()
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
# coding: utf-8
from setuptools import setup, find_packages
NAME = "auto_rest_report_service_for_azure"
VERSION = "1.0.0"
# To install the library, run the following
#
# python setup.py install
#
# prerequisite: setuptools
# http://pypi.python.org/pypi/setuptools
REQUIRES = ["msrest>=0.0.1", "msrestazure>=0.0.1"]
setup(
name=NAME,
version=VERSION,
description="AutoRestReportServiceForAzure",
author_email="",
url="",
keywords=["Swagger", "AutoRestReportServiceForAzure"],
install_requires=REQUIRES,
packages=find_packages(),
include_package_data=True,
long_description="""\
Test Infrastructure for AutoRest
"""
)
| vulcansteel/autorest | AutoRest/Generators/Python/Azure.Python.Tests/Expected/AcceptanceTests/AzureReport/setup.py | Python | mit | 1,142 |
#!/bin/python
# Single pass with one-character lookahead.
#
# Runs in O(|input|)-time: each position of the input is inspected a
# constant number of times. It uses O(|input|)-space for the working
# character list across all scenarios.
#
def characterReverse(input):
    """Swap each adjacent "th" pair to "ht", scanning left to right.

    Performs a single in-place pass over a copy of the string's characters.
    The original implementation compared ord(rev[j]) (an int) against the
    string 'A' — a TypeError on Python 3 (and constant-False on Python 2) —
    and its lookahead could index one past the end of the list. This version
    preserves the Python 2 effective behavior with neither defect.
    """
    chars = list(input)
    for i in range(len(chars) - 1):
        # A 't' immediately followed by 'h' gets swapped; the swapped 'h'
        # is left behind, so runs like "thh" cascade to "hht".
        if chars[i] == 't' and chars[i + 1] == 'h':
            chars[i], chars[i + 1] = chars[i + 1], chars[i]
    return "".join(chars)
# Python 2 entry point: read one line, transform it, print the result.
_input = raw_input()
res = characterReverse(_input);
print res
| cassiopagnoncelli/hacker-rank-solutions | reverse.py | Python | mit | 780 |
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 19 10:57:30 2016
@author: jmaunon
"""
# 10 minutes to pandas
#==============================================================================
#%% Libraries
#==============================================================================
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#==============================================================================
#%% Object creation
#==============================================================================
## Creating a Series by passing a list of values (np.nan marks missing data)
s = pd.Series([1, 3, 5, np.nan, 6, 8])

## Creating a DataFrame by passing a numpy.array with a datetime index and labeled columns
dates = pd.date_range("20130101", periods=6)
df = pd.DataFrame(np.random.randn(6, 4), index=dates, columns=list("ABCD"))

## Creating a DataFrame by passing a dict of objects
# Renamed from `dict` to avoid shadowing the built-in `dict` type.
data = {
    "A": 1,
    "B": pd.Timestamp("20130102"),
    "C": pd.Series(1, index=list(range(4)), dtype="float32"),
    "D": np.array([3] * 4, dtype="int32"),
    "E": pd.Categorical(["test", "train", "test", "train"]),
    "F": "foo",
}
df2 = pd.DataFrame(data)

## Notice the types of the variables
df2.dtypes
#============================================================================= | juanmixp/Pandas | 10_min_tutorial/10_min_pandas.py | Python | gpl-3.0 | 1,338 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Example Airflow DAG that shows how to use GoogleAdsToGcsOperator.
"""
import os
from datetime import datetime
from airflow import models
from airflow.providers.google.ads.operators.ads import GoogleAdsListAccountsOperator
from airflow.providers.google.ads.transfers.ads_to_gcs import GoogleAdsToGcsOperator
# [START howto_google_ads_env_variables]
# Google Ads customer account IDs to pull data for.
CLIENT_IDS = ["1111111111", "2222222222"]
BUCKET = os.environ.get("GOOGLE_ADS_BUCKET", "gs://INVALID BUCKET NAME")
GCS_OBJ_PATH = "folder_name/google-ads-api-results.csv"
GCS_ACCOUNTS_CSV = "folder_name/accounts.csv"
# GAQL query: per-ad daily metrics for February 2020.
QUERY = """
    SELECT
        segments.date,
        customer.id,
        campaign.id,
        ad_group.id,
        ad_group_ad.ad.id,
        metrics.impressions,
        metrics.clicks,
        metrics.conversions,
        metrics.all_conversions,
        metrics.cost_micros
    FROM
        ad_group_ad
    WHERE
        segments.date >= '2020-02-01'
        AND segments.date <= '2020-02-29'
    """
# Attribute paths extracted from each API result row into the CSV.
FIELDS_TO_EXTRACT = [
    "segments.date.value",
    "customer.id.value",
    "campaign.id.value",
    "ad_group.id.value",
    "ad_group_ad.ad.id.value",
    "metrics.impressions.value",
    "metrics.clicks.value",
    "metrics.conversions.value",
    "metrics.all_conversions.value",
    "metrics.cost_micros.value",
]
# [END howto_google_ads_env_variables]
with models.DAG(
    "example_google_ads",
    schedule_interval=None,  # Override to match your needs
    start_date=datetime(2021, 1, 1),
    catchup=False,
) as dag:
    # [START howto_google_ads_to_gcs_operator]
    # Runs QUERY against each client account and uploads the results to GCS.
    run_operator = GoogleAdsToGcsOperator(
        client_ids=CLIENT_IDS,
        query=QUERY,
        attributes=FIELDS_TO_EXTRACT,
        obj=GCS_OBJ_PATH,
        bucket=BUCKET,
        task_id="run_operator",
    )
    # [END howto_google_ads_to_gcs_operator]
    # [START howto_ads_list_accounts_operator]
    # Writes the list of accessible Google Ads accounts to a CSV in GCS.
    list_accounts = GoogleAdsListAccountsOperator(
        task_id="list_accounts", bucket=BUCKET, object_name=GCS_ACCOUNTS_CSV
    )
    # [END howto_ads_list_accounts_operator]
| Acehaidrey/incubator-airflow | airflow/providers/google/ads/example_dags/example_ads.py | Python | apache-2.0 | 2,835 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Implementation of Neural Net (NN) functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import candidate_sampling_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import variables
from tensorflow.python.util.deprecation import deprecated_args
from tensorflow.python.util.deprecation import deprecated_argument_lookup
def log_poisson_loss(targets, log_input, compute_full_loss=False, name=None):
  """Computes log Poisson loss given `log_input`.

  Gives the log-likelihood loss between the prediction and the target under the
  assumption that the target has a Poisson distribution.
  Caveat: By default, this is not the exact loss, but the loss minus a
  constant term [log(z!)]. That has no effect for optimization, but
  does not play well with relative loss comparisons. To compute an
  approximation of the log factorial term, specify
  compute_full_loss=True to enable Stirling's Approximation.

  For brevity, let `c = log(x) = log_input`, `z = targets`.  The log Poisson
  loss is

        -log(exp(-x) * (x^z) / z!)
      = -log(exp(-x) * (x^z)) + log(z!)
      ~ -log(exp(-x)) - log(x^z) [+ z * log(z) - z + 0.5 * log(2 * pi * z)]
          [ Note the second term is the Stirling's Approximation for log(z!).
            It is invariant to x and does not affect optimization, though
            important for correct relative loss comparisons. It is only
            computed when compute_full_loss == True. ]
      = x - z * log(x) [+ z * log(z) - z + 0.5 * log(2 * pi * z)]
      = exp(c) - z * c [+ z * log(z) - z + 0.5 * log(2 * pi * z)]

  Args:
    targets: A `Tensor` of the same type and shape as `log_input`.
    log_input: A `Tensor` of type `float32` or `float64`.
    compute_full_loss: whether to compute the full loss. If false, a constant
      term is dropped in favor of more efficient optimization.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of the same shape as `log_input` with the componentwise
    logistic losses.

  Raises:
    ValueError: If `log_input` and `targets` do not have the same shape.
  """
  with ops.name_scope(name, "log_poisson_loss", [log_input, targets]) as name:
    log_input = ops.convert_to_tensor(log_input, name="log_input")
    targets = ops.convert_to_tensor(targets, name="targets")
    try:
      # Static shape compatibility check; incompatible shapes raise below.
      targets.get_shape().merge_with(log_input.get_shape())
    except ValueError:
      raise ValueError(
          "log_input and targets must have the same shape (%s vs %s)" %
          (log_input.get_shape(), targets.get_shape()))
    # Loss without the constant log(z!) term: exp(c) - z * c.
    result = math_ops.exp(log_input) - log_input * targets
    if compute_full_loss:
      # need to create constant tensors here so that their dtypes can be matched
      # to that of the targets.
      point_five = constant_op.constant(0.5, dtype=targets.dtype)
      two_pi = constant_op.constant(2 * math.pi, dtype=targets.dtype)
      # Stirling's approximation of log(z!): z*log(z) - z + 0.5*log(2*pi*z).
      stirling_approx = (targets * math_ops.log(targets)) - targets + (
          point_five * math_ops.log(two_pi * targets))
      zeros = array_ops.zeros_like(targets, dtype=targets.dtype)
      ones = array_ops.ones_like(targets, dtype=targets.dtype)
      # For targets in [0, 1] the correction is taken as zero: log(0!) and
      # log(1!) are 0, and the approximation is invalid there (log(0) at z=0).
      cond = math_ops.logical_and(targets >= zeros, targets <= ones)
      result += array_ops.where(cond, zeros, stirling_approx)
    return result
def sigmoid_cross_entropy_with_logits(  # pylint: disable=invalid-name
    _sentinel=None,
    labels=None,
    logits=None,
    name=None):
  """Computes sigmoid cross entropy given `logits`.

  Measures the probability error in discrete classification tasks in which each
  class is independent and not mutually exclusive. For instance, one could
  perform multilabel classification where a picture can contain both an elephant
  and a dog at the same time.

  For brevity, let `x = logits`, `z = labels`. The logistic loss is

        z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
      = z * -log(1 / (1 + exp(-x))) + (1 - z) * -log(exp(-x) / (1 + exp(-x)))
      = z * log(1 + exp(-x)) + (1 - z) * (-log(exp(-x)) + log(1 + exp(-x)))
      = z * log(1 + exp(-x)) + (1 - z) * (x + log(1 + exp(-x))
      = (1 - z) * x + log(1 + exp(-x))
      = x - x * z + log(1 + exp(-x))

  For x < 0, to avoid overflow in exp(-x), we reformulate the above

        x - x * z + log(1 + exp(-x))
      = log(exp(x)) - x * z + log(1 + exp(-x))
      = - x * z + log(1 + exp(x))

  Hence, to ensure stability and avoid overflow, the implementation uses this
  equivalent formulation

      max(x, 0) - x * z + log(1 + exp(-abs(x)))

  `logits` and `labels` must have the same type and shape.

  Args:
    _sentinel: Used to prevent positional parameters. Internal, do not use.
    labels: A `Tensor` of the same type and shape as `logits`.
    logits: A `Tensor` of type `float32` or `float64`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of the same shape as `logits` with the componentwise
    logistic losses.

  Raises:
    ValueError: If `logits` and `labels` do not have the same shape.
  """
  # pylint: disable=protected-access
  # Guards against positional use of labels/logits (see `_sentinel` above).
  nn_ops._ensure_xent_args("sigmoid_cross_entropy_with_logits", _sentinel,
                           labels, logits)
  # pylint: enable=protected-access
  with ops.name_scope(name, "logistic_loss", [logits, labels]) as name:
    logits = ops.convert_to_tensor(logits, name="logits")
    labels = ops.convert_to_tensor(labels, name="labels")
    try:
      labels.get_shape().merge_with(logits.get_shape())
    except ValueError:
      raise ValueError("logits and labels must have the same shape (%s vs %s)" %
                       (logits.get_shape(), labels.get_shape()))
    # The logistic loss formula from above is
    #   x - x * z + log(1 + exp(-x))
    # For x < 0, a more numerically stable formula is
    #   -x * z + log(1 + exp(x))
    # Note that these two expressions can be combined into the following:
    #   max(x, 0) - x * z + log(1 + exp(-abs(x)))
    # To allow computing gradients at zero, we define custom versions of max and
    # abs functions.
    zeros = array_ops.zeros_like(logits, dtype=logits.dtype)
    cond = (logits >= zeros)
    relu_logits = array_ops.where(cond, logits, zeros)  # max(x, 0)
    neg_abs_logits = array_ops.where(cond, -logits, logits)  # -abs(x)
    return math_ops.add(
        relu_logits - logits * labels,
        math_ops.log1p(math_ops.exp(neg_abs_logits)),
        name=name)
def weighted_cross_entropy_with_logits(targets, logits, pos_weight, name=None):
  """Computes a weighted cross entropy.

  This is like `sigmoid_cross_entropy_with_logits()` except that `pos_weight`,
  allows one to trade off recall and precision by up- or down-weighting the
  cost of a positive error relative to a negative error.

  The usual cross-entropy cost is defined as:

      targets * -log(sigmoid(logits)) +
          (1 - targets) * -log(1 - sigmoid(logits))

  The argument `pos_weight` is used as a multiplier for the positive targets:

      targets * -log(sigmoid(logits)) * pos_weight +
          (1 - targets) * -log(1 - sigmoid(logits))

  For brevity, let `x = logits`, `z = targets`, `q = pos_weight`.
  The loss is:

        qz * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
      = qz * -log(1 / (1 + exp(-x))) + (1 - z) * -log(exp(-x) / (1 + exp(-x)))
      = qz * log(1 + exp(-x)) + (1 - z) * (-log(exp(-x)) + log(1 + exp(-x)))
      = qz * log(1 + exp(-x)) + (1 - z) * (x + log(1 + exp(-x))
      = (1 - z) * x + (qz +  1 - z) * log(1 + exp(-x))
      = (1 - z) * x + (1 + (q - 1) * z) * log(1 + exp(-x))

  Setting `l = (1 + (q - 1) * z)`, to ensure stability and avoid overflow,
  the implementation uses

      (1 - z) * x + l * (log(1 + exp(-abs(x))) + max(-x, 0))

  `logits` and `targets` must have the same type and shape.

  Args:
    targets: A `Tensor` of the same type and shape as `logits`.
    logits: A `Tensor` of type `float32` or `float64`.
    pos_weight: A coefficient to use on the positive examples.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of the same shape as `logits` with the componentwise
    weighted logistic losses.

  Raises:
    ValueError: If `logits` and `targets` do not have the same shape.
  """
  with ops.name_scope(name, "logistic_loss", [logits, targets]) as name:
    logits = ops.convert_to_tensor(logits, name="logits")
    targets = ops.convert_to_tensor(targets, name="targets")
    try:
      targets.get_shape().merge_with(logits.get_shape())
    except ValueError:
      raise ValueError(
          "logits and targets must have the same shape (%s vs %s)" %
          (logits.get_shape(), targets.get_shape()))

    # The logistic loss formula from above is
    #   (1 - z) * x + (1 + (q - 1) * z) * log(1 + exp(-x))
    # For x < 0, a more numerically stable formula is
    #   (1 - z) * x + (1 + (q - 1) * z) * log(1 + exp(x)) - l * x
    # To avoid branching, we use the combined version
    #   (1 - z) * x + l * (log(1 + exp(-abs(x))) + max(-x, 0))
    # log_weight is l = 1 + (q - 1) * z, the per-example weight on the log term.
    log_weight = 1 + (pos_weight - 1) * targets
    return math_ops.add(
        (1 - targets) * logits,
        log_weight * (math_ops.log1p(math_ops.exp(-math_ops.abs(logits))) +
                      nn_ops.relu(-logits)),
        name=name)
def relu_layer(x, weights, biases, name=None):
  """Applies an affine transform followed by a ReLU.

  Equivalent to `relu(matmul(x, weights) + biases)`.

  Args:
    x: a 2D tensor.  Dimensions typically: batch, in_units
    weights: a 2D tensor.  Dimensions typically: in_units, out_units
    biases: a 1D tensor.  Dimensions: out_units
    name: A name for the operation (optional).  If not specified
      "nn_relu_layer" is used.

  Returns:
    A 2-D Tensor computing relu(matmul(x, weights) + biases).
    Dimensions typically: batch, out_units.
  """
  with ops.name_scope(name, "relu_layer", [x, weights, biases]) as name:
    x = ops.convert_to_tensor(x, name="x")
    weights = ops.convert_to_tensor(weights, name="weights")
    biases = ops.convert_to_tensor(biases, name="biases")
    # The scope name labels the final ReLU op.
    preactivation = nn_ops.bias_add(math_ops.matmul(x, weights), biases)
    return nn_ops.relu(preactivation, name=name)
def _swish_shape(op):
  """Output shape for swish/_swish_grad: identical to the first input's."""
  first_input = op.inputs[0]
  return [first_input.shape]
@function.Defun(shape_func=_swish_shape, func_name="swish_grad", noinline=True)
def _swish_grad(features, grad):
  """Gradient of the Swish function defined below.

  Uses d/dx [x * sigmoid(x)] = sigmoid(x) * (1 + x * (1 - sigmoid(x))).
  """
  sig = math_ops.sigmoid(features)
  local_grad = sig * (1.0 + features * (1.0 - sig))
  return grad * local_grad
# Computing x * tf.nn.sigmoid(x) naively keeps both x and sigmoid(x) alive
# until backprop, roughly doubling the memory footprint.  Wrapping the op in a
# @Defun with noinline=True lets the forward pass free sigmoid(features) right
# after use; the custom grad_func above recomputes it during backprop instead.
@function.Defun(
    grad_func=_swish_grad,
    shape_func=_swish_shape,
    func_name="swish",
    noinline=True)
def swish(features):
  # pylint: disable=g-doc-args
  """Computes the Swish activation function: `x * sigmoid(x)`.

  Source: "Searching for Activation Functions" (Ramachandran et al. 2017)
  https://arxiv.org/abs/1710.05941

  Args:
    features: A `Tensor` representing preactivation values.
    name: A name for the operation (optional).

  Returns:
    The activation value.
  """
  # pylint: enable=g-doc-args
  features = ops.convert_to_tensor(features, name="features")
  gate = math_ops.sigmoid(features)
  return features * gate
@deprecated_args(None, "dim is deprecated, use axis instead", "dim")
def l2_normalize(x, axis=None, epsilon=1e-12, name=None, dim=None):
  """Normalizes `x` along dimension `axis` using an L2 norm.

  For a 1-D tensor with `axis = 0`, computes

      output = x / sqrt(max(sum(x**2), epsilon))

  For higher-rank `x`, each 1-D slice along `axis` is normalized
  independently.

  Args:
    x: A `Tensor`.
    axis: Dimension along which to normalize.  A scalar or a vector of
      integers.
    epsilon: A lower bound value for the norm. Will use `sqrt(epsilon)` as the
      divisor if `norm < sqrt(epsilon)`.
    name: A name for this operation (optional).
    dim: Deprecated alias for axis.

  Returns:
    A `Tensor` with the same shape as `x`.
  """
  with ops.name_scope(name, "l2_normalize", [x]) as name:
    # Resolve the deprecated `dim` alias before doing any math.
    axis = deprecated_argument_lookup("axis", axis, "dim", dim)
    x = ops.convert_to_tensor(x, name="x")
    sum_sq = math_ops.reduce_sum(math_ops.square(x), axis, keepdims=True)
    # rsqrt of the clamped squared norm is 1 / max(norm, sqrt(epsilon)).
    inv_norm = math_ops.rsqrt(math_ops.maximum(sum_sq, epsilon))
    return math_ops.multiply(x, inv_norm, name=name)
def zero_fraction(value, name=None):
  """Returns the fraction of entries in `value` that are exactly zero.

  If `value` is empty, the result is `nan`.

  Useful in summaries to measure and report sparsity.  For example,

  ```python
  z = tf.nn.relu(...)
  summ = tf.summary.scalar('sparsity', tf.nn.zero_fraction(z))
  ```

  Args:
    value: A tensor of numeric type.
    name: A name for the operation (optional).

  Returns:
    The fraction of zeros in `value`, with type `float32`.
  """
  with ops.name_scope(name, "zero_fraction", [value]):
    value = ops.convert_to_tensor(value, name="value")
    zero = constant_op.constant(0, dtype=value.dtype, name="zero")
    # Mean of the 0/1 indicator of equality with zero.
    is_zero = math_ops.equal(value, zero)
    return math_ops.reduce_mean(math_ops.cast(is_zero, dtypes.float32))
# pylint: disable=redefined-builtin
def depthwise_conv2d(input,
                     filter,
                     strides,
                     padding,
                     rate=None,
                     name=None,
                     data_format=None):
  """Depthwise 2-D convolution.

  Given a 4D input tensor ('NHWC' or 'NCHW' data formats)
  and a filter tensor of shape
  `[filter_height, filter_width, in_channels, channel_multiplier]`
  containing `in_channels` convolutional filters of depth 1, `depthwise_conv2d`
  applies a different filter to each input channel (expanding from 1 channel
  to `channel_multiplier` channels for each), then concatenates the results
  together.  The output has `in_channels * channel_multiplier` channels.

  In detail,

      output[b, i, j, k * channel_multiplier + q] = sum_{di, dj}
           filter[di, dj, k, q] * input[b, strides[1] * i + rate[0] * di,
                                           strides[2] * j + rate[1] * dj, k]

  Must have `strides[0] = strides[3] = 1`.  For the most common case of the
  same horizontal and vertical strides, `strides = [1, stride, stride, 1]`.
  If any value in `rate` is greater than 1, we perform atrous depthwise
  convolution, in which case all values in the `strides` tensor must be equal
  to 1.

  Args:
    input: 4-D with shape according to `data_format`.
    filter: 4-D with shape
      `[filter_height, filter_width, in_channels, channel_multiplier]`.
    strides: 1-D of size 4.  The stride of the sliding window for each
      dimension of `input`.
    padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
      See the @{tf.nn.convolution$comment here}
    rate: 1-D of size 2. The dilation rate in which we sample input values
      across the `height` and `width` dimensions in atrous convolution. If it is
      greater than 1, then all values of strides must be 1.
    name: A name for this operation (optional).
    data_format: The data format for input. Either "NHWC" (default) or "NCHW".

  Returns:
    A 4-D `Tensor` with shape according to `data_format`.  E.g., for
    "NHWC" format, shape is
    `[batch, out_height, out_width, in_channels * channel_multiplier].`
  """
  with ops.name_scope(name, "depthwise", [input, filter]) as name:
    input = ops.convert_to_tensor(input, name="tensor_in")
    filter = ops.convert_to_tensor(filter, name="filter_in")
    if rate is None:
      # rate of [1, 1] means no atrous (dilated) sampling.
      rate = [1, 1]

    def op(input_converted, _, padding):
      # Called back by with_space_to_batch; NOTE(review): `input_converted`
      # presumably carries any dilation folded in via a space-to-batch
      # transform -- see nn_ops.with_space_to_batch for the exact contract.
      return nn_ops.depthwise_conv2d_native(
          input=input_converted,
          filter=filter,
          strides=strides,
          padding=padding,
          data_format=data_format,
          name=name)

    return nn_ops.with_space_to_batch(
        input=input,
        filter_shape=array_ops.shape(filter),
        dilation_rate=rate,
        padding=padding,
        data_format=data_format,
        op=op)
# pylint: enable=redefined-builtin
# pylint: disable=redefined-builtin,line-too-long
def separable_conv2d(input,
                     depthwise_filter,
                     pointwise_filter,
                     strides,
                     padding,
                     rate=None,
                     name=None,
                     data_format=None):
  """2-D convolution with separable filters.

  Performs a depthwise convolution that acts separately on channels followed by
  a pointwise convolution that mixes channels.  Note that this is separability
  between dimensions `[1, 2]` and `3`, not spatial separability between
  dimensions `1` and `2`.

  In detail,

      output[b, i, j, k] = sum_{di, dj, q, r}
          input[b, strides[1] * i + di, strides[2] * j + dj, q] *
          depthwise_filter[di, dj, q, r] *
          pointwise_filter[0, 0, q * channel_multiplier + r, k]

  `strides` controls the strides for the depthwise convolution only, since
  the pointwise convolution has implicit strides of `[1, 1, 1, 1]`.  Must have
  `strides[0] = strides[3] = 1`.  For the most common case of the same
  horizontal and vertical strides, `strides = [1, stride, stride, 1]`.
  If any value in `rate` is greater than 1, we perform atrous depthwise
  convolution, in which case all values in the `strides` tensor must be equal
  to 1.

  Args:
    input: 4-D `Tensor` with shape according to `data_format`.
    depthwise_filter: 4-D `Tensor` with shape
      `[filter_height, filter_width, in_channels, channel_multiplier]`.
      Contains `in_channels` convolutional filters of depth 1.
    pointwise_filter: 4-D `Tensor` with shape
      `[1, 1, channel_multiplier * in_channels, out_channels]`.  Pointwise
      filter to mix channels after `depthwise_filter` has convolved spatially.
    strides: 1-D of size 4.  The strides for the depthwise convolution for
      each dimension of `input`.
    padding: A string, either `'VALID'` or `'SAME'`.  The padding algorithm.
      See the @{tf.nn.convolution$comment here}
    rate: 1-D of size 2. The dilation rate in which we sample input values
      across the `height` and `width` dimensions in atrous convolution. If it is
      greater than 1, then all values of strides must be 1.
    name: A name for this operation (optional).
    data_format: The data format for input. Either "NHWC" (default) or "NCHW".

  Returns:
    A 4-D `Tensor` with shape according to 'data_format'. For
      example, with data_format="NHWC", shape is [batch, out_height,
      out_width, out_channels].
  """
  with ops.name_scope(name, "separable_conv2d",
                      [input, depthwise_filter, pointwise_filter]) as name:
    input = ops.convert_to_tensor(input, name="tensor_in")
    depthwise_filter = ops.convert_to_tensor(
        depthwise_filter, name="depthwise_filter")
    pointwise_filter = ops.convert_to_tensor(
        pointwise_filter, name="pointwise_filter")

    # The pointwise stage must be a 1x1 convolution; anything else would
    # perform additional spatial filtering.
    pointwise_filter_shape = pointwise_filter.get_shape().with_rank(4)
    pointwise_filter_shape[0].assert_is_compatible_with(1)
    pointwise_filter_shape[1].assert_is_compatible_with(1)

    if rate is None:
      # rate of [1, 1] means no atrous (dilated) sampling.
      rate = [1, 1]

    # The layout of the ops in the graph are expected to be as follows:
    # depthwise_conv2d  // Conv2D op corresponding to native deptwise conv.
    # separable_conv2d  // Conv2D op corresponding to the pointwise conv.

    def op(input_converted, _, padding):
      # Called back by with_space_to_batch with the (possibly transformed)
      # input; performs the spatial, per-channel stage.
      return nn_ops.depthwise_conv2d_native(
          input=input_converted,
          filter=depthwise_filter,
          strides=strides,
          padding=padding,
          data_format=data_format,
          name="depthwise")

    depthwise = nn_ops.with_space_to_batch(
        input=input,
        filter_shape=array_ops.shape(depthwise_filter),
        dilation_rate=rate,
        padding=padding,
        data_format=data_format,
        op=op)

    # Channel-mixing stage: a plain 1x1 convolution over the depthwise output.
    return nn_ops.conv2d(
        depthwise,
        pointwise_filter, [1, 1, 1, 1],
        padding="VALID",
        data_format=data_format,
        name=name)
# pylint: enable=redefined-builtin,line-too-long
def sufficient_statistics(x, axes, shift=None, keep_dims=False, name=None):
  """Calculate the sufficient statistics for the mean and variance of `x`.

  These sufficient statistics are computed using the one pass algorithm on
  an input that's optionally shifted. See:
  https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Computing_shifted_data

  Args:
    x: A `Tensor`.
    axes: Array of ints. Axes along which to compute mean and variance.
    shift: A `Tensor` containing the value by which to shift the data for
      numerical stability, or `None` if no shift is to be performed. A shift
      close to the true mean provides the most numerically stable results.
    keep_dims: produce statistics with the same dimensionality as the input.
    name: Name used to scope the operations that compute the sufficient stats.

  Returns:
    Four `Tensor` objects of the same type as `x`:

    * the count (number of elements to average over).
    * the (possibly shifted) sum of the elements in the array.
    * the (possibly shifted) sum of squares of the elements in the array.
    * the shift by which the mean must be corrected or None if `shift` is None.
  """
  # Deduplicate axes so no dimension contributes to the count twice.
  axes = list(set(axes))
  with ops.name_scope(name, "sufficient_statistics", [x, shift]):
    x = ops.convert_to_tensor(x, name="x")
    x_shape = x.get_shape()
    if all(x_shape[d].value is not None for d in axes):
      # All reduced dimensions are statically known: bake the element count
      # into a graph constant.
      counts = 1
      for d in axes:
        counts *= x_shape[d].value
      counts = constant_op.constant(counts, dtype=x.dtype)
    else:  # shape needs to be inferred at runtime.
      x_dims = array_ops.gather(
          math_ops.cast(array_ops.shape(x), x.dtype), axes)
      counts = math_ops.reduce_prod(x_dims, name="count")
    if shift is not None:
      shift = ops.convert_to_tensor(shift, name="shift")
      m_ss = math_ops.subtract(x, shift)
      v_ss = math_ops.squared_difference(x, shift)
    else:  # no shift.
      m_ss = x
      v_ss = math_ops.square(x)
    # Reduce to the (possibly shifted) sum and sum-of-squares.
    m_ss = math_ops.reduce_sum(m_ss, axes, keepdims=keep_dims, name="mean_ss")
    v_ss = math_ops.reduce_sum(v_ss, axes, keepdims=keep_dims, name="var_ss")
  return counts, m_ss, v_ss, shift
def normalize_moments(counts, mean_ss, variance_ss, shift, name=None):
  """Converts sufficient statistics into a mean and variance.

  Args:
    counts: A `Tensor` containing a the total count of the data (one value).
    mean_ss: A `Tensor` containing the mean sufficient statistics: the (possibly
      shifted) sum of the elements to average over.
    variance_ss: A `Tensor` containing the variance sufficient statistics: the
      (possibly shifted) squared sum of the data to compute the variance over.
    shift: A `Tensor` containing the value by which the data is shifted for
      numerical stability, or `None` if no shift was performed.
    name: Name used to scope the operations that compute the moments.

  Returns:
    Two `Tensor` objects: `mean` and `variance`.
  """
  with ops.name_scope(name, "normalize", [counts, mean_ss, variance_ss, shift]):
    inv_count = math_ops.reciprocal(counts, name="divisor")
    if shift is None:
      # Unshifted data: the scaled mean statistic already is the mean.
      shifted_mean = math_ops.multiply(mean_ss, inv_count, name="mean")
      mean = shifted_mean
    else:
      # Undo the stability shift to recover the true mean.
      shifted_mean = math_ops.multiply(mean_ss, inv_count, name="shifted_mean")
      mean = math_ops.add(shifted_mean, shift, name="mean")
    # Var(x) = E[(x - shift)^2] - (E[x - shift])^2, which is shift-invariant.
    variance = math_ops.subtract(
        math_ops.multiply(variance_ss, inv_count),
        math_ops.square(shifted_mean),
        name="variance")
    return (mean, variance)
def moments(x, axes,
            shift=None,  # pylint: disable=unused-argument
            name=None, keep_dims=False):
  """Calculate the mean and variance of `x`.

  The mean and variance are calculated by aggregating the contents of `x`
  across `axes`.  If `x` is 1-D and `axes = [0]` this is just the mean
  and variance of a vector.

  Note: shift is currently not used; the true mean is computed and used.

  When using these moments for batch normalization (see
  `tf.nn.batch_normalization`):

   * for so-called "global normalization", used with convolutional filters with
     shape `[batch, height, width, depth]`, pass `axes=[0, 1, 2]`.
   * for simple batch normalization pass `axes=[0]` (batch only).

  Args:
    x: A `Tensor`.
    axes: Array of ints.  Axes along which to compute mean and
      variance.
    shift: Not used in the current implementation
    name: Name used to scope the operations that compute the moments.
    keep_dims: produce moments with the same dimensionality as the input.

  Returns:
    Two `Tensor` objects: `mean` and `variance`.
  """
  with ops.name_scope(name, "moments", [x, axes]):
    # The dynamic range of fp16 is too limited to support the collection of
    # sufficient statistics. As a workaround we simply perform the operations
    # on 32-bit floats before converting the mean and variance back to fp16
    y = math_ops.cast(x, dtypes.float32) if x.dtype == dtypes.float16 else x
    # Compute true mean while keeping the dims for proper broadcasting.
    mean = math_ops.reduce_mean(y, axes, keepdims=True, name="mean")
    # sample variance, not unbiased variance
    # NOTE(review): stop_gradient on the mean presumably relies on the
    # variance's gradient through the mean being analytically zero -- confirm.
    variance = math_ops.reduce_mean(
        math_ops.squared_difference(y, array_ops.stop_gradient(mean)),
        axes,
        keepdims=True,
        name="variance")
    if not keep_dims:
      mean = array_ops.squeeze(mean, axes)
      variance = array_ops.squeeze(variance, axes)
    if x.dtype == dtypes.float16:
      # Cast the fp32 workaround results back to the caller's fp16.
      return (math_ops.cast(mean, dtypes.float16), math_ops.cast(
          variance, dtypes.float16))
    else:
      return (mean, variance)
def weighted_moments(x, axes, frequency_weights, name=None, keep_dims=False):
  """Returns the frequency-weighted mean and variance of `x`.

  Args:
    x: A tensor.
    axes: 1-d tensor of int32 values; these are the axes along which
      to compute mean and variance.
    frequency_weights: A tensor of positive weights which can be
      broadcast with x.
    name: Name used to scope the operation.
    keep_dims: Produce moments with the same dimensionality as the input.

  Returns:
    Two tensors: `weighted_mean` and `weighted_variance`.
  """
  with ops.name_scope(name, "weighted_moments", [x, frequency_weights, axes]):
    x = ops.convert_to_tensor(x, name="x")
    frequency_weights = ops.convert_to_tensor(
        frequency_weights, name="frequency_weights")

    # Unlike moments(), this just uses a simpler two-pass method.

    # See comment in moments() WRT precision; it applies here too.
    needs_cast = x.dtype == dtypes.float16
    if needs_cast:
      x = math_ops.cast(x, dtypes.float32)

    if frequency_weights.dtype != x.dtype:
      frequency_weights = math_ops.cast(frequency_weights, x.dtype)

    # Note that we use keep_dims=True for our reductions regardless of the arg;
    # this is so that the results remain broadcast-compatible with the inputs.
    weighted_input_sum = math_ops.reduce_sum(
        frequency_weights * x, axes, name="weighted_input_sum", keepdims=True)

    # The shape of the weights isn't necessarily the same as x's
    # shape, just broadcast-compatible with it -- so this expression
    # performs broadcasting to give a per-item weight, with the same
    # shape as (freqency_weights * x).  This avoids having to reason
    # through all the broadcast logic to compute a correct
    # sum_of_weights.
    broadcasted_weights = frequency_weights + array_ops.zeros_like(x)

    sum_of_weights = math_ops.reduce_sum(
        broadcasted_weights, axes, name="sum_of_weights", keepdims=True)

    divisor = math_ops.reciprocal(sum_of_weights, name="inv_weight_sum")

    weighted_mean = math_ops.multiply(weighted_input_sum, divisor)

    # Have the weighted mean; now on to variance:
    # second pass over the data, using the mean computed above.
    weighted_distsq = math_ops.reduce_sum(
        frequency_weights * math_ops.squared_difference(x, weighted_mean),
        axes,
        name="weighted_distsq",
        keepdims=True)

    weighted_variance = math_ops.multiply(weighted_distsq, divisor)

    if not keep_dims:
      weighted_mean = array_ops.squeeze(weighted_mean, squeeze_dims=axes)
      weighted_variance = array_ops.squeeze(
          weighted_variance, squeeze_dims=axes)

    if needs_cast:
      # Restore the caller's fp16 dtype (see the precision note above).
      weighted_mean = math_ops.cast(weighted_mean, dtypes.float16)
      weighted_variance = math_ops.cast(weighted_variance, dtypes.float16)

    return weighted_mean, weighted_variance
def batch_normalization(x,
                        mean,
                        variance,
                        offset,
                        scale,
                        variance_epsilon,
                        name=None):
  r"""Batch normalization.

  As described in http://arxiv.org/abs/1502.03167.  Normalizes a tensor by
  `mean` and `variance`, and applies (optionally) a `scale` \\(\gamma\\) to
  it, as well as an `offset` \\(\beta\\):

  \\(\frac{\gamma(x-\mu)}{\sigma}+\beta\\)

  `mean`, `variance`, `offset` and `scale` are all expected to be of one of
  two shapes:

    * In all generality, they can have the same number of dimensions as the
      input `x`, with identical sizes as `x` for the dimensions that are not
      normalized over (the 'depth' dimension(s)), and dimension 1 for the
      others which are being normalized over.
      `mean` and `variance` in this case would typically be the outputs of
      `tf.nn.moments(..., keep_dims=True)` during training, or running averages
      thereof during inference.
    * In the common case where the 'depth' dimension is the last dimension in
      the input tensor `x`, they may be one dimensional tensors of the same
      size as the 'depth' dimension.
      This is the case for example for the common `[batch, depth]` layout of
      fully-connected layers, and `[batch, height, width, depth]` for
      convolutions.
      `mean` and `variance` in this case would typically be the outputs of
      `tf.nn.moments(..., keep_dims=False)` during training, or running averages
      thereof during inference.

  Args:
    x: Input `Tensor` of arbitrary dimensionality.
    mean: A mean `Tensor`.
    variance: A variance `Tensor`.
    offset: An offset `Tensor`, often denoted \\(\beta\\) in equations, or
      None. If present, will be added to the normalized tensor.
    scale: A scale `Tensor`, often denoted \\(\gamma\\) in equations, or
      `None`. If present, the scale is applied to the normalized tensor.
    variance_epsilon: A small float number to avoid dividing by 0.
    name: A name for this operation (optional).

  Returns:
    the normalized, scaled, offset tensor.
  """
  with ops.name_scope(name, "batchnorm", [x, mean, variance, scale, offset]):
    # Fold the optional scale into the reciprocal standard deviation so the
    # whole transform is a single multiply-add on x.
    inv = math_ops.rsqrt(variance + variance_epsilon)
    if scale is not None:
      inv = inv * scale
    if offset is not None:
      shift = offset - mean * inv
    else:
      shift = -mean * inv
    return x * inv + shift
def fused_batch_norm(
    x,
    scale,
    offset,  # pylint: disable=invalid-name
    mean=None,
    variance=None,
    epsilon=0.001,
    data_format="NHWC",
    is_training=True,
    name=None):
  r"""Batch normalization.

  As described in http://arxiv.org/abs/1502.03167.

  Args:
    x: Input `Tensor` of 4 dimensions.
    scale: A `Tensor` of 1 dimension for scaling.
    offset: A `Tensor` of 1 dimension for bias.
    mean: A `Tensor` of 1 dimension for population mean used for inference.
    variance: A `Tensor` of 1 dimension for population variance
      used for inference.
    epsilon: A small float number added to the variance of x.
    data_format: The data format for x. Either "NHWC" (default) or "NCHW".
    is_training: A bool value to specify if the operation is used for
      training or inference.
    name: A name for this operation (optional).

  Returns:
    y: A 4D Tensor for the normalized, scaled, offsetted x.
    batch_mean: A 1D Tensor for the mean of x.
    batch_var: A 1D Tensor for the variance of x.

  Raises:
    ValueError: If mean or variance is not None when is_training is True.
  """
  x = ops.convert_to_tensor(x, name="input")
  scale = ops.convert_to_tensor(scale, name="scale")
  offset = ops.convert_to_tensor(offset, name="offset")
  if is_training:
    # Training mode computes its own batch statistics; population stats must
    # not be supplied.
    if (mean is not None) or (variance is not None):
      raise ValueError("Both 'mean' and 'variance' must be None "
                       "if is_training is True.")
  if mean is None:
    # NOTE(review): empty tensors appear to act as "not provided" placeholders
    # for the fused kernel -- confirm against the gen_nn_ops op definition.
    mean = constant_op.constant([])
  if variance is None:
    variance = constant_op.constant([])
  # Set a minimum epsilon to 1.001e-5, which is a requirement by CUDNN to
  # prevent exception (see cudnn.h).
  min_epsilon = 1.001e-5
  epsilon = epsilon if epsilon > min_epsilon else min_epsilon
  # TODO(reedwm): In a few weeks, switch to using the V2 version exclusively. We
  # currently only use the V2 version for float16 inputs, which is not supported
  # by the V1 version.
  # pylint: disable=protected-access
  if x.dtype == dtypes.float16 or x.dtype == dtypes.bfloat16:
    fused_batch_norm_func = gen_nn_ops._fused_batch_norm_v2
  else:
    fused_batch_norm_func = gen_nn_ops._fused_batch_norm
  # pylint: enable=protected-access
  # The two discarded outputs are presumably reserve spaces used by the
  # gradient computation -- not needed by callers here.
  y, batch_mean, batch_var, _, _ = fused_batch_norm_func(
      x,
      scale,
      offset,
      mean,
      variance,
      epsilon=epsilon,
      data_format=data_format,
      is_training=is_training,
      name=name)
  return y, batch_mean, batch_var
def batch_norm_with_global_normalization(t,
                                         m,
                                         v,
                                         beta,
                                         gamma,
                                         variance_epsilon,
                                         scale_after_normalization,
                                         name=None):
  """Batch normalization (deprecated; see `tf.nn.batch_normalization`).

  Args:
    t: A 4D input Tensor.
    m: A 1D mean Tensor with size matching the last dimension of t.
      This is the first output from tf.nn.moments,
      or a saved moving average thereof.
    v: A 1D variance Tensor with size matching the last dimension of t.
      This is the second output from tf.nn.moments,
      or a saved moving average thereof.
    beta: A 1D beta Tensor with size matching the last dimension of t.
      An offset to be added to the normalized tensor.
    gamma: A 1D gamma Tensor with size matching the last dimension of t.
      If "scale_after_normalization" is true, this tensor will be multiplied
      with the normalized tensor.
    variance_epsilon: A small float number to avoid dividing by 0.
    scale_after_normalization: A bool indicating whether the resulted tensor
      needs to be multiplied with gamma.
    name: A name for this operation (optional).

  Returns:
    A batch-normalized `t`.
  """
  # Thin wrapper: dropping gamma when no scaling is requested reduces this to
  # the modern batch_normalization() API.
  gamma_or_none = gamma if scale_after_normalization else None
  return batch_normalization(t, m, v, beta, gamma_or_none, variance_epsilon,
                             name)
def _sum_rows(x):
  """Returns a vector holding the sum of each row of the matrix `x`."""
  # Implemented as a matmul against a column of ones rather than
  # math_ops.reduce_sum(x, 1): the matmul form currently has a more
  # efficient gradient, and this helper sits inside training losses.
  num_cols = array_ops.shape(x)[1]
  ones_col = array_ops.ones(array_ops.stack([num_cols, 1]), x.dtype)
  return array_ops.reshape(math_ops.matmul(x, ones_col), [-1])
def _compute_sampled_logits(weights,
                            biases,
                            labels,
                            inputs,
                            num_sampled,
                            num_classes,
                            num_true=1,
                            sampled_values=None,
                            subtract_log_q=True,
                            remove_accidental_hits=False,
                            partition_strategy="mod",
                            name=None):
  """Helper function for nce_loss and sampled_softmax_loss functions.
  Computes sampled output training logits and labels suitable for implementing
  e.g. noise-contrastive estimation (see nce_loss) or sampled softmax (see
  sampled_softmax_loss).
  Note: In the case where num_true > 1, we assign to each target class
  the target probability 1 / num_true so that the target probabilities
  sum to 1 per-example.
  Args:
    weights: A `Tensor` of shape `[num_classes, dim]`, or a list of `Tensor`
        objects whose concatenation along dimension 0 has shape
        `[num_classes, dim]`.  The (possibly-partitioned) class embeddings.
    biases: A `Tensor` of shape `[num_classes]`.  The (possibly-partitioned)
        class biases.
    labels: A `Tensor` of type `int64` and shape `[batch_size,
        num_true]`. The target classes.  Note that this format differs from
        the `labels` argument of `nn.softmax_cross_entropy_with_logits`.
    inputs: A `Tensor` of shape `[batch_size, dim]`.  The forward
        activations of the input network.
    num_sampled: An `int`.  The number of classes to randomly sample per batch.
    num_classes: An `int`. The number of possible classes.
    num_true: An `int`.  The number of target classes per training example.
    sampled_values: a tuple of (`sampled_candidates`, `true_expected_count`,
        `sampled_expected_count`) returned by a `*_candidate_sampler` function.
        (if None, we default to `log_uniform_candidate_sampler`)
    subtract_log_q: A `bool`.  whether to subtract the log expected count of
        the labels in the sample to get the logits of the true labels.
        Default is True.  Turn off for Negative Sampling.
    remove_accidental_hits:  A `bool`.  whether to remove "accidental hits"
        where a sampled class equals one of the target classes.  Default is
        False.
    partition_strategy: A string specifying the partitioning strategy, relevant
        if `len(weights) > 1`. Currently `"div"` and `"mod"` are supported.
        Default is `"mod"`. See `tf.nn.embedding_lookup` for more details.
    name: A name for the operation (optional).
  Returns:
    out_logits: `Tensor` object with shape
        `[batch_size, num_true + num_sampled]`, for passing to either
        `nn.sigmoid_cross_entropy_with_logits` (NCE) or
        `nn.softmax_cross_entropy_with_logits` (sampled softmax).
    out_labels: A Tensor object with the same shape as `out_logits`.
  """
  # Normalize `weights` into a plain list of shards so the embedding
  # lookup below can treat the partitioned and unpartitioned cases alike.
  if isinstance(weights, variables.PartitionedVariable):
    weights = list(weights)
  if not isinstance(weights, list):
    weights = [weights]
  with ops.name_scope(name, "compute_sampled_logits",
                      weights + [biases, inputs, labels]):
    if labels.dtype != dtypes.int64:
      labels = math_ops.cast(labels, dtypes.int64)
    labels_flat = array_ops.reshape(labels, [-1])
    # Sample the negative labels.
    #   sampled shape: [num_sampled] tensor
    #   true_expected_count shape = [batch_size, 1] tensor
    #   sampled_expected_count shape = [num_sampled] tensor
    if sampled_values is None:
      sampled_values = candidate_sampling_ops.log_uniform_candidate_sampler(
          true_classes=labels,
          num_true=num_true,
          num_sampled=num_sampled,
          unique=True,
          range_max=num_classes)
    # NOTE: pylint cannot tell that 'sampled_values' is a sequence
    # pylint: disable=unpacking-non-sequence
    # stop_gradient: sampling is a non-differentiable, random choice, so
    # no gradients should flow back through the sampler's outputs.
    sampled, true_expected_count, sampled_expected_count = (
        array_ops.stop_gradient(s) for s in sampled_values)
    # pylint: enable=unpacking-non-sequence
    sampled = math_ops.cast(sampled, dtypes.int64)
    # labels_flat is a [batch_size * num_true] tensor
    # sampled is a [num_sampled] int tensor
    all_ids = array_ops.concat([labels_flat, sampled], 0)
    # Retrieve the true weights and the logits of the sampled weights.
    # A single lookup over the concatenated ids; the result is then split
    # back into the "true" rows and the "sampled" rows below.
    # weights shape is [num_classes, dim]
    all_w = embedding_ops.embedding_lookup(
        weights, all_ids, partition_strategy=partition_strategy)
    # true_w shape is [batch_size * num_true, dim]
    true_w = array_ops.slice(all_w, [0, 0],
                             array_ops.stack(
                                 [array_ops.shape(labels_flat)[0], -1]))
    # sampled_w: the remaining rows ([-1, -1] size means "to the end"
    # in both dimensions).
    sampled_w = array_ops.slice(
        all_w, array_ops.stack([array_ops.shape(labels_flat)[0], 0]), [-1, -1])
    # inputs has shape [batch_size, dim]
    # sampled_w has shape [num_sampled, dim]
    # Apply X*W', which yields [batch_size, num_sampled]
    sampled_logits = math_ops.matmul(inputs, sampled_w, transpose_b=True)
    # Retrieve the true and sampled biases, compute the true logits, and
    # add the biases to the true and sampled logits.
    all_b = embedding_ops.embedding_lookup(
        biases, all_ids, partition_strategy=partition_strategy)
    # true_b is a [batch_size * num_true] tensor
    # sampled_b is a [num_sampled] float tensor
    true_b = array_ops.slice(all_b, [0], array_ops.shape(labels_flat))
    sampled_b = array_ops.slice(all_b, array_ops.shape(labels_flat), [-1])
    # inputs shape is [batch_size, dim]
    # true_w shape is [batch_size * num_true, dim]
    # row_wise_dots is [batch_size, num_true, dim]
    dim = array_ops.shape(true_w)[1:2]
    new_true_w_shape = array_ops.concat([[-1, num_true], dim], 0)
    row_wise_dots = math_ops.multiply(
        array_ops.expand_dims(inputs, 1),
        array_ops.reshape(true_w, new_true_w_shape))
    # We want the row-wise dot plus biases which yields a
    # [batch_size, num_true] tensor of true_logits.
    dots_as_matrix = array_ops.reshape(row_wise_dots,
                                       array_ops.concat([[-1], dim], 0))
    true_logits = array_ops.reshape(_sum_rows(dots_as_matrix), [-1, num_true])
    true_b = array_ops.reshape(true_b, [-1, num_true])
    true_logits += true_b
    sampled_logits += sampled_b
    if remove_accidental_hits:
      # acc_weights (presumably large negative values — see
      # compute_accidental_hits) suppress the logits of sampled classes
      # that collide with one of the example's true classes.
      acc_hits = candidate_sampling_ops.compute_accidental_hits(
          labels, sampled, num_true=num_true)
      acc_indices, acc_ids, acc_weights = acc_hits
      # This is how SparseToDense expects the indices.
      acc_indices_2d = array_ops.reshape(acc_indices, [-1, 1])
      acc_ids_2d_int32 = array_ops.reshape(
          math_ops.cast(acc_ids, dtypes.int32), [-1, 1])
      sparse_indices = array_ops.concat([acc_indices_2d, acc_ids_2d_int32], 1,
                                        "sparse_indices")
      # Create sampled_logits_shape = [batch_size, num_sampled]
      sampled_logits_shape = array_ops.concat(
          [array_ops.shape(labels)[:1],
           array_ops.expand_dims(num_sampled, 0)], 0)
      if sampled_logits.dtype != acc_weights.dtype:
        acc_weights = math_ops.cast(acc_weights, sampled_logits.dtype)
      sampled_logits += sparse_ops.sparse_to_dense(
          sparse_indices,
          sampled_logits_shape,
          acc_weights,
          default_value=0.0,
          validate_indices=False)
    if subtract_log_q:
      # Subtract log of Q(l), prior probability that l appears in sampled.
      true_logits -= math_ops.log(true_expected_count)
      sampled_logits -= math_ops.log(sampled_expected_count)
    # Construct output logits and labels. The true labels/logits start at col 0.
    out_logits = array_ops.concat([true_logits, sampled_logits], 1)
    # true_logits is a float tensor, ones_like(true_logits) is a float
    # tensor of ones. We then divide by num_true to ensure the per-example
    # labels sum to 1.0, i.e. form a proper probability distribution.
    out_labels = array_ops.concat([
        array_ops.ones_like(true_logits) / num_true,
        array_ops.zeros_like(sampled_logits)
    ], 1)
  return out_logits, out_labels
def nce_loss(weights,
             biases,
             labels,
             inputs,
             num_sampled,
             num_classes,
             num_true=1,
             sampled_values=None,
             remove_accidental_hits=False,
             partition_strategy="mod",
             name="nce_loss"):
  """Computes and returns the noise-contrastive estimation training loss.
  See [Noise-contrastive estimation: A new estimation principle for
  unnormalized statistical
  models](http://www.jmlr.org/proceedings/papers/v9/gutmann10a/gutmann10a.pdf)
  and our [Candidate Sampling Algorithms
  Reference](https://www.tensorflow.org/extras/candidate_sampling.pdf).
  A common pattern is to train with this loss and evaluate or infer with
  the full sigmoid loss. For the two to be consistent you must pass
  `partition_strategy="div"` to both, e.g.:
  ```python
  if mode == "train":
    loss = tf.nn.nce_loss(
        weights=weights,
        biases=biases,
        labels=labels,
        inputs=inputs,
        ...,
        partition_strategy="div")
  elif mode == "eval":
    logits = tf.matmul(inputs, tf.transpose(weights))
    logits = tf.nn.bias_add(logits, biases)
    labels_one_hot = tf.one_hot(labels, n_classes)
    loss = tf.nn.sigmoid_cross_entropy_with_logits(
        labels=labels_one_hot,
        logits=logits)
    loss = tf.reduce_sum(loss, axis=1)
  ```
  Note: the default sampler is log-uniform (Zipfian), so labels must be
  sorted by decreasing frequency for good results; see
  @{tf.nn.log_uniform_candidate_sampler}.
  Note: when `num_true` > 1 each target class is assigned probability
  1 / `num_true`, so the per-example target probabilities sum to 1.
  Note: a variable number of target classes per example is not supported
  yet; pad with repeated targets or an otherwise unused class.
  Args:
    weights: A `Tensor` of shape `[num_classes, dim]`, or a list of `Tensor`
        objects whose concatenation along dimension 0 has shape
        [num_classes, dim].  The (possibly-partitioned) class embeddings.
    biases: A `Tensor` of shape `[num_classes]`.  The class biases.
    labels: A `Tensor` of type `int64` and shape `[batch_size,
        num_true]`. The target classes.
    inputs: A `Tensor` of shape `[batch_size, dim]`.  The forward
        activations of the input network.
    num_sampled: An `int`.  The number of classes to randomly sample per batch.
    num_classes: An `int`. The number of possible classes.
    num_true: An `int`.  The number of target classes per training example.
    sampled_values: a tuple of (`sampled_candidates`, `true_expected_count`,
        `sampled_expected_count`) returned by a `*_candidate_sampler` function.
        (if None, we default to `log_uniform_candidate_sampler`)
    remove_accidental_hits:  A `bool`.  Whether to remove "accidental hits"
        where a sampled class equals one of the target classes.  If set to
        `True`, this is a "Sampled Logistic" loss instead of NCE, and we are
        learning to generate log-odds instead of log probabilities. See
        our [Candidate Sampling Algorithms Reference]
        (https://www.tensorflow.org/extras/candidate_sampling.pdf).
        Default is False.
    partition_strategy: A string specifying the partitioning strategy, relevant
        if `len(weights) > 1`. Currently `"div"` and `"mod"` are supported.
        Default is `"mod"`. See `tf.nn.embedding_lookup` for more details.
    name: A name for the operation (optional).
  Returns:
    A `batch_size` 1-D tensor of per-example NCE losses.
  """
  out_logits, out_labels = _compute_sampled_logits(
      weights=weights,
      biases=biases,
      labels=labels,
      inputs=inputs,
      num_sampled=num_sampled,
      num_classes=num_classes,
      num_true=num_true,
      sampled_values=sampled_values,
      subtract_log_q=True,
      remove_accidental_hits=remove_accidental_hits,
      partition_strategy=partition_strategy,
      name=name)
  # One sigmoid loss per (example, class) pair: the true classes occupy
  # the leading columns, followed by the sampled negatives.
  per_class_losses = sigmoid_cross_entropy_with_logits(
      labels=out_labels, logits=out_logits, name="sampled_losses")
  # Sum over the true and sampled classes to get one loss per example.
  return _sum_rows(per_class_losses)
def sampled_softmax_loss(weights,
                         biases,
                         labels,
                         inputs,
                         num_sampled,
                         num_classes,
                         num_true=1,
                         sampled_values=None,
                         remove_accidental_hits=True,
                         partition_strategy="mod",
                         name="sampled_softmax_loss"):
  """Computes and returns the sampled softmax training loss.
  This is a faster way to train a softmax classifier over a huge number of
  classes.
  This operation is for training only.  It is generally an underestimate of
  the full softmax loss.
  A common pattern is to train with this loss and evaluate or infer with
  the full softmax loss. For the two to be consistent you must pass
  `partition_strategy="div"` to both, e.g.:
  ```python
  if mode == "train":
    loss = tf.nn.sampled_softmax_loss(
        weights=weights,
        biases=biases,
        labels=labels,
        inputs=inputs,
        ...,
        partition_strategy="div")
  elif mode == "eval":
    logits = tf.matmul(inputs, tf.transpose(weights))
    logits = tf.nn.bias_add(logits, biases)
    labels_one_hot = tf.one_hot(labels, n_classes)
    loss = tf.nn.softmax_cross_entropy_with_logits(
        labels=labels_one_hot,
        logits=logits)
  ```
  See our [Candidate Sampling Algorithms Reference]
  (https://www.tensorflow.org/extras/candidate_sampling.pdf)
  Also see Section 3 of [Jean et al., 2014](http://arxiv.org/abs/1412.2007)
  ([pdf](http://arxiv.org/pdf/1412.2007.pdf)) for the math.
  Args:
    weights: A `Tensor` of shape `[num_classes, dim]`, or a list of `Tensor`
        objects whose concatenation along dimension 0 has shape
        [num_classes, dim].  The (possibly-sharded) class embeddings.
    biases: A `Tensor` of shape `[num_classes]`.  The class biases.
    labels: A `Tensor` of type `int64` and shape `[batch_size,
        num_true]`. The target classes.  Note that this format differs from
        the `labels` argument of `nn.softmax_cross_entropy_with_logits`.
    inputs: A `Tensor` of shape `[batch_size, dim]`.  The forward
        activations of the input network.
    num_sampled: An `int`.  The number of classes to randomly sample per batch.
    num_classes: An `int`. The number of possible classes.
    num_true: An `int`.  The number of target classes per training example.
    sampled_values: a tuple of (`sampled_candidates`, `true_expected_count`,
        `sampled_expected_count`) returned by a `*_candidate_sampler` function.
        (if None, we default to `log_uniform_candidate_sampler`)
    remove_accidental_hits:  A `bool`.  whether to remove "accidental hits"
        where a sampled class equals one of the target classes.  Default is
        True.
    partition_strategy: A string specifying the partitioning strategy, relevant
        if `len(weights) > 1`. Currently `"div"` and `"mod"` are supported.
        Default is `"mod"`. See `tf.nn.embedding_lookup` for more details.
    name: A name for the operation (optional).
  Returns:
    A `batch_size` 1-D tensor of per-example sampled softmax losses.
  """
  out_logits, out_labels = _compute_sampled_logits(
      weights=weights,
      biases=biases,
      labels=labels,
      inputs=inputs,
      num_sampled=num_sampled,
      num_classes=num_classes,
      num_true=num_true,
      sampled_values=sampled_values,
      subtract_log_q=True,
      remove_accidental_hits=remove_accidental_hits,
      partition_strategy=partition_strategy,
      name=name)
  # Softmax cross-entropy over the [true, sampled] class columns already
  # yields a [batch_size] tensor, so no further reduction is needed.
  per_example_losses = nn_ops.softmax_cross_entropy_with_logits(
      labels=out_labels, logits=out_logits)
  return per_example_losses
| eadgarchen/tensorflow | tensorflow/python/ops/nn_impl.py | Python | apache-2.0 | 53,908 |
from django import forms
class LoginForm(forms.Form):
    """Credentials form: a login name, a password, and a redirect target."""
    # Account identifier entered by the user.
    login = forms.CharField(max_length=255)
    # Rendered with a password widget so the input is masked in the browser.
    password = forms.CharField(widget=forms.PasswordInput())
    # Post-login destination; presumably a URL or view name consumed by the
    # login view — TODO confirm against the caller.
    target = forms.CharField()
| sstacha/uweb-install | cms_files/forms.py | Python | apache-2.0 | 192 |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import os
from setuptools import setup, find_packages
def read(fname):
    """Return the text of *fname*, resolved relative to this setup.py.

    Uses a context manager so the file handle is closed deterministically
    instead of leaking until garbage collection (the original left the
    handle from ``open(...).read()`` unclosed).

    Args:
        fname: File name relative to the directory containing this file.

    Returns:
        The file contents as a string.
    """
    with open(os.path.join(os.path.dirname(__file__), fname)) as handle:
        return handle.read()
# Distribution name and release version for the aggregate package.
NAME = 'recognizers-text-suite'
VERSION = '1.0.0.a0'
# The suite is a meta-package: it pins the individual recognizer packages
# and carries no code of its own.
REQUIRES = ['recognizers-text', 'recognizers-text-number', 'recognizers-text-number-with-unit', 'recognizers-text-date-time']
# Package metadata handed to setuptools; the long description is read
# from the adjacent README.rst at build time.
setup(
    name=NAME,
    version=VERSION,
    url='https://github.com/Microsoft/Recognizers-Text',
    author='Microsoft',
    description='recognizers-text-suite README',
    keywords=['nlp', 'nlp-entity-extraction', 'entity-extraction', 'parser-library'],
    long_description=read('README.rst'),
    license='MIT',
    # Include every package found under this directory.
    packages=find_packages(),
    install_requires=REQUIRES,
    classifiers=[
        'Programming Language :: Python :: 3.6',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Development Status :: 3 - Alpha',
        'Topic :: Scientific/Engineering :: Artificial Intelligence',
    ]
)
| matthewshim-ms/Recognizers-Text | Python/libraries/recognizers-suite/setup.py | Python | mit | 1,126 |
from __future__ import print_function
import sys
sys.path.insert(1,"../../../")
import h2o
from tests import pyunit_utils
from random import randrange
from h2o.frame import H2OFrame
from h2o.utils.typechecks import assert_is_type
def h2o_H2OFrame_head():
    """
    Python API test: h2o.frame.H2OFrame.head(rows=10, cols=200)
    """
    iris = h2o.import_file(path=pyunit_utils.locate("smalldata/iris/iris.csv"),
                           col_types=["numeric","numeric","numeric","numeric","string"])
    # Pick a random sub-rectangle strictly smaller than the full frame.
    n_rows = randrange(1, iris.nrow)
    n_cols = randrange(1, iris.ncol)
    head_frame = iris.head(rows=n_rows, cols=n_cols)
    assert_is_type(head_frame, H2OFrame)  # check return type
    # check frame size
    assert head_frame.dim == [n_rows, n_cols], "h2o.H2OFrame.head() command is not working."
if __name__ == "__main__":
    # Pass the test function itself, not its result: the original called
    # h2o_H2OFrame_head() here, which ran the test before standalone_test
    # could set up the H2O connection and then handed standalone_test the
    # function's None return value instead of a callable.
    pyunit_utils.standalone_test(h2o_H2OFrame_head)
else:
    h2o_H2OFrame_head()
| spennihana/h2o-3 | h2o-py/tests/testdir_apis/Data_Manipulation/pyunit_h2oH2OFrame_head.py | Python | apache-2.0 | 921 |
from pybindgen import Module, FileCodeSink, param, retval, cppclass, typehandlers
import pybindgen.settings
import warnings
class ErrorHandler(pybindgen.settings.ErrorHandler):
    """Pybindgen error handler that downgrades wrapper failures to warnings."""
    def handle_error(self, wrapper, exception, traceback_):
        # Report the failing wrapper but keep generating the remaining
        # bindings; returning True tells pybindgen the error was handled.
        warnings.warn("exception %r in wrapper %s" % (exception, wrapper))
        return True
# Install the lenient handler globally for this binding-generation run.
pybindgen.settings.error_handler = ErrorHandler()
import sys
def module_init():
    """Create the root pybindgen module for the ns-3 'wimax' bindings.

    All wrapped classes live in the C++ namespace ``ns3``.
    """
    root_module = Module('ns.wimax', cpp_namespace='::ns3')
    return root_module
def register_types(module):
root_module = module.get_root()
## ul-job.h (module 'wimax'): ns3::ReqType [enumeration]
module.add_enum('ReqType', ['DATA', 'UNICAST_POLLING'])
## log.h (module 'core'): ns3::LogLevel [enumeration]
module.add_enum('LogLevel', ['LOG_NONE', 'LOG_ERROR', 'LOG_LEVEL_ERROR', 'LOG_WARN', 'LOG_LEVEL_WARN', 'LOG_DEBUG', 'LOG_LEVEL_DEBUG', 'LOG_INFO', 'LOG_LEVEL_INFO', 'LOG_FUNCTION', 'LOG_LEVEL_FUNCTION', 'LOG_LOGIC', 'LOG_LEVEL_LOGIC', 'LOG_ALL', 'LOG_LEVEL_ALL', 'LOG_PREFIX_FUNC', 'LOG_PREFIX_TIME', 'LOG_PREFIX_NODE'], import_from_module='ns.core')
## address.h (module 'network'): ns3::Address [class]
module.add_class('Address', import_from_module='ns.network')
## address.h (module 'network'): ns3::Address::MaxSize_e [enumeration]
module.add_enum('MaxSize_e', ['MAX_SIZE'], outer_class=root_module['ns3::Address'], import_from_module='ns.network')
## trace-helper.h (module 'network'): ns3::AsciiTraceHelper [class]
module.add_class('AsciiTraceHelper', import_from_module='ns.network')
## trace-helper.h (module 'network'): ns3::AsciiTraceHelperForDevice [class]
module.add_class('AsciiTraceHelperForDevice', allow_subclassing=True, import_from_module='ns.network')
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList [class]
module.add_class('AttributeConstructionList', import_from_module='ns.core')
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item [struct]
module.add_class('Item', import_from_module='ns.core', outer_class=root_module['ns3::AttributeConstructionList'])
## buffer.h (module 'network'): ns3::Buffer [class]
module.add_class('Buffer', import_from_module='ns.network')
## buffer.h (module 'network'): ns3::Buffer::Iterator [class]
module.add_class('Iterator', import_from_module='ns.network', outer_class=root_module['ns3::Buffer'])
## packet.h (module 'network'): ns3::ByteTagIterator [class]
module.add_class('ByteTagIterator', import_from_module='ns.network')
## packet.h (module 'network'): ns3::ByteTagIterator::Item [class]
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagIterator'])
## byte-tag-list.h (module 'network'): ns3::ByteTagList [class]
module.add_class('ByteTagList', import_from_module='ns.network')
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator [class]
module.add_class('Iterator', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagList'])
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item [struct]
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagList::Iterator'])
## callback.h (module 'core'): ns3::CallbackBase [class]
module.add_class('CallbackBase', import_from_module='ns.core')
## cid.h (module 'wimax'): ns3::Cid [class]
module.add_class('Cid')
## cid.h (module 'wimax'): ns3::Cid::Type [enumeration]
module.add_enum('Type', ['BROADCAST', 'INITIAL_RANGING', 'BASIC', 'PRIMARY', 'TRANSPORT', 'MULTICAST', 'PADDING'], outer_class=root_module['ns3::Cid'])
## cid-factory.h (module 'wimax'): ns3::CidFactory [class]
module.add_class('CidFactory')
## cs-parameters.h (module 'wimax'): ns3::CsParameters [class]
module.add_class('CsParameters')
## cs-parameters.h (module 'wimax'): ns3::CsParameters::Action [enumeration]
module.add_enum('Action', ['ADD', 'REPLACE', 'DELETE'], outer_class=root_module['ns3::CsParameters'])
## dl-mac-messages.h (module 'wimax'): ns3::DcdChannelEncodings [class]
module.add_class('DcdChannelEncodings', allow_subclassing=True)
## ofdm-downlink-frame-prefix.h (module 'wimax'): ns3::DlFramePrefixIe [class]
module.add_class('DlFramePrefixIe')
## event-id.h (module 'core'): ns3::EventId [class]
module.add_class('EventId', import_from_module='ns.core')
## ipcs-classifier-record.h (module 'wimax'): ns3::IpcsClassifierRecord [class]
module.add_class('IpcsClassifierRecord')
## ipv4-address.h (module 'network'): ns3::Ipv4Address [class]
module.add_class('Ipv4Address', import_from_module='ns.network')
## ipv4-address.h (module 'network'): ns3::Ipv4Address [class]
root_module['ns3::Ipv4Address'].implicitly_converts_to(root_module['ns3::Address'])
## ipv4-address.h (module 'network'): ns3::Ipv4Mask [class]
module.add_class('Ipv4Mask', import_from_module='ns.network')
## ipv6-address.h (module 'network'): ns3::Ipv6Address [class]
module.add_class('Ipv6Address', import_from_module='ns.network')
## ipv6-address.h (module 'network'): ns3::Ipv6Address [class]
root_module['ns3::Ipv6Address'].implicitly_converts_to(root_module['ns3::Address'])
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix [class]
module.add_class('Ipv6Prefix', import_from_module='ns.network')
## log.h (module 'core'): ns3::LogComponent [class]
module.add_class('LogComponent', import_from_module='ns.core')
## mac48-address.h (module 'network'): ns3::Mac48Address [class]
module.add_class('Mac48Address', import_from_module='ns.network')
## mac48-address.h (module 'network'): ns3::Mac48Address [class]
root_module['ns3::Mac48Address'].implicitly_converts_to(root_module['ns3::Address'])
## net-device-container.h (module 'network'): ns3::NetDeviceContainer [class]
module.add_class('NetDeviceContainer', import_from_module='ns.network')
## node-container.h (module 'network'): ns3::NodeContainer [class]
module.add_class('NodeContainer', import_from_module='ns.network')
## object-base.h (module 'core'): ns3::ObjectBase [class]
module.add_class('ObjectBase', allow_subclassing=True, import_from_module='ns.core')
## object.h (module 'core'): ns3::ObjectDeleter [struct]
module.add_class('ObjectDeleter', import_from_module='ns.core')
## object-factory.h (module 'core'): ns3::ObjectFactory [class]
module.add_class('ObjectFactory', import_from_module='ns.core')
## dl-mac-messages.h (module 'wimax'): ns3::OfdmDcdChannelEncodings [class]
module.add_class('OfdmDcdChannelEncodings', parent=root_module['ns3::DcdChannelEncodings'])
## dl-mac-messages.h (module 'wimax'): ns3::OfdmDlBurstProfile [class]
module.add_class('OfdmDlBurstProfile')
## dl-mac-messages.h (module 'wimax'): ns3::OfdmDlBurstProfile::Diuc [enumeration]
module.add_enum('Diuc', ['DIUC_STC_ZONE', 'DIUC_BURST_PROFILE_1', 'DIUC_BURST_PROFILE_2', 'DIUC_BURST_PROFILE_3', 'DIUC_BURST_PROFILE_4', 'DIUC_BURST_PROFILE_5', 'DIUC_BURST_PROFILE_6', 'DIUC_BURST_PROFILE_7', 'DIUC_BURST_PROFILE_8', 'DIUC_BURST_PROFILE_9', 'DIUC_BURST_PROFILE_10', 'DIUC_BURST_PROFILE_11', 'DIUC_GAP', 'DIUC_END_OF_MAP'], outer_class=root_module['ns3::OfdmDlBurstProfile'])
## dl-mac-messages.h (module 'wimax'): ns3::OfdmDlMapIe [class]
module.add_class('OfdmDlMapIe')
## ul-mac-messages.h (module 'wimax'): ns3::OfdmUlBurstProfile [class]
module.add_class('OfdmUlBurstProfile')
## ul-mac-messages.h (module 'wimax'): ns3::OfdmUlBurstProfile::Uiuc [enumeration]
module.add_enum('Uiuc', ['UIUC_INITIAL_RANGING', 'UIUC_REQ_REGION_FULL', 'UIUC_REQ_REGION_FOCUSED', 'UIUC_FOCUSED_CONTENTION_IE', 'UIUC_BURST_PROFILE_5', 'UIUC_BURST_PROFILE_6', 'UIUC_BURST_PROFILE_7', 'UIUC_BURST_PROFILE_8', 'UIUC_BURST_PROFILE_9', 'UIUC_BURST_PROFILE_10', 'UIUC_BURST_PROFILE_11', 'UIUC_BURST_PROFILE_12', 'UIUC_SUBCH_NETWORK_ENTRY', 'UIUC_END_OF_MAP'], outer_class=root_module['ns3::OfdmUlBurstProfile'])
## ul-mac-messages.h (module 'wimax'): ns3::OfdmUlMapIe [class]
module.add_class('OfdmUlMapIe')
## packet-metadata.h (module 'network'): ns3::PacketMetadata [class]
module.add_class('PacketMetadata', import_from_module='ns.network')
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item [struct]
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::PacketMetadata'])
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item [enumeration]
module.add_enum('', ['PAYLOAD', 'HEADER', 'TRAILER'], outer_class=root_module['ns3::PacketMetadata::Item'], import_from_module='ns.network')
## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator [class]
module.add_class('ItemIterator', import_from_module='ns.network', outer_class=root_module['ns3::PacketMetadata'])
## packet.h (module 'network'): ns3::PacketTagIterator [class]
module.add_class('PacketTagIterator', import_from_module='ns.network')
## packet.h (module 'network'): ns3::PacketTagIterator::Item [class]
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::PacketTagIterator'])
## packet-tag-list.h (module 'network'): ns3::PacketTagList [class]
module.add_class('PacketTagList', import_from_module='ns.network')
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData [struct]
module.add_class('TagData', import_from_module='ns.network', outer_class=root_module['ns3::PacketTagList'])
## pcap-file.h (module 'network'): ns3::PcapFile [class]
module.add_class('PcapFile', import_from_module='ns.network')
## trace-helper.h (module 'network'): ns3::PcapHelper [class]
module.add_class('PcapHelper', import_from_module='ns.network')
## trace-helper.h (module 'network'): ns3::PcapHelper [enumeration]
module.add_enum('', ['DLT_NULL', 'DLT_EN10MB', 'DLT_PPP', 'DLT_RAW', 'DLT_IEEE802_11', 'DLT_PRISM_HEADER', 'DLT_IEEE802_11_RADIO'], outer_class=root_module['ns3::PcapHelper'], import_from_module='ns.network')
## trace-helper.h (module 'network'): ns3::PcapHelperForDevice [class]
module.add_class('PcapHelperForDevice', allow_subclassing=True, import_from_module='ns.network')
## snr-to-block-error-rate-manager.h (module 'wimax'): ns3::SNRToBlockErrorRateManager [class]
module.add_class('SNRToBlockErrorRateManager')
## snr-to-block-error-rate-record.h (module 'wimax'): ns3::SNRToBlockErrorRateRecord [class]
module.add_class('SNRToBlockErrorRateRecord')
## ss-record.h (module 'wimax'): ns3::SSRecord [class]
module.add_class('SSRecord')
## send-params.h (module 'wimax'): ns3::SendParams [class]
module.add_class('SendParams')
## service-flow.h (module 'wimax'): ns3::ServiceFlow [class]
module.add_class('ServiceFlow')
## service-flow.h (module 'wimax'): ns3::ServiceFlow::Direction [enumeration]
module.add_enum('Direction', ['SF_DIRECTION_DOWN', 'SF_DIRECTION_UP'], outer_class=root_module['ns3::ServiceFlow'])
## service-flow.h (module 'wimax'): ns3::ServiceFlow::Type [enumeration]
module.add_enum('Type', ['SF_TYPE_PROVISIONED', 'SF_TYPE_ADMITTED', 'SF_TYPE_ACTIVE'], outer_class=root_module['ns3::ServiceFlow'])
## service-flow.h (module 'wimax'): ns3::ServiceFlow::SchedulingType [enumeration]
module.add_enum('SchedulingType', ['SF_TYPE_NONE', 'SF_TYPE_UNDEF', 'SF_TYPE_BE', 'SF_TYPE_NRTPS', 'SF_TYPE_RTPS', 'SF_TYPE_UGS', 'SF_TYPE_ALL'], outer_class=root_module['ns3::ServiceFlow'])
## service-flow.h (module 'wimax'): ns3::ServiceFlow::CsSpecification [enumeration]
module.add_enum('CsSpecification', ['ATM', 'IPV4', 'IPV6', 'ETHERNET', 'VLAN', 'IPV4_OVER_ETHERNET', 'IPV6_OVER_ETHERNET', 'IPV4_OVER_VLAN', 'IPV6_OVER_VLAN'], outer_class=root_module['ns3::ServiceFlow'])
## service-flow.h (module 'wimax'): ns3::ServiceFlow::ModulationType [enumeration]
module.add_enum('ModulationType', ['MODULATION_TYPE_BPSK_12', 'MODULATION_TYPE_QPSK_12', 'MODULATION_TYPE_QPSK_34', 'MODULATION_TYPE_QAM16_12', 'MODULATION_TYPE_QAM16_34', 'MODULATION_TYPE_QAM64_23', 'MODULATION_TYPE_QAM64_34'], outer_class=root_module['ns3::ServiceFlow'])
## service-flow-record.h (module 'wimax'): ns3::ServiceFlowRecord [class]
module.add_class('ServiceFlowRecord')
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Object', 'ns3::ObjectBase', 'ns3::ObjectDeleter'], parent=root_module['ns3::ObjectBase'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simulator.h (module 'core'): ns3::Simulator [class]
module.add_class('Simulator', destructor_visibility='private', import_from_module='ns.core')
## tag.h (module 'network'): ns3::Tag [class]
module.add_class('Tag', import_from_module='ns.network', parent=root_module['ns3::ObjectBase'])
## tag-buffer.h (module 'network'): ns3::TagBuffer [class]
module.add_class('TagBuffer', import_from_module='ns.network')
## wimax-tlv.h (module 'wimax'): ns3::TlvValue [class]
# NOTE(review): Auto-generated PyBindGen registration code for the ns-3
# 'wimax' python bindings (produced by the ns-3 API scanner). Do not
# hand-edit — regenerate instead. The statements are order-dependent:
# every parent=root_module[...] / outer_class=root_module[...] lookup
# requires that type to have been registered by an earlier add_class
# call, so reordering lines here breaks module generation.
module.add_class('TlvValue', allow_subclassing=True)
## wimax-tlv.h (module 'wimax'): ns3::TosTlvValue [class]
module.add_class('TosTlvValue', parent=root_module['ns3::TlvValue'])
## type-id.h (module 'core'): ns3::TypeId [class]
module.add_class('TypeId', import_from_module='ns.core')
## type-id.h (module 'core'): ns3::TypeId::AttributeFlag [enumeration]
module.add_enum('AttributeFlag', ['ATTR_GET', 'ATTR_SET', 'ATTR_CONSTRUCT', 'ATTR_SGC'], outer_class=root_module['ns3::TypeId'], import_from_module='ns.core')
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation [struct]
module.add_class('AttributeInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation [struct]
module.add_class('TraceSourceInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])
## wimax-tlv.h (module 'wimax'): ns3::U16TlvValue [class]
module.add_class('U16TlvValue', parent=root_module['ns3::TlvValue'])
## wimax-tlv.h (module 'wimax'): ns3::U32TlvValue [class]
module.add_class('U32TlvValue', parent=root_module['ns3::TlvValue'])
## wimax-tlv.h (module 'wimax'): ns3::U8TlvValue [class]
module.add_class('U8TlvValue', parent=root_module['ns3::TlvValue'])
## ul-mac-messages.h (module 'wimax'): ns3::UcdChannelEncodings [class]
module.add_class('UcdChannelEncodings', allow_subclassing=True)
## wimax-tlv.h (module 'wimax'): ns3::VectorTlvValue [class]
module.add_class('VectorTlvValue', parent=root_module['ns3::TlvValue'])
## wimax-helper.h (module 'wimax'): ns3::WimaxHelper [class]
module.add_class('WimaxHelper', parent=[root_module['ns3::PcapHelperForDevice'], root_module['ns3::AsciiTraceHelperForDevice']])
## wimax-helper.h (module 'wimax'): ns3::WimaxHelper::NetDeviceType [enumeration]
module.add_enum('NetDeviceType', ['DEVICE_TYPE_SUBSCRIBER_STATION', 'DEVICE_TYPE_BASE_STATION'], outer_class=root_module['ns3::WimaxHelper'])
## wimax-helper.h (module 'wimax'): ns3::WimaxHelper::PhyType [enumeration]
module.add_enum('PhyType', ['SIMPLE_PHY_TYPE_OFDM'], outer_class=root_module['ns3::WimaxHelper'])
## wimax-helper.h (module 'wimax'): ns3::WimaxHelper::SchedulerType [enumeration]
module.add_enum('SchedulerType', ['SCHED_TYPE_SIMPLE', 'SCHED_TYPE_RTPS', 'SCHED_TYPE_MBQOS'], outer_class=root_module['ns3::WimaxHelper'])
## empty.h (module 'core'): ns3::empty [class]
module.add_class('empty', import_from_module='ns.core')
## int64x64-double.h (module 'core'): ns3::int64x64_t [class]
module.add_class('int64x64_t', import_from_module='ns.core')
## simple-ofdm-send-param.h (module 'wimax'): ns3::simpleOfdmSendParam [class]
module.add_class('simpleOfdmSendParam')
## chunk.h (module 'network'): ns3::Chunk [class]
module.add_class('Chunk', import_from_module='ns.network', parent=root_module['ns3::ObjectBase'])
## wimax-tlv.h (module 'wimax'): ns3::ClassificationRuleVectorTlvValue [class]
module.add_class('ClassificationRuleVectorTlvValue', parent=root_module['ns3::VectorTlvValue'])
## wimax-tlv.h (module 'wimax'): ns3::ClassificationRuleVectorTlvValue::ClassificationRuleTlvType [enumeration]
module.add_enum('ClassificationRuleTlvType', ['Priority', 'ToS', 'Protocol', 'IP_src', 'IP_dst', 'Port_src', 'Port_dst', 'Index'], outer_class=root_module['ns3::ClassificationRuleVectorTlvValue'])
## wimax-tlv.h (module 'wimax'): ns3::CsParamVectorTlvValue [class]
module.add_class('CsParamVectorTlvValue', parent=root_module['ns3::VectorTlvValue'])
## wimax-tlv.h (module 'wimax'): ns3::CsParamVectorTlvValue::Type [enumeration]
module.add_enum('Type', ['Classifier_DSC_Action', 'Packet_Classification_Rule'], outer_class=root_module['ns3::CsParamVectorTlvValue'])
## header.h (module 'network'): ns3::Header [class]
module.add_class('Header', import_from_module='ns.network', parent=root_module['ns3::Chunk'])
## wimax-tlv.h (module 'wimax'): ns3::Ipv4AddressTlvValue [class]
module.add_class('Ipv4AddressTlvValue', parent=root_module['ns3::TlvValue'])
## wimax-tlv.h (module 'wimax'): ns3::Ipv4AddressTlvValue::ipv4Addr [struct]
module.add_class('ipv4Addr', outer_class=root_module['ns3::Ipv4AddressTlvValue'])
## wimax-mac-header.h (module 'wimax'): ns3::MacHeaderType [class]
module.add_class('MacHeaderType', parent=root_module['ns3::Header'])
## wimax-mac-header.h (module 'wimax'): ns3::MacHeaderType::HeaderType [enumeration]
module.add_enum('HeaderType', ['HEADER_TYPE_GENERIC', 'HEADER_TYPE_BANDWIDTH'], outer_class=root_module['ns3::MacHeaderType'])
## mac-messages.h (module 'wimax'): ns3::ManagementMessageType [class]
module.add_class('ManagementMessageType', parent=root_module['ns3::Header'])
## mac-messages.h (module 'wimax'): ns3::ManagementMessageType::MessageType [enumeration]
module.add_enum('MessageType', ['MESSAGE_TYPE_UCD', 'MESSAGE_TYPE_DCD', 'MESSAGE_TYPE_DL_MAP', 'MESSAGE_TYPE_UL_MAP', 'MESSAGE_TYPE_RNG_REQ', 'MESSAGE_TYPE_RNG_RSP', 'MESSAGE_TYPE_REG_REQ', 'MESSAGE_TYPE_REG_RSP', 'MESSAGE_TYPE_DSA_REQ', 'MESSAGE_TYPE_DSA_RSP', 'MESSAGE_TYPE_DSA_ACK'], outer_class=root_module['ns3::ManagementMessageType'])
## object.h (module 'core'): ns3::Object [class]
module.add_class('Object', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
## object.h (module 'core'): ns3::Object::AggregateIterator [class]
module.add_class('AggregateIterator', import_from_module='ns.core', outer_class=root_module['ns3::Object'])
## ofdm-downlink-frame-prefix.h (module 'wimax'): ns3::OfdmDownlinkFramePrefix [class]
module.add_class('OfdmDownlinkFramePrefix', parent=root_module['ns3::Header'])
## send-params.h (module 'wimax'): ns3::OfdmSendParams [class]
module.add_class('OfdmSendParams', parent=root_module['ns3::SendParams'])
## ul-mac-messages.h (module 'wimax'): ns3::OfdmUcdChannelEncodings [class]
module.add_class('OfdmUcdChannelEncodings', parent=root_module['ns3::UcdChannelEncodings'])
## packet-burst.h (module 'network'): ns3::PacketBurst [class]
module.add_class('PacketBurst', import_from_module='ns.network', parent=root_module['ns3::Object'])
## pcap-file-wrapper.h (module 'network'): ns3::PcapFileWrapper [class]
module.add_class('PcapFileWrapper', import_from_module='ns.network', parent=root_module['ns3::Object'])
## wimax-tlv.h (module 'wimax'): ns3::PortRangeTlvValue [class]
module.add_class('PortRangeTlvValue', parent=root_module['ns3::TlvValue'])
## wimax-tlv.h (module 'wimax'): ns3::PortRangeTlvValue::PortRange [struct]
module.add_class('PortRange', outer_class=root_module['ns3::PortRangeTlvValue'])
## ul-job.h (module 'wimax'): ns3::PriorityUlJob [class]
module.add_class('PriorityUlJob', parent=root_module['ns3::Object'])
## propagation-loss-model.h (module 'propagation'): ns3::PropagationLossModel [class]
module.add_class('PropagationLossModel', import_from_module='ns.propagation', parent=root_module['ns3::Object'])
## wimax-tlv.h (module 'wimax'): ns3::ProtocolTlvValue [class]
module.add_class('ProtocolTlvValue', parent=root_module['ns3::TlvValue'])
## propagation-loss-model.h (module 'propagation'): ns3::RandomPropagationLossModel [class]
module.add_class('RandomPropagationLossModel', import_from_module='ns.propagation', parent=root_module['ns3::PropagationLossModel'])
## random-variable-stream.h (module 'core'): ns3::RandomVariableStream [class]
module.add_class('RandomVariableStream', import_from_module='ns.core', parent=root_module['ns3::Object'])
## propagation-loss-model.h (module 'propagation'): ns3::RangePropagationLossModel [class]
module.add_class('RangePropagationLossModel', import_from_module='ns.propagation', parent=root_module['ns3::PropagationLossModel'])
## mac-messages.h (module 'wimax'): ns3::RngReq [class]
module.add_class('RngReq', parent=root_module['ns3::Header'])
## mac-messages.h (module 'wimax'): ns3::RngRsp [class]
module.add_class('RngRsp', parent=root_module['ns3::Header'])
## ss-manager.h (module 'wimax'): ns3::SSManager [class]
module.add_class('SSManager', parent=root_module['ns3::Object'])
## random-variable-stream.h (module 'core'): ns3::SequentialRandomVariable [class]
module.add_class('SequentialRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
## service-flow-manager.h (module 'wimax'): ns3::ServiceFlowManager [class]
module.add_class('ServiceFlowManager', parent=root_module['ns3::Object'])
## service-flow-manager.h (module 'wimax'): ns3::ServiceFlowManager::ConfirmationCode [enumeration]
module.add_enum('ConfirmationCode', ['CONFIRMATION_CODE_SUCCESS', 'CONFIRMATION_CODE_REJECT'], outer_class=root_module['ns3::ServiceFlowManager'])
## wimax-tlv.h (module 'wimax'): ns3::SfVectorTlvValue [class]
module.add_class('SfVectorTlvValue', parent=root_module['ns3::VectorTlvValue'])
## wimax-tlv.h (module 'wimax'): ns3::SfVectorTlvValue::Type [enumeration]
module.add_enum('Type', ['SFID', 'CID', 'Service_Class_Name', 'reserved1', 'QoS_Parameter_Set_Type', 'Traffic_Priority', 'Maximum_Sustained_Traffic_Rate', 'Maximum_Traffic_Burst', 'Minimum_Reserved_Traffic_Rate', 'Minimum_Tolerable_Traffic_Rate', 'Service_Flow_Scheduling_Type', 'Request_Transmission_Policy', 'Tolerated_Jitter', 'Maximum_Latency', 'Fixed_length_versus_Variable_length_SDU_Indicator', 'SDU_Size', 'Target_SAID', 'ARQ_Enable', 'ARQ_WINDOW_SIZE', 'ARQ_RETRY_TIMEOUT_Transmitter_Delay', 'ARQ_RETRY_TIMEOUT_Receiver_Delay', 'ARQ_BLOCK_LIFETIME', 'ARQ_SYNC_LOSS', 'ARQ_DELIVER_IN_ORDER', 'ARQ_PURGE_TIMEOUT', 'ARQ_BLOCK_SIZE', 'reserved2', 'CS_Specification', 'IPV4_CS_Parameters'], outer_class=root_module['ns3::SfVectorTlvValue'])
# SimpleRefCount template instantiations: reference-counted base classes
# wrapped with PyBindGen's Ref/Unref memory policy.
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeChecker', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeChecker>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeValue', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeValue>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::CallbackImplBase', 'ns3::empty', 'ns3::DefaultDeleter<ns3::CallbackImplBase>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::EventImpl', 'ns3::empty', 'ns3::DefaultDeleter<ns3::EventImpl>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::NixVector', 'ns3::empty', 'ns3::DefaultDeleter<ns3::NixVector>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::OutputStreamWrapper', 'ns3::empty', 'ns3::DefaultDeleter<ns3::OutputStreamWrapper>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Packet', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Packet>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::TraceSourceAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::TraceSourceAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## ss-service-flow-manager.h (module 'wimax'): ns3::SsServiceFlowManager [class]
module.add_class('SsServiceFlowManager', parent=root_module['ns3::ServiceFlowManager'])
## ss-service-flow-manager.h (module 'wimax'): ns3::SsServiceFlowManager::ConfirmationCode [enumeration]
module.add_enum('ConfirmationCode', ['CONFIRMATION_CODE_SUCCESS', 'CONFIRMATION_CODE_REJECT'], outer_class=root_module['ns3::SsServiceFlowManager'])
## propagation-loss-model.h (module 'propagation'): ns3::ThreeLogDistancePropagationLossModel [class]
module.add_class('ThreeLogDistancePropagationLossModel', import_from_module='ns.propagation', parent=root_module['ns3::PropagationLossModel'])
## nstime.h (module 'core'): ns3::Time [class]
module.add_class('Time', import_from_module='ns.core')
## nstime.h (module 'core'): ns3::Time::Unit [enumeration]
module.add_enum('Unit', ['S', 'MS', 'US', 'NS', 'PS', 'FS', 'LAST'], outer_class=root_module['ns3::Time'], import_from_module='ns.core')
## nstime.h (module 'core'): ns3::Time [class]
root_module['ns3::Time'].implicitly_converts_to(root_module['ns3::int64x64_t'])
## wimax-tlv.h (module 'wimax'): ns3::Tlv [class]
module.add_class('Tlv', parent=root_module['ns3::Header'])
## wimax-tlv.h (module 'wimax'): ns3::Tlv::CommonTypes [enumeration]
module.add_enum('CommonTypes', ['HMAC_TUPLE', 'MAC_VERSION_ENCODING', 'CURRENT_TRANSMIT_POWER', 'DOWNLINK_SERVICE_FLOW', 'UPLINK_SERVICE_FLOW', 'VENDOR_ID_EMCODING', 'VENDOR_SPECIFIC_INFORMATION'], outer_class=root_module['ns3::Tlv'])
## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor [class]
module.add_class('TraceSourceAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
## trailer.h (module 'network'): ns3::Trailer [class]
module.add_class('Trailer', import_from_module='ns.network', parent=root_module['ns3::Chunk'])
## random-variable-stream.h (module 'core'): ns3::TriangularRandomVariable [class]
module.add_class('TriangularRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
## propagation-loss-model.h (module 'propagation'): ns3::TwoRayGroundPropagationLossModel [class]
module.add_class('TwoRayGroundPropagationLossModel', import_from_module='ns.propagation', parent=root_module['ns3::PropagationLossModel'])
## ul-mac-messages.h (module 'wimax'): ns3::Ucd [class]
module.add_class('Ucd', parent=root_module['ns3::Header'])
## ul-job.h (module 'wimax'): ns3::UlJob [class]
module.add_class('UlJob', parent=root_module['ns3::Object'])
## ul-job.h (module 'wimax'): ns3::UlJob::JobPriority [enumeration]
module.add_enum('JobPriority', ['LOW', 'INTERMEDIATE', 'HIGH'], outer_class=root_module['ns3::UlJob'])
## ul-mac-messages.h (module 'wimax'): ns3::UlMap [class]
module.add_class('UlMap', parent=root_module['ns3::Header'])
## random-variable-stream.h (module 'core'): ns3::UniformRandomVariable [class]
module.add_class('UniformRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
## bs-uplink-scheduler.h (module 'wimax'): ns3::UplinkScheduler [class]
module.add_class('UplinkScheduler', parent=root_module['ns3::Object'])
## bs-uplink-scheduler-mbqos.h (module 'wimax'): ns3::UplinkSchedulerMBQoS [class]
module.add_class('UplinkSchedulerMBQoS', parent=root_module['ns3::UplinkScheduler'])
## bs-uplink-scheduler-rtps.h (module 'wimax'): ns3::UplinkSchedulerRtps [class]
module.add_class('UplinkSchedulerRtps', parent=root_module['ns3::UplinkScheduler'])
## bs-uplink-scheduler-simple.h (module 'wimax'): ns3::UplinkSchedulerSimple [class]
module.add_class('UplinkSchedulerSimple', parent=root_module['ns3::UplinkScheduler'])
## random-variable-stream.h (module 'core'): ns3::WeibullRandomVariable [class]
module.add_class('WeibullRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
## wimax-connection.h (module 'wimax'): ns3::WimaxConnection [class]
module.add_class('WimaxConnection', parent=root_module['ns3::Object'])
## wimax-mac-queue.h (module 'wimax'): ns3::WimaxMacQueue [class]
module.add_class('WimaxMacQueue', parent=root_module['ns3::Object'])
## wimax-mac-queue.h (module 'wimax'): ns3::WimaxMacQueue::QueueElement [struct]
module.add_class('QueueElement', outer_class=root_module['ns3::WimaxMacQueue'])
## wimax-mac-to-mac-header.h (module 'wimax'): ns3::WimaxMacToMacHeader [class]
module.add_class('WimaxMacToMacHeader', parent=root_module['ns3::Header'])
## wimax-phy.h (module 'wimax'): ns3::WimaxPhy [class]
module.add_class('WimaxPhy', parent=root_module['ns3::Object'])
## wimax-phy.h (module 'wimax'): ns3::WimaxPhy::ModulationType [enumeration]
module.add_enum('ModulationType', ['MODULATION_TYPE_BPSK_12', 'MODULATION_TYPE_QPSK_12', 'MODULATION_TYPE_QPSK_34', 'MODULATION_TYPE_QAM16_12', 'MODULATION_TYPE_QAM16_34', 'MODULATION_TYPE_QAM64_23', 'MODULATION_TYPE_QAM64_34'], outer_class=root_module['ns3::WimaxPhy'])
## wimax-phy.h (module 'wimax'): ns3::WimaxPhy::PhyState [enumeration]
module.add_enum('PhyState', ['PHY_STATE_IDLE', 'PHY_STATE_SCANNING', 'PHY_STATE_TX', 'PHY_STATE_RX'], outer_class=root_module['ns3::WimaxPhy'])
## wimax-phy.h (module 'wimax'): ns3::WimaxPhy::PhyType [enumeration]
module.add_enum('PhyType', ['SimpleWimaxPhy', 'simpleOfdmWimaxPhy'], outer_class=root_module['ns3::WimaxPhy'])
## random-variable-stream.h (module 'core'): ns3::ZetaRandomVariable [class]
module.add_class('ZetaRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
## random-variable-stream.h (module 'core'): ns3::ZipfRandomVariable [class]
module.add_class('ZipfRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
## attribute.h (module 'core'): ns3::AttributeAccessor [class]
module.add_class('AttributeAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
## attribute.h (module 'core'): ns3::AttributeChecker [class]
module.add_class('AttributeChecker', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
## attribute.h (module 'core'): ns3::AttributeValue [class]
module.add_class('AttributeValue', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
## bs-scheduler.h (module 'wimax'): ns3::BSScheduler [class]
module.add_class('BSScheduler', parent=root_module['ns3::Object'])
## bs-scheduler-rtps.h (module 'wimax'): ns3::BSSchedulerRtps [class]
module.add_class('BSSchedulerRtps', parent=root_module['ns3::BSScheduler'])
## bs-scheduler-simple.h (module 'wimax'): ns3::BSSchedulerSimple [class]
module.add_class('BSSchedulerSimple', parent=root_module['ns3::BSScheduler'])
## wimax-mac-header.h (module 'wimax'): ns3::BandwidthRequestHeader [class]
module.add_class('BandwidthRequestHeader', parent=root_module['ns3::Header'])
## wimax-mac-header.h (module 'wimax'): ns3::BandwidthRequestHeader::HeaderType [enumeration]
module.add_enum('HeaderType', ['HEADER_TYPE_INCREMENTAL', 'HEADER_TYPE_AGGREGATE'], outer_class=root_module['ns3::BandwidthRequestHeader'])
## bs-service-flow-manager.h (module 'wimax'): ns3::BsServiceFlowManager [class]
module.add_class('BsServiceFlowManager', parent=root_module['ns3::ServiceFlowManager'])
## bs-service-flow-manager.h (module 'wimax'): ns3::BsServiceFlowManager::ConfirmationCode [enumeration]
module.add_enum('ConfirmationCode', ['CONFIRMATION_CODE_SUCCESS', 'CONFIRMATION_CODE_REJECT'], outer_class=root_module['ns3::BsServiceFlowManager'])
## callback.h (module 'core'): ns3::CallbackChecker [class]
module.add_class('CallbackChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## callback.h (module 'core'): ns3::CallbackImplBase [class]
module.add_class('CallbackImplBase', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
## callback.h (module 'core'): ns3::CallbackValue [class]
module.add_class('CallbackValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## channel.h (module 'network'): ns3::Channel [class]
module.add_class('Channel', import_from_module='ns.network', parent=root_module['ns3::Object'])
## connection-manager.h (module 'wimax'): ns3::ConnectionManager [class]
module.add_class('ConnectionManager', parent=root_module['ns3::Object'])
## random-variable-stream.h (module 'core'): ns3::ConstantRandomVariable [class]
module.add_class('ConstantRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
## dl-mac-messages.h (module 'wimax'): ns3::Dcd [class]
module.add_class('Dcd', parent=root_module['ns3::Header'])
## random-variable-stream.h (module 'core'): ns3::DeterministicRandomVariable [class]
module.add_class('DeterministicRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
## dl-mac-messages.h (module 'wimax'): ns3::DlMap [class]
module.add_class('DlMap', parent=root_module['ns3::Header'])
## mac-messages.h (module 'wimax'): ns3::DsaAck [class]
module.add_class('DsaAck', parent=root_module['ns3::Header'])
## mac-messages.h (module 'wimax'): ns3::DsaReq [class]
module.add_class('DsaReq', parent=root_module['ns3::Header'])
## mac-messages.h (module 'wimax'): ns3::DsaRsp [class]
module.add_class('DsaRsp', parent=root_module['ns3::Header'])
## random-variable-stream.h (module 'core'): ns3::EmpiricalRandomVariable [class]
module.add_class('EmpiricalRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
## attribute.h (module 'core'): ns3::EmptyAttributeValue [class]
module.add_class('EmptyAttributeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## random-variable-stream.h (module 'core'): ns3::ErlangRandomVariable [class]
module.add_class('ErlangRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
## event-impl.h (module 'core'): ns3::EventImpl [class]
module.add_class('EventImpl', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >'])
## random-variable-stream.h (module 'core'): ns3::ExponentialRandomVariable [class]
module.add_class('ExponentialRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
## propagation-loss-model.h (module 'propagation'): ns3::FixedRssLossModel [class]
module.add_class('FixedRssLossModel', import_from_module='ns.propagation', parent=root_module['ns3::PropagationLossModel'])
## wimax-mac-header.h (module 'wimax'): ns3::FragmentationSubheader [class]
module.add_class('FragmentationSubheader', parent=root_module['ns3::Header'])
## propagation-loss-model.h (module 'propagation'): ns3::FriisPropagationLossModel [class]
module.add_class('FriisPropagationLossModel', import_from_module='ns.propagation', parent=root_module['ns3::PropagationLossModel'])
## random-variable-stream.h (module 'core'): ns3::GammaRandomVariable [class]
module.add_class('GammaRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
## wimax-mac-header.h (module 'wimax'): ns3::GenericMacHeader [class]
module.add_class('GenericMacHeader', parent=root_module['ns3::Header'])
## wimax-mac-header.h (module 'wimax'): ns3::GrantManagementSubheader [class]
module.add_class('GrantManagementSubheader', parent=root_module['ns3::Header'])
## ipcs-classifier.h (module 'wimax'): ns3::IpcsClassifier [class]
module.add_class('IpcsClassifier', parent=root_module['ns3::Object'])
## ipv4-address.h (module 'network'): ns3::Ipv4AddressChecker [class]
module.add_class('Ipv4AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue [class]
module.add_class('Ipv4AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
## ipv4-address.h (module 'network'): ns3::Ipv4MaskChecker [class]
module.add_class('Ipv4MaskChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue [class]
module.add_class('Ipv4MaskValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
## ipv6-address.h (module 'network'): ns3::Ipv6AddressChecker [class]
module.add_class('Ipv6AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue [class]
module.add_class('Ipv6AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
## ipv6-address.h (module 'network'): ns3::Ipv6PrefixChecker [class]
module.add_class('Ipv6PrefixChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue [class]
module.add_class('Ipv6PrefixValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
## propagation-loss-model.h (module 'propagation'): ns3::LogDistancePropagationLossModel [class]
module.add_class('LogDistancePropagationLossModel', import_from_module='ns.propagation', parent=root_module['ns3::PropagationLossModel'])
## random-variable-stream.h (module 'core'): ns3::LogNormalRandomVariable [class]
module.add_class('LogNormalRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
## mac48-address.h (module 'network'): ns3::Mac48AddressChecker [class]
module.add_class('Mac48AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## mac48-address.h (module 'network'): ns3::Mac48AddressValue [class]
module.add_class('Mac48AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
## propagation-loss-model.h (module 'propagation'): ns3::MatrixPropagationLossModel [class]
module.add_class('MatrixPropagationLossModel', import_from_module='ns.propagation', parent=root_module['ns3::PropagationLossModel'])
## propagation-loss-model.h (module 'propagation'): ns3::NakagamiPropagationLossModel [class]
module.add_class('NakagamiPropagationLossModel', import_from_module='ns.propagation', parent=root_module['ns3::PropagationLossModel'])
## net-device.h (module 'network'): ns3::NetDevice [class]
module.add_class('NetDevice', import_from_module='ns.network', parent=root_module['ns3::Object'])
## net-device.h (module 'network'): ns3::NetDevice::PacketType [enumeration]
module.add_enum('PacketType', ['PACKET_HOST', 'NS3_PACKET_HOST', 'PACKET_BROADCAST', 'NS3_PACKET_BROADCAST', 'PACKET_MULTICAST', 'NS3_PACKET_MULTICAST', 'PACKET_OTHERHOST', 'NS3_PACKET_OTHERHOST'], outer_class=root_module['ns3::NetDevice'], import_from_module='ns.network')
## nix-vector.h (module 'network'): ns3::NixVector [class]
module.add_class('NixVector', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >'])
## node.h (module 'network'): ns3::Node [class]
module.add_class('Node', import_from_module='ns.network', parent=root_module['ns3::Object'])
## random-variable-stream.h (module 'core'): ns3::NormalRandomVariable [class]
module.add_class('NormalRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
## object-factory.h (module 'core'): ns3::ObjectFactoryChecker [class]
module.add_class('ObjectFactoryChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## object-factory.h (module 'core'): ns3::ObjectFactoryValue [class]
module.add_class('ObjectFactoryValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## output-stream-wrapper.h (module 'network'): ns3::OutputStreamWrapper [class]
module.add_class('OutputStreamWrapper', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> >'])
## packet.h (module 'network'): ns3::Packet [class]
module.add_class('Packet', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >'])
## random-variable-stream.h (module 'core'): ns3::ParetoRandomVariable [class]
module.add_class('ParetoRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
## simple-ofdm-wimax-phy.h (module 'wimax'): ns3::SimpleOfdmWimaxPhy [class]
module.add_class('SimpleOfdmWimaxPhy', parent=root_module['ns3::WimaxPhy'])
## simple-ofdm-wimax-phy.h (module 'wimax'): ns3::SimpleOfdmWimaxPhy::FrameDurationCode [enumeration]
module.add_enum('FrameDurationCode', ['FRAME_DURATION_2_POINT_5_MS', 'FRAME_DURATION_4_MS', 'FRAME_DURATION_5_MS', 'FRAME_DURATION_8_MS', 'FRAME_DURATION_10_MS', 'FRAME_DURATION_12_POINT_5_MS', 'FRAME_DURATION_20_MS'], outer_class=root_module['ns3::SimpleOfdmWimaxPhy'])
## nstime.h (module 'core'): ns3::TimeChecker [class]
module.add_class('TimeChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## nstime.h (module 'core'): ns3::TimeValue [class]
module.add_class('TimeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## type-id.h (module 'core'): ns3::TypeIdChecker [class]
module.add_class('TypeIdChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## type-id.h (module 'core'): ns3::TypeIdValue [class]
module.add_class('TypeIdValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## uinteger.h (module 'core'): ns3::UintegerValue [class]
module.add_class('UintegerValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## wimax-channel.h (module 'wimax'): ns3::WimaxChannel [class]
module.add_class('WimaxChannel', parent=root_module['ns3::Channel'])
## wimax-net-device.h (module 'wimax'): ns3::WimaxNetDevice [class]
module.add_class('WimaxNetDevice', parent=root_module['ns3::NetDevice'])
## wimax-net-device.h (module 'wimax'): ns3::WimaxNetDevice::Direction [enumeration]
module.add_enum('Direction', ['DIRECTION_DOWNLINK', 'DIRECTION_UPLINK'], outer_class=root_module['ns3::WimaxNetDevice'])
## wimax-net-device.h (module 'wimax'): ns3::WimaxNetDevice::RangingStatus [enumeration]
module.add_enum('RangingStatus', ['RANGING_STATUS_EXPIRED', 'RANGING_STATUS_CONTINUE', 'RANGING_STATUS_ABORT', 'RANGING_STATUS_SUCCESS'], outer_class=root_module['ns3::WimaxNetDevice'])
## address.h (module 'network'): ns3::AddressChecker [class]
module.add_class('AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## address.h (module 'network'): ns3::AddressValue [class]
module.add_class('AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
## bs-net-device.h (module 'wimax'): ns3::BaseStationNetDevice [class]
module.add_class('BaseStationNetDevice', parent=root_module['ns3::WimaxNetDevice'])
## bs-net-device.h (module 'wimax'): ns3::BaseStationNetDevice::State [enumeration]
module.add_enum('State', ['BS_STATE_DL_SUB_FRAME', 'BS_STATE_UL_SUB_FRAME', 'BS_STATE_TTG', 'BS_STATE_RTG'], outer_class=root_module['ns3::BaseStationNetDevice'])
## bs-net-device.h (module 'wimax'): ns3::BaseStationNetDevice::MacPreamble [enumeration]
module.add_enum('MacPreamble', ['SHORT_PREAMBLE', 'LONG_PREAMBLE'], outer_class=root_module['ns3::BaseStationNetDevice'])
## simple-ofdm-wimax-channel.h (module 'wimax'): ns3::SimpleOfdmWimaxChannel [class]
module.add_class('SimpleOfdmWimaxChannel', parent=root_module['ns3::WimaxChannel'])
## simple-ofdm-wimax-channel.h (module 'wimax'): ns3::SimpleOfdmWimaxChannel::PropModel [enumeration]
module.add_enum('PropModel', ['RANDOM_PROPAGATION', 'FRIIS_PROPAGATION', 'LOG_DISTANCE_PROPAGATION', 'COST231_PROPAGATION'], outer_class=root_module['ns3::SimpleOfdmWimaxChannel'])
## ss-net-device.h (module 'wimax'): ns3::SubscriberStationNetDevice [class]
module.add_class('SubscriberStationNetDevice', parent=root_module['ns3::WimaxNetDevice'])
## ss-net-device.h (module 'wimax'): ns3::SubscriberStationNetDevice::State [enumeration]
module.add_enum('State', ['SS_STATE_IDLE', 'SS_STATE_SCANNING', 'SS_STATE_SYNCHRONIZING', 'SS_STATE_ACQUIRING_PARAMETERS', 'SS_STATE_WAITING_REG_RANG_INTRVL', 'SS_STATE_WAITING_INV_RANG_INTRVL', 'SS_STATE_WAITING_RNG_RSP', 'SS_STATE_ADJUSTING_PARAMETERS', 'SS_STATE_REGISTERED', 'SS_STATE_TRANSMITTING', 'SS_STATE_STOPPED'], outer_class=root_module['ns3::SubscriberStationNetDevice'])
## ss-net-device.h (module 'wimax'): ns3::SubscriberStationNetDevice::EventType [enumeration]
module.add_enum('EventType', ['EVENT_NONE', 'EVENT_WAIT_FOR_RNG_RSP', 'EVENT_DL_MAP_SYNC_TIMEOUT', 'EVENT_LOST_DL_MAP', 'EVENT_LOST_UL_MAP', 'EVENT_DCD_WAIT_TIMEOUT', 'EVENT_UCD_WAIT_TIMEOUT', 'EVENT_RANG_OPP_WAIT_TIMEOUT'], outer_class=root_module['ns3::SubscriberStationNetDevice'])
# STL container registrations used in the wimax public API signatures.
module.add_container('std::vector< ns3::ServiceFlow * >', 'ns3::ServiceFlow *', container_type='vector')
module.add_container('std::vector< bool >', 'bool', container_type='vector')
module.add_container('ns3::bvec', 'bool', container_type='vector')
module.add_container('std::vector< ns3::DlFramePrefixIe >', 'ns3::DlFramePrefixIe', container_type='vector')
module.add_container('std::list< ns3::Ptr< ns3::Packet > >', 'ns3::Ptr< ns3::Packet >', container_type='list')
module.add_container('std::vector< ns3::SSRecord * >', 'ns3::SSRecord *', container_type='vector')
module.add_container('std::vector< ns3::OfdmUlBurstProfile >', 'ns3::OfdmUlBurstProfile', container_type='vector')
module.add_container('std::list< ns3::OfdmUlMapIe >', 'ns3::OfdmUlMapIe', container_type='list')
module.add_container('std::list< ns3::Ptr< ns3::UlJob > >', 'ns3::Ptr< ns3::UlJob >', container_type='list')
module.add_container('std::list< ns3::Ptr< ns3::Packet const > >', 'ns3::Ptr< ns3::Packet const >', container_type='list')
module.add_container('std::deque< ns3::WimaxMacQueue::QueueElement >', 'ns3::WimaxMacQueue::QueueElement', container_type='dequeue')
module.add_container('std::list< std::pair< ns3::OfdmDlMapIe *, ns3::Ptr< ns3::PacketBurst > > >', 'std::pair< ns3::OfdmDlMapIe *, ns3::Ptr< ns3::PacketBurst > >', container_type='list')
module.add_container('std::vector< ns3::Ptr< ns3::WimaxConnection > >', 'ns3::Ptr< ns3::WimaxConnection >', container_type='vector')
module.add_container('std::vector< ns3::OfdmDlBurstProfile >', 'ns3::OfdmDlBurstProfile', container_type='vector')
module.add_container('std::list< ns3::OfdmDlMapIe >', 'ns3::OfdmDlMapIe', container_type='list')
typehandlers.add_type_alias('void ( * ) ( std::ostream & ) *', 'ns3::LogNodePrinter')
typehandlers.add_type_alias('void ( * ) ( std::ostream & ) **', 'ns3::LogNodePrinter*')
typehandlers.add_type_alias('void ( * ) ( std::ostream & ) *&', 'ns3::LogNodePrinter&')
typehandlers.add_type_alias('void ( * ) ( std::ostream & ) *', 'ns3::LogTimePrinter')
typehandlers.add_type_alias('void ( * ) ( std::ostream & ) **', 'ns3::LogTimePrinter*')
typehandlers.add_type_alias('void ( * ) ( std::ostream & ) *&', 'ns3::LogTimePrinter&')
typehandlers.add_type_alias('std::vector< bool, std::allocator< bool > >', 'ns3::bvec')
typehandlers.add_type_alias('std::vector< bool, std::allocator< bool > >*', 'ns3::bvec*')
typehandlers.add_type_alias('std::vector< bool, std::allocator< bool > >&', 'ns3::bvec&')
## Register a nested module for the namespace FatalImpl
nested_module = module.add_cpp_namespace('FatalImpl')
register_types_ns3_FatalImpl(nested_module)
## Register a nested module for the namespace internal
nested_module = module.add_cpp_namespace('internal')
register_types_ns3_internal(nested_module)
def register_types_ns3_FatalImpl(module):
    """Register wrapped types for the ns3::FatalImpl namespace.

    The generated bindings expose no public types in this namespace, so
    this hook only fetches the root module, mirroring the shape of the
    other register_types_* helpers.
    """
    module.get_root()
def register_types_ns3_internal(module):
    """Register wrapped types for the ns3::internal namespace.

    No public types are generated for this namespace; the call to
    get_root() keeps this hook structurally identical to its siblings.
    """
    module.get_root()
def register_methods(root_module):
    """Dispatch method registration for every wrapped class.

    Auto-generated by PyBindGen: one call per wrapped type, passing the
    class wrapper previously created under its fully qualified C++ name
    (e.g. 'ns3::Address') during type registration. Do not edit by hand;
    the call order follows the generator's class-registration order.
    """
    register_Ns3Address_methods(root_module, root_module['ns3::Address'])
    register_Ns3AsciiTraceHelper_methods(root_module, root_module['ns3::AsciiTraceHelper'])
    register_Ns3AsciiTraceHelperForDevice_methods(root_module, root_module['ns3::AsciiTraceHelperForDevice'])
    register_Ns3AttributeConstructionList_methods(root_module, root_module['ns3::AttributeConstructionList'])
    register_Ns3AttributeConstructionListItem_methods(root_module, root_module['ns3::AttributeConstructionList::Item'])
    register_Ns3Buffer_methods(root_module, root_module['ns3::Buffer'])
    register_Ns3BufferIterator_methods(root_module, root_module['ns3::Buffer::Iterator'])
    register_Ns3ByteTagIterator_methods(root_module, root_module['ns3::ByteTagIterator'])
    register_Ns3ByteTagIteratorItem_methods(root_module, root_module['ns3::ByteTagIterator::Item'])
    register_Ns3ByteTagList_methods(root_module, root_module['ns3::ByteTagList'])
    register_Ns3ByteTagListIterator_methods(root_module, root_module['ns3::ByteTagList::Iterator'])
    register_Ns3ByteTagListIteratorItem_methods(root_module, root_module['ns3::ByteTagList::Iterator::Item'])
    register_Ns3CallbackBase_methods(root_module, root_module['ns3::CallbackBase'])
    register_Ns3Cid_methods(root_module, root_module['ns3::Cid'])
    register_Ns3CidFactory_methods(root_module, root_module['ns3::CidFactory'])
    register_Ns3CsParameters_methods(root_module, root_module['ns3::CsParameters'])
    register_Ns3DcdChannelEncodings_methods(root_module, root_module['ns3::DcdChannelEncodings'])
    register_Ns3DlFramePrefixIe_methods(root_module, root_module['ns3::DlFramePrefixIe'])
    register_Ns3EventId_methods(root_module, root_module['ns3::EventId'])
    register_Ns3IpcsClassifierRecord_methods(root_module, root_module['ns3::IpcsClassifierRecord'])
    register_Ns3Ipv4Address_methods(root_module, root_module['ns3::Ipv4Address'])
    register_Ns3Ipv4Mask_methods(root_module, root_module['ns3::Ipv4Mask'])
    register_Ns3Ipv6Address_methods(root_module, root_module['ns3::Ipv6Address'])
    register_Ns3Ipv6Prefix_methods(root_module, root_module['ns3::Ipv6Prefix'])
    register_Ns3LogComponent_methods(root_module, root_module['ns3::LogComponent'])
    register_Ns3Mac48Address_methods(root_module, root_module['ns3::Mac48Address'])
    register_Ns3NetDeviceContainer_methods(root_module, root_module['ns3::NetDeviceContainer'])
    register_Ns3NodeContainer_methods(root_module, root_module['ns3::NodeContainer'])
    register_Ns3ObjectBase_methods(root_module, root_module['ns3::ObjectBase'])
    register_Ns3ObjectDeleter_methods(root_module, root_module['ns3::ObjectDeleter'])
    register_Ns3ObjectFactory_methods(root_module, root_module['ns3::ObjectFactory'])
    register_Ns3OfdmDcdChannelEncodings_methods(root_module, root_module['ns3::OfdmDcdChannelEncodings'])
    register_Ns3OfdmDlBurstProfile_methods(root_module, root_module['ns3::OfdmDlBurstProfile'])
    register_Ns3OfdmDlMapIe_methods(root_module, root_module['ns3::OfdmDlMapIe'])
    register_Ns3OfdmUlBurstProfile_methods(root_module, root_module['ns3::OfdmUlBurstProfile'])
    register_Ns3OfdmUlMapIe_methods(root_module, root_module['ns3::OfdmUlMapIe'])
    register_Ns3PacketMetadata_methods(root_module, root_module['ns3::PacketMetadata'])
    register_Ns3PacketMetadataItem_methods(root_module, root_module['ns3::PacketMetadata::Item'])
    register_Ns3PacketMetadataItemIterator_methods(root_module, root_module['ns3::PacketMetadata::ItemIterator'])
    register_Ns3PacketTagIterator_methods(root_module, root_module['ns3::PacketTagIterator'])
    register_Ns3PacketTagIteratorItem_methods(root_module, root_module['ns3::PacketTagIterator::Item'])
    register_Ns3PacketTagList_methods(root_module, root_module['ns3::PacketTagList'])
    register_Ns3PacketTagListTagData_methods(root_module, root_module['ns3::PacketTagList::TagData'])
    register_Ns3PcapFile_methods(root_module, root_module['ns3::PcapFile'])
    register_Ns3PcapHelper_methods(root_module, root_module['ns3::PcapHelper'])
    register_Ns3PcapHelperForDevice_methods(root_module, root_module['ns3::PcapHelperForDevice'])
    register_Ns3SNRToBlockErrorRateManager_methods(root_module, root_module['ns3::SNRToBlockErrorRateManager'])
    register_Ns3SNRToBlockErrorRateRecord_methods(root_module, root_module['ns3::SNRToBlockErrorRateRecord'])
    register_Ns3SSRecord_methods(root_module, root_module['ns3::SSRecord'])
    register_Ns3SendParams_methods(root_module, root_module['ns3::SendParams'])
    register_Ns3ServiceFlow_methods(root_module, root_module['ns3::ServiceFlow'])
    register_Ns3ServiceFlowRecord_methods(root_module, root_module['ns3::ServiceFlowRecord'])
    register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
    register_Ns3Simulator_methods(root_module, root_module['ns3::Simulator'])
    register_Ns3Tag_methods(root_module, root_module['ns3::Tag'])
    register_Ns3TagBuffer_methods(root_module, root_module['ns3::TagBuffer'])
    register_Ns3TlvValue_methods(root_module, root_module['ns3::TlvValue'])
    register_Ns3TosTlvValue_methods(root_module, root_module['ns3::TosTlvValue'])
    register_Ns3TypeId_methods(root_module, root_module['ns3::TypeId'])
    register_Ns3TypeIdAttributeInformation_methods(root_module, root_module['ns3::TypeId::AttributeInformation'])
    register_Ns3TypeIdTraceSourceInformation_methods(root_module, root_module['ns3::TypeId::TraceSourceInformation'])
    register_Ns3U16TlvValue_methods(root_module, root_module['ns3::U16TlvValue'])
    register_Ns3U32TlvValue_methods(root_module, root_module['ns3::U32TlvValue'])
    register_Ns3U8TlvValue_methods(root_module, root_module['ns3::U8TlvValue'])
    register_Ns3UcdChannelEncodings_methods(root_module, root_module['ns3::UcdChannelEncodings'])
    register_Ns3VectorTlvValue_methods(root_module, root_module['ns3::VectorTlvValue'])
    register_Ns3WimaxHelper_methods(root_module, root_module['ns3::WimaxHelper'])
    register_Ns3Empty_methods(root_module, root_module['ns3::empty'])
    register_Ns3Int64x64_t_methods(root_module, root_module['ns3::int64x64_t'])
    register_Ns3SimpleOfdmSendParam_methods(root_module, root_module['ns3::simpleOfdmSendParam'])
    register_Ns3Chunk_methods(root_module, root_module['ns3::Chunk'])
    register_Ns3ClassificationRuleVectorTlvValue_methods(root_module, root_module['ns3::ClassificationRuleVectorTlvValue'])
    register_Ns3CsParamVectorTlvValue_methods(root_module, root_module['ns3::CsParamVectorTlvValue'])
    register_Ns3Header_methods(root_module, root_module['ns3::Header'])
    register_Ns3Ipv4AddressTlvValue_methods(root_module, root_module['ns3::Ipv4AddressTlvValue'])
    register_Ns3Ipv4AddressTlvValueIpv4Addr_methods(root_module, root_module['ns3::Ipv4AddressTlvValue::ipv4Addr'])
    register_Ns3MacHeaderType_methods(root_module, root_module['ns3::MacHeaderType'])
    register_Ns3ManagementMessageType_methods(root_module, root_module['ns3::ManagementMessageType'])
    register_Ns3Object_methods(root_module, root_module['ns3::Object'])
    register_Ns3ObjectAggregateIterator_methods(root_module, root_module['ns3::Object::AggregateIterator'])
    register_Ns3OfdmDownlinkFramePrefix_methods(root_module, root_module['ns3::OfdmDownlinkFramePrefix'])
    register_Ns3OfdmSendParams_methods(root_module, root_module['ns3::OfdmSendParams'])
    register_Ns3OfdmUcdChannelEncodings_methods(root_module, root_module['ns3::OfdmUcdChannelEncodings'])
    register_Ns3PacketBurst_methods(root_module, root_module['ns3::PacketBurst'])
    register_Ns3PcapFileWrapper_methods(root_module, root_module['ns3::PcapFileWrapper'])
    register_Ns3PortRangeTlvValue_methods(root_module, root_module['ns3::PortRangeTlvValue'])
    register_Ns3PortRangeTlvValuePortRange_methods(root_module, root_module['ns3::PortRangeTlvValue::PortRange'])
    register_Ns3PriorityUlJob_methods(root_module, root_module['ns3::PriorityUlJob'])
    register_Ns3PropagationLossModel_methods(root_module, root_module['ns3::PropagationLossModel'])
    register_Ns3ProtocolTlvValue_methods(root_module, root_module['ns3::ProtocolTlvValue'])
    register_Ns3RandomPropagationLossModel_methods(root_module, root_module['ns3::RandomPropagationLossModel'])
    register_Ns3RandomVariableStream_methods(root_module, root_module['ns3::RandomVariableStream'])
    register_Ns3RangePropagationLossModel_methods(root_module, root_module['ns3::RangePropagationLossModel'])
    register_Ns3RngReq_methods(root_module, root_module['ns3::RngReq'])
    register_Ns3RngRsp_methods(root_module, root_module['ns3::RngRsp'])
    register_Ns3SSManager_methods(root_module, root_module['ns3::SSManager'])
    register_Ns3SequentialRandomVariable_methods(root_module, root_module['ns3::SequentialRandomVariable'])
    register_Ns3ServiceFlowManager_methods(root_module, root_module['ns3::ServiceFlowManager'])
    register_Ns3SfVectorTlvValue_methods(root_module, root_module['ns3::SfVectorTlvValue'])
    register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
    register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
    register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
    register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
    register_Ns3SimpleRefCount__Ns3EventImpl_Ns3Empty_Ns3DefaultDeleter__lt__ns3EventImpl__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >'])
    register_Ns3SimpleRefCount__Ns3NixVector_Ns3Empty_Ns3DefaultDeleter__lt__ns3NixVector__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >'])
    register_Ns3SimpleRefCount__Ns3OutputStreamWrapper_Ns3Empty_Ns3DefaultDeleter__lt__ns3OutputStreamWrapper__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> >'])
    register_Ns3SimpleRefCount__Ns3Packet_Ns3Empty_Ns3DefaultDeleter__lt__ns3Packet__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >'])
    register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
    register_Ns3SsServiceFlowManager_methods(root_module, root_module['ns3::SsServiceFlowManager'])
    register_Ns3ThreeLogDistancePropagationLossModel_methods(root_module, root_module['ns3::ThreeLogDistancePropagationLossModel'])
    register_Ns3Time_methods(root_module, root_module['ns3::Time'])
    register_Ns3Tlv_methods(root_module, root_module['ns3::Tlv'])
    register_Ns3TraceSourceAccessor_methods(root_module, root_module['ns3::TraceSourceAccessor'])
    register_Ns3Trailer_methods(root_module, root_module['ns3::Trailer'])
    register_Ns3TriangularRandomVariable_methods(root_module, root_module['ns3::TriangularRandomVariable'])
    register_Ns3TwoRayGroundPropagationLossModel_methods(root_module, root_module['ns3::TwoRayGroundPropagationLossModel'])
    register_Ns3Ucd_methods(root_module, root_module['ns3::Ucd'])
    register_Ns3UlJob_methods(root_module, root_module['ns3::UlJob'])
    register_Ns3UlMap_methods(root_module, root_module['ns3::UlMap'])
    register_Ns3UniformRandomVariable_methods(root_module, root_module['ns3::UniformRandomVariable'])
    register_Ns3UplinkScheduler_methods(root_module, root_module['ns3::UplinkScheduler'])
    register_Ns3UplinkSchedulerMBQoS_methods(root_module, root_module['ns3::UplinkSchedulerMBQoS'])
    register_Ns3UplinkSchedulerRtps_methods(root_module, root_module['ns3::UplinkSchedulerRtps'])
    register_Ns3UplinkSchedulerSimple_methods(root_module, root_module['ns3::UplinkSchedulerSimple'])
    register_Ns3WeibullRandomVariable_methods(root_module, root_module['ns3::WeibullRandomVariable'])
    register_Ns3WimaxConnection_methods(root_module, root_module['ns3::WimaxConnection'])
    register_Ns3WimaxMacQueue_methods(root_module, root_module['ns3::WimaxMacQueue'])
    register_Ns3WimaxMacQueueQueueElement_methods(root_module, root_module['ns3::WimaxMacQueue::QueueElement'])
    register_Ns3WimaxMacToMacHeader_methods(root_module, root_module['ns3::WimaxMacToMacHeader'])
    register_Ns3WimaxPhy_methods(root_module, root_module['ns3::WimaxPhy'])
    register_Ns3ZetaRandomVariable_methods(root_module, root_module['ns3::ZetaRandomVariable'])
    register_Ns3ZipfRandomVariable_methods(root_module, root_module['ns3::ZipfRandomVariable'])
    register_Ns3AttributeAccessor_methods(root_module, root_module['ns3::AttributeAccessor'])
    register_Ns3AttributeChecker_methods(root_module, root_module['ns3::AttributeChecker'])
    register_Ns3AttributeValue_methods(root_module, root_module['ns3::AttributeValue'])
    register_Ns3BSScheduler_methods(root_module, root_module['ns3::BSScheduler'])
    register_Ns3BSSchedulerRtps_methods(root_module, root_module['ns3::BSSchedulerRtps'])
    register_Ns3BSSchedulerSimple_methods(root_module, root_module['ns3::BSSchedulerSimple'])
    register_Ns3BandwidthRequestHeader_methods(root_module, root_module['ns3::BandwidthRequestHeader'])
    register_Ns3BsServiceFlowManager_methods(root_module, root_module['ns3::BsServiceFlowManager'])
    register_Ns3CallbackChecker_methods(root_module, root_module['ns3::CallbackChecker'])
    register_Ns3CallbackImplBase_methods(root_module, root_module['ns3::CallbackImplBase'])
    register_Ns3CallbackValue_methods(root_module, root_module['ns3::CallbackValue'])
    register_Ns3Channel_methods(root_module, root_module['ns3::Channel'])
    register_Ns3ConnectionManager_methods(root_module, root_module['ns3::ConnectionManager'])
    register_Ns3ConstantRandomVariable_methods(root_module, root_module['ns3::ConstantRandomVariable'])
    register_Ns3Dcd_methods(root_module, root_module['ns3::Dcd'])
    register_Ns3DeterministicRandomVariable_methods(root_module, root_module['ns3::DeterministicRandomVariable'])
    register_Ns3DlMap_methods(root_module, root_module['ns3::DlMap'])
    register_Ns3DsaAck_methods(root_module, root_module['ns3::DsaAck'])
    register_Ns3DsaReq_methods(root_module, root_module['ns3::DsaReq'])
    register_Ns3DsaRsp_methods(root_module, root_module['ns3::DsaRsp'])
    register_Ns3EmpiricalRandomVariable_methods(root_module, root_module['ns3::EmpiricalRandomVariable'])
    register_Ns3EmptyAttributeValue_methods(root_module, root_module['ns3::EmptyAttributeValue'])
    register_Ns3ErlangRandomVariable_methods(root_module, root_module['ns3::ErlangRandomVariable'])
    register_Ns3EventImpl_methods(root_module, root_module['ns3::EventImpl'])
    register_Ns3ExponentialRandomVariable_methods(root_module, root_module['ns3::ExponentialRandomVariable'])
    register_Ns3FixedRssLossModel_methods(root_module, root_module['ns3::FixedRssLossModel'])
    register_Ns3FragmentationSubheader_methods(root_module, root_module['ns3::FragmentationSubheader'])
    register_Ns3FriisPropagationLossModel_methods(root_module, root_module['ns3::FriisPropagationLossModel'])
    register_Ns3GammaRandomVariable_methods(root_module, root_module['ns3::GammaRandomVariable'])
    register_Ns3GenericMacHeader_methods(root_module, root_module['ns3::GenericMacHeader'])
    register_Ns3GrantManagementSubheader_methods(root_module, root_module['ns3::GrantManagementSubheader'])
    register_Ns3IpcsClassifier_methods(root_module, root_module['ns3::IpcsClassifier'])
    register_Ns3Ipv4AddressChecker_methods(root_module, root_module['ns3::Ipv4AddressChecker'])
    register_Ns3Ipv4AddressValue_methods(root_module, root_module['ns3::Ipv4AddressValue'])
    register_Ns3Ipv4MaskChecker_methods(root_module, root_module['ns3::Ipv4MaskChecker'])
    register_Ns3Ipv4MaskValue_methods(root_module, root_module['ns3::Ipv4MaskValue'])
    register_Ns3Ipv6AddressChecker_methods(root_module, root_module['ns3::Ipv6AddressChecker'])
    register_Ns3Ipv6AddressValue_methods(root_module, root_module['ns3::Ipv6AddressValue'])
    register_Ns3Ipv6PrefixChecker_methods(root_module, root_module['ns3::Ipv6PrefixChecker'])
    register_Ns3Ipv6PrefixValue_methods(root_module, root_module['ns3::Ipv6PrefixValue'])
    register_Ns3LogDistancePropagationLossModel_methods(root_module, root_module['ns3::LogDistancePropagationLossModel'])
    register_Ns3LogNormalRandomVariable_methods(root_module, root_module['ns3::LogNormalRandomVariable'])
    register_Ns3Mac48AddressChecker_methods(root_module, root_module['ns3::Mac48AddressChecker'])
    register_Ns3Mac48AddressValue_methods(root_module, root_module['ns3::Mac48AddressValue'])
    register_Ns3MatrixPropagationLossModel_methods(root_module, root_module['ns3::MatrixPropagationLossModel'])
    register_Ns3NakagamiPropagationLossModel_methods(root_module, root_module['ns3::NakagamiPropagationLossModel'])
    register_Ns3NetDevice_methods(root_module, root_module['ns3::NetDevice'])
    register_Ns3NixVector_methods(root_module, root_module['ns3::NixVector'])
    register_Ns3Node_methods(root_module, root_module['ns3::Node'])
    register_Ns3NormalRandomVariable_methods(root_module, root_module['ns3::NormalRandomVariable'])
    register_Ns3ObjectFactoryChecker_methods(root_module, root_module['ns3::ObjectFactoryChecker'])
    register_Ns3ObjectFactoryValue_methods(root_module, root_module['ns3::ObjectFactoryValue'])
    register_Ns3OutputStreamWrapper_methods(root_module, root_module['ns3::OutputStreamWrapper'])
    register_Ns3Packet_methods(root_module, root_module['ns3::Packet'])
    register_Ns3ParetoRandomVariable_methods(root_module, root_module['ns3::ParetoRandomVariable'])
    register_Ns3SimpleOfdmWimaxPhy_methods(root_module, root_module['ns3::SimpleOfdmWimaxPhy'])
    register_Ns3TimeChecker_methods(root_module, root_module['ns3::TimeChecker'])
    register_Ns3TimeValue_methods(root_module, root_module['ns3::TimeValue'])
    register_Ns3TypeIdChecker_methods(root_module, root_module['ns3::TypeIdChecker'])
    register_Ns3TypeIdValue_methods(root_module, root_module['ns3::TypeIdValue'])
    register_Ns3UintegerValue_methods(root_module, root_module['ns3::UintegerValue'])
    register_Ns3WimaxChannel_methods(root_module, root_module['ns3::WimaxChannel'])
    register_Ns3WimaxNetDevice_methods(root_module, root_module['ns3::WimaxNetDevice'])
    register_Ns3AddressChecker_methods(root_module, root_module['ns3::AddressChecker'])
    register_Ns3AddressValue_methods(root_module, root_module['ns3::AddressValue'])
    register_Ns3BaseStationNetDevice_methods(root_module, root_module['ns3::BaseStationNetDevice'])
    register_Ns3SimpleOfdmWimaxChannel_methods(root_module, root_module['ns3::SimpleOfdmWimaxChannel'])
    register_Ns3SubscriberStationNetDevice_methods(root_module, root_module['ns3::SubscriberStationNetDevice'])
    return
def register_Ns3Address_methods(root_module, cls):
    """Register Python bindings for ns3::Address (comparison operators,
    constructors, and member functions).

    Auto-generated by PyBindGen from the scanned address.h header; the
    '##' comments record the originating C++ declarations. Do not edit
    by hand.
    """
    cls.add_binary_comparison_operator('!=')
    cls.add_binary_comparison_operator('<')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    ## address.h (module 'network'): ns3::Address::Address() [constructor]
    cls.add_constructor([])
    ## address.h (module 'network'): ns3::Address::Address(uint8_t type, uint8_t const * buffer, uint8_t len) [constructor]
    cls.add_constructor([param('uint8_t', 'type'), param('uint8_t const *', 'buffer'), param('uint8_t', 'len')])
    ## address.h (module 'network'): ns3::Address::Address(ns3::Address const & address) [copy constructor]
    cls.add_constructor([param('ns3::Address const &', 'address')])
    ## address.h (module 'network'): bool ns3::Address::CheckCompatible(uint8_t type, uint8_t len) const [member function]
    cls.add_method('CheckCompatible',
                   'bool',
                   [param('uint8_t', 'type'), param('uint8_t', 'len')],
                   is_const=True)
    ## address.h (module 'network'): uint32_t ns3::Address::CopyAllFrom(uint8_t const * buffer, uint8_t len) [member function]
    cls.add_method('CopyAllFrom',
                   'uint32_t',
                   [param('uint8_t const *', 'buffer'), param('uint8_t', 'len')])
    ## address.h (module 'network'): uint32_t ns3::Address::CopyAllTo(uint8_t * buffer, uint8_t len) const [member function]
    cls.add_method('CopyAllTo',
                   'uint32_t',
                   [param('uint8_t *', 'buffer'), param('uint8_t', 'len')],
                   is_const=True)
    ## address.h (module 'network'): uint32_t ns3::Address::CopyFrom(uint8_t const * buffer, uint8_t len) [member function]
    cls.add_method('CopyFrom',
                   'uint32_t',
                   [param('uint8_t const *', 'buffer'), param('uint8_t', 'len')])
    ## address.h (module 'network'): uint32_t ns3::Address::CopyTo(uint8_t * buffer) const [member function]
    cls.add_method('CopyTo',
                   'uint32_t',
                   [param('uint8_t *', 'buffer')],
                   is_const=True)
    ## address.h (module 'network'): void ns3::Address::Deserialize(ns3::TagBuffer buffer) [member function]
    cls.add_method('Deserialize',
                   'void',
                   [param('ns3::TagBuffer', 'buffer')])
    ## address.h (module 'network'): uint8_t ns3::Address::GetLength() const [member function]
    cls.add_method('GetLength',
                   'uint8_t',
                   [],
                   is_const=True)
    ## address.h (module 'network'): uint32_t ns3::Address::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize',
                   'uint32_t',
                   [],
                   is_const=True)
    ## address.h (module 'network'): bool ns3::Address::IsInvalid() const [member function]
    cls.add_method('IsInvalid',
                   'bool',
                   [],
                   is_const=True)
    ## address.h (module 'network'): bool ns3::Address::IsMatchingType(uint8_t type) const [member function]
    cls.add_method('IsMatchingType',
                   'bool',
                   [param('uint8_t', 'type')],
                   is_const=True)
    ## address.h (module 'network'): static uint8_t ns3::Address::Register() [member function]
    cls.add_method('Register',
                   'uint8_t',
                   [],
                   is_static=True)
    ## address.h (module 'network'): void ns3::Address::Serialize(ns3::TagBuffer buffer) const [member function]
    cls.add_method('Serialize',
                   'void',
                   [param('ns3::TagBuffer', 'buffer')],
                   is_const=True)
    return
def register_Ns3AsciiTraceHelper_methods(root_module, cls):
    """Register Python bindings for ns3::AsciiTraceHelper (constructors,
    default trace-sink callbacks, and filename helpers).

    Auto-generated by PyBindGen from the scanned trace-helper.h header;
    the '##' comments record the originating C++ declarations. Do not
    edit by hand.
    """
    ## trace-helper.h (module 'network'): ns3::AsciiTraceHelper::AsciiTraceHelper(ns3::AsciiTraceHelper const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::AsciiTraceHelper const &', 'arg0')])
    ## trace-helper.h (module 'network'): ns3::AsciiTraceHelper::AsciiTraceHelper() [constructor]
    cls.add_constructor([])
    ## trace-helper.h (module 'network'): ns3::Ptr<ns3::OutputStreamWrapper> ns3::AsciiTraceHelper::CreateFileStream(std::string filename, std::_Ios_Openmode filemode=std::ios_base::out) [member function]
    cls.add_method('CreateFileStream',
                   'ns3::Ptr< ns3::OutputStreamWrapper >',
                   [param('std::string', 'filename'), param('std::_Ios_Openmode', 'filemode', default_value='std::ios_base::out')])
    ## trace-helper.h (module 'network'): static void ns3::AsciiTraceHelper::DefaultDequeueSinkWithContext(ns3::Ptr<ns3::OutputStreamWrapper> file, std::string context, ns3::Ptr<const ns3::Packet> p) [member function]
    cls.add_method('DefaultDequeueSinkWithContext',
                   'void',
                   [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'file'), param('std::string', 'context'), param('ns3::Ptr< ns3::Packet const >', 'p')],
                   is_static=True)
    ## trace-helper.h (module 'network'): static void ns3::AsciiTraceHelper::DefaultDequeueSinkWithoutContext(ns3::Ptr<ns3::OutputStreamWrapper> file, ns3::Ptr<const ns3::Packet> p) [member function]
    cls.add_method('DefaultDequeueSinkWithoutContext',
                   'void',
                   [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'file'), param('ns3::Ptr< ns3::Packet const >', 'p')],
                   is_static=True)
    ## trace-helper.h (module 'network'): static void ns3::AsciiTraceHelper::DefaultDropSinkWithContext(ns3::Ptr<ns3::OutputStreamWrapper> file, std::string context, ns3::Ptr<const ns3::Packet> p) [member function]
    cls.add_method('DefaultDropSinkWithContext',
                   'void',
                   [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'file'), param('std::string', 'context'), param('ns3::Ptr< ns3::Packet const >', 'p')],
                   is_static=True)
    ## trace-helper.h (module 'network'): static void ns3::AsciiTraceHelper::DefaultDropSinkWithoutContext(ns3::Ptr<ns3::OutputStreamWrapper> file, ns3::Ptr<const ns3::Packet> p) [member function]
    cls.add_method('DefaultDropSinkWithoutContext',
                   'void',
                   [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'file'), param('ns3::Ptr< ns3::Packet const >', 'p')],
                   is_static=True)
    ## trace-helper.h (module 'network'): static void ns3::AsciiTraceHelper::DefaultEnqueueSinkWithContext(ns3::Ptr<ns3::OutputStreamWrapper> file, std::string context, ns3::Ptr<const ns3::Packet> p) [member function]
    cls.add_method('DefaultEnqueueSinkWithContext',
                   'void',
                   [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'file'), param('std::string', 'context'), param('ns3::Ptr< ns3::Packet const >', 'p')],
                   is_static=True)
    ## trace-helper.h (module 'network'): static void ns3::AsciiTraceHelper::DefaultEnqueueSinkWithoutContext(ns3::Ptr<ns3::OutputStreamWrapper> file, ns3::Ptr<const ns3::Packet> p) [member function]
    cls.add_method('DefaultEnqueueSinkWithoutContext',
                   'void',
                   [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'file'), param('ns3::Ptr< ns3::Packet const >', 'p')],
                   is_static=True)
    ## trace-helper.h (module 'network'): static void ns3::AsciiTraceHelper::DefaultReceiveSinkWithContext(ns3::Ptr<ns3::OutputStreamWrapper> file, std::string context, ns3::Ptr<const ns3::Packet> p) [member function]
    cls.add_method('DefaultReceiveSinkWithContext',
                   'void',
                   [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'file'), param('std::string', 'context'), param('ns3::Ptr< ns3::Packet const >', 'p')],
                   is_static=True)
    ## trace-helper.h (module 'network'): static void ns3::AsciiTraceHelper::DefaultReceiveSinkWithoutContext(ns3::Ptr<ns3::OutputStreamWrapper> file, ns3::Ptr<const ns3::Packet> p) [member function]
    cls.add_method('DefaultReceiveSinkWithoutContext',
                   'void',
                   [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'file'), param('ns3::Ptr< ns3::Packet const >', 'p')],
                   is_static=True)
    ## trace-helper.h (module 'network'): std::string ns3::AsciiTraceHelper::GetFilenameFromDevice(std::string prefix, ns3::Ptr<ns3::NetDevice> device, bool useObjectNames=true) [member function]
    cls.add_method('GetFilenameFromDevice',
                   'std::string',
                   [param('std::string', 'prefix'), param('ns3::Ptr< ns3::NetDevice >', 'device'), param('bool', 'useObjectNames', default_value='true')])
    ## trace-helper.h (module 'network'): std::string ns3::AsciiTraceHelper::GetFilenameFromInterfacePair(std::string prefix, ns3::Ptr<ns3::Object> object, uint32_t interface, bool useObjectNames=true) [member function]
    cls.add_method('GetFilenameFromInterfacePair',
                   'std::string',
                   [param('std::string', 'prefix'), param('ns3::Ptr< ns3::Object >', 'object'), param('uint32_t', 'interface'), param('bool', 'useObjectNames', default_value='true')])
    return
def register_Ns3AsciiTraceHelperForDevice_methods(root_module, cls):
    """Register Python bindings for ns3::AsciiTraceHelperForDevice on *cls*.

    Adds the constructors, the EnableAscii/EnableAsciiAll overload set and the
    pure-virtual EnableAsciiInternal hook declared in trace-helper.h
    (pybindgen registration; do not edit by hand — this file is generated).
    """
    ## trace-helper.h (module 'network'): ns3::AsciiTraceHelperForDevice::AsciiTraceHelperForDevice(ns3::AsciiTraceHelperForDevice const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::AsciiTraceHelperForDevice const &', 'arg0')])
    ## trace-helper.h (module 'network'): ns3::AsciiTraceHelperForDevice::AsciiTraceHelperForDevice() [constructor]
    cls.add_constructor([])
    ## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAscii(std::string prefix, ns3::Ptr<ns3::NetDevice> nd, bool explicitFilename=false) [member function]
    cls.add_method('EnableAscii',
                   'void',
                   [param('std::string', 'prefix'), param('ns3::Ptr< ns3::NetDevice >', 'nd'), param('bool', 'explicitFilename', default_value='false')])
    ## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAscii(ns3::Ptr<ns3::OutputStreamWrapper> stream, ns3::Ptr<ns3::NetDevice> nd) [member function]
    cls.add_method('EnableAscii',
                   'void',
                   [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream'), param('ns3::Ptr< ns3::NetDevice >', 'nd')])
    ## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAscii(std::string prefix, std::string ndName, bool explicitFilename=false) [member function]
    cls.add_method('EnableAscii',
                   'void',
                   [param('std::string', 'prefix'), param('std::string', 'ndName'), param('bool', 'explicitFilename', default_value='false')])
    ## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAscii(ns3::Ptr<ns3::OutputStreamWrapper> stream, std::string ndName) [member function]
    cls.add_method('EnableAscii',
                   'void',
                   [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream'), param('std::string', 'ndName')])
    ## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAscii(std::string prefix, ns3::NetDeviceContainer d) [member function]
    cls.add_method('EnableAscii',
                   'void',
                   [param('std::string', 'prefix'), param('ns3::NetDeviceContainer', 'd')])
    ## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAscii(ns3::Ptr<ns3::OutputStreamWrapper> stream, ns3::NetDeviceContainer d) [member function]
    cls.add_method('EnableAscii',
                   'void',
                   [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream'), param('ns3::NetDeviceContainer', 'd')])
    ## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAscii(std::string prefix, ns3::NodeContainer n) [member function]
    cls.add_method('EnableAscii',
                   'void',
                   [param('std::string', 'prefix'), param('ns3::NodeContainer', 'n')])
    ## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAscii(ns3::Ptr<ns3::OutputStreamWrapper> stream, ns3::NodeContainer n) [member function]
    cls.add_method('EnableAscii',
                   'void',
                   [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream'), param('ns3::NodeContainer', 'n')])
    ## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAscii(std::string prefix, uint32_t nodeid, uint32_t deviceid, bool explicitFilename) [member function]
    cls.add_method('EnableAscii',
                   'void',
                   [param('std::string', 'prefix'), param('uint32_t', 'nodeid'), param('uint32_t', 'deviceid'), param('bool', 'explicitFilename')])
    ## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAscii(ns3::Ptr<ns3::OutputStreamWrapper> stream, uint32_t nodeid, uint32_t deviceid) [member function]
    cls.add_method('EnableAscii',
                   'void',
                   [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream'), param('uint32_t', 'nodeid'), param('uint32_t', 'deviceid')])
    ## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAsciiAll(std::string prefix) [member function]
    cls.add_method('EnableAsciiAll',
                   'void',
                   [param('std::string', 'prefix')])
    ## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAsciiAll(ns3::Ptr<ns3::OutputStreamWrapper> stream) [member function]
    cls.add_method('EnableAsciiAll',
                   'void',
                   [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream')])
    ## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAsciiInternal(ns3::Ptr<ns3::OutputStreamWrapper> stream, std::string prefix, ns3::Ptr<ns3::NetDevice> nd, bool explicitFilename) [member function]
    cls.add_method('EnableAsciiInternal',
                   'void',
                   [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream'), param('std::string', 'prefix'), param('ns3::Ptr< ns3::NetDevice >', 'nd'), param('bool', 'explicitFilename')],
                   is_pure_virtual=True, is_virtual=True)
    return
def register_Ns3AttributeConstructionList_methods(root_module, cls):
    """Register Python bindings for ns3::AttributeConstructionList on *cls*
    (constructors plus Add/Begin/End/Find, per attribute-construction-list.h).
    """
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::AttributeConstructionList(ns3::AttributeConstructionList const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::AttributeConstructionList const &', 'arg0')])
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::AttributeConstructionList() [constructor]
    cls.add_constructor([])
    ## attribute-construction-list.h (module 'core'): void ns3::AttributeConstructionList::Add(std::string name, ns3::Ptr<ns3::AttributeChecker const> checker, ns3::Ptr<ns3::AttributeValue> value) [member function]
    cls.add_method('Add',
                   'void',
                   [param('std::string', 'name'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker'), param('ns3::Ptr< ns3::AttributeValue >', 'value')])
    ## attribute-construction-list.h (module 'core'): std::_List_const_iterator<ns3::AttributeConstructionList::Item> ns3::AttributeConstructionList::Begin() const [member function]
    cls.add_method('Begin',
                   'std::_List_const_iterator< ns3::AttributeConstructionList::Item >',
                   [],
                   is_const=True)
    ## attribute-construction-list.h (module 'core'): std::_List_const_iterator<ns3::AttributeConstructionList::Item> ns3::AttributeConstructionList::End() const [member function]
    cls.add_method('End',
                   'std::_List_const_iterator< ns3::AttributeConstructionList::Item >',
                   [],
                   is_const=True)
    ## attribute-construction-list.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeConstructionList::Find(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('Find',
                   'ns3::Ptr< ns3::AttributeValue >',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True)
    return
def register_Ns3AttributeConstructionListItem_methods(root_module, cls):
    """Register Python bindings for the nested ns3::AttributeConstructionList::Item
    struct on *cls*: constructors and the checker/name/value instance attributes.
    """
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::Item() [constructor]
    cls.add_constructor([])
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::Item(ns3::AttributeConstructionList::Item const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::AttributeConstructionList::Item const &', 'arg0')])
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::checker [variable]
    cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False)
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::name [variable]
    cls.add_instance_attribute('name', 'std::string', is_const=False)
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::value [variable]
    cls.add_instance_attribute('value', 'ns3::Ptr< ns3::AttributeValue >', is_const=False)
    return
def register_Ns3Buffer_methods(root_module, cls):
    """Register Python bindings for ns3::Buffer on *cls*.

    Covers the constructors and the public buffer API from buffer.h
    (Add/Remove at start/end, Begin/End iterators, CopyData, fragment and
    full-copy creation, Serialize/Deserialize and the size accessors).
    """
    ## buffer.h (module 'network'): ns3::Buffer::Buffer() [constructor]
    cls.add_constructor([])
    ## buffer.h (module 'network'): ns3::Buffer::Buffer(uint32_t dataSize) [constructor]
    cls.add_constructor([param('uint32_t', 'dataSize')])
    ## buffer.h (module 'network'): ns3::Buffer::Buffer(uint32_t dataSize, bool initialize) [constructor]
    cls.add_constructor([param('uint32_t', 'dataSize'), param('bool', 'initialize')])
    ## buffer.h (module 'network'): ns3::Buffer::Buffer(ns3::Buffer const & o) [copy constructor]
    cls.add_constructor([param('ns3::Buffer const &', 'o')])
    ## buffer.h (module 'network'): bool ns3::Buffer::AddAtEnd(uint32_t end) [member function]
    cls.add_method('AddAtEnd',
                   'bool',
                   [param('uint32_t', 'end')])
    ## buffer.h (module 'network'): void ns3::Buffer::AddAtEnd(ns3::Buffer const & o) [member function]
    cls.add_method('AddAtEnd',
                   'void',
                   [param('ns3::Buffer const &', 'o')])
    ## buffer.h (module 'network'): bool ns3::Buffer::AddAtStart(uint32_t start) [member function]
    cls.add_method('AddAtStart',
                   'bool',
                   [param('uint32_t', 'start')])
    ## buffer.h (module 'network'): ns3::Buffer::Iterator ns3::Buffer::Begin() const [member function]
    cls.add_method('Begin',
                   'ns3::Buffer::Iterator',
                   [],
                   is_const=True)
    ## buffer.h (module 'network'): void ns3::Buffer::CopyData(std::ostream * os, uint32_t size) const [member function]
    cls.add_method('CopyData',
                   'void',
                   [param('std::ostream *', 'os'), param('uint32_t', 'size')],
                   is_const=True)
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::CopyData(uint8_t * buffer, uint32_t size) const [member function]
    cls.add_method('CopyData',
                   'uint32_t',
                   [param('uint8_t *', 'buffer'), param('uint32_t', 'size')],
                   is_const=True)
    ## buffer.h (module 'network'): ns3::Buffer ns3::Buffer::CreateFragment(uint32_t start, uint32_t length) const [member function]
    cls.add_method('CreateFragment',
                   'ns3::Buffer',
                   [param('uint32_t', 'start'), param('uint32_t', 'length')],
                   is_const=True)
    ## buffer.h (module 'network'): ns3::Buffer ns3::Buffer::CreateFullCopy() const [member function]
    cls.add_method('CreateFullCopy',
                   'ns3::Buffer',
                   [],
                   is_const=True)
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::Deserialize(uint8_t const * buffer, uint32_t size) [member function]
    cls.add_method('Deserialize',
                   'uint32_t',
                   [param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
    ## buffer.h (module 'network'): ns3::Buffer::Iterator ns3::Buffer::End() const [member function]
    cls.add_method('End',
                   'ns3::Buffer::Iterator',
                   [],
                   is_const=True)
    ## buffer.h (module 'network'): int32_t ns3::Buffer::GetCurrentEndOffset() const [member function]
    cls.add_method('GetCurrentEndOffset',
                   'int32_t',
                   [],
                   is_const=True)
    ## buffer.h (module 'network'): int32_t ns3::Buffer::GetCurrentStartOffset() const [member function]
    cls.add_method('GetCurrentStartOffset',
                   'int32_t',
                   [],
                   is_const=True)
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize',
                   'uint32_t',
                   [],
                   is_const=True)
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::GetSize() const [member function]
    cls.add_method('GetSize',
                   'uint32_t',
                   [],
                   is_const=True)
    ## buffer.h (module 'network'): uint8_t const * ns3::Buffer::PeekData() const [member function]
    cls.add_method('PeekData',
                   'uint8_t const *',
                   [],
                   is_const=True)
    ## buffer.h (module 'network'): void ns3::Buffer::RemoveAtEnd(uint32_t end) [member function]
    cls.add_method('RemoveAtEnd',
                   'void',
                   [param('uint32_t', 'end')])
    ## buffer.h (module 'network'): void ns3::Buffer::RemoveAtStart(uint32_t start) [member function]
    cls.add_method('RemoveAtStart',
                   'void',
                   [param('uint32_t', 'start')])
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::Serialize(uint8_t * buffer, uint32_t maxSize) const [member function]
    cls.add_method('Serialize',
                   'uint32_t',
                   [param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')],
                   is_const=True)
    return
def register_Ns3BufferIterator_methods(root_module, cls):
    """Register Python bindings for ns3::Buffer::Iterator on *cls*.

    Covers the constructors, checksum helpers, cursor movement
    (Next/Prev/IsStart/IsEnd), and the full family of Read*/Write* accessors
    in host, network (Ntoh/Hton) and little-endian (Lsbtoh/Htolsb) byte order.
    """
    ## buffer.h (module 'network'): ns3::Buffer::Iterator::Iterator(ns3::Buffer::Iterator const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Buffer::Iterator const &', 'arg0')])
    ## buffer.h (module 'network'): ns3::Buffer::Iterator::Iterator() [constructor]
    cls.add_constructor([])
    ## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::CalculateIpChecksum(uint16_t size) [member function]
    cls.add_method('CalculateIpChecksum',
                   'uint16_t',
                   [param('uint16_t', 'size')])
    ## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::CalculateIpChecksum(uint16_t size, uint32_t initialChecksum) [member function]
    cls.add_method('CalculateIpChecksum',
                   'uint16_t',
                   [param('uint16_t', 'size'), param('uint32_t', 'initialChecksum')])
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::GetDistanceFrom(ns3::Buffer::Iterator const & o) const [member function]
    cls.add_method('GetDistanceFrom',
                   'uint32_t',
                   [param('ns3::Buffer::Iterator const &', 'o')],
                   is_const=True)
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::GetSize() const [member function]
    cls.add_method('GetSize',
                   'uint32_t',
                   [],
                   is_const=True)
    ## buffer.h (module 'network'): bool ns3::Buffer::Iterator::IsEnd() const [member function]
    cls.add_method('IsEnd',
                   'bool',
                   [],
                   is_const=True)
    ## buffer.h (module 'network'): bool ns3::Buffer::Iterator::IsStart() const [member function]
    cls.add_method('IsStart',
                   'bool',
                   [],
                   is_const=True)
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Next() [member function]
    cls.add_method('Next',
                   'void',
                   [])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Next(uint32_t delta) [member function]
    cls.add_method('Next',
                   'void',
                   [param('uint32_t', 'delta')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Prev() [member function]
    cls.add_method('Prev',
                   'void',
                   [])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Prev(uint32_t delta) [member function]
    cls.add_method('Prev',
                   'void',
                   [param('uint32_t', 'delta')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Read(uint8_t * buffer, uint32_t size) [member function]
    cls.add_method('Read',
                   'void',
                   [param('uint8_t *', 'buffer'), param('uint32_t', 'size')])
    ## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::ReadLsbtohU16() [member function]
    cls.add_method('ReadLsbtohU16',
                   'uint16_t',
                   [])
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::ReadLsbtohU32() [member function]
    cls.add_method('ReadLsbtohU32',
                   'uint32_t',
                   [])
    ## buffer.h (module 'network'): uint64_t ns3::Buffer::Iterator::ReadLsbtohU64() [member function]
    cls.add_method('ReadLsbtohU64',
                   'uint64_t',
                   [])
    ## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::ReadNtohU16() [member function]
    cls.add_method('ReadNtohU16',
                   'uint16_t',
                   [])
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::ReadNtohU32() [member function]
    cls.add_method('ReadNtohU32',
                   'uint32_t',
                   [])
    ## buffer.h (module 'network'): uint64_t ns3::Buffer::Iterator::ReadNtohU64() [member function]
    cls.add_method('ReadNtohU64',
                   'uint64_t',
                   [])
    ## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::ReadU16() [member function]
    cls.add_method('ReadU16',
                   'uint16_t',
                   [])
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::ReadU32() [member function]
    cls.add_method('ReadU32',
                   'uint32_t',
                   [])
    ## buffer.h (module 'network'): uint64_t ns3::Buffer::Iterator::ReadU64() [member function]
    cls.add_method('ReadU64',
                   'uint64_t',
                   [])
    ## buffer.h (module 'network'): uint8_t ns3::Buffer::Iterator::ReadU8() [member function]
    cls.add_method('ReadU8',
                   'uint8_t',
                   [])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Write(uint8_t const * buffer, uint32_t size) [member function]
    cls.add_method('Write',
                   'void',
                   [param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Write(ns3::Buffer::Iterator start, ns3::Buffer::Iterator end) [member function]
    cls.add_method('Write',
                   'void',
                   [param('ns3::Buffer::Iterator', 'start'), param('ns3::Buffer::Iterator', 'end')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtolsbU16(uint16_t data) [member function]
    cls.add_method('WriteHtolsbU16',
                   'void',
                   [param('uint16_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtolsbU32(uint32_t data) [member function]
    cls.add_method('WriteHtolsbU32',
                   'void',
                   [param('uint32_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtolsbU64(uint64_t data) [member function]
    cls.add_method('WriteHtolsbU64',
                   'void',
                   [param('uint64_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtonU16(uint16_t data) [member function]
    cls.add_method('WriteHtonU16',
                   'void',
                   [param('uint16_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtonU32(uint32_t data) [member function]
    cls.add_method('WriteHtonU32',
                   'void',
                   [param('uint32_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtonU64(uint64_t data) [member function]
    cls.add_method('WriteHtonU64',
                   'void',
                   [param('uint64_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU16(uint16_t data) [member function]
    cls.add_method('WriteU16',
                   'void',
                   [param('uint16_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU32(uint32_t data) [member function]
    cls.add_method('WriteU32',
                   'void',
                   [param('uint32_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU64(uint64_t data) [member function]
    cls.add_method('WriteU64',
                   'void',
                   [param('uint64_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU8(uint8_t data) [member function]
    cls.add_method('WriteU8',
                   'void',
                   [param('uint8_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU8(uint8_t data, uint32_t len) [member function]
    cls.add_method('WriteU8',
                   'void',
                   [param('uint8_t', 'data'), param('uint32_t', 'len')])
    return
def register_Ns3ByteTagIterator_methods(root_module, cls):
    """Register Python bindings for ns3::ByteTagIterator on *cls*
    (copy constructor plus the HasNext/Next iteration pair from packet.h).
    """
    ## packet.h (module 'network'): ns3::ByteTagIterator::ByteTagIterator(ns3::ByteTagIterator const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ByteTagIterator const &', 'arg0')])
    ## packet.h (module 'network'): bool ns3::ByteTagIterator::HasNext() const [member function]
    cls.add_method('HasNext',
                   'bool',
                   [],
                   is_const=True)
    ## packet.h (module 'network'): ns3::ByteTagIterator::Item ns3::ByteTagIterator::Next() [member function]
    cls.add_method('Next',
                   'ns3::ByteTagIterator::Item',
                   [])
    return
def register_Ns3ByteTagIteratorItem_methods(root_module, cls):
    """Register Python bindings for the nested ns3::ByteTagIterator::Item on
    *cls*: copy constructor and the GetStart/GetEnd/GetTag/GetTypeId accessors.
    """
    ## packet.h (module 'network'): ns3::ByteTagIterator::Item::Item(ns3::ByteTagIterator::Item const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ByteTagIterator::Item const &', 'arg0')])
    ## packet.h (module 'network'): uint32_t ns3::ByteTagIterator::Item::GetEnd() const [member function]
    cls.add_method('GetEnd',
                   'uint32_t',
                   [],
                   is_const=True)
    ## packet.h (module 'network'): uint32_t ns3::ByteTagIterator::Item::GetStart() const [member function]
    cls.add_method('GetStart',
                   'uint32_t',
                   [],
                   is_const=True)
    ## packet.h (module 'network'): void ns3::ByteTagIterator::Item::GetTag(ns3::Tag & tag) const [member function]
    cls.add_method('GetTag',
                   'void',
                   [param('ns3::Tag &', 'tag')],
                   is_const=True)
    ## packet.h (module 'network'): ns3::TypeId ns3::ByteTagIterator::Item::GetTypeId() const [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_const=True)
    return
def register_Ns3ByteTagList_methods(root_module, cls):
    """Register Python bindings for ns3::ByteTagList on *cls*
    (constructors, the Add overloads, AddAtEnd/AddAtStart offset adjustment,
    Begin and RemoveAll, per byte-tag-list.h).
    """
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::ByteTagList() [constructor]
    cls.add_constructor([])
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::ByteTagList(ns3::ByteTagList const & o) [copy constructor]
    cls.add_constructor([param('ns3::ByteTagList const &', 'o')])
    ## byte-tag-list.h (module 'network'): ns3::TagBuffer ns3::ByteTagList::Add(ns3::TypeId tid, uint32_t bufferSize, int32_t start, int32_t end) [member function]
    cls.add_method('Add',
                   'ns3::TagBuffer',
                   [param('ns3::TypeId', 'tid'), param('uint32_t', 'bufferSize'), param('int32_t', 'start'), param('int32_t', 'end')])
    ## byte-tag-list.h (module 'network'): void ns3::ByteTagList::Add(ns3::ByteTagList const & o) [member function]
    cls.add_method('Add',
                   'void',
                   [param('ns3::ByteTagList const &', 'o')])
    ## byte-tag-list.h (module 'network'): void ns3::ByteTagList::AddAtEnd(int32_t adjustment, int32_t appendOffset) [member function]
    cls.add_method('AddAtEnd',
                   'void',
                   [param('int32_t', 'adjustment'), param('int32_t', 'appendOffset')])
    ## byte-tag-list.h (module 'network'): void ns3::ByteTagList::AddAtStart(int32_t adjustment, int32_t prependOffset) [member function]
    cls.add_method('AddAtStart',
                   'void',
                   [param('int32_t', 'adjustment'), param('int32_t', 'prependOffset')])
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator ns3::ByteTagList::Begin(int32_t offsetStart, int32_t offsetEnd) const [member function]
    cls.add_method('Begin',
                   'ns3::ByteTagList::Iterator',
                   [param('int32_t', 'offsetStart'), param('int32_t', 'offsetEnd')],
                   is_const=True)
    ## byte-tag-list.h (module 'network'): void ns3::ByteTagList::RemoveAll() [member function]
    cls.add_method('RemoveAll',
                   'void',
                   [])
    return
def register_Ns3ByteTagListIterator_methods(root_module, cls):
    """Register Python bindings for ns3::ByteTagList::Iterator on *cls*
    (copy constructor, GetOffsetStart and the HasNext/Next iteration pair).
    """
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Iterator(ns3::ByteTagList::Iterator const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ByteTagList::Iterator const &', 'arg0')])
    ## byte-tag-list.h (module 'network'): uint32_t ns3::ByteTagList::Iterator::GetOffsetStart() const [member function]
    cls.add_method('GetOffsetStart',
                   'uint32_t',
                   [],
                   is_const=True)
    ## byte-tag-list.h (module 'network'): bool ns3::ByteTagList::Iterator::HasNext() const [member function]
    cls.add_method('HasNext',
                   'bool',
                   [],
                   is_const=True)
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item ns3::ByteTagList::Iterator::Next() [member function]
    cls.add_method('Next',
                   'ns3::ByteTagList::Iterator::Item',
                   [])
    return
def register_Ns3ByteTagListIteratorItem_methods(root_module, cls):
    """Register Python bindings for the nested ns3::ByteTagList::Iterator::Item
    on *cls*: constructors and the buf/end/size/start/tid public fields.
    """
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::Item(ns3::ByteTagList::Iterator::Item const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ByteTagList::Iterator::Item const &', 'arg0')])
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::Item(ns3::TagBuffer buf) [constructor]
    cls.add_constructor([param('ns3::TagBuffer', 'buf')])
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::buf [variable]
    cls.add_instance_attribute('buf', 'ns3::TagBuffer', is_const=False)
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::end [variable]
    cls.add_instance_attribute('end', 'int32_t', is_const=False)
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::size [variable]
    cls.add_instance_attribute('size', 'uint32_t', is_const=False)
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::start [variable]
    cls.add_instance_attribute('start', 'int32_t', is_const=False)
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::tid [variable]
    cls.add_instance_attribute('tid', 'ns3::TypeId', is_const=False)
    return
def register_Ns3CallbackBase_methods(root_module, cls):
    """Register Python bindings for ns3::CallbackBase on *cls*.

    Public constructors and GetImpl, plus the protected impl-taking
    constructor and the protected static Demangle helper from callback.h.
    """
    ## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::CallbackBase const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::CallbackBase const &', 'arg0')])
    ## callback.h (module 'core'): ns3::CallbackBase::CallbackBase() [constructor]
    cls.add_constructor([])
    ## callback.h (module 'core'): ns3::Ptr<ns3::CallbackImplBase> ns3::CallbackBase::GetImpl() const [member function]
    cls.add_method('GetImpl',
                   'ns3::Ptr< ns3::CallbackImplBase >',
                   [],
                   is_const=True)
    ## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::Ptr<ns3::CallbackImplBase> impl) [constructor]
    cls.add_constructor([param('ns3::Ptr< ns3::CallbackImplBase >', 'impl')],
                        visibility='protected')
    ## callback.h (module 'core'): static std::string ns3::CallbackBase::Demangle(std::string const & mangled) [member function]
    cls.add_method('Demangle',
                   'std::string',
                   [param('std::string const &', 'mangled')],
                   is_static=True, visibility='protected')
    return
def register_Ns3Cid_methods(root_module, cls):
    """Register Python bindings for ns3::Cid (WiMAX connection identifier) on *cls*.

    Adds ==/!= comparison and output-stream operators, the constructors, the
    static well-known-CID factories (Broadcast/InitialRanging/Padding) and the
    Is*/GetIdentifier query methods from cid.h.
    """
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    ## cid.h (module 'wimax'): ns3::Cid::Cid(ns3::Cid const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Cid const &', 'arg0')])
    ## cid.h (module 'wimax'): ns3::Cid::Cid() [constructor]
    cls.add_constructor([])
    ## cid.h (module 'wimax'): ns3::Cid::Cid(uint16_t cid) [constructor]
    cls.add_constructor([param('uint16_t', 'cid')])
    ## cid.h (module 'wimax'): static ns3::Cid ns3::Cid::Broadcast() [member function]
    cls.add_method('Broadcast',
                   'ns3::Cid',
                   [],
                   is_static=True)
    ## cid.h (module 'wimax'): uint16_t ns3::Cid::GetIdentifier() const [member function]
    cls.add_method('GetIdentifier',
                   'uint16_t',
                   [],
                   is_const=True)
    ## cid.h (module 'wimax'): static ns3::Cid ns3::Cid::InitialRanging() [member function]
    cls.add_method('InitialRanging',
                   'ns3::Cid',
                   [],
                   is_static=True)
    ## cid.h (module 'wimax'): bool ns3::Cid::IsBroadcast() const [member function]
    cls.add_method('IsBroadcast',
                   'bool',
                   [],
                   is_const=True)
    ## cid.h (module 'wimax'): bool ns3::Cid::IsInitialRanging() const [member function]
    cls.add_method('IsInitialRanging',
                   'bool',
                   [],
                   is_const=True)
    ## cid.h (module 'wimax'): bool ns3::Cid::IsMulticast() const [member function]
    cls.add_method('IsMulticast',
                   'bool',
                   [],
                   is_const=True)
    ## cid.h (module 'wimax'): bool ns3::Cid::IsPadding() const [member function]
    cls.add_method('IsPadding',
                   'bool',
                   [],
                   is_const=True)
    ## cid.h (module 'wimax'): static ns3::Cid ns3::Cid::Padding() [member function]
    cls.add_method('Padding',
                   'ns3::Cid',
                   [],
                   is_static=True)
    return
def register_Ns3CidFactory_methods(root_module, cls):
    """Register Python bindings for ns3::CidFactory on *cls*
    (constructors, the Allocate* family, FreeCid and the IsBasic/IsPrimary/
    IsTransport classification queries from cid-factory.h).
    """
    ## cid-factory.h (module 'wimax'): ns3::CidFactory::CidFactory(ns3::CidFactory const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::CidFactory const &', 'arg0')])
    ## cid-factory.h (module 'wimax'): ns3::CidFactory::CidFactory() [constructor]
    cls.add_constructor([])
    ## cid-factory.h (module 'wimax'): ns3::Cid ns3::CidFactory::Allocate(ns3::Cid::Type type) [member function]
    cls.add_method('Allocate',
                   'ns3::Cid',
                   [param('ns3::Cid::Type', 'type')])
    ## cid-factory.h (module 'wimax'): ns3::Cid ns3::CidFactory::AllocateBasic() [member function]
    cls.add_method('AllocateBasic',
                   'ns3::Cid',
                   [])
    ## cid-factory.h (module 'wimax'): ns3::Cid ns3::CidFactory::AllocateMulticast() [member function]
    cls.add_method('AllocateMulticast',
                   'ns3::Cid',
                   [])
    ## cid-factory.h (module 'wimax'): ns3::Cid ns3::CidFactory::AllocatePrimary() [member function]
    cls.add_method('AllocatePrimary',
                   'ns3::Cid',
                   [])
    ## cid-factory.h (module 'wimax'): ns3::Cid ns3::CidFactory::AllocateTransportOrSecondary() [member function]
    cls.add_method('AllocateTransportOrSecondary',
                   'ns3::Cid',
                   [])
    ## cid-factory.h (module 'wimax'): void ns3::CidFactory::FreeCid(ns3::Cid cid) [member function]
    cls.add_method('FreeCid',
                   'void',
                   [param('ns3::Cid', 'cid')])
    ## cid-factory.h (module 'wimax'): bool ns3::CidFactory::IsBasic(ns3::Cid cid) const [member function]
    cls.add_method('IsBasic',
                   'bool',
                   [param('ns3::Cid', 'cid')],
                   is_const=True)
    ## cid-factory.h (module 'wimax'): bool ns3::CidFactory::IsPrimary(ns3::Cid cid) const [member function]
    cls.add_method('IsPrimary',
                   'bool',
                   [param('ns3::Cid', 'cid')],
                   is_const=True)
    ## cid-factory.h (module 'wimax'): bool ns3::CidFactory::IsTransport(ns3::Cid cid) const [member function]
    cls.add_method('IsTransport',
                   'bool',
                   [param('ns3::Cid', 'cid')],
                   is_const=True)
    return
def register_Ns3CsParameters_methods(root_module, cls):
    """Register Python bindings for ns3::CsParameters on *cls*
    (constructors — including the Tlv and classifier-record forms — plus the
    classifier DSC action / packet classifier rule accessors and ToTlv,
    per cs-parameters.h).
    """
    ## cs-parameters.h (module 'wimax'): ns3::CsParameters::CsParameters(ns3::CsParameters const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::CsParameters const &', 'arg0')])
    ## cs-parameters.h (module 'wimax'): ns3::CsParameters::CsParameters() [constructor]
    cls.add_constructor([])
    ## cs-parameters.h (module 'wimax'): ns3::CsParameters::CsParameters(ns3::Tlv tlv) [constructor]
    cls.add_constructor([param('ns3::Tlv', 'tlv')])
    ## cs-parameters.h (module 'wimax'): ns3::CsParameters::CsParameters(ns3::CsParameters::Action classifierDscAction, ns3::IpcsClassifierRecord classifier) [constructor]
    cls.add_constructor([param('ns3::CsParameters::Action', 'classifierDscAction'), param('ns3::IpcsClassifierRecord', 'classifier')])
    ## cs-parameters.h (module 'wimax'): ns3::CsParameters::Action ns3::CsParameters::GetClassifierDscAction() const [member function]
    cls.add_method('GetClassifierDscAction',
                   'ns3::CsParameters::Action',
                   [],
                   is_const=True)
    ## cs-parameters.h (module 'wimax'): ns3::IpcsClassifierRecord ns3::CsParameters::GetPacketClassifierRule() const [member function]
    cls.add_method('GetPacketClassifierRule',
                   'ns3::IpcsClassifierRecord',
                   [],
                   is_const=True)
    ## cs-parameters.h (module 'wimax'): void ns3::CsParameters::SetClassifierDscAction(ns3::CsParameters::Action action) [member function]
    cls.add_method('SetClassifierDscAction',
                   'void',
                   [param('ns3::CsParameters::Action', 'action')])
    ## cs-parameters.h (module 'wimax'): void ns3::CsParameters::SetPacketClassifierRule(ns3::IpcsClassifierRecord packetClassifierRule) [member function]
    cls.add_method('SetPacketClassifierRule',
                   'void',
                   [param('ns3::IpcsClassifierRecord', 'packetClassifierRule')])
    ## cs-parameters.h (module 'wimax'): ns3::Tlv ns3::CsParameters::ToTlv() const [member function]
    cls.add_method('ToTlv',
                   'ns3::Tlv',
                   [],
                   is_const=True)
    return
def register_Ns3DcdChannelEncodings_methods(root_module, cls):
    """Register Python bindings for ns3::DcdChannelEncodings on *cls*.

    Covers the constructors, the BsEirp/EirxPIrMax/Frequency getter-setter
    pairs, GetSize, the public Read/Write buffer-iterator serializers and the
    private pure-virtual DoRead/DoWrite hooks from dl-mac-messages.h.
    """
    ## dl-mac-messages.h (module 'wimax'): ns3::DcdChannelEncodings::DcdChannelEncodings(ns3::DcdChannelEncodings const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::DcdChannelEncodings const &', 'arg0')])
    ## dl-mac-messages.h (module 'wimax'): ns3::DcdChannelEncodings::DcdChannelEncodings() [constructor]
    cls.add_constructor([])
    ## dl-mac-messages.h (module 'wimax'): uint16_t ns3::DcdChannelEncodings::GetBsEirp() const [member function]
    cls.add_method('GetBsEirp',
                   'uint16_t',
                   [],
                   is_const=True)
    ## dl-mac-messages.h (module 'wimax'): uint16_t ns3::DcdChannelEncodings::GetEirxPIrMax() const [member function]
    cls.add_method('GetEirxPIrMax',
                   'uint16_t',
                   [],
                   is_const=True)
    ## dl-mac-messages.h (module 'wimax'): uint32_t ns3::DcdChannelEncodings::GetFrequency() const [member function]
    cls.add_method('GetFrequency',
                   'uint32_t',
                   [],
                   is_const=True)
    ## dl-mac-messages.h (module 'wimax'): uint16_t ns3::DcdChannelEncodings::GetSize() const [member function]
    cls.add_method('GetSize',
                   'uint16_t',
                   [],
                   is_const=True)
    ## dl-mac-messages.h (module 'wimax'): ns3::Buffer::Iterator ns3::DcdChannelEncodings::Read(ns3::Buffer::Iterator start) [member function]
    cls.add_method('Read',
                   'ns3::Buffer::Iterator',
                   [param('ns3::Buffer::Iterator', 'start')])
    ## dl-mac-messages.h (module 'wimax'): void ns3::DcdChannelEncodings::SetBsEirp(uint16_t bs_eirp) [member function]
    cls.add_method('SetBsEirp',
                   'void',
                   [param('uint16_t', 'bs_eirp')])
    ## dl-mac-messages.h (module 'wimax'): void ns3::DcdChannelEncodings::SetEirxPIrMax(uint16_t rss_ir_max) [member function]
    cls.add_method('SetEirxPIrMax',
                   'void',
                   [param('uint16_t', 'rss_ir_max')])
    ## dl-mac-messages.h (module 'wimax'): void ns3::DcdChannelEncodings::SetFrequency(uint32_t frequency) [member function]
    cls.add_method('SetFrequency',
                   'void',
                   [param('uint32_t', 'frequency')])
    ## dl-mac-messages.h (module 'wimax'): ns3::Buffer::Iterator ns3::DcdChannelEncodings::Write(ns3::Buffer::Iterator start) const [member function]
    cls.add_method('Write',
                   'ns3::Buffer::Iterator',
                   [param('ns3::Buffer::Iterator', 'start')],
                   is_const=True)
    ## dl-mac-messages.h (module 'wimax'): ns3::Buffer::Iterator ns3::DcdChannelEncodings::DoRead(ns3::Buffer::Iterator start) [member function]
    cls.add_method('DoRead',
                   'ns3::Buffer::Iterator',
                   [param('ns3::Buffer::Iterator', 'start')],
                   is_pure_virtual=True, visibility='private', is_virtual=True)
    ## dl-mac-messages.h (module 'wimax'): ns3::Buffer::Iterator ns3::DcdChannelEncodings::DoWrite(ns3::Buffer::Iterator start) const [member function]
    cls.add_method('DoWrite',
                   'ns3::Buffer::Iterator',
                   [param('ns3::Buffer::Iterator', 'start')],
                   is_pure_virtual=True, is_const=True, visibility='private', is_virtual=True)
    return
def register_Ns3DlFramePrefixIe_methods(root_module, cls):
    """Register Python bindings for ns3::DlFramePrefixIe (ofdm-downlink-frame-prefix.h, module 'wimax')."""
    # Constructors: copy constructor and default constructor.
    cls.add_constructor([param('ns3::DlFramePrefixIe const &', 'arg0')])
    cls.add_constructor([])
    # Zero-argument const getters, registered in order: (name, return type).
    for getter_name, return_type in (('GetDiuc', 'uint8_t'),
                                     ('GetLength', 'uint16_t'),
                                     ('GetPreamblePresent', 'uint8_t'),
                                     ('GetRateId', 'uint8_t'),
                                     ('GetSize', 'uint16_t'),
                                     ('GetStartTime', 'uint16_t')):
        cls.add_method(getter_name, return_type, [], is_const=True)
    # Buffer deserialization helper (mutates the object, hence not const).
    cls.add_method('Read',
                   'ns3::Buffer::Iterator',
                   [param('ns3::Buffer::Iterator', 'start')])
    # Single-argument void setters: (name, argument type, argument name).
    for setter_name, arg_type, arg_name in (('SetDiuc', 'uint8_t', 'diuc'),
                                            ('SetLength', 'uint16_t', 'length'),
                                            ('SetPreamblePresent', 'uint8_t', 'preamblePresent'),
                                            ('SetRateId', 'uint8_t', 'rateId'),
                                            ('SetStartTime', 'uint16_t', 'startTime')):
        cls.add_method(setter_name, 'void', [param(arg_type, arg_name)])
    # Buffer serialization helper (const).
    cls.add_method('Write',
                   'ns3::Buffer::Iterator',
                   [param('ns3::Buffer::Iterator', 'start')],
                   is_const=True)
    return
def register_Ns3EventId_methods(root_module, cls):
    """Register Python bindings for ns3::EventId (event-id.h, module 'core')."""
    # Equality/inequality comparison operators.
    cls.add_binary_comparison_operator('!=')
    cls.add_binary_comparison_operator('==')
    # Constructors: copy, default, and the full (impl, ts, context, uid) form.
    cls.add_constructor([param('ns3::EventId const &', 'arg0')])
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Ptr< ns3::EventImpl > const &', 'impl'), param('uint64_t', 'ts'), param('uint32_t', 'context'), param('uint32_t', 'uid')])
    # Cancel() mutates the event, so it is registered without is_const.
    cls.add_method('Cancel', 'void', [])
    # Zero-argument const accessors, in registration order: (name, return type).
    for accessor_name, return_type in (('GetContext', 'uint32_t'),
                                       ('GetTs', 'uint64_t'),
                                       ('GetUid', 'uint32_t'),
                                       ('IsExpired', 'bool'),
                                       ('IsRunning', 'bool'),
                                       ('PeekEventImpl', 'ns3::EventImpl *')):
        cls.add_method(accessor_name, return_type, [], is_const=True)
    return
def register_Ns3IpcsClassifierRecord_methods(root_module, cls):
    """Register Python bindings for ns3::IpcsClassifierRecord (ipcs-classifier-record.h, module 'wimax').

    Auto-generated PyBindGen registrations; each call mirrors one C++
    constructor or member function signature, so call order and argument
    strings must stay exactly as emitted by the bindings generator.
    """
    ## ipcs-classifier-record.h (module 'wimax'): ns3::IpcsClassifierRecord::IpcsClassifierRecord(ns3::IpcsClassifierRecord const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::IpcsClassifierRecord const &', 'arg0')])
    ## ipcs-classifier-record.h (module 'wimax'): ns3::IpcsClassifierRecord::IpcsClassifierRecord() [constructor]
    cls.add_constructor([])
    ## ipcs-classifier-record.h (module 'wimax'): ns3::IpcsClassifierRecord::IpcsClassifierRecord(ns3::Ipv4Address srcAddress, ns3::Ipv4Mask srcMask, ns3::Ipv4Address dstAddress, ns3::Ipv4Mask dstMask, uint16_t srcPortLow, uint16_t srcPortHigh, uint16_t dstPortLow, uint16_t dstPortHigh, uint8_t protocol, uint8_t priority) [constructor]
    cls.add_constructor([param('ns3::Ipv4Address', 'srcAddress'), param('ns3::Ipv4Mask', 'srcMask'), param('ns3::Ipv4Address', 'dstAddress'), param('ns3::Ipv4Mask', 'dstMask'), param('uint16_t', 'srcPortLow'), param('uint16_t', 'srcPortHigh'), param('uint16_t', 'dstPortLow'), param('uint16_t', 'dstPortHigh'), param('uint8_t', 'protocol'), param('uint8_t', 'priority')])
    ## ipcs-classifier-record.h (module 'wimax'): ns3::IpcsClassifierRecord::IpcsClassifierRecord(ns3::Tlv tlv) [constructor]
    cls.add_constructor([param('ns3::Tlv', 'tlv')])
    ## ipcs-classifier-record.h (module 'wimax'): void ns3::IpcsClassifierRecord::AddDstAddr(ns3::Ipv4Address dstAddress, ns3::Ipv4Mask dstMask) [member function]
    cls.add_method('AddDstAddr',
                   'void',
                   [param('ns3::Ipv4Address', 'dstAddress'), param('ns3::Ipv4Mask', 'dstMask')])
    ## ipcs-classifier-record.h (module 'wimax'): void ns3::IpcsClassifierRecord::AddDstPortRange(uint16_t dstPortLow, uint16_t dstPortHigh) [member function]
    cls.add_method('AddDstPortRange',
                   'void',
                   [param('uint16_t', 'dstPortLow'), param('uint16_t', 'dstPortHigh')])
    ## ipcs-classifier-record.h (module 'wimax'): void ns3::IpcsClassifierRecord::AddProtocol(uint8_t proto) [member function]
    cls.add_method('AddProtocol',
                   'void',
                   [param('uint8_t', 'proto')])
    ## ipcs-classifier-record.h (module 'wimax'): void ns3::IpcsClassifierRecord::AddSrcAddr(ns3::Ipv4Address srcAddress, ns3::Ipv4Mask srcMask) [member function]
    cls.add_method('AddSrcAddr',
                   'void',
                   [param('ns3::Ipv4Address', 'srcAddress'), param('ns3::Ipv4Mask', 'srcMask')])
    ## ipcs-classifier-record.h (module 'wimax'): void ns3::IpcsClassifierRecord::AddSrcPortRange(uint16_t srcPortLow, uint16_t srcPortHigh) [member function]
    cls.add_method('AddSrcPortRange',
                   'void',
                   [param('uint16_t', 'srcPortLow'), param('uint16_t', 'srcPortHigh')])
    ## ipcs-classifier-record.h (module 'wimax'): bool ns3::IpcsClassifierRecord::CheckMatch(ns3::Ipv4Address srcAddress, ns3::Ipv4Address dstAddress, uint16_t srcPort, uint16_t dstPort, uint8_t proto) const [member function]
    cls.add_method('CheckMatch',
                   'bool',
                   [param('ns3::Ipv4Address', 'srcAddress'), param('ns3::Ipv4Address', 'dstAddress'), param('uint16_t', 'srcPort'), param('uint16_t', 'dstPort'), param('uint8_t', 'proto')],
                   is_const=True)
    ## ipcs-classifier-record.h (module 'wimax'): uint16_t ns3::IpcsClassifierRecord::GetCid() const [member function]
    cls.add_method('GetCid',
                   'uint16_t',
                   [],
                   is_const=True)
    ## ipcs-classifier-record.h (module 'wimax'): uint16_t ns3::IpcsClassifierRecord::GetIndex() const [member function]
    cls.add_method('GetIndex',
                   'uint16_t',
                   [],
                   is_const=True)
    ## ipcs-classifier-record.h (module 'wimax'): uint8_t ns3::IpcsClassifierRecord::GetPriority() const [member function]
    cls.add_method('GetPriority',
                   'uint8_t',
                   [],
                   is_const=True)
    ## ipcs-classifier-record.h (module 'wimax'): void ns3::IpcsClassifierRecord::SetCid(uint16_t cid) [member function]
    cls.add_method('SetCid',
                   'void',
                   [param('uint16_t', 'cid')])
    ## ipcs-classifier-record.h (module 'wimax'): void ns3::IpcsClassifierRecord::SetIndex(uint16_t index) [member function]
    cls.add_method('SetIndex',
                   'void',
                   [param('uint16_t', 'index')])
    ## ipcs-classifier-record.h (module 'wimax'): void ns3::IpcsClassifierRecord::SetPriority(uint8_t prio) [member function]
    cls.add_method('SetPriority',
                   'void',
                   [param('uint8_t', 'prio')])
    ## ipcs-classifier-record.h (module 'wimax'): ns3::Tlv ns3::IpcsClassifierRecord::ToTlv() const [member function]
    cls.add_method('ToTlv',
                   'ns3::Tlv',
                   [],
                   is_const=True)
    return
def register_Ns3Ipv4Address_methods(root_module, cls):
    """Register Python bindings for ns3::Ipv4Address (ipv4-address.h, module 'network')."""
    # Comparison and stream operators, in the generator's emission order.
    cls.add_binary_comparison_operator('!=')
    cls.add_binary_comparison_operator('<')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    # Constructors: copy, default, raw 32-bit value, and dotted-quad string.
    for ctor_args in ([param('ns3::Ipv4Address const &', 'arg0')],
                      [],
                      [param('uint32_t', 'address')],
                      [param('char const *', 'address')]):
        cls.add_constructor(ctor_args)
    # Member functions in registration order:
    # (name, return type, parameter list, extra keyword flags).
    method_table = (
        ('CombineMask', 'ns3::Ipv4Address', [param('ns3::Ipv4Mask const &', 'mask')], {'is_const': True}),
        ('ConvertFrom', 'ns3::Ipv4Address', [param('ns3::Address const &', 'address')], {'is_static': True}),
        ('Deserialize', 'ns3::Ipv4Address', [param('uint8_t const *', 'buf')], {'is_static': True}),
        ('Get', 'uint32_t', [], {'is_const': True}),
        ('GetAny', 'ns3::Ipv4Address', [], {'is_static': True}),
        ('GetBroadcast', 'ns3::Ipv4Address', [], {'is_static': True}),
        ('GetLoopback', 'ns3::Ipv4Address', [], {'is_static': True}),
        ('GetSubnetDirectedBroadcast', 'ns3::Ipv4Address', [param('ns3::Ipv4Mask const &', 'mask')], {'is_const': True}),
        ('GetZero', 'ns3::Ipv4Address', [], {'is_static': True}),
        ('IsBroadcast', 'bool', [], {'is_const': True}),
        ('IsEqual', 'bool', [param('ns3::Ipv4Address const &', 'other')], {'is_const': True}),
        ('IsLocalMulticast', 'bool', [], {'is_const': True}),
        ('IsMatchingType', 'bool', [param('ns3::Address const &', 'address')], {'is_static': True}),
        ('IsMulticast', 'bool', [], {'is_const': True}),
        ('IsSubnetDirectedBroadcast', 'bool', [param('ns3::Ipv4Mask const &', 'mask')], {'is_const': True}),
        ('Print', 'void', [param('std::ostream &', 'os')], {'is_const': True}),
        ('Serialize', 'void', [param('uint8_t *', 'buf')], {'is_const': True}),
        # Set is overloaded: raw 32-bit value and dotted-quad string.
        ('Set', 'void', [param('uint32_t', 'address')], {}),
        ('Set', 'void', [param('char const *', 'address')], {}),
    )
    for method_name, return_type, args, extra_kwargs in method_table:
        cls.add_method(method_name, return_type, args, **extra_kwargs)
    return
def register_Ns3Ipv4Mask_methods(root_module, cls):
    """Register Python bindings for ns3::Ipv4Mask (ipv4-address.h, module 'network')."""
    # Comparison and stream operators, in the generator's emission order.
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    # Constructors: copy, default, raw 32-bit mask, and dotted-quad string.
    for ctor_args in ([param('ns3::Ipv4Mask const &', 'arg0')],
                      [],
                      [param('uint32_t', 'mask')],
                      [param('char const *', 'mask')]):
        cls.add_constructor(ctor_args)
    # Member functions in registration order:
    # (name, return type, parameter list, extra keyword flags).
    method_table = (
        ('Get', 'uint32_t', [], {'is_const': True}),
        ('GetInverse', 'uint32_t', [], {'is_const': True}),
        ('GetLoopback', 'ns3::Ipv4Mask', [], {'is_static': True}),
        ('GetOnes', 'ns3::Ipv4Mask', [], {'is_static': True}),
        ('GetPrefixLength', 'uint16_t', [], {'is_const': True}),
        ('GetZero', 'ns3::Ipv4Mask', [], {'is_static': True}),
        ('IsEqual', 'bool', [param('ns3::Ipv4Mask', 'other')], {'is_const': True}),
        ('IsMatch', 'bool', [param('ns3::Ipv4Address', 'a'), param('ns3::Ipv4Address', 'b')], {'is_const': True}),
        ('Print', 'void', [param('std::ostream &', 'os')], {'is_const': True}),
        ('Set', 'void', [param('uint32_t', 'mask')], {}),
    )
    for method_name, return_type, args, extra_kwargs in method_table:
        cls.add_method(method_name, return_type, args, **extra_kwargs)
    return
def register_Ns3Ipv6Address_methods(root_module, cls):
    """Register Python bindings for ns3::Ipv6Address (ipv6-address.h, module 'network')."""
    # Comparison and stream operators, in the generator's emission order.
    cls.add_binary_comparison_operator('!=')
    cls.add_binary_comparison_operator('<')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    # Constructors: default, C-string form, raw byte-buffer form,
    # copy constructor, and pointer form.
    for ctor_args in ([],
                      [param('char const *', 'address')],
                      [param('uint8_t *', 'address')],
                      [param('ns3::Ipv6Address const &', 'addr')],
                      [param('ns3::Ipv6Address const *', 'addr')]):
        cls.add_constructor(ctor_args)
    # Member functions in registration order:
    # (name, return type, parameter list, extra keyword flags).
    # NOTE: CombinePrefix and IsIpv4MappedAddress are registered non-const,
    # matching the C++ signatures the generator saw.
    method_table = (
        ('CombinePrefix', 'ns3::Ipv6Address', [param('ns3::Ipv6Prefix const &', 'prefix')], {}),
        ('ConvertFrom', 'ns3::Ipv6Address', [param('ns3::Address const &', 'address')], {'is_static': True}),
        ('Deserialize', 'ns3::Ipv6Address', [param('uint8_t const *', 'buf')], {'is_static': True}),
        ('GetAllHostsMulticast', 'ns3::Ipv6Address', [], {'is_static': True}),
        ('GetAllNodesMulticast', 'ns3::Ipv6Address', [], {'is_static': True}),
        ('GetAllRoutersMulticast', 'ns3::Ipv6Address', [], {'is_static': True}),
        ('GetAny', 'ns3::Ipv6Address', [], {'is_static': True}),
        ('GetBytes', 'void', [param('uint8_t *', 'buf')], {'is_const': True}),
        ('GetIpv4MappedAddress', 'ns3::Ipv4Address', [], {'is_const': True}),
        ('GetLoopback', 'ns3::Ipv6Address', [], {'is_static': True}),
        ('GetOnes', 'ns3::Ipv6Address', [], {'is_static': True}),
        ('GetZero', 'ns3::Ipv6Address', [], {'is_static': True}),
        ('IsAllHostsMulticast', 'bool', [], {'is_const': True}),
        ('IsAllNodesMulticast', 'bool', [], {'is_const': True}),
        ('IsAllRoutersMulticast', 'bool', [], {'is_const': True}),
        ('IsAny', 'bool', [], {'is_const': True}),
        ('IsEqual', 'bool', [param('ns3::Ipv6Address const &', 'other')], {'is_const': True}),
        ('IsIpv4MappedAddress', 'bool', [], {}),
        ('IsLinkLocal', 'bool', [], {'is_const': True}),
        ('IsLinkLocalMulticast', 'bool', [], {'is_const': True}),
        ('IsLocalhost', 'bool', [], {'is_const': True}),
        ('IsMatchingType', 'bool', [param('ns3::Address const &', 'address')], {'is_static': True}),
        ('IsMulticast', 'bool', [], {'is_const': True}),
        ('IsSolicitedMulticast', 'bool', [], {'is_const': True}),
        ('MakeAutoconfiguredAddress', 'ns3::Ipv6Address', [param('ns3::Mac48Address', 'addr'), param('ns3::Ipv6Address', 'prefix')], {'is_static': True}),
        ('MakeAutoconfiguredLinkLocalAddress', 'ns3::Ipv6Address', [param('ns3::Mac48Address', 'mac')], {'is_static': True}),
        ('MakeIpv4MappedAddress', 'ns3::Ipv6Address', [param('ns3::Ipv4Address', 'addr')], {'is_static': True}),
        ('MakeSolicitedAddress', 'ns3::Ipv6Address', [param('ns3::Ipv6Address', 'addr')], {'is_static': True}),
        ('Print', 'void', [param('std::ostream &', 'os')], {'is_const': True}),
        ('Serialize', 'void', [param('uint8_t *', 'buf')], {'is_const': True}),
        # Set is overloaded: C-string form and raw byte-buffer form.
        ('Set', 'void', [param('char const *', 'address')], {}),
        ('Set', 'void', [param('uint8_t *', 'address')], {}),
    )
    for method_name, return_type, args, extra_kwargs in method_table:
        cls.add_method(method_name, return_type, args, **extra_kwargs)
    return
def register_Ns3Ipv6Prefix_methods(root_module, cls):
    """Register Python bindings for ns3::Ipv6Prefix (ipv6-address.h, module 'network')."""
    # Comparison and stream operators, in the generator's emission order.
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    # Constructors: default, raw byte buffer, C-string, prefix length,
    # copy constructor, and pointer form.
    for ctor_args in ([],
                      [param('uint8_t *', 'prefix')],
                      [param('char const *', 'prefix')],
                      [param('uint8_t', 'prefix')],
                      [param('ns3::Ipv6Prefix const &', 'prefix')],
                      [param('ns3::Ipv6Prefix const *', 'prefix')]):
        cls.add_constructor(ctor_args)
    # Member functions in registration order:
    # (name, return type, parameter list, extra keyword flags).
    method_table = (
        ('GetBytes', 'void', [param('uint8_t *', 'buf')], {'is_const': True}),
        ('GetLoopback', 'ns3::Ipv6Prefix', [], {'is_static': True}),
        ('GetOnes', 'ns3::Ipv6Prefix', [], {'is_static': True}),
        ('GetPrefixLength', 'uint8_t', [], {'is_const': True}),
        ('GetZero', 'ns3::Ipv6Prefix', [], {'is_static': True}),
        ('IsEqual', 'bool', [param('ns3::Ipv6Prefix const &', 'other')], {'is_const': True}),
        ('IsMatch', 'bool', [param('ns3::Ipv6Address', 'a'), param('ns3::Ipv6Address', 'b')], {'is_const': True}),
        ('Print', 'void', [param('std::ostream &', 'os')], {'is_const': True}),
    )
    for method_name, return_type, args, extra_kwargs in method_table:
        cls.add_method(method_name, return_type, args, **extra_kwargs)
    return
def register_Ns3LogComponent_methods(root_module, cls):
    """Register Python bindings for ns3::LogComponent (log.h, module 'core')."""
    # Constructors: copy constructor and the named-component constructor.
    cls.add_constructor([param('ns3::LogComponent const &', 'arg0')])
    cls.add_constructor([param('char const *', 'name')])
    # Severity toggles: each takes a LogLevel and mutates the component.
    for toggle_name in ('Disable', 'Enable'):
        cls.add_method(toggle_name, 'void', [param('ns3::LogLevel', 'level')])
    # Reads the NS_LOG environment variable for this component.
    cls.add_method('EnvVarCheck', 'void', [param('char const *', 'name')])
    # Const queries, in registration order: (name, return type, parameters).
    for query_name, return_type, args in (('IsEnabled', 'bool', [param('ns3::LogLevel', 'level')]),
                                          ('IsNoneEnabled', 'bool', []),
                                          ('Name', 'char const *', [])):
        cls.add_method(query_name, return_type, args, is_const=True)
    return
def register_Ns3Mac48Address_methods(root_module, cls):
    """Register Python bindings for ns3::Mac48Address (mac48-address.h, module 'network')."""
    # Comparison and stream operators, in the generator's emission order.
    cls.add_binary_comparison_operator('!=')
    cls.add_binary_comparison_operator('<')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    # Constructors: copy, default, and "00:00:00:00:00:00"-style string.
    for ctor_args in ([param('ns3::Mac48Address const &', 'arg0')],
                      [],
                      [param('char const *', 'str')]):
        cls.add_constructor(ctor_args)
    # Member functions in registration order:
    # (name, return type, parameter list, extra keyword flags).
    method_table = (
        ('Allocate', 'ns3::Mac48Address', [], {'is_static': True}),
        ('ConvertFrom', 'ns3::Mac48Address', [param('ns3::Address const &', 'address')], {'is_static': True}),
        ('CopyFrom', 'void', [param('uint8_t const *', 'buffer')], {}),
        ('CopyTo', 'void', [param('uint8_t *', 'buffer')], {'is_const': True}),
        ('GetBroadcast', 'ns3::Mac48Address', [], {'is_static': True}),
        # GetMulticast is overloaded on IPv4 and IPv6 group addresses.
        ('GetMulticast', 'ns3::Mac48Address', [param('ns3::Ipv4Address', 'address')], {'is_static': True}),
        ('GetMulticast', 'ns3::Mac48Address', [param('ns3::Ipv6Address', 'address')], {'is_static': True}),
        ('GetMulticast6Prefix', 'ns3::Mac48Address', [], {'is_static': True}),
        ('GetMulticastPrefix', 'ns3::Mac48Address', [], {'is_static': True}),
        ('IsBroadcast', 'bool', [], {'is_const': True}),
        ('IsGroup', 'bool', [], {'is_const': True}),
        ('IsMatchingType', 'bool', [param('ns3::Address const &', 'address')], {'is_static': True}),
    )
    for method_name, return_type, args, extra_kwargs in method_table:
        cls.add_method(method_name, return_type, args, **extra_kwargs)
    return
def register_Ns3NetDeviceContainer_methods(root_module, cls):
    """Register Python bindings for ns3::NetDeviceContainer (net-device-container.h, module 'network')."""
    # Constructors, in registration order: copy, default, single device,
    # device name lookup, and concatenation of two containers.
    for ctor_args in ([param('ns3::NetDeviceContainer const &', 'arg0')],
                      [],
                      [param('ns3::Ptr< ns3::NetDevice >', 'dev')],
                      [param('std::string', 'devName')],
                      [param('ns3::NetDeviceContainer const &', 'a'), param('ns3::NetDeviceContainer const &', 'b')]):
        cls.add_constructor(ctor_args)
    # Add() overloads accepting another container, a device pointer, or a device name.
    for add_arg in (param('ns3::NetDeviceContainer', 'other'),
                    param('ns3::Ptr< ns3::NetDevice >', 'device'),
                    param('std::string', 'deviceName')):
        cls.add_method('Add', 'void', [add_arg])
    # Const iteration over the underlying std::vector of device pointers.
    iterator_type = '__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::NetDevice > const, std::vector< ns3::Ptr< ns3::NetDevice > > >'
    cls.add_method('Begin', iterator_type, [], is_const=True)
    cls.add_method('End', iterator_type, [], is_const=True)
    # Indexed element access and element count.
    cls.add_method('Get', 'ns3::Ptr< ns3::NetDevice >', [param('uint32_t', 'i')], is_const=True)
    cls.add_method('GetN', 'uint32_t', [], is_const=True)
    return
def register_Ns3NodeContainer_methods(root_module, cls):
    """Register Python bindings for ns3::NodeContainer (node-container.h, module 'network')."""
    # Copy, default, single-node, and node-name constructors.
    cls.add_constructor([param('ns3::NodeContainer const &', 'arg0')])
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Ptr< ns3::Node >', 'node')])
    cls.add_constructor([param('std::string', 'nodeName')])
    # Concatenating constructors taking 2 to 5 containers named a..e.
    arg_names = ('a', 'b', 'c', 'd', 'e')
    for count in range(2, 6):
        cls.add_constructor([param('ns3::NodeContainer const &', name)
                             for name in arg_names[:count]])
    # Add() overloads: another container, a node pointer, or a node name.
    for add_arg in (param('ns3::NodeContainer', 'other'),
                    param('ns3::Ptr< ns3::Node >', 'node'),
                    param('std::string', 'nodeName')):
        cls.add_method('Add', 'void', [add_arg])
    iterator_type = '__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::Node > const, std::vector< ns3::Ptr< ns3::Node > > >'
    cls.add_method('Begin', iterator_type, [], is_const=True)
    # Create() bulk-allocates n nodes; the two-argument overload also takes a system id.
    cls.add_method('Create', 'void', [param('uint32_t', 'n')])
    cls.add_method('Create', 'void', [param('uint32_t', 'n'), param('uint32_t', 'systemId')])
    cls.add_method('End', iterator_type, [], is_const=True)
    cls.add_method('Get', 'ns3::Ptr< ns3::Node >', [param('uint32_t', 'i')], is_const=True)
    # Static accessor for the global container of all created nodes.
    cls.add_method('GetGlobal', 'ns3::NodeContainer', [], is_static=True)
    cls.add_method('GetN', 'uint32_t', [], is_const=True)
    return
def register_Ns3ObjectBase_methods(root_module, cls):
    """Register Python bindings for ns3::ObjectBase (object-base.h, module 'core')."""
    cls.add_constructor([])
    cls.add_constructor([param('ns3::ObjectBase const &', 'arg0')])
    # Attribute getters: plain (void) and fail-safe (bool) variants, both const.
    cls.add_method('GetAttribute', 'void',
                   [param('std::string', 'name'), param('ns3::AttributeValue &', 'value')],
                   is_const=True)
    cls.add_method('GetAttributeFailSafe', 'bool',
                   [param('std::string', 'name'), param('ns3::AttributeValue &', 'attribute')],
                   is_const=True)
    # Runtime type identification: pure-virtual instance query plus static class query.
    cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    # Attribute setters: plain (void) and fail-safe (bool) variants.
    cls.add_method('SetAttribute', 'void',
                   [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
    cls.add_method('SetAttributeFailSafe', 'bool',
                   [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
    # Trace source (dis)connection; the *WithoutContext variants drop the context string.
    for trace_name in ('TraceConnect', 'TraceConnectWithoutContext',
                       'TraceDisconnect', 'TraceDisconnectWithoutContext'):
        trace_args = [param('std::string', 'name')]
        if not trace_name.endswith('WithoutContext'):
            trace_args.append(param('std::string', 'context'))
        trace_args.append(param('ns3::CallbackBase const &', 'cb'))
        cls.add_method(trace_name, 'bool', trace_args)
    # Protected construction hooks.
    cls.add_method('ConstructSelf', 'void',
                   [param('ns3::AttributeConstructionList const &', 'attributes')],
                   visibility='protected')
    cls.add_method('NotifyConstructionCompleted', 'void', [],
                   visibility='protected', is_virtual=True)
    return
def register_Ns3ObjectDeleter_methods(root_module, cls):
    """Register Python bindings for ns3::ObjectDeleter (object.h, module 'core')."""
    # Default and copy constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::ObjectDeleter const &', 'arg0')])
    # Static Delete(Object *) disposal hook.
    cls.add_method('Delete', 'void',
                   [param('ns3::Object *', 'object')],
                   is_static=True)
    return
def register_Ns3ObjectFactory_methods(root_module, cls):
    """Register Python bindings for ns3::ObjectFactory (object-factory.h, module 'core')."""
    # Enable operator<< so factories can be streamed/printed.
    cls.add_output_stream_operator()
    cls.add_constructor([param('ns3::ObjectFactory const &', 'arg0')])
    cls.add_constructor([])
    cls.add_constructor([param('std::string', 'typeId')])
    cls.add_method('Create', 'ns3::Ptr< ns3::Object >', [], is_const=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_const=True)
    cls.add_method('Set', 'void',
                   [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
    # SetTypeId overloads: TypeId object, C string, std::string (registration order kept).
    for tid_arg in (param('ns3::TypeId', 'tid'),
                    param('char const *', 'tid'),
                    param('std::string', 'tid')):
        cls.add_method('SetTypeId', 'void', [tid_arg])
    return
def register_Ns3OfdmDcdChannelEncodings_methods(root_module, cls):
    """Register Python bindings for ns3::OfdmDcdChannelEncodings (dl-mac-messages.h, module 'wimax')."""
    cls.add_constructor([param('ns3::OfdmDcdChannelEncodings const &', 'arg0')])
    cls.add_constructor([])
    # Const, argument-less getters: (method name, return type).
    for getter, return_type in (('GetBaseStationId', 'ns3::Mac48Address'),
                                ('GetChannelNr', 'uint8_t'),
                                ('GetFrameDurationCode', 'uint8_t'),
                                ('GetFrameNumber', 'uint32_t'),
                                ('GetRtg', 'uint8_t'),
                                ('GetSize', 'uint16_t'),
                                ('GetTtg', 'uint8_t')):
        cls.add_method(getter, return_type, [], is_const=True)
    # Void setters: (method name, parameter type, parameter name).
    for setter, param_type, param_name in (('SetBaseStationId', 'ns3::Mac48Address', 'baseStationId'),
                                           ('SetChannelNr', 'uint8_t', 'channelNr'),
                                           ('SetFrameDurationCode', 'uint8_t', 'frameDurationCode'),
                                           ('SetFrameNumber', 'uint32_t', 'frameNumber'),
                                           ('SetRtg', 'uint8_t', 'rtg'),
                                           ('SetTtg', 'uint8_t', 'ttg')):
        cls.add_method(setter, 'void', [param(param_type, param_name)])
    # Private virtual buffer-iterator hooks; DoWrite is additionally const.
    cls.add_method('DoRead', 'ns3::Buffer::Iterator',
                   [param('ns3::Buffer::Iterator', 'start')],
                   visibility='private', is_virtual=True)
    cls.add_method('DoWrite', 'ns3::Buffer::Iterator',
                   [param('ns3::Buffer::Iterator', 'start')],
                   is_const=True, visibility='private', is_virtual=True)
    return
def register_Ns3OfdmDlBurstProfile_methods(root_module, cls):
    """Register Python bindings for ns3::OfdmDlBurstProfile (dl-mac-messages.h, module 'wimax')."""
    cls.add_constructor([param('ns3::OfdmDlBurstProfile const &', 'arg0')])
    cls.add_constructor([])
    # Const, argument-less getters: (method name, return type).
    for getter, return_type in (('GetDiuc', 'uint8_t'),
                                ('GetFecCodeType', 'uint8_t'),
                                ('GetLength', 'uint8_t'),
                                ('GetSize', 'uint16_t'),
                                ('GetType', 'uint8_t')):
        cls.add_method(getter, return_type, [], is_const=True)
    # Deserialize from a buffer iterator, returning the advanced iterator.
    cls.add_method('Read', 'ns3::Buffer::Iterator',
                   [param('ns3::Buffer::Iterator', 'start')])
    # Void setters, each taking a single uint8_t.
    for setter, param_name in (('SetDiuc', 'diuc'),
                               ('SetFecCodeType', 'fecCodeType'),
                               ('SetLength', 'length'),
                               ('SetType', 'type')):
        cls.add_method(setter, 'void', [param('uint8_t', param_name)])
    # Serialize to a buffer iterator (const counterpart of Read).
    cls.add_method('Write', 'ns3::Buffer::Iterator',
                   [param('ns3::Buffer::Iterator', 'start')], is_const=True)
    return
def register_Ns3OfdmDlMapIe_methods(root_module, cls):
    """Register Python bindings for ns3::OfdmDlMapIe (dl-mac-messages.h, module 'wimax')."""
    cls.add_constructor([param('ns3::OfdmDlMapIe const &', 'arg0')])
    cls.add_constructor([])
    # Const, argument-less getters: (method name, return type).
    for getter, return_type in (('GetCid', 'ns3::Cid'),
                                ('GetDiuc', 'uint8_t'),
                                ('GetPreamblePresent', 'uint8_t'),
                                ('GetSize', 'uint16_t'),
                                ('GetStartTime', 'uint16_t')):
        cls.add_method(getter, return_type, [], is_const=True)
    # Deserialize from a buffer iterator, returning the advanced iterator.
    cls.add_method('Read', 'ns3::Buffer::Iterator',
                   [param('ns3::Buffer::Iterator', 'start')])
    # Void setters: (method name, parameter type, parameter name).
    for setter, param_type, param_name in (('SetCid', 'ns3::Cid', 'cid'),
                                           ('SetDiuc', 'uint8_t', 'diuc'),
                                           ('SetPreamblePresent', 'uint8_t', 'preamblePresent'),
                                           ('SetStartTime', 'uint16_t', 'startTime')):
        cls.add_method(setter, 'void', [param(param_type, param_name)])
    # Serialize to a buffer iterator (const counterpart of Read).
    cls.add_method('Write', 'ns3::Buffer::Iterator',
                   [param('ns3::Buffer::Iterator', 'start')], is_const=True)
    return
def register_Ns3OfdmUlBurstProfile_methods(root_module, cls):
    """Register Python bindings for ns3::OfdmUlBurstProfile (ul-mac-messages.h, module 'wimax')."""
    cls.add_constructor([param('ns3::OfdmUlBurstProfile const &', 'arg0')])
    cls.add_constructor([])
    # Const, argument-less getters: (method name, return type).
    for getter, return_type in (('GetFecCodeType', 'uint8_t'),
                                ('GetLength', 'uint8_t'),
                                ('GetSize', 'uint16_t'),
                                ('GetType', 'uint8_t'),
                                ('GetUiuc', 'uint8_t')):
        cls.add_method(getter, return_type, [], is_const=True)
    # Deserialize from a buffer iterator, returning the advanced iterator.
    cls.add_method('Read', 'ns3::Buffer::Iterator',
                   [param('ns3::Buffer::Iterator', 'start')])
    # Void setters, each taking a single uint8_t.
    for setter, param_name in (('SetFecCodeType', 'fecCodeType'),
                               ('SetLength', 'length'),
                               ('SetType', 'type'),
                               ('SetUiuc', 'uiuc')):
        cls.add_method(setter, 'void', [param('uint8_t', param_name)])
    # Serialize to a buffer iterator (const counterpart of Read).
    cls.add_method('Write', 'ns3::Buffer::Iterator',
                   [param('ns3::Buffer::Iterator', 'start')], is_const=True)
    return
def register_Ns3OfdmUlMapIe_methods(root_module, cls):
    """Register Python bindings for ns3::OfdmUlMapIe (ul-mac-messages.h, module 'wimax')."""
    cls.add_constructor([param('ns3::OfdmUlMapIe const &', 'arg0')])
    cls.add_constructor([])
    # Const, argument-less getters: (method name, return type).
    for getter, return_type in (('GetCid', 'ns3::Cid'),
                                ('GetDuration', 'uint16_t'),
                                ('GetMidambleRepetitionInterval', 'uint8_t'),
                                ('GetSize', 'uint16_t'),
                                ('GetStartTime', 'uint16_t'),
                                ('GetSubchannelIndex', 'uint8_t'),
                                ('GetUiuc', 'uint8_t')):
        cls.add_method(getter, return_type, [], is_const=True)
    # Deserialize from a buffer iterator, returning the advanced iterator.
    cls.add_method('Read', 'ns3::Buffer::Iterator',
                   [param('ns3::Buffer::Iterator', 'start')])
    # Void setters: (method name, parameter type, parameter name).
    for setter, param_type, param_name in (('SetCid', 'ns3::Cid', 'cid'),
                                           ('SetDuration', 'uint16_t', 'duration'),
                                           ('SetMidambleRepetitionInterval', 'uint8_t', 'midambleRepetitionInterval'),
                                           ('SetStartTime', 'uint16_t', 'startTime'),
                                           ('SetSubchannelIndex', 'uint8_t', 'subchannelIndex'),
                                           ('SetUiuc', 'uint8_t', 'uiuc')):
        cls.add_method(setter, 'void', [param(param_type, param_name)])
    # Serialize to a buffer iterator (const counterpart of Read).
    cls.add_method('Write', 'ns3::Buffer::Iterator',
                   [param('ns3::Buffer::Iterator', 'start')], is_const=True)
    return
def register_Ns3PacketMetadata_methods(root_module, cls):
    """Register Python bindings for ns3::PacketMetadata (packet-metadata.h, module 'network')."""
    # Constructors: (uid, size) and copy.
    cls.add_constructor([param('uint64_t', 'uid'), param('uint32_t', 'size')])
    cls.add_constructor([param('ns3::PacketMetadata const &', 'o')])
    # Mutators appending metadata, headers, padding, and trailers.
    cls.add_method('AddAtEnd', 'void', [param('ns3::PacketMetadata const &', 'o')])
    cls.add_method('AddHeader', 'void',
                   [param('ns3::Header const &', 'header'), param('uint32_t', 'size')])
    cls.add_method('AddPaddingAtEnd', 'void', [param('uint32_t', 'end')])
    cls.add_method('AddTrailer', 'void',
                   [param('ns3::Trailer const &', 'trailer'), param('uint32_t', 'size')])
    # Const queries: item iteration, fragment extraction.
    cls.add_method('BeginItem', 'ns3::PacketMetadata::ItemIterator',
                   [param('ns3::Buffer', 'buffer')], is_const=True)
    cls.add_method('CreateFragment', 'ns3::PacketMetadata',
                   [param('uint32_t', 'start'), param('uint32_t', 'end')], is_const=True)
    cls.add_method('Deserialize', 'uint32_t',
                   [param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
    # Static, no-argument switches.
    for static_name in ('Enable', 'EnableChecking'):
        cls.add_method(static_name, 'void', [], is_static=True)
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True)
    cls.add_method('GetUid', 'uint64_t', [], is_const=True)
    # Mutators removing bytes, headers, and trailers.
    cls.add_method('RemoveAtEnd', 'void', [param('uint32_t', 'end')])
    cls.add_method('RemoveAtStart', 'void', [param('uint32_t', 'start')])
    cls.add_method('RemoveHeader', 'void',
                   [param('ns3::Header const &', 'header'), param('uint32_t', 'size')])
    cls.add_method('RemoveTrailer', 'void',
                   [param('ns3::Trailer const &', 'trailer'), param('uint32_t', 'size')])
    cls.add_method('Serialize', 'uint32_t',
                   [param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')], is_const=True)
    return
def register_Ns3PacketMetadataItem_methods(root_module, cls):
    """Register Python bindings for ns3::PacketMetadata::Item (packet-metadata.h, module 'network')."""
    cls.add_constructor([])
    cls.add_constructor([param('ns3::PacketMetadata::Item const &', 'arg0')])
    # Public data members exposed as mutable instance attributes: (name, C++ type).
    for attr_name, attr_type in (('current', 'ns3::Buffer::Iterator'),
                                 ('currentSize', 'uint32_t'),
                                 ('currentTrimedFromEnd', 'uint32_t'),
                                 ('currentTrimedFromStart', 'uint32_t'),
                                 ('isFragment', 'bool'),
                                 ('tid', 'ns3::TypeId')):
        cls.add_instance_attribute(attr_name, attr_type, is_const=False)
    return
def register_Ns3PacketMetadataItemIterator_methods(root_module, cls):
    """Register bindings for ns3::PacketMetadata::ItemIterator
    (packet-metadata.h): two constructors and the HasNext/Next pair.
    """
    # Copy constructor, then the (metadata, buffer) constructor.
    cls.add_constructor([param('ns3::PacketMetadata::ItemIterator const &', 'arg0')])
    cls.add_constructor([param('ns3::PacketMetadata const *', 'metadata'),
                         param('ns3::Buffer', 'buffer')])
    # bool HasNext() const
    cls.add_method('HasNext', 'bool', [], is_const=True)
    # PacketMetadata::Item Next()
    cls.add_method('Next', 'ns3::PacketMetadata::Item', [])
    return
def register_Ns3PacketTagIterator_methods(root_module, cls):
    """Register bindings for ns3::PacketTagIterator (packet.h):
    copy constructor plus the HasNext/Next iteration pair.
    """
    cls.add_constructor([param('ns3::PacketTagIterator const &', 'arg0')])
    # bool HasNext() const
    cls.add_method('HasNext', 'bool', [], is_const=True)
    # PacketTagIterator::Item Next()
    cls.add_method('Next', 'ns3::PacketTagIterator::Item', [])
    return
def register_Ns3PacketTagIteratorItem_methods(root_module, cls):
    """Register bindings for ns3::PacketTagIterator::Item (packet.h):
    copy constructor and the GetTag/GetTypeId const accessors.
    """
    cls.add_constructor([param('ns3::PacketTagIterator::Item const &', 'arg0')])
    # void GetTag(ns3::Tag & tag) const — fills the caller-provided tag.
    cls.add_method('GetTag', 'void', [param('ns3::Tag &', 'tag')], is_const=True)
    # ns3::TypeId GetTypeId() const
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_const=True)
    return
def register_Ns3PacketTagList_methods(root_module, cls):
    """Register bindings for ns3::PacketTagList (packet-tag-list.h):
    constructors plus Add/Head/Peek (const) and Remove/RemoveAll.
    """
    # Default and copy constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::PacketTagList const &', 'o')])
    # Const member functions.
    cls.add_method('Add', 'void', [param('ns3::Tag const &', 'tag')],
                   is_const=True)
    cls.add_method('Head', 'ns3::PacketTagList::TagData const *', [],
                   is_const=True)
    cls.add_method('Peek', 'bool', [param('ns3::Tag &', 'tag')],
                   is_const=True)
    # Mutating member functions.
    cls.add_method('Remove', 'bool', [param('ns3::Tag &', 'tag')])
    cls.add_method('RemoveAll', 'void', [])
    return
def register_Ns3PacketTagListTagData_methods(root_module, cls):
    """Register bindings for ns3::PacketTagList::TagData
    (packet-tag-list.h): constructors plus its public fields
    (count, 20-byte data buffer, next pointer, tid).
    """
    # Default constructor, then the copy constructor.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::PacketTagList::TagData const &', 'arg0')])
    # Public fields exposed as writable instance attributes.
    for member, cpp_type in (
            ('count', 'uint32_t'),
            ('data', 'uint8_t [ 20 ]'),
            ('next', 'ns3::PacketTagList::TagData *'),
            ('tid', 'ns3::TypeId')):
        cls.add_instance_attribute(member, cpp_type, is_const=False)
    return
def register_Ns3PcapFile_methods(root_module, cls):
    """Register bindings for ns3::PcapFile (pcap-file.h).

    Covers the default constructor, file lifecycle (Open/Close/Clear),
    the pcap global-header accessors, Init, the per-record Read and the
    three Write overloads, the static Diff comparison helper, and the
    SNAPLEN_DEFAULT / ZONE_DEFAULT class constants.
    """
    ## pcap-file.h (module 'network'): ns3::PcapFile::PcapFile() [constructor]
    cls.add_constructor([])
    ## pcap-file.h (module 'network'): void ns3::PcapFile::Clear() [member function]
    cls.add_method('Clear',
                   'void',
                   [])
    ## pcap-file.h (module 'network'): void ns3::PcapFile::Close() [member function]
    cls.add_method('Close',
                   'void',
                   [])
    ## pcap-file.h (module 'network'): static bool ns3::PcapFile::Diff(std::string const & f1, std::string const & f2, uint32_t & sec, uint32_t & usec, uint32_t snapLen=ns3::PcapFile::SNAPLEN_DEFAULT) [member function]
    cls.add_method('Diff',
                   'bool',
                   [param('std::string const &', 'f1'), param('std::string const &', 'f2'), param('uint32_t &', 'sec'), param('uint32_t &', 'usec'), param('uint32_t', 'snapLen', default_value='ns3::PcapFile::SNAPLEN_DEFAULT')],
                   is_static=True)
    ## pcap-file.h (module 'network'): bool ns3::PcapFile::Eof() const [member function]
    cls.add_method('Eof',
                   'bool',
                   [],
                   is_const=True)
    ## pcap-file.h (module 'network'): bool ns3::PcapFile::Fail() const [member function]
    cls.add_method('Fail',
                   'bool',
                   [],
                   is_const=True)
    ## pcap-file.h (module 'network'): uint32_t ns3::PcapFile::GetDataLinkType() [member function]
    cls.add_method('GetDataLinkType',
                   'uint32_t',
                   [])
    ## pcap-file.h (module 'network'): uint32_t ns3::PcapFile::GetMagic() [member function]
    cls.add_method('GetMagic',
                   'uint32_t',
                   [])
    ## pcap-file.h (module 'network'): uint32_t ns3::PcapFile::GetSigFigs() [member function]
    cls.add_method('GetSigFigs',
                   'uint32_t',
                   [])
    ## pcap-file.h (module 'network'): uint32_t ns3::PcapFile::GetSnapLen() [member function]
    cls.add_method('GetSnapLen',
                   'uint32_t',
                   [])
    ## pcap-file.h (module 'network'): bool ns3::PcapFile::GetSwapMode() [member function]
    cls.add_method('GetSwapMode',
                   'bool',
                   [])
    ## pcap-file.h (module 'network'): int32_t ns3::PcapFile::GetTimeZoneOffset() [member function]
    cls.add_method('GetTimeZoneOffset',
                   'int32_t',
                   [])
    ## pcap-file.h (module 'network'): uint16_t ns3::PcapFile::GetVersionMajor() [member function]
    cls.add_method('GetVersionMajor',
                   'uint16_t',
                   [])
    ## pcap-file.h (module 'network'): uint16_t ns3::PcapFile::GetVersionMinor() [member function]
    cls.add_method('GetVersionMinor',
                   'uint16_t',
                   [])
    ## pcap-file.h (module 'network'): void ns3::PcapFile::Init(uint32_t dataLinkType, uint32_t snapLen=ns3::PcapFile::SNAPLEN_DEFAULT, int32_t timeZoneCorrection=ns3::PcapFile::ZONE_DEFAULT, bool swapMode=false) [member function]
    cls.add_method('Init',
                   'void',
                   [param('uint32_t', 'dataLinkType'), param('uint32_t', 'snapLen', default_value='ns3::PcapFile::SNAPLEN_DEFAULT'), param('int32_t', 'timeZoneCorrection', default_value='ns3::PcapFile::ZONE_DEFAULT'), param('bool', 'swapMode', default_value='false')])
    ## pcap-file.h (module 'network'): void ns3::PcapFile::Open(std::string const & filename, std::_Ios_Openmode mode) [member function]
    cls.add_method('Open',
                   'void',
                   [param('std::string const &', 'filename'), param('std::_Ios_Openmode', 'mode')])
    ## pcap-file.h (module 'network'): void ns3::PcapFile::Read(uint8_t * const data, uint32_t maxBytes, uint32_t & tsSec, uint32_t & tsUsec, uint32_t & inclLen, uint32_t & origLen, uint32_t & readLen) [member function]
    cls.add_method('Read',
                   'void',
                   [param('uint8_t * const', 'data'), param('uint32_t', 'maxBytes'), param('uint32_t &', 'tsSec'), param('uint32_t &', 'tsUsec'), param('uint32_t &', 'inclLen'), param('uint32_t &', 'origLen'), param('uint32_t &', 'readLen')])
    ## pcap-file.h (module 'network'): void ns3::PcapFile::Write(uint32_t tsSec, uint32_t tsUsec, uint8_t const * const data, uint32_t totalLen) [member function]
    cls.add_method('Write',
                   'void',
                   [param('uint32_t', 'tsSec'), param('uint32_t', 'tsUsec'), param('uint8_t const * const', 'data'), param('uint32_t', 'totalLen')])
    ## pcap-file.h (module 'network'): void ns3::PcapFile::Write(uint32_t tsSec, uint32_t tsUsec, ns3::Ptr<const ns3::Packet> p) [member function]
    cls.add_method('Write',
                   'void',
                   [param('uint32_t', 'tsSec'), param('uint32_t', 'tsUsec'), param('ns3::Ptr< ns3::Packet const >', 'p')])
    ## pcap-file.h (module 'network'): void ns3::PcapFile::Write(uint32_t tsSec, uint32_t tsUsec, ns3::Header & header, ns3::Ptr<const ns3::Packet> p) [member function]
    cls.add_method('Write',
                   'void',
                   [param('uint32_t', 'tsSec'), param('uint32_t', 'tsUsec'), param('ns3::Header &', 'header'), param('ns3::Ptr< ns3::Packet const >', 'p')])
    ## pcap-file.h (module 'network'): ns3::PcapFile::SNAPLEN_DEFAULT [variable]
    cls.add_static_attribute('SNAPLEN_DEFAULT', 'uint32_t const', is_const=True)
    ## pcap-file.h (module 'network'): ns3::PcapFile::ZONE_DEFAULT [variable]
    cls.add_static_attribute('ZONE_DEFAULT', 'int32_t const', is_const=True)
    return
def register_Ns3PcapHelper_methods(root_module, cls):
    """Register bindings for ns3::PcapHelper (trace-helper.h):
    constructors, CreateFile, and the two filename-building helpers.
    """
    # Copy constructor first (generator order), then the default one.
    cls.add_constructor([param('ns3::PcapHelper const &', 'arg0')])
    cls.add_constructor([])
    # Ptr<PcapFileWrapper> CreateFile(filename, filemode, dataLinkType,
    #                                 snapLen=65535, tzCorrection=0)
    create_file_args = [
        param('std::string', 'filename'),
        param('std::_Ios_Openmode', 'filemode'),
        param('uint32_t', 'dataLinkType'),
        param('uint32_t', 'snapLen', default_value='65535'),
        param('int32_t', 'tzCorrection', default_value='0'),
    ]
    cls.add_method('CreateFile', 'ns3::Ptr< ns3::PcapFileWrapper >',
                   create_file_args)
    # std::string GetFilenameFromDevice(prefix, device, useObjectNames=true)
    cls.add_method('GetFilenameFromDevice', 'std::string',
                   [param('std::string', 'prefix'),
                    param('ns3::Ptr< ns3::NetDevice >', 'device'),
                    param('bool', 'useObjectNames', default_value='true')])
    # std::string GetFilenameFromInterfacePair(prefix, object, interface,
    #                                          useObjectNames=true)
    cls.add_method('GetFilenameFromInterfacePair', 'std::string',
                   [param('std::string', 'prefix'),
                    param('ns3::Ptr< ns3::Object >', 'object'),
                    param('uint32_t', 'interface'),
                    param('bool', 'useObjectNames', default_value='true')])
    return
def register_Ns3PcapHelperForDevice_methods(root_module, cls):
    """Register bindings for ns3::PcapHelperForDevice (trace-helper.h).

    Registers the five EnablePcap overloads (by device pointer, device
    name, device container, node container, and node/device id pair),
    EnablePcapAll, and the pure-virtual EnablePcapInternal hook that
    concrete helpers override.
    """
    ## trace-helper.h (module 'network'): ns3::PcapHelperForDevice::PcapHelperForDevice(ns3::PcapHelperForDevice const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::PcapHelperForDevice const &', 'arg0')])
    ## trace-helper.h (module 'network'): ns3::PcapHelperForDevice::PcapHelperForDevice() [constructor]
    cls.add_constructor([])
    ## trace-helper.h (module 'network'): void ns3::PcapHelperForDevice::EnablePcap(std::string prefix, ns3::Ptr<ns3::NetDevice> nd, bool promiscuous=false, bool explicitFilename=false) [member function]
    cls.add_method('EnablePcap',
                   'void',
                   [param('std::string', 'prefix'), param('ns3::Ptr< ns3::NetDevice >', 'nd'), param('bool', 'promiscuous', default_value='false'), param('bool', 'explicitFilename', default_value='false')])
    ## trace-helper.h (module 'network'): void ns3::PcapHelperForDevice::EnablePcap(std::string prefix, std::string ndName, bool promiscuous=false, bool explicitFilename=false) [member function]
    cls.add_method('EnablePcap',
                   'void',
                   [param('std::string', 'prefix'), param('std::string', 'ndName'), param('bool', 'promiscuous', default_value='false'), param('bool', 'explicitFilename', default_value='false')])
    ## trace-helper.h (module 'network'): void ns3::PcapHelperForDevice::EnablePcap(std::string prefix, ns3::NetDeviceContainer d, bool promiscuous=false) [member function]
    cls.add_method('EnablePcap',
                   'void',
                   [param('std::string', 'prefix'), param('ns3::NetDeviceContainer', 'd'), param('bool', 'promiscuous', default_value='false')])
    ## trace-helper.h (module 'network'): void ns3::PcapHelperForDevice::EnablePcap(std::string prefix, ns3::NodeContainer n, bool promiscuous=false) [member function]
    cls.add_method('EnablePcap',
                   'void',
                   [param('std::string', 'prefix'), param('ns3::NodeContainer', 'n'), param('bool', 'promiscuous', default_value='false')])
    ## trace-helper.h (module 'network'): void ns3::PcapHelperForDevice::EnablePcap(std::string prefix, uint32_t nodeid, uint32_t deviceid, bool promiscuous=false) [member function]
    cls.add_method('EnablePcap',
                   'void',
                   [param('std::string', 'prefix'), param('uint32_t', 'nodeid'), param('uint32_t', 'deviceid'), param('bool', 'promiscuous', default_value='false')])
    ## trace-helper.h (module 'network'): void ns3::PcapHelperForDevice::EnablePcapAll(std::string prefix, bool promiscuous=false) [member function]
    cls.add_method('EnablePcapAll',
                   'void',
                   [param('std::string', 'prefix'), param('bool', 'promiscuous', default_value='false')])
    ## trace-helper.h (module 'network'): void ns3::PcapHelperForDevice::EnablePcapInternal(std::string prefix, ns3::Ptr<ns3::NetDevice> nd, bool promiscuous, bool explicitFilename) [member function]
    cls.add_method('EnablePcapInternal',
                   'void',
                   [param('std::string', 'prefix'), param('ns3::Ptr< ns3::NetDevice >', 'nd'), param('bool', 'promiscuous'), param('bool', 'explicitFilename')],
                   is_pure_virtual=True, is_virtual=True)
    return
def register_Ns3SNRToBlockErrorRateManager_methods(root_module, cls):
    """Register bindings for ns3::SNRToBlockErrorRateManager
    (snr-to-block-error-rate-manager.h): constructors, the
    SNR/modulation lookups, trace-file path accessors, and the three
    trace-loading entry points.
    """
    # Both lookup methods share the same (SNR, modulation) signature;
    # build fresh param objects for each registration.
    def _snr_mod_args():
        return [param('double', 'SNR'), param('uint8_t', 'modulation')]

    cls.add_constructor([param('ns3::SNRToBlockErrorRateManager const &', 'arg0')])
    cls.add_constructor([])
    # void ActivateLoss(bool loss)
    cls.add_method('ActivateLoss', 'void', [param('bool', 'loss')])
    # double GetBlockErrorRate(double SNR, uint8_t modulation)
    cls.add_method('GetBlockErrorRate', 'double', _snr_mod_args())
    # SNRToBlockErrorRateRecord* GetSNRToBlockErrorRateRecord(SNR, modulation)
    cls.add_method('GetSNRToBlockErrorRateRecord',
                   'ns3::SNRToBlockErrorRateRecord *', _snr_mod_args())
    # std::string GetTraceFilePath()
    cls.add_method('GetTraceFilePath', 'std::string', [])
    # Trace loaders: each takes no arguments and returns void.
    for loader in ('LoadDefaultTraces', 'LoadTraces', 'ReLoadTraces'):
        cls.add_method(loader, 'void', [])
    # void SetTraceFilePath(char * traceFilePath)
    cls.add_method('SetTraceFilePath', 'void',
                   [param('char *', 'traceFilePath')])
    return
def register_Ns3SNRToBlockErrorRateRecord_methods(root_module, cls):
    """Register bindings for ns3::SNRToBlockErrorRateRecord
    (snr-to-block-error-rate-record.h): constructors, Copy(), and the
    double-valued getter/setter pairs for the record's fields.
    """
    # Copy constructor, then the six-double value constructor.
    cls.add_constructor([param('ns3::SNRToBlockErrorRateRecord const &', 'arg0')])
    cls.add_constructor([param('double', 'snrValue'),
                         param('double', 'bitErrorRate'),
                         param('double', 'BlockErrorRate'),
                         param('double', 'sigma2'),
                         param('double', 'I1'),
                         param('double', 'I2')])
    # SNRToBlockErrorRateRecord* Copy()
    cls.add_method('Copy', 'ns3::SNRToBlockErrorRateRecord *', [])
    # Getters: no arguments, all return double (generator order kept).
    for getter in ('GetBitErrorRate', 'GetBlockErrorRate', 'GetI1',
                   'GetI2', 'GetSNRValue', 'GetSigma2'):
        cls.add_method(getter, 'double', [])
    # Setters: each takes a single double named arg0 in the header.
    for setter in ('SetBitErrorRate', 'SetBlockErrorRate', 'SetI1',
                   'SetI2', 'SetSNRValue'):
        cls.add_method(setter, 'void', [param('double', 'arg0')])
    return
def register_Ns3SSRecord_methods(root_module, cls):
    """Register bindings for ns3::SSRecord (ss-record.h).

    SSRecord is the base station's bookkeeping entry for one subscriber
    station. This registers its four constructors, the service-flow and
    ranging state accessors/mutators, and the retry-counter
    increment/reset helpers.
    """
    ## ss-record.h (module 'wimax'): ns3::SSRecord::SSRecord(ns3::SSRecord const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::SSRecord const &', 'arg0')])
    ## ss-record.h (module 'wimax'): ns3::SSRecord::SSRecord() [constructor]
    cls.add_constructor([])
    ## ss-record.h (module 'wimax'): ns3::SSRecord::SSRecord(ns3::Mac48Address macAddress) [constructor]
    cls.add_constructor([param('ns3::Mac48Address', 'macAddress')])
    ## ss-record.h (module 'wimax'): ns3::SSRecord::SSRecord(ns3::Mac48Address macAddress, ns3::Ipv4Address IPaddress) [constructor]
    cls.add_constructor([param('ns3::Mac48Address', 'macAddress'), param('ns3::Ipv4Address', 'IPaddress')])
    ## ss-record.h (module 'wimax'): void ns3::SSRecord::AddServiceFlow(ns3::ServiceFlow * serviceFlow) [member function]
    cls.add_method('AddServiceFlow',
                   'void',
                   [param('ns3::ServiceFlow *', 'serviceFlow')])
    ## ss-record.h (module 'wimax'): void ns3::SSRecord::DisablePollForRanging() [member function]
    cls.add_method('DisablePollForRanging',
                   'void',
                   [])
    ## ss-record.h (module 'wimax'): void ns3::SSRecord::EnablePollForRanging() [member function]
    cls.add_method('EnablePollForRanging',
                   'void',
                   [])
    ## ss-record.h (module 'wimax'): bool ns3::SSRecord::GetAreServiceFlowsAllocated() const [member function]
    cls.add_method('GetAreServiceFlowsAllocated',
                   'bool',
                   [],
                   is_const=True)
    ## ss-record.h (module 'wimax'): ns3::Cid ns3::SSRecord::GetBasicCid() const [member function]
    cls.add_method('GetBasicCid',
                   'ns3::Cid',
                   [],
                   is_const=True)
    ## ss-record.h (module 'wimax'): ns3::DsaRsp ns3::SSRecord::GetDsaRsp() const [member function]
    cls.add_method('GetDsaRsp',
                   'ns3::DsaRsp',
                   [],
                   is_const=True)
    ## ss-record.h (module 'wimax'): uint8_t ns3::SSRecord::GetDsaRspRetries() const [member function]
    cls.add_method('GetDsaRspRetries',
                   'uint8_t',
                   [],
                   is_const=True)
    ## ss-record.h (module 'wimax'): bool ns3::SSRecord::GetHasServiceFlowBe() const [member function]
    cls.add_method('GetHasServiceFlowBe',
                   'bool',
                   [],
                   is_const=True)
    ## ss-record.h (module 'wimax'): bool ns3::SSRecord::GetHasServiceFlowNrtps() const [member function]
    cls.add_method('GetHasServiceFlowNrtps',
                   'bool',
                   [],
                   is_const=True)
    ## ss-record.h (module 'wimax'): bool ns3::SSRecord::GetHasServiceFlowRtps() const [member function]
    cls.add_method('GetHasServiceFlowRtps',
                   'bool',
                   [],
                   is_const=True)
    ## ss-record.h (module 'wimax'): bool ns3::SSRecord::GetHasServiceFlowUgs() const [member function]
    cls.add_method('GetHasServiceFlowUgs',
                   'bool',
                   [],
                   is_const=True)
    ## ss-record.h (module 'wimax'): ns3::Ipv4Address ns3::SSRecord::GetIPAddress() [member function]
    cls.add_method('GetIPAddress',
                   'ns3::Ipv4Address',
                   [])
    ## ss-record.h (module 'wimax'): uint8_t ns3::SSRecord::GetInvitedRangRetries() const [member function]
    cls.add_method('GetInvitedRangRetries',
                   'uint8_t',
                   [],
                   is_const=True)
    ## ss-record.h (module 'wimax'): bool ns3::SSRecord::GetIsBroadcastSS() [member function]
    cls.add_method('GetIsBroadcastSS',
                   'bool',
                   [])
    ## ss-record.h (module 'wimax'): ns3::Mac48Address ns3::SSRecord::GetMacAddress() const [member function]
    cls.add_method('GetMacAddress',
                   'ns3::Mac48Address',
                   [],
                   is_const=True)
    ## ss-record.h (module 'wimax'): ns3::WimaxPhy::ModulationType ns3::SSRecord::GetModulationType() const [member function]
    cls.add_method('GetModulationType',
                   'ns3::WimaxPhy::ModulationType',
                   [],
                   is_const=True)
    ## ss-record.h (module 'wimax'): bool ns3::SSRecord::GetPollForRanging() const [member function]
    cls.add_method('GetPollForRanging',
                   'bool',
                   [],
                   is_const=True)
    ## ss-record.h (module 'wimax'): bool ns3::SSRecord::GetPollMeBit() const [member function]
    cls.add_method('GetPollMeBit',
                   'bool',
                   [],
                   is_const=True)
    ## ss-record.h (module 'wimax'): ns3::Cid ns3::SSRecord::GetPrimaryCid() const [member function]
    cls.add_method('GetPrimaryCid',
                   'ns3::Cid',
                   [],
                   is_const=True)
    ## ss-record.h (module 'wimax'): uint8_t ns3::SSRecord::GetRangingCorrectionRetries() const [member function]
    cls.add_method('GetRangingCorrectionRetries',
                   'uint8_t',
                   [],
                   is_const=True)
    ## ss-record.h (module 'wimax'): ns3::WimaxNetDevice::RangingStatus ns3::SSRecord::GetRangingStatus() const [member function]
    cls.add_method('GetRangingStatus',
                   'ns3::WimaxNetDevice::RangingStatus',
                   [],
                   is_const=True)
    ## ss-record.h (module 'wimax'): std::vector<ns3::ServiceFlow*,std::allocator<ns3::ServiceFlow*> > ns3::SSRecord::GetServiceFlows(ns3::ServiceFlow::SchedulingType schedulingType) const [member function]
    cls.add_method('GetServiceFlows',
                   'std::vector< ns3::ServiceFlow * >',
                   [param('ns3::ServiceFlow::SchedulingType', 'schedulingType')],
                   is_const=True)
    ## ss-record.h (module 'wimax'): uint16_t ns3::SSRecord::GetSfTransactionId() const [member function]
    cls.add_method('GetSfTransactionId',
                   'uint16_t',
                   [],
                   is_const=True)
    ## ss-record.h (module 'wimax'): void ns3::SSRecord::IncrementDsaRspRetries() [member function]
    cls.add_method('IncrementDsaRspRetries',
                   'void',
                   [])
    ## ss-record.h (module 'wimax'): void ns3::SSRecord::IncrementInvitedRangingRetries() [member function]
    cls.add_method('IncrementInvitedRangingRetries',
                   'void',
                   [])
    ## ss-record.h (module 'wimax'): void ns3::SSRecord::IncrementRangingCorrectionRetries() [member function]
    cls.add_method('IncrementRangingCorrectionRetries',
                   'void',
                   [])
    ## ss-record.h (module 'wimax'): void ns3::SSRecord::ResetInvitedRangingRetries() [member function]
    cls.add_method('ResetInvitedRangingRetries',
                   'void',
                   [])
    ## ss-record.h (module 'wimax'): void ns3::SSRecord::ResetRangingCorrectionRetries() [member function]
    cls.add_method('ResetRangingCorrectionRetries',
                   'void',
                   [])
    ## ss-record.h (module 'wimax'): void ns3::SSRecord::SetAreServiceFlowsAllocated(bool val) [member function]
    cls.add_method('SetAreServiceFlowsAllocated',
                   'void',
                   [param('bool', 'val')])
    ## ss-record.h (module 'wimax'): void ns3::SSRecord::SetBasicCid(ns3::Cid basicCid) [member function]
    cls.add_method('SetBasicCid',
                   'void',
                   [param('ns3::Cid', 'basicCid')])
    ## ss-record.h (module 'wimax'): void ns3::SSRecord::SetDsaRsp(ns3::DsaRsp dsaRsp) [member function]
    cls.add_method('SetDsaRsp',
                   'void',
                   [param('ns3::DsaRsp', 'dsaRsp')])
    ## ss-record.h (module 'wimax'): void ns3::SSRecord::SetDsaRspRetries(uint8_t dsaRspRetries) [member function]
    cls.add_method('SetDsaRspRetries',
                   'void',
                   [param('uint8_t', 'dsaRspRetries')])
    ## ss-record.h (module 'wimax'): void ns3::SSRecord::SetIPAddress(ns3::Ipv4Address IPaddress) [member function]
    cls.add_method('SetIPAddress',
                   'void',
                   [param('ns3::Ipv4Address', 'IPaddress')])
    ## ss-record.h (module 'wimax'): void ns3::SSRecord::SetIsBroadcastSS(bool arg0) [member function]
    cls.add_method('SetIsBroadcastSS',
                   'void',
                   [param('bool', 'arg0')])
    ## ss-record.h (module 'wimax'): void ns3::SSRecord::SetMacAddress(ns3::Mac48Address macAddress) [member function]
    cls.add_method('SetMacAddress',
                   'void',
                   [param('ns3::Mac48Address', 'macAddress')])
    ## ss-record.h (module 'wimax'): void ns3::SSRecord::SetModulationType(ns3::WimaxPhy::ModulationType modulationType) [member function]
    cls.add_method('SetModulationType',
                   'void',
                   [param('ns3::WimaxPhy::ModulationType', 'modulationType')])
    ## ss-record.h (module 'wimax'): void ns3::SSRecord::SetPollMeBit(bool pollMeBit) [member function]
    cls.add_method('SetPollMeBit',
                   'void',
                   [param('bool', 'pollMeBit')])
    ## ss-record.h (module 'wimax'): void ns3::SSRecord::SetPrimaryCid(ns3::Cid primaryCid) [member function]
    cls.add_method('SetPrimaryCid',
                   'void',
                   [param('ns3::Cid', 'primaryCid')])
    ## ss-record.h (module 'wimax'): void ns3::SSRecord::SetRangingStatus(ns3::WimaxNetDevice::RangingStatus rangingStatus) [member function]
    cls.add_method('SetRangingStatus',
                   'void',
                   [param('ns3::WimaxNetDevice::RangingStatus', 'rangingStatus')])
    ## ss-record.h (module 'wimax'): void ns3::SSRecord::SetSfTransactionId(uint16_t sfTransactionId) [member function]
    cls.add_method('SetSfTransactionId',
                   'void',
                   [param('uint16_t', 'sfTransactionId')])
    return
def register_Ns3SendParams_methods(root_module, cls):
    """Register bindings for ns3::SendParams (send-params.h):
    only the copy and default constructors are exposed.
    """
    # Generator order: copy constructor first, then the default one.
    for ctor_args in ([param('ns3::SendParams const &', 'arg0')], []):
        cls.add_constructor(ctor_args)
    return
def register_Ns3ServiceFlow_methods(root_module, cls):
    """Register the ns3::ServiceFlow bindings (service-flow.h, module 'wimax').

    Registrations are grouped data-driven by shape (constructors,
    parameterless const getters, single-argument void setters) while
    keeping exactly the order of the generated bindings.
    """
    # Constructors, in original registration order.
    for ctor_args in (
        [param('ns3::Tlv', 'tlv')],
        [param('ns3::ServiceFlow::Direction', 'direction')],
        [],
        [param('ns3::ServiceFlow const &', 'sf')],
        [param('uint32_t', 'sfid'), param('ns3::ServiceFlow::Direction', 'direction'), param('ns3::Ptr< ns3::WimaxConnection >', 'connection')],
    ):
        cls.add_constructor(ctor_args)
    # bool CheckClassifierMatch(srcAddress, dstAddress, srcPort, dstPort, proto) const
    cls.add_method('CheckClassifierMatch',
                   'bool',
                   [param('ns3::Ipv4Address', 'srcAddress'), param('ns3::Ipv4Address', 'dstAddress'), param('uint16_t', 'srcPort'), param('uint16_t', 'dstPort'), param('uint8_t', 'proto')],
                   is_const=True)
    # void CleanUpQueue()
    cls.add_method('CleanUpQueue', 'void', [])
    # void CopyParametersFrom(ns3::ServiceFlow sf)
    cls.add_method('CopyParametersFrom', 'void', [param('ns3::ServiceFlow', 'sf')])
    # Parameterless const getters: (method name, return type).
    for getter_name, return_type in (
        ('GetArqBlockLifeTime', 'uint16_t'),
        ('GetArqBlockSize', 'uint16_t'),
        ('GetArqDeliverInOrder', 'uint8_t'),
        ('GetArqEnable', 'uint8_t'),
        ('GetArqPurgeTimeout', 'uint16_t'),
        ('GetArqRetryTimeoutRx', 'uint16_t'),
        ('GetArqRetryTimeoutTx', 'uint16_t'),
        ('GetArqSyncLoss', 'uint16_t'),
        ('GetArqWindowSize', 'uint16_t'),
        ('GetCid', 'uint16_t'),
        ('GetConnection', 'ns3::Ptr< ns3::WimaxConnection >'),
        ('GetConvergenceSublayerParam', 'ns3::CsParameters'),
        ('GetCsSpecification', 'ns3::ServiceFlow::CsSpecification'),
        ('GetDirection', 'ns3::ServiceFlow::Direction'),
        ('GetFixedversusVariableSduIndicator', 'uint8_t'),
        ('GetIsEnabled', 'bool'),
        ('GetIsMulticast', 'bool'),
        ('GetMaxSustainedTrafficRate', 'uint32_t'),
        ('GetMaxTrafficBurst', 'uint32_t'),
        ('GetMaximumLatency', 'uint32_t'),
        ('GetMinReservedTrafficRate', 'uint32_t'),
        ('GetMinTolerableTrafficRate', 'uint32_t'),
        ('GetModulation', 'ns3::WimaxPhy::ModulationType'),
        ('GetQosParamSetType', 'uint8_t'),
        ('GetQueue', 'ns3::Ptr< ns3::WimaxMacQueue >'),
        ('GetRecord', 'ns3::ServiceFlowRecord *'),
        ('GetRequestTransmissionPolicy', 'uint32_t'),
        ('GetSchedulingType', 'ns3::ServiceFlow::SchedulingType'),
        ('GetSchedulingTypeStr', 'char *'),
        ('GetSduSize', 'uint8_t'),
        ('GetServiceClassName', 'std::string'),
        ('GetServiceSchedulingType', 'ns3::ServiceFlow::SchedulingType'),
        ('GetSfid', 'uint32_t'),
        ('GetTargetSAID', 'uint16_t'),
        ('GetToleratedJitter', 'uint32_t'),
        ('GetTrafficPriority', 'uint8_t'),
        ('GetType', 'ns3::ServiceFlow::Type'),
        ('GetUnsolicitedGrantInterval', 'uint16_t'),
        ('GetUnsolicitedPollingInterval', 'uint16_t'),
    ):
        cls.add_method(getter_name, return_type, [], is_const=True)
    # bool HasPackets() const — overloaded, registration order matters between overloads.
    cls.add_method('HasPackets', 'bool', [], is_const=True)
    # bool HasPackets(ns3::MacHeaderType::HeaderType packetType) const
    cls.add_method('HasPackets', 'bool',
                   [param('ns3::MacHeaderType::HeaderType', 'packetType')],
                   is_const=True)
    # void InitValues()
    cls.add_method('InitValues', 'void', [])
    # void PrintQoSParameters() const
    cls.add_method('PrintQoSParameters', 'void', [], is_const=True)
    # Single-argument void setters: (method name, parameter type, parameter name).
    for setter_name, param_type, param_name in (
        ('SetArqBlockLifeTime', 'uint16_t', 'arg0'),
        ('SetArqBlockSize', 'uint16_t', 'arg0'),
        ('SetArqDeliverInOrder', 'uint8_t', 'arg0'),
        ('SetArqEnable', 'uint8_t', 'arg0'),
        ('SetArqPurgeTimeout', 'uint16_t', 'arg0'),
        ('SetArqRetryTimeoutRx', 'uint16_t', 'arg0'),
        ('SetArqRetryTimeoutTx', 'uint16_t', 'arg0'),
        ('SetArqSyncLoss', 'uint16_t', 'arg0'),
        ('SetArqWindowSize', 'uint16_t', 'arg0'),
        ('SetConnection', 'ns3::Ptr< ns3::WimaxConnection >', 'connection'),
        ('SetConvergenceSublayerParam', 'ns3::CsParameters', 'arg0'),
        ('SetCsSpecification', 'ns3::ServiceFlow::CsSpecification', 'arg0'),
        ('SetDirection', 'ns3::ServiceFlow::Direction', 'direction'),
        ('SetFixedversusVariableSduIndicator', 'uint8_t', 'arg0'),
        ('SetIsEnabled', 'bool', 'isEnabled'),
        ('SetIsMulticast', 'bool', 'isMulticast'),
        ('SetMaxSustainedTrafficRate', 'uint32_t', 'arg0'),
        ('SetMaxTrafficBurst', 'uint32_t', 'arg0'),
        ('SetMaximumLatency', 'uint32_t', 'arg0'),
        ('SetMinReservedTrafficRate', 'uint32_t', 'arg0'),
        ('SetMinTolerableTrafficRate', 'uint32_t', 'arg0'),
        ('SetModulation', 'ns3::WimaxPhy::ModulationType', 'modulationType'),
        ('SetQosParamSetType', 'uint8_t', 'arg0'),
        ('SetRecord', 'ns3::ServiceFlowRecord *', 'record'),
        ('SetRequestTransmissionPolicy', 'uint32_t', 'arg0'),
        ('SetSduSize', 'uint8_t', 'arg0'),
        ('SetServiceClassName', 'std::string', 'arg0'),
        ('SetServiceSchedulingType', 'ns3::ServiceFlow::SchedulingType', 'arg0'),
        ('SetSfid', 'uint32_t', 'arg0'),
        ('SetTargetSAID', 'uint16_t', 'arg0'),
        ('SetToleratedJitter', 'uint32_t', 'arg0'),
        ('SetTrafficPriority', 'uint8_t', 'arg0'),
        ('SetType', 'ns3::ServiceFlow::Type', 'type'),
        ('SetUnsolicitedGrantInterval', 'uint16_t', 'arg0'),
        ('SetUnsolicitedPollingInterval', 'uint16_t', 'arg0'),
    ):
        cls.add_method(setter_name, 'void', [param(param_type, param_name)])
    # ns3::Tlv ToTlv() const
    cls.add_method('ToTlv', 'ns3::Tlv', [], is_const=True)
    return
def register_Ns3ServiceFlowRecord_methods(root_module, cls):
    """Register the ns3::ServiceFlowRecord bindings (service-flow-record.h, module 'wimax').

    Registrations are grouped data-driven by shape (constructors,
    parameterless getters, single-argument void mutators) while keeping
    exactly the order of the generated bindings.
    """
    # Copy constructor, then default constructor.
    cls.add_constructor([param('ns3::ServiceFlowRecord const &', 'arg0')])
    cls.add_constructor([])
    # Parameterless getters: (method name, return type, constness).
    # Note a few accessors (bandwidth counters) are non-const in the C++ API.
    for getter_name, return_type, const_flag in (
        ('GetBacklogged', 'uint32_t', True),
        ('GetBackloggedTemp', 'uint32_t', True),
        ('GetBwSinceLastExpiry', 'uint32_t', False),
        ('GetBytesRcvd', 'uint32_t', True),
        ('GetBytesSent', 'uint32_t', True),
        ('GetDlTimeStamp', 'ns3::Time', True),
        ('GetGrantSize', 'uint32_t', True),
        ('GetGrantTimeStamp', 'ns3::Time', True),
        ('GetGrantedBandwidth', 'uint32_t', False),
        ('GetGrantedBandwidthTemp', 'uint32_t', False),
        ('GetLastGrantTime', 'ns3::Time', True),
        ('GetPktsRcvd', 'uint32_t', True),
        ('GetPktsSent', 'uint32_t', True),
        ('GetRequestedBandwidth', 'uint32_t', False),
    ):
        cls.add_method(getter_name, return_type, [], is_const=const_flag)
    # Single-argument void mutators: (method name, parameter type, parameter name).
    for mutator_name, param_type, param_name in (
        ('IncreaseBacklogged', 'uint32_t', 'backlogged'),
        ('IncreaseBackloggedTemp', 'uint32_t', 'backloggedTemp'),
        ('SetBacklogged', 'uint32_t', 'backlogged'),
        ('SetBackloggedTemp', 'uint32_t', 'backloggedTemp'),
        ('SetBwSinceLastExpiry', 'uint32_t', 'bwSinceLastExpiry'),
        ('SetBytesRcvd', 'uint32_t', 'bytesRcvd'),
        ('SetBytesSent', 'uint32_t', 'bytesSent'),
        ('SetDlTimeStamp', 'ns3::Time', 'dlTimeStamp'),
        ('SetGrantSize', 'uint32_t', 'grantSize'),
        ('SetGrantTimeStamp', 'ns3::Time', 'grantTimeStamp'),
        ('SetGrantedBandwidth', 'uint32_t', 'grantedBandwidth'),
        ('SetGrantedBandwidthTemp', 'uint32_t', 'grantedBandwidthTemp'),
        ('SetLastGrantTime', 'ns3::Time', 'grantTime'),
        ('SetPktsRcvd', 'uint32_t', 'pktsRcvd'),
        ('SetPktsSent', 'uint32_t', 'pktsSent'),
        ('SetRequestedBandwidth', 'uint32_t', 'requestedBandwidth'),
        ('UpdateBwSinceLastExpiry', 'uint32_t', 'bwSinceLastExpiry'),
        ('UpdateBytesRcvd', 'uint32_t', 'bytesRcvd'),
        ('UpdateBytesSent', 'uint32_t', 'bytesSent'),
        ('UpdateGrantedBandwidth', 'uint32_t', 'grantedBandwidth'),
        ('UpdateGrantedBandwidthTemp', 'uint32_t', 'grantedBandwidthTemp'),
        ('UpdatePktsRcvd', 'uint32_t', 'pktsRcvd'),
        ('UpdatePktsSent', 'uint32_t', 'pktsSent'),
        ('UpdateRequestedBandwidth', 'uint32_t', 'requestedBandwidth'),
    ):
        cls.add_method(mutator_name, 'void', [param(param_type, param_name)])
    return
def register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, cls):
    """Register the ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> bindings (simple-ref-count.h, module 'core')."""
    # Default constructor.
    cls.add_constructor([])
    # Copy constructor.
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter > const &', 'o')])
    # static void Cleanup()
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return
def register_Ns3Simulator_methods(root_module, cls):
    """Register the ns3::Simulator bindings (simulator.h, module 'core').

    Apart from the copy constructor, every exposed member is static;
    they are registered in the original order via a lookup table.
    """
    cls.add_constructor([param('ns3::Simulator const &', 'arg0')])
    # Repeated parameter type, hoisted for readability.
    event_id_ref = 'ns3::EventId const &'
    # Static methods: (method name, return type, parameter list).
    for method_name, return_type, args in (
        ('Cancel', 'void', [param(event_id_ref, 'id')]),
        ('Destroy', 'void', []),
        ('GetContext', 'uint32_t', []),
        ('GetDelayLeft', 'ns3::Time', [param(event_id_ref, 'id')]),
        ('GetImplementation', 'ns3::Ptr< ns3::SimulatorImpl >', []),
        ('GetMaximumSimulationTime', 'ns3::Time', []),
        ('GetSystemId', 'uint32_t', []),
        ('IsExpired', 'bool', [param(event_id_ref, 'id')]),
        ('IsFinished', 'bool', []),
        ('Now', 'ns3::Time', []),
        ('Remove', 'void', [param(event_id_ref, 'id')]),
        ('SetImplementation', 'void', [param('ns3::Ptr< ns3::SimulatorImpl >', 'impl')]),
        ('SetScheduler', 'void', [param('ns3::ObjectFactory', 'schedulerFactory')]),
        # Stop() and Stop(Time const &) are overloads; order preserved.
        ('Stop', 'void', []),
        ('Stop', 'void', [param('ns3::Time const &', 'time')]),
    ):
        cls.add_method(method_name, return_type, args, is_static=True)
    return
def register_Ns3Tag_methods(root_module, cls):
    """Register the ns3::Tag bindings (tag.h, module 'network').

    Tag is an abstract base: Deserialize/GetSerializedSize/Print/Serialize
    are pure virtual and must be overridden by concrete tag classes.
    """
    # Default constructor, then copy constructor.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Tag const &', 'arg0')])
    # void Deserialize(ns3::TagBuffer i) — pure virtual.
    cls.add_method('Deserialize', 'void',
                   [param('ns3::TagBuffer', 'i')],
                   is_pure_virtual=True, is_virtual=True)
    # uint32_t GetSerializedSize() const — pure virtual.
    cls.add_method('GetSerializedSize', 'uint32_t', [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    # static ns3::TypeId GetTypeId()
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    # void Print(std::ostream & os) const — pure virtual.
    cls.add_method('Print', 'void',
                   [param('std::ostream &', 'os')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    # void Serialize(ns3::TagBuffer i) const — pure virtual.
    cls.add_method('Serialize', 'void',
                   [param('ns3::TagBuffer', 'i')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def register_Ns3TagBuffer_methods(root_module, cls):
    """Register the ns3::TagBuffer bindings (tag-buffer.h, module 'network').

    The fixed-width Read*/Write* accessors are registered data-driven;
    the original registration order is preserved throughout.
    """
    # Copy constructor, then (start, end) range constructor.
    cls.add_constructor([param('ns3::TagBuffer const &', 'arg0')])
    cls.add_constructor([param('uint8_t *', 'start'), param('uint8_t *', 'end')])
    # void CopyFrom(ns3::TagBuffer o)
    cls.add_method('CopyFrom', 'void', [param('ns3::TagBuffer', 'o')])
    # void Read(uint8_t * buffer, uint32_t size)
    cls.add_method('Read', 'void',
                   [param('uint8_t *', 'buffer'), param('uint32_t', 'size')])
    # Fixed-width readers: (method name, value type).
    for reader_name, value_type in (
        ('ReadDouble', 'double'),
        ('ReadU16', 'uint16_t'),
        ('ReadU32', 'uint32_t'),
        ('ReadU64', 'uint64_t'),
        ('ReadU8', 'uint8_t'),
    ):
        cls.add_method(reader_name, value_type, [])
    # void TrimAtEnd(uint32_t trim)
    cls.add_method('TrimAtEnd', 'void', [param('uint32_t', 'trim')])
    # void Write(uint8_t const * buffer, uint32_t size)
    cls.add_method('Write', 'void',
                   [param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
    # Fixed-width writers: (method name, value type, parameter name).
    for writer_name, value_type, value_name in (
        ('WriteDouble', 'double', 'v'),
        ('WriteU16', 'uint16_t', 'data'),
        ('WriteU32', 'uint32_t', 'data'),
        ('WriteU64', 'uint64_t', 'v'),
        ('WriteU8', 'uint8_t', 'v'),
    ):
        cls.add_method(writer_name, 'void', [param(value_type, value_name)])
    return
def register_Ns3TlvValue_methods(root_module, cls):
    """Register bindings for the abstract base class ns3::TlvValue (wimax-tlv.h).

    Every member of the TLV-value interface (Copy, Deserialize,
    GetSerializedSize, Serialize) is registered as a pure virtual method.
    """
    # Default and copy constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::TlvValue const &', 'arg0')])
    # ns3::TlvValue * Copy() const  [pure virtual]
    cls.add_method('Copy', 'ns3::TlvValue *', [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    # uint32_t Deserialize(ns3::Buffer::Iterator start, uint64_t valueLen)  [pure virtual]
    cls.add_method('Deserialize', 'uint32_t',
                   [param('ns3::Buffer::Iterator', 'start'), param('uint64_t', 'valueLen')],
                   is_pure_virtual=True, is_virtual=True)
    # uint32_t GetSerializedSize() const  [pure virtual]
    cls.add_method('GetSerializedSize', 'uint32_t', [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    # void Serialize(ns3::Buffer::Iterator start) const  [pure virtual]
    cls.add_method('Serialize', 'void',
                   [param('ns3::Buffer::Iterator', 'start')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def register_Ns3TosTlvValue_methods(root_module, cls):
    """Register bindings for ns3::TosTlvValue (wimax-tlv.h), a concrete
    TlvValue carrying a type-of-service triple (low, high, mask)."""
    # Copy, default, and three-byte constructors.
    cls.add_constructor([param('ns3::TosTlvValue const &', 'arg0')])
    cls.add_constructor([])
    cls.add_constructor([param('uint8_t', 'arg0'), param('uint8_t', 'arg1'), param('uint8_t', 'arg2')])
    # Virtual overrides of the TlvValue interface.
    cls.add_method('Copy', 'ns3::TosTlvValue *', [],
                   is_const=True, is_virtual=True)
    cls.add_method('Deserialize', 'uint32_t',
                   [param('ns3::Buffer::Iterator', 'start'), param('uint64_t', 'valueLength')],
                   is_virtual=True)
    # Accessors for the three stored fields.
    cls.add_method('GetHigh', 'uint8_t', [], is_const=True)
    cls.add_method('GetLow', 'uint8_t', [], is_const=True)
    cls.add_method('GetMask', 'uint8_t', [], is_const=True)
    cls.add_method('GetSerializedSize', 'uint32_t', [],
                   is_const=True, is_virtual=True)
    cls.add_method('Serialize', 'void',
                   [param('ns3::Buffer::Iterator', 'start')],
                   is_const=True, is_virtual=True)
    return
def register_Ns3TypeId_methods(root_module, cls):
    """Register bindings for ns3::TypeId (type-id.h, module 'core').

    Exposes the comparison/output operators, the constructors, the
    attribute and trace-source registration API, and the static
    lookup/registry helpers of the ns-3 run-time type-information class.
    """
    # Comparison and stream-output operators.
    cls.add_binary_comparison_operator('!=')
    cls.add_binary_comparison_operator('<')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    ## type-id.h (module 'core'): ns3::TypeId::TypeId(char const * name) [constructor]
    cls.add_constructor([param('char const *', 'name')])
    ## type-id.h (module 'core'): ns3::TypeId::TypeId() [constructor]
    cls.add_constructor([])
    ## type-id.h (module 'core'): ns3::TypeId::TypeId(ns3::TypeId const & o) [copy constructor]
    cls.add_constructor([param('ns3::TypeId const &', 'o')])
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, ns3::AttributeValue const & initialValue, ns3::Ptr<ns3::AttributeAccessor const> accessor, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('AddAttribute', 
                   'ns3::TypeId', 
                   [param('std::string', 'name'), param('std::string', 'help'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')])
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, uint32_t flags, ns3::AttributeValue const & initialValue, ns3::Ptr<ns3::AttributeAccessor const> accessor, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('AddAttribute', 
                   'ns3::TypeId', 
                   [param('std::string', 'name'), param('std::string', 'help'), param('uint32_t', 'flags'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')])
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddTraceSource(std::string name, std::string help, ns3::Ptr<ns3::TraceSourceAccessor const> accessor) [member function]
    cls.add_method('AddTraceSource', 
                   'ns3::TypeId', 
                   [param('std::string', 'name'), param('std::string', 'help'), param('ns3::Ptr< ns3::TraceSourceAccessor const >', 'accessor')])
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation ns3::TypeId::GetAttribute(uint32_t i) const [member function]
    cls.add_method('GetAttribute', 
                   'ns3::TypeId::AttributeInformation', 
                   [param('uint32_t', 'i')], 
                   is_const=True)
    ## type-id.h (module 'core'): std::string ns3::TypeId::GetAttributeFullName(uint32_t i) const [member function]
    cls.add_method('GetAttributeFullName', 
                   'std::string', 
                   [param('uint32_t', 'i')], 
                   is_const=True)
    ## type-id.h (module 'core'): uint32_t ns3::TypeId::GetAttributeN() const [member function]
    cls.add_method('GetAttributeN', 
                   'uint32_t', 
                   [], 
                   is_const=True)
    ## type-id.h (module 'core'): ns3::Callback<ns3::ObjectBase*,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> ns3::TypeId::GetConstructor() const [member function]
    cls.add_method('GetConstructor', 
                   'ns3::Callback< ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 
                   [], 
                   is_const=True)
    ## type-id.h (module 'core'): std::string ns3::TypeId::GetGroupName() const [member function]
    cls.add_method('GetGroupName', 
                   'std::string', 
                   [], 
                   is_const=True)
    ## type-id.h (module 'core'): std::string ns3::TypeId::GetName() const [member function]
    cls.add_method('GetName', 
                   'std::string', 
                   [], 
                   is_const=True)
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::GetParent() const [member function]
    cls.add_method('GetParent', 
                   'ns3::TypeId', 
                   [], 
                   is_const=True)
    ## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::GetRegistered(uint32_t i) [member function]
    cls.add_method('GetRegistered', 
                   'ns3::TypeId', 
                   [param('uint32_t', 'i')], 
                   is_static=True)
    ## type-id.h (module 'core'): static uint32_t ns3::TypeId::GetRegisteredN() [member function]
    cls.add_method('GetRegisteredN', 
                   'uint32_t', 
                   [], 
                   is_static=True)
    ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation ns3::TypeId::GetTraceSource(uint32_t i) const [member function]
    cls.add_method('GetTraceSource', 
                   'ns3::TypeId::TraceSourceInformation', 
                   [param('uint32_t', 'i')], 
                   is_const=True)
    ## type-id.h (module 'core'): uint32_t ns3::TypeId::GetTraceSourceN() const [member function]
    cls.add_method('GetTraceSourceN', 
                   'uint32_t', 
                   [], 
                   is_const=True)
    ## type-id.h (module 'core'): uint16_t ns3::TypeId::GetUid() const [member function]
    cls.add_method('GetUid', 
                   'uint16_t', 
                   [], 
                   is_const=True)
    ## type-id.h (module 'core'): bool ns3::TypeId::HasConstructor() const [member function]
    cls.add_method('HasConstructor', 
                   'bool', 
                   [], 
                   is_const=True)
    ## type-id.h (module 'core'): bool ns3::TypeId::HasParent() const [member function]
    cls.add_method('HasParent', 
                   'bool', 
                   [], 
                   is_const=True)
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::HideFromDocumentation() [member function]
    cls.add_method('HideFromDocumentation', 
                   'ns3::TypeId', 
                   [])
    ## type-id.h (module 'core'): bool ns3::TypeId::IsChildOf(ns3::TypeId other) const [member function]
    cls.add_method('IsChildOf', 
                   'bool', 
                   [param('ns3::TypeId', 'other')], 
                   is_const=True)
    ## type-id.h (module 'core'): bool ns3::TypeId::LookupAttributeByName(std::string name, ns3::TypeId::AttributeInformation * info) const [member function]
    # NOTE: the out-parameter 'info' is wrapped with transfer_ownership=False,
    # so Python retains ownership of the AttributeInformation object.
    cls.add_method('LookupAttributeByName', 
                   'bool', 
                   [param('std::string', 'name'), param('ns3::TypeId::AttributeInformation *', 'info', transfer_ownership=False)], 
                   is_const=True)
    ## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::LookupByName(std::string name) [member function]
    cls.add_method('LookupByName', 
                   'ns3::TypeId', 
                   [param('std::string', 'name')], 
                   is_static=True)
    ## type-id.h (module 'core'): ns3::Ptr<ns3::TraceSourceAccessor const> ns3::TypeId::LookupTraceSourceByName(std::string name) const [member function]
    cls.add_method('LookupTraceSourceByName', 
                   'ns3::Ptr< ns3::TraceSourceAccessor const >', 
                   [param('std::string', 'name')], 
                   is_const=True)
    ## type-id.h (module 'core'): bool ns3::TypeId::MustHideFromDocumentation() const [member function]
    cls.add_method('MustHideFromDocumentation', 
                   'bool', 
                   [], 
                   is_const=True)
    ## type-id.h (module 'core'): bool ns3::TypeId::SetAttributeInitialValue(uint32_t i, ns3::Ptr<ns3::AttributeValue const> initialValue) [member function]
    cls.add_method('SetAttributeInitialValue', 
                   'bool', 
                   [param('uint32_t', 'i'), param('ns3::Ptr< ns3::AttributeValue const >', 'initialValue')])
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetGroupName(std::string groupName) [member function]
    cls.add_method('SetGroupName', 
                   'ns3::TypeId', 
                   [param('std::string', 'groupName')])
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetParent(ns3::TypeId tid) [member function]
    cls.add_method('SetParent', 
                   'ns3::TypeId', 
                   [param('ns3::TypeId', 'tid')])
    ## type-id.h (module 'core'): void ns3::TypeId::SetUid(uint16_t tid) [member function]
    cls.add_method('SetUid', 
                   'void', 
                   [param('uint16_t', 'tid')])
    return
def register_Ns3TypeIdAttributeInformation_methods(root_module, cls):
    """Register bindings for the ns3::TypeId::AttributeInformation struct
    (type-id.h): constructors plus its public data members."""
    # Default and copy constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::TypeId::AttributeInformation const &', 'arg0')])
    # Public fields exposed as mutable instance attributes.
    cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::AttributeAccessor const >', is_const=False)
    cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False)
    cls.add_instance_attribute('flags', 'uint32_t', is_const=False)
    cls.add_instance_attribute('help', 'std::string', is_const=False)
    cls.add_instance_attribute('initialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False)
    cls.add_instance_attribute('name', 'std::string', is_const=False)
    cls.add_instance_attribute('originalInitialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False)
    return
def register_Ns3TypeIdTraceSourceInformation_methods(root_module, cls):
    """Register bindings for the ns3::TypeId::TraceSourceInformation struct
    (type-id.h): constructors plus its public data members."""
    # Default and copy constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::TypeId::TraceSourceInformation const &', 'arg0')])
    # Public fields exposed as mutable instance attributes.
    cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::TraceSourceAccessor const >', is_const=False)
    cls.add_instance_attribute('help', 'std::string', is_const=False)
    cls.add_instance_attribute('name', 'std::string', is_const=False)
    return
def register_Ns3U16TlvValue_methods(root_module, cls):
    """Register bindings for ns3::U16TlvValue (wimax-tlv.h), a TlvValue
    wrapping a single uint16_t."""
    # Copy, value, and default constructors.
    cls.add_constructor([param('ns3::U16TlvValue const &', 'arg0')])
    cls.add_constructor([param('uint16_t', 'value')])
    cls.add_constructor([])
    # Virtual overrides of the TlvValue interface.
    cls.add_method('Copy', 'ns3::U16TlvValue *', [],
                   is_const=True, is_virtual=True)
    # Two Deserialize overloads: with and without an explicit value length.
    cls.add_method('Deserialize', 'uint32_t',
                   [param('ns3::Buffer::Iterator', 'start'), param('uint64_t', 'valueLen')],
                   is_virtual=True)
    cls.add_method('Deserialize', 'uint32_t',
                   [param('ns3::Buffer::Iterator', 'start')])
    cls.add_method('GetSerializedSize', 'uint32_t', [],
                   is_const=True, is_virtual=True)
    # Accessor for the wrapped value.
    cls.add_method('GetValue', 'uint16_t', [], is_const=True)
    cls.add_method('Serialize', 'void',
                   [param('ns3::Buffer::Iterator', 'start')],
                   is_const=True, is_virtual=True)
    return
def register_Ns3U32TlvValue_methods(root_module, cls):
    """Register bindings for ns3::U32TlvValue (wimax-tlv.h), a TlvValue
    wrapping a single uint32_t."""
    # Copy, value, and default constructors.
    cls.add_constructor([param('ns3::U32TlvValue const &', 'arg0')])
    cls.add_constructor([param('uint32_t', 'value')])
    cls.add_constructor([])
    # Virtual overrides of the TlvValue interface.
    cls.add_method('Copy', 'ns3::U32TlvValue *', [],
                   is_const=True, is_virtual=True)
    # Two Deserialize overloads: with and without an explicit value length.
    cls.add_method('Deserialize', 'uint32_t',
                   [param('ns3::Buffer::Iterator', 'start'), param('uint64_t', 'valueLen')],
                   is_virtual=True)
    cls.add_method('Deserialize', 'uint32_t',
                   [param('ns3::Buffer::Iterator', 'start')])
    cls.add_method('GetSerializedSize', 'uint32_t', [],
                   is_const=True, is_virtual=True)
    # Accessor for the wrapped value.
    cls.add_method('GetValue', 'uint32_t', [], is_const=True)
    cls.add_method('Serialize', 'void',
                   [param('ns3::Buffer::Iterator', 'start')],
                   is_const=True, is_virtual=True)
    return
def register_Ns3U8TlvValue_methods(root_module, cls):
    """Register bindings for ns3::U8TlvValue (wimax-tlv.h), a TlvValue
    wrapping a single uint8_t."""
    # Copy, value, and default constructors.
    cls.add_constructor([param('ns3::U8TlvValue const &', 'arg0')])
    cls.add_constructor([param('uint8_t', 'value')])
    cls.add_constructor([])
    # Virtual overrides of the TlvValue interface.
    cls.add_method('Copy', 'ns3::U8TlvValue *', [],
                   is_const=True, is_virtual=True)
    # Two Deserialize overloads: with and without an explicit value length.
    cls.add_method('Deserialize', 'uint32_t',
                   [param('ns3::Buffer::Iterator', 'start'), param('uint64_t', 'valueLen')],
                   is_virtual=True)
    cls.add_method('Deserialize', 'uint32_t',
                   [param('ns3::Buffer::Iterator', 'start')])
    cls.add_method('GetSerializedSize', 'uint32_t', [],
                   is_const=True, is_virtual=True)
    # Accessor for the wrapped value.
    cls.add_method('GetValue', 'uint8_t', [], is_const=True)
    cls.add_method('Serialize', 'void',
                   [param('ns3::Buffer::Iterator', 'start')],
                   is_const=True, is_virtual=True)
    return
def register_Ns3UcdChannelEncodings_methods(root_module, cls):
    """Register bindings for ns3::UcdChannelEncodings (ul-mac-messages.h)."""
    # Copy and default constructors.
    cls.add_constructor([param('ns3::UcdChannelEncodings const &', 'arg0')])
    cls.add_constructor([])
    # Getters for the encoded channel parameters.
    cls.add_method('GetBwReqOppSize', 'uint16_t', [], is_const=True)
    cls.add_method('GetFrequency', 'uint32_t', [], is_const=True)
    cls.add_method('GetRangReqOppSize', 'uint16_t', [], is_const=True)
    cls.add_method('GetSize', 'uint16_t', [], is_const=True)
    # Public buffer read entry point.
    cls.add_method('Read', 'ns3::Buffer::Iterator',
                   [param('ns3::Buffer::Iterator', 'start')])
    # Setters for the encoded channel parameters.
    cls.add_method('SetBwReqOppSize', 'void', [param('uint16_t', 'bwReqOppSize')])
    cls.add_method('SetFrequency', 'void', [param('uint32_t', 'frequency')])
    cls.add_method('SetRangReqOppSize', 'void', [param('uint16_t', 'rangReqOppSize')])
    # Public buffer write entry point.
    cls.add_method('Write', 'ns3::Buffer::Iterator',
                   [param('ns3::Buffer::Iterator', 'start')],
                   is_const=True)
    # Private pure-virtual hooks subclasses must implement.
    cls.add_method('DoRead', 'ns3::Buffer::Iterator',
                   [param('ns3::Buffer::Iterator', 'start')],
                   is_pure_virtual=True, visibility='private', is_virtual=True)
    cls.add_method('DoWrite', 'ns3::Buffer::Iterator',
                   [param('ns3::Buffer::Iterator', 'start')],
                   is_pure_virtual=True, is_const=True, visibility='private', is_virtual=True)
    return
def register_Ns3VectorTlvValue_methods(root_module, cls):
    """Register bindings for ns3::VectorTlvValue (wimax-tlv.h), a TlvValue
    that holds a vector of nested Tlv elements."""
    # Copy and default constructors.
    cls.add_constructor([param('ns3::VectorTlvValue const &', 'arg0')])
    cls.add_constructor([])
    # Append a TLV to the vector.
    cls.add_method('Add', 'void', [param('ns3::Tlv const &', 'val')])
    # Iterator access to the stored elements (const begin/end).
    cls.add_method('Begin',
                   '__gnu_cxx::__normal_iterator< ns3::Tlv * const *, std::vector< ns3::Tlv * > >',
                   [],
                   is_const=True)
    # Still-abstract parts of the TlvValue interface.
    cls.add_method('Copy', 'ns3::VectorTlvValue *', [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('Deserialize', 'uint32_t',
                   [param('ns3::Buffer::Iterator', 'start'), param('uint64_t', 'valueLength')],
                   is_pure_virtual=True, is_virtual=True)
    cls.add_method('End',
                   '__gnu_cxx::__normal_iterator< ns3::Tlv * const *, std::vector< ns3::Tlv * > >',
                   [],
                   is_const=True)
    # Concrete virtual overrides.
    cls.add_method('GetSerializedSize', 'uint32_t', [],
                   is_const=True, is_virtual=True)
    cls.add_method('Serialize', 'void',
                   [param('ns3::Buffer::Iterator', 'start')],
                   is_const=True, is_virtual=True)
    return
def register_Ns3WimaxHelper_methods(root_module, cls):
    """Register bindings for ns3::WimaxHelper (wimax-helper.h).

    Covers stream assignment, PHY/scheduler/service-flow factories, the
    Install overload set, and the private ascii/pcap trace hooks.
    """
    ## wimax-helper.h (module 'wimax'): ns3::WimaxHelper::WimaxHelper(ns3::WimaxHelper const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::WimaxHelper const &', 'arg0')])
    ## wimax-helper.h (module 'wimax'): ns3::WimaxHelper::WimaxHelper() [constructor]
    cls.add_constructor([])
    ## wimax-helper.h (module 'wimax'): int64_t ns3::WimaxHelper::AssignStreams(int64_t stream) [member function]
    cls.add_method('AssignStreams', 
                   'int64_t', 
                   [param('int64_t', 'stream')])
    ## wimax-helper.h (module 'wimax'): int64_t ns3::WimaxHelper::AssignStreams(ns3::NetDeviceContainer c, int64_t stream) [member function]
    cls.add_method('AssignStreams', 
                   'int64_t', 
                   [param('ns3::NetDeviceContainer', 'c'), param('int64_t', 'stream')])
    ## wimax-helper.h (module 'wimax'): ns3::Ptr<ns3::BSScheduler> ns3::WimaxHelper::CreateBSScheduler(ns3::WimaxHelper::SchedulerType schedulerType) [member function]
    cls.add_method('CreateBSScheduler', 
                   'ns3::Ptr< ns3::BSScheduler >', 
                   [param('ns3::WimaxHelper::SchedulerType', 'schedulerType')])
    ## wimax-helper.h (module 'wimax'): ns3::Ptr<ns3::WimaxPhy> ns3::WimaxHelper::CreatePhy(ns3::WimaxHelper::PhyType phyType) [member function]
    cls.add_method('CreatePhy', 
                   'ns3::Ptr< ns3::WimaxPhy >', 
                   [param('ns3::WimaxHelper::PhyType', 'phyType')])
    ## wimax-helper.h (module 'wimax'): ns3::Ptr<ns3::WimaxPhy> ns3::WimaxHelper::CreatePhy(ns3::WimaxHelper::PhyType phyType, char * SNRTraceFilePath, bool activateLoss) [member function]
    cls.add_method('CreatePhy', 
                   'ns3::Ptr< ns3::WimaxPhy >', 
                   [param('ns3::WimaxHelper::PhyType', 'phyType'), param('char *', 'SNRTraceFilePath'), param('bool', 'activateLoss')])
    ## wimax-helper.h (module 'wimax'): ns3::Ptr<ns3::WimaxPhy> ns3::WimaxHelper::CreatePhyWithoutChannel(ns3::WimaxHelper::PhyType phyType) [member function]
    cls.add_method('CreatePhyWithoutChannel', 
                   'ns3::Ptr< ns3::WimaxPhy >', 
                   [param('ns3::WimaxHelper::PhyType', 'phyType')])
    ## wimax-helper.h (module 'wimax'): ns3::Ptr<ns3::WimaxPhy> ns3::WimaxHelper::CreatePhyWithoutChannel(ns3::WimaxHelper::PhyType phyType, char * SNRTraceFilePath, bool activateLoss) [member function]
    cls.add_method('CreatePhyWithoutChannel', 
                   'ns3::Ptr< ns3::WimaxPhy >', 
                   [param('ns3::WimaxHelper::PhyType', 'phyType'), param('char *', 'SNRTraceFilePath'), param('bool', 'activateLoss')])
    ## wimax-helper.h (module 'wimax'): ns3::ServiceFlow ns3::WimaxHelper::CreateServiceFlow(ns3::ServiceFlow::Direction direction, ns3::ServiceFlow::SchedulingType schedulinType, ns3::IpcsClassifierRecord classifier) [member function]
    cls.add_method('CreateServiceFlow', 
                   'ns3::ServiceFlow', 
                   [param('ns3::ServiceFlow::Direction', 'direction'), param('ns3::ServiceFlow::SchedulingType', 'schedulinType'), param('ns3::IpcsClassifierRecord', 'classifier')])
    ## wimax-helper.h (module 'wimax'): ns3::Ptr<ns3::UplinkScheduler> ns3::WimaxHelper::CreateUplinkScheduler(ns3::WimaxHelper::SchedulerType schedulerType) [member function]
    cls.add_method('CreateUplinkScheduler', 
                   'ns3::Ptr< ns3::UplinkScheduler >', 
                   [param('ns3::WimaxHelper::SchedulerType', 'schedulerType')])
    ## wimax-helper.h (module 'wimax'): static void ns3::WimaxHelper::EnableAsciiForConnection(ns3::Ptr<ns3::OutputStreamWrapper> oss, uint32_t nodeid, uint32_t deviceid, char * netdevice, char * connection) [member function]
    cls.add_method('EnableAsciiForConnection', 
                   'void', 
                   [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'oss'), param('uint32_t', 'nodeid'), param('uint32_t', 'deviceid'), param('char *', 'netdevice'), param('char *', 'connection')], 
                   is_static=True)
    ## wimax-helper.h (module 'wimax'): static void ns3::WimaxHelper::EnableLogComponents() [member function]
    cls.add_method('EnableLogComponents', 
                   'void', 
                   [], 
                   is_static=True)
    ## wimax-helper.h (module 'wimax'): ns3::NetDeviceContainer ns3::WimaxHelper::Install(ns3::NodeContainer c, ns3::WimaxHelper::NetDeviceType type, ns3::WimaxHelper::PhyType phyType, ns3::WimaxHelper::SchedulerType schedulerType) [member function]
    cls.add_method('Install', 
                   'ns3::NetDeviceContainer', 
                   [param('ns3::NodeContainer', 'c'), param('ns3::WimaxHelper::NetDeviceType', 'type'), param('ns3::WimaxHelper::PhyType', 'phyType'), param('ns3::WimaxHelper::SchedulerType', 'schedulerType')])
    ## wimax-helper.h (module 'wimax'): ns3::NetDeviceContainer ns3::WimaxHelper::Install(ns3::NodeContainer c, ns3::WimaxHelper::NetDeviceType deviceType, ns3::WimaxHelper::PhyType phyType, ns3::Ptr<ns3::WimaxChannel> channel, ns3::WimaxHelper::SchedulerType schedulerType) [member function]
    cls.add_method('Install', 
                   'ns3::NetDeviceContainer', 
                   [param('ns3::NodeContainer', 'c'), param('ns3::WimaxHelper::NetDeviceType', 'deviceType'), param('ns3::WimaxHelper::PhyType', 'phyType'), param('ns3::Ptr< ns3::WimaxChannel >', 'channel'), param('ns3::WimaxHelper::SchedulerType', 'schedulerType')])
    ## wimax-helper.h (module 'wimax'): ns3::NetDeviceContainer ns3::WimaxHelper::Install(ns3::NodeContainer c, ns3::WimaxHelper::NetDeviceType deviceType, ns3::WimaxHelper::PhyType phyType, ns3::WimaxHelper::SchedulerType schedulerType, double frameDuration) [member function]
    cls.add_method('Install', 
                   'ns3::NetDeviceContainer', 
                   [param('ns3::NodeContainer', 'c'), param('ns3::WimaxHelper::NetDeviceType', 'deviceType'), param('ns3::WimaxHelper::PhyType', 'phyType'), param('ns3::WimaxHelper::SchedulerType', 'schedulerType'), param('double', 'frameDuration')])
    ## wimax-helper.h (module 'wimax'): ns3::Ptr<ns3::WimaxNetDevice> ns3::WimaxHelper::Install(ns3::Ptr<ns3::Node> node, ns3::WimaxHelper::NetDeviceType deviceType, ns3::WimaxHelper::PhyType phyType, ns3::Ptr<ns3::WimaxChannel> channel, ns3::WimaxHelper::SchedulerType schedulerType) [member function]
    cls.add_method('Install', 
                   'ns3::Ptr< ns3::WimaxNetDevice >', 
                   [param('ns3::Ptr< ns3::Node >', 'node'), param('ns3::WimaxHelper::NetDeviceType', 'deviceType'), param('ns3::WimaxHelper::PhyType', 'phyType'), param('ns3::Ptr< ns3::WimaxChannel >', 'channel'), param('ns3::WimaxHelper::SchedulerType', 'schedulerType')])
    ## wimax-helper.h (module 'wimax'): void ns3::WimaxHelper::SetPropagationLossModel(ns3::SimpleOfdmWimaxChannel::PropModel propagationModel) [member function]
    cls.add_method('SetPropagationLossModel', 
                   'void', 
                   [param('ns3::SimpleOfdmWimaxChannel::PropModel', 'propagationModel')])
    ## wimax-helper.h (module 'wimax'): void ns3::WimaxHelper::EnableAsciiInternal(ns3::Ptr<ns3::OutputStreamWrapper> stream, std::string prefix, ns3::Ptr<ns3::NetDevice> nd, bool explicitFilename) [member function]
    # Private virtual hooks from the trace-helper base classes.
    cls.add_method('EnableAsciiInternal', 
                   'void', 
                   [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream'), param('std::string', 'prefix'), param('ns3::Ptr< ns3::NetDevice >', 'nd'), param('bool', 'explicitFilename')], 
                   visibility='private', is_virtual=True)
    ## wimax-helper.h (module 'wimax'): void ns3::WimaxHelper::EnablePcapInternal(std::string prefix, ns3::Ptr<ns3::NetDevice> nd, bool explicitFilename, bool promiscuous) [member function]
    cls.add_method('EnablePcapInternal', 
                   'void', 
                   [param('std::string', 'prefix'), param('ns3::Ptr< ns3::NetDevice >', 'nd'), param('bool', 'explicitFilename'), param('bool', 'promiscuous')], 
                   visibility='private', is_virtual=True)
    return
def register_Ns3Empty_methods(root_module, cls):
    """Register bindings for ns3::empty (empty.h): the placeholder type has
    only default and copy constructors."""
    cls.add_constructor([])
    cls.add_constructor([param('ns3::empty const &', 'arg0')])
    return
def register_Ns3Int64x64_t_methods(root_module, cls):
    """Register operators, constructors and methods of ns3::int64x64_t.

    The repeated per-scalar-type operator registrations are driven by a
    shared type list; the registration order is identical to the
    generator's original output.
    """
    i64 = root_module['ns3::int64x64_t']
    # Scalar right-hand-side types, in the generator's original order.
    scalar_types = (
        'long long unsigned int const',
        'long unsigned int const',
        'unsigned int const',
        'short unsigned int const',
        'unsigned char const',
        'long long int const',
        'long int const',
        'int const',
        'short int const',
        'signed char const',
        'double const',
    )
    cls.add_binary_comparison_operator('!=')
    cls.add_inplace_numeric_operator('+=', param('ns3::int64x64_t const &', 'right'))
    # Binary * and + against every scalar type, then against int64x64_t itself.
    for op in ('*', '+'):
        for rhs in scalar_types:
            cls.add_binary_numeric_operator(op, i64, i64, param(rhs, 'right'))
        cls.add_binary_numeric_operator(op, i64, i64, param('ns3::int64x64_t const &', 'right'))
    # Binary - against scalars, then unary negation, then int64x64_t - int64x64_t.
    for rhs in scalar_types:
        cls.add_binary_numeric_operator('-', i64, i64, param(rhs, 'right'))
    cls.add_unary_numeric_operator('-')
    cls.add_binary_numeric_operator('-', i64, i64, param('ns3::int64x64_t const &', 'right'))
    # Binary / against scalars, then against int64x64_t.
    for rhs in scalar_types:
        cls.add_binary_numeric_operator('/', i64, i64, param(rhs, 'right'))
    cls.add_binary_numeric_operator('/', i64, i64, param('ns3::int64x64_t const &', 'right'))
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('>')
    # In-place arithmetic against int64x64_t.
    for iop in ('*=', '-=', '/='):
        cls.add_inplace_numeric_operator(iop, param('ns3::int64x64_t const &', 'right'))
    cls.add_output_stream_operator()
    for cmp_op in ('<=', '==', '>='):
        cls.add_binary_comparison_operator(cmp_op)
    # Constructors: default, one per numeric type, (hi, lo) pair, and copy.
    cls.add_constructor([])
    for ctor_type in ('double', 'int', 'long int', 'long long int',
                      'unsigned int', 'long unsigned int',
                      'long long unsigned int'):
        cls.add_constructor([param(ctor_type, 'v')])
    cls.add_constructor([param('int64_t', 'hi'), param('uint64_t', 'lo')])
    cls.add_constructor([param('ns3::int64x64_t const &', 'o')])
    # Accessors and helpers.
    cls.add_method('GetDouble', 'double', [], is_const=True)
    cls.add_method('GetHigh', 'int64_t', [], is_const=True)
    cls.add_method('GetLow', 'uint64_t', [], is_const=True)
    cls.add_method('Invert', 'ns3::int64x64_t', [param('uint64_t', 'v')],
                   is_static=True)
    cls.add_method('MulByInvert', 'void', [param('ns3::int64x64_t const &', 'o')])
    return
def register_Ns3SimpleOfdmSendParam_methods(root_module, cls):
    """Register bindings for ns3::simpleOfdmSendParam (simple-ofdm-send-param.h).

    The class is a plain parameter holder, so its API is registered as
    constructors followed by getter and setter loops (same order as the
    generated original).
    """
    # Constructors: copy, default, FEC-block form, and packet-burst form.
    cls.add_constructor([param('ns3::simpleOfdmSendParam const &', 'arg0')])
    cls.add_constructor([])
    cls.add_constructor([param('ns3::bvec const &', 'fecBlock'),
                         param('uint32_t', 'burstSize'),
                         param('bool', 'isFirstBlock'),
                         param('uint64_t', 'Frequency'),
                         param('ns3::WimaxPhy::ModulationType', 'modulationType'),
                         param('uint8_t', 'direction'),
                         param('double', 'rxPowerDbm')])
    cls.add_constructor([param('uint32_t', 'burstSize'),
                         param('bool', 'isFirstBlock'),
                         param('uint64_t', 'Frequency'),
                         param('ns3::WimaxPhy::ModulationType', 'modulationType'),
                         param('uint8_t', 'direction'),
                         param('double', 'rxPowerDbm'),
                         param('ns3::Ptr< ns3::PacketBurst >', 'burst')])
    # Getters (no arguments).
    for getter, rtype in (
        ('GetBurst', 'ns3::Ptr< ns3::PacketBurst >'),
        ('GetBurstSize', 'uint32_t'),
        ('GetDirection', 'uint8_t'),
        ('GetFecBlock', 'ns3::bvec'),
        ('GetFrequency', 'uint64_t'),
        ('GetIsFirstBlock', 'bool'),
        ('GetModulationType', 'ns3::WimaxPhy::ModulationType'),
        ('GetRxPowerDbm', 'double'),
    ):
        cls.add_method(getter, rtype, [])
    # Setters (single argument, void return).
    for setter, ptype, pname in (
        ('SetBurstSize', 'uint32_t', 'burstSize'),
        ('SetDirection', 'uint8_t', 'direction'),
        ('SetFecBlock', 'ns3::bvec const &', 'fecBlock'),
        ('SetFrequency', 'uint64_t', 'Frequency'),
        ('SetIsFirstBlock', 'bool', 'isFirstBlock'),
        ('SetModulationType', 'ns3::WimaxPhy::ModulationType', 'modulationType'),
        ('SetRxPowerDbm', 'double', 'rxPowerDbm'),
    ):
        cls.add_method(setter, 'void', [param(ptype, pname)])
    return
def register_Ns3Chunk_methods(root_module, cls):
    """Register bindings for ns3::Chunk, the abstract header/trailer base."""
    # Default and copy constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Chunk const &', 'arg0')])
    # uint32_t Deserialize(Buffer::Iterator) -- pure virtual.
    cls.add_method('Deserialize', 'uint32_t',
                   [param('ns3::Buffer::Iterator', 'start')],
                   is_pure_virtual=True, is_virtual=True)
    # static TypeId GetTypeId()
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    # void Print(std::ostream &) const -- pure virtual.
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def register_Ns3ClassificationRuleVectorTlvValue_methods(root_module, cls):
    """Register bindings for ns3::ClassificationRuleVectorTlvValue (wimax-tlv.h)."""
    # Copy and default constructors.
    cls.add_constructor([param('ns3::ClassificationRuleVectorTlvValue const &', 'arg0')])
    cls.add_constructor([])
    # Virtual clone: ClassificationRuleVectorTlvValue * Copy() const.
    cls.add_method('Copy', 'ns3::ClassificationRuleVectorTlvValue *', [],
                   is_const=True, is_virtual=True)
    # uint32_t Deserialize(Buffer::Iterator, uint64_t valueLength).
    cls.add_method('Deserialize', 'uint32_t',
                   [param('ns3::Buffer::Iterator', 'start'), param('uint64_t', 'valueLength')],
                   is_virtual=True)
    return
def register_Ns3CsParamVectorTlvValue_methods(root_module, cls):
    """Register bindings for ns3::CsParamVectorTlvValue (wimax-tlv.h)."""
    # Copy and default constructors.
    cls.add_constructor([param('ns3::CsParamVectorTlvValue const &', 'arg0')])
    cls.add_constructor([])
    # Virtual clone: CsParamVectorTlvValue * Copy() const.
    cls.add_method('Copy', 'ns3::CsParamVectorTlvValue *', [],
                   is_const=True, is_virtual=True)
    # uint32_t Deserialize(Buffer::Iterator, uint64_t valueLength).
    cls.add_method('Deserialize', 'uint32_t',
                   [param('ns3::Buffer::Iterator', 'start'), param('uint64_t', 'valueLength')],
                   is_virtual=True)
    return
def register_Ns3Header_methods(root_module, cls):
    """Register bindings for ns3::Header, the abstract packet-header base."""
    cls.add_output_stream_operator()
    # Default and copy constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Header const &', 'arg0')])
    # Pure-virtual (de)serialization interface implemented by concrete headers.
    cls.add_method('Deserialize', 'uint32_t',
                   [param('ns3::Buffer::Iterator', 'start')],
                   is_pure_virtual=True, is_virtual=True)
    cls.add_method('GetSerializedSize', 'uint32_t', [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    # static TypeId GetTypeId().
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('Serialize', 'void',
                   [param('ns3::Buffer::Iterator', 'start')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def register_Ns3Ipv4AddressTlvValue_methods(root_module, cls):
    """Register bindings for ns3::Ipv4AddressTlvValue (wimax-tlv.h)."""
    # Iterator type returned by both Begin() and End(); hoisted so the long
    # libstdc++ iterator spelling appears only once.
    addr_iter = ('__gnu_cxx::__normal_iterator< ns3::Ipv4AddressTlvValue::ipv4Addr const *, '
                 'std::vector< ns3::Ipv4AddressTlvValue::ipv4Addr > >')
    # Copy and default constructors.
    cls.add_constructor([param('ns3::Ipv4AddressTlvValue const &', 'arg0')])
    cls.add_constructor([])
    # void Add(Ipv4Address, Ipv4Mask).
    cls.add_method('Add', 'void',
                   [param('ns3::Ipv4Address', 'address'), param('ns3::Ipv4Mask', 'Mask')])
    cls.add_method('Begin', addr_iter, [], is_const=True)
    # Virtual clone.
    cls.add_method('Copy', 'ns3::Ipv4AddressTlvValue *', [],
                   is_const=True, is_virtual=True)
    cls.add_method('Deserialize', 'uint32_t',
                   [param('ns3::Buffer::Iterator', 'start'), param('uint64_t', 'valueLength')],
                   is_virtual=True)
    cls.add_method('End', addr_iter, [], is_const=True)
    cls.add_method('GetSerializedSize', 'uint32_t', [],
                   is_const=True, is_virtual=True)
    cls.add_method('Serialize', 'void',
                   [param('ns3::Buffer::Iterator', 'start')],
                   is_const=True, is_virtual=True)
    return
def register_Ns3Ipv4AddressTlvValueIpv4Addr_methods(root_module, cls):
    """Register bindings for the nested struct ns3::Ipv4AddressTlvValue::ipv4Addr."""
    # Default and copy constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Ipv4AddressTlvValue::ipv4Addr const &', 'arg0')])
    # Public data members: the address and its network mask.
    cls.add_instance_attribute('Address', 'ns3::Ipv4Address', is_const=False)
    cls.add_instance_attribute('Mask', 'ns3::Ipv4Mask', is_const=False)
    return
def register_Ns3MacHeaderType_methods(root_module, cls):
    """Register bindings for ns3::MacHeaderType (wimax-mac-header.h)."""
    # Constructors: copy, default, and explicit type value.
    cls.add_constructor([param('ns3::MacHeaderType const &', 'arg0')])
    cls.add_constructor([])
    cls.add_constructor([param('uint8_t', 'type')])
    # Header (de)serialization overrides.
    cls.add_method('Deserialize', 'uint32_t',
                   [param('ns3::Buffer::Iterator', 'start')],
                   is_virtual=True)
    cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [],
                   is_const=True, is_virtual=True)
    cls.add_method('GetName', 'std::string', [], is_const=True)
    cls.add_method('GetSerializedSize', 'uint32_t', [],
                   is_const=True, is_virtual=True)
    cls.add_method('GetType', 'uint8_t', [], is_const=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')],
                   is_const=True, is_virtual=True)
    cls.add_method('Serialize', 'void',
                   [param('ns3::Buffer::Iterator', 'start')],
                   is_const=True, is_virtual=True)
    cls.add_method('SetType', 'void', [param('uint8_t', 'type')])
    return
def register_Ns3ManagementMessageType_methods(root_module, cls):
    """Register bindings for ns3::ManagementMessageType (mac-messages.h)."""
    # Constructors: copy, default, and explicit type value.
    cls.add_constructor([param('ns3::ManagementMessageType const &', 'arg0')])
    cls.add_constructor([])
    cls.add_constructor([param('uint8_t', 'type')])
    # Header (de)serialization overrides.
    cls.add_method('Deserialize', 'uint32_t',
                   [param('ns3::Buffer::Iterator', 'start')],
                   is_virtual=True)
    cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [],
                   is_const=True, is_virtual=True)
    cls.add_method('GetName', 'std::string', [], is_const=True)
    cls.add_method('GetSerializedSize', 'uint32_t', [],
                   is_const=True, is_virtual=True)
    cls.add_method('GetType', 'uint8_t', [], is_const=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')],
                   is_const=True, is_virtual=True)
    cls.add_method('Serialize', 'void',
                   [param('ns3::Buffer::Iterator', 'start')],
                   is_const=True, is_virtual=True)
    cls.add_method('SetType', 'void', [param('uint8_t', 'type')])
    return
def register_Ns3Object_methods(root_module, cls):
    """Register bindings for ns3::Object, the aggregation-aware base class."""
    cls.add_constructor([])
    # Aggregation and lifecycle API.
    cls.add_method('AggregateObject', 'void',
                   [param('ns3::Ptr< ns3::Object >', 'other')])
    cls.add_method('Dispose', 'void', [])
    cls.add_method('GetAggregateIterator', 'ns3::Object::AggregateIterator', [],
                   is_const=True)
    cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [],
                   is_const=True, is_virtual=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('Start', 'void', [])
    # Copy constructor is protected in the C++ class.
    cls.add_constructor([param('ns3::Object const &', 'o')],
                        visibility='protected')
    # Protected virtual lifecycle / aggregation hooks, all void with no args.
    for hook in ('DoDispose', 'DoStart', 'NotifyNewAggregate'):
        cls.add_method(hook, 'void', [],
                       visibility='protected', is_virtual=True)
    return
def register_Ns3ObjectAggregateIterator_methods(root_module, cls):
    """Register bindings for ns3::Object::AggregateIterator."""
    # Copy and default constructors.
    cls.add_constructor([param('ns3::Object::AggregateIterator const &', 'arg0')])
    cls.add_constructor([])
    # bool HasNext() const / Ptr<Object const> Next().
    cls.add_method('HasNext', 'bool', [], is_const=True)
    cls.add_method('Next', 'ns3::Ptr< ns3::Object const >', [])
    return
def register_Ns3OfdmDownlinkFramePrefix_methods(root_module, cls):
    """Register bindings for ns3::OfdmDownlinkFramePrefix (ofdm-downlink-frame-prefix.h)."""
    # Copy and default constructors.
    cls.add_constructor([param('ns3::OfdmDownlinkFramePrefix const &', 'arg0')])
    cls.add_constructor([])
    cls.add_method('AddDlFramePrefixElement', 'void',
                   [param('ns3::DlFramePrefixIe', 'dlFramePrefixElement')])
    cls.add_method('Deserialize', 'uint32_t',
                   [param('ns3::Buffer::Iterator', 'start')],
                   is_virtual=True)
    # Simple const accessors (no arguments).
    for getter, rtype in (
        ('GetBaseStationId', 'ns3::Mac48Address'),
        ('GetConfigurationChangeCount', 'uint8_t'),
        ('GetDlFramePrefixElements', 'std::vector< ns3::DlFramePrefixIe >'),
        ('GetFrameNumber', 'uint32_t'),
        ('GetHcs', 'uint8_t'),
        ('GetName', 'std::string'),
    ):
        cls.add_method(getter, rtype, [], is_const=True)
    cls.add_method('GetSerializedSize', 'uint32_t', [],
                   is_const=True, is_virtual=True)
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')],
                   is_const=True, is_virtual=True)
    cls.add_method('Serialize', 'void',
                   [param('ns3::Buffer::Iterator', 'start')],
                   is_const=True, is_virtual=True)
    # Simple setters (single argument, void return).
    for setter, ptype, pname in (
        ('SetBaseStationId', 'ns3::Mac48Address', 'baseStationId'),
        ('SetConfigurationChangeCount', 'uint8_t', 'configurationChangeCount'),
        ('SetFrameNumber', 'uint32_t', 'frameNumber'),
        ('SetHcs', 'uint8_t', 'hcs'),
    ):
        cls.add_method(setter, 'void', [param(ptype, pname)])
    return
def register_Ns3OfdmSendParams_methods(root_module, cls):
    """Register the Python bindings for ns3::OfdmSendParams (send-params.h, module 'wimax')."""
    # Copy constructor: OfdmSendParams(OfdmSendParams const &)
    cls.add_constructor([param('ns3::OfdmSendParams const &', 'arg0')])
    # OfdmSendParams(Ptr<PacketBurst> burst, uint8_t modulationType, uint8_t direction)
    cls.add_constructor([param('ns3::Ptr< ns3::PacketBurst >', 'burst'),
                         param('uint8_t', 'modulationType'),
                         param('uint8_t', 'direction')])
    # Const, argument-less accessors: GetBurst, GetDirection, GetModulationType.
    for accessor_name, return_type in (('GetBurst', 'ns3::Ptr< ns3::PacketBurst >'),
                                       ('GetDirection', 'uint8_t'),
                                       ('GetModulationType', 'uint8_t')):
        cls.add_method(accessor_name, return_type, [], is_const=True)
    return
def register_Ns3OfdmUcdChannelEncodings_methods(root_module, cls):
    """Register the Python bindings for ns3::OfdmUcdChannelEncodings (ul-mac-messages.h, module 'wimax').

    Adds constructors, const getters/setters for the UCD channel-encoding
    fields, and the private virtual DoRead/DoWrite serialization hooks.
    """
    ## ul-mac-messages.h (module 'wimax'): ns3::OfdmUcdChannelEncodings::OfdmUcdChannelEncodings(ns3::OfdmUcdChannelEncodings const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::OfdmUcdChannelEncodings const &', 'arg0')])
    ## ul-mac-messages.h (module 'wimax'): ns3::OfdmUcdChannelEncodings::OfdmUcdChannelEncodings() [constructor]
    cls.add_constructor([])
    ## ul-mac-messages.h (module 'wimax'): uint8_t ns3::OfdmUcdChannelEncodings::GetSbchnlFocContCodes() const [member function]
    cls.add_method('GetSbchnlFocContCodes',
                   'uint8_t',
                   [],
                   is_const=True)
    ## ul-mac-messages.h (module 'wimax'): uint8_t ns3::OfdmUcdChannelEncodings::GetSbchnlReqRegionFullParams() const [member function]
    cls.add_method('GetSbchnlReqRegionFullParams',
                   'uint8_t',
                   [],
                   is_const=True)
    ## ul-mac-messages.h (module 'wimax'): uint16_t ns3::OfdmUcdChannelEncodings::GetSize() const [member function]
    cls.add_method('GetSize',
                   'uint16_t',
                   [],
                   is_const=True)
    ## ul-mac-messages.h (module 'wimax'): void ns3::OfdmUcdChannelEncodings::SetSbchnlFocContCodes(uint8_t sbchnlFocContCodes) [member function]
    cls.add_method('SetSbchnlFocContCodes',
                   'void',
                   [param('uint8_t', 'sbchnlFocContCodes')])
    ## ul-mac-messages.h (module 'wimax'): void ns3::OfdmUcdChannelEncodings::SetSbchnlReqRegionFullParams(uint8_t sbchnlReqRegionFullParams) [member function]
    cls.add_method('SetSbchnlReqRegionFullParams',
                   'void',
                   [param('uint8_t', 'sbchnlReqRegionFullParams')])
    # The two serialization hooks below are private virtuals in the C++ class.
    ## ul-mac-messages.h (module 'wimax'): ns3::Buffer::Iterator ns3::OfdmUcdChannelEncodings::DoRead(ns3::Buffer::Iterator start) [member function]
    cls.add_method('DoRead',
                   'ns3::Buffer::Iterator',
                   [param('ns3::Buffer::Iterator', 'start')],
                   visibility='private', is_virtual=True)
    ## ul-mac-messages.h (module 'wimax'): ns3::Buffer::Iterator ns3::OfdmUcdChannelEncodings::DoWrite(ns3::Buffer::Iterator start) const [member function]
    cls.add_method('DoWrite',
                   'ns3::Buffer::Iterator',
                   [param('ns3::Buffer::Iterator', 'start')],
                   is_const=True, visibility='private', is_virtual=True)
    return
def register_Ns3PacketBurst_methods(root_module, cls):
    """Register the Python bindings for ns3::PacketBurst (packet-burst.h, module 'network').

    Adds constructors, packet-list accessors (Begin/End iterators, GetPackets),
    size queries, the static GetTypeId, and the private virtual DoDispose.
    """
    ## packet-burst.h (module 'network'): ns3::PacketBurst::PacketBurst(ns3::PacketBurst const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::PacketBurst const &', 'arg0')])
    ## packet-burst.h (module 'network'): ns3::PacketBurst::PacketBurst() [constructor]
    cls.add_constructor([])
    ## packet-burst.h (module 'network'): void ns3::PacketBurst::AddPacket(ns3::Ptr<ns3::Packet> packet) [member function]
    cls.add_method('AddPacket',
                   'void',
                   [param('ns3::Ptr< ns3::Packet >', 'packet')])
    ## packet-burst.h (module 'network'): std::_List_const_iterator<ns3::Ptr<ns3::Packet> > ns3::PacketBurst::Begin() const [member function]
    cls.add_method('Begin',
                   'std::_List_const_iterator< ns3::Ptr< ns3::Packet > >',
                   [],
                   is_const=True)
    ## packet-burst.h (module 'network'): ns3::Ptr<ns3::PacketBurst> ns3::PacketBurst::Copy() const [member function]
    cls.add_method('Copy',
                   'ns3::Ptr< ns3::PacketBurst >',
                   [],
                   is_const=True)
    ## packet-burst.h (module 'network'): std::_List_const_iterator<ns3::Ptr<ns3::Packet> > ns3::PacketBurst::End() const [member function]
    cls.add_method('End',
                   'std::_List_const_iterator< ns3::Ptr< ns3::Packet > >',
                   [],
                   is_const=True)
    ## packet-burst.h (module 'network'): uint32_t ns3::PacketBurst::GetNPackets() const [member function]
    cls.add_method('GetNPackets',
                   'uint32_t',
                   [],
                   is_const=True)
    ## packet-burst.h (module 'network'): std::list<ns3::Ptr<ns3::Packet>, std::allocator<ns3::Ptr<ns3::Packet> > > ns3::PacketBurst::GetPackets() const [member function]
    cls.add_method('GetPackets',
                   'std::list< ns3::Ptr< ns3::Packet > >',
                   [],
                   is_const=True)
    ## packet-burst.h (module 'network'): uint32_t ns3::PacketBurst::GetSize() const [member function]
    cls.add_method('GetSize',
                   'uint32_t',
                   [],
                   is_const=True)
    ## packet-burst.h (module 'network'): static ns3::TypeId ns3::PacketBurst::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## packet-burst.h (module 'network'): void ns3::PacketBurst::DoDispose() [member function]
    cls.add_method('DoDispose',
                   'void',
                   [],
                   visibility='private', is_virtual=True)
    return
def register_Ns3PcapFileWrapper_methods(root_module, cls):
    """Register the Python bindings for ns3::PcapFileWrapper (pcap-file-wrapper.h, module 'network').

    Adds the constructor, file-state queries (Fail/Eof/Clear), Open/Close/Init,
    the three overloads of Write, and getters for the pcap file header fields.
    """
    ## pcap-file-wrapper.h (module 'network'): static ns3::TypeId ns3::PcapFileWrapper::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## pcap-file-wrapper.h (module 'network'): ns3::PcapFileWrapper::PcapFileWrapper() [constructor]
    cls.add_constructor([])
    ## pcap-file-wrapper.h (module 'network'): bool ns3::PcapFileWrapper::Fail() const [member function]
    cls.add_method('Fail',
                   'bool',
                   [],
                   is_const=True)
    ## pcap-file-wrapper.h (module 'network'): bool ns3::PcapFileWrapper::Eof() const [member function]
    cls.add_method('Eof',
                   'bool',
                   [],
                   is_const=True)
    ## pcap-file-wrapper.h (module 'network'): void ns3::PcapFileWrapper::Clear() [member function]
    cls.add_method('Clear',
                   'void',
                   [])
    ## pcap-file-wrapper.h (module 'network'): void ns3::PcapFileWrapper::Open(std::string const & filename, std::_Ios_Openmode mode) [member function]
    cls.add_method('Open',
                   'void',
                   [param('std::string const &', 'filename'), param('std::_Ios_Openmode', 'mode')])
    ## pcap-file-wrapper.h (module 'network'): void ns3::PcapFileWrapper::Close() [member function]
    cls.add_method('Close',
                   'void',
                   [])
    # Init carries C++ default arguments; they are passed through as
    # default_value strings so the generated binding preserves them.
    ## pcap-file-wrapper.h (module 'network'): void ns3::PcapFileWrapper::Init(uint32_t dataLinkType, uint32_t snapLen=std::numeric_limits<unsigned int>::max(), int32_t tzCorrection=ns3::PcapFile::ZONE_DEFAULT) [member function]
    cls.add_method('Init',
                   'void',
                   [param('uint32_t', 'dataLinkType'), param('uint32_t', 'snapLen', default_value='std::numeric_limits<unsigned int>::max()'), param('int32_t', 'tzCorrection', default_value='ns3::PcapFile::ZONE_DEFAULT')])
    # Three overloads of Write: packet-only, header+packet, and raw buffer.
    ## pcap-file-wrapper.h (module 'network'): void ns3::PcapFileWrapper::Write(ns3::Time t, ns3::Ptr<const ns3::Packet> p) [member function]
    cls.add_method('Write',
                   'void',
                   [param('ns3::Time', 't'), param('ns3::Ptr< ns3::Packet const >', 'p')])
    ## pcap-file-wrapper.h (module 'network'): void ns3::PcapFileWrapper::Write(ns3::Time t, ns3::Header & header, ns3::Ptr<const ns3::Packet> p) [member function]
    cls.add_method('Write',
                   'void',
                   [param('ns3::Time', 't'), param('ns3::Header &', 'header'), param('ns3::Ptr< ns3::Packet const >', 'p')])
    ## pcap-file-wrapper.h (module 'network'): void ns3::PcapFileWrapper::Write(ns3::Time t, uint8_t const * buffer, uint32_t length) [member function]
    cls.add_method('Write',
                   'void',
                   [param('ns3::Time', 't'), param('uint8_t const *', 'buffer'), param('uint32_t', 'length')])
    ## pcap-file-wrapper.h (module 'network'): uint32_t ns3::PcapFileWrapper::GetMagic() [member function]
    cls.add_method('GetMagic',
                   'uint32_t',
                   [])
    ## pcap-file-wrapper.h (module 'network'): uint16_t ns3::PcapFileWrapper::GetVersionMajor() [member function]
    cls.add_method('GetVersionMajor',
                   'uint16_t',
                   [])
    ## pcap-file-wrapper.h (module 'network'): uint16_t ns3::PcapFileWrapper::GetVersionMinor() [member function]
    cls.add_method('GetVersionMinor',
                   'uint16_t',
                   [])
    ## pcap-file-wrapper.h (module 'network'): int32_t ns3::PcapFileWrapper::GetTimeZoneOffset() [member function]
    cls.add_method('GetTimeZoneOffset',
                   'int32_t',
                   [])
    ## pcap-file-wrapper.h (module 'network'): uint32_t ns3::PcapFileWrapper::GetSigFigs() [member function]
    cls.add_method('GetSigFigs',
                   'uint32_t',
                   [])
    ## pcap-file-wrapper.h (module 'network'): uint32_t ns3::PcapFileWrapper::GetSnapLen() [member function]
    cls.add_method('GetSnapLen',
                   'uint32_t',
                   [])
    ## pcap-file-wrapper.h (module 'network'): uint32_t ns3::PcapFileWrapper::GetDataLinkType() [member function]
    cls.add_method('GetDataLinkType',
                   'uint32_t',
                   [])
    return
def register_Ns3PortRangeTlvValue_methods(root_module, cls):
    """Register the Python bindings for ns3::PortRangeTlvValue (wimax-tlv.h, module 'wimax').

    Adds constructors, Add/Begin/End for the contained port-range vector, and
    the virtual Copy/Deserialize/GetSerializedSize/Serialize TLV-value API.
    """
    ## wimax-tlv.h (module 'wimax'): ns3::PortRangeTlvValue::PortRangeTlvValue(ns3::PortRangeTlvValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::PortRangeTlvValue const &', 'arg0')])
    ## wimax-tlv.h (module 'wimax'): ns3::PortRangeTlvValue::PortRangeTlvValue() [constructor]
    cls.add_constructor([])
    ## wimax-tlv.h (module 'wimax'): void ns3::PortRangeTlvValue::Add(uint16_t portLow, uint16_t portHigh) [member function]
    cls.add_method('Add',
                   'void',
                   [param('uint16_t', 'portLow'), param('uint16_t', 'portHigh')])
    ## wimax-tlv.h (module 'wimax'): __gnu_cxx::__normal_iterator<const ns3::PortRangeTlvValue::PortRange*,std::vector<ns3::PortRangeTlvValue::PortRange, std::allocator<ns3::PortRangeTlvValue::PortRange> > > ns3::PortRangeTlvValue::Begin() const [member function]
    cls.add_method('Begin',
                   '__gnu_cxx::__normal_iterator< ns3::PortRangeTlvValue::PortRange const *, std::vector< ns3::PortRangeTlvValue::PortRange > >',
                   [],
                   is_const=True)
    ## wimax-tlv.h (module 'wimax'): ns3::PortRangeTlvValue * ns3::PortRangeTlvValue::Copy() const [member function]
    cls.add_method('Copy',
                   'ns3::PortRangeTlvValue *',
                   [],
                   is_const=True, is_virtual=True)
    ## wimax-tlv.h (module 'wimax'): uint32_t ns3::PortRangeTlvValue::Deserialize(ns3::Buffer::Iterator start, uint64_t valueLength) [member function]
    cls.add_method('Deserialize',
                   'uint32_t',
                   [param('ns3::Buffer::Iterator', 'start'), param('uint64_t', 'valueLength')],
                   is_virtual=True)
    ## wimax-tlv.h (module 'wimax'): __gnu_cxx::__normal_iterator<const ns3::PortRangeTlvValue::PortRange*,std::vector<ns3::PortRangeTlvValue::PortRange, std::allocator<ns3::PortRangeTlvValue::PortRange> > > ns3::PortRangeTlvValue::End() const [member function]
    cls.add_method('End',
                   '__gnu_cxx::__normal_iterator< ns3::PortRangeTlvValue::PortRange const *, std::vector< ns3::PortRangeTlvValue::PortRange > >',
                   [],
                   is_const=True)
    ## wimax-tlv.h (module 'wimax'): uint32_t ns3::PortRangeTlvValue::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize',
                   'uint32_t',
                   [],
                   is_const=True, is_virtual=True)
    ## wimax-tlv.h (module 'wimax'): void ns3::PortRangeTlvValue::Serialize(ns3::Buffer::Iterator start) const [member function]
    cls.add_method('Serialize',
                   'void',
                   [param('ns3::Buffer::Iterator', 'start')],
                   is_const=True, is_virtual=True)
    return
def register_Ns3PortRangeTlvValuePortRange_methods(root_module, cls):
    """Register bindings for the nested struct ns3::PortRangeTlvValue::PortRange (wimax-tlv.h)."""
    # Default constructor, then the copy constructor.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::PortRangeTlvValue::PortRange const &', 'arg0')])
    # Two public, mutable uint16_t fields holding the range bounds.
    for field_name in ('PortHigh', 'PortLow'):
        cls.add_instance_attribute(field_name, 'uint16_t', is_const=False)
    return
def register_Ns3PriorityUlJob_methods(root_module, cls):
    """Register the Python bindings for ns3::PriorityUlJob (ul-job.h, module 'wimax')."""
    # Copy constructor followed by the default constructor.
    cls.add_constructor([param('ns3::PriorityUlJob const &', 'arg0')])
    cls.add_constructor([])
    # int GetPriority()
    cls.add_method('GetPriority', 'int', [])
    # Ptr<UlJob> GetUlJob()
    cls.add_method('GetUlJob', 'ns3::Ptr< ns3::UlJob >', [])
    # void SetPriority(int priority)
    cls.add_method('SetPriority', 'void', [param('int', 'priority')])
    # void SetUlJob(Ptr<UlJob> job)
    cls.add_method('SetUlJob', 'void', [param('ns3::Ptr< ns3::UlJob >', 'job')])
    return
def register_Ns3PropagationLossModel_methods(root_module, cls):
    """Register the Python bindings for ns3::PropagationLossModel (propagation-loss-model.h, module 'propagation').

    Adds the constructor, chaining via SetNext/GetNext, the public
    CalcRxPower/AssignStreams entry points, and the pure-virtual private
    DoCalcRxPower/DoAssignStreams hooks subclasses must override.
    """
    ## propagation-loss-model.h (module 'propagation'): static ns3::TypeId ns3::PropagationLossModel::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## propagation-loss-model.h (module 'propagation'): ns3::PropagationLossModel::PropagationLossModel() [constructor]
    cls.add_constructor([])
    ## propagation-loss-model.h (module 'propagation'): void ns3::PropagationLossModel::SetNext(ns3::Ptr<ns3::PropagationLossModel> next) [member function]
    cls.add_method('SetNext',
                   'void',
                   [param('ns3::Ptr< ns3::PropagationLossModel >', 'next')])
    ## propagation-loss-model.h (module 'propagation'): ns3::Ptr<ns3::PropagationLossModel> ns3::PropagationLossModel::GetNext() [member function]
    cls.add_method('GetNext',
                   'ns3::Ptr< ns3::PropagationLossModel >',
                   [])
    ## propagation-loss-model.h (module 'propagation'): double ns3::PropagationLossModel::CalcRxPower(double txPowerDbm, ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function]
    cls.add_method('CalcRxPower',
                   'double',
                   [param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')],
                   is_const=True)
    ## propagation-loss-model.h (module 'propagation'): int64_t ns3::PropagationLossModel::AssignStreams(int64_t stream) [member function]
    cls.add_method('AssignStreams',
                   'int64_t',
                   [param('int64_t', 'stream')])
    ## propagation-loss-model.h (module 'propagation'): double ns3::PropagationLossModel::DoCalcRxPower(double txPowerDbm, ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function]
    cls.add_method('DoCalcRxPower',
                   'double',
                   [param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')],
                   is_pure_virtual=True, is_const=True, visibility='private', is_virtual=True)
    ## propagation-loss-model.h (module 'propagation'): int64_t ns3::PropagationLossModel::DoAssignStreams(int64_t stream) [member function]
    cls.add_method('DoAssignStreams',
                   'int64_t',
                   [param('int64_t', 'stream')],
                   is_pure_virtual=True, visibility='private', is_virtual=True)
    return
def register_Ns3ProtocolTlvValue_methods(root_module, cls):
    """Register the Python bindings for ns3::ProtocolTlvValue (wimax-tlv.h, module 'wimax').

    Adds constructors, Add/Begin/End for the protocol-id vector, and the
    virtual Copy/Deserialize/GetSerializedSize/Serialize TLV-value API.
    """
    ## wimax-tlv.h (module 'wimax'): ns3::ProtocolTlvValue::ProtocolTlvValue(ns3::ProtocolTlvValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ProtocolTlvValue const &', 'arg0')])
    ## wimax-tlv.h (module 'wimax'): ns3::ProtocolTlvValue::ProtocolTlvValue() [constructor]
    cls.add_constructor([])
    # NOTE(review): 'protiocol' reproduces the parameter-name typo from the
    # C++ header; changing it here would alter the generated keyword argument.
    ## wimax-tlv.h (module 'wimax'): void ns3::ProtocolTlvValue::Add(uint8_t protiocol) [member function]
    cls.add_method('Add',
                   'void',
                   [param('uint8_t', 'protiocol')])
    ## wimax-tlv.h (module 'wimax'): __gnu_cxx::__normal_iterator<const unsigned char*,std::vector<unsigned char, std::allocator<unsigned char> > > ns3::ProtocolTlvValue::Begin() const [member function]
    cls.add_method('Begin',
                   '__gnu_cxx::__normal_iterator< unsigned char const *, std::vector< unsigned char > >',
                   [],
                   is_const=True)
    ## wimax-tlv.h (module 'wimax'): ns3::ProtocolTlvValue * ns3::ProtocolTlvValue::Copy() const [member function]
    cls.add_method('Copy',
                   'ns3::ProtocolTlvValue *',
                   [],
                   is_const=True, is_virtual=True)
    ## wimax-tlv.h (module 'wimax'): uint32_t ns3::ProtocolTlvValue::Deserialize(ns3::Buffer::Iterator start, uint64_t valueLength) [member function]
    cls.add_method('Deserialize',
                   'uint32_t',
                   [param('ns3::Buffer::Iterator', 'start'), param('uint64_t', 'valueLength')],
                   is_virtual=True)
    ## wimax-tlv.h (module 'wimax'): __gnu_cxx::__normal_iterator<const unsigned char*,std::vector<unsigned char, std::allocator<unsigned char> > > ns3::ProtocolTlvValue::End() const [member function]
    cls.add_method('End',
                   '__gnu_cxx::__normal_iterator< unsigned char const *, std::vector< unsigned char > >',
                   [],
                   is_const=True)
    ## wimax-tlv.h (module 'wimax'): uint32_t ns3::ProtocolTlvValue::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize',
                   'uint32_t',
                   [],
                   is_const=True, is_virtual=True)
    ## wimax-tlv.h (module 'wimax'): void ns3::ProtocolTlvValue::Serialize(ns3::Buffer::Iterator start) const [member function]
    cls.add_method('Serialize',
                   'void',
                   [param('ns3::Buffer::Iterator', 'start')],
                   is_const=True, is_virtual=True)
    return
def register_Ns3RandomPropagationLossModel_methods(root_module, cls):
    """Register the Python bindings for ns3::RandomPropagationLossModel (propagation-loss-model.h)."""
    # static TypeId GetTypeId()
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    # Default constructor.
    cls.add_constructor([])
    # Private virtual override: double DoCalcRxPower(txPowerDbm, a, b) const
    rx_power_args = [param('double', 'txPowerDbm'),
                     param('ns3::Ptr< ns3::MobilityModel >', 'a'),
                     param('ns3::Ptr< ns3::MobilityModel >', 'b')]
    cls.add_method('DoCalcRxPower', 'double', rx_power_args,
                   is_const=True, visibility='private', is_virtual=True)
    # Private virtual override: int64_t DoAssignStreams(int64_t stream)
    cls.add_method('DoAssignStreams', 'int64_t',
                   [param('int64_t', 'stream')],
                   visibility='private', is_virtual=True)
    return
def register_Ns3RandomVariableStream_methods(root_module, cls):
    """Register the Python bindings for ns3::RandomVariableStream (random-variable-stream.h, module 'core').

    Adds the constructor, stream/antithetic accessors, the pure-virtual
    GetValue/GetInteger draw methods, and the protected Peek helper.
    """
    ## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::RandomVariableStream::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## random-variable-stream.h (module 'core'): ns3::RandomVariableStream::RandomVariableStream() [constructor]
    cls.add_constructor([])
    ## random-variable-stream.h (module 'core'): void ns3::RandomVariableStream::SetStream(int64_t stream) [member function]
    cls.add_method('SetStream',
                   'void',
                   [param('int64_t', 'stream')])
    ## random-variable-stream.h (module 'core'): int64_t ns3::RandomVariableStream::GetStream() const [member function]
    cls.add_method('GetStream',
                   'int64_t',
                   [],
                   is_const=True)
    ## random-variable-stream.h (module 'core'): void ns3::RandomVariableStream::SetAntithetic(bool isAntithetic) [member function]
    cls.add_method('SetAntithetic',
                   'void',
                   [param('bool', 'isAntithetic')])
    ## random-variable-stream.h (module 'core'): bool ns3::RandomVariableStream::IsAntithetic() const [member function]
    cls.add_method('IsAntithetic',
                   'bool',
                   [],
                   is_const=True)
    ## random-variable-stream.h (module 'core'): double ns3::RandomVariableStream::GetValue() [member function]
    cls.add_method('GetValue',
                   'double',
                   [],
                   is_pure_virtual=True, is_virtual=True)
    ## random-variable-stream.h (module 'core'): uint32_t ns3::RandomVariableStream::GetInteger() [member function]
    cls.add_method('GetInteger',
                   'uint32_t',
                   [],
                   is_pure_virtual=True, is_virtual=True)
    ## random-variable-stream.h (module 'core'): ns3::RngStream * ns3::RandomVariableStream::Peek() const [member function]
    cls.add_method('Peek',
                   'ns3::RngStream *',
                   [],
                   is_const=True, visibility='protected')
    return
def register_Ns3RangePropagationLossModel_methods(root_module, cls):
    """Register the Python bindings for ns3::RangePropagationLossModel (propagation-loss-model.h)."""
    # static TypeId GetTypeId()
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    # Default constructor.
    cls.add_constructor([])
    # Private virtual override: double DoCalcRxPower(txPowerDbm, a, b) const
    mobility_pair = [param('double', 'txPowerDbm'),
                     param('ns3::Ptr< ns3::MobilityModel >', 'a'),
                     param('ns3::Ptr< ns3::MobilityModel >', 'b')]
    cls.add_method('DoCalcRxPower', 'double', mobility_pair,
                   is_const=True, visibility='private', is_virtual=True)
    # Private virtual override: int64_t DoAssignStreams(int64_t stream)
    cls.add_method('DoAssignStreams', 'int64_t',
                   [param('int64_t', 'stream')],
                   visibility='private', is_virtual=True)
    return
def register_Ns3RngReq_methods(root_module, cls):
    """Register the Python bindings for ns3::RngReq (mac-messages.h, module 'wimax').

    Adds constructors, the Header-style virtual API (Deserialize, Serialize,
    Print, GetSerializedSize, GetInstanceTypeId), and getters/setters for the
    ranging-request fields.
    """
    ## mac-messages.h (module 'wimax'): ns3::RngReq::RngReq(ns3::RngReq const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::RngReq const &', 'arg0')])
    ## mac-messages.h (module 'wimax'): ns3::RngReq::RngReq() [constructor]
    cls.add_constructor([])
    ## mac-messages.h (module 'wimax'): uint32_t ns3::RngReq::Deserialize(ns3::Buffer::Iterator start) [member function]
    cls.add_method('Deserialize',
                   'uint32_t',
                   [param('ns3::Buffer::Iterator', 'start')],
                   is_virtual=True)
    ## mac-messages.h (module 'wimax'): ns3::TypeId ns3::RngReq::GetInstanceTypeId() const [member function]
    cls.add_method('GetInstanceTypeId',
                   'ns3::TypeId',
                   [],
                   is_const=True, is_virtual=True)
    ## mac-messages.h (module 'wimax'): ns3::Mac48Address ns3::RngReq::GetMacAddress() const [member function]
    cls.add_method('GetMacAddress',
                   'ns3::Mac48Address',
                   [],
                   is_const=True)
    ## mac-messages.h (module 'wimax'): std::string ns3::RngReq::GetName() const [member function]
    cls.add_method('GetName',
                   'std::string',
                   [],
                   is_const=True)
    ## mac-messages.h (module 'wimax'): uint8_t ns3::RngReq::GetRangingAnomalies() const [member function]
    cls.add_method('GetRangingAnomalies',
                   'uint8_t',
                   [],
                   is_const=True)
    ## mac-messages.h (module 'wimax'): uint8_t ns3::RngReq::GetReqDlBurstProfile() const [member function]
    cls.add_method('GetReqDlBurstProfile',
                   'uint8_t',
                   [],
                   is_const=True)
    ## mac-messages.h (module 'wimax'): uint32_t ns3::RngReq::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize',
                   'uint32_t',
                   [],
                   is_const=True, is_virtual=True)
    ## mac-messages.h (module 'wimax'): static ns3::TypeId ns3::RngReq::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## mac-messages.h (module 'wimax'): void ns3::RngReq::Print(std::ostream & os) const [member function]
    cls.add_method('Print',
                   'void',
                   [param('std::ostream &', 'os')],
                   is_const=True, is_virtual=True)
    ## mac-messages.h (module 'wimax'): void ns3::RngReq::PrintDebug() const [member function]
    cls.add_method('PrintDebug',
                   'void',
                   [],
                   is_const=True)
    ## mac-messages.h (module 'wimax'): void ns3::RngReq::Serialize(ns3::Buffer::Iterator start) const [member function]
    cls.add_method('Serialize',
                   'void',
                   [param('ns3::Buffer::Iterator', 'start')],
                   is_const=True, is_virtual=True)
    ## mac-messages.h (module 'wimax'): void ns3::RngReq::SetMacAddress(ns3::Mac48Address macAddress) [member function]
    cls.add_method('SetMacAddress',
                   'void',
                   [param('ns3::Mac48Address', 'macAddress')])
    ## mac-messages.h (module 'wimax'): void ns3::RngReq::SetRangingAnomalies(uint8_t rangingAnomalies) [member function]
    cls.add_method('SetRangingAnomalies',
                   'void',
                   [param('uint8_t', 'rangingAnomalies')])
    ## mac-messages.h (module 'wimax'): void ns3::RngReq::SetReqDlBurstProfile(uint8_t reqDlBurstProfile) [member function]
    cls.add_method('SetReqDlBurstProfile',
                   'void',
                   [param('uint8_t', 'reqDlBurstProfile')])
    return
def register_Ns3RngRsp_methods(root_module, cls):
    """Register the Python bindings for ns3::RngRsp (mac-messages.h, module 'wimax').

    Adds constructors, the Header-style virtual API (Deserialize, Serialize,
    Print, GetSerializedSize, GetInstanceTypeId), and getter/setter pairs for
    every ranging-response field.
    """
    ## mac-messages.h (module 'wimax'): ns3::RngRsp::RngRsp(ns3::RngRsp const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::RngRsp const &', 'arg0')])
    ## mac-messages.h (module 'wimax'): ns3::RngRsp::RngRsp() [constructor]
    cls.add_constructor([])
    ## mac-messages.h (module 'wimax'): uint32_t ns3::RngRsp::Deserialize(ns3::Buffer::Iterator start) [member function]
    cls.add_method('Deserialize',
                   'uint32_t',
                   [param('ns3::Buffer::Iterator', 'start')],
                   is_virtual=True)
    ## mac-messages.h (module 'wimax'): uint8_t ns3::RngRsp::GetAasBdcastPermission() const [member function]
    cls.add_method('GetAasBdcastPermission',
                   'uint8_t',
                   [],
                   is_const=True)
    ## mac-messages.h (module 'wimax'): ns3::Cid ns3::RngRsp::GetBasicCid() const [member function]
    cls.add_method('GetBasicCid',
                   'ns3::Cid',
                   [],
                   is_const=True)
    ## mac-messages.h (module 'wimax'): uint32_t ns3::RngRsp::GetDlFreqOverride() const [member function]
    cls.add_method('GetDlFreqOverride',
                   'uint32_t',
                   [],
                   is_const=True)
    ## mac-messages.h (module 'wimax'): uint16_t ns3::RngRsp::GetDlOperBurstProfile() const [member function]
    cls.add_method('GetDlOperBurstProfile',
                   'uint16_t',
                   [],
                   is_const=True)
    ## mac-messages.h (module 'wimax'): uint32_t ns3::RngRsp::GetFrameNumber() const [member function]
    cls.add_method('GetFrameNumber',
                   'uint32_t',
                   [],
                   is_const=True)
    ## mac-messages.h (module 'wimax'): uint8_t ns3::RngRsp::GetInitRangOppNumber() const [member function]
    cls.add_method('GetInitRangOppNumber',
                   'uint8_t',
                   [],
                   is_const=True)
    ## mac-messages.h (module 'wimax'): ns3::TypeId ns3::RngRsp::GetInstanceTypeId() const [member function]
    cls.add_method('GetInstanceTypeId',
                   'ns3::TypeId',
                   [],
                   is_const=True, is_virtual=True)
    ## mac-messages.h (module 'wimax'): ns3::Mac48Address ns3::RngRsp::GetMacAddress() const [member function]
    cls.add_method('GetMacAddress',
                   'ns3::Mac48Address',
                   [],
                   is_const=True)
    ## mac-messages.h (module 'wimax'): std::string ns3::RngRsp::GetName() const [member function]
    cls.add_method('GetName',
                   'std::string',
                   [],
                   is_const=True)
    ## mac-messages.h (module 'wimax'): uint32_t ns3::RngRsp::GetOffsetFreqAdjust() const [member function]
    cls.add_method('GetOffsetFreqAdjust',
                   'uint32_t',
                   [],
                   is_const=True)
    ## mac-messages.h (module 'wimax'): uint8_t ns3::RngRsp::GetPowerLevelAdjust() const [member function]
    cls.add_method('GetPowerLevelAdjust',
                   'uint8_t',
                   [],
                   is_const=True)
    ## mac-messages.h (module 'wimax'): ns3::Cid ns3::RngRsp::GetPrimaryCid() const [member function]
    cls.add_method('GetPrimaryCid',
                   'ns3::Cid',
                   [],
                   is_const=True)
    ## mac-messages.h (module 'wimax'): uint8_t ns3::RngRsp::GetRangStatus() const [member function]
    cls.add_method('GetRangStatus',
                   'uint8_t',
                   [],
                   is_const=True)
    ## mac-messages.h (module 'wimax'): uint8_t ns3::RngRsp::GetRangSubchnl() const [member function]
    cls.add_method('GetRangSubchnl',
                   'uint8_t',
                   [],
                   is_const=True)
    ## mac-messages.h (module 'wimax'): uint32_t ns3::RngRsp::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize',
                   'uint32_t',
                   [],
                   is_const=True, is_virtual=True)
    ## mac-messages.h (module 'wimax'): uint32_t ns3::RngRsp::GetTimingAdjust() const [member function]
    cls.add_method('GetTimingAdjust',
                   'uint32_t',
                   [],
                   is_const=True)
    ## mac-messages.h (module 'wimax'): static ns3::TypeId ns3::RngRsp::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## mac-messages.h (module 'wimax'): uint8_t ns3::RngRsp::GetUlChnlIdOverride() const [member function]
    cls.add_method('GetUlChnlIdOverride',
                   'uint8_t',
                   [],
                   is_const=True)
    ## mac-messages.h (module 'wimax'): void ns3::RngRsp::Print(std::ostream & os) const [member function]
    cls.add_method('Print',
                   'void',
                   [param('std::ostream &', 'os')],
                   is_const=True, is_virtual=True)
    ## mac-messages.h (module 'wimax'): void ns3::RngRsp::Serialize(ns3::Buffer::Iterator start) const [member function]
    cls.add_method('Serialize',
                   'void',
                   [param('ns3::Buffer::Iterator', 'start')],
                   is_const=True, is_virtual=True)
    ## mac-messages.h (module 'wimax'): void ns3::RngRsp::SetAasBdcastPermission(uint8_t aasBdcastPermission) [member function]
    cls.add_method('SetAasBdcastPermission',
                   'void',
                   [param('uint8_t', 'aasBdcastPermission')])
    ## mac-messages.h (module 'wimax'): void ns3::RngRsp::SetBasicCid(ns3::Cid basicCid) [member function]
    cls.add_method('SetBasicCid',
                   'void',
                   [param('ns3::Cid', 'basicCid')])
    ## mac-messages.h (module 'wimax'): void ns3::RngRsp::SetDlFreqOverride(uint32_t dlFreqOverride) [member function]
    cls.add_method('SetDlFreqOverride',
                   'void',
                   [param('uint32_t', 'dlFreqOverride')])
    ## mac-messages.h (module 'wimax'): void ns3::RngRsp::SetDlOperBurstProfile(uint16_t dlOperBurstProfile) [member function]
    cls.add_method('SetDlOperBurstProfile',
                   'void',
                   [param('uint16_t', 'dlOperBurstProfile')])
    ## mac-messages.h (module 'wimax'): void ns3::RngRsp::SetFrameNumber(uint32_t frameNumber) [member function]
    cls.add_method('SetFrameNumber',
                   'void',
                   [param('uint32_t', 'frameNumber')])
    ## mac-messages.h (module 'wimax'): void ns3::RngRsp::SetInitRangOppNumber(uint8_t initRangOppNumber) [member function]
    cls.add_method('SetInitRangOppNumber',
                   'void',
                   [param('uint8_t', 'initRangOppNumber')])
    ## mac-messages.h (module 'wimax'): void ns3::RngRsp::SetMacAddress(ns3::Mac48Address macAddress) [member function]
    cls.add_method('SetMacAddress',
                   'void',
                   [param('ns3::Mac48Address', 'macAddress')])
    ## mac-messages.h (module 'wimax'): void ns3::RngRsp::SetOffsetFreqAdjust(uint32_t offsetFreqAdjust) [member function]
    cls.add_method('SetOffsetFreqAdjust',
                   'void',
                   [param('uint32_t', 'offsetFreqAdjust')])
    ## mac-messages.h (module 'wimax'): void ns3::RngRsp::SetPowerLevelAdjust(uint8_t powerLevelAdjust) [member function]
    cls.add_method('SetPowerLevelAdjust',
                   'void',
                   [param('uint8_t', 'powerLevelAdjust')])
    ## mac-messages.h (module 'wimax'): void ns3::RngRsp::SetPrimaryCid(ns3::Cid primaryCid) [member function]
    cls.add_method('SetPrimaryCid',
                   'void',
                   [param('ns3::Cid', 'primaryCid')])
    ## mac-messages.h (module 'wimax'): void ns3::RngRsp::SetRangStatus(uint8_t rangStatus) [member function]
    cls.add_method('SetRangStatus',
                   'void',
                   [param('uint8_t', 'rangStatus')])
    ## mac-messages.h (module 'wimax'): void ns3::RngRsp::SetRangSubchnl(uint8_t rangSubchnl) [member function]
    cls.add_method('SetRangSubchnl',
                   'void',
                   [param('uint8_t', 'rangSubchnl')])
    ## mac-messages.h (module 'wimax'): void ns3::RngRsp::SetTimingAdjust(uint32_t timingAdjust) [member function]
    cls.add_method('SetTimingAdjust',
                   'void',
                   [param('uint32_t', 'timingAdjust')])
    ## mac-messages.h (module 'wimax'): void ns3::RngRsp::SetUlChnlIdOverride(uint8_t ulChnlIdOverride) [member function]
    cls.add_method('SetUlChnlIdOverride',
                   'void',
                   [param('uint8_t', 'ulChnlIdOverride')])
    return
def register_Ns3SSManager_methods(root_module, cls):
    """Register pybindgen bindings for ns3::SSManager (ss-manager.h, module 'wimax').

    Auto-generated; the sequence of add_* calls fixes pybindgen's overload
    registration order (e.g. the two GetSSRecord overloads) — do not reorder.
    """
    ## ss-manager.h (module 'wimax'): ns3::SSManager::SSManager(ns3::SSManager const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::SSManager const &', 'arg0')])
    ## ss-manager.h (module 'wimax'): ns3::SSManager::SSManager() [constructor]
    cls.add_constructor([])
    ## ss-manager.h (module 'wimax'): ns3::SSRecord * ns3::SSManager::CreateSSRecord(ns3::Mac48Address const & macAddress) [member function]
    cls.add_method('CreateSSRecord',
                   'ns3::SSRecord *',
                   [param('ns3::Mac48Address const &', 'macAddress')])
    ## ss-manager.h (module 'wimax'): void ns3::SSManager::DeleteSSRecord(ns3::Cid cid) [member function]
    cls.add_method('DeleteSSRecord',
                   'void',
                   [param('ns3::Cid', 'cid')])
    ## ss-manager.h (module 'wimax'): ns3::Mac48Address ns3::SSManager::GetMacAddress(ns3::Cid cid) const [member function]
    cls.add_method('GetMacAddress',
                   'ns3::Mac48Address',
                   [param('ns3::Cid', 'cid')],
                   is_const=True)
    ## ss-manager.h (module 'wimax'): uint32_t ns3::SSManager::GetNRegisteredSSs() const [member function]
    cls.add_method('GetNRegisteredSSs',
                   'uint32_t',
                   [],
                   is_const=True)
    ## ss-manager.h (module 'wimax'): uint32_t ns3::SSManager::GetNSSs() const [member function]
    cls.add_method('GetNSSs',
                   'uint32_t',
                   [],
                   is_const=True)
    ## ss-manager.h (module 'wimax'): ns3::SSRecord * ns3::SSManager::GetSSRecord(ns3::Mac48Address const & macAddress) const [member function]
    cls.add_method('GetSSRecord',
                   'ns3::SSRecord *',
                   [param('ns3::Mac48Address const &', 'macAddress')],
                   is_const=True)
    ## ss-manager.h (module 'wimax'): ns3::SSRecord * ns3::SSManager::GetSSRecord(ns3::Cid cid) const [member function]
    cls.add_method('GetSSRecord',
                   'ns3::SSRecord *',
                   [param('ns3::Cid', 'cid')],
                   is_const=True)
    ## ss-manager.h (module 'wimax'): std::vector<ns3::SSRecord*,std::allocator<ns3::SSRecord*> > * ns3::SSManager::GetSSRecords() const [member function]
    cls.add_method('GetSSRecords',
                   'std::vector< ns3::SSRecord * > *',
                   [],
                   is_const=True)
    ## ss-manager.h (module 'wimax'): static ns3::TypeId ns3::SSManager::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## ss-manager.h (module 'wimax'): bool ns3::SSManager::IsInRecord(ns3::Mac48Address const & macAddress) const [member function]
    cls.add_method('IsInRecord',
                   'bool',
                   [param('ns3::Mac48Address const &', 'macAddress')],
                   is_const=True)
    ## ss-manager.h (module 'wimax'): bool ns3::SSManager::IsRegistered(ns3::Mac48Address const & macAddress) const [member function]
    cls.add_method('IsRegistered',
                   'bool',
                   [param('ns3::Mac48Address const &', 'macAddress')],
                   is_const=True)
    return
def register_Ns3SequentialRandomVariable_methods(root_module, cls):
    """Register pybindgen bindings for ns3::SequentialRandomVariable (random-variable-stream.h, module 'core').

    Auto-generated registration code; keep the call sequence unchanged.
    """
    ## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::SequentialRandomVariable::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## random-variable-stream.h (module 'core'): ns3::SequentialRandomVariable::SequentialRandomVariable() [constructor]
    cls.add_constructor([])
    ## random-variable-stream.h (module 'core'): double ns3::SequentialRandomVariable::GetMin() const [member function]
    cls.add_method('GetMin',
                   'double',
                   [],
                   is_const=True)
    ## random-variable-stream.h (module 'core'): double ns3::SequentialRandomVariable::GetMax() const [member function]
    cls.add_method('GetMax',
                   'double',
                   [],
                   is_const=True)
    ## random-variable-stream.h (module 'core'): ns3::Ptr<ns3::RandomVariableStream> ns3::SequentialRandomVariable::GetIncrement() const [member function]
    cls.add_method('GetIncrement',
                   'ns3::Ptr< ns3::RandomVariableStream >',
                   [],
                   is_const=True)
    ## random-variable-stream.h (module 'core'): uint32_t ns3::SequentialRandomVariable::GetConsecutive() const [member function]
    cls.add_method('GetConsecutive',
                   'uint32_t',
                   [],
                   is_const=True)
    ## random-variable-stream.h (module 'core'): double ns3::SequentialRandomVariable::GetValue() [member function]
    cls.add_method('GetValue',
                   'double',
                   [],
                   is_virtual=True)
    ## random-variable-stream.h (module 'core'): uint32_t ns3::SequentialRandomVariable::GetInteger() [member function]
    cls.add_method('GetInteger',
                   'uint32_t',
                   [],
                   is_virtual=True)
    return
def register_Ns3ServiceFlowManager_methods(root_module, cls):
    """Register pybindgen bindings for ns3::ServiceFlowManager (service-flow-manager.h, module 'wimax').

    Auto-generated; the order of the AreServiceFlowsAllocated and
    GetServiceFlow overload registrations is significant — do not reorder.
    """
    ## service-flow-manager.h (module 'wimax'): ns3::ServiceFlowManager::ServiceFlowManager(ns3::ServiceFlowManager const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ServiceFlowManager const &', 'arg0')])
    ## service-flow-manager.h (module 'wimax'): ns3::ServiceFlowManager::ServiceFlowManager() [constructor]
    cls.add_constructor([])
    ## service-flow-manager.h (module 'wimax'): void ns3::ServiceFlowManager::AddServiceFlow(ns3::ServiceFlow * serviceFlow) [member function]
    cls.add_method('AddServiceFlow',
                   'void',
                   [param('ns3::ServiceFlow *', 'serviceFlow')])
    ## service-flow-manager.h (module 'wimax'): bool ns3::ServiceFlowManager::AreServiceFlowsAllocated() [member function]
    cls.add_method('AreServiceFlowsAllocated',
                   'bool',
                   [])
    ## service-flow-manager.h (module 'wimax'): bool ns3::ServiceFlowManager::AreServiceFlowsAllocated(std::vector<ns3::ServiceFlow*,std::allocator<ns3::ServiceFlow*> > * serviceFlows) [member function]
    cls.add_method('AreServiceFlowsAllocated',
                   'bool',
                   [param('std::vector< ns3::ServiceFlow * > *', 'serviceFlows')])
    ## service-flow-manager.h (module 'wimax'): bool ns3::ServiceFlowManager::AreServiceFlowsAllocated(std::vector<ns3::ServiceFlow*,std::allocator<ns3::ServiceFlow*> > serviceFlows) [member function]
    cls.add_method('AreServiceFlowsAllocated',
                   'bool',
                   [param('std::vector< ns3::ServiceFlow * >', 'serviceFlows')])
    ## service-flow-manager.h (module 'wimax'): ns3::ServiceFlow * ns3::ServiceFlowManager::DoClassify(ns3::Ipv4Address SrcAddress, ns3::Ipv4Address DstAddress, uint16_t SrcPort, uint16_t DstPort, uint8_t Proto, ns3::ServiceFlow::Direction dir) const [member function]
    cls.add_method('DoClassify',
                   'ns3::ServiceFlow *',
                   [param('ns3::Ipv4Address', 'SrcAddress'), param('ns3::Ipv4Address', 'DstAddress'), param('uint16_t', 'SrcPort'), param('uint16_t', 'DstPort'), param('uint8_t', 'Proto'), param('ns3::ServiceFlow::Direction', 'dir')],
                   is_const=True)
    ## service-flow-manager.h (module 'wimax'): void ns3::ServiceFlowManager::DoDispose() [member function]
    cls.add_method('DoDispose',
                   'void',
                   [],
                   is_virtual=True)
    ## service-flow-manager.h (module 'wimax'): ns3::ServiceFlow * ns3::ServiceFlowManager::GetNextServiceFlowToAllocate() [member function]
    cls.add_method('GetNextServiceFlowToAllocate',
                   'ns3::ServiceFlow *',
                   [])
    ## service-flow-manager.h (module 'wimax'): uint32_t ns3::ServiceFlowManager::GetNrServiceFlows() const [member function]
    cls.add_method('GetNrServiceFlows',
                   'uint32_t',
                   [],
                   is_const=True)
    ## service-flow-manager.h (module 'wimax'): ns3::ServiceFlow * ns3::ServiceFlowManager::GetServiceFlow(uint32_t sfid) const [member function]
    cls.add_method('GetServiceFlow',
                   'ns3::ServiceFlow *',
                   [param('uint32_t', 'sfid')],
                   is_const=True)
    ## service-flow-manager.h (module 'wimax'): ns3::ServiceFlow * ns3::ServiceFlowManager::GetServiceFlow(ns3::Cid cid) const [member function]
    cls.add_method('GetServiceFlow',
                   'ns3::ServiceFlow *',
                   [param('ns3::Cid', 'cid')],
                   is_const=True)
    ## service-flow-manager.h (module 'wimax'): std::vector<ns3::ServiceFlow*,std::allocator<ns3::ServiceFlow*> > ns3::ServiceFlowManager::GetServiceFlows(ns3::ServiceFlow::SchedulingType schedulingType) const [member function]
    cls.add_method('GetServiceFlows',
                   'std::vector< ns3::ServiceFlow * >',
                   [param('ns3::ServiceFlow::SchedulingType', 'schedulingType')],
                   is_const=True)
    ## service-flow-manager.h (module 'wimax'): static ns3::TypeId ns3::ServiceFlowManager::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    return
def register_Ns3SfVectorTlvValue_methods(root_module, cls):
    """Register pybindgen bindings for ns3::SfVectorTlvValue (wimax-tlv.h, module 'wimax').

    Auto-generated registration code; keep the call sequence unchanged.
    """
    ## wimax-tlv.h (module 'wimax'): ns3::SfVectorTlvValue::SfVectorTlvValue(ns3::SfVectorTlvValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::SfVectorTlvValue const &', 'arg0')])
    ## wimax-tlv.h (module 'wimax'): ns3::SfVectorTlvValue::SfVectorTlvValue() [constructor]
    cls.add_constructor([])
    ## wimax-tlv.h (module 'wimax'): ns3::SfVectorTlvValue * ns3::SfVectorTlvValue::Copy() const [member function]
    cls.add_method('Copy',
                   'ns3::SfVectorTlvValue *',
                   [],
                   is_const=True, is_virtual=True)
    ## wimax-tlv.h (module 'wimax'): uint32_t ns3::SfVectorTlvValue::Deserialize(ns3::Buffer::Iterator start, uint64_t valueLength) [member function]
    cls.add_method('Deserialize',
                   'uint32_t',
                   [param('ns3::Buffer::Iterator', 'start'), param('uint64_t', 'valueLength')],
                   is_virtual=True)
    return
def register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, cls):
    """Register pybindgen bindings for the SimpleRefCount<ns3::AttributeAccessor, ...> template instantiation.

    Auto-generated; identical in shape to the other SimpleRefCount instantiations.
    """
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter< ns3::AttributeAccessor > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::Cleanup() [member function]
    cls.add_method('Cleanup',
                   'void',
                   [],
                   is_static=True)
    return
def register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, cls):
    """Register pybindgen bindings for the SimpleRefCount<ns3::AttributeChecker, ...> template instantiation.

    Auto-generated; identical in shape to the other SimpleRefCount instantiations.
    """
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter< ns3::AttributeChecker > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::Cleanup() [member function]
    cls.add_method('Cleanup',
                   'void',
                   [],
                   is_static=True)
    return
def register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, cls):
    """Register pybindgen bindings for the SimpleRefCount<ns3::AttributeValue, ...> template instantiation.

    Auto-generated; identical in shape to the other SimpleRefCount instantiations.
    """
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter< ns3::AttributeValue > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::Cleanup() [member function]
    cls.add_method('Cleanup',
                   'void',
                   [],
                   is_static=True)
    return
def register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, cls):
    """Register pybindgen bindings for the SimpleRefCount<ns3::CallbackImplBase, ...> template instantiation.

    Auto-generated; identical in shape to the other SimpleRefCount instantiations.
    """
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::SimpleRefCount(ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter< ns3::CallbackImplBase > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::Cleanup() [member function]
    cls.add_method('Cleanup',
                   'void',
                   [],
                   is_static=True)
    return
def register_Ns3SimpleRefCount__Ns3EventImpl_Ns3Empty_Ns3DefaultDeleter__lt__ns3EventImpl__gt___methods(root_module, cls):
    """Register pybindgen bindings for the SimpleRefCount<ns3::EventImpl, ...> template instantiation.

    Auto-generated; identical in shape to the other SimpleRefCount instantiations.
    """
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >::SimpleRefCount(ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter< ns3::EventImpl > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >::Cleanup() [member function]
    cls.add_method('Cleanup',
                   'void',
                   [],
                   is_static=True)
    return
def register_Ns3SimpleRefCount__Ns3NixVector_Ns3Empty_Ns3DefaultDeleter__lt__ns3NixVector__gt___methods(root_module, cls):
    """Register pybindgen bindings for the SimpleRefCount<ns3::NixVector, ...> template instantiation.

    Auto-generated; identical in shape to the other SimpleRefCount instantiations.
    """
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >::SimpleRefCount(ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter< ns3::NixVector > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >::Cleanup() [member function]
    cls.add_method('Cleanup',
                   'void',
                   [],
                   is_static=True)
    return
def register_Ns3SimpleRefCount__Ns3OutputStreamWrapper_Ns3Empty_Ns3DefaultDeleter__lt__ns3OutputStreamWrapper__gt___methods(root_module, cls):
    """Register pybindgen bindings for the SimpleRefCount<ns3::OutputStreamWrapper, ...> template instantiation.

    Auto-generated; identical in shape to the other SimpleRefCount instantiations.
    """
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> >::SimpleRefCount(ns3::SimpleRefCount<ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter< ns3::OutputStreamWrapper > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> >::Cleanup() [member function]
    cls.add_method('Cleanup',
                   'void',
                   [],
                   is_static=True)
    return
def register_Ns3SimpleRefCount__Ns3Packet_Ns3Empty_Ns3DefaultDeleter__lt__ns3Packet__gt___methods(root_module, cls):
    """Register pybindgen bindings for the SimpleRefCount<ns3::Packet, ...> template instantiation.

    Auto-generated; identical in shape to the other SimpleRefCount instantiations.
    """
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >::SimpleRefCount(ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter< ns3::Packet > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >::Cleanup() [member function]
    cls.add_method('Cleanup',
                   'void',
                   [],
                   is_static=True)
    return
def register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, cls):
    """Register pybindgen bindings for the SimpleRefCount<ns3::TraceSourceAccessor, ...> template instantiation.

    Auto-generated; identical in shape to the other SimpleRefCount instantiations.
    """
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter< ns3::TraceSourceAccessor > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::Cleanup() [member function]
    cls.add_method('Cleanup',
                   'void',
                   [],
                   is_static=True)
    return
def register_Ns3SsServiceFlowManager_methods(root_module, cls):
    """Register pybindgen bindings for ns3::SsServiceFlowManager (ss-service-flow-manager.h, module 'wimax').

    Auto-generated; the order of the two AddServiceFlow overload
    registrations (pointer vs. by-value) is significant — do not reorder.
    """
    ## ss-service-flow-manager.h (module 'wimax'): ns3::SsServiceFlowManager::SsServiceFlowManager(ns3::SsServiceFlowManager const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::SsServiceFlowManager const &', 'arg0')])
    ## ss-service-flow-manager.h (module 'wimax'): ns3::SsServiceFlowManager::SsServiceFlowManager(ns3::Ptr<ns3::SubscriberStationNetDevice> device) [constructor]
    cls.add_constructor([param('ns3::Ptr< ns3::SubscriberStationNetDevice >', 'device')])
    ## ss-service-flow-manager.h (module 'wimax'): void ns3::SsServiceFlowManager::AddServiceFlow(ns3::ServiceFlow * serviceFlow) [member function]
    cls.add_method('AddServiceFlow',
                   'void',
                   [param('ns3::ServiceFlow *', 'serviceFlow')])
    ## ss-service-flow-manager.h (module 'wimax'): void ns3::SsServiceFlowManager::AddServiceFlow(ns3::ServiceFlow serviceFlow) [member function]
    cls.add_method('AddServiceFlow',
                   'void',
                   [param('ns3::ServiceFlow', 'serviceFlow')])
    ## ss-service-flow-manager.h (module 'wimax'): ns3::Ptr<ns3::Packet> ns3::SsServiceFlowManager::CreateDsaAck() [member function]
    cls.add_method('CreateDsaAck',
                   'ns3::Ptr< ns3::Packet >',
                   [])
    ## ss-service-flow-manager.h (module 'wimax'): ns3::DsaReq ns3::SsServiceFlowManager::CreateDsaReq(ns3::ServiceFlow const * serviceFlow) [member function]
    cls.add_method('CreateDsaReq',
                   'ns3::DsaReq',
                   [param('ns3::ServiceFlow const *', 'serviceFlow')])
    ## ss-service-flow-manager.h (module 'wimax'): void ns3::SsServiceFlowManager::DoDispose() [member function]
    cls.add_method('DoDispose',
                   'void',
                   [],
                   is_virtual=True)
    ## ss-service-flow-manager.h (module 'wimax'): ns3::EventId ns3::SsServiceFlowManager::GetDsaAckTimeoutEvent() const [member function]
    cls.add_method('GetDsaAckTimeoutEvent',
                   'ns3::EventId',
                   [],
                   is_const=True)
    ## ss-service-flow-manager.h (module 'wimax'): ns3::EventId ns3::SsServiceFlowManager::GetDsaRspTimeoutEvent() const [member function]
    cls.add_method('GetDsaRspTimeoutEvent',
                   'ns3::EventId',
                   [],
                   is_const=True)
    ## ss-service-flow-manager.h (module 'wimax'): uint8_t ns3::SsServiceFlowManager::GetMaxDsaReqRetries() const [member function]
    cls.add_method('GetMaxDsaReqRetries',
                   'uint8_t',
                   [],
                   is_const=True)
    ## ss-service-flow-manager.h (module 'wimax'): void ns3::SsServiceFlowManager::InitiateServiceFlows() [member function]
    cls.add_method('InitiateServiceFlows',
                   'void',
                   [])
    ## ss-service-flow-manager.h (module 'wimax'): void ns3::SsServiceFlowManager::ProcessDsaRsp(ns3::DsaRsp const & dsaRsp) [member function]
    cls.add_method('ProcessDsaRsp',
                   'void',
                   [param('ns3::DsaRsp const &', 'dsaRsp')])
    ## ss-service-flow-manager.h (module 'wimax'): void ns3::SsServiceFlowManager::ScheduleDsaReq(ns3::ServiceFlow const * serviceFlow) [member function]
    cls.add_method('ScheduleDsaReq',
                   'void',
                   [param('ns3::ServiceFlow const *', 'serviceFlow')])
    ## ss-service-flow-manager.h (module 'wimax'): void ns3::SsServiceFlowManager::SetMaxDsaReqRetries(uint8_t maxDsaReqRetries) [member function]
    cls.add_method('SetMaxDsaReqRetries',
                   'void',
                   [param('uint8_t', 'maxDsaReqRetries')])
    return
def register_Ns3ThreeLogDistancePropagationLossModel_methods(root_module, cls):
    """Register pybindgen bindings for ns3::ThreeLogDistancePropagationLossModel (propagation-loss-model.h, module 'propagation').

    Auto-generated; DoCalcRxPower and DoAssignStreams are registered with
    private visibility because they are private virtual overrides in C++.
    """
    ## propagation-loss-model.h (module 'propagation'): static ns3::TypeId ns3::ThreeLogDistancePropagationLossModel::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## propagation-loss-model.h (module 'propagation'): ns3::ThreeLogDistancePropagationLossModel::ThreeLogDistancePropagationLossModel() [constructor]
    cls.add_constructor([])
    ## propagation-loss-model.h (module 'propagation'): double ns3::ThreeLogDistancePropagationLossModel::DoCalcRxPower(double txPowerDbm, ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function]
    cls.add_method('DoCalcRxPower',
                   'double',
                   [param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')],
                   is_const=True, visibility='private', is_virtual=True)
    ## propagation-loss-model.h (module 'propagation'): int64_t ns3::ThreeLogDistancePropagationLossModel::DoAssignStreams(int64_t stream) [member function]
    cls.add_method('DoAssignStreams',
                   'int64_t',
                   [param('int64_t', 'stream')],
                   visibility='private', is_virtual=True)
    return
def register_Ns3Time_methods(root_module, cls):
    """Register pybindgen bindings for ns3::Time (nstime.h, module 'core').

    Auto-generated. Registers the comparison/arithmetic operator wrappers
    first, then the many numeric/string constructors, then member methods;
    constructor order drives pybindgen overload resolution — do not reorder.
    """
    cls.add_binary_comparison_operator('!=')
    cls.add_inplace_numeric_operator('+=', param('ns3::Time const &', 'right'))
    cls.add_binary_numeric_operator('+', root_module['ns3::Time'], root_module['ns3::Time'], param('ns3::Time const &', 'right'))
    cls.add_binary_numeric_operator('-', root_module['ns3::Time'], root_module['ns3::Time'], param('ns3::Time const &', 'right'))
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('>')
    cls.add_inplace_numeric_operator('-=', param('ns3::Time const &', 'right'))
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('<=')
    cls.add_binary_comparison_operator('==')
    cls.add_binary_comparison_operator('>=')
    ## nstime.h (module 'core'): ns3::Time::Time() [constructor]
    cls.add_constructor([])
    ## nstime.h (module 'core'): ns3::Time::Time(ns3::Time const & o) [copy constructor]
    cls.add_constructor([param('ns3::Time const &', 'o')])
    ## nstime.h (module 'core'): ns3::Time::Time(double v) [constructor]
    cls.add_constructor([param('double', 'v')])
    ## nstime.h (module 'core'): ns3::Time::Time(int v) [constructor]
    cls.add_constructor([param('int', 'v')])
    ## nstime.h (module 'core'): ns3::Time::Time(long int v) [constructor]
    cls.add_constructor([param('long int', 'v')])
    ## nstime.h (module 'core'): ns3::Time::Time(long long int v) [constructor]
    cls.add_constructor([param('long long int', 'v')])
    ## nstime.h (module 'core'): ns3::Time::Time(unsigned int v) [constructor]
    cls.add_constructor([param('unsigned int', 'v')])
    ## nstime.h (module 'core'): ns3::Time::Time(long unsigned int v) [constructor]
    cls.add_constructor([param('long unsigned int', 'v')])
    ## nstime.h (module 'core'): ns3::Time::Time(long long unsigned int v) [constructor]
    cls.add_constructor([param('long long unsigned int', 'v')])
    ## nstime.h (module 'core'): ns3::Time::Time(std::string const & s) [constructor]
    cls.add_constructor([param('std::string const &', 's')])
    ## nstime.h (module 'core'): ns3::Time::Time(ns3::int64x64_t const & value) [constructor]
    cls.add_constructor([param('ns3::int64x64_t const &', 'value')])
    ## nstime.h (module 'core'): int ns3::Time::Compare(ns3::Time const & o) const [member function]
    cls.add_method('Compare',
                   'int',
                   [param('ns3::Time const &', 'o')],
                   is_const=True)
    ## nstime.h (module 'core'): static ns3::Time ns3::Time::From(ns3::int64x64_t const & from, ns3::Time::Unit timeUnit) [member function]
    cls.add_method('From',
                   'ns3::Time',
                   [param('ns3::int64x64_t const &', 'from'), param('ns3::Time::Unit', 'timeUnit')],
                   is_static=True)
    ## nstime.h (module 'core'): static ns3::Time ns3::Time::From(ns3::int64x64_t const & value) [member function]
    cls.add_method('From',
                   'ns3::Time',
                   [param('ns3::int64x64_t const &', 'value')],
                   is_static=True)
    ## nstime.h (module 'core'): static ns3::Time ns3::Time::FromDouble(double value, ns3::Time::Unit timeUnit) [member function]
    cls.add_method('FromDouble',
                   'ns3::Time',
                   [param('double', 'value'), param('ns3::Time::Unit', 'timeUnit')],
                   is_static=True)
    ## nstime.h (module 'core'): static ns3::Time ns3::Time::FromInteger(uint64_t value, ns3::Time::Unit timeUnit) [member function]
    cls.add_method('FromInteger',
                   'ns3::Time',
                   [param('uint64_t', 'value'), param('ns3::Time::Unit', 'timeUnit')],
                   is_static=True)
    ## nstime.h (module 'core'): double ns3::Time::GetDouble() const [member function]
    cls.add_method('GetDouble',
                   'double',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): int64_t ns3::Time::GetFemtoSeconds() const [member function]
    cls.add_method('GetFemtoSeconds',
                   'int64_t',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): int64_t ns3::Time::GetInteger() const [member function]
    cls.add_method('GetInteger',
                   'int64_t',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): int64_t ns3::Time::GetMicroSeconds() const [member function]
    cls.add_method('GetMicroSeconds',
                   'int64_t',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): int64_t ns3::Time::GetMilliSeconds() const [member function]
    cls.add_method('GetMilliSeconds',
                   'int64_t',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): int64_t ns3::Time::GetNanoSeconds() const [member function]
    cls.add_method('GetNanoSeconds',
                   'int64_t',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): int64_t ns3::Time::GetPicoSeconds() const [member function]
    cls.add_method('GetPicoSeconds',
                   'int64_t',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): static ns3::Time::Unit ns3::Time::GetResolution() [member function]
    cls.add_method('GetResolution',
                   'ns3::Time::Unit',
                   [],
                   is_static=True)
    ## nstime.h (module 'core'): double ns3::Time::GetSeconds() const [member function]
    cls.add_method('GetSeconds',
                   'double',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): int64_t ns3::Time::GetTimeStep() const [member function]
    cls.add_method('GetTimeStep',
                   'int64_t',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): bool ns3::Time::IsNegative() const [member function]
    cls.add_method('IsNegative',
                   'bool',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): bool ns3::Time::IsPositive() const [member function]
    cls.add_method('IsPositive',
                   'bool',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): bool ns3::Time::IsStrictlyNegative() const [member function]
    cls.add_method('IsStrictlyNegative',
                   'bool',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): bool ns3::Time::IsStrictlyPositive() const [member function]
    cls.add_method('IsStrictlyPositive',
                   'bool',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): bool ns3::Time::IsZero() const [member function]
    cls.add_method('IsZero',
                   'bool',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): static void ns3::Time::SetResolution(ns3::Time::Unit resolution) [member function]
    cls.add_method('SetResolution',
                   'void',
                   [param('ns3::Time::Unit', 'resolution')],
                   is_static=True)
    ## nstime.h (module 'core'): ns3::int64x64_t ns3::Time::To(ns3::Time::Unit timeUnit) const [member function]
    cls.add_method('To',
                   'ns3::int64x64_t',
                   [param('ns3::Time::Unit', 'timeUnit')],
                   is_const=True)
    ## nstime.h (module 'core'): double ns3::Time::ToDouble(ns3::Time::Unit timeUnit) const [member function]
    cls.add_method('ToDouble',
                   'double',
                   [param('ns3::Time::Unit', 'timeUnit')],
                   is_const=True)
    ## nstime.h (module 'core'): int64_t ns3::Time::ToInteger(ns3::Time::Unit timeUnit) const [member function]
    cls.add_method('ToInteger',
                   'int64_t',
                   [param('ns3::Time::Unit', 'timeUnit')],
                   is_const=True)
    return
def register_Ns3Tlv_methods(root_module, cls):
    """Register pybindgen bindings for ns3::Tlv (wimax-tlv.h, module 'wimax').

    Auto-generated registration code; keep the call sequence unchanged.
    """
    ## wimax-tlv.h (module 'wimax'): ns3::Tlv::Tlv(uint8_t type, uint64_t length, ns3::TlvValue const & value) [constructor]
    cls.add_constructor([param('uint8_t', 'type'), param('uint64_t', 'length'), param('ns3::TlvValue const &', 'value')])
    ## wimax-tlv.h (module 'wimax'): ns3::Tlv::Tlv() [constructor]
    cls.add_constructor([])
    ## wimax-tlv.h (module 'wimax'): ns3::Tlv::Tlv(ns3::Tlv const & tlv) [copy constructor]
    cls.add_constructor([param('ns3::Tlv const &', 'tlv')])
    ## wimax-tlv.h (module 'wimax'): ns3::Tlv * ns3::Tlv::Copy() const [member function]
    cls.add_method('Copy',
                   'ns3::Tlv *',
                   [],
                   is_const=True)
    ## wimax-tlv.h (module 'wimax'): ns3::TlvValue * ns3::Tlv::CopyValue() const [member function]
    cls.add_method('CopyValue',
                   'ns3::TlvValue *',
                   [],
                   is_const=True)
    ## wimax-tlv.h (module 'wimax'): uint32_t ns3::Tlv::Deserialize(ns3::Buffer::Iterator start) [member function]
    cls.add_method('Deserialize',
                   'uint32_t',
                   [param('ns3::Buffer::Iterator', 'start')],
                   is_virtual=True)
    ## wimax-tlv.h (module 'wimax'): ns3::TypeId ns3::Tlv::GetInstanceTypeId() const [member function]
    cls.add_method('GetInstanceTypeId',
                   'ns3::TypeId',
                   [],
                   is_const=True, is_virtual=True)
    ## wimax-tlv.h (module 'wimax'): uint64_t ns3::Tlv::GetLength() const [member function]
    cls.add_method('GetLength',
                   'uint64_t',
                   [],
                   is_const=True)
    ## wimax-tlv.h (module 'wimax'): uint32_t ns3::Tlv::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize',
                   'uint32_t',
                   [],
                   is_const=True, is_virtual=True)
    ## wimax-tlv.h (module 'wimax'): uint8_t ns3::Tlv::GetSizeOfLen() const [member function]
    cls.add_method('GetSizeOfLen',
                   'uint8_t',
                   [],
                   is_const=True)
    ## wimax-tlv.h (module 'wimax'): uint8_t ns3::Tlv::GetType() const [member function]
    cls.add_method('GetType',
                   'uint8_t',
                   [],
                   is_const=True)
    ## wimax-tlv.h (module 'wimax'): ns3::TlvValue * ns3::Tlv::PeekValue() [member function]
    cls.add_method('PeekValue',
                   'ns3::TlvValue *',
                   [])
    ## wimax-tlv.h (module 'wimax'): void ns3::Tlv::Print(std::ostream & os) const [member function]
    cls.add_method('Print',
                   'void',
                   [param('std::ostream &', 'os')],
                   is_const=True, is_virtual=True)
    ## wimax-tlv.h (module 'wimax'): void ns3::Tlv::Serialize(ns3::Buffer::Iterator start) const [member function]
    cls.add_method('Serialize',
                   'void',
                   [param('ns3::Buffer::Iterator', 'start')],
                   is_const=True, is_virtual=True)
    return
def register_Ns3TraceSourceAccessor_methods(root_module, cls):
    """Register pybindgen bindings for ns3::TraceSourceAccessor (trace-source-accessor.h, module 'core').

    Auto-generated; all four (Dis)Connect* methods are pure-virtual const
    members, and the ObjectBase* arguments are passed without ownership transfer.
    """
    ## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor::TraceSourceAccessor(ns3::TraceSourceAccessor const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::TraceSourceAccessor const &', 'arg0')])
    ## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor::TraceSourceAccessor() [constructor]
    cls.add_constructor([])
    ## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::Connect(ns3::ObjectBase * obj, std::string context, ns3::CallbackBase const & cb) const [member function]
    cls.add_method('Connect',
                   'bool',
                   [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::ConnectWithoutContext(ns3::ObjectBase * obj, ns3::CallbackBase const & cb) const [member function]
    cls.add_method('ConnectWithoutContext',
                   'bool',
                   [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::Disconnect(ns3::ObjectBase * obj, std::string context, ns3::CallbackBase const & cb) const [member function]
    cls.add_method('Disconnect',
                   'bool',
                   [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::DisconnectWithoutContext(ns3::ObjectBase * obj, ns3::CallbackBase const & cb) const [member function]
    cls.add_method('DisconnectWithoutContext',
                   'bool',
                   [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def register_Ns3Trailer_methods(root_module, cls):
    """Register bindings for ns3::Trailer (trailer.h, module 'network').

    Adds the output-stream operator, both constructors, and the
    serialize/deserialize/print interface (mostly pure-virtual) to the
    pybindgen class wrapper ``cls``. ``root_module`` is unused here.
    Returns None.
    """
    cls.add_output_stream_operator()
    ## trailer.h (module 'network'): ns3::Trailer::Trailer() [constructor]
    cls.add_constructor([])
    ## trailer.h (module 'network'): ns3::Trailer::Trailer(ns3::Trailer const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Trailer const &', 'arg0')])
    ## trailer.h (module 'network'): uint32_t ns3::Trailer::Deserialize(ns3::Buffer::Iterator end) [member function]
    cls.add_method('Deserialize',
                   'uint32_t',
                   [param('ns3::Buffer::Iterator', 'end')],
                   is_pure_virtual=True, is_virtual=True)
    ## trailer.h (module 'network'): uint32_t ns3::Trailer::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize',
                   'uint32_t',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## trailer.h (module 'network'): static ns3::TypeId ns3::Trailer::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## trailer.h (module 'network'): void ns3::Trailer::Print(std::ostream & os) const [member function]
    cls.add_method('Print',
                   'void',
                   [param('std::ostream &', 'os')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## trailer.h (module 'network'): void ns3::Trailer::Serialize(ns3::Buffer::Iterator start) const [member function]
    cls.add_method('Serialize',
                   'void',
                   [param('ns3::Buffer::Iterator', 'start')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def register_Ns3TriangularRandomVariable_methods(root_module, cls):
    """Register bindings for ns3::TriangularRandomVariable (random-variable-stream.h, module 'core').

    Adds the constructor, parameter getters, and the overloaded
    GetValue/GetInteger methods (parameterized and zero-argument forms) to
    the pybindgen class wrapper ``cls``. ``root_module`` is unused here.
    NOTE(review): the overloaded registrations are kept in generated order —
    overload resolution presumably depends on it; do not reorder.
    Returns None.
    """
    ## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::TriangularRandomVariable::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## random-variable-stream.h (module 'core'): ns3::TriangularRandomVariable::TriangularRandomVariable() [constructor]
    cls.add_constructor([])
    ## random-variable-stream.h (module 'core'): double ns3::TriangularRandomVariable::GetMean() const [member function]
    cls.add_method('GetMean',
                   'double',
                   [],
                   is_const=True)
    ## random-variable-stream.h (module 'core'): double ns3::TriangularRandomVariable::GetMin() const [member function]
    cls.add_method('GetMin',
                   'double',
                   [],
                   is_const=True)
    ## random-variable-stream.h (module 'core'): double ns3::TriangularRandomVariable::GetMax() const [member function]
    cls.add_method('GetMax',
                   'double',
                   [],
                   is_const=True)
    ## random-variable-stream.h (module 'core'): double ns3::TriangularRandomVariable::GetValue(double mean, double min, double max) [member function]
    cls.add_method('GetValue',
                   'double',
                   [param('double', 'mean'), param('double', 'min'), param('double', 'max')])
    ## random-variable-stream.h (module 'core'): uint32_t ns3::TriangularRandomVariable::GetInteger(uint32_t mean, uint32_t min, uint32_t max) [member function]
    cls.add_method('GetInteger',
                   'uint32_t',
                   [param('uint32_t', 'mean'), param('uint32_t', 'min'), param('uint32_t', 'max')])
    ## random-variable-stream.h (module 'core'): double ns3::TriangularRandomVariable::GetValue() [member function]
    cls.add_method('GetValue',
                   'double',
                   [],
                   is_virtual=True)
    ## random-variable-stream.h (module 'core'): uint32_t ns3::TriangularRandomVariable::GetInteger() [member function]
    cls.add_method('GetInteger',
                   'uint32_t',
                   [],
                   is_virtual=True)
    return
def register_Ns3TwoRayGroundPropagationLossModel_methods(root_module, cls):
    """Register bindings for ns3::TwoRayGroundPropagationLossModel (propagation-loss-model.h, module 'propagation').

    Adds the constructor, the overloaded SetLambda forms, the system-loss /
    min-distance / height accessors, and the two private virtual hooks
    (DoCalcRxPower, DoAssignStreams) to the pybindgen class wrapper ``cls``.
    ``root_module`` is unused here.
    NOTE(review): the two SetLambda overload registrations are kept in
    generated order — do not reorder.
    Returns None.
    """
    ## propagation-loss-model.h (module 'propagation'): static ns3::TypeId ns3::TwoRayGroundPropagationLossModel::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## propagation-loss-model.h (module 'propagation'): ns3::TwoRayGroundPropagationLossModel::TwoRayGroundPropagationLossModel() [constructor]
    cls.add_constructor([])
    ## propagation-loss-model.h (module 'propagation'): void ns3::TwoRayGroundPropagationLossModel::SetLambda(double frequency, double speed) [member function]
    cls.add_method('SetLambda',
                   'void',
                   [param('double', 'frequency'), param('double', 'speed')])
    ## propagation-loss-model.h (module 'propagation'): void ns3::TwoRayGroundPropagationLossModel::SetLambda(double lambda) [member function]
    cls.add_method('SetLambda',
                   'void',
                   [param('double', 'lambda')])
    ## propagation-loss-model.h (module 'propagation'): void ns3::TwoRayGroundPropagationLossModel::SetSystemLoss(double systemLoss) [member function]
    cls.add_method('SetSystemLoss',
                   'void',
                   [param('double', 'systemLoss')])
    ## propagation-loss-model.h (module 'propagation'): void ns3::TwoRayGroundPropagationLossModel::SetMinDistance(double minDistance) [member function]
    cls.add_method('SetMinDistance',
                   'void',
                   [param('double', 'minDistance')])
    ## propagation-loss-model.h (module 'propagation'): double ns3::TwoRayGroundPropagationLossModel::GetMinDistance() const [member function]
    cls.add_method('GetMinDistance',
                   'double',
                   [],
                   is_const=True)
    ## propagation-loss-model.h (module 'propagation'): double ns3::TwoRayGroundPropagationLossModel::GetLambda() const [member function]
    cls.add_method('GetLambda',
                   'double',
                   [],
                   is_const=True)
    ## propagation-loss-model.h (module 'propagation'): double ns3::TwoRayGroundPropagationLossModel::GetSystemLoss() const [member function]
    cls.add_method('GetSystemLoss',
                   'double',
                   [],
                   is_const=True)
    ## propagation-loss-model.h (module 'propagation'): void ns3::TwoRayGroundPropagationLossModel::SetHeightAboveZ(double heightAboveZ) [member function]
    cls.add_method('SetHeightAboveZ',
                   'void',
                   [param('double', 'heightAboveZ')])
    ## propagation-loss-model.h (module 'propagation'): double ns3::TwoRayGroundPropagationLossModel::DoCalcRxPower(double txPowerDbm, ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function]
    cls.add_method('DoCalcRxPower',
                   'double',
                   [param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')],
                   is_const=True, visibility='private', is_virtual=True)
    ## propagation-loss-model.h (module 'propagation'): int64_t ns3::TwoRayGroundPropagationLossModel::DoAssignStreams(int64_t stream) [member function]
    cls.add_method('DoAssignStreams',
                   'int64_t',
                   [param('int64_t', 'stream')],
                   visibility='private', is_virtual=True)
    return
def register_Ns3Ucd_methods(root_module, cls):
    """Register bindings for ns3::Ucd (ul-mac-messages.h, module 'wimax').

    Adds both constructors, the burst-profile accessors, the serialize /
    deserialize / print interface, and all backoff-window getters/setters of
    the uplink channel descriptor to the pybindgen class wrapper ``cls``.
    ``root_module`` is unused here. Returns None.
    """
    ## ul-mac-messages.h (module 'wimax'): ns3::Ucd::Ucd(ns3::Ucd const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ucd const &', 'arg0')])
    ## ul-mac-messages.h (module 'wimax'): ns3::Ucd::Ucd() [constructor]
    cls.add_constructor([])
    ## ul-mac-messages.h (module 'wimax'): void ns3::Ucd::AddUlBurstProfile(ns3::OfdmUlBurstProfile ulBurstProfile) [member function]
    cls.add_method('AddUlBurstProfile',
                   'void',
                   [param('ns3::OfdmUlBurstProfile', 'ulBurstProfile')])
    ## ul-mac-messages.h (module 'wimax'): uint32_t ns3::Ucd::Deserialize(ns3::Buffer::Iterator start) [member function]
    cls.add_method('Deserialize',
                   'uint32_t',
                   [param('ns3::Buffer::Iterator', 'start')],
                   is_virtual=True)
    ## ul-mac-messages.h (module 'wimax'): ns3::OfdmUcdChannelEncodings ns3::Ucd::GetChannelEncodings() const [member function]
    cls.add_method('GetChannelEncodings',
                   'ns3::OfdmUcdChannelEncodings',
                   [],
                   is_const=True)
    ## ul-mac-messages.h (module 'wimax'): uint8_t ns3::Ucd::GetConfigurationChangeCount() const [member function]
    cls.add_method('GetConfigurationChangeCount',
                   'uint8_t',
                   [],
                   is_const=True)
    ## ul-mac-messages.h (module 'wimax'): ns3::TypeId ns3::Ucd::GetInstanceTypeId() const [member function]
    cls.add_method('GetInstanceTypeId',
                   'ns3::TypeId',
                   [],
                   is_const=True, is_virtual=True)
    ## ul-mac-messages.h (module 'wimax'): std::string ns3::Ucd::GetName() const [member function]
    cls.add_method('GetName',
                   'std::string',
                   [],
                   is_const=True)
    ## ul-mac-messages.h (module 'wimax'): uint8_t ns3::Ucd::GetNrUlBurstProfiles() const [member function]
    cls.add_method('GetNrUlBurstProfiles',
                   'uint8_t',
                   [],
                   is_const=True)
    ## ul-mac-messages.h (module 'wimax'): uint8_t ns3::Ucd::GetRangingBackoffEnd() const [member function]
    cls.add_method('GetRangingBackoffEnd',
                   'uint8_t',
                   [],
                   is_const=True)
    ## ul-mac-messages.h (module 'wimax'): uint8_t ns3::Ucd::GetRangingBackoffStart() const [member function]
    cls.add_method('GetRangingBackoffStart',
                   'uint8_t',
                   [],
                   is_const=True)
    ## ul-mac-messages.h (module 'wimax'): uint8_t ns3::Ucd::GetRequestBackoffEnd() const [member function]
    cls.add_method('GetRequestBackoffEnd',
                   'uint8_t',
                   [],
                   is_const=True)
    ## ul-mac-messages.h (module 'wimax'): uint8_t ns3::Ucd::GetRequestBackoffStart() const [member function]
    cls.add_method('GetRequestBackoffStart',
                   'uint8_t',
                   [],
                   is_const=True)
    ## ul-mac-messages.h (module 'wimax'): uint32_t ns3::Ucd::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize',
                   'uint32_t',
                   [],
                   is_const=True, is_virtual=True)
    ## ul-mac-messages.h (module 'wimax'): static ns3::TypeId ns3::Ucd::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## ul-mac-messages.h (module 'wimax'): std::vector<ns3::OfdmUlBurstProfile, std::allocator<ns3::OfdmUlBurstProfile> > ns3::Ucd::GetUlBurstProfiles() const [member function]
    cls.add_method('GetUlBurstProfiles',
                   'std::vector< ns3::OfdmUlBurstProfile >',
                   [],
                   is_const=True)
    ## ul-mac-messages.h (module 'wimax'): void ns3::Ucd::Print(std::ostream & os) const [member function]
    cls.add_method('Print',
                   'void',
                   [param('std::ostream &', 'os')],
                   is_const=True, is_virtual=True)
    ## ul-mac-messages.h (module 'wimax'): void ns3::Ucd::Serialize(ns3::Buffer::Iterator start) const [member function]
    cls.add_method('Serialize',
                   'void',
                   [param('ns3::Buffer::Iterator', 'start')],
                   is_const=True, is_virtual=True)
    ## ul-mac-messages.h (module 'wimax'): void ns3::Ucd::SetChannelEncodings(ns3::OfdmUcdChannelEncodings channelEncodings) [member function]
    cls.add_method('SetChannelEncodings',
                   'void',
                   [param('ns3::OfdmUcdChannelEncodings', 'channelEncodings')])
    ## ul-mac-messages.h (module 'wimax'): void ns3::Ucd::SetConfigurationChangeCount(uint8_t ucdCount) [member function]
    cls.add_method('SetConfigurationChangeCount',
                   'void',
                   [param('uint8_t', 'ucdCount')])
    ## ul-mac-messages.h (module 'wimax'): void ns3::Ucd::SetNrUlBurstProfiles(uint8_t nrUlBurstProfiles) [member function]
    cls.add_method('SetNrUlBurstProfiles',
                   'void',
                   [param('uint8_t', 'nrUlBurstProfiles')])
    ## ul-mac-messages.h (module 'wimax'): void ns3::Ucd::SetRangingBackoffEnd(uint8_t rangingBackoffEnd) [member function]
    cls.add_method('SetRangingBackoffEnd',
                   'void',
                   [param('uint8_t', 'rangingBackoffEnd')])
    ## ul-mac-messages.h (module 'wimax'): void ns3::Ucd::SetRangingBackoffStart(uint8_t rangingBackoffStart) [member function]
    cls.add_method('SetRangingBackoffStart',
                   'void',
                   [param('uint8_t', 'rangingBackoffStart')])
    ## ul-mac-messages.h (module 'wimax'): void ns3::Ucd::SetRequestBackoffEnd(uint8_t requestBackoffEnd) [member function]
    cls.add_method('SetRequestBackoffEnd',
                   'void',
                   [param('uint8_t', 'requestBackoffEnd')])
    ## ul-mac-messages.h (module 'wimax'): void ns3::Ucd::SetRequestBackoffStart(uint8_t requestBackoffStart) [member function]
    cls.add_method('SetRequestBackoffStart',
                   'void',
                   [param('uint8_t', 'requestBackoffStart')])
    return
def register_Ns3UlJob_methods(root_module, cls):
    """Register bindings for ns3::UlJob (ul-job.h, module 'wimax').

    Adds the equality operator, both constructors, and the getter/setter
    pairs for the uplink-job fields (deadline, period, release time,
    scheduling type, service flow, size, SS record, request type) to the
    pybindgen class wrapper ``cls``. ``root_module`` is unused here.
    Returns None.
    """
    cls.add_binary_comparison_operator('==')
    ## ul-job.h (module 'wimax'): ns3::UlJob::UlJob(ns3::UlJob const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::UlJob const &', 'arg0')])
    ## ul-job.h (module 'wimax'): ns3::UlJob::UlJob() [constructor]
    cls.add_constructor([])
    ## ul-job.h (module 'wimax'): ns3::Time ns3::UlJob::GetDeadline() [member function]
    cls.add_method('GetDeadline',
                   'ns3::Time',
                   [])
    ## ul-job.h (module 'wimax'): ns3::Time ns3::UlJob::GetPeriod() [member function]
    cls.add_method('GetPeriod',
                   'ns3::Time',
                   [])
    ## ul-job.h (module 'wimax'): ns3::Time ns3::UlJob::GetReleaseTime() [member function]
    cls.add_method('GetReleaseTime',
                   'ns3::Time',
                   [])
    ## ul-job.h (module 'wimax'): ns3::ServiceFlow::SchedulingType ns3::UlJob::GetSchedulingType() [member function]
    cls.add_method('GetSchedulingType',
                   'ns3::ServiceFlow::SchedulingType',
                   [])
    ## ul-job.h (module 'wimax'): ns3::ServiceFlow * ns3::UlJob::GetServiceFlow() [member function]
    cls.add_method('GetServiceFlow',
                   'ns3::ServiceFlow *',
                   [])
    ## ul-job.h (module 'wimax'): uint32_t ns3::UlJob::GetSize() [member function]
    cls.add_method('GetSize',
                   'uint32_t',
                   [])
    ## ul-job.h (module 'wimax'): ns3::SSRecord * ns3::UlJob::GetSsRecord() [member function]
    cls.add_method('GetSsRecord',
                   'ns3::SSRecord *',
                   [])
    ## ul-job.h (module 'wimax'): ns3::ReqType ns3::UlJob::GetType() [member function]
    cls.add_method('GetType',
                   'ns3::ReqType',
                   [])
    ## ul-job.h (module 'wimax'): void ns3::UlJob::SetDeadline(ns3::Time deadline) [member function]
    cls.add_method('SetDeadline',
                   'void',
                   [param('ns3::Time', 'deadline')])
    ## ul-job.h (module 'wimax'): void ns3::UlJob::SetPeriod(ns3::Time period) [member function]
    cls.add_method('SetPeriod',
                   'void',
                   [param('ns3::Time', 'period')])
    ## ul-job.h (module 'wimax'): void ns3::UlJob::SetReleaseTime(ns3::Time releaseTime) [member function]
    cls.add_method('SetReleaseTime',
                   'void',
                   [param('ns3::Time', 'releaseTime')])
    ## ul-job.h (module 'wimax'): void ns3::UlJob::SetSchedulingType(ns3::ServiceFlow::SchedulingType schedulingType) [member function]
    cls.add_method('SetSchedulingType',
                   'void',
                   [param('ns3::ServiceFlow::SchedulingType', 'schedulingType')])
    ## ul-job.h (module 'wimax'): void ns3::UlJob::SetServiceFlow(ns3::ServiceFlow * serviceFlow) [member function]
    cls.add_method('SetServiceFlow',
                   'void',
                   [param('ns3::ServiceFlow *', 'serviceFlow')])
    ## ul-job.h (module 'wimax'): void ns3::UlJob::SetSize(uint32_t size) [member function]
    cls.add_method('SetSize',
                   'void',
                   [param('uint32_t', 'size')])
    ## ul-job.h (module 'wimax'): void ns3::UlJob::SetSsRecord(ns3::SSRecord * ssRecord) [member function]
    cls.add_method('SetSsRecord',
                   'void',
                   [param('ns3::SSRecord *', 'ssRecord')])
    ## ul-job.h (module 'wimax'): void ns3::UlJob::SetType(ns3::ReqType type) [member function]
    cls.add_method('SetType',
                   'void',
                   [param('ns3::ReqType', 'type')])
    return
def register_Ns3UlMap_methods(root_module, cls):
    """Register bindings for ns3::UlMap (ul-mac-messages.h, module 'wimax').

    Adds both constructors, the UL-MAP element accessors, the serialize /
    deserialize / print interface, and the allocation-start-time / UCD-count
    accessors to the pybindgen class wrapper ``cls``. ``root_module`` is
    unused here. Returns None.
    """
    ## ul-mac-messages.h (module 'wimax'): ns3::UlMap::UlMap(ns3::UlMap const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::UlMap const &', 'arg0')])
    ## ul-mac-messages.h (module 'wimax'): ns3::UlMap::UlMap() [constructor]
    cls.add_constructor([])
    ## ul-mac-messages.h (module 'wimax'): void ns3::UlMap::AddUlMapElement(ns3::OfdmUlMapIe ulMapElement) [member function]
    cls.add_method('AddUlMapElement',
                   'void',
                   [param('ns3::OfdmUlMapIe', 'ulMapElement')])
    ## ul-mac-messages.h (module 'wimax'): uint32_t ns3::UlMap::Deserialize(ns3::Buffer::Iterator start) [member function]
    cls.add_method('Deserialize',
                   'uint32_t',
                   [param('ns3::Buffer::Iterator', 'start')],
                   is_virtual=True)
    ## ul-mac-messages.h (module 'wimax'): uint32_t ns3::UlMap::GetAllocationStartTime() const [member function]
    cls.add_method('GetAllocationStartTime',
                   'uint32_t',
                   [],
                   is_const=True)
    ## ul-mac-messages.h (module 'wimax'): ns3::TypeId ns3::UlMap::GetInstanceTypeId() const [member function]
    cls.add_method('GetInstanceTypeId',
                   'ns3::TypeId',
                   [],
                   is_const=True, is_virtual=True)
    ## ul-mac-messages.h (module 'wimax'): std::string ns3::UlMap::GetName() const [member function]
    cls.add_method('GetName',
                   'std::string',
                   [],
                   is_const=True)
    ## ul-mac-messages.h (module 'wimax'): uint32_t ns3::UlMap::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize',
                   'uint32_t',
                   [],
                   is_const=True, is_virtual=True)
    ## ul-mac-messages.h (module 'wimax'): static ns3::TypeId ns3::UlMap::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## ul-mac-messages.h (module 'wimax'): uint8_t ns3::UlMap::GetUcdCount() const [member function]
    cls.add_method('GetUcdCount',
                   'uint8_t',
                   [],
                   is_const=True)
    ## ul-mac-messages.h (module 'wimax'): std::list<ns3::OfdmUlMapIe, std::allocator<ns3::OfdmUlMapIe> > ns3::UlMap::GetUlMapElements() const [member function]
    cls.add_method('GetUlMapElements',
                   'std::list< ns3::OfdmUlMapIe >',
                   [],
                   is_const=True)
    ## ul-mac-messages.h (module 'wimax'): void ns3::UlMap::Print(std::ostream & os) const [member function]
    cls.add_method('Print',
                   'void',
                   [param('std::ostream &', 'os')],
                   is_const=True, is_virtual=True)
    ## ul-mac-messages.h (module 'wimax'): void ns3::UlMap::Serialize(ns3::Buffer::Iterator start) const [member function]
    cls.add_method('Serialize',
                   'void',
                   [param('ns3::Buffer::Iterator', 'start')],
                   is_const=True, is_virtual=True)
    ## ul-mac-messages.h (module 'wimax'): void ns3::UlMap::SetAllocationStartTime(uint32_t allocationStartTime) [member function]
    cls.add_method('SetAllocationStartTime',
                   'void',
                   [param('uint32_t', 'allocationStartTime')])
    ## ul-mac-messages.h (module 'wimax'): void ns3::UlMap::SetUcdCount(uint8_t ucdCount) [member function]
    cls.add_method('SetUcdCount',
                   'void',
                   [param('uint8_t', 'ucdCount')])
    return
def register_Ns3UniformRandomVariable_methods(root_module, cls):
    """Register bindings for ns3::UniformRandomVariable (random-variable-stream.h, module 'core').

    Adds the constructor, min/max getters, and the overloaded
    GetValue/GetInteger methods (parameterized and zero-argument forms) to
    the pybindgen class wrapper ``cls``. ``root_module`` is unused here.
    NOTE(review): the overloaded registrations are kept in generated order —
    overload resolution presumably depends on it; do not reorder.
    Returns None.
    """
    ## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::UniformRandomVariable::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## random-variable-stream.h (module 'core'): ns3::UniformRandomVariable::UniformRandomVariable() [constructor]
    cls.add_constructor([])
    ## random-variable-stream.h (module 'core'): double ns3::UniformRandomVariable::GetMin() const [member function]
    cls.add_method('GetMin',
                   'double',
                   [],
                   is_const=True)
    ## random-variable-stream.h (module 'core'): double ns3::UniformRandomVariable::GetMax() const [member function]
    cls.add_method('GetMax',
                   'double',
                   [],
                   is_const=True)
    ## random-variable-stream.h (module 'core'): double ns3::UniformRandomVariable::GetValue(double min, double max) [member function]
    cls.add_method('GetValue',
                   'double',
                   [param('double', 'min'), param('double', 'max')])
    ## random-variable-stream.h (module 'core'): uint32_t ns3::UniformRandomVariable::GetInteger(uint32_t min, uint32_t max) [member function]
    cls.add_method('GetInteger',
                   'uint32_t',
                   [param('uint32_t', 'min'), param('uint32_t', 'max')])
    ## random-variable-stream.h (module 'core'): double ns3::UniformRandomVariable::GetValue() [member function]
    cls.add_method('GetValue',
                   'double',
                   [],
                   is_virtual=True)
    ## random-variable-stream.h (module 'core'): uint32_t ns3::UniformRandomVariable::GetInteger() [member function]
    cls.add_method('GetInteger',
                   'uint32_t',
                   [],
                   is_virtual=True)
    return
def register_Ns3UplinkScheduler_methods(root_module, cls):
    """Register bindings for ns3::UplinkScheduler (bs-uplink-scheduler.h, module 'wimax').

    Adds the three constructors, the base-station and timestamp accessors,
    and the abstract scheduling interface (largely pure-virtual: allocation,
    ranging, bandwidth-request and unsolicited-grant handling) to the
    pybindgen class wrapper ``cls``. ``root_module`` is unused here.
    NOTE(review): ServiceBandwidthRequests is overloaded — registration
    order is kept as generated; do not reorder.
    Returns None.
    """
    ## bs-uplink-scheduler.h (module 'wimax'): ns3::UplinkScheduler::UplinkScheduler(ns3::UplinkScheduler const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::UplinkScheduler const &', 'arg0')])
    ## bs-uplink-scheduler.h (module 'wimax'): ns3::UplinkScheduler::UplinkScheduler() [constructor]
    cls.add_constructor([])
    ## bs-uplink-scheduler.h (module 'wimax'): ns3::UplinkScheduler::UplinkScheduler(ns3::Ptr<ns3::BaseStationNetDevice> bs) [constructor]
    cls.add_constructor([param('ns3::Ptr< ns3::BaseStationNetDevice >', 'bs')])
    ## bs-uplink-scheduler.h (module 'wimax'): void ns3::UplinkScheduler::AddUplinkAllocation(ns3::OfdmUlMapIe & ulMapIe, uint32_t const & allocationSize, uint32_t & symbolsToAllocation, uint32_t & availableSymbols) [member function]
    cls.add_method('AddUplinkAllocation',
                   'void',
                   [param('ns3::OfdmUlMapIe &', 'ulMapIe'), param('uint32_t const &', 'allocationSize'), param('uint32_t &', 'symbolsToAllocation'), param('uint32_t &', 'availableSymbols')],
                   is_pure_virtual=True, is_virtual=True)
    ## bs-uplink-scheduler.h (module 'wimax'): void ns3::UplinkScheduler::AllocateInitialRangingInterval(uint32_t & symbolsToAllocation, uint32_t & availableSymbols) [member function]
    cls.add_method('AllocateInitialRangingInterval',
                   'void',
                   [param('uint32_t &', 'symbolsToAllocation'), param('uint32_t &', 'availableSymbols')],
                   is_pure_virtual=True, is_virtual=True)
    ## bs-uplink-scheduler.h (module 'wimax'): uint32_t ns3::UplinkScheduler::CalculateAllocationStartTime() [member function]
    cls.add_method('CalculateAllocationStartTime',
                   'uint32_t',
                   [],
                   is_pure_virtual=True, is_virtual=True)
    ## bs-uplink-scheduler.h (module 'wimax'): ns3::Ptr<ns3::BaseStationNetDevice> ns3::UplinkScheduler::GetBs() [member function]
    cls.add_method('GetBs',
                   'ns3::Ptr< ns3::BaseStationNetDevice >',
                   [],
                   is_virtual=True)
    ## bs-uplink-scheduler.h (module 'wimax'): void ns3::UplinkScheduler::GetChannelDescriptorsToUpdate(bool & arg0, bool & arg1, bool & arg2, bool & arg3) [member function]
    cls.add_method('GetChannelDescriptorsToUpdate',
                   'void',
                   [param('bool &', 'arg0'), param('bool &', 'arg1'), param('bool &', 'arg2'), param('bool &', 'arg3')],
                   is_pure_virtual=True, is_virtual=True)
    ## bs-uplink-scheduler.h (module 'wimax'): ns3::Time ns3::UplinkScheduler::GetDcdTimeStamp() const [member function]
    cls.add_method('GetDcdTimeStamp',
                   'ns3::Time',
                   [],
                   is_const=True, is_virtual=True)
    ## bs-uplink-scheduler.h (module 'wimax'): bool ns3::UplinkScheduler::GetIsInvIrIntrvlAllocated() const [member function]
    cls.add_method('GetIsInvIrIntrvlAllocated',
                   'bool',
                   [],
                   is_const=True, is_virtual=True)
    ## bs-uplink-scheduler.h (module 'wimax'): bool ns3::UplinkScheduler::GetIsIrIntrvlAllocated() const [member function]
    cls.add_method('GetIsIrIntrvlAllocated',
                   'bool',
                   [],
                   is_const=True, is_virtual=True)
    ## bs-uplink-scheduler.h (module 'wimax'): uint8_t ns3::UplinkScheduler::GetNrIrOppsAllocated() const [member function]
    cls.add_method('GetNrIrOppsAllocated',
                   'uint8_t',
                   [],
                   is_const=True, is_virtual=True)
    ## bs-uplink-scheduler.h (module 'wimax'): ns3::Time ns3::UplinkScheduler::GetTimeStampIrInterval() [member function]
    cls.add_method('GetTimeStampIrInterval',
                   'ns3::Time',
                   [],
                   is_virtual=True)
    ## bs-uplink-scheduler.h (module 'wimax'): static ns3::TypeId ns3::UplinkScheduler::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## bs-uplink-scheduler.h (module 'wimax'): ns3::Time ns3::UplinkScheduler::GetUcdTimeStamp() const [member function]
    cls.add_method('GetUcdTimeStamp',
                   'ns3::Time',
                   [],
                   is_const=True, is_virtual=True)
    ## bs-uplink-scheduler.h (module 'wimax'): std::list<ns3::OfdmUlMapIe, std::allocator<ns3::OfdmUlMapIe> > ns3::UplinkScheduler::GetUplinkAllocations() const [member function]
    cls.add_method('GetUplinkAllocations',
                   'std::list< ns3::OfdmUlMapIe >',
                   [],
                   is_const=True, is_virtual=True)
    ## bs-uplink-scheduler.h (module 'wimax'): void ns3::UplinkScheduler::InitOnce() [member function]
    cls.add_method('InitOnce',
                   'void',
                   [],
                   is_pure_virtual=True, is_virtual=True)
    ## bs-uplink-scheduler.h (module 'wimax'): void ns3::UplinkScheduler::OnSetRequestedBandwidth(ns3::ServiceFlowRecord * sfr) [member function]
    cls.add_method('OnSetRequestedBandwidth',
                   'void',
                   [param('ns3::ServiceFlowRecord *', 'sfr')],
                   is_pure_virtual=True, is_virtual=True)
    ## bs-uplink-scheduler.h (module 'wimax'): void ns3::UplinkScheduler::ProcessBandwidthRequest(ns3::BandwidthRequestHeader const & bwRequestHdr) [member function]
    cls.add_method('ProcessBandwidthRequest',
                   'void',
                   [param('ns3::BandwidthRequestHeader const &', 'bwRequestHdr')],
                   is_pure_virtual=True, is_virtual=True)
    ## bs-uplink-scheduler.h (module 'wimax'): void ns3::UplinkScheduler::Schedule() [member function]
    cls.add_method('Schedule',
                   'void',
                   [],
                   is_pure_virtual=True, is_virtual=True)
    ## bs-uplink-scheduler.h (module 'wimax'): void ns3::UplinkScheduler::ServiceBandwidthRequests(ns3::SSRecord const * ssRecord, ns3::ServiceFlow::SchedulingType schedulingType, ns3::OfdmUlMapIe & ulMapIe, ns3::WimaxPhy::ModulationType const modulationType, uint32_t & symbolsToAllocation, uint32_t & availableSymbols) [member function]
    cls.add_method('ServiceBandwidthRequests',
                   'void',
                   [param('ns3::SSRecord const *', 'ssRecord'), param('ns3::ServiceFlow::SchedulingType', 'schedulingType'), param('ns3::OfdmUlMapIe &', 'ulMapIe'), param('ns3::WimaxPhy::ModulationType const', 'modulationType'), param('uint32_t &', 'symbolsToAllocation'), param('uint32_t &', 'availableSymbols')],
                   is_pure_virtual=True, is_virtual=True)
    ## bs-uplink-scheduler.h (module 'wimax'): bool ns3::UplinkScheduler::ServiceBandwidthRequests(ns3::ServiceFlow * serviceFlow, ns3::ServiceFlow::SchedulingType schedulingType, ns3::OfdmUlMapIe & ulMapIe, ns3::WimaxPhy::ModulationType const modulationType, uint32_t & symbolsToAllocation, uint32_t & availableSymbols) [member function]
    cls.add_method('ServiceBandwidthRequests',
                   'bool',
                   [param('ns3::ServiceFlow *', 'serviceFlow'), param('ns3::ServiceFlow::SchedulingType', 'schedulingType'), param('ns3::OfdmUlMapIe &', 'ulMapIe'), param('ns3::WimaxPhy::ModulationType const', 'modulationType'), param('uint32_t &', 'symbolsToAllocation'), param('uint32_t &', 'availableSymbols')],
                   is_pure_virtual=True, is_virtual=True)
    ## bs-uplink-scheduler.h (module 'wimax'): void ns3::UplinkScheduler::ServiceUnsolicitedGrants(ns3::SSRecord const * ssRecord, ns3::ServiceFlow::SchedulingType schedulingType, ns3::OfdmUlMapIe & ulMapIe, ns3::WimaxPhy::ModulationType const modulationType, uint32_t & symbolsToAllocation, uint32_t & availableSymbols) [member function]
    cls.add_method('ServiceUnsolicitedGrants',
                   'void',
                   [param('ns3::SSRecord const *', 'ssRecord'), param('ns3::ServiceFlow::SchedulingType', 'schedulingType'), param('ns3::OfdmUlMapIe &', 'ulMapIe'), param('ns3::WimaxPhy::ModulationType const', 'modulationType'), param('uint32_t &', 'symbolsToAllocation'), param('uint32_t &', 'availableSymbols')],
                   is_pure_virtual=True, is_virtual=True)
    ## bs-uplink-scheduler.h (module 'wimax'): void ns3::UplinkScheduler::SetBs(ns3::Ptr<ns3::BaseStationNetDevice> bs) [member function]
    cls.add_method('SetBs',
                   'void',
                   [param('ns3::Ptr< ns3::BaseStationNetDevice >', 'bs')],
                   is_virtual=True)
    ## bs-uplink-scheduler.h (module 'wimax'): void ns3::UplinkScheduler::SetDcdTimeStamp(ns3::Time dcdTimeStamp) [member function]
    cls.add_method('SetDcdTimeStamp',
                   'void',
                   [param('ns3::Time', 'dcdTimeStamp')],
                   is_virtual=True)
    ## bs-uplink-scheduler.h (module 'wimax'): void ns3::UplinkScheduler::SetIsInvIrIntrvlAllocated(bool isInvIrIntrvlAllocated) [member function]
    cls.add_method('SetIsInvIrIntrvlAllocated',
                   'void',
                   [param('bool', 'isInvIrIntrvlAllocated')],
                   is_virtual=True)
    ## bs-uplink-scheduler.h (module 'wimax'): void ns3::UplinkScheduler::SetIsIrIntrvlAllocated(bool isIrIntrvlAllocated) [member function]
    cls.add_method('SetIsIrIntrvlAllocated',
                   'void',
                   [param('bool', 'isIrIntrvlAllocated')],
                   is_virtual=True)
    ## bs-uplink-scheduler.h (module 'wimax'): void ns3::UplinkScheduler::SetNrIrOppsAllocated(uint8_t nrIrOppsAllocated) [member function]
    cls.add_method('SetNrIrOppsAllocated',
                   'void',
                   [param('uint8_t', 'nrIrOppsAllocated')],
                   is_virtual=True)
    ## bs-uplink-scheduler.h (module 'wimax'): void ns3::UplinkScheduler::SetTimeStampIrInterval(ns3::Time timeStampIrInterval) [member function]
    cls.add_method('SetTimeStampIrInterval',
                   'void',
                   [param('ns3::Time', 'timeStampIrInterval')],
                   is_virtual=True)
    ## bs-uplink-scheduler.h (module 'wimax'): void ns3::UplinkScheduler::SetUcdTimeStamp(ns3::Time ucdTimeStamp) [member function]
    cls.add_method('SetUcdTimeStamp',
                   'void',
                   [param('ns3::Time', 'ucdTimeStamp')],
                   is_virtual=True)
    ## bs-uplink-scheduler.h (module 'wimax'): void ns3::UplinkScheduler::SetupServiceFlow(ns3::SSRecord * ssRecord, ns3::ServiceFlow * serviceFlow) [member function]
    cls.add_method('SetupServiceFlow',
                   'void',
                   [param('ns3::SSRecord *', 'ssRecord'), param('ns3::ServiceFlow *', 'serviceFlow')],
                   is_pure_virtual=True, is_virtual=True)
    return
def register_Ns3UplinkSchedulerMBQoS_methods(root_module, cls):
## bs-uplink-scheduler-mbqos.h (module 'wimax'): ns3::UplinkSchedulerMBQoS::UplinkSchedulerMBQoS(ns3::UplinkSchedulerMBQoS const & arg0) [copy constructor]
cls.add_constructor([param('ns3::UplinkSchedulerMBQoS const &', 'arg0')])
## bs-uplink-scheduler-mbqos.h (module 'wimax'): ns3::UplinkSchedulerMBQoS::UplinkSchedulerMBQoS() [constructor]
cls.add_constructor([])
## bs-uplink-scheduler-mbqos.h (module 'wimax'): ns3::UplinkSchedulerMBQoS::UplinkSchedulerMBQoS(ns3::Time time) [constructor]
cls.add_constructor([param('ns3::Time', 'time')])
## bs-uplink-scheduler-mbqos.h (module 'wimax'): void ns3::UplinkSchedulerMBQoS::AddUplinkAllocation(ns3::OfdmUlMapIe & ulMapIe, uint32_t const & allocationSize, uint32_t & symbolsToAllocation, uint32_t & availableSymbols) [member function]
cls.add_method('AddUplinkAllocation',
'void',
[param('ns3::OfdmUlMapIe &', 'ulMapIe'), param('uint32_t const &', 'allocationSize'), param('uint32_t &', 'symbolsToAllocation'), param('uint32_t &', 'availableSymbols')],
is_virtual=True)
## bs-uplink-scheduler-mbqos.h (module 'wimax'): void ns3::UplinkSchedulerMBQoS::AllocateInitialRangingInterval(uint32_t & symbolsToAllocation, uint32_t & availableSymbols) [member function]
cls.add_method('AllocateInitialRangingInterval',
'void',
[param('uint32_t &', 'symbolsToAllocation'), param('uint32_t &', 'availableSymbols')],
is_virtual=True)
## bs-uplink-scheduler-mbqos.h (module 'wimax'): uint32_t ns3::UplinkSchedulerMBQoS::CalculateAllocationStartTime() [member function]
cls.add_method('CalculateAllocationStartTime',
'uint32_t',
[],
is_virtual=True)
## bs-uplink-scheduler-mbqos.h (module 'wimax'): void ns3::UplinkSchedulerMBQoS::CheckDeadline(uint32_t & availableSymbols) [member function]
cls.add_method('CheckDeadline',
'void',
[param('uint32_t &', 'availableSymbols')])
## bs-uplink-scheduler-mbqos.h (module 'wimax'): void ns3::UplinkSchedulerMBQoS::CheckMinimumBandwidth(uint32_t & availableSymbols) [member function]
cls.add_method('CheckMinimumBandwidth',
'void',
[param('uint32_t &', 'availableSymbols')])
## bs-uplink-scheduler-mbqos.h (module 'wimax'): uint32_t ns3::UplinkSchedulerMBQoS::CountSymbolsJobs(ns3::Ptr<ns3::UlJob> job) [member function]
cls.add_method('CountSymbolsJobs',
'uint32_t',
[param('ns3::Ptr< ns3::UlJob >', 'job')])
## bs-uplink-scheduler-mbqos.h (module 'wimax'): uint32_t ns3::UplinkSchedulerMBQoS::CountSymbolsQueue(std::list<ns3::Ptr<ns3::UlJob>, std::allocator<ns3::Ptr<ns3::UlJob> > > jobs) [member function]
cls.add_method('CountSymbolsQueue',
'uint32_t',
[param('std::list< ns3::Ptr< ns3::UlJob > >', 'jobs')])
## bs-uplink-scheduler-mbqos.h (module 'wimax'): ns3::Ptr<ns3::UlJob> ns3::UplinkSchedulerMBQoS::CreateUlJob(ns3::SSRecord * ssRecord, ns3::ServiceFlow::SchedulingType schedType, ns3::ReqType reqType) [member function]
cls.add_method('CreateUlJob',
'ns3::Ptr< ns3::UlJob >',
[param('ns3::SSRecord *', 'ssRecord'), param('ns3::ServiceFlow::SchedulingType', 'schedType'), param('ns3::ReqType', 'reqType')])
## bs-uplink-scheduler-mbqos.h (module 'wimax'): ns3::Ptr<ns3::UlJob> ns3::UplinkSchedulerMBQoS::DequeueJob(ns3::UlJob::JobPriority priority) [member function]
cls.add_method('DequeueJob',
'ns3::Ptr< ns3::UlJob >',
[param('ns3::UlJob::JobPriority', 'priority')])
## bs-uplink-scheduler-mbqos.h (module 'wimax'): ns3::Time ns3::UplinkSchedulerMBQoS::DetermineDeadline(ns3::ServiceFlow * serviceFlow) [member function]
cls.add_method('DetermineDeadline',
'ns3::Time',
[param('ns3::ServiceFlow *', 'serviceFlow')])
## bs-uplink-scheduler-mbqos.h (module 'wimax'): void ns3::UplinkSchedulerMBQoS::EnqueueJob(ns3::UlJob::JobPriority priority, ns3::Ptr<ns3::UlJob> job) [member function]
cls.add_method('EnqueueJob',
'void',
[param('ns3::UlJob::JobPriority', 'priority'), param('ns3::Ptr< ns3::UlJob >', 'job')])
## bs-uplink-scheduler-mbqos.h (module 'wimax'): void ns3::UplinkSchedulerMBQoS::GetChannelDescriptorsToUpdate(bool & arg0, bool & arg1, bool & arg2, bool & arg3) [member function]
cls.add_method('GetChannelDescriptorsToUpdate',
'void',
[param('bool &', 'arg0'), param('bool &', 'arg1'), param('bool &', 'arg2'), param('bool &', 'arg3')],
is_virtual=True)
## bs-uplink-scheduler-mbqos.h (module 'wimax'): uint32_t ns3::UplinkSchedulerMBQoS::GetPendingSize(ns3::ServiceFlow * serviceFlow) [member function]
cls.add_method('GetPendingSize',
'uint32_t',
[param('ns3::ServiceFlow *', 'serviceFlow')])
## bs-uplink-scheduler-mbqos.h (module 'wimax'): static ns3::TypeId ns3::UplinkSchedulerMBQoS::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## bs-uplink-scheduler-mbqos.h (module 'wimax'): std::list<ns3::OfdmUlMapIe, std::allocator<ns3::OfdmUlMapIe> > ns3::UplinkSchedulerMBQoS::GetUplinkAllocations() const [member function]
cls.add_method('GetUplinkAllocations',
'std::list< ns3::OfdmUlMapIe >',
[],
is_const=True, is_virtual=True)
## bs-uplink-scheduler-mbqos.h (module 'wimax'): void ns3::UplinkSchedulerMBQoS::InitOnce() [member function]
cls.add_method('InitOnce',
'void',
[],
is_virtual=True)
## bs-uplink-scheduler-mbqos.h (module 'wimax'): void ns3::UplinkSchedulerMBQoS::OnSetRequestedBandwidth(ns3::ServiceFlowRecord * sfr) [member function]
cls.add_method('OnSetRequestedBandwidth',
'void',
[param('ns3::ServiceFlowRecord *', 'sfr')],
is_virtual=True)
## bs-uplink-scheduler-mbqos.h (module 'wimax'): void ns3::UplinkSchedulerMBQoS::ProcessBandwidthRequest(ns3::BandwidthRequestHeader const & bwRequestHdr) [member function]
cls.add_method('ProcessBandwidthRequest',
'void',
[param('ns3::BandwidthRequestHeader const &', 'bwRequestHdr')],
is_virtual=True)
## bs-uplink-scheduler-mbqos.h (module 'wimax'): void ns3::UplinkSchedulerMBQoS::Schedule() [member function]
cls.add_method('Schedule',
'void',
[],
is_virtual=True)
## bs-uplink-scheduler-mbqos.h (module 'wimax'): void ns3::UplinkSchedulerMBQoS::ServiceBandwidthRequests(ns3::SSRecord const * ssRecord, ns3::ServiceFlow::SchedulingType schedulingType, ns3::OfdmUlMapIe & ulMapIe, ns3::WimaxPhy::ModulationType const modulationType, uint32_t & symbolsToAllocation, uint32_t & availableSymbols) [member function]
cls.add_method('ServiceBandwidthRequests',
'void',
[param('ns3::SSRecord const *', 'ssRecord'), param('ns3::ServiceFlow::SchedulingType', 'schedulingType'), param('ns3::OfdmUlMapIe &', 'ulMapIe'), param('ns3::WimaxPhy::ModulationType const', 'modulationType'), param('uint32_t &', 'symbolsToAllocation'), param('uint32_t &', 'availableSymbols')],
is_virtual=True)
## bs-uplink-scheduler-mbqos.h (module 'wimax'): bool ns3::UplinkSchedulerMBQoS::ServiceBandwidthRequests(ns3::ServiceFlow * serviceFlow, ns3::ServiceFlow::SchedulingType schedulingType, ns3::OfdmUlMapIe & ulMapIe, ns3::WimaxPhy::ModulationType const modulationType, uint32_t & symbolsToAllocation, uint32_t & availableSymbols) [member function]
cls.add_method('ServiceBandwidthRequests',
'bool',
[param('ns3::ServiceFlow *', 'serviceFlow'), param('ns3::ServiceFlow::SchedulingType', 'schedulingType'), param('ns3::OfdmUlMapIe &', 'ulMapIe'), param('ns3::WimaxPhy::ModulationType const', 'modulationType'), param('uint32_t &', 'symbolsToAllocation'), param('uint32_t &', 'availableSymbols')],
is_virtual=True)
## bs-uplink-scheduler-mbqos.h (module 'wimax'): bool ns3::UplinkSchedulerMBQoS::ServiceBandwidthRequestsBytes(ns3::ServiceFlow * serviceFlow, ns3::ServiceFlow::SchedulingType schedulingType, ns3::OfdmUlMapIe & ulMapIe, ns3::WimaxPhy::ModulationType const modulationType, uint32_t & symbolsToAllocation, uint32_t & availableSymbols, uint32_t allocationSizeBytes) [member function]
cls.add_method('ServiceBandwidthRequestsBytes',
'bool',
[param('ns3::ServiceFlow *', 'serviceFlow'), param('ns3::ServiceFlow::SchedulingType', 'schedulingType'), param('ns3::OfdmUlMapIe &', 'ulMapIe'), param('ns3::WimaxPhy::ModulationType const', 'modulationType'), param('uint32_t &', 'symbolsToAllocation'), param('uint32_t &', 'availableSymbols'), param('uint32_t', 'allocationSizeBytes')])
## bs-uplink-scheduler-mbqos.h (module 'wimax'): void ns3::UplinkSchedulerMBQoS::ServiceUnsolicitedGrants(ns3::SSRecord const * ssRecord, ns3::ServiceFlow::SchedulingType schedulingType, ns3::OfdmUlMapIe & ulMapIe, ns3::WimaxPhy::ModulationType const modulationType, uint32_t & symbolsToAllocation, uint32_t & availableSymbols) [member function]
cls.add_method('ServiceUnsolicitedGrants',
'void',
[param('ns3::SSRecord const *', 'ssRecord'), param('ns3::ServiceFlow::SchedulingType', 'schedulingType'), param('ns3::OfdmUlMapIe &', 'ulMapIe'), param('ns3::WimaxPhy::ModulationType const', 'modulationType'), param('uint32_t &', 'symbolsToAllocation'), param('uint32_t &', 'availableSymbols')],
is_virtual=True)
## bs-uplink-scheduler-mbqos.h (module 'wimax'): void ns3::UplinkSchedulerMBQoS::SetupServiceFlow(ns3::SSRecord * ssRecord, ns3::ServiceFlow * serviceFlow) [member function]
cls.add_method('SetupServiceFlow',
'void',
[param('ns3::SSRecord *', 'ssRecord'), param('ns3::ServiceFlow *', 'serviceFlow')],
is_virtual=True)
## bs-uplink-scheduler-mbqos.h (module 'wimax'): void ns3::UplinkSchedulerMBQoS::UplinkSchedWindowTimer() [member function]
cls.add_method('UplinkSchedWindowTimer',
'void',
[])
return
def register_Ns3UplinkSchedulerRtps_methods(root_module, cls):
    """Register Python bindings for ``ns3::UplinkSchedulerRtps``.

    Adds the class's constructors and member functions (uplink allocation,
    ranging, scheduling and bandwidth-request servicing) to the pybindgen
    class wrapper *cls*.  Overloads (e.g. the two ``ServiceBandwidthRequests``
    variants) are registered as separate ``add_method`` calls.

    NOTE(review): this file appears to be auto-generated by ns-3's API
    scanner — regenerate the bindings rather than hand-editing when the
    underlying C++ headers change.
    """
    ## bs-uplink-scheduler-rtps.h (module 'wimax'): ns3::UplinkSchedulerRtps::UplinkSchedulerRtps(ns3::UplinkSchedulerRtps const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::UplinkSchedulerRtps const &', 'arg0')])
    ## bs-uplink-scheduler-rtps.h (module 'wimax'): ns3::UplinkSchedulerRtps::UplinkSchedulerRtps() [constructor]
    cls.add_constructor([])
    ## bs-uplink-scheduler-rtps.h (module 'wimax'): ns3::UplinkSchedulerRtps::UplinkSchedulerRtps(ns3::Ptr<ns3::BaseStationNetDevice> bs) [constructor]
    cls.add_constructor([param('ns3::Ptr< ns3::BaseStationNetDevice >', 'bs')])
    ## bs-uplink-scheduler-rtps.h (module 'wimax'): void ns3::UplinkSchedulerRtps::AddUplinkAllocation(ns3::OfdmUlMapIe & ulMapIe, uint32_t const & allocationSize, uint32_t & symbolsToAllocation, uint32_t & availableSymbols) [member function]
    cls.add_method('AddUplinkAllocation',
                   'void',
                   [param('ns3::OfdmUlMapIe &', 'ulMapIe'), param('uint32_t const &', 'allocationSize'), param('uint32_t &', 'symbolsToAllocation'), param('uint32_t &', 'availableSymbols')],
                   is_virtual=True)
    ## bs-uplink-scheduler-rtps.h (module 'wimax'): void ns3::UplinkSchedulerRtps::AllocateInitialRangingInterval(uint32_t & symbolsToAllocation, uint32_t & availableSymbols) [member function]
    cls.add_method('AllocateInitialRangingInterval',
                   'void',
                   [param('uint32_t &', 'symbolsToAllocation'), param('uint32_t &', 'availableSymbols')],
                   is_virtual=True)
    ## bs-uplink-scheduler-rtps.h (module 'wimax'): uint32_t ns3::UplinkSchedulerRtps::CalculateAllocationStartTime() [member function]
    cls.add_method('CalculateAllocationStartTime',
                   'uint32_t',
                   [],
                   is_virtual=True)
    ## bs-uplink-scheduler-rtps.h (module 'wimax'): void ns3::UplinkSchedulerRtps::GetChannelDescriptorsToUpdate(bool & arg0, bool & arg1, bool & arg2, bool & arg3) [member function]
    cls.add_method('GetChannelDescriptorsToUpdate',
                   'void',
                   [param('bool &', 'arg0'), param('bool &', 'arg1'), param('bool &', 'arg2'), param('bool &', 'arg3')],
                   is_virtual=True)
    ## bs-uplink-scheduler-rtps.h (module 'wimax'): static ns3::TypeId ns3::UplinkSchedulerRtps::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## bs-uplink-scheduler-rtps.h (module 'wimax'): std::list<ns3::OfdmUlMapIe, std::allocator<ns3::OfdmUlMapIe> > ns3::UplinkSchedulerRtps::GetUplinkAllocations() const [member function]
    cls.add_method('GetUplinkAllocations',
                   'std::list< ns3::OfdmUlMapIe >',
                   [],
                   is_const=True, is_virtual=True)
    ## bs-uplink-scheduler-rtps.h (module 'wimax'): void ns3::UplinkSchedulerRtps::InitOnce() [member function]
    cls.add_method('InitOnce',
                   'void',
                   [],
                   is_virtual=True)
    ## bs-uplink-scheduler-rtps.h (module 'wimax'): void ns3::UplinkSchedulerRtps::OnSetRequestedBandwidth(ns3::ServiceFlowRecord * sfr) [member function]
    cls.add_method('OnSetRequestedBandwidth',
                   'void',
                   [param('ns3::ServiceFlowRecord *', 'sfr')],
                   is_virtual=True)
    ## bs-uplink-scheduler-rtps.h (module 'wimax'): void ns3::UplinkSchedulerRtps::ProcessBandwidthRequest(ns3::BandwidthRequestHeader const & bwRequestHdr) [member function]
    cls.add_method('ProcessBandwidthRequest',
                   'void',
                   [param('ns3::BandwidthRequestHeader const &', 'bwRequestHdr')],
                   is_virtual=True)
    ## bs-uplink-scheduler-rtps.h (module 'wimax'): void ns3::UplinkSchedulerRtps::Schedule() [member function]
    cls.add_method('Schedule',
                   'void',
                   [],
                   is_virtual=True)
    ## bs-uplink-scheduler-rtps.h (module 'wimax'): void ns3::UplinkSchedulerRtps::ServiceBandwidthRequests(ns3::SSRecord const * ssRecord, ns3::ServiceFlow::SchedulingType schedulingType, ns3::OfdmUlMapIe & ulMapIe, ns3::WimaxPhy::ModulationType const modulationType, uint32_t & symbolsToAllocation, uint32_t & availableSymbols) [member function]
    cls.add_method('ServiceBandwidthRequests',
                   'void',
                   [param('ns3::SSRecord const *', 'ssRecord'), param('ns3::ServiceFlow::SchedulingType', 'schedulingType'), param('ns3::OfdmUlMapIe &', 'ulMapIe'), param('ns3::WimaxPhy::ModulationType const', 'modulationType'), param('uint32_t &', 'symbolsToAllocation'), param('uint32_t &', 'availableSymbols')],
                   is_virtual=True)
    ## bs-uplink-scheduler-rtps.h (module 'wimax'): bool ns3::UplinkSchedulerRtps::ServiceBandwidthRequests(ns3::ServiceFlow * serviceFlow, ns3::ServiceFlow::SchedulingType schedulingType, ns3::OfdmUlMapIe & ulMapIe, ns3::WimaxPhy::ModulationType const modulationType, uint32_t & symbolsToAllocation, uint32_t & availableSymbols) [member function]
    cls.add_method('ServiceBandwidthRequests',
                   'bool',
                   [param('ns3::ServiceFlow *', 'serviceFlow'), param('ns3::ServiceFlow::SchedulingType', 'schedulingType'), param('ns3::OfdmUlMapIe &', 'ulMapIe'), param('ns3::WimaxPhy::ModulationType const', 'modulationType'), param('uint32_t &', 'symbolsToAllocation'), param('uint32_t &', 'availableSymbols')],
                   is_virtual=True)
    ## bs-uplink-scheduler-rtps.h (module 'wimax'): void ns3::UplinkSchedulerRtps::ServiceUnsolicitedGrants(ns3::SSRecord const * ssRecord, ns3::ServiceFlow::SchedulingType schedulingType, ns3::OfdmUlMapIe & ulMapIe, ns3::WimaxPhy::ModulationType const modulationType, uint32_t & symbolsToAllocation, uint32_t & availableSymbols) [member function]
    cls.add_method('ServiceUnsolicitedGrants',
                   'void',
                   [param('ns3::SSRecord const *', 'ssRecord'), param('ns3::ServiceFlow::SchedulingType', 'schedulingType'), param('ns3::OfdmUlMapIe &', 'ulMapIe'), param('ns3::WimaxPhy::ModulationType const', 'modulationType'), param('uint32_t &', 'symbolsToAllocation'), param('uint32_t &', 'availableSymbols')],
                   is_virtual=True)
    ## bs-uplink-scheduler-rtps.h (module 'wimax'): void ns3::UplinkSchedulerRtps::SetupServiceFlow(ns3::SSRecord * ssRecord, ns3::ServiceFlow * serviceFlow) [member function]
    cls.add_method('SetupServiceFlow',
                   'void',
                   [param('ns3::SSRecord *', 'ssRecord'), param('ns3::ServiceFlow *', 'serviceFlow')],
                   is_virtual=True)
    ## bs-uplink-scheduler-rtps.h (module 'wimax'): void ns3::UplinkSchedulerRtps::ULSchedulerRTPSConnection(uint32_t & symbolsToAllocation, uint32_t & availableSymbols) [member function]
    cls.add_method('ULSchedulerRTPSConnection',
                   'void',
                   [param('uint32_t &', 'symbolsToAllocation'), param('uint32_t &', 'availableSymbols')])
    return
def register_Ns3UplinkSchedulerSimple_methods(root_module, cls):
    """Register Python bindings for ``ns3::UplinkSchedulerSimple``.

    Adds the class's constructors and (mostly virtual) member functions —
    uplink allocation, ranging, scheduling and bandwidth-request servicing —
    to the pybindgen class wrapper *cls*.

    NOTE(review): auto-generated binding code; prefer regenerating over
    manual edits so the registrations stay in sync with the C++ header.
    """
    ## bs-uplink-scheduler-simple.h (module 'wimax'): ns3::UplinkSchedulerSimple::UplinkSchedulerSimple(ns3::UplinkSchedulerSimple const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::UplinkSchedulerSimple const &', 'arg0')])
    ## bs-uplink-scheduler-simple.h (module 'wimax'): ns3::UplinkSchedulerSimple::UplinkSchedulerSimple() [constructor]
    cls.add_constructor([])
    ## bs-uplink-scheduler-simple.h (module 'wimax'): ns3::UplinkSchedulerSimple::UplinkSchedulerSimple(ns3::Ptr<ns3::BaseStationNetDevice> bs) [constructor]
    cls.add_constructor([param('ns3::Ptr< ns3::BaseStationNetDevice >', 'bs')])
    ## bs-uplink-scheduler-simple.h (module 'wimax'): void ns3::UplinkSchedulerSimple::AddUplinkAllocation(ns3::OfdmUlMapIe & ulMapIe, uint32_t const & allocationSize, uint32_t & symbolsToAllocation, uint32_t & availableSymbols) [member function]
    cls.add_method('AddUplinkAllocation',
                   'void',
                   [param('ns3::OfdmUlMapIe &', 'ulMapIe'), param('uint32_t const &', 'allocationSize'), param('uint32_t &', 'symbolsToAllocation'), param('uint32_t &', 'availableSymbols')],
                   is_virtual=True)
    ## bs-uplink-scheduler-simple.h (module 'wimax'): void ns3::UplinkSchedulerSimple::AllocateInitialRangingInterval(uint32_t & symbolsToAllocation, uint32_t & availableSymbols) [member function]
    cls.add_method('AllocateInitialRangingInterval',
                   'void',
                   [param('uint32_t &', 'symbolsToAllocation'), param('uint32_t &', 'availableSymbols')],
                   is_virtual=True)
    ## bs-uplink-scheduler-simple.h (module 'wimax'): uint32_t ns3::UplinkSchedulerSimple::CalculateAllocationStartTime() [member function]
    cls.add_method('CalculateAllocationStartTime',
                   'uint32_t',
                   [],
                   is_virtual=True)
    ## bs-uplink-scheduler-simple.h (module 'wimax'): void ns3::UplinkSchedulerSimple::GetChannelDescriptorsToUpdate(bool & arg0, bool & arg1, bool & arg2, bool & arg3) [member function]
    cls.add_method('GetChannelDescriptorsToUpdate',
                   'void',
                   [param('bool &', 'arg0'), param('bool &', 'arg1'), param('bool &', 'arg2'), param('bool &', 'arg3')],
                   is_virtual=True)
    ## bs-uplink-scheduler-simple.h (module 'wimax'): static ns3::TypeId ns3::UplinkSchedulerSimple::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## bs-uplink-scheduler-simple.h (module 'wimax'): std::list<ns3::OfdmUlMapIe, std::allocator<ns3::OfdmUlMapIe> > ns3::UplinkSchedulerSimple::GetUplinkAllocations() const [member function]
    cls.add_method('GetUplinkAllocations',
                   'std::list< ns3::OfdmUlMapIe >',
                   [],
                   is_const=True, is_virtual=True)
    ## bs-uplink-scheduler-simple.h (module 'wimax'): void ns3::UplinkSchedulerSimple::InitOnce() [member function]
    cls.add_method('InitOnce',
                   'void',
                   [],
                   is_virtual=True)
    ## bs-uplink-scheduler-simple.h (module 'wimax'): void ns3::UplinkSchedulerSimple::OnSetRequestedBandwidth(ns3::ServiceFlowRecord * sfr) [member function]
    cls.add_method('OnSetRequestedBandwidth',
                   'void',
                   [param('ns3::ServiceFlowRecord *', 'sfr')],
                   is_virtual=True)
    ## bs-uplink-scheduler-simple.h (module 'wimax'): void ns3::UplinkSchedulerSimple::ProcessBandwidthRequest(ns3::BandwidthRequestHeader const & bwRequestHdr) [member function]
    cls.add_method('ProcessBandwidthRequest',
                   'void',
                   [param('ns3::BandwidthRequestHeader const &', 'bwRequestHdr')],
                   is_virtual=True)
    ## bs-uplink-scheduler-simple.h (module 'wimax'): void ns3::UplinkSchedulerSimple::Schedule() [member function]
    cls.add_method('Schedule',
                   'void',
                   [],
                   is_virtual=True)
    ## bs-uplink-scheduler-simple.h (module 'wimax'): void ns3::UplinkSchedulerSimple::ServiceBandwidthRequests(ns3::SSRecord const * ssRecord, ns3::ServiceFlow::SchedulingType schedulingType, ns3::OfdmUlMapIe & ulMapIe, ns3::WimaxPhy::ModulationType const modulationType, uint32_t & symbolsToAllocation, uint32_t & availableSymbols) [member function]
    cls.add_method('ServiceBandwidthRequests',
                   'void',
                   [param('ns3::SSRecord const *', 'ssRecord'), param('ns3::ServiceFlow::SchedulingType', 'schedulingType'), param('ns3::OfdmUlMapIe &', 'ulMapIe'), param('ns3::WimaxPhy::ModulationType const', 'modulationType'), param('uint32_t &', 'symbolsToAllocation'), param('uint32_t &', 'availableSymbols')],
                   is_virtual=True)
    ## bs-uplink-scheduler-simple.h (module 'wimax'): bool ns3::UplinkSchedulerSimple::ServiceBandwidthRequests(ns3::ServiceFlow * serviceFlow, ns3::ServiceFlow::SchedulingType schedulingType, ns3::OfdmUlMapIe & ulMapIe, ns3::WimaxPhy::ModulationType const modulationType, uint32_t & symbolsToAllocation, uint32_t & availableSymbols) [member function]
    cls.add_method('ServiceBandwidthRequests',
                   'bool',
                   [param('ns3::ServiceFlow *', 'serviceFlow'), param('ns3::ServiceFlow::SchedulingType', 'schedulingType'), param('ns3::OfdmUlMapIe &', 'ulMapIe'), param('ns3::WimaxPhy::ModulationType const', 'modulationType'), param('uint32_t &', 'symbolsToAllocation'), param('uint32_t &', 'availableSymbols')],
                   is_virtual=True)
    ## bs-uplink-scheduler-simple.h (module 'wimax'): void ns3::UplinkSchedulerSimple::ServiceUnsolicitedGrants(ns3::SSRecord const * ssRecord, ns3::ServiceFlow::SchedulingType schedulingType, ns3::OfdmUlMapIe & ulMapIe, ns3::WimaxPhy::ModulationType const modulationType, uint32_t & symbolsToAllocation, uint32_t & availableSymbols) [member function]
    cls.add_method('ServiceUnsolicitedGrants',
                   'void',
                   [param('ns3::SSRecord const *', 'ssRecord'), param('ns3::ServiceFlow::SchedulingType', 'schedulingType'), param('ns3::OfdmUlMapIe &', 'ulMapIe'), param('ns3::WimaxPhy::ModulationType const', 'modulationType'), param('uint32_t &', 'symbolsToAllocation'), param('uint32_t &', 'availableSymbols')],
                   is_virtual=True)
    ## bs-uplink-scheduler-simple.h (module 'wimax'): void ns3::UplinkSchedulerSimple::SetupServiceFlow(ns3::SSRecord * ssRecord, ns3::ServiceFlow * serviceFlow) [member function]
    cls.add_method('SetupServiceFlow',
                   'void',
                   [param('ns3::SSRecord *', 'ssRecord'), param('ns3::ServiceFlow *', 'serviceFlow')],
                   is_virtual=True)
    return
def register_Ns3WeibullRandomVariable_methods(root_module, cls):
    """Register Python bindings for ``ns3::WeibullRandomVariable``.

    Registers, in order: the static ``GetTypeId``, the default constructor,
    the three const accessors (scale/shape/bound), the parameterised
    ``GetValue``/``GetInteger`` overloads, and the virtual no-argument
    ``GetValue``/``GetInteger`` overrides on the pybindgen wrapper *cls*.
    """
    ## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::WeibullRandomVariable::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## random-variable-stream.h (module 'core'): ns3::WeibullRandomVariable::WeibullRandomVariable() [constructor]
    cls.add_constructor([])
    # Const accessors for the distribution parameters.
    for accessor in ('GetScale', 'GetShape', 'GetBound'):
        cls.add_method(accessor, 'double', [], is_const=True)
    # Parameterised draws: explicit scale/shape/bound arguments.
    arg_names = ('scale', 'shape', 'bound')
    cls.add_method('GetValue', 'double',
                   [param('double', name) for name in arg_names])
    cls.add_method('GetInteger', 'uint32_t',
                   [param('uint32_t', name) for name in arg_names])
    # Virtual overrides drawing from the configured distribution.
    cls.add_method('GetValue', 'double', [], is_virtual=True)
    cls.add_method('GetInteger', 'uint32_t', [], is_virtual=True)
    return
def register_Ns3WimaxConnection_methods(root_module, cls):
    """Register Python bindings for ``ns3::WimaxConnection``.

    Adds the constructors, queue/fragment management methods, accessors
    (CID, type, service flow, queue) and the private virtual ``DoDispose``
    to the pybindgen class wrapper *cls*.

    NOTE(review): auto-generated binding code; regenerate rather than
    hand-edit when the C++ header changes.
    """
    ## wimax-connection.h (module 'wimax'): ns3::WimaxConnection::WimaxConnection(ns3::WimaxConnection const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::WimaxConnection const &', 'arg0')])
    ## wimax-connection.h (module 'wimax'): ns3::WimaxConnection::WimaxConnection(ns3::Cid cid, ns3::Cid::Type type) [constructor]
    cls.add_constructor([param('ns3::Cid', 'cid'), param('ns3::Cid::Type', 'type')])
    ## wimax-connection.h (module 'wimax'): void ns3::WimaxConnection::ClearFragmentsQueue() [member function]
    cls.add_method('ClearFragmentsQueue',
                   'void',
                   [])
    ## wimax-connection.h (module 'wimax'): ns3::Ptr<ns3::Packet> ns3::WimaxConnection::Dequeue(ns3::MacHeaderType::HeaderType packetType=::ns3::MacHeaderType::HEADER_TYPE_GENERIC) [member function]
    cls.add_method('Dequeue',
                   'ns3::Ptr< ns3::Packet >',
                   [param('ns3::MacHeaderType::HeaderType', 'packetType', default_value='::ns3::MacHeaderType::HEADER_TYPE_GENERIC')])
    ## wimax-connection.h (module 'wimax'): ns3::Ptr<ns3::Packet> ns3::WimaxConnection::Dequeue(ns3::MacHeaderType::HeaderType packetType, uint32_t availableByte) [member function]
    cls.add_method('Dequeue',
                   'ns3::Ptr< ns3::Packet >',
                   [param('ns3::MacHeaderType::HeaderType', 'packetType'), param('uint32_t', 'availableByte')])
    ## wimax-connection.h (module 'wimax'): bool ns3::WimaxConnection::Enqueue(ns3::Ptr<ns3::Packet> packet, ns3::MacHeaderType const & hdrType, ns3::GenericMacHeader const & hdr) [member function]
    cls.add_method('Enqueue',
                   'bool',
                   [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::MacHeaderType const &', 'hdrType'), param('ns3::GenericMacHeader const &', 'hdr')])
    ## wimax-connection.h (module 'wimax'): void ns3::WimaxConnection::FragmentEnqueue(ns3::Ptr<const ns3::Packet> fragment) [member function]
    cls.add_method('FragmentEnqueue',
                   'void',
                   [param('ns3::Ptr< ns3::Packet const >', 'fragment')])
    ## wimax-connection.h (module 'wimax'): ns3::Cid ns3::WimaxConnection::GetCid() const [member function]
    cls.add_method('GetCid',
                   'ns3::Cid',
                   [],
                   is_const=True)
    ## wimax-connection.h (module 'wimax'): std::list<ns3::Ptr<ns3::Packet const>, std::allocator<ns3::Ptr<ns3::Packet const> > > const ns3::WimaxConnection::GetFragmentsQueue() const [member function]
    cls.add_method('GetFragmentsQueue',
                   'std::list< ns3::Ptr< ns3::Packet const > > const',
                   [],
                   is_const=True)
    ## wimax-connection.h (module 'wimax'): ns3::Ptr<ns3::WimaxMacQueue> ns3::WimaxConnection::GetQueue() const [member function]
    cls.add_method('GetQueue',
                   'ns3::Ptr< ns3::WimaxMacQueue >',
                   [],
                   is_const=True)
    ## wimax-connection.h (module 'wimax'): uint8_t ns3::WimaxConnection::GetSchedulingType() const [member function]
    cls.add_method('GetSchedulingType',
                   'uint8_t',
                   [],
                   is_const=True)
    ## wimax-connection.h (module 'wimax'): ns3::ServiceFlow * ns3::WimaxConnection::GetServiceFlow() const [member function]
    cls.add_method('GetServiceFlow',
                   'ns3::ServiceFlow *',
                   [],
                   is_const=True)
    ## wimax-connection.h (module 'wimax'): ns3::Cid::Type ns3::WimaxConnection::GetType() const [member function]
    cls.add_method('GetType',
                   'ns3::Cid::Type',
                   [],
                   is_const=True)
    ## wimax-connection.h (module 'wimax'): static ns3::TypeId ns3::WimaxConnection::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## wimax-connection.h (module 'wimax'): std::string ns3::WimaxConnection::GetTypeStr() const [member function]
    cls.add_method('GetTypeStr',
                   'std::string',
                   [],
                   is_const=True)
    ## wimax-connection.h (module 'wimax'): bool ns3::WimaxConnection::HasPackets() const [member function]
    cls.add_method('HasPackets',
                   'bool',
                   [],
                   is_const=True)
    ## wimax-connection.h (module 'wimax'): bool ns3::WimaxConnection::HasPackets(ns3::MacHeaderType::HeaderType packetType) const [member function]
    cls.add_method('HasPackets',
                   'bool',
                   [param('ns3::MacHeaderType::HeaderType', 'packetType')],
                   is_const=True)
    ## wimax-connection.h (module 'wimax'): void ns3::WimaxConnection::SetServiceFlow(ns3::ServiceFlow * serviceFlow) [member function]
    cls.add_method('SetServiceFlow',
                   'void',
                   [param('ns3::ServiceFlow *', 'serviceFlow')])
    ## wimax-connection.h (module 'wimax'): void ns3::WimaxConnection::DoDispose() [member function]
    cls.add_method('DoDispose',
                   'void',
                   [],
                   visibility='private', is_virtual=True)
    return
def register_Ns3WimaxMacQueue_methods(root_module, cls):
    """Register Python bindings for ``ns3::WimaxMacQueue``.

    Adds the constructors, enqueue/dequeue/peek operations (including the
    several ``Peek`` overloads), size/fragmentation accessors and mutators
    to the pybindgen class wrapper *cls*.

    NOTE(review): auto-generated binding code; regenerate rather than
    hand-edit when the C++ header changes.
    """
    ## wimax-mac-queue.h (module 'wimax'): ns3::WimaxMacQueue::WimaxMacQueue(ns3::WimaxMacQueue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::WimaxMacQueue const &', 'arg0')])
    ## wimax-mac-queue.h (module 'wimax'): ns3::WimaxMacQueue::WimaxMacQueue() [constructor]
    cls.add_constructor([])
    ## wimax-mac-queue.h (module 'wimax'): ns3::WimaxMacQueue::WimaxMacQueue(uint32_t maxSize) [constructor]
    cls.add_constructor([param('uint32_t', 'maxSize')])
    ## wimax-mac-queue.h (module 'wimax'): bool ns3::WimaxMacQueue::CheckForFragmentation(ns3::MacHeaderType::HeaderType packetType) [member function]
    cls.add_method('CheckForFragmentation',
                   'bool',
                   [param('ns3::MacHeaderType::HeaderType', 'packetType')])
    ## wimax-mac-queue.h (module 'wimax'): ns3::Ptr<ns3::Packet> ns3::WimaxMacQueue::Dequeue(ns3::MacHeaderType::HeaderType packetType) [member function]
    cls.add_method('Dequeue',
                   'ns3::Ptr< ns3::Packet >',
                   [param('ns3::MacHeaderType::HeaderType', 'packetType')])
    ## wimax-mac-queue.h (module 'wimax'): ns3::Ptr<ns3::Packet> ns3::WimaxMacQueue::Dequeue(ns3::MacHeaderType::HeaderType packetType, uint32_t availableByte) [member function]
    cls.add_method('Dequeue',
                   'ns3::Ptr< ns3::Packet >',
                   [param('ns3::MacHeaderType::HeaderType', 'packetType'), param('uint32_t', 'availableByte')])
    ## wimax-mac-queue.h (module 'wimax'): bool ns3::WimaxMacQueue::Enqueue(ns3::Ptr<ns3::Packet> packet, ns3::MacHeaderType const & hdrType, ns3::GenericMacHeader const & hdr) [member function]
    cls.add_method('Enqueue',
                   'bool',
                   [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::MacHeaderType const &', 'hdrType'), param('ns3::GenericMacHeader const &', 'hdr')])
    ## wimax-mac-queue.h (module 'wimax'): uint32_t ns3::WimaxMacQueue::GetFirstPacketHdrSize(ns3::MacHeaderType::HeaderType packetType) [member function]
    cls.add_method('GetFirstPacketHdrSize',
                   'uint32_t',
                   [param('ns3::MacHeaderType::HeaderType', 'packetType')])
    ## wimax-mac-queue.h (module 'wimax'): uint32_t ns3::WimaxMacQueue::GetFirstPacketPayloadSize(ns3::MacHeaderType::HeaderType packetType) [member function]
    cls.add_method('GetFirstPacketPayloadSize',
                   'uint32_t',
                   [param('ns3::MacHeaderType::HeaderType', 'packetType')])
    ## wimax-mac-queue.h (module 'wimax'): uint32_t ns3::WimaxMacQueue::GetFirstPacketRequiredByte(ns3::MacHeaderType::HeaderType packetType) [member function]
    cls.add_method('GetFirstPacketRequiredByte',
                   'uint32_t',
                   [param('ns3::MacHeaderType::HeaderType', 'packetType')])
    ## wimax-mac-queue.h (module 'wimax'): uint32_t ns3::WimaxMacQueue::GetMaxSize() const [member function]
    cls.add_method('GetMaxSize',
                   'uint32_t',
                   [],
                   is_const=True)
    ## wimax-mac-queue.h (module 'wimax'): uint32_t ns3::WimaxMacQueue::GetNBytes() const [member function]
    cls.add_method('GetNBytes',
                   'uint32_t',
                   [],
                   is_const=True)
    ## wimax-mac-queue.h (module 'wimax'): std::deque<ns3::WimaxMacQueue::QueueElement, std::allocator<ns3::WimaxMacQueue::QueueElement> > const & ns3::WimaxMacQueue::GetPacketQueue() const [member function]
    cls.add_method('GetPacketQueue',
                   'std::deque< ns3::WimaxMacQueue::QueueElement > const &',
                   [],
                   is_const=True)
    ## wimax-mac-queue.h (module 'wimax'): uint32_t ns3::WimaxMacQueue::GetQueueLengthWithMACOverhead() [member function]
    cls.add_method('GetQueueLengthWithMACOverhead',
                   'uint32_t',
                   [])
    ## wimax-mac-queue.h (module 'wimax'): uint32_t ns3::WimaxMacQueue::GetSize() const [member function]
    cls.add_method('GetSize',
                   'uint32_t',
                   [],
                   is_const=True)
    ## wimax-mac-queue.h (module 'wimax'): static ns3::TypeId ns3::WimaxMacQueue::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## wimax-mac-queue.h (module 'wimax'): bool ns3::WimaxMacQueue::IsEmpty() const [member function]
    cls.add_method('IsEmpty',
                   'bool',
                   [],
                   is_const=True)
    ## wimax-mac-queue.h (module 'wimax'): bool ns3::WimaxMacQueue::IsEmpty(ns3::MacHeaderType::HeaderType packetType) const [member function]
    cls.add_method('IsEmpty',
                   'bool',
                   [param('ns3::MacHeaderType::HeaderType', 'packetType')],
                   is_const=True)
    ## wimax-mac-queue.h (module 'wimax'): ns3::Ptr<ns3::Packet> ns3::WimaxMacQueue::Peek(ns3::GenericMacHeader & hdr) const [member function]
    cls.add_method('Peek',
                   'ns3::Ptr< ns3::Packet >',
                   [param('ns3::GenericMacHeader &', 'hdr')],
                   is_const=True)
    ## wimax-mac-queue.h (module 'wimax'): ns3::Ptr<ns3::Packet> ns3::WimaxMacQueue::Peek(ns3::GenericMacHeader & hdr, ns3::Time & timeStamp) const [member function]
    cls.add_method('Peek',
                   'ns3::Ptr< ns3::Packet >',
                   [param('ns3::GenericMacHeader &', 'hdr'), param('ns3::Time &', 'timeStamp')],
                   is_const=True)
    ## wimax-mac-queue.h (module 'wimax'): ns3::Ptr<ns3::Packet> ns3::WimaxMacQueue::Peek(ns3::MacHeaderType::HeaderType packetType) const [member function]
    cls.add_method('Peek',
                   'ns3::Ptr< ns3::Packet >',
                   [param('ns3::MacHeaderType::HeaderType', 'packetType')],
                   is_const=True)
    ## wimax-mac-queue.h (module 'wimax'): ns3::Ptr<ns3::Packet> ns3::WimaxMacQueue::Peek(ns3::MacHeaderType::HeaderType packetType, ns3::Time & timeStamp) const [member function]
    cls.add_method('Peek',
                   'ns3::Ptr< ns3::Packet >',
                   [param('ns3::MacHeaderType::HeaderType', 'packetType'), param('ns3::Time &', 'timeStamp')],
                   is_const=True)
    ## wimax-mac-queue.h (module 'wimax'): void ns3::WimaxMacQueue::SetFragmentNumber(ns3::MacHeaderType::HeaderType packetType) [member function]
    cls.add_method('SetFragmentNumber',
                   'void',
                   [param('ns3::MacHeaderType::HeaderType', 'packetType')])
    ## wimax-mac-queue.h (module 'wimax'): void ns3::WimaxMacQueue::SetFragmentOffset(ns3::MacHeaderType::HeaderType packetType, uint32_t offset) [member function]
    cls.add_method('SetFragmentOffset',
                   'void',
                   [param('ns3::MacHeaderType::HeaderType', 'packetType'), param('uint32_t', 'offset')])
    ## wimax-mac-queue.h (module 'wimax'): void ns3::WimaxMacQueue::SetFragmentation(ns3::MacHeaderType::HeaderType packetType) [member function]
    cls.add_method('SetFragmentation',
                   'void',
                   [param('ns3::MacHeaderType::HeaderType', 'packetType')])
    ## wimax-mac-queue.h (module 'wimax'): void ns3::WimaxMacQueue::SetMaxSize(uint32_t maxSize) [member function]
    cls.add_method('SetMaxSize',
                   'void',
                   [param('uint32_t', 'maxSize')])
    return
def register_Ns3WimaxMacQueueQueueElement_methods(root_module, cls):
    """Register Python bindings for ``ns3::WimaxMacQueue::QueueElement``.

    Registers, in order: the copy/default/full constructors, the
    ``GetSize`` accessor, the three fragmentation setters, and the seven
    public instance attributes on the pybindgen class wrapper *cls*.
    """
    # Constructors: copy, default, and the fully-specified element.
    cls.add_constructor([param('ns3::WimaxMacQueue::QueueElement const &', 'arg0')])
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Ptr< ns3::Packet >', 'packet'),
                         param('ns3::MacHeaderType const &', 'hdrType'),
                         param('ns3::GenericMacHeader const &', 'hdr'),
                         param('ns3::Time', 'timeStamp')])
    # Size accessor and fragmentation mutators.
    cls.add_method('GetSize', 'uint32_t', [], is_const=True)
    cls.add_method('SetFragmentNumber', 'void', [])
    cls.add_method('SetFragmentOffset', 'void', [param('uint32_t', 'offset')])
    cls.add_method('SetFragmentation', 'void', [])
    # Public data members exposed as mutable instance attributes.
    for attr_name, attr_cpp_type in (
            ('m_fragmentNumber', 'uint32_t'),
            ('m_fragmentOffset', 'uint32_t'),
            ('m_fragmentation', 'bool'),
            ('m_hdr', 'ns3::GenericMacHeader'),
            ('m_hdrType', 'ns3::MacHeaderType'),
            ('m_packet', 'ns3::Ptr< ns3::Packet >'),
            ('m_timeStamp', 'ns3::Time')):
        cls.add_instance_attribute(attr_name, attr_cpp_type, is_const=False)
    return
def register_Ns3WimaxMacToMacHeader_methods(root_module, cls):
    """Register Python bindings for ns3::WimaxMacToMacHeader (wimax-mac-to-mac-header.h).

    Auto-generated pybindgen registration: constructors first, then the
    Header serialization interface (Deserialize/Serialize/GetSerializedSize),
    type-id accessors and Print.
    """
    # Constructors: copy, default, and explicit-length.
    cls.add_constructor([param('ns3::WimaxMacToMacHeader const &', 'arg0')])
    cls.add_constructor([])
    cls.add_constructor([param('uint32_t', 'len')])
    # ns3::Header virtual interface.
    cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'start')], is_virtual=True)
    cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True)
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True, is_virtual=True)
    cls.add_method('GetSizeOfLen', 'uint8_t', [], is_const=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True, is_virtual=True)
    cls.add_method('Serialize', 'void', [param('ns3::Buffer::Iterator', 'start')], is_const=True, is_virtual=True)
    return
def register_Ns3WimaxPhy_methods(root_module, cls):
    """Register Python bindings for ns3::WimaxPhy (wimax-phy.h).

    Auto-generated pybindgen registration. Order is preserved from the
    generator's output: constructors, public API (getters/setters and
    Send/StartScanning), then the private pure-virtual Do* hooks that
    concrete PHY implementations override.
    """
    # Constructors.
    cls.add_constructor([param('ns3::WimaxPhy const &', 'arg0')])
    cls.add_constructor([])
    # Public virtual / non-virtual API.
    cls.add_method('AssignStreams', 'int64_t', [param('int64_t', 'stream')], is_pure_virtual=True, is_virtual=True)
    cls.add_method('Attach', 'void', [param('ns3::Ptr< ns3::WimaxChannel >', 'channel')])
    cls.add_method('DoDispose', 'void', [], is_virtual=True)
    cls.add_method('GetChannel', 'ns3::Ptr< ns3::WimaxChannel >', [], is_const=True)
    cls.add_method('GetChannelBandwidth', 'uint32_t', [], is_const=True)
    cls.add_method('GetChnlSrchTimeoutEvent', 'ns3::EventId', [], is_const=True)
    cls.add_method('GetDataRate', 'uint32_t', [param('ns3::WimaxPhy::ModulationType', 'modulationType')], is_const=True)
    cls.add_method('GetDevice', 'ns3::Ptr< ns3::NetDevice >', [], is_const=True)
    # GetFrameDuration is overloaded: no-arg and frame-duration-code forms.
    cls.add_method('GetFrameDuration', 'ns3::Time', [], is_const=True)
    cls.add_method('GetFrameDuration', 'ns3::Time', [param('uint8_t', 'frameDurationCode')], is_const=True)
    cls.add_method('GetFrameDurationCode', 'uint8_t', [], is_const=True)
    cls.add_method('GetFrameDurationSec', 'ns3::Time', [], is_const=True)
    cls.add_method('GetFrequency', 'uint32_t', [], is_const=True)
    cls.add_method('GetGValue', 'double', [], is_const=True)
    cls.add_method('GetMobility', 'ns3::Ptr< ns3::Object >', [], is_virtual=True)
    cls.add_method('GetNfft', 'uint16_t', [], is_const=True)
    cls.add_method('GetNrBytes', 'uint64_t', [param('uint32_t', 'symbols'), param('ns3::WimaxPhy::ModulationType', 'modulationType')], is_const=True)
    cls.add_method('GetNrCarriers', 'uint8_t', [], is_const=True)
    cls.add_method('GetNrSymbols', 'uint64_t', [param('uint32_t', 'size'), param('ns3::WimaxPhy::ModulationType', 'modulationType')], is_const=True)
    cls.add_method('GetPhyType', 'ns3::WimaxPhy::PhyType', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('GetPsDuration', 'ns3::Time', [], is_const=True)
    cls.add_method('GetPsPerFrame', 'uint16_t', [], is_const=True)
    cls.add_method('GetPsPerSymbol', 'uint16_t', [], is_const=True)
    cls.add_method('GetReceiveCallback', 'ns3::Callback< void, ns3::Ptr< ns3::PacketBurst const >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', [], is_const=True)
    cls.add_method('GetRtg', 'uint16_t', [], is_const=True)
    cls.add_method('GetRxFrequency', 'uint64_t', [], is_const=True)
    cls.add_method('GetSamplingFactor', 'double', [], is_const=True)
    cls.add_method('GetSamplingFrequency', 'double', [], is_const=True)
    cls.add_method('GetScanningFrequency', 'uint64_t', [], is_const=True)
    cls.add_method('GetState', 'ns3::WimaxPhy::PhyState', [], is_const=True)
    cls.add_method('GetSymbolDuration', 'ns3::Time', [], is_const=True)
    cls.add_method('GetSymbolsPerFrame', 'uint32_t', [], is_const=True)
    cls.add_method('GetTransmissionTime', 'ns3::Time', [param('uint32_t', 'size'), param('ns3::WimaxPhy::ModulationType', 'modulationType')], is_const=True)
    cls.add_method('GetTtg', 'uint16_t', [], is_const=True)
    cls.add_method('GetTxFrequency', 'uint64_t', [], is_const=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('IsDuplex', 'bool', [], is_const=True)
    cls.add_method('Send', 'void', [param('ns3::SendParams *', 'params')], is_pure_virtual=True, is_virtual=True)
    cls.add_method('SetChannelBandwidth', 'void', [param('uint32_t', 'channelBandwidth')])
    cls.add_method('SetDataRates', 'void', [])
    cls.add_method('SetDevice', 'void', [param('ns3::Ptr< ns3::WimaxNetDevice >', 'device')])
    cls.add_method('SetDuplex', 'void', [param('uint64_t', 'rxFrequency'), param('uint64_t', 'txFrequency')])
    cls.add_method('SetFrameDuration', 'void', [param('ns3::Time', 'frameDuration')])
    cls.add_method('SetFrequency', 'void', [param('uint32_t', 'frequency')])
    cls.add_method('SetMobility', 'void', [param('ns3::Ptr< ns3::Object >', 'mobility')], is_virtual=True)
    cls.add_method('SetNrCarriers', 'void', [param('uint8_t', 'nrCarriers')])
    cls.add_method('SetPhyParameters', 'void', [])
    cls.add_method('SetPsDuration', 'void', [param('ns3::Time', 'psDuration')])
    cls.add_method('SetPsPerFrame', 'void', [param('uint16_t', 'psPerFrame')])
    cls.add_method('SetPsPerSymbol', 'void', [param('uint16_t', 'psPerSymbol')])
    cls.add_method('SetReceiveCallback', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::PacketBurst const >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'callback')])
    cls.add_method('SetScanningCallback', 'void', [], is_const=True)
    cls.add_method('SetSimplex', 'void', [param('uint64_t', 'frequency')])
    cls.add_method('SetState', 'void', [param('ns3::WimaxPhy::PhyState', 'state')])
    cls.add_method('SetSymbolDuration', 'void', [param('ns3::Time', 'symbolDuration')])
    cls.add_method('SetSymbolsPerFrame', 'void', [param('uint32_t', 'symbolsPerFrame')])
    cls.add_method('StartScanning', 'void', [param('uint64_t', 'frequency'), param('ns3::Time', 'timeout'), param('ns3::Callback< void, bool, unsigned long long, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'callback')])
    # Private pure-virtual hooks for concrete PHY subclasses.
    cls.add_method('DoAttach', 'void', [param('ns3::Ptr< ns3::WimaxChannel >', 'channel')], is_pure_virtual=True, visibility='private', is_virtual=True)
    cls.add_method('DoGetDataRate', 'uint32_t', [param('ns3::WimaxPhy::ModulationType', 'modulationType')], is_pure_virtual=True, is_const=True, visibility='private', is_virtual=True)
    cls.add_method('DoGetFrameDuration', 'ns3::Time', [param('uint8_t', 'frameDurationCode')], is_pure_virtual=True, is_const=True, visibility='private', is_virtual=True)
    cls.add_method('DoGetFrameDurationCode', 'uint8_t', [], is_pure_virtual=True, is_const=True, visibility='private', is_virtual=True)
    cls.add_method('DoGetGValue', 'double', [], is_pure_virtual=True, is_const=True, visibility='private', is_virtual=True)
    cls.add_method('DoGetNfft', 'uint16_t', [], is_pure_virtual=True, is_const=True, visibility='private', is_virtual=True)
    cls.add_method('DoGetNrBytes', 'uint64_t', [param('uint32_t', 'symbols'), param('ns3::WimaxPhy::ModulationType', 'modulationType')], is_pure_virtual=True, is_const=True, visibility='private', is_virtual=True)
    cls.add_method('DoGetNrSymbols', 'uint64_t', [param('uint32_t', 'size'), param('ns3::WimaxPhy::ModulationType', 'modulationType')], is_pure_virtual=True, is_const=True, visibility='private', is_virtual=True)
    cls.add_method('DoGetRtg', 'uint16_t', [], is_pure_virtual=True, is_const=True, visibility='private', is_virtual=True)
    cls.add_method('DoGetSamplingFactor', 'double', [], is_pure_virtual=True, is_const=True, visibility='private', is_virtual=True)
    cls.add_method('DoGetSamplingFrequency', 'double', [], is_pure_virtual=True, is_const=True, visibility='private', is_virtual=True)
    cls.add_method('DoGetTransmissionTime', 'ns3::Time', [param('uint32_t', 'size'), param('ns3::WimaxPhy::ModulationType', 'modulationType')], is_pure_virtual=True, is_const=True, visibility='private', is_virtual=True)
    cls.add_method('DoGetTtg', 'uint16_t', [], is_pure_virtual=True, is_const=True, visibility='private', is_virtual=True)
    cls.add_method('DoSetDataRates', 'void', [], is_pure_virtual=True, visibility='private', is_virtual=True)
    cls.add_method('DoSetPhyParameters', 'void', [], is_pure_virtual=True, visibility='private', is_virtual=True)
    return
def register_Ns3ZetaRandomVariable_methods(root_module, cls):
    """Register Python bindings for ns3::ZetaRandomVariable (random-variable-stream.h).

    Auto-generated pybindgen registration: type-id, default constructor,
    parameter accessor, and the GetValue/GetInteger overload pairs
    (explicit-alpha and stream-default forms).
    """
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_constructor([])
    cls.add_method('GetAlpha', 'double', [], is_const=True)
    # Overloads taking an explicit alpha.
    cls.add_method('GetValue', 'double', [param('double', 'alpha')])
    cls.add_method('GetInteger', 'uint32_t', [param('uint32_t', 'alpha')])
    # Virtual overloads using the stream's configured parameters.
    cls.add_method('GetValue', 'double', [], is_virtual=True)
    cls.add_method('GetInteger', 'uint32_t', [], is_virtual=True)
    return
def register_Ns3ZipfRandomVariable_methods(root_module, cls):
    """Register Python bindings for ns3::ZipfRandomVariable (random-variable-stream.h).

    Auto-generated pybindgen registration: type-id, default constructor,
    parameter accessors (n, alpha), and the GetValue/GetInteger overload
    pairs (explicit-parameter and stream-default forms).
    """
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_constructor([])
    cls.add_method('GetN', 'uint32_t', [], is_const=True)
    cls.add_method('GetAlpha', 'double', [], is_const=True)
    # Overloads taking explicit distribution parameters.
    cls.add_method('GetValue', 'double', [param('uint32_t', 'n'), param('double', 'alpha')])
    cls.add_method('GetInteger', 'uint32_t', [param('uint32_t', 'n'), param('uint32_t', 'alpha')])
    # Virtual overloads using the stream's configured parameters.
    cls.add_method('GetValue', 'double', [], is_virtual=True)
    cls.add_method('GetInteger', 'uint32_t', [], is_virtual=True)
    return
def register_Ns3AttributeAccessor_methods(root_module, cls):
    """Register Python bindings for ns3::AttributeAccessor (attribute.h).

    Auto-generated pybindgen registration for the abstract accessor
    interface: Get/Set plus the HasGetter/HasSetter capability queries.
    """
    cls.add_constructor([param('ns3::AttributeAccessor const &', 'arg0')])
    cls.add_constructor([])
    cls.add_method('Get', 'bool', [param('ns3::ObjectBase const *', 'object'), param('ns3::AttributeValue &', 'attribute')], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('HasGetter', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('HasSetter', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    # Set does not take ownership of the target object.
    cls.add_method('Set', 'bool', [param('ns3::ObjectBase *', 'object', transfer_ownership=False), param('ns3::AttributeValue const &', 'value')], is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def register_Ns3AttributeChecker_methods(root_module, cls):
    """Register Python bindings for ns3::AttributeChecker (attribute.h).

    Auto-generated pybindgen registration for the abstract checker
    interface: validation (Check), value copying/creation, and
    type-information queries.
    """
    cls.add_constructor([param('ns3::AttributeChecker const &', 'arg0')])
    cls.add_constructor([])
    cls.add_method('Check', 'bool', [param('ns3::AttributeValue const &', 'value')], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('Copy', 'bool', [param('ns3::AttributeValue const &', 'source'), param('ns3::AttributeValue &', 'destination')], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('Create', 'ns3::Ptr< ns3::AttributeValue >', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('CreateValidValue', 'ns3::Ptr< ns3::AttributeValue >', [param('ns3::AttributeValue const &', 'value')], is_const=True)
    cls.add_method('GetUnderlyingTypeInformation', 'std::string', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('GetValueTypeName', 'std::string', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('HasUnderlyingTypeInformation', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def register_Ns3AttributeValue_methods(root_module, cls):
    """Register Python bindings for ns3::AttributeValue (attribute.h).

    Auto-generated pybindgen registration for the abstract value
    interface: Copy plus string (de)serialization against a checker.
    """
    cls.add_constructor([param('ns3::AttributeValue const &', 'arg0')])
    cls.add_constructor([])
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_pure_virtual=True, is_virtual=True)
    cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def register_Ns3BSScheduler_methods(root_module, cls):
    """Register Python bindings for ns3::BSScheduler (bs-scheduler.h).

    Auto-generated pybindgen registration for the abstract base-station
    scheduler: constructors, downlink-burst management, and the
    pure-virtual scheduling hooks implemented by concrete schedulers.
    """
    # Constructors: copy, default, and base-station-device.
    cls.add_constructor([param('ns3::BSScheduler const &', 'arg0')])
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Ptr< ns3::BaseStationNetDevice >', 'bs')])
    cls.add_method('AddDownlinkBurst', 'void', [param('ns3::Ptr< ns3::WimaxConnection const >', 'connection'), param('uint8_t', 'diuc'), param('ns3::WimaxPhy::ModulationType', 'modulationType'), param('ns3::Ptr< ns3::PacketBurst >', 'burst')], is_pure_virtual=True, is_virtual=True)
    cls.add_method('CheckForFragmentation', 'bool', [param('ns3::Ptr< ns3::WimaxConnection >', 'connection'), param('int', 'availableSymbols'), param('ns3::WimaxPhy::ModulationType', 'modulationType')])
    cls.add_method('CreateUgsBurst', 'ns3::Ptr< ns3::PacketBurst >', [param('ns3::ServiceFlow *', 'serviceFlow'), param('ns3::WimaxPhy::ModulationType', 'modulationType'), param('uint32_t', 'availableSymbols')], is_pure_virtual=True, is_virtual=True)
    cls.add_method('GetBs', 'ns3::Ptr< ns3::BaseStationNetDevice >', [], is_virtual=True)
    cls.add_method('GetDownlinkBursts', 'std::list< std::pair< ns3::OfdmDlMapIe *, ns3::Ptr< ns3::PacketBurst > > > *', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('Schedule', 'void', [], is_pure_virtual=True, is_virtual=True)
    cls.add_method('SelectConnection', 'bool', [param('ns3::Ptr< ns3::WimaxConnection > &', 'connection')], is_pure_virtual=True, is_virtual=True)
    cls.add_method('SetBs', 'void', [param('ns3::Ptr< ns3::BaseStationNetDevice >', 'bs')], is_virtual=True)
    return
def register_Ns3BSSchedulerRtps_methods(root_module, cls):
    """Register Python bindings for ns3::BSSchedulerRtps on the pybindgen wrapper `cls`.

    Adds the constructors plus the rtPS downlink-scheduler member functions
    (per-service-class scheduling entry points and connection selectors).
    NOTE(review): this looks like PyBindGen-generated binding code — hand
    edits may be overwritten the next time the bindings are regenerated.
    """
    ## bs-scheduler-rtps.h (module 'wimax'): ns3::BSSchedulerRtps::BSSchedulerRtps(ns3::BSSchedulerRtps const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::BSSchedulerRtps const &', 'arg0')])
    ## bs-scheduler-rtps.h (module 'wimax'): ns3::BSSchedulerRtps::BSSchedulerRtps() [constructor]
    cls.add_constructor([])
    ## bs-scheduler-rtps.h (module 'wimax'): ns3::BSSchedulerRtps::BSSchedulerRtps(ns3::Ptr<ns3::BaseStationNetDevice> bs) [constructor]
    cls.add_constructor([param('ns3::Ptr< ns3::BaseStationNetDevice >', 'bs')])
    ## bs-scheduler-rtps.h (module 'wimax'): void ns3::BSSchedulerRtps::AddDownlinkBurst(ns3::Ptr<const ns3::WimaxConnection> connection, uint8_t diuc, ns3::WimaxPhy::ModulationType modulationType, ns3::Ptr<ns3::PacketBurst> burst) [member function]
    cls.add_method('AddDownlinkBurst',
                   'void',
                   [param('ns3::Ptr< ns3::WimaxConnection const >', 'connection'), param('uint8_t', 'diuc'), param('ns3::WimaxPhy::ModulationType', 'modulationType'), param('ns3::Ptr< ns3::PacketBurst >', 'burst')],
                   is_virtual=True)
    ## bs-scheduler-rtps.h (module 'wimax'): void ns3::BSSchedulerRtps::BSSchedulerBEConnection(uint32_t & availableSymbols) [member function]
    cls.add_method('BSSchedulerBEConnection',
                   'void',
                   [param('uint32_t &', 'availableSymbols')])
    ## bs-scheduler-rtps.h (module 'wimax'): void ns3::BSSchedulerRtps::BSSchedulerBasicConnection(uint32_t & availableSymbols) [member function]
    cls.add_method('BSSchedulerBasicConnection',
                   'void',
                   [param('uint32_t &', 'availableSymbols')])
    ## bs-scheduler-rtps.h (module 'wimax'): void ns3::BSSchedulerRtps::BSSchedulerBroadcastConnection(uint32_t & availableSymbols) [member function]
    cls.add_method('BSSchedulerBroadcastConnection',
                   'void',
                   [param('uint32_t &', 'availableSymbols')])
    ## bs-scheduler-rtps.h (module 'wimax'): void ns3::BSSchedulerRtps::BSSchedulerInitialRangingConnection(uint32_t & availableSymbols) [member function]
    cls.add_method('BSSchedulerInitialRangingConnection',
                   'void',
                   [param('uint32_t &', 'availableSymbols')])
    ## bs-scheduler-rtps.h (module 'wimax'): void ns3::BSSchedulerRtps::BSSchedulerNRTPSConnection(uint32_t & availableSymbols) [member function]
    cls.add_method('BSSchedulerNRTPSConnection',
                   'void',
                   [param('uint32_t &', 'availableSymbols')])
    ## bs-scheduler-rtps.h (module 'wimax'): void ns3::BSSchedulerRtps::BSSchedulerPrimaryConnection(uint32_t & availableSymbols) [member function]
    cls.add_method('BSSchedulerPrimaryConnection',
                   'void',
                   [param('uint32_t &', 'availableSymbols')])
    ## bs-scheduler-rtps.h (module 'wimax'): void ns3::BSSchedulerRtps::BSSchedulerRTPSConnection(uint32_t & availableSymbols) [member function]
    cls.add_method('BSSchedulerRTPSConnection',
                   'void',
                   [param('uint32_t &', 'availableSymbols')])
    ## bs-scheduler-rtps.h (module 'wimax'): void ns3::BSSchedulerRtps::BSSchedulerUGSConnection(uint32_t & availableSymbols) [member function]
    cls.add_method('BSSchedulerUGSConnection',
                   'void',
                   [param('uint32_t &', 'availableSymbols')])
    ## bs-scheduler-rtps.h (module 'wimax'): ns3::Ptr<ns3::PacketBurst> ns3::BSSchedulerRtps::CreateUgsBurst(ns3::ServiceFlow * serviceFlow, ns3::WimaxPhy::ModulationType modulationType, uint32_t availableSymbols) [member function]
    cls.add_method('CreateUgsBurst',
                   'ns3::Ptr< ns3::PacketBurst >',
                   [param('ns3::ServiceFlow *', 'serviceFlow'), param('ns3::WimaxPhy::ModulationType', 'modulationType'), param('uint32_t', 'availableSymbols')],
                   is_virtual=True)
    ## bs-scheduler-rtps.h (module 'wimax'): std::list<std::pair<ns3::OfdmDlMapIe*, ns3::Ptr<ns3::PacketBurst> >,std::allocator<std::pair<ns3::OfdmDlMapIe*, ns3::Ptr<ns3::PacketBurst> > > > * ns3::BSSchedulerRtps::GetDownlinkBursts() const [member function]
    cls.add_method('GetDownlinkBursts',
                   'std::list< std::pair< ns3::OfdmDlMapIe *, ns3::Ptr< ns3::PacketBurst > > > *',
                   [],
                   is_const=True, is_virtual=True)
    ## bs-scheduler-rtps.h (module 'wimax'): static ns3::TypeId ns3::BSSchedulerRtps::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## bs-scheduler-rtps.h (module 'wimax'): void ns3::BSSchedulerRtps::Schedule() [member function]
    cls.add_method('Schedule',
                   'void',
                   [],
                   is_virtual=True)
    ## bs-scheduler-rtps.h (module 'wimax'): bool ns3::BSSchedulerRtps::SelectBEConnection(ns3::Ptr<ns3::WimaxConnection> & connection) [member function]
    cls.add_method('SelectBEConnection',
                   'bool',
                   [param('ns3::Ptr< ns3::WimaxConnection > &', 'connection')])
    ## bs-scheduler-rtps.h (module 'wimax'): bool ns3::BSSchedulerRtps::SelectConnection(ns3::Ptr<ns3::WimaxConnection> & connection) [member function]
    cls.add_method('SelectConnection',
                   'bool',
                   [param('ns3::Ptr< ns3::WimaxConnection > &', 'connection')],
                   is_virtual=True)
    ## bs-scheduler-rtps.h (module 'wimax'): bool ns3::BSSchedulerRtps::SelectIRandBCConnection(ns3::Ptr<ns3::WimaxConnection> & connection) [member function]
    cls.add_method('SelectIRandBCConnection',
                   'bool',
                   [param('ns3::Ptr< ns3::WimaxConnection > &', 'connection')])
    ## bs-scheduler-rtps.h (module 'wimax'): bool ns3::BSSchedulerRtps::SelectMenagementConnection(ns3::Ptr<ns3::WimaxConnection> & connection) [member function]
    cls.add_method('SelectMenagementConnection',
                   'bool',
                   [param('ns3::Ptr< ns3::WimaxConnection > &', 'connection')])
    ## bs-scheduler-rtps.h (module 'wimax'): bool ns3::BSSchedulerRtps::SelectNRTPSConnection(ns3::Ptr<ns3::WimaxConnection> & connection) [member function]
    cls.add_method('SelectNRTPSConnection',
                   'bool',
                   [param('ns3::Ptr< ns3::WimaxConnection > &', 'connection')])
    ## bs-scheduler-rtps.h (module 'wimax'): bool ns3::BSSchedulerRtps::SelectRTPSConnection(ns3::Ptr<ns3::WimaxConnection> & connection) [member function]
    cls.add_method('SelectRTPSConnection',
                   'bool',
                   [param('ns3::Ptr< ns3::WimaxConnection > &', 'connection')])
    ## bs-scheduler-rtps.h (module 'wimax'): bool ns3::BSSchedulerRtps::SelectUGSConnection(ns3::Ptr<ns3::WimaxConnection> & connection) [member function]
    cls.add_method('SelectUGSConnection',
                   'bool',
                   [param('ns3::Ptr< ns3::WimaxConnection > &', 'connection')])
    return
def register_Ns3BSSchedulerSimple_methods(root_module, cls):
    """Register Python bindings for ns3::BSSchedulerSimple on the pybindgen wrapper `cls`.

    Adds the constructors and the simple downlink-scheduler overrides
    (AddDownlinkBurst, CreateUgsBurst, GetDownlinkBursts, Schedule,
    SelectConnection) plus the static GetTypeId.
    """
    ## bs-scheduler-simple.h (module 'wimax'): ns3::BSSchedulerSimple::BSSchedulerSimple(ns3::BSSchedulerSimple const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::BSSchedulerSimple const &', 'arg0')])
    ## bs-scheduler-simple.h (module 'wimax'): ns3::BSSchedulerSimple::BSSchedulerSimple() [constructor]
    cls.add_constructor([])
    ## bs-scheduler-simple.h (module 'wimax'): ns3::BSSchedulerSimple::BSSchedulerSimple(ns3::Ptr<ns3::BaseStationNetDevice> bs) [constructor]
    cls.add_constructor([param('ns3::Ptr< ns3::BaseStationNetDevice >', 'bs')])
    ## bs-scheduler-simple.h (module 'wimax'): void ns3::BSSchedulerSimple::AddDownlinkBurst(ns3::Ptr<const ns3::WimaxConnection> connection, uint8_t diuc, ns3::WimaxPhy::ModulationType modulationType, ns3::Ptr<ns3::PacketBurst> burst) [member function]
    cls.add_method('AddDownlinkBurst',
                   'void',
                   [param('ns3::Ptr< ns3::WimaxConnection const >', 'connection'), param('uint8_t', 'diuc'), param('ns3::WimaxPhy::ModulationType', 'modulationType'), param('ns3::Ptr< ns3::PacketBurst >', 'burst')],
                   is_virtual=True)
    ## bs-scheduler-simple.h (module 'wimax'): ns3::Ptr<ns3::PacketBurst> ns3::BSSchedulerSimple::CreateUgsBurst(ns3::ServiceFlow * serviceFlow, ns3::WimaxPhy::ModulationType modulationType, uint32_t availableSymbols) [member function]
    cls.add_method('CreateUgsBurst',
                   'ns3::Ptr< ns3::PacketBurst >',
                   [param('ns3::ServiceFlow *', 'serviceFlow'), param('ns3::WimaxPhy::ModulationType', 'modulationType'), param('uint32_t', 'availableSymbols')],
                   is_virtual=True)
    ## bs-scheduler-simple.h (module 'wimax'): std::list<std::pair<ns3::OfdmDlMapIe*, ns3::Ptr<ns3::PacketBurst> >,std::allocator<std::pair<ns3::OfdmDlMapIe*, ns3::Ptr<ns3::PacketBurst> > > > * ns3::BSSchedulerSimple::GetDownlinkBursts() const [member function]
    cls.add_method('GetDownlinkBursts',
                   'std::list< std::pair< ns3::OfdmDlMapIe *, ns3::Ptr< ns3::PacketBurst > > > *',
                   [],
                   is_const=True, is_virtual=True)
    ## bs-scheduler-simple.h (module 'wimax'): static ns3::TypeId ns3::BSSchedulerSimple::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## bs-scheduler-simple.h (module 'wimax'): void ns3::BSSchedulerSimple::Schedule() [member function]
    cls.add_method('Schedule',
                   'void',
                   [],
                   is_virtual=True)
    ## bs-scheduler-simple.h (module 'wimax'): bool ns3::BSSchedulerSimple::SelectConnection(ns3::Ptr<ns3::WimaxConnection> & connection) [member function]
    cls.add_method('SelectConnection',
                   'bool',
                   [param('ns3::Ptr< ns3::WimaxConnection > &', 'connection')],
                   is_virtual=True)
    return
def register_Ns3BandwidthRequestHeader_methods(root_module, cls):
    """Register Python bindings for ns3::BandwidthRequestHeader on the pybindgen wrapper `cls`.

    Adds the constructors, the Header serialization interface
    (Serialize/Deserialize/GetSerializedSize/Print), the field accessors
    (Br, Cid, Ec, Hcs, Ht, Type) and their setters, and check_hcs.
    """
    ## wimax-mac-header.h (module 'wimax'): ns3::BandwidthRequestHeader::BandwidthRequestHeader(ns3::BandwidthRequestHeader const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::BandwidthRequestHeader const &', 'arg0')])
    ## wimax-mac-header.h (module 'wimax'): ns3::BandwidthRequestHeader::BandwidthRequestHeader() [constructor]
    cls.add_constructor([])
    ## wimax-mac-header.h (module 'wimax'): uint32_t ns3::BandwidthRequestHeader::Deserialize(ns3::Buffer::Iterator start) [member function]
    cls.add_method('Deserialize',
                   'uint32_t',
                   [param('ns3::Buffer::Iterator', 'start')],
                   is_virtual=True)
    ## wimax-mac-header.h (module 'wimax'): uint32_t ns3::BandwidthRequestHeader::GetBr() const [member function]
    cls.add_method('GetBr',
                   'uint32_t',
                   [],
                   is_const=True)
    ## wimax-mac-header.h (module 'wimax'): ns3::Cid ns3::BandwidthRequestHeader::GetCid() const [member function]
    cls.add_method('GetCid',
                   'ns3::Cid',
                   [],
                   is_const=True)
    ## wimax-mac-header.h (module 'wimax'): uint8_t ns3::BandwidthRequestHeader::GetEc() const [member function]
    cls.add_method('GetEc',
                   'uint8_t',
                   [],
                   is_const=True)
    ## wimax-mac-header.h (module 'wimax'): uint8_t ns3::BandwidthRequestHeader::GetHcs() const [member function]
    cls.add_method('GetHcs',
                   'uint8_t',
                   [],
                   is_const=True)
    ## wimax-mac-header.h (module 'wimax'): uint8_t ns3::BandwidthRequestHeader::GetHt() const [member function]
    cls.add_method('GetHt',
                   'uint8_t',
                   [],
                   is_const=True)
    ## wimax-mac-header.h (module 'wimax'): ns3::TypeId ns3::BandwidthRequestHeader::GetInstanceTypeId() const [member function]
    cls.add_method('GetInstanceTypeId',
                   'ns3::TypeId',
                   [],
                   is_const=True, is_virtual=True)
    ## wimax-mac-header.h (module 'wimax'): std::string ns3::BandwidthRequestHeader::GetName() const [member function]
    cls.add_method('GetName',
                   'std::string',
                   [],
                   is_const=True)
    ## wimax-mac-header.h (module 'wimax'): uint32_t ns3::BandwidthRequestHeader::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize',
                   'uint32_t',
                   [],
                   is_const=True, is_virtual=True)
    ## wimax-mac-header.h (module 'wimax'): uint8_t ns3::BandwidthRequestHeader::GetType() const [member function]
    cls.add_method('GetType',
                   'uint8_t',
                   [],
                   is_const=True)
    ## wimax-mac-header.h (module 'wimax'): static ns3::TypeId ns3::BandwidthRequestHeader::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## wimax-mac-header.h (module 'wimax'): void ns3::BandwidthRequestHeader::Print(std::ostream & os) const [member function]
    cls.add_method('Print',
                   'void',
                   [param('std::ostream &', 'os')],
                   is_const=True, is_virtual=True)
    ## wimax-mac-header.h (module 'wimax'): void ns3::BandwidthRequestHeader::Serialize(ns3::Buffer::Iterator start) const [member function]
    cls.add_method('Serialize',
                   'void',
                   [param('ns3::Buffer::Iterator', 'start')],
                   is_const=True, is_virtual=True)
    ## wimax-mac-header.h (module 'wimax'): void ns3::BandwidthRequestHeader::SetBr(uint32_t br) [member function]
    cls.add_method('SetBr',
                   'void',
                   [param('uint32_t', 'br')])
    ## wimax-mac-header.h (module 'wimax'): void ns3::BandwidthRequestHeader::SetCid(ns3::Cid cid) [member function]
    cls.add_method('SetCid',
                   'void',
                   [param('ns3::Cid', 'cid')])
    ## wimax-mac-header.h (module 'wimax'): void ns3::BandwidthRequestHeader::SetEc(uint8_t ec) [member function]
    cls.add_method('SetEc',
                   'void',
                   [param('uint8_t', 'ec')])
    ## wimax-mac-header.h (module 'wimax'): void ns3::BandwidthRequestHeader::SetHcs(uint8_t hcs) [member function]
    cls.add_method('SetHcs',
                   'void',
                   [param('uint8_t', 'hcs')])
    ## wimax-mac-header.h (module 'wimax'): void ns3::BandwidthRequestHeader::SetHt(uint8_t HT) [member function]
    cls.add_method('SetHt',
                   'void',
                   [param('uint8_t', 'HT')])
    ## wimax-mac-header.h (module 'wimax'): void ns3::BandwidthRequestHeader::SetType(uint8_t type) [member function]
    cls.add_method('SetType',
                   'void',
                   [param('uint8_t', 'type')])
    ## wimax-mac-header.h (module 'wimax'): bool ns3::BandwidthRequestHeader::check_hcs() const [member function]
    cls.add_method('check_hcs',
                   'bool',
                   [],
                   is_const=True)
    return
def register_Ns3BsServiceFlowManager_methods(root_module, cls):
    """Register Python bindings for ns3::BsServiceFlowManager on the pybindgen wrapper `cls`.

    Adds the constructors, service-flow add/allocate/lookup methods, DSA
    request/ack processing, DoDispose, and SetMaxDsaRspRetries.
    """
    ## bs-service-flow-manager.h (module 'wimax'): ns3::BsServiceFlowManager::BsServiceFlowManager(ns3::BsServiceFlowManager const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::BsServiceFlowManager const &', 'arg0')])
    ## bs-service-flow-manager.h (module 'wimax'): ns3::BsServiceFlowManager::BsServiceFlowManager(ns3::Ptr<ns3::BaseStationNetDevice> device) [constructor]
    cls.add_constructor([param('ns3::Ptr< ns3::BaseStationNetDevice >', 'device')])
    ## bs-service-flow-manager.h (module 'wimax'): void ns3::BsServiceFlowManager::AddMulticastServiceFlow(ns3::ServiceFlow sf, ns3::WimaxPhy::ModulationType modulation) [member function]
    cls.add_method('AddMulticastServiceFlow',
                   'void',
                   [param('ns3::ServiceFlow', 'sf'), param('ns3::WimaxPhy::ModulationType', 'modulation')])
    ## bs-service-flow-manager.h (module 'wimax'): void ns3::BsServiceFlowManager::AddServiceFlow(ns3::ServiceFlow * serviceFlow) [member function]
    cls.add_method('AddServiceFlow',
                   'void',
                   [param('ns3::ServiceFlow *', 'serviceFlow')])
    ## bs-service-flow-manager.h (module 'wimax'): void ns3::BsServiceFlowManager::AllocateServiceFlows(ns3::DsaReq const & dsaReq, ns3::Cid cid) [member function]
    cls.add_method('AllocateServiceFlows',
                   'void',
                   [param('ns3::DsaReq const &', 'dsaReq'), param('ns3::Cid', 'cid')])
    ## bs-service-flow-manager.h (module 'wimax'): void ns3::BsServiceFlowManager::DoDispose() [member function]
    cls.add_method('DoDispose',
                   'void',
                   [],
                   is_virtual=True)
    ## bs-service-flow-manager.h (module 'wimax'): ns3::EventId ns3::BsServiceFlowManager::GetDsaAckTimeoutEvent() const [member function]
    cls.add_method('GetDsaAckTimeoutEvent',
                   'ns3::EventId',
                   [],
                   is_const=True)
    ## bs-service-flow-manager.h (module 'wimax'): ns3::ServiceFlow * ns3::BsServiceFlowManager::GetServiceFlow(uint32_t sfid) const [member function]
    cls.add_method('GetServiceFlow',
                   'ns3::ServiceFlow *',
                   [param('uint32_t', 'sfid')],
                   is_const=True)
    ## bs-service-flow-manager.h (module 'wimax'): ns3::ServiceFlow * ns3::BsServiceFlowManager::GetServiceFlow(ns3::Cid cid) const [member function]
    cls.add_method('GetServiceFlow',
                   'ns3::ServiceFlow *',
                   [param('ns3::Cid', 'cid')],
                   is_const=True)
    ## bs-service-flow-manager.h (module 'wimax'): std::vector<ns3::ServiceFlow*,std::allocator<ns3::ServiceFlow*> > ns3::BsServiceFlowManager::GetServiceFlows(ns3::ServiceFlow::SchedulingType schedulingType) const [member function]
    cls.add_method('GetServiceFlows',
                   'std::vector< ns3::ServiceFlow * >',
                   [param('ns3::ServiceFlow::SchedulingType', 'schedulingType')],
                   is_const=True)
    ## bs-service-flow-manager.h (module 'wimax'): void ns3::BsServiceFlowManager::ProcessDsaAck(ns3::DsaAck const & dsaAck, ns3::Cid cid) [member function]
    cls.add_method('ProcessDsaAck',
                   'void',
                   [param('ns3::DsaAck const &', 'dsaAck'), param('ns3::Cid', 'cid')])
    ## bs-service-flow-manager.h (module 'wimax'): ns3::ServiceFlow * ns3::BsServiceFlowManager::ProcessDsaReq(ns3::DsaReq const & dsaReq, ns3::Cid cid) [member function]
    cls.add_method('ProcessDsaReq',
                   'ns3::ServiceFlow *',
                   [param('ns3::DsaReq const &', 'dsaReq'), param('ns3::Cid', 'cid')])
    ## bs-service-flow-manager.h (module 'wimax'): void ns3::BsServiceFlowManager::SetMaxDsaRspRetries(uint8_t maxDsaRspRetries) [member function]
    cls.add_method('SetMaxDsaRspRetries',
                   'void',
                   [param('uint8_t', 'maxDsaRspRetries')])
    return
def register_Ns3CallbackChecker_methods(root_module, cls):
    """Register Python bindings for ns3::CallbackChecker on the pybindgen wrapper `cls`.

    Only the default and copy constructors are exposed.
    """
    ## callback.h (module 'core'): ns3::CallbackChecker::CallbackChecker() [constructor]
    cls.add_constructor([])
    ## callback.h (module 'core'): ns3::CallbackChecker::CallbackChecker(ns3::CallbackChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::CallbackChecker const &', 'arg0')])
    return
def register_Ns3CallbackImplBase_methods(root_module, cls):
    """Register Python bindings for ns3::CallbackImplBase on the pybindgen wrapper `cls`.

    Exposes the constructors and the pure-virtual IsEqual comparison.
    """
    ## callback.h (module 'core'): ns3::CallbackImplBase::CallbackImplBase() [constructor]
    cls.add_constructor([])
    ## callback.h (module 'core'): ns3::CallbackImplBase::CallbackImplBase(ns3::CallbackImplBase const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::CallbackImplBase const &', 'arg0')])
    ## callback.h (module 'core'): bool ns3::CallbackImplBase::IsEqual(ns3::Ptr<ns3::CallbackImplBase const> other) const [member function]
    cls.add_method('IsEqual',
                   'bool',
                   [param('ns3::Ptr< ns3::CallbackImplBase const >', 'other')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def register_Ns3CallbackValue_methods(root_module, cls):
    """Register Python bindings for ns3::CallbackValue on the pybindgen wrapper `cls`.

    Adds the constructors and the AttributeValue interface
    (Copy, DeserializeFromString, SerializeToString, Set).
    """
    ## callback.h (module 'core'): ns3::CallbackValue::CallbackValue(ns3::CallbackValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::CallbackValue const &', 'arg0')])
    ## callback.h (module 'core'): ns3::CallbackValue::CallbackValue() [constructor]
    cls.add_constructor([])
    ## callback.h (module 'core'): ns3::CallbackValue::CallbackValue(ns3::CallbackBase const & base) [constructor]
    cls.add_constructor([param('ns3::CallbackBase const &', 'base')])
    ## callback.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::CallbackValue::Copy() const [member function]
    cls.add_method('Copy',
                   'ns3::Ptr< ns3::AttributeValue >',
                   [],
                   is_const=True, is_virtual=True)
    ## callback.h (module 'core'): bool ns3::CallbackValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString',
                   'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    ## callback.h (module 'core'): std::string ns3::CallbackValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString',
                   'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    ## callback.h (module 'core'): void ns3::CallbackValue::Set(ns3::CallbackBase base) [member function]
    cls.add_method('Set',
                   'void',
                   [param('ns3::CallbackBase', 'base')])
    return
def register_Ns3Channel_methods(root_module, cls):
    """Register Python bindings for ns3::Channel on the pybindgen wrapper `cls`.

    Adds the constructors, the pure-virtual device accessors
    (GetDevice, GetNDevices), GetId, and the static GetTypeId.
    """
    ## channel.h (module 'network'): ns3::Channel::Channel(ns3::Channel const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Channel const &', 'arg0')])
    ## channel.h (module 'network'): ns3::Channel::Channel() [constructor]
    cls.add_constructor([])
    ## channel.h (module 'network'): ns3::Ptr<ns3::NetDevice> ns3::Channel::GetDevice(uint32_t i) const [member function]
    cls.add_method('GetDevice',
                   'ns3::Ptr< ns3::NetDevice >',
                   [param('uint32_t', 'i')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## channel.h (module 'network'): uint32_t ns3::Channel::GetId() const [member function]
    cls.add_method('GetId',
                   'uint32_t',
                   [],
                   is_const=True)
    ## channel.h (module 'network'): uint32_t ns3::Channel::GetNDevices() const [member function]
    cls.add_method('GetNDevices',
                   'uint32_t',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## channel.h (module 'network'): static ns3::TypeId ns3::Channel::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    return
def register_Ns3ConnectionManager_methods(root_module, cls):
    """Register Python bindings for ns3::ConnectionManager on the pybindgen wrapper `cls`.

    Adds the constructors, connection creation/lookup methods, packet
    queries (GetNPackets, HasPackets), DoDispose, SetCidFactory, and the
    static GetTypeId.
    """
    ## connection-manager.h (module 'wimax'): ns3::ConnectionManager::ConnectionManager(ns3::ConnectionManager const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ConnectionManager const &', 'arg0')])
    ## connection-manager.h (module 'wimax'): ns3::ConnectionManager::ConnectionManager() [constructor]
    cls.add_constructor([])
    ## connection-manager.h (module 'wimax'): void ns3::ConnectionManager::AddConnection(ns3::Ptr<ns3::WimaxConnection> connection, ns3::Cid::Type type) [member function]
    cls.add_method('AddConnection',
                   'void',
                   [param('ns3::Ptr< ns3::WimaxConnection >', 'connection'), param('ns3::Cid::Type', 'type')])
    ## connection-manager.h (module 'wimax'): void ns3::ConnectionManager::AllocateManagementConnections(ns3::SSRecord * ssRecord, ns3::RngRsp * rngrsp) [member function]
    cls.add_method('AllocateManagementConnections',
                   'void',
                   [param('ns3::SSRecord *', 'ssRecord'), param('ns3::RngRsp *', 'rngrsp')])
    ## connection-manager.h (module 'wimax'): ns3::Ptr<ns3::WimaxConnection> ns3::ConnectionManager::CreateConnection(ns3::Cid::Type type) [member function]
    cls.add_method('CreateConnection',
                   'ns3::Ptr< ns3::WimaxConnection >',
                   [param('ns3::Cid::Type', 'type')])
    ## connection-manager.h (module 'wimax'): void ns3::ConnectionManager::DoDispose() [member function]
    cls.add_method('DoDispose',
                   'void',
                   [],
                   is_virtual=True)
    ## connection-manager.h (module 'wimax'): ns3::Ptr<ns3::WimaxConnection> ns3::ConnectionManager::GetConnection(ns3::Cid cid) [member function]
    cls.add_method('GetConnection',
                   'ns3::Ptr< ns3::WimaxConnection >',
                   [param('ns3::Cid', 'cid')])
    ## connection-manager.h (module 'wimax'): std::vector<ns3::Ptr<ns3::WimaxConnection>, std::allocator<ns3::Ptr<ns3::WimaxConnection> > > ns3::ConnectionManager::GetConnections(ns3::Cid::Type type) const [member function]
    cls.add_method('GetConnections',
                   'std::vector< ns3::Ptr< ns3::WimaxConnection > >',
                   [param('ns3::Cid::Type', 'type')],
                   is_const=True)
    ## connection-manager.h (module 'wimax'): uint32_t ns3::ConnectionManager::GetNPackets(ns3::Cid::Type type, ns3::ServiceFlow::SchedulingType schedulingType) const [member function]
    cls.add_method('GetNPackets',
                   'uint32_t',
                   [param('ns3::Cid::Type', 'type'), param('ns3::ServiceFlow::SchedulingType', 'schedulingType')],
                   is_const=True)
    ## connection-manager.h (module 'wimax'): static ns3::TypeId ns3::ConnectionManager::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## connection-manager.h (module 'wimax'): bool ns3::ConnectionManager::HasPackets() const [member function]
    cls.add_method('HasPackets',
                   'bool',
                   [],
                   is_const=True)
    ## connection-manager.h (module 'wimax'): void ns3::ConnectionManager::SetCidFactory(ns3::CidFactory * cidFactory) [member function]
    cls.add_method('SetCidFactory',
                   'void',
                   [param('ns3::CidFactory *', 'cidFactory')])
    return
def register_Ns3ConstantRandomVariable_methods(root_module, cls):
    """Register Python bindings for ns3::ConstantRandomVariable on the pybindgen wrapper `cls`.

    Adds the static GetTypeId, the default constructor, GetConstant, and
    both the parameterized and no-argument GetValue/GetInteger overloads.
    """
    ## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::ConstantRandomVariable::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## random-variable-stream.h (module 'core'): ns3::ConstantRandomVariable::ConstantRandomVariable() [constructor]
    cls.add_constructor([])
    ## random-variable-stream.h (module 'core'): double ns3::ConstantRandomVariable::GetConstant() const [member function]
    cls.add_method('GetConstant',
                   'double',
                   [],
                   is_const=True)
    ## random-variable-stream.h (module 'core'): double ns3::ConstantRandomVariable::GetValue(double constant) [member function]
    cls.add_method('GetValue',
                   'double',
                   [param('double', 'constant')])
    ## random-variable-stream.h (module 'core'): uint32_t ns3::ConstantRandomVariable::GetInteger(uint32_t constant) [member function]
    cls.add_method('GetInteger',
                   'uint32_t',
                   [param('uint32_t', 'constant')])
    ## random-variable-stream.h (module 'core'): double ns3::ConstantRandomVariable::GetValue() [member function]
    cls.add_method('GetValue',
                   'double',
                   [],
                   is_virtual=True)
    ## random-variable-stream.h (module 'core'): uint32_t ns3::ConstantRandomVariable::GetInteger() [member function]
    cls.add_method('GetInteger',
                   'uint32_t',
                   [],
                   is_virtual=True)
    return
def register_Ns3Dcd_methods(root_module, cls):
    """Register Python bindings for ns3::Dcd on the pybindgen wrapper `cls`.

    Adds the constructors, the downlink-channel-descriptor accessors
    (channel encodings, configuration change count, burst profiles), and
    the Header serialization interface.
    """
    ## dl-mac-messages.h (module 'wimax'): ns3::Dcd::Dcd(ns3::Dcd const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Dcd const &', 'arg0')])
    ## dl-mac-messages.h (module 'wimax'): ns3::Dcd::Dcd() [constructor]
    cls.add_constructor([])
    ## dl-mac-messages.h (module 'wimax'): void ns3::Dcd::AddDlBurstProfile(ns3::OfdmDlBurstProfile dlBurstProfile) [member function]
    cls.add_method('AddDlBurstProfile',
                   'void',
                   [param('ns3::OfdmDlBurstProfile', 'dlBurstProfile')])
    ## dl-mac-messages.h (module 'wimax'): uint32_t ns3::Dcd::Deserialize(ns3::Buffer::Iterator start) [member function]
    cls.add_method('Deserialize',
                   'uint32_t',
                   [param('ns3::Buffer::Iterator', 'start')],
                   is_virtual=True)
    ## dl-mac-messages.h (module 'wimax'): ns3::OfdmDcdChannelEncodings ns3::Dcd::GetChannelEncodings() const [member function]
    cls.add_method('GetChannelEncodings',
                   'ns3::OfdmDcdChannelEncodings',
                   [],
                   is_const=True)
    ## dl-mac-messages.h (module 'wimax'): uint8_t ns3::Dcd::GetConfigurationChangeCount() const [member function]
    cls.add_method('GetConfigurationChangeCount',
                   'uint8_t',
                   [],
                   is_const=True)
    ## dl-mac-messages.h (module 'wimax'): std::vector<ns3::OfdmDlBurstProfile, std::allocator<ns3::OfdmDlBurstProfile> > ns3::Dcd::GetDlBurstProfiles() const [member function]
    cls.add_method('GetDlBurstProfiles',
                   'std::vector< ns3::OfdmDlBurstProfile >',
                   [],
                   is_const=True)
    ## dl-mac-messages.h (module 'wimax'): ns3::TypeId ns3::Dcd::GetInstanceTypeId() const [member function]
    cls.add_method('GetInstanceTypeId',
                   'ns3::TypeId',
                   [],
                   is_const=True, is_virtual=True)
    ## dl-mac-messages.h (module 'wimax'): std::string ns3::Dcd::GetName() const [member function]
    cls.add_method('GetName',
                   'std::string',
                   [],
                   is_const=True)
    ## dl-mac-messages.h (module 'wimax'): uint8_t ns3::Dcd::GetNrDlBurstProfiles() const [member function]
    cls.add_method('GetNrDlBurstProfiles',
                   'uint8_t',
                   [],
                   is_const=True)
    ## dl-mac-messages.h (module 'wimax'): uint32_t ns3::Dcd::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize',
                   'uint32_t',
                   [],
                   is_const=True, is_virtual=True)
    ## dl-mac-messages.h (module 'wimax'): static ns3::TypeId ns3::Dcd::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## dl-mac-messages.h (module 'wimax'): void ns3::Dcd::Print(std::ostream & os) const [member function]
    cls.add_method('Print',
                   'void',
                   [param('std::ostream &', 'os')],
                   is_const=True, is_virtual=True)
    ## dl-mac-messages.h (module 'wimax'): void ns3::Dcd::Serialize(ns3::Buffer::Iterator start) const [member function]
    cls.add_method('Serialize',
                   'void',
                   [param('ns3::Buffer::Iterator', 'start')],
                   is_const=True, is_virtual=True)
    ## dl-mac-messages.h (module 'wimax'): void ns3::Dcd::SetChannelEncodings(ns3::OfdmDcdChannelEncodings channelEncodings) [member function]
    cls.add_method('SetChannelEncodings',
                   'void',
                   [param('ns3::OfdmDcdChannelEncodings', 'channelEncodings')])
    ## dl-mac-messages.h (module 'wimax'): void ns3::Dcd::SetConfigurationChangeCount(uint8_t configurationChangeCount) [member function]
    cls.add_method('SetConfigurationChangeCount',
                   'void',
                   [param('uint8_t', 'configurationChangeCount')])
    ## dl-mac-messages.h (module 'wimax'): void ns3::Dcd::SetNrDlBurstProfiles(uint8_t nrDlBurstProfiles) [member function]
    cls.add_method('SetNrDlBurstProfiles',
                   'void',
                   [param('uint8_t', 'nrDlBurstProfiles')])
    return
def register_Ns3DeterministicRandomVariable_methods(root_module, cls):
    """Register Python bindings for ns3::DeterministicRandomVariable on the pybindgen wrapper `cls`.

    Adds the static GetTypeId, default constructor, SetValueArray, and the
    virtual GetValue/GetInteger overrides.
    """
    ## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::DeterministicRandomVariable::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## random-variable-stream.h (module 'core'): ns3::DeterministicRandomVariable::DeterministicRandomVariable() [constructor]
    cls.add_constructor([])
    ## random-variable-stream.h (module 'core'): void ns3::DeterministicRandomVariable::SetValueArray(double * values, uint64_t length) [member function]
    cls.add_method('SetValueArray',
                   'void',
                   [param('double *', 'values'), param('uint64_t', 'length')])
    ## random-variable-stream.h (module 'core'): double ns3::DeterministicRandomVariable::GetValue() [member function]
    cls.add_method('GetValue',
                   'double',
                   [],
                   is_virtual=True)
    ## random-variable-stream.h (module 'core'): uint32_t ns3::DeterministicRandomVariable::GetInteger() [member function]
    cls.add_method('GetInteger',
                   'uint32_t',
                   [],
                   is_virtual=True)
    return
def register_Ns3DlMap_methods(root_module, cls):
    """Register Python bindings for ns3::DlMap on the pybindgen wrapper `cls`.

    Adds the constructors, DL-MAP element and field accessors
    (base station id, DCD count, map elements), and the Header
    serialization interface.
    """
    ## dl-mac-messages.h (module 'wimax'): ns3::DlMap::DlMap(ns3::DlMap const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::DlMap const &', 'arg0')])
    ## dl-mac-messages.h (module 'wimax'): ns3::DlMap::DlMap() [constructor]
    cls.add_constructor([])
    ## dl-mac-messages.h (module 'wimax'): void ns3::DlMap::AddDlMapElement(ns3::OfdmDlMapIe dlMapElement) [member function]
    cls.add_method('AddDlMapElement',
                   'void',
                   [param('ns3::OfdmDlMapIe', 'dlMapElement')])
    ## dl-mac-messages.h (module 'wimax'): uint32_t ns3::DlMap::Deserialize(ns3::Buffer::Iterator start) [member function]
    cls.add_method('Deserialize',
                   'uint32_t',
                   [param('ns3::Buffer::Iterator', 'start')],
                   is_virtual=True)
    ## dl-mac-messages.h (module 'wimax'): ns3::Mac48Address ns3::DlMap::GetBaseStationId() const [member function]
    cls.add_method('GetBaseStationId',
                   'ns3::Mac48Address',
                   [],
                   is_const=True)
    ## dl-mac-messages.h (module 'wimax'): uint8_t ns3::DlMap::GetDcdCount() const [member function]
    cls.add_method('GetDcdCount',
                   'uint8_t',
                   [],
                   is_const=True)
    ## dl-mac-messages.h (module 'wimax'): std::list<ns3::OfdmDlMapIe, std::allocator<ns3::OfdmDlMapIe> > ns3::DlMap::GetDlMapElements() const [member function]
    cls.add_method('GetDlMapElements',
                   'std::list< ns3::OfdmDlMapIe >',
                   [],
                   is_const=True)
    ## dl-mac-messages.h (module 'wimax'): ns3::TypeId ns3::DlMap::GetInstanceTypeId() const [member function]
    cls.add_method('GetInstanceTypeId',
                   'ns3::TypeId',
                   [],
                   is_const=True, is_virtual=True)
    ## dl-mac-messages.h (module 'wimax'): std::string ns3::DlMap::GetName() const [member function]
    cls.add_method('GetName',
                   'std::string',
                   [],
                   is_const=True)
    ## dl-mac-messages.h (module 'wimax'): uint32_t ns3::DlMap::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize',
                   'uint32_t',
                   [],
                   is_const=True, is_virtual=True)
    ## dl-mac-messages.h (module 'wimax'): static ns3::TypeId ns3::DlMap::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## dl-mac-messages.h (module 'wimax'): void ns3::DlMap::Print(std::ostream & os) const [member function]
    cls.add_method('Print',
                   'void',
                   [param('std::ostream &', 'os')],
                   is_const=True, is_virtual=True)
    ## dl-mac-messages.h (module 'wimax'): void ns3::DlMap::Serialize(ns3::Buffer::Iterator start) const [member function]
    cls.add_method('Serialize',
                   'void',
                   [param('ns3::Buffer::Iterator', 'start')],
                   is_const=True, is_virtual=True)
    ## dl-mac-messages.h (module 'wimax'): void ns3::DlMap::SetBaseStationId(ns3::Mac48Address baseStationID) [member function]
    cls.add_method('SetBaseStationId',
                   'void',
                   [param('ns3::Mac48Address', 'baseStationID')])
    ## dl-mac-messages.h (module 'wimax'): void ns3::DlMap::SetDcdCount(uint8_t dcdCount) [member function]
    cls.add_method('SetDcdCount',
                   'void',
                   [param('uint8_t', 'dcdCount')])
    return
def register_Ns3DsaAck_methods(root_module, cls):
    """Register ns3::DsaAck (mac-messages.h, module 'wimax') on the PyBindGen wrapper `cls`."""
    # Copy and default constructors.
    cls.add_constructor([param('ns3::DsaAck const &', 'arg0')])
    cls.add_constructor([])
    # Member functions, in generated order.
    cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'start')], is_virtual=True)
    cls.add_method('GetConfirmationCode', 'uint16_t', [], is_const=True)
    cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True)
    cls.add_method('GetName', 'std::string', [], is_const=True)
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True, is_virtual=True)
    cls.add_method('GetTransactionId', 'uint16_t', [], is_const=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True, is_virtual=True)
    cls.add_method('Serialize', 'void', [param('ns3::Buffer::Iterator', 'start')], is_const=True, is_virtual=True)
    cls.add_method('SetConfirmationCode', 'void', [param('uint16_t', 'confirmationCode')])
    cls.add_method('SetTransactionId', 'void', [param('uint16_t', 'transactionId')])
    return
def register_Ns3DsaReq_methods(root_module, cls):
    """Register ns3::DsaReq (mac-messages.h, module 'wimax') on the PyBindGen wrapper `cls`."""
    # Copy, default, and ServiceFlow-taking constructors.
    cls.add_constructor([param('ns3::DsaReq const &', 'arg0')])
    cls.add_constructor([])
    cls.add_constructor([param('ns3::ServiceFlow', 'sf')])
    # Member functions, in generated order.
    cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'start')], is_virtual=True)
    cls.add_method('GetCid', 'ns3::Cid', [], is_const=True)
    cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True)
    cls.add_method('GetName', 'std::string', [], is_const=True)
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True, is_virtual=True)
    cls.add_method('GetServiceFlow', 'ns3::ServiceFlow', [], is_const=True)
    cls.add_method('GetSfid', 'uint32_t', [], is_const=True)
    cls.add_method('GetTransactionId', 'uint16_t', [], is_const=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True, is_virtual=True)
    cls.add_method('Serialize', 'void', [param('ns3::Buffer::Iterator', 'start')], is_const=True, is_virtual=True)
    cls.add_method('SetCid', 'void', [param('ns3::Cid', 'cid')])
    cls.add_method('SetServiceFlow', 'void', [param('ns3::ServiceFlow', 'sf')])
    cls.add_method('SetSfid', 'void', [param('uint32_t', 'sfid')])
    cls.add_method('SetTransactionId', 'void', [param('uint16_t', 'transactionId')])
    return
def register_Ns3DsaRsp_methods(root_module, cls):
    """Register ns3::DsaRsp (mac-messages.h, module 'wimax') on the PyBindGen wrapper `cls`."""
    # Copy and default constructors.
    cls.add_constructor([param('ns3::DsaRsp const &', 'arg0')])
    cls.add_constructor([])
    # Member functions, in generated order.
    cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'start')], is_virtual=True)
    cls.add_method('GetCid', 'ns3::Cid', [], is_const=True)
    cls.add_method('GetConfirmationCode', 'uint16_t', [], is_const=True)
    cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True)
    cls.add_method('GetName', 'std::string', [], is_const=True)
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True, is_virtual=True)
    cls.add_method('GetServiceFlow', 'ns3::ServiceFlow', [], is_const=True)
    cls.add_method('GetSfid', 'uint32_t', [], is_const=True)
    cls.add_method('GetTransactionId', 'uint16_t', [], is_const=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True, is_virtual=True)
    cls.add_method('Serialize', 'void', [param('ns3::Buffer::Iterator', 'start')], is_const=True, is_virtual=True)
    cls.add_method('SetCid', 'void', [param('ns3::Cid', 'cid')])
    cls.add_method('SetConfirmationCode', 'void', [param('uint16_t', 'confirmationCode')])
    cls.add_method('SetServiceFlow', 'void', [param('ns3::ServiceFlow', 'sf')])
    cls.add_method('SetSfid', 'void', [param('uint32_t', 'sfid')])
    cls.add_method('SetTransactionId', 'void', [param('uint16_t', 'transactionId')])
    return
def register_Ns3EmpiricalRandomVariable_methods(root_module, cls):
    """Register ns3::EmpiricalRandomVariable (random-variable-stream.h, module 'core') on `cls`."""
    cls.add_constructor([])
    cls.add_method('CDF', 'void', [param('double', 'v'), param('double', 'c')])
    cls.add_method('GetInteger', 'uint32_t', [], is_virtual=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('GetValue', 'double', [], is_virtual=True)
    # Private virtual hooks of the empirical distribution implementation.
    cls.add_method('Interpolate', 'double',
                   [param('double', 'arg0'), param('double', 'arg1'), param('double', 'arg2'), param('double', 'arg3'), param('double', 'arg4')],
                   visibility='private', is_virtual=True)
    cls.add_method('Validate', 'void', [], visibility='private', is_virtual=True)
    return
def register_Ns3EmptyAttributeValue_methods(root_module, cls):
    """Register ns3::EmptyAttributeValue (attribute.h, module 'core') on the PyBindGen wrapper `cls`."""
    # Copy and default constructors.
    cls.add_constructor([param('ns3::EmptyAttributeValue const &', 'arg0')])
    cls.add_constructor([])
    # Private virtual AttributeValue interface.
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [],
                   is_const=True, visibility='private', is_virtual=True)
    cls.add_method('DeserializeFromString', 'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   visibility='private', is_virtual=True)
    cls.add_method('SerializeToString', 'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, visibility='private', is_virtual=True)
    return
def register_Ns3ErlangRandomVariable_methods(root_module, cls):
    """Register ns3::ErlangRandomVariable (random-variable-stream.h, module 'core') on `cls`."""
    # Generated order: GetTypeId precedes the constructor here.
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_constructor([])
    cls.add_method('GetK', 'uint32_t', [], is_const=True)
    cls.add_method('GetLambda', 'double', [], is_const=True)
    # Parameterized draws followed by the virtual no-argument draws.
    cls.add_method('GetValue', 'double', [param('uint32_t', 'k'), param('double', 'lambda')])
    cls.add_method('GetInteger', 'uint32_t', [param('uint32_t', 'k'), param('uint32_t', 'lambda')])
    cls.add_method('GetValue', 'double', [], is_virtual=True)
    cls.add_method('GetInteger', 'uint32_t', [], is_virtual=True)
    return
def register_Ns3EventImpl_methods(root_module, cls):
    """Register ns3::EventImpl (event-impl.h, module 'core') on the PyBindGen wrapper `cls`."""
    # Copy and default constructors.
    cls.add_constructor([param('ns3::EventImpl const &', 'arg0')])
    cls.add_constructor([])
    cls.add_method('Cancel', 'void', [])
    cls.add_method('Invoke', 'void', [])
    cls.add_method('IsCancelled', 'bool', [])
    # Pure virtual, protected hook implemented by concrete events.
    cls.add_method('Notify', 'void', [],
                   is_pure_virtual=True, visibility='protected', is_virtual=True)
    return
def register_Ns3ExponentialRandomVariable_methods(root_module, cls):
    """Register ns3::ExponentialRandomVariable (random-variable-stream.h, module 'core') on `cls`."""
    # Generated order: GetTypeId precedes the constructor here.
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_constructor([])
    cls.add_method('GetMean', 'double', [], is_const=True)
    cls.add_method('GetBound', 'double', [], is_const=True)
    # Parameterized draws followed by the virtual no-argument draws.
    cls.add_method('GetValue', 'double', [param('double', 'mean'), param('double', 'bound')])
    cls.add_method('GetInteger', 'uint32_t', [param('uint32_t', 'mean'), param('uint32_t', 'bound')])
    cls.add_method('GetValue', 'double', [], is_virtual=True)
    cls.add_method('GetInteger', 'uint32_t', [], is_virtual=True)
    return
def register_Ns3FixedRssLossModel_methods(root_module, cls):
    """Register ns3::FixedRssLossModel (propagation-loss-model.h, module 'propagation') on `cls`."""
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_constructor([])
    cls.add_method('SetRss', 'void', [param('double', 'rss')])
    # Private virtual PropagationLossModel hooks.
    cls.add_method('DoCalcRxPower', 'double',
                   [param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')],
                   is_const=True, visibility='private', is_virtual=True)
    cls.add_method('DoAssignStreams', 'int64_t', [param('int64_t', 'stream')],
                   visibility='private', is_virtual=True)
    return
def register_Ns3FragmentationSubheader_methods(root_module, cls):
    """Register ns3::FragmentationSubheader (wimax-mac-header.h, module 'wimax') on `cls`."""
    # Copy and default constructors.
    cls.add_constructor([param('ns3::FragmentationSubheader const &', 'arg0')])
    cls.add_constructor([])
    # Member functions, in generated order.
    cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'start')], is_virtual=True)
    cls.add_method('GetFc', 'uint8_t', [], is_const=True)
    cls.add_method('GetFsn', 'uint8_t', [], is_const=True)
    cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True)
    cls.add_method('GetName', 'std::string', [], is_const=True)
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True, is_virtual=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True, is_virtual=True)
    cls.add_method('Serialize', 'void', [param('ns3::Buffer::Iterator', 'start')], is_const=True, is_virtual=True)
    cls.add_method('SetFc', 'void', [param('uint8_t', 'fc')])
    cls.add_method('SetFsn', 'void', [param('uint8_t', 'fsn')])
    return
def register_Ns3FriisPropagationLossModel_methods(root_module, cls):
    """Register ns3::FriisPropagationLossModel (propagation-loss-model.h, module 'propagation') on `cls`."""
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_constructor([])
    # SetLambda is overloaded: (frequency, speed) and (lambda).
    cls.add_method('SetLambda', 'void', [param('double', 'frequency'), param('double', 'speed')])
    cls.add_method('SetLambda', 'void', [param('double', 'lambda')])
    cls.add_method('SetSystemLoss', 'void', [param('double', 'systemLoss')])
    cls.add_method('SetMinDistance', 'void', [param('double', 'minDistance')])
    cls.add_method('GetMinDistance', 'double', [], is_const=True)
    cls.add_method('GetLambda', 'double', [], is_const=True)
    cls.add_method('GetSystemLoss', 'double', [], is_const=True)
    # Private virtual PropagationLossModel hooks.
    cls.add_method('DoCalcRxPower', 'double',
                   [param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')],
                   is_const=True, visibility='private', is_virtual=True)
    cls.add_method('DoAssignStreams', 'int64_t', [param('int64_t', 'stream')],
                   visibility='private', is_virtual=True)
    return
def register_Ns3GammaRandomVariable_methods(root_module, cls):
    """Register ns3::GammaRandomVariable (random-variable-stream.h, module 'core') on `cls`."""
    # Generated order: GetTypeId precedes the constructor here.
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_constructor([])
    cls.add_method('GetAlpha', 'double', [], is_const=True)
    cls.add_method('GetBeta', 'double', [], is_const=True)
    # Parameterized draws followed by the virtual no-argument draws.
    cls.add_method('GetValue', 'double', [param('double', 'alpha'), param('double', 'beta')])
    cls.add_method('GetInteger', 'uint32_t', [param('uint32_t', 'alpha'), param('uint32_t', 'beta')])
    cls.add_method('GetValue', 'double', [], is_virtual=True)
    cls.add_method('GetInteger', 'uint32_t', [], is_virtual=True)
    return
def register_Ns3GenericMacHeader_methods(root_module, cls):
    """Register ns3::GenericMacHeader (wimax-mac-header.h, module 'wimax') on the PyBindGen wrapper `cls`."""
    # Copy and default constructors.
    cls.add_constructor([param('ns3::GenericMacHeader const &', 'arg0')])
    cls.add_constructor([])
    # Member functions, in generated order.
    cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'start')], is_virtual=True)
    cls.add_method('GetCi', 'uint8_t', [], is_const=True)
    cls.add_method('GetCid', 'ns3::Cid', [], is_const=True)
    cls.add_method('GetEc', 'uint8_t', [], is_const=True)
    cls.add_method('GetEks', 'uint8_t', [], is_const=True)
    cls.add_method('GetHcs', 'uint8_t', [], is_const=True)
    cls.add_method('GetHt', 'uint8_t', [], is_const=True)
    cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True)
    cls.add_method('GetLen', 'uint16_t', [], is_const=True)
    cls.add_method('GetName', 'std::string', [], is_const=True)
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True, is_virtual=True)
    cls.add_method('GetType', 'uint8_t', [], is_const=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True, is_virtual=True)
    cls.add_method('Serialize', 'void', [param('ns3::Buffer::Iterator', 'start')], is_const=True, is_virtual=True)
    cls.add_method('SetCi', 'void', [param('uint8_t', 'ci')])
    cls.add_method('SetCid', 'void', [param('ns3::Cid', 'cid')])
    cls.add_method('SetEc', 'void', [param('uint8_t', 'ec')])
    cls.add_method('SetEks', 'void', [param('uint8_t', 'eks')])
    cls.add_method('SetHcs', 'void', [param('uint8_t', 'hcs')])
    cls.add_method('SetHt', 'void', [param('uint8_t', 'HT')])
    cls.add_method('SetLen', 'void', [param('uint16_t', 'len')])
    cls.add_method('SetType', 'void', [param('uint8_t', 'type')])
    cls.add_method('check_hcs', 'bool', [], is_const=True)
    return
def register_Ns3GrantManagementSubheader_methods(root_module, cls):
    """Register ns3::GrantManagementSubheader (wimax-mac-header.h, module 'wimax') on `cls`."""
    # Copy and default constructors.
    cls.add_constructor([param('ns3::GrantManagementSubheader const &', 'arg0')])
    cls.add_constructor([])
    # Member functions, in generated order.
    cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'start')], is_virtual=True)
    cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True)
    cls.add_method('GetName', 'std::string', [], is_const=True)
    cls.add_method('GetPbr', 'uint16_t', [], is_const=True)
    cls.add_method('GetPm', 'uint8_t', [], is_const=True)
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True, is_virtual=True)
    cls.add_method('GetSi', 'uint8_t', [], is_const=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True, is_virtual=True)
    cls.add_method('Serialize', 'void', [param('ns3::Buffer::Iterator', 'start')], is_const=True, is_virtual=True)
    cls.add_method('SetPbr', 'void', [param('uint16_t', 'pbr')])
    cls.add_method('SetPm', 'void', [param('uint8_t', 'pm')])
    cls.add_method('SetSi', 'void', [param('uint8_t', 'si')])
    return
def register_Ns3IpcsClassifier_methods(root_module, cls):
    """Register ns3::IpcsClassifier (ipcs-classifier.h, module 'wimax') on the PyBindGen wrapper `cls`."""
    # Copy and default constructors.
    cls.add_constructor([param('ns3::IpcsClassifier const &', 'arg0')])
    cls.add_constructor([])
    # ServiceFlow* Classify(Ptr<const Packet>, Ptr<ServiceFlowManager>, ServiceFlow::Direction)
    cls.add_method('Classify', 'ns3::ServiceFlow *',
                   [param('ns3::Ptr< ns3::Packet const >', 'packet'), param('ns3::Ptr< ns3::ServiceFlowManager >', 'sfm'), param('ns3::ServiceFlow::Direction', 'dir')])
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    return
def register_Ns3Ipv4AddressChecker_methods(root_module, cls):
    """Register PyBindGen bindings for ns3::Ipv4AddressChecker (module 'network').

    Only two things are exposed: the default constructor and the
    copy constructor taking a const reference.
    """
    ## ipv4-address.h (module 'network'): ns3::Ipv4AddressChecker::Ipv4AddressChecker() [constructor]
    ## ipv4-address.h (module 'network'): ns3::Ipv4AddressChecker::Ipv4AddressChecker(ns3::Ipv4AddressChecker const & arg0) [copy constructor]
    # Default constructor first, then copy constructor — same order the
    # code generator originally emitted them in.
    for ctor_args in ([], [param('ns3::Ipv4AddressChecker const &', 'arg0')]):
        cls.add_constructor(ctor_args)
    return
def register_Ns3Ipv4AddressValue_methods(root_module, cls):
    """Register PyBindGen bindings for ns3::Ipv4AddressValue (module 'network').

    An AttributeValue wrapper around ns3::Ipv4Address: constructors,
    Copy(), string (de)serialization hooks, and Get()/Set() accessors.
    """
    ## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue::Ipv4AddressValue() [constructor]
    cls.add_constructor([])
    ## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue::Ipv4AddressValue(ns3::Ipv4AddressValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv4AddressValue const &', 'arg0')])
    ## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue::Ipv4AddressValue(ns3::Ipv4Address const & value) [constructor]
    cls.add_constructor([param('ns3::Ipv4Address const &', 'value')])
    ## ipv4-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv4AddressValue::Copy() const [member function]
    cls.add_method('Copy',
                   'ns3::Ptr< ns3::AttributeValue >',
                   [],
                   is_const=True, is_virtual=True)
    ## ipv4-address.h (module 'network'): bool ns3::Ipv4AddressValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString',
                   'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    ## ipv4-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv4AddressValue::Get() const [member function]
    cls.add_method('Get',
                   'ns3::Ipv4Address',
                   [],
                   is_const=True)
    ## ipv4-address.h (module 'network'): std::string ns3::Ipv4AddressValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString',
                   'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    ## ipv4-address.h (module 'network'): void ns3::Ipv4AddressValue::Set(ns3::Ipv4Address const & value) [member function]
    cls.add_method('Set',
                   'void',
                   [param('ns3::Ipv4Address const &', 'value')])
    return
def register_Ns3Ipv4MaskChecker_methods(root_module, cls):
    """Register PyBindGen bindings for ns3::Ipv4MaskChecker (module 'network').

    Exposes just the default constructor and the const-reference
    copy constructor.
    """
    ## ipv4-address.h (module 'network'): ns3::Ipv4MaskChecker::Ipv4MaskChecker() [constructor]
    ## ipv4-address.h (module 'network'): ns3::Ipv4MaskChecker::Ipv4MaskChecker(ns3::Ipv4MaskChecker const & arg0) [copy constructor]
    # Registration order matches the generator's output: default, then copy.
    for ctor_args in ([], [param('ns3::Ipv4MaskChecker const &', 'arg0')]):
        cls.add_constructor(ctor_args)
    return
def register_Ns3Ipv4MaskValue_methods(root_module, cls):
    """Register PyBindGen bindings for ns3::Ipv4MaskValue (module 'network').

    An AttributeValue wrapper around ns3::Ipv4Mask: constructors, Copy(),
    string (de)serialization hooks, and Get()/Set() accessors.
    """
    ## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue::Ipv4MaskValue() [constructor]
    cls.add_constructor([])
    ## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue::Ipv4MaskValue(ns3::Ipv4MaskValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv4MaskValue const &', 'arg0')])
    ## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue::Ipv4MaskValue(ns3::Ipv4Mask const & value) [constructor]
    cls.add_constructor([param('ns3::Ipv4Mask const &', 'value')])
    ## ipv4-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv4MaskValue::Copy() const [member function]
    cls.add_method('Copy',
                   'ns3::Ptr< ns3::AttributeValue >',
                   [],
                   is_const=True, is_virtual=True)
    ## ipv4-address.h (module 'network'): bool ns3::Ipv4MaskValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString',
                   'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    ## ipv4-address.h (module 'network'): ns3::Ipv4Mask ns3::Ipv4MaskValue::Get() const [member function]
    cls.add_method('Get',
                   'ns3::Ipv4Mask',
                   [],
                   is_const=True)
    ## ipv4-address.h (module 'network'): std::string ns3::Ipv4MaskValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString',
                   'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    ## ipv4-address.h (module 'network'): void ns3::Ipv4MaskValue::Set(ns3::Ipv4Mask const & value) [member function]
    cls.add_method('Set',
                   'void',
                   [param('ns3::Ipv4Mask const &', 'value')])
    return
def register_Ns3Ipv6AddressChecker_methods(root_module, cls):
    """Register PyBindGen bindings for ns3::Ipv6AddressChecker (module 'network').

    Exposes just the default constructor and the const-reference
    copy constructor.
    """
    ## ipv6-address.h (module 'network'): ns3::Ipv6AddressChecker::Ipv6AddressChecker() [constructor]
    ## ipv6-address.h (module 'network'): ns3::Ipv6AddressChecker::Ipv6AddressChecker(ns3::Ipv6AddressChecker const & arg0) [copy constructor]
    # Registration order matches the generator's output: default, then copy.
    for ctor_args in ([], [param('ns3::Ipv6AddressChecker const &', 'arg0')]):
        cls.add_constructor(ctor_args)
    return
def register_Ns3Ipv6AddressValue_methods(root_module, cls):
    """Register PyBindGen bindings for ns3::Ipv6AddressValue (module 'network').

    An AttributeValue wrapper around ns3::Ipv6Address: constructors,
    Copy(), string (de)serialization hooks, and Get()/Set() accessors.
    """
    ## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue::Ipv6AddressValue() [constructor]
    cls.add_constructor([])
    ## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue::Ipv6AddressValue(ns3::Ipv6AddressValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv6AddressValue const &', 'arg0')])
    ## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue::Ipv6AddressValue(ns3::Ipv6Address const & value) [constructor]
    cls.add_constructor([param('ns3::Ipv6Address const &', 'value')])
    ## ipv6-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv6AddressValue::Copy() const [member function]
    cls.add_method('Copy',
                   'ns3::Ptr< ns3::AttributeValue >',
                   [],
                   is_const=True, is_virtual=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6AddressValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString',
                   'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    ## ipv6-address.h (module 'network'): ns3::Ipv6Address ns3::Ipv6AddressValue::Get() const [member function]
    cls.add_method('Get',
                   'ns3::Ipv6Address',
                   [],
                   is_const=True)
    ## ipv6-address.h (module 'network'): std::string ns3::Ipv6AddressValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString',
                   'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    ## ipv6-address.h (module 'network'): void ns3::Ipv6AddressValue::Set(ns3::Ipv6Address const & value) [member function]
    cls.add_method('Set',
                   'void',
                   [param('ns3::Ipv6Address const &', 'value')])
    return
def register_Ns3Ipv6PrefixChecker_methods(root_module, cls):
    """Register PyBindGen bindings for ns3::Ipv6PrefixChecker (module 'network').

    Exposes just the default constructor and the const-reference
    copy constructor.
    """
    ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixChecker::Ipv6PrefixChecker() [constructor]
    ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixChecker::Ipv6PrefixChecker(ns3::Ipv6PrefixChecker const & arg0) [copy constructor]
    # Registration order matches the generator's output: default, then copy.
    for ctor_args in ([], [param('ns3::Ipv6PrefixChecker const &', 'arg0')]):
        cls.add_constructor(ctor_args)
    return
def register_Ns3Ipv6PrefixValue_methods(root_module, cls):
    """Register PyBindGen bindings for ns3::Ipv6PrefixValue (module 'network').

    An AttributeValue wrapper around ns3::Ipv6Prefix: constructors,
    Copy(), string (de)serialization hooks, and Get()/Set() accessors.
    """
    ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue::Ipv6PrefixValue() [constructor]
    cls.add_constructor([])
    ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue::Ipv6PrefixValue(ns3::Ipv6PrefixValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv6PrefixValue const &', 'arg0')])
    ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue::Ipv6PrefixValue(ns3::Ipv6Prefix const & value) [constructor]
    cls.add_constructor([param('ns3::Ipv6Prefix const &', 'value')])
    ## ipv6-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv6PrefixValue::Copy() const [member function]
    cls.add_method('Copy',
                   'ns3::Ptr< ns3::AttributeValue >',
                   [],
                   is_const=True, is_virtual=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6PrefixValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString',
                   'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix ns3::Ipv6PrefixValue::Get() const [member function]
    cls.add_method('Get',
                   'ns3::Ipv6Prefix',
                   [],
                   is_const=True)
    ## ipv6-address.h (module 'network'): std::string ns3::Ipv6PrefixValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString',
                   'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    ## ipv6-address.h (module 'network'): void ns3::Ipv6PrefixValue::Set(ns3::Ipv6Prefix const & value) [member function]
    cls.add_method('Set',
                   'void',
                   [param('ns3::Ipv6Prefix const &', 'value')])
    return
def register_Ns3LogDistancePropagationLossModel_methods(root_module, cls):
    """Register PyBindGen bindings for ns3::LogDistancePropagationLossModel.

    Exposes GetTypeId(), the default constructor, the path-loss-exponent
    and reference accessors, plus the private virtual DoCalcRxPower/
    DoAssignStreams overrides (bound with visibility='private').
    """
    ## propagation-loss-model.h (module 'propagation'): static ns3::TypeId ns3::LogDistancePropagationLossModel::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## propagation-loss-model.h (module 'propagation'): ns3::LogDistancePropagationLossModel::LogDistancePropagationLossModel() [constructor]
    cls.add_constructor([])
    ## propagation-loss-model.h (module 'propagation'): void ns3::LogDistancePropagationLossModel::SetPathLossExponent(double n) [member function]
    cls.add_method('SetPathLossExponent',
                   'void',
                   [param('double', 'n')])
    ## propagation-loss-model.h (module 'propagation'): double ns3::LogDistancePropagationLossModel::GetPathLossExponent() const [member function]
    cls.add_method('GetPathLossExponent',
                   'double',
                   [],
                   is_const=True)
    ## propagation-loss-model.h (module 'propagation'): void ns3::LogDistancePropagationLossModel::SetReference(double referenceDistance, double referenceLoss) [member function]
    cls.add_method('SetReference',
                   'void',
                   [param('double', 'referenceDistance'), param('double', 'referenceLoss')])
    ## propagation-loss-model.h (module 'propagation'): double ns3::LogDistancePropagationLossModel::DoCalcRxPower(double txPowerDbm, ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function]
    cls.add_method('DoCalcRxPower',
                   'double',
                   [param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')],
                   is_const=True, visibility='private', is_virtual=True)
    ## propagation-loss-model.h (module 'propagation'): int64_t ns3::LogDistancePropagationLossModel::DoAssignStreams(int64_t stream) [member function]
    cls.add_method('DoAssignStreams',
                   'int64_t',
                   [param('int64_t', 'stream')],
                   visibility='private', is_virtual=True)
    return
def register_Ns3LogNormalRandomVariable_methods(root_module, cls):
    """Register PyBindGen bindings for ns3::LogNormalRandomVariable (module 'core').

    Exposes GetTypeId(), the default constructor, GetMu()/GetSigma(),
    and both the parameterized and no-argument GetValue()/GetInteger()
    overloads (the no-argument forms are virtual overrides).
    """
    ## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::LogNormalRandomVariable::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## random-variable-stream.h (module 'core'): ns3::LogNormalRandomVariable::LogNormalRandomVariable() [constructor]
    cls.add_constructor([])
    ## random-variable-stream.h (module 'core'): double ns3::LogNormalRandomVariable::GetMu() const [member function]
    cls.add_method('GetMu',
                   'double',
                   [],
                   is_const=True)
    ## random-variable-stream.h (module 'core'): double ns3::LogNormalRandomVariable::GetSigma() const [member function]
    cls.add_method('GetSigma',
                   'double',
                   [],
                   is_const=True)
    ## random-variable-stream.h (module 'core'): double ns3::LogNormalRandomVariable::GetValue(double mu, double sigma) [member function]
    cls.add_method('GetValue',
                   'double',
                   [param('double', 'mu'), param('double', 'sigma')])
    ## random-variable-stream.h (module 'core'): uint32_t ns3::LogNormalRandomVariable::GetInteger(uint32_t mu, uint32_t sigma) [member function]
    cls.add_method('GetInteger',
                   'uint32_t',
                   [param('uint32_t', 'mu'), param('uint32_t', 'sigma')])
    ## random-variable-stream.h (module 'core'): double ns3::LogNormalRandomVariable::GetValue() [member function]
    cls.add_method('GetValue',
                   'double',
                   [],
                   is_virtual=True)
    ## random-variable-stream.h (module 'core'): uint32_t ns3::LogNormalRandomVariable::GetInteger() [member function]
    cls.add_method('GetInteger',
                   'uint32_t',
                   [],
                   is_virtual=True)
    return
def register_Ns3Mac48AddressChecker_methods(root_module, cls):
    """Register PyBindGen bindings for ns3::Mac48AddressChecker (module 'network').

    Exposes just the default constructor and the const-reference
    copy constructor.
    """
    ## mac48-address.h (module 'network'): ns3::Mac48AddressChecker::Mac48AddressChecker() [constructor]
    ## mac48-address.h (module 'network'): ns3::Mac48AddressChecker::Mac48AddressChecker(ns3::Mac48AddressChecker const & arg0) [copy constructor]
    # Registration order matches the generator's output: default, then copy.
    for ctor_args in ([], [param('ns3::Mac48AddressChecker const &', 'arg0')]):
        cls.add_constructor(ctor_args)
    return
def register_Ns3Mac48AddressValue_methods(root_module, cls):
    """Register PyBindGen bindings for ns3::Mac48AddressValue (module 'network').

    An AttributeValue wrapper around ns3::Mac48Address: constructors,
    Copy(), string (de)serialization hooks, and Get()/Set() accessors.
    """
    ## mac48-address.h (module 'network'): ns3::Mac48AddressValue::Mac48AddressValue() [constructor]
    cls.add_constructor([])
    ## mac48-address.h (module 'network'): ns3::Mac48AddressValue::Mac48AddressValue(ns3::Mac48AddressValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Mac48AddressValue const &', 'arg0')])
    ## mac48-address.h (module 'network'): ns3::Mac48AddressValue::Mac48AddressValue(ns3::Mac48Address const & value) [constructor]
    cls.add_constructor([param('ns3::Mac48Address const &', 'value')])
    ## mac48-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Mac48AddressValue::Copy() const [member function]
    cls.add_method('Copy',
                   'ns3::Ptr< ns3::AttributeValue >',
                   [],
                   is_const=True, is_virtual=True)
    ## mac48-address.h (module 'network'): bool ns3::Mac48AddressValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString',
                   'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    ## mac48-address.h (module 'network'): ns3::Mac48Address ns3::Mac48AddressValue::Get() const [member function]
    cls.add_method('Get',
                   'ns3::Mac48Address',
                   [],
                   is_const=True)
    ## mac48-address.h (module 'network'): std::string ns3::Mac48AddressValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString',
                   'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    ## mac48-address.h (module 'network'): void ns3::Mac48AddressValue::Set(ns3::Mac48Address const & value) [member function]
    cls.add_method('Set',
                   'void',
                   [param('ns3::Mac48Address const &', 'value')])
    return
def register_Ns3MatrixPropagationLossModel_methods(root_module, cls):
    """Register PyBindGen bindings for ns3::MatrixPropagationLossModel.

    Exposes GetTypeId(), the default constructor, SetLoss() (with its
    symmetric=true default preserved), SetDefaultLoss(), and the private
    virtual DoCalcRxPower/DoAssignStreams overrides.
    """
    ## propagation-loss-model.h (module 'propagation'): static ns3::TypeId ns3::MatrixPropagationLossModel::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## propagation-loss-model.h (module 'propagation'): ns3::MatrixPropagationLossModel::MatrixPropagationLossModel() [constructor]
    cls.add_constructor([])
    ## propagation-loss-model.h (module 'propagation'): void ns3::MatrixPropagationLossModel::SetLoss(ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b, double loss, bool symmetric=true) [member function]
    cls.add_method('SetLoss',
                   'void',
                   [param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b'), param('double', 'loss'), param('bool', 'symmetric', default_value='true')])
    ## propagation-loss-model.h (module 'propagation'): void ns3::MatrixPropagationLossModel::SetDefaultLoss(double arg0) [member function]
    cls.add_method('SetDefaultLoss',
                   'void',
                   [param('double', 'arg0')])
    ## propagation-loss-model.h (module 'propagation'): double ns3::MatrixPropagationLossModel::DoCalcRxPower(double txPowerDbm, ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function]
    cls.add_method('DoCalcRxPower',
                   'double',
                   [param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')],
                   is_const=True, visibility='private', is_virtual=True)
    ## propagation-loss-model.h (module 'propagation'): int64_t ns3::MatrixPropagationLossModel::DoAssignStreams(int64_t stream) [member function]
    cls.add_method('DoAssignStreams',
                   'int64_t',
                   [param('int64_t', 'stream')],
                   visibility='private', is_virtual=True)
    return
def register_Ns3NakagamiPropagationLossModel_methods(root_module, cls):
    """Register PyBindGen bindings for ns3::NakagamiPropagationLossModel.

    Exposes GetTypeId(), the default constructor, and the private virtual
    DoCalcRxPower/DoAssignStreams overrides.
    """
    ## propagation-loss-model.h (module 'propagation'): static ns3::TypeId ns3::NakagamiPropagationLossModel::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## propagation-loss-model.h (module 'propagation'): ns3::NakagamiPropagationLossModel::NakagamiPropagationLossModel() [constructor]
    cls.add_constructor([])
    ## propagation-loss-model.h (module 'propagation'): double ns3::NakagamiPropagationLossModel::DoCalcRxPower(double txPowerDbm, ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function]
    cls.add_method('DoCalcRxPower',
                   'double',
                   [param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')],
                   is_const=True, visibility='private', is_virtual=True)
    ## propagation-loss-model.h (module 'propagation'): int64_t ns3::NakagamiPropagationLossModel::DoAssignStreams(int64_t stream) [member function]
    cls.add_method('DoAssignStreams',
                   'int64_t',
                   [param('int64_t', 'stream')],
                   visibility='private', is_virtual=True)
    return
def register_Ns3NetDevice_methods(root_module, cls):
    """Register PyBindGen bindings for the abstract ns3::NetDevice interface.

    Exposes the constructors, the static GetTypeId(), and the full set of
    pure-virtual accessors and operations (addressing, MTU, link state,
    Send/SendFrom, receive-callback registration).  Most methods are bound
    with is_pure_virtual=True so Python subclasses must override them.
    """
    ## net-device.h (module 'network'): ns3::NetDevice::NetDevice() [constructor]
    cls.add_constructor([])
    ## net-device.h (module 'network'): ns3::NetDevice::NetDevice(ns3::NetDevice const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::NetDevice const &', 'arg0')])
    ## net-device.h (module 'network'): void ns3::NetDevice::AddLinkChangeCallback(ns3::Callback<void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> callback) [member function]
    cls.add_method('AddLinkChangeCallback',
                   'void',
                   [param('ns3::Callback< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'callback')],
                   is_pure_virtual=True, is_virtual=True)
    ## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetAddress() const [member function]
    cls.add_method('GetAddress',
                   'ns3::Address',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetBroadcast() const [member function]
    cls.add_method('GetBroadcast',
                   'ns3::Address',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): ns3::Ptr<ns3::Channel> ns3::NetDevice::GetChannel() const [member function]
    cls.add_method('GetChannel',
                   'ns3::Ptr< ns3::Channel >',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): uint32_t ns3::NetDevice::GetIfIndex() const [member function]
    cls.add_method('GetIfIndex',
                   'uint32_t',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): uint16_t ns3::NetDevice::GetMtu() const [member function]
    cls.add_method('GetMtu',
                   'uint16_t',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetMulticast(ns3::Ipv4Address multicastGroup) const [member function]
    cls.add_method('GetMulticast',
                   'ns3::Address',
                   [param('ns3::Ipv4Address', 'multicastGroup')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetMulticast(ns3::Ipv6Address addr) const [member function]
    cls.add_method('GetMulticast',
                   'ns3::Address',
                   [param('ns3::Ipv6Address', 'addr')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): ns3::Ptr<ns3::Node> ns3::NetDevice::GetNode() const [member function]
    cls.add_method('GetNode',
                   'ns3::Ptr< ns3::Node >',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): static ns3::TypeId ns3::NetDevice::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::IsBridge() const [member function]
    cls.add_method('IsBridge',
                   'bool',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::IsBroadcast() const [member function]
    cls.add_method('IsBroadcast',
                   'bool',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::IsLinkUp() const [member function]
    cls.add_method('IsLinkUp',
                   'bool',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::IsMulticast() const [member function]
    cls.add_method('IsMulticast',
                   'bool',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::IsPointToPoint() const [member function]
    cls.add_method('IsPointToPoint',
                   'bool',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::NeedsArp() const [member function]
    cls.add_method('NeedsArp',
                   'bool',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::Send(ns3::Ptr<ns3::Packet> packet, ns3::Address const & dest, uint16_t protocolNumber) [member function]
    cls.add_method('Send',
                   'bool',
                   [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')],
                   is_pure_virtual=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::SendFrom(ns3::Ptr<ns3::Packet> packet, ns3::Address const & source, ns3::Address const & dest, uint16_t protocolNumber) [member function]
    cls.add_method('SendFrom',
                   'bool',
                   [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'source'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')],
                   is_pure_virtual=True, is_virtual=True)
    ## net-device.h (module 'network'): void ns3::NetDevice::SetAddress(ns3::Address address) [member function]
    cls.add_method('SetAddress',
                   'void',
                   [param('ns3::Address', 'address')],
                   is_pure_virtual=True, is_virtual=True)
    ## net-device.h (module 'network'): void ns3::NetDevice::SetIfIndex(uint32_t const index) [member function]
    cls.add_method('SetIfIndex',
                   'void',
                   [param('uint32_t const', 'index')],
                   is_pure_virtual=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::SetMtu(uint16_t const mtu) [member function]
    cls.add_method('SetMtu',
                   'bool',
                   [param('uint16_t const', 'mtu')],
                   is_pure_virtual=True, is_virtual=True)
    ## net-device.h (module 'network'): void ns3::NetDevice::SetNode(ns3::Ptr<ns3::Node> node) [member function]
    cls.add_method('SetNode',
                   'void',
                   [param('ns3::Ptr< ns3::Node >', 'node')],
                   is_pure_virtual=True, is_virtual=True)
    ## net-device.h (module 'network'): void ns3::NetDevice::SetPromiscReceiveCallback(ns3::Callback<bool, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::Address const&, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty> cb) [member function]
    cls.add_method('SetPromiscReceiveCallback',
                   'void',
                   [param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'cb')],
                   is_pure_virtual=True, is_virtual=True)
    ## net-device.h (module 'network'): void ns3::NetDevice::SetReceiveCallback(ns3::Callback<bool, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> cb) [member function]
    cls.add_method('SetReceiveCallback',
                   'void',
                   [param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')],
                   is_pure_virtual=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::SupportsSendFrom() const [member function]
    cls.add_method('SupportsSendFrom',
                   'bool',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def register_Ns3NixVector_methods(root_module, cls):
    """Register PyBindGen bindings for ns3::NixVector (module 'network').

    Adds the C++ operator<< (output stream) wrapper, the constructors,
    and the bit-level neighbor-index accessors plus Serialize/Deserialize
    buffer helpers.
    """
    # Wraps ns3::NixVector's C++ ostream operator<< so str()/print work in Python.
    cls.add_output_stream_operator()
    ## nix-vector.h (module 'network'): ns3::NixVector::NixVector() [constructor]
    cls.add_constructor([])
    ## nix-vector.h (module 'network'): ns3::NixVector::NixVector(ns3::NixVector const & o) [copy constructor]
    cls.add_constructor([param('ns3::NixVector const &', 'o')])
    ## nix-vector.h (module 'network'): void ns3::NixVector::AddNeighborIndex(uint32_t newBits, uint32_t numberOfBits) [member function]
    cls.add_method('AddNeighborIndex',
                   'void',
                   [param('uint32_t', 'newBits'), param('uint32_t', 'numberOfBits')])
    ## nix-vector.h (module 'network'): uint32_t ns3::NixVector::BitCount(uint32_t numberOfNeighbors) const [member function]
    cls.add_method('BitCount',
                   'uint32_t',
                   [param('uint32_t', 'numberOfNeighbors')],
                   is_const=True)
    ## nix-vector.h (module 'network'): ns3::Ptr<ns3::NixVector> ns3::NixVector::Copy() const [member function]
    cls.add_method('Copy',
                   'ns3::Ptr< ns3::NixVector >',
                   [],
                   is_const=True)
    ## nix-vector.h (module 'network'): uint32_t ns3::NixVector::Deserialize(uint32_t const * buffer, uint32_t size) [member function]
    cls.add_method('Deserialize',
                   'uint32_t',
                   [param('uint32_t const *', 'buffer'), param('uint32_t', 'size')])
    ## nix-vector.h (module 'network'): uint32_t ns3::NixVector::ExtractNeighborIndex(uint32_t numberOfBits) [member function]
    cls.add_method('ExtractNeighborIndex',
                   'uint32_t',
                   [param('uint32_t', 'numberOfBits')])
    ## nix-vector.h (module 'network'): uint32_t ns3::NixVector::GetRemainingBits() [member function]
    cls.add_method('GetRemainingBits',
                   'uint32_t',
                   [])
    ## nix-vector.h (module 'network'): uint32_t ns3::NixVector::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize',
                   'uint32_t',
                   [],
                   is_const=True)
    ## nix-vector.h (module 'network'): uint32_t ns3::NixVector::Serialize(uint32_t * buffer, uint32_t maxSize) const [member function]
    cls.add_method('Serialize',
                   'uint32_t',
                   [param('uint32_t *', 'buffer'), param('uint32_t', 'maxSize')],
                   is_const=True)
    return
def register_Ns3Node_methods(root_module, cls):
    """Register PyBindGen bindings for ns3::Node (module 'network').

    Exposes the constructors (including the systemId variant), device and
    application management (Add*/Get*/GetN*), the static ChecksumEnabled()
    and GetTypeId(), protocol-handler and device-addition listener
    (un)registration, and the protected virtual DoDispose()/DoStart()
    lifecycle hooks (bound with visibility='protected').
    """
    ## node.h (module 'network'): ns3::Node::Node(ns3::Node const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Node const &', 'arg0')])
    ## node.h (module 'network'): ns3::Node::Node() [constructor]
    cls.add_constructor([])
    ## node.h (module 'network'): ns3::Node::Node(uint32_t systemId) [constructor]
    cls.add_constructor([param('uint32_t', 'systemId')])
    ## node.h (module 'network'): uint32_t ns3::Node::AddApplication(ns3::Ptr<ns3::Application> application) [member function]
    cls.add_method('AddApplication',
                   'uint32_t',
                   [param('ns3::Ptr< ns3::Application >', 'application')])
    ## node.h (module 'network'): uint32_t ns3::Node::AddDevice(ns3::Ptr<ns3::NetDevice> device) [member function]
    cls.add_method('AddDevice',
                   'uint32_t',
                   [param('ns3::Ptr< ns3::NetDevice >', 'device')])
    ## node.h (module 'network'): static bool ns3::Node::ChecksumEnabled() [member function]
    cls.add_method('ChecksumEnabled',
                   'bool',
                   [],
                   is_static=True)
    ## node.h (module 'network'): ns3::Ptr<ns3::Application> ns3::Node::GetApplication(uint32_t index) const [member function]
    cls.add_method('GetApplication',
                   'ns3::Ptr< ns3::Application >',
                   [param('uint32_t', 'index')],
                   is_const=True)
    ## node.h (module 'network'): ns3::Ptr<ns3::NetDevice> ns3::Node::GetDevice(uint32_t index) const [member function]
    cls.add_method('GetDevice',
                   'ns3::Ptr< ns3::NetDevice >',
                   [param('uint32_t', 'index')],
                   is_const=True)
    ## node.h (module 'network'): uint32_t ns3::Node::GetId() const [member function]
    cls.add_method('GetId',
                   'uint32_t',
                   [],
                   is_const=True)
    ## node.h (module 'network'): uint32_t ns3::Node::GetNApplications() const [member function]
    cls.add_method('GetNApplications',
                   'uint32_t',
                   [],
                   is_const=True)
    ## node.h (module 'network'): uint32_t ns3::Node::GetNDevices() const [member function]
    cls.add_method('GetNDevices',
                   'uint32_t',
                   [],
                   is_const=True)
    ## node.h (module 'network'): uint32_t ns3::Node::GetSystemId() const [member function]
    cls.add_method('GetSystemId',
                   'uint32_t',
                   [],
                   is_const=True)
    ## node.h (module 'network'): static ns3::TypeId ns3::Node::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## node.h (module 'network'): void ns3::Node::RegisterDeviceAdditionListener(ns3::Callback<void,ns3::Ptr<ns3::NetDevice>,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> listener) [member function]
    cls.add_method('RegisterDeviceAdditionListener',
                   'void',
                   [param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'listener')])
    ## node.h (module 'network'): void ns3::Node::RegisterProtocolHandler(ns3::Callback<void, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::Address const&, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty> handler, uint16_t protocolType, ns3::Ptr<ns3::NetDevice> device, bool promiscuous=false) [member function]
    cls.add_method('RegisterProtocolHandler',
                   'void',
                   [param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'handler'), param('uint16_t', 'protocolType'), param('ns3::Ptr< ns3::NetDevice >', 'device'), param('bool', 'promiscuous', default_value='false')])
    ## node.h (module 'network'): void ns3::Node::UnregisterDeviceAdditionListener(ns3::Callback<void,ns3::Ptr<ns3::NetDevice>,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> listener) [member function]
    cls.add_method('UnregisterDeviceAdditionListener',
                   'void',
                   [param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'listener')])
    ## node.h (module 'network'): void ns3::Node::UnregisterProtocolHandler(ns3::Callback<void, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::Address const&, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty> handler) [member function]
    cls.add_method('UnregisterProtocolHandler',
                   'void',
                   [param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'handler')])
    ## node.h (module 'network'): void ns3::Node::DoDispose() [member function]
    cls.add_method('DoDispose',
                   'void',
                   [],
                   visibility='protected', is_virtual=True)
    ## node.h (module 'network'): void ns3::Node::DoStart() [member function]
    cls.add_method('DoStart',
                   'void',
                   [],
                   visibility='protected', is_virtual=True)
    return
def register_Ns3NormalRandomVariable_methods(root_module, cls):
    """Register PyBindGen bindings for ns3::NormalRandomVariable (module 'core').

    Exposes the INFINITE_VALUE static constant, GetTypeId(), the default
    constructor, the GetMean()/GetVariance()/GetBound() accessors, and
    both the parameterized and no-argument GetValue()/GetInteger()
    overloads (the no-argument forms are virtual overrides).
    """
    ## random-variable-stream.h (module 'core'): ns3::NormalRandomVariable::INFINITE_VALUE [variable]
    cls.add_static_attribute('INFINITE_VALUE', 'double const', is_const=True)
    ## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::NormalRandomVariable::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## random-variable-stream.h (module 'core'): ns3::NormalRandomVariable::NormalRandomVariable() [constructor]
    cls.add_constructor([])
    ## random-variable-stream.h (module 'core'): double ns3::NormalRandomVariable::GetMean() const [member function]
    cls.add_method('GetMean',
                   'double',
                   [],
                   is_const=True)
    ## random-variable-stream.h (module 'core'): double ns3::NormalRandomVariable::GetVariance() const [member function]
    cls.add_method('GetVariance',
                   'double',
                   [],
                   is_const=True)
    ## random-variable-stream.h (module 'core'): double ns3::NormalRandomVariable::GetBound() const [member function]
    cls.add_method('GetBound',
                   'double',
                   [],
                   is_const=True)
    ## random-variable-stream.h (module 'core'): double ns3::NormalRandomVariable::GetValue(double mean, double variance, double bound) [member function]
    cls.add_method('GetValue',
                   'double',
                   [param('double', 'mean'), param('double', 'variance'), param('double', 'bound')])
    ## random-variable-stream.h (module 'core'): uint32_t ns3::NormalRandomVariable::GetInteger(uint32_t mean, uint32_t variance, uint32_t bound) [member function]
    cls.add_method('GetInteger',
                   'uint32_t',
                   [param('uint32_t', 'mean'), param('uint32_t', 'variance'), param('uint32_t', 'bound')])
    ## random-variable-stream.h (module 'core'): double ns3::NormalRandomVariable::GetValue() [member function]
    cls.add_method('GetValue',
                   'double',
                   [],
                   is_virtual=True)
    ## random-variable-stream.h (module 'core'): uint32_t ns3::NormalRandomVariable::GetInteger() [member function]
    cls.add_method('GetInteger',
                   'uint32_t',
                   [],
                   is_virtual=True)
    return
def register_Ns3ObjectFactoryChecker_methods(root_module, cls):
    """Register ns3::ObjectFactoryChecker constructors on the wrapper class."""
    # Default and copy constructors are the whole exposed surface.
    for ctor_args in ([], [param('ns3::ObjectFactoryChecker const &', 'arg0')]):
        cls.add_constructor(ctor_args)
    return
def register_Ns3ObjectFactoryValue_methods(root_module, cls):
    """Register ns3::ObjectFactoryValue (AttributeValue wrapping an ObjectFactory)."""
    # Constructors: default, copy, and from an ns3::ObjectFactory.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::ObjectFactoryValue const &', 'arg0')])
    cls.add_constructor([param('ns3::ObjectFactory const &', 'value')])
    # Virtual AttributeValue interface plus the typed Get/Set accessors.
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [],
                   is_const=True, is_virtual=True)
    cls.add_method('DeserializeFromString', 'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    cls.add_method('Get', 'ns3::ObjectFactory', [], is_const=True)
    cls.add_method('SerializeToString', 'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    cls.add_method('Set', 'void', [param('ns3::ObjectFactory const &', 'value')])
    return
def register_Ns3OutputStreamWrapper_methods(root_module, cls):
    """Register ns3::OutputStreamWrapper on the wrapper class."""
    # Copy constructor, open-by-filename constructor, wrap-existing-stream constructor.
    cls.add_constructor([param('ns3::OutputStreamWrapper const &', 'arg0')])
    cls.add_constructor([param('std::string', 'filename'), param('std::_Ios_Openmode', 'filemode')])
    cls.add_constructor([param('std::ostream *', 'os')])
    # Accessor for the wrapped std::ostream pointer.
    cls.add_method('GetStream', 'std::ostream *', [])
    return
def register_Ns3Packet_methods(root_module, cls):
    """Register the ns3::Packet API on the wrapper class: constructors,
    header/trailer add-peek-remove, byte and packet tags, copying/fragmenting,
    serialization, NixVector access, and printing helpers.

    NOTE(review): this appears to be pybindgen auto-generated code (the
    ``##`` comments record the scanned header); registration order mirrors
    the header scan, so avoid reordering calls by hand.
    """
    cls.add_output_stream_operator()
    ## packet.h (module 'network'): ns3::Packet::Packet() [constructor]
    cls.add_constructor([])
    ## packet.h (module 'network'): ns3::Packet::Packet(ns3::Packet const & o) [copy constructor]
    cls.add_constructor([param('ns3::Packet const &', 'o')])
    ## packet.h (module 'network'): ns3::Packet::Packet(uint32_t size) [constructor]
    cls.add_constructor([param('uint32_t', 'size')])
    ## packet.h (module 'network'): ns3::Packet::Packet(uint8_t const * buffer, uint32_t size, bool magic) [constructor]
    cls.add_constructor([param('uint8_t const *', 'buffer'), param('uint32_t', 'size'), param('bool', 'magic')])
    ## packet.h (module 'network'): ns3::Packet::Packet(uint8_t const * buffer, uint32_t size) [constructor]
    cls.add_constructor([param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
    ## packet.h (module 'network'): void ns3::Packet::AddAtEnd(ns3::Ptr<const ns3::Packet> packet) [member function]
    cls.add_method('AddAtEnd',
                   'void',
                   [param('ns3::Ptr< ns3::Packet const >', 'packet')])
    ## packet.h (module 'network'): void ns3::Packet::AddByteTag(ns3::Tag const & tag) const [member function]
    cls.add_method('AddByteTag',
                   'void',
                   [param('ns3::Tag const &', 'tag')],
                   is_const=True)
    ## packet.h (module 'network'): void ns3::Packet::AddHeader(ns3::Header const & header) [member function]
    cls.add_method('AddHeader',
                   'void',
                   [param('ns3::Header const &', 'header')])
    ## packet.h (module 'network'): void ns3::Packet::AddPacketTag(ns3::Tag const & tag) const [member function]
    cls.add_method('AddPacketTag',
                   'void',
                   [param('ns3::Tag const &', 'tag')],
                   is_const=True)
    ## packet.h (module 'network'): void ns3::Packet::AddPaddingAtEnd(uint32_t size) [member function]
    cls.add_method('AddPaddingAtEnd',
                   'void',
                   [param('uint32_t', 'size')])
    ## packet.h (module 'network'): void ns3::Packet::AddTrailer(ns3::Trailer const & trailer) [member function]
    cls.add_method('AddTrailer',
                   'void',
                   [param('ns3::Trailer const &', 'trailer')])
    ## packet.h (module 'network'): ns3::PacketMetadata::ItemIterator ns3::Packet::BeginItem() const [member function]
    cls.add_method('BeginItem',
                   'ns3::PacketMetadata::ItemIterator',
                   [],
                   is_const=True)
    ## packet.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Packet::Copy() const [member function]
    cls.add_method('Copy',
                   'ns3::Ptr< ns3::Packet >',
                   [],
                   is_const=True)
    ## packet.h (module 'network'): uint32_t ns3::Packet::CopyData(uint8_t * buffer, uint32_t size) const [member function]
    cls.add_method('CopyData',
                   'uint32_t',
                   [param('uint8_t *', 'buffer'), param('uint32_t', 'size')],
                   is_const=True)
    ## packet.h (module 'network'): void ns3::Packet::CopyData(std::ostream * os, uint32_t size) const [member function]
    cls.add_method('CopyData',
                   'void',
                   [param('std::ostream *', 'os'), param('uint32_t', 'size')],
                   is_const=True)
    ## packet.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Packet::CreateFragment(uint32_t start, uint32_t length) const [member function]
    cls.add_method('CreateFragment',
                   'ns3::Ptr< ns3::Packet >',
                   [param('uint32_t', 'start'), param('uint32_t', 'length')],
                   is_const=True)
    ## packet.h (module 'network'): static void ns3::Packet::EnableChecking() [member function]
    cls.add_method('EnableChecking',
                   'void',
                   [],
                   is_static=True)
    ## packet.h (module 'network'): static void ns3::Packet::EnablePrinting() [member function]
    cls.add_method('EnablePrinting',
                   'void',
                   [],
                   is_static=True)
    ## packet.h (module 'network'): bool ns3::Packet::FindFirstMatchingByteTag(ns3::Tag & tag) const [member function]
    cls.add_method('FindFirstMatchingByteTag',
                   'bool',
                   [param('ns3::Tag &', 'tag')],
                   is_const=True)
    ## packet.h (module 'network'): ns3::ByteTagIterator ns3::Packet::GetByteTagIterator() const [member function]
    cls.add_method('GetByteTagIterator',
                   'ns3::ByteTagIterator',
                   [],
                   is_const=True)
    ## packet.h (module 'network'): ns3::Ptr<ns3::NixVector> ns3::Packet::GetNixVector() const [member function]
    cls.add_method('GetNixVector',
                   'ns3::Ptr< ns3::NixVector >',
                   [],
                   is_const=True)
    ## packet.h (module 'network'): ns3::PacketTagIterator ns3::Packet::GetPacketTagIterator() const [member function]
    cls.add_method('GetPacketTagIterator',
                   'ns3::PacketTagIterator',
                   [],
                   is_const=True)
    ## packet.h (module 'network'): uint32_t ns3::Packet::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize',
                   'uint32_t',
                   [],
                   is_const=True)
    ## packet.h (module 'network'): uint32_t ns3::Packet::GetSize() const [member function]
    cls.add_method('GetSize',
                   'uint32_t',
                   [],
                   is_const=True)
    ## packet.h (module 'network'): uint64_t ns3::Packet::GetUid() const [member function]
    cls.add_method('GetUid',
                   'uint64_t',
                   [],
                   is_const=True)
    ## packet.h (module 'network'): uint8_t const * ns3::Packet::PeekData() const [member function]
    cls.add_method('PeekData',
                   'uint8_t const *',
                   [],
                   deprecated=True, is_const=True)
    ## packet.h (module 'network'): uint32_t ns3::Packet::PeekHeader(ns3::Header & header) const [member function]
    cls.add_method('PeekHeader',
                   'uint32_t',
                   [param('ns3::Header &', 'header')],
                   is_const=True)
    ## packet.h (module 'network'): bool ns3::Packet::PeekPacketTag(ns3::Tag & tag) const [member function]
    cls.add_method('PeekPacketTag',
                   'bool',
                   [param('ns3::Tag &', 'tag')],
                   is_const=True)
    ## packet.h (module 'network'): uint32_t ns3::Packet::PeekTrailer(ns3::Trailer & trailer) [member function]
    cls.add_method('PeekTrailer',
                   'uint32_t',
                   [param('ns3::Trailer &', 'trailer')])
    ## packet.h (module 'network'): void ns3::Packet::Print(std::ostream & os) const [member function]
    cls.add_method('Print',
                   'void',
                   [param('std::ostream &', 'os')],
                   is_const=True)
    ## packet.h (module 'network'): void ns3::Packet::PrintByteTags(std::ostream & os) const [member function]
    cls.add_method('PrintByteTags',
                   'void',
                   [param('std::ostream &', 'os')],
                   is_const=True)
    ## packet.h (module 'network'): void ns3::Packet::PrintPacketTags(std::ostream & os) const [member function]
    cls.add_method('PrintPacketTags',
                   'void',
                   [param('std::ostream &', 'os')],
                   is_const=True)
    ## packet.h (module 'network'): void ns3::Packet::RemoveAllByteTags() [member function]
    cls.add_method('RemoveAllByteTags',
                   'void',
                   [])
    ## packet.h (module 'network'): void ns3::Packet::RemoveAllPacketTags() [member function]
    cls.add_method('RemoveAllPacketTags',
                   'void',
                   [])
    ## packet.h (module 'network'): void ns3::Packet::RemoveAtEnd(uint32_t size) [member function]
    cls.add_method('RemoveAtEnd',
                   'void',
                   [param('uint32_t', 'size')])
    ## packet.h (module 'network'): void ns3::Packet::RemoveAtStart(uint32_t size) [member function]
    cls.add_method('RemoveAtStart',
                   'void',
                   [param('uint32_t', 'size')])
    ## packet.h (module 'network'): uint32_t ns3::Packet::RemoveHeader(ns3::Header & header) [member function]
    cls.add_method('RemoveHeader',
                   'uint32_t',
                   [param('ns3::Header &', 'header')])
    ## packet.h (module 'network'): bool ns3::Packet::RemovePacketTag(ns3::Tag & tag) [member function]
    cls.add_method('RemovePacketTag',
                   'bool',
                   [param('ns3::Tag &', 'tag')])
    ## packet.h (module 'network'): uint32_t ns3::Packet::RemoveTrailer(ns3::Trailer & trailer) [member function]
    cls.add_method('RemoveTrailer',
                   'uint32_t',
                   [param('ns3::Trailer &', 'trailer')])
    ## packet.h (module 'network'): uint32_t ns3::Packet::Serialize(uint8_t * buffer, uint32_t maxSize) const [member function]
    cls.add_method('Serialize',
                   'uint32_t',
                   [param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')],
                   is_const=True)
    ## packet.h (module 'network'): void ns3::Packet::SetNixVector(ns3::Ptr<ns3::NixVector> arg0) [member function]
    cls.add_method('SetNixVector',
                   'void',
                   [param('ns3::Ptr< ns3::NixVector >', 'arg0')])
    return
def register_Ns3ParetoRandomVariable_methods(root_module, cls):
    """Register the ns3::ParetoRandomVariable API on its wrapper class."""
    # Static TypeId accessor and the default constructor.
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_constructor([])
    # Const getters for the configured distribution parameters.
    for getter in ('GetMean', 'GetShape', 'GetBound'):
        cls.add_method(getter, 'double', [], is_const=True)
    # Draws with explicitly supplied parameters.
    cls.add_method('GetValue', 'double',
                   [param('double', 'mean'), param('double', 'shape'), param('double', 'bound')])
    cls.add_method('GetInteger', 'uint32_t',
                   [param('uint32_t', 'mean'), param('uint32_t', 'shape'), param('uint32_t', 'bound')])
    # Virtual draws that use the attribute-configured parameters.
    cls.add_method('GetValue', 'double', [], is_virtual=True)
    cls.add_method('GetInteger', 'uint32_t', [], is_virtual=True)
    return
def register_Ns3SimpleOfdmWimaxPhy_methods(root_module, cls):
    """Register the ns3::SimpleOfdmWimaxPhy API on the wrapper class:
    constructors, public PHY configuration/send/receive methods, trace
    notifications, and the private Do* virtual hooks of ns3::WimaxPhy.

    NOTE(review): this appears to be pybindgen auto-generated code (the
    ``##`` comments record the scanned header); registration order mirrors
    the header scan, so avoid reordering calls by hand.
    """
    ## simple-ofdm-wimax-phy.h (module 'wimax'): ns3::SimpleOfdmWimaxPhy::SimpleOfdmWimaxPhy(ns3::SimpleOfdmWimaxPhy const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::SimpleOfdmWimaxPhy const &', 'arg0')])
    ## simple-ofdm-wimax-phy.h (module 'wimax'): ns3::SimpleOfdmWimaxPhy::SimpleOfdmWimaxPhy() [constructor]
    cls.add_constructor([])
    ## simple-ofdm-wimax-phy.h (module 'wimax'): ns3::SimpleOfdmWimaxPhy::SimpleOfdmWimaxPhy(char * tracesPath) [constructor]
    cls.add_constructor([param('char *', 'tracesPath')])
    ## simple-ofdm-wimax-phy.h (module 'wimax'): void ns3::SimpleOfdmWimaxPhy::ActivateLoss(bool loss) [member function]
    cls.add_method('ActivateLoss',
                   'void',
                   [param('bool', 'loss')])
    ## simple-ofdm-wimax-phy.h (module 'wimax'): int64_t ns3::SimpleOfdmWimaxPhy::AssignStreams(int64_t stream) [member function]
    cls.add_method('AssignStreams',
                   'int64_t',
                   [param('int64_t', 'stream')],
                   is_virtual=True)
    ## simple-ofdm-wimax-phy.h (module 'wimax'): void ns3::SimpleOfdmWimaxPhy::DoAttach(ns3::Ptr<ns3::WimaxChannel> channel) [member function]
    cls.add_method('DoAttach',
                   'void',
                   [param('ns3::Ptr< ns3::WimaxChannel >', 'channel')],
                   is_virtual=True)
    ## simple-ofdm-wimax-phy.h (module 'wimax'): uint32_t ns3::SimpleOfdmWimaxPhy::GetBandwidth() const [member function]
    cls.add_method('GetBandwidth',
                   'uint32_t',
                   [],
                   is_const=True)
    ## simple-ofdm-wimax-phy.h (module 'wimax'): double ns3::SimpleOfdmWimaxPhy::GetNoiseFigure() const [member function]
    cls.add_method('GetNoiseFigure',
                   'double',
                   [],
                   is_const=True)
    ## simple-ofdm-wimax-phy.h (module 'wimax'): ns3::WimaxPhy::PhyType ns3::SimpleOfdmWimaxPhy::GetPhyType() const [member function]
    cls.add_method('GetPhyType',
                   'ns3::WimaxPhy::PhyType',
                   [],
                   is_const=True, is_virtual=True)
    ## simple-ofdm-wimax-phy.h (module 'wimax'): double ns3::SimpleOfdmWimaxPhy::GetTxPower() const [member function]
    cls.add_method('GetTxPower',
                   'double',
                   [],
                   is_const=True)
    ## simple-ofdm-wimax-phy.h (module 'wimax'): static ns3::TypeId ns3::SimpleOfdmWimaxPhy::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## simple-ofdm-wimax-phy.h (module 'wimax'): void ns3::SimpleOfdmWimaxPhy::NotifyRxBegin(ns3::Ptr<ns3::PacketBurst> burst) [member function]
    cls.add_method('NotifyRxBegin',
                   'void',
                   [param('ns3::Ptr< ns3::PacketBurst >', 'burst')])
    ## simple-ofdm-wimax-phy.h (module 'wimax'): void ns3::SimpleOfdmWimaxPhy::NotifyRxDrop(ns3::Ptr<ns3::PacketBurst> burst) [member function]
    cls.add_method('NotifyRxDrop',
                   'void',
                   [param('ns3::Ptr< ns3::PacketBurst >', 'burst')])
    ## simple-ofdm-wimax-phy.h (module 'wimax'): void ns3::SimpleOfdmWimaxPhy::NotifyRxEnd(ns3::Ptr<ns3::PacketBurst> burst) [member function]
    cls.add_method('NotifyRxEnd',
                   'void',
                   [param('ns3::Ptr< ns3::PacketBurst >', 'burst')])
    ## simple-ofdm-wimax-phy.h (module 'wimax'): void ns3::SimpleOfdmWimaxPhy::NotifyTxBegin(ns3::Ptr<ns3::PacketBurst> burst) [member function]
    cls.add_method('NotifyTxBegin',
                   'void',
                   [param('ns3::Ptr< ns3::PacketBurst >', 'burst')])
    ## simple-ofdm-wimax-phy.h (module 'wimax'): void ns3::SimpleOfdmWimaxPhy::NotifyTxDrop(ns3::Ptr<ns3::PacketBurst> burst) [member function]
    cls.add_method('NotifyTxDrop',
                   'void',
                   [param('ns3::Ptr< ns3::PacketBurst >', 'burst')])
    ## simple-ofdm-wimax-phy.h (module 'wimax'): void ns3::SimpleOfdmWimaxPhy::NotifyTxEnd(ns3::Ptr<ns3::PacketBurst> burst) [member function]
    cls.add_method('NotifyTxEnd',
                   'void',
                   [param('ns3::Ptr< ns3::PacketBurst >', 'burst')])
    ## simple-ofdm-wimax-phy.h (module 'wimax'): void ns3::SimpleOfdmWimaxPhy::Send(ns3::Ptr<ns3::PacketBurst> burst, ns3::WimaxPhy::ModulationType modulationType, uint8_t direction) [member function]
    cls.add_method('Send',
                   'void',
                   [param('ns3::Ptr< ns3::PacketBurst >', 'burst'), param('ns3::WimaxPhy::ModulationType', 'modulationType'), param('uint8_t', 'direction')])
    ## simple-ofdm-wimax-phy.h (module 'wimax'): void ns3::SimpleOfdmWimaxPhy::Send(ns3::SendParams * params) [member function]
    cls.add_method('Send',
                   'void',
                   [param('ns3::SendParams *', 'params')],
                   is_virtual=True)
    ## simple-ofdm-wimax-phy.h (module 'wimax'): void ns3::SimpleOfdmWimaxPhy::SetBandwidth(uint32_t BW) [member function]
    cls.add_method('SetBandwidth',
                   'void',
                   [param('uint32_t', 'BW')])
    ## simple-ofdm-wimax-phy.h (module 'wimax'): void ns3::SimpleOfdmWimaxPhy::SetNoiseFigure(double nf) [member function]
    cls.add_method('SetNoiseFigure',
                   'void',
                   [param('double', 'nf')])
    ## simple-ofdm-wimax-phy.h (module 'wimax'): void ns3::SimpleOfdmWimaxPhy::SetReceiveCallback(ns3::Callback<void,ns3::Ptr<ns3::PacketBurst>,ns3::Ptr<ns3::WimaxConnection>,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> callback) [member function]
    cls.add_method('SetReceiveCallback',
                   'void',
                   [param('ns3::Callback< void, ns3::Ptr< ns3::PacketBurst >, ns3::Ptr< ns3::WimaxConnection >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'callback')])
    ## simple-ofdm-wimax-phy.h (module 'wimax'): void ns3::SimpleOfdmWimaxPhy::SetSNRToBlockErrorRateTracesPath(char * tracesPath) [member function]
    cls.add_method('SetSNRToBlockErrorRateTracesPath',
                   'void',
                   [param('char *', 'tracesPath')])
    ## simple-ofdm-wimax-phy.h (module 'wimax'): void ns3::SimpleOfdmWimaxPhy::SetTxPower(double txPower) [member function]
    cls.add_method('SetTxPower',
                   'void',
                   [param('double', 'txPower')])
    ## simple-ofdm-wimax-phy.h (module 'wimax'): void ns3::SimpleOfdmWimaxPhy::StartReceive(uint32_t burstSize, bool isFirstBlock, uint64_t frequency, ns3::WimaxPhy::ModulationType modulationType, uint8_t direction, double rxPower, ns3::Ptr<ns3::PacketBurst> burst) [member function]
    cls.add_method('StartReceive',
                   'void',
                   [param('uint32_t', 'burstSize'), param('bool', 'isFirstBlock'), param('uint64_t', 'frequency'), param('ns3::WimaxPhy::ModulationType', 'modulationType'), param('uint8_t', 'direction'), param('double', 'rxPower'), param('ns3::Ptr< ns3::PacketBurst >', 'burst')])
    ## simple-ofdm-wimax-phy.h (module 'wimax'): void ns3::SimpleOfdmWimaxPhy::DoDispose() [member function]
    cls.add_method('DoDispose',
                   'void',
                   [],
                   visibility='private', is_virtual=True)
    ## simple-ofdm-wimax-phy.h (module 'wimax'): uint32_t ns3::SimpleOfdmWimaxPhy::DoGetDataRate(ns3::WimaxPhy::ModulationType modulationType) const [member function]
    cls.add_method('DoGetDataRate',
                   'uint32_t',
                   [param('ns3::WimaxPhy::ModulationType', 'modulationType')],
                   is_const=True, visibility='private', is_virtual=True)
    ## simple-ofdm-wimax-phy.h (module 'wimax'): ns3::Time ns3::SimpleOfdmWimaxPhy::DoGetFrameDuration(uint8_t frameDurationCode) const [member function]
    cls.add_method('DoGetFrameDuration',
                   'ns3::Time',
                   [param('uint8_t', 'frameDurationCode')],
                   is_const=True, visibility='private', is_virtual=True)
    ## simple-ofdm-wimax-phy.h (module 'wimax'): uint8_t ns3::SimpleOfdmWimaxPhy::DoGetFrameDurationCode() const [member function]
    cls.add_method('DoGetFrameDurationCode',
                   'uint8_t',
                   [],
                   is_const=True, visibility='private', is_virtual=True)
    ## simple-ofdm-wimax-phy.h (module 'wimax'): double ns3::SimpleOfdmWimaxPhy::DoGetGValue() const [member function]
    cls.add_method('DoGetGValue',
                   'double',
                   [],
                   is_const=True, visibility='private', is_virtual=True)
    ## simple-ofdm-wimax-phy.h (module 'wimax'): uint16_t ns3::SimpleOfdmWimaxPhy::DoGetNfft() const [member function]
    cls.add_method('DoGetNfft',
                   'uint16_t',
                   [],
                   is_const=True, visibility='private', is_virtual=True)
    ## simple-ofdm-wimax-phy.h (module 'wimax'): uint64_t ns3::SimpleOfdmWimaxPhy::DoGetNrBytes(uint32_t symbols, ns3::WimaxPhy::ModulationType modulationType) const [member function]
    cls.add_method('DoGetNrBytes',
                   'uint64_t',
                   [param('uint32_t', 'symbols'), param('ns3::WimaxPhy::ModulationType', 'modulationType')],
                   is_const=True, visibility='private', is_virtual=True)
    ## simple-ofdm-wimax-phy.h (module 'wimax'): uint64_t ns3::SimpleOfdmWimaxPhy::DoGetNrSymbols(uint32_t size, ns3::WimaxPhy::ModulationType modulationType) const [member function]
    cls.add_method('DoGetNrSymbols',
                   'uint64_t',
                   [param('uint32_t', 'size'), param('ns3::WimaxPhy::ModulationType', 'modulationType')],
                   is_const=True, visibility='private', is_virtual=True)
    ## simple-ofdm-wimax-phy.h (module 'wimax'): uint16_t ns3::SimpleOfdmWimaxPhy::DoGetRtg() const [member function]
    cls.add_method('DoGetRtg',
                   'uint16_t',
                   [],
                   is_const=True, visibility='private', is_virtual=True)
    ## simple-ofdm-wimax-phy.h (module 'wimax'): double ns3::SimpleOfdmWimaxPhy::DoGetSamplingFactor() const [member function]
    cls.add_method('DoGetSamplingFactor',
                   'double',
                   [],
                   is_const=True, visibility='private', is_virtual=True)
    ## simple-ofdm-wimax-phy.h (module 'wimax'): double ns3::SimpleOfdmWimaxPhy::DoGetSamplingFrequency() const [member function]
    cls.add_method('DoGetSamplingFrequency',
                   'double',
                   [],
                   is_const=True, visibility='private', is_virtual=True)
    ## simple-ofdm-wimax-phy.h (module 'wimax'): ns3::Time ns3::SimpleOfdmWimaxPhy::DoGetTransmissionTime(uint32_t size, ns3::WimaxPhy::ModulationType modulationType) const [member function]
    cls.add_method('DoGetTransmissionTime',
                   'ns3::Time',
                   [param('uint32_t', 'size'), param('ns3::WimaxPhy::ModulationType', 'modulationType')],
                   is_const=True, visibility='private', is_virtual=True)
    ## simple-ofdm-wimax-phy.h (module 'wimax'): uint16_t ns3::SimpleOfdmWimaxPhy::DoGetTtg() const [member function]
    cls.add_method('DoGetTtg',
                   'uint16_t',
                   [],
                   is_const=True, visibility='private', is_virtual=True)
    ## simple-ofdm-wimax-phy.h (module 'wimax'): void ns3::SimpleOfdmWimaxPhy::DoSetDataRates() [member function]
    cls.add_method('DoSetDataRates',
                   'void',
                   [],
                   visibility='private', is_virtual=True)
    ## simple-ofdm-wimax-phy.h (module 'wimax'): void ns3::SimpleOfdmWimaxPhy::DoSetPhyParameters() [member function]
    cls.add_method('DoSetPhyParameters',
                   'void',
                   [],
                   visibility='private', is_virtual=True)
    return
def register_Ns3TimeChecker_methods(root_module, cls):
    """Register ns3::TimeChecker constructors on the wrapper class."""
    # Default and copy constructors are the whole exposed surface.
    for ctor_args in ([], [param('ns3::TimeChecker const &', 'arg0')]):
        cls.add_constructor(ctor_args)
    return
def register_Ns3TimeValue_methods(root_module, cls):
    """Register ns3::TimeValue (AttributeValue wrapping an ns3::Time)."""
    # Constructors: default, copy, and from an ns3::Time.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::TimeValue const &', 'arg0')])
    cls.add_constructor([param('ns3::Time const &', 'value')])
    # Virtual AttributeValue interface plus the typed Get/Set accessors.
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [],
                   is_const=True, is_virtual=True)
    cls.add_method('DeserializeFromString', 'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    cls.add_method('Get', 'ns3::Time', [], is_const=True)
    cls.add_method('SerializeToString', 'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    cls.add_method('Set', 'void', [param('ns3::Time const &', 'value')])
    return
def register_Ns3TypeIdChecker_methods(root_module, cls):
    """Register ns3::TypeIdChecker constructors on the wrapper class."""
    # Default and copy constructors are the whole exposed surface.
    for ctor_args in ([], [param('ns3::TypeIdChecker const &', 'arg0')]):
        cls.add_constructor(ctor_args)
    return
def register_Ns3TypeIdValue_methods(root_module, cls):
    """Register ns3::TypeIdValue (AttributeValue wrapping an ns3::TypeId)."""
    # Constructors: default, copy, and from an ns3::TypeId.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::TypeIdValue const &', 'arg0')])
    cls.add_constructor([param('ns3::TypeId const &', 'value')])
    # Virtual AttributeValue interface plus the typed Get/Set accessors.
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [],
                   is_const=True, is_virtual=True)
    cls.add_method('DeserializeFromString', 'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    cls.add_method('Get', 'ns3::TypeId', [], is_const=True)
    cls.add_method('SerializeToString', 'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    cls.add_method('Set', 'void', [param('ns3::TypeId const &', 'value')])
    return
def register_Ns3UintegerValue_methods(root_module, cls):
    """Register ns3::UintegerValue (AttributeValue wrapping a uint64_t)."""
    # Constructors: default, copy, and from a uint64_t.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::UintegerValue const &', 'arg0')])
    cls.add_constructor([param('uint64_t const &', 'value')])
    # Virtual AttributeValue interface plus the typed Get/Set accessors.
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [],
                   is_const=True, is_virtual=True)
    cls.add_method('DeserializeFromString', 'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    cls.add_method('Get', 'uint64_t', [], is_const=True)
    cls.add_method('SerializeToString', 'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    cls.add_method('Set', 'void', [param('uint64_t const &', 'value')])
    return
def register_Ns3WimaxChannel_methods(root_module, cls):
    """Register the abstract ns3::WimaxChannel API on its wrapper class."""
    # Copy and default constructors.
    cls.add_constructor([param('ns3::WimaxChannel const &', 'arg0')])
    cls.add_constructor([])
    # Public interface.
    cls.add_method('AssignStreams', 'int64_t', [param('int64_t', 'stream')],
                   is_pure_virtual=True, is_virtual=True)
    cls.add_method('Attach', 'void', [param('ns3::Ptr< ns3::WimaxPhy >', 'phy')])
    cls.add_method('GetDevice', 'ns3::Ptr< ns3::NetDevice >', [param('uint32_t', 'i')],
                   is_const=True, is_virtual=True)
    cls.add_method('GetNDevices', 'uint32_t', [], is_const=True, is_virtual=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    # Private pure-virtual hooks that concrete channel subclasses implement.
    cls.add_method('DoAttach', 'void', [param('ns3::Ptr< ns3::WimaxPhy >', 'phy')],
                   is_pure_virtual=True, visibility='private', is_virtual=True)
    cls.add_method('DoGetDevice', 'ns3::Ptr< ns3::NetDevice >', [param('uint32_t', 'i')],
                   is_pure_virtual=True, is_const=True, visibility='private', is_virtual=True)
    cls.add_method('DoGetNDevices', 'uint32_t', [],
                   is_pure_virtual=True, is_const=True, visibility='private', is_virtual=True)
    return
def register_Ns3WimaxNetDevice_methods(root_module, cls):
    """Register the Python bindings for ns3::WimaxNetDevice (wimax-net-device.h).

    Declares the class's static/instance attributes, constructor, and member
    functions on *cls* so pybindgen can emit the C++ wrapper code.

    NOTE(review): this file is machine-generated by ns-3's Python API scanner;
    prefer regenerating the bindings over hand-editing the calls below, since
    each registration must mirror the C++ header exactly.
    """
    ## wimax-net-device.h (module 'wimax'): ns3::WimaxNetDevice::m_direction [variable]
    cls.add_static_attribute('m_direction', 'uint8_t', is_const=False)
    ## wimax-net-device.h (module 'wimax'): ns3::WimaxNetDevice::m_frameStartTime [variable]
    cls.add_static_attribute('m_frameStartTime', 'ns3::Time', is_const=False)
    ## wimax-net-device.h (module 'wimax'): ns3::WimaxNetDevice::m_traceRx [variable]
    cls.add_instance_attribute('m_traceRx', 'ns3::TracedCallback< ns3::Ptr< ns3::Packet const >, ns3::Mac48Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', is_const=False)
    ## wimax-net-device.h (module 'wimax'): ns3::WimaxNetDevice::m_traceTx [variable]
    cls.add_instance_attribute('m_traceTx', 'ns3::TracedCallback< ns3::Ptr< ns3::Packet const >, ns3::Mac48Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', is_const=False)
    ## wimax-net-device.h (module 'wimax'): static ns3::TypeId ns3::WimaxNetDevice::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## wimax-net-device.h (module 'wimax'): ns3::WimaxNetDevice::WimaxNetDevice() [constructor]
    cls.add_constructor([])
    ## wimax-net-device.h (module 'wimax'): void ns3::WimaxNetDevice::SetTtg(uint16_t ttg) [member function]
    cls.add_method('SetTtg',
                   'void',
                   [param('uint16_t', 'ttg')])
    ## wimax-net-device.h (module 'wimax'): uint16_t ns3::WimaxNetDevice::GetTtg() const [member function]
    cls.add_method('GetTtg',
                   'uint16_t',
                   [],
                   is_const=True)
    ## wimax-net-device.h (module 'wimax'): void ns3::WimaxNetDevice::SetRtg(uint16_t rtg) [member function]
    cls.add_method('SetRtg',
                   'void',
                   [param('uint16_t', 'rtg')])
    ## wimax-net-device.h (module 'wimax'): uint16_t ns3::WimaxNetDevice::GetRtg() const [member function]
    cls.add_method('GetRtg',
                   'uint16_t',
                   [],
                   is_const=True)
    ## wimax-net-device.h (module 'wimax'): void ns3::WimaxNetDevice::Attach(ns3::Ptr<ns3::WimaxChannel> channel) [member function]
    cls.add_method('Attach',
                   'void',
                   [param('ns3::Ptr< ns3::WimaxChannel >', 'channel')])
    ## wimax-net-device.h (module 'wimax'): void ns3::WimaxNetDevice::SetPhy(ns3::Ptr<ns3::WimaxPhy> phy) [member function]
    cls.add_method('SetPhy',
                   'void',
                   [param('ns3::Ptr< ns3::WimaxPhy >', 'phy')])
    ## wimax-net-device.h (module 'wimax'): ns3::Ptr<ns3::WimaxPhy> ns3::WimaxNetDevice::GetPhy() const [member function]
    cls.add_method('GetPhy',
                   'ns3::Ptr< ns3::WimaxPhy >',
                   [],
                   is_const=True)
    ## wimax-net-device.h (module 'wimax'): void ns3::WimaxNetDevice::SetChannel(ns3::Ptr<ns3::WimaxChannel> wimaxChannel) [member function]
    cls.add_method('SetChannel',
                   'void',
                   [param('ns3::Ptr< ns3::WimaxChannel >', 'wimaxChannel')])
    ## wimax-net-device.h (module 'wimax'): uint64_t ns3::WimaxNetDevice::GetChannel(uint8_t index) const [member function]
    cls.add_method('GetChannel',
                   'uint64_t',
                   [param('uint8_t', 'index')],
                   is_const=True)
    ## wimax-net-device.h (module 'wimax'): void ns3::WimaxNetDevice::SetNrFrames(uint32_t nrFrames) [member function]
    cls.add_method('SetNrFrames',
                   'void',
                   [param('uint32_t', 'nrFrames')])
    ## wimax-net-device.h (module 'wimax'): uint32_t ns3::WimaxNetDevice::GetNrFrames() const [member function]
    cls.add_method('GetNrFrames',
                   'uint32_t',
                   [],
                   is_const=True)
    ## wimax-net-device.h (module 'wimax'): void ns3::WimaxNetDevice::SetMacAddress(ns3::Mac48Address address) [member function]
    cls.add_method('SetMacAddress',
                   'void',
                   [param('ns3::Mac48Address', 'address')])
    ## wimax-net-device.h (module 'wimax'): ns3::Mac48Address ns3::WimaxNetDevice::GetMacAddress() const [member function]
    cls.add_method('GetMacAddress',
                   'ns3::Mac48Address',
                   [],
                   is_const=True)
    ## wimax-net-device.h (module 'wimax'): void ns3::WimaxNetDevice::SetState(uint8_t state) [member function]
    cls.add_method('SetState',
                   'void',
                   [param('uint8_t', 'state')])
    ## wimax-net-device.h (module 'wimax'): uint8_t ns3::WimaxNetDevice::GetState() const [member function]
    cls.add_method('GetState',
                   'uint8_t',
                   [],
                   is_const=True)
    ## wimax-net-device.h (module 'wimax'): ns3::Ptr<ns3::WimaxConnection> ns3::WimaxNetDevice::GetInitialRangingConnection() const [member function]
    cls.add_method('GetInitialRangingConnection',
                   'ns3::Ptr< ns3::WimaxConnection >',
                   [],
                   is_const=True)
    ## wimax-net-device.h (module 'wimax'): ns3::Ptr<ns3::WimaxConnection> ns3::WimaxNetDevice::GetBroadcastConnection() const [member function]
    cls.add_method('GetBroadcastConnection',
                   'ns3::Ptr< ns3::WimaxConnection >',
                   [],
                   is_const=True)
    ## wimax-net-device.h (module 'wimax'): void ns3::WimaxNetDevice::SetCurrentDcd(ns3::Dcd dcd) [member function]
    cls.add_method('SetCurrentDcd',
                   'void',
                   [param('ns3::Dcd', 'dcd')])
    ## wimax-net-device.h (module 'wimax'): ns3::Dcd ns3::WimaxNetDevice::GetCurrentDcd() const [member function]
    cls.add_method('GetCurrentDcd',
                   'ns3::Dcd',
                   [],
                   is_const=True)
    ## wimax-net-device.h (module 'wimax'): void ns3::WimaxNetDevice::SetCurrentUcd(ns3::Ucd ucd) [member function]
    cls.add_method('SetCurrentUcd',
                   'void',
                   [param('ns3::Ucd', 'ucd')])
    ## wimax-net-device.h (module 'wimax'): ns3::Ucd ns3::WimaxNetDevice::GetCurrentUcd() const [member function]
    cls.add_method('GetCurrentUcd',
                   'ns3::Ucd',
                   [],
                   is_const=True)
    ## wimax-net-device.h (module 'wimax'): ns3::Ptr<ns3::ConnectionManager> ns3::WimaxNetDevice::GetConnectionManager() const [member function]
    cls.add_method('GetConnectionManager',
                   'ns3::Ptr< ns3::ConnectionManager >',
                   [],
                   is_const=True)
    ## wimax-net-device.h (module 'wimax'): void ns3::WimaxNetDevice::SetConnectionManager(ns3::Ptr<ns3::ConnectionManager> connectionManager) [member function]
    cls.add_method('SetConnectionManager',
                   'void',
                   [param('ns3::Ptr< ns3::ConnectionManager >', 'connectionManager')],
                   is_virtual=True)
    ## wimax-net-device.h (module 'wimax'): ns3::Ptr<ns3::BurstProfileManager> ns3::WimaxNetDevice::GetBurstProfileManager() const [member function]
    cls.add_method('GetBurstProfileManager',
                   'ns3::Ptr< ns3::BurstProfileManager >',
                   [],
                   is_const=True)
    ## wimax-net-device.h (module 'wimax'): void ns3::WimaxNetDevice::SetBurstProfileManager(ns3::Ptr<ns3::BurstProfileManager> burstProfileManager) [member function]
    cls.add_method('SetBurstProfileManager',
                   'void',
                   [param('ns3::Ptr< ns3::BurstProfileManager >', 'burstProfileManager')])
    ## wimax-net-device.h (module 'wimax'): ns3::Ptr<ns3::BandwidthManager> ns3::WimaxNetDevice::GetBandwidthManager() const [member function]
    cls.add_method('GetBandwidthManager',
                   'ns3::Ptr< ns3::BandwidthManager >',
                   [],
                   is_const=True)
    ## wimax-net-device.h (module 'wimax'): void ns3::WimaxNetDevice::SetBandwidthManager(ns3::Ptr<ns3::BandwidthManager> bandwidthManager) [member function]
    cls.add_method('SetBandwidthManager',
                   'void',
                   [param('ns3::Ptr< ns3::BandwidthManager >', 'bandwidthManager')])
    ## wimax-net-device.h (module 'wimax'): void ns3::WimaxNetDevice::CreateDefaultConnections() [member function]
    cls.add_method('CreateDefaultConnections',
                   'void',
                   [])
    ## wimax-net-device.h (module 'wimax'): void ns3::WimaxNetDevice::Start() [member function]
    cls.add_method('Start',
                   'void',
                   [],
                   is_pure_virtual=True, is_virtual=True)
    ## wimax-net-device.h (module 'wimax'): void ns3::WimaxNetDevice::Stop() [member function]
    cls.add_method('Stop',
                   'void',
                   [],
                   is_pure_virtual=True, is_virtual=True)
    ## wimax-net-device.h (module 'wimax'): void ns3::WimaxNetDevice::SetReceiveCallback() [member function]
    cls.add_method('SetReceiveCallback',
                   'void',
                   [])
    ## wimax-net-device.h (module 'wimax'): void ns3::WimaxNetDevice::ForwardUp(ns3::Ptr<ns3::Packet> packet, ns3::Mac48Address const & source, ns3::Mac48Address const & dest) [member function]
    cls.add_method('ForwardUp',
                   'void',
                   [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Mac48Address const &', 'source'), param('ns3::Mac48Address const &', 'dest')])
    ## wimax-net-device.h (module 'wimax'): bool ns3::WimaxNetDevice::Enqueue(ns3::Ptr<ns3::Packet> packet, ns3::MacHeaderType const & hdrType, ns3::Ptr<ns3::WimaxConnection> connection) [member function]
    cls.add_method('Enqueue',
                   'bool',
                   [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::MacHeaderType const &', 'hdrType'), param('ns3::Ptr< ns3::WimaxConnection >', 'connection')],
                   is_pure_virtual=True, is_virtual=True)
    ## wimax-net-device.h (module 'wimax'): void ns3::WimaxNetDevice::ForwardDown(ns3::Ptr<ns3::PacketBurst> burst, ns3::WimaxPhy::ModulationType modulationType) [member function]
    cls.add_method('ForwardDown',
                   'void',
                   [param('ns3::Ptr< ns3::PacketBurst >', 'burst'), param('ns3::WimaxPhy::ModulationType', 'modulationType')])
    ## wimax-net-device.h (module 'wimax'): void ns3::WimaxNetDevice::SetName(std::string const name) [member function]
    cls.add_method('SetName',
                   'void',
                   [param('std::string const', 'name')],
                   is_virtual=True)
    ## wimax-net-device.h (module 'wimax'): std::string ns3::WimaxNetDevice::GetName() const [member function]
    cls.add_method('GetName',
                   'std::string',
                   [],
                   is_const=True, is_virtual=True)
    ## wimax-net-device.h (module 'wimax'): void ns3::WimaxNetDevice::SetIfIndex(uint32_t const index) [member function]
    cls.add_method('SetIfIndex',
                   'void',
                   [param('uint32_t const', 'index')],
                   is_virtual=True)
    ## wimax-net-device.h (module 'wimax'): uint32_t ns3::WimaxNetDevice::GetIfIndex() const [member function]
    cls.add_method('GetIfIndex',
                   'uint32_t',
                   [],
                   is_const=True, is_virtual=True)
    ## wimax-net-device.h (module 'wimax'): ns3::Ptr<ns3::Channel> ns3::WimaxNetDevice::GetPhyChannel() const [member function]
    cls.add_method('GetPhyChannel',
                   'ns3::Ptr< ns3::Channel >',
                   [],
                   is_const=True, is_virtual=True)
    ## wimax-net-device.h (module 'wimax'): ns3::Ptr<ns3::Channel> ns3::WimaxNetDevice::GetChannel() const [member function]
    cls.add_method('GetChannel',
                   'ns3::Ptr< ns3::Channel >',
                   [],
                   is_const=True, is_virtual=True)
    ## wimax-net-device.h (module 'wimax'): void ns3::WimaxNetDevice::SetAddress(ns3::Address address) [member function]
    cls.add_method('SetAddress',
                   'void',
                   [param('ns3::Address', 'address')],
                   is_virtual=True)
    ## wimax-net-device.h (module 'wimax'): ns3::Address ns3::WimaxNetDevice::GetAddress() const [member function]
    cls.add_method('GetAddress',
                   'ns3::Address',
                   [],
                   is_const=True, is_virtual=True)
    ## wimax-net-device.h (module 'wimax'): bool ns3::WimaxNetDevice::SetMtu(uint16_t const mtu) [member function]
    cls.add_method('SetMtu',
                   'bool',
                   [param('uint16_t const', 'mtu')],
                   is_virtual=True)
    ## wimax-net-device.h (module 'wimax'): uint16_t ns3::WimaxNetDevice::GetMtu() const [member function]
    cls.add_method('GetMtu',
                   'uint16_t',
                   [],
                   is_const=True, is_virtual=True)
    ## wimax-net-device.h (module 'wimax'): bool ns3::WimaxNetDevice::IsLinkUp() const [member function]
    cls.add_method('IsLinkUp',
                   'bool',
                   [],
                   is_const=True, is_virtual=True)
    ## wimax-net-device.h (module 'wimax'): void ns3::WimaxNetDevice::SetLinkChangeCallback(ns3::Callback<void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> callback) [member function]
    cls.add_method('SetLinkChangeCallback',
                   'void',
                   [param('ns3::Callback< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'callback')],
                   is_virtual=True)
    ## wimax-net-device.h (module 'wimax'): bool ns3::WimaxNetDevice::IsBroadcast() const [member function]
    cls.add_method('IsBroadcast',
                   'bool',
                   [],
                   is_const=True, is_virtual=True)
    ## wimax-net-device.h (module 'wimax'): ns3::Address ns3::WimaxNetDevice::GetBroadcast() const [member function]
    cls.add_method('GetBroadcast',
                   'ns3::Address',
                   [],
                   is_const=True, is_virtual=True)
    ## wimax-net-device.h (module 'wimax'): bool ns3::WimaxNetDevice::IsMulticast() const [member function]
    cls.add_method('IsMulticast',
                   'bool',
                   [],
                   is_const=True, is_virtual=True)
    ## wimax-net-device.h (module 'wimax'): ns3::Address ns3::WimaxNetDevice::GetMulticast() const [member function]
    cls.add_method('GetMulticast',
                   'ns3::Address',
                   [],
                   is_const=True, is_virtual=True)
    ## wimax-net-device.h (module 'wimax'): ns3::Address ns3::WimaxNetDevice::MakeMulticastAddress(ns3::Ipv4Address multicastGroup) const [member function]
    cls.add_method('MakeMulticastAddress',
                   'ns3::Address',
                   [param('ns3::Ipv4Address', 'multicastGroup')],
                   is_const=True, is_virtual=True)
    ## wimax-net-device.h (module 'wimax'): bool ns3::WimaxNetDevice::IsPointToPoint() const [member function]
    cls.add_method('IsPointToPoint',
                   'bool',
                   [],
                   is_const=True, is_virtual=True)
    ## wimax-net-device.h (module 'wimax'): bool ns3::WimaxNetDevice::Send(ns3::Ptr<ns3::Packet> packet, ns3::Address const & dest, uint16_t protocolNumber) [member function]
    cls.add_method('Send',
                   'bool',
                   [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')],
                   is_virtual=True)
    ## wimax-net-device.h (module 'wimax'): void ns3::WimaxNetDevice::SetNode(ns3::Ptr<ns3::Node> node) [member function]
    cls.add_method('SetNode',
                   'void',
                   [param('ns3::Ptr< ns3::Node >', 'node')],
                   is_virtual=True)
    ## wimax-net-device.h (module 'wimax'): ns3::Ptr<ns3::Node> ns3::WimaxNetDevice::GetNode() const [member function]
    cls.add_method('GetNode',
                   'ns3::Ptr< ns3::Node >',
                   [],
                   is_const=True, is_virtual=True)
    ## wimax-net-device.h (module 'wimax'): bool ns3::WimaxNetDevice::NeedsArp() const [member function]
    cls.add_method('NeedsArp',
                   'bool',
                   [],
                   is_const=True, is_virtual=True)
    ## wimax-net-device.h (module 'wimax'): void ns3::WimaxNetDevice::SetReceiveCallback(ns3::Callback<bool, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> cb) [member function]
    cls.add_method('SetReceiveCallback',
                   'void',
                   [param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')],
                   is_virtual=True)
    ## wimax-net-device.h (module 'wimax'): void ns3::WimaxNetDevice::AddLinkChangeCallback(ns3::Callback<void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> callback) [member function]
    cls.add_method('AddLinkChangeCallback',
                   'void',
                   [param('ns3::Callback< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'callback')],
                   is_virtual=True)
    ## wimax-net-device.h (module 'wimax'): bool ns3::WimaxNetDevice::SendFrom(ns3::Ptr<ns3::Packet> packet, ns3::Address const & source, ns3::Address const & dest, uint16_t protocolNumber) [member function]
    cls.add_method('SendFrom',
                   'bool',
                   [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'source'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')],
                   is_virtual=True)
    ## wimax-net-device.h (module 'wimax'): void ns3::WimaxNetDevice::SetPromiscReceiveCallback(ns3::Callback<bool, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::Address const&, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty> cb) [member function]
    cls.add_method('SetPromiscReceiveCallback',
                   'void',
                   [param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'cb')],
                   is_virtual=True)
    ## wimax-net-device.h (module 'wimax'): ns3::Callback<bool, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::Address const&, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty> ns3::WimaxNetDevice::GetPromiscReceiveCallback() [member function]
    cls.add_method('GetPromiscReceiveCallback',
                   'ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >',
                   [])
    ## wimax-net-device.h (module 'wimax'): bool ns3::WimaxNetDevice::SupportsSendFrom() const [member function]
    cls.add_method('SupportsSendFrom',
                   'bool',
                   [],
                   is_const=True, is_virtual=True)
    ## wimax-net-device.h (module 'wimax'): void ns3::WimaxNetDevice::DoDispose() [member function]
    cls.add_method('DoDispose',
                   'void',
                   [],
                   is_virtual=True)
    ## wimax-net-device.h (module 'wimax'): ns3::Address ns3::WimaxNetDevice::GetMulticast(ns3::Ipv6Address addr) const [member function]
    cls.add_method('GetMulticast',
                   'ns3::Address',
                   [param('ns3::Ipv6Address', 'addr')],
                   is_const=True, is_virtual=True)
    ## wimax-net-device.h (module 'wimax'): ns3::Address ns3::WimaxNetDevice::GetMulticast(ns3::Ipv4Address multicastGroup) const [member function]
    cls.add_method('GetMulticast',
                   'ns3::Address',
                   [param('ns3::Ipv4Address', 'multicastGroup')],
                   is_const=True, is_virtual=True)
    ## wimax-net-device.h (module 'wimax'): bool ns3::WimaxNetDevice::IsBridge() const [member function]
    cls.add_method('IsBridge',
                   'bool',
                   [],
                   is_const=True, is_virtual=True)
    ## wimax-net-device.h (module 'wimax'): bool ns3::WimaxNetDevice::IsPromisc() [member function]
    cls.add_method('IsPromisc',
                   'bool',
                   [])
    ## wimax-net-device.h (module 'wimax'): void ns3::WimaxNetDevice::NotifyPromiscTrace(ns3::Ptr<ns3::Packet> p) [member function]
    cls.add_method('NotifyPromiscTrace',
                   'void',
                   [param('ns3::Ptr< ns3::Packet >', 'p')])
    ## wimax-net-device.h (module 'wimax'): bool ns3::WimaxNetDevice::DoSend(ns3::Ptr<ns3::Packet> packet, ns3::Mac48Address const & source, ns3::Mac48Address const & dest, uint16_t protocolNumber) [member function]
    cls.add_method('DoSend',
                   'bool',
                   [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Mac48Address const &', 'source'), param('ns3::Mac48Address const &', 'dest'), param('uint16_t', 'protocolNumber')],
                   is_pure_virtual=True, visibility='private', is_virtual=True)
    ## wimax-net-device.h (module 'wimax'): void ns3::WimaxNetDevice::DoReceive(ns3::Ptr<ns3::Packet> packet) [member function]
    cls.add_method('DoReceive',
                   'void',
                   [param('ns3::Ptr< ns3::Packet >', 'packet')],
                   is_pure_virtual=True, visibility='private', is_virtual=True)
    ## wimax-net-device.h (module 'wimax'): ns3::Ptr<ns3::WimaxChannel> ns3::WimaxNetDevice::DoGetChannel() const [member function]
    cls.add_method('DoGetChannel',
                   'ns3::Ptr< ns3::WimaxChannel >',
                   [],
                   is_const=True, visibility='private', is_virtual=True)
    return
def register_Ns3AddressChecker_methods(root_module, cls):
    """Register ns3::AddressChecker (address.h, module 'network') bindings.

    The checker exposes only its default and copy constructors; everything
    else is inherited from its AttributeChecker base registration.
    """
    # ns3::AddressChecker::AddressChecker() and the copy constructor,
    # registered in the same order the API scanner emitted them.
    for ctor_args in ([], [param('ns3::AddressChecker const &', 'arg0')]):
        cls.add_constructor(ctor_args)
    return
def register_Ns3AddressValue_methods(root_module, cls):
    """Register ns3::AddressValue (address.h, module 'network') bindings.

    AddressValue is the AttributeValue wrapper around ns3::Address: three
    constructors, the Copy/serialize/deserialize AttributeValue contract,
    and a Get/Set accessor pair.
    """
    # Constructors: default, copy, and construction from an ns3::Address,
    # in the order the API scanner emitted them.
    for ctor_args in ([],
                      [param('ns3::AddressValue const &', 'arg0')],
                      [param('ns3::Address const &', 'value')]):
        cls.add_constructor(ctor_args)
    # ns3::Ptr<ns3::AttributeValue> Copy() const [virtual]
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [],
                   is_const=True, is_virtual=True)
    # bool DeserializeFromString(std::string, Ptr<AttributeChecker const>) [virtual]
    cls.add_method('DeserializeFromString', 'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    # ns3::Address Get() const
    cls.add_method('Get', 'ns3::Address', [], is_const=True)
    # std::string SerializeToString(Ptr<AttributeChecker const>) const [virtual]
    cls.add_method('SerializeToString', 'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    # void Set(ns3::Address const &)
    cls.add_method('Set', 'void', [param('ns3::Address const &', 'value')])
    return
def register_Ns3BaseStationNetDevice_methods(root_module, cls):
    """Register the Python bindings for ns3::BaseStationNetDevice (bs-net-device.h).

    Declares the base-station device's constructors, configuration
    accessors (intervals, symbol counts, opportunity sizes), manager
    getters/setters, and the virtual overrides of WimaxNetDevice on *cls*
    so pybindgen can emit the C++ wrapper code.

    NOTE(review): this file is machine-generated by ns-3's Python API scanner;
    prefer regenerating the bindings over hand-editing the calls below, since
    each registration must mirror the C++ header exactly.
    """
    ## bs-net-device.h (module 'wimax'): static ns3::TypeId ns3::BaseStationNetDevice::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## bs-net-device.h (module 'wimax'): ns3::BaseStationNetDevice::BaseStationNetDevice() [constructor]
    cls.add_constructor([])
    ## bs-net-device.h (module 'wimax'): ns3::BaseStationNetDevice::BaseStationNetDevice(ns3::Ptr<ns3::Node> node, ns3::Ptr<ns3::WimaxPhy> phy) [constructor]
    cls.add_constructor([param('ns3::Ptr< ns3::Node >', 'node'), param('ns3::Ptr< ns3::WimaxPhy >', 'phy')])
    ## bs-net-device.h (module 'wimax'): ns3::BaseStationNetDevice::BaseStationNetDevice(ns3::Ptr<ns3::Node> node, ns3::Ptr<ns3::WimaxPhy> phy, ns3::Ptr<ns3::UplinkScheduler> uplinkScheduler, ns3::Ptr<ns3::BSScheduler> bsScheduler) [constructor]
    cls.add_constructor([param('ns3::Ptr< ns3::Node >', 'node'), param('ns3::Ptr< ns3::WimaxPhy >', 'phy'), param('ns3::Ptr< ns3::UplinkScheduler >', 'uplinkScheduler'), param('ns3::Ptr< ns3::BSScheduler >', 'bsScheduler')])
    ## bs-net-device.h (module 'wimax'): void ns3::BaseStationNetDevice::SetInitialRangingInterval(ns3::Time initialRangInterval) [member function]
    cls.add_method('SetInitialRangingInterval',
                   'void',
                   [param('ns3::Time', 'initialRangInterval')])
    ## bs-net-device.h (module 'wimax'): void ns3::BaseStationNetDevice::InitBaseStationNetDevice() [member function]
    cls.add_method('InitBaseStationNetDevice',
                   'void',
                   [])
    ## bs-net-device.h (module 'wimax'): ns3::Time ns3::BaseStationNetDevice::GetInitialRangingInterval() const [member function]
    cls.add_method('GetInitialRangingInterval',
                   'ns3::Time',
                   [],
                   is_const=True)
    ## bs-net-device.h (module 'wimax'): void ns3::BaseStationNetDevice::SetDcdInterval(ns3::Time dcdInterval) [member function]
    cls.add_method('SetDcdInterval',
                   'void',
                   [param('ns3::Time', 'dcdInterval')])
    ## bs-net-device.h (module 'wimax'): ns3::Time ns3::BaseStationNetDevice::GetDcdInterval() const [member function]
    cls.add_method('GetDcdInterval',
                   'ns3::Time',
                   [],
                   is_const=True)
    ## bs-net-device.h (module 'wimax'): void ns3::BaseStationNetDevice::SetUcdInterval(ns3::Time ucdInterval) [member function]
    cls.add_method('SetUcdInterval',
                   'void',
                   [param('ns3::Time', 'ucdInterval')])
    ## bs-net-device.h (module 'wimax'): ns3::Time ns3::BaseStationNetDevice::GetUcdInterval() const [member function]
    cls.add_method('GetUcdInterval',
                   'ns3::Time',
                   [],
                   is_const=True)
    ## bs-net-device.h (module 'wimax'): void ns3::BaseStationNetDevice::SetIntervalT8(ns3::Time interval) [member function]
    cls.add_method('SetIntervalT8',
                   'void',
                   [param('ns3::Time', 'interval')])
    ## bs-net-device.h (module 'wimax'): ns3::Time ns3::BaseStationNetDevice::GetIntervalT8() const [member function]
    cls.add_method('GetIntervalT8',
                   'ns3::Time',
                   [],
                   is_const=True)
    ## bs-net-device.h (module 'wimax'): void ns3::BaseStationNetDevice::SetMaxRangingCorrectionRetries(uint8_t maxRangCorrectionRetries) [member function]
    cls.add_method('SetMaxRangingCorrectionRetries',
                   'void',
                   [param('uint8_t', 'maxRangCorrectionRetries')])
    ## bs-net-device.h (module 'wimax'): uint8_t ns3::BaseStationNetDevice::GetMaxRangingCorrectionRetries() const [member function]
    cls.add_method('GetMaxRangingCorrectionRetries',
                   'uint8_t',
                   [],
                   is_const=True)
    ## bs-net-device.h (module 'wimax'): void ns3::BaseStationNetDevice::SetMaxInvitedRangRetries(uint8_t maxInvitedRangRetries) [member function]
    cls.add_method('SetMaxInvitedRangRetries',
                   'void',
                   [param('uint8_t', 'maxInvitedRangRetries')])
    ## bs-net-device.h (module 'wimax'): uint8_t ns3::BaseStationNetDevice::GetMaxInvitedRangRetries() const [member function]
    cls.add_method('GetMaxInvitedRangRetries',
                   'uint8_t',
                   [],
                   is_const=True)
    ## bs-net-device.h (module 'wimax'): void ns3::BaseStationNetDevice::SetRangReqOppSize(uint8_t rangReqOppSize) [member function]
    cls.add_method('SetRangReqOppSize',
                   'void',
                   [param('uint8_t', 'rangReqOppSize')])
    ## bs-net-device.h (module 'wimax'): uint8_t ns3::BaseStationNetDevice::GetRangReqOppSize() const [member function]
    cls.add_method('GetRangReqOppSize',
                   'uint8_t',
                   [],
                   is_const=True)
    ## bs-net-device.h (module 'wimax'): void ns3::BaseStationNetDevice::SetBwReqOppSize(uint8_t bwReqOppSize) [member function]
    cls.add_method('SetBwReqOppSize',
                   'void',
                   [param('uint8_t', 'bwReqOppSize')])
    ## bs-net-device.h (module 'wimax'): uint8_t ns3::BaseStationNetDevice::GetBwReqOppSize() const [member function]
    cls.add_method('GetBwReqOppSize',
                   'uint8_t',
                   [],
                   is_const=True)
    ## bs-net-device.h (module 'wimax'): void ns3::BaseStationNetDevice::SetNrDlSymbols(uint32_t dlSymbols) [member function]
    cls.add_method('SetNrDlSymbols',
                   'void',
                   [param('uint32_t', 'dlSymbols')])
    ## bs-net-device.h (module 'wimax'): uint32_t ns3::BaseStationNetDevice::GetNrDlSymbols() const [member function]
    cls.add_method('GetNrDlSymbols',
                   'uint32_t',
                   [],
                   is_const=True)
    ## bs-net-device.h (module 'wimax'): void ns3::BaseStationNetDevice::SetNrUlSymbols(uint32_t ulSymbols) [member function]
    cls.add_method('SetNrUlSymbols',
                   'void',
                   [param('uint32_t', 'ulSymbols')])
    ## bs-net-device.h (module 'wimax'): uint32_t ns3::BaseStationNetDevice::GetNrUlSymbols() const [member function]
    cls.add_method('GetNrUlSymbols',
                   'uint32_t',
                   [],
                   is_const=True)
    ## bs-net-device.h (module 'wimax'): uint32_t ns3::BaseStationNetDevice::GetNrDcdSent() const [member function]
    cls.add_method('GetNrDcdSent',
                   'uint32_t',
                   [],
                   is_const=True)
    ## bs-net-device.h (module 'wimax'): uint32_t ns3::BaseStationNetDevice::GetNrUcdSent() const [member function]
    cls.add_method('GetNrUcdSent',
                   'uint32_t',
                   [],
                   is_const=True)
    ## bs-net-device.h (module 'wimax'): ns3::Time ns3::BaseStationNetDevice::GetDlSubframeStartTime() const [member function]
    cls.add_method('GetDlSubframeStartTime',
                   'ns3::Time',
                   [],
                   is_const=True)
    ## bs-net-device.h (module 'wimax'): ns3::Time ns3::BaseStationNetDevice::GetUlSubframeStartTime() const [member function]
    cls.add_method('GetUlSubframeStartTime',
                   'ns3::Time',
                   [],
                   is_const=True)
    ## bs-net-device.h (module 'wimax'): uint8_t ns3::BaseStationNetDevice::GetRangingOppNumber() const [member function]
    cls.add_method('GetRangingOppNumber',
                   'uint8_t',
                   [],
                   is_const=True)
    ## bs-net-device.h (module 'wimax'): ns3::Ptr<ns3::SSManager> ns3::BaseStationNetDevice::GetSSManager() const [member function]
    cls.add_method('GetSSManager',
                   'ns3::Ptr< ns3::SSManager >',
                   [],
                   is_const=True)
    ## bs-net-device.h (module 'wimax'): void ns3::BaseStationNetDevice::SetSSManager(ns3::Ptr<ns3::SSManager> ssManager) [member function]
    cls.add_method('SetSSManager',
                   'void',
                   [param('ns3::Ptr< ns3::SSManager >', 'ssManager')])
    ## bs-net-device.h (module 'wimax'): ns3::Ptr<ns3::UplinkScheduler> ns3::BaseStationNetDevice::GetUplinkScheduler() const [member function]
    cls.add_method('GetUplinkScheduler',
                   'ns3::Ptr< ns3::UplinkScheduler >',
                   [],
                   is_const=True)
    ## bs-net-device.h (module 'wimax'): void ns3::BaseStationNetDevice::SetUplinkScheduler(ns3::Ptr<ns3::UplinkScheduler> ulScheduler) [member function]
    cls.add_method('SetUplinkScheduler',
                   'void',
                   [param('ns3::Ptr< ns3::UplinkScheduler >', 'ulScheduler')])
    ## bs-net-device.h (module 'wimax'): ns3::Ptr<ns3::BSLinkManager> ns3::BaseStationNetDevice::GetLinkManager() const [member function]
    cls.add_method('GetLinkManager',
                   'ns3::Ptr< ns3::BSLinkManager >',
                   [],
                   is_const=True)
    ## bs-net-device.h (module 'wimax'): void ns3::BaseStationNetDevice::SetBSScheduler(ns3::Ptr<ns3::BSScheduler> bsSchedule) [member function]
    cls.add_method('SetBSScheduler',
                   'void',
                   [param('ns3::Ptr< ns3::BSScheduler >', 'bsSchedule')])
    ## bs-net-device.h (module 'wimax'): ns3::Ptr<ns3::BSScheduler> ns3::BaseStationNetDevice::GetBSScheduler() const [member function]
    cls.add_method('GetBSScheduler',
                   'ns3::Ptr< ns3::BSScheduler >',
                   [],
                   is_const=True)
    ## bs-net-device.h (module 'wimax'): void ns3::BaseStationNetDevice::SetLinkManager(ns3::Ptr<ns3::BSLinkManager> linkManager) [member function]
    cls.add_method('SetLinkManager',
                   'void',
                   [param('ns3::Ptr< ns3::BSLinkManager >', 'linkManager')])
    ## bs-net-device.h (module 'wimax'): ns3::Ptr<ns3::IpcsClassifier> ns3::BaseStationNetDevice::GetBsClassifier() const [member function]
    cls.add_method('GetBsClassifier',
                   'ns3::Ptr< ns3::IpcsClassifier >',
                   [],
                   is_const=True)
    ## bs-net-device.h (module 'wimax'): void ns3::BaseStationNetDevice::SetBsClassifier(ns3::Ptr<ns3::IpcsClassifier> classifier) [member function]
    cls.add_method('SetBsClassifier',
                   'void',
                   [param('ns3::Ptr< ns3::IpcsClassifier >', 'classifier')])
    ## bs-net-device.h (module 'wimax'): ns3::Time ns3::BaseStationNetDevice::GetPsDuration() const [member function]
    cls.add_method('GetPsDuration',
                   'ns3::Time',
                   [],
                   is_const=True)
    ## bs-net-device.h (module 'wimax'): ns3::Time ns3::BaseStationNetDevice::GetSymbolDuration() const [member function]
    cls.add_method('GetSymbolDuration',
                   'ns3::Time',
                   [],
                   is_const=True)
    ## bs-net-device.h (module 'wimax'): void ns3::BaseStationNetDevice::Start() [member function]
    cls.add_method('Start',
                   'void',
                   [],
                   is_virtual=True)
    ## bs-net-device.h (module 'wimax'): void ns3::BaseStationNetDevice::Stop() [member function]
    cls.add_method('Stop',
                   'void',
                   [],
                   is_virtual=True)
    ## bs-net-device.h (module 'wimax'): bool ns3::BaseStationNetDevice::Enqueue(ns3::Ptr<ns3::Packet> packet, ns3::MacHeaderType const & hdrType, ns3::Ptr<ns3::WimaxConnection> connection) [member function]
    cls.add_method('Enqueue',
                   'bool',
                   [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::MacHeaderType const &', 'hdrType'), param('ns3::Ptr< ns3::WimaxConnection >', 'connection')],
                   is_virtual=True)
    ## bs-net-device.h (module 'wimax'): ns3::Ptr<ns3::WimaxConnection> ns3::BaseStationNetDevice::GetConnection(ns3::Cid cid) [member function]
    cls.add_method('GetConnection',
                   'ns3::Ptr< ns3::WimaxConnection >',
                   [param('ns3::Cid', 'cid')])
    ## bs-net-device.h (module 'wimax'): void ns3::BaseStationNetDevice::MarkUplinkAllocations() [member function]
    cls.add_method('MarkUplinkAllocations',
                   'void',
                   [])
    ## bs-net-device.h (module 'wimax'): void ns3::BaseStationNetDevice::MarkRangingOppStart(ns3::Time rangingOppStartTime) [member function]
    cls.add_method('MarkRangingOppStart',
                   'void',
                   [param('ns3::Time', 'rangingOppStartTime')])
    ## bs-net-device.h (module 'wimax'): ns3::Ptr<ns3::BsServiceFlowManager> ns3::BaseStationNetDevice::GetServiceFlowManager() const [member function]
    cls.add_method('GetServiceFlowManager',
                   'ns3::Ptr< ns3::BsServiceFlowManager >',
                   [],
                   is_const=True)
    ## bs-net-device.h (module 'wimax'): void ns3::BaseStationNetDevice::SetServiceFlowManager(ns3::Ptr<ns3::BsServiceFlowManager> arg0) [member function]
    cls.add_method('SetServiceFlowManager',
                   'void',
                   [param('ns3::Ptr< ns3::BsServiceFlowManager >', 'arg0')])
    ## bs-net-device.h (module 'wimax'): void ns3::BaseStationNetDevice::DoDispose() [member function]
    cls.add_method('DoDispose',
                   'void',
                   [],
                   visibility='private', is_virtual=True)
    ## bs-net-device.h (module 'wimax'): bool ns3::BaseStationNetDevice::DoSend(ns3::Ptr<ns3::Packet> packet, ns3::Mac48Address const & source, ns3::Mac48Address const & dest, uint16_t protocolNumber) [member function]
    cls.add_method('DoSend',
                   'bool',
                   [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Mac48Address const &', 'source'), param('ns3::Mac48Address const &', 'dest'), param('uint16_t', 'protocolNumber')],
                   visibility='private', is_virtual=True)
    ## bs-net-device.h (module 'wimax'): void ns3::BaseStationNetDevice::DoReceive(ns3::Ptr<ns3::Packet> packet) [member function]
    cls.add_method('DoReceive',
                   'void',
                   [param('ns3::Ptr< ns3::Packet >', 'packet')],
                   visibility='private', is_virtual=True)
    return
def register_Ns3SimpleOfdmWimaxChannel_methods(root_module, cls):
    """Register Python bindings for ns3::SimpleOfdmWimaxChannel.

    Generated pybindgen registration code: each ``## ...`` comment quotes the
    C++ declaration being bound, followed by the matching add_constructor /
    add_method call.  Do not hand-edit the binding strings.
    """
    ## simple-ofdm-wimax-channel.h (module 'wimax'): ns3::SimpleOfdmWimaxChannel::SimpleOfdmWimaxChannel(ns3::SimpleOfdmWimaxChannel const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::SimpleOfdmWimaxChannel const &', 'arg0')])
    ## simple-ofdm-wimax-channel.h (module 'wimax'): ns3::SimpleOfdmWimaxChannel::SimpleOfdmWimaxChannel() [constructor]
    cls.add_constructor([])
    ## simple-ofdm-wimax-channel.h (module 'wimax'): ns3::SimpleOfdmWimaxChannel::SimpleOfdmWimaxChannel(ns3::SimpleOfdmWimaxChannel::PropModel propModel) [constructor]
    cls.add_constructor([param('ns3::SimpleOfdmWimaxChannel::PropModel', 'propModel')])
    ## simple-ofdm-wimax-channel.h (module 'wimax'): int64_t ns3::SimpleOfdmWimaxChannel::AssignStreams(int64_t stream) [member function]
    cls.add_method('AssignStreams',
                   'int64_t',
                   [param('int64_t', 'stream')],
                   is_virtual=True)
    ## simple-ofdm-wimax-channel.h (module 'wimax'): void ns3::SimpleOfdmWimaxChannel::Send(ns3::Time BlockTime, uint32_t burstSize, ns3::Ptr<ns3::WimaxPhy> phy, bool isFirstBlock, bool isLastBlock, uint64_t frequency, ns3::WimaxPhy::ModulationType modulationType, uint8_t direction, double txPowerDbm, ns3::Ptr<ns3::PacketBurst> burst) [member function]
    cls.add_method('Send',
                   'void',
                   [param('ns3::Time', 'BlockTime'), param('uint32_t', 'burstSize'), param('ns3::Ptr< ns3::WimaxPhy >', 'phy'), param('bool', 'isFirstBlock'), param('bool', 'isLastBlock'), param('uint64_t', 'frequency'), param('ns3::WimaxPhy::ModulationType', 'modulationType'), param('uint8_t', 'direction'), param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::PacketBurst >', 'burst')])
    ## simple-ofdm-wimax-channel.h (module 'wimax'): void ns3::SimpleOfdmWimaxChannel::SetPropagationModel(ns3::SimpleOfdmWimaxChannel::PropModel propModel) [member function]
    cls.add_method('SetPropagationModel',
                   'void',
                   [param('ns3::SimpleOfdmWimaxChannel::PropModel', 'propModel')])
    ## simple-ofdm-wimax-channel.h (module 'wimax'): void ns3::SimpleOfdmWimaxChannel::DoAttach(ns3::Ptr<ns3::WimaxPhy> phy) [member function]
    cls.add_method('DoAttach',
                   'void',
                   [param('ns3::Ptr< ns3::WimaxPhy >', 'phy')],
                   visibility='private', is_virtual=True)
    ## simple-ofdm-wimax-channel.h (module 'wimax'): ns3::Ptr<ns3::NetDevice> ns3::SimpleOfdmWimaxChannel::DoGetDevice(uint32_t i) const [member function]
    cls.add_method('DoGetDevice',
                   'ns3::Ptr< ns3::NetDevice >',
                   [param('uint32_t', 'i')],
                   is_const=True, visibility='private', is_virtual=True)
    ## simple-ofdm-wimax-channel.h (module 'wimax'): uint32_t ns3::SimpleOfdmWimaxChannel::DoGetNDevices() const [member function]
    cls.add_method('DoGetNDevices',
                   'uint32_t',
                   [],
                   is_const=True, visibility='private', is_virtual=True)
    return
def register_Ns3SubscriberStationNetDevice_methods(root_module, cls):
    """Register Python bindings for ns3::SubscriberStationNetDevice.

    Generated pybindgen registration code: each ``## ...`` comment quotes the
    C++ declaration being bound, followed by the matching add_* call.
    Do not hand-edit the binding strings.
    """
    ## ss-net-device.h (module 'wimax'): ns3::SubscriberStationNetDevice::m_linkManager [variable]
    cls.add_instance_attribute('m_linkManager', 'ns3::Ptr< ns3::SSLinkManager >', is_const=False)
    ## ss-net-device.h (module 'wimax'): static ns3::TypeId ns3::SubscriberStationNetDevice::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## ss-net-device.h (module 'wimax'): ns3::SubscriberStationNetDevice::SubscriberStationNetDevice() [constructor]
    cls.add_constructor([])
    ## ss-net-device.h (module 'wimax'): ns3::SubscriberStationNetDevice::SubscriberStationNetDevice(ns3::Ptr<ns3::Node> arg0, ns3::Ptr<ns3::WimaxPhy> arg1) [constructor]
    cls.add_constructor([param('ns3::Ptr< ns3::Node >', 'arg0'), param('ns3::Ptr< ns3::WimaxPhy >', 'arg1')])
    ## ss-net-device.h (module 'wimax'): void ns3::SubscriberStationNetDevice::InitSubscriberStationNetDevice() [member function]
    cls.add_method('InitSubscriberStationNetDevice',
                   'void',
                   [])
    ## ss-net-device.h (module 'wimax'): void ns3::SubscriberStationNetDevice::SetLostDlMapInterval(ns3::Time lostDlMapInterval) [member function]
    cls.add_method('SetLostDlMapInterval',
                   'void',
                   [param('ns3::Time', 'lostDlMapInterval')])
    ## ss-net-device.h (module 'wimax'): ns3::Time ns3::SubscriberStationNetDevice::GetLostDlMapInterval() const [member function]
    cls.add_method('GetLostDlMapInterval',
                   'ns3::Time',
                   [],
                   is_const=True)
    ## ss-net-device.h (module 'wimax'): void ns3::SubscriberStationNetDevice::SetLostUlMapInterval(ns3::Time lostUlMapInterval) [member function]
    cls.add_method('SetLostUlMapInterval',
                   'void',
                   [param('ns3::Time', 'lostUlMapInterval')])
    ## ss-net-device.h (module 'wimax'): ns3::Time ns3::SubscriberStationNetDevice::GetLostUlMapInterval() const [member function]
    cls.add_method('GetLostUlMapInterval',
                   'ns3::Time',
                   [],
                   is_const=True)
    ## ss-net-device.h (module 'wimax'): void ns3::SubscriberStationNetDevice::SetMaxDcdInterval(ns3::Time maxDcdInterval) [member function]
    cls.add_method('SetMaxDcdInterval',
                   'void',
                   [param('ns3::Time', 'maxDcdInterval')])
    ## ss-net-device.h (module 'wimax'): ns3::Time ns3::SubscriberStationNetDevice::GetMaxDcdInterval() const [member function]
    cls.add_method('GetMaxDcdInterval',
                   'ns3::Time',
                   [],
                   is_const=True)
    ## ss-net-device.h (module 'wimax'): void ns3::SubscriberStationNetDevice::SetMaxUcdInterval(ns3::Time maxUcdInterval) [member function]
    cls.add_method('SetMaxUcdInterval',
                   'void',
                   [param('ns3::Time', 'maxUcdInterval')])
    ## ss-net-device.h (module 'wimax'): ns3::Time ns3::SubscriberStationNetDevice::GetMaxUcdInterval() const [member function]
    cls.add_method('GetMaxUcdInterval',
                   'ns3::Time',
                   [],
                   is_const=True)
    ## ss-net-device.h (module 'wimax'): void ns3::SubscriberStationNetDevice::SetIntervalT1(ns3::Time interval1) [member function]
    cls.add_method('SetIntervalT1',
                   'void',
                   [param('ns3::Time', 'interval1')])
    ## ss-net-device.h (module 'wimax'): ns3::Time ns3::SubscriberStationNetDevice::GetIntervalT1() const [member function]
    cls.add_method('GetIntervalT1',
                   'ns3::Time',
                   [],
                   is_const=True)
    ## ss-net-device.h (module 'wimax'): void ns3::SubscriberStationNetDevice::SetIntervalT2(ns3::Time interval2) [member function]
    cls.add_method('SetIntervalT2',
                   'void',
                   [param('ns3::Time', 'interval2')])
    ## ss-net-device.h (module 'wimax'): ns3::Time ns3::SubscriberStationNetDevice::GetIntervalT2() const [member function]
    cls.add_method('GetIntervalT2',
                   'ns3::Time',
                   [],
                   is_const=True)
    ## ss-net-device.h (module 'wimax'): void ns3::SubscriberStationNetDevice::SetIntervalT3(ns3::Time interval3) [member function]
    cls.add_method('SetIntervalT3',
                   'void',
                   [param('ns3::Time', 'interval3')])
    ## ss-net-device.h (module 'wimax'): ns3::Time ns3::SubscriberStationNetDevice::GetIntervalT3() const [member function]
    cls.add_method('GetIntervalT3',
                   'ns3::Time',
                   [],
                   is_const=True)
    ## ss-net-device.h (module 'wimax'): void ns3::SubscriberStationNetDevice::SetIntervalT7(ns3::Time interval7) [member function]
    cls.add_method('SetIntervalT7',
                   'void',
                   [param('ns3::Time', 'interval7')])
    ## ss-net-device.h (module 'wimax'): ns3::Time ns3::SubscriberStationNetDevice::GetIntervalT7() const [member function]
    cls.add_method('GetIntervalT7',
                   'ns3::Time',
                   [],
                   is_const=True)
    ## ss-net-device.h (module 'wimax'): void ns3::SubscriberStationNetDevice::SetIntervalT12(ns3::Time interval12) [member function]
    cls.add_method('SetIntervalT12',
                   'void',
                   [param('ns3::Time', 'interval12')])
    ## ss-net-device.h (module 'wimax'): ns3::Time ns3::SubscriberStationNetDevice::GetIntervalT12() const [member function]
    cls.add_method('GetIntervalT12',
                   'ns3::Time',
                   [],
                   is_const=True)
    ## ss-net-device.h (module 'wimax'): void ns3::SubscriberStationNetDevice::SetIntervalT20(ns3::Time interval20) [member function]
    cls.add_method('SetIntervalT20',
                   'void',
                   [param('ns3::Time', 'interval20')])
    ## ss-net-device.h (module 'wimax'): ns3::Time ns3::SubscriberStationNetDevice::GetIntervalT20() const [member function]
    cls.add_method('GetIntervalT20',
                   'ns3::Time',
                   [],
                   is_const=True)
    ## ss-net-device.h (module 'wimax'): void ns3::SubscriberStationNetDevice::SetIntervalT21(ns3::Time interval21) [member function]
    cls.add_method('SetIntervalT21',
                   'void',
                   [param('ns3::Time', 'interval21')])
    ## ss-net-device.h (module 'wimax'): ns3::Time ns3::SubscriberStationNetDevice::GetIntervalT21() const [member function]
    cls.add_method('GetIntervalT21',
                   'ns3::Time',
                   [],
                   is_const=True)
    ## ss-net-device.h (module 'wimax'): void ns3::SubscriberStationNetDevice::SetMaxContentionRangingRetries(uint8_t maxContentionRangingRetries) [member function]
    cls.add_method('SetMaxContentionRangingRetries',
                   'void',
                   [param('uint8_t', 'maxContentionRangingRetries')])
    ## ss-net-device.h (module 'wimax'): uint8_t ns3::SubscriberStationNetDevice::GetMaxContentionRangingRetries() const [member function]
    cls.add_method('GetMaxContentionRangingRetries',
                   'uint8_t',
                   [],
                   is_const=True)
    ## ss-net-device.h (module 'wimax'): void ns3::SubscriberStationNetDevice::SetBasicConnection(ns3::Ptr<ns3::WimaxConnection> basicConnection) [member function]
    cls.add_method('SetBasicConnection',
                   'void',
                   [param('ns3::Ptr< ns3::WimaxConnection >', 'basicConnection')])
    ## ss-net-device.h (module 'wimax'): ns3::Ptr<ns3::WimaxConnection> ns3::SubscriberStationNetDevice::GetBasicConnection() const [member function]
    cls.add_method('GetBasicConnection',
                   'ns3::Ptr< ns3::WimaxConnection >',
                   [],
                   is_const=True)
    ## ss-net-device.h (module 'wimax'): void ns3::SubscriberStationNetDevice::SetPrimaryConnection(ns3::Ptr<ns3::WimaxConnection> primaryConnection) [member function]
    cls.add_method('SetPrimaryConnection',
                   'void',
                   [param('ns3::Ptr< ns3::WimaxConnection >', 'primaryConnection')])
    ## ss-net-device.h (module 'wimax'): ns3::Ptr<ns3::WimaxConnection> ns3::SubscriberStationNetDevice::GetPrimaryConnection() const [member function]
    cls.add_method('GetPrimaryConnection',
                   'ns3::Ptr< ns3::WimaxConnection >',
                   [],
                   is_const=True)
    ## ss-net-device.h (module 'wimax'): ns3::Cid ns3::SubscriberStationNetDevice::GetBasicCid() const [member function]
    cls.add_method('GetBasicCid',
                   'ns3::Cid',
                   [],
                   is_const=True)
    ## ss-net-device.h (module 'wimax'): ns3::Cid ns3::SubscriberStationNetDevice::GetPrimaryCid() const [member function]
    cls.add_method('GetPrimaryCid',
                   'ns3::Cid',
                   [],
                   is_const=True)
    ## ss-net-device.h (module 'wimax'): void ns3::SubscriberStationNetDevice::SetModulationType(ns3::WimaxPhy::ModulationType modulationType) [member function]
    cls.add_method('SetModulationType',
                   'void',
                   [param('ns3::WimaxPhy::ModulationType', 'modulationType')])
    ## ss-net-device.h (module 'wimax'): ns3::WimaxPhy::ModulationType ns3::SubscriberStationNetDevice::GetModulationType() const [member function]
    cls.add_method('GetModulationType',
                   'ns3::WimaxPhy::ModulationType',
                   [],
                   is_const=True)
    ## ss-net-device.h (module 'wimax'): void ns3::SubscriberStationNetDevice::SetAreManagementConnectionsAllocated(bool areManagementConnectionsAllocated) [member function]
    cls.add_method('SetAreManagementConnectionsAllocated',
                   'void',
                   [param('bool', 'areManagementConnectionsAllocated')])
    ## ss-net-device.h (module 'wimax'): bool ns3::SubscriberStationNetDevice::GetAreManagementConnectionsAllocated() const [member function]
    cls.add_method('GetAreManagementConnectionsAllocated',
                   'bool',
                   [],
                   is_const=True)
    ## ss-net-device.h (module 'wimax'): void ns3::SubscriberStationNetDevice::SetAreServiceFlowsAllocated(bool areServiceFlowsAllocated) [member function]
    cls.add_method('SetAreServiceFlowsAllocated',
                   'void',
                   [param('bool', 'areServiceFlowsAllocated')])
    ## ss-net-device.h (module 'wimax'): bool ns3::SubscriberStationNetDevice::GetAreServiceFlowsAllocated() const [member function]
    cls.add_method('GetAreServiceFlowsAllocated',
                   'bool',
                   [],
                   is_const=True)
    ## ss-net-device.h (module 'wimax'): ns3::Ptr<ns3::SSScheduler> ns3::SubscriberStationNetDevice::GetScheduler() const [member function]
    cls.add_method('GetScheduler',
                   'ns3::Ptr< ns3::SSScheduler >',
                   [],
                   is_const=True)
    ## ss-net-device.h (module 'wimax'): void ns3::SubscriberStationNetDevice::SetScheduler(ns3::Ptr<ns3::SSScheduler> ssScheduler) [member function]
    cls.add_method('SetScheduler',
                   'void',
                   [param('ns3::Ptr< ns3::SSScheduler >', 'ssScheduler')])
    ## ss-net-device.h (module 'wimax'): bool ns3::SubscriberStationNetDevice::HasServiceFlows() const [member function]
    cls.add_method('HasServiceFlows',
                   'bool',
                   [],
                   is_const=True)
    ## ss-net-device.h (module 'wimax'): bool ns3::SubscriberStationNetDevice::Enqueue(ns3::Ptr<ns3::Packet> packet, ns3::MacHeaderType const & hdrType, ns3::Ptr<ns3::WimaxConnection> connection) [member function]
    cls.add_method('Enqueue',
                   'bool',
                   [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::MacHeaderType const &', 'hdrType'), param('ns3::Ptr< ns3::WimaxConnection >', 'connection')],
                   is_virtual=True)
    ## ss-net-device.h (module 'wimax'): void ns3::SubscriberStationNetDevice::SendBurst(uint8_t uiuc, uint16_t nrSymbols, ns3::Ptr<ns3::WimaxConnection> connection, ns3::MacHeaderType::HeaderType packetType=::ns3::MacHeaderType::HEADER_TYPE_GENERIC) [member function]
    cls.add_method('SendBurst',
                   'void',
                   [param('uint8_t', 'uiuc'), param('uint16_t', 'nrSymbols'), param('ns3::Ptr< ns3::WimaxConnection >', 'connection'), param('ns3::MacHeaderType::HeaderType', 'packetType', default_value='::ns3::MacHeaderType::HEADER_TYPE_GENERIC')])
    ## ss-net-device.h (module 'wimax'): void ns3::SubscriberStationNetDevice::Start() [member function]
    cls.add_method('Start',
                   'void',
                   [],
                   is_virtual=True)
    ## ss-net-device.h (module 'wimax'): void ns3::SubscriberStationNetDevice::Stop() [member function]
    cls.add_method('Stop',
                   'void',
                   [],
                   is_virtual=True)
    ## ss-net-device.h (module 'wimax'): void ns3::SubscriberStationNetDevice::AddServiceFlow(ns3::ServiceFlow * sf) [member function]
    cls.add_method('AddServiceFlow',
                   'void',
                   [param('ns3::ServiceFlow *', 'sf')])
    ## ss-net-device.h (module 'wimax'): void ns3::SubscriberStationNetDevice::AddServiceFlow(ns3::ServiceFlow sf) [member function]
    cls.add_method('AddServiceFlow',
                   'void',
                   [param('ns3::ServiceFlow', 'sf')])
    ## ss-net-device.h (module 'wimax'): void ns3::SubscriberStationNetDevice::SetTimer(ns3::EventId eventId, ns3::EventId & event) [member function]
    cls.add_method('SetTimer',
                   'void',
                   [param('ns3::EventId', 'eventId'), param('ns3::EventId &', 'event')])
    ## ss-net-device.h (module 'wimax'): bool ns3::SubscriberStationNetDevice::IsRegistered() const [member function]
    cls.add_method('IsRegistered',
                   'bool',
                   [],
                   is_const=True)
    ## ss-net-device.h (module 'wimax'): ns3::Time ns3::SubscriberStationNetDevice::GetTimeToAllocation(ns3::Time defferTime) [member function]
    cls.add_method('GetTimeToAllocation',
                   'ns3::Time',
                   [param('ns3::Time', 'defferTime')])
    ## ss-net-device.h (module 'wimax'): ns3::Ptr<ns3::IpcsClassifier> ns3::SubscriberStationNetDevice::GetIpcsClassifier() const [member function]
    cls.add_method('GetIpcsClassifier',
                   'ns3::Ptr< ns3::IpcsClassifier >',
                   [],
                   is_const=True)
    ## ss-net-device.h (module 'wimax'): void ns3::SubscriberStationNetDevice::SetIpcsPacketClassifier(ns3::Ptr<ns3::IpcsClassifier> arg0) [member function]
    cls.add_method('SetIpcsPacketClassifier',
                   'void',
                   [param('ns3::Ptr< ns3::IpcsClassifier >', 'arg0')])
    ## ss-net-device.h (module 'wimax'): ns3::Ptr<ns3::SSLinkManager> ns3::SubscriberStationNetDevice::GetLinkManager() const [member function]
    cls.add_method('GetLinkManager',
                   'ns3::Ptr< ns3::SSLinkManager >',
                   [],
                   is_const=True)
    ## ss-net-device.h (module 'wimax'): void ns3::SubscriberStationNetDevice::SetLinkManager(ns3::Ptr<ns3::SSLinkManager> arg0) [member function]
    cls.add_method('SetLinkManager',
                   'void',
                   [param('ns3::Ptr< ns3::SSLinkManager >', 'arg0')])
    ## ss-net-device.h (module 'wimax'): ns3::Ptr<ns3::SsServiceFlowManager> ns3::SubscriberStationNetDevice::GetServiceFlowManager() const [member function]
    cls.add_method('GetServiceFlowManager',
                   'ns3::Ptr< ns3::SsServiceFlowManager >',
                   [],
                   is_const=True)
    ## ss-net-device.h (module 'wimax'): void ns3::SubscriberStationNetDevice::SetServiceFlowManager(ns3::Ptr<ns3::SsServiceFlowManager> arg0) [member function]
    cls.add_method('SetServiceFlowManager',
                   'void',
                   [param('ns3::Ptr< ns3::SsServiceFlowManager >', 'arg0')])
    ## ss-net-device.h (module 'wimax'): void ns3::SubscriberStationNetDevice::DoDispose() [member function]
    cls.add_method('DoDispose',
                   'void',
                   [],
                   visibility='private', is_virtual=True)
    ## ss-net-device.h (module 'wimax'): bool ns3::SubscriberStationNetDevice::DoSend(ns3::Ptr<ns3::Packet> packet, ns3::Mac48Address const & source, ns3::Mac48Address const & dest, uint16_t protocolNumber) [member function]
    cls.add_method('DoSend',
                   'bool',
                   [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Mac48Address const &', 'source'), param('ns3::Mac48Address const &', 'dest'), param('uint16_t', 'protocolNumber')],
                   visibility='private', is_virtual=True)
    ## ss-net-device.h (module 'wimax'): void ns3::SubscriberStationNetDevice::DoReceive(ns3::Ptr<ns3::Packet> packet) [member function]
    cls.add_method('DoReceive',
                   'void',
                   [param('ns3::Ptr< ns3::Packet >', 'packet')],
                   visibility='private', is_virtual=True)
    return
def register_functions(root_module):
    """Register the wimax module's free functions and its submodules' functions.

    Binds the C function ns3::CRC8Calculate and then delegates to the
    per-submodule registration helpers.
    """
    ## crc8.h (module 'wimax'): extern uint8_t ns3::CRC8Calculate(uint8_t const * data, int length) [free function]
    root_module.add_function('CRC8Calculate',
                             'uint8_t',
                             [param('uint8_t const *', 'data'), param('int', 'length')])
    register_functions_ns3_FatalImpl(root_module.get_submodule('FatalImpl'), root_module)
    register_functions_ns3_internal(root_module.get_submodule('internal'), root_module)
    return
def register_functions_ns3_FatalImpl(module, root_module):
    """No free functions to bind in the ns3::FatalImpl submodule."""
    pass
def register_functions_ns3_internal(module, root_module):
    """No free functions to bind in the ns3::internal submodule."""
    pass
def main():
    """Build the full module description and write the generated C++ bindings to stdout."""
    sink = FileCodeSink(sys.stdout)
    root = module_init()
    register_types(root)
    register_methods(root)
    register_functions(root)
    root.generate(sink)
# Allow running this generator directly as a script.
if __name__ == '__main__':
    main()
| binhqnguyen/lena-local | src/wimax/bindings/modulegen__gcc_ILP32.py | Python | gpl-2.0 | 757,508 |
from crystal_filter_middleware.handlers import CrystalBaseHandler
from swift.common.swob import HTTPMethodNotAllowed
from swift.common.wsgi import make_subrequest
from swift.common.utils import public
import operator
import json
import copy
import urllib
import os
import re
# Maps the textual operators used in filter conditions (e.g. the
# object_size policy field, whose first element is one of these keys)
# to their Python callables.
mappings = {'>': operator.gt, '>=': operator.ge,
            '==': operator.eq, '<=': operator.le, '<': operator.lt,
            '!=': operator.ne, "OR": operator.or_, "AND": operator.and_}
class CrystalProxyHandler(CrystalBaseHandler):
    """Crystal filter-middleware handler for the Swift proxy server.

    For each request it looks up the filter pipeline configured for the
    target project/container in Redis, splits the filters into a
    proxy-side and an object-server-side execution list, runs the proxy
    pipeline, and forwards the object-server list to the object node via
    the 'crystal.filters' header.

    NOTE(review): several attributes used here (self.account,
    self.container, self.obj, self.method, self.redis,
    self.is_crystal_valid_request, _build_pipeline) are presumably
    provided by CrystalBaseHandler -- confirm against the base class.
    """

    def __init__(self, request, conf, app, logger):
        super(CrystalProxyHandler, self).__init__(request, conf,
                                                  app, logger)
        self.etag = None
        self.filter_exec_list = None

    def _get_dynamic_filters(self):
        # Dynamic binding of policies: using a Lua script that executes
        # a hgetall on the first matching key of a list and also returns
        # the global filters
        lua_sha = self.conf.get('LUA_get_pipeline_sha')
        args = (self.account.replace('AUTH_', ''), '' if self.container is None else self.container)
        redis_list = self.redis.evalsha(lua_sha, 0, *args)
        index = redis_list.index("@@@@")  # Separator between pipeline and global filters
        # The flat Redis reply alternates key, value, key, value, ...;
        # rebuild the two dicts on either side of the separator.
        self.filter_list = dict(zip(redis_list[0:index:2], redis_list[1:index:2]))
        self.global_filters = dict(zip(redis_list[index+1::2], redis_list[index+2::2]))
        self.proxy_filter_exec_list = {}
        self.object_filter_exec_list = {}
        if self.global_filters or self.filter_list:
            self.proxy_filter_exec_list = self._build_filter_execution_list('proxy')
            self.object_filter_exec_list = self._build_filter_execution_list('object')

    def _parse_vaco(self):
        # Split the request path into (version, account[, container[, object]]).
        return self.request.split_path(2, 4, rest_with_last=True)

    def handle_request(self):
        """Dispatch the request to the matching verb handler (GET/HEAD/PUT/...).

        Only methods decorated with @public are dispatchable; anything else
        yields 405.  Requests not valid for Crystal pass through untouched.
        """
        if self.is_crystal_valid_request and hasattr(self, self.request.method):
            try:
                self._get_dynamic_filters()
                handler = getattr(self, self.request.method)
                # Raises AttributeError (-> 405) if the handler is not @public.
                getattr(handler, 'publicly_accessible')
            except AttributeError:
                return HTTPMethodNotAllowed(request=self.request)
            return handler()
        else:
            self.logger.info('Request disabled for Crystal')
            return self.request.get_response(self.app)

    def _check_conditions(self, filter_metadata):
        """
        This method checks the object_tag, object_type and object_size
        conditions set by the dashboard to decide whether the filter runs.

        Returns True when every configured condition matches (or when no
        condition is configured at all).
        """
        if not filter_metadata['object_type'] and \
           not filter_metadata['object_tag'] and \
           not filter_metadata['object_size']:
            return True
        metadata = {}
        if self.method == 'put':
            # On PUT the object does not exist yet: use the request headers.
            for key in self.request.headers.keys():
                metadata[key.lower()] = self.request.headers.get(key)
        else:
            # Otherwise fetch the stored object metadata with a HEAD subrequest.
            sub_req = make_subrequest(self.request.environ, method='HEAD',
                                      path=self.request.path_info,
                                      headers=self.request.headers,
                                      swift_source='Crystal Filter Middleware')
            resp = sub_req.get_response(self.app)
            metadata = resp.headers
        correct_type = True
        correct_size = True
        correct_tags = True
        try:
            if filter_metadata['object_type']:
                # object_name holds a regex matched against the request path.
                object_name = filter_metadata['object_name']
                filename = self.request.environ['PATH_INFO']
                pattern = re.compile(object_name)
                if not pattern.search(filename):
                    correct_type = False
            if filter_metadata['object_tag']:
                # Tags come as "key1:value1,key2:value2"; every tag must match
                # either the user metadata or the sysmeta variant.
                tags = filter_metadata['object_tag'].split(',')
                tag_checking = list()
                for tag in tags:
                    key, value = tag.split(':')
                    meta_key = ('X-Object-Meta-'+key).lower()
                    sysmeta_key = ('X-Object-Sysmeta-Meta-'+key).lower()
                    correct_tag = (meta_key in metadata and
                                   metadata[meta_key] == value) or \
                                  (sysmeta_key in metadata and
                                   metadata[sysmeta_key] == value)
                    tag_checking.append(correct_tag)
                correct_tags = all(tag_checking)
            if filter_metadata['object_size']:
                # object_size is (operator, threshold), e.g. ('>', '1024').
                object_size = filter_metadata['object_size']
                op = mappings[object_size[0]]
                obj_lenght = int(object_size[1])
                correct_size = op(int(metadata['Content-Length']),
                                  obj_lenght)
        except Exception as e:
            # Any malformed condition disables the filter rather than
            # failing the request.
            self.logger.error(str(e))
            return False
        return correct_type and correct_size and correct_tags

    def _parse_filter_metadata(self, filter_metadata):
        """
        This method parses the filter metadata dict (as stored in Redis)
        into the execution-list entry format.
        """
        filter_name = filter_metadata['filter_name']
        language = filter_metadata["language"]
        params = filter_metadata["params"]
        filter_type = filter_metadata["filter_type"]
        filter_main = filter_metadata["main"]
        filter_dep = filter_metadata["dependencies"]
        filter_size = filter_metadata["content_length"]
        reverse = filter_metadata["reverse"]
        filter_data = {'name': filter_name,
                       'language': language,
                       'params': self._parse_csv_params(params),
                       'reverse': reverse,
                       'type': filter_type,
                       'main': filter_main,
                       'dependencies': filter_dep,
                       'size': filter_size}
        return filter_data

    def _build_filter_execution_list(self, server):
        """
        This method builds the filter execution list (ordered) for the
        given execution server ('proxy' or 'object').
        """
        filter_execution_list = {}
        ''' Parse global filters '''
        for _, filter_metadata in self.global_filters.items():
            filter_metadata = json.loads(filter_metadata)
            if self.method in filter_metadata and filter_metadata[self.method] \
                    and filter_metadata['execution_server'] == server \
                    and self._check_conditions(filter_metadata):
                filter_data = self._parse_filter_metadata(filter_metadata)
                order = filter_metadata["execution_order"]
                filter_execution_list[int(order)] = filter_data
        ''' Parse Project specific filters'''
        # NOTE(review): global filters use int(order) as the key while
        # project filters use the raw order value -- confirm both are
        # meant to share the same key space.
        for _, filter_metadata in self.filter_list.items():
            filter_metadata = json.loads(filter_metadata)
            if self.method in filter_metadata and filter_metadata[self.method] \
                    and filter_metadata['execution_server'] == server \
                    and self._check_conditions(filter_metadata):
                filter_data = self._parse_filter_metadata(filter_metadata)
                order = filter_metadata["execution_order"]
                filter_execution_list[order] = filter_data
        return filter_execution_list

    def _format_crystal_metadata(self, filter_list):
        """
        This method generates the metadata that will be stored alongside the
        object in the PUT requests. It allows the reverse case of the filters
        without querying the centralized controller.
        """
        # NOTE: in Python 2, .keys() returns a list, so popping entries
        # while iterating over it is safe here.
        for key in filter_list.keys():
            cfilter = filter_list[key]
            if cfilter['reverse'] != 'False':
                current_params = cfilter['params']
                if current_params:
                    cfilter['params']['reverse'] = 'True'
                else:
                    cfilter['params'] = {'reverse': 'True'}
                # 'reverse' names the server that must undo the filter on GET.
                cfilter['execution_server'] = cfilter['reverse']
                cfilter.pop('reverse')
            else:
                filter_list.pop(key)
        return filter_list

    def _set_crystal_metadata(self):
        """
        Builds the combined (proxy + object) filter list, converts it to the
        reverse-execution metadata format and stores it in the
        X-Object-Sysmeta-Crystal header so GETs can undo the filters without
        querying the centralized controller.
        """
        filter_exec_list = {}
        for key in sorted(self.proxy_filter_exec_list.keys()):
            filter_exec_list[len(filter_exec_list)] = self.proxy_filter_exec_list[key]
        for key in sorted(self.object_filter_exec_list.keys()):
            filter_exec_list[len(filter_exec_list)] = self.object_filter_exec_list[key]
        # Deep copy: _format_crystal_metadata mutates/pops entries and the
        # originals are still needed to run the pipeline.
        filter_list = copy.deepcopy(filter_exec_list)
        crystal_md = self._format_crystal_metadata(filter_list)
        if crystal_md:
            self.request.headers['X-Object-Sysmeta-Crystal'] = crystal_md

    def _save_size_and_etag(self):
        """
        Save original object Size and Etag in sysmeta, since filters may
        change the stored bytes.
        """
        etag = self.request.headers.get('ETag', None)
        if etag:
            self.request.headers['X-Object-Sysmeta-Etag'] = etag
            self.request.headers['X-Backend-Container-Update-Override-Etag'] = etag
        size = self.request.headers.get('Content-Length')
        self.request.headers['X-Object-Sysmeta-Size'] = size
        self.request.headers['X-Backend-Container-Update-Override-Size'] = size

    def _recover_size_and_etag(self, response):
        """
        Recovers the original Object Size and Etag on the way back to the
        client (object requests only).
        """
        if 'X-Object-Sysmeta-Size' in response.headers and self.obj:
            size = response.headers.pop('X-Object-Sysmeta-Size')
            response.headers['Content-Length'] = size
        if 'X-Object-Sysmeta-Etag' in response.headers and self.obj:
            etag = response.headers.pop('X-Object-Sysmeta-Etag')
            response.headers['etag'] = etag
        if 'Transfer-Encoding' in response.headers and self.obj:
            # Content-Length and Transfer-Encoding are mutually exclusive.
            response.headers.pop('Transfer-Encoding')

    def _parse_csv_params(self, csv_params):
        """
        Provides comma separated parameters "a=1,b=2" as a dictionary.

        The string is split on '=' and keys/values are recovered around the
        last comma of each fragment.  NOTE(review): this assumes neither
        keys nor values contain '=' -- confirm with the dashboard format.
        """
        params_dict = dict()
        params = [x.strip() for x in csv_params.split('=')]
        for index in range(len(params)):
            if len(params) > index + 1:
                if index == 0:
                    params_dict[params[index]] = params[index + 1].rsplit(',', 1)[0].strip()
                elif index < len(params):
                    params_dict[params[index].rsplit(',', 1)[1].strip()] = params[index + 1].rsplit(',', 1)[0].strip()
                else:
                    params_dict[params[index].rsplit(',', 1)[1].strip()] = params[index + 1]
        return params_dict

    def _parse_headers_params(self):
        """
        Extract filter parameters from X-Crystal-Parameter request headers.
        Each header value is a URL-quoted "key:value" pair.
        """
        parameters = dict()
        for param in self.request.headers:
            if param.lower().startswith('x-crystal-parameter'):
                keyvalue = self.request.headers[param]
                keyvalue = urllib.unquote(keyvalue)  # Python 2 API
                [key, value] = keyvalue.split(':')
                parameters[key] = value
        return parameters

    @public
    def GET(self):
        """Handler for HTTP GET requests."""
        return self.GETorHEAD()

    @public
    def HEAD(self):
        """Handler for HTTP HEAD requests."""
        return self.GETorHEAD()

    @public
    def POST(self):
        """Handler for HTTP POST requests."""
        return self.POSTorDELETE()

    @public
    def DELETE(self):
        """Handler for HTTP DELETE requests."""
        return self.POSTorDELETE()

    def GETorHEAD(self):
        """
        Handle HTTP GET or HEAD requests: run the proxy filter pipeline,
        forward the object-server filter list, and restore the original
        size/etag on the response.
        """
        if self.proxy_filter_exec_list:
            self.logger.info('There are Filters to execute')
            self.logger.info(str(self.proxy_filter_exec_list))
            self._build_pipeline(self.proxy_filter_exec_list)
        else:
            self.logger.info('No Filters to execute')
        if self.object_filter_exec_list:
            object_server_filters = json.dumps(self.object_filter_exec_list)
            self.request.headers['crystal.filters'] = object_server_filters
        response = self.request.get_response(self.app)
        self._recover_size_and_etag(response)
        return response

    @public
    def PUT(self):
        """
        Handle HTTP PUT requests: record reverse-filter metadata and the
        original size/etag before running the proxy pipeline.
        """
        if self.proxy_filter_exec_list:
            self.logger.info('There are Filters to execute')
            self.logger.info(str(self.proxy_filter_exec_list))
            self._set_crystal_metadata()
            self._save_size_and_etag()
            self._build_pipeline(self.proxy_filter_exec_list)
        else:
            self.logger.info('No filters to execute')
        if self.object_filter_exec_list:
            object_server_filters = json.dumps(self.object_filter_exec_list)
            self.request.headers['crystal.filters'] = object_server_filters
        return self.request.get_response(self.app)

    @public
    def POSTorDELETE(self):
        """
        Handle HTTP POST or DELETE requests: run the proxy pipeline and
        forward the object-server filter list.
        """
        if self.proxy_filter_exec_list:
            self.logger.info('There are Filters to execute')
            self.logger.info(str(self.proxy_filter_exec_list))
            self._build_pipeline(self.proxy_filter_exec_list)
        else:
            self.logger.info('No filters to execute')
        if self.object_filter_exec_list:
            object_server_filters = json.dumps(self.object_filter_exec_list)
            self.request.headers['crystal.filters'] = object_server_filters
        return self.request.get_response(self.app)
| Crystal-SDS/filter-middleware | crystal_filter_middleware/handlers/proxy.py | Python | gpl-3.0 | 13,744 |
"""Database models used by django-reversion."""
from __future__ import unicode_literals
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes import generic
from django.conf import settings
from django.core import serializers
from django.core.exceptions import ObjectDoesNotExist
from django.db import models, IntegrityError
from django.dispatch.dispatcher import Signal
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import force_text, python_2_unicode_compatible
def safe_revert(versions):
    """
    Attempt to revert the model instances contained in the given versions.

    Dependencies between versions (e.g. foreign keys) are resolved by
    retrying: any version whose revert fails with an integrity error or a
    missing related object is collected, and the remainder is reverted
    recursively, so the versions end up applied in a dependency-compatible
    order.

    Raises RevertError when no progress can be made, i.e. at least one
    version was attempted and every single one failed.
    """
    unreverted_versions = []
    for version in versions:
        try:
            version.revert()
        except (IntegrityError, ObjectDoesNotExist):
            unreverted_versions.append(version)
    # An empty input is a no-op; only raise when at least one revert was
    # attempted and *none* succeeded (otherwise the recursion below could
    # never terminate).
    if versions and len(unreverted_versions) == len(versions):
        raise RevertError("Could not revert revision, due to database integrity errors.")
    if unreverted_versions:
        safe_revert(unreverted_versions)
class RevertError(Exception):
    """Raised when a revision cannot be reverted, e.g. because every
    contained version fails with database integrity errors."""
UserModel = getattr(settings, 'AUTH_USER_MODEL', 'auth.User')
@python_2_unicode_compatible
class Revision(models.Model):
    """A group of related object versions."""

    # Identifies which RevisionManager created this revision.
    manager_slug = models.CharField(
        max_length = 191,
        db_index = True,
        default = "default",
    )
    date_created = models.DateTimeField(auto_now_add=True,
                                        db_index=True,
                                        verbose_name=_("date created"),
                                        help_text="The date and time this revision was created.")
    user = models.ForeignKey(UserModel,
                             blank=True,
                             null=True,
                             on_delete=models.SET_NULL,
                             verbose_name=_("user"),
                             help_text="The user who created this revision.")
    comment = models.TextField(blank=True,
                               verbose_name=_("comment"),
                               help_text="A text comment on this revision.")

    def revert(self, delete=False):
        """Reverts all objects in this revision.

        With delete=True, objects created after this revision (i.e. present
        now but absent from the revision) are deleted as well.
        """
        version_set = self.version_set.all()
        # Optionally delete objects no longer in the current revision.
        if delete:
            # Get a dict of all objects in this revision.
            old_revision = {}
            for version in version_set:
                try:
                    obj = version.object
                # The except class is resolved dynamically: the DoesNotExist
                # of whatever model this version points at.
                except ContentType.objects.get_for_id(version.content_type_id).model_class().DoesNotExist:
                    pass
                else:
                    old_revision[obj] = version
            # Calculate the set of all objects that are in the revision now.
            # Imported here to avoid a circular import with reversion.revisions.
            from reversion.revisions import RevisionManager
            current_revision = RevisionManager.get_manager(self.manager_slug)._follow_relationships(obj for obj in old_revision.keys() if obj is not None)
            # Delete objects that are no longer in the current revision.
            for item in current_revision:
                if item not in old_revision:
                    item.delete()
        # Attempt to revert all revisions.
        safe_revert(version_set)

    def __str__(self):
        """Returns a unicode representation."""
        return ", ".join(force_text(version) for version in self.version_set.all())

    #Meta
    class Meta:
        app_label = 'reversion'
def has_int_pk(model):
    """Return whether the given model stores its primary key as an integer.

    A BigIntegerField does not count; a ForeignKey primary key is resolved
    by recursing into the related model.
    """
    pk_field = model._meta.pk
    if isinstance(pk_field, models.ForeignKey):
        return has_int_pk(pk_field.rel.to)
    return (isinstance(pk_field, (models.IntegerField, models.AutoField))
            and not isinstance(pk_field, models.BigIntegerField))
@python_2_unicode_compatible
class Version(models.Model):
    """A saved version of a database model."""

    revision = models.ForeignKey(Revision,
                                 help_text="The revision that contains this version.")
    object_id = models.TextField(help_text="Primary key of the model under version control.")
    object_id_int = models.IntegerField(
        blank = True,
        null = True,
        db_index = True,
        help_text = "An indexed, integer version of the stored model's primary key, used for faster lookups.",
    )
    content_type = models.ForeignKey(ContentType,
                                     help_text="Content type of the model under version control.")
    # A link to the current instance, not the version stored in this Version!
    object = generic.GenericForeignKey()
    format = models.CharField(max_length=255,
                              help_text="The serialization format used by this model.")
    serialized_data = models.TextField(help_text="The serialized form of this version of the model.")
    object_repr = models.TextField(help_text="A string representation of the object.")

    @property
    def object_version(self):
        """The stored version of the model, deserialized from serialized_data.

        ignorenonexistent skips fields that have since been removed from
        the model.
        """
        data = self.serialized_data
        data = force_text(data.encode("utf8"))
        return list(serializers.deserialize(self.format, data, ignorenonexistent=True))[0]

    @property
    def field_dict(self):
        """
        A dictionary mapping field names to field values in this version
        of the model.

        This method will follow parent links, if present.
        """
        # Cached per instance, since deserialization is relatively expensive.
        if not hasattr(self, "_field_dict_cache"):
            object_version = self.object_version
            obj = object_version.object
            result = {}
            for field in obj._meta.fields:
                result[field.name] = field.value_from_object(obj)
            result.update(object_version.m2m_data)
            # Add parent data (multi-table inheritance): merge in the field
            # dicts of parent-model versions saved in the same revision.
            for parent_class, field in obj._meta.concrete_model._meta.parents.items():
                if obj._meta.proxy and parent_class == obj._meta.concrete_model:
                    continue
                content_type = ContentType.objects.get_for_model(parent_class)
                if field:
                    parent_id = force_text(getattr(obj, field.attname))
                else:
                    parent_id = obj.pk
                try:
                    parent_version = Version.objects.get(revision__id=self.revision_id,
                                                         content_type=content_type,
                                                         object_id=parent_id)
                except Version.DoesNotExist:
                    pass
                else:
                    result.update(parent_version.field_dict)
            setattr(self, "_field_dict_cache", result)
        return getattr(self, "_field_dict_cache")

    def revert(self):
        """Recovers the model in this version by saving the stored state."""
        self.object_version.save()

    def __str__(self):
        """Returns a unicode representation."""
        return self.object_repr

    #Meta
    class Meta:
        app_label = 'reversion'
# Version management signals, sent immediately before and after a
# revision (and its versions) is committed to the database.
pre_revision_commit = Signal(providing_args=["instances", "revision", "versions"])
post_revision_commit = Signal(providing_args=["instances", "revision", "versions"])
| Beauhurst/django-reversion | src/reversion/models.py | Python | bsd-3-clause | 7,592 |
import pygame, subprocess, os, sys, shutil, button, urllib2, json, hashlib, threading
class Updater(object):
def __init__(self, screen, clock, fps, resolution, version):
self.screen = screen
self.clock = clock
self.fps = fps
self.resolution = resolution
self.version = version
self.latest = "searching..."
self.triggerSwap = False
self.small = pygame.font.Font("assets/fonts/OpenSans-Semibold.ttf", 14)
self.font = pygame.font.Font("assets/fonts/OpenSans-Semibold.ttf", 30)
self.running = True
self.downloading = False
button.Button("text", self.font, "Current version: %s" % self.version, (0, 10), self.resolution)
button.Button("text", self.font, "Latest version: %s" % self.latest, (0, 45), self.resolution)
button.Button("text", self.small, "", (0, 125), self.resolution)
button.Button("big", self.font, "Update", (0, self.resolution[1] - 150), self.resolution, self.update)
button.Button("big", self.font, "Play", (0, self.resolution[1] - 75), self.resolution, self.play)
button.Button.group[3].locked = True
self.getLink()
self.run(self.checkUpdates)
while self.running:
dt = self.clock.tick(self.fps)
pygame.display.set_caption("Layer Switcher Updater", "Layer Switcher Updater")
mouseTrigger = False
for event in pygame.event.get():
if event.type == pygame.QUIT:
self.leave()
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_ESCAPE:
self.leave()
if event.key == pygame.K_SPACE:
self.play()
if event.type == pygame.MOUSEBUTTONUP:
if event.button == 1:
mouseTrigger = True
self.screen.fill((82, 246, 255))
mPos = pygame.mouse.get_pos()
for butt in button.Button.group:
butt.updateAndDraw(self.screen, mPos, mouseTrigger)
pygame.display.flip()
def getLink(self):
self.link = ""
with open("updater.dat") as f:
self.link = f.readline().rstrip()
def checkUpdates(self):
try:
buf = urllib2.urlopen(self.link + "metadata")
self.metadata = json.load(buf)
if len(self.metadata) > 0:
self.latest = self.metadata["version"]
if self.version == self.latest:
button.Button.group[3].locked = True
else:
button.Button.group[3].locked = False
button.Button.group[1].setText("Latest version: %s" % self.latest)
except urllib2.HTTPError, e:
button.Button.group[1].setText("Latest version: not found (%s)" % e.code)
except urllib2.URLError, e:
button.Button.group[1].setText("Latest version: not found (%s)" % e.code)
def update(self):
button.Button.group[3].locked = True
self.toDownload = ["version.dat", "updater.dat"]
for folder in self.metadata["files"]:
if folder != "layerswitcher":
if not os.path.isdir(folder):
os.makedirs(folder)
for fn in self.metadata["files"][folder]:
if not os.path.isfile(os.path.join(folder, fn)) or self.hashfile(open(os.path.join(folder, fn), "rb")) != self.metadata["files"][folder][fn]:
self.toDownload.append(os.path.join(folder, fn))
else:
for fn in self.metadata["files"][folder]:
if not os.path.isfile(fn) or self.hashfile(open(fn, "rb")) != self.metadata["files"][folder][fn]:
self.toDownload.append(fn)
for dirpath, dirnames, filenames in os.walk("assets"):
if not dirpath in self.metadata["files"]:
shutil.rmtree(dirpath)
continue
for fn in filenames:
if not fn in self.metadata["files"][dirpath]:
os.unlink(os.path.join(dirpath, fn))
for dirpath, dirnames, filenames in os.walk("maps"):
if not dirpath in self.metadata["files"]:
shutil.rmtree(dirpath)
continue
for fn in filenames:
if not fn in self.metadata["files"][dirpath]:
os.unlink(os.path.join(dirpath, fn))
button.Button.group[2].setText(self.toDownload[-1])
self.run(self.downloadManager)
def downloadManager(self):
while len(self.toDownload) > 0:
try:
self.downloadNext()
except urllib2.HTTPError, e:
button.Button.group[2].setText("error - try again later (%s)" % e.code)
except urllib2.URLError, e:
button.Button.group[2].setText("error - try again later (%s)" % e.code)
button.Button.group[0].setText("Current version: %s" % self.latest)
button.Button.group[2].setText("Done!")
def downloadNext(self):
fn = self.toDownload.pop()
button.Button.group[2].setText(fn)
request = urllib2.urlopen(self.link + "layerswitcher/" + fn.replace("\\", "/").replace(" ", "%20"))
total = int(request.info().getheader('Content-Length').strip())
written = 0
raw = fn
if fn == "lwupdater.exe":
fn += ".new"
self.triggerSwap = True
with open(fn, "wb") as out:
buf = request.read(8192)
while len(buf) > 0:
out.write(buf)
written += len(buf)
button.Button.group[2].setText(raw + " - %d%%" % int((float(written) / total) * 100))
buf = request.read(8192)
def play(self):
subprocess.Popen(["layerswitcher.exe"])
self.leave()
def run(self, call):
t = threading.Thread(target = call)
t.daemon = True
t.start()
def hashfile(self, fn):
hasher = hashlib.md5()
buf = fn.read(8192)
while len(buf) > 0:
hasher.update(buf)
buf = fn.read(8192)
return hasher.hexdigest()
def leave(self):
if self.triggerSwap:
subprocess.Popen(["layerswitcher.exe", "-k"])
self.running = False
pygame.quit()
sys.exit(0)
| pedro-b/layer-switcher | updater/updater.py | Python | mit | 5,310 |
#!/usr/bin/python
import sys
import xml.dom.minidom
import os
# Command-line dispatch: the framework invokes this script with the
# action name ("deploy" or "configure") as the first argument.
if sys.argv[1] == 'deploy':
    # "deploy" receives the flag as the second argument and persists it
    # where the module's exploit expects to read it back.
    flag = sys.argv[2]
    # Use a context manager so the file is closed even if write() fails.
    with open("/usr/share/hazelwire/testmodule3/exploit/flag.txt", 'w') as f:
        f.write(flag)
if sys.argv[1] == "configure":
    dom = xml.dom.minidom.parse(os.getenv("MODULEDIR")+"testmodule3/config.xml")
    #The service port is configurable in this module, get it from the XML:
    for option in dom.getElementsByTagName("option"):
        if option.getElementsByTagName("name")[0].childNodes[0].data == "Service port":
            port = option.getElementsByTagName("value")[0].childNodes[0].data
    #Write the port number to a file in the module dir
    # NOTE(review): if no "Service port" option exists in config.xml,
    # `port` is never bound and the write below raises NameError --
    # confirm the config schema guarantees the option.
    with open("/usr/share/hazelwire/testmodule3/port", "w") as f:
        f.write(port)
| Hazelwire/hazelwire-modules | testmodule3/testmodule3-0.2/manage.py | Python | gpl-3.0 | 772 |
#!/usr/bin/env python
"""Top level ``eval`` module.
"""
import warnings
import tokenize
from pandas.core import common as com
from pandas.computation import _NUMEXPR_INSTALLED
from pandas.computation.expr import Expr, _parsers, tokenize_string
from pandas.computation.scope import _ensure_scope
from pandas.compat import string_types
from pandas.computation.engines import _engines
def _check_engine(engine):
    """Make sure a valid engine is passed.
    Parameters
    ----------
    engine : str
    Raises
    ------
    KeyError
        * If an invalid engine is passed
    ImportError
        * If numexpr was requested but doesn't exist
    """
    if engine not in _engines:
        raise KeyError('Invalid engine {0!r} passed, valid engines are'
                       ' {1}'.format(engine, list(_engines.keys())))
    # TODO: validate importability generically once engines beyond
    # numexpr exist; could happen at engine instantiation instead.
    if engine != 'numexpr':
        return
    if not _NUMEXPR_INSTALLED:
        raise ImportError("'numexpr' is not installed or an "
                          "unsupported version. Cannot use "
                          "engine='numexpr' for query/eval "
                          "if 'numexpr' is not installed")
def _check_parser(parser):
    """Make sure a valid parser is passed.
    Parameters
    ----------
    parser : str
    Raises
    ------
    KeyError
        * If an invalid parser is passed
    """
    if parser in _parsers:
        return
    raise KeyError('Invalid parser {0!r} passed, valid parsers are'
                   ' {1}'.format(parser, _parsers.keys()))
def _check_resolvers(resolvers):
if resolvers is not None:
for resolver in resolvers:
if not hasattr(resolver, '__getitem__'):
name = type(resolver).__name__
raise TypeError('Resolver of type %r does not implement '
'the __getitem__ method' % name)
def _check_expression(expr):
"""Make sure an expression is not an empty string
Parameters
----------
expr : object
An object that can be converted to a string
Raises
------
ValueError
* If expr is an empty string
"""
if not expr:
raise ValueError("expr cannot be an empty string")
def _convert_expression(expr):
    """Convert an object to an expression.
    Thus function converts an object to an expression (a unicode string) and
    checks to make sure it isn't empty after conversion. This is used to
    convert operators to their string representation for recursive calls to
    :func:`~pandas.eval`.
    Parameters
    ----------
    expr : object
        The object to be converted to a string.
    Returns
    -------
    s : unicode
        The string representation of an object.
    Raises
    ------
    ValueError
        * If the expression is empty.
    """
    converted = com.pprint_thing(expr)
    _check_expression(converted)
    return converted
def _check_for_locals(expr, stack_level, parser):
    """Reject the ``@`` local-variable prefix where it is not supported.

    ``@`` is only meaningful with the pandas parser, and even then not in
    top-level ``eval`` calls (where locals are already in scope).
    """
    top_level = stack_level == 0
    pandas_parser = parser == 'pandas'
    if not pandas_parser:
        msg = "The '@' prefix is only supported by the pandas parser"
    elif top_level:
        msg = ("The '@' prefix is not allowed in "
               "top-level eval calls, \nplease refer to "
               "your variables by name without the '@' "
               "prefix")
    else:
        # pandas parser below the top level: '@' is legal, nothing to check.
        return
    for toknum, tokval in tokenize_string(expr):
        if toknum == tokenize.OP and tokval == '@':
            raise SyntaxError(msg)
def eval(expr, parser='pandas', engine='numexpr', truediv=True,
         local_dict=None, global_dict=None, resolvers=(), level=0,
         target=None, inplace=None):
    """Evaluate a Python expression as a string using various backends.
    The following arithmetic operations are supported: ``+``, ``-``, ``*``,
    ``/``, ``**``, ``%``, ``//`` (python engine only) along with the following
    boolean operations: ``|`` (or), ``&`` (and), and ``~`` (not).
    Additionally, the ``'pandas'`` parser allows the use of :keyword:`and`,
    :keyword:`or`, and :keyword:`not` with the same semantics as the
    corresponding bitwise operators.  :class:`~pandas.Series` and
    :class:`~pandas.DataFrame` objects are supported and behave as they would
    with plain ol' Python evaluation.
    Parameters
    ----------
    expr : str or unicode
        The expression to evaluate. This string cannot contain any Python
        `statements
        <http://docs.python.org/2/reference/simple_stmts.html#simple-statements>`__,
        only Python `expressions
        <http://docs.python.org/2/reference/simple_stmts.html#expression-statements>`__.
    parser : string, default 'pandas', {'pandas', 'python'}
        The parser to use to construct the syntax tree from the expression. The
        default of ``'pandas'`` parses code slightly different than standard
        Python. Alternatively, you can parse an expression using the
        ``'python'`` parser to retain strict Python semantics.  See the
        :ref:`enhancing performance <enhancingperf.eval>` documentation for
        more details.
    engine : string, default 'numexpr', {'python', 'numexpr'}
        The engine used to evaluate the expression. Supported engines are
        - ``'numexpr'``: This default engine evaluates pandas objects using
                         numexpr for large speed ups in complex expressions
                         with large frames.
        - ``'python'``: Performs operations as if you had ``eval``'d in top
                        level python. This engine is generally not that useful.
        More backends may be available in the future.
    truediv : bool, optional
        Whether to use true division, like in Python >= 3
    local_dict : dict or None, optional
        A dictionary of local variables, taken from locals() by default.
    global_dict : dict or None, optional
        A dictionary of global variables, taken from globals() by default.
    resolvers : list of dict-like or None, optional
        A list of objects implementing the ``__getitem__`` special method that
        you can use to inject an additional collection of namespaces to use for
        variable lookup.  For example, this is used in the
        :meth:`~pandas.DataFrame.query` method to inject the
        :attr:`~pandas.DataFrame.index` and :attr:`~pandas.DataFrame.columns`
        variables that refer to their respective :class:`~pandas.DataFrame`
        instance attributes.
    level : int, optional
        The number of prior stack frames to traverse and add to the current
        scope. Most users will **not** need to change this parameter.
    target : a target object for assignment, optional, default is None
        essentially this is a passed in resolver
    inplace : bool, default True
        If expression mutates, whether to modify object inplace or return
        copy with mutation.
        WARNING: inplace=None currently falls back to True, but
        in a future version, will default to False.  Use inplace=True
        explicitly rather than relying on the default.
    Returns
    -------
    ndarray, numeric scalar, DataFrame, Series
    Notes
    -----
    The ``dtype`` of any objects involved in an arithmetic ``%`` operation are
    recursively cast to ``float64``.
    See the :ref:`enhancing performance <enhancingperf.eval>` documentation for
    more details.
    See Also
    --------
    pandas.DataFrame.query
    pandas.DataFrame.eval
    """
    first_expr = True
    # A string may contain several newline-separated expressions; each is
    # parsed and evaluated in order.  Non-strings are treated as a single
    # already-built expression object.
    if isinstance(expr, string_types):
        exprs = [e for e in expr.splitlines() if e != '']
    else:
        exprs = [expr]
    multi_line = len(exprs) > 1
    if multi_line and target is None:
        raise ValueError("multi-line expressions are only valid in the "
                         "context of data, use DataFrame.eval")
    first_expr = True
    for expr in exprs:
        expr = _convert_expression(expr)
        _check_engine(engine)
        _check_parser(parser)
        _check_resolvers(resolvers)
        _check_for_locals(expr, level, parser)
        # get our (possibly passed-in) scope
        # +1 accounts for this eval() frame itself.
        level += 1
        env = _ensure_scope(level, global_dict=global_dict,
                            local_dict=local_dict, resolvers=resolvers,
                            target=target)
        parsed_expr = Expr(expr, engine=engine, parser=parser, env=env,
                           truediv=truediv)
        # construct the engine and evaluate the parsed expression
        eng = _engines[engine]
        eng_inst = eng(parsed_expr)
        ret = eng_inst.evaluate()
        # In multi-line mode every line must be an assignment, otherwise
        # intermediate results would be silently discarded.
        if parsed_expr.assigner is None and multi_line:
            raise ValueError("Multi-line expressions are only valid"
                             " if all expressions contain an assignment")
        # assign if needed
        if env.target is not None and parsed_expr.assigner is not None:
            if inplace is None:
                warnings.warn(
                    "eval expressions containing an assignment currently"
                    "default to operating inplace.\nThis will change in "
                    "a future version of pandas, use inplace=True to "
                    "avoid this warning.",
                    FutureWarning, stacklevel=3)
                inplace = True
            # if returning a copy, copy only on the first assignment
            if not inplace and first_expr:
                target = env.target.copy()
            else:
                target = env.target
            target[parsed_expr.assigner] = ret
            # Make the just-assigned name visible to subsequent lines by
            # threading it through the resolvers for the next iteration.
            if not resolvers:
                resolvers = ({parsed_expr.assigner: ret},)
            else:
                # existing resolver needs updated to handle
                # case of mutating existing column in copy
                for resolver in resolvers:
                    if parsed_expr.assigner in resolver:
                        resolver[parsed_expr.assigner] = ret
                        break
                else:
                    resolvers += ({parsed_expr.assigner: ret},)
            # Assignments yield no value.
            ret = None
            first_expr = False
    # Return the (possibly copied) target only when mutating a copy.
    if not inplace and inplace is not None:
        return target
    return ret
| pjryan126/solid-start-careers | store/api/zillow/venv/lib/python2.7/site-packages/pandas/computation/eval.py | Python | gpl-2.0 | 10,401 |
from Bio import SeqIO
from datetime import date
# Input: GISAID H3N2 human sequences in FASTA format.
fname = '../data/gisaid_H3N2_all_years_human.fasta'
# Accumulates [SeqRecord, collection_date] pairs for all dated sequences.
all_seqs = []
def parse_gisaid_date(date_str):
    """Parse a GISAID collection-date string into a ``datetime.date``.

    Accepts "YYYY-MM-DD" (exact date) and "YYYY-MM" (day defaults to the
    15th, i.e. mid-month).  Anything else (e.g. a bare year) yields
    ``None``, signalling that the record should be skipped.
    """
    if len(date_str.split('-')) == 3:
        year, month, day = map(int, date_str.split('-'))
        return date(year=year, month=month, day=day)
    elif len(date_str.split('-')) == 2:
        # Keep only the first whitespace-separated token in case the
        # field carries trailing annotations.
        year, month = map(int, date_str.split()[0].split('-'))
        # Log partially specified dates for inspection.  Parenthesized so
        # the statement is valid (and identical) under Python 2 and 3.
        print(date_str)
        return date(year=year, month=month, day=15)
    else:
        # BUG FIX: the original ended with a bare `None` expression (a
        # no-op); make the fall-through return explicit.
        return None
# Collect every record whose header's second '|'-delimited field parses
# as a usable collection date.
with open(fname, 'r') as seqfile:
    for seq in SeqIO.parse(seqfile, 'fasta'):
        collection_date = parse_gisaid_date(seq.description.split('|')[1].strip())
        if collection_date is not None:
            all_seqs.append([seq, collection_date])
# Sort chronologically, then write out in batches of `step` records per file.
all_seqs.sort(key = lambda x:x[1])
outfname = '../data/gisaid_H3N2_all_years_human_full_date_'
step = 500
# NOTE(review): relies on Python 2 integer division; under Python 3 this
# range() argument would be a float and raise TypeError.
for si in range(len(all_seqs)/step+1):
    with open(outfname+format(si,'03d')+'.fasta', 'w') as outfile:
        for seq, d in all_seqs[si*step:(si+1)*step]:
            SeqIO.write(seq, outfile, 'fasta')
| rneher/FitnessInference | flu/sequence_and_annotations/filter_gisaid_by_full_date.py | Python | mit | 1,099 |
from hsph.fields import SiteField
class HSPHSiteDataMixin(object):
    """Report mixin providing region/district/site lookups.

    Expects the mixing-in report to supply ``self.domain`` and
    ``self.request`` (standard report attributes) -- the facility map is
    fetched lazily per instance and filtered by the GET parameters
    declared on SiteField.
    """
    # Per-instance cache for the full facility hierarchy.
    _site_map = None
    @property
    def site_map(self):
        if self._site_map is None:
            self._site_map = SiteField.getFacilities(domain=self.domain)
        return self._site_map
    # Per-instance cache for the request-filtered hierarchy.
    _selected_site_map = None
    @property
    def selected_site_map(self):
        if self._selected_site_map is None:
            site_map = {}
            region = self.request.GET.get(SiteField.slugs['region'], None)
            district = self.request.GET.get(SiteField.slugs['district'], None)
            site = self.request.GET.get(SiteField.slugs['site'], None)
            # Narrow the full map progressively: region, then district,
            # then single site, keeping the same nested dict shape.
            if region:
                site_map[region] = dict(districts=self.site_map[region].get("districts", {}),
                                        name=self.site_map[region].get("name", ""))
                if district:
                    site_map[region]["districts"] = dict()
                    site_map[region]["districts"][district] = dict(
                        sites=self.site_map[region]["districts"][district].get("sites", {}),
                        name=self.site_map[region]["districts"][district].get("name", "")
                    )
                    if site:
                        site_map[region]["districts"][district]["sites"] = dict()
                        site_map[region]["districts"][district]["sites"][site] = dict(
                            name=self.site_map[region]["districts"][district]["sites"][site].get("name", "")
                        )
            self._selected_site_map = site_map
        return self._selected_site_map
    def get_site_table_values(self, key):
        # key is a (region, district, site) triple as emitted by generate_keys.
        return self.get_region_name(key[0]),\
               self.get_district_name(key[0], key[1]),\
               self.get_site_name(key[0], key[1], key[2])
    def get_region_name(self, region):
        # Falls back to the raw code when no display name is known.
        return self.site_map.get(region, {}).get("name", region)
    def get_district_name(self, region, district):
        return self.site_map.get(region, {}).get("districts", {}).get(district, {}).get("name", district)
    def get_site_name(self, region, district, site):
        return self.site_map.get(region, {}).get("districts", {}).get(district, {}).get("sites", {}).get(site, {}).get("name", site)
    def generate_keys(self, prefix=None, suffix=None):
        # One [prefix..., region, district, site, suffix...] key per site
        # in the currently selected (request-filtered) hierarchy.
        keys = [(prefix or [])+[region, district, site]+(suffix or [])
                for region, districts in self.selected_site_map.items()
                for district, sites in districts.get("districts",{}).items()
                for site, site_name in sites.get("sites",{}).items()]
        return keys
| SEL-Columbia/commcare-hq | custom/_legacy/hsph/reports/__init__.py | Python | bsd-3-clause | 2,576 |
import unittest
import os
if __name__ == '__main__' and __package__ is None:
from os import sys, path
sys.path.append(path.abspath(path.join(__file__, "..", "..")))
from src.MirroredDirectory import MirroredDirectory
from src.mocking.MockFileSystem import MockFileSystem
class MirroredDirectoryTest(unittest.TestCase):
    """Unit tests for MirroredDirectory.

    All filesystem interaction goes through MockFileSystem, so tests can
    declare which files/folders "exist" without touching the disk.
    """
    def _getInstance(self):
        # Shared fixture: a class file whose package has a sibling
        # "<package>Test" folder registered in the mock filesystem.
        aPath = "/MyProject1/library/aae/mvc/Controller.php"
        testDir = "/MyProject1/library/aaeTest"
        mockFileSystem = MockFileSystem()
        mockFileSystem.createFolder(testDir)
        md = MirroredDirectory(aPath)
        md.fileSystem = mockFileSystem
        return md
    def test___init__(self):
        # Smoke test: construction must not raise.
        aPath = os.path.join("a", "path", "to", "a", "file.php")
        obj = MirroredDirectory(aPath)
    def test_getTestFileDir_has_no_test_dir(self):
        expected = os.path.join(os.sep, "Folder1", "Folder2")
        aPath = os.path.join(os.sep, "Folder1", "Folder2", "FileName.php")
        md = MirroredDirectory(aPath)
        md.fileSystem = MockFileSystem()
        result = md.getTestFileDir()
        self.assertEqual(expected, result)
    def test_getDBTestFileDir_has_no_test_dir(self):
        expected = os.path.join(os.sep, "Folder1", "Folder2")
        aPath = os.path.join(os.sep, "Folder1", "Folder2", "FileName.php")
        md = MirroredDirectory(aPath)
        md.fileSystem = MockFileSystem()
        result = md.getDBTestFileDir()
        self.assertEqual(expected, result)
    def test__determineKind(self):
        # Table-driven: path suffix conventions map to block kinds.
        md = MirroredDirectory()
        data = [
            (os.path.join(os.sep, "Folder1", "Folder2", "FileName.php"), MirroredDirectory.KIND_IS_CLASS),
            (os.path.join(os.sep, "Folder1", "Folder2", "FileNameTest.php"), MirroredDirectory.KIND_IS_TEST),
            (os.path.join(os.sep, "Folder1", "Folder2", "FileName_test.php"), MirroredDirectory.KIND_IS_TEST),
            (os.path.join(os.sep, "Folder1", "Folder2", "FileName_db_test.php"), MirroredDirectory.KIND_IS_DB_TEST),
            (os.path.join(os.sep, "Folder1", "Folder2", "FileNameDB_Test.php"), MirroredDirectory.KIND_IS_DB_TEST),
            (os.path.join(os.sep, "Folder1", "Folder2", "FileName"), MirroredDirectory.KIND_IS_CLASS),
            (os.path.join(os.sep, "Folder1", "Folder2", "FileNameTest"), MirroredDirectory.KIND_IS_TEST),
            (os.path.join(os.sep, "Folder1", "Folder2", "FileName_test"), MirroredDirectory.KIND_IS_TEST),
            (os.path.join(os.sep, "Folder1", "Folder2", "FileName_db_test"), MirroredDirectory.KIND_IS_DB_TEST),
            (os.path.join(os.sep, "Folder1", "Folder2", "FileNameDB_Test"), MirroredDirectory.KIND_IS_DB_TEST)
        ]
        for aPath, kind in data:
            result = md._determineKind(aPath)
            self.assertEqual(kind, result)
    def test__scrubPath(self):
        # Table-driven: scrubbing strips test-suffix decorations from
        # every path component, given the already-determined kind.
        md = MirroredDirectory()
        data = [
            (os.path.join(os.sep, "Folder1", "Folder2", "FileName.php"), MirroredDirectory.KIND_IS_CLASS, os.path.join(os.sep, "Folder1", "Folder2", "FileName.php")),
            (os.path.join(os.sep, "Folder1", "Folder2", "FileNameTest.php"), MirroredDirectory.KIND_IS_TEST, os.path.join(os.sep, "Folder1", "Folder2", "FileName.php")),
            (os.path.join(os.sep, "Folder1Test", "Folder2Test", "FileNameTest.php"), MirroredDirectory.KIND_IS_TEST, os.path.join(os.sep, "Folder1", "Folder2", "FileName.php")),
            (os.path.join(os.sep, "Folder1", "Folder2", "FileName_test.php"), MirroredDirectory.KIND_IS_TEST, os.path.join(os.sep, "Folder1", "Folder2", "FileName.php")),
            (os.path.join(os.sep, "Folder1_test", "Folder2", "FileName_test.php"), MirroredDirectory.KIND_IS_TEST, os.path.join(os.sep, "Folder1", "Folder2", "FileName.php")),
            (os.path.join(os.sep, "Folder1", "Folder2", "FileName_db_test.php"), MirroredDirectory.KIND_IS_DB_TEST, os.path.join(os.sep, "Folder1", "Folder2", "FileName.php")),
            (os.path.join(os.sep, "Folder1", "Folder2_db_test", "FileName_db_test.php"), MirroredDirectory.KIND_IS_DB_TEST, os.path.join(os.sep, "Folder1", "Folder2", "FileName.php")),
            (os.path.join(os.sep, "Folder1", "Folder2", "FileNameDB_Test.php"), MirroredDirectory.KIND_IS_DB_TEST, os.path.join(os.sep, "Folder1", "Folder2", "FileName.php")),
            (os.path.join(os.sep, "Folder1DB_Test", "Folder2DB_Test", "FileNameDB_Test.php"), MirroredDirectory.KIND_IS_DB_TEST, os.path.join(os.sep, "Folder1", "Folder2", "FileName.php")),
            (os.path.join(os.sep, "Folder1", "Folder2", "FileName"), MirroredDirectory.KIND_IS_CLASS, os.path.join(os.sep, "Folder1", "Folder2", "FileName")),
            (os.path.join(os.sep, "Folder1", "Folder2", "FileNameTest"), MirroredDirectory.KIND_IS_TEST, os.path.join(os.sep, "Folder1", "Folder2", "FileName")),
            (os.path.join(os.sep, "Folder1", "Folder2", "FileName_test"), MirroredDirectory.KIND_IS_TEST, os.path.join(os.sep, "Folder1", "Folder2", "FileName")),
            (os.path.join(os.sep, "Folder1", "Folder2", "FileName_db_test"), MirroredDirectory.KIND_IS_DB_TEST, os.path.join(os.sep, "Folder1", "Folder2", "FileName")),
            (os.path.join(os.sep, "Folder1", "Folder2", "FileNameDB_Test"), MirroredDirectory.KIND_IS_DB_TEST, os.path.join(os.sep, "Folder1", "Folder2", "FileName"))
        ]
        for aPath, kind, scrubbedPath in data:
            result = md._scrubPath(aPath, kind)
            self.assertEqual(scrubbedPath, result)
    def test_getBasePath_test_file_and_two_test_folders_present(self):
        print("")
        aFileName = os.path.join(os.sep, "MyProject1", "library", "aaeTest", "mvc", "ControllerTest.php")
        otherTestDir = os.path.join(os.sep, "MyProject1", "libraryTest")
        expectedBasePath = os.path.join(os.sep, "MyProject1", "library", "aae")
        mockFileSystem = MockFileSystem()
        mockFileSystem.createFile(aFileName)
        mockFileSystem.createFolder(otherTestDir)
        md = MirroredDirectory(aFileName)
        md.fileSystem = mockFileSystem
        resultBasePath = md.getBasePath()
        self.assertEqual(expectedBasePath, resultBasePath)
    def test_test_file_from_class_file_test_file_and_two_test_folders_present(self):
        aFileName = os.path.join(os.sep, "MyProject1", "library", "aaeTest", "mvc", "ControllerTest.php")
        otherTestDir = os.path.join(os.sep, "MyProject1", "libraryTest",)
        testDir = os.path.join(os.sep, "MyProject1", "library", "aaeTest",)
        mockFileSystem = MockFileSystem()
        mockFileSystem.createFolder(otherTestDir)
        mockFileSystem.createFolder(testDir)
        md = MirroredDirectory(aFileName)
        md.fileSystem = mockFileSystem
        testFileName = md.getTestFileName()
        self.assertEqual(aFileName, testFileName)
    def test_getToggledFileName_class_to_test(self):
        aFileName = os.path.join(os.sep, "a", "path", "to", "a", "file.php")
        aTestFileName = os.path.join(os.sep, "a", "pathTest", "to", "a", "fileTest.php")
        mockFileSystem = MockFileSystem()
        mockFileSystem.createFile(aFileName, "")
        mockFileSystem.createFile(aTestFileName, "")
        md = MirroredDirectory(aFileName)
        md.fileSystem = mockFileSystem
        result = md.getToggledFileName();
        self.assertEqual(aTestFileName, result)
    def test_getToggledFileName_test_to_class(self):
        aFileName = os.path.join(os.sep, "a", "path", "to", "a", "file.php")
        aTestFileName = os.path.join(os.sep, "a", "pathTest", "to", "a", "fileTest.php")
        mockFileSystem = MockFileSystem()
        mockFileSystem.createFile(aFileName, "")
        mockFileSystem.createFile(aTestFileName, "")
        md = MirroredDirectory(aTestFileName)
        md.fileSystem = mockFileSystem
        result = md.getToggledFileName();
        self.assertEqual(aFileName, result)
    def test__discoverBasePath(self):
        aFileName = os.path.join(os.sep, "MyProject1", "library", "aae", "mvc", "Controller.php")
        testDir = os.path.join(os.sep, "MyProject1", "libraryTest")
        expectedBasePath = os.path.join(os.sep, "MyProject1", "library")
        expectedRelativePath = os.path.join("aae", "mvc")
        mockFileSystem = MockFileSystem()
        mockFileSystem.createFolder(testDir)
        md = MirroredDirectory(aFileName)
        md.fileSystem = mockFileSystem
        md._discoverBasePath()
        resultBasePath = md.getBasePath()
        resultRelativePath = md.getRelativePath()
        self.assertEqual(expectedBasePath, resultBasePath)
        self.assertEqual(expectedRelativePath, resultRelativePath)
    def test__discoverBasePath_with_relative_path_provided(self):
        # With a relative input path no base path can be discovered.
        aFileName = os.path.join("MyProject1", "library", "aae", "mvc", "Controller.php")
        testDir = os.path.join("MyProject1", "libraryTest")
        expectedRelativePath = os.path.join("MyProject1", "library", "aae", "mvc")
        expectedBasePath = None
        mockFileSystem = MockFileSystem()
        mockFileSystem.createFolder(testDir)
        md = MirroredDirectory(aFileName)
        md.fileSystem = mockFileSystem
        md._discoverBasePath()
        resultBasePath = md.getBasePath()
        resultRelativePath = md.getRelativePath()
        self.assertEqual(expectedBasePath, resultBasePath)
        self.assertEqual(expectedRelativePath, resultRelativePath)
    def test__discoverBasePath_test_file(self):
        aFileName = os.path.join(os.sep, "MyProject1", "libraryTest", "aae", "mvc", "ControllerTest.php")
        baseDir = os.path.join(os.sep, "MyProject1", "libraryTest")
        expectedBasePath = os.path.join(os.sep, "MyProject1", "library")
        expectedRelativePath = os.path.join("aae", "mvc")
        mockFileSystem = MockFileSystem()
        mockFileSystem.createFolder(baseDir)
        md = MirroredDirectory(aFileName)
        md.fileSystem = mockFileSystem
        md._discoverBasePath()
        resultBasePath = md.getBasePath()
        resultRelativePath = md.getRelativePath()
        self.assertEqual(expectedBasePath, resultBasePath)
        self.assertEqual(expectedRelativePath, resultRelativePath)
    def test_getFileDir_with_empty_dir(self):
        aFileName = ""
        md = MirroredDirectory(aFileName)
        result = md.getFileDir()
        self.assertEqual(None, result)
    def test_getFileName_with_empty_dir(self):
        aFileName = ""
        md = MirroredDirectory(aFileName)
        result = md.getFileName()
        self.assertEqual(None, result)
    def test_getTestFileName_with_empty_dir(self):
        aFileName = ""
        md = MirroredDirectory(aFileName)
        result = md.getTestFileName()
        self.assertEqual(None, result)
    def test_getTestFileName_from_class_file_with_test_folder(self):
        expectedPath = "/MyProject1/library/aaeTest/mvc/ControllerTest.php"
        md = self._getInstance()
        result = md.getTestFileName()
        self.assertEqual(expectedPath, result)
    def test_getOriginalFileName(self):
        aFileName = os.path.join(os.sep, "MyProject1", "library", "aae", "mvc", "Controller.php")
        md = MirroredDirectory(aFileName)
        result = md.getOriginalFileName()
        self.assertEqual(aFileName, result)
    def test_setKind(self):
        aPath = "/MyProject1/library/aae/mvc/Controller.php"
        testDir = "/MyProject1/library/aaeTest"
        expectedPath = "/MyProject1/library/aaeTest/mvc/ControllerTest.php"
        mockFileSystem = MockFileSystem()
        mockFileSystem.createFolder(testDir)
        md = MirroredDirectory(aPath)
        md.fileSystem = mockFileSystem
        md.setKind(MirroredDirectory.KIND_IS_TEST)
        result = md.getOriginalFileName()
        resultKind = md.getKind()
        self.assertEqual(MirroredDirectory.KIND_IS_TEST, resultKind)
        self.assertEqual(expectedPath, result)
    def test_setKind_retain_base_path(self):
        aPath = "/MyProject1/library1/aae/mvc/Controller.php"
        testDir = "/MyProject1/library1/aaeTest"
        basePath = "/MyProject1/library1/aae"
        expectedPath = "/MyProject1/library1/aaeTest/mvc/ControllerTest.php"
        mockFileSystem = MockFileSystem()
        mockFileSystem.createFolder(testDir)
        md = MirroredDirectory(aPath)
        md.fileSystem = mockFileSystem
        md.setBasePath(basePath)
        md.setKind(MirroredDirectory.KIND_IS_TEST)
        result = md.getOriginalFileName()
        resultBasePath = md.getBasePath()
        self.assertEqual(expectedPath, result)
        self.assertEqual(basePath, resultBasePath)
    def test_getBasePath_no_base_path_set_but_has_test_folder(self):
        expected = os.path.join(os.sep, "MyProject1", "library", "aae")
        md = self._getInstance()
        result = md.getBasePath()
        self.assertEqual(expected, result)
    def test_getRelativeFileName(self):
        expected = os.path.join("mvc", "Controller.php")
        md = self._getInstance()
        result = md.getRelativeFileName()
        self.assertEqual(expected, result)
    def test_setDefaultExtension(self):
        # NOTE(review): aPath is unused -- the instance comes from
        # _getInstance(), whose path already lacks an extension? Verify
        # this test exercises what its name claims.
        aPath = "/MyProject1/library/aae/mvc/Controller"
        defaultFileExtension = "php"
        expected = "/MyProject1/library/aae/mvc/Controller.php"
        md = self._getInstance()
        md.setDefaultExtension(defaultFileExtension)
        result = md.getOriginalFileName()
        self.assertEqual(expected, result)
    def test_setDefaultExtension_call_set_after_setting_default_file_extension(self):
        aPath = "/Some/Thing/Completely/different.php"
        anotherPath = "/MyProject1/library/aae/mvc/Controller"
        defaultFileExtension = "php"
        expected = "/MyProject1/library/aae/mvc/Controller.php"
        md = MirroredDirectory(aPath)
        md.setDefaultExtension(defaultFileExtension)
        md.set(anotherPath)
        result = md.getOriginalFileName()
        self.assertEqual(expected, result)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
#Задача №12, Вариант 30
#Разработайте игру "Крестики-нолики". (см. М. Доусон "Программируем на Python", гл. 6)

#Шеменев Андрей.
#25.04.2016
def display_instruct():
    """Print the game banner and the board-numbering instructions."""
    print('''
    Добро пожаловать на ринг грандиознейших интеллектуальных состязаний всех времён.
    Твой мозг и мой процессор сойдутся в схватке за доской игры "Крестики-нолики".
    Чтобы сделать ход, введи число от 0 до 8. Числа однозначно соответствуют полям
    доски - так, как показано ниже:
    0 | 1 | 2
    ---------
    3 | 4 | 5
    ---------
    6 | 7 | 8
    ''')
# Board markers and game-wide constants.
X = "X"             # the human/computer marker for crosses
O = "O"             # the marker for noughts
EMPTY = " "         # an unoccupied square
TIE = "Ничья"       # sentinel returned by winner() for a draw
NUM_SQUARES = 9     # 3x3 board
def ask_yes_no(question):
    """Keep prompting until the user answers 'y' or 'n'; return the answer."""
    answer = None
    while answer not in ("y", "n"):
        answer = input(question).lower()
    return answer
def ask_number(question, low, high):
    """Prompt for an integer in range(low, high); high is exclusive."""
    answer = None
    while answer not in range(low, high):
        answer = int(input(question))
    return answer
def pieces():
    """Ask who moves first and return the (computer, human) markers.

    X always moves first, so whoever goes first plays X.
    """
    go_first = ask_yes_no("Хочешь ходить первым? (y/n): ")
    if go_first == "y":
        print("\nНу что ж, ходи первым: играй крестиками.")
        return O, X
    print("\nТвоя самоуверенность тебя погубит... Буду начинать я.")
    return X, O
def new_board():
    """Return a fresh board: a list of NUM_SQUARES empty cells."""
    return [EMPTY] * NUM_SQUARES
def display_board(board):
    """Print the current board state in a 3x3 layout."""
    print("\n\t", board[0], "|", board[1], "|", board[2])
    print("\t", "---------")
    print("\t", board[3], "|", board[4], "|", board[5])
    print("\t", "---------")
    print("\t", board[6], "|", board[7], "|", board[8])
def legal_moves(board):
    """Return the list of indices of free squares on *board*."""
    return [square for square in range(NUM_SQUARES) if board[square] == EMPTY]
def winner(board):
    """Return X or O for three-in-a-row, TIE for a full board, else None."""
    WAYS_TO_WIN = ((0, 1, 2), (3, 4, 5), (6, 7, 8),   # rows
                   (0, 3, 6), (1, 4, 7), (2, 5, 8),   # columns
                   (0, 4, 8), (2, 4, 6))              # diagonals
    for a, b, c in WAYS_TO_WIN:
        if board[a] == board[b] == board[c] != EMPTY:
            return board[a]
    if EMPTY not in board:
        return TIE
    return None
def human_move(board, human):
    """Ask the human for a move until a legal square is chosen; return it."""
    allowed = legal_moves(board)
    move = None
    while move not in allowed:
        move = ask_number("Твой ход. Выбери одно из полей (0-8):", 0, NUM_SQUARES)
        if move not in allowed:
            print("\n Это поле уже занято. Выбери другое.\n")
    print("Ладно...")
    return move
def computer_move(board, computer, human):
    """Choose the computer's move: win if possible, else block, else best square."""
    board = board[:]  # work on a copy so the caller's board is untouched
    BEST_MOVES = (4, 0, 2, 6, 8, 1, 3, 5, 7)  # centre, corners, sides
    print("Я выберу поле номер", end=" ")
    # 1. take an immediately winning move if one exists
    for move in legal_moves(board):
        board[move] = computer
        if winner(board) == computer:
            print(move)
            return move
        board[move] = EMPTY
    # 2. otherwise block the human's winning move
    for move in legal_moves(board):
        board[move] = human
        if winner(board) == human:
            print(move)
            return move
        board[move] = EMPTY
    # 3. otherwise take the best remaining square
    for move in BEST_MOVES:
        if move in legal_moves(board):
            print(move)
            return move
def next_turn(turn):
    """Return the marker whose turn comes after *turn*."""
    return O if turn == X else X
def congrat_winner(the_winner, computer, human):
    """Announce the result, then gloat, concede, or shrug accordingly."""
    if the_winner != TIE:
        print("Три", the_winner, "в ряд!\n")
    else:
        print("Ничья!\n")
    if the_winner == computer:
        print("Kaк я и говорил. победил опять я.\nВот еще один довод в пользу того. что компьютеры превосходят людей решительно во всем.")
    elif the_winner == human:
        print("О нет, этого не может быть! Неужели ты как-то сумел перехитрить меня, человек?\nКлянусь: я, компьютер, не допущу этого больше никогда!")
    elif the_winner == TIE:
        print("Тебе очень сильно повезло, человек: ты сумел свести игру вничью.\nРадуйся пока есть время! Завтра такого не повториться.")
def main():
    """Run one full game of tic-tac-toe against the computer."""
    display_instruct()
    computer, human = pieces()
    turn = X
    board = new_board()
    display_board(board)
    # keep playing until winner() reports X, O or TIE
    while not winner(board):
        if turn == human:
            move = human_move(board, human)
            board[move] = human
        else:
            move = computer_move(board, computer, human)
            board[move] = computer
        display_board(board)
        turn = next_turn(turn)
    the_winner = winner(board)
    congrat_winner(the_winner, computer, human)
# Run the game, then wait for Enter so the console window stays open.
main()
input("\n\nНажмите Enter, чтобы выйти.")
| Mariaanisimova/pythonintask | INBa/2015/Shemenev_A_V/task_122_30.py | Python | apache-2.0 | 4,900 |
#! /usr/bin/python
# -*- coding: utf-8 -*-
#
# MarkAsCodeCoverageNonFeasible.py
# Copyright 2008 Google Inc.
#
# Marks a block of code as non feasible with regards to code coverage.
# To use it with Xcode 3.x, go to the scripts menu and choose
# "Edit User Scripts...". Then "Add Script File..." under the plus in
# the lower left hand corner.
#
# Set Input to "Selection"
# Directory to "Home Directory"
# Output to "Replace Selection"
# Errors to "Display in Alert"
#
# Then select the line(s) in your code that you want to mark as not
# covered, and select the script. Mapping it to Cntl-Option-N makes
# it easy to do from the keyboard.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import string
def main():
inputLines = sys.stdin.readlines()
if len(inputLines) == 1:
resultText = inputLines[0].rstrip() + """ // COV_NF_LINE\r"""
else:
firstLine = inputLines[0]
spaces = firstLine[0:-len(firstLine.lstrip())]
resultText = spaces + """// COV_NF_START\r"""
for curLine in inputLines:
resultText += curLine
resultText += spaces + """// COV_NF_END\r"""
print resultText
# Entry point when invoked by Xcode's user-script mechanism.
if __name__ == '__main__':
    main()
#!/usr/bin/env python
from distutils.core import setup
# Package metadata for distutils; `packages` lists every sub-package that
# must be installed (there is no automatic discovery in distutils).
setup(name='REP-instrumentation',
      # date-based version string (YYYYMMDD)
      version='0.20120411',
      description='Python interfaces to lab instruments',
      author='Philip Chimento',
      author_email='[email protected]',
      url='http://ptomato.github.com/REP-instrumentation',
      license='gpl3',
      # NOTE: distutils records but does not enforce these requirements
      requires=[
          'pyvisa (< 1.5)',
          'VideoCapture (>= 0.9.5)'
      ],
      packages=[
          'rep',
          'rep.generic',
          'rep.apogee',
          'rep.newport',
          'rep.ocean_optics',
          'rep.thorlabs',
          'rep.hp',
          #'rep.spectra_physics'
      ])
| ptomato/REP-instrumentation | setup.py | Python | gpl-3.0 | 614 |
#!/usr/bin/env python
import rospy
from lab_ros_perception.ArucoTagModule import ArucoTagModule
import time
import tf2_ros
import tf2_geometry_msgs
import math
def Quaternion_toEulerianAngle(x, y, z, w):
    """Convert a quaternion (x, y, z, w) to Euler angles in degrees.

    Returns (X, Y, Z): the rotations about the x, y and z axes.  The
    pitch argument of asin is clamped to [-1, 1] to avoid domain errors
    from floating-point round-off near the poles.
    """
    y_sq = y * y
    # rotation about x
    roll = math.degrees(math.atan2(2.0 * (w * x + y * z),
                                   1.0 - 2.0 * (x * x + y_sq)))
    # rotation about y, with the sine clamped before asin
    sin_pitch = 2.0 * (w * y - z * x)
    sin_pitch = max(-1, min(1, sin_pitch))
    pitch = math.degrees(math.asin(sin_pitch))
    # rotation about z
    yaw = math.degrees(math.atan2(2.0 * (w * z + x * y),
                                  1.0 - 2.0 * (y_sq + z * z)))
    return roll, pitch, yaw
def main():
    """Poll the Aruco tag module for tag 7 and print its pose at 1 Hz."""
    rospy.init_node('ArucoTagModule_test')
    tag_module = ArucoTagModule()
    tf_buffer = tf2_ros.Buffer()
    # kept alive so the buffer keeps receiving transforms, even though
    # the listener itself is never referenced again
    listener = tf2_ros.TransformListener(tf_buffer)
    r = rospy.Rate(1)
    while not rospy.is_shutdown():
        # pose of tag id 7 in the 'base' frame; None if not seen in time
        pose = tag_module.getPoseForID(7, duration=rospy.Duration(4.0), frame_id='base')
        if pose is not None:
            p = pose.pose
            print("x:{}, y:{}, z:{}".format(p.position.x, p.position.y, p.position.z))
            # NOTE(review): Quaternion_toEulerianAngle returns (X, Y, Z)
            # rotations; unpacking as yaw, pitch, roll looks swapped -- confirm
            yaw, pitch, roll = Quaternion_toEulerianAngle(p.orientation.x, p.orientation.y, p.orientation.z, p.orientation.w)
            print("BASE: yaw:{}, pitch:{}, roll:{}".format(yaw, pitch, roll))
            #print("BASE quad: x:{}, y:{}, z:{}, w:{}".format(p.orientation.x, p.orientation.y, p.orientation.z, p.orientation.w))
        r.sleep()
if __name__ == '__main__':
    main()
# -*- coding: utf-8 -*-
{
'name': 'Time Tracking',
'version': '1.0',
'category': 'Human Resources',
'sequence': 23,
'description': """
This module implements a timesheet system.
==========================================
""",
'author': 'OpenERP SA',
'website': 'http://www.openerp.com',
'images': ['images/hr_timesheet_lines.jpeg'],
'depends': ['hr_timesheet', 'project'],
'data': [
],
'demo': [],
'test': [
],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| syci/ingadhoc-odoo-addons | hr_timesheet_project/__openerp__.py | Python | agpl-3.0 | 591 |
"""
test_g2tools.py
"""
# Copyright (c) 2016-17 G. Peter Lepage.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# any later version (see <http://www.gnu.org/licenses/>).
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
from __future__ import print_function # makes this work for python2 and 3
import unittest
import numpy as np
import gvar as gv
from g2tools import *
# Flip SHOW_OUTPUT to True to see the diagnostic output of the tests.
SHOW_OUTPUT = False

def optprint(*args):
    """Optional printer: a no-op unless SHOW_OUTPUT is enabled below."""
    pass

if SHOW_OUTPUT:
    optprint = print

# meson masses (GeV) used as test inputs
MPI = 0.13957
MK = 0.4937
class test_g2tools(unittest.TestCase):
    def setUp(self):
        # no shared fixtures; each test builds its own data
        pass
    def tearDown(self):
        # nothing to clean up
        pass
    def test_moments(self):
        " moments(G) -- time moments of a correlator, periodic and not "
        optprint('\n=========== Test moments')
        # periodic=True (default): t and T-t contributions are folded together
        mom = moments([1., 2., 3., 2.], nlist=[0, 2, 4])
        assert mom[0] == 11. and mom[2] == 28. and mom[4] == 100.
        mom = moments([1., 2., 3., 2.], nlist=[0, 2, 4], tmax=1.)
        assert mom[0] == 5. and mom[2] == 4. and mom[4] == 4.
        mom = moments([1., 2., 3., 2.], nlist=[0, 2, 4], tmin=1.1)
        assert mom[0] == 6. and mom[2] == 24. and mom[4] == 96.
        mom = moments([1., 2., 3., 3., 2.], nlist=[0, 2, 4])
        assert mom[0] == 11. and mom[2] == 28. and mom[4] == 100.
        # periodic=False: every timeslice counted once
        mom = moments([1., 2., 3., 2.], nlist=[0, 2, 4], periodic=False)
        assert mom[0] == 15. and mom[2] == 64. and mom[4] == 424.
        mom = moments([1., 2., 3., 2.], nlist=[0, 2, 4], periodic=False, tmax=1.)
        assert mom[0] == 5. and mom[2] == 4. and mom[4] == 4.
        mom = moments([1., 2., 3., 2.], nlist=[0, 2, 4], periodic=False, tmin=1.1)
        assert mom[0] == 10. and mom[2] == 60. and mom[4] == 420.
        # round trip between Taylor coefficients and moments
        tayl = [1., -2. , 3.]
        mom = taylor2mom(tayl)
        assert mom[4] == 24. and mom[6] == 1440. and mom[8] == 120960.
        assert numpy.allclose(mom2taylor(mom), tayl)
        optprint('nothing to report -- all is good')
    def test_moments_tmin_tmax(self):
        " moments vs fourier -- the two vacpol routes must agree "
        optprint('\n=========== moments vs fourier')
        # fake data --- N=3 states
        N = 3
        ainv = 2.5
        Z = 1.5
        # the following are in lattice units, simulating lattice output
        m = np.array([0.5, 1.0, 1.5])[:N, None]
        t = np.arange(100)[None,:]
        G = np.sum(m / 4 * np.exp(-t*m), axis=0) / Z**2
        vpol = vacpol(moments(G, ainv=ainv, Z=Z, periodic=False))
        fvpol = fourier_vacpol(G, ainv=ainv, Z=Z, periodic=False)
        a_mu_m = a_mu(vpol, qmax=1000.)
        a_mu_f = a_mu(fvpol, qmax=1000.)
        # moment route and Fourier route should agree to ~1e-4
        self.assertLess(abs(1 - a_mu_m/a_mu_f), 1e-4)
    def test_pade_svd(self):
        " pade_svd(tayl, n, m) -- SVD-stabilized Pade, checked against scipy "
        optprint('\n=========== Test pade_svd')
        # Taylor expansion for exp(x)
        e_exp = [1.0, 1.0, 1.0/2.0, 1.0/6.0, 1.0/24.0, 1.0/120.0]
        # test against scipy
        p0, q0 = scipy_pade(e_exp, 2)
        p0 = p0.c[-1::-1]
        q0 = q0.c[-1::-1]
        p, q = pade_svd(e_exp, 3, 2)
        assert numpy.allclose(p, p0)
        assert numpy.allclose(q, q0)
        optprint('(3,2) Pade of exp(x) - num:', p)
        optprint('(3,2) Pade of exp(x) - den:', q)
        e = sum(p) / sum(q)
        optprint('Pade(x=1) = {:.6} error = {:7.2}'.format(
            e,
            abs(e/numpy.exp(1) - 1.),
            ))
        # now with 10% errors --- automatically reduces to (2,1)
        p0, q0 = scipy_pade(e_exp[:4], 1)
        p0 = p0.c[::-1]
        q0 = q0.c[::-1]
        p, q = pade_svd(e_exp, 3, 2, rtol=0.1)
        assert numpy.allclose(p, p0)
        assert numpy.allclose(q, q0)
        optprint('(2,1) Pade of exp(x) - num:', p)
        optprint('(2,1) Pade of exp(x) - den:', q)
        e = sum(p) / sum(q)
        optprint('Pade(x=1) = {:.6} error = {:7.2}'.format(
            e,
            abs(e/numpy.exp(1) - 1.)
            ))
        # now with 90% errors --- automatically reduces to (1,0)
        p, q = pade_svd(e_exp, 3, 2, rtol=0.9)
        optprint('(1,0) Pade of exp(x) - num:', p)
        optprint('(1,0) Pade of exp(x) - den:', q)
        e = sum(p) / sum(q)
        optprint('Pade(x=1) = {:.6} error = {:7.2}'.format(
            e,
            abs(e/numpy.exp(1) - 1.)
            ))
        # the (1,0) "Pade" is just the truncated Taylor series 1 + x
        assert numpy.allclose(p, [1., 1.])
        assert numpy.allclose(q, [1.])
    def test_pade_svd_consistency(self):
        " pade_svd self consistency -- a tolerance-reduced Pade equals the exact one "
        # high-order taylor series
        x = gv.powerseries.PowerSeries([0,1], order=20)
        f = np.exp(x).c
        # verify that reduced-order Pades are exact Pades
        m,n = 7,7
        for rtol in [1, 0.1, 0.01, 0.001]:
            a, b = pade_svd(f, m, n, rtol=rtol)
            mm = len(a) - 1
            nn = len(b) - 1
            if (m,n) != (mm,nn):
                # recompute at the reduced order without rtol; must match
                aa, bb = pade_svd(f, mm, nn)
                self.assertTrue(np.allclose(aa, a))
                self.assertTrue(np.allclose(bb, b))
def test_pade_gvar(self):
" pade_gvar(tayl, m, n) "
optprint('\n=========== Test pade_gvar')
e_exp = [1.0, 1.0, 1.0/2.0, 1.0/6.0, 1.0/24.0, 1.0/120.0, 1.0/720.]
def _scipy_pade(m, n):
p, q = scipy_pade(e_exp[:m + n + 1], n)
return p.c[-1::-1], q.c[-1::-1]
def print_result(p, q):
optprint('num =', p)
optprint('den =', q)
def test_result(p, q, e_exp):
m = len(p) - 1
n = len(q) - 1
# test against scipy
p0, q0 = _scipy_pade(m, n)
try:
assert numpy.allclose(gvar.mean(p), p0)
except:
print (m,n, p0, p, q0, q)
assert numpy.allclose(gvar.mean(q), q0)
# test that errors correlate with input coefficients
num = gvar.powerseries.PowerSeries(p, order=m + n)
den = gvar.powerseries.PowerSeries(q, order=m + n)
ratio = (num/den).c / e_exp[:m + n + 1]
assert numpy.allclose(gvar.mean(ratio), 1.)
assert numpy.allclose(gvar.sdev(ratio), 0.0)
# print('scipy', _scipy_pade(1,1), pade_svd(e_exp, 3,2, rtol=0.01))
# 1% noise --- automatically reduces to (2,1)
e_exp_noise = [x * gvar.gvar('1.0(1)') for x in e_exp]
p, q = pade_gvar(e_exp_noise, 3, 2)
print_result(p, q)
self.assertEqual(len(p), 3)
self.assertEqual(len(q), 2)
test_result(p, q, e_exp_noise)
# 30% noise --- automatically reduces to (1,1)
e_exp_noise = [x * gvar.gvar('1.0(3)') for x in e_exp]
p, q = pade_gvar(e_exp_noise, 3, 2)
self.assertEqual(len(p), 2)
self.assertEqual(len(q), 2)
test_result(p, q, e_exp_noise)
    def test_amu(self):
        " a_mu(vpol) -- against known analytic one-loop results "
        optprint('\n=========== Test a_mu')
        def no_vacpol(q2):
            return 0.25 / ALPHA ** 2
        # coefficient of alpha/pi: a constant vacpol gives exactly 1/2
        amu = a_mu(no_vacpol)
        optprint('coef of alpha/pi = {} error = {:7.2}'.format(
            amu, abs(amu-0.5) / 0.5
            ))
        assert numpy.allclose(amu, 0.5)
        # R. Karplus and N.M. Kroll result from Phys Rev 77 (#4), 536 (1950):
        # (alpha/pi)**2 * (3 + 11/36 - pi**2 / 3.)
        amu = a_mu(vacpol.fermion(m=Mmu))
        exact = (ALPHA/numpy.pi) ** 2 * ( 3 + 11./36 - numpy.pi**2 / 3.)
        optprint('a_mu(m=mu) = {} error = {:7.2}'.format(
            amu, abs(amu/exact - 1.)
            ))
        assert numpy.allclose(amu/exact, 1.)
        # H. Suura and E.H. Wichmann in Phys Rev 105, 1930 (1950):
        # (alpha/pi)**2 ( log(mmu/me)/3 - 25/36 + O(me/mu))
        ratio = 1e5
        amu = a_mu(vacpol.fermion(Mmu/ratio))
        exact = (ALPHA/numpy.pi) ** 2 * ( numpy.log(ratio)/3. - 25./36.)
        assert numpy.allclose(amu/exact, 1., rtol=3/ratio)
    def test_noise(self):
        " a_mu(vpol) with noisy fermion loop -- Pade order auto-reduces with noise "
        optprint('\n=========== Test noise (fermion loop)')
        def print_result(tag, amu, exact, pihat):
            # diagnostic line: value, relative error, Pade order, pole check
            line = '{:11} {:<13} {:15} {:15} {:15}'
            line = line.format(
                tag,
                amu if isinstance(amu, gvar.GVar) else '{:.8}'.format(amu),
                ' error = {:7.2}'.format(abs(gvar.mean(amu)/exact - 1.)),
                ' order = {}'.format(pihat.order),
                ' bad poles = {}'.format(pihat.badpoles())
                )
            optprint(line)
        # test at mK
        pihat_exact = vacpol.fermion(m=0.4937)
        exact = a_mu(pihat_exact)
        pihat = vacpol(pihat_exact.taylor(), (9,9))
        amu = a_mu(pihat)
        print_result('1loop(mK):', amu, exact, pihat)
        assert numpy.allclose(amu/exact, 1., rtol=1e-5)
        # mK with noise
        tayl = [
            ci * gvar.gvar('1.00(1)')
            for ci in pihat_exact.taylor()
            ]
        pihat = vacpol(tayl, (2,2))
        amu = a_mu(pihat)
        print_result('1loop(mK):', amu, exact, pihat)
        assert numpy.allclose(amu.mean/exact, 1., rtol=1e-2)
        # test at mpi
        pihat_exact = vacpol.fermion(m=MPI)
        exact = a_mu(pihat_exact)
        pihat = vacpol(pihat_exact.taylor(), (9,9))
        amu = a_mu(pihat)
        print_result('1loop(mpi):', amu, exact, pihat)
        assert numpy.allclose(amu/exact, 1., rtol=1e-4)
        # mpi with noise
        tayl = [
            ci * gvar.gvar('1.00(1)')
            for ci in pihat_exact.taylor()
            ]
        pihat = vacpol(tayl, (2,2), warn=True)
        amu = a_mu(pihat)
        print_result('1loop(mpi):', amu, exact, pihat)
        assert numpy.allclose(amu.mean/exact, 1., rtol=1e-2)
    def test_scalar(self):
        " vacpole.scalar(mpi) -- against independent vegas integrations "
        optprint('\n=========== Test scalar loop')
        # reference values computed separately with vegas
        for mpi, amu_vegas in [(MPI, '7.076903(1)e-9'), (MK, '6.631148(1)e-10')]:
            amu = a_mu(vacpol.scalar(mpi)) # a_mu_pipi(mpi)
            amu_vegas = gvar.gvar(amu_vegas)
            diff = gvar.fabs(amu - amu_vegas)
            assert diff.mean < 5 * diff.sdev
            optprint('1-loop({}) = {!s} error = {}'.format(mpi, amu, diff))
    def test_exact_vs_pade(self):
        " a_mu from pade vs from function -- Pade approximants stay within 1% "
        optprint('\n=========== Test exact vs pade')
        m = MPI
        # check scalar, fermion and vector vacpols at several Taylor orders
        for n in [4, 5, 6, 7]:
            for f in [
                ('scalar', vacpol.scalar),
                ('fermion', vacpol.fermion),
                ('vector', vacpol.vector)
                ]:
                amu_exact = a_mu(f[1](m, n=n))
                vpol = f[1](m, n=n, use_pade=True)
                amu_pade = a_mu(vpol)
                optprint('{:>7}: order = {} pade/exact = {}'.format(
                    f[0], vpol.order, amu_pade / amu_exact
                    ))
                assert abs(amu_pade / amu_exact - 1.) < 0.01
            optprint(5 * '-')
    def test_exact_vs_fourier(self):
        " a_mu from fourier transform of G(t) vs exact multi-state answer "
        optprint('\n=========== Test exact vs fourier')
        # fake data --- N=3 states
        N = 3
        ainv = 2.5
        Z = 1.5
        # the following are in lattice units, simulating lattice output
        m = np.array([0.5, 1.0, 1.5])[:N, None]
        t = np.arange(100)[None,:]
        G = np.sum(m / 4 * np.exp(-t*m), axis=0) / Z**2
        # fourier analysis
        fvpol = fourier_vacpol(G, ainv=ainv, Z=Z, periodic=False)
        a_mu_fourier = a_mu(fvpol, qmax=1000.)
        optprint('a_mu from fourier: {}'.format(a_mu_fourier))
        # exact result for 1, 2, and 3 states; only N=3 must match
        for n in range(1, N+1):
            a_mu_exact = np.sum(
                [a_mu(vacpol.vector(mi*ainv)) * ainv**2 for mi in m[:n]]
                )
            optprint('a_mu from {} states: {}'.format(n, a_mu_exact))
        self.assertLess(abs(1 - a_mu_fourier/a_mu_exact), 1e-4)
    def test_fourier_tmin_tmax(self):
        " fourier_vacpol with tmin,tmax -- the two windows must sum to the total "
        optprint('\n=========== Test exact vs fourier')
        # fake data --- N=3 states
        N = 3
        ainv = 2.5
        Z = 1.5
        # the following are in lattice units, simulating lattice output
        m = np.array([0.5, 1.0, 1.5])[:N, None]
        t = np.arange(100)[None,:]
        G = np.sum(m / 4 * np.exp(-t*m), axis=0) / Z**2
        # fourier analysis: full range, t>=10, and t<=10 pieces
        fvpol = fourier_vacpol(G, ainv=ainv, Z=Z, periodic=False)
        fvpolp = fourier_vacpol(G, ainv=ainv, Z=Z, periodic=False, tmin=10.)
        fvpolm = fourier_vacpol(G, ainv=ainv, Z=Z, periodic=False, tmax=10.)
        a_mu_all = a_mu(fvpol, qmax=1000.)
        a_mu_p = a_mu(fvpolp, qmax=1000.)
        a_mu_m = a_mu(fvpolm, qmax=1000.)
        self.assertLess(abs(1 - a_mu_all/(a_mu_p + a_mu_m )), 1e-6)
    def test_exact_vs_fourier_periodic(self):
        " a_mu from fourier of a periodic G(t) vs exact multi-state answer "
        optprint('\n=========== Test exact vs fourier')
        # loop over len(G) = even and odd
        for start in [-2, -1]:
            # fake data --- N=3 states
            N = 3
            ainv = 2.5
            Z = 1.5
            # the following are in lattice units, simulating lattice output
            m = np.array([0.5, 1.0, 1.5])[:N, None]
            t = np.arange(100)
            # fold the time axis to build a periodic correlator
            t = np.concatenate((t, t[start:0:-1]))
            G = np.sum(m / 4 * np.exp(-t*m), axis=0) / Z**2
            # fourier analysis
            fvpol = fourier_vacpol(G, ainv=ainv, Z=Z, periodic=True)
            a_mu_fourier = a_mu(fvpol, qmax=1000.)
            optprint('a_mu from fourier: {}'.format(a_mu_fourier))
            # exact result for 1, 2, and 3 states; only N=3 must match
            for n in range(1, N+1):
                a_mu_exact = np.sum(
                    [a_mu(vacpol.vector(mi*ainv)) * ainv**2 for mi in m[:n]]
                    )
                optprint('a_mu from {} states: {}'.format(n, a_mu_exact))
            self.assertLess(abs(1 - a_mu_fourier/a_mu_exact), 1e-4)
    def test_fourier_periodic_tmin_tmax(self):
        " periodic fourier_vacpol with tmin,tmax -- windows must sum to the total "
        optprint('\n=========== Test exact vs fourier')
        # loop over len(G) = even and odd
        for start in [-2, -1]:
            # fake data --- N=3 states
            N = 3
            ainv = 2.5
            Z = 1.5
            # the following are in lattice units, simulating lattice output
            m = np.array([0.5, 1.0, 1.5])[:N, None]
            t = np.arange(100)
            # fold the time axis to build a periodic correlator
            t = np.concatenate((t, t[start:0:-1]))
            G = np.sum(m / 4 * np.exp(-t*m), axis=0) / Z**2
            # fourier analysis: full range, t>=10 and t<=10 pieces
            fvpol = fourier_vacpol(G, ainv=ainv, Z=Z, periodic=True)
            fvpolp = fourier_vacpol(G, ainv=ainv, Z=Z, periodic=True, tmin=10.)
            fvpolm = fourier_vacpol(G, ainv=ainv, Z=Z, periodic=True, tmax=10.)
            a_mu_all = a_mu(fvpol, qmax=1000.)
            a_mu_p = a_mu(fvpolp, qmax=1000.)
            a_mu_m = a_mu(fvpolm, qmax=1000.)
            self.assertLess(abs(1 - a_mu_all/(a_mu_p + a_mu_m )), 1e-6)
    def test_exact_vs_vacpol_FT(self):
        " a_mu from fourier_vacpol(vacpol.FT) -- round trip through moments "
        optprint('\n=========== Test exact vs fourier')
        # fake data --- N=3 states
        N = 3
        ainv = 2.5
        Z = 1.5
        # the following are in lattice units, simulating lattice output
        m = np.array([0.5, 1.0, 1.5])[:N, None]
        t = np.arange(100)[None,:]
        G = np.sum(m / 4 * np.exp(-t*m), axis=0) / Z**2
        t = t.reshape(-1)
        m = m.reshape(-1)
        # fourier analysis of the FT of the moment-based vacpol
        vpol = vacpol(moments(G, ainv=ainv, Z=Z, periodic=False), order=(3,3))
        # lowest pole must reproduce the lightest state's mass and amplitude
        self.assertLess(np.fabs(vpol.E[-1] - m[0] * ainv) / m[0]*ainv, 1e-6)
        self.assertLess(np.fabs(vpol.ampl[-1] - m[0] * ainv**3/4) / (m[0]*ainv**3/4), 1e-6)
        fvpol = fourier_vacpol(vpol.FT(t, ainv=ainv), ainv=ainv, periodic=False)
        a_mu_fmom = a_mu(fvpol, qmax=1000.)
        optprint('a_mu from FT of moments: {}'.format(a_mu_fmom))
        # exact result for 1, 2, and 3 states; only N=3 must match
        for n in range(1, N+1):
            a_mu_exact = np.sum(
                [a_mu(vacpol.vector(mi*ainv)) * ainv**2 for mi in m[:n]]
                )
            optprint('a_mu from {} states: {}'.format(n, a_mu_exact))
        self.assertLess(abs(1 - a_mu_fmom/a_mu_exact), 1e-4)
    def test_vacpol_poles(self):
        " vacpol.poles and vacpol.residues -- recovered from a two-vector sum "
        m1 = gv.gvar('1.0(1)')
        f1 = gv.gvar('0.25(1)')
        vpol1 = vacpol.vector(m1, f=f1)
        m2 = gv.gvar('2.0(1)')
        f2 = gv.gvar('0.5(1)')
        vpol2 = vacpol.vector(m2, f=f2)
        # add two vectors together and check poles, residues
        vpol = vacpol(vpol1.taylor() + vpol2.taylor(), order=(2,2))
        # poles at q2 = -m**2, residues -f**2/2; heavier state listed first
        self.assertEqual(gv.fabs(vpol.poles[0] + m2**2).fmt(5), '0.00000(0)')
        # print(gv.fabs(vpol.residues[0] + f2**2/2).fmt(5))
        self.assertEqual(gv.fabs(vpol.residues[0] + f2**2/2).fmt(5), '0.00000(0)')
        self.assertEqual(gv.fabs(vpol.poles[1] + m1**2).fmt(5), '0.00000(0)')
        self.assertEqual(gv.fabs(vpol.residues[1] + f1**2/2).fmt(5), '0.00000(0)')
    def test_warn_exception(self):
        " vacpol(warn=True) -- bad poles raise; order reduction warns "
        # NOTE(review): ``warnings`` is not imported at the top of this file;
        # presumably it is re-exported by ``from g2tools import *`` -- confirm.
        # vacpol.scalar(MPI).taylor()
        tayl = np.array([ 1.08361463e-02, -3.97340348e-02, 2.26639708e-01,
            -1.58653778e+00, 1.25300529e+01, -1.07205606e+02,
            9.71193290e+02, -9.18408638e+03, 8.98033421e+04,
            -9.01971926e+05])
        tayl = tayl * gv.gvar(len(tayl) * ['1(1)'])
        tayl += np.array([ 5.12534367e-05, -2.70757996e-04, 5.49464167e-04,
            -2.69828134e-02, 9.43691955e-02, -1.64530731e+00,
            4.97938388e-02, -1.10418131e+01, -7.24696697e+02,
            2.59030047e+04])
        # noisy coefficients with bad poles must raise under warn=True
        with self.assertRaises(ValueError):
            vpol = vacpol(tayl, warn=True, qth=2*MPI, order=(3,3), rtol=1e-14)
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            # (1,1) Pade is exact for vector --- should reduce (3,3) to (1,1)
            m = gv.gvar('1.0(1)')
            f = gv.gvar('0.25(1)')
            vpol = vacpol(vacpol.vector(m, f, n=10).taylor(), order=(3,3), warn=True)
            self.assertTrue(w)
            self.assertEqual(vpol.order, (1,1))
# Run the full suite when executed directly.
if __name__ == '__main__':
    unittest.main()
| gplepage/g2tools | tests/test_g2tools.py | Python | gpl-3.0 | 18,525 |
import datetime
import time
import urllib2
import re
from bs4 import BeautifulSoup
from datetime import timedelta, date
from urllib2 import HTTPError
#Initialize Variables
gameMatrix = []
gameList =[]
# Get Webpage Data
class GetData:
    """Scrapes ESPN MLB pages for lineups and batting-split stats (Python 2).

    NOTE(review): callers must set ``self.s`` (an ESPN game id) before
    calling ``getteamdata``; ``self.homeadd`` routes parsed players into
    ``self.homeTeam`` (1) or ``self.awayTeam`` (0).
    """
    def __init__(self):
        # accumulated per-player stat rows for each side of the matchup
        self.awayTeam = []
        self.homeTeam = []
    def getplayerstats(self,s):
        """Fetch the batting-splits page for player id *s*, derive rate
        stats, and append them to homeTeam or awayTeam per self.homeadd."""
        websiteString = "http://espn.go.com/mlb/player/splits/_/id/" + s + "/type/batting3/"
        try:
            sock2 = urllib2.urlopen(websiteString)
            print websiteString
        except urllib2.HTTPError, e:
            # NOTE(review): on HTTP error sock2 is never bound, so the
            # sock2.read() below raises NameError -- confirm intended
            urlerrorCode = e.code
            print urlerrorCode
            print e.fp.read()
        dataslerp = sock2.read()
        dataderp = dataslerp.replace(' ', '')
        soup = BeautifulSoup(dataderp)
        table = soup.find_all("table", { "class" : "tablehead" })
        rows = table[0].find_all("tr")
        # walk the splits table; the "Total" row carries the counting stats,
        # "vs. Left"/"vs. Right" carry the platoon batting averages
        for item in rows:
            cells = item.find_all("td")
            if cells[0].text =="Total":
                playerAtBats = float(cells[1].text)
                playerHits = float(cells[3].text)
                playerDoubles = float(cells[4].text)
                playerTriples = float(cells[5].text)
                playerHomeRuns = float(cells[6].text)
                playerWalks = float(cells[8].text)
                playerHBP = float(cells[9].text)
                playerStrikeOuts = float(cells[10].text)
            if cells[0].text =="vs. Left":
                playerVsLeft = float(cells[14].text)
            if cells[0].text =="vs. Right":
                playerVsRight = float(cells[14].text)
        # NOTE(review): every name above is unbound if its row is missing;
        # the bare except below then masks the NameError
        try:
            playerSingles = (playerHits - playerDoubles - playerTriples - playerHomeRuns)
        except:
            print "Shwat?"
        # hit-type fractions and plate-discipline rates; each guarded
        # against division by zero
        try:
            player1B = round(float(playerSingles)/float(playerHits),3)
        except:
            player1B = 0
        try:
            player2B = round(float(playerDoubles)/float(playerHits),3)
        except:
            player2B = 0
        try:
            player3B = round(float(playerTriples)/float(playerHits),3)
        except:
            player3B = 0
        try:
            playerHR = round(float(playerHomeRuns)/float(playerHits),3)
        except:
            playerHR = 0
        try:
            playerWalkRate = round((playerWalks + playerHBP )/(playerWalks + playerHBP + playerAtBats),3)
        except:
            playerWalkRate = 0
        try:
            playerStrikeOutRate = round(float(playerStrikeOuts) / float(playerAtBats),3)
        except:
            playerStrikeOutRate = 0
        if self.homeadd == 1:
            self.homeTeam.append([s,playerVsLeft,playerVsRight,player1B,player2B,player3B,playerHR,playerWalkRate,playerStrikeOutRate])
        elif self.homeadd == 0:
            self.awayTeam.append([s,playerVsLeft,playerVsRight,player1B,player2B,player3B,playerHR,playerWalkRate,playerStrikeOutRate])
        # be polite to the server between requests
        time.sleep(1)
    def getteamdata(self):
        """Fetch the game-preview page for self.s, extract both lineups'
        player ids, then pull stats for every player on each side."""
        websiteString = "http://espn.go.com/mlb/preview?id=" + self.s
        try:
            sock2 = urllib2.urlopen(websiteString)
            print "Opened:" + " " + websiteString
        except urllib2.HTTPError, e:
            urlerrorCode = e.code
            print urlerrorCode
            print e.fp.read()
        dataslerp = sock2.read()
        dataderp = dataslerp.replace(' ', '')
        soup = BeautifulSoup(dataderp)
        table = soup.find_all("div", { "class" : "mod-container mod-open mlb-box mod-open-gamepack" })
        rowCounter = 0
        playerList = []
        # cells repeat in triples: batting-order slot, away player, home player
        for item in table[1].find_all("td"):
            if rowCounter % 3 == 0:
                rowNum = item.text
                rowCounter +=1
            elif rowCounter % 3 == 1:
                for link in item.find_all('a'):
                    leftPlayer = re.findall(r'\d+',link['href'].replace("http://espn.go.com/mlb/player/_/id/",""))
                rowCounter +=1
            elif rowCounter % 3 == 2:
                for link in item.find_all('a'):
                    rightPlayer = re.findall(r'\d+',link['href'].replace("http://espn.go.com/mlb/player/_/id/",""))
                playerList.append([rowNum,leftPlayer[0],rightPlayer[0]])
                rowCounter +=1
        # fetch stats for the home column, then the away column
        for item in playerList:
            self.homeadd = 1
            self.getplayerstats(item[1])
        for item in playerList:
            self.homeadd = 0
            self.getplayerstats(item[2])
#main driver (not yet implemented):
#find the games between these dates and fetch their data
| Aketay/Baseball-Projection | lineup.py | Python | mit | 4,700 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class FlattenParameterGroup(Model):
    """
    Additional parameters for the putSimpleProductWithGrouping operation.

    :param name: Product name with value 'groupproduct'
    :type name: str
    :param product_id: Unique identifier representing a specific product for
     a given latitude & longitude. For example, uberX in San Francisco will
     have a different product_id than uberX in Los Angeles.
    :type product_id: str
    :param description: Description of product.
    :type description: str
    :param max_product_display_name: Display name of product.
    :type max_product_display_name: str
    :param odatavalue: URL value.
    :type odatavalue: str
    """

    # validation constraints enforced by the msrest serializer
    _validation = {
        'name': {'required': True},
        'product_id': {'required': True},
        'max_product_display_name': {'required': True},
    }

    def __init__(self, name, product_id, max_product_display_name, description=None, odatavalue=None, **kwargs):
        # NOTE(review): **kwargs is accepted for generated-code compatibility
        # but is not forwarded (Model.__init__ is never called) -- confirm.
        self.name = name
        self.product_id = product_id
        self.description = description
        self.max_product_display_name = max_product_display_name
        self.odatavalue = odatavalue
| stankovski/AutoRest | AutoRest/Generators/Python/Python.Tests/Expected/AcceptanceTests/ModelFlattening/autorestresourceflatteningtestservice/models/flatten_parameter_group.py | Python | mit | 1,673 |
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2001-2006 Donald N. Allingham
# Copyright (C) 2008 Gary Burton
# Copyright (C) 2010 Nick Hall
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Note View.
"""
#-------------------------------------------------------------------------
#
# python modules
#
#-------------------------------------------------------------------------
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
import logging
_LOG = logging.getLogger(".plugins.noteview")
#-------------------------------------------------------------------------
#
# GTK/Gnome modules
#
#-------------------------------------------------------------------------
from gi.repository import Gtk
#-------------------------------------------------------------------------
#
# gramps modules
#
#-------------------------------------------------------------------------
from gramps.gui.views.listview import ListView, TEXT, MARKUP, ICON
from gramps.gui.views.treemodels import NoteModel
from gramps.gen.utils.db import get_note_referents
from gramps.gen.errors import WindowActiveError
from gramps.gui.views.bookmarks import NoteBookmarks
from gramps.gen.config import config
from gramps.gen.lib import Note
from gramps.gui.ddtargets import DdTargets
from gramps.gui.dialog import ErrorDialog
from gramps.gui.filters.sidebar import NoteSidebarFilter
from gramps.gui.editors import EditNote, DeleteNoteQuery
from gramps.gui.merge import MergeNote
from gramps.gen.plug import CATEGORY_QR_NOTE
#-------------------------------------------------------------------------
#
# NoteView
#
#-------------------------------------------------------------------------
class NoteView(ListView):
    """
    Noteview, a normal flat listview for the notes
    """
    # indices into COLUMNS below
    COL_PREVIEW = 0
    COL_ID = 1
    COL_TYPE = 2
    COL_PRIV = 3
    COL_TAGS = 4
    COL_CHAN = 5
    # column definitions: (title, renderer type, icon name or None)
    COLUMNS = [
        (_('Preview'), TEXT, None),
        (_('ID'), TEXT, None),
        (_('Type'), TEXT, None),
        (_('Private'), ICON, 'gramps-lock'),
        (_('Tags'), TEXT, None),
        (_('Last Changed'), TEXT, None),
        ]
    # default setting with visible columns, order of the col, and their size
    CONFIGSETTINGS = (
        ('columns.visible', [COL_PREVIEW, COL_ID, COL_TYPE]),
        ('columns.rank', [COL_PREVIEW, COL_ID, COL_TYPE, COL_PRIV, COL_TAGS,
                          COL_CHAN]),
        ('columns.size', [350, 75, 100, 40, 100, 100]))
    # tooltip strings for the toolbar/menu actions
    ADD_MSG = _("Add a new note")
    EDIT_MSG = _("Edit the selected note")
    DEL_MSG = _("Delete the selected note")
    MERGE_MSG = _("Merge the selected notes")
    FILTER_TYPE = "Note"
    QR_CATEGORY = CATEGORY_QR_NOTE
def __init__(self, pdata, dbstate, uistate, nav_group=0):
signal_map = {
'note-add' : self.row_add,
'note-update' : self.row_update,
'note-delete' : self.row_delete,
'note-rebuild' : self.object_build,
}
ListView.__init__(
self, _('Notes'), pdata, dbstate, uistate,
NoteModel, signal_map,
NoteBookmarks, nav_group,
filter_class=NoteSidebarFilter,
multiple=True)
self.func_list.update({
'<PRIMARY>J' : self.jump,
'<PRIMARY>BackSpace' : self.key_delete,
})
self.additional_uis.append(self.additional_ui())
def navigation_type(self):
return 'Note'
def drag_info(self):
"""
Indicate that the drag type is an EVENT
"""
return DdTargets.NOTE_LINK
def get_stock(self):
"""
Use the gramps-event stock icon
"""
return 'gramps-notes'
def additional_ui(self):
"""
Defines the UI string for UIManager
"""
return '''<ui>
<menubar name="MenuBar">
<menu action="FileMenu">
<placeholder name="LocalExport">
<menuitem action="ExportTab"/>
</placeholder>
</menu>
<menu action="BookMenu">
<placeholder name="AddEditBook">
<menuitem action="AddBook"/>
<menuitem action="EditBook"/>
</placeholder>
</menu>
<menu action="GoMenu">
<placeholder name="CommonGo">
<menuitem action="Back"/>
<menuitem action="Forward"/>
<separator/>
</placeholder>
</menu>
<menu action="EditMenu">
<placeholder name="CommonEdit">
<menuitem action="Add"/>
<menuitem action="Edit"/>
<menuitem action="Remove"/>
<menuitem action="Merge"/>
</placeholder>
<menuitem action="FilterEdit"/>
</menu>
</menubar>
<toolbar name="ToolBar">
<placeholder name="CommonNavigation">
<toolitem action="Back"/>
<toolitem action="Forward"/>
</placeholder>
<placeholder name="CommonEdit">
<toolitem action="Add"/>
<toolitem action="Edit"/>
<toolitem action="Remove"/>
<toolitem action="Merge"/>
</placeholder>
</toolbar>
<popup name="Popup">
<menuitem action="Back"/>
<menuitem action="Forward"/>
<separator/>
<menuitem action="Add"/>
<menuitem action="Edit"/>
<menuitem action="Remove"/>
<menuitem action="Merge"/>
<separator/>
<menu name="QuickReport" action="QuickReport"/>
</popup>
</ui>'''
def define_actions(self):
ListView.define_actions(self)
self._add_action('FilterEdit', None, _('Note Filter Editor'),
callback=self.filter_editor,)
self._add_action('QuickReport', None, _("Quick View"), None, None, None)
def get_handle_from_gramps_id(self, gid):
obj = self.dbstate.db.get_note_from_gramps_id(gid)
if obj:
return obj.get_handle()
else:
return None
def add(self, obj):
try:
EditNote(self.dbstate, self.uistate, [], Note())
except WindowActiveError:
pass
def remove(self, obj):
self.remove_selected_objects()
def remove_object_from_handle(self, handle):
the_lists = get_note_referents(handle, self.dbstate.db)
object = self.dbstate.db.get_note_from_handle(handle)
query = DeleteNoteQuery(self.dbstate, self.uistate, object, the_lists)
is_used = any(the_lists)
return (query, is_used, object)
def edit(self, obj):
for handle in self.selected_handles():
note = self.dbstate.db.get_note_from_handle(handle)
try:
EditNote(self.dbstate, self.uistate, [], note)
except WindowActiveError:
pass
def merge(self, obj):
"""
Merge the selected notes.
"""
mlist = self.selected_handles()
if len(mlist) != 2:
msg = _("Cannot merge notes.")
msg2 = _("Exactly two notes must be selected to perform a merge. "
"A second note can be selected by holding down the "
"control key while clicking on the desired note.")
ErrorDialog(msg, msg2)
else:
MergeNote(self.dbstate, self.uistate, mlist[0], mlist[1])
def tag_updated(self, handle_list):
"""
Update tagged rows when a tag color changes.
"""
all_links = set([])
for tag_handle in handle_list:
links = set([link[1] for link in
self.dbstate.db.find_backlink_handles(tag_handle,
include_classes='Note')])
all_links = all_links.union(links)
self.row_update(list(all_links))
def add_tag(self, transaction, note_handle, tag_handle):
"""
Add the given tag to the given note.
"""
note = self.dbstate.db.get_note_from_handle(note_handle)
note.add_tag(tag_handle)
self.dbstate.db.commit_note(note, transaction)
def get_default_gramplets(self):
"""
Define the default gramplets for the sidebar and bottombar.
"""
return (("Note Filter",),
("Note Backlinks",))
| pmghalvorsen/gramps_branch | gramps/plugins/view/noteview.py | Python | gpl-2.0 | 9,257 |
import piglow
from time import sleep
import psutil

piglow.auto_update = True

# Each ring colour lights up once the CPU load climbs past its threshold
# (percent).  Ordered low-to-high so a heavy load lights the whole ring.
THRESHOLD_COLOURS = [
    (10, piglow.white),
    (20, piglow.blue),
    (40, piglow.green),
    (60, piglow.yellow),
    (80, piglow.orange),
    (90, piglow.red),
]

while True:
    load = psutil.cpu_percent()
    if load < 5:
        # Near-idle: blank the whole ring.
        piglow.all(0)
    for threshold, light in THRESHOLD_COLOURS:
        if load > threshold:
            light(20)
    piglow.show()
    sleep(0.01)
| developius/piometer | cpu.py | Python | mit | 391 |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016, the cclib development team
#
# This file is part of cclib (http://cclib.github.io) and is distributed under
# the terms of the BSD 3-Clause License.
"""Unit tests for writer filewriter module."""
import os
import unittest
import cclib
# Resolve paths relative to this test module so the suite can run from any
# working directory; the data directory sits two levels above the tests.
__filedir__ = os.path.dirname(__file__)
__filepath__ = os.path.realpath(__filedir__)
__datadir__ = os.path.join(__filepath__, "..", "..")
class WriterTest(unittest.TestCase):

    def test_init(self):
        """Does the class initialize correctly?"""
        logfile = os.path.join(
            __datadir__, "data/ADF/basicADF2007.01/dvb_gopt.adfout")
        parsed = cclib.io.ccopen(logfile).parse()
        writer = cclib.io.filewriter.Writer(parsed)
        # The writer must hold on to the ccData instance it was given.
        self.assertEqual(writer.ccdata, parsed)
# Allow running this test module directly with `python testfilewriter.py`.
if __name__ == "__main__":
    unittest.main()
| Schamnad/cclib | test/io/testfilewriter.py | Python | bsd-3-clause | 900 |
## ENVISIoN
##
## Copyright (c) 2021 Gabriel Anderberg, Didrik Axén, Adam Engman,
## Kristoffer Gubberud Maras, Joakim Stenborg
## All rights reserved.
##
## Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## 1. Redistributions of source code must retain the above copyright notice, this
## list of conditions and the following disclaimer.
## 2. Redistributions in binary form must reproduce the above copyright notice,
## this list of conditions and the following disclaimer in the documentation
## and/or other materials provided with the distribution.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
## ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
## WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
## DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
## ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
## (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
## LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
## ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
## SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
##
## ##############################################################################################
import sys, os, inspect
import time
import select
import json
import random as rd
import PySimpleGUI as sg
# Point Inviwo at its binary directory.  The previous
# os.popen('export INVIWO_HOME=...') ran the export in a throw-away
# subshell (and leaked the pipe), so it never affected this process'
# environment; set the variable on os.environ directly instead.
os.environ["INVIWO_HOME"] = os.path.join(
    os.path.expanduser("~"), "ENVISIoN", "inviwo-build", "bin")
# Make the envisionpy package (one directory up) importable.
path_to_current_folder = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
sys.path.append(path_to_current_folder + "/../")
from envisionpy.EnvisionMain import EnvisionMain
import envisionpy
from envisionpy.hdf5parser import *
import threading
import queue
def send_request(rtype, data):
    """Forward a request of type *rtype* with payload *data* to the backend."""
    request = {'type': rtype, 'parameters': data}
    return envisionMain.handle_request(request)
# Backend instance that owns the Inviwo network; polled from the event loop.
envisionMain = EnvisionMain()

sg.theme('DarkGrey14')

# ------------------------------------------------------------------------- #
#                          Variables and Globals                            #
# ------------------------------------------------------------------------- #
allow_simultaneous_visualisations = True
allow_simultaneous_visualisations_over_datasets = True

# Toggle flags shared by the toggle_* helpers below.
# NOTE(review): these are module-wide, i.e. shared across all visualisations
# and datasets — toggling one canvas flips the flag for every view. Confirm
# this is intended.
canvas = True
slice_canvas = False
vectors = True
toggle_iso = True
slice_plane = True

# Bookkeeping for the current selection in the GUI.
current_dataset = None          # key of the selected dataset slot ('dataN')
current_dataset_is_hdf5 = None
current_folder = None           # VASP/ELK directory picked in the browser
current_file = None             # HDF5 file picked in the browser
current_vis = None              # backend type name, e.g. 'charge'
current_vis_key = None          # display name, e.g. 'Charge'
current_vis_hdf5 = None         # basename of the HDF5 file used by the vis

# Widget counts / limits used when building and dispatching the layout.
number_of_buttons = 4
number_of_sliders = 4
number_of_comboboxes = 4
max_datasets = 5

# Per-dataset-slot state, keyed by 'data0'..'data4'.
dataset_dir = {}        # slot -> source directory or HDF5 path
dataset_vises = {}      # slot -> list of available visualisation names
dataset_if_hdf5 = {}    # slot -> HDF5 path if loaded directly, else None
current_vises = {}      # slot -> list of currently running visualisations

# Display name (button label) -> backend visualisation type.
visualisations = {'Force' : 'force',
                  'Molecular Dynamics' : 'molecular_dynamics',
                  'PCF' : 'pcf',
                  'BandStructure' : 'band2d',
                  'BandStructure 3D' : 'band3d',
                  'Charge' : 'charge',
                  'ELF' : 'elf',
                  'Fermi Surface' : 'fermi',
                  'Atom Positions' : 'atom'}

# Backend type -> display name (inverse of the map above).
visualisations_reverse = {'force' : 'Force',
                  'molecular_dynamics' : 'Molecular Dynamics',
                  'pcf' : 'PCF',
                  'band2d' : 'BandStructure',
                  'band3d' : 'BandStructure 3D',
                  'charge' : 'Charge',
                  'elf' : 'ELF',
                  'fermi' : 'Fermi Surface',
                  'atom' : 'Atom Positions'}

visualisations_button_tuple = tuple([i for i in visualisations.keys()])

# Per-visualisation control definitions consumed by create_vis_attributes():
# 'buttonN' -> option-button label, 'combo' -> {label: choices},
# 'slider' -> {label: [(min, max), default]}.
force_attr = {'button0' : 'Toggle Canvas',
              'button1' : 'Toggle Force Vectors',
              'slider': {'Set Radius': [(0,100), 50],
                         'Set Opacity': [(0,100), 100]}}

moldyn_atttr = {'button0' : 'Toggle Canvas',
                'button1' : 'Play/Pause',
                'slider': {'Set Radius': [(0,100), 50],
                           'Set Speed': [(0,100), 50],
                           'Set Opacity': [(0,100), 100]}}

pcf_attr = {'button0' : 'Toggle Canvas'}

band2d_attr = {'button0' : 'Toggle Canvas'}

band3d_attr = {'button0' : 'Toggle Canvas'}

volume_attr = {'button0' : 'Toggle Canvas',
               'button1' : 'Toggle ISO',
               'button2' : 'Toggle Slice Canvas',
               'button3' : 'Toggle Slice Plane',
               'combo' : {'Shading Mode' : ['No Shading', 'Ambient', 'Diffuse',
                                            'Specular', 'Blinn Phong', 'Phong'],
                          'Volume Selection' : ['/0', '/1', '/final']},
               'slider': {'ISO Surface Value': [(0,100), 50],
                          'Slice Plane Height': [(0,100), 50]}}

atom_attr = {'button0' : 'Toggle Canvas'}

# Display name -> control definition for that visualisation.
vis_attributes = {'Force' : force_attr,
                  'Molecular Dynamics' : moldyn_atttr,
                  'PCF' : pcf_attr,
                  'BandStructure' : band2d_attr,
                  'BandStructure 3D' : band3d_attr,
                  'Charge' : volume_attr,
                  'ELF' : volume_attr,
                  'Fermi Surface' : volume_attr,
                  'Atom Positions' : atom_attr}
# ------------------------------------------------------------------------- #
# Functions that set up the GUI #
# ------------------------------------------------------------------------- #
def setup_option_buttons(return_keys = False):
    """Build the option-button rows, or return their element keys when
    *return_keys* is True (used for event dispatch)."""
    if not return_keys:
        return [[sg.Button(i, key = 'opt' + str(i), visible = False)]
                for i in range(number_of_buttons)]
    else:
        return tuple('opt' + str(i) for i in range(number_of_buttons))

def setup_combo_boxes(return_keys = False):
    """Build label+combo rows ('comN'/'comNt'), or return the combo keys."""
    if not return_keys:
        combo_row = []
        for i in range(number_of_comboboxes):
            combo_row.append([sg.Text('Combo' + str(i), key = 'com' + \
                              str(i) + 't',
                              visible = False, size = (18, 1))])
            combo_row.append([sg.Combo([str(i)], key = 'com' + str(i),
                              visible = False,
                              readonly = True, enable_events = True)])
        return combo_row
    else:
        return tuple('com' + str(i) for i in range(number_of_comboboxes))

def setup_sliders(return_keys = False):
    """Build slider+label rows ('sliN'/'sliNt'), or return the slider keys."""
    if not return_keys:
        slider_row = []
        for i in range(number_of_sliders):
            slider_row.append([sg.Slider(range = (0, 100), key = 'sli' + str(i),
                               visible = False,
                               orientation = 'horizontal',
                               resolution = 5,
                               default_value = 50, size = (15,20),
                               enable_events = True,
                               disable_number_display = True)])
            slider_row.append([sg.Text('Slider' + str(i), key = 'sli' + str(i) + \
                               't', visible = False,
                               size = (30,2))])
        return slider_row
    else:
        return tuple('sli' + str(i) for i in range(number_of_sliders))

def setup_datasets(return_keys = False):
    """Build the dataset-slot buttons and reset all per-slot state dicts,
    or return the slot keys ('data0'..) when *return_keys* is True."""
    global dataset_dir, dataset_vises, current_vises
    if not return_keys:
        for i in range(max_datasets):
            dataset_dir['data' + str(i)] = None
            dataset_vises['data' + str(i)] = None
            current_vises['data' + str(i)] = []
            dataset_if_hdf5['data' + str(i)] = None
        return [[sg.Button('Empty Dataset', key = 'data' + str(i),
                           visible = True, size = (18, 3),
                           enable_events = True, button_color = 'darkgrey')]
                for i in range(max_datasets)]
    else:
        return tuple('data' + str(i) for i in range(max_datasets))

def setup_folderloader(return_key = False):
    """Build the VASP/ELK folder-browse column.
    NOTE(review): the *return_key* flag is ignored — with True the function
    falls through and returns None; confirm whether key lookup was intended."""
    if not return_key:
        return [[sg.FolderBrowse(button_text = 'Choose VASP-directory or ELK-directory',
                                 initial_folder = path_to_current_folder +
                                 '/../unit_testing/resources',
                                 enable_events = True, key = 'foldload',
                                 size = (20,2))],
                [sg.Text('', visible = False, key = 'foldloadtext',
                         size = (20,3))],
                [sg.Button('Parse Selected \n Folder', size = (20,2),
                           enable_events = True, key = 'parsefolder')]
                ]

def setup_fileloader(return_key = False):
    """Build the HDF5 file-browse column.
    NOTE(review): *return_key* is ignored, same as setup_folderloader."""
    if not return_key:
        return [[sg.Text('Note: Loading HDF5-files\ndirectly is not\nwell supported', visible = True, key = 'text',
                         size = (18,4))],
                [sg.FileBrowse(button_text = 'Choose a HDF5-file',
                               initial_folder = path_to_current_folder + '/../ENVISIoN',
                               enable_events = True, key = 'fileload',
                               size = (18,2), file_types = (('*.hdf5', '.hdf5'),))],
                [sg.Text('', visible = False, key = 'fileloadtext',
                         size = (18,3))],
                [sg.Button('Load selected file \nin to dataset', size = (18,2),
                           enable_events = True, key = 'loadfile')]
                ]

def setup_vis_buttons():
    """Build two rows of visualisation buttons (disabled until a dataset
    providing them is parsed), splitting the map roughly in half."""
    button_row = []
    temp_row1 = []
    temp_row2 = []
    half_vis = round(len(visualisations)/2)
    vis_first_half = list(visualisations.keys())[:half_vis]
    vis_last_half = list(visualisations.keys())[half_vis:]
    for i in vis_first_half:
        temp_row1.append(sg.Button(i, button_color = 'lightgrey', disabled = True))
    for i in vis_last_half:
        temp_row2.append(sg.Button(i, button_color = 'lightgrey', disabled = True))
    button_row.append(temp_row1)
    button_row.append(temp_row2)
    return button_row
def get_selected_dataset():
    """Return the key ('dataN') of the currently selected dataset slot."""
    return current_dataset

def set_selected_dataset(event):
    """Mark the slot button named by *event* as selected (green) and grey
    out the others; records the choice in the module global."""
    global current_dataset
    for i in list(setup_datasets(True)):
        if i == event:
            window.FindElement(i).Update(visible = True,
                                         button_color = 'green')
            current_dataset = i
        else:
            window.FindElement(i).Update(visible = True,
                                         button_color = ("#fafbfc", "darkgrey"))

def set_dataset_to_vises_and_dir(vasp_path = None, current_visualisations = None, hdf5 = False):
    """Bind the selected slot to its source path and available visualisations,
    relabel the slot button, and enable/disable the visualisation buttons."""
    global dataset_dir, dataset_vises, visualisations, current_dataset, current_dataset_is_hdf5
    dataset_dir[current_dataset] = vasp_path
    dataset_vises[current_dataset] = current_visualisations
    if hdf5:
        # Directly loaded HDF5: remember the file path and show its name.
        dataset_if_hdf5[current_dataset] = current_file
        window.FindElement(current_dataset).Update(current_file.rsplit('/', 1)[-1])
    else:
        dataset_if_hdf5[current_dataset] = None
        window.FindElement(current_dataset).Update(current_folder.rsplit('/', 1)[-1])
    for key, value in visualisations.items():
        if key in dataset_vises[current_dataset]:
            window.FindElement(key).Update(disabled = False, button_color = 'green')
        else:
            window.FindElement(key).Update(disabled = True, button_color = 'lightgrey')
def console_message(str):
    """Report a status/error message to the user.

    This used to be a silent ``pass`` stub, so every notice the rest of the
    module sent here ('Canvas Toggled', 'Could not set speed', ...) was
    dropped.  Print it so problems are at least visible on the console.
    The parameter name ``str`` (which shadows the builtin) is kept so the
    signature stays identical for callers.
    """
    print(str)
# ------------------------------------------------------------------------- #
# Parsing related functions #
# ------------------------------------------------------------------------- #
def parse(vasp_path, current_dataset):
    """Probe *vasp_path* with every VASP and ELK parser ENVISIoN provides,
    writing one HDF5 file per supported visualisation (named
    '<type><slot>.hdf5'), then enable the matching GUI buttons.  Any
    previously generated files for this slot are removed first."""
    clear_hdf5(current_dataset)
    pos_vises = []
    #vasp
    if envisionpy.hdf5parser.check_directory_force_parser(vasp_path):
        pos_vises.append('Force')
        envisionpy.hdf5parser.force_parser('force' + current_dataset + '.hdf5', vasp_path)
    if envisionpy.hdf5parser.check_directory_molecular_dynamics_parser(vasp_path):
        pos_vises.append('Molecular Dynamics')
        envisionpy.hdf5parser.mol_dynamic_parser('molecular_dynamics' + current_dataset + '.hdf5', vasp_path)
    if envisionpy.hdf5parser.check_directory_elf(vasp_path):
        pos_vises.append('ELF')
        envisionpy.hdf5parser.elf('elf' + current_dataset + '.hdf5', vasp_path)
    if envisionpy.hdf5parser.check_directory_charge(vasp_path):
        # Charge works with or without unit-cell data; parse the cell too
        # when it is available so the visualisation can show the lattice.
        if envisionpy.hdf5parser.check_directory_unitcell(vasp_path):
            pos_vises.append('Charge')
            envisionpy.hdf5parser.charge('charge' + current_dataset + '.hdf5', vasp_path)
            envisionpy.hdf5parser.unitcell('charge' + current_dataset + '.hdf5', vasp_path)
        else:
            pos_vises.append('Charge')
            envisionpy.hdf5parser.charge('charge' + current_dataset + '.hdf5', vasp_path)
    if envisionpy.hdf5parser.check_directory_unitcell(vasp_path):
        pos_vises.append('Atom Positions')
        envisionpy.hdf5parser.unitcell('atom' + current_dataset + '.hdf5', vasp_path)
    if envisionpy.hdf5parser.check_directory_bandstructure(vasp_path):
        # Band data serves both the 2D and the 3D band-structure views.
        pos_vises.append('BandStructure')
        envisionpy.hdf5parser.bandstructure('band2d' + current_dataset + '.hdf5', vasp_path)
        pos_vises.append('BandStructure 3D')
        envisionpy.hdf5parser.bandstructure('band3d' + current_dataset + '.hdf5', vasp_path)
    if envisionpy.hdf5parser.check_directory_fermi(vasp_path):
        pos_vises.append('Fermi Surface')
        envisionpy.hdf5parser.fermi_parser('fermi' + current_dataset + '.hdf5', vasp_path)
    if envisionpy.hdf5parser.check_directory_pcf(vasp_path):
        pos_vises.append('PCF')
        envisionpy.hdf5parser.paircorrelation('pcf' + current_dataset + '.hdf5', vasp_path)
    #elk
    if envisionpy.hdf5parser.check_directory_unitcell_elk(vasp_path):
        pos_vises.append('Atom Positions')
        envisionpy.hdf5parser.unitcell_parser('atom' + current_dataset + '.hdf5', vasp_path)
    if envisionpy.hdf5parser.check_directory_force_elk(vasp_path):
        pos_vises.append('Force')
        envisionpy.hdf5parser.parse_force_elk('force' + current_dataset + '.hdf5', vasp_path)
    if envisionpy.hdf5parser.check_directory_elf_elk(vasp_path):
        pos_vises.append('ELF')
        envisionpy.hdf5parser.parse_elf('elf' + current_dataset + '.hdf5', vasp_path)
    # Followed by if-statements for any additional parsers.
    set_dataset_to_vises_and_dir(vasp_path, pos_vises)
def parse_progress_bar(vasp_path, current_dataset):
    """Run parse() behind a progress-bar window.

    The bar is cosmetic: it advances on a timer with short random pauses;
    the actual parsing happens in one go when the bar reaches *stop2*.
    Cancelling before that point skips the parse entirely."""
    stop1 = rd.randint(10,30)
    stop2 = rd.randint(40,60)
    stop3 = rd.randint(70,90)
    lenght = 100
    layout = [[sg.Text('Working on it')],
              [sg.ProgressBar(lenght, orientation='h', size=(20, 20),
                              key='progressbar')],
              [sg.Cancel()]]
    window2 = sg.Window('', layout)
    progress_bar = window2['progressbar']
    for i in range(lenght):
        event, values = window2.read(timeout=10)
        if i == stop1:
            time.sleep(0.2)
        if i == stop2:
            # The real work: generate the HDF5 files for this dataset.
            parse(vasp_path, current_dataset)
        if i == stop3:
            time.sleep(0.3)
        if event == 'Cancel' or event == sg.WIN_CLOSED:
            break
        progress_bar.UpdateBar(i + 1)
    window2.close()
    return True
def load_hdf5_file(hdf5_path, current_dataset):
    """Register an existing HDF5 file with the backend and enable the
    visualisations it reports as available for that file."""
    init = send_request('init_manager', [hdf5_path])
    # data[2] holds the backend's list of supported visualisation types.
    init = init['data'][2]
    envisionMain.update()
    pos_vises = []
    for key, value in visualisations.items():
        if value in init:
            pos_vises.append(key)
    set_dataset_to_vises_and_dir(hdf5_path, pos_vises, True)
    #print(pos_vises)
def clear_hdf5(current_dataset, exit = False):
    """Remove generated HDF5 files from the working directory.

    When *exit* is False only the files belonging to *current_dataset*
    (named '<type><slot>.hdf5') are removed; when *exit* is True every
    '.hdf5' file goes (used on application shutdown).  Returns True on
    success; on any failure it prints a notice and returns None, matching
    the original best-effort behaviour.
    """
    # The two original branches were identical except for the filename
    # suffix they matched, so fold them into one code path.
    work_dir = path_to_current_folder + '/../envisionGUI/'
    try:
        suffix = '.hdf5' if exit else current_dataset + '.hdf5'
        for item in os.listdir(work_dir):
            if item.endswith(suffix):
                os.remove(os.path.join(work_dir, item))
        return True
    except Exception:
        # Broad on purpose: cleanup must never take the GUI down.
        print('Couldnt remove')
def clear_options():
    """Hide every option button, combo box and slider (called when the
    selected visualisation is stopped)."""
    b=0
    c=0
    s=0
    while b < number_of_buttons:
        window.FindElement('opt' + str(b)).Update(visible = False,
                                                  button_color = 'green')
        b += 1
    while c < number_of_comboboxes:
        window.FindElement('com' + str(c)).Update(visible = False)
        window.FindElement('com' + str(c) + 't').Update(visible = False)
        c += 1
    while s < number_of_sliders:
        window.FindElement('sli' + str(s) + 't').Update(visible = False)
        window.FindElement('sli' + str(s)).Update(visible = False)
        s += 1
    return True
# ------------------------------------------------------------------------- #
# Functions that control the visualisations #
# ------------------------------------------------------------------------- #
def create_vis_attributes(attr):
    """Populate the generic option widgets from a vis_attributes entry.

    *attr* maps 'buttonN' keys to button labels, 'combo' to
    {label: choices} and 'slider' to {label: [(min, max), default]};
    widgets not used by the visualisation stay hidden."""
    clear_options()
    button_count = 0
    combo_count = 0
    slider_count = 0
    for key, value in attr.items():
        if 'button' in key:
            window.FindElement('opt' +
                               str(button_count)).Update(text = value,
                               visible = True, button_color = 'green')
            button_count += 1
        elif 'combo' in key:
            for key1, value1 in value.items():
                window.FindElement('com' + str(combo_count) +
                                   't').Update(value = key1, visible = True)
                window.FindElement('com' +
                                   str(combo_count)).Update(values = value1,
                                   visible = True)
                combo_count += 1
        elif 'slider' in key:
            for key2, value2 in value.items():
                # Slider label doubles as value display ('Label: 0.50').
                window.FindElement('sli' + str(slider_count) +
                                   't').Update(value = key2 + ': ' +
                                   str(value2[1]/100), visible = True)
                window.FindElement('sli' +
                                   str(slider_count)).Update(range = value2[0],
                                   value = value2[1], visible = True,
                                   disabled = False)
                slider_count += 1
    return
def switch_dataset(event):
    """Refresh the visualisation buttons to reflect what the dataset slot
    named by *event* supports; empty slots disable everything."""
    global dataset_dir, dataset_vises
    if dataset_dir[event] != None and dataset_vises[event] != None:
        for key, value in visualisations.items():
            if key in dataset_vises[event]:
                window.FindElement(key).Update(disabled = False, button_color = 'green')
            else:
                window.FindElement(key).Update(disabled = True, button_color = 'lightgrey')
    elif dataset_dir[event] == None:
        for key in visualisations.keys():
            window.FindElement(key).Update(disabled = True, button_color = 'lightgrey')
    else:
        pass

def get_selected_folder():
    """Return the folder currently picked in the folder browser."""
    return current_folder

def set_selected_folder(values):
    """Remember the browsed folder and show its basename in the GUI."""
    global current_folder
    current_folder = values['foldload']
    window.FindElement('foldloadtext').Update('Currently Selected: \n' +
                        current_folder.rsplit('/', 1)[-1], visible = True)

def set_selected_file(values):
    """Remember the browsed HDF5 file and show its basename in the GUI."""
    global current_file
    current_file = values['fileload']
    window.FindElement('fileloadtext').Update('Currently Selected: \n' +
                        current_file.rsplit('/', 1)[-1], visible = True)

def get_loaded_datasets():
    """Return the source paths of every slot that has data loaded."""
    return tuple([i for i in dataset_dir.values() if i != None])
def handle_visualisation_request(event, current_dataset, hdf5 = False):
    """Start the visualisation named by *event* for the selected dataset,
    unless it is already running for that slot."""
    if dataset_if_hdf5[current_dataset] == None:
        # Folder-parsed dataset: files are named '<type><slot>.hdf5'.
        hdf5_file = visualisations[event] + current_dataset + '.hdf5'
        hdf5_file_name = visualisations[event] + current_dataset
    else:
        # Directly loaded HDF5 file: use its own basename.
        hdf5_file = dataset_if_hdf5[current_dataset]
        hdf5_file_name = dataset_if_hdf5[current_dataset].rsplit('.hdf5')[0]
        hdf5_file_name = hdf5_file_name.rsplit('/', 1)[-1]
    # print()
    if event not in current_vises[current_dataset]:
        # print(current_vises[current_dataset])
        # print(hdf5_file_name + ' ' + hdf5_file + ' ' + visualisations[event])
        start_visualisation(hdf5_file_name, hdf5_file, visualisations[event])
        current_vises[current_dataset].append(event)
        #print(current_vises[current_dataset])
        #print(hdf5_file)
def start_visualisation(filename, file, type):
    """Register *file* with the backend and start a *type* visualisation."""
    envisionMain.update()
    send_request('init_manager', [file])
    envisionMain.update()
    #print(filename)
    #print(type)
    send_request('start_visualisation', [filename, type])
    envisionMain.update()

def stop_visualisation(filename, type):
    """Ask the backend to stop a visualisation; failures are ignored
    (the visualisation may already be gone)."""
    try:
        envisionMain.update()
        send_request('stop_visualisation', [filename, type])
        envisionMain.update()
    except:
        pass
def set_current(event, current_dataset):
    """Record the visualisation picked via *event* in the module globals.

    Sets ``current_vis`` (backend type, e.g. 'charge'),
    ``current_vis_key`` (display name / button label, e.g. 'Charge') and
    ``current_vis_hdf5`` (basename of the HDF5 file the visualisation
    reads from).
    """
    global current_vis_hdf5, current_vis, current_vis_key
    current_vis = visualisations[event]
    # Use the prebuilt reverse map instead of re-deriving the display name
    # with a comprehension over visualisations.items() in both branches.
    current_vis_key = visualisations_reverse[current_vis]
    if dataset_if_hdf5[current_dataset] is None:
        # Folder-parsed dataset: files are named '<type><slot>.hdf5'.
        current_vis_hdf5 = current_vis + current_dataset
    else:
        # Directly loaded HDF5 file: strip extension and directory.
        current_vis_hdf5 = dataset_if_hdf5[current_dataset].rsplit('.hdf5')[0]
        current_vis_hdf5 = current_vis_hdf5.rsplit('/', 1)[-1]
def stop_selected(current_vis_hdf5, current_vis):
    """Stop the given visualisation and drop it from the slot's running
    list; silently ignores a visualisation that is not running."""
    try:
        current_vises[current_dataset].remove(str([vis for vis,val in visualisations.items() if val == current_vis][0]))
        stop_visualisation(current_vis_hdf5, current_vis)
    except:
        pass

def set_selected(event):
    """Mark the visualisation button *event* as active (dark green)."""
    window.FindElement(event).Update(button_color = 'darkgreen', disabled = True)

def unset_selected(event):
    """Re-enable the button of the currently tracked visualisation.
    NOTE(review): the *event* argument is ignored; the function always acts
    on the global current_vis_key — confirm this is intended."""
    window.FindElement(current_vis_key).Update(button_color = 'green', disabled = False)
# ------------------------------------------------------------------------- #
# Functions that control the look of visualisations #
# ------------------------------------------------------------------------- #
def toggle_canvas(file, type):
    """Show/hide the main canvas of the visualisation *type* on *file*.
    NOTE(review): the ``canvas`` flag is module-global, shared by every
    visualisation — confirm per-visualisation state was not intended."""
    global canvas
    #try:
    #print(file + type)
    if canvas:
        envisionMain.update()
        send_request('visualisation_request', [file, type, "hide", []])
        envisionMain.update()
        canvas = False
    else:
        envisionMain.update()
        send_request('visualisation_request', [file, type, "show", []])
        envisionMain.update()
        canvas = True
    #console_message('Canvas Toggled')
    #except:
        #console_message('Failed to toggle Canvas')
    return

def set_shading_mode(file, type, key):
    """Forward the chosen shading mode to the backend."""
    try:
        send_request("visualisation_request",
                     [file, type, "set_shading_mode", [key]])
    except:
        console_message('Could not set shading mode')

def set_color(file, type, key):
    """Forward an RGB colour to the backend."""
    try:
        send_request("visualisation_request",
                     [file, type, "set_color", [key]])
    except:
        console_message('Could not set color')

def set_volume_selection(file, type, key):
    """Select which volume in the HDF5 file ('/0', '/1', '/final') to show."""
    try:
        send_request("visualisation_request", [file, type,
                     "set_volume_selection", [key]])
    except:
        console_message('Could not set volume selection')

def toggle_force_vectors(file, type):
    """Show/hide force vectors (module-global ``vectors`` flag)."""
    global vectors
    try:
        if vectors:
            envisionMain.update()
            send_request("visualisation_request", [file, type, "hide_vectors"])
            envisionMain.update()
            vectors = False
        else:
            envisionMain.update()
            send_request("visualisation_request", [file, type, "show_vectors"])
            envisionMain.update()
            vectors = True
        console_message('Vectors Toggled')
    except:
        console_message('Failed to toggle Vectors')
    return

def toggle_iso_surface(file, type):
    """Toggle the ISO surface (module-global ``toggle_iso`` flag)."""
    global toggle_iso
    try:
        if toggle_iso:
            envisionMain.update()
            send_request("visualisation_request", [file, type, 'toggle_iso',
                         [toggle_iso]])
            envisionMain.update()
            toggle_iso = False
        else:
            envisionMain.update()
            send_request("visualisation_request", [file, type, 'toggle_iso',
                         [toggle_iso]])
            envisionMain.update()
            toggle_iso = True
        console_message('ISO Toggled')
    except:
        console_message('Failed to toggle ISO')
    return

def set_iso_surface(file, type, value):
    """Set the ISO surface level (0.0-1.0 from the slider)."""
    try:
        send_request("visualisation_request", [file, type, "set_iso_surface",
                     [value]])
    except:
        console_message('Could not set ISO-surface value')

def set_radius(file, type, value):
    """Set the atom sphere radius (0.0-1.0 from the slider)."""
    try:
        send_request("visualisation_request", [file, type, "set_radius",
                     [value]])
    except:
        console_message('Could not set radius')

def toggle_slice_canvas(file, type):
    """Show/hide the slice canvas (module-global ``slice_canvas`` flag)."""
    global slice_canvas
    try:
        if slice_canvas:
            envisionMain.update()
            send_request('visualisation_request', [file, type, "hide",
                         [False, True]])
            envisionMain.update()
            slice_canvas = False
        else:
            envisionMain.update()
            send_request('visualisation_request', [file, type, "show",
                         [False, True]])
            envisionMain.update()
            slice_canvas = True
        console_message('Slice Canvas Toggled')
    except:
        console_message('Failed to toggle Slice Canvas')
    return

def toggle_slice_plane(file, type):
    """Toggle the slice plane (module-global ``slice_plane`` flag)."""
    global slice_plane
    try:
        if slice_plane:
            envisionMain.update()
            send_request("visualisation_request", [file, type,
                         'toggle_slice_plane', [slice_plane]])
            envisionMain.update()
            slice_plane = False
        else:
            envisionMain.update()
            send_request("visualisation_request", [file, type,
                         'toggle_slice_plane', [slice_plane]])
            envisionMain.update()
            slice_plane = True
        console_message('Slice Plane Toggled')
    except:
        console_message('Failed to toggle Slice Plane')
    return

def set_slice_plane_height(file, type, value):
    """Set the slice plane height (0.0-1.0 from the slider)."""
    try:
        send_request("visualisation_request", [file, type,
                     "set_plane_height", [value]])
    except:
        console_message('Could not set slice plane height')

def unfinished(file, type):
    """Placeholder for controls that have no backend support yet."""
    console_message('Not yet implemented')
    return

def set_animation_speed(file, type, speed):
    """Set molecular-dynamics playback speed from a 0.0-1.0 slider value.
    # The exponential maps slider 0->~100 (slow) and 1->~0 (fast);
    # presumably an empirically tuned frame delay — TODO confirm range.
    """
    speed = round(-3.7647 + 103.7647*2.7182**(-3.3164*speed))
    try:
        send_request("visualisation_request", [file, type, "set_speed",
                     [speed]])
    except:
        console_message('Could not set speed')

def set_opacity(file, type, value):
    """Set overall opacity (0.0-1.0 from the slider)."""
    try:
        send_request("visualisation_request", [file, type, "set_opacity",
                     [value]])
    except:
        console_message('Could not set opacity')

def play_pause(file, type):
    """Toggle molecular-dynamics playback."""
    try:
        send_request("visualisation_request", [file, type, "play_pause"])
    except:
        console_message('Could not set play/pause')

def set_standard_parameters(file, type):
    """Reset radius, speed, opacity and colour to defaults when a
    visualisation starts; each setter is best-effort since not every
    visualisation supports every parameter."""
    try:
        set_radius(file, type, 0.5)
    except:
        pass
    try:
        set_animation_speed(file, type, 0.5)
    except:
        pass
    try:
        set_opacity(file, type, 1)
    except:
        pass
    try:
        set_color(file, type, [1,1,1])
    except:
        pass
# ------------------------------------------------------------------------- #
# Layout Settings #
# ------------------------------------------------------------------------- #
# Top-level window layout: title, dataset slots + loaders, visualisation
# buttons, generic option widgets, and the stop/FPS row.
layout = [[sg.Text('ENVISIoN GUI v0.4', justification = 'center',
                   font = ("Helvetica", 40, 'bold', 'italic'))],
          [ sg.Frame(layout = setup_datasets(), title = ''),
            sg.Frame(layout = setup_folderloader(), title = '',
                     vertical_alignment = 'bottom'),
            sg.Frame(layout = setup_fileloader(), title = '',
                     vertical_alignment = 'bottom'),
            ],
          [ sg.Frame(layout = setup_vis_buttons(), title = '', border_width = 0)],
          [[sg.Frame(layout = setup_option_buttons(), title = '', border_width = 0),
            sg.Frame(layout = setup_sliders(), title = '', border_width = 0),
            sg.Frame(layout = setup_combo_boxes(), title = '', border_width = 0)]],
          [ sg.Button('Stop Currently Selected Visualisation',
                      key = 'stop', button_color = 'red'),
            sg.Text('FPS:'),
            sg.Text(' ', key = 'fps'), sg.Button('test', key ='t')]]

window = sg.Window('',layout)

# Dispatch tables: widget label -> control function (see event loop below).
button_to_function = {'Toggle Canvas' : toggle_canvas,
                      'Toggle Force Vectors' : toggle_force_vectors,
                      'Play/Pause' : play_pause,
                      'Change Color' : unfinished,
                      'Toggle ISO' : toggle_iso_surface,
                      'Toggle Slice Canvas' : toggle_slice_canvas,
                      'Toggle Slice Plane' : toggle_slice_plane}

combo_to_function = {'Shading Mode' : set_shading_mode,
                     'Volume Selection' : set_volume_selection,
                     'Color' : set_color}

slider_to_function = {'ISO Surface Value' : set_iso_surface,
                      'Slice Plane Height' : set_slice_plane_height,
                      'Set Radius' : set_radius,
                      'Set Speed' : set_animation_speed,
                      'Set Opacity' : set_opacity}
# ------------------------------------------------------------------------- #
# Event Loop #
# ------------------------------------------------------------------------- #
# Main event loop: pump the backend, poll the GUI with a 20 ms timeout,
# and dispatch each event to the helpers above.
while True:
    start = time.time()
    envisionMain.update()
    event, values = window.read(timeout = 20)
    if event == 'foldload':
        set_selected_folder(values)
    if event == 'fileload':
        set_selected_file(values)
    # Parse a browsed folder into the selected, not-yet-loaded slot.
    if (event == 'parsefolder' and current_folder != None
        and current_folder not in get_loaded_datasets()
        and current_dataset != None):
        parse_progress_bar(current_folder, current_dataset)
    # Load a browsed HDF5 file into the selected, not-yet-loaded slot.
    if (event == 'loadfile' and current_file != None
        and current_file not in get_loaded_datasets()
        and current_dataset != None):
        load_hdf5_file(current_file, current_dataset)
    if event in setup_datasets(True):
        set_selected_dataset(event)
        switch_dataset(event)
    # A visualisation button was pressed: start it and show its controls.
    if (event in visualisations_button_tuple and (current_folder != None or current_file != None)
        and current_dataset != None):
        switch_dataset(current_dataset)
        set_selected(event)
        create_vis_attributes(vis_attributes[event])
        # print(event)
        handle_visualisation_request(event, current_dataset)
        set_current(event, current_dataset)
        set_standard_parameters(current_vis_hdf5, current_vis)
    if (event == 'stop' and (current_folder != None or current_file != None)
        and current_dataset != None):
        unset_selected(current_vis)
        stop_selected(current_vis_hdf5, current_vis)
        clear_options()
    if event == 't':
        # Debug button: dump selection state to the console.
        print(current_dataset)
        print(current_vises)
    # Generic widgets dispatch on their label text via the tables above.
    if event in setup_option_buttons(True):
        button_to_function[window.FindElement(event).get_text()](current_vis_hdf5,
                           current_vis)
    if event in setup_combo_boxes(True):
        combo_to_function[window.FindElement(event + 't').get()](current_vis_hdf5,
                          current_vis, values[event])
    if event in setup_sliders(True):
        # Slider labels carry the value ('Label: 0.50'); refresh then forward.
        window.FindElement(event + 't').Update(value =
                           (window.FindElement(event + 't').get().split(':'))[0]
                           + ': ' + str(round(values[event])/100))
        slider_to_function[window.FindElement(event +
                           't').get().split(':')[0]](current_vis_hdf5,
                           current_vis, round(values[event])/100)
    if event in (sg.WINDOW_CLOSED, 'Exit'):
        # Clean up every generated HDF5 file before quitting.
        clear_hdf5(current_dataset, True)
        break
    end = time.time()
    window.FindElement('fps').Update(str(round(1/(end - start), 1)))

window.close()
| rartino/ENVISIoN | envisionGUI/GUI.py | Python | bsd-2-clause | 33,351 |
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import quickstart
@mock.patch('google.cloud.storage.client.Client.create_bucket')
def test_quickstart(create_bucket_mock, capsys):
    """Run the storage quickstart with bucket creation stubbed out.

    Unlike other quickstart tests, this one mocks out bucket creation,
    because buckets are expensive, globally-namespaced objects.
    """
    create_bucket_mock.return_value = mock.sentinel.bucket

    quickstart.run_quickstart()

    # The quickstart must have requested exactly this bucket name.
    create_bucket_mock.assert_called_with('my-new-bucket')
| amboutin/GCP | storage/cloud-client/quickstart_test.py | Python | apache-2.0 | 1,029 |
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .ray_tune_search_engine import RayTuneSearchEngine
| intel-analytics/BigDL | python/orca/src/bigdl/orca/automl/search/ray_tune/__init__.py | Python | apache-2.0 | 642 |
#!/usr/bin/env python3
import os
import sys
_upper_dir = os.path.abspath(
os.path.join(os.path.dirname(__file__), '..'))
if _upper_dir not in sys.path:
sys.path.append(_upper_dir)
import chdb
import config
import utils
import time
import subprocess
import argparse
import tempfile
import dateutil.parser
import datetime
import traceback
import logging
def email(message):
    """Send an alert mail with *message* as the subject line.

    NOTE(review): the 2-minute sleep presumably spaces out repeated
    alerts from retrying jobs -- confirm before changing it.
    """
    cmdline = '/usr/bin/mail -s "%s" ' % message + ' [email protected]'
    subprocess.getoutput(cmdline)
    time.sleep(2*60)
def shell(logger, cmdline):
    """Run *cmdline* through the shell, logging each line of its output.

    Returns True when the command exited with status 0, False otherwise.
    """
    logger.info('Running %s' % cmdline)
    exit_status, captured = subprocess.getstatusoutput(cmdline)
    for line in captured.splitlines():
        logger.info(line)
    return exit_status == 0
def get_db_names_to_archive(lang_code):
    """Return the names of the per-language and stats databases.

    Each connection is asked for its current schema via SELECT DATABASE(),
    so the result reflects whatever the chdb initializers connected to.
    """
    names = []
    for connection in (chdb.init_db(lang_code), chdb.init_stats_db()):
        with connection.cursor() as cursor:
            cursor.execute('SELECT DATABASE()')
            row = cursor.fetchone()
            names.append(row[0])
    return names
def delete_old_archives(logger, archive_dir, archive_duration_days):
    """Remove archives in *archive_dir* older than *archive_duration_days* days.

    Archive filenames are expected to look like YYYYMMDD-HHMM.sql.gz; the
    part before the first dot is parsed to compute each archive's age.
    """
    try:
        archives = os.listdir(archive_dir)
    except OSError:
        logger.info('No archives to delete!')
        return
    for name in archives:
        # format: YYYYMMDD-HHMM.sql.gz
        timestamp = dateutil.parser.parse(name.split('.', 1)[0])
        age_days = (datetime.datetime.today() - timestamp).days
        if age_days > archive_duration_days:
            logger.info('Archive %s is %d days old, deleting' % (name, age_days))
            os.remove(os.path.join(archive_dir, name))
def archive_database(logger, cfg):
    """Dump the language and stats databases to a gzipped SQL archive.

    Old archives are pruned first (when cfg.archive_duration_days > 0),
    then mysqldump writes a timestamped .sql.gz file under
    cfg.archive_dir/<lang_code>/.  Returns True on success, False otherwise.
    """
    dbs_to_archive = get_db_names_to_archive(cfg.lang_code)
    archive_dir = os.path.join(cfg.archive_dir, cfg.lang_code)
    if cfg.archive_duration_days > 0:
        delete_old_archives(logger, archive_dir, cfg.archive_duration_days)
    utils.mkdir_p(archive_dir)
    now = datetime.datetime.now()
    # Filename format YYYYMMDD-HHMM.sql.gz -- parsed back by
    # delete_old_archives when pruning.
    output = os.path.join(archive_dir, now.strftime('%Y%m%d-%H%M.sql.gz'))
    logger.info('Archiving the current database')
    return shell(
        logger,
        'mysqldump --defaults-file="%s" --host=%s --databases %s | '
        'gzip > %s' % (chdb.REPLICA_MY_CNF, chdb.TOOLS_LABS_CH_MYSQL_HOST,
                       ' '.join(dbs_to_archive), output))
def expire_stats(cfg):
    """Delete request-log rows older than cfg.stats_max_age_days days.

    Bug fix: the original called chdb.init_stats_db() twice -- the first
    connection was bound to an unused local and leaked.  Now a single
    connection is opened and used for the DELETE.
    """
    stats_db = chdb.init_stats_db()
    with stats_db.cursor() as cursor, chdb.ignore_warnings():
        cursor.execute('DELETE FROM requests WHERE DATEDIFF(NOW(), ts) > %s',
                       (cfg.stats_max_age_days,))
def _update_db_tools_labs(logger, cfg):
    """Rebuild the CitationHunt database for cfg.lang_code on Tools Labs.

    Pipeline: initialize databases, archive the current contents (best
    effort), expire old stats rows, then run the import scripts in order
    and install the freshly built database.  Raises (via assert) if a
    required script fails.
    """
    os.environ['CH_LANG'] = cfg.lang_code
    chdb.initialize_all_databases()
    if cfg.archive_dir and not archive_database(logger, cfg):
        # Log, but don't assert, this is not fatal
        logger.warning('Failed to archive database!')
    expire_stats(cfg)

    # FIXME Import and call these scripts instead of shelling out?
    def run_script(script, cmdline = '', optional = False):
        # Run a sibling script with this interpreter; abort the whole
        # rebuild (AssertionError) unless the script is marked optional.
        scripts_dir = os.path.dirname(os.path.realpath(__file__))
        script_path = os.path.join(scripts_dir, script)
        cmdline = ' '.join([sys.executable, script_path, cmdline])
        assert shell(logger, cmdline) == True or optional
    unsourced = tempfile.NamedTemporaryFile()
    run_script(
        'print_unsourced_pageids_from_wikipedia.py', '> ' + unsourced.name)
    run_script('parse_live.py', unsourced.name)
    run_script('assign_categories.py')
    run_script('update_intersections.py')
    run_script('install_new_database.py')
    unsourced.close()  # deletes the file
def update_db_tools_labs(logger, cfg):
    """Run the database rebuild, alerting by mail on any failure.

    Any exception raised by the underlying pipeline is printed to stderr,
    reported via email(), and converted into a non-zero exit status.
    """
    try:
        _update_db_tools_labs(logger, cfg)
    except Exception:
        traceback.print_exc(file = sys.stderr)
        email('Failed to build database for %s' % cfg.lang_code)
        sys.exit(1)
if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description='Update the CitationHunt databases.')
    parser.add_argument('lang_code',
        help='One of the language codes in ../config.py')
    args = parser.parse_args()

    # One log file per language code, e.g. citationhunt_update_en.log.
    logname = 'citationhunt_update_' + args.lang_code
    logger = logging.getLogger(logname)
    utils.setup_logger_to_logfile(logger, logname + '.log')

    # This updater only makes sense inside the Tools Labs environment.
    if not utils.running_in_tools_labs():
        logger.error('Not running in Tools Labs!')
        sys.exit(1)
    if args.lang_code not in config.LANG_CODES_TO_LANG_NAMES:
        logger.error('Invalid lang code {}!'.format(args.lang_code))
        sys.exit(1)
    cfg = config.get_localized_config(args.lang_code)
    update_db_tools_labs(logger, cfg)
| eggpi/citationhunt | scripts/update_db_tools_labs.py | Python | mit | 4,576 |
# -*- coding: utf-8 -*-
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Additional help about gsutil's interaction with Cloud Storage APIs."""
from gslib.help_provider import HelpProvider
# Raw help text rendered by "gsutil help apis"; displayed verbatim by the
# help framework, so the <B> markup and layout are intentional.
_DETAILED_HELP_TEXT = ("""
<B>OVERVIEW</B>
Google Cloud Storage offers two APIs: an XML and a JSON API. Gsutil can
interact with both APIs. By default, gsutil versions starting with 4.0
interact with the JSON API. If it is not possible to perform a command using
one of the APIs (for example, the notification command is not supported in
the XML API), gsutil will silently fall back to using the other API. Also,
gsutil will automatically fall back to using the XML API when interacting
with cloud storage providers that only support that API.
<B>CONFIGURING WHICH API IS USED</B>
To use a certain API for interacting with Google Cloud Storage, you can set
the 'prefer_api' variable in the "GSUtil" section of .boto config file to
'xml' or 'json' like so:
prefer_api = json
This will cause gsutil to use that API where possible (falling back to the
other API in cases as noted above). This applies to the gsutil test command
as well; it will run integration tests against the preferred API.
<B>PERFORMANCE DIFFERENCES BETWEEN APIS</B>
The XML API uses the boto framework. This framework re-reads downloaded files
to compute an MD5 hash if one is not present. For objects that do not
include MD5 hashes in their metadata (for example Google Cloud Storage
composite objects), this doubles the bandwidth consumed and elapsed time
needed by the download. Therefore, if you are working with composite objects,
it is recommended that you use the default value for prefer_api.
""")
class CommandOptions(HelpProvider):
    """Additional help about gsutil's interaction with Cloud Storage APIs."""

    # Help specification. See help_provider.py for documentation.
    # Declarative only: the text above is served under "gsutil help apis"
    # and under the listed aliases.
    help_spec = HelpProvider.HelpSpec(
        help_name='apis',
        help_name_aliases=['XML', 'JSON', 'api', 'force_api', 'prefer_api'],
        help_type='additional_help',
        help_one_line_summary='Cloud Storage APIs',
        help_text=_DETAILED_HELP_TEXT,
        subcommand_help_text={},
    )
| harshilasu/GraphicMelon | y/google-cloud-sdk/platform/gsutil/gslib/addlhelp/apis.py | Python | gpl-3.0 | 2,736 |
"""
These validate methods are never run by FlexGet anymore, but these tests serve as a sanity check that the
old validators will get converted to new schemas properly for plugins still using the `validator` method.
"""
from __future__ import unicode_literals, division, absolute_import
from builtins import * # pylint: disable=unused-import, redefined-builtin
from flexget import validator
class TestValidator(object):
    """Sanity checks for the legacy `validator` API.

    FlexGet no longer runs these validate methods itself; the tests ensure
    old-style validators still convert to JSON schemas correctly.
    """

    def test_default(self):
        # A bare factory() call must produce the root validator.
        root = validator.factory()
        assert root.name == 'root', 'expected root'
        dv = root.accept('dict')
        assert dv.name == 'dict', 'expected dict'
        dv.accept('text', key='text')

    def test_dict(self):
        # Declared keys validate; undeclared keys fail.
        dv = validator.factory('dict')
        dv.accept('dict', key='foo')
        result = dv.validate({'foo': {}})
        assert not dv.errors.messages, 'should have passed foo'
        assert result, 'invalid result for foo'
        result = dv.validate({'bar': {}})
        assert dv.errors.messages, 'should not have passed bar'
        assert not result, 'should have an invalid result for bar'
        # Test validation of dictionary keys
        dv = validator.factory('dict')
        dv.accept_valid_keys('dict', key_type='number')
        result = dv.validate({3: {}})
        assert not dv.errors.messages, 'should have passed 3'
        assert result, 'invalid result for key 3'

    def test_regexp_match(self):
        # Accepted pattern must match from the start of the value.
        re_match = validator.factory('regexp_match')
        re_match.accept('abc.*')
        assert not re_match.validate('foobar'), 'foobar should not have passed'
        assert re_match.validate('abcdefg'), 'abcdefg should have passed'

    def test_interval(self):
        # Intervals are "<number> <unit>" with a fixed set of unit names.
        interval = validator.factory('interval')
        assert interval.validate('3 days')
        assert interval.validate('12 hours')
        assert interval.validate('1 minute')
        assert not interval.validate('aoeu')
        assert not interval.validate('14')
        assert not interval.validate('3 dayz')
        assert not interval.validate('about 5 minutes')

    def test_choice(self):
        # Case sensitivity is honored per accepted value (ignore_case).
        choice = validator.factory('choice')
        choice.accept('foo')
        choice.accept('Bar', ignore_case=True)
        choice.accept(120)
        choice.validate('foo')
        assert not choice.errors.messages, 'foo should be valid'
        choice.validate(120)
        assert not choice.errors.messages, '120 should be valid'
        choice.validate('bAR')
        assert not choice.errors.messages, 'bAR should be valid'
        choice.validate('xxx')
        assert choice.errors.messages, 'xxx should be invalid'
        choice.errors.messages = []
        choice.validate(300)
        assert choice.errors.messages, '300 should be invalid'
        choice.errors.messages = []
        choice.validate('fOO')
        assert choice.errors.messages, 'fOO should be invalid'

    # This validator is not supported with json schema
    def _lazy(self):
        """Test lazy validators by making a recursive one."""
        def recursive_validator():
            root = validator.factory('dict')
            root.accept('integer', key='int')
            root.accept(recursive_validator, key='recurse')
            return root
        test_config = {'int': 1,
                       'recurse': {
                           'int': 2,
                           'recurse': {
                               'int': 3}}}
        assert recursive_validator().validate(test_config), 'Config should pass validation'
        test_config['recurse']['badkey'] = 4
        assert not recursive_validator().validate(test_config), 'Config should not be valid'

    def test_path(self, tmpdir):
        # tmpdir is the pytest fixture providing a real, existing directory.
        path = validator.factory('path')
        path_allow_missing = validator.factory('path', allow_missing=True)
        path.validate(tmpdir.strpath)
        assert not path.errors.messages, '%s should be valid' % tmpdir.strpath
        path_allow_missing.validate('missing_directory')
        assert not path_allow_missing.errors.messages, 'missing_directory should be valid with allow_missing'
        path.validate('missing_directory')
        assert path.errors.messages, 'missing_directory should be invalid'
        path_allow_missing.errors.messages = []
| oxc/Flexget | flexget/tests/test_validator.py | Python | mit | 4,250 |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for PauseEvaluationJob
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-datalabeling
# [START datalabeling_v1beta1_generated_DataLabelingService_PauseEvaluationJob_sync]
from google.cloud import datalabeling_v1beta1
def sample_pause_evaluation_job():
    """Pause an evaluation job via the Data Labeling service."""
    # Create a client, build the request, then issue the RPC.
    client = datalabeling_v1beta1.DataLabelingServiceClient()
    request = datalabeling_v1beta1.PauseEvaluationJobRequest(name="name_value")
    client.pause_evaluation_job(request=request)
# [END datalabeling_v1beta1_generated_DataLabelingService_PauseEvaluationJob_sync]
| googleapis/python-datalabeling | samples/generated_samples/datalabeling_v1beta1_generated_data_labeling_service_pause_evaluation_job_sync.py | Python | apache-2.0 | 1,488 |
# encoding: utf-8
#
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Author: Kyle Lahnakoski ([email protected])
#
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from mo_logs.log_usingNothing import StructuredLogger
from mo_logs.strings import expand_template
from mo_threads import Queue
class StructuredLogger_usingQueue(StructuredLogger):
    """Structured logger that buffers expanded messages on a Queue.

    write() expands the template immediately and queues the resulting
    string; pop() returns the next message with noise stripped out.
    """

    def __init__(self, name=None):
        label = "log messages to queue"
        if name:
            label += " " + name
        self.queue = Queue(label)

    def write(self, template, params):
        # Expand now so the queue only ever holds plain strings.
        self.queue.add(expand_template(template, params))

    def stop(self):
        self.queue.close()

    def pop(self):
        """Return the next queued message, lightly cleaned up."""
        raw = self.queue.pop()
        cleaned = []
        for line in raw.split("\n"):
            # Strip a " - " separator at columns 19-21 (presumably a
            # timestamp prefix -- confirm against the log format).
            if line[19:22] == " - ":
                line = line[22:]
            # Drop lines starting with "File" (likely traceback locations).
            if line.strip().startswith("File"):
                continue
            cleaned.append(line)
        return "\n".join(cleaned).strip()
| klahnakoski/Bugzilla-ETL | vendor/mo_logs/log_usingQueue.py | Python | mpl-2.0 | 1,197 |
# This file is part of the sos project: https://github.com/sosreport/sos
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# version 2 of the GNU General Public License.
#
# See the LICENSE file in the source distribution for further information.
from sos.report.plugins import Plugin, RedHatPlugin, UbuntuPlugin
class OpenStackDesignate(Plugin):
    """Collect configuration, logs and CLI state for OpenStack Designate."""

    short_desc = 'Openstack Designate'

    plugin_name = "openstack_designate"
    profiles = ('openstack', 'openstack_controller')
    # Puppet-generated config location used by containerized deployments.
    var_puppet_gen = "/var/lib/config-data/puppet-generated/designate"

    def setup(self):
        """Register config files, log files and openstack CLI output."""
        # configs
        self.add_copy_spec([
            "/etc/designate/*",
            self.var_puppet_gen + "/etc/designate/designate.conf",
            self.var_puppet_gen + "/etc/designate/pools.yaml",
        ])

        # logs: everything with --all-logs, otherwise only *.log files
        if self.get_option("all_logs"):
            self.add_copy_spec([
                "/var/log/designate/*",
                "/var/log/containers/designate/*",
            ])
        else:
            self.add_copy_spec([
                "/var/log/designate/*.log",
                "/var/log/containers/designate/*.log"
            ])

        subcmds = [
            'dns service list',
            'dns quota list',
            'ptr record list',
            'tld list',
            'tsigkey list --column name --column algorithm --column scope',
            'zone blacklist list',
            'zone export list',
            'zone import list',
            'zone list',
            'zone transfer accept list',
            'zone transfer request list'
        ]

        # commands
        self.add_cmd_output([
            'openstack %s --all-projects' % sub for sub in subcmds
        ])

        # get recordsets for each zone
        cmd = "openstack zone list -f value -c id"
        ret = self.exec_cmd(cmd)
        if ret['status'] == 0:
            for zone in ret['output'].splitlines():
                # First whitespace-separated field is the zone id.
                zone = zone.split()[0]
                self.add_cmd_output(
                    "openstack recordset list --all-projects %s" % zone,
                    subdir='recordset')

    def postproc(self):
        """Mask secrets (passwords, keys, connection URLs) in captured configs."""
        protect_keys = [
            "password", "connection", "transport_url", "admin_password",
            "ssl_key_password", "ssl_client_key_password",
            "memcache_secret_key"
        ]
        # Multiline regex matching "key = value" lines for any protected key;
        # the value (group 3) is replaced with asterisks.
        regexp = r"((?m)^\s*(%s)\s*=\s*)(.*)" % "|".join(protect_keys)

        self.do_path_regex_sub("/etc/designate/*", regexp, r"\1*********")
        self.do_path_regex_sub(
            self.var_puppet_gen + "/etc/designate/*",
            regexp, r"\1*********"
        )
class RedHatdesignate(OpenStackDesignate, RedHatPlugin):
    # Enabled on Red Hat family systems when openstack-selinux is installed.
    packages = ('openstack-selinux',)
class Ubuntudesignate(OpenStackDesignate, UbuntuPlugin):
    # Enabled on Ubuntu systems when designate-common is installed.
    packages = ('designate-common',)
# vim: set et ts=4 sw=4 :
| BryanQuigley/sos | sos/report/plugins/openstack_designate.py | Python | gpl-2.0 | 2,931 |
#-*- coding:utf-8 -*-
from __future__ import division
from __future__ import absolute_import
from __future__ import with_statement
from __future__ import print_function
from __future__ import unicode_literals
from attest import Tests
def suite(mod):
    """Return the dotted path to *mod*'s test suite object.

    Replaces a lambda assigned to a name (PEP 8 E731) with an equivalent
    def; behavior and call signature are unchanged.
    """
    return 'tests.' + mod + '.suite'
# Aggregate suite combining every sub-package's tests.
# NOTE(review): "all" shadows the builtin of the same name; kept as-is
# since external runners presumably reference tests.all -- confirm before
# renaming.
all = Tests([suite('schemata'),
             suite('models'),
             suite('api'),
             suite('zodb'),
             suite('export'),
             suite('utils'),
             ])
| dag/stutuz | tests/__init__.py | Python | bsd-2-clause | 476 |
__author__ = 'Oleg Butovich'
__copyright__ = '(c) Oleg Butovich 2013-2015'
__licence__ = 'MIT'
from mock import patch
from proxmoxer import ProxmoxAPI
from tests.base.base_ssh_suite import BaseSSHSuite
class TestOpenSSHSuite(BaseSSHSuite):
    """Run the shared SSH backend suite against the openssh backend."""

    proxmox = None
    client = None

    # noinspection PyMethodOverriding
    @mock.patch('openssh_wrapper.SSHConnection')
    def setUp(self, _):
        # Patching SSHConnection prevents a real connection attempt; the
        # resulting mock client is captured for the helpers below.
        self.proxmox = ProxmoxAPI('proxmox', user='root', backend='openssh', port=123)
        self.client = self.proxmox._store['session'].ssh_client
        self._set_stderr('200 OK')
        self._set_stdout('')

    def _get_called_cmd(self):
        # Return the command string passed to the mocked client's run().
        return self.client.run.call_args[0][0]

    def _set_stdout(self, stdout):
        # Stub what the next run() call reports on stdout.
        self.client.run.return_value.stdout = stdout

    def _set_stderr(self, stderr):
        # Stub what the next run() call reports on stderr.
        self.client.run.return_value.stderr = stderr
| petzah/proxmoxer | tests/openssh_tests.py | Python | mit | 859 |
"""
Support for the OpenWeatherMap (OWM) service.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/weather.openweathermap/
"""
import logging
from datetime import timedelta
import voluptuous as vol
from homeassistant.components.weather import (
WeatherEntity, PLATFORM_SCHEMA, ATTR_FORECAST_TEMP, ATTR_FORECAST_TIME)
from homeassistant.const import (CONF_API_KEY, CONF_NAME, CONF_LATITUDE,
CONF_LONGITUDE, STATE_UNKNOWN, TEMP_CELSIUS)
import homeassistant.helpers.config_validation as cv
from homeassistant.util import Throttle
REQUIREMENTS = ['pyowm==2.6.1']
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = 'OpenWeatherMap'
ATTRIBUTION = 'Data provided by OpenWeatherMap'
MIN_TIME_BETWEEN_UPDATES = timedelta(minutes=10)
MIN_TIME_BETWEEN_FORECAST_UPDATES = timedelta(minutes=30)
# Map each Home Assistant condition name to the OpenWeatherMap numeric
# weather codes that should report it (see OWM's condition-code list).
CONDITION_CLASSES = {
    'cloudy': [804],
    'fog': [701, 741],
    'hail': [906],
    'lightning': [210, 211, 212, 221],
    'lightning-rainy': [200, 201, 202, 230, 231, 232],
    'partlycloudy': [801, 802, 803],
    'pouring': [504, 314, 502, 503, 522],
    'rainy': [300, 301, 302, 310, 311, 312, 313, 500, 501, 520, 521],
    'snowy': [600, 601, 602, 611, 612, 620, 621, 622],
    'snowy-rainy': [511, 615, 616],
    'sunny': [800],
    'windy': [905, 951, 952, 953, 954, 955, 956, 957],
    'windy-variant': [958, 959, 960, 961],
    'exceptional': [711, 721, 731, 751, 761, 762, 771, 900, 901, 962, 903,
                    904],
}
# Platform configuration: API key is mandatory; latitude/longitude default
# to the Home Assistant instance's coordinates in setup_platform().
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_API_KEY): cv.string,
    vol.Optional(CONF_LATITUDE): cv.latitude,
    vol.Optional(CONF_LONGITUDE): cv.longitude,
    vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
})
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Set up the OpenWeatherMap weather platform."""
    import pyowm

    # Fall back to the instance-wide coordinates when none are configured.
    lat = config.get(CONF_LATITUDE, round(hass.config.latitude, 5))
    lon = config.get(CONF_LONGITUDE, round(hass.config.longitude, 5))
    name = config.get(CONF_NAME)

    try:
        owm = pyowm.OWM(config.get(CONF_API_KEY))
    except pyowm.exceptions.api_call_error.APICallError:
        _LOGGER.error("Error while connecting to OpenWeatherMap")
        return False

    weather_data = WeatherData(owm, lat, lon)
    entity = OpenWeatherMapWeather(
        name, weather_data, hass.config.units.temperature_unit)
    add_devices([entity], True)
class OpenWeatherMapWeather(WeatherEntity):
    """Implementation of an OpenWeatherMap sensor."""

    def __init__(self, name, owm, temperature_unit):
        """Initialize the sensor.

        owm is a WeatherData helper; self.data / self.forecast_data are
        populated on the first update().
        NOTE(review): _temperature_unit is stored but never read within
        this class -- confirm whether it is still needed.
        """
        self._name = name
        self._owm = owm
        self._temperature_unit = temperature_unit
        self.data = None
        self.forecast_data = None

    @property
    def name(self):
        """Return the name of the sensor."""
        return self._name

    @property
    def condition(self):
        """Return the current condition."""
        try:
            # Map the numeric OWM weather code onto an HA condition name.
            return [k for k, v in CONDITION_CLASSES.items() if
                    self.data.get_weather_code() in v][0]
        except IndexError:
            # Weather code not listed in CONDITION_CLASSES.
            return STATE_UNKNOWN

    @property
    def temperature(self):
        """Return the temperature."""
        return self.data.get_temperature('celsius').get('temp')

    @property
    def temperature_unit(self):
        """Return the unit of measurement."""
        # Always Celsius; pyowm is queried with 'celsius' above.
        return TEMP_CELSIUS

    @property
    def pressure(self):
        """Return the pressure."""
        return self.data.get_pressure().get('press')

    @property
    def humidity(self):
        """Return the humidity."""
        return self.data.get_humidity()

    @property
    def wind_speed(self):
        """Return the wind speed."""
        return self.data.get_wind().get('speed')

    @property
    def wind_bearing(self):
        """Return the wind bearing."""
        return self.data.get_wind().get('deg')

    @property
    def attribution(self):
        """Return the attribution."""
        return ATTRIBUTION

    @property
    def forecast(self):
        """Return the forecast array."""
        return [{
            ATTR_FORECAST_TIME: entry.get_reference_time('iso'),
            ATTR_FORECAST_TEMP: entry.get_temperature('celsius').get('temp')}
            for entry in self.forecast_data.get_weathers()]

    def update(self):
        """Get the latest data from OWM and updates the states."""
        # WeatherData applies its own Throttle; this just mirrors its state.
        self._owm.update()
        self._owm.update_forecast()
        self.data = self._owm.data
        self.forecast_data = self._owm.forecast_data
class WeatherData(object):
    """Fetch and cache current conditions and forecasts from OpenWeatherMap."""

    def __init__(self, owm, latitude, longitude):
        """Initialize the data object."""
        self.owm = owm
        self.latitude = latitude
        self.longitude = longitude
        self.data = None
        self.forecast_data = None

    @Throttle(MIN_TIME_BETWEEN_UPDATES)
    def update(self):
        """Get the latest data from OpenWeatherMap."""
        observation = self.owm.weather_at_coords(self.latitude, self.longitude)
        if observation is None:
            _LOGGER.warning("Failed to fetch data from OWM")
            return
        self.data = observation.get_weather()

    @Throttle(MIN_TIME_BETWEEN_FORECAST_UPDATES)
    def update_forecast(self):
        """Get the latest three-hour forecast from OpenWeatherMap."""
        forecaster = self.owm.three_hours_forecast_at_coords(
            self.latitude, self.longitude)
        if forecaster is None:
            _LOGGER.warning("Failed to fetch forecast data from OWM")
            return
        self.forecast_data = forecaster.get_forecast()
| Duoxilian/home-assistant | homeassistant/components/weather/openweathermap.py | Python | mit | 5,668 |
# -*- coding: utf-8 -*-
import datetime as dt
import unittest.mock
from django.test import TestCase
from influxdb import InfluxDBClient
from core.metrics.conf import settings as metrics_settings
from core.metrics.metric import metric
# Module-level sink for points "written" during a test; the test class
# resets it in tearDown.
_test_points = []


def fake_write_points(points):
    """Test stand-in for InfluxDBClient.write_points: record the points."""
    global _test_points
    _test_points.extend(points)
class TestMetric(TestCase):
    """Tests for metric(): points written to InfluxDB are captured via
    fake_write_points and inspected in _test_points.

    Note: mock.patch decorators apply bottom-up, so the first injected
    argument of each test is the write_points mock.
    """

    def tearDown(self):
        super(TestMetric, self).tearDown()
        # Reset the shared sink so tests stay independent.
        global _test_points
        _test_points = []

    @unittest.mock.patch.object(InfluxDBClient, 'get_list_database')
    @unittest.mock.patch.object(InfluxDBClient, 'create_database')
    @unittest.mock.patch.object(InfluxDBClient, 'write_points')
    def test_can_increment_a_simple_metric(self, mock_write_points, mock_list_db, mock_create_db):
        # Setup
        mock_write_points.side_effect = fake_write_points
        # Run
        metric('test__metric')
        # Check: default increment is 1.
        global _test_points
        self.assertEqual(
            _test_points, [{'tags': {}, 'fields': {'num': 1}, 'measurement': 'test__metric'}])

    @unittest.mock.patch.object(InfluxDBClient, 'get_list_database')
    @unittest.mock.patch.object(InfluxDBClient, 'create_database')
    @unittest.mock.patch.object(InfluxDBClient, 'write_points')
    def test_can_increment_a_simple_metric_by_a_specific_number(
            self, mock_write_points, mock_list_db, mock_create_db):
        # Setup
        mock_write_points.side_effect = fake_write_points
        # Run
        metric('test__metric', num=4)
        # Check: the num keyword becomes the field value.
        global _test_points
        self.assertEqual(
            _test_points, [{'tags': {}, 'fields': {'num': 4}, 'measurement': 'test__metric'}])

    @unittest.mock.patch.object(InfluxDBClient, 'get_list_database')
    @unittest.mock.patch.object(InfluxDBClient, 'create_database')
    @unittest.mock.patch.object(InfluxDBClient, 'write_points')
    def test_can_increment_a_simple_metric_by_specifying_a_specific_time(
            self, mock_write_points, mock_list_db, mock_create_db):
        # Setup
        mock_write_points.side_effect = fake_write_points
        nowd = dt.datetime.now()
        # Run
        metric('test__metric', time=nowd)
        # Check: an explicit time is forwarded on the point.
        global _test_points
        self.assertEqual(
            _test_points,
            [{
                'tags': {},
                'fields': {'num': 1},
                'measurement': 'test__metric',
                'time': nowd,
            }])

    @unittest.mock.patch.object(InfluxDBClient, 'get_list_database')
    @unittest.mock.patch.object(InfluxDBClient, 'create_database')
    @unittest.mock.patch.object(InfluxDBClient, 'write_points')
    def test_do_nothing_if_the_metrics_capturing_is_deactivated(
            self, mock_write_points, mock_list_db, mock_create_db):
        # Setup: temporarily disable capture via the settings flag.
        mock_write_points.side_effect = fake_write_points
        metrics_settings.ACTIVATED = False
        # Run
        metric('test__metric')
        # Check: nothing was written.
        global _test_points
        self.assertFalse(len(_test_points))
        metrics_settings.ACTIVATED = True

    def test_do_not_raise_in_case_of_connection_error(self):
        # Run without any mocks: the InfluxDB connection will fail, and
        # metric() is expected to swallow the error.
        metric('test__metric')
        # Check: nothing recorded, no exception propagated.
        global _test_points
        self.assertFalse(len(_test_points))
| erudit/zenon | tests/unit/core/metrics/test_metric.py | Python | gpl-3.0 | 3,303 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.