| code (string, 3-1.05M chars) | repo_name (string, 5-104 chars) | path (string, 4-251 chars) | language (1 class) | license (15 classes) | size (int64, 3-1.05M) |
|---|---|---|---|---|---|
# Write an algorithm that reads a value and prints its predecessor.
n = int(input("enter a number: "))
a = n - 1
print(a)
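# A sample interaction with the program above (the input value 10 is
# illustrative):
#   enter a number: 10
#   9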
| erikaklein/algoritmo---programas-em-Python | LerUmValorEscreverAntecessor.py | Python | mit | 128 |
# Copyright 2011 David Malcolm <[email protected]>
# Copyright 2011 Red Hat, Inc.
#
# This is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see
# <http://www.gnu.org/licenses/>.
import sys
import gcc
# Verify that the various error and warning methods work:
def on_pass_execution(p, fn):
if p.name == '*warn_function_return':
gcc.error(fn.start, 'this is an error (with positional args)')
gcc.error(location=fn.start,
message='this is an error (with keyword args)')
gcc.warning(fn.end, 'this is a warning (with positional args)',
gcc.Option('-Wdiv-by-zero'))
gcc.warning(location=fn.end,
message='this is a warning (with keyword args)',
option=gcc.Option('-Wdiv-by-zero'))
gcc.error(fn.start,
# These should be passed through, without triggering errors:
'a warning with some embedded format strings %s and %i')
# Verify that -Wno-format was honored
# The behavior of these flags changed in 4.8, so skip this part
# on gcc 4.8 onwards:
if gcc.GCC_VERSION <= 4007:
gcc.warning(fn.end,
'this warning ought not to appear',
gcc.Option('-Wformat'))
# Verify that we can issue an unconditional warning, with no option
# (as per https://fedorahosted.org/gcc-python-plugin/ticket/8 ):
gcc.warning(fn.end, 'this is an unconditional warning')
gcc.warning(fn.end, 'this is another unconditional warning', None)
# Verify that gcc.warning handles an object of the wrong type by
# raising a TypeError
try:
gcc.warning(fn.end, 'this is another unconditional warning',
'this should have been a gcc.Option instance, or None')
except TypeError:
err = sys.exc_info()[1]
sys.stderr.write('expected error was found: %s\n' % err)
else:
raise RuntimeError('expected exception was not raised')
# Exercise gcc.inform:
gcc.inform(fn.start, 'This is the start of the function')
gcc.inform(fn.end, 'This is the end of the function')
# Wire up our callback:
gcc.register_callback(gcc.PLUGIN_PASS_EXECUTION,
on_pass_execution)
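# For context, a diagnostics script like this one is loaded through GCC's
# plugin mechanism; a sketch of an invocation (plugin path and source file
# are hypothetical):
#   gcc -fplugin=./python.so \
#       -fplugin-arg-python-script=tests/plugin/diagnostics/script.py \
#       -c input.c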
| davidmalcolm/gcc-python-plugin | tests/plugin/diagnostics/script.py | Python | gpl-3.0 | 2,908 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import operator
from django.core.urlresolvers import reverse, reverse_lazy
from django.utils.decorators import method_decorator
from django.utils.translation import ugettext_lazy as _
from django.views.decorators.debug import sensitive_post_parameters
from horizon import exceptions
from horizon import forms
from horizon import tables
from openstack_dashboard import api
from .forms import CreateUserForm, UpdateUserForm
from .tables import UsersTable
class IndexView(tables.DataTableView):
table_class = UsersTable
template_name = 'admin/users/index.html'
def get_data(self):
users = []
try:
users = api.keystone.user_list(self.request)
        except Exception:
exceptions.handle(self.request,
_('Unable to retrieve user list.'))
return users
class UpdateView(forms.ModalFormView):
form_class = UpdateUserForm
template_name = 'admin/users/update.html'
success_url = reverse_lazy('horizon:admin:users:index')
@method_decorator(sensitive_post_parameters('password',
'confirm_password'))
def dispatch(self, *args, **kwargs):
return super(UpdateView, self).dispatch(*args, **kwargs)
def get_object(self):
if not hasattr(self, "_object"):
try:
self._object = api.keystone.user_get(self.request,
self.kwargs['user_id'],
admin=True)
            except Exception:
redirect = reverse("horizon:admin:users:index")
exceptions.handle(self.request,
_('Unable to update user.'),
redirect=redirect)
return self._object
def get_context_data(self, **kwargs):
context = super(UpdateView, self).get_context_data(**kwargs)
context['user'] = self.get_object()
return context
#trunglq add
def get_initial(self):
user = self.get_object()
try:
print user.secretkey
return {'id': user.id,
'name': user.name,
'tenant_id': getattr(user, 'tenantId', None),
'email': user.email,
'secretkey': user.secretkey}
except Exception as err:
return {'id': user.id,
'name': user.name,
'tenant_id': getattr(user, 'tenantId', None),
'email': user.email}
#end
class CreateView(forms.ModalFormView):
form_class = CreateUserForm
template_name = 'admin/users/create.html'
success_url = reverse_lazy('horizon:admin:users:index')
@method_decorator(sensitive_post_parameters('password',
'confirm_password'))
def dispatch(self, *args, **kwargs):
return super(CreateView, self).dispatch(*args, **kwargs)
def get_form_kwargs(self):
kwargs = super(CreateView, self).get_form_kwargs()
try:
roles = api.keystone.role_list(self.request)
        except Exception:
redirect = reverse("horizon:admin:users:index")
exceptions.handle(self.request,
_("Unable to retrieve user roles."),
redirect=redirect)
roles.sort(key=operator.attrgetter("id"))
kwargs['roles'] = roles
return kwargs
def get_initial(self):
default_role = api.keystone.get_default_role(self.request)
return {'role_id': getattr(default_role, "id", None)}
| trunglq7/horizon | openstack_dashboard/dashboards/admin/users/views.py | Python | apache-2.0 | 4,417 |
import json
import requests
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_exempt
FB_MESSENGER_ACCESS_TOKEN = "[TOKEN]"
def respond_FB(sender_id, text):
json_data = {
"recipient": {"id": sender_id},
"message": {"text": text + " to you!"}
}
params = {
"access_token": FB_MESSENGER_ACCESS_TOKEN
}
r = requests.post('https://graph.facebook.com/v2.6/me/messages', json=json_data, params=params)
print(r, r.status_code, r.text)
@csrf_exempt
def fb_webhook(request):
if request.method == "GET":
if (request.GET.get('hub.verify_token') == 'this_is_a_verify_token_created_by_sean'):
return HttpResponse(request.GET.get('hub.challenge'))
return HttpResponse('Error, wrong validation token')
if request.method == "POST":
body = request.body
print("BODY", body)
messaging_events = json.loads(body.decode("utf-8"))
print("JSON BODY", body)
sender_id = messaging_events["entry"][0]["messaging"][0]["sender"]["id"]
message = messaging_events["entry"][0]["messaging"][0]["message"]["text"]
respond_FB(sender_id, message)
return HttpResponse('Received.')
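# For reference, the POST body unpacked above has roughly this shape
# (IDs and message text are illustrative):
#   {"entry": [{"messaging": [{"sender": {"id": "1234567890"},
#                              "message": {"text": "hello"}}]}]}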
| voidabhi/python-scripts | webhook-fb-messenger.py | Python | mit | 1,193 |
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2015-2020 Florian Bruhin (The Compiler) <[email protected]>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""QWebHistory serializer for QtWebEngine."""
import time
from PyQt5.QtCore import QByteArray, QDataStream, QIODevice, QUrl
from qutebrowser.utils import qtutils
HISTORY_STREAM_VERSION = 3
def _serialize_item(item, stream):
"""Serialize a single WebHistoryItem into a QDataStream.
Args:
item: The WebHistoryItem to write.
stream: The QDataStream to write to.
"""
### Thanks to Otter Browser:
### https://github.com/OtterBrowser/otter-browser/blob/v0.9.10/src/modules/backends/web/qtwebengine/QtWebEngineWebWidget.cpp#L1210
### src/core/web_contents_adapter.cpp serializeNavigationHistory
## toQt(entry->GetVirtualURL());
qtutils.serialize_stream(stream, item.url)
## toQt(entry->GetTitle());
stream.writeQString(item.title)
## QByteArray(encodedPageState.data(), encodedPageState.size());
qtutils.serialize_stream(stream, QByteArray())
## static_cast<qint32>(entry->GetTransitionType());
# chromium/ui/base/page_transition_types.h
stream.writeInt32(0) # PAGE_TRANSITION_LINK
## entry->GetHasPostData();
stream.writeBool(False)
## toQt(entry->GetReferrer().url);
qtutils.serialize_stream(stream, QUrl())
## static_cast<qint32>(entry->GetReferrer().policy);
# chromium/third_party/WebKit/public/platform/WebReferrerPolicy.h
stream.writeInt32(0) # WebReferrerPolicyAlways
## toQt(entry->GetOriginalRequestURL());
qtutils.serialize_stream(stream, item.original_url)
## entry->GetIsOverridingUserAgent();
stream.writeBool(False)
## static_cast<qint64>(entry->GetTimestamp().ToInternalValue());
stream.writeInt64(int(time.time()))
## entry->GetHttpStatusCode();
stream.writeInt(200)
def serialize(items):
"""Serialize a list of WebHistoryItems to a data stream.
Args:
items: An iterable of WebHistoryItems.
Return:
A (stream, data, user_data) tuple.
stream: The reset QDataStream.
data: The QByteArray with the raw data.
cur_user_data: The user data for the current item or None.
Warning:
If 'data' goes out of scope, reading from 'stream' will result in a
segfault!
"""
data = QByteArray()
stream = QDataStream(data, QIODevice.ReadWrite)
cur_user_data = None
current_idx = None
for i, item in enumerate(items):
if item.active:
if current_idx is not None:
raise ValueError("Multiple active items ({} and {}) "
"found!".format(current_idx, i))
current_idx = i
cur_user_data = item.user_data
if items:
if current_idx is None:
raise ValueError("No active item found!")
else:
current_idx = -1
### src/core/web_contents_adapter.cpp serializeNavigationHistory
# kHistoryStreamVersion
stream.writeInt(HISTORY_STREAM_VERSION)
# count
stream.writeInt(len(items))
# currentIndex
stream.writeInt(current_idx)
for item in items:
_serialize_item(item, stream)
stream.device().reset()
qtutils.check_qdatastream(stream)
return stream, data, cur_user_data
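# --- Usage sketch (illustrative, not part of the original module) ---
# Demonstrates serialize() with a minimal stand-in for WebHistoryItem; the
# _FakeItem class and URL are hypothetical, and only the attributes used
# above (url, title, original_url, active, user_data) matter.
if __name__ == '__main__':
    class _FakeItem:
        def __init__(self, url, title, active=False):
            self.url = QUrl(url)
            self.title = title
            self.original_url = QUrl(url)
            self.active = active
            self.user_data = None
    _stream, _data, _cur = serialize(
        [_FakeItem('https://example.org/', 'Example', active=True)])
    print('serialized {} bytes'.format(_data.size()))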
| t-wissmann/qutebrowser | qutebrowser/browser/webengine/tabhistory.py | Python | gpl-3.0 | 3,979 |
# -*- coding: utf8 -*-
"""
Created on Wed Jan 7 11:49:59 2015
mono.py
Runs the single-objective algorithms n times, per the central limit theorem for normality.
Builds the comparative plot of the means of each generation/iteration.
@author: victor
@todo Add a local search at the end of each algorithm's run
"""
import d519
import mono_ga
import mono_aco
import mono_ils
import timeit
import sys
import scipy.stats.mstats as mstats # geometric mean calculation
from multiprocessing import Pool
cluster = False
if not cluster:
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as stats
import mysql.connector
plt.ion()
cnx = mysql.connector.connect(user='root', password='root', database='cemig_d519')
cursor = cnx.cursor()
pool = Pool(processes=None) # If processes is None then the number returned by cpu_count() is used.
# Number of runs to perform
n = 30
# Intermediate solutions to save
interS = [5000,10000,15000,20000]
# Optimization problem: 0 max(1-IC), 1 min(Cost)
problemaOtim = 0
# Knapsack problem to solve
'''
AlimPeq - ID 5 W 100 IC 0.5
AlimGra - ID 2 W 150 IC 0.6
'''
W = 100 # With the relaxed problem the ACO finds a good solution right away; when the problem is very constrained, the others win (very) quickly
ICLimite = 0.5 # min(Cost) problem
M = []
V = []
segmentosGrupos = []
segmentos = []
if not cluster:
query = ("SELECT DISTINCT EquipamentoMT.ID AS ID, Segmento.ID AS SEGMENTOID, EquipamentoMT.IC AS IC, EquipamentoMT.TIPO_EQUIPAMENTO_ID as TIPOEQP, "
"IF(EquipamentoMT.TIPO_EQUIPAMENTO_ID != 1, EquipamentoNovo.CUSTO, (EquipamentoNovo.CUSTO/1000*CaboMT.COMPRIMENTO)) AS CUSTO "
"/*EquipamentoNovo.CUSTO,*/ "
"/*EquipamentoMT.TIPO_EQUIPAMENTO_ID */ "
"FROM EquipamentoMT "
"JOIN Segmento ON EquipamentoMT.SEGMENTO_ID = Segmento.ID "
"JOIN Alimentador ON Segmento.ALIMENTADOR_ID = Alimentador.ID "
"JOIN EquipamentoNovo ON EquipamentoNovo.TIPO_EQUIPAMENTO_ID = EquipamentoMT.TIPO_EQUIPAMENTO_ID "
"LEFT JOIN CaboMT ON EquipamentoMT.ID = CaboMT.ID "
#" ") # SED inteira
"WHERE Alimentador.ID = 5") # 5 pequeno # 2 grande
cursor.execute(query)
i = 0
for(ID,SEGMENTOID,IC,TIPOEQP,CUSTO) in cursor:
M.append(CUSTO)
V.append((1-IC))
if SEGMENTOID not in segmentosGrupos:
segmentosGrupos.append(SEGMENTOID)
segmentos.append([])
posSeg = [k for k,x in enumerate(segmentosGrupos) if x == SEGMENTOID][0]
segmentos[posSeg].append(i)
i = i+1
cursor.close()
print V
print M
print segmentos
raw_input('pause')
else: # Cluster
M = []
    V = [] # Fill in only on the cluster
    segmentos = [] # list of lists with the ids (positions) of V and M that form the segments that make up the feeder
print 'sum(M): %f sum(V): %f len(segmentos): %d' % (sum(M),sum(V), len(segmentos))
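# For illustration (hypothetical values): with three pieces of equipment in
# two segments, the structures above could look like
#   M = [10.0, 12.5, 3.2]      # cost of each piece of equipment
#   V = [0.4, 0.5, 0.7]        # (1-IC) of each piece of equipment
#   segmentos = [[0, 1], [2]]  # indices of M/V grouped by segment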
# ILS settings
# AlimPeq
if problemaOtim == 0:
    iterMaxILS = 60000 # 30000 originally; 60000 evaluations
    iterMaxBL = 1
    pPert = 2
    iterMaxBL_P = 1
else:
    iterMaxILS = 150000 # 30000 originally; 150000 evaluations
    iterMaxBL = 1
    pPert = 2
    iterMaxBL_P = 1
"""
#AlimGra
if problemaOtim == 0:
iterMaxILS = 50000
iterMaxBL = 1
pPert = 2
iterMaxBL_P = 1
else:
iterMaxILS = 70000
iterMaxBL = 1
pPert = 2
iterMaxBL_P = 1
"""
# GA SETTINGS
#AlimPeq
# max(1-IC) configuration
if problemaOtim == 0:
    geracoes = 300
    tampop = 200
    crossover = 90
    mutacao = 5
    pressaoSel = 4
    cortes = 3
    buscaLocal = 2
else:
    # min(Cost) configuration
    geracoes = 500
    tampop = 300
    crossover = 90
    mutacao = 15
    pressaoSel = 2
    cortes = 3
    buscaLocal = 2
"""
#AlimGra
# Configuração max(1-IC)
if problemaOtim == 0:
geracoes = 400
tampop = 600 # Testei com g800 mas na g400/300 já estava bom
crossover = 90
mutacao = 5
pressaoSel = 4
cortes = 3
buscaLocal = 2
else:
# Configuração min(custo)
geracoes = 300
tampop = 700
crossover = 90
mutacao = 5
pressaoSel = 2
cortes = 3
buscaLocal = 2
"""
# ACO settings
#AlimPeq
if problemaOtim == 0:
    colonias = 400 # 50 for convergence; 400 for evaluation
    numForm = 150
    # Initial pheromone constant
    C = 100
    Q = d519.randrange(11,100)
    alpha = 1 # making alpha stronger increases the pheromone's influence, but removing random alleles without changing the pheromone is a bad idea
    beta = 5
    rho = 0.3
else:
    colonias = 500 # 250 for convergence; 500 for evaluation
    numForm = 300
    # Initial pheromone constant
    C = 100
    Q = d519.randrange(11,100)
    alpha = 1 # making alpha stronger increases the pheromone's influence, but removing random alleles without changing the pheromone is a bad idea
    beta = 5
    rho = 0.3
"""
#AlimGra
if problemaOtim == 0:
colonias = 90 # a partir da 80 começa a convergir muito rapido
numForm = 150
# Constante inicial de ferormonio
C = 100
Q = d519.randrange(11,100)
alpha = 1 # deixar alpha mais forte, aumenta a força do ferormônio, porém retirar alelos aleatórios sem mudar o ferormônio é cagada
beta = 5
rho = 0.3
else:
colonias = 500
numForm = 300
# Constante inicial de ferormonio
C = 100
Q = d519.randrange(11,100)
alpha = 1 # deixar alpha mais forte, aumenta a força do ferormônio, porém retirar alelos aleatórios sem mudar o ferormônio é cagada
beta = 5
rho = 0.3
"""
#M = [92,4,43,83,84,68,92,82,6,44,32,18,56,83,25,96,70,48,14,58]
#V = [44,46,90,72,91,40,75,35,8,54,78,40,77,15,61,17,75,29,75,63]
#W = 878
# list of the methods to be called
#algoritmos = ['mono_ils','mono_ga', 'mono_aco']
#algoritmos = [ 'mono_ga']
#algoritmos = ['mono_ils', 'mono_ils','mono_ils','mono_ils']
#algoritmos = ['mono_aco','mono_aco','mono_aco','mono_aco','mono_aco']
#algoritmos = ['mono_aco']
#algoritmos = ['mono_ga', 'mono_ga', 'mono_ga', 'mono_ga', 'mono_ga']
#algoritmos = ['mono_ils','mono_ils','mono_ils','mono_ils','mono_ils']
#algoritmos = ['mono_ga','mono_ga','mono_ga', 'mono_ga','mono_ga']
#algoritmos = ['mono_aco','mono_aco','mono_aco','mono_aco','mono_aco']
algoritmos = ['mono_ils', 'mono_ga', 'mono_aco']
#algoritmos = ['mono_aco']
# names to plot (if False, the algorithm name is used)
#nomesAlg = ['mono_ils', 'mono_ga', 'mono_aco']
#nomesAlg = [ 'mono_ga']
#nomesAlg = ['mono_ils_7000P', 'mono_ils_8000P', 'mono_ils_9000P', 'mono_ils_10000P']
#nomesAlg = ['mono_aco_a1b5', 'mono_aco_a3b5', 'mono_aco_a5b3', 'mono_aco_a5b1', 'mono_aco_a3b3']
#nomesAlg = ['mono_aco']
#nomesAlg = ['mono_ag_BL1', 'mono_ag_BL2', 'mono_ag_BL3', 'mono_ag_BL4', 'mono_ag_BL5']
#nomesAlg = ['mono_ils_BL1','mono_ils_BL2','mono_ils_BL3','mono_ils_BL4','mono_ils_BL5']
#nomesAlg = ['mono_ga_p50','mono_ga_p100','mono_ga_p200','mono_ga_p300', 'mono_ga_p400']
#nomesAlg = ['mono_aco_f10', 'mono_aco_f75', 'mono_aco_f150', 'mono_aco_f300', 'mono_aco_f700']
nomesAlg = ['mono_ils_BL1', 'mono_ga_200', 'mono_aco_150']
#nomesAlg = ['mono_aco_150']
# list of matplotlib markers
marcadores = ['b*','gx','r+', 'c^', 'mv', 'yo', 'kH']
# Parameter set for each algorithm's run
# all
#parametrizacao = [[False,iterMaxILS,iterMaxBL,pPert,iterMaxBL_P,W,M,V,interS,segmentos,ICLimite,problemaOtim],[False,W,M,V,geracoes,tampop,crossover,mutacao,pressaoSel,interS,segmentos,ICLimite,problemaOtim],[False,colonias,numForm,C,Q,alpha,beta,rho,W,M,V,interS,segmentos,ICLimite,problemaOtim]]
#ag
#parametrizacao= [[False,W,M,V,geracoes,tampop,crossover,mutacao,pressaoSel,cortes,buscaLocal,interS,segmentos,ICLimite,problemaOtim]]
#ils
#parametrizacao= [[False,15000,10,pPert,iterMaxBL_P,W,M,V,interS,segmentos,ICLimite,problemaOtim], [False,15000,20,pPert,iterMaxBL_P,W,M,V,interS,segmentos,ICLimite,problemaOtim], [False,15000,30,pPert,iterMaxBL_P,W,M,V,interS,segmentos,ICLimite,problemaOtim], [False,15000,40,pPert,iterMaxBL_P,W,M,V,interS,segmentos,ICLimite,problemaOtim]]
#acos
#parametrizacao=[[False,colonias,numForm,C,Q,alpha,beta,rho,W,M,V,interS,segmentos,ICLimite,problemaOtim],[False,colonias,numForm,C,Q,3,5,rho,W,M,V,interS,segmentos,ICLimite,problemaOtim],[False,colonias,numForm,C,Q,5,3,rho,W,M,V,interS,segmentos,ICLimite,problemaOtim],[False,colonias,numForm,C,Q,5,1,rho,W,M,V,interS,segmentos,ICLimite,problemaOtim],[False,colonias,numForm,C,Q,3,3,rho,W,M,V,interS,segmentos,ICLimite,problemaOtim]]
#aco
#parametrizacao=[[False,colonias,numForm,C,Q,alpha,beta,rho,W,M,V,interS,segmentos,ICLimite,problemaOtim]]
#ag
#parametrizacao = [[False,W,M,V,geracoes,tampop,crossover,mutacao,pressaoSel,cortes,1,interS,segmentos,ICLimite,problemaOtim],[False,W,M,V,geracoes,tampop,crossover,mutacao,pressaoSel,cortes,2,interS,segmentos,ICLimite,problemaOtim],[False,W,M,V,geracoes,tampop,crossover,mutacao,pressaoSel,cortes,3,interS,segmentos,ICLimite,problemaOtim],[False,W,M,V,geracoes,tampop,crossover,mutacao,pressaoSel,cortes,4,interS,segmentos,ICLimite,problemaOtim],[False,W,M,V,geracoes,tampop,crossover,mutacao,pressaoSel,cortes,5,interS,segmentos,ICLimite,problemaOtim]]
#ils
#parametrizacao = [[False,iterMaxILS,1,pPert,iterMaxBL_P,W,M,V,interS,segmentos,ICLimite,problemaOtim],[False,iterMaxILS,2,pPert,iterMaxBL_P,W,M,V,interS,segmentos,ICLimite,problemaOtim],[False,iterMaxILS,3,pPert,iterMaxBL_P,W,M,V,interS,segmentos,ICLimite,problemaOtim],[False,iterMaxILS,4,pPert,iterMaxBL_P,W,M,V,interS,segmentos,ICLimite,problemaOtim],[False,iterMaxILS,5,pPert,iterMaxBL_P,W,M,V,interS,segmentos,ICLimite,problemaOtim]]
#ag
#parametrizacao = [[False,W,M,V,geracoes,50,crossover,mutacao,pressaoSel,cortes,buscaLocal,interS,segmentos,ICLimite,problemaOtim],[False,W,M,V,geracoes,100,crossover,mutacao,pressaoSel,cortes,buscaLocal,interS,segmentos,ICLimite,problemaOtim],[False,W,M,V,geracoes,200,crossover,mutacao,pressaoSel,cortes,buscaLocal,interS,segmentos,ICLimite,problemaOtim],[False,W,M,V,geracoes,300,crossover,mutacao,pressaoSel,cortes,buscaLocal,interS,segmentos,ICLimite,problemaOtim],[False,W,M,V,geracoes,400,crossover,mutacao,pressaoSel,cortes,buscaLocal,interS,segmentos,ICLimite,problemaOtim]]
#aco
#parametrizacao = [[False,colonias,10,C,Q,alpha,beta,rho,W,M,V,interS,segmentos,ICLimite,problemaOtim],[False,colonias,75,C,Q,alpha,beta,rho,W,M,V,interS,segmentos,ICLimite,problemaOtim],[False,colonias,150,C,Q,alpha,beta,rho,W,M,V,interS,segmentos,ICLimite,problemaOtim],[False,colonias,300,C,Q,alpha,beta,rho,W,M,V,interS,segmentos,ICLimite,problemaOtim],[False,colonias,700,C,Q,alpha,beta,rho,W,M,V,interS,segmentos,ICLimite,problemaOtim]]
#allAval
parametrizacao = [[False,iterMaxILS,1,pPert,iterMaxBL_P,W,M,V,interS,segmentos,ICLimite,problemaOtim], [False,W,M,V,geracoes,tampop,crossover,mutacao,pressaoSel,cortes,buscaLocal,interS,segmentos,ICLimite,problemaOtim],[False,colonias,numForm,C,Q,alpha,beta,rho,W,M,V,interS,segmentos,ICLimite,problemaOtim]]
#parametrizacao = [[False,colonias,numForm,C,Q,alpha,beta,rho,W,M,V,interS,segmentos,ICLimite,problemaOtim]]
# Means of the best runs of each iteration/generation
execucoes = []
for i in range(0,len(algoritmos)): # for each algorithm
execucoes.append([])
m = __import__ (algoritmos[i])
toCall = getattr(m,algoritmos[i])
res = []
    for j in range(0,n): # Build a list of 30 runs, each parameterized inside the GA by generations/iterations
        print 'Running algorithm \'%s\' at position %i, run %i' % (nomesAlg[i],i,j)
#res = toCall(parametrizacao[i])
#execucoes[i].append(res)
res.append(pool.apply_async(toCall,[parametrizacao[i]]))
start_time = timeit.default_timer()
    print 'Running the %d runs... ' % (len(res))
for j in range(0,len(res)):
execucoes[i].append(res[j].get())
    if problemaOtim == 0:
        print 'Maximum value of the first run: %f' % (max(execucoes[i][0][0]))
    else:
        print 'Minimum value of the first run: %f' % (min(execucoes[i][0][0]))
elapsed = timeit.default_timer() - start_time
    print 'Average execution time for algorithm %s: %f seconds' % (nomesAlg[i],elapsed/float(n))
print '----------------------'
# After all algorithms have run, build a mean of the algorithms' iterations to plot
# and draw a single plot with the results
for i in range(0,len(algoritmos)):
    # Rebuild a list with only the best values (zip() issues)
best = []
for j in range(0,len(execucoes[i])):
teste = execucoes[i][j][0]
        best.append(teste) # teste[1::300] for ILS, teste[1::5] for GA
    # Build a single list with the sum of all runs
#zipped_list = zip(*execucoes[i])
zipped_list = zip(*best)
medias = [sum(item)/float(n) for item in zipped_list]
    # @todo check whether python has a ternary operator (it does: "a if cond else b")
    if not nomesAlg[i]:
        nomesAlg[i] = algoritmos[i]
    print 'Final result (means) for algorithm %s:' % (nomesAlg[i])
#print medias
if not cluster:
        # plot on the chart
plt.plot( range(0,len(medias)), medias, marcadores[i], label=nomesAlg[i])
else:
print 'plt.plot(',range(0,len(medias)),', ',medias,', \'',marcadores[i],'\', label=\'',nomesAlg[i],'\')'
if cluster:
print 'plt.legend(loc=\'best\')'
print 'plt.grid()'
print 'plt.show()'
print "\n\nHISTOGRAMAS\n\n"
# The histogram is built out here because the cluster doesn't have numpy
histoDados = []
for i in range(0,len(algoritmos)):
histoDados.append([])
for j in range(0,len(execucoes[i])):
        if problemaOtim == 0:
            histoDados[i].append(max(execucoes[i][j][0])) # will hold the best of each of the 30 runs
        else:
            histoDados[i].append(min(execucoes[i][j][0])) # will hold the best of each of the 30 runs
    h = sorted(histoDados[i]) #sorted
print 'fit = stats.norm.pdf(',h,', np.mean(',h,'), np.std(',h,'))'
print 'plt.figure()'
print 'plt.plot(',h,',fit,\'-o\')'
print 'plt.hist(',h,',normed=True)'
    print 'plt.title(\'Histogram ',nomesAlg[i],'\') '
print 'plt.show()'
print 'std = np.std(',h,')'
print 'mean = np.mean(',h,')'
print 'z,pval = stats.mstats.normaltest(',h,')'
print 'print \'std for algorithm %s: %f\' % (\'',nomesAlg[i],'\',std)'
print 'print \'mean for algorithm %s: %f\' % (\'',nomesAlg[i],'\',mean)'
print 'print \'p-value %s: %f\' % (\'',nomesAlg[i],'\',pval)'
print '---\nIntermediate runs'
for i in range(0,len(algoritmos)):
    # Rebuild a list with only the intermediate solutions (zip() issues)
parciais = []
avaliacao = 0
for j in range(0,len(execucoes[i])):
parciais.append(execucoes[i][j][1])
avaliacao = avaliacao + execucoes[i][j][2]
    # Build a single list with the sum of all runs
zipped_list = zip(*parciais)
medias = [sum(item)/float(n) for item in zipped_list]
    print 'Partial means of algorithm %s' % (nomesAlg[i])
    print medias
    print 'Average evaluations of algorithm %s: %d' % (nomesAlg[i], avaliacao/float(n))
sys.exit(0) #Exits with zero, which is generally interpreted as success. Non-zero codes are usually treated as errors. The default is to exit with zero.
plt.legend(loc='best')
if problemaOtim == 0:
    plt.xlabel(u'Generations (x5)')
    plt.ylabel(u'Mean$_{v}$')
    plt.title(u'max(1-$\overline{IC}$)')
else:
    plt.xlabel(u'Generations (x5)')
    plt.ylabel(u'Mean$_{p}$')
    plt.title(u'min(Cost)')
plt.grid()
plt.show()
# Save the histograms
# Retrieve the best result of the 30 runs and plot the histogram for each algorithm, i.e., the last vector of each run, which holds the final answer
histoDados = []
for i in range(0,len(algoritmos)):
histoDados.append([])
for j in range(0,len(execucoes[i])):
        if problemaOtim == 0:
            histoDados[i].append(max(execucoes[i][j][0])) # will hold the best of each of the 30 runs
        else:
            histoDados[i].append(min(execucoes[i][j][0])) # will hold the best of each of the 30 runs
    h = sorted(histoDados[i]) #sorted
# norm: A normal continuous random variable.
#The probability density function for norm is:
#norm.pdf(x) = exp(-x**2/2)/sqrt(2*pi)
fit = stats.norm.pdf(h, np.mean(h), np.std(h)) #this is a fitting indeed
plt.figure()
plt.plot(h,fit,'-o')
    plt.hist(h,normed=True) # use this to draw the histogram of the data
    plt.title('Histogram %s' % (nomesAlg[i]))
    plt.show() # you may also need to add this
std = np.std(h)
mean = np.mean(h)
z,pval = stats.mstats.normaltest(h)
print 'std for algorithm %s: %f' % (nomesAlg[i],std)
print 'mean for algorithm %s: %f'% (nomesAlg[i],mean)
print 'p-value %s: %f'% (nomesAlg[i],pval)
for i in range(0,len(histoDados)):
    print 'histoDados for algorithm %s:' % (nomesAlg[i])
print histoDados[i]
print '---\nIntermediate runs and evaluations'
for i in range(0,len(algoritmos)):
    # Rebuild a list with only the intermediate solutions (zip() issues)
parciais = []
avaliacao = 0
for j in range(0,len(execucoes[i])):
parciais.append(execucoes[i][j][1])
avaliacao = avaliacao + execucoes[i][j][2]
    # Build a single list with the sum of all runs
zipped_list = zip(*parciais)
medias = [sum(item)/float(n) for item in zipped_list]
    print 'Partial means of algorithm %s' % (nomesAlg[i])
    print medias
    print 'Average evaluations of algorithm %s: %d' % (nomesAlg[i], avaliacao/float(n))
pool.terminate() | vhte/cefetdiss | mono.py | Python | mit | 17,883 |
# Copyright (c) 2015-2016 Tigera, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
from neutron_lib import constants
from neutron_lib import exceptions as n_exc
except ImportError:
from neutron.common import constants
from neutron.common import exceptions as n_exc
try:
from oslo_log import log
except ImportError: # Icehouse, Juno
from neutron.openstack.common import log
try:
from oslo_config import cfg
except ImportError:
# Icehouse, Juno
from oslo.config import cfg
try:
from oslo_db import exception as db_exc
except ImportError:
try:
# Juno
from oslo.db import exception as db_exc
except ImportError:
# Icehouse
from neutron.openstack.common.db import exception as db_exc
try:
from oslo_concurrency import lockutils
except ImportError:
# Icehouse, Juno
from neutron.openstack.common import lockutils
try:
from neutron_lib.constants import DHCPV6_STATEFUL
except ImportError:
# Mitaka and earlier
from neutron.common.constants import DHCPV6_STATEFUL
try:
# Introduced during Ocata development cycle.
from neutron_lib.plugins import directory as plugin_dir
except ImportError:
# Pre-Ocata.
from neutron.manager import NeutronManager as plugin_dir
try:
# Present here since January 2016 (commit c8be1a1be91).
from neutron_lib.constants import IP_PROTOCOL_MAP
except ImportError:
# We probably don't need to support IP protocol names for older
# OpenStack versions. But if such a need arises, we can add code
# here to get IP_PROTOCOL_MAP in the appropriate way from those
# old versions.
IP_PROTOCOL_MAP = {}
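# A sketch of intended usage (the option name is hypothetical): the rest of
# the code base imports these names from this module so it stays agnostic
# of the OpenStack release, e.g.:
#     from networking_calico.compat import cfg, log
#     LOG = log.getLogger(__name__)
#     interval = cfg.CONF.report_interval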
| projectcalico/calico | networking-calico/networking_calico/compat.py | Python | apache-2.0 | 2,199 |
import os
from functools import wraps
from flask import abort, current_app
from flask_login import current_user, login_required
from app.notify_client.organisations_api_client import organisations_client
user_is_logged_in = login_required
with open('{}/email_domains.txt'.format(
os.path.dirname(os.path.realpath(__file__))
)) as email_domains:
GOVERNMENT_EMAIL_DOMAIN_NAMES = [line.strip() for line in email_domains]
def user_has_permissions(*permissions, **permission_kwargs):
def wrap(func):
@wraps(func)
def wrap_func(*args, **kwargs):
if not current_user.is_authenticated:
return current_app.login_manager.unauthorized()
if not current_user.has_permissions(*permissions, **permission_kwargs):
abort(403)
return func(*args, **kwargs)
return wrap_func
return wrap
def user_is_gov_user(f):
@wraps(f)
def wrapped(*args, **kwargs):
if not current_user.is_authenticated:
return current_app.login_manager.unauthorized()
if not current_user.is_gov_user:
abort(403)
return f(*args, **kwargs)
return wrapped
def user_is_platform_admin(f):
@wraps(f)
def wrapped(*args, **kwargs):
if not current_user.is_authenticated:
return current_app.login_manager.unauthorized()
if not current_user.platform_admin:
abort(403)
return f(*args, **kwargs)
return wrapped
def is_gov_user(email_address):
return _email_address_ends_with(
email_address, GOVERNMENT_EMAIL_DOMAIN_NAMES
) or _email_address_ends_with(
email_address, organisations_client.get_domains()
)
def _email_address_ends_with(email_address, known_domains):
return any(
email_address.lower().endswith((
"@{}".format(known),
".{}".format(known),
))
for known in known_domains
)
def normalise_email_address_aliases(email_address):
local_part, domain = email_address.split('@')
local_part = local_part.split('+')[0].replace('.', '')
return f'{local_part}@{domain}'.lower()
def distinct_email_addresses(*args):
return len(args) == len(set(map(normalise_email_address_aliases, args)))
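# Illustrative behaviour of the helpers above (addresses are hypothetical):
#   normalise_email_address_aliases('First.Last+alias@Example.gov.uk')
#     -> 'firstlast@example.gov.uk'
#   distinct_email_addresses('a.b@x.gov.uk', 'ab+c@x.gov.uk')
#     -> False  (both normalise to 'ab@x.gov.uk')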
| alphagov/notifications-admin | app/utils/user.py | Python | mit | 2,271 |
# -*- coding: utf8 -*-
"""
This module provides an HTTP request handler.
"""
import cgi
from http.server import BaseHTTPRequestHandler
a="""
<html>
<head>
<meta charset="utf-8">
</head>
<body>
Hello!
</body>
</html>
"""
class clsHttpRequest(BaseHTTPRequestHandler):
def do_GET(self):
        print('client address=', self.client_address, self.requestline)
self.send_response(200)
        self.send_header('Content-type', 'text/html')
self.end_headers()
self.wfile.write(a.encode('utf8'))
def do_POST(self):
"""
При использовании модуля cgi -- происходит разбор параметров запроса.
"""
try:
            ctype, pdict = cgi.parse_header(self.headers.get("content-type"))
if ctype == "multipart/form-data":
query = cgi.parse_multipart(self.rfile, pdict)
self.send_response(200)
self.end_headers()
#upfile = query.get("file")
params = " np output.exe"
p = query.get("encryption")
if p[0] == "aes":
params += " sf 1"
elif p[0] == "rc5":
params += " sf 2"
elif p[0] == "xor":
params += " sf 3"
else:
params += " sf 0"
p = query.get("hw_bind")
if p[0] == "yes":
p = query.get("hw_bind_serial")
assert len(p[0]) == 8
params += " sn " + p[0]
else:
params += " sn 0"
p = query.get("passwd")
assert len(p[0]) > 0
params += " pass " + p[0]
p = query.get("pack")
if p[0] == "yes":
params += " pack 1"
else:
params += " pack 0"
            self.wfile.write('Download results.'.encode('utf8'))
except Exception:
pass
| prospero78/pyTrans | Server/pakTransServ/pakControl/pakServerThread/pakHttpRequest/modHttpRequest.py | Python | bsd-2-clause | 2,061 |
#!/usr/bin/python
#
# Copyright (c) 2018 Yuwei Zhou, <[email protected]>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_route
version_added: "2.7"
short_description: Manage Azure route resource.
description:
- Create, update or delete a route.
options:
resource_group:
description:
- name of resource group.
required: true
name:
description:
- name of the route.
required: true
state:
description:
- Assert the state of the route. Use C(present) to create or update and
C(absent) to delete.
default: present
choices:
- absent
- present
address_prefix:
description:
- The destination CIDR to which the route applies.
next_hop_type:
description:
- The type of Azure hop the packet should be sent to.
choices:
- virtual_network_gateway
- vnet_local
- internet
- virtual_appliance
- none
default: 'none'
next_hop_ip_address:
description:
- The IP address packets should be forwarded to.
- Next hop values are only allowed in routes where the next hop type is VirtualAppliance.
route_table_name:
description:
- The name of the route table.
required: true
extends_documentation_fragment:
- azure
- azure_tags
author:
- "Yuwei Zhou (@yuwzho)"
'''
EXAMPLES = '''
- name: Create a route
azure_rm_route:
name: foobar
resource_group: myResourceGroup
address_prefix: 10.1.0.0/16
next_hop_type: virtual_network_gateway
route_table_name: table
- name: Delete a route
azure_rm_route:
name: foobar
resource_group: myResourceGroup
route_table_name: table
state: absent
'''
RETURN = '''
id:
description: Current state of the route.
returned: success
type: str
'''
try:
from msrestazure.azure_exceptions import CloudError
except ImportError:
# This is handled in azure_rm_common
pass
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
from ansible.module_utils.common.dict_transformations import _snake_to_camel
class AzureRMRoute(AzureRMModuleBase):
def __init__(self):
self.module_arg_spec = dict(
resource_group=dict(type='str', required=True),
name=dict(type='str', required=True),
state=dict(type='str', default='present', choices=['present', 'absent']),
address_prefix=dict(type='str'),
next_hop_type=dict(type='str',
choices=['virtual_network_gateway',
'vnet_local',
'internet',
'virtual_appliance',
'none'],
default='none'),
next_hop_ip_address=dict(type='str'),
route_table_name=dict(type='str', required=True)
)
required_if = [
('state', 'present', ['next_hop_type'])
]
self.resource_group = None
self.name = None
self.state = None
self.address_prefix = None
self.next_hop_type = None
self.next_hop_ip_address = None
self.route_table_name = None
self.results = dict(
changed=False,
id=None
)
super(AzureRMRoute, self).__init__(self.module_arg_spec,
required_if=required_if,
supports_check_mode=True)
def exec_module(self, **kwargs):
for key in list(self.module_arg_spec.keys()):
setattr(self, key, kwargs[key])
result = dict()
changed = False
self.next_hop_type = _snake_to_camel(self.next_hop_type, capitalize_first=True)
result = self.get_route()
if self.state == 'absent' and result:
changed = True
if not self.check_mode:
self.delete_route()
elif self.state == 'present':
if not result:
changed = True # create new route
else: # check update
if result.next_hop_type != self.next_hop_type:
self.log('Update: {0} next_hop_type from {1} to {2}'.format(self.name, result.next_hop_type, self.next_hop_type))
changed = True
if result.next_hop_ip_address != self.next_hop_ip_address:
self.log('Update: {0} next_hop_ip_address from {1} to {2}'.format(self.name, result.next_hop_ip_address, self.next_hop_ip_address))
changed = True
if result.address_prefix != self.address_prefix:
self.log('Update: {0} address_prefix from {1} to {2}'.format(self.name, result.address_prefix, self.address_prefix))
changed = True
if changed:
result = self.network_models.Route(name=self.name,
address_prefix=self.address_prefix,
next_hop_type=self.next_hop_type,
next_hop_ip_address=self.next_hop_ip_address)
if not self.check_mode:
result = self.create_or_update_route(result)
self.results['id'] = result.id if result else None
self.results['changed'] = changed
return self.results
def create_or_update_route(self, param):
try:
poller = self.network_client.routes.create_or_update(self.resource_group, self.route_table_name, self.name, param)
return self.get_poller_result(poller)
except Exception as exc:
self.fail("Error creating or updating route {0} - {1}".format(self.name, str(exc)))
def delete_route(self):
try:
poller = self.network_client.routes.delete(self.resource_group, self.route_table_name, self.name)
result = self.get_poller_result(poller)
return result
except Exception as exc:
self.fail("Error deleting route {0} - {1}".format(self.name, str(exc)))
def get_route(self):
try:
return self.network_client.routes.get(self.resource_group, self.route_table_name, self.name)
except CloudError as cloud_err:
# Return None iff the resource is not found
if cloud_err.status_code == 404:
self.log('{0}'.format(str(cloud_err)))
return None
self.fail('Error: failed to get resource {0} - {1}'.format(self.name, str(cloud_err)))
except Exception as exc:
self.fail('Error: failed to get resource {0} - {1}'.format(self.name, str(exc)))
def main():
AzureRMRoute()
if __name__ == '__main__':
main()
| EvanK/ansible | lib/ansible/modules/cloud/azure/azure_rm_route.py | Python | gpl-3.0 | 7,364 |
# Author: duramato <[email protected]>
# URL: https://github.com/SickRage/sickrage
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
from urllib import quote_plus
from sickbeard import logger
from sickbeard import tvcache
from sickbeard import helpers
from sickbeard.providers import generic
from sickbeard.common import USER_AGENT
class TORRENTPROJECTProvider(generic.TorrentProvider):
def __init__(self):
generic.TorrentProvider.__init__(self, "TorrentProject")
self.supportsBacklog = True
self.public = True
self.ratio = 0
self.urls = {'api': u'https://torrentproject.se/',}
self.url = self.urls['api']
self.headers.update({'User-Agent': USER_AGENT})
self.minseed = None
self.minleech = None
self.cache = TORRENTPROJECTCache(self)
def _doSearch(self, search_strings, search_mode='eponly', epcount=0, age=0, epObj=None):
results = []
items = {'Season': [], 'Episode': [], 'RSS': []}
for mode in search_strings.keys(): # Mode = RSS, Season, Episode
logger.log(u"Search Mode: %s" % mode, logger.DEBUG)
for search_string in search_strings[mode]:
                if mode != 'RSS':
logger.log(u"Search string: %s " % search_string, logger.DEBUG)
searchURL = self.urls['api'] + "?s=%s&out=json&filter=2101&num=150" % quote_plus(search_string.encode('utf-8'))
logger.log(u"Search URL: %s" % searchURL, logger.DEBUG)
torrents = self.getURL(searchURL, json=True)
if not (torrents and "total_found" in torrents and int(torrents["total_found"]) > 0):
logger.log(u"Data returned from provider does not contain any torrents", logger.DEBUG)
continue
del torrents["total_found"]
results = []
for i in torrents:
title = torrents[i]["title"]
seeders = helpers.tryInt(torrents[i]["seeds"], 1)
leechers = helpers.tryInt(torrents[i]["leechs"], 0)
if seeders < self.minseed or leechers < self.minleech:
                        if mode != 'RSS':
logger.log(u"Torrent doesn't meet minimum seeds & leechers not selecting : %s" % title, logger.DEBUG)
continue
t_hash = torrents[i]["torrent_hash"]
size = int(torrents[i]["torrent_size"])
try:
assert seeders < 10
                        assert mode != 'RSS'
logger.log(u"Torrent has less than 10 seeds getting dyn trackers: " + title, logger.DEBUG)
trackerUrl = self.urls['api'] + "" + t_hash + "/trackers_json"
jdata = self.getURL(trackerUrl, json=True)
                        assert jdata != "maintenance"
download_url = "magnet:?xt=urn:btih:" + t_hash + "&dn=" + title + "".join(["&tr=" + s for s in jdata])
except (Exception, AssertionError):
download_url = "magnet:?xt=urn:btih:" + t_hash + "&dn=" + title + "&tr=udp://tracker.openbittorrent.com:80&tr=udp://tracker.coppersurfer.tk:6969&tr=udp://open.demonii.com:1337&tr=udp://tracker.leechers-paradise.org:6969&tr=udp://exodus.desync.com:6969"
if not all([title, download_url]):
continue
item = title, download_url, size, seeders, leechers
                    if mode != 'RSS':
logger.log(u"Found result: %s" % title, logger.DEBUG)
items[mode].append(item)
# For each search mode sort all the items by seeders
items[mode].sort(key=lambda tup: tup[3], reverse=True)
results += items[mode]
return results
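    # For reference, the search_strings argument of _doSearch maps a mode to
    # a list of query strings; a sketch (the show name is hypothetical):
    #   {'Episode': ['Show.Name S01E01'], 'Season': ['Show.Name S01'],
    #    'RSS': ['0day']}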
def seedRatio(self):
return self.ratio
class TORRENTPROJECTCache(tvcache.TVCache):
def __init__(self, provider_obj):
tvcache.TVCache.__init__(self, provider_obj)
self.minTime = 20
def _getRSSData(self):
search_params = {'RSS': ['0day']}
return {'entries': self.provider._doSearch(search_params)}
provider = TORRENTPROJECTProvider()
| badloop/SickRage | sickbeard/providers/torrentproject.py | Python | gpl-3.0 | 4,952 |
"""
autoGonk -- auto-configuration script for break
Step 0: be vewy quiet, wew hunting wabbits! (silence on the wire)
Step 1: calculate victim IP and MAC address
Step 2: calculate gateway IP and MAC
Step 3: ?
Step 4: write our configuration to file
"""
import sys
from os import getuid
try:
from scapy.all import *
from netaddr import *
except ImportError as e:
print("[E] %s" % e)
sys.exit(2)
ROOTUID = 0  # root's uid
class Host():
ip = None
mac = None
mask = None
gateway = None
def __init__(self, ip='0', mac='0', mask='0', gw='0'):
if not self.setIP(ip) or not self.setMAC(mac) or not self.setMask(mask) or not self.setGateway(gw):
raise ValueError
def setIP(self, ip='0.0.0.0'):
self.ip = ip
return True
def setMAC(self, mac='00:00:00:00:00:00'):
self.mac = mac
return True
def setMask(self, netmask='255.255.255.255'):
self.mask = netmask
return True
def setGateway(self, gw='127.0.0.1'):
self.gateway = gw
return True
def __repr__(self):
return "%s/%s (%s) -> %s" % (self.ip, self.mask, self.mac, self.gateway)
class Gateway(Host):
pass
class Victim(Host):
pass
if __name__ == "__main__":
# TODO: this will eventually use a live capture, which will require root...
gw = Host()
vic = Host()
if getuid() > ROOTUID:
print "[E] must be root!"
sys.exit(2)
# TODO: when this switches to a live capture, make sure it ONLY captures on
# the interface our victim is plugged in to.
capfile = '/Users/sodaphish/Desktop/sample.pcap'
a = rdpcap(capfile)
sessions = a.sessions()
for session in sessions:
for pkt in sessions[session]:
try:
# pkt[ARP].psrc will have the IP of the target/victim and
# pkt[ARP].hwsrc will have the target/victim's MAC address
# print pkt[ARP].hwsrc, pkt[ARP].hwdst, pkt[ARP].psrc,
# pkt[ARP].pdst
# we can use any arp packet because we're only going to cap on
# the interface the client is connected to.
if pkt[ARP].op == 2:
# TODO create a dictionary of potential VICTIM IP's and
# MAC's and pick the one with the most hits on it.
vic.ip = pkt[ARP].pdst
vic.mac = pkt[ARP].hwdst
gw.ip = pkt[ARP].psrc
gw.mac = pkt[ARP].hwsrc
except:
# only concerned with arp packets.
pass
vic.gateway = gw.ip
    print(vic)
    print(gw)
'''
try:
# the TTL threshold will be important. we'll also want to
# combine it with knowledge gained about the target/victim.
if int(pkt[IP].ttl) < 38:
pass
# print pkt[Ether].src, pkt[Ether].dst, int(pkt[IP].ttl)
except:
pass
'''
# __EOF__
| sodaphish/break | autoGonk.py | Python | mit | 3,048 |
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2016 Martine Lenders <[email protected]>
#
# Distributed under terms of the MIT license.
from __future__ import print_function
import os
import sys
import random
import subprocess
import time
import types
import pexpect
import socket
DEFAULT_TIMEOUT = 5
class Strategy(object):
def __init__(self, func=None):
if func is not None:
if sys.version_info < (3,):
self.__class__.execute = types.MethodType(func, self, self.__class__)
else:
self.__class__.execute = types.MethodType(func, self)
def execute(self, *args, **kwargs):
raise NotImplementedError()
class ApplicationStrategy(Strategy):
def __init__(self, app_dir=os.getcwd(), func=None):
super(ApplicationStrategy, self).__init__(func)
self.app_dir = app_dir
class BoardStrategy(Strategy):
def __init__(self, board, func=None):
super(BoardStrategy, self).__init__(func)
self.board = board
    def _run_make(self, application, make_targets, env=None):
        run_env = os.environ.copy()
        if env is not None:
            run_env.update(env)
        run_env.update(self.board.to_env())
        cmd = ("make", "-C", application) + make_targets
        print(' '.join(cmd))
        print(subprocess.check_output(cmd, env=run_env))
def execute(self, application):
super(BoardStrategy, self).execute(application)
class CleanStrategy(BoardStrategy):
    def execute(self, application, env=None):
        self._run_make(application, ("-B", "clean"), env)
class BuildStrategy(BoardStrategy):
    def execute(self, application, env=None):
        self._run_make(application, ("all",), env)
class FlashStrategy(BoardStrategy):
    def execute(self, application, env=None):
        self._run_make(application, ("all",), env)
class ResetStrategy(BoardStrategy):
    def execute(self, application, env=None):
        self._run_make(application, ("reset",), env)
class Board(object):
def __init__(self, name, port=None, serial=None, clean=None,
build=None, flash=None,
reset=None, term=None):
def _reset_native_execute(obj, application, env=None, *args, **kwargs):
pass
if (name == "native") and (reset is None):
reset = _reset_native_execute
self.name = name
self.port = port
self.serial = serial
self.clean_strategy = CleanStrategy(self, clean)
self.build_strategy = BuildStrategy(self, build)
self.flash_strategy = FlashStrategy(self, flash)
self.reset_strategy = ResetStrategy(self, reset)
def __len__(self):
return 1
def __iter__(self):
return self
def next(self):
raise StopIteration()
def __repr__(self):
return ("<Board %s,port=%s,serial=%s>" %
(repr(self.name), repr(self.port), repr(self.serial)))
def to_env(self):
env = {}
if self.name:
env['BOARD'] = self.name
if self.port:
env['PORT'] = self.port
if self.serial:
env['SERIAL'] = self.serial
return env
    def clean(self, application=os.getcwd(), env=None):
        self.clean_strategy.execute(application, env)
def build(self, application=os.getcwd(), env=None):
self.build_strategy.execute(application, env)
def flash(self, application=os.getcwd(), env=None):
self.flash_strategy.execute(application, env)
def reset(self, application=os.getcwd(), env=None):
self.reset_strategy.execute(application, env)
class BoardGroup(object):
def __init__(self, boards):
self.boards = boards
def __len__(self):
return len(self.boards)
def __iter__(self):
return iter(self.boards)
def __repr__(self):
return str(self.boards)
def clean(self, application=os.getcwd(), env=None):
for board in self.boards:
board.clean(application, env)
def build(self, application=os.getcwd(), env=None):
for board in self.boards:
board.build(application, env)
def flash(self, application=os.getcwd(), env=None):
for board in self.boards:
board.flash(application, env)
def reset(self, application=os.getcwd(), env=None):
for board in self.boards:
board.reset(application, env)
def default_test_case(board_group, application, env=None):
    for board in board_group:
        run_env = os.environ.copy()
        if env is not None:
            run_env.update(env)
        run_env.update(board.to_env())
        with pexpect.spawnu("make", ["-C", application, "term"], env=run_env,
timeout=DEFAULT_TIMEOUT,
logfile=sys.stdout) as spawn:
spawn.expect("TEST: SUCCESS")
class TestStrategy(ApplicationStrategy):
def execute(self, board_groups, test_cases=[default_test_case],
timeout=DEFAULT_TIMEOUT, env=None):
for board_group in board_groups:
print("Testing for %s: " % board_group)
for test_case in test_cases:
board_group.reset()
                test_case(board_group, self.app_dir, env=env)
sys.stdout.write('.')
sys.stdout.flush()
print()
def get_ipv6_address(spawn):
spawn.sendline(u"ifconfig")
spawn.expect(r"[A-Za-z0-9]{2}_[0-9]+: inet6 (fe80::[0-9a-f:]+)\s")
return spawn.match.group(1)
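# The regex above expects `ifconfig` shell output of roughly this shape
# (interface name and address are illustrative); only the link-local
# address is captured:
#   nc_0: inet6 fe80::2085:80ff:fe6d:6445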
def test_ipv6_send(board_group, application, env=None):
env_sender = os.environ.copy()
if env is not None:
env_sender.update(env)
env_sender.update(board_group.boards[0].to_env())
env_receiver = os.environ.copy()
if env is not None:
env_receiver.update(env)
env_receiver.update(board_group.boards[1].to_env())
with pexpect.spawnu("make", ["-C", application, "term"], env=env_sender,
timeout=DEFAULT_TIMEOUT) as sender, \
pexpect.spawnu("make", ["-C", application, "term"], env=env_receiver,
timeout=DEFAULT_TIMEOUT) as receiver:
ipprot = random.randint(0x00, 0xff)
receiver_ip = get_ipv6_address(receiver)
receiver.sendline(u"ip server start %d" % ipprot)
# wait for neighbor discovery to be done
time.sleep(5)
sender.sendline(u"ip send %s %d 01:23:45:67:89:ab:cd:ef" % (receiver_ip, ipprot))
sender.expect_exact(u"Success: send 8 byte over IPv6 to %s (next header: %d)" %
(receiver_ip, ipprot))
receiver.expect(u"00000000 01 23 45 67 89 AB CD EF")
def test_udpv6_send(board_group, application, env=None):
env_sender = os.environ.copy()
if env is not None:
env_sender.update(env)
env_sender.update(board_group.boards[0].to_env())
env_receiver = os.environ.copy()
if env is not None:
env_receiver.update(env)
env_receiver.update(board_group.boards[1].to_env())
with pexpect.spawnu("make", ["-C", application, "term"], env=env_sender,
timeout=DEFAULT_TIMEOUT) as sender, \
pexpect.spawnu("make", ["-C", application, "term"], env=env_receiver,
timeout=DEFAULT_TIMEOUT) as receiver:
port = random.randint(0x0000, 0xffff)
receiver_ip = get_ipv6_address(receiver)
receiver.sendline(u"udp server start %d" % port)
# wait for neighbor discovery to be done
time.sleep(5)
sender.sendline(u"udp send %s %d ab:cd:ef" % (receiver_ip, port))
sender.expect_exact("Success: send 3 byte over UDP to [{}]:{}"
.format(receiver_ip, port))
receiver.expect(u"00000000 AB CD EF")
def test_tcpv6_send(board_group, application, env=None):
env_client = os.environ.copy()
if env is not None:
env_client.update(env)
env_client.update(board_group.boards[0].to_env())
env_server = os.environ.copy()
if env is not None:
env_server.update(env)
env_server.update(board_group.boards[1].to_env())
with pexpect.spawnu("make", ["-C", application, "term"], env=env_client,
timeout=DEFAULT_TIMEOUT) as client, \
pexpect.spawnu("make", ["-C", application, "term"], env=env_server,
timeout=DEFAULT_TIMEOUT) as server:
port = random.randint(0x0000, 0xffff)
server_ip = get_ipv6_address(server)
client_ip = get_ipv6_address(client)
server.sendline(u"tcp server start %d" % port)
# wait for neighbor discovery to be done
time.sleep(5)
client.sendline(u"tcp connect %s %d" % (server_ip, port))
server.expect(u"TCP client \\[%s\\]:[0-9]+ connected" % client_ip)
client.sendline(u"tcp send affe:abe")
client.expect_exact(u"Success: send 4 byte over TCP to server")
server.expect(u"00000000 AF FE AB E0")
client.sendline(u"tcp disconnect")
client.sendline(u"tcp send affe:abe")
client.expect_exact(u"could not send")
def test_tcpv6_multiconnect(board_group, application, env=None):
if any(b.name != "native" for b in board_group.boards):
# run test only with native
print("SKIP_TEST INFO found non-native board")
return
env_client = os.environ.copy()
if env is not None:
env_client.update(env)
env_client.update(board_group.boards[0].to_env())
env_server = os.environ.copy()
if env is not None:
env_server.update(env)
env_server.update(board_group.boards[1].to_env())
with pexpect.spawnu("make", ["-C", application, "term"], env=env_client,
timeout=DEFAULT_TIMEOUT) as client, \
pexpect.spawnu("make", ["-C", application, "term"], env=env_server,
timeout=DEFAULT_TIMEOUT) as server:
port = random.randint(0x0000, 0xffff)
server_ip = get_ipv6_address(server)
client_ip = get_ipv6_address(client)
try:
connect_addr = socket.getaddrinfo(
"%s%%tapbr0" % server_ip, port)[0][4]
except socket.gaierror as e:
print("SKIP_TEST INFO", e)
return
server.sendline(u"tcp server start %d" % port)
# wait for neighbor discovery to be done
time.sleep(5)
client.sendline(u"tcp connect %s %d" % (server_ip, port))
server.expect(u"TCP client \\[%s\\]:[0-9]+ connected" % client_ip)
with socket.socket(socket.AF_INET6) as sock:
sock.connect(connect_addr)
server.expect(u"Error on TCP accept \\[-[0-9]+\\]")
client.sendline(u"tcp disconnect")
server.expect(u"TCP connection to \\[%s\\]:[0-9]+ reset" % client_ip)
client.sendline(u"tcp connect %s %d" % (server_ip, port))
server.expect(u"TCP client \\[%s\\]:[0-9]+ connected" % client_ip)
client.sendline(u"tcp disconnect")
server.expect(u"TCP connection to \\[%s\\]:[0-9]+ reset" % client_ip)
with socket.socket(socket.AF_INET6) as sock:
sock.connect(connect_addr)
server.expect(u"TCP client \\[[0-9a-f:]+\\]:[0-9]+ connected")
server.expect(u"TCP connection to \\[[0-9a-f:]+\\]:[0-9]+ reset")
def test_triple_send(board_group, application, env=None):
env_sender = os.environ.copy()
if env is not None:
env_sender.update(env)
env_sender.update(board_group.boards[0].to_env())
env_receiver = os.environ.copy()
if env is not None:
env_receiver.update(env)
env_receiver.update(board_group.boards[1].to_env())
with pexpect.spawnu("make", ["-C", application, "term"], env=env_sender,
timeout=DEFAULT_TIMEOUT) as sender, \
pexpect.spawnu("make", ["-C", application, "term"], env=env_receiver,
timeout=DEFAULT_TIMEOUT) as receiver:
udp_port = random.randint(0x0000, 0xffff)
tcp_port = random.randint(0x0000, 0xffff)
ipprot = random.randint(0x00, 0xff)
receiver_ip = get_ipv6_address(receiver)
sender_ip = get_ipv6_address(sender)
receiver.sendline(u"ip server start %d" % ipprot)
receiver.sendline(u"udp server start %d" % udp_port)
receiver.sendline(u"tcp server start %d" % tcp_port)
# wait for neighbor discovery to be done
time.sleep(5)
sender.sendline(u"udp send %s %d 01:23" % (receiver_ip, udp_port))
sender.expect_exact(u"Success: send 2 byte over UDP to [%s]:%d" %
(receiver_ip, udp_port))
receiver.expect(u"00000000 01 23")
sender.sendline(u"ip send %s %d 01:02:03:04" % (receiver_ip, ipprot))
sender.expect_exact(u"Success: send 4 byte over IPv6 to %s (next header: %d)" %
(receiver_ip, ipprot))
receiver.expect(u"00000000 01 02 03 04")
sender.sendline(u"tcp connect %s %d" % (receiver_ip, tcp_port))
receiver.expect(u"TCP client \\[%s\\]:[0-9]+ connected" % sender_ip)
sender.sendline(u"tcp send dead:beef")
sender.expect_exact(u"Success: send 4 byte over TCP to server")
receiver.expect(u"00000000 DE AD BE EF")
if __name__ == "__main__":
TestStrategy().execute([BoardGroup((Board("native", "tap0"),
Board("native", "tap1")))],
[test_ipv6_send, test_udpv6_send, test_tcpv6_send,
test_tcpv6_multiconnect, test_triple_send])
| smlng/RIOT | tests/lwip/tests/01-run.py | Python | lgpl-2.1 | 13,682 |
# -*- coding: utf-8 -*-
# ===========================================================================
# eXe
# Copyright 2013, Pedro Peña Pérez, Open Phoenix IT
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
# ===========================================================================
import sys
import os
from exe.engine.path import Path
from exe import globals as G
class LocationButtons(object):
def __init__(self):
if sys.platform[:3] == "win":
self.names_map = {0: x_('Desktop'),
5: x_('My Documents'),
40: x_('Home Folder')}
elif sys.platform[:6] == "darwin":
self.names_map = {'kDesktopFolderType': x_('Desktop'),
'kDocumentsFolderType': x_('Documents'),
'kCurrentUserFolderType': x_('Home Folder')}
else:
self.names_map = {'DESKTOP': x_('Desktop'),
'DOCUMENTS': x_('Documents'),
'HOME': x_('Home Folder')}
self.compute()
def compute(self):
self.buttons = []
for key, value in self.names_map.items():
try:
button = {'xtype': 'button', 'text': _(value),
'location': self.MapDir(key)}
self.buttons.append(button)
except:
pass
def updateText(self):
i = 0
for value in self.names_map.values():
button = self.buttons[i]
button['text'] = _(value)
i = i + 1
def MapDir(self, code):
if sys.platform[:3] == "win":
try:
from ctypes import WinDLL, create_unicode_buffer
dll = WinDLL('shell32')
result = create_unicode_buffer(260)
resource = dll.SHGetFolderPathW(None, code, None, 0, result)
if resource != 0:
raise Exception
else:
path = result.value
except:
if code == 0:
path = os.environ['HOMEPATH']
else:
raise
elif sys.platform[:6] == "darwin":
try:
from Carbon import Folder, Folders
folderref = Folder.FSFindFolder(Folders.kUserDomain,
getattr(Folders, code), False)
path = folderref.as_pathname()
except:
if code == 'kCurrentUserFolderType':
path = os.environ['HOME']
else:
raise
else:
try:
XDG_USER_DIR_CMD = 'xdg-user-dir'
import subprocess
p = subprocess.Popen([XDG_USER_DIR_CMD, code],
stdout=subprocess.PIPE)
path, _ = p.communicate()
path = path.rstrip('\n')
except:
if code == 'HOME':
path = os.environ['HOME']
elif G.application.snap and code == 'DESKTOP':
path = os.environ.get('XDG_DESKTOP_DIR')
if not path:
raise Exception
                elif G.application.snap and code == 'DOCUMENTS':
                    path = os.environ.get('XDG_DOCUMENTS_DIR')
                    if not path:
                        raise Exception
                else:
                    # re-raise for unhandled codes so 'path' is never unbound
                    raise
return Path(path).abspath()
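# Illustrative usage sketch (not part of the original module; assumes the
# gettext hooks _ and x_ installed by the eXe runtime):
#     buttons = LocationButtons()
#     for button in buttons.buttons:
#         print("%s -> %s" % (button['text'], button['location']))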
| exelearning/iteexe | exe/engine/locationbuttons.py | Python | gpl-2.0 | 4,114 |
def string_matching_rabin_karp(text='', pattern='', hash_base=256):
"""Returns positions where pattern is found in text.
    Worst case: O(nm).
    Average case: O(n+m) when spurious hash matches are rare (e.g. few valid
    matches and a large pattern).
    Performance note: the repeated ord() calls are slow in CPython; a faster
    implementation would precompute the character codes.
Example: text = 'ababbababa', pattern = 'aba'
string_matching_rabin_karp(text, pattern) returns [0, 5, 7]
@param text text to search inside
@param pattern string to search for
@param hash_base base to calculate the hash value
@return list containing offsets (shifts) where pattern is found inside text
"""
n = len(text)
m = len(pattern)
offsets = []
htext = hash_value(text[:m], hash_base)
hpattern = hash_value(pattern, hash_base)
for i in range(n-m+1):
if htext == hpattern:
if text[i:i+m] == pattern:
offsets.append(i)
if i < n-m:
htext = (hash_base *
(htext -
(ord(text[i]) *
(hash_base ** (m-1))))) + ord(text[i+m])
return offsets
def hash_value(s, base):
"""Calculate the hash value of a string using base.
Example: 'abc' = 97 x base^2 + 98 x base^1 + 99 x base^0
@param s string to compute hash value for
@param base base to use to compute hash value
@return hash value
"""
v = 0
p = len(s)-1
for i in range(p+1):
v += ord(s[i]) * (base ** p)
p -= 1
return v
def string_matching_knuth_morris_pratt(text='', pattern=''):
"""Returns positions where pattern is found in text.
O(m+n)
Example: text = 'ababbababa', pattern = 'aba'
string_matching_knuth_morris_pratt(text, pattern) returns [0, 5, 7]
@param text text to search inside
@param pattern string to search for
@return list containing offsets (shifts) where pattern is found inside text
"""
n = len(text)
m = len(pattern)
offsets = []
pi = compute_prefix_function(pattern)
q = 0
for i in range(n):
while q > 0 and pattern[q] != text[i]:
q = pi[q - 1]
if pattern[q] == text[i]:
q = q + 1
if q == m:
offsets.append(i - m + 1)
q = pi[q-1]
return offsets
def compute_prefix_function(p):
m = len(p)
pi = [0] * m
k = 0
for q in range(1, m):
while k > 0 and p[k] != p[q]:
k = pi[k - 1]
if p[k] == p[q]:
k = k + 1
pi[q] = k
return pi
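# Minimal self-check of both matchers (illustrative; the sample values mirror
# the docstring examples and are not part of the original module):
if __name__ == '__main__':
    sample_text, sample_pattern = 'ababbababa', 'aba'
    assert string_matching_rabin_karp(sample_text, sample_pattern) == [0, 5, 7]
    assert string_matching_knuth_morris_pratt(sample_text, sample_pattern) == [0, 5, 7]
    print('both matchers found offsets [0, 5, 7]')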
| hadyelsahar/RE-NLG-Dataset | utils/matching.py | Python | mit | 2,527 |
#!/usr/bin/env python
# vim: sw=4:ts=4:sts=4:fdm=indent:fdl=0:
# -*- coding: UTF8 -*-
#
# I/O Utility functions.
# Copyright (C) 2012 Josiah Gordon <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
""" get_codec Function for loading the default/first filetype codecs
get_io Function for loading the default/first device
"""
from contextlib import contextmanager
from importlib import import_module
from os.path import splitext as os_splitext
from os.path import join as os_join
from os.path import basename as os_basename
from os import listdir as os_listdir
from os.path import isdir as os_isdir
from os.path import abspath as os_abspath
from os.path import dirname as os_dirname
from ctypes.util import find_library as ctypes_find_library
try:
from .magic import magic as _magic
except OSError:
_magic = None
from .io_base import AudioIO, DevIO
# Codec cache dictionary
__codec_cache = {}
# Audio IO device cache dictionary
__io_cache = {}
# Set to True to enable debug output.
DEBUG = False
def msg_out(message, *args):
""" Print message if DEBUG is True.
"""
if DEBUG:
print(message)
def slice_buffer(data, size):
""" slice_buffer(data, size) -> A generator that yields slices of bytes
from the data buffer.
"""
for i in range(0, len(data), size):
yield data[i:i + size]
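# Example (illustrative): list(slice_buffer(b'abcdef', 4)) yields
# [b'abcd', b'ef'].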
def _build_mod_list(mod_path, suffix, blacklist):
""" _build_mod_list(mod_path, suffix) -> Add all the paths in mod_path to
sys.path and return a list of all modules in sys.path ending in suffix.
"""
from sys import path as sys_path
mod_path = [mod_path] if type(mod_path) is str else mod_path
blacklist = [blacklist] if type(blacklist) is str else blacklist
# Add suffix to all names in blacklist.
blacklist.extend(['%s%s' % (name, suffix) for name in blacklist
if not name.endswith(suffix)])
# Add the path of this file to the search path.
mod_path.append(os_abspath(os_dirname(__file__)))
    # Add the path(s) in mod_path to sys.path so we can import from them.
    for path in mod_path:
        if path not in sys_path:
            sys_path.extend((path, os_dirname(path.rstrip('/'))))
# Build the list of modules ending in suffix in the mod_path(s).
mod_list = ((path, name) for path in sys_path
if os_isdir(path)
for name in os_listdir(path)
if name.endswith(suffix) and
name not in blacklist)
return mod_list
def _check_dependencies(dependencies):
""" Returns True if all the dependencies pass.
"""
# Loop through the dependencies and check each one.
for key, value in dependencies.items():
if key == 'ctypes':
# Check for c libraries.
if not all((ctypes_find_library(lib) for lib in value)):
return False
elif key == 'python':
# Check for python modules.
try:
[__import__(mod) for mod in value]
except ImportError as err:
return False
# Everything passed.
return True
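# Example (illustrative; the library and module names are hypothetical): a
# codec module might declare
#     __supported_dict = {'dependencies': {'ctypes': ['mpg123'],
#                                          'python': ['wave']}}
# in which case _check_dependencies returns True only when libmpg123 can be
# located and the wave module imports cleanly.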
def get_codec(filename, mod_path=[], cached=True,
blacklist=[]):
""" get_codec(filename, mod_path=[], cached=True, blacklist=[]) -> Load the
codecs in the path and return the first one that can play the file, or the
one with the default attribute set.
filename The file the codec needs to handle
mod_path Additional search paths for modules
cached Use cached codecs if available
blacklist Modules not to load
"""
# Codec cache dictionary
global __codec_cache
from urlparse import urlparse
from .import_util import load_lazy_import, unload_lazy_import
# Get the file extension.
file_ext = os_splitext(filename)[1].lower()
# Get protocol.
file_prot = urlparse(filename).scheme
if cached:
        # Return an already cached codec.
if file_ext in __codec_cache:
return __codec_cache[file_ext]
elif file_prot in __codec_cache:
return __codec_cache[file_prot]
# Get a list of modules ending in '_file.py'
mod_list = _build_mod_list(mod_path, '_file.py', blacklist)
codec = None
dummy = None
# Make importing lazy.
# load_lazy_import(mod_path=mod_path)
# This packages name.
this_pkgname = __name__.split('.', 1)[0]
# Load the codec module that can handle file.
for path, name in mod_list:
# Get the package name from path.
pkgname = os_basename(path.rstrip('/'))
# Import the package if it is different from this one.
if pkgname != this_pkgname and pkgname:
try:
__import__(pkgname)
except ImportError as err:
continue
# Load the module.
try:
module = import_module('.%s' % os_splitext(name)[0], pkgname)
except ImportError as err:
print("Skipping module: (%s) because of error: %s" % (name, err))
continue
# Get the filetypes and handler from module.
supported_dict = getattr(module, '__supported_dict', {})
# Get the handler.
handler = getattr(module, supported_dict.get('handler', 'dummy'), None)
# Don't even check this module if it does not have a handler.
if not handler:
continue
# Try not to use the dummy handler.
if 'dummy' in name:
dummy = handler
continue
# Check the module dependencies.
dependencies = supported_dict.get('dependencies', {})
if not _check_dependencies(dependencies):
continue
issupported = supported_dict.get('issupported', lambda *a: False)
ext = supported_dict.get('ext', [])
protocol = supported_dict.get('protocol', [])
default = supported_dict.get('default', False)
# Add filetype handlers to the codec cache.
__codec_cache.update(((key, handler) for key in ext))
# Add protocol handlers to the codec cache.
__codec_cache.update(((key, handler) for key in protocol))
# Check if filename is supported.
if issupported(filename) or file_ext in ext or file_prot in protocol:
codec = handler
if default: break
elif not codec and '.*' in ext:
codec = handler
# Turn off lazy imports.
# unload_lazy_import()
# No codec was found so default to the dummy codec.
if not codec: codec = dummy
return codec
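# Illustrative call (the filename and extra search path are hypothetical):
#     codec = get_codec('/music/song.ogg', mod_path=['/opt/musio_codecs'])
#     audio_file = codec('/music/song.ogg', mode='r') if codec else None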
def get_io(fileobj, mod_path=[], cached=True,
blacklist=[]):
""" get_io(fileobj, mod_path=[], cached=True, blacklist=[]) -> Finds a
audio device that can take the data read from fileobj and returns it.
"""
# IO device cache dictionary
global __io_cache
from .import_util import load_lazy_import, unload_lazy_import
# Get the file input data type.
annotations = getattr(getattr(fileobj, 'read'), '__annotations__', {})
file_input = annotations.get('return', unicode)
# Get the file output data type.
annotations = getattr(getattr(fileobj, 'write'), '__annotations__', {})
file_output = annotations.get('data', unicode)
if cached:
        # Return an already cached audio device.
if file_input in __io_cache:
return __io_cache[file_input]
elif file_output in __io_cache:
return __io_cache[file_output]
# Get a list of modules ending in '_io.py'
mod_list = _build_mod_list(mod_path, '_io.py', blacklist)
device = None
dummy = None
# Make importing lazy.
# load_lazy_import(mod_path=mod_path)
# This packages name.
this_pkgname = __name__.split('.', 1)[0]
# Load the codec module that can handle file.
for path, name in mod_list:
# Get the package name from path.
pkgname = os_basename(path.rstrip('/'))
# Import the package if it is different from this one.
if pkgname != this_pkgname and pkgname:
try:
__import__(pkgname)
except ImportError as err:
continue
# Load the module.
module = import_module('.%s' % os_splitext(name)[0], pkgname)
# Get the filetypes and handler from module.
supported_dict = getattr(module, '__supported_dict', {})
handler = getattr(module, supported_dict.get('handler', 'dummy'), None)
if not handler:
continue
# Try not to use the dummy.
if 'dummy' in name:
dummy = handler
continue
# Check the module dependencies.
dependencies = supported_dict.get('dependencies', {})
if not _check_dependencies(dependencies):
continue
input_t = supported_dict.get('input', [])
output_t = supported_dict.get('output', [])
default = supported_dict.get('default', False)
# Add device input to io cache
__io_cache.update(((key, handler) for key in input_t))
# Add device output to io cache.
__io_cache.update(((key, handler) for key in output_t))
# Check if filename is supported.
if 'r' in fileobj.mode and file_input in output_t:
device = handler
if default: break
elif 'w' in fileobj.mode and file_output in input_t:
device = handler
if default: break
# Turn off lazy imports.
# unload_lazy_import()
# No device was found so use the dummy_io.
if not device: device = dummy
return device
def open_file(filename, mode='r', mod_path=[],
**kwargs):
""" open_file(filename, mode='r') -> Returns the open file.
"""
blacklist = kwargs.get('blacklist', [])
open_codec = None
# Loop until a codec is found that can open the file.
while not open_codec:
codec = get_codec(filename, mod_path=mod_path, blacklist=blacklist,
cached=False)
if not codec:
raise IOError("Error opening codec %s." % codec)
try:
open_codec = codec(filename, mode=mode, **kwargs)
open_codec.loops = kwargs.get('loops', -1)
except IOError as err:
return_err = err
print('Blacklisting (%s) because of error: %s' % (codec, err))
mod_name = '%s.py' % codec.__module__.split('.')[-1]
# Add the module to the blacklist.
blacklist.append(mod_name)
return open_codec
def open_device(fileobj, mode='w', mod_path=[],
**kwargs):
""" open_device(fileobj, mode='r') -> Returns an open audio device.
"""
blacklist = kwargs.get('blacklist', [])
dev_name = kwargs.get('device', 'default')
rate = kwargs.get('rate', fileobj.rate)
while True:
# Get the supported device
device = get_io(fileobj, mod_path=mod_path, blacklist=blacklist,
cached=False)
if not device:
raise IOError("Error opening device %s." % device)
# Open and return the device.
try:
result = device(mode=mode, rate=rate, channels=fileobj.channels,
depth=fileobj.depth, bigendian=fileobj.bigendian,
unsigned=fileobj.unsigned, floatp=fileobj.floatp,
device=dev_name, three_byte=fileobj.three_byte)
break
except Exception as err:
return_err = err
print('Blacklisting (%s) because of error: %s' % (device, err))
mod_name = '%s.py' % device.__module__.split('.')[-1]
# Add the module to the blacklist.
blacklist.append(mod_name)
continue
return result
@contextmanager
def silence(fd):
""" silence(fd) -> Silence any output from fd.
"""
from os import dup as os_dup
from os import pipe as os_pipe
from os import dup2 as os_dup2
from os import close as os_close
from os import fdopen as os_fdopen
# Backup the file
old_fd = fd
# Flush the file so it can be silenced properly.
fd.flush()
# Create a duplicate of fd
new_fd = os_dup(fd.fileno())
# Create a pipe to write to.
read, write = os_pipe()
# Set the write to the fd filenumber
os_dup2(write, fd.fileno())
# Close the pipe.
os_close(write)
os_close(read)
# Set fd to the new fd
fd = os_fdopen(new_fd, 'w')
try:
# Run the commands in the 'with' statement.
yield
finally:
# Return the fd back to its original state.
os_dup2(fd.fileno(), old_fd.fileno())
fd = old_fd
@contextmanager
def py_silence(new_stdout=None,
new_stderr=None):
""" py_silence(new_stdout, new_stderr) -> Silence any output from fd. In
python.
"""
from os import devnull as os_devnull
import sys
    # Default to the null device so output is actually silenced when no
    # replacement streams are supplied (sys.stdout would defeat the purpose,
    # and assigning None would break later writes).
    stdout = new_stdout or open(os_devnull, 'w')
    stderr = new_stderr or open(os_devnull, 'w')
# Backup the file
old_stdout = sys.stdout
old_stderr = sys.stderr
# Flush the file so it can be silenced properly.
sys.stdout.flush()
sys.stderr.flush()
    sys.stdout, sys.stderr = stdout, stderr
try:
# Run the commands in the 'with' statement.
yield
finally:
old_stdout.flush()
old_stderr.flush()
# Return the fd back to its original state.
sys.stdout = old_stdout
sys.stderr = old_stderr
class Magic(object):
""" Magic object for testing string encoding.
"""
def __init__(self, flags=1024):
""" Magic(flags=magic.MAGIC_MIME_ENCODING) -> Object for testing text
encoding.
"""
if not _magic:
return None
self._magic = _magic.magic_open(flags)
if _magic.magic_load(self._magic, None) != 0:
print("Error: %s" % _magic.magic_error(self._magic).decode())
def check(self, data):
""" Return the encoding of data.
"""
if not _magic:
return b'utf8'
return _magic.magic_buffer(self._magic, data, len(data))
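# Illustrative usage (returns b'utf8' when the bundled libmagic wrapper is
# unavailable; the filename is hypothetical):
#     with open('somefile.txt', 'rb') as f:
#         encoding = Magic().check(f.read())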
| zepto/musio-python2 | musio/io_util.py | Python | gpl-3.0 | 14,987 |
# -*- coding: utf-8 -*-
#
# evaluate_quantal_stp_synapse.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
'''
Example for the quantal_stp_synapse
-----------------------------------
The quantal_stp_synapse is a stochastic version of the Tsodyks-Markram model for synaptic short term plasticity (STP).
This script compares the two variants of the Tsodyks/Markram synapse in NEST.
This synapse model implements synaptic short-term depression and
short-term facilitation according to the quantal release model
described by Fuhrmann et al. [1] and Loebel et al. [2].
Each presynaptic spike will stochastically activate a fraction of
the available release sites. This fraction is binomially
distributed and the release probability per site is governed by the
Fuhrmann et al. (2002) model. The solution of the differential
equations is taken from Maass and Markram 2002 [3].
The connection weight is interpreted as the maximal weight that can
be obtained if all n release sites are activated.
Parameters:
The following parameters can be set in the status dictionary:
U double - Maximal fraction of available resources [0,1], default=
u double - available fraction of resources [0,1], default=0.5
p double - probability that a vesicle is available, default = 1.0
n long - total number of release sites, default = 1
a long - number of available release sites, default = n
tau_rec double - time constant for depression in ms, default=800 ms
tau_fac double - time constant for facilitation in ms, default=0 (off)
References:
[1] Fuhrmann, G., Segev, I., Markram, H., & Tsodyks, M. V. (2002). Coding of
information by activity-dependent synapses. Journal of neurophysiology, 8
[2] Loebel, A., Silberberg, G., Helbig, D., Markram, H., Tsodyks,
M. V, & Richardson, M. J. E. (2009). Multiquantal release underlies
the distribution of synaptic efficacies in the neocortex. Frontiers
in computational neuroscience, 3(November), 27. doi:10.3389/neuro.10.027.
[3] Maass, W., & Markram, H. (2002). Synapses as dynamic memory buffers. Neur
'''
import nest
import nest.voltage_trace
import numpy
import pylab
nest.ResetKernel()
'''
On average, the quantal_stp_synapse converges to the tsodyks2_synapse, so we can compare the two by running multiple trials.
First we define the number of trials as well as the number of release sites.
'''
n_syn = 10.0    # number of synapses in a connection
n_trials = 100  # number of measurement trials
'''
Next, we define parameter sets for facilitation
'''
fac_params={"U":0.02, "u":0.02, "tau_fac":500., "tau_rec":200.,"weight":1.}
'''
Then, we assign the parameter set to the synapse models
'''
t1_params=fac_params # for tsodyks2_synapse
t2_params=t1_params.copy() # for quantal_stp_synapse
t1_params['x']=t1_params['U']
t2_params['n']=n_syn
'''
To make the responses comparable, we have to scale the weight by the number of
synapses.
'''
t2_params['weight']=1./n_syn
'''
Next, we change the defaults of the various models to our parameters.
'''
nest.SetDefaults("tsodyks2_synapse",t1_params)
nest.SetDefaults("quantal_stp_synapse",t2_params)
nest.SetDefaults("iaf_psc_exp",{"tau_syn_ex": 3.})
'''
We create three different neurons.
Neuron one is the sender, the two other neurons receive the synapses.
'''
neuron = nest.Create("iaf_psc_exp",3)
'''
The connection from neuron 1 to neuron 2 is a deterministic synapse.
'''
nest.Connect([neuron[0]],[neuron[1]],syn_spec="tsodyks2_synapse")
'''
The connection from neuron 1 to neuron 3 has a stochastic quantal_stp_synapse.
'''
nest.Connect([neuron[0]],[neuron[2]],syn_spec="quantal_stp_synapse")
'''
The voltmeter will show us the synaptic responses in neurons 2 and 3.
'''
voltmeter = nest.Create("voltmeter",2)
nest.SetStatus(voltmeter, {"withgid": True, "withtime": True})
'''
One dry run to bring all synapses into their rest state.
The default initialization does not achieve this. In large network simulations this problem does not show, but in small simulations like this, we would see it.
'''
nest.SetStatus([neuron[0]], "I_e", 376.0)
nest.Simulate(500.0)
nest.SetStatus([neuron[0]], "I_e", 0.0)
nest.Simulate(1000.0)
'''
Only now do we connect the voltmeter to the neurons.
'''
nest.Connect([voltmeter[0]], [neuron[1]])
nest.Connect([voltmeter[1]], [neuron[2]])
'''
This loop runs over the n_trials trials and performs a standard protocol of a high-rate response, followed by a pause and then a recovery response.
'''
for t in range(n_trials):
nest.SetStatus([neuron[0]], "I_e", 376.0)
nest.Simulate(500.0)
nest.SetStatus([neuron[0]], "I_e", 0.0)
nest.Simulate(1000.0)
'''
Flush the last voltmeter events from the queue by simulating one time-step.
'''
nest.Simulate(.1)
'''
Extract the reference trace.
'''
vm= numpy.array(nest.GetStatus([voltmeter[1]],'events')[0]['V_m'])
vm_reference=numpy.array(nest.GetStatus([voltmeter[0]],'events')[0]['V_m'])
vm.shape=(n_trials,1500)
vm_reference.shape=(n_trials,1500)
'''
Now compute the mean over all trials and plot it against the reference.
'''
vm_mean=numpy.array([numpy.mean(vm[:,i]) for (i,j) in enumerate(vm[0,:])])
vm_ref_mean=numpy.array([numpy.mean(vm_reference[:,i]) for (i,j) in enumerate(vm_reference[0,:])])
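# Note: an equivalent, more idiomatic form of the two averages above would be
# vm.mean(axis=0) and vm_reference.mean(axis=0) (a sketch, not part of the
# original protocol).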
pylab.plot(vm_mean)
pylab.plot(vm_ref_mean)
'''
Finally, print the mean-squared error between the trial-average and the reference trace. The value should be < 10^-9.
'''
print (numpy.mean((vm_ref_mean-vm_mean)**2))
#pylab.show()
| kristoforcarlson/nest-simulator-fork | pynest/examples/evaluate_quantal_stp_synapse.py | Python | gpl-2.0 | 6,184 |
# This file is part of exhale: https://github.com/svenevs/exhale
#
# This file was generated on/around (date -Ru):
#
# Tue, 08 Nov 2016 07:18:48 +0000
#
# Copyright (c) 2016, Stephen McDowell
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of exhale nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from breathe.parser.index import parse as breathe_parse
import sys
import re
import os
import itertools
try:
# Python 2 StringIO
from cStringIO import StringIO
except ImportError:
# Python 3 StringIO
from io import StringIO
__all__ = ['generate', 'ExhaleRoot', 'ExhaleNode', 'exclaimError', 'qualifyKind',
'kindAsBreatheDirective', 'specificationsForKind', 'EXHALE_FILE_HEADING',
'EXHALE_SECTION_HEADING', 'EXHALE_SUBSECTION_HEADING']
__name__ = "exhale"
__docformat__ = "reStructuredText"
EXHALE_API_TOCTREE_MAX_DEPTH = 5 # DO NOT EXPOSE
'''
The value used as ``:maxdepth:`` with restructured text ``.. toctree::`` directives.
The default value is 5, as any larger will likely produce errors with a LaTeX build.
Change this value by specifying the proper value to the dictionary passed to the
`generate` function.
'''
EXHALE_API_DOXY_OUTPUT_DIR = "" # DO NOT EXPOSE
'''
The path to the doxygen xml output **directory**, relative to ``conf.py`` (or whichever
file is calling `generate`. This value **must** be set for `generate` to be able to do
anything.
'''
EXHALE_API_DOXYGEN_STRIP_FROM_PATH = None # DO NOT EXPOSE
'''
Accounts for broken STRIP_FROM_PATH handling on RTD.
'''
EXHALE_GENERATE_BREATHE_FILE_DIRECTIVES = False # DO NOT EXPOSE
'''
Currently, Exhale (read: I) does not know how to extract the documentation string for a given
file being produced. If True, then the breathe directive (``doxygenfile``) will be
incorporated at the bottom of the file. This will duplicate a lot of information, but
will include the file's description at the beginning.
'''
EXHALE_FILE_HEADING = "=" * 88
''' The restructured text file heading separator (``"=" * 88``). '''
EXHALE_SECTION_HEADING = "-" * 88
''' The restructured text section heading separator (``"-" * 88``). '''
EXHALE_SUBSECTION_HEADING = "*" * 88
''' The restructured text sub-section heading separator (``"*" * 88``).'''
EXHALE_CUSTOM_SPECIFICATIONS_FUNCTION = None # DO NOT EXPOSE
'''
User specified override of `specificationsForKind`. No safety checks are performed for
externally provided functions. Change the functionality of `specificationsForKind` by
specifying a function in the dictionary passed to `generate`.
'''
########################################################################################
#
##
###
####
##### Primary entry point.
####
###
##
#
########################################################################################
def generate(exhaleArgs):
'''
The main entry point to exhale, which parses and generates the full API.
:Parameters:
``exhaleArgs`` (dict)
The dictionary of arguments to configure exhale with. All keys are strings,
and most values should also be strings. See below.
**Required Entries:**
**key**: ``"doxygenIndexXMLPath"`` --- value type: ``str``
The absolute or relative path to where the Doxygen index.xml is. A relative
path must be relative to the file **calling** exhale.
**key**: ``"containmentFolder"`` --- value type: ``str``
The folder the generated API will be created in. If the folder does not exist,
exhale will create the folder. The path can be absolute, or relative to the
file that is **calling** exhale. For example, ``"./generated_api"``.
**key**: ``"rootFileName"`` --- value type: ``str``
The name of the file that **you** will be linking to from your reStructuredText
documents. Do not include the ``containmentFolder`` path in this file name,
exhale will create the file ``"{}/{}".format(containmentFolder, rootFileName)``.
In order for Sphinx to be happy, you should include a ``.rst`` suffix. All of
the generated API uses reStructuredText, and that will not ever change.
For example, if you specify
- ``"containmentFolder" = "./generated_api"``, and
- ``"rootFileName" = "library_root.rst"``
Then exhale will generate the file ``./generated_api/library_root.rst``.
You could include this file in a toctree directive (say in ``index.rst``) with::
            .. toctree::
:maxdepth: 2
generated_api/library_root
Since Sphinx allows for some flexibility (e.g. your primary domain may be using
``.txt`` files), **no error checking will be performed**.
**key**: ``"rootFileTitle"`` --- value type: ``str``
The title to be written at the top of ``rootFileName``, which will appear in
your file including it in the ``toctree`` directive.
**key**: ``"doxygenStripFromPath"`` --- value type: ``str``
When building on Read the Docs, there seem to be issues regarding the Doxygen
variable ``STRIP_FROM_PATH`` when built remotely. That is, it isn't stripped at
all. Provide me with a string path (e.g. ``".."``), and I will strip this for
you for the File nodes being generated. I will use the exact value of
``os.path.abspath("..")`` in the example above, so you can supply either a
relative or absolute path. The File view hierarchy **will** break if you do
not give me a value for this, and therefore I hesitantly require this argument.
The value ``".."`` assumes that ``conf.py`` is in a ``docs/`` or similar folder
exactly one level below the repository's root.
**Additional Options:**
**key**: ``"afterTitleDescription"`` --- value type: ``str``
Properly formatted reStructuredText with **no indentation** to be included
directly after the title. You can use any rst directives or formatting you wish
in this string. I suggest using the ``textwrap`` module, e.g.::
description = textwrap.dedent(\'\'\'
This is a description of the functionality of the library being documented.
.. warning::
Please be advised that this library does not do anything.
\'\'\')
Then you can add ``"afterTitleDescription" = description`` to your dictionary.
**key**: ``"afterBodySummary"`` --- value type: ``str``
Similar to ``afterTitleDescription``, this is a string with reStructuredText
formatting. This will be inserted after the generated API body. The layout
looks something like this::
rootFileTitle
============================================================================
afterTitleDescription (if provided)
[[[ GENERATED API BODY ]]]
afterBodySummary (if provided)
**key**: ``"createTreeView"`` --- value type: ``bool``
For portability, the default value if not specified is ``False``, which will
generate reStructuredText bulleted lists for the Class View and File View
hierarchies. If ``True``, raw html unordered lists will be generated. Please
refer to the *Clickable Hierarchies* subsection of :ref:`usage_advanced_usage`
for more details.
**key**: ``"fullToctreeMaxDepth"`` --- value type: ``int``
Beneath the Class View and File View hierarchies a Full API listing is generated
as there are items that may not appear in the Class View hierarchy, as well as
without this an obscene amount of warnings are generated from Sphinx because
neither view actually uses a ``toctree``, they link directly.
The default value is 5 if not specified, but you may want to give a smaller
value depending on the framework being documented. This value must be greater
than or equal to 1 (this is the value of ``:maxdepth:``).
**key**: ``"appendBreatheFileDirective"`` --- value type: ``bool``
Currently, I do not know how to reliably extract the brief / detailed file
descriptions for a given file node. Therefore, if you have file level
documentation in your project that has meaning, it would otherwise be omitted.
As a temporary patch, if you specify this value as ``True`` then at the bottom
of the file page the full ``doxygenfile`` directive output from Breathe will
        be appended to the file documentation. File level brief and detailed
descriptions will be included, followed by a large amount of duplication. I
hope to remove this value soon, in place of either parsing the xml more
carefully or finding out how to extract this information directly from Breathe.
The default value of this behavior is ``False`` if it is not specified in the
dictionary passed as input for this method. Please refer to the *Customizing
File Pages* subsection of :ref:`usage_customizing_file_pages` for more
information on what the impact of this variable is.
**key**: ``"customSpecificationFunction"`` --- value type: ``function``
The custom specification function to override the default behavior of exhale.
Please refer to the :func:`exhale.specificationsForKind` documentation.
:raises ValueError:
If the required dictionary arguments are not present, or any of the (key, value)
pairs are invalid.
:raises RuntimeError:
If any **fatal** error is caught during the generation of the API.
'''
if type(exhaleArgs) is not dict:
raise ValueError("The type of 'exhaleArgs' must be a dictionary.")
# Gather mandatory input
if "doxygenIndexXMLPath" not in exhaleArgs:
raise ValueError("'doxygenIndexXMLPath' must be present in the arguments to generate.")
try:
global EXHALE_API_DOXY_OUTPUT_DIR
doxygenIndexXMLPath = exhaleArgs["doxygenIndexXMLPath"]
EXHALE_API_DOXY_OUTPUT_DIR = doxygenIndexXMLPath.split("index.xml")[0]
except Exception as e:
raise ValueError("Unable to utilize the provided 'doxygenIndexXMLPath'\n{}".format(e))
if "containmentFolder" not in exhaleArgs:
raise ValueError("'containmentFolder' must be present in the arguments to generate.")
containmentFolder = exhaleArgs["containmentFolder"]
if type(containmentFolder) is not str:
raise ValueError("The type of the value for the key 'containmentFolder' must be a string.")
if "rootFileName" not in exhaleArgs:
raise ValueError("'rootFileName' must be present in the arguments passed to generate.")
rootFileName = exhaleArgs["rootFileName"]
if type(rootFileName) is not str:
raise ValueError("The type of the value for the key 'rootFileName' must be a string.")
if "rootFileTitle" not in exhaleArgs:
raise ValueError("'rootFileTitle' must be present in the arguments passed to generate.")
rootFileTitle = exhaleArgs["rootFileTitle"]
if type(rootFileTitle) is not str:
raise ValueError("The type of the value for the key 'rootFileTitle' must be a string.")
if "doxygenStripFromPath" not in exhaleArgs:
raise ValueError("'doxygenStripFromPath' must be present in the arguments passed to generate.")
doxygenStripFromPath = exhaleArgs["doxygenStripFromPath"]
if type(doxygenStripFromPath) is not str:
raise ValueError("The type of the value for the key 'doxygenStripFromPath' must be a string.")
try:
strip = os.path.abspath(doxygenStripFromPath)
if not os.path.isdir(strip):
raise ValueError("The value for the key 'doxygenStripFromPath' does not appear to be a valid path")
except Exception as e:
raise RuntimeError("Error coordinating the 'doxygenStripFromPath' variable: {}".format(e))
global EXHALE_API_DOXYGEN_STRIP_FROM_PATH
EXHALE_API_DOXYGEN_STRIP_FROM_PATH = strip
# gather the optional configurations
if "afterTitleDescription" in exhaleArgs:
afterTitleDescription = exhaleArgs["afterTitleDescription"]
if type(afterTitleDescription) is not str:
raise ValueError("The type of the value for the key 'afterTitleDescription' must be a string.")
else:
afterTitleDescription = ""
if "afterBodySummary" in exhaleArgs:
afterBodySummary = exhaleArgs["afterBodySummary"]
if type(afterBodySummary) is not str:
raise ValueError("The type of the value for the key 'afterBodySummary' must be a string.")
else:
afterBodySummary = ""
if "createTreeView" in exhaleArgs:
createTreeView = exhaleArgs["createTreeView"]
if type(createTreeView) is not bool:
raise ValueError("The type of the value for the key 'createTreeView' must be a boolean.")
else:
createTreeView = False
if "fullToctreeMaxDepth" in exhaleArgs:
fullToctreeMaxDepth = exhaleArgs["fullToctreeMaxDepth"]
if type(fullToctreeMaxDepth) is not int:
raise ValueError("The type of the value for the key 'fullToctreeMaxDepth' must be an int.")
global EXHALE_API_TOCTREE_MAX_DEPTH
EXHALE_API_TOCTREE_MAX_DEPTH = fullToctreeMaxDepth
if "appendBreatheFileDirective" in exhaleArgs:
appendBreatheFileDirective = exhaleArgs["appendBreatheFileDirective"]
if type(appendBreatheFileDirective) is not bool:
raise ValueError("The type of the value for the key 'appendBreatheFileDirective' must be a boolean.")
global EXHALE_GENERATE_BREATHE_FILE_DIRECTIVES
EXHALE_GENERATE_BREATHE_FILE_DIRECTIVES = appendBreatheFileDirective
if "customSpecificationFunction" in exhaleArgs:
customSpecificationFunction = exhaleArgs["customSpecificationFunction"]
try:
ret = customSpecificationFunction("class")
except:
raise ValueError("Unable to call your custom specification function with 'class' as input...")
if type(ret) is not str:
raise ValueError("Your custom specification function did not return a string...")
global EXHALE_CUSTOM_SPECIFICATIONS_FUNCTION
EXHALE_CUSTOM_SPECIFICATIONS_FUNCTION = customSpecificationFunction
# input gathered, try creating the breathe root compound
try:
breatheRoot = breathe_parse(doxygenIndexXMLPath)
except Exception as e:
raise RuntimeError("Unable to use Breathe to parse the specified doxygen index.xml: {}".format(e))
if breatheRoot is not None:
# split into multiple try-except blocks to make it a little easier to identify
# where the error comes from
try:
textRoot = ExhaleRoot(breatheRoot, containmentFolder, rootFileName,
rootFileTitle, afterTitleDescription,
afterBodySummary, createTreeView)
except Exception as e:
raise RuntimeError("Exception caught creating the ExhaleRoot object: {}".format(e))
try:
textRoot.parse()
except Exception as e:
raise RuntimeError("Exception caught while parsing: {}".format(e))
try:
textRoot.generateFullAPI()
except Exception as e:
raise RuntimeError("Exception caught while generating: {}".format(e))
else:
raise RuntimeError("Critical error: the returned Breathe root is 'None'.")
########################################################################################
#
##
###
####
##### Utility / helper functions.
####
###
##
#
########################################################################################
def qualifyKind(kind):
'''
Qualifies the breathe ``kind`` and returns an qualifier string describing this
to be used for the text output (e.g. in generated file headings and link names).
The output for a given kind is as follows:
+-------------+------------------+
| Input Kind | Output Qualifier |
+=============+==================+
| "class" | "Class" |
+-------------+------------------+
| "define" | "Define" |
+-------------+------------------+
| "enum" | "Enum" |
+-------------+------------------+
| "enumvalue" | "Enumvalue" |
+-------------+------------------+
| "file" | "File" |
+-------------+------------------+
| "function" | "Function" |
+-------------+------------------+
| "group" | "Group" |
+-------------+------------------+
| "namespace" | "Namespace" |
+-------------+------------------+
| "struct" | "Struct" |
+-------------+------------------+
| "typedef" | "Typedef" |
+-------------+------------------+
| "union" | "Union" |
+-------------+------------------+
| "variable" | "Variable" |
+-------------+------------------+
The following breathe kinds are ignored:
- "autodoxygenfile"
- "doxygenindex"
- "autodoxygenindex"
Note also that although a return value is generated, neither "enumvalue" nor
"group" are actually used.
:Parameters:
``kind`` (str)
The return value of a Breathe ``compound`` object's ``get_kind()`` method.
:Return (str):
The qualifying string that will be used to build the reStructuredText titles and
other qualifying names. If the empty string is returned then it was not
recognized.
'''
if kind == "class":
qualifier = "Class"
elif kind == "struct":
qualifier = "Struct"
elif kind == "function":
qualifier = "Function"
elif kind == "enum":
qualifier = "Enum"
elif kind == "enumvalue":# unused
qualifier = "Enumvalue"
elif kind == "namespace":
qualifier = "Namespace"
elif kind == "define":
qualifier = "Define"
elif kind == "typedef":
qualifier = "Typedef"
elif kind == "variable":
qualifier = "Variable"
elif kind == "file":
qualifier = "File"
elif kind == "dir":
qualifier = "Directory"
elif kind == "group":
qualifier = "Group"
elif kind == "union":
qualifier = "Union"
else:
qualifier = ""
return qualifier
def kindAsBreatheDirective(kind):
'''
Returns the appropriate breathe restructured text directive for the specified kind.
The output for a given kind is as follows:
+-------------+--------------------+
| Input Kind | Output Directive |
+=============+====================+
| "class" | "doxygenclass" |
+-------------+--------------------+
| "define" | "doxygendefine" |
+-------------+--------------------+
| "enum" | "doxygenenum" |
+-------------+--------------------+
| "enumvalue" | "doxygenenumvalue" |
+-------------+--------------------+
| "file" | "doxygenfile" |
+-------------+--------------------+
| "function" | "doxygenfunction" |
+-------------+--------------------+
| "group" | "doxygengroup" |
+-------------+--------------------+
| "namespace" | "doxygennamespace" |
+-------------+--------------------+
| "struct" | "doxygenstruct" |
+-------------+--------------------+
| "typedef" | "doxygentypedef" |
+-------------+--------------------+
| "union" | "doxygenunion" |
+-------------+--------------------+
| "variable" | "doxygenvariable" |
+-------------+--------------------+
The following breathe kinds are ignored:
- "autodoxygenfile"
- "doxygenindex"
- "autodoxygenindex"
Note also that although a return value is generated, neither "enumvalue" nor
"group" are actually used.
:Parameters:
``kind`` (str)
The kind of the breathe compound / ExhaleNode object (same values).
:Return (str):
The directive to be used for the given ``kind``. The empty string is returned
for both unrecognized and ignored input values.
'''
if kind == "class":
directive = "doxygenclass"
elif kind == "struct":
directive = "doxygenstruct"
elif kind == "function":
directive = "doxygenfunction"
elif kind == "enum":
directive = "doxygenenum"
elif kind == "enumvalue":# unused
directive = "doxygenenumvalue"
elif kind == "namespace":
directive = "doxygennamespace"
elif kind == "define":
directive = "doxygendefine"
elif kind == "typedef":
directive = "doxygentypedef"
elif kind == "variable":
directive = "doxygenvariable"
elif kind == "file":
directive = "doxygenfile"
elif kind == "union":
directive = "doxygenunion"
elif kind == "group":# unused
directive = "doxygengroup"
else:
directive = ""
return directive
def specificationsForKind(kind):
'''
Returns the relevant modifiers for the restructured text directive associated with
the input kind. The only considered values for the default implementation are
``class`` and ``struct``, for which the return value is exactly::
" :members:\\n :protected-members:\\n :undoc-members:\\n"
Formatting of the return is fundamentally important, it must include both the prior
indentation as well as newlines separating any relevant directive modifiers. The
way the framework uses this function is very specific; if you do not follow the
conventions then sphinx will explode.
Consider a ``struct thing`` being documented. The file generated for this will be::
.. _struct_thing:
Struct thing
================================================================================
.. doxygenstruct:: thing
:members:
:protected-members:
:undoc-members:
Assuming the first two lines will be in a variable called ``link_declaration``, and
the next three lines are stored in ``header``, the following is performed::
directive = ".. {}:: {}\\n".format(kindAsBreatheDirective(node.kind), node.name)
specifications = "{}\\n\\n".format(specificationsForKind(node.kind))
gen_file.write("{}{}{}{}".format(link_declaration, header, directive, specifications))
That is, **no preceding newline** should be returned from your custom function, and
**no trailing newline** is needed. Your indentation for each specifier should be
**exactly three spaces**, and if you want more than one you need a newline in between
every specification you want to include. Whitespace control is handled internally
because many of the directives do not need anything added. For a full listing of
what your specifier options are, refer to the breathe documentation:
http://breathe.readthedocs.io/en/latest/directives.html
:Parameters:
``kind`` (str)
The kind of the node we are generating the directive specifications for.
:Return (str):
The correctly formatted specifier(s) for the given ``kind``. If no specifier(s)
are necessary or desired, the empty string is returned.
'''
# use the custom directives function
if EXHALE_CUSTOM_SPECIFICATIONS_FUNCTION is not None:
return EXHALE_CUSTOM_SPECIFICATIONS_FUNCTION(kind)
# otherwise, just provide class and struct
if kind == "class" or kind == "struct":
directive = " :members:\n :protected-members:\n :undoc-members:"
else:
directive = ""
return directive
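# A minimal sketch of a custom override (hypothetical, not part of the
# original module); pass such a function via the
# "customSpecificationFunction" key in the arguments to generate(). Note the
# exact three-space indentation and embedded newlines described above.
def _example_custom_specifications(kind):
    if kind == "namespace":
        return "   :members:"
    elif kind == "class" or kind == "struct":
        return "   :members:\n   :undoc-members:"
    return ""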
def exclaimError(msg, ansi_fmt="34;1m"):
'''
Prints ``msg`` to the console in color with ``(!)`` prepended in color.
Example (uncolorized) output of ``exclaimError("No leading space needed.")``::
(!) No leading space needed.
All messages are written to ``sys.stderr``, and are closed with ``[0m``. The
default color is blue, but can be changed using ``ansi_fmt``.
    Documentation building has a verbose output process; this just helps distinguish an
error message coming from exhale.
:Parameters:
``msg`` (str)
The message you want printed to standard error.
``ansi_fmt`` (str)
An ansi color format. ``msg`` is printed as
``"\\033[" + ansi_fmt + msg + "\\033[0m\\n``, so you should specify both the
color code and the format code (after the semicolon). The default value is
``34;1m`` --- refer to
http://misc.flogisoft.com/bash/tip_colors_and_formatting for alternatives.
'''
sys.stderr.write("\033[{}(!) {}\033[0m\n".format(ansi_fmt, msg))
########################################################################################
#
##
###
####
##### Graph representation.
####
###
##
#
########################################################################################
class ExhaleNode:
'''
A wrapper class to track parental relationships, filenames, etc.
:Parameters:
``breatheCompound`` (breathe.compound)
The Breathe compound object we will use to gather the name, chilren, etc.
:Attributes:
``compound`` (breathe.compound)
The compound discovered from breathe that we are going to track.
``kind`` (str)
The string returned by the ``breatheCompound.get_kind()`` method. Used to
qualify this node throughout the framework, as well as for hierarchical
sorting.
``name`` (str)
The string returned by the ``breatheCompound.get_name()`` method. This name
will be fully qualified --- ``class A`` inside of ``namespace n`` will have
a ``name`` of ``n::A``. Files and directories may have ``/`` characters as
well.
``refid`` (str)
The reference ID as created by Doxygen. This will be used to scrape files
and see if a given reference identification number should be associated with
that file or not.
``children`` (list)
A potentially empty list of ``ExhaleNode`` object references that are
considered a child of this Node. Please note that a child reference in any
``children`` list may be stored in **many** other lists. Mutating a given
child will mutate the object, and therefore affect other parents of this
child. Lastly, a node of kind ``enum`` will never have its ``enumvalue``
children as it is impossible to rebuild that relationship without more
Doxygen xml parsing.
``parent`` (ExhaleNode)
If an ExhaleNode is determined to be a child of another ExhaleNode, this
node will be added to its parent's ``children`` list, and a reference to
the parent will be in this field. Initialized to ``None``, make sure you
check that it is an object first.
.. warning::
Do not ever set the ``parent`` of a given node if the would-be parent's
kind is ``"file"``. Doing so will break many important relationships,
such as nested class definitions. Effectively, **every** node will be
added as a child to a file node at some point. The file node will track
this, but the child should not.
The following three member variables are stored internally, but managed
externally by the :class:`exhale.ExhaleRoot` class:
``file_name`` (str)
The name of the file to create. Set to ``None`` on creation, refer to
:func:`exhale.ExhaleRoot.initializeNodeFilenameAndLink`.
``link_name`` (str)
The name of the reStructuredText link that will be at the top of the file.
Set to ``None`` on creation, refer to
:func:`exhale.ExhaleRoot.initializeNodeFilenameAndLink`.
``title`` (str)
The title that will appear at the top of the reStructuredText file
``file_name``. When the reStructuredText document for this node is being
written, the root object will set this field.
The following two fields are used for tracking what has or has not already been
included in the hierarchy views. Things like classes or structs in the global
namespace will not be found by :func:`exhale.ExhaleNode.inClassView`, and the
ExhaleRoot object will need to track which ones were missed.
``in_class_view`` (bool)
Whether or not this node has already been incorporated in the class view.
``in_file_view`` (bool)
Whether or not this node has already been incorporated in the file view.
This class wields duck typing. If ``self.kind == "file"``, then the additional
member variables below exist:
``namespaces_used`` (list)
A list of namespace nodes that are either defined or used in this file.
``includes`` (list)
A list of strings that are parsed from the Doxygen xml for this file as
include directives.
``included_by`` (list)
A list of (refid, name) string tuples that are parsed from the Doxygen xml
for this file presenting all of the other files that include this file.
They are stored this way so that the root class can later link to that file
by its refid.
``location`` (str)
A string parsed from the Doxygen xml for this file stating where this file
is physically in relation to the *Doxygen* root.
``program_listing`` (list)
A list of strings that is the Doxygen xml <programlisting>, without the
opening or closing <programlisting> tags.
``program_file`` (list)
Managed externally by the root similar to ``file_name`` etc, this is the
name of the file that will be created to display the program listing if it
exists. Set to ``None`` on creation, refer to
:func:`exhale.ExhaleRoot.initializeNodeFilenameAndLink`.
``program_link_name`` (str)
Managed externally by the root similar to ``file_name`` etc, this is the
reStructuredText link that will be declared at the top of the
``program_file``. Set to ``None`` on creation, refer to
:func:`exhale.ExhaleRoot.initializeNodeFilenameAndLink`.
'''
def __init__(self, breatheCompound):
self.compound = breatheCompound
self.kind = breatheCompound.get_kind()
self.name = breatheCompound.get_name()
self.refid = breatheCompound.get_refid()
self.children = [] # ExhaleNodes
self.parent = None # if reparented, will be an ExhaleNode
# managed externally
self.file_name = None
self.link_name = None
self.title = None
# representation of hierarchies
self.in_class_view = False
self.in_directory_view = False
# kind-specific additional information
if self.kind == "file":
self.namespaces_used = [] # ExhaleNodes
self.includes = [] # strings
self.included_by = [] # (refid, name) tuples
self.location = ""
self.program_listing = [] # strings
self.program_file = ""
self.program_link_name = ""
def __lt__(self, other):
'''
The ``ExhaleRoot`` class stores a bunch of lists of ``ExhaleNode`` objects.
When these lists are sorted, this method will be called to perform the sorting.
:Parameters:
``other`` (ExhaleNode)
The node we are comparing whether ``self`` is less than or not.
:Return (bool):
True if ``self`` is less than ``other``, False otherwise.
'''
# allows alphabetical sorting within types
if self.kind == other.kind:
return self.name.lower() < other.name.lower()
# treat structs and classes as the same type
elif self.kind == "struct" or self.kind == "class":
if other.kind != "struct" and other.kind != "class":
return True
else:
if self.kind == "struct" and other.kind == "class":
return True
elif self.kind == "class" and other.kind == "struct":
return False
else:
return self.name < other.name
# otherwise, sort based off the kind
else:
return self.kind < other.kind
def findNestedNamespaces(self, lst):
'''
Recursive helper function for finding nested namespaces. If this node is a
namespace node, it is appended to ``lst``. Each node also calls each of its
child ``findNestedNamespaces`` with the same list.
:Parameters:
``lst`` (list)
The list each namespace node is to be appended to.
'''
if self.kind == "namespace":
lst.append(self)
for c in self.children:
c.findNestedNamespaces(lst)
def findNestedDirectories(self, lst):
'''
Recursive helper function for finding nested directories. If this node is a
directory node, it is appended to ``lst``. Each node also calls each of its
child ``findNestedDirectories`` with the same list.
:Parameters:
``lst`` (list)
The list each directory node is to be appended to.
'''
if self.kind == "dir":
lst.append(self)
for c in self.children:
c.findNestedDirectories(lst)
def findNestedClassLike(self, lst):
'''
Recursive helper function for finding nested classes and structs. If this node
is a class or struct, it is appended to ``lst``. Each node also calls each of
its child ``findNestedClassLike`` with the same list.
:Parameters:
``lst`` (list)
The list each class or struct node is to be appended to.
'''
if self.kind == "class" or self.kind == "struct":
lst.append(self)
for c in self.children:
c.findNestedClassLike(lst)
def findNestedEnums(self, lst):
'''
Recursive helper function for finding nested enums. If this node is a class or
struct it may have had an enum added to its child list. When this occurred, the
enum was removed from ``self.enums`` in the :class:`exhale.ExhaleRoot` class and
needs to be rediscovered by calling this method on all of its children. If this
node is an enum, it is because a parent class or struct called this method, in
which case it is added to ``lst``.
**Note**: this is used slightly differently than nested directories, namespaces,
and classes will be. Refer to :func:`exhale.ExhaleRoot.generateNodeDocuments`
function for details.
:Parameters:
``lst`` (list)
The list each enum is to be appended to.
'''
if self.kind == "enum":
lst.append(self)
for c in self.children:
c.findNestedEnums(lst)
def findNestedUnions(self, lst):
'''
Recursive helper function for finding nested unions. If this node is a class or
struct it may have had a union added to its child list. When this occurred, the
union was removed from ``self.unions`` in the :class:`exhale.ExhaleRoot` class
and needs to be rediscovered by calling this method on all of its children. If
this node is a union, it is because a parent class or struct called this method,
in which case it is added to ``lst``.
**Note**: this is used slightly differently than nested directories, namespaces,
and classes will be. Refer to :func:`exhale.ExhaleRoot.generateNodeDocuments`
function for details.
:Parameters:
``lst`` (list)
The list each union is to be appended to.
'''
if self.kind == "union":
lst.append(self)
for c in self.children:
c.findNestedUnions(lst)
def toConsole(self, level, printChildren=True):
'''
Debugging tool for printing hierarchies / ownership to the console. Recursively
calls children ``toConsole`` if this node is not a directory or a file, and
``printChildren == True``.
:Parameters:
``level`` (int)
The indentation level to be used, should be greater than or equal to 0.
``printChildren`` (bool)
Whether or not the ``toConsole`` method for the children found in
``self.children`` should be called with ``level+1``. Default is True,
set to False for directories and files.
'''
indent = " " * level
print("{}- [{}]: {}".format(indent, self.kind, self.name))
# files are children of directories, the file section will print those children
if self.kind == "dir":
for c in self.children:
c.toConsole(level + 1, printChildren=False)
elif printChildren:
if self.kind == "file":
print("{}[[[ location=\"{}\" ]]]".format(" " * (level + 1), self.location))
for i in self.includes:
print("{}- #include <{}>".format(" " * (level + 1), i))
for ref, name in self.included_by:
print("{}- included by: [{}]".format(" " * (level + 1), name))
for n in self.namespaces_used:
n.toConsole(level + 1, printChildren=False)
for c in self.children:
c.toConsole(level + 1)
elif self.kind == "class" or self.kind == "struct":
relevant_children = []
for c in self.children:
if c.kind == "class" or c.kind == "struct" or \
c.kind == "enum" or c.kind == "union":
relevant_children.append(c)
for rc in sorted(relevant_children):
rc.toConsole(level + 1)
elif self.kind != "union":
for c in self.children:
c.toConsole(level + 1)
def typeSort(self):
'''
Sorts ``self.children`` in place, and has each child sort its own children.
Refer to :func:`exhale.ExhaleRoot.deepSortList` for more information on when
this is necessary.
'''
self.children.sort()
for c in self.children:
c.typeSort()
def inClassView(self):
'''
Whether or not this node should be included in the class view hierarchy. Helper
method for :func:`exhale.ExhaleNode.toClassView`. Sets the member variable
``self.in_class_view`` to True if appropriate.
:Return (bool):
True if this node should be included in the class view --- either it is a
node of kind ``struct``, ``class``, ``enum``, ``union``, or it is a
``namespace`` for which one or more of its descendants is one of the previous
four kinds. Returns False otherwise.
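Because ``self.in_class_view`` is set as a side effect, a sweep after the
hierarchy is generated can locate nodes that never appeared in it. A minimal
sketch, assuming ``root`` is a fully generated :class:`exhale.ExhaleRoot`:

.. code-block:: py

   missing = [n for n in root.all_nodes
              if n.kind in ("class", "struct", "enum", "union") and not n.in_class_view]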
'''
if self.kind == "namespace":
for c in self.children:
if c.inClassView():
return True
return False
else:
# flag that this node is already in the class view so we can find the
# missing top level nodes at the end
self.in_class_view = True
return self.kind == "struct" or self.kind == "class" or \
self.kind == "enum" or self.kind == "union"
def toClassView(self, level, stream, treeView, lastChild=False):
'''
Recursively generates the class view hierarchy using this node and its children,
if it is determined by :func:`exhale.ExhaleNode.inClassView` that this node
should be included.
:Parameters:
``level`` (int)
An integer greater than or equal to 0 representing the indentation level
for this node.
``stream`` (StringIO)
The stream that is being written to by all of the nodes (created and
destroyed by the ExhaleRoot object).
``treeView`` (bool)
If False, standard reStructuredText bulleted lists will be written to
the ``stream``. If True, then raw html unordered lists will be written
to the ``stream``.
``lastChild`` (bool)
When ``treeView == True``, the unordered lists generated need to have
an <li class="lastChild"> tag on the last child for the
``collapsibleList`` to work correctly. The default value of this
parameter is False, and should only ever be set to True internally by
recursive calls to this method.
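A minimal sketch of the two output modes for a hypothetical node (the exact
text depends on ``link_name``, ``file_name``, and ``title``):

.. code-block:: py

   stream = StringIO()
   node.toClassView(0, stream, treeView=False)
   # stream now holds reStructuredText bullets such as
   # "- :ref:`class_external__Arbiter`"; with treeView=True, raw html
   # list items (<li>...</li>) are written instead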
'''
if self.inClassView():
if not treeView:
stream.write("{}- :ref:`{}`\n".format(' ' * level, self.link_name))
else:
indent = ' ' * (level * 2)
if lastChild:
opening_li = '<li class="lastChild">'
else:
opening_li = '<li>'
# turn double underscores into underscores, then underscores into hyphens
html_link = self.link_name.replace("__", "_").replace("_", "-")
# should always have two parts
title_as_link_parts = self.title.split(" ")
qualifier = title_as_link_parts[0]
link_title = title_as_link_parts[1]
html_link = '{} <a href="{}.html#{}">{}</a>'.format(qualifier,
self.file_name.split('.rst')[0],
html_link,
link_title)
has_nested_children = False
if self.kind == "class" or self.kind == "struct":
nested_enums = []
nested_unions = []
nested_class_like = []
# important: only scan self.children, do not use recursive findNested* methods
for c in self.children:
if c.kind == "enum":
nested_enums.append(c)
elif c.kind == "union":
nested_unions.append(c)
elif c.kind == "struct" or c.kind == "class":
nested_class_like.append(c)
has_nested_children = nested_enums or nested_unions or nested_class_like # <3 Python
# if there are sub children, there needs to be a new html list generated
if self.kind == "namespace" or has_nested_children:
next_indent = ' {}'.format(indent)
stream.write('{}{}\n{}{}\n{}<ul>\n'.format(indent, opening_li,
next_indent, html_link,
next_indent))
else:
stream.write('{}{}{}</li>\n'.format(indent, opening_li, html_link))
# include the relevant children (class like or nested namespaces / classes)
if self.kind == "namespace":
# pre-process and find everything that is relevant
kids = []
nspaces = []
for c in self.children:
if c.inClassView():
if c.kind == "namespace":
nspaces.append(c)
else:
kids.append(c)
# always put nested namespaces last; parent dictates to the child if
# they are the last child being printed
kids.sort()
num_kids = len(kids)
nspaces.sort()
num_nspaces = len(nspaces)
last_child_index = num_kids + num_nspaces - 1
child_idx = 0
# first all of the child class like, then any nested namespaces
for node in itertools.chain(kids, nspaces):
node.toClassView(level + 1, stream, treeView, child_idx == last_child_index)
child_idx += 1
# now that all of the children have been written, close the tags
if treeView:
stream.write(" {}</ul>\n{}</li>\n".format(indent, indent))
# current node is a class or struct with nested children
elif has_nested_children:
nested_class_like.sort()
num_class_like = len(nested_class_like)
nested_enums.sort()
num_enums = len(nested_enums)
nested_unions.sort()
num_unions = len(nested_unions)
last_child_index = num_class_like + num_enums + num_unions - 1
child_idx = 0
# first all of the classes / structs, then enums, then unions
for node in itertools.chain(nested_class_like, nested_enums, nested_unions):
node.toClassView(level + 1, stream, treeView, child_idx == last_child_index)
child_idx += 1
# now that all of the children have been written, close the tags
if treeView:
stream.write(" {}</ul>\n{}</li>\n".format(indent, indent))
def inDirectoryView(self):
'''
Whether or not this node should be included in the file view hierarchy. Helper
method for :func:`exhale.ExhaleNode.toDirectoryView`. Sets the member variable
``self.in_directory_view`` to True if appropriate.
:Return (bool):
True if this node should be included in the file view --- either it is a
node of kind ``file``, or it is a ``dir`` for which one or more of its
descendants is a ``file``. Returns False otherwise.
'''
if self.kind == "file":
# flag that this file is already in the directory view so that potential
# missing files can be found later.
self.in_directory_view = True
return True
elif self.kind == "dir":
for c in self.children:
if c.inDirectoryView():
return True
return False
def toDirectoryView(self, level, stream, treeView, lastChild=False):
'''
Recursively generates the file view hierarchy using this node and its children,
if it is determined by :func:`exhale.ExhaleNode.inDirectoryView` that this node
should be included.
:Parameters:
``level`` (int)
An integer greater than or equal to 0 representing the indentation level
for this node.
``stream`` (StringIO)
The stream that is being written to by all of the nodes (created and
destroyed by the ExhaleRoot object).
``treeView`` (bool)
If False, standard reStructuredText bulleted lists will be written to
the ``stream``. If True, then raw html unordered lists will be written
to the ``stream``.
``lastChild`` (bool)
When ``treeView == True``, the unordered lists generated need to have
an <li class="lastChild"> tag on the last child for the
``collapsibleList`` to work correctly. The default value of this
parameter is False, and should only ever be set to True internally by
recursive calls to this method.
'''
if self.inDirectoryView():
if not treeView:
stream.write("{}- :ref:`{}`\n".format(' ' * level, self.link_name))
else:
indent = ' ' * (level * 2)
if lastChild:
opening_li = '<li class="lastChild">'
else:
opening_li = '<li>'
# turn double underscores into underscores, then underscores into hyphens
html_link = self.link_name.replace("__", "_").replace("_", "-")
# should always have two parts
title_as_link_parts = self.title.split(" ")
qualifier = title_as_link_parts[0]
link_title = title_as_link_parts[1]
html_link = '{} <a href="{}.html#{}">{}</a>'.format(qualifier,
self.file_name.split('.rst')[0],
html_link,
link_title)
if self.kind == "dir":
next_indent = ' {}'.format(indent)
stream.write('{}{}\n{}{}\n{}<ul>\n'.format(indent, opening_li,
next_indent, html_link,
next_indent))
else:
stream.write('{}{}{}</li>\n'.format(indent, opening_li, html_link))
# include the relevant children (files or nested directories)
if self.kind == "dir":
# pre-process and find everything that is relevant
kids = []
dirs = []
for c in self.children:
if c.inDirectoryView():
if c.kind == "dir":
dirs.append(c)
elif c.kind == "file":
kids.append(c)
# always put nested directories last; parent dictates to the child if
# they are the last child being printed
kids.sort()
num_kids = len(kids)
dirs.sort()
num_dirs = len(dirs)
last_child_index = num_kids + num_dirs - 1
child_idx = 0
for k in kids:
k.toDirectoryView(level + 1, stream, treeView, child_idx == last_child_index)
child_idx += 1
for n in dirs:
n.toDirectoryView(level + 1, stream, treeView, child_idx == last_child_index)
child_idx += 1
# now that all of the children have been written, close the tags
if treeView:
stream.write(" {}</ul>\n{}</li>\n".format(indent, indent))
class ExhaleRoot:
'''
The full representation of the hierarchy graphs. In addition to containing specific
lists of ExhaleNodes of interest, the ExhaleRoot class is responsible for comparing
the parsed breathe hierarchy and rebuilding lost relationships using the Doxygen
xml files. Once the graph parsing has finished, the ExhaleRoot generates all of the
relevant reStructuredText documents and links them together.
The ExhaleRoot class is not designed for reuse at this time. If you want to
generate a new hierarchy with a different directory or something, changing all of
the right fields may be difficult and / or unsuccessful. Refer to the bottom of the
source code for :func:`exhale.generate` for safe usage (just exception handling),
but the design of this class is to be used as follows:
.. code-block:: py
textRoot = ExhaleRoot(... args ...)
textRoot.parse()
textRoot.generateFullAPI()
Zero checks are in place to enforce this usage; if you are modifying the
execution of this class and things are not working, make sure you follow the
ordering of those methods.
:Parameters:
``breatheRoot`` (instance)
Type unknown, this is the return value of ``breathe.breathe_parse``.
``rootDirectory`` (str)
The name of the root directory to put everything in. This should be the
value of the key ``containmentFolder`` in the dictionary passed to
:func:`exhale.generate`.
``rootFileName`` (str)
The name of the file the root library api will be put into. This should not
contain the ``rootDirectory`` path. This should be the value of the key
``rootFileName`` in the dictionary passed to :func:`exhale.generate`.
``rootFileTitle`` (str)
The title to be written to the top of ``rootFileName``. This should be the
value of the key ``rootFileTitle`` in the dictionary passed to
:func:`exhale.generate`.
``rootFileDescription`` (str)
The description of the library api file placed after ``rootFileTitle``.
This should be the value of the key ``afterTitleDescription`` in the
dictionary passed to :func:`exhale.generate`.
``rootFileSummary`` (str)
The summary of the library api placed after the generated hierarchy views.
This should be the value of the key ``afterBodySummary`` in the dictionary
passed to :func:`exhale.generate`.
``createTreeView`` (bool)
Creates the raw html unordered lists for use with ``collapsibleList`` if
True. Otherwise, creates standard reStructuredText bulleted lists. Should
be the value of the key ``createTreeView`` in the dictionary passed to
:func:`exhale.generate`.
:Attributes:
``breathe_root`` (instance)
The value of the parameter ``breatheRoot``.
``root_directory`` (str)
The value of the parameter ``rootDirectory``.
``root_file_name`` (str)
The value of the parameter ``rootFileName``.
``full_root_file_path`` (str)
The full file path of the root file (``"root_directory/root_file_name"``).
``root_file_title`` (str)
The value of the parameter ``rootFileTitle``.
``root_file_description`` (str)
The value of the parameter ``rootFileDescription``.
``root_file_summary`` (str)
The value of the parameter ``rootFileSummary``.
``class_view_file`` (str)
The full file path the class view hierarchy will be written to. This is
incorporated into ``root_file_name`` using an ``.. include:`` directive.
``directory_view_file`` (str)
The full file path the file view hierarchy will be written to. This is
incorporated into ``root_file_name`` using an ``.. include:`` directive.
``unabridged_api_file`` (str)
The full file path the full API will be written to. This is incorporated
into ``root_file_name`` using a ``.. toctree:`` directive with a
``:maxdepth:`` according to the value of the key ``fullToctreeMaxDepth``
in the dictionary passed into :func:`exhale.generate`.
``use_tree_view`` (bool)
The value of the parameter ``createTreeView``.
``all_compounds`` (list)
A list of all the Breathe compound objects discovered along the way.
Populated during :func:`exhale.ExhaleRoot.discoverAllNodes`.
``all_nodes`` (list)
A list of all of the ExhaleNode objects created. Populated during
:func:`exhale.ExhaleRoot.discoverAllNodes`.
``node_by_refid`` (dict)
A dictionary whose keys are string Doxygen ``refid`` values and whose values
are the corresponding ExhaleNode objects. Storing the nodes this way is
convenient when the Doxygen xml files are being parsed.
``class_like`` (list)
The full list of ExhaleNodes of kind ``struct`` or ``class``.
``defines`` (list)
The full list of ExhaleNodes of kind ``define``.
``enums`` (list)
The full list of ExhaleNodes of kind ``enum``.
``enum_values`` (list)
The full list of ExhaleNodes of kind ``enumvalue``. Populated, not used.
``functions`` (list)
The full list of ExhaleNodes of kind ``function``.
``dirs`` (list)
The full list of ExhaleNodes of kind ``dir``.
``files`` (list)
The full list of ExhaleNodes of kind ``file``.
``groups`` (list)
The full list of ExhaleNodes of kind ``group``. Populated, not used.
``namespaces`` (list)
The full list of ExhaleNodes of kind ``namespace``.
``typedefs`` (list)
The full list of ExhaleNodes of kind ``typedef``.
``unions`` (list)
The full list of ExhaleNodes of kind ``union``.
``variables`` (list)
The full list of ExhaleNodes of kind ``variable``.
'''
def __init__(self, breatheRoot, rootDirectory, rootFileName, rootFileTitle,
rootFileDescription, rootFileSummary, createTreeView):
# the Breathe root object (main entry point to Breathe graph)
self.breathe_root = breatheRoot
# file generation location and root index data
self.root_directory = rootDirectory
self.root_file_name = rootFileName
self.full_root_file_path = "{}/{}".format(self.root_directory, self.root_file_name)
self.root_file_title = rootFileTitle
self.root_file_description = rootFileDescription
self.root_file_summary = rootFileSummary
self.class_view_file = "{}.rst".format(
self.full_root_file_path.replace(self.root_file_name, "class_view_hierarchy")
)
self.directory_view_file = "{}.rst".format(
self.full_root_file_path.replace(self.root_file_name, "directory_view_hierarchy")
)
self.unabridged_api_file = "{}.rst".format(
self.full_root_file_path.replace(self.root_file_name, "unabridged_api")
)
# whether or not we should generate the raw html tree view
self.use_tree_view = createTreeView
# track all compounds (from Breathe) to build all nodes (ExhaleNodes)
self.all_compounds = [self.breathe_root.get_compound()]
self.all_nodes = []
# convenience lookup: keys are string Doxygen refid's, values are ExhaleNodes
self.node_by_refid = {}
# breathe directive breathe kind
#--------------------+----------------+
# autodoxygenfile <-+-> IGNORE |
# doxygenindex <-+-> IGNORE |
# autodoxygenindex <-+-> IGNORE |
#--------------------+----------------+
# doxygenclass <-+-> "class" |
# doxygenstruct <-+-> "struct" |
self.class_like = [] # |
# doxygendefine <-+-> "define" |
self.defines = [] # |
# doxygenenum <-+-> "enum" |
self.enums = [] # |
# ---> largely ignored by framework, |
# but stored if desired |
# doxygenenumvalue <-+-> "enumvalue" |
self.enum_values = [] # |
# doxygenfunction <-+-> "function" |
self.functions = [] # |
# no directive <-+-> "dir" |
self.dirs = [] # |
# doxygenfile <-+-> "file" |
self.files = [] # |
# not used, but could be supported in |
# the future? |
# doxygengroup <-+-> "group" |
self.groups = [] # |
# doxygennamespace <-+-> "namespace" |
self.namespaces = [] # |
# doxygentypedef <-+-> "typedef" |
self.typedefs = [] # |
# doxygenunion <-+-> "union" |
self.unions = [] # |
# doxygenvariable <-+-> "variable" |
self.variables = [] # |
#-------------------------------------+
####################################################################################
#
##
### Parsing
##
#
####################################################################################
def parse(self):
'''
The first method that should be called after creating an ExhaleRoot object. The
Breathe graph is parsed first, followed by the Doxygen xml documents. By the
end of this method, all of the ``self.<breathe_kind>``, ``self.all_compounds``,
and ``self.all_nodes`` lists as well as the ``self.node_by_refid`` dictionary
will be populated. Lastly, this method sorts all of the internal lists. The
order of execution is exactly
1. :func:`exhale.ExhaleRoot.discoverAllNodes`
2. :func:`exhale.ExhaleRoot.reparentAll`
3. Populate ``self.node_by_refid`` using ``self.all_nodes``.
4. :func:`exhale.ExhaleRoot.fileRefDiscovery`
5. :func:`exhale.ExhaleRoot.filePostProcess`
6. :func:`exhale.ExhaleRoot.sortInternals`
'''
# Find and reparent everything from the Breathe graph.
self.discoverAllNodes()
self.reparentAll()
# now that we have all of the nodes, store them in a convenient manner for refid
# lookup when parsing the Doxygen xml files
for n in self.all_nodes:
self.node_by_refid[n.refid] = n
# find missing relationships using the Doxygen xml files
self.fileRefDiscovery()
self.filePostProcess()
# sort all of the lists we just built
self.sortInternals()
def discoverAllNodes(self):
'''
Stack-based traversal of the Breathe graph; creates some parental relationships
between different ExhaleNode objects. Upon termination, this method will have
populated the lists ``self.all_compounds``, ``self.all_nodes``, and the
``self.<breathe_kind>`` lists for different types of objects.
'''
# When you call the breathe_root.get_compound() method, it returns a list of the
# top level source nodes. These start out on the stack, and we add their
# children if they have not already been visited before.
nodes_remaining = [ExhaleNode(compound) for compound in self.breathe_root.get_compound()]
while len(nodes_remaining) > 0:
curr_node = nodes_remaining.pop()
self.trackNodeIfUnseen(curr_node)
self.discoverNeighbors(nodes_remaining, curr_node)
def trackNodeIfUnseen(self, node):
'''
Helper method for :func:`exhale.ExhaleRoot.discoverAllNodes`. If the node is
not in self.all_nodes yet, add it to both self.all_nodes as well as the
corresponding ``self.<breathe_kind>`` list.
:Parameters:
``node`` (ExhaleNode)
The node to begin tracking if not already present.
'''
if node not in self.all_nodes:
self.all_nodes.append(node)
if node.kind == "class" or node.kind == "struct":
self.class_like.append(node)
elif node.kind == "namespace":
self.namespaces.append(node)
elif node.kind == "enum":
self.enums.append(node)
elif node.kind == "enumvalue":
self.enum_values.append(node)
elif node.kind == "define":
self.defines.append(node)
elif node.kind == "file":
self.files.append(node)
elif node.kind == "dir":
self.dirs.append(node)
elif node.kind == "function":
self.functions.append(node)
elif node.kind == "variable":
self.variables.append(node)
elif node.kind == "group":
self.groups.append(node)
elif node.kind == "typedef":
self.typedefs.append(node)
elif node.kind == "union":
self.unions.append(node)
def discoverNeighbors(self, nodesRemaining, node):
'''
Helper method for :func:`exhale.ExhaleRoot.discoverAllNodes`. Some of the
compound objects received from Breathe have a member function ``get_member()``
that returns all of the children. Some do not. This method checks to see if
the method is present first, and if so performs the following::
For every compound in node.compound.get_member():
If compound not present in self.all_compounds:
- Add compound to self.all_compounds
- Create a child ExhaleNode
- If the current node is not a class, struct, or union, add the child to nodesRemaining (otherwise only nested enums and unions are added)
- If it is not an enumvalue, make it a child of node parameter
:Parameters:
``nodesRemaining`` (list)
The list of nodes representing the stack traversal being done by
:func:`exhale.ExhaleRoot.discoverAllNodes`. New neighbors found will
be appended to this list.
``node`` (ExhaleNode)
The node we are trying to discover potential new neighbors from.
'''
# discover neighbors of current node; some seem to not have get_member()
if "member" in node.compound.__dict__:
for member in node.compound.get_member():
# keep track of every breathe compound we have seen
if member not in self.all_compounds:
self.all_compounds.append(member)
# if we haven't seen this compound yet, make a node
child_node = ExhaleNode(member)
# if the current node is a class, struct, or union, ignore its member
# variables, functions, etc.; only nested enums and unions are enqueued
if node.kind == "class" or node.kind == "struct" or node.kind == "union":
if child_node.kind == "enum" or child_node.kind == "union":
nodesRemaining.append(child_node)
else:
nodesRemaining.append(child_node)
# enumvalues are presented as part of their parent enum; reliably determining
# an enumvalue's parent here is impractical, so no parent link is created
if child_node.kind != "enumvalue":
node.children.append(child_node)
child_node.parent = node
def reparentAll(self):
'''
Fixes some of the parental relationships lost in parsing the Breathe graph.
File relationships are recovered in :func:`exhale.ExhaleRoot.fileRefDiscovery`.
This method simply calls in this order:
1. :func:`exhale.ExhaleRoot.reparentUnions`
2. :func:`exhale.ExhaleRoot.reparentClassLike`
3. :func:`exhale.ExhaleRoot.reparentDirectories`
4. :func:`exhale.ExhaleRoot.renameToNamespaceScopes`
5. :func:`exhale.ExhaleRoot.reparentNamespaces`
'''
self.reparentUnions()
self.reparentClassLike()
self.reparentDirectories()
self.renameToNamespaceScopes()
self.reparentNamespaces()
def reparentUnions(self):
'''
Helper method for :func:`exhale.ExhaleRoot.reparentAll`. Namespaces and classes
should have the unions defined within them in their own child lists, rather than
leaving those unions floating around. Union nodes that are reparented (e.g. a union defined in
a class) will be removed from the list ``self.unions`` since the Breathe
directive for its parent (e.g. the class) will include the documentation for the
union. The consequence of this is that a union defined in a class will **not**
appear in the full api listing of Unions.
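The reparenting decision is driven entirely by splitting the fully qualified
name; for a hypothetical union:

.. code-block:: py

   parts = "external::Arbiter::AnonUnion".split("::")
   # parts == ['external', 'Arbiter', 'AnonUnion']; with more than two parts,
   # class like parents are tried first, then namespaces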
'''
# unions declared in a class will not link to the individual union page, so
# we will instead elect to remove these from the list of unions
removals = []
for u in self.unions:
parts = u.name.split("::")
num_parts = len(parts)
if num_parts > 1:
# it can either be a child of a namespace or a class_like
if num_parts > 2:
namespace_name = "::".join(p for p in parts[:-2])
potential_class = parts[-2]
# see if it belongs to a class like object first. if so, remove this
# union from the list of unions
reparented = False
for cl in self.class_like:
if cl.name == potential_class:
cl.children.append(u)
u.parent = cl
reparented = True
break
if reparented:
removals.append(u)
continue
# otherwise, see if it belongs to a namespace
alt_namespace_name = "{}::{}".format(namespace_name, potential_class)
for n in self.namespaces:
if namespace_name == n.name or alt_namespace_name == n.name:
n.children.append(u)
u.parent = n
break
else:
name_or_class_name = "::".join(p for p in parts[:-1])
# see if it belongs to a class like object first. if so, remove this
# union from the list of unions
reparented = False
for cl in self.class_like:
if cl.name == name_or_class_name:
cl.children.append(u)
u.parent = cl
reparented = True
break
if reparented:
removals.append(u)
continue
# next see if it belongs to a namespace
for n in self.namespaces:
if n.name == name_or_class_name:
n.children.append(u)
u.parent = n
break
# remove the unions from self.unions that were declared in class_like objects
for rm in removals:
self.unions.remove(rm)
def reparentClassLike(self):
'''
Helper method for :func:`exhale.ExhaleRoot.reparentAll`. Iterates over the
``self.class_like`` list and adds each object as a child to a namespace if the
class or struct is a member of that namespace. Many classes / structs will be
reparented to a namespace node, these will remain in ``self.class_like``.
However, if a class or struct is reparented to a different class or struct (it
is a nested class / struct), it *will* be removed from ``self.class_like`` so
that the class view hierarchy is generated correctly.
'''
removals = []
for cl in self.class_like:
parts = cl.name.split("::")
if len(parts) > 1:
# first try and reparent to namespaces
namespace_name = "::".join(parts[:-1])
parent_found = False
for n in self.namespaces:
if n.name == namespace_name:
n.children.append(cl)
cl.parent = n
parent_found = True
break
# if a namespace parent was not found, try and reparent to a class
if not parent_found:
# parent class name would be namespace_name
for p_cls in self.class_like:
if p_cls.name == namespace_name:
p_cls.children.append(cl)
cl.parent = p_cls
removals.append(cl)
break
for rm in removals:
if rm in self.class_like:
self.class_like.remove(rm)
def reparentDirectories(self):
'''
Helper method for :func:`exhale.ExhaleRoot.reparentAll`. Adds subdirectories as
children to the relevant directory ExhaleNode. If a node in ``self.dirs`` is
added as a child to a different directory node, it is removed from the
``self.dirs`` list.
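Nesting depth ("rank") is simply the number of path components, and
reparenting walks from the deepest directories upward. A small illustration
with hypothetical paths:

.. code-block:: py

   dirs = ["include", "include/arrays", "include/util"]
   dir_ranks = [(len(d.split("/")), d) for d in dirs]
   # sorted(dir_ranks) == [(1, 'include'), (2, 'include/arrays'), (2, 'include/util')]
   # the reversed traversal reparents both rank 2 directories under 'include'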
'''
dir_parts = []
dir_ranks = []
for d in self.dirs:
parts = d.name.split("/")
for p in parts:
if p not in dir_parts:
dir_parts.append(p)
dir_ranks.append((len(parts), d))
traversal = sorted(dir_ranks)
removals = []
for rank, directory in reversed(traversal):
# rank one means top level directory
if rank < 2:
break
# otherwise, this is nested
for p_rank, p_directory in reversed(traversal):
if p_rank == rank - 1:
if p_directory.name == "/".join(directory.name.split("/")[:-1]):
p_directory.children.append(directory)
directory.parent = p_directory
if directory not in removals:
removals.append(directory)
break
for rm in removals:
self.dirs.remove(rm)
def renameToNamespaceScopes(self):
'''
Helper method for :func:`exhale.ExhaleRoot.reparentAll`. Some compounds in
Breathe such as functions and variables do not have the namespace name they are
declared in before the name of the actual compound. This method prepends the
appropriate (nested) namespace name before the name of any child that does not
already have it.
For example, the variable ``MAX_DEPTH`` declared in namespace ``external`` would
have its ExhaleNode's ``name`` attribute changed from ``MAX_DEPTH`` to
``external::MAX_DEPTH``.
'''
for n in self.namespaces:
namespace_name = "{}::".format(n.name)
for child in n.children:
if namespace_name not in child.name:
child.name = "{}{}".format(namespace_name, child.name)
def reparentNamespaces(self):
'''
Helper method for :func:`exhale.ExhaleRoot.reparentAll`. Adds nested namespaces
as children to the relevant namespace ExhaleNode. If a node in
``self.namespaces`` is added as a child to a different namespace node, it is
removed from the ``self.namespaces`` list. Because these are removed from
``self.namespaces``, it is important that
:func:`exhale.ExhaleRoot.renameToNamespaceScopes` is called before this method.
'''
namespace_parts = []
namespace_ranks = []
for n in self.namespaces:
parts = n.name.split("::")
for p in parts:
if p not in namespace_parts:
namespace_parts.append(p)
namespace_ranks.append((len(parts), n))
traversal = sorted(namespace_ranks)
removals = []
for rank, namespace in reversed(traversal):
# rank one means top level namespace
if rank < 2:
break
# otherwise, this is nested
for p_rank, p_namespace in reversed(traversal):
if p_rank == rank - 1:
if p_namespace.name == "::".join(namespace.name.split("::")[:-1]):
p_namespace.children.append(namespace)
namespace.parent = p_namespace
if namespace not in removals:
removals.append(namespace)
break
for rm in removals:
self.namespaces.remove(rm)
def fileRefDiscovery(self):
'''
Finds the missing components for file nodes by parsing the Doxygen xml (which is
just the ``doxygen_output_dir/node.refid``). Additional items parsed include
adding items whose ``refid`` tag are used in this file, the <programlisting> for
the file, what it includes and what includes it, as well as the location of the
file (with respect to the *Doxygen* root).
Care must be taken to only include a refid found with specific tags. The
parsing of the xml file was done by just looking at some example outputs. It
seems to be working correctly, but there may be some subtle use cases that break
it.
.. warning::
Some enums, classes, variables, etc declared in the file will not have their
associated refid in the declaration of the file, but will be present in the
<programlisting>. These are added to the file's list of children when they
are found, but this parental relationship cannot be formed if you set
``XML_PROGRAMLISTING = NO`` with Doxygen. An example of such an enum would
be an enum declared inside of a namespace within this file.
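For reference, the kind of line the ``includes`` regular expression below is
designed to match (illustrative xml with a made up ``refid``):

.. code-block:: py

   import re
   inc_regex = re.compile(r'.*<includes.*>(.+)</includes>')
   line = '    <includes refid="vector_8h" local="no">vector</includes>'
   inc_regex.match(line).groups()[0]  # -> 'vector'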
'''
if EXHALE_API_DOXY_OUTPUT_DIR == "":
exclaimError("The doxygen xml output directory was not specified!")
return
# parse the doxygen xml file and extract all refid's put in it
# keys: file object, values: list of refid's
doxygen_xml_file_ownerships = {}
# innerclass, innernamespace, etc
ref_regex = re.compile(r'.*<inner.*refid="(\w+)".*')
# what files this file includes
inc_regex = re.compile(r'.*<includes.*>(.+)</includes>')
# what files include this file
inc_by_regex = re.compile(r'.*<includedby refid="(\w+)".*>(.*)</includedby>')
# the actual location of the file
loc_regex = re.compile(r'.*<location file="(.*)"/>')
for f in self.files:
doxygen_xml_file_ownerships[f] = []
try:
doxy_xml_path = "{}{}.xml".format(EXHALE_API_DOXY_OUTPUT_DIR, f.refid)
with open(doxy_xml_path, "r") as doxy_file:
processing_code_listing = False # shows up at bottom of xml
for line in doxy_file:
# see if this line represents the location tag
match = loc_regex.match(line)
if match is not None:
f.location = match.groups()[0]
continue
if not processing_code_listing:
# gather included by references
match = inc_by_regex.match(line)
if match is not None:
ref, name = match.groups()
f.included_by.append((ref, name))
continue
# gather includes lines
match = inc_regex.match(line)
if match is not None:
inc = match.groups()[0]
f.includes.append(inc)
continue
# gather any classes, namespaces, etc declared in the file
match = ref_regex.match(line)
if match is not None:
match_refid = match.groups()[0]
if match_refid in self.node_by_refid:
doxygen_xml_file_ownerships[f].append(match_refid)
continue
# lastly, see if we are starting the code listing
if "<programlisting>" in line:
processing_code_listing = True
elif processing_code_listing:
if "</programlisting>" in line:
processing_code_listing = False
else:
f.program_listing.append(line)
except Exception:
exclaimError("Unable to process doxygen xml for file [{}].\n".format(f.name))
#
# IMPORTANT: do not set the parent field of anything being added as a child to the file
#
# hack to make things work right on RTD
if EXHALE_API_DOXYGEN_STRIP_FROM_PATH is not None:
for f in self.files:
f.location = f.location.replace(EXHALE_API_DOXYGEN_STRIP_FROM_PATH, "")
if f.location[0] == "/":
f.location = f.location[1:]
# now that we have parsed all the listed refid's in the doxygen xml, reparent
# the nodes that we care about
for f in self.files:
for match_refid in doxygen_xml_file_ownerships[f]:
child = self.node_by_refid[match_refid]
if child.kind == "struct" or child.kind == "class" or child.kind == "function" or \
child.kind == "typedef" or child.kind == "define" or child.kind == "enum" or \
child.kind == "union":
already_there = False
for fc in f.children:
if child.name == fc.name:
already_there = True
break
if not already_there:
# special treatment for unions: ignore if it is a class union
if child.kind == "union":
for u in self.unions:
if child.name == u.name:
f.children.append(child)
break
else:
f.children.append(child)
elif child.kind == "namespace":
already_there = False
for fc in f.namespaces_used:
if child.name == fc.name:
already_there = True
break
if not already_there:
f.namespaces_used.append(child)
# last but not least: some kinds declared in the file are scoped in a
# namespace; they will show up in the programlisting, but not at the top level.
for f in self.files:
potential_orphans = []
for n in f.namespaces_used:
for child in n.children:
if child.kind == "enum" or child.kind == "variable" or \
child.kind == "function" or child.kind == "typedef" or \
child.kind == "union":
potential_orphans.append(child)
# now that we have a list of potential orphans, see if this doxygen xml had
# the refid of a given child present.
for orphan in potential_orphans:
unresolved_name = orphan.name.split("::")[-1]
if f.refid in orphan.refid and any(unresolved_name in line for line in f.program_listing):
if orphan not in f.children:
f.children.append(orphan)
def filePostProcess(self):
'''
The real name of this method should be ``reparentFiles``, but to avoid confusion
about the stage at which it must happen, it is named this way instead. After the
:func:`exhale.ExhaleRoot.fileRefDiscovery` method has been called, each file
will have its location parsed. This method reparents files to directories
accordingly, so the file view hierarchy can be complete.
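The owning directory is derived from the parsed location; for a hypothetical
file:

.. code-block:: py

   location = "include/arrays/element.h"
   dir_loc_parts = location.split("/")[:-1]  # ['include', 'arrays']
   dir_path = "/".join(dir_loc_parts)        # 'include/arrays'
   # the file becomes a child of the dir node whose name is 'include/arrays'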
'''
for f in self.files:
dir_loc_parts = f.location.split("/")[:-1]
num_parts = len(dir_loc_parts)
# nothing to do, at the top level
if num_parts == 0:
continue
dir_path = "/".join(p for p in dir_loc_parts)
nodes_remaining = [d for d in self.dirs]
while len(nodes_remaining) > 0:
d = nodes_remaining.pop()
if d.name in dir_path:
# we have found the directory we want
if d.name == dir_path:
d.children.append(f)
f.parent = d
break
# otherwise, try and find an owner
else:
nodes_remaining = []
for child in d.children:
if child.kind == "dir":
nodes_remaining.append(child)
def sortInternals(self):
'''
Sort all internal lists (``class_like``, ``namespaces``, ``variables``, etc)
mostly how doxygen would, alphabetical but also hierarchical (e.g. structs
appear before classes in listings). Some internal lists are just sorted, and
some are deep sorted (:func:`exhale.ExhaleRoot.deepSortList`).
'''
# some of the lists only need to be sorted, some of them need to be sorted and
# have each node sort its children
# leaf-like lists: no child sort
self.defines.sort()
self.enums.sort()
self.enum_values.sort()
self.functions.sort()
self.groups.sort()
self.typedefs.sort()
self.variables.sort()
# hierarchical lists: sort children
self.deepSortList(self.class_like)
self.deepSortList(self.namespaces)
self.deepSortList(self.unions)
self.deepSortList(self.files)
self.deepSortList(self.dirs)
def deepSortList(self, lst):
'''
For hierarchical internal lists such as ``namespaces``, we want to sort both the
list as well as have each child sort its children by calling
:func:`exhale.ExhaleNode.typeSort`.
:Parameters:
``lst`` (list)
The list of ExhaleNode objects to be deep sorted.
'''
lst.sort()
for l in lst:
l.typeSort()
####################################################################################
#
##
### Library generation.
##
#
####################################################################################
def generateFullAPI(self):
'''
Since we are not going to use some of the breathe directives (e.g. namespace or
file), when representing the different views of the generated API we will need:
1. Generate a single reStructuredText document for every node that has either
no children, or children that are leaf nodes.
2. When building the view hierarchies (class view and file view), provide a link
to the appropriate files generated previously.
If adding onto the framework to, say, add another view (``from future import groups``),
you would link from a reStructuredText document to one of the individually
generated files using the value of ``link_name`` for a given ExhaleNode object.
This method calls in this order:
1. :func:`exhale.ExhaleRoot.generateAPIRootHeader`
2. :func:`exhale.ExhaleRoot.generateNodeDocuments`
3. :func:`exhale.ExhaleRoot.generateAPIRootBody`
4. :func:`exhale.ExhaleRoot.generateAPIRootSummary`
'''
self.generateAPIRootHeader()
self.generateNodeDocuments()
self.generateAPIRootBody()
self.generateAPIRootSummary()
def generateAPIRootHeader(self):
'''
This method creates the root library api file that will include all of the
different hierarchy views and full api listing. If ``self.root_directory`` is
not a current directory, it is created first. Afterward, the root API file is
created and its title is written, as well as the value of
``self.root_file_description``.
'''
try:
if not os.path.isdir(self.root_directory):
os.mkdir(self.root_directory)
except Exception as e:
exclaimError("Cannot create the directory: {}\nError message: {}".format(self.root_directory, e))
raise Exception("Fatal error generating the api root, cannot continue.")
try:
with open(self.full_root_file_path, "w") as generated_index:
generated_index.write("{}\n{}\n\n{}\n\n".format(
self.root_file_title, EXHALE_FILE_HEADING, self.root_file_description)
)
except Exception:
exclaimError("Unable to create the root api file / header: {}".format(self.full_root_file_path))
raise Exception("Fatal error generating the api root, cannot continue.")
def generateNodeDocuments(self):
'''
Creates all of the reStructuredText documents related to types parsed by
Doxygen. This includes all leaf-like documents (``class``, ``struct``,
``enum``, ``typedef``, ``union``, ``variable``, and ``define``), as well as
namespace, file, and directory pages.
During the reparenting phase of the parsing process, nested items were added as
a child to their actual parent. For classes, structs, enums, and unions, if
it was reparented to a ``namespace`` it will *remain* in its respective
``self.<breathe_kind>`` list. However, if it was an internally declared child
of a class or struct (nested classes, structs, enums, and unions), this node
will be removed from its ``self.<breathe_kind>`` list to avoid duplication in
the class hierarchy generation.
When generating the full API, though, we will want to include all of these and
therefore must call :func:`exhale.ExhaleRoot.generateSingleNodeRST` with all of
the nested items. For nested classes and structs, this is done by just calling
``node.findNestedClassLike`` for every node in ``self.class_like``. The
resulting list then has all of ``self.class_like``, as well as any nested
classes and structs found. With ``enum`` and ``union``, these would have been
reparented to a **class** or **struct** if it was removed from the relevant
``self.<breathe_kind>`` list. This means we must make sure that we generate the
single node RST documents for everything by finding the nested enums and unions
from ``self.class_like``, as well as everything in ``self.enums`` and
``self.unions``.
'''
# initialize all of the nodes
for node in self.all_nodes:
self.initializeNodeFilenameAndLink(node)
# find the potentially nested items that were reparented
nested_enums = []
nested_unions = []
nested_class_like = []
for cl in self.class_like:
cl.findNestedEnums(nested_enums)
cl.findNestedUnions(nested_unions)
cl.findNestedClassLike(nested_class_like)
# generate all of the leaf-like documents
for node in itertools.chain(nested_class_like, self.enums, nested_enums,
self.unions, nested_unions, self.functions,
self.typedefs, self.variables, self.defines):
self.generateSingleNodeRST(node)
# generate the remaining parent-like documents
self.generateNamespaceNodeDocuments()
self.generateFileNodeDocuments()
self.generateDirectoryNodeDocuments()
def initializeNodeFilenameAndLink(self, node):
'''
Sets the ``file_name`` and ``link_name`` for the specified node. If the kind
of this node is "file", then this method will also set the ``program_file``
as well as the ``program_link_name`` fields.
Since we are operating inside of a ``containmentFolder``, this method **will**
include ``self.root_directory`` in this path so that you can just use::
with open(node.file_name, "w") as gen_file:
... write the file ...
Having the ``containmentFolder`` is important for when we want to generate the
file, but when we want to use it with ``include`` or ``toctree`` this will
need to change. Refer to :func:`exhale.ExhaleRoot.gerrymanderNodeFilenames`.
This method also sets the value of ``node.title``, which will be used in both
the reStructuredText document of the node as well as the links generated in the
class view hierarchy (<a href="..."> for the ``createTreeView = True`` option).
:Parameters:
``node`` (ExhaleNode)
The node that we are setting the above information for.
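A hedged illustration of the naming scheme, assuming ``qualifyKind("class")``
returns ``"Class"`` and ``self.root_directory == "api"``:

.. code-block:: py

   html_safe_name = "external::Arbiter".replace(":", "_").replace("/", "_")
   # html_safe_name == "external__Arbiter"
   # node.file_name -> "api/exhale_class_external__Arbiter.rst"
   # node.link_name -> "class_external__Arbiter"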
'''
# create the file and link names
html_safe_name = node.name.replace(":", "_").replace("/", "_")
node.file_name = "{}/exhale_{}_{}.rst".format(self.root_directory, node.kind, html_safe_name)
node.link_name = "{}_{}".format(qualifyKind(node.kind).lower(), html_safe_name)
if node.kind == "file":
# account for same file name in different directory
html_safe_name = node.location.replace("/", "_")
node.file_name = "{}/exhale_{}_{}.rst".format(self.root_directory, node.kind, html_safe_name)
node.link_name = "{}_{}".format(qualifyKind(node.kind).lower(), html_safe_name)
node.program_file = "{}/exhale_program_listing_file_{}.rst".format(
self.root_directory, html_safe_name
)
node.program_link_name = "program_listing_file_{}".format(html_safe_name)
# create the title for this node.
if node.kind == "dir":
title = node.name.split("/")[-1]
# breathe does not prepend the namespace for variables and typedefs, so
# I choose to leave the fully qualified name in the title for added clarity
elif node.kind == "variable" or node.kind == "typedef":
title = node.name
else:
#
# :TODO: This is probably breaking template specializations, need to redo
# the html_safe_name, file_name, and link_name to account for these
# as well as include documentation for how to link to partial
# template specializations.
#
# That is, need to do something like
#
# html_safe_name = node.name.replace(":", "_")
# .replace("/", "_")
# .replace(" ", "_")
# .replace("<", "LT_")
# .replace(">", "_GT")
#
# Or something like that...
#
first_lt = node.name.find("<")
last_gt = node.name.rfind(">")
# dealing with a template, special treatment necessary
if first_lt > -1 and last_gt > -1:
title = "{}{}".format(
node.name[:first_lt].split("::")[-1], # remove namespaces
node.name[first_lt:last_gt + 1] # template params
)
html_safe_name = (title.replace(":", "_").replace("/", "_").replace(" ", "_")
.replace("<", "LT_").replace(">", "_GT").replace(",", ""))
node.file_name = "{}/exhale_{}_{}.rst".format(self.root_directory, node.kind, html_safe_name)
node.link_name = "{}_{}".format(qualifyKind(node.kind).lower(), html_safe_name)
if node.kind == "file":
node.program_file = "{}/exhale_program_listing_file_{}.rst".format(
self.root_directory, html_safe_name
)
node.program_link_name = "program_listing_file_{}".format(html_safe_name)
else:
title = node.name.split("::")[-1]
# additionally, I feel that nested classes should have their fully qualified
# name without namespaces for clarity
prepend_parent = False
if node.kind == "class" or node.kind == "struct" or node.kind == "enum" or node.kind == "union":
if node.parent is not None and (node.parent.kind == "class" or node.parent.kind == "struct"):
prepend_parent = True
if prepend_parent:
title = "{}::{}".format(node.parent.name.split("::")[-1], title)
node.title = "{} {}".format(qualifyKind(node.kind), title)
def generateSingleNodeRST(self, node):
'''
Creates the reStructuredText document for the leaf-like node object. This
method should only be used with nodes in the following member lists:
- ``self.class_like``
- ``self.enums``
- ``self.functions``
- ``self.typedefs``
- ``self.unions``
- ``self.variables``
- ``self.defines``
File, directory, and namespace nodes are treated separately.
:Parameters:
``node`` (ExhaleNode)
The leaf-like node being generated by this method.
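The generated document for a hypothetical class looks roughly like the
following (``doxygenclass`` comes from ``kindAsBreatheDirective``; the heading
underline and any ``specificationsForKind`` output are assumptions here):

.. code-block:: rst

   .. _class_external__Arbiter:

   Class Arbiter
   =============

   - Defined in :ref:`file_include_arbiter.h`

   .. doxygenclass:: external::Arbiter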
'''
try:
with open(node.file_name, "w") as gen_file:
# generate a link label for every generated file
link_declaration = ".. _{}:\n\n".format(node.link_name)
header = "{}\n{}\n\n".format(node.title, EXHALE_FILE_HEADING)
# link back to the file this was defined in
file_included = False
for f in self.files:
if node in f.children:
if file_included:
raise RuntimeError("Critical error: this node is parented to multiple files.\n\nNode: {}".format(node.name))
header = "{}- Defined in :ref:`{}`\n\n".format(header, f.link_name)
file_included = True
# if this is a nested type, link back to its parent
if node.parent is not None and (node.parent.kind == "struct" or node.parent.kind == "class"):
# there is still a chance to recover the defining file through the parent;
# this likely does not work past one layer of nesting
# TODO: test with deeply nested classes and walk the parent links upward;
# parent links should remain class or struct until a namespace or file is reached
if not file_included:
parent_traverser = node.parent
while parent_traverser is not None:
for f in self.files:
if node.parent in f.children:
if file_included:
raise RuntimeError("Critical error: this node is parented to multiple files.\n\nNode: {}".format(node.name))
header = "{}- Defined in :ref:`{}`\n\n".format(header, f.link_name)
file_included = True
if node not in f.children:
f.children.append(node)
if file_included:
parent_traverser = None
else:
parent_traverser = parent_traverser.parent
header = "{}- Nested type of :ref:`{}`\n\n".format(header, node.parent.link_name)
# if this has nested types, link to them
if node.kind == "class" or node.kind == "struct":
nested_children = []
for c in node.children:
c.findNestedEnums(nested_children)
c.findNestedUnions(nested_children)
c.findNestedClassLike(nested_children)
if nested_children:
# build up a list of links, custom sort function will force
# double nested and beyond to appear after their parent by
# sorting on their name
nested_children.sort(key=lambda x: x.name)
nested_child_stream = StringIO()
for nc in nested_children:
nested_child_stream.write("- :ref:`{}`\n".format(nc.link_name))
# extract the list of links and add them as a subsection in the header
nested_child_string = nested_child_stream.getvalue()
nested_child_stream.close()
header = "{}**Nested Types**:\n\n{}\n\n".format(header, nested_child_string)
# inject the appropriate doxygen directive and name of this node
directive = ".. {}:: {}\n".format(kindAsBreatheDirective(node.kind), node.name)
# include any specific directives for this doxygen directive
specifications = "{}\n\n".format(specificationsForKind(node.kind))
gen_file.write("{}{}{}{}".format(link_declaration, header, directive, specifications))
except Exception:
exclaimError("Critical error while generating the file for [{}]".format(node.file_name))
def generateNamespaceNodeDocuments(self):
'''
Generates the reStructuredText document for every namespace, including nested
namespaces that were removed from ``self.namespaces`` (but added as children
to one of the namespaces in ``self.namespaces``).
The documents generated do not use the Breathe namespace directive, but instead
link to the relevant documents associated with this namespace.
'''
# go through all of the top level namespaces
for n in self.namespaces:
# find any nested namespaces
nested_namespaces = []
for child in n.children:
child.findNestedNamespaces(nested_namespaces)
# generate the children first
for nested in reversed(sorted(nested_namespaces)):
self.generateSingleNamespace(nested)
# generate this top level namespace
self.generateSingleNamespace(n)
def generateSingleNamespace(self, nspace):
'''
Helper method for :func:`exhale.ExhaleRoot.generateNamespaceNodeDocuments`.
Writes the reStructuredText file for the given namespace.
:Parameters:
``nspace`` (ExhaleNode)
The namespace node to create the reStructuredText document for.
'''
try:
with open(nspace.file_name, "w") as gen_file:
# generate a link label for every generated file
link_declaration = ".. _{}:\n\n".format(nspace.link_name)
# every generated file must have a header for sphinx to be happy
nspace.title = "{} {}".format(qualifyKind(nspace.kind), nspace.name)
header = "{}\n{}\n\n".format(nspace.title, EXHALE_FILE_HEADING)
# generate the headings and links for the children
children_string = self.generateNamespaceChildrenString(nspace)
# write it all out
gen_file.write("{}{}{}\n\n".format(link_declaration, header, children_string))
except Exception:
exclaimError("Critical error while generating the file for [{}]".format(nspace.file_name))
def generateNamespaceChildrenString(self, nspace):
'''
Helper method for :func:`exhale.ExhaleRoot.generateSingleNamespace`, and
:func:`exhale.ExhaleRoot.generateFileNodeDocuments`. Builds the
body text for the namespace node document that links to all of the child
namespaces, structs, classes, functions, typedefs, unions, and variables
associated with this namespace.
:Parameters:
``nspace`` (ExhaleNode)
The namespace node we are generating the body text for.
:Return (str):
The string to be written to the namespace node's reStructuredText document.
'''
# sort the children
nsp_namespaces = []
nsp_nested_class_like = []
nsp_enums = []
nsp_functions = []
nsp_typedefs = []
nsp_unions = []
nsp_variables = []
for child in nspace.children:
if child.kind == "namespace":
nsp_namespaces.append(child)
elif child.kind == "struct" or child.kind == "class":
child.findNestedClassLike(nsp_nested_class_like)
child.findNestedEnums(nsp_enums)
child.findNestedUnions(nsp_unions)
elif child.kind == "enum":
nsp_enums.append(child)
elif child.kind == "function":
nsp_functions.append(child)
elif child.kind == "typedef":
nsp_typedefs.append(child)
elif child.kind == "union":
nsp_unions.append(child)
elif child.kind == "variable":
nsp_variables.append(child)
# generate their headings if they exist (no Defines...that's not a C++ thing...)
children_string = self.generateSortedChildListString("Namespaces", "", nsp_namespaces)
children_string = self.generateSortedChildListString("Classes", children_string, nsp_nested_class_like)
children_string = self.generateSortedChildListString("Enums", children_string, nsp_enums)
children_string = self.generateSortedChildListString("Functions", children_string, nsp_functions)
children_string = self.generateSortedChildListString("Typedefs", children_string, nsp_typedefs)
children_string = self.generateSortedChildListString("Unions", children_string, nsp_unions)
children_string = self.generateSortedChildListString("Variables", children_string, nsp_variables)
return children_string
def generateSortedChildListString(self, sectionTitle, previousString, lst):
'''
Helper method for :func:`exhale.ExhaleRoot.generateNamespaceChildrenString`.
Used to build up a continuous string with all of the children separated out into
titled sections.
This generates a new titled section with ``sectionTitle`` and puts a link to
every node found in ``lst`` in this section. The newly created section is
appended to ``previousString`` and then returned.
:TODO:
Change this to use string streams like the other methods instead.
:Parameters:
``sectionTitle`` (str)
The title of the section for this list of children.
``previousString`` (str)
The string to append the newly created section to.
``lst`` (list)
A list of ExhaleNode objects that are to be linked to from this section.
This method sorts ``lst`` in place.
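A sample of the section this produces, assuming ``EXHALE_SECTION_HEADING`` is
a run of hyphens and the nodes are hypothetical namespaces:

.. code-block:: rst

   Namespaces
   ----------

   - :ref:`namespace_external`
   - :ref:`namespace_internal`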
'''
if lst:
lst.sort()
new_string = "{}\n\n{}\n{}\n".format(previousString, sectionTitle, EXHALE_SECTION_HEADING)
for l in lst:
new_string = "{}\n- :ref:`{}`".format(new_string, l.link_name)
return new_string
else:
return previousString
def generateFileNodeDocuments(self):
'''
Generates the reStructuredText documents for files as well as the file's
program listing reStructuredText document if applicable. Refer to
:ref:`usage_customizing_file_pages` for changing the output of this method.
The remainder of the file lists all nodes that have been discovered to be
defined (e.g. classes) or referred to (e.g. included files or files that include
this file).
'''
for f in self.files:
# if the programlisting was included, length will be at least 1 line
if len(f.program_listing) > 0:
include_program_listing = True
full_program_listing = '.. code-block:: cpp\n\n'
# need to reformat each line to remove xml tags / put <>& back in
for pgf_line in f.program_listing:
fixed_whitespace = re.sub(r'<sp/>', ' ', pgf_line)
# for our purposes, this is good enough:
# http://stackoverflow.com/a/4869782/3814202
no_xml_tags = re.sub(r'<[^<]+?>', '', fixed_whitespace)
revive_lt = re.sub(r'&lt;', '<', no_xml_tags)
revive_gt = re.sub(r'&gt;', '>', revive_lt)
revive_quote = re.sub(r'&quot;', '"', revive_gt)
revive_apos = re.sub(r'&apos;', "'", revive_quote)
revive_amp = re.sub(r'&amp;', '&', revive_apos)
full_program_listing = "{} {}".format(full_program_listing, revive_amp)
# create the programlisting file
try:
with open(f.program_file, "w") as gen_file:
# generate a link label for every generated file
link_declaration = ".. _{}:\n\n".format(f.program_link_name)
# every generated file must have a header for sphinx to be happy
prog_title = "Program Listing for {} {}".format(qualifyKind(f.kind), f.name)
header = "{}\n{}\n\n".format(prog_title, EXHALE_FILE_HEADING)
return_link = "- Return to documentation for :ref:`{}`\n\n".format(f.link_name)
# write it all out
gen_file.write("{}{}{}{}\n\n".format(
link_declaration, header, return_link, full_program_listing)
)
except Exception:
exclaimError("Critical error while generating the file for [{}]".format(f.file_name))
else:
include_program_listing = False
for f in self.files:
if len(f.location) > 0:
file_definition = "Definition (``{}``)\n{}\n\n".format(
f.location, EXHALE_SECTION_HEADING
)
else:
file_definition = ""
# check the programlisting per file; ``include_program_listing`` computed in
# the loop above would leak the value from the last file processed
if len(f.program_listing) > 0 and file_definition != "":
file_definition = "{}.. toctree::\n :maxdepth: 1\n\n {}\n\n".format(
file_definition, f.program_file.split("/")[-1] # file path still has directory
)
if len(f.includes) > 0:
file_includes = "Includes\n{}\n\n".format(EXHALE_SECTION_HEADING)
for incl in sorted(f.includes):
local_file = None
for incl_file in self.files:
if incl in incl_file.location:
local_file = incl_file
break
if local_file is not None:
file_includes = "{}- ``{}`` (:ref:`{}`)\n".format(
file_includes, incl, local_file.link_name
)
else:
file_includes = "{}- ``{}``\n".format(file_includes, incl)
else:
file_includes = ""
if len(f.included_by) > 0:
file_included_by = "Included By\n{}\n\n".format(EXHALE_SECTION_HEADING)
for incl_ref, incl_name in f.included_by:
for incl_file in self.files:
if incl_ref == incl_file.refid:
file_included_by = "{}- :ref:`{}`\n".format(file_included_by, incl_file.link_name)
break
else:
file_included_by = ""
# generate their headings if they exist --- DO NOT USE findNested*, these are included recursively
file_structs = []
file_classes = []
file_enums = []
file_functions = []
file_typedefs = []
file_unions = []
file_variables = []
file_defines = []
for child in f.children:
if child.kind == "struct":
file_structs.append(child)
elif child.kind == "class":
file_classes.append(child)
elif child.kind == "enum":
file_enums.append(child)
elif child.kind == "function":
file_functions.append(child)
elif child.kind == "typedef":
file_typedefs.append(child)
elif child.kind == "union":
file_unions.append(child)
elif child.kind == "variable":
file_variables.append(child)
elif child.kind == "define":
file_defines.append(child)
children_string = self.generateSortedChildListString("Namespaces", "", f.namespaces_used)
children_string = self.generateSortedChildListString("Classes", children_string, file_structs + file_classes)
children_string = self.generateSortedChildListString("Enums", children_string, file_enums)
children_string = self.generateSortedChildListString("Functions", children_string, file_functions)
children_string = self.generateSortedChildListString("Defines", children_string, file_defines)
children_string = self.generateSortedChildListString("Typedefs", children_string, file_typedefs)
children_string = self.generateSortedChildListString("Unions", children_string, file_unions)
children_string = self.generateSortedChildListString("Variables", children_string, file_variables)
try:
with open(f.file_name, "w") as gen_file:
# generate a link label for every generated file
link_declaration = ".. _{}:\n\n".format(f.link_name)
# every generated file must have a header for sphinx to be happy
f.title = "{} {}".format(qualifyKind(f.kind), f.name)
header = "{}\n{}\n\n".format(f.title, EXHALE_FILE_HEADING)
# write it all out
gen_file.write("{}{}{}{}\n{}\n{}\n\n".format(
link_declaration, header, file_definition, file_includes, file_included_by, children_string)
)
except Exception:
exclaimError("Critical error while generating the file for [{}]".format(f.file_name))
if EXHALE_GENERATE_BREATHE_FILE_DIRECTIVES:
try:
with open(f.file_name, "a") as gen_file:
                        # append the breathe directive for the full file listing
gen_file.write(
"\nFull File Listing\n{}\n\n"
".. {}:: {}\n"
"{}\n\n".format(EXHALE_SECTION_HEADING, kindAsBreatheDirective(f.kind), f.location, specificationsForKind(f.kind))
)
                except Exception:
exclaimError("Critical error while generating the breathe directive for [{}]".format(f.file_name))
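    # Illustrative sketch (not from the original source) of the reST appended
    # above, assuming kindAsBreatheDirective("file") yields the breathe
    # "doxygenfile" directive and EXHALE_SECTION_HEADING is a row of section
    # underline characters:
    #
    #   Full File Listing
    #   -----------------
    #
    #   .. doxygenfile:: include/example.hpp
    #
    # followed by whatever options specificationsForKind("file") supplies.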
def generateDirectoryNodeDocuments(self):
'''
Generates all of the directory reStructuredText documents.
'''
all_dirs = []
for d in self.dirs:
d.findNestedDirectories(all_dirs)
for d in all_dirs:
self.generateDirectoryNodeRST(d)
def generateDirectoryNodeRST(self, node):
'''
Helper method for :func:`exhale.ExhaleRoot.generateDirectoryNodeDocuments`.
Generates the reStructuredText documents for the given directory node.
Directory nodes will only link to files and subdirectories within it.
:Parameters:
``node`` (ExhaleNode)
The directory node to generate the reStructuredText document for.
'''
# find the relevant children: directories and files only
child_dirs = []
child_files = []
for c in node.children:
if c.kind == "dir":
child_dirs.append(c)
elif c.kind == "file":
child_files.append(c)
# generate the subdirectory section
if len(child_dirs) > 0:
child_dirs_string = "Subdirectories\n{}\n\n".format(EXHALE_SECTION_HEADING)
for child_dir in sorted(child_dirs):
child_dirs_string = "{}- :ref:`{}`\n".format(child_dirs_string, child_dir.link_name)
else:
child_dirs_string = ""
# generate the files section
if len(child_files) > 0:
child_files_string = "Files\n{}\n\n".format(EXHALE_SECTION_HEADING)
for child_file in sorted(child_files):
child_files_string = "{}- :ref:`{}`\n".format(child_files_string, child_file.link_name)
else:
child_files_string = ""
# generate the file for this directory
try:
with open(node.file_name, "w") as gen_file:
# generate a link label for every generated file
link_declaration = ".. _{}:\n\n".format(node.link_name)
header = "{}\n{}\n\n".format(node.title, EXHALE_FILE_HEADING)
# generate the headings and links for the children
# write it all out
gen_file.write("{}{}{}\n{}\n\n".format(
link_declaration, header, child_dirs_string, child_files_string)
)
        except Exception:
exclaimError("Critical error while generating the file for [{}]".format(node.file_name))
def generateAPIRootBody(self):
'''
Generates the root library api file's body text. The method calls
:func:`exhale.ExhaleRoot.gerrymanderNodeFilenames` first to enable proper
internal linkage between reStructuredText documents. Afterward, it calls
:func:`exhale.ExhaleRoot.generateViewHierarchies` followed by
:func:`exhale.ExhaleRoot.generateUnabridgedAPI` to generate both hierarchies as
well as the full API listing. As a result, three files will now be ready:
1. ``self.class_view_file``
2. ``self.directory_view_file``
3. ``self.unabridged_api_file``
These three files are then *included* into the root library file. The
consequence of using an ``include`` directive is that Sphinx will complain about
these three files never being included in any ``toctree`` directive. These
warnings are expected, and preferred to using a ``toctree`` because otherwise
the user would have to click on the class view link from the ``toctree`` in
order to see it. This behavior has been acceptable for me so far, but if it
is causing you problems please raise an issue on GitHub and I may be able to
conditionally use a ``toctree`` if you really need it.
'''
try:
self.gerrymanderNodeFilenames()
self.generateViewHierarchies()
self.generateUnabridgedAPI()
with open(self.full_root_file_path, "a") as generated_index:
generated_index.write(
".. include:: {}\n\n".format(self.class_view_file.split("/")[-1])
)
generated_index.write(
".. include:: {}\n\n".format(self.directory_view_file.split("/")[-1])
)
generated_index.write(
".. include:: {}\n\n".format(self.unabridged_api_file.split("/")[-1])
)
except Exception as e:
exclaimError("Unable to create the root api body: {}".format(e))
def gerrymanderNodeFilenames(self):
'''
When creating nodes, the filename needs to be relative to ``conf.py``, so it
will include ``self.root_directory``. However, when generating the API, the
file we are writing to is in the same directory as the generated node files so
we need to remove the directory path from a given ExhaleNode's ``file_name``
before we can ``include`` it or use it in a ``toctree``.
'''
for node in self.all_nodes:
node.file_name = node.file_name.split("/")[-1]
if node.kind == "file":
node.program_file = node.program_file.split("/")[-1]
def generateViewHierarchies(self):
'''
Wrapper method to create the view hierarchies. Currently it just calls
:func:`exhale.ExhaleRoot.generateClassView` and
:func:`exhale.ExhaleRoot.generateDirectoryView` --- if you want to implement
        additional hierarchies, implement the additional hierarchy method and call it
from here. Then make sure to ``include`` it in
:func:`exhale.ExhaleRoot.generateAPIRootBody`.
'''
self.generateClassView(self.use_tree_view)
self.generateDirectoryView(self.use_tree_view)
def generateClassView(self, treeView):
'''
Generates the class view hierarchy, writing it to ``self.class_view_file``.
:Parameters:
``treeView`` (bool)
Whether or not to use the collapsibleList version. See the
``createTreeView`` description in :func:`exhale.generate`.
'''
class_view_stream = StringIO()
for n in self.namespaces:
n.toClassView(0, class_view_stream, treeView)
# Add everything that was not nested in a namespace.
missing = []
# class-like objects (structs and classes)
for cl in sorted(self.class_like):
if not cl.in_class_view:
missing.append(cl)
# enums
for e in sorted(self.enums):
if not e.in_class_view:
missing.append(e)
# unions
for u in sorted(self.unions):
if not u.in_class_view:
missing.append(u)
if len(missing) > 0:
idx = 0
last_missing_child = len(missing) - 1
for m in missing:
m.toClassView(0, class_view_stream, treeView, idx == last_missing_child)
idx += 1
elif treeView:
# need to restart since there were no missing children found, otherwise the
# last namespace will not correctly have a lastChild
class_view_stream.close()
class_view_stream = StringIO()
last_nspace_index = len(self.namespaces) - 1
for idx in range(last_nspace_index + 1):
nspace = self.namespaces[idx]
nspace.toClassView(0, class_view_stream, treeView, idx == last_nspace_index)
# extract the value from the stream and close it down
class_view_string = class_view_stream.getvalue()
class_view_stream.close()
# inject the raw html for the treeView unordered lists
if treeView:
# we need to indent everything to be under the .. raw:: html directive, add
# indentation so the html is readable while we are at it
indented = re.sub(r'(.+)', r' \1', class_view_string)
class_view_string = \
'.. raw:: html\n\n' \
' <ul class="treeView">\n' \
' <li>\n' \
' <ul class="collapsibleList">\n' \
'{}' \
' </ul><!-- collapsibleList -->\n' \
' </li><!-- only tree view element -->\n' \
' </ul><!-- treeView -->\n'.format(indented)
# write everything to file to be included in the root api later
try:
with open(self.class_view_file, "w") as cvf:
cvf.write("Class Hierarchy\n{}\n\n{}\n\n".format(EXHALE_SECTION_HEADING,
class_view_string))
except Exception as e:
exclaimError("Error writing the class hierarchy: {}".format(e))
def generateDirectoryView(self, treeView):
'''
Generates the file view hierarchy, writing it to ``self.directory_view_file``.
:Parameters:
``treeView`` (bool)
Whether or not to use the collapsibleList version. See the
``createTreeView`` description in :func:`exhale.generate`.
'''
directory_view_stream = StringIO()
for d in self.dirs:
d.toDirectoryView(0, directory_view_stream, treeView)
# add potential missing files (not sure if this is possible though)
missing = []
for f in sorted(self.files):
if not f.in_directory_view:
missing.append(f)
found_missing = len(missing) > 0
if found_missing:
idx = 0
last_missing_child = len(missing) - 1
for m in missing:
m.toDirectoryView(0, directory_view_stream, treeView, idx == last_missing_child)
idx += 1
elif treeView:
# need to restart since there were no missing children found, otherwise the
# last directory will not correctly have a lastChild
directory_view_stream.close()
directory_view_stream = StringIO()
last_dir_index = len(self.dirs) - 1
for idx in range(last_dir_index + 1):
curr_d = self.dirs[idx]
curr_d.toDirectoryView(0, directory_view_stream, treeView, idx == last_dir_index)
# extract the value from the stream and close it down
directory_view_string = directory_view_stream.getvalue()
directory_view_stream.close()
# inject the raw html for the treeView unordered lists
if treeView:
# we need to indent everything to be under the .. raw:: html directive, add
# indentation so the html is readable while we are at it
indented = re.sub(r'(.+)', r' \1', directory_view_string)
directory_view_string = \
'.. raw:: html\n\n' \
' <ul class="treeView">\n' \
' <li>\n' \
' <ul class="collapsibleList">\n' \
'{}' \
' </ul><!-- collapsibleList -->\n' \
' </li><!-- only tree view element -->\n' \
' </ul><!-- treeView -->\n'.format(indented)
# write everything to file to be included in the root api later
try:
with open(self.directory_view_file, "w") as dvf:
dvf.write("File Hierarchy\n{}\n\n{}\n\n".format(EXHALE_SECTION_HEADING,
directory_view_string))
except Exception as e:
exclaimError("Error writing the directory hierarchy: {}".format(e))
def generateUnabridgedAPI(self):
'''
Generates the unabridged (full) API listing into ``self.unabridged_api_file``.
This is necessary as some items may not show up in either hierarchy view,
depending on:
1. The item. For example, if a namespace has only one member which is a
variable, then neither the namespace nor the variable will be declared in the
class view hierarchy. It will be present in the file page it was declared in
but not on the main library page.
2. The configurations of Doxygen. For example, see the warning in
:func:`exhale.ExhaleRoot.fileRefDiscovery`. Items whose parents cannot be
        rediscovered without the programlisting will still be documented, their link
appearing in the unabridged API listing.
Currently, the API is generated in the following (somewhat arbitrary) order:
- Namespaces
- Classes and Structs
- Enums
- Unions
- Functions
- Variables
- Defines
- Typedefs
- Directories
- Files
If you want to change the ordering, just change the order of the calls to
:func:`exhale.ExhaleRoot.enumerateAll` in this method.
'''
try:
with open(self.unabridged_api_file, "w") as full_api_file:
# write the header
full_api_file.write("Full API\n{}\n\n".format(EXHALE_SECTION_HEADING))
# recover all namespaces that were reparented
all_namespaces = []
for n in self.namespaces:
n.findNestedNamespaces(all_namespaces)
# recover all directories that were reparented
all_directories = []
for d in self.dirs:
d.findNestedDirectories(all_directories)
# recover classes and structs that were reparented
all_class_like = []
for cl in self.class_like:
cl.findNestedClassLike(all_class_like)
# write everything to file: reorder these lines for different outcomes
self.enumerateAll("Namespaces", all_namespaces, full_api_file)
self.enumerateAll("Classes and Structs", all_class_like, full_api_file)
self.enumerateAll("Enums", self.enums, full_api_file)
self.enumerateAll("Unions", self.unions, full_api_file)
self.enumerateAll("Functions", self.functions, full_api_file)
self.enumerateAll("Variables", self.variables, full_api_file)
self.enumerateAll("Defines", self.defines, full_api_file)
self.enumerateAll("Typedefs", self.typedefs, full_api_file)
self.enumerateAll("Directories", all_directories, full_api_file)
self.enumerateAll("Files", self.files, full_api_file)
except Exception as e:
exclaimError("Error writing the unabridged API: {}".format(e))
def enumerateAll(self, subsectionTitle, lst, openFile):
'''
Helper function for :func:`exhale.ExhaleRoot.generateUnabridgedAPI`. Simply
writes a subsection to ``openFile`` (a ``toctree`` to the ``file_name``) of each
ExhaleNode in ``sorted(lst)`` if ``len(lst) > 0``. Otherwise, nothing is
written to the file.
:Parameters:
``subsectionTitle`` (str)
The title of this subsection, e.g. ``"Namespaces"`` or ``"Files"``.
``lst`` (list)
The list of ExhaleNodes to be enumerated in this subsection.
``openFile`` (File)
The **already open** file object to write to directly. No safety checks
are performed, make sure this is a real file object that has not been
closed already.
'''
if len(lst) > 0:
openFile.write("{}\n{}\n\n".format(subsectionTitle, EXHALE_SUBSECTION_HEADING))
for l in sorted(lst):
openFile.write(
".. toctree::\n"
" :maxdepth: {}\n\n"
" {}\n\n".format(EXHALE_API_TOCTREE_MAX_DEPTH, l.file_name)
)
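    # Example of the emitted reST (a sketch, assuming two nodes whose
    # ``file_name`` values are namespace_a.rst and namespace_b.rst, a
    # subsection underline of '-' characters, and a toctree depth of 5):
    #
    #   Namespaces
    #   ----------
    #
    #   .. toctree::
    #      :maxdepth: 5
    #
    #      namespace_a.rst
    #
    #   .. toctree::
    #      :maxdepth: 5
    #
    #      namespace_b.rst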
def generateAPIRootSummary(self):
'''
Writes the library API root summary to the main library file. See the
documentation for the key ``afterBodySummary`` in :func:`exhale.generate`.
'''
try:
with open(self.full_root_file_path, "a") as generated_index:
generated_index.write("{}\n\n".format(self.root_file_summary))
except Exception as e:
exclaimError("Unable to create the root api summary: {}".format(e))
####################################################################################
#
##
### Miscellaneous utility functions.
##
#
####################################################################################
def toConsole(self):
'''
Convenience function for printing out the entire API being generated to the
console. Unused in the release, but is helpful for debugging ;)
'''
self.consoleFormat("Classes and Structs", self.class_like)
self.consoleFormat("Defines", self.defines)
self.consoleFormat("Enums", self.enums)
self.consoleFormat("Enum Values", self.enum_values)
self.consoleFormat("Functions", self.functions)
self.consoleFormat("Files", self.files)
self.consoleFormat("Directories", self.dirs)
self.consoleFormat("Groups", self.groups)
self.consoleFormat("Namespaces", self.namespaces)
self.consoleFormat("Typedefs", self.typedefs)
self.consoleFormat("Unions", self.unions)
self.consoleFormat("Variables", self.variables)
def consoleFormat(self, sectionTitle, lst):
'''
Helper method for :func:`exhale.ExhaleRoot.toConsole`. Prints the given
``sectionTitle`` and calls :func:`exhale.ExhaleNode.toConsole` with ``0`` as the
level for every ExhaleNode in ``lst``.
:Parameters:
``sectionTitle`` (str)
The title that will be printed with some visual separators around it.
``lst`` (list)
The list of ExhaleNodes to print to the console.
'''
print("###########################################################")
print("## {}".format(sectionTitle))
print("###########################################################")
for l in lst:
l.toConsole(0)
| KIKI007/ReusedPrinter | external/nanogui/docs/exhale.py | Python | mpl-2.0 | 136,535 |
import os
import sys
from setuptools import setup, find_packages
pwd = os.path.abspath(os.path.dirname(__file__))
sys.path.append(pwd)
try:
import nuclai.__main__ as nuclai
VERSION = nuclai.__version__
except ImportError as e:
VERSION = 'N/A'
setup(name='nuclai',
version=VERSION,
      description='Cross-platform Installer & Runner for nucl.ai tutorials and exercises.',
author='Alex J. Champandard',
url='https://github.com/aigamedev/nuclai-installer',
long_description=open(os.path.join(pwd, 'README.rst')).read(),
classifiers=[
'Development Status :: 3 - Alpha',
'Programming Language :: Python :: 3.4',
'License :: Free For Home Use',
'Topic :: Scientific/Engineering :: Artificial Intelligence'
],
license='GNU GPLv3',
scripts=['scripts/nuclai', 'scripts/nuclai.bat'],
packages=find_packages(),
include_package_data=True,
      install_requires=['colorama'] if sys.platform == 'win32' else [])
| aigamedev/nuclai-installer | setup.py | Python | gpl-3.0 | 1,050 |
#!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2016, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import os
import pickle
from matplotlib import pyplot as plt
import matplotlib as mpl
import numpy
from plot import computeAccuracy
from plot import readExperiment
mpl.rcParams['pdf.fonttype'] = 42
plt.ion()
plt.close('all')
def loadRepeatedExperiments(expDir, killCellPercentRange, seedRange, killCellAt):
meanAccuracy = []
stdAccuracy = []
for killCellPercent in killCellPercentRange:
accuracyList = []
    for seed in seedRange:
if killCellPercent == 0:
experiment = os.path.join(
expDir,"kill_cell_percent{:1.1f}seed{:1.1f}/0.log".format(
killCellPercent, seed))
else:
experiment = os.path.join(
expDir,"kill_cell_percent{:1.2f}seed{:1.1f}/0.log".format(
killCellPercent, seed))
expResults = readExperiment(experiment)
print "Load Experiment: ", experiment
(accuracy, x) = computeAccuracy(expResults['predictions'],
expResults['truths'],
expResults['iterations'],
resets=expResults['resets'],
randoms=expResults['randoms'])
idx = numpy.array([i for i in range(len(x)) if x[i] > killCellAt])
accuracy = numpy.array(accuracy)
accuracyList.append(float(numpy.sum(accuracy[idx])) / len(accuracy[idx]))
meanAccuracy.append(numpy.mean(accuracyList))
stdAccuracy.append(numpy.std(accuracyList))
return meanAccuracy, stdAccuracy
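# Example call (a sketch; it assumes the experiment logs referenced below were
# already produced by the run scripts and exist on disk):
#
#   mean, std = loadRepeatedExperiments(
#       "tm/results/high-order-distributed-random-kill-cell/",
#       killCellPercentRange=list(numpy.arange(6) / 10.0),
#       seedRange=range(10),
#       killCellAt=10000)
#
# mean[i] and std[i] are the post-lesion accuracy statistics for the i-th
# cell-death fraction, averaged over the seeds.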
if __name__ == '__main__':
try:
# Load raw experiment results
# You have to run the experiments
killCellPercentRange = list(numpy.arange(6) / 10.0)
seedRange = range(10)
(meanAccuracyLSTM, stdAccuracyLSTM) = loadRepeatedExperiments(
"lstm/results/high-order-distributed-random-kill-cell/",
killCellPercentRange, seedRange, killCellAt=10000)
(meanAccuracyELM, stdAccuracyELM) = loadRepeatedExperiments(
"elm/results/high-order-distributed-random-kill-cell/",
killCellPercentRange, seedRange, killCellAt=20000)
(meanAccuracyHTM, stdAccuracyHTM) = loadRepeatedExperiments(
"tm/results/high-order-distributed-random-kill-cell/",
killCellPercentRange, seedRange, killCellAt=10000)
expResults = {}
expResults['HTM'] = {'x': killCellPercentRange,
'meanAccuracy': meanAccuracyHTM,
'stdAccuracy': stdAccuracyHTM}
expResults['ELM'] = {'x': killCellPercentRange,
'meanAccuracy': meanAccuracyELM,
'stdAccuracy': stdAccuracyELM}
expResults['LSTM'] = {'x': killCellPercentRange,
'meanAccuracy': meanAccuracyLSTM,
'stdAccuracy': stdAccuracyLSTM}
output = open('./result/FaultTolerantExpt.pkl', 'wb')
pickle.dump(expResults, output, -1)
output.close()
  except Exception:
print "Cannot find raw experiment results"
print "Plot using saved processed experiment results"
expResults = pickle.load(open('./result/FaultTolerantExpt.pkl', 'rb'))
plt.figure(1)
colorList = {'HTM': 'r', 'ELM': 'b', 'LSTM': 'g'}
for model in ['HTM', 'ELM', 'LSTM']:
plt.errorbar(expResults[model]['x'],
expResults[model]['meanAccuracy'],
expResults[model]['stdAccuracy'],
color=colorList[model],
marker='o')
plt.legend(['HTM', 'ELM', 'LSTM'], loc=3)
plt.xlabel('Fraction of cell death ')
plt.ylabel('Accuracy after cell death')
plt.ylim([0.1, 1.05])
plt.xlim([-0.02, .52])
plt.savefig('./result/model_performance_after_cell_death.pdf')
#
# for killCellPercent in KILLCELL_PERCENT:
# # HTM experiments
# tmResultDir = 'tm/result/'
# experiment = os.path.join(tmResultDir, "kill_cell_percent{:1.1f}".format(
# killCellPercent)) + '/0.log'
#
# expResults = readExperiment(experiment)
#
# killCellAt = 10000
# (accuracy, x) = computeAccuracy(expResults['predictions'][killCellAt:],
# expResults['truths'][killCellAt:],
# expResults['iterations'][killCellAt:],
# resets=expResults['resets'][killCellAt:],
# randoms=expResults['randoms'][killCellAt:])
# accuracyListTM.append(float(numpy.sum(accuracy)) / len(accuracy))
#
# # LSTM experiments
# lstmResultDir = "lstm/results/high-order-distributed-random-kill-cell/"
# experiment = lstmResultDir + \
# "kill_cell_percent{:1.2f}/0.log".format(killCellPercent)
#
# expResults = readExperiment(experiment)
#
# killCellAt = 10000
# (accuracy, x) = computeAccuracy(expResults['predictions'][killCellAt:],
# expResults['truths'][killCellAt:],
# expResults['iterations'][killCellAt:],
# resets=expResults['resets'][killCellAt:],
# randoms=expResults['randoms'][killCellAt:])
# accuracyListLSTM.append(float(numpy.sum(accuracy)) / len(accuracy))
#
# # ELM
# experiment = 'elm/results/high-order-distributed-random-kill-cell/' \
# 'kill_cell_percent' + "{:1.2f}".format(killCellPercent) + '/0.log'
#
# expResults = readExperiment(experiment)
#
# killCellAt = 20000
# (accuracy, x) = computeAccuracy(expResults['predictions'][killCellAt:],
# expResults['truths'][killCellAt:],
# expResults['iterations'][killCellAt:],
# resets=expResults['resets'][killCellAt:],
# randoms=expResults['randoms'][killCellAt:])
# accuracyListELM.append(float(numpy.sum(accuracy)) / len(accuracy))
#
# plt.figure(2)
# plt.plot(KILLCELL_PERCENT, accuracyListTM, 'r-^', label="HTM")
# plt.plot(KILLCELL_PERCENT, accuracyListLSTM, 'b-s', label="LSTM")
# plt.plot(KILLCELL_PERCENT, accuracyListELM, 'g-s', label="ELM")
# plt.xlabel('Fraction of cell death ')
# plt.ylabel('Accuracy after cell death')
# plt.ylim([0.1, 1.05])
# plt.legend()
# plt.savefig('./result/model_performance_after_cell_death.pdf')
| ThomasMiconi/htmresearch | projects/sequence_prediction/discrete_sequences/plotFaultyTMPerformance.py | Python | agpl-3.0 | 7,323 |
# Copyright 2016 F5 Networks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from code_generator import DEVICECONFDIR
import json
import os
from pprint import pprint as pp
from urlparse import urlparse
class OCCrawler(object):
def __init__(self, bigip, OC_path_element):
self.bigip = bigip
self.session = self.bigip._meta_data[u"icr_session"]
self.uri = self.bigip._meta_data['uri'] + OC_path_element
self.configs = [self.session.get(self.uri).json()]
self.build_referenced_uris()
def _get_uri_from_OC_item(self, item):
if u"reference" in item and u"link" in item[u"reference"]:
return item[u"reference"][u"link"]\
.replace("localhost",
self.bigip._meta_data[u"hostname"])
def build_referenced_uris(self):
self.referenced = []
for item in self.configs[0][u"items"]:
self.referenced.append(self._get_uri_from_OC_item(item))
def get_referenced_configs(self):
for uri in self.referenced:
self.configs.append(self.session.get(uri).json())
class ConfigWriter(object):
def __init__(self, config_list, complete_oc_name):
self.oc_name = complete_oc_name
self.oc_basename = self.oc_name.split('/')[-1]
self.configs = config_list
def _get_fname(self, conf):
sl = conf[u"selfLink"]
scheme, netloc, path, params, qargs, frags = urlparse(sl)
ps = path.split('/')
if ps[-1] == self.oc_basename:
return self.oc_basename + '_GET'
else:
return self.oc_basename + '_' + ps[-1] + '_GET'
def dump_configs(self):
for conf in self.configs:
fname = self._get_fname(conf)
if not os.path.exists(os.path.join(DEVICECONFDIR, fname)):
outname = os.path.join(DEVICECONFDIR, fname) + ".json"
with open(outname, 'w') as fh:
json.dump(conf, fh)
def main():
from f5.bigip import BigIP
b = BigIP('10.190.5.7', 'admin', 'admin')
occrawler = OCCrawler(b, 'ltm/persistence')
pp(occrawler.referenced)
occrawler.get_referenced_configs()
pp(occrawler.configs)
config_writer = ConfigWriter(occrawler.configs, u"ltm/persistence")
config_writer.dump_configs()
if __name__ == '__main__':
main()
| wojtek0806/f5-common-python | devtools/crawler.py | Python | apache-2.0 | 2,855 |
"""
This bootstrap module should be used to setup parts of the ircbot plugin
that need to exist before all controllers are loaded. It is best used to
define/register hooks, setup namespaces, and the like.
"""
import os
import json
import re
from urllib2 import urlopen, HTTPError, URLError
from time import sleep
from pkg_resources import get_distribution
from cement import namespaces
from cement.core.namespace import CementNamespace, register_namespace
from cement.core.namespace import get_config
from cement.core.testing import simulate
from cement.core.controller import run_controller_command
from cement.core.hook import define_hook, register_hook, run_hooks
from iustools.core import irc_commands
from iustools.core.exc import IUSToolsArgumentError
from iustools.lib.bitly import shorten_url
VERSION = get_distribution('iustools.ircbot').version
define_hook('ircbot_process_hook')
define_hook('ircbot_parsemsg_hook')
# Setup the 'ircbot' namespace object
ircbot = CementNamespace(
label='ircbot',
description='IRC Bot Plugin for IUS Community Project Tools',
version=VERSION,
controller='IRCBotController',
provider='iustools'
)
# default config options
ircbot.config['server'] = 'irc.freenode.net'
ircbot.config['port'] = 6667
ircbot.config['channel'] = 'iuscommunity'
ircbot.config['nick'] = 'iusbot'
ircbot.config['ping_cycle'] = 60
ircbot.config['recv_bytes'] = 2048
ircbot.config['process_user'] = 'iusdaemon'
ircbot.config['pid_file'] = '/var/run/ius-tools/ircbot.pid'
# command line options
ircbot.options.add_option('--irc-channel', action='store', dest='channel',
help='the irc channel to join')
ircbot.options.add_option('--irc-nick', action='store', dest='nick',
help='the irc nick to register as')
ircbot.options.add_option('--run-once', action='store', dest='run_once',
help='just run a specific ircbot process hook once')
# Officialize and register the namespace
register_namespace(ircbot)
@register_hook()
def post_options_hook(*args, **kw):
config = get_config()
    pid_dir = os.path.dirname(config['ircbot']['pid_file'])
    if not os.path.exists(pid_dir):
        os.makedirs(pid_dir)
@register_hook(name='ircbot_process_hook')
def interactive_ircbot_process_hook(config, log, irc):
"""
This process hook listens on the IRC channel, and responds to interactive
requests. NOTE: only one process can do regular 'polls' on the channel.
"""
while True:
res = irc.poll()
if res:
for hook in run_hooks('ircbot_parsemsg_hook', config, log, irc, res):
pass
#(from_nick, from_chan, msg, dest) = res
sleep(1)
@register_hook(name='ircbot_process_hook')
def keepalive_process_hook(config, log, irc):
"""
Send PINGs to the server to keep the connection alive
based on config['ircbot']['ping_cycle'].
"""
while True:
irc.ping()
sleep(int(config['ircbot']['ping_cycle']))
@register_hook(name='ircbot_parsemsg_hook')
def exec_commands_ircbot_parsemsg_hook(config, log, irc, poll_result):
"""
Parse the result of irc.poll() and execute commands if the msg was
a command.
"""
(from_nick, from_chan, msg, dest) = poll_result
# its a command,
    if not re.match('^\.[A-Za-z]', msg):
log.debug('msg did not start with a .command... skipping')
return
args = msg.split()
cmd = args.pop(0) # first part of msg is command, rest args
if cmd in irc_commands.keys():
# need to keep arg order
args.insert(0, irc_commands[cmd]['func'])
if irc_commands[cmd]['namespace'] != 'root':
args.insert(0, irc_commands[cmd]['namespace'])
args.insert(0, 'ius-tools')
try:
# FIX ME: this is a bit of a hack
nam = namespaces[irc_commands[cmd]['namespace']]
nam.controller.cli_opts = None
nam.controller.cli_args = None
namespaces[irc_commands[cmd]['namespace']] = nam
(out_dict, out_txt) = simulate(args)
reply = out_dict['irc_data']
except IUSToolsArgumentError, e:
reply = e.msg
out_dict = {}
# FIX ME: Need to consolidate all this .startswith('#') stuff
# only send to user directly?
if out_dict.has_key('irc_pm') and out_dict['irc_pm']:
if dest.startswith('#'):
irc.send(from_chan, "%s: check your PM." % from_nick)
irc.send(from_nick, "%s" % reply)
else:
irc.send(from_nick, "%s" % reply)
else:
if dest.startswith('#'):
irc.send(dest, "%s: %s" % (from_nick, reply))
else:
irc.send(dest, "%s" % reply)
else:
reply = "I don't understand that command."
if dest.startswith('#'):
irc.send(dest, "%s: %s" % (from_nick, reply))
else:
irc.send(dest, "%s" % reply)
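# Worked example (illustrative only; '.status' is a hypothetical entry in
# irc_commands with namespace 'root' and func 'status'): the channel message
# ".status ius" from nick "someuser" is split into cmd='.status' and
# args=['ius'], rewritten to ['ius-tools', 'status', 'ius'], executed through
# simulate(), and the resulting out_dict['irc_data'] is sent back to the
# channel as "someuser: <reply>".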
| iuscommunity/ius-tools | src/iustools.ircbot/iustools/bootstrap/ircbot.py | Python | gpl-2.0 | 5,050 |
#
# iutil.py - generic install utility functions
#
# Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
# Red Hat, Inc. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Author(s): Erik Troan <[email protected]>
#
import glob
import os, string, stat, sys
import signal
import os.path
from errno import *
import warnings
import subprocess
from flags import flags
from constants import *
import re
import threading
import gettext
_ = lambda x: gettext.ldgettext("anaconda", x)
import logging
log = logging.getLogger("anaconda")
program_log = logging.getLogger("program")
class ExecProduct(object):
def __init__(self, rc, stdout, stderr):
self.rc = rc
self.stdout = stdout
self.stderr = stderr
#Python reimplementation of the shell tee process, so we can
#feed the pipe output into two places at the same time
class tee(threading.Thread):
def __init__(self, inputdesc, outputdesc, logmethod):
threading.Thread.__init__(self)
self.inputdesc = os.fdopen(inputdesc, "r")
self.outputdesc = outputdesc
self.logmethod = logmethod
self.running = True
def run(self):
while self.running:
data = self.inputdesc.readline()
if data == "":
self.running = False
else:
self.logmethod(data.rstrip('\n'))
os.write(self.outputdesc, data)
def stop(self):
self.running = False
return self
## Run an external program and redirect the output to a file.
# @param command The command to run.
# @param argv A list of arguments.
# @param stdin The file descriptor to read stdin from.
# @param stdout The file descriptor to redirect stdout to.
# @param stderr The file descriptor to redirect stderr to.
# @param root The directory to chroot to before running command.
# @return The return code of command.
def execWithRedirect(command, argv, stdin = None, stdout = None,
stderr = None, root = '/'):
def chroot ():
os.chroot(root)
stdinclose = stdoutclose = stderrclose = lambda : None
argv = list(argv)
if isinstance(stdin, str):
if os.access(stdin, os.R_OK):
stdin = os.open(stdin, os.O_RDONLY)
stdinclose = lambda : os.close(stdin)
else:
stdin = sys.stdin.fileno()
elif isinstance(stdin, int):
pass
elif stdin is None or not isinstance(stdin, file):
stdin = sys.stdin.fileno()
if isinstance(stdout, str):
stdout = os.open(stdout, os.O_RDWR|os.O_CREAT)
stdoutclose = lambda : os.close(stdout)
elif isinstance(stdout, int):
pass
elif stdout is None or not isinstance(stdout, file):
stdout = sys.stdout.fileno()
if isinstance(stderr, str):
stderr = os.open(stderr, os.O_RDWR|os.O_CREAT)
stderrclose = lambda : os.close(stderr)
elif isinstance(stderr, int):
pass
elif stderr is None or not isinstance(stderr, file):
stderr = sys.stderr.fileno()
program_log.info("Running... %s" % (" ".join([command] + argv),))
    # prepare os pipes for feeding the tee processes
pstdout, pstdin = os.pipe()
perrout, perrin = os.pipe()
env = os.environ.copy()
env.update({"LC_ALL": "C"})
try:
        # prepare the tee processes
proc_std = tee(pstdout, stdout, program_log.info)
proc_err = tee(perrout, stderr, program_log.error)
#start monitoring the outputs
proc_std.start()
proc_err.start()
proc = subprocess.Popen([command] + argv, stdin=stdin,
stdout=pstdin,
stderr=perrin,
preexec_fn=chroot, cwd=root,
env=env)
proc.wait()
ret = proc.returncode
#close the input ends of pipes so we get EOF in the tee processes
os.close(pstdin)
os.close(perrin)
#wait for the output to be written and destroy them
proc_std.join()
del proc_std
proc_err.join()
del proc_err
stdinclose()
stdoutclose()
stderrclose()
except OSError as e:
errstr = "Error running %s: %s" % (command, e.strerror)
log.error(errstr)
program_log.error(errstr)
#close the input ends of pipes so we get EOF in the tee processes
os.close(pstdin)
os.close(perrin)
proc_std.join()
proc_err.join()
stdinclose()
stdoutclose()
stderrclose()
raise RuntimeError, errstr
return ret
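# Example call (a sketch, not taken from the callers of this module): run
# mkswap inside the install root, sending both output streams to tty5.
#
#   rc = execWithRedirect("mkswap", ["/dev/mapper/swap0"],
#                         stdout="/dev/tty5", stderr="/dev/tty5",
#                         root="/mnt/sysimage")
#
# A string passed for stdin/stdout/stderr is opened as a file path, an int is
# used as a raw file descriptor, and anything else falls back to the calling
# process's standard streams.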
## Run an external program and capture standard out.
# @param command The command to run.
# @param argv A list of arguments.
# @param stdin The file descriptor to read stdin from.
# @param stderr The file descriptor to redirect stderr to.
# @param root The directory to chroot to before running command.
# @return The output of command from stdout.
def execWithCapture(command, argv, stdin = None, stderr = None, root='/'):
def chroot():
os.chroot(root)
def closefds ():
stdinclose()
stderrclose()
stdinclose = stderrclose = lambda : None
rc = ""
argv = list(argv)
if isinstance(stdin, str):
if os.access(stdin, os.R_OK):
stdin = os.open(stdin, os.O_RDONLY)
stdinclose = lambda : os.close(stdin)
else:
stdin = sys.stdin.fileno()
elif isinstance(stdin, int):
pass
elif stdin is None or not isinstance(stdin, file):
stdin = sys.stdin.fileno()
if isinstance(stderr, str):
stderr = os.open(stderr, os.O_RDWR|os.O_CREAT)
stderrclose = lambda : os.close(stderr)
elif isinstance(stderr, int):
pass
elif stderr is None or not isinstance(stderr, file):
stderr = sys.stderr.fileno()
program_log.info("Running... %s" % (" ".join([command] + argv),))
env = os.environ.copy()
env.update({"LC_ALL": "C"})
try:
proc = subprocess.Popen([command] + argv, stdin=stdin,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
preexec_fn=chroot, cwd=root,
env=env)
while True:
(outStr, errStr) = proc.communicate()
if outStr:
map(program_log.info, outStr.splitlines())
rc += outStr
if errStr:
map(program_log.error, errStr.splitlines())
os.write(stderr, errStr)
if proc.returncode is not None:
break
except OSError as e:
log.error ("Error running " + command + ": " + e.strerror)
closefds()
raise RuntimeError, "Error running " + command + ": " + e.strerror
closefds()
return rc
def execWithCallback(command, argv, stdin = None, stdout = None,
stderr = None, echo = True, callback = None,
callback_data = None, root = '/'):
def chroot():
os.chroot(root)
def closefds ():
stdinclose()
stdoutclose()
stderrclose()
stdinclose = stdoutclose = stderrclose = lambda : None
argv = list(argv)
if isinstance(stdin, str):
if os.access(stdin, os.R_OK):
stdin = os.open(stdin, os.O_RDONLY)
stdinclose = lambda : os.close(stdin)
else:
stdin = sys.stdin.fileno()
elif isinstance(stdin, int):
pass
elif stdin is None or not isinstance(stdin, file):
stdin = sys.stdin.fileno()
if isinstance(stdout, str):
stdout = os.open(stdout, os.O_RDWR|os.O_CREAT)
stdoutclose = lambda : os.close(stdout)
elif isinstance(stdout, int):
pass
elif stdout is None or not isinstance(stdout, file):
stdout = sys.stdout.fileno()
if isinstance(stderr, str):
stderr = os.open(stderr, os.O_RDWR|os.O_CREAT)
stderrclose = lambda : os.close(stderr)
elif isinstance(stderr, int):
pass
elif stderr is None or not isinstance(stderr, file):
stderr = sys.stderr.fileno()
program_log.info("Running... %s" % (" ".join([command] + argv),))
p = os.pipe()
p_stderr = os.pipe()
childpid = os.fork()
if not childpid:
os.close(p[0])
os.close(p_stderr[0])
os.dup2(p[1], 1)
os.dup2(p_stderr[1], 2)
os.dup2(stdin, 0)
os.close(stdin)
os.close(p[1])
os.close(p_stderr[1])
os.execvp(command, [command] + argv)
os._exit(1)
os.close(p[1])
os.close(p_stderr[1])
log_output = ''
while 1:
try:
s = os.read(p[0], 1)
except OSError as e:
            if e.errno != EINTR:
map(program_log.info, log_output.splitlines())
raise IOError, e.args
if echo:
os.write(stdout, s)
log_output += s
if callback:
callback(s, callback_data=callback_data)
# break out early if the sub-process changes status.
# no need to flush the stream if the process has exited
try:
(pid, status) = os.waitpid(childpid,os.WNOHANG)
if pid != 0:
break
except OSError as e:
log.critical("exception from waitpid: %s %s" %(e.errno, e.strerror))
if len(s) < 1:
break
map(program_log.info, log_output.splitlines())
log_errors = ''
while 1:
try:
err = os.read(p_stderr[0], 128)
except OSError as e:
            if e.errno != EINTR:
map(program_log.error, log_errors.splitlines())
raise IOError, e.args
break
log_errors += err
if len(err) < 1:
break
os.write(stderr, log_errors)
map(program_log.error, log_errors.splitlines())
os.close(p[0])
os.close(p_stderr[0])
try:
#if we didn't already get our child's exit status above, do so now.
if not pid:
(pid, status) = os.waitpid(childpid, 0)
except OSError as e:
log.critical("exception from waitpid: %s %s" %(e.errno, e.strerror))
closefds()
rc = 1
if os.WIFEXITED(status):
rc = os.WEXITSTATUS(status)
return ExecProduct(rc, log_output , log_errors)
def _pulseProgressCallback(data, callback_data=None):
if callback_data:
callback_data.pulse()
def execWithPulseProgress(command, argv, stdin = None, stdout = None,
stderr = None, echo = True, progress = None,
root = '/'):
return execWithCallback(command, argv, stdin=stdin, stdout=stdout,
stderr=stderr, echo=echo, callback=_pulseProgressCallback,
callback_data=progress, root=root)
## Run a shell.
def execConsole():
try:
proc = subprocess.Popen(["/bin/sh"])
proc.wait()
except OSError as e:
raise RuntimeError, "Error running /bin/sh: " + e.strerror
## Get the size of a directory and all its subdirectories.
# @param dir The name of the directory to find the size of.
# @return The size of the directory in kilobytes.
def getDirSize(dir):
def getSubdirSize(dir):
# returns size in bytes
mydev = os.lstat(dir)[stat.ST_DEV]
dsize = 0
for f in os.listdir(dir):
curpath = '%s/%s' % (dir, f)
sinfo = os.lstat(curpath)
if stat.S_ISDIR(sinfo[stat.ST_MODE]):
if mydev == sinfo[stat.ST_DEV]:
dsize += getSubdirSize(curpath)
elif stat.S_ISREG(sinfo[stat.ST_MODE]):
dsize += sinfo[stat.ST_SIZE]
else:
pass
return dsize
return getSubdirSize(dir)/1024
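# Example (sketch): getDirSize("/tmp") sums the sizes of regular files under
# /tmp, skipping subdirectories that live on a different device (st_dev), and
# returns the total in kilobytes.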
## Get the amount of RAM not used by /tmp.
# @return The amount of available memory in kilobytes.
def memAvailable():
tram = memInstalled()
ramused = getDirSize("/tmp")
return tram - ramused
## Get the amount of RAM installed in the machine.
# @return The amount of installed memory in kilobytes.
def memInstalled():
f = open("/proc/meminfo", "r")
lines = f.readlines()
f.close()
for l in lines:
if l.startswith("MemTotal:"):
fields = string.split(l)
mem = fields[1]
break
return int(mem)
## Suggest the size of the swap partition that will be created.
# @param quiet Should size information be logged?
# @return A tuple of the minimum and maximum swap size, in megabytes.
def swapSuggestion(quiet=0):
mem = memInstalled()/1024
mem = ((mem/16)+1)*16
if not quiet:
log.info("Detected %sM of memory", mem)
if mem <= 256:
minswap = 256
maxswap = 512
else:
if mem > 2048:
minswap = 1024
maxswap = 2048 + mem
else:
minswap = mem
maxswap = 2*mem
if isS390():
minswap = 1
if not quiet:
log.info("Swap attempt of %sM to %sM", minswap, maxswap)
return (minswap, maxswap)
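# Worked example of the sizing rule above: with 1500M of RAM detected, mem is
# rounded up to the next multiple of 16 (1504M); that is above 256M and at
# most 2048M, so the suggestion is (minswap, maxswap) = (1504, 3008). On s390
# the minimum is always clamped down to 1M.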
## Create a directory path. Don't fail if the directory already exists.
# @param dir The directory path to create.
def mkdirChain(dir):
try:
os.makedirs(dir, 0755)
except OSError as e:
try:
if e.errno == EEXIST and stat.S_ISDIR(os.stat(dir).st_mode):
return
except:
pass
log.error("could not create directory %s: %s" % (dir, e.strerror))
## Get the total amount of swap memory.
# @return The total amount of swap memory in kilobytes, or 0 if unknown.
def swapAmount():
f = open("/proc/meminfo", "r")
lines = f.readlines()
f.close()
for l in lines:
if l.startswith("SwapTotal:"):
fields = string.split(l)
return int(fields[1])
return 0
## Copy a device node.
# Copies a device node by looking at the device type, major and minor device
# numbers, and doing a mknod on the new device name.
#
# @param src The name of the source device node.
# @param dest The name of the new device node to create.
def copyDeviceNode(src, dest):
filestat = os.lstat(src)
mode = filestat[stat.ST_MODE]
if stat.S_ISBLK(mode):
type = stat.S_IFBLK
elif stat.S_ISCHR(mode):
type = stat.S_IFCHR
else:
# XXX should we just fallback to copying normally?
raise RuntimeError, "Tried to copy %s which isn't a device node" % (src,)
os.mknod(dest, mode | type, filestat.st_rdev)
## Get the SPARC machine variety type.
# @return The SPARC machine type, or 0 if not SPARC.
def getSparcMachine():
if not isSparc():
return 0
machine = None
f = open('/proc/cpuinfo', 'r')
lines = f.readlines()
f.close()
for line in lines:
if line.find('type') != -1:
machine = line.split(':')[1].strip()
return machine
return None
## Get the PPC machine variety type.
# @return The PPC machine type, or 0 if not PPC.
def getPPCMachine():
if not isPPC():
return 0
ppcMachine = None
machine = None
platform = None
# ppc machine hash
ppcType = { 'Mac' : 'PMac',
'Book' : 'PMac',
'CHRP IBM' : 'pSeries',
'Pegasos' : 'Pegasos',
'Efika' : 'Efika',
'iSeries' : 'iSeries',
'pSeries' : 'pSeries',
'PReP' : 'PReP',
'CHRP' : 'pSeries',
'Amiga' : 'APUS',
'Gemini' : 'Gemini',
'Shiner' : 'ANS',
'BRIQ' : 'BRIQ',
'Teron' : 'Teron',
'AmigaOne' : 'Teron',
'Maple' : 'pSeries',
'Cell' : 'pSeries',
'Momentum' : 'pSeries',
'PS3' : 'PS3'
}
f = open('/proc/cpuinfo', 'r')
lines = f.readlines()
f.close()
for line in lines:
if line.find('machine') != -1:
machine = line.split(':')[1]
elif line.find('platform') != -1:
platform = line.split(':')[1]
for part in (machine, platform):
if ppcMachine is None and part is not None:
for type in ppcType.items():
if part.find(type[0]) != -1:
ppcMachine = type[1]
if ppcMachine is None:
log.warning("Unable to find PowerPC machine type")
elif ppcMachine == 0:
log.warning("Unknown PowerPC machine type: %s" %(ppcMachine,))
return ppcMachine
## Get the powermac machine ID.
# @return The powermac machine id, or 0 if not PPC.
def getPPCMacID():
machine = None
if not isPPC():
return 0
if getPPCMachine() != "PMac":
return 0
f = open('/proc/cpuinfo', 'r')
lines = f.readlines()
f.close()
for line in lines:
if line.find('machine') != -1:
machine = line.split(':')[1]
machine = machine.strip()
return machine
log.warning("No Power Mac machine id")
return 0
## Get the powermac generation.
# @return The powermac generation, or 0 if not powermac.
def getPPCMacGen():
# XXX: should NuBus be here?
pmacGen = ['OldWorld', 'NewWorld', 'NuBus']
if not isPPC():
return 0
if getPPCMachine() != "PMac":
return 0
f = open('/proc/cpuinfo', 'r')
lines = f.readlines()
f.close()
gen = None
for line in lines:
if line.find('pmac-generation') != -1:
gen = line.split(':')[1]
break
if gen is None:
log.warning("Unable to find pmac-generation")
for type in pmacGen:
if gen.find(type) != -1:
return type
log.warning("Unknown Power Mac generation: %s" %(gen,))
return 0
## Determine if the hardware is an iBook or PowerBook
# @return 1 if so, 0 otherwise.
def getPPCMacBook():
if not isPPC():
return 0
if getPPCMachine() != "PMac":
return 0
f = open('/proc/cpuinfo', 'r')
lines = f.readlines()
f.close()
for line in lines:
        if 'book' in line.lower():
return 1
return 0
cell = None
## Determine if the hardware is the Cell platform.
# @return True if so, False otherwise.
def isCell():
global cell
if cell is not None:
return cell
cell = False
if not isPPC():
return cell
f = open('/proc/cpuinfo', 'r')
lines = f.readlines()
f.close()
for line in lines:
        if 'Cell' in line:
cell = True
return cell
mactel = None
## Determine if the hardware is an Intel-based Apple Mac.
# @return True if so, False otherwise.
def isMactel():
global mactel
if mactel is not None:
return mactel
if not isX86():
mactel = False
elif not os.path.exists("/usr/sbin/dmidecode"):
mactel = False
else:
buf = execWithCapture("/usr/sbin/dmidecode",
["dmidecode", "-s", "system-manufacturer"])
if buf.lower().find("apple") != -1:
mactel = True
else:
mactel = False
return mactel
efi = None
## Determine if the hardware supports EFI.
# @return True if so, False otherwise.
def isEfi():
global efi
if efi is not None:
return efi
efi = False
# XXX need to make sure efivars is loaded...
if os.path.exists("/sys/firmware/efi"):
efi = True
return efi
# Architecture checking functions
def isX86(bits=None):
arch = os.uname()[4]
# x86 platforms include:
# i*86
# athlon*
# x86_64
# amd*
# ia32e
if bits is None:
if (arch.startswith('i') and arch.endswith('86')) or \
arch.startswith('athlon') or arch.startswith('amd') or \
arch == 'x86_64' or arch == 'ia32e':
return True
elif bits == 32:
if arch.startswith('i') and arch.endswith('86'):
return True
elif bits == 64:
if arch.startswith('athlon') or arch.startswith('amd') or \
arch == 'x86_64' or arch == 'ia32e':
return True
return False
def isPPC():
return os.uname()[4].startswith('ppc')
def isS390():
return os.uname()[4].startswith('s390')
def isIA64():
return os.uname()[4] == 'ia64'
def isAlpha():
return os.uname()[4].startswith('alpha')
def isSparc():
return os.uname()[4].startswith('sparc')
def getArch():
if isX86(bits=32):
return 'i386'
elif isX86(bits=64):
return 'x86_64'
elif isPPC():
return 'ppc'
elif isAlpha():
return 'alpha'
elif isSparc():
return 'sparc'
else:
return os.uname()[4]
def isConsoleOnVirtualTerminal():
# XXX PJFIX is there some way to ask the kernel this instead?
if isS390():
return False
return not flags.serial
def strip_markup(text):
if text.find("<") == -1:
return text
r = ""
inTag = False
for c in text:
if c == ">" and inTag:
inTag = False
continue
elif c == "<" and not inTag:
inTag = True
continue
elif not inTag:
r += c
return r.encode("utf-8")
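# Example: strip_markup("a <b>bold</b> word") returns "a bold word"; text
# containing no '<' is returned unchanged by the early exit above.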
def notify_kernel(path, action="change"):
""" Signal the kernel that the specified device has changed. """
log.debug("notifying kernel of '%s' event on device %s" % (action, path))
path = os.path.join(path, "uevent")
if not path.startswith("/sys/") or not os.access(path, os.W_OK):
log.debug("sysfs path '%s' invalid" % path)
raise ValueError("invalid sysfs path")
f = open(path, "a")
f.write("%s\n" % action)
f.close()
def get_sysfs_path_by_name(dev_node, class_name="block"):
""" Return sysfs path for a given device.
For a device node (e.g. /dev/vda2) get the respective sysfs path
(e.g. /sys/class/block/vda2). This also has to work for device nodes
that are in a subdirectory of /dev like '/dev/cciss/c0d0p1'.
"""
dev_name = os.path.basename(dev_node)
if dev_node.startswith("/dev/"):
dev_name = dev_node[5:].replace("/", "!")
sysfs_class_dir = "/sys/class/%s" % class_name
dev_path = os.path.join(sysfs_class_dir, dev_name)
if os.path.exists(dev_path):
return dev_path
else:
raise RuntimeError("get_sysfs_path_by_name: Could not find sysfs path "
"for '%s' (it is not at '%s')" % (dev_node, dev_path))
def numeric_type(num):
""" Verify that a value is given as a numeric data type.
Return the number if the type is sensible or raise ValueError
if not.
"""
if num is None:
num = 0
elif not (isinstance(num, int) or \
isinstance(num, long) or \
isinstance(num, float)):
raise ValueError("value (%s) must be either a number or None" % num)
return num
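# Examples: numeric_type(None) returns 0, numeric_type(4.5) returns 4.5, and
# numeric_type("4") raises ValueError because strings are not accepted.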
def reIPL(anaconda, loader_pid):
try:
ipldev = anaconda.id.bootloader.getDevice()
except:
message = _("Error determining boot device's disk name")
log.warning(message)
return message
try:
rc = execWithRedirect("chreipl", ["node", "/dev/" + ipldev],
stdout = "/dev/tty5",
stderr = "/dev/tty5")
    except Exception, e:
        log.info("Unable to set reIPL device to %s: %s",
                ipldev, e.message)
        rc = 1
if rc:
anaconda.canReIPL = False
devstring = None
for disk in anaconda.storage.disks:
if disk.name == ipldev:
devstring = disk.description
break
if devstring is None:
devstring = _("the device containing /boot")
message = _("After shutdown, please perform a manual IPL from %s "
"to continue installation." % devstring)
log.info("reIPL configuration failed => halt")
os.kill(os.getppid(), signal.SIGUSR1)
else:
anaconda.canReIPL = True
message = None
log.info("reIPL configuration successful => reboot")
os.kill(os.getppid(), signal.SIGUSR2)
return message
def resetRpmDb(rootdir):
for rpmfile in glob.glob("%s/var/lib/rpm/__db.*" % rootdir):
try:
os.unlink(rpmfile)
except Exception, e:
log.debug("error %s removing file: %s" %(e,rpmfile))
def parseNfsUrl(nfsurl):
options = ''
host = ''
path = ''
if nfsurl:
s = nfsurl.split(":")
s.pop(0)
if len(s) >= 3:
(options, host, path) = s[:3]
elif len(s) == 2:
(host, path) = s
else:
host = s[0]
return (options, host, path)
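# Examples: parseNfsUrl("nfs:ro,nolock:server:/install") returns
# ('ro,nolock', 'server', '/install'), while parseNfsUrl("nfs:server:/install")
# returns ('', 'server', '/install').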
def insert_colons(a_string):
"""
    Insert a colon between every pair of characters.
    E.g. creates 'al:go:ri:th:ms' from 'algorithms'. Useful for formatting MAC
addresses and wwids for output.
"""
suffix = a_string[-2:]
if len(a_string) > 2:
return insert_colons(a_string[:-2]) + ':' + suffix
else:
return suffix
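# Example: insert_colons('0123456789ab') returns '01:23:45:67:89:ab'; pairs
# are split from the right, so an odd-length string like 'abcde' becomes
# 'a:bc:de'.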
def add_po_path(module, dir):
""" Looks to see what translations are under a given path and tells
the gettext module to use that path as the base dir """
for d in os.listdir(dir):
if not os.path.isdir("%s/%s" %(dir,d)):
continue
if not os.path.exists("%s/%s/LC_MESSAGES" %(dir,d)):
continue
for basename in os.listdir("%s/%s/LC_MESSAGES" %(dir,d)):
if not basename.endswith(".mo"):
continue
log.info("setting %s as translation source for %s" %(dir, basename[:-3]))
module.bindtextdomain(basename[:-3], dir)
def setup_translations(module):
if os.path.isdir(TRANSLATIONS_UPDATE_DIR):
add_po_path(module, TRANSLATIONS_UPDATE_DIR)
module.textdomain("anaconda")
def get_sysfs_attr(path, attr):
if not attr:
log.debug("get_sysfs_attr() called with attr=None")
return None
attribute = "/sys%s/%s" % (path, attr)
attribute = os.path.realpath(attribute)
if not os.path.isfile(attribute) and not os.path.islink(attribute):
log.warning("%s is not a valid attribute" % (attr,))
return None
return open(attribute, "r").read().strip()
def find_program_in_path(prog, raise_on_error=False):
for d in os.environ["PATH"].split(os.pathsep):
full = os.path.join(d, prog)
if os.access(full, os.X_OK):
return full
if raise_on_error:
raise RuntimeError("Unable to locate a needed executable: '%s'" % prog)
return None
| masami256/Anaconda-for-ore-ore-kernel | pyanaconda/iutil.py | Python | gpl-2.0 | 27,310 |
from askbot.tests.cache_tests import *
from askbot.tests.email_alert_tests import *
from askbot.tests.on_screen_notification_tests import *
from askbot.tests.page_load_tests import *
from askbot.tests.permission_assertion_tests import *
from askbot.tests.db_api_tests import *
from askbot.tests.skin_tests import *
from askbot.tests.badge_tests import *
from askbot.tests.management_command_tests import *
from askbot.tests.search_state_tests import *
from askbot.tests.form_tests import *
from askbot.tests.follow_tests import *
from askbot.tests.markup_test import *
from askbot.tests.post_model_tests import *
from askbot.tests.thread_model_tests import *
from askbot.tests.reply_by_email_tests import *
from askbot.tests.haystack_search_tests import *
from askbot.tests.email_parsing_tests import *
from askbot.tests.widget_tests import *
from askbot.tests.category_tree_tests import CategoryTreeTests
from askbot.tests.question_views_tests import *
from askbot.tests.user_model_tests import UserModelTests
from askbot.tests.user_views_tests import *
from askbot.tests.utils_tests import *
from askbot.tests.view_context_tests import *
from askbot.tests.api_v1_tests import *
from askbot.tests.jive_tests import *
from askbot.tests.signal_handler_tests import *
| divio/askbot-devel | askbot/tests/__init__.py | Python | gpl-3.0 | 1,266 |
# fly ArduPlane QuadPlane in SITL
import util, pexpect, sys, time, math, shutil, os
from common import *
from pymavlink import mavutil
import random
# get location of scripts
testdir=os.path.dirname(os.path.realpath(__file__))
HOME_LOCATION='-27.274439,151.290064,343,8.7'
MISSION='ArduPlane-Missions/Dalby-OBC2016.txt'
FENCE='ArduPlane-Missions/Dalby-OBC2016-fence.txt'
WIND="0,180,0.2" # speed,direction,variance
homeloc = None
def fly_mission(mavproxy, mav, filename, fence, height_accuracy=-1):
'''fly a mission from a file'''
print("Flying mission %s" % filename)
mavproxy.send('wp load %s\n' % filename)
mavproxy.expect('Flight plan received')
mavproxy.send('fence load %s\n' % fence)
mavproxy.send('wp list\n')
mavproxy.expect('Requesting [0-9]+ waypoints')
mavproxy.send('mode AUTO\n')
wait_mode(mav, 'AUTO')
if not wait_waypoint(mav, 1, 9, max_dist=60, timeout=1200):
return False
mavproxy.expect('DISARMED')
# wait for blood sample here
mavproxy.send('wp set 10\n')
mavproxy.send('arm throttle\n')
mavproxy.expect('ARMED')
if not wait_waypoint(mav, 10, 18, max_dist=60, timeout=1200):
return False
mavproxy.expect('DISARMED')
print("Mission OK")
return True
def fly_QuadPlane(binary, viewerip=None, map=False, valgrind=False, gdb=False):
'''fly QuadPlane in SIL
you can pass viewerip as an IP address to optionally send fg and
mavproxy packets too for local viewing of the flight in real time
'''
global homeloc
options = '--sitl=127.0.0.1:5501 --out=127.0.0.1:19550 --streamrate=10'
if viewerip:
options += " --out=%s:14550" % viewerip
if map:
options += ' --map'
sil = util.start_SIL(binary, model='quadplane', wipe=True, home=HOME_LOCATION, speedup=10,
defaults_file=os.path.join(testdir, 'quadplane.parm'), valgrind=valgrind, gdb=gdb)
mavproxy = util.start_MAVProxy_SIL('QuadPlane', options=options)
mavproxy.expect('Telemetry log: (\S+)')
logfile = mavproxy.match.group(1)
print("LOGFILE %s" % logfile)
buildlog = util.reltopdir("../buildlogs/QuadPlane-test.tlog")
print("buildlog=%s" % buildlog)
if os.path.exists(buildlog):
os.unlink(buildlog)
try:
os.link(logfile, buildlog)
except Exception:
pass
util.expect_setup_callback(mavproxy, expect_callback)
mavproxy.expect('Received [0-9]+ parameters')
expect_list_clear()
expect_list_extend([sil, mavproxy])
print("Started simulator")
# get a mavlink connection going
try:
mav = mavutil.mavlink_connection('127.0.0.1:19550', robust_parsing=True)
except Exception, msg:
print("Failed to start mavlink connection on 127.0.0.1:19550" % msg)
raise
mav.message_hooks.append(message_hook)
mav.idle_hooks.append(idle_hook)
failed = False
e = 'None'
try:
print("Waiting for a heartbeat with mavlink protocol %s" % mav.WIRE_PROTOCOL_VERSION)
mav.wait_heartbeat()
print("Waiting for GPS fix")
mav.recv_match(condition='VFR_HUD.alt>10', blocking=True)
mav.wait_gps_fix()
while mav.location().alt < 10:
mav.wait_gps_fix()
homeloc = mav.location()
print("Home location: %s" % homeloc)
# wait for EKF to settle
wait_seconds(mav, 15)
mavproxy.send('arm throttle\n')
mavproxy.expect('ARMED')
if not fly_mission(mavproxy, mav,
os.path.join(testdir, "ArduPlane-Missions/Dalby-OBC2016.txt"),
os.path.join(testdir, "ArduPlane-Missions/Dalby-OBC2016-fence.txt")):
print("Failed mission")
failed = True
except pexpect.TIMEOUT, e:
print("Failed with timeout")
failed = True
mav.close()
util.pexpect_close(mavproxy)
util.pexpect_close(sil)
valgrind_log = sil.valgrind_log_filepath()
if os.path.exists(valgrind_log):
os.chmod(valgrind_log, 0644)
shutil.copy(valgrind_log, util.reltopdir("../buildlogs/QuadPlane-valgrind.log"))
if failed:
print("FAILED: %s" % e)
return False
return True
| chapman/ardupilot | Tools/autotest/quadplane.py | Python | gpl-3.0 | 4,237 |
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from airflow import DAG
from airflow.providers.google.cloud.transfers import gcs_to_bigquery
default_args = {
"owner": "Google",
"depends_on_past": False,
"start_date": "2021-05-01",
}
with DAG(
dag_id="covid19_vaccination_access.vaccination_access_to_bq",
default_args=default_args,
max_active_runs=1,
schedule_interval="@daily",
catchup=False,
default_view="graph",
) as dag:
# Task to load CSV file from covid19-open-data bucket to facility_boundary_us_all
gcs_to_bq_table_us_all = gcs_to_bigquery.GCSToBigQueryOperator(
task_id="gcs_to_bq_table_us_all",
bucket="{{ var.json.covid19_vaccination_access.source_bucket }}",
source_objects=[
"{{ var.json.covid19_vaccination_access.source_prefix }}/facility-boundary-us-all.csv"
],
source_format="CSV",
destination_project_dataset_table="covid19_vaccination_access.facility_boundary_us_all",
skip_leading_rows=1,
write_disposition="WRITE_TRUNCATE",
schema_fields=[
{
"name": "facility_place_id",
"type": "STRING",
"mode": "REQUIRED",
"description": "The Google Place ID of the vaccination site. For example, ChIJV3woGFkSK4cRWP9s3-kIFGk.",
},
{
"name": "facility_provider_id",
"type": "STRING",
"mode": "NULLABLE",
"description": "An identifier imported from the provider of the vaccination site information. In the US, we use the ID provided by VaccineFinder when available. For example, 7ede5bd5-44da-4a59-b4d9-b3a49c53472c.",
},
{
"name": "facility_name",
"type": "STRING",
"mode": "NULLABLE",
"description": "The name of the vaccination site. For example, St. Joseph's Hospital.",
},
{
"name": "facility_latitude",
"type": "FLOAT",
"mode": "REQUIRED",
"description": "The latitude of the vaccination site. For example, 36.0507",
},
{
"name": "facility_longitude",
"type": "FLOAT",
"mode": "REQUIRED",
"description": "The longitude of the vaccination site. For example, 41.4356",
},
{
"name": "facility_country_region",
"type": "STRING",
"mode": "NULLABLE",
"description": "The name of the country or region in English. For example, United States.",
},
{
"name": "facility_country_region_code",
"type": "STRING",
"mode": "NULLABLE",
"description": "The ISO 3166-1 code for the country or region. For example, US.",
},
{
"name": "facility_sub_region_1",
"type": "STRING",
"mode": "NULLABLE",
"description": "The name of a region in the country. For example, California.",
},
{
"name": "facility_sub_region_1_code",
"type": "STRING",
"mode": "NULLABLE",
"description": "A country-specific ISO 3166-2 code for the region. For example, US-CA.",
},
{
"name": "facility_sub_region_2",
"type": "STRING",
"mode": "NULLABLE",
"description": "The name (or type) of a region in the country. Typically a subdivision of sub_region_1. For example, Santa Clara County or municipal_borough.",
},
{
"name": "facility_sub_region_2_code",
"type": "STRING",
"mode": "NULLABLE",
"description": "In the US, the FIPS code for a US county (or equivalent). For example, 06085.",
},
{
"name": "facility_region_place_id",
"type": "STRING",
"mode": "NULLABLE",
"description": "The Google place ID for the most-specific region, used in Google Places API and on Google Maps. For example, ChIJd_Y0eVIvkIARuQyDN0F1LBA.",
},
{
"name": "mode_of_transportation",
"type": "STRING",
"mode": "NULLABLE",
"description": "The mode of transport used to calculate the catchment boundary. For example, driving.",
},
{
"name": "travel_time_threshold_minutes",
"type": "INTEGER",
"mode": "NULLABLE",
"description": "The maximum travel time, in minutes, used to calculate the catchment boundary. For example, 30.",
},
{
"name": "facility_catchment_boundary",
"type": "GEOGRAPHY",
"mode": "NULLABLE",
"description": "A GeoJSON representation of the catchment area boundary of the site, for a particular mode of transportation and travel time threshold. Consists of multiple latitude and longitude points.",
},
],
)
# Task to load CSV file from covid19-open-data bucket to facility_boundary_us_drive
gcs_to_bq_table_us_drive = gcs_to_bigquery.GCSToBigQueryOperator(
task_id="gcs_to_bq_table_us_drive",
bucket="{{ var.json.covid19_vaccination_access.source_bucket }}",
source_objects=[
"{{ var.json.covid19_vaccination_access.source_prefix }}/facility-boundary-us-drive.csv"
],
source_format="CSV",
destination_project_dataset_table="covid19_vaccination_access.facility_boundary_us_drive",
skip_leading_rows=1,
write_disposition="WRITE_TRUNCATE",
schema_fields=[
{
"name": "facility_place_id",
"type": "STRING",
"mode": "REQUIRED",
"description": "The Google Place ID of the vaccination site. For example, ChIJV3woGFkSK4cRWP9s3-kIFGk.",
},
{
"name": "facility_provider_id",
"type": "STRING",
"mode": "NULLABLE",
"description": "An identifier imported from the provider of the vaccination site information. In the US, we use the ID provided by VaccineFinder when available. For example, 7ede5bd5-44da-4a59-b4d9-b3a49c53472c.",
},
{
"name": "facility_name",
"type": "STRING",
"mode": "NULLABLE",
"description": "The name of the vaccination site. For example, St. Joseph's Hospital.",
},
{
"name": "facility_latitude",
"type": "FLOAT",
"mode": "REQUIRED",
"description": "The latitude of the vaccination site. For example, 36.0507",
},
{
"name": "facility_longitude",
"type": "FLOAT",
"mode": "REQUIRED",
"description": "The longitude of the vaccination site. For example, 41.4356",
},
{
"name": "facility_country_region",
"type": "STRING",
"mode": "NULLABLE",
"description": "The name of the country or region in English. For example, United States.",
},
{
"name": "facility_country_region_code",
"type": "STRING",
"mode": "NULLABLE",
"description": "The ISO 3166-1 code for the country or region. For example, US.",
},
{
"name": "facility_sub_region_1",
"type": "STRING",
"mode": "NULLABLE",
"description": "The name of a region in the country. For example, California.",
},
{
"name": "facility_sub_region_1_code",
"type": "STRING",
"mode": "NULLABLE",
"description": "A country-specific ISO 3166-2 code for the region. For example, US-CA.",
},
{
"name": "facility_sub_region_2",
"type": "STRING",
"mode": "NULLABLE",
"description": "The name (or type) of a region in the country. Typically a subdivision of sub_region_1. For example, Santa Clara County or municipal_borough.",
},
{
"name": "facility_sub_region_2_code",
"type": "STRING",
"mode": "NULLABLE",
"description": "In the US, the FIPS code for a US county (or equivalent). For example, 06085.",
},
{
"name": "facility_region_place_id",
"type": "STRING",
"mode": "NULLABLE",
"description": "The Google place ID for the most-specific region, used in Google Places API and on Google Maps. For example, ChIJd_Y0eVIvkIARuQyDN0F1LBA.",
},
{
"name": "mode_of_transportation",
"type": "STRING",
"mode": "NULLABLE",
"description": "The mode of transport used to calculate the catchment boundary. For example, driving.",
},
{
"name": "travel_time_threshold_minutes",
"type": "INTEGER",
"mode": "NULLABLE",
"description": "The maximum travel time, in minutes, used to calculate the catchment boundary. For example, 30.",
},
{
"name": "facility_catchment_boundary",
"type": "GEOGRAPHY",
"mode": "NULLABLE",
"description": "A GeoJSON representation of the catchment area boundary of the site, for a particular mode of transportation and travel time threshold. Consists of multiple latitude and longitude points.",
},
],
)
# Task to load CSV file from covid19-open-data bucket to facility_boundary_us_transit
gcs_to_bq_table_us_transit = gcs_to_bigquery.GCSToBigQueryOperator(
task_id="gcs_to_bq_table_us_transit",
bucket="{{ var.json.covid19_vaccination_access.source_bucket }}",
source_objects=[
"{{ var.json.covid19_vaccination_access.source_prefix }}/facility-boundary-us-transit.csv"
],
source_format="CSV",
destination_project_dataset_table="covid19_vaccination_access.facility_boundary_us_transit",
skip_leading_rows=1,
write_disposition="WRITE_TRUNCATE",
schema_fields=[
{
"name": "facility_place_id",
"type": "STRING",
"mode": "REQUIRED",
"description": "The Google Place ID of the vaccination site. For example, ChIJV3woGFkSK4cRWP9s3-kIFGk.",
},
{
"name": "facility_provider_id",
"type": "STRING",
"mode": "NULLABLE",
"description": "An identifier imported from the provider of the vaccination site information. In the US, we use the ID provided by VaccineFinder when available. For example, 7ede5bd5-44da-4a59-b4d9-b3a49c53472c.",
},
{
"name": "facility_name",
"type": "STRING",
"mode": "NULLABLE",
"description": "The name of the vaccination site. For example, St. Joseph's Hospital.",
},
{
"name": "facility_latitude",
"type": "FLOAT",
"mode": "REQUIRED",
"description": "The latitude of the vaccination site. For example, 36.0507",
},
{
"name": "facility_longitude",
"type": "FLOAT",
"mode": "REQUIRED",
"description": "The longitude of the vaccination site. For example, 41.4356",
},
{
"name": "facility_country_region",
"type": "STRING",
"mode": "NULLABLE",
"description": "The name of the country or region in English. For example, United States.",
},
{
"name": "facility_country_region_code",
"type": "STRING",
"mode": "NULLABLE",
"description": "The ISO 3166-1 code for the country or region. For example, US.",
},
{
"name": "facility_sub_region_1",
"type": "STRING",
"mode": "NULLABLE",
"description": "The name of a region in the country. For example, California.",
},
{
"name": "facility_sub_region_1_code",
"type": "STRING",
"mode": "NULLABLE",
"description": "A country-specific ISO 3166-2 code for the region. For example, US-CA.",
},
{
"name": "facility_sub_region_2",
"type": "STRING",
"mode": "NULLABLE",
"description": "The name (or type) of a region in the country. Typically a subdivision of sub_region_1. For example, Santa Clara County or municipal_borough.",
},
{
"name": "facility_sub_region_2_code",
"type": "STRING",
"mode": "NULLABLE",
"description": "In the US, the FIPS code for a US county (or equivalent). For example, 06085.",
},
{
"name": "facility_region_place_id",
"type": "STRING",
"mode": "NULLABLE",
"description": "The Google place ID for the most-specific region, used in Google Places API and on Google Maps. For example, ChIJd_Y0eVIvkIARuQyDN0F1LBA.",
},
{
"name": "mode_of_transportation",
"type": "STRING",
"mode": "NULLABLE",
"description": "The mode of transport used to calculate the catchment boundary. For example, driving.",
},
{
"name": "travel_time_threshold_minutes",
"type": "INTEGER",
"mode": "NULLABLE",
"description": "The maximum travel time, in minutes, used to calculate the catchment boundary. For example, 30.",
},
{
"name": "facility_catchment_boundary",
"type": "GEOGRAPHY",
"mode": "NULLABLE",
"description": "A GeoJSON representation of the catchment area boundary of the site, for a particular mode of transportation and travel time threshold. Consists of multiple latitude and longitude points.",
},
],
)
# Task to load CSV file from covid19-open-data bucket to facility_boundary_us_walk
gcs_to_bq_table_us_walk = gcs_to_bigquery.GCSToBigQueryOperator(
task_id="gcs_to_bq_table_us_walk",
bucket="{{ var.json.covid19_vaccination_access.source_bucket }}",
source_objects=[
"{{ var.json.covid19_vaccination_access.source_prefix }}/facility-boundary-us-walk.csv"
],
source_format="CSV",
destination_project_dataset_table="covid19_vaccination_access.facility_boundary_us_walk",
skip_leading_rows=1,
write_disposition="WRITE_TRUNCATE",
schema_fields=[
{
"name": "facility_place_id",
"type": "STRING",
"mode": "REQUIRED",
"description": "The Google Place ID of the vaccination site. For example, ChIJV3woGFkSK4cRWP9s3-kIFGk.",
},
{
"name": "facility_provider_id",
"type": "STRING",
"mode": "NULLABLE",
"description": "An identifier imported from the provider of the vaccination site information. In the US, we use the ID provided by VaccineFinder when available. For example, 7ede5bd5-44da-4a59-b4d9-b3a49c53472c.",
},
{
"name": "facility_name",
"type": "STRING",
"mode": "NULLABLE",
"description": "The name of the vaccination site. For example, St. Joseph's Hospital.",
},
{
"name": "facility_latitude",
"type": "FLOAT",
"mode": "REQUIRED",
"description": "The latitude of the vaccination site. For example, 36.0507",
},
{
"name": "facility_longitude",
"type": "FLOAT",
"mode": "REQUIRED",
"description": "The longitude of the vaccination site. For example, 41.4356",
},
{
"name": "facility_country_region",
"type": "STRING",
"mode": "NULLABLE",
"description": "The name of the country or region in English. For example, United States.",
},
{
"name": "facility_country_region_code",
"type": "STRING",
"mode": "NULLABLE",
"description": "The ISO 3166-1 code for the country or region. For example, US.",
},
{
"name": "facility_sub_region_1",
"type": "STRING",
"mode": "NULLABLE",
"description": "The name of a region in the country. For example, California.",
},
{
"name": "facility_sub_region_1_code",
"type": "STRING",
"mode": "NULLABLE",
"description": "A country-specific ISO 3166-2 code for the region. For example, US-CA.",
},
{
"name": "facility_sub_region_2",
"type": "STRING",
"mode": "NULLABLE",
"description": "The name (or type) of a region in the country. Typically a subdivision of sub_region_1. For example, Santa Clara County or municipal_borough.",
},
{
"name": "facility_sub_region_2_code",
"type": "STRING",
"mode": "NULLABLE",
"description": "In the US, the FIPS code for a US county (or equivalent). For example, 06085.",
},
{
"name": "facility_region_place_id",
"type": "STRING",
"mode": "NULLABLE",
"description": "The Google place ID for the most-specific region, used in Google Places API and on Google Maps. For example, ChIJd_Y0eVIvkIARuQyDN0F1LBA.",
},
{
"name": "mode_of_transportation",
"type": "STRING",
"mode": "NULLABLE",
"description": "The mode of transport used to calculate the catchment boundary. For example, driving.",
},
{
"name": "travel_time_threshold_minutes",
"type": "INTEGER",
"mode": "NULLABLE",
"description": "The maximum travel time, in minutes, used to calculate the catchment boundary. For example, 30.",
},
{
"name": "facility_catchment_boundary",
"type": "GEOGRAPHY",
"mode": "NULLABLE",
"description": "A GeoJSON representation of the catchment area boundary of the site, for a particular mode of transportation and travel time threshold. Consists of multiple latitude and longitude points.",
},
],
)
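    # The four load tasks below have no dependencies on each other and therefore
    # run independently. To serialize them instead, one could chain them with the
    # >> operator, e.g. gcs_to_bq_table_us_all >> gcs_to_bq_table_us_drive (illustrative).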
gcs_to_bq_table_us_all
gcs_to_bq_table_us_drive
gcs_to_bq_table_us_transit
gcs_to_bq_table_us_walk
| GoogleCloudPlatform/public-datasets-pipelines | datasets/covid19_vaccination_access/pipelines/vaccination_access_to_bq/vaccination_access_to_bq_dag.py | Python | apache-2.0 | 20,609 |
# $Filename$
# $Authors$
# Last Changed: $Date$ $Committer$ $Revision-Id$
# Copyright (c) 2003-2011, German Aerospace Center (DLR)
# All rights reserved.
#Redistribution and use in source and binary forms, with or without
#modification, are permitted provided that the following conditions are
#met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the
# distribution.
#
#
# * Neither the name of the German Aerospace Center nor the names of
# its contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
#
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
#LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
#A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
#OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
#SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
#LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
#DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
#THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
#(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
#OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Edit dialog to change authentication information such as URL, username, password and LDAP support
"""
from PyQt4 import QtGui
from datafinder.gui.gen.user.authentification_edit_wizard_ui import Ui_editAuth
from datafinder.gui.user.dialogs.authentification_dialog.auth_pref_dialog import AuthPrefDialogView
__version__ = "$Revision-Id:$"
class AuthEditDialogView(QtGui.QDialog, Ui_editAuth):
"""
This dialog provides an interface to change credentials that belong to a specified connection
"""
def __init__(self, parent=None, preferences=None, currentUri = None):
"""
Constructor.
@param parent: Parent window of this L{QtGui.QDialog}
@type parent: C{QtGui.QWidget}
@param preferences: The preferences object.
@type preferences: L{PreferencesHandler<datafinder.core.configuration.preferences.PreferencesHandler>}
"""
QtGui.QDialog.__init__(self, parent)
Ui_editAuth.__init__(self)
self.setupUi(self)
self._preferences = preferences
self.savePasswordFlag = False
self._urlChangedSlot(currentUri)
def _urlChangedSlot(self, newUri):
""" Implementing changing of connection URI. """
self.currenturi = unicode(newUri)
connection = self._preferences.getConnection(self.currenturi)
        if connection is not None:
            self.username = connection.username
            self.authentification = connection.password
            self.savePasswordFlag = connection.password is not None
def _getUrl(self):
"""
Returns the entered url.
        @return: The url that was entered in the server field.
@rtype: C{string}
"""
return unicode(self.serverLineEdit.text())
    def _setUrl(self, urls):
        """
        Sets the given URL in the server field.
        @param urls: The url that has to be shown in the server field.
        @type urls: C{string}
        """
        self.serverLineEdit.setText(urls)
def _getUsername(self):
"""
Returns the username that was entered by the user.
@return: The username that was entered.
@rtype: C{string}
"""
return unicode(self.usernameLineEdit.text())
def _setUsername(self, username):
"""
Set a string that in the username field.
@param username: The username that has to be in the username field.
@type username: C{string}
"""
self.usernameLineEdit.setText(username or "")
def _getAuthentification(self):
"""
        Returns the authentication information from the password/certificate location field.
        @return: Returns the authentication information in the password field.
@rtype: C{string}
"""
return unicode(self.passwordLineEdit.text())
def _setAuthentification(self, authentification):
"""
Sets the password/credentials link in the credentials field.
@param authentification: The credential information that has to be in the password/certificate location field.
@type authentification: C{string}
"""
self.authLineEdit.setText(authentification or "")
    def _getAuthMechanism(self):
        """
        Returns the authentication mechanism from the authentication mechanism field.
        @return: The authentication mechanism selected in the combo box.
        @rtype: C{string}
        """
        return unicode(self.authMechanismCombo.currentText()) # QComboBox provides currentText(), not text()
    def _setAuthMechanism(self, authMechanism):
        """
        Sets the authentication mechanism in the authentication mechanism field.
        @param authMechanism: Index of the authentication mechanism to select.
        @type authMechanism: C{int}
        """
        self.authMechanismCombo.setCurrentIndex(authMechanism)
    def _getComment(self):
        """
        Returns the comment from the comment field.
        @return: The comment in the comment field.
        @rtype: C{string}
        """
        return unicode(self.commentPlainText.toPlainText()) # QPlainTextEdit provides toPlainText(), not text()
    def _setComment(self, comment):
        """
        Sets the comment in the comment field.
        @param comment: The comment to show in the comment field.
        @type comment: C{string}
        """
        self.commentPlainText.setPlainText(comment)
currenturi = property(_getUrl, _setUrl)
username = property(_getUsername, _setUsername)
authentification = property(_getAuthentification, _setAuthentification)
    authMechanism = property(_getAuthMechanism, _setAuthMechanism)
    comment = property(_getComment, _setComment)
def _preferencesActionSlot(self):
""" Shows the preferences dialog for connection settings. """
preferencesDialog = AuthPrefDialogView(self)
preferencesDialog.useLdap = self._preferences.useLdap
preferencesDialog.ldapBaseDn = self._preferences.ldapBaseDn
preferencesDialog.ldapServerUri = self._preferences.ldapServerUri
if preferencesDialog.exec_() == QtGui.QDialog.Accepted:
self._preferences.useLdap = preferencesDialog.useLdap
self._preferences.ldapBaseDn = preferencesDialog.ldapBaseDn
self._preferences.ldapServerUri = preferencesDialog.ldapServerUri
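
# Minimal usage sketch (the preferences handler and URI below are illustrative,
# not part of this module):
#   dialog = AuthEditDialogView(parent=None, preferences=preferencesHandler,
#                               currentUri=u"http://datafinder.example.org/repos")
#   if dialog.exec_() == QtGui.QDialog.Accepted:
#       username, password = dialog.username, dialog.authentification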
| DLR-SC/DataFinder | src/datafinder/gui/user/dialogs/authentification_dialog/auth_edit_dialog.py | Python | bsd-3-clause | 7,236 |
class Solution(object):
    def isPalindrome(self, s):
        """Return True if s reads the same forwards and backwards,
        considering only alphanumeric characters and ignoring case."""
        s = s.lower()
        start, end = 0, len(s) - 1
        while start < end:
            if not s[start].isalnum():    # skip non-alphanumeric on the left
                start += 1
            elif not s[end].isalnum():    # skip non-alphanumeric on the right
                end -= 1
            elif s[start] == s[end]:      # characters match: move both pointers inwards
                start += 1
                end -= 1
            else:
                return False
        return True
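
# Example usage of the two-pointer scan (illustrative values):
#   Solution().isPalindrome("A man, a plan, a canal: Panama")  # -> True
#   Solution().isPalindrome("race a car")                      # -> False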
| luosch/leetcode | python/Valid Palindrome.py | Python | mit | 461 |
from dimensioning import *
from dimensioning import __dir__ # not imported with * directive
import dimensioning
class PreviewVars:
def __init__(self):
self.SVG_initialization_width = -1
self.SVG_initialization_height = -1
def setTransform(self,drawingVars):
self.x_offset = drawingVars.VRT_ox
self.y_offset = drawingVars.VRT_oy
self.scale = drawingVars.VRT_scale
def applyTransform(self, pos ):
x_new = ( pos.x() - self.x_offset )/ self.scale
y_new = ( pos.y() - self.y_offset )/ self.scale
return x_new, y_new
preview = PreviewVars()
def initializePreview( dimensioningProcessTracker, dimensionSvgFun, dimensionClickHandler ):
drawingVars = dimensioningProcessTracker.drawingVars #shorthand
preview.dimensioningProcessTracker = dimensioningProcessTracker
preview.drawingVars = drawingVars
preview.setTransform(drawingVars)
if not hasattr(preview, 'SVG'):
createQtItems = True
elif not preview.removedQtItems:
debugPrint(3, 'initializePreview: flag indicating preview QtItems not removed from scene,')
#there are two possible options here
            case_msgs = [
                'FreeCAD.ActiveDocument.recompute() has been called without clean up, therefore Qt items would have been deleted',
                'dimensioningPreview interrupted by user selecting another dimensioning tool'
                ]
case = 0
for c in drawingVars.graphicsScene.children():
if isinstance(c,DimensionPreviewRect):
case = 1
debugPrint(3, 'initializePreview: case %s' % case_msgs[case])
if case == 0:
createQtItems = True
else:
removePreviewGraphicItems( recomputeActiveDocument=False)
createQtItems = False
else:
createQtItems = False
    if preview.SVG_initialization_width != drawingVars.width or preview.SVG_initialization_height != drawingVars.height:
        debugPrint(3, 'initializePreview: change in page rect size detected, recreating SVG graphics item')
createQtItems = True
if createQtItems:
# then initialize graphicsScene Objects, otherwise dont recreate objects.
# initializing dimPreview is particularly troublesome, rather unstable and occasionally causes FreeCAD to crash.
debugPrint(3, 'creating dimPreview QtGraphicsItems')
preview.rect = DimensionPreviewRect()
preview.SVG = QtSvg.QGraphicsSvgItem()
debugPrint(3, 'creating dimPreview SVG renderer')
preview.SVGRenderer = QtSvg.QSvgRenderer()
preview.SVGRenderer.load( QtCore.QByteArray( '''<svg width="%i" height="%i"> </svg>''' % (drawingVars.width, drawingVars.height) ) ) #without this something goes wrong...
preview.SVG_initialization_width = drawingVars.width
preview.SVG_initialization_height = drawingVars.height
preview.SVG.setSharedRenderer( preview.SVGRenderer )
preview.SVG.setTransform( drawingVars.transform )
preview.removedQtItems = False
debugPrint(4, 'adding SVG')
preview.SVGRenderer.load( QtCore.QByteArray( '''<svg width="%i" height="%i"> </svg>''' % (drawingVars.width, drawingVars.height) ) )
preview.SVG.update()
#preview.SVG.
drawingVars.graphicsScene.addItem( preview.SVG )
debugPrint(4, 'adding Rect')
preview.rect.setRect(0, 0, drawingVars.width, drawingVars.height)
preview.rect.dimensionSvgFun = dimensionSvgFun
preview.rect.dimensionClickHandler = dimensionClickHandler
preview.rect.setAcceptHoverEvents(True)
preview.rect.setFlag( QtGui.QGraphicsItem.GraphicsItemFlag.ItemIsFocusable, True )
preview.rect.setCursor( QtCore.Qt.ArrowCursor ) # http://qt-project.org/doc/qt-5/qt.html#CursorShape-enum
preview.rect.setZValue( 0.1 )
drawingVars.graphicsScene.addItem( preview.rect )
debugPrint(4, 'DimensionPreviewSvgGraphicsItem added to graphics Scene')
def removePreviewGraphicItems( recomputeActiveDocument = True, launchEndFunction=False, closeDialog=True ):
debugPrint(4,'removePreviewGraphicItems called, recomputeActiveDocument %s' % recomputeActiveDocument)
preview.drawingVars.graphicsScene.removeItem( preview.SVG )
preview.drawingVars.graphicsScene.removeItem( preview.rect )
preview.removedQtItems = True
if recomputeActiveDocument:
debugPrint(3,'removePreviewGraphicItems: recomputing')
recomputeWithOutViewReset( preview.drawingVars )
    if closeDialog and preview.dimensioningProcessTracker.taskDialog is not None:
FreeCADGui.Control.closeDialog()
del preview.drawingVars
    if launchEndFunction and preview.dimensioningProcessTracker.endFunction is not None:
timer.start( 1 ) # 1 ms (in theory)
def executeEndFunction():
'if problems try increasing time tick...'
timer.stop()
preview.dimensioningProcessTracker.endFunction()
timer = QtCore.QTimer()
timer.timeout.connect(executeEndFunction)
class DimensionPreviewRect(QtGui.QGraphicsRectItem):
def keyPressEvent(self, event):
#if len(event.text()) == 1:
# debugPrint(2, 'key pressed: event.text %s (ord %i)' % (event.text(), ord(event.text())))
if event.text() == chr(27): #escape key
removePreviewGraphicItems( recomputeActiveDocument = True )
def mousePressEvent( self, event ):
try:
if event.button() == QtCore.Qt.MouseButton.LeftButton:
x, y = preview.applyTransform( event.scenePos() )
debugPrint(3, 'mousePressEvent: x %f, y %f' % (x, y) )
instruction = self.dimensionClickHandler(x,y)
if instruction == None:
pass
elif instruction.startswith('createDimension:'):
viewName = instruction.split(':')[1]
FreeCAD.ActiveDocument.openTransaction(viewName)
XML = self.dimensionSvgFun( x, y )
debugPrint(3, XML)
debugPrint(2, 'creating dimension %s' % viewName)
obj = App.ActiveDocument.addObject('Drawing::FeatureView',viewName)
obj.ViewResult = XML
for prop in ['Rotation', 'Scale', 'ViewResult', 'X', 'Y']:
obj.setEditorMode(prop, 2)
preview.drawingVars.page.addObject( obj ) #App.ActiveDocument.getObject(viewName) )
removePreviewGraphicItems( recomputeActiveDocument=True, launchEndFunction=True )
FreeCAD.ActiveDocument.commitTransaction()
elif instruction == 'stopPreview':
removePreviewGraphicItems( recomputeActiveDocument=True, launchEndFunction=True )
else:
event.ignore()
except:
App.Console.PrintError(traceback.format_exc())
def hoverMoveEvent(self, event):
try:
x, y = preview.applyTransform( event.scenePos() )
debugPrint(4, 'hoverMoveEvent: x %f, y %f' % (x, y) )
XML = '<svg width="%i" height="%i"> %s </svg>' % (preview.drawingVars.width, preview.drawingVars.height, self.dimensionSvgFun( x, y ))
if isinstance(XML, unicode):
XML = XML.encode('utf8')
debugPrint(5, XML)
preview.SVGRenderer.load( QtCore.QByteArray( XML ) )
preview.SVG.update()
except:
App.Console.PrintError(traceback.format_exc())
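
# Minimal usage sketch (the names below are illustrative, not part of this module):
# a dimensioning command typically calls
#   initializePreview(tracker, mySvgFun, myClickHandler)
# where mySvgFun(x, y) returns the dimension SVG markup for the hovered position,
# and myClickHandler(x, y) returns 'createDimension:<viewName>', 'stopPreview' or None.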
| ulikoehler/FreeCAD_drawing_dimensioning | previewDimension.py | Python | gpl-3.0 | 7,480 |
from unittest import TestCase
from unittest.mock import Mock
from grortir.main.optimizers.grouping_strategy import GroupingStrategy
class TestGroupingStrategy(TestCase):
def test_get_items_from_group(self):
grouping_strategy = Mock()
grouping_strategy.get_actual_numbers_of_groups.return_value = 3
        # asking for group index 4 when only 3 groups exist must raise ValueError
        with self.assertRaises(ValueError):
            GroupingStrategy.get_items_from_group(grouping_strategy, 4)
| qbahn/grortir | grortir/test/optimizers/test_groupingStrategy.py | Python | mit | 437 |
#!python
#!/usr/bin/env python
#
# Script to export a Cubit13+/Trelis 2D mesh in specfem2d format for the elements QUAD9
# i.e. to create the files needed by specfem2d from the Cubit mesh
# Initial author unknown, comments and modifications by Alexis Bottero (alexis dot bottero At gmail dot com) and Ting YU, CNRS and EDF, France, December 2015
# From Ting Yu: element types: QUAD9 for the surface elements and BAR3 for the borders (bottom, right, top, left)
#   (if I understood your last mail correctly, we only need the two end points of an edge for the
#   boundary conditions on the borders (absorbing, free). I agree, but so that all the surface
#   elements in the mesh have 9 nodes, I prefer to change the border type to BAR3, which makes the
#   script simpler to modify.)
#
# Create your mesh in Cubit (or build the MeshSansCPMLquad9.py example) and play this script within Cubit as a Python journal file.
# Instructions for mesh creation :
# _The mesh must be in XZ plane!
# _One block per material :
# cubit.cmd('block 1 name "Acoustic channel" ') # acoustic material region
# cubit.cmd('block 1 attribute count 6') # number of attributes
# cubit.cmd('block 1 attribute index 1 1') # material index
# cubit.cmd('block 1 attribute index 2 1500 ') # vp
# cubit.cmd('block 1 attribute index 3 0 ') # vs
# cubit.cmd('block 1 attribute index 4 1000 ') # rho
# cubit.cmd('block 1 attribute index 5 0 ') # Q_flag
# cubit.cmd('block 1 attribute index 6 0 ') # anisotropy_flag
# cubit.cmd('block 1 element type QUAD9')
#
# _One block per border (abs_bottom, abs_right, abs_left, abs_top, topo, axis). If axisymmetric simulation don't create a block
# abs_left but a block axis.
# Ex:
# cubit.cmd('block 3 edge in surf all with z_coord > -0.1') # topo
# cubit.cmd('block 3 name "topo"')
#
#_ One block per pml layer of a given type (acoustic or elastic) : pml_x_acoust,pml_z_acoust,pml_xz_acoust,pml_x_elast,pml_z_elast,pml_xz_elast
# !! Warning !! pml blocks don't have faces in common
# !! Warning !! you must create the corresponding absorbing surface blocks (abs_bottom, abs_right, abs_left, abs_top)!
#
# Ideas to improve that script (ctrl+f for TODO also): _Allow 2D models built in XY and ZY planes
#
# The names of the block and the entities types must match the ones given during the definition of the class mesh on this file :
# Below :
# class mesh(object,mesh_tools):
# """ A class to store the mesh """
# def __init__(self):
#
#!! Warning : a block in cubit != quad !! A block is a group of something (quads, edges, volumes, surfaces...)
# On this case the blocks are used to gather faces corresponding to different materials and edges corresponding to free surfaces,
# absorbing surfaces, topography or axis
import cubit
class mtools(object):
"""docstring for mtools"""
def __init__(self,frequency,list_surf,list_vp):
super(mtools, self).__init__()
self.frequency = frequency
self.list_surf = list_surf
self.list_vp = list_vp
self.ngll = 5
self.percent_gll = 0.172
self.point_wavelength = 5
def __repr__(self):
txt = 'Meshing for frequency up to '+str(self.frequency)+'Hz\n'
for surf,vp in zip(self.list_surf,self.list_vp):
            txt = txt+'surface '+str(surf)+', vp ='+str(vp)+' -> size '+str(self.freq2meshsize(vp)[0])+' -> dt '+str(self.freq2meshsize(vp)[1])+'\n'
return txt
def freq2meshsize(self,vp):
velocity = vp*.5
self.size = (1/2.5)*velocity/self.frequency*(self.ngll-1)/self.point_wavelength
self.dt = .4*self.size/vp*self.percent_gll
return self.size,self.dt
def mesh_it(self):
for surf,vp in zip(self.list_surf,self.list_vp):
command = "surface "+str(surf)+" size "+str(self.freq2meshsize(vp)[0])
cubit.cmd(command)
command = "surface "+str(surf)+ 'scheme pave'
cubit.cmd(command)
command = "mesh surf "+str(surf)
cubit.cmd(command)
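
# Minimal usage sketch for mtools (surface ids and velocities are illustrative):
#   tools = mtools(10.0, [1, 2], [1500.0, 3000.0]) # mesh for frequencies up to 10 Hz
#   print tools                                    # report element size and dt per surface
#   tools.mesh_it()                                # mesh the listed surfaces in Cubit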
class block_tools:
    def __init__(self):
        pass
def create_blocks(self,mesh_entity,list_entity = None,):
if mesh_entity =='surface':
txt = ' face in surface '
elif mesh_entity == 'curve':
txt = ' edge in curve '
elif mesh_entity == 'group':
txt = ' face in group '
if list_entity:
if not isinstance(list_entity,list):
list_entity = [list_entity]
for entity in list_entity:
iblock = cubit.get_next_block_id()
command = "block "+str(iblock)+ txt +str(entity)
cubit.cmd(command)
def material_file(self,filename):
        matfile = open(filename,'r') # open the material database for reading
material = []
for record in matfile:
mat_name,vp_str = record.split()
vp = float(vp_str)
material.append([mat_name,vp])
self.material = dict(material)
def assign_block_material(self,id_block,mat_name,vp = None):
try:
material = self.material
except:
material = None
cubit.cmd('block '+str(id_block)+' attribute count 2')
cubit.cmd('block '+str(id_block)+' attribute index 1 '+str(id_block))
if material:
if material.has_key(mat_name):
cubit.cmd('block '+str(id_block)+' attribute index 2 '+str(material[mat_name]))
print 'block '+str(id_block)+' - material '+mat_name+' - vp '+str(material[mat_name])+' from database'
elif vp:
cubit.cmd('block '+str(id_block)+' attribute index 2 '+str(vp))
print 'block '+str(id_block)+' - material '+mat_name+' - vp '+str(vp)
else:
print 'assignment impossible: check if '+mat_name+' is in the database or specify vp'
class mesh_tools(block_tools):
"""Tools for the mesh
#########
    dt,edge_dt,freq,edge_freq = seismic_resolution(edges,velocity,bins_d = None,bins_u = None,sidelist = None)
Given the velocity of a list of edges, seismic_resolution provides the minimum Dt required for the stability condition (and the corrisponding edge).
Furthermore, given the number of gll point in the element (ngll) and the number of GLL point for wavelength, it provide the maximum resolved frequency.
#########
length = edge_length(edge)
return the length of a edge
#########
edge_min,length = edge_min_length(surface)
given the cubit id of a surface, it return the edge with minimun length
#########
"""
    def __init__(self):
        pass
def seismic_resolution(self,edges,velocity,bins_d = None,bins_u = None,sidelist = None):
"""
        dt,edge_dt,freq,edge_freq = seismic_resolution(edges,velocity,bins_d = None,bins_u = None,sidelist = None)
Given the velocity of a list of edges, seismic_resolution provides the minimum Dt required for the stability condition (and the corrisponding edge).
Furthermore, given the number of gll point in the element (ngll) and the number of GLL point for wavelength, it provide the maximum resolved frequency.
"""
ratiostore = 1e10
dtstore = 1e10
edgedtstore = -1
edgeratiostore = -1
for edge in edges:
d = self.edge_length(edge)
ratio = (1/2.5)*velocity/d*(self.ngll-1)/self.point_wavelength
dt = .4*d/velocity*self.percent_gll
if dt<dtstore:
dtstore = dt
edgedtstore = edge
if ratio < ratiostore:
ratiostore = ratio
edgeratiostore = edge
try:
for bin_d,bin_u,side in zip(bins_d,bins_u,sidelist):
if ratio >= bin_d and ratio < bin_u:
command = "sideset "+str(side)+" edge "+str(edge)
cubit.cmd(command)
#print command
break
except:
pass
return dtstore,edgedtstore,ratiostore,edgeratiostore
def edge_length(self,edge):
"""
length = edge_length(edge)
return the length of a edge
"""
from math import sqrt
nodes = cubit.get_connectivity('edge',edge)
x0,y0,z0 = cubit.get_nodal_coordinates(nodes[0])
x1,y1,z1 = cubit.get_nodal_coordinates(nodes[1])
d = sqrt((x1-x0)**2+(y1-y0)**2+(z1-z0)**2)
return d
def edge_min_length(self,surface):
"""
edge_min,length = edge_min_length(surface)
given the cubit id of a surface, it return the edge with minimun length
"""
from math import sqrt
self.dmin = 99999
edge_store = 0
command = "group 'list_edge' add edge in surf "+str(surface)
command = command.replace("["," ").replace("]"," ")
#print command
cubit.cmd(command)
group = cubit.get_id_from_name("list_edge")
edges = cubit.get_group_edges(group)
command = "delete group "+ str(group)
cubit.cmd(command)
for edge in edges:
d = self.edge_length(edge)
            if d < self.dmin:
self.dmin = d
edge_store = edge
self.edgemin = edge_store
return self.edgemin,self.dmin
def jac_check(self,nodes):
x0 = cubit.get_nodal_coordinates(nodes[0])
x1 = cubit.get_nodal_coordinates(nodes[1])
x2 = cubit.get_nodal_coordinates(nodes[2])
xv1 = x1[0]-x0[0]
xv2 = x2[0]-x1[0]
zv1 = x1[2]-x0[2]
zv2 = x2[2]-x1[2]
jac = -xv2*zv1+xv1*zv2
if jac > 0:
return nodes
elif jac < 0: # change the ordre for the local coordinate system for 9 node finite elements Page.11 in Specfem2d-manual.pdf
return nodes[0],nodes[3],nodes[2],nodes[1],nodes[7],nodes[6],nodes[5],nodes[4],nodes[8]
else:
print 'error, jacobian = 0', jac,nodes
def mesh_analysis(self,frequency):
cubit.cmd('set info off') # Turn off return messages from Cubit commands
cubit.cmd('set echo off') # Turn off echo of Cubit commands
cubit.cmd('set journal off') # Do not save journal file
bins_d = [0.0001]+range(0,int(frequency)+1)+[1000]
bins_u = bins_d[1:]
dt = []
ed_dt = []
r = []
ed_r = []
nstart = cubit.get_next_sideset_id()
command = "del sideset all"
cubit.cmd(command)
for bin_d,bin_u in zip(bins_d,bins_u):
nsideset = cubit.get_next_sideset_id()
command = 'create sideset '+str(nsideset)
cubit.cmd(command)
command = "sideset "+str(nsideset)+ " name "+ "'ratio-["+str(bin_d)+"_"+str(bin_u)+"['"
cubit.cmd(command)
nend = cubit.get_next_sideset_id()
sidelist = range(nstart,nend)
for block in self.block_mat:
name = cubit.get_exodus_entity_name('block',block)
velocity = self.material[name][1]
if velocity > 0:
faces = cubit.get_block_faces(block)
edges = []
for face in faces:
es = cubit.get_sub_elements("face", face, 1)
edges = edges+list(es)
dtstore,edgedtstore,ratiostore,edgeratiostore = self.seismic_resolution(edges,velocity,bins_d,bins_u,sidelist)
dt.append(dtstore)
ed_dt.append(edgedtstore)
r.append(ratiostore)
ed_r.append(edgeratiostore)
self.ddt = zip(ed_dt,dt)
self.dr = zip(ed_r,r)
def sorter(x, y):
return cmp(x[1],y[1])
self.ddt.sort(sorter)
self.dr.sort(sorter)
print self.ddt,self.dr
print 'Deltat minimum => edge:'+str(self.ddt[0][0])+' dt: '+str(self.ddt[0][1])
print 'Minimum frequency resolved => edge:'+str(self.dr[0][0])+' frequency: '+str(self.dr[0][1])
return self.ddt[0],self.dr[0]
class mesh(object,mesh_tools):
""" A class to store the mesh """
def __init__(self):
super(mesh, self).__init__()
self.mesh_name = 'mesh_file'
self.axisymmetric_mesh = False # Will be set to true if a group self.pml_boun_name is found
self.topo_mesh = False # Will be set to true if a group self.topo is found
self.abs_mesh = False # Will be set to true if a group self.pml_boun_name or self.abs_boun_name is found
self.pml_layers = False # Will be set to true if a group self.pml_boun_name is found
self.write_nummaterial_velocity_file = False # Will be set to True if 2d blocks have 6 attributes
self.nodecoord_name = 'nodes_coords_file' # Name of nodes coordinates file to create
self.material_name = 'materials_file' # Name of material file to create
self.nummaterial_name = 'nummaterial_velocity_file'
self.absname = 'absorbing_surface_file' # Name of absorbing surface file to create
self.freename = 'free_surface_file' # Name of free surface file to create
self.pmlname = 'elements_cpml_list' # Name of cpml file to create
self.axisname = 'elements_axis' # Name of axial elements file to create and name of the block containing axial edges
self.recname = 'STATIONS'
self.face = 'QUAD9' # Faces' type
self.edge = 'BAR3' # Edges' type
self.topo = 'topo' # Name of the block containing topography edges
self.pml_boun_name = ['pml_x_acoust','pml_z_acoust','pml_xz_acoust','pml_x_elast','pml_z_elast','pml_xz_elast'] # Name of the block containing pml layers elements
self.abs_boun_name = ['abs_bottom','abs_right','abs_top','abs_left'] # Name of the block containing absorbing layer edges
self.abs_boun = [] # block numbers for abs boundaries
self.pml_boun = [] # block numbers for pml boundaries
self.nabs = 4 # Maximum number of absorbing surfaces (4)
self.rec = 'receivers'
self.block_definition() # Import blocks features from Cubit
self.ngll = 5
self.percent_gll = 0.172
self.point_wavelength = 5
cubit.cmd('compress') # Fill the gaps in the numbering of the entities
def __repr__(self):
pass
def block_definition(self):
""" Import blocks features from Cubit """
block_flag = [] # Will contain material id (1 if fluid 2 if solid)
block_mat = [] # Will contain face block ids
block_bc = [] # Will contain edge block ids
block_bc_flag = [] # Will contain edge id -> 2
abs_boun = [-1] * self.nabs # total 4 sides of absorbing boundaries (index 0 : bottom, index 1 : right, index 2 : top, index 3 : left)
#pml_boun = [-1] * 6 # To store pml layers id (for each pml layer : x_acoust, z_acoust, xz_acoust, x_elast, z_elast, xz_elast)
pml_boun = [[] for _ in range(6)] # To store the block id corresponding to pml layers id (arbitrary number of blocks for each pml layer : x_acoust, z_acoust, xz_acoust, x_elast, z_elast, xz_elast)
material = {} # Will contain each material name and their properties
bc = {} # Will contains each boundary name and their connectivity -> 2
blocks = cubit.get_block_id_list() # Load the blocks list
for block in blocks: # Loop on the blocks
name = cubit.get_exodus_entity_name('block',block) # Contains the name of the blocks
ty = cubit.get_block_element_type(block) # Contains the block element type (QUAD4...)
if ty == self.face: # If we are dealing with a block containing faces
nAttributes = cubit.get_block_attribute_count(block)
if (nAttributes != 1 and nAttributes != 6):
print 'Blocks not properly defined, 2d blocks must have one attribute (material id) or 6 attributes'
return None,None,None,None,None,None,None,None
flag=int(cubit.get_block_attribute_value(block,0)) # Fetch the first attribute value (containing material id)
print "nAttributes : ",nAttributes
if nAttributes == 6:
self.write_nummaterial_velocity_file = True
                    velP = cubit.get_block_attribute_value(block,1) # Fetch the second attribute value (containing P wave velocity)
                    velS = cubit.get_block_attribute_value(block,2) # Fetch the third attribute value (containing S wave velocity)
                    rho = cubit.get_block_attribute_value(block,3) # Fetch the fourth attribute value (containing material density)
                    qFlag = cubit.get_block_attribute_value(block,4) # Fetch the fifth attribute value (containing Qflag)
                    anisotropy_flag = cubit.get_block_attribute_value(block,5) # Fetch the sixth attribute value (containing anisotropy_flag)
# Store (material_id,rho,velP,velS,Qflag,anisotropy_flag) in par :
par = tuple([flag,rho,velP,velS,qFlag,anisotropy_flag])
material[name] = par # associate the name of the block to its id and properties
block_flag.append(int(flag)) # Append material id to block_flag
block_mat.append(block) # Append block id to block_mat
if name in self.pml_boun_name : # If the block considered refered to one of the pml layer
self.abs_mesh=True
self.pml_layers=True
pml_boun[self.pml_boun_name.index(name)]=block
# -> Put it at the correct position in pml_boun
# (index 0 : pml_x_acoust, index 1 : pml_z_acoust, index 2 : pml_xz_acoust,
# index 3 : pml_x_elast, index 4 : pml_z_elast, index 5 : pml_xz_elast)
elif ty == self.edge: # If we are dealing with a block containing edges
block_bc_flag.append(2) # Append "2" to block_bc_flag
block_bc.append(block) # Append block id to block_bc
bc[name] = 2 # Associate the name of the block with its connectivity : an edge has connectivity = 2
if name == self.topo:
self.topo_mesh=True
topography=block # If the block considered refered to topography store its id in "topography"
if name == self.axisname:
self.axisymmetric_mesh = True
axisId = block # AXISYM If the block considered refered to the axis store its id in "axisId"
if name in self.abs_boun_name : # If the block considered refered to one of the boundaries
self.abs_mesh = True
abs_boun[self.abs_boun_name.index(name)] = block
# -> Put it at the correct position in abs_boun (index 0 : bottom, index 1 : right, index 2 : top, index 3 : left)
else:
print 'Blocks not properly defined', ty
return None,None,None,None,None,None,None,None
nsets = cubit.get_nodeset_id_list() # Get the list of all nodeset
if len(nsets) == 0: self.receivers = None # If this list is empty : put None in self.receivers
for nset in nsets:
name = cubit.get_exodus_entity_name('nodeset',nset) # Contains the name of the nodeset
if name == self.rec: # If the name considered match self.rec (receivers)
self.receivers = nset # Store the id of the nodeset in self.receivers
else:
print 'nodeset '+name+' not defined'
self.receivers = None
# Store everything in the object :
try:
self.block_mat = block_mat
self.block_flag = block_flag
self.block_bc = block_bc
self.block_bc_flag = block_bc_flag
self.bc = bc
if self.write_nummaterial_velocity_file:
self.material = material
if self.abs_mesh:
self.abs_boun = abs_boun
if self.topo_mesh:
self.topography = topography
if self.axisymmetric_mesh:
self.axisId = axisId
if self.pml_layers:
self.pml_boun = pml_boun
except:
print 'Blocks not properly defined'
# def tomo(self,flag,vel):
# vp = vel/1000
# rho = (1.6612*vp-0.472*vp**2+0.0671*vp**3-0.0043*vp**4+0.000106*vp**4)*1000
# txt = '%3i %1i %20f %20f %20f %1i %1i\n' % (flag,1,rho,vel,vel/(3**.5),0,0)
# return txt
def nummaterial_write(self,nummaterial_name):
""" Write material features on file : nummaterial_name """
print 'Writing '+nummaterial_name+'.....'
nummaterial = open(nummaterial_name,'w') # Create the file "nummaterial_name" and open it
for block in self.block_mat: # For each 2D block
name = cubit.get_exodus_entity_name('block',block) # Extract the name of the block
lineToWrite = str(self.material[name][0])+" 1 "+str(self.material[name][1])+" " \
+str(self.material[name][2])+" "+str(self.material[name][3])+" "+str(self.material[name][4])+" " \
+str(self.material[name][5])+"\n" # flag rho vp vs rho Qflag anisotropy_flag
nummaterial.write(lineToWrite)
#nummaterial.write(self.tomo(self.material[name][0],self.material[name][2]))
nummaterial.close()
print 'Ok'
def mesh_write(self,mesh_name):
""" Write mesh (quads ids with their corresponding nodes ids) on file : mesh_name """
meshfile = open(mesh_name,'w')
print 'Writing '+mesh_name+'.....'
num_elems = cubit.get_quad_count() # Store the number of elements
meshfile.write(str(num_elems)+'\n') # Write it on first line
num_write = 0
for block,flag in zip(self.block_mat,self.block_flag): # for each 2D block
quads = cubit.get_block_faces(block) # Import quads ids
for inum,quad in enumerate(quads): # For each of these quads
nodes = cubit.get_expanded_connectivity('face',quad) # Get all the nodes in quad
nodes = self.jac_check(nodes) # Check the jacobian
txt = ('%10i %10i %10i %10i %10i %10i %10i %10i %10i\n')% nodes
meshfile.write(txt) # Write a line to mesh file
num_write = num_write+inum+1
print 'block', block, 'number of ',self.face,' : ', inum+1
meshfile.close()
print 'Ok num elements/write=',str(num_elems), str(num_write)
def material_write(self,mat_name):
""" Write quads material on file : mat_name """
mat = open(mat_name,'w')
print 'Writing '+mat_name+'.....'
for block,flag in zip(self.block_mat,self.block_flag): # for each 2D block
quads = cubit.get_block_faces(block) # Import quads id
for quad in quads: # For each quad
mat.write(('%10i\n') % flag) # Write its id in the file
mat.close()
print 'Ok'
def pmls_write(self,pml_name):
""" Write pml elements on file : mat_name """
cubit.cmd('set info off') # Turn off return messages from Cubit commands
cubit.cmd('set echo off') # Turn off echo of Cubit commands
pml_file = open(pml_name,'w')
print 'Writing '+pml_name+'.....'
npml_elements = 0
id_element = 0 # Global id
faces_all = [0]*6
for block,flag in zip(self.block_mat,self.block_flag): # For each 2D block
for ipml in range(0, 6): # iabs = 0,1,2,3,4,5 : for each pml layer (x_acoust, z_acoust, xz_acoust,x_elast, z_elast, xz_elast)
if block == self.pml_boun[ipml]: # If the block considered correspond to the pml
faces_all[ipml]=cubit.get_block_faces(block) # Import all pml faces id as a Set
npml_elements=npml_elements+len(faces_all[ipml])
pml_file.write('%10i\n' % npml_elements) # Print the number of faces on the pmls
print 'Number of elements in all PMLs :',npml_elements
for block,flag in zip(self.block_mat,self.block_flag): # For each 2D block
quads = cubit.get_block_faces(block) # Import quads id
for quad in quads: # For each quad
id_element = id_element+1 # global id of this quad
for ipml in range(0, 6): # iabs = 0,1,2,3,4,5 : for each pml layer (x_acoust, z_acoust, xz_acoust,x_elast, z_elast, xz_elast)
if type(faces_all[ipml]) is not int: # ~ if there are elements in that pml
if quad in faces_all[ipml]: # If this quad is belong to that pml
# nodes = cubit.get_connectivity('face',quad) # Import the nodes describing the quad
# nodes = self.jac_check(list(nodes)) # Check the jacobian of the quad
pml_file.write(('%10i %10i\n') % (id_element,ipml%3+1)) # Write its id in the file next to its type
# ipml%3+1 = 1 -> element belongs to a X CPML layer only (either in Xmin or in Xmax)
# ipml%3+1 = 2 -> element belongs to a Z CPML layer only (either in Zmin or in Zmax)
# ipml%3+1 = 3 -> element belongs to both a X and a Y CPML layer (i.e., to a CPML corner)
pml_file.close()
print 'Ok'
cubit.cmd('set info on') # Turn on return messages from Cubit commands
cubit.cmd('set echo on') # Turn on echo of Cubit commands
def nodescoord_write(self,nodecoord_name):
""" Write nodes coordinates on file : nodecoord_name """
nodecoord = open(nodecoord_name,'w')
print 'Writing '+nodecoord_name+'.....'
node_list = cubit.parse_cubit_list('node','all') # Import all the nodes of the model
num_nodes = len(node_list) # Total number of nodes
nodecoord.write('%10i\n' % num_nodes) # Write the number of nodes on the first line
for node in node_list: # For all nodes
x,y,z = cubit.get_nodal_coordinates(node) # Import its coordinates (3 coordinates even for a 2D model in cubit)
txt = ('%20f %20f\n') % (x,z)
nodecoord.write(txt) # Write x and z coordinates on the file -> Model must be in x,z coordinates. TODO
nodecoord.close()
print 'Ok'
def free_write(self,freename): #freename = None):
""" Write free surface on file : freename """
cubit.cmd('set info off') # Turn off return messages from Cubit commands
cubit.cmd('set echo off') # Turn off echo of Cubit commands
cubit.cmd('set journal off') # Do not save journal file
from sets import Set
# if not freename: freename = self.freename
freeedge = open(freename,'w')
print 'Writing '+freename+'.....'
if self.topo_mesh:
for block,flag in zip(self.block_bc,self.block_bc_flag): # For each 1D block
if block == self.topography: # If the block correspond to topography
edges_all = Set(cubit.get_block_edges(block)) # Import all topo edges id as a Set
freeedge.write('%10i\n' % len(edges_all)) # Print the number of edges on the free surface
print 'Number of edges in free surface :',len(edges_all)
id_element=0
for block,flag in zip(self.block_mat,self.block_flag): # For each 2D block
quads = cubit.get_block_faces(block) # Import quads id
for quad in quads: # For each quad
id_element = id_element+1 # id of this quad
edges = Set(cubit.get_sub_elements("face", quad, 1)) # Get the lower dimension entities associated with a higher dimension entities.
# Here it gets the 1D edges associates with the face of id "quad". Store it as a Set
intersection = edges & edges_all # Contains the edges of the considered quad that is on the free surface
if len(intersection) != 0: # If this quad touch the free surface
nodes = cubit.get_expanded_connectivity('face',quad) # Import the nodes describing the quad
nodes = self.jac_check(list(nodes)) # Check the jacobian of the quad
for e in intersection: # For each edge on the free surface
node_edge = cubit.get_connectivity('edge',e) # Import the nodes describing the edge
nodes_ok = []
for i in nodes: # ??? TODO nodes_ok == node_edge ???
if i in node_edge:
nodes_ok.append(i)
txt='%10i %10i %10i %10i\n' % (id_element,2,nodes_ok[0],nodes_ok[1])
# Write the id of the quad, 2 (number of nodes describing a free surface elements), and the nodes
freeedge.write(txt)
else:
freeedge.write('0') # Even without any free surface specfem2d need a file with a 0 in first line
freeedge.close()
print 'Ok'
cubit.cmd('set info on') # Turn on return messages from Cubit commands
cubit.cmd('set echo on') # Turn on echo of Cubit commands
def abs_write(self,absname): #absname=None):
""" Write absorbing surfaces on file : absname """
cubit.cmd('set info off') # Turn off return messages from Cubit commands
cubit.cmd('set echo off') # Turn off echo of Cubit commands
cubit.cmd('set journal off') # Do not save journal file
from sets import Set
# if not absname: absname = self.absname
absedge = open(absname,'w')
print 'Writing '+absname+'.....'
edges_abs = [Set()]*self.nabs # edges_abs[0] will be a Set containing the nodes describing bottom adsorbing boundary
# (index 0 : bottom, index 1 : right, index 2 : top, index 3 : left)
nedges_all = 0 # To count the total number of absorbing edges
for block,flag in zip(self.block_bc,self.block_bc_flag): # For each 1D block
for iabs in range(0, self.nabs): # iabs = 0,1,2,3 : for each absorbing boundaries
if block == self.abs_boun[iabs]: # If the block considered correspond to the boundary
edges_abs[iabs] = Set(cubit.get_block_edges(block)) # Store each edge on edges_abs
nedges_all = nedges_all+len(edges_abs[iabs]); # add the number of edges to nedges_all
absedge.write('%10i\n' % nedges_all) # Write the total number of absorbing edges to the first line of file
print 'Number of edges', nedges_all
id_element = 0
for block,flag in zip(self.block_mat,self.block_flag): # For each 2D block
quads = cubit.get_block_faces(block) # Import quads id
for quad in quads: # For each quad
id_element = id_element+1 # id of this quad
edges = Set(cubit.get_sub_elements("face", quad, 1)) # Get the lower dimension entities associated with a higher dimension entities.
# Here it gets the 1D edges associates with the face of id "quad". Store it as a Set
for iabs in range(0,self.nabs): # iabs = 0,1,2,3 : for each absorbing boundaries
intersection = edges & edges_abs[iabs] # Contains the edges of the considered quad that is on the absorbing boundary considered
if len(intersection) != 0: # If this quad touch the absorbing boundary considered
nodes = cubit.get_expanded_connectivity('face',quad) # Import the nodes describing the quad
nodes = self.jac_check(list(nodes)) # Check the jacobian of the quad
for e in intersection: # For each edge on the absorbing boundary considered
node_edge = cubit.get_connectivity('edge',e) # Import the nodes describing the edge
nodes_ok = []
for i in nodes: # Loop on the nodes of the quad
if i in node_edge: # If this node is belonging to absorbing surface
nodes_ok.append(i) # Add it to nodes_ok
txt = '%10i %10i %10i %10i %10i\n' % (id_element,2,nodes_ok[0],nodes_ok[1],iabs+1)
# Write the id of the quad, 2 (number of nodes describing a free surface elements), the nodes and the type of boundary
absedge.write(txt)
absedge.close()
print 'Ok'
cubit.cmd('set info on') # Turn on return messages from Cubit commands
cubit.cmd('set echo on') # Turn on echo of Cubit commands
def axis_write(self,axis_name):
""" Write axis on file """
cubit.cmd('set info off') # Turn off return messages from Cubit commands
cubit.cmd('set echo off') # Turn off echo of Cubit commands
cubit.cmd('set journal off') # Do not save journal file
from sets import Set
axisedge = open(axis_name,'w')
print 'Writing '+axis_name+'.....'
for block,flag in zip(self.block_bc,self.block_bc_flag): # For each 1D block
if block == self.axisId: # If the block correspond to the axis
edges_all = Set(cubit.get_block_edges(block)) # Import all axis edges id as a Set
axisedge.write('%10i\n' % len(edges_all)) # Write the number of edges on the axis
print 'Number of edges on the axis :',len(edges_all)
id_element = 0
for block,flag in zip(self.block_mat,self.block_flag): # For each 2D block
quads = cubit.get_block_faces(block) # Import quads id
for quad in quads: # For each quad
id_element = id_element+1 # id of this quad
edges = Set(cubit.get_sub_elements("face", quad, 1)) # Get the lower dimension entities associated with a higher dimension entities.
# Here it gets the 1D edges associates with the face of id "quad". Store it as a Set
intersection = edges & edges_all # Contains the edges of the considered quad that are on the axis
if len(intersection) != 0: # If this quad touch the axis
nodes = cubit.get_connectivity('face',quad) # Import the nodes describing the quad
nodes = self.jac_check(list(nodes)) # Check the jacobian of the quad
for e in intersection: # For each edge on the axis
node_edge = cubit.get_connectivity('edge',e) # Import the nodes describing the edge
nodes_ok = []
for i in nodes: # Loop on the nodes of the quad
if i in node_edge: # If this node is belonging to the axis
nodes_ok.append(i) # Add it to nodes_ok
txt = '%10i %10i %10i %10i %10i\n' % (id_element,2,nodes_ok[0],nodes_ok[1],4)
# Write the id of the quad, 2 (number of nodes describing a free surface elements), the nodes, and the orientation (4=left)
axisedge.write(txt)
axisedge.close()
print 'Ok'
cubit.cmd('set info on') # Turn on return messages from Cubit commands
cubit.cmd('set echo on') # Turn on echo of Cubit commands
def rec_write(self,recname):
""" Write receivers coordinates on file recname """
print 'Writing '+self.recname+'.....'
recfile = open(self.recname,'w')
nodes = cubit.get_nodeset_nodes(self.receivers) # Import nodes in nodeset containing receiver positions
for i,n in enumerate(nodes): # For each receiver
x,y,z = cubit.get_nodal_coordinates(n) # Import its coordinates (3 coordinates even for a 2D model in cubit)
recfile.write('ST%i XX %20f %20f 0.0 0.0 \n' % (i,x,z)) # Write x and z coordinates on the file -> Model must be in x,z coordinates. TODO
recfile.close()
print 'Ok'
def write(self,path = ''):
""" Write mesh in specfem2d format """
        print 'Writing mesh files in specfem2d format.....'
import os
cubit.cmd('set info off') # Turn off return messages from Cubit commands
cubit.cmd('set echo off') # Turn off echo of Cubit commands
cubit.cmd('set journal off') # Do not save journal file
if len(path) != 0: # If a path is supplied add a / at the end if needed
if path[-1] != '/': path = path+'/'
else:
path = os.getcwd()+'/'
self.mesh_write(path+self.mesh_name) # Write mesh file
self.material_write(path+self.material_name) # Write material file
self.nodescoord_write(path+self.nodecoord_name) # Write nodes coord file
self.free_write(path+self.freename) # Write free surface file (specfem2d needs it even if there is no free surface)
if self.abs_mesh:
self.abs_write(path+self.absname) # Write absorbing surface file
if self.axisymmetric_mesh:
self.axis_write(path+self.axisname) # Write axis on file
if self.pml_layers:
            self.pmls_write(path+self.pmlname) # Write PML layers on file
if self.write_nummaterial_velocity_file:
self.nummaterial_write(path+self.nummaterial_name) # Write nummaterial file
if self.receivers:
self.rec_write(path+self.recname) # If receivers has been set (as nodeset) write receiver file as well
        print 'Mesh files have been written in '+path
cubit.cmd('set info on') # Turn on return messages from Cubit commands
cubit.cmd('set echo on') # Turn on echo of Cubit commands
profile = mesh() # Store the mesh from Cubit
profile.write() # Write it into files (in specfem2d format). profile.write(/path/to/directory)
| geodynamics/specfem2d | utils/cubit2specfem2d/cubit2specfem2d_QUAD9.py | Python | gpl-3.0 | 38,224 |
#!/usr/local/bin/python
# -*- coding: utf-8 -*-
# file: main.py
import el, he | markomanninen/isopsephy | romanize/main.py | Python | mit | 78 |
# Copyright (C) 2009 Nokia Corporation
# Copyright (C) 2009-2012 Collabora Ltd.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
import dbus
import dbus.service
from servicetest import EventPattern, tp_name_prefix, tp_path_prefix, \
call_async, assertEquals, assertContains
from mctest import (exec_test, SimulatedConnectionManager, AccountManager)
import constants as cs
def test(q, bus, mc):
account_manager = AccountManager(bus)
# Check AccountManager has D-Bus property interface
call_async(q, account_manager.Properties, 'GetAll', cs.AM)
properties, = q.expect('dbus-return', method='GetAll').value
assert properties is not None
assert properties.get('ValidAccounts') == [], \
properties.get('ValidAccounts')
assert properties.get('InvalidAccounts') == [], \
properties.get('InvalidAccounts')
interfaces = properties.get('Interfaces')
supported = properties.get('SupportedAccountProperties')
assert (cs.ACCOUNT + '.AutomaticPresence') in supported
assert (cs.ACCOUNT + '.Enabled') in supported
assert (cs.ACCOUNT + '.Icon') in supported
assert (cs.ACCOUNT + '.Nickname') in supported
assert (cs.ACCOUNT + '.ConnectAutomatically') in supported
assert (cs.ACCOUNT_IFACE_AVATAR + '.Avatar') in supported
assert (cs.ACCOUNT + '.RequestedPresence') in supported
assert (cs.ACCOUNT + '.Supersedes') in supported
assertContains(cs.ACCOUNT + '.Service', supported)
    params = dbus.Dictionary({"account": "[email protected]",
"password": "secrecy"}, signature='sv')
simulated_cm = SimulatedConnectionManager(q, bus)
creation_properties = dbus.Dictionary({
cs.ACCOUNT + '.Enabled': True,
cs.ACCOUNT + '.AutomaticPresence': dbus.Struct((
dbus.UInt32(cs.PRESENCE_BUSY),
'busy', 'Exploding'), signature='uss'),
cs.ACCOUNT + '.RequestedPresence': dbus.Struct((
dbus.UInt32(cs.PRESENCE_AWAY),
'away', 'Respawning'), signature='uss'),
cs.ACCOUNT + '.Icon': 'quake3arena',
cs.ACCOUNT + '.Nickname': 'AnArKi',
cs.ACCOUNT + '.ConnectAutomatically': True,
cs.ACCOUNT_IFACE_AVATAR + '.Avatar': (dbus.ByteArray('foo'),
'image/jpeg'),
cs.ACCOUNT + '.Supersedes': dbus.Array([
cs.ACCOUNT_PATH_PREFIX + 'q1/q1/Ranger',
cs.ACCOUNT_PATH_PREFIX + 'q2/q2/Grunt',
], signature='o'),
cs.ACCOUNT + '.Service': 'arena',
}, signature='sv')
call_async(q, account_manager, 'CreateAccount',
'fakecm',
'fakeprotocol',
'fakeaccount',
params,
creation_properties)
# The spec has no order guarantee here.
# FIXME: MC ought to also introspect the CM and find out that the params
# are in fact sufficient
am_signal, ret, rc = q.expect_many(
EventPattern('dbus-signal', path=cs.AM_PATH,
signal='AccountValidityChanged', interface=cs.AM),
EventPattern('dbus-return', method='CreateAccount'),
EventPattern('dbus-method-call', method='RequestConnection'),
)
account_path = ret.value[0]
assert am_signal.args == [account_path, True], am_signal.args
# We called IdentifyAccount, which normalized the silly account name.
# The _xx hex-escaping and the trailing digit are implementation details.
assert account_path.endswith('/anarki_40example_2ecom0'), account_path
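    # (Illustrative note, added: '@' is 0x40 and '.' is 0x2e, so
    # '[email protected]' normalizes to 'anarki_40example_2ecom' plus a digit.)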
assert account_path is not None
account = bus.get_object(
cs.tp_name_prefix + '.AccountManager',
account_path)
account_props = dbus.Interface(account, cs.PROPERTIES_IFACE)
properties = account_props.GetAll(cs.ACCOUNT)
assert properties.get('AutomaticPresence') == (cs.PRESENCE_BUSY,
'busy', 'Exploding'), \
properties.get('AutomaticPresence')
assert properties.get('RequestedPresence') == (cs.PRESENCE_AWAY,
'away', 'Respawning'), \
properties.get('RequestedPresence')
assert properties.get('ConnectAutomatically') == True, \
properties.get('ConnectAutomatically')
assert properties.get('Enabled') == True, \
properties.get('Enabled')
assert properties.get('Valid') == True, \
properties.get('Valid')
assert properties.get('Icon') == 'quake3arena', \
properties.get('Icon')
assert properties.get('Nickname') == 'AnArKi', \
properties.get('Nickname')
assertEquals(
dbus.Array([
cs.ACCOUNT_PATH_PREFIX + 'q1/q1/Ranger',
cs.ACCOUNT_PATH_PREFIX + 'q2/q2/Grunt',
], signature='o'),
properties.get('Supersedes'))
assertEquals('arena', properties.get('Service'))
properties = account_props.GetAll(cs.ACCOUNT_IFACE_AVATAR)
assert properties.get('Avatar') == ([ord('f'), ord('o'), ord('o')],
'image/jpeg')
# tests for errors when creating an account
creation_properties2 = creation_properties.copy()
creation_properties2[cs.ACCOUNT + '.NonExistent'] = 'foo'
call_async(q, account_manager, 'CreateAccount',
'fakecm',
'fakeprotocol',
'fakeaccount',
params,
creation_properties2)
q.expect('dbus-error', method='CreateAccount')
params2 = params.copy()
params2['fake_param'] = 'foo'
call_async(q, account_manager, 'CreateAccount',
'fakecm',
'fakeprotocol',
'fakeaccount',
params2,
creation_properties)
q.expect('dbus-error', method='CreateAccount')
if __name__ == '__main__':
exec_test(test, {})
| freedesktop-unofficial-mirror/telepathy__telepathy-mission-control | tests/twisted/account-manager/create-with-properties.py | Python | lgpl-2.1 | 6,362 |
# -*- coding: utf-8 -*-
# added new list_tbl definition
from functools import partial
from navmazing import NavigateToAttribute, NavigateToSibling
from cfme.common import SummaryMixin, Taggable
from cfme.fixtures import pytest_selenium as sel
from cfme.web_ui import CheckboxTable, toolbar as tb, paginator, InfoBlock, match_location
from utils.appliance import Navigatable
from utils.appliance.implementations.ui import CFMENavigateStep, navigator, navigate_to
from . import pol_btn, mon_btn
list_tbl = CheckboxTable(table_locator="//div[@id='list_grid']//table")
match_page = partial(match_location, controller='container_node', title='Nodes')
class Node(Taggable, SummaryMixin, Navigatable):
def __init__(self, name, provider, appliance=None):
self.name = name
self.provider = provider
Navigatable.__init__(self, appliance=appliance)
def load_details(self, refresh=False):
navigate_to(self, 'Details')
if refresh:
tb.refresh()
def get_detail(self, *ident):
""" Gets details from the details infoblock
Args:
*ident: Table name and Key name, e.g. "Relationships", "Images"
Returns: A string representing the contents of the summary's value.
"""
self.load_details(refresh=False)
return InfoBlock.text(*ident)
@navigator.register(Node, 'All')
class All(CFMENavigateStep):
prerequisite = NavigateToAttribute('appliance.server', 'LoggedIn')
def am_i_here(self):
return match_page(summary='Nodes')
def step(self):
from cfme.web_ui.menu import nav
nav._nav_to_fn('Compute', 'Containers', 'Container Nodes')(None)
def resetter(self):
# Reset view and selection
tb.select("List View")
sel.check(paginator.check_all())
sel.uncheck(paginator.check_all())
@navigator.register(Node, 'Details')
class Details(CFMENavigateStep):
prerequisite = NavigateToSibling('All')
def am_i_here(self):
return match_page(summary='{} (Summary)'.format(self.obj.name))
def step(self):
# Assuming default list view from prerequisite
list_tbl.click_row_by_cells({'Name': self.obj.name, 'Provider': self.obj.provider.name})
@navigator.register(Node, 'EditTags')
class EditTags(CFMENavigateStep):
prerequisite = NavigateToSibling('Details')
def am_i_here(self):
        return match_page(summary='Tag Assignment')
def step(self):
pol_btn('Edit Tags')
@navigator.register(Node, 'ManagePolicies')
class ManagePolicies(CFMENavigateStep):
prerequisite = NavigateToSibling('Details')
def am_i_here(self):
        return match_page(summary='Select Policy Profiles')
def step(self):
pol_btn('Manage Policies')
@navigator.register(Node, 'Utilization')
class Utilization(CFMENavigateStep):
prerequisite = NavigateToSibling('Details')
def am_i_here(self):
match_page(summary='{} Capacity & Utilization'.format(self.obj.name))
def step(self):
mon_btn('Utilization')
| kzvyahin/cfme_tests | cfme/containers/node.py | Python | gpl-2.0 | 3,038 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('home', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='BibliotikFulltext',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True,
primary_key=True)),
('info', models.TextField()),
('more_info', models.TextField()),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='BibliotikTorrent',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True,
primary_key=True)),
('info_hash', models.CharField(max_length=40, db_index=True)),
('retrieved', models.DateTimeField()),
('category', models.CharField(max_length=32)),
('format', models.CharField(max_length=16)),
('retail', models.BooleanField(default=False)),
('pages', models.IntegerField()),
('language', models.CharField(max_length=32)),
('isbn', models.CharField(max_length=16)),
('cover_url', models.TextField()),
('tags', models.TextField()),
('publisher', models.TextField()),
('year', models.IntegerField()),
('author', models.TextField()),
('title', models.TextField()),
('html_page', models.TextField()),
('torrent_filename', models.TextField(null=True)),
('torrent_file', models.BinaryField(null=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='BibliotikTransTorrent',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True,
primary_key=True)),
('info_hash', models.CharField(max_length=40)),
('torrent_id', models.IntegerField(null=True)),
('torrent_name', models.TextField(null=True)),
('torrent_size', models.BigIntegerField(null=True)),
('torrent_uploaded', models.BigIntegerField(null=True)),
('torrent_done', models.FloatField(null=True)),
('torrent_date_added', models.DateTimeField(null=True)),
('torrent_error', models.IntegerField(null=True)),
('torrent_error_string', models.TextField(null=True)),
('bibliotik_torrent', models.ForeignKey(to='bibliotik.BibliotikTorrent')),
('instance', models.ForeignKey(to='home.TransInstance')),
('location', models.ForeignKey(to='home.DownloadLocation')),
],
options={
'abstract': False,
},
bases=(models.Model,),
),
# Add indexes and fix fulltext
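        # (Context note, an addition: MySQL supported FULLTEXT indexes only on
        # MyISAM tables before version 5.6, hence the engine switches here.)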
migrations.RunSQL(
'ALTER TABLE `bibliotik_bibliotikfulltext` ENGINE = MYISAM',
'ALTER TABLE `bibliotik_bibliotikfulltext` ENGINE = INNODB',
),
migrations.RunSQL(
'ALTER TABLE `bibliotik_bibliotikfulltext` ADD FULLTEXT `info_fts` (`info`)',
'ALTER TABLE `bibliotik_bibliotikfulltext` DROP INDEX `info_fts`',
),
migrations.RunSQL(
'ALTER TABLE `bibliotik_bibliotikfulltext` ADD ' +
'FULLTEXT `info_more_info_fts` (`info`,`more_info`)',
'ALTER TABLE `bibliotik_bibliotikfulltext` DROP INDEX `info_more_info_fts`'
),
]
| grandmasterchef/WhatManager2 | bibliotik/migrations/0001_initial.py | Python | mit | 3,906 |
a = [int(i) for i in input().split()]
print(sum(a))
| maisilex/Lets-Begin-Python | list.py | Python | mit | 52 |
n5a_type_marker = '__is_n5a_type__'
from n5a.n5atype import *
| sschaetz/n5a | n5a/__init__.py | Python | mit | 62 |
from __future__ import absolute_import, unicode_literals
import operator
import sys
from collections import OrderedDict
from functools import reduce
from django import forms
from django.contrib.admin import FieldListFilter, widgets
from django.contrib.admin.exceptions import DisallowedModelAdminLookup
from django.contrib.admin.options import IncorrectLookupParameters
from django.contrib.admin.utils import (
get_fields_from_path, lookup_needs_distinct, prepare_lookup_value, quote, unquote)
from django.contrib.auth.decorators import login_required
from django.core.exceptions import ImproperlyConfigured, PermissionDenied, SuspiciousOperation
from django.core.paginator import InvalidPage, Paginator
from django.db import models
from django.db.models.constants import LOOKUP_SEP
from django.db.models.fields import FieldDoesNotExist
from django.db.models.fields.related import ForeignObjectRel
from django.db.models.sql.constants import QUERY_TERMS
from django.shortcuts import get_object_or_404, redirect, render
from django.template.defaultfilters import filesizeformat
from django.utils import six
from django.utils.decorators import method_decorator
from django.utils.encoding import force_text
from django.utils.functional import cached_property
from django.utils.http import urlencode
from django.utils.safestring import mark_safe
from django.utils.text import capfirst
from django.utils.translation import ugettext as _
from django.views.generic import TemplateView
from django.views.generic.edit import FormView
from wagtail.wagtailadmin import messages
from wagtail.wagtailadmin.edit_handlers import (
ObjectList, extract_panel_definitions_from_model_class)
from wagtail.wagtaildocs.models import get_document_model
from wagtail.wagtailimages.models import Filter, get_image_model
from .forms import ParentChooserForm
class WMABaseView(TemplateView):
"""
Groups together common functionality for all app views.
"""
model_admin = None
meta_title = ''
page_title = ''
page_subtitle = ''
def __init__(self, model_admin):
self.model_admin = model_admin
self.model = model_admin.model
self.opts = self.model._meta
self.app_label = force_text(self.opts.app_label)
self.model_name = force_text(self.opts.model_name)
self.verbose_name = force_text(self.opts.verbose_name)
self.verbose_name_plural = force_text(self.opts.verbose_name_plural)
self.pk_attname = self.opts.pk.attname
self.is_pagemodel = model_admin.is_pagemodel
self.permission_helper = model_admin.permission_helper
self.url_helper = model_admin.url_helper
def check_action_permitted(self, user):
return True
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
if not self.check_action_permitted(request.user):
raise PermissionDenied
button_helper_class = self.model_admin.get_button_helper_class()
self.button_helper = button_helper_class(self, request)
return super(WMABaseView, self).dispatch(request, *args, **kwargs)
@cached_property
def menu_icon(self):
return self.model_admin.get_menu_icon()
@cached_property
def header_icon(self):
return self.menu_icon
def get_page_title(self):
return self.page_title or capfirst(self.opts.verbose_name_plural)
def get_meta_title(self):
return self.meta_title or self.get_page_title()
@cached_property
def index_url(self):
return self.url_helper.index_url
@cached_property
def create_url(self):
return self.url_helper.create_url
def get_base_queryset(self, request=None):
return self.model_admin.get_queryset(request or self.request)
class ModelFormView(WMABaseView, FormView):
def get_edit_handler_class(self):
if hasattr(self.model, 'edit_handler'):
edit_handler = self.model.edit_handler
else:
panels = extract_panel_definitions_from_model_class(self.model)
edit_handler = ObjectList(panels)
return edit_handler.bind_to_model(self.model)
def get_form_class(self):
return self.get_edit_handler_class().get_form_class(self.model)
def get_success_url(self):
return self.index_url
def get_instance(self):
return getattr(self, 'instance', None) or self.model()
def get_form_kwargs(self):
kwargs = FormView.get_form_kwargs(self)
kwargs.update({'instance': self.get_instance()})
return kwargs
@property
def media(self):
return forms.Media(
css={'all': self.model_admin.get_form_view_extra_css()},
js=self.model_admin.get_form_view_extra_js()
)
def get_context_data(self, **kwargs):
context = super(ModelFormView, self).get_context_data(**kwargs)
instance = self.get_instance()
edit_handler_class = self.get_edit_handler_class()
form = self.get_form()
context.update({
'view': self,
'model_admin': self.model_admin,
'is_multipart': form.is_multipart(),
'edit_handler': edit_handler_class(instance=instance, form=form),
'form': form,
})
return context
def get_success_message(self, instance):
return _("{model_name} '{instance}' created.").format(
model_name=capfirst(self.opts.verbose_name), instance=instance)
def get_success_message_buttons(self, instance):
button_url = self.url_helper.get_action_url('edit', quote(instance.pk))
return [
messages.button(button_url, _('Edit'))
]
def get_error_message(self):
model_name = self.verbose_name
return _("The %s could not be created due to errors.") % model_name
def form_valid(self, form):
instance = form.save()
messages.success(
self.request, self.get_success_message(instance),
buttons=self.get_success_message_buttons(instance)
)
return redirect(self.get_success_url())
def form_invalid(self, form):
messages.error(self.request, self.get_error_message())
return self.render_to_response(self.get_context_data())
class InstanceSpecificView(WMABaseView):
instance_pk = None
pk_quoted = None
instance = None
def __init__(self, model_admin, instance_pk):
super(InstanceSpecificView, self).__init__(model_admin)
self.instance_pk = unquote(instance_pk)
self.pk_quoted = quote(self.instance_pk)
filter_kwargs = {}
filter_kwargs[self.pk_attname] = self.instance_pk
object_qs = model_admin.model._default_manager.get_queryset().filter(
**filter_kwargs)
self.instance = get_object_or_404(object_qs)
def get_page_subtitle(self):
return self.instance
@cached_property
def edit_url(self):
return self.url_helper.get_action_url('edit', self.pk_quoted)
@cached_property
def delete_url(self):
return self.url_helper.get_action_url('delete', self.pk_quoted)
class IndexView(WMABaseView):
# IndexView settings
ORDER_VAR = 'o'
ORDER_TYPE_VAR = 'ot'
PAGE_VAR = 'p'
SEARCH_VAR = 'q'
ERROR_FLAG = 'e'
IGNORED_PARAMS = (ORDER_VAR, ORDER_TYPE_VAR, SEARCH_VAR)
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
# Only continue if logged in user has list permission
if not self.permission_helper.user_can_list(request.user):
raise PermissionDenied
self.list_display = self.model_admin.get_list_display(request)
self.list_filter = self.model_admin.get_list_filter(request)
self.search_fields = self.model_admin.get_search_fields(request)
self.items_per_page = self.model_admin.list_per_page
self.select_related = self.model_admin.list_select_related
# Get search parameters from the query string.
try:
self.page_num = int(request.GET.get(self.PAGE_VAR, 0))
except ValueError:
self.page_num = 0
self.params = dict(request.GET.items())
if self.PAGE_VAR in self.params:
del self.params[self.PAGE_VAR]
if self.ERROR_FLAG in self.params:
del self.params[self.ERROR_FLAG]
self.query = request.GET.get(self.SEARCH_VAR, '')
self.queryset = self.get_queryset(request)
return super(IndexView, self).dispatch(request, *args, **kwargs)
@property
def media(self):
return forms.Media(
css={'all': self.model_admin.get_index_view_extra_css()},
js=self.model_admin.get_index_view_extra_js()
)
def get_buttons_for_obj(self, obj):
return self.button_helper.get_buttons_for_obj(
obj, classnames_add=['button-small', 'button-secondary'])
def get_search_results(self, request, queryset, search_term):
"""
Returns a tuple containing a queryset to implement the search,
and a boolean indicating if the results may contain duplicates.
"""
use_distinct = False
if self.search_fields and search_term:
orm_lookups = ['%s__icontains' % str(search_field)
for search_field in self.search_fields]
for bit in search_term.split():
or_queries = [models.Q(**{orm_lookup: bit})
for orm_lookup in orm_lookups]
queryset = queryset.filter(reduce(operator.or_, or_queries))
if not use_distinct:
for search_spec in orm_lookups:
if lookup_needs_distinct(self.opts, search_spec):
use_distinct = True
break
return queryset, use_distinct
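    # Illustrative example (an assumption for clarity, not original code):
    # with search_fields = ['name', 'email'] and search_term = 'john smith',
    # the queryset is narrowed to
    # (name icontains 'john' OR email icontains 'john') AND
    # (name icontains 'smith' OR email icontains 'smith').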
def lookup_allowed(self, lookup, value):
# Check FKey lookups that are allowed, so that popups produced by
# ForeignKeyRawIdWidget, on the basis of ForeignKey.limit_choices_to,
# are allowed to work.
for l in self.model._meta.related_fkey_lookups:
for k, v in widgets.url_params_from_lookup_dict(l).items():
if k == lookup and v == value:
return True
parts = lookup.split(LOOKUP_SEP)
# Last term in lookup is a query term (__exact, __startswith etc)
# This term can be ignored.
if len(parts) > 1 and parts[-1] in QUERY_TERMS:
parts.pop()
# Special case -- foo__id__exact and foo__id queries are implied
# if foo has been specifically included in the lookup list; so
# drop __id if it is the last part. However, first we need to find
# the pk attribute name.
rel_name = None
for part in parts[:-1]:
try:
field = self.model._meta.get_field(part)
except FieldDoesNotExist:
# Lookups on non-existent fields are ok, since they're ignored
# later.
return True
if hasattr(field, 'rel'):
if field.rel is None:
# This property or relation doesn't exist, but it's allowed
# since it's ignored in ChangeList.get_filters().
return True
model = field.rel.to
rel_name = field.rel.get_related_field().name
elif isinstance(field, ForeignObjectRel):
model = field.model
rel_name = model._meta.pk.name
else:
rel_name = None
if rel_name and len(parts) > 1 and parts[-1] == rel_name:
parts.pop()
if len(parts) == 1:
return True
clean_lookup = LOOKUP_SEP.join(parts)
return clean_lookup in self.list_filter
def get_filters_params(self, params=None):
"""
Returns all params except IGNORED_PARAMS
"""
if not params:
params = self.params
lookup_params = params.copy() # a dictionary of the query string
# Remove all the parameters that are globally and systematically
# ignored.
for ignored in self.IGNORED_PARAMS:
if ignored in lookup_params:
del lookup_params[ignored]
return lookup_params
def get_filters(self, request):
lookup_params = self.get_filters_params()
use_distinct = False
for key, value in lookup_params.items():
if not self.lookup_allowed(key, value):
raise DisallowedModelAdminLookup(
"Filtering by %s not allowed" % key)
filter_specs = []
if self.list_filter:
for list_filter in self.list_filter:
if callable(list_filter):
# This is simply a custom list filter class.
spec = list_filter(
request,
lookup_params,
self.model,
self.model_admin)
else:
field_path = None
if isinstance(list_filter, (tuple, list)):
# This is a custom FieldListFilter class for a given
# field.
field, field_list_filter_class = list_filter
else:
# This is simply a field name, so use the default
# FieldListFilter class that has been registered for
# the type of the given field.
field = list_filter
field_list_filter_class = FieldListFilter.create
if not isinstance(field, models.Field):
field_path = field
field = get_fields_from_path(self.model,
field_path)[-1]
spec = field_list_filter_class(
field,
request,
lookup_params,
self.model,
self.model_admin,
field_path=field_path)
# Check if we need to use distinct()
use_distinct = (
use_distinct or lookup_needs_distinct(self.opts,
field_path))
if spec and spec.has_output():
filter_specs.append(spec)
# At this point, all the parameters used by the various ListFilters
# have been removed from lookup_params, which now only contains other
# parameters passed via the query string. We now loop through the
# remaining parameters both to ensure that all the parameters are valid
# fields and to determine if at least one of them needs distinct(). If
# the lookup parameters aren't real fields, then bail out.
try:
for key, value in lookup_params.items():
lookup_params[key] = prepare_lookup_value(key, value)
use_distinct = (
use_distinct or lookup_needs_distinct(self.opts, key))
return (
filter_specs, bool(filter_specs), lookup_params, use_distinct
)
except FieldDoesNotExist as e:
six.reraise(
IncorrectLookupParameters,
IncorrectLookupParameters(e),
sys.exc_info()[2])
def get_query_string(self, new_params=None, remove=None):
if new_params is None:
new_params = {}
if remove is None:
remove = []
p = self.params.copy()
for r in remove:
for k in list(p):
if k.startswith(r):
del p[k]
for k, v in new_params.items():
if v is None:
if k in p:
del p[k]
else:
p[k] = v
return '?%s' % urlencode(sorted(p.items()))
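    # Illustrative example (an assumption, not original code): if
    # self.params == {'o': '2', 'q': 'foo'}, then
    # self.get_query_string({'p': '1'}, remove=['q']) returns '?o=2&p=1'.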
def _get_default_ordering(self):
ordering = []
if self.model_admin.ordering:
ordering = self.model_admin.ordering
elif self.opts.ordering:
ordering = self.opts.ordering
return ordering
def get_default_ordering(self, request):
if self.model_admin.get_ordering(request):
return self.model_admin.get_ordering(request)
if self.opts.ordering:
return self.opts.ordering
return ()
def get_ordering_field(self, field_name):
"""
Returns the proper model field name corresponding to the given
field_name to use for ordering. field_name may either be the name of a
proper model field or the name of a method (on the admin or model) or a
callable with the 'admin_order_field' attribute. Returns None if no
proper model field name can be matched.
"""
try:
field = self.opts.get_field(field_name)
return field.name
except FieldDoesNotExist:
# See whether field_name is a name of a non-field
# that allows sorting.
if callable(field_name):
attr = field_name
elif hasattr(self.model_admin, field_name):
attr = getattr(self.model_admin, field_name)
else:
attr = getattr(self.model, field_name)
return getattr(attr, 'admin_order_field', None)
def get_ordering(self, request, queryset):
"""
Returns the list of ordering fields for the change list.
First we check the get_ordering() method in model admin, then we check
the object's default ordering. Then, any manually-specified ordering
from the query string overrides anything. Finally, a deterministic
order is guaranteed by ensuring the primary key is used as the last
ordering field.
"""
params = self.params
ordering = list(self.get_default_ordering(request))
if self.ORDER_VAR in params:
# Clear ordering and used params
ordering = []
order_params = params[self.ORDER_VAR].split('.')
for p in order_params:
try:
none, pfx, idx = p.rpartition('-')
field_name = self.list_display[int(idx)]
order_field = self.get_ordering_field(field_name)
if not order_field:
continue # No 'admin_order_field', skip it
# reverse order if order_field has already "-" as prefix
if order_field.startswith('-') and pfx == "-":
ordering.append(order_field[1:])
else:
ordering.append(pfx + order_field)
except (IndexError, ValueError):
continue # Invalid ordering specified, skip it.
# Add the given query's ordering fields, if any.
ordering.extend(queryset.query.order_by)
# Ensure that the primary key is systematically present in the list of
# ordering fields so we can guarantee a deterministic order across all
# database backends.
pk_name = self.opts.pk.name
if not (set(ordering) & {'pk', '-pk', pk_name, '-' + pk_name}):
# The two sets do not intersect, meaning the pk isn't present. So
# we add it.
ordering.append('-pk')
return ordering
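    # Illustrative example (an assumption, not original code): with ?o=0.-2
    # in the query string, results are ordered by the field behind
    # list_display column 0 ascending, then column 2 descending, with '-pk'
    # appended when the primary key is not already present.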
def get_ordering_field_columns(self):
"""
Returns an OrderedDict of ordering field column numbers and asc/desc
"""
# We must cope with more than one column having the same underlying
# sort field, so we base things on column numbers.
ordering = self._get_default_ordering()
ordering_fields = OrderedDict()
if self.ORDER_VAR not in self.params:
# for ordering specified on model_admin or model Meta, we don't
# know the right column numbers absolutely, because there might be
        # more than one column associated with that ordering, so we guess.
for field in ordering:
if field.startswith('-'):
field = field[1:]
order_type = 'desc'
else:
order_type = 'asc'
for index, attr in enumerate(self.list_display):
if self.get_ordering_field(attr) == field:
ordering_fields[index] = order_type
break
else:
for p in self.params[self.ORDER_VAR].split('.'):
none, pfx, idx = p.rpartition('-')
try:
idx = int(idx)
except ValueError:
continue # skip it
ordering_fields[idx] = 'desc' if pfx == '-' else 'asc'
return ordering_fields
def get_queryset(self, request=None):
request = request or self.request
# First, we collect all the declared list filters.
(self.filter_specs, self.has_filters, remaining_lookup_params,
filters_use_distinct) = self.get_filters(request)
# Then, we let every list filter modify the queryset to its liking.
qs = self.get_base_queryset(request)
for filter_spec in self.filter_specs:
new_qs = filter_spec.queryset(request, qs)
if new_qs is not None:
qs = new_qs
try:
# Finally, we apply the remaining lookup parameters from the query
# string (i.e. those that haven't already been processed by the
# filters).
qs = qs.filter(**remaining_lookup_params)
except (SuspiciousOperation, ImproperlyConfigured):
# Allow certain types of errors to be re-raised as-is so that the
# caller can treat them in a special way.
raise
except Exception as e:
# Every other error is caught with a naked except, because we don't
# have any other way of validating lookup parameters. They might be
# invalid if the keyword arguments are incorrect, or if the values
# are not in the correct type, so we might get FieldError,
# ValueError, ValidationError, or ?.
raise IncorrectLookupParameters(e)
if not qs.query.select_related:
qs = self.apply_select_related(qs)
# Set ordering.
ordering = self.get_ordering(request, qs)
qs = qs.order_by(*ordering)
# Apply search results
qs, search_use_distinct = self.get_search_results(
request, qs, self.query)
# Remove duplicates from results, if necessary
if filters_use_distinct | search_use_distinct:
return qs.distinct()
else:
return qs
def apply_select_related(self, qs):
if self.select_related is True:
return qs.select_related()
if self.select_related is False:
if self.has_related_field_in_list_display():
return qs.select_related()
if self.select_related:
return qs.select_related(*self.select_related)
return qs
def has_related_field_in_list_display(self):
for field_name in self.list_display:
try:
field = self.opts.get_field(field_name)
except FieldDoesNotExist:
pass
else:
if isinstance(field, models.ManyToOneRel):
return True
return False
def get_context_data(self, *args, **kwargs):
user = self.request.user
all_count = self.get_base_queryset().count()
queryset = self.get_queryset()
result_count = queryset.count()
paginator = Paginator(queryset, self.items_per_page)
try:
page_obj = paginator.page(self.page_num + 1)
except InvalidPage:
page_obj = paginator.page(1)
context = {
'view': self,
'all_count': all_count,
'result_count': result_count,
'paginator': paginator,
'page_obj': page_obj,
'object_list': page_obj.object_list,
'user_can_create': self.permission_helper.user_can_create(user)
}
if self.is_pagemodel:
models = self.model.allowed_parent_page_models()
allowed_parent_types = [m._meta.verbose_name for m in models]
valid_parents = self.permission_helper.get_valid_parent_pages(user)
valid_parent_count = valid_parents.count()
context.update({
'no_valid_parents': not valid_parent_count,
'required_parent_types': allowed_parent_types,
})
return context
def get_template_names(self):
return self.model_admin.get_index_template()
class CreateView(ModelFormView):
page_title = _('New')
def check_action_permitted(self, user):
return self.permission_helper.user_can_create(user)
def dispatch(self, request, *args, **kwargs):
if self.is_pagemodel:
user = request.user
parents = self.permission_helper.get_valid_parent_pages(user)
parent_count = parents.count()
# There's only one available parent for this page type for this
# user, so we send them along with that as the chosen parent page
if parent_count == 1:
parent = parents.get()
parent_pk = quote(parent.pk)
return redirect(self.url_helper.get_action_url(
'add', self.app_label, self.model_name, parent_pk))
# The page can be added in multiple places, so redirect to the
# choose_parent view so that the parent can be specified
return redirect(self.url_helper.get_action_url('choose_parent'))
return super(CreateView, self).dispatch(request, *args, **kwargs)
def get_meta_title(self):
return _('Create new %s') % self.verbose_name
def get_page_subtitle(self):
return capfirst(self.verbose_name)
def get_template_names(self):
return self.model_admin.get_create_template()
class EditView(ModelFormView, InstanceSpecificView):
page_title = _('Editing')
def check_action_permitted(self, user):
return self.permission_helper.user_can_edit_obj(user, self.instance)
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
if self.is_pagemodel:
return redirect(
self.url_helper.get_action_url('edit', self.pk_quoted)
)
return super(EditView, self).dispatch(request, *args, **kwargs)
def get_meta_title(self):
return _('Editing %s') % self.verbose_name
def get_success_message(self, instance):
return _("{model_name} '{instance}' updated.").format(
model_name=capfirst(self.verbose_name), instance=instance)
def get_context_data(self, **kwargs):
kwargs['user_can_delete'] = self.permission_helper.user_can_delete_obj(
self.request.user, self.instance)
return super(EditView, self).get_context_data(**kwargs)
def get_error_message(self):
name = self.verbose_name
return _("The %s could not be saved due to errors.") % name
def get_template_names(self):
return self.model_admin.get_edit_template()
class ChooseParentView(WMABaseView):
def dispatch(self, request, *args, **kwargs):
if not self.permission_helper.user_can_create(request.user):
raise PermissionDenied
return super(ChooseParentView, self).dispatch(request, *args, **kwargs)
def get_page_title(self):
return _('Add %s') % self.verbose_name
def get_form(self, request):
parents = self.permission_helper.get_valid_parent_pages(request.user)
return ParentChooserForm(parents, request.POST or None)
def get(self, request, *args, **kwargs):
form = self.get_form(request)
context = {'view': self, 'form': form}
return render(request, self.get_template(), context)
def post(self, request, *args, **kargs):
form = self.get_form(request)
if form.is_valid():
parent_pk = quote(form.cleaned_data['parent_page'].pk)
return redirect(self.url_helper.get_action_url(
'add', self.app_label, self.model_name, parent_pk))
context = {'view': self, 'form': form}
return render(request, self.get_template(), context)
def get_template(self):
return self.model_admin.get_choose_parent_template()
class DeleteView(InstanceSpecificView):
page_title = _('Delete')
def check_action_permitted(self, user):
return self.permission_helper.user_can_delete_obj(user, self.instance)
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
if not self.check_action_permitted(request.user):
raise PermissionDenied
if self.is_pagemodel:
return redirect(
self.url_helper.get_action_url('delete', self.pk_quoted)
)
return super(DeleteView, self).dispatch(request, *args, **kwargs)
def get_meta_title(self):
return _('Confirm deletion of %s') % self.verbose_name
def confirmation_message(self):
return _(
"Are you sure you want to delete this %s? If other things in your "
"site are related to it, they may also be affected."
) % self.verbose_name
def get(self, request, *args, **kwargs):
context = {'view': self, 'instance': self.instance}
return self.render_to_response(context)
def delete_instance(self):
self.instance.delete()
def post(self, request, *args, **kwargs):
try:
self.delete_instance()
messages.success(
request,
_("{model} '{instance}' deleted.").format(
model=self.verbose_name, instance=self.instance))
return redirect(self.index_url)
except models.ProtectedError:
linked_objects = []
for rel in self.model._meta.get_all_related_objects():
if rel.on_delete == models.PROTECT:
qs = getattr(self.instance, rel.get_accessor_name())
for obj in qs.all():
linked_objects.append(obj)
context = {
'view': self,
'instance': self.instance,
'protected_error': True,
'linked_objects': linked_objects,
}
return self.render_to_response(context)
def get_template_names(self):
return self.model_admin.get_delete_template()
class InspectView(InstanceSpecificView):
page_title = _('Inspecting')
def check_action_permitted(self, user):
return self.permission_helper.user_can_inspect_obj(user, self.instance)
@property
def media(self):
return forms.Media(
css={'all': self.model_admin.get_inspect_view_extra_css()},
js=self.model_admin.get_inspect_view_extra_js()
)
def get_meta_title(self):
return _('Inspecting %s') % self.verbose_name
def get_field_label(self, field_name, field=None):
""" Return a label to display for a field """
label = None
if field is not None:
label = getattr(field, 'verbose_name', None)
if label is None:
label = getattr(field, 'name', None)
if label is None:
label = field_name
return label
def get_field_display_value(self, field_name, field=None):
""" Return a display value for a field """
# First we check for a 'get_fieldname_display' property/method on
# the model, and return the value of that, if present.
val_funct = getattr(self.instance, 'get_%s_display' % field_name, None)
if val_funct is not None:
if callable(val_funct):
return val_funct()
return val_funct
# If we have a real field, we can utilise that to try to display
# something more useful
if field is not None:
try:
field_type = field.get_internal_type()
if (
field_type == 'ForeignKey' and
field.related_model == get_image_model()
):
# The field is an image
return self.get_image_field_display(field_name, field)
if (
field_type == 'ForeignKey' and
field.related_model == get_document_model()
):
# The field is a document
return self.get_document_field_display(field_name, field)
except AttributeError:
pass
# Resort to getting the value of 'field_name' from the instance
return getattr(self.instance, field_name,
self.model_admin.get_empty_value_display(field_name))
def get_image_field_display(self, field_name, field):
""" Render an image """
image = getattr(self.instance, field_name)
if image:
fltr, _ = Filter.objects.get_or_create(spec='max-400x400')
rendition = image.get_rendition(fltr)
return rendition.img_tag
return self.model_admin.get_empty_value_display(field_name)
def get_document_field_display(self, field_name, field):
""" Render a link to a document """
document = getattr(self.instance, field_name)
if document:
return mark_safe(
'<a href="%s">%s <span class="meta">(%s, %s)</span></a>' % (
document.url,
document.title,
document.file_extension.upper(),
filesizeformat(document.file.size),
)
)
return self.model_admin.get_empty_value_display(field_name)
def get_dict_for_field(self, field_name):
"""
Return a dictionary containing `label` and `value` values to display
for a field.
"""
try:
field = self.model._meta.get_field(field_name)
except FieldDoesNotExist:
field = None
return {
'label': self.get_field_label(field_name, field),
'value': self.get_field_display_value(field_name, field),
}
def get_fields_dict(self):
"""
Return a list of `label`/`value` dictionaries to represent the
        fields named by the model_admin class's `get_inspect_view_fields` method
"""
fields = []
for field_name in self.model_admin.get_inspect_view_fields():
fields.append(self.get_dict_for_field(field_name))
return fields
def get_context_data(self, **kwargs):
context = super(InspectView, self).get_context_data(**kwargs)
buttons = self.button_helper.get_buttons_for_obj(
self.instance, exclude=['inspect'])
context.update({
'view': self,
'fields': self.get_fields_dict(),
'buttons': buttons,
'instance': self.instance,
})
return context
def get_template_names(self):
return self.model_admin.get_inspect_template()
| hamsterbacke23/wagtail | wagtail/contrib/modeladmin/views.py | Python | bsd-3-clause | 35,740 |
"""
gw2copilot/wine_mumble_reader.py
The latest version of this package is available at:
<https://github.com/jantman/gw2copilot>
################################################################################
Copyright 2016 Jason Antman <[email protected]> <http://www.jasonantman.com>
This file is part of gw2copilot.
gw2copilot is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
gw2copilot is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with gw2copilot. If not, see <http://www.gnu.org/licenses/>.
The Copyright and Authors attributions contained herein may not be removed or
otherwise altered, except to add the Author attribution of a contributor to
this work. (Additional Terms pursuant to Section 7b of the AGPL v3)
################################################################################
While not legally required, I sincerely request that anyone who finds
bugs please submit them at <https://github.com/jantman/gw2copilot> or
to me via email, and that you send any contributions or improvements
either as a pull request on GitHub, or to me via email.
################################################################################
AUTHORS:
Jason Antman <[email protected]> <http://www.jasonantman.com>
################################################################################
"""
import logging
import os
import json
import psutil
import pkg_resources
from twisted.internet import protocol
from twisted.internet.task import LoopingCall
logger = logging.getLogger(__name__)
class WineMumbleLinkReader(object):
"""
Class to handle reading MumbleLink via wine.
"""
def __init__(self, parent_server, poll_interval):
"""
Initialize the class.
:param parent_server: the TwistedServer instance that started this
:type parent_server: :py:class:`~.TwistedServer`
:param poll_interval: interval in seconds to poll MumbleLink
:type poll_interval: float
"""
logger.debug("Instantiating WineMumbleLinkReader")
self.server = parent_server
self._poll_interval = poll_interval
self._wine_protocol = None
self._wine_process = None
self._looping_deferred = None
self._setup_process()
self._add_update_loop()
def _add_update_loop(self):
"""
Setup the LoopingCall to poll MumbleLink every ``self.poll_interval``;
helper for testing.
"""
logger.debug("Creating LoopingCall")
l = LoopingCall(self._wine_protocol.ask_for_output)
l.clock = self.server.reactor
logger.info('Setting poll interval to %s seconds',
self._poll_interval)
self._looping_deferred = l.start(self._poll_interval)
self._looping_deferred.addErrback(logger.error)
def _setup_process(self):
"""
Setup and spawn the process to read MumbleLink.
"""
logger.debug("Creating WineProcessProtocol")
self._wine_protocol = WineProcessProtocol(self.server)
logger.debug("Finding process executable, args and environ")
executable, args, env = self._gw2_wine_spawn_info
# this seems to cause problems
if 'WINESERVERSOCKET' in env:
del env['WINESERVERSOCKET']
logger.debug(
"Creating spawned process; executable=%s args=%s len(env)=%d",
executable, args, len(env)
)
logger.debug("Process environment:")
for k in sorted(env.keys()):
logger.debug('%s=%s' % (k, env[k]))
self._wine_process = self.server.reactor.spawnProcess(
self._wine_protocol, executable, args, env)
@property
def _gw2_wine_spawn_info(self):
"""
Return the information required to spawn :py:mod:`~.read_mumble_link`
as a Python script running under GW2's wine install.
:return: return a 3-tuple of wine executable path (str), args to pass
to wine (list, wine python binary path and ``read_mumble_link.py``
module path), wine process environment (dict)
:rtype: tuple
"""
gw2_ps = self._gw2_process
env = gw2_ps.environ()
wine_path = os.path.join(os.path.dirname(gw2_ps.exe()), 'wine')
logger.debug("Gw2.exe executable: %s; inferred wine binary as: %s",
gw2_ps.exe(), wine_path)
wine_args = [
wine_path,
self._wine_python_path(env['WINEPREFIX']),
self._read_mumble_path,
'-i'
]
return wine_path, wine_args, env
@property
def _read_mumble_path(self):
"""
Return the absolute path to :py:mod:`~.read_mumble_link` on disk.
:return: absolute path to :py:mod:`~.read_mumble_link`
:rtype: str
"""
p = pkg_resources.resource_filename('gw2copilot', 'read_mumble_link.py')
p = os.path.abspath(os.path.realpath(p))
logger.debug('Found path to read_mumble_link as: %s', p)
return p
def _wine_python_path(self, wineprefix):
"""
Given a specified ``WINEPREFIX``, return the path to the Python binary
in it.
:param wineprefix: ``WINEPREFIX`` env var
:type wineprefix: str
:return: absolute path to wine's Python binary
:rtype: str
"""
p = os.path.join(wineprefix, 'drive_c', 'Python27', 'python.exe')
if not os.path.exists(p):
raise Exception("Unable to find wine Python at: %s", p)
logger.debug('Found wine Python binary at: %s', p)
return p
@property
def _gw2_process(self):
"""
Find the Gw2.exe process; return the Process object.
:return: Gw2.exe process
:rtype: psutil.Process
"""
gw2_p = None
for p in psutil.process_iter():
if p.name() != 'Gw2.exe':
continue
if gw2_p is not None:
raise Exception("Error: more than one Gw2.exe process found")
gw2_p = p
if gw2_p is None:
raise Exception("Error: could not find a running Gw2.exe process")
logger.debug("Found Gw2.exe process, PID %d", gw2_p.pid)
return gw2_p
class WineProcessProtocol(protocol.ProcessProtocol):
"""
An implementation of :py:class:`twisted.internet.protocol.ProcessProtocol`
to communicate with :py:mod:`~.read_mumble_link` when it is executed
as a command-line script under wine. This handles reading data from the
process and requesting more.
"""
def __init__(self, parent_server):
"""
Initialize; save an instance variable pointing to our
:py:class:`~.TwistedServer`
:param parent_server: the TwistedServer instance that started this
:type parent_server: :py:class:`~.TwistedServer`
"""
logger.debug("Initializing WineProcessProtocol")
self.parent_server = parent_server
self.have_data = False
def connectionMade(self):
"""Triggered when the process starts; just logs a debug message"""
logger.debug("Connection made")
def ask_for_output(self):
"""
Write a newline to the process' STDIN, prompting it to re-read the map
and write the results to STDOUT, which will be received by
:py:meth:`~.outReceived`.
"""
logger.debug("asking for output")
self.transport.write("\n")
def outReceived(self, data):
"""
Called when output is received from the process; attempts to deserialize
JSON and on success passes it back to ``self.parent_server`` via
:py:meth:`~.TwistedServer.update_mumble_data`.
:param data: JSON data read from MumbleLink
:type data: str
"""
logger.debug("Data received: %s", data)
try:
d = json.loads(data.strip())
self.have_data = True
self.parent_server.update_mumble_data(d)
except Exception:
logger.exception("Could not deserialize data")
def errReceived(self, data):
"""
Called when STDERR from the process has output; if we have not yet
successfully deserialized a JSON message, logs STDERR at debug-level;
otherwise discards it.
:param data: STDERR from the process
:type data: str
"""
if not self.have_data:
logger.debug('Process STDERR: %s', data)
def processExited(self, status):
"""called when the process exits; just logs a debug message"""
logger.debug("Process exited %s", status)
def processEnded(self, status):
"""called when the process ends and is cleaned up;
just logs a debug message"""
logger.debug("Process ended %s", status)
def inConnectionLost(self):
"""called when STDIN connection is lost; just logs a debug message"""
logger.debug("STDIN connection lost")
def outConnectionLost(self):
"""called when STDOUT connection is lost; just logs a debug message"""
logger.debug("STDOUT connection lost")
raise Exception('read_mumble_link.py (wine process) '
'STDOUT connection lost')
def errConnectionLost(self):
"""called when STDERR connection is lost; just logs a debug message"""
logger.debug("STDERR connection lost")
| jantman/gw2copilot | gw2copilot/wine_mumble_reader.py | Python | agpl-3.0 | 9,880 |
from __future__ import print_function
# Time: O(n * g * log(g)), where g is the max size of a group.
# Space: O(n)
#
# Given an array of strings, return all groups of strings that are anagrams.
#
# Note: All inputs will be in lower-case.
#
import collections
class Solution(object):
def groupAnagrams(self, strs):
"""
:type strs: List[str]
:rtype: List[List[str]]
"""
anagrams_map, result = collections.defaultdict(list), []
for s in strs:
sorted_str = ("").join(sorted(s))
anagrams_map[sorted_str].append(s)
for anagram in anagrams_map.values():
anagram.sort()
result.append(anagram)
return result
if __name__ == "__main__":
result = Solution().groupAnagrams(["cat", "dog", "act", "mac"])
print(result)
| kamyu104/LeetCode | Python/group-anagrams.py | Python | mit | 822 |
# -*- coding: utf-8 -*-
"""
orthopoly.py - A suite of functions for generating orthogonal polynomials
and quadrature rules.
Copyright (c) 2014 Greg von Winckel
All rights reserved.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
Last updated on Wed Jan 1 14:29:25 MST 2014
"""
from __future__ import division
import numpy as np
import scipy as sp
import scipy.linalg
def gauss(alpha, beta):
"""
Compute the Gauss nodes and weights from the recursion
coefficients associated with a set of orthogonal polynomials
Inputs:
alpha - recursion coefficients
beta - recursion coefficients
Outputs:
x - quadrature nodes
w - quadrature weights
Adapted from the MATLAB code by Walter Gautschi
http://www.cs.purdue.edu/archives/2002/wxg/codes/gauss.m
"""
from scipy.linalg import eig_banded
A = np.vstack((np.sqrt(beta), alpha))
x, V = eig_banded(A, lower=False)
w = beta[0] * sp.real(sp.power(V[0, :], 2))
return x, w
def radau(alpha, beta, xr):
"""
Compute the Radau nodes and weights with the preassigned node xr
Inputs:
alpha - recursion coefficients
beta - recursion coefficients
xr - assigned node location
Outputs:
x - quadrature nodes
w - quadrature weights
Based on the section 7 of the paper
"Some modified matrix eigenvalue problems"
by Gene Golub, SIAM Review Vol 15, No. 2, April 1973, pp.318--334
"""
from scipy.linalg import solve_banded
n = len(alpha) - 1
f = np.zeros(n)
f[-1] = beta[-1]
A = np.vstack((np.sqrt(beta), alpha - xr))
J = np.vstack((A[:, 0:-1], A[0, 1:]))
delta = solve_banded((1, 1), J, f)
alphar = alpha
alphar[-1] = xr + delta[-1]
x, w = gauss(alphar, beta)
return x, w
def lobatto(alpha, beta, xl1, xl2):
"""
Compute the Lobatto nodes and weights with the preassigned
    nodes xl1, xl2
Inputs:
alpha - recursion coefficients
beta - recursion coefficients
xl1 - assigned node location
xl2 - assigned node location
Outputs:
x - quadrature nodes
w - quadrature weights
Based on the section 7 of the paper
"Some modified matrix eigenvalue problems"
by Gene Golub, SIAM Review Vol 15, No. 2, April 1973, pp.318--334
"""
from scipy.linalg import solve_banded, solve
n = len(alpha) - 1
en = np.zeros(n)
en[-1] = 1
A1 = np.vstack((np.sqrt(beta), alpha - xl1))
J1 = np.vstack((A1[:, 0:-1], A1[0, 1:]))
A2 = np.vstack((np.sqrt(beta), alpha - xl2))
J2 = np.vstack((A2[:, 0:-1], A2[0, 1:]))
g1 = solve_banded((1, 1), J1, en)
g2 = solve_banded((1, 1), J2, en)
C = np.array(((1, -g1[-1]), (1, -g2[-1])))
xl = np.array((xl1, xl2))
ab = solve(C, xl)
alphal = alpha
alphal[-1] = ab[0]
betal = beta
betal[-1] = ab[1]
x, w = gauss(alphal, betal)
return x, w
def rec_jacobi(N, a, b):
"""
Generate the recursion coefficients alpha_k, beta_k
P_{k+1}(x) = (x-alpha_k)*P_{k}(x) - beta_k P_{k-1}(x)
for the Jacobi polynomials which are orthogonal on [-1,1]
with respect to the weight w(x)=[(1-x)^a]*[(1+x)^b]
Inputs:
N - polynomial order
a - weight parameter
b - weight parameter
Outputs:
alpha - recursion coefficients
beta - recursion coefficients
Adapted from the MATLAB code by Dirk Laurie and Walter Gautschi
http://www.cs.purdue.edu/archives/2002/wxg/codes/r_jacobi.m
"""
from scipy.special import gamma
nu = (b - a) / float(a + b + 2)
mu = 2**(a + b + 1) * gamma(a + 1) * gamma(b + 1) / gamma(a + b + 2)
if N == 1:
alpha = nu
beta = mu
else:
n = np.arange(1.0, N)
nab = 2 * n + a + b
alpha = np.hstack((nu, (b**2 - a**2) / (nab * (nab + 2))))
n = n[1:]
nab = nab[1:]
B1 = 4 * (a + 1) * (b + 1) / float((a + b + 2)**2 * (a + b + 3))
B = 4 * (n + a) * (n + b) * n * (n + a + b) / \
(nab**2 * (nab + 1) * (nab - 1))
beta = np.hstack((mu, B1, B))
return alpha, beta
def rec_jacobi01(N, a, b):
"""
Generate the recursion coefficients alpha_k, beta_k
for the Jacobi polynomials which are orthogonal on [0,1]
See rec_jacobi for the recursion coefficients on [-1,1]
Inputs:
N - polynomial order
a - weight parameter
b - weight parameter
Outputs:
alpha - recursion coefficients
beta - recursion coefficients
Adapted from the MATLAB implementation:
https://www.cs.purdue.edu/archives/2002/wxg/codes/r_jacobi01.m
"""
if a <= -1 or b <= -1:
raise ValueError('''Jacobi coefficients are defined only
for alpha,beta > -1''')
if not isinstance(N, int):
raise TypeError('N must be an integer')
if N < 1:
raise ValueError('N must be at least 1')
c, d = rec_jacobi(N, a, b)
alpha = (1 + c) / 2
beta = d / 4
beta[0] = d[0] / 2**(a + b + 1)
return alpha, beta
def polyval(alpha, beta, x):
"""
Evaluate polynomials on x given the recursion coefficients alpha and beta
"""
N = len(alpha)
m = len(x)
P = np.zeros((m, N + 1))
P[:, 0] = 1
P[:, 1] = (x - alpha[0]) * P[:, 0]
for k in xrange(1, N):
P[:, k + 1] = (x - alpha[k]) * P[:, k] - beta[k] * P[:, k - 1]
return P
def jacobi(N, a, b, x, NOPT=1):
"""
JACOBI computes the Jacobi polynomials which are orthogonal on [-1,1]
with respect to the weight w(x)=[(1-x)^a]*[(1+x)^b] and evaluate them
on the given grid up to P_N(x). Setting NOPT=2 returns the
L2-normalized polynomials
"""
m = len(x)
P = np.zeros((m, N + 1))
apb = a + b
a1 = a - 1
b1 = b - 1
c = apb * (a - b)
P[:, 0] = 1
if N > 0:
P[:, 1] = 0.5 * (a - b + (apb + 2) * x)
if N > 1:
for k in xrange(2, N + 1):
k2 = 2 * k
g = k2 + apb
g1 = g - 1
g2 = g - 2
d = 2.0 * (k + a1) * (k + b1) * g
P[:, k] = (g1 * (c + g2 * g * x) * P[:, k - 1] -
d * P[:, k - 2]) / (k2 * (k + apb) * g2)
if NOPT == 2:
from scipy.special import gamma
k = np.arange(N + 1)
pnorm = 2**(apb + 1) * gamma(k + a + 1) * gamma(k + b + 1) / \
((2 * k + a + b + 1) * (gamma(k + 1) * gamma(k + a + b + 1)))
P *= 1 / np.sqrt(pnorm)
return P
def jacobiD(N, a, b, x, NOPT=1):
"""
JACOBID computes the first derivatives of the normalized Jacobi
polynomials which are orthogonal on [-1,1] with respect
to the weight w(x)=[(1-x)^a]*[(1+x)^b] and evaluate them
on the given grid up to P_N(x). Setting NOPT=2 returns
the derivatives of the L2-normalized polynomials
"""
z = np.zeros((len(x), 1))
if N == 0:
Px = z
else:
Px = 0.5 * np.hstack((z, jacobi(N - 1, a + 1, b + 1, x, NOPT) *
((a + b + 2 + np.arange(N)))))
return Px
def mm_log(N, a):
"""
MM_LOG Modified moments for a logarithmic weight function.
The call mm=MM_LOG(n,a) computes the first n modified moments of the
logarithmic weight function w(t)=t^a log(1/t) on [0,1] relative to
shifted Legendre polynomials.
REFERENCE: Walter Gautschi,``On the preceding paper `A Legendre
polynomial integral' by James L. Blue'',
Math. Comp. 33 (1979), 742-743.
Adapted from the MATLAB implementation:
https://www.cs.purdue.edu/archives/2002/wxg/codes/mm_log.m
"""
if a <= -1:
raise ValueError('Parameter a must be greater than -1')
    from functools import reduce  # reduce is not a builtin in Python 3
    prod = lambda z: reduce(lambda x, y: x * y, z, 1)
mm = np.zeros(N)
c = 1
for n in range(N):
if isinstance(a, int) and a < n:
p = range(n - a, n + a + 2)
mm[n] = (-1)**(n - a) / prod(p)
mm[n] *= sp.special.gamma(a + 1)**2
else:
if n == 0:
mm[0] = 1 / (a + 1)**2
else:
k = np.arange(1, n + 1)
s = 1 / (a + 1 + k) - 1 / (a + 1 - k)
p = (a + 1 - k) / (a + 1 + k)
mm[n] = (1 / (a + 1) + sum(s)) * prod(p) / (a + 1)
mm[n] *= c
c *= 0.5 * (n + 1) / (2 * n + 1)
return mm
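# Usage sketch (illustrative): for a = 0 the zeroth modified moment is
# integral_0^1 log(1/t) dt = 1.
#
#   mm = mm_log(4, 0)
#   # mm[0] == 1.0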
def mod_chebyshev(N, mom, alpham, betam):
"""
    Calculate the recursion coefficients for the orthogonal polynomials
    which are orthogonal with respect to a weight function which is
    represented in terms of its modified moments, obtained by
integrating the monic polynomials against the weight function.
REFERENCES:
John C. Wheeler, "Modified moments and Gaussian quadratures"
Rocky Mountain Journal of Mathematics, Vol. 4, Num. 2 (1974), 287--296
Walter Gautschi, "Orthogonal Polynomials (in Matlab)
Journal of Computational and Applied Mathematics, Vol. 178 (2005) 215--234
Adapted from the MATLAB implementation:
https://www.cs.purdue.edu/archives/2002/wxg/codes/chebyshev.m
"""
if not isinstance(N, int):
raise TypeError('N must be an integer')
if N < 1:
raise ValueError('N must be at least 1')
N = min(N, int(len(mom) / 2))
alpha = np.zeros(N)
beta = np.zeros(N)
normsq = np.zeros(N)
sig = np.zeros((N + 1, 2 * N))
alpha[0] = alpham[0] + mom[1] / mom[0]
beta[0] = mom[0]
sig[1, :] = mom
for n in range(2, N + 1):
for m in range(n - 1, 2 * N - n + 1):
sig[n, m] = sig[n - 1, m + 1] - (alpha[n - 2] - alpham[m]) * sig[n - 1, m] - \
beta[n - 2] * sig[n - 2, m] + betam[m] * sig[n - 1, m - 1]
alpha[n - 1] = alpham[n - 1] + sig[n, n] / sig[n, n - 1] - sig[n - 1, n - 1] / \
sig[n - 1, n - 2]
beta[n - 1] = sig[n, n - 1] / sig[n - 1, n - 2]
normsq = np.diagonal(sig, -1)
return alpha, beta, normsq
def rec_jaclog(N, a):
"""
Generate the recursion coefficients alpha_k, beta_k
P_{k+1}(x) = (x-alpha_k)*P_{k}(x) - beta_k P_{k-1}(x)
for the monic polynomials which are orthogonal on [0,1]
with respect to the weight w(x)=x^a*log(1/x)
Inputs:
N - polynomial order
a - weight parameter
Outputs:
alpha - recursion coefficients
beta - recursion coefficients
    Adapted from the MATLAB code:
https://www.cs.purdue.edu/archives/2002/wxg/codes/r_jaclog.m
"""
alphaj, betaj = rec_jacobi01(2 * N, 0, 0)
mom = mm_log(2 * N, a)
alpha, beta, _ = mod_chebyshev(N, mom, alphaj, betaj)
return alpha, beta
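# Usage sketch (illustrative): Gauss quadrature for the weight
# w(x) = log(1/x) on [0,1]; the weights sum to the zeroth moment
# integral_0^1 log(1/x) dx = 1.
#
#   alpha, beta = rec_jaclog(4, 0)
#   x, w = gauss(alpha, beta)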
| NicovincX2/Python-3.5 | Analyse (mathématiques)/Analyse numérique/Équations différentielles numériques/Collocation method/orthopoly.py | Python | gpl-3.0 | 11,766 |
from django.conf import settings
from django.contrib import admin
from django.contrib.auth import views as auth_views
from django.contrib.auth.decorators import login_required
from django.urls import include, path
from django.views.generic import RedirectView
from api.urls import urlpatterns as api
from bridge_lti.urls import urlpatterns as lti
from config.views import BridgeLoginView
from module.urls import urlpatterns as module
from . import views
urlpatterns = [
path('login/', BridgeLoginView.as_view(), name='login'),
path('logout/', auth_views.LogoutView.as_view(), name='logout'),
path('admin/', admin.site.urls),
path('health/', views.health),
path('', login_required(RedirectView.as_view(pattern_name='module:group-list')), name='index'),
path('lti/', include(lti)),
path('module/', include(module)),
path('api/', include(api)),
]
if settings.DEBUG:
import debug_toolbar
urlpatterns = [path('__debug__/', include(debug_toolbar.urls)), ] + urlpatterns
| harvard-vpal/bridge-adaptivity | bridge_adaptivity/config/urls.py | Python | bsd-3-clause | 1,009 |
# -*- coding: utf-8 -*-
from bda.plone.orders import mailnotify as MN
import unittest
class TestMailnotifyUnit(unittest.TestCase):
def test_indent_wrap(self):
"""The _indent mehtod should wrap like defined by it's parameters.
"""
txt = u"abcd " * 3
ctrl = ' abcd\nabcd abcd' # textwrap removes whitespace at EOL
res = MN._indent(txt, width=10, ind=5)
self.assertEqual(res, ctrl)
def test_indent_unicode(self):
"""The _indent method should be able to handle non-ASCII data.
"""
        txt = u'äüöß ' * 3
ctrl = u'äüöß äüöß\näüöß' # textwrap removes whitespace at EOL
res = MN._indent(txt, width=10, ind=0)
self.assertEqual(res, ctrl)
| andreesg/bda.plone.orders | src/bda/plone/orders/tests/test_mailnotify.py | Python | bsd-3-clause | 755 |
#MenuTitle: Delete Short Segments
# -*- coding: utf-8 -*-
__doc__="""
Deletes single-unit segments.
"""
thisFont = Glyphs.font # frontmost font
selectedLayers = thisFont.selectedLayers # active layers of selected glyphs
def process( thisLayer ):
for thisPath in thisLayer.paths:
for i in range(len(thisPath.nodes))[::-1]:
thisNode = thisPath.nodes[i]
prevNode = thisNode.prevNode
if prevNode.type != OFFCURVE and thisNode.type != OFFCURVE:
xDistance = thisNode.x-prevNode.x
yDistance = thisNode.y-prevNode.y
if abs(xDistance) < 1.0 and abs(yDistance) < 1.0:
thisPath.removeNodeCheckKeepShape_( thisNode )
thisFont.disableUpdateInterface() # suppresses UI updates in Font View
for thisLayer in selectedLayers:
thisGlyph = thisLayer.parent
print "Processing %s" % thisGlyph.name
thisGlyph.beginUndo() # begin undo grouping
process( thisLayer )
thisGlyph.endUndo() # end undo grouping
thisFont.enableUpdateInterface() # re-enables UI updates in Font View
| schriftgestalt/Mekka-Scripts | Paths/Delete Short Segments.py | Python | apache-2.0 | 993 |
import os.path
import os
import platform
import commands
import time
import uuid
import sys
import urllib2, urllib
import atexit
import socks
import socket
import select
os.system('clear')
print "Welcome to pseudo V0.2"
print " _"
print " | |"
print "_ __ ___ ___ _ _ __ | | ___ "
print "| '_ \ / __| / _ \| | | | / _` | / _ \ "
print "| |_) |\__ \| __/| |_| || (_| || (_) |"
print "| .__/ |___/ \___| \__,_| \__,_| \___/ "
print "| |"
print "|_|"
global cwd
global version
global uid
version = "0.2"
prompt = ''
path = '/var/tmp/uid.txt'
time.sleep(1)
ufile = os.path.exists(path)
# torify connection
print "torifying connection \r"
print "\r"
try:
socks.setdefaultproxy(socks.PROXY_TYPE_SOCKS4, '127.0.0.1', 9050, True)
socket.socket = socks.socksocket
ip = urllib2.urlopen("https://icanhazip.com").read()
print "Current IP used: " + ip
except socket.error, (value, message):
    print "Could not open tor connection: " + message
print "\r"
time.sleep(2)
print "your connection is not torified"
print "\r"
# check for updates
def check_version():
fp = urllib2.urlopen("http://www.example.com/version.txt")
currentversion = fp.read()
if (currentversion > version):
print "a new update is available \n"
com = raw_input("would you like to download it (yes or no) > ")
com_array = ['yes', 'no']
search = com in com_array
if (search == False):
print "\r"
com = raw_input("would you like to download it (yes or no) > ")
if not com:
print "\r"
com = raw_input("would you like to download it (yes or no) > ")
if (com == "yes"):
download_update()
def download_update():
url = "http://www.example.com/pseudo.tar.gz"
file_name = url.split('/')[-1]
u = urllib2.urlopen(url)
f = open(file_name, 'wb')
meta = u.info()
file_size = int(meta.getheaders("Content-Length")[0])
print "Downloading: %s Bytes: %s " % (file_name, file_size)
file_size_dl = 0
block_sz = 8192
while True:
buffer = u.read(block_sz)
if not buffer:
break
file_size_dl += len(buffer)
f.write(buffer)
status = r"%10d [%3.2f%%]" % (file_size_dl, file_size_dl * 100. / file_size)
status = status + chr(8)*(len(status)+1)
print status
f.close()
commands.getoutput("tar -xvf pseudo.tar.gz")
print "\r"
com = raw_input("restart program to update (yes or no)")
com_array = ['yes', 'no']
search = com in com_array
if (search == False):
print "\r"
com = raw_input("restart program to update (yes or no)")
if not com:
print "\r"
com = raw_input("restart program to update (yes or no)")
if (com == "yes"):
restart_program()
# at exit run these cleanup functions
def premexit():
os.system('clear')
log_user_out()
# runs the function above on premature termination
atexit.register(premexit)
# restart program after generating uid
def restart_program():
python = sys.executable
os.execl(python, python, * sys.argv)
# check if the userid file exist
def checkuid(ufile):
if (ufile == False):
generateuid()
if (ufile == True):
check_version()
loginuser()
# generate a uid for the user
def generateuid():
global uid
uid = uuid.uuid1()
f = open('uid.txt','w')
f.write(str(uid))
f.close()
commands.getoutput("sudo mv uid.txt /var/tmp/uid.txt")
commands.getoutput("rm uid.txt")
cpname = platform.node()
print "your ID number is: " + str(uid)
print "\r"
print "please enter a name to be identified by"
username = raw_input(prompt)
mydata=[('uid',uid), ('comp', cpname), ('username', username)]
mydata=urllib.urlencode(mydata)
path='http://www.example.com/start.php'
req=urllib2.Request(path, mydata)
req.add_header("Content-type", "application/x-www-form-urlencoded")
response=urllib2.urlopen(req).read()
if (response != "successful"):
# print response
print "Server Error Please Contact Server Owner"
        commands.getoutput("sudo rm /var/tmp/uid.txt")
exit()
if (response == "successful"):
# print response
time.sleep(3)
restart_program()
# found the uid file login user
def loginuser():
global uname
f = open('/var/tmp/uid.txt','r')
uid = f.read()
cpname = platform.node()
print "\r"
print "\r"
print "please wait while we verify your account..."
time.sleep(2)
mydata=[('uid',uid), ('comp', cpname)]
mydata=urllib.urlencode(mydata)
path='http://www.example.com/verify.php'
req=urllib2.Request(path, mydata)
req.add_header("Content-type", "application/x-www-form-urlencoded")
print "\r"
response=urllib2.urlopen(req).read()
# no response was returned
if not response:
print "sorry we could not verify your account, please try again"
exit()
# this response means it was not found in the database
if (response == "failed"):
print "Sorry no account was found, please wait we will generate a new one"
commands.getoutput("sudo rm /var/tmp/uid.txt")
restart_program()
# we got a good response the user has been verified
if response:
time.sleep(1)
uname = response
print "account verified, please wait...."
time.sleep(2)
startit = True
os.system('clear')
programstart(uname)
# get username
def getusername():
f = open('/var/tmp/uid.txt','r')
uid = f.read()
cpname = platform.node()
mydata=[('uid',uid), ('comp', cpname)]
mydata=urllib.urlencode(mydata)
path='http://www.example.com/verify.php'
req=urllib2.Request(path, mydata)
req.add_header("Content-type", "application/x-www-form-urlencoded")
response=urllib2.urlopen(req).read()
if (response == "failed"):
exit()
if not response:
print "sorry we could not verify your account, please try again"
exit()
if response:
uname = response
# start main program
def programstart(uname):
print "Welcome " + uname + "\r"
print "\r"
print " ============ Commands List ================\r"
print " | help (commands list) | \r"
print " | private (private messages) | \r"
print " | members (members list by alias) | \r"
print " | online (online users) | \r"
print " | message (message board) | \r"
print " | live (live chat) | \r"
print " | exit (exit program) | \r"
print " |==========================================| \r"
print " \r"
com = raw_input("please enter your command > ")
command(com)
# command requst
def command(com):
commandlist(com)
# command line
def commandline(com):
commandlist(com)
# =============================
#
# Commands list
#
# =============================
# Master Commands list
def commandlist(list):
#commands array
com_array = ['h', 'help', 'private','members','online','message','live', 'clear', 'exit', 'restart']
# search the array for requested command
search = list in com_array
# can't find command warn user and drop them to command line
if (search == False):
print "\r"
com = raw_input("command not found, please try again > ")
commandline(com)
else:
# command was found now let them run it
# help command
if (list == "h"):
command_help()
# same as above
if (list == "help"):
command_help()
# exit program
if (list == "exit"):
command_exit()
# clear screen drop back to cmdline
if (list == "clear"):
os.system('clear')
programstart(uname)
# online users
if (list == "online"):
online_users()
# members directory
if (list == "members"):
members_directory()
# TODO ITEMS LISTED HERE!
if (list == "private"):
private_messages()
if (list == "message"):
print "sorry this item is not done yet"
com = raw_input(prompt)
commandline(com)
if (list == "live"):
chat_client()
if (list == "restart"):
restart_program()
# prevent program from exiting on blank line
if not list:
com = raw_input(prompt)
commandline(com)
# =============================
#
# Command functions
#
# =============================
# help file
def command_help():
os.system('clear')
print "Welcome " + uname + "\r"
print "\r"
print " ============ Commands List ================\r"
print " | help (commands list) | \r"
print " | private (private messages) | \r"
print " | members (members list by alias) | \r"
print " | online (online users) | \r"
print " | message (message board) | \r"
print " | live (live chat) | \r"
print " | exit (exit program) | \r"
print " |==========================================| \r"
print " \r"
print "\r"
# drop back to commandline
com = raw_input("please enter your command > ")
commandline(com)
# log user out
def log_user_out():
f = open('/var/tmp/uid.txt','r')
uid = f.read()
mydata=[('uid', uid)]
mydata=urllib.urlencode(mydata)
path='http://www.example.com/offline.php'
req=urllib2.Request(path, mydata)
req.add_header("Content-type", "application/x-www-form-urlencoded")
response=urllib2.urlopen(req).read()
# members directory
def members_directory():
os.system('clear')
print "Welcome " + uname + "\r"
action = "online"
mydata=[('action', action)]
mydata=urllib.urlencode(mydata)
path='http://www.example.com/directory.php'
req=urllib2.Request(path, mydata)
req.add_header("Content-type", "application/x-www-form-urlencoded")
response=urllib2.urlopen(req).read()
print " ========== Members Directory ==========="
print " \r "
print "\r" + response + "\r"
print "\r"
com = raw_input("please enter your command > ")
if (com == "back"):
os.system('clear')
programstart(uname)
commandlist(com)
# private messages
def private_messages():
os.system('clear')
print "Welcome " + uname + "\r"
print "\r"
print " ============ PM Commands List ================\r"
print " | help (commands list) | \r"
print " | inbox (messages) | \r"
print " | write (write message) | \r"
print " | back (back to index) | \r"
print " |=============================================| \r"
print " \r"
print "\r"
com = raw_input("please enter your command > ")
private_message_cmds(com)
# pm cmdline
def pm_cmdline(com):
private_message_cmds(com)
# private message commands
def private_message_cmds(com):
#commands array
com_array = ['help', 'inbox','write','sent','back']
# search the array for requested command
search = com in com_array
# can't find command warn user and drop them to command line
if (search == False):
print "\r"
com = raw_input("command not found, please try again > ")
pm_cmdline(com)
else:
# command was found now let them run it
# inbox
if (com == "inbox"):
pm_inbox(uname)
# compose new message
if (com == "write"):
pm_write_msg()
# sent items
if (com == "sent"):
show_sent_items()
# go back
if (com == "back"):
command_help()
# pm inbox
def pm_inbox(uname):
os.system('clear')
print "Welcome " + uname + "\r"
print "\r"
print " ======== Inbox ========= \r"
print "\r"
print " ID# | From | Subject | \n"
mydata=[('user', uname)]
mydata=urllib.urlencode(mydata)
path='http://www.example.com/inbox.php'
req=urllib2.Request(path, mydata)
req.add_header("Content-type", "application/x-www-form-urlencoded")
response=urllib2.urlopen(req).read()
print "" + response + "\r"
print " ======================== \r"
print "\r"
print "Enter id # to read message or type back to go back"
com = raw_input(prompt)
if (com == "back"):
private_messages()
if com:
read_private_msg(com)
if not com:
com = raw_input()
read_private_msg(com)
# show the sent items
def show_sent_items():
os.system('clear')
print "Welcome " + uname + "\r"
print "\r"
print " ======== Sent Items ========= \r"
print "\r"
print " ID# | From | Subject | \n"
mydata=[('user', uname)]
mydata=urllib.urlencode(mydata)
path='http://www.example.com/sent_items.php'
req=urllib2.Request(path, mydata)
req.add_header("Content-type", "application/x-www-form-urlencoded")
response=urllib2.urlopen(req).read()
print "" + response + "\r"
print " ======================== \r"
print "\r"
cool = raw_input("enter message id to view it or type delete to delete")
# read private message
def read_private_msg(com):
os.system('clear')
mid = com
mydata=[('msgid',mid), ('uid', uname)]
mydata=urllib.urlencode(mydata)
path='http://www.example.com/read.php'
req=urllib2.Request(path, mydata)
req.add_header("Content-type", "application/x-www-form-urlencoded")
response=urllib2.urlopen(req).read()
if (response == "invalid"):
print "invalid message id #, please try again"
time.sleep(2)
pm_inbox(uname)
if not response:
print "\r"
print "\r"
print "you have no mesages ID # " + com
time.sleep(2)
pm_inbox(uname)
if (response == "nao"):
print "\r"
print "\r"
print "you have no mesages ID # " + com
time.sleep(2)
pm_inbox(uname)
print "Welcome " + uname + "\r"
print "\r"
print "====== Message ID #" + com + " ========\r"
print "\r"
print "\r" + response + "\r"
print "\r"
command = raw_input("delete, reply or back? > ")
if (command == "delete"):
print "\r"
print "are you sure you want to delete the messsage? (yes or no)"
confirm = raw_input(prompt)
if (confirm == "yes"):
delete_message(mid)
if (confirm == "no"):
print "reply or type back to go back"
command = raw_input(prompt)
if (command == "reply"):
reply_to_message(com)
if (command == "back"):
private_messages()
# reply to Mesasge
if (command == "reply"):
reply_to_message(com)
# go back
if (command == "back"):
private_messages()
# Write a private message
def pm_write_msg():
os.system('clear')
print "Welcome " + uname + "\r"
print "\r"
print " ===== Compose a mesasge ======"
print "\r"
to = raw_input("please enter members alias > ")
if not to:
to = raw_input(prompt)
print "\r"
subject = raw_input("please enter your subject line > ")
os.system('clear')
print "Welcome " + uname + "\r"
print "\r"
print " ===== Compose a mesasge ======"
print "\r"
print "To: " + to + "\r"
print "Subject: " + subject + "\r"
print "\r"
print "please enter your message, type /f on a new line and enter when finished"
print "\r"
text = ""
stopword = "/f"
while True:
line = raw_input()
if line.strip() == stopword:
break
text += "\n" + line
os.system('clear')
print "Welcome " + uname + "\r"
print "\r"
print " ===== Compose a mesasge ======"
print "\r"
print "To: " + to + "\r"
print "Subject: " + subject + "\r"
print "\r"
print "Message: \r"
print text
print "\r"
print "\r"
answer = raw_input("would you like to send or discard? > ");
# if users chooses to discard take them back to PM menu
if (answer == "discard"):
private_messages()
if (answer == "send"):
mydata=[('from', uname), ('to', to), ('subject', subject), ('message', text)]
mydata=urllib.urlencode(mydata)
path='http://www.example.com/write.php'
req=urllib2.Request(path, mydata)
req.add_header("Content-type", "application/x-www-form-urlencoded")
response=urllib2.urlopen(req).read()
if(response == "success"):
print "your message was sent"
time.sleep(2)
private_messages()
if(response != "success"):
print "there was a problem sending your message"
time.sleep(2)
private_messages()
# reply to message function
def reply_to_message(com):
print "please enter your message, type /f on a new line and hit enter when finished"
print "\r"
text = ""
stopword = "/f"
while True:
line = raw_input()
if line.strip() == stopword:
break
text += "\n" + line
mydata=[('msgid',com), ('uname', uname), ('message', text)]
mydata=urllib.urlencode(mydata)
path='http://www.example.com/reply.php'
req=urllib2.Request(path, mydata)
req.add_header("Content-type", "application/x-www-form-urlencoded")
response=urllib2.urlopen(req).read()
print response
time.sleep(3)
pm_inbox(uname)
# delete message function
def delete_message(com):
mid = com
mydata=[('msgid', mid), ('uid', uname)]
mydata=urllib.urlencode(mydata)
path='http://www.example.com/delete.php'
req=urllib2.Request(path, mydata)
req.add_header("Content-type", "application/x-www-form-urlencoded")
response=urllib2.urlopen(req).read()
print response
time.sleep(2)
pm_inbox(uname)
# ====== begining of encryption ===================
def encrypt(key, msg):
encryped = []
for i, c in enumerate(msg):
key_c = ord(key[i % len(key)])
msg_c = ord(c)
encryped.append(chr((msg_c + key_c) % 127))
return ''.join(encryped)
def decrypt(key, encryped):
msg = []
for i, c in enumerate(encryped):
key_c = ord(key[i % len(key)])
enc_c = ord(c)
msg.append(chr((enc_c - key_c) % 127))
return ''.join(msg)
#==== end of encryption ==========================
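# usage sketch (illustrative): the two functions are inverses for
# printable ASCII input, so a round trip restores the message
#
# key = "secret"
# decrypt(key, encrypt(key, "hello world")) == "hello world"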
# display online users
def online_users():
os.system('clear')
print "Welcome " + uname + "\r"
action = "online"
mydata=[('action', action)]
mydata=urllib.urlencode(mydata)
path='http://www.example.com/online.php'
req=urllib2.Request(path, mydata)
req.add_header("Content-type", "application/x-www-form-urlencoded")
response=urllib2.urlopen(req).read()
print "\r"
print " ========== Online Users ==========="
print " \r "
print "\r" + response + "\r"
print "\r"
com = raw_input("please enter your command > ")
    if (com == "back"):
os.system('clear')
programstart(uname)
commandline(com)
# exit program command
def command_exit():
log_user_out()
print "Good bye, logging you out\r"
time.sleep(2)
os.system('clear')
sys.exit(0)
# chat client
def chat_client():
# mark user as online in chat
mydata=[('username', uname)]
mydata=urllib.urlencode(mydata)
path='http://www.example.com/onlinechat.php'
req=urllib2.Request(path, mydata)
req.add_header("Content-type", "application/x-www-form-urlencoded")
response=urllib2.urlopen(req).read()
time.sleep(1)
os.system('clear')
print " ===== chatroom users ===== \n"
print "\r"
print response
print "\r"
print " ============================== \n"
host = '98.234.50.89'
port = '9009'
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(2)
# connect to remote host
try :
port = int(port)
s.connect((host, port))
except socket.error as e:
print e
sys.exit()
print 'Connected to Chat Room. You can start sending messages \n'
s.send(uname +' has joined the room \n')
sys.stdout.write('[Me] '); sys.stdout.flush()
while 1:
socket_list = [sys.stdin, s]
# Get the list sockets which are readable
ready_to_read,ready_to_write,in_error = select.select(socket_list , [], [])
for sock in ready_to_read:
if sock == s:
# incoming message from remote server, s
data = sock.recv(4096)
if not data :
print '\nDisconnected from chat server'
sys.exit()
else :
#print data
sys.stdout.write(data)
sys.stdout.write('[Me] '); sys.stdout.flush()
else :
# user entered a message
msg = sys.stdin.readline()
            if (msg.strip() == "quit"):
exit();
s.send(uname + ': ' + msg)
sys.stdout.write('[Me] '); sys.stdout.flush()
if __name__ == "__main__":
try:
checkuid(ufile)
except KeyboardInterrupt:
print 'Interrupted'
sys.exit(0)
| jeremystevens/pseudo | source/pseudo.py | Python | gpl-2.0 | 20,422 |
"""Record simulated nightly statistics by program.
"""
from __future__ import print_function, division, absolute_import
import numpy as np
import astropy.io.fits
import desiutil.log
import desisurvey.config
import desisurvey.utils
import desisurvey.tiles
import desisurvey.plots
class SurveyStatistics(object):
"""Collect nightly statistics by program.
Parameters
----------
start_date : datetime.date or None
Record statistics for a survey that starts on the evening of this date.
Uses the configured nominal start date when None.
stop_date : datetime.date
Record statistics for a survey that stops on the morning of this date.
Uses the configured nominal stop date when None.
restore : str or None
Restore internal state from the snapshot saved to this filename,
or initialize a new object when None. Use :meth:`save` to
save a snapshot to be restored later. Filename is relative to
the configured output path unless an absolute path is
provided.
"""
def __init__(self, start_date=None, stop_date=None, restore=None):
self.tiles = desisurvey.tiles.Tiles()
config = desisurvey.config.Configuration()
if start_date is None:
self.start_date = config.first_day()
else:
self.start_date = desisurvey.utils.get_date(start_date)
if stop_date is None:
self.stop_date = config.last_day()
else:
self.stop_date = desisurvey.utils.get_date(stop_date)
self.num_nights = (self.stop_date - self.start_date).days
if self.num_nights <= 0:
raise ValueError('Expected start_date < stop_date.')
# Build our internal array.
dtype = []
for name in 'MJD', 'tsched',:
        dtype.append((name, float))
nprograms = len(self.tiles.programs)
for name in 'topen', 'tdead',:
        dtype.append((name, float, (nprograms,)))
for name in 'tscience', 'tsetup', 'tsplit',:
        dtype.append((name, float, (nprograms,)))
for name in 'completed', 'nexp', 'nsetup', 'nsplit', 'nsetup_abort', 'nsplit_abort',:
dtype.append((name, np.int32, (nprograms,)))
self._data = np.zeros(self.num_nights, dtype)
if restore is not None:
# Restore array contents from a FITS file.
fullname = config.get_path(restore)
with astropy.io.fits.open(fullname, memmap=None) as hdus:
header = hdus[1].header
comment = header['COMMENT']
if header['TILES'] != self.tiles.tiles_file:
raise ValueError('Header mismatch for TILES.')
if header['START'] != self.start_date.isoformat():
raise ValueError('Header mismatch for START.')
if header['STOP'] != self.stop_date.isoformat():
raise ValueError('Header mismatch for STOP.')
self._data[:] = hdus['STATS'].data
log = desiutil.log.get_logger()
log.info('Restored stats from {}'.format(fullname))
if comment:
log.info(' Comment: "{}".'.format(comment))
else:
# Initialize local-noon MJD timestamp for each night.
first_noon = desisurvey.utils.local_noon_on_date(self.start_date).mjd
self._data['MJD'] = first_noon + np.arange(self.num_nights)
def save(self, name='stats.fits', comment='', overwrite=True):
"""Save a snapshot of these statistics as a binary FITS table.
The saved file size is ~800 Kb.
Parameters
----------
name : str
File name to write. Will be located in the configuration
output path unless it is an absolute path. Pass the same
name to the constructor's ``restore`` argument to restore
this snapshot.
comment : str
Comment to include in the saved header, for documentation
purposes.
overwrite : bool
Silently overwrite any existing file when True.
"""
hdus = astropy.io.fits.HDUList()
header = astropy.io.fits.Header()
header['TILES'] = self.tiles.tiles_file
header['START'] = self.start_date.isoformat()
header['STOP'] = self.stop_date.isoformat()
header['COMMENT'] = comment
header['EXTNAME'] = 'STATS'
hdus.append(astropy.io.fits.PrimaryHDU())
hdus.append(astropy.io.fits.BinTableHDU(self._data, header=header, name='STATS'))
config = desisurvey.config.Configuration()
fullname = config.get_path(name)
hdus.writeto(fullname, overwrite=overwrite)
log = desiutil.log.get_logger()
log.info('Saved stats to {}'.format(fullname))
if comment:
log.info('Saved with comment "{}".'.format(header['COMMENT']))
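    # Usage sketch (illustrative; assumes desisurvey is configured):
    #
    #   stats = SurveyStatistics()
    #   row = stats.get_night(stats.start_date)  # per-night record, mutable
    #   stats.save('stats.fits', comment='example snapshot')
    #   restored = SurveyStatistics(restore='stats.fits')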
@property
def nexp(self):
return self._data['nexp'].sum()
def get_night(self, night):
night = desisurvey.utils.get_date(night)
assert night < self.stop_date
idx = (night - self.start_date).days
return self._data[idx]
def validate(self):
D = self._data
# Every exposure must be preceded by a setup or split.
if not np.all(D['nexp'] == D['nsplit'] + D['nsetup']):
return False
# Sum live time per program over nights.
tlive = (D['topen'] - D['tdead']).sum(axis=1)
# Sum time spent in each state per program over nights.
ttotal = (D['tsetup'] + D['tscience'] + D['tsplit']).sum(axis=1)
return np.allclose(tlive, ttotal)
def summarize(self, nthday=None):
"""Print a tabular summary of the accumulated statistics to stdout.
"""
assert self.validate()
D = self._data
if nthday is None:
daysel = slice(None)
else:
daysel = D['MJD'] < np.min(D['MJD']) + nthday
D = D[daysel]
tsched = 24 * D['tsched'].sum()
topen = 24 * D['topen'].sum()
tscience = 24 * D['tscience'].sum()
print('Scheduled {:.3f} hr Open {:.3f}% Live {:.3f}%'.format(
tsched, 100 * topen / max(1e-6, tsched), 100 * tscience / max(1e-6, topen)))
print('=' * 82)
print('PROG TILES NEXP SETUP ABT SPLIT ABT TEXP TSETUP TSPLIT TOPEN TDEAD')
print('=' * 82)
# Summarize by program.
for program in self.tiles.programs:
progidx = self.tiles.program_index[program]
sel = progidx
ntiles = np.sum(self.tiles.program_mask[program])
ndone = D['completed'][:, sel].sum()
nexp = D['nexp'][:, sel].sum()
nsetup = D['nsetup'][:, sel].sum()
nsplit = D['nsplit'][:, sel].sum()
nsetup_abort = D['nsetup_abort'][:, sel].sum()
nsplit_abort = D['nsplit_abort'][:, sel].sum()
tscience = 86400 * D['tscience'][:, sel].sum() / max(1, ndone)
tsetup = 86400 * D['tsetup'][:, sel].sum() / max(1, ndone)
tsplit = 86400 * D['tsplit'][:, sel].sum() / max(1, ndone)
line = '{:6s} {} {:4d}/{:4d} {:5d} {:5d} {:3d} {:5d} {:3d} {:6.1f}s {:5.1f}s {:5.1f}s'.format(
program, ' ', ndone, ntiles, nexp, nsetup, nsetup_abort, nsplit, nsplit_abort, tscience, tsetup, tsplit)
print(line)
def plot(self, forecast=None):
"""Plot a summary of the survey statistics.
Requires that matplotlib is installed.
"""
import matplotlib.pyplot as plt
assert self.validate()
D = self._data
nprograms = len(self.tiles.programs)
# Find the last day of the survey.
last = np.argmax(np.cumsum(D['completed'].sum(axis=1))) + 1
tsetup = np.zeros((last, nprograms))
tsplit = np.zeros((last, nprograms))
ntiles = np.zeros(nprograms, int)
for program in self.tiles.programs:
progidx = self.tiles.program_index[program]
tsetup[:, progidx] += D['tsetup'][:last, progidx]
tsplit[:, progidx] += D['tsplit'][:last, progidx]
ntiles[progidx] += np.sum(self.tiles.program_mask[program])
actual = np.cumsum(D['completed'], axis=0)
dt = 1 + np.arange(len(D))
fig, axes = plt.subplots(2, 1, sharex=True, figsize=(12, 10))
ax = axes[0]
for program in self.tiles.programs:
programidx = self.tiles.program_index[program]
color = desisurvey.plots.program_color[program]
nprogram = np.sum(self.tiles.program_mask[program])
if forecast:
ax.plot(dt, 100 * forecast.program_progress[program] / nprogram, ':', c=color, lw=1)
ax.plot(dt[:last], 100 * actual[:last, programidx] / nprogram,
lw=3, alpha=0.5, c=color, label=program)
if forecast:
ax.plot([], [], 'b:', lw=1, label='forecast')
ax.legend(ncol=1)
ax.axvline(dt[last-1], ls='-', c='r')
ax.set_ylim(0, 100)
ax.set_ylabel('Completed [%]')
yaxis = ax.yaxis
yaxis.tick_right()
yaxis.set_label_position('right')
ax = axes[1]
# Plot overheads by program.
for program in self.tiles.programs:
progidx = self.tiles.program_index[program]
c = desisurvey.plots.program_color.get(program, 'purple')
scale = 86400 / ntiles[progidx] # secs / tile
ax.plot(dt[:last], scale * np.cumsum(tsetup[:, progidx]), '-', c=c)
ax.plot(dt[:last], scale * np.cumsum(tsplit[:, progidx]), '--', c=c)
ax.plot(dt[:last], scale * np.cumsum(D['tdead'][:last, progidx]), ':', c=c)
if forecast:
            row = forecast.df.iloc[self.tiles.program_index[program]]
ax.scatter([dt[-1], dt[-1], dt[-1]], [
row['Setup overhead / tile (s)'],
row['Cosmic split overhead / tile (s)'],
row['Operations overhead / tile (s)']], s=50, lw=0, c=c)
ax.plot([], [], 'b-', label='setup')
ax.plot([], [], 'b--', label='split')
ax.plot([], [], 'b:', label='dead')
for program in self.tiles.programs:
ax.plot([], [], '-', c=desisurvey.plots.program_color[program], label=program)
ax.legend(ncol=2)
ax.axvline(dt[last-1], ls='-', c='r')
ax.set_xlabel('Elapsed Days')
ax.set_ylabel('Overhead / Tile [s]')
ax.set_xlim(0, dt[-1] + 1)
ax.set_ylim(0, None)
yaxis = ax.yaxis
yaxis.set_minor_locator(plt.MultipleLocator(10))
yaxis.tick_right()
yaxis.set_label_position('right')
plt.subplots_adjust(hspace=0.05)
return fig, axes
def plot_one_night(exps, tiledata, night, startdate, center_l=180):
import ephem
from astropy import units as u
from astropy.coordinates import SkyCoord, search_around_sky
from matplotlib import pyplot as p
startmjd = int(desisurvey.utils.local_noon_on_date(
desisurvey.utils.get_date(startdate)).mjd)
nightnum = night - startmjd
mstarted = (tiledata['PLANNED'] <= nightnum) & (tiledata['PLANNED'] >= 0)
tiles = desisurvey.tiles.get_tiles()
p.clf()
p.subplots_adjust(hspace=0)
p.subplots_adjust(left=0.1, right=0.9)
programs = ['DARK', 'BRIGHT']
expindex = tiles.index(exps['TILEID'])
expnight = exps['MJD'].astype('i4')
m = expnight == night
medianmjd = np.median(exps['MJD'][m])
mayall = ephem.Observer()
config = desisurvey.config.Configuration()
coord = SkyCoord(ra=tiles.tileRA*u.deg, dec=tiles.tileDEC*u.deg)
mayall.lon = config.location.longitude().to(u.radian).value
mayall.lat = config.location.latitude().to(u.radian).value
mayall.date = medianmjd+(2400000.5-2415020)
moon = ephem.Moon()
moon.compute(mayall)
tile_diameter = config.tile_radius()*2
for i, prog in enumerate(programs):
mprog = prog == tiles.tileprogram
mprogstarted = mstarted & mprog
p.subplot(len(programs), 1, i+1)
ra = ((tiles.tileRA - (center_l-180)) % 360)+(center_l-180)
p.plot(ra[mprog], tiles.tileDEC[mprog], '.', color='gray',
markersize=1)
p.plot(ra[mprogstarted], tiles.tileDEC[mprogstarted], '.',
color='green', markersize=5)
m = (expnight == night) & (tiles.tileprogram[expindex] == prog)
p.plot(ra[expindex[m]], tiles.tileDEC[expindex[m]], 'r-+')
idx1, idx2, sep2d, dist3d = search_around_sky(
coord[expindex[m]], coord[expindex[m]], tile_diameter*10)
mdiff = expindex[m][idx1] != expindex[m][idx2]
if np.sum(mdiff) > 0:
print(f'min separation {prog}: {np.min(sep2d[mdiff])}')
p.gca().set_aspect('equal')
p.plot(((np.degrees(moon.ra)-(center_l-180)) % 360)+(center_l-180),
np.degrees(moon.dec), 'o',
color='yellow', markersize=10,
markeredgecolor='black')
| desihub/surveysim | py/surveysim/stats.py | Python | bsd-3-clause | 13,280 |
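# Prints the 1-indexed positions of the set bits of N, most significant
# first: e.g. N = 10 (0b1010) has bits 2 and 4 set, so the output is "4 2".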
N = int(input())
ans = []
i = 0
while N >> i:
if (N >> i) & 1:
ans.append(i+1)
i += 1
print(*ans[::-1])
| knuu/competitive-programming | hackerrank/contest/wfr2016_a.py | Python | mit | 120 |
# Manual labour - a library for step-by-step instructions
# Copyright (C) 2014 Johannes Reinhardt <[email protected]>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
# USA
import unittest
from urllib import urlopen
import manuallabour.core.common as common
from manuallabour.core.stores import *
class TestStores(unittest.TestCase):
def test_localmemory(self):
m = LocalMemoryStore()
self.assertFalse(m.has_obj('asdf'))
m.add_obj(common.Object(obj_id='asdf',name='FooBar'))
self.assertTrue(m.has_obj('asdf'))
self.assertEqual(m.get_obj('asdf').name,'FooBar')
self.assertEqual(len(list(m.iter_obj())),1)
m.add_blob('asg','tests/test_stores.py')
def test_blobs(self):
store = LocalMemoryStore()
self.assertFalse(store.has_blob('afgda'))
store.add_blob('afgda','tests/test_stores.py')
self.assertTrue(store.has_blob('afgda'))
fid = urlopen(store.get_blob_url('afgda'))
fid.close()
def test_add_objects(self):
store = LocalMemoryStore()
store.add_obj(common.Object(obj_id='a',name="Nut"))
store.add_obj(common.Object(obj_id='b',name="Wrench"))
store.add_obj(common.Object(obj_id='c',name="Bolt"))
blt = common.Object(obj_id='d',name="Tightened NutBolt")
store.add_obj(blt)
self.assertTrue(store.has_obj('a'))
self.assertFalse(store.has_obj('f'))
self.assertEqual(blt,store.get_obj('d'))
self.assertEqual(len(list(store.iter_obj())),4)
self.assertRaises(KeyError,
lambda: store.add_obj(
(common.Object(obj_id='a',name="Smaller Nut"))
)
)
| jreinhardt/manual-labour | tests/test_stores.py | Python | lgpl-2.1 | 2,384 |
# Copyright 2010 Chet Luther <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import subprocess
from optparse import OptionParser
from snmposter import SNMPosterFactory
def launcher():
"""Launch it."""
parser = OptionParser()
parser.add_option(
'-f',
'--file',
dest='filename',
default='agents.csv',
help='snmposter configuration file'
)
options, args = parser.parse_args()
factory = SNMPosterFactory()
snmpd_status = subprocess.Popen(
["service", "snmpd", "status"],
stdout=subprocess.PIPE
).communicate()[0]
if "is running" in snmpd_status:
        message = "snmpd service is running. Please stop it and try again."
print >> sys.stderr, message
sys.exit(1)
try:
factory.configure(options.filename)
except IOError:
print >> sys.stderr, "Error opening %s." % options.filename
sys.exit(1)
factory.start()
| cluther/snmposter | snmposter/scripts.py | Python | apache-2.0 | 1,489 |
# Licensed to the .NET Foundation under one or more agreements.
# The .NET Foundation licenses this file to you under the Apache 2.0 License.
# See the LICENSE file in the project root for more information.
from common import runtests
from .shared import try_finally_maker3
from .shared import setGenerator, test_exceptions
setGenerator(try_finally_maker3)
runtests(test_exceptions)
| slozier/ironpython2 | Tests/compat/sbs_exceptions/try_finally3.py | Python | apache-2.0 | 385 |
###########################################################################
#
# Copyright (c) 2010 Davide Pesavento <[email protected]>
#
# This file is part of FORSE.
#
# FORSE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# FORSE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with FORSE. If not, see <http://www.gnu.org/licenses/>.
#
###########################################################################
import sys
from RaceInfoWindow import RaceInfoWindow
from Subscriber import SubscriberApplication
if __name__ == "__main__":
app = SubscriberApplication("race_info")
mainwin = RaceInfoWindow()
mainwin.show()
sys.exit(app.exec_())
| Pesa/forse | src/forse/race_info/Main.py | Python | gpl-3.0 | 1,114 |
"""Tests for the class GnosisPackageTopicModel."""
from analytics_platform.kronos.src import config
from analytics_platform.kronos.gnosis.src.gnosis_package_topic_model import GnosisPackageTopicModel
from util.data_store.local_filesystem import LocalFileSystem
from util.analytics_platform_util import create_tags_for_package
from unittest import TestCase
class TestGnosisPackageTopicModel(TestCase):
"""Tests for the class GnosisPackageTopicModel."""
def test_generate_and_save_package_topic_model_local(self):
"""Tests the topic model generation, serialization, and deserialization."""
input_data_store = LocalFileSystem(
"tests/data/data_gnosis/input-ptm-data/")
self.assertTrue(input_data_store is not None)
output_data_store = LocalFileSystem(
"tests/data/data_gnosis/output-ptm-data/")
self.assertTrue(output_data_store is not None)
package_topic_model = GnosisPackageTopicModel.curate(
data_store=input_data_store,
filename="data_input_curated_package_topic/package_topic.json")
self.assertTrue(package_topic_model is not None)
output_result = package_topic_model.get_dictionary()
self.assertTrue(output_result is not None)
package_topic_model.save(
data_store=output_data_store, filename="data_package_topic/package_topic.json")
expected_package_topic_model = GnosisPackageTopicModel.load(
data_store=output_data_store, filename="data_package_topic/expected_package_topic.json")
self.assertTrue(expected_package_topic_model is not None)
expected_output_result = expected_package_topic_model.get_dictionary()
self.assertTrue(expected_output_result is not None)
self.assertDictEqual(output_result, expected_output_result)
def test_manifest_missing_packages(self):
"""Test the method _get_unknown_packages_from_manifests."""
input_data_store = LocalFileSystem(
"tests/data/data_gnosis/")
self.assertTrue(input_data_store is not None)
manifest_json = input_data_store.read_json_file(
filename='data_input_manifest_file_list/manifest_unknown_packages.json'
)
self.assertTrue(manifest_json)
self.assertTrue("package_list" in manifest_json[0])
package_list = manifest_json[0]['package_list']
packages = GnosisPackageTopicModel._get_unknown_packages_from_manifests(
input_data_store,
additional_path='',
package_topic_dict={}
)
self.assertListEqual(sorted(package_list[0]), sorted(packages.keys()))
def test_package_tag_creation(self):
"""Test the creation of package tags."""
input_data_store = LocalFileSystem(
"tests/data/data_gnosis/input-ptm-data/")
self.assertTrue(input_data_store is not None)
ptm_json = input_data_store.read_json_file(
filename='data_input_curated_package_topic/package_topic.json')
self.assertTrue(ptm_json)
package_names = ptm_json[0]['package_topic_map']
for package_name in package_names:
tag_list = create_tags_for_package(
package_name)
# At least one tag should be generated for each package
self.assertTrue(tag_list)
| sara-02/fabric8-analytics-stack-analysis | tests/unit_tests/test_kronos_gnosis_package_topic_model.py | Python | gpl-3.0 | 3,353 |
from django.http import HttpResponse
import pymongo
from course_dashboard_api.v2.dbv import *
mongo_db = MONGO_DB
""" Description: Function to get grading policy of a course
Input Parameters:
course_name: name of the course for which grading policy is required (ex. CT101.1x)
course_run: run of the course for which grading policy is required (ex. 2016-17)
course_organization: organization of the course for which grading policy is required (ex. IITBombayX)
Output Type: JSON Dictionary with course details and grading policy and cutoffs
Author: Jay Goswami
Date of Creation: 31 May 2017
"""
def get_grading_policy(course_name, course_run, course_organization):
try:
client = pymongo.MongoClient() # Establishing MongoDB connection
except:
print "MongoDB connection not established"
return HttpResponse("MongoDB connection not established") # MongoDB could not be connected
db_mongo = client[mongo_db]
mongo_cursor = db_mongo.modulestore.active_versions.find({"course": course_name, "run": course_run,
"org": course_organization})
grading_policy = {}
course_id = course_organization + "+" + course_name + "+" + course_run
grading_policy["course_id"] = str(course_id)
grading_policy["name"] = course_name
grading_policy["run"] = course_run
grading_policy["organization"] = course_organization
try:
course_version = mongo_cursor[0]
try:
published_version = course_version['versions']['published-branch']
mongo_cursor = db_mongo.modulestore.structures.find({'_id':published_version})
course_structures = mongo_cursor[0]['blocks']
for block in course_structures:
if block['block_type'] == 'course':
course_block = block
try:
course_start = course_block['fields']['start']
grading_policy["course_start"] = str(course_start.date())
except:
grading_policy["course_start"] = ""
#print "Course start date not found"
try:
course_end = course_block['fields']['end']
grading_policy["course_end"] = str(course_end.date())
except:
grading_policy["course_end"] = ""
#print "Course end date not found"
try:
course_registration_start = course_block['fields']['enrollment_start']
grading_policy["course_registration_start"] = str(course_registration_start.date())
except:
grading_policy["course_registration_start"] = ""
#print "Course registration start date not found"
try:
course_registration_end = course_block['fields']['enrollment_end']
grading_policy["course_registration_end"] = str(course_registration_end.date())
except:
grading_policy["course_registration_end"] = ""
#print "Course registration end date not found"
try:
course_display_name = course_block['fields']['display_name']
grading_policy["course_display_name"] = str(course_display_name)
except:
grading_policy["course_display_name"] = ""
#print "Course display name not found"
definition_id = course_block['definition']
mongo_cursor = db_mongo.modulestore.definitions.find({'_id':definition_id})
course_definition = mongo_cursor[0]
try:
grade_list = course_definition['fields']['grading_policy']['GRADER']
grader_result_list = []
for j in range(len(grade_list)):
grader_result_dict = {}
min_count = grade_list[j]['min_count']
drop_count = grade_list[j]['drop_count']
short_label = grade_list[j]['short_label']
display_name = grade_list[j]['type']
weight = grade_list[j]['weight']
grader_result_dict["min_count"] = min_count
grader_result_dict["drop_count"] = drop_count
grader_result_dict["short_label"] = str(short_label)
grader_result_dict["type"] = str(display_name)
grader_result_dict["weight"] = weight
grader_result_list.append(grader_result_dict)
grading_policy["grader"] = grader_result_list
try:
grade_cutoffs = course_definition['fields']['grading_policy']['GRADE_CUTOFFS']
grading_policy["grade_cutoffs"] = grade_cutoffs
except:
grading_policy["grade_cutoffs"] = {}
#print "No grade cutoffs mentioned"
except:
grading_policy["grade_cutoffs"] = {}
grading_policy["grader"] = []
#print "No grading policy found"
except:
grading_policy["course_start"] = ""
grading_policy["course_end"] = ""
grading_policy["course_registration_start"] = ""
grading_policy["course_registration_end"] = ""
grading_policy["course_display_name"] = ""
grading_policy["grade_cutoffs"] = {}
grading_policy["grader"] = []
#print "Course not found"
except:
client.close()
return None
client.close()
return grading_policy
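# Usage sketch (illustrative; course triple taken from the docstring example):
#
#   policy = get_grading_policy("CT101.1x", "2016-17", "IITBombayX")
#   if policy is not None:
#       print policy["grade_cutoffs"]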
""" Description: Function to get grading policy of all courses
Input Parameters:
None
Output Type: List of JSON Dictionary with each JSON containing course details and grading policy and cutoffs
Author: Jay Goswami
Date of Creation: 9 June 2017
"""
def get_all_courses_grading_policy():
try:
client = pymongo.MongoClient() # Establishing MongoDB connection
except:
print "MongoDB connection not established"
return HttpResponse("MongoDB connection not established") # MongoDB could not be connected
db_mongo = client[mongo_db]
mongo_cursor = db_mongo.modulestore.active_versions.find()
courses_grade_policy_list = []
for course in mongo_cursor:
course_name = course['course']
course_run = course['run']
course_organization = course['org']
        policy = get_grading_policy(course_name, course_run, course_organization)
        courses_grade_policy_list.append(policy)
client.close()
return courses_grade_policy_list | jaygoswami2303/course_dashboard_api | v2/GradePolicyAPI/api.py | Python | mit | 6,687 |
import seaborn as sns  # assumed available; tidy_experiment is defined in the notebook context
sns.set_style("white")
histplot = sns.displot(data=tidy_experiment, x="optical_density",
color='grey', edgecolor='white')
histplot.fig.suptitle("Optical density distribution")
histplot.axes[0][0].set_ylabel("Frequency"); | jorisvandenbossche/DS-python-data-analysis | notebooks/_solutions/case3_bacterial_resistance_lab_experiment2.py | Python | bsd-3-clause | 244 |
import base64, os, traceback, zipfile
from lxml import etree
from abc import abstractmethod
from fbreader.format.bookfile import BookFile
from fbreader.format.mimetype import Mimetype
from fbreader.format.util import list_zip_file_infos
class FB2StructureException(Exception):
def __init__(self, error):
Exception.__init__(self, 'fb2 verification failed: %s' % error)
        if isinstance(error, Exception):
            traceback.print_exc()
class Namespace(object):
FICTION_BOOK = 'http://www.gribuser.ru/xml/fictionbook/2.0'
XLINK = 'http://www.w3.org/1999/xlink'
class FB2Base(BookFile):
def __init__(self, path, original_filename, mimetype):
BookFile.__init__(self, path, original_filename, mimetype)
self.__namespaces = {'fb': Namespace.FICTION_BOOK, 'xlink': Namespace.XLINK}
try:
tree = self.__create_tree__()
self.__detect_title(tree)
self.__detect_authors(tree)
self.__detect_tags(tree)
self.__detect_series_info(tree)
self.__detect_language(tree)
description = self.__detect_description(tree)
if description:
self.description = description.strip()
except FB2StructureException, error:
raise error
except Exception, error:
raise FB2StructureException(error)
@abstractmethod
def __create_tree__(self):
return None
def extract_cover_internal(self, working_dir):
try:
tree = self.__create_tree__()
res = tree.xpath('/fb:FictionBook/fb:description/fb:title-info/fb:coverpage/fb:image', namespaces=self.__namespaces)
cover_id = res[0].get('{' + Namespace.XLINK + '}href')[1:]
res = tree.xpath('//fb:binary[@id="%s"]' % cover_id, namespaces=self.__namespaces)
content = base64.b64decode(res[0].text)
with open(os.path.join(working_dir, 'cover.jpeg'), 'wb') as cover_file:
cover_file.write(content)
return ('cover.jpeg', False)
except:
return (None, False)
def __detect_title(self, tree):
res = tree.xpath('/fb:FictionBook/fb:description/fb:title-info/fb:book-title', namespaces=self.__namespaces)
if len(res) == 0:
res = tree.xpath('/FictionBook/description/title-info/book-title')
if len(res) > 0:
self.__set_title__(res[0].text)
return None
def __detect_authors(self, tree):
use_namespaces = True
def subnode_text(node, name):
if use_namespaces:
subnode = node.find('fb:' + name, namespaces=self.__namespaces)
else:
subnode = node.find(name)
text = subnode.text if subnode is not None else ''
return text or ''
def add_author_from_node(node):
first_name = subnode_text(node, 'first-name')
middle_name = subnode_text(node, 'middle-name')
last_name = subnode_text(node, 'last-name')
self.__add_author__(' '.join([first_name, middle_name, last_name]), last_name)
res = tree.xpath('/fb:FictionBook/fb:description/fb:title-info/fb:author', namespaces=self.__namespaces)
if len(res) == 0:
use_namespaces = False
res = tree.xpath('/FictionBook/description/title-info/author')
for node in res:
add_author_from_node(node)
def __detect_language(self, tree):
res = tree.xpath('/fb:FictionBook/fb:description/fb:title-info/fb:lang', namespaces=self.__namespaces)
if len(res) == 0:
res = tree.xpath('/FictionBook/description/title-info/lang')
if len(res) > 0:
self.language_code = res[0].text
def __detect_tags(self, tree):
res = tree.xpath('/fb:FictionBook/fb:description/fb:title-info/fb:genre', namespaces=self.__namespaces)
if len(res) == 0:
res = tree.xpath('/FictionBook/description/title-info/genre')
for node in res:
self.__add_tag__(node.text)
def __detect_series_info(self, tree):
res = tree.xpath('/fb:FictionBook/fb:description/fb:title-info/fb:sequence', namespaces=self.__namespaces)
if len(res) == 0:
res = tree.xpath('/FictionBook/description/title-info/sequence')
if len(res) > 0:
title = BookFile.__normalise_string__(res[0].get('name'))
index = BookFile.__normalise_string__(res[0].get('number'))
if title:
self.series_info = {
'title': title,
'index': index or None
}
def __detect_description(self, tree):
res = tree.xpath('/fb:FictionBook/fb:description/fb:title-info/fb:annotation', namespaces=self.__namespaces)
if len(res) == 0:
res = tree.xpath('/FictionBook/description/title-info/annotation')
if len(res) > 0:
return etree.tostring(res[0], encoding='utf-8', method='text')
return None
class FB2(FB2Base):
def __init__(self, path, original_filename):
FB2Base.__init__(self, path, original_filename, Mimetype.FB2)
def __create_tree__(self):
try:
return etree.parse(self.path)
except:
raise FB2StructureException('the file is not a valid XML')
def __exit__(self, kind, value, traceback):
pass
class FB2Zip(FB2Base):
def __init__(self, path, original_filename):
self.__zip_file = zipfile.ZipFile(path)
try:
if self.__zip_file.testzip():
raise FB2StructureException('broken zip archive')
self.__infos = list_zip_file_infos(self.__zip_file)
if len(self.__infos) != 1:
raise FB2StructureException('archive contains %s files' % len(self.__infos))
except FB2StructureException, error:
self.__zip_file.close()
raise error
except Exception, error:
self.__zip_file.close()
raise FB2StructureException(error)
FB2Base.__init__(self, path, original_filename, Mimetype.FB2_ZIP)
def __create_tree__(self):
with self.__zip_file.open(self.__infos[0]) as entry:
try:
return etree.fromstring(entry.read(50 * 1024 * 1024))
except:
raise FB2StructureException('\'%s\' is not a valid XML' % self.__infos[0].filename)
def __exit__(self, kind, value, traceback):
self.__zip_file.__exit__(kind, value, traceback)
pass
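# Usage sketch (illustrative paths): both classes parse the metadata on
# construction and expose it via the BookFile interface.
#
#   book = FB2('/tmp/book.fb2', 'book.fb2')
#   zipped = FB2Zip('/tmp/book.fb2.zip', 'book.fb2.zip')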
| geometer/book_tools | fbreader/format/fb2.py | Python | mit | 6,699 |
# -*- coding: utf-8 -*-
"""
Admin site configuration for third party authentication
"""
from django.contrib import admin
from config_models.admin import ConfigurationModelAdmin, KeyedConfigurationModelAdmin
from .models import OAuth2ProviderConfig, SAMLProviderConfig, SAMLConfiguration, SAMLProviderData
from .tasks import fetch_saml_metadata
class OAuth2ProviderConfigAdmin(KeyedConfigurationModelAdmin):
""" Django Admin class for OAuth2ProviderConfig """
def get_list_display(self, request):
""" Don't show every single field in the admin change list """
return (
'name', 'enabled', 'backend_name', 'secondary', 'skip_registration_form',
'skip_email_verification', 'change_date', 'changed_by', 'edit_link',
)
admin.site.register(OAuth2ProviderConfig, OAuth2ProviderConfigAdmin)
class SAMLProviderConfigAdmin(KeyedConfigurationModelAdmin):
""" Django Admin class for SAMLProviderConfig """
def get_list_display(self, request):
""" Don't show every single field in the admin change list """
return (
'name', 'enabled', 'backend_name', 'entity_id', 'metadata_source',
'has_data', 'icon_class', 'change_date', 'changed_by', 'edit_link'
)
def has_data(self, inst):
""" Do we have cached metadata for this SAML provider? """
if not inst.is_active:
return None # N/A
data = SAMLProviderData.current(inst.entity_id)
return bool(data and data.is_valid())
has_data.short_description = u'Metadata Ready'
has_data.boolean = True
def save_model(self, request, obj, form, change):
"""
Post save: Queue an asynchronous metadata fetch to update SAMLProviderData.
We only want to do this for manual edits done using the admin interface.
Note: This only works if the celery worker and the app worker are using the
same 'configuration' cache.
"""
super(SAMLProviderConfigAdmin, self).save_model(request, obj, form, change)
fetch_saml_metadata.apply_async((), countdown=2)
admin.site.register(SAMLProviderConfig, SAMLProviderConfigAdmin)
class SAMLConfigurationAdmin(ConfigurationModelAdmin):
""" Django Admin class for SAMLConfiguration """
def get_list_display(self, request):
""" Shorten the public/private keys in the change view """
return (
'change_date', 'changed_by', 'enabled', 'entity_id',
'org_info_str', 'key_summary',
)
def key_summary(self, inst):
""" Short summary of the key pairs configured """
public_key = inst.get_setting('SP_PUBLIC_CERT')
private_key = inst.get_setting('SP_PRIVATE_KEY')
if not public_key or not private_key:
return u'<em>Key pair incomplete/missing</em>'
pub1, pub2 = public_key[0:10], public_key[-10:]
priv1, priv2 = private_key[0:10], private_key[-10:]
return u'Public: {}…{}<br>Private: {}…{}'.format(pub1, pub2, priv1, priv2)
key_summary.allow_tags = True
admin.site.register(SAMLConfiguration, SAMLConfigurationAdmin)
class SAMLProviderDataAdmin(admin.ModelAdmin):
""" Django Admin class for SAMLProviderData (Read Only) """
list_display = ('entity_id', 'is_valid', 'fetched_at', 'expires_at', 'sso_url')
readonly_fields = ('is_valid', )
def get_readonly_fields(self, request, obj=None):
if obj: # editing an existing object
return self.model._meta.get_all_field_names() # pylint: disable=protected-access
return self.readonly_fields
admin.site.register(SAMLProviderData, SAMLProviderDataAdmin)
| shashank971/edx-platform | common/djangoapps/third_party_auth/admin.py | Python | agpl-3.0 | 3,661 |
# coding=utf-8
__author__ = "Gina Häußge <[email protected]>"
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
import os
import traceback
import sys
import time
import re
import tempfile
import logging
from flask import make_response
from octoprint.settings import settings, default_settings
logger = logging.getLogger(__name__)
def getFormattedSize(num):
"""
Taken from http://stackoverflow.com/a/1094933/2028598
"""
for x in ["bytes","KB","MB","GB"]:
if num < 1024.0:
return "%3.1f%s" % (num, x)
num /= 1024.0
return "%3.1f%s" % (num, "TB")
def isAllowedFile(filename, extensions):
return "." in filename and filename.rsplit(".", 1)[1] in extensions
def getFormattedTimeDelta(d):
if d is None:
return None
hours = d.days * 24 + d.seconds // 3600
minutes = (d.seconds % 3600) // 60
seconds = d.seconds % 60
return "%02d:%02d:%02d" % (hours, minutes, seconds)
def getFormattedDateTime(d):
if d is None:
return None
return d.strftime("%Y-%m-%d %H:%M")
def getClass(name):
"""
Taken from http://stackoverflow.com/a/452981/2028598
"""
parts = name.split(".")
module = ".".join(parts[:-1])
m = __import__(module)
for comp in parts[1:]:
m = getattr(m, comp)
return m
def isDevVersion():
gitPath = os.path.abspath(os.path.join(os.path.split(os.path.abspath(__file__))[0], "../../../.git"))
return os.path.exists(gitPath)
def getExceptionString():
locationInfo = traceback.extract_tb(sys.exc_info()[2])[0]
return "%s: '%s' @ %s:%s:%d" % (str(sys.exc_info()[0].__name__), str(sys.exc_info()[1]), os.path.basename(locationInfo[0]), locationInfo[2], locationInfo[1])
def getGitInfo():
gitPath = os.path.abspath(os.path.join(os.path.split(os.path.abspath(__file__))[0], "../../../.git"))
if not os.path.exists(gitPath):
return (None, None)
headref = None
with open(os.path.join(gitPath, "HEAD"), "r") as f:
headref = f.readline().strip()
if headref is None:
return (None, None)
headref = headref[len("ref: "):]
branch = headref[headref.rfind("/") + 1:]
with open(os.path.join(gitPath, headref)) as f:
head = f.readline().strip()
return (branch, head)
def getNewTimeout(type):
now = time.time()
if type not in default_settings["serial"]["timeout"].keys():
# timeout immediately for unknown timeout type
return now
return now + settings().getFloat(["serial", "timeout", type])
def getFreeBytes(path):
"""
Taken from http://stackoverflow.com/a/2372171/2028598
"""
if sys.platform == "win32":
import ctypes
freeBytes = ctypes.c_ulonglong(0)
ctypes.windll.kernel32.GetDiskFreeSpaceExW(ctypes.c_wchar_p(path), None, None, ctypes.pointer(freeBytes))
return freeBytes.value
else:
st = os.statvfs(path)
return st.f_bavail * st.f_frsize
def getRemoteAddress(request):
forwardedFor = request.headers.get("X-Forwarded-For", None)
if forwardedFor is not None:
return forwardedFor.split(",")[0]
return request.remote_addr
def getDosFilename(input, existingFilenames, extension=None):
if input is None:
return None
if extension is None:
extension = "gco"
filename, ext = input.rsplit(".", 1)
return findCollisionfreeName(filename, extension, existingFilenames)
def findCollisionfreeName(input, extension, existingFilenames):
filename = re.sub(r"\s+", "_", input.lower().translate({ord(i):None for i in ".\"/\\[]:;=,"}))
counter = 1
power = 1
while counter < (10 * power):
result = filename[:(6 - power + 1)] + "~" + str(counter) + "." + extension
if result not in existingFilenames:
return result
counter += 1
if counter == 10 * power:
power += 1
raise ValueError("Can't create a collision free filename")
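# Hedged examples (added): an 8.3-style name is derived, then deduplicated
# by bumping the counter after the tilde:
#     findCollisionfreeName("my file", "gco", set())            -> "my_fil~1.gco"
#     findCollisionfreeName("my file", "gco", {"my_fil~1.gco"}) -> "my_fil~2.gco"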
def safeRename(old, new):
"""
Safely renames a file.
	On Windows this is achieved by first moving the new file (if it already exists) to a
	temporary backup, then renaming the old file to the new name and finally removing the
	backup. If anything goes wrong during those steps, the backup (if present) is renamed
	back, so the operation hopefully results in a no-op.
On other operating systems the atomic os.rename function will be used instead.
@param old the path to the old file to be renamed
@param new the path to the new file to be created/replaced
"""
if sys.platform == "win32":
fh, backup = tempfile.mkstemp()
os.close(fh)
try:
if os.path.exists(new):
silentRemove(backup)
os.rename(new, backup)
os.rename(old, new)
os.remove(backup)
except OSError:
# if anything went wrong, try to rename the backup file to its original name
logger.error("Could not perform safe rename, trying to revert")
if os.path.exists(backup):
os.remove(new)
os.rename(backup, new)
else:
# on anything else than windows it's ooooh so much easier...
os.rename(old, new)
def silentRemove(file):
"""
Silently removes a file. Does not raise an error if the file doesn't exist.
@param file the path of the file to be removed
"""
try:
os.remove(file)
except OSError:
pass
def sanitizeAscii(line):
return unicode(line, 'ascii', 'replace').encode('ascii', 'replace').rstrip()
def filterNonAscii(line):
"""
Returns True if the line contains non-ascii characters, false otherwise
@param line the line to test
"""
try:
unicode(line, 'ascii').encode('ascii')
return False
except ValueError:
return True
def getJsonCommandFromRequest(request, valid_commands):
if not "application/json" in request.headers["Content-Type"]:
return None, None, make_response("Expected content-type JSON", 400)
data = request.json
if not "command" in data.keys() or not data["command"] in valid_commands.keys():
return None, None, make_response("Expected valid command", 400)
command = data["command"]
for parameter in valid_commands[command]:
if not parameter in data:
return None, None, make_response("Mandatory parameter %s missing for command %s" % (parameter, command), 400)
return command, data, None
def dict_merge(a, b):
	'''Recursively merges dicts: not just a simple a['key'] = b['key']. If
	both a and b have a key whose value is a dict, then dict_merge is called
	on both values and the result is stored in the returned dictionary.
	Taken from https://www.xormedia.com/recursively-merge-dictionaries-in-python/'''
from copy import deepcopy
if not isinstance(b, dict):
return b
result = deepcopy(a)
for k, v in b.iteritems():
if k in result and isinstance(result[k], dict):
result[k] = dict_merge(result[k], v)
else:
result[k] = deepcopy(v)
return result
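# Hedged example (added): nested dicts are merged key by key, other values
# from b win:
#     dict_merge({"a": {"x": 1}, "b": 2}, {"a": {"y": 3}})
#     -> {"a": {"x": 1, "y": 3}, "b": 2}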
class Object(object):
pass
def interface_addresses(family=None):
import netifaces
if not family:
family = netifaces.AF_INET
for interface in netifaces.interfaces():
try:
ifaddresses = netifaces.ifaddresses(interface)
except:
continue
if family in ifaddresses:
for ifaddress in ifaddresses[family]:
if not ifaddress["addr"].startswith("169.254."):
yield ifaddress["addr"]
def address_for_client(host, port):
import socket
for address in interface_addresses():
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind((address, 0))
sock.connect((host, port))
return address
except Exception as e:
pass
| ymilord/OctoPrint-MrBeam | src/octoprint/util/__init__.py | Python | agpl-3.0 | 7,222 |
#!/usr/bin/env python3
import h5py
import sys
import numpy as np
def main(args):
input_feat_path = args[1]
new_feat_path = args[2]
    value = float(args[3])  # the fill value arrives as a string from argv; ndarray.fill expects a number
feat_template = h5py.File(input_feat_path, 'r')
new_feat = h5py.File(new_feat_path, 'w')
for subj in feat_template:
for ictyp in feat_template[subj]:
dataformat = type(feat_template[subj][ictyp])
if dataformat is h5py._hl.group.Group:
for seg in feat_template[subj][ictyp]:
dataset_dim = np.size(
feat_template[subj][ictyp][seg].value)
dataset = np.empty(dataset_dim)
dataset.fill(value)
new_feat.create_dataset(name='/'.join([subj, ictyp, seg]),
data=dataset)
elif dataformat is h5py._hl.dataset.Dataset:
dataset_dim = np.size(feat_template[subj][ictyp].value)
dataset = np.empty(dataset_dim)
dataset.fill(value)
new_feat.create_dataset(name='/'.join([subj, ictyp]),
data=dataset)
feat_template.close()
new_feat.close()
if __name__ == '__main__':
if len(sys.argv) != 4:
print(
"Usage: create_dummy_data.py <PATH_TO_H5_YOU_WANT_TO_MIMIC> <NAME_OF_DUMMY_H5> <FILL VAL>")
sys.exit()
main(sys.argv)
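# Hedged invocation sketch (added; file names are hypothetical):
#     ./create_dummy_data.py real_features.h5 dummy_features.h5 0.5
# writes dummy_features.h5 with every dataset shaped like the template file
# but filled with the constant 0.5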
| Neuroglycerin/hail-seizure | python/create_dummy_data.py | Python | apache-2.0 | 1,435 |
"""
Base and utility classes for tseries type pandas objects.
"""
from datetime import datetime
from typing import Any, List, Optional, TypeVar, Union, cast
import numpy as np
from pandas._libs import NaT, Timedelta, iNaT, join as libjoin, lib
from pandas._libs.tslibs import Resolution, timezones
from pandas._libs.tslibs.parsing import DateParseError
from pandas._typing import Label
from pandas.compat.numpy import function as nv
from pandas.errors import AbstractMethodError
from pandas.util._decorators import Appender, cache_readonly, doc
from pandas.core.dtypes.common import (
ensure_int64,
is_bool_dtype,
is_dtype_equal,
is_integer,
is_list_like,
is_period_dtype,
is_scalar,
)
from pandas.core.dtypes.concat import concat_compat
from pandas.core.dtypes.generic import ABCIndex, ABCIndexClass, ABCSeries
from pandas.core import algorithms
from pandas.core.arrays import DatetimeArray, PeriodArray, TimedeltaArray
from pandas.core.arrays.datetimelike import DatetimeLikeArrayMixin
from pandas.core.base import IndexOpsMixin
import pandas.core.common as com
from pandas.core.construction import array as pd_array, extract_array
import pandas.core.indexes.base as ibase
from pandas.core.indexes.base import Index, _index_shared_docs
from pandas.core.indexes.extension import (
ExtensionIndex,
inherit_names,
make_wrapped_arith_op,
)
from pandas.core.indexes.numeric import Int64Index
from pandas.core.ops import get_op_result_name
from pandas.core.sorting import ensure_key_mapped
from pandas.core.tools.timedeltas import to_timedelta
from pandas.tseries.offsets import DateOffset, Tick
_index_doc_kwargs = dict(ibase._index_doc_kwargs)
_T = TypeVar("_T", bound="DatetimeIndexOpsMixin")
def _join_i8_wrapper(joinf, with_indexers: bool = True):
"""
Create the join wrapper methods.
"""
@staticmethod # type: ignore
def wrapper(left, right):
if isinstance(left, (np.ndarray, ABCIndex, ABCSeries, DatetimeLikeArrayMixin)):
left = left.view("i8")
if isinstance(right, (np.ndarray, ABCIndex, ABCSeries, DatetimeLikeArrayMixin)):
right = right.view("i8")
results = joinf(left, right)
if with_indexers:
# dtype should be timedelta64[ns] for TimedeltaIndex
# and datetime64[ns] for DatetimeIndex
dtype = left.dtype.base
join_index, left_indexer, right_indexer = results
join_index = join_index.view(dtype)
return join_index, left_indexer, right_indexer
return results
return wrapper
@inherit_names(
["inferred_freq", "_isnan", "_resolution_obj", "resolution"],
DatetimeLikeArrayMixin,
cache=True,
)
@inherit_names(
["mean", "asi8", "freq", "freqstr", "_box_func"], DatetimeLikeArrayMixin,
)
class DatetimeIndexOpsMixin(ExtensionIndex):
"""
Common ops mixin to support a unified interface datetimelike Index.
"""
_data: Union[DatetimeArray, TimedeltaArray, PeriodArray]
freq: Optional[DateOffset]
freqstr: Optional[str]
_resolution_obj: Resolution
_bool_ops: List[str] = []
_field_ops: List[str] = []
hasnans = cache_readonly(DatetimeLikeArrayMixin._hasnans.fget) # type: ignore
_hasnans = hasnans # for index / array -agnostic code
@property
def is_all_dates(self) -> bool:
return True
# ------------------------------------------------------------------------
# Abstract data attributes
@property
def values(self):
# Note: PeriodArray overrides this to return an ndarray of objects.
return self._data._data
def __array_wrap__(self, result, context=None):
"""
Gets called after a ufunc.
"""
result = lib.item_from_zerodim(result)
if is_bool_dtype(result) or lib.is_scalar(result):
return result
attrs = self._get_attributes_dict()
if not is_period_dtype(self.dtype) and attrs["freq"]:
# no need to infer if freq is None
attrs["freq"] = "infer"
return Index(result, **attrs)
# ------------------------------------------------------------------------
def equals(self, other) -> bool:
"""
Determines if two Index objects contain the same elements.
"""
if self.is_(other):
return True
if not isinstance(other, ABCIndexClass):
return False
elif not isinstance(other, type(self)):
try:
other = type(self)(other)
except (ValueError, TypeError, OverflowError):
# e.g.
# ValueError -> cannot parse str entry, or OutOfBoundsDatetime
# TypeError -> trying to convert IntervalIndex to DatetimeIndex
# OverflowError -> Index([very_large_timedeltas])
return False
if not is_dtype_equal(self.dtype, other.dtype):
# have different timezone
return False
return np.array_equal(self.asi8, other.asi8)
@Appender(Index.__contains__.__doc__)
def __contains__(self, key: Any) -> bool:
hash(key)
try:
res = self.get_loc(key)
except (KeyError, TypeError, ValueError):
return False
return bool(
is_scalar(res) or isinstance(res, slice) or (is_list_like(res) and len(res))
)
def sort_values(self, return_indexer=False, ascending=True, key=None):
"""
Return sorted copy of Index.
"""
idx = ensure_key_mapped(self, key)
_as = idx.argsort()
if not ascending:
_as = _as[::-1]
sorted_index = self.take(_as)
if return_indexer:
return sorted_index, _as
else:
return sorted_index
@Appender(_index_shared_docs["take"] % _index_doc_kwargs)
def take(self, indices, axis=0, allow_fill=True, fill_value=None, **kwargs):
nv.validate_take(tuple(), kwargs)
indices = ensure_int64(indices)
maybe_slice = lib.maybe_indices_to_slice(indices, len(self))
if isinstance(maybe_slice, slice):
return self[maybe_slice]
return ExtensionIndex.take(
self, indices, axis, allow_fill, fill_value, **kwargs
)
@doc(IndexOpsMixin.searchsorted, klass="Datetime-like Index")
def searchsorted(self, value, side="left", sorter=None):
if isinstance(value, str):
raise TypeError(
"searchsorted requires compatible dtype or scalar, "
f"not {type(value).__name__}"
)
if isinstance(value, Index):
value = value._data
return self._data.searchsorted(value, side=side, sorter=sorter)
_can_hold_na = True
_na_value = NaT
"""The expected NA value to use with this index."""
def _convert_tolerance(self, tolerance, target):
tolerance = np.asarray(to_timedelta(tolerance).to_numpy())
if target.size != tolerance.size and tolerance.size > 1:
raise ValueError("list-like tolerance size must match target index size")
return tolerance
def tolist(self) -> List:
"""
Return a list of the underlying data.
"""
return list(self.astype(object))
def min(self, axis=None, skipna=True, *args, **kwargs):
"""
Return the minimum value of the Index or minimum along
an axis.
See Also
--------
numpy.ndarray.min
Series.min : Return the minimum value in a Series.
"""
nv.validate_min(args, kwargs)
nv.validate_minmax_axis(axis)
if not len(self):
return self._na_value
i8 = self.asi8
try:
# quick check
if len(i8) and self.is_monotonic:
if i8[0] != iNaT:
return self._box_func(i8[0])
if self.hasnans:
if skipna:
min_stamp = self[~self._isnan].asi8.min()
else:
return self._na_value
else:
min_stamp = i8.min()
return self._box_func(min_stamp)
except ValueError:
return self._na_value
def argmin(self, axis=None, skipna=True, *args, **kwargs):
"""
Returns the indices of the minimum values along an axis.
See `numpy.ndarray.argmin` for more information on the
`axis` parameter.
See Also
--------
numpy.ndarray.argmin
"""
nv.validate_argmin(args, kwargs)
nv.validate_minmax_axis(axis)
i8 = self.asi8
if self.hasnans:
mask = self._isnan
if mask.all() or not skipna:
return -1
i8 = i8.copy()
i8[mask] = np.iinfo("int64").max
return i8.argmin()
def max(self, axis=None, skipna=True, *args, **kwargs):
"""
Return the maximum value of the Index or maximum along
an axis.
See Also
--------
numpy.ndarray.max
Series.max : Return the maximum value in a Series.
"""
nv.validate_max(args, kwargs)
nv.validate_minmax_axis(axis)
if not len(self):
return self._na_value
i8 = self.asi8
try:
# quick check
if len(i8) and self.is_monotonic:
if i8[-1] != iNaT:
return self._box_func(i8[-1])
if self.hasnans:
if skipna:
max_stamp = self[~self._isnan].asi8.max()
else:
return self._na_value
else:
max_stamp = i8.max()
return self._box_func(max_stamp)
except ValueError:
return self._na_value
def argmax(self, axis=None, skipna=True, *args, **kwargs):
"""
Returns the indices of the maximum values along an axis.
See `numpy.ndarray.argmax` for more information on the
`axis` parameter.
See Also
--------
numpy.ndarray.argmax
"""
nv.validate_argmax(args, kwargs)
nv.validate_minmax_axis(axis)
i8 = self.asi8
if self.hasnans:
mask = self._isnan
if mask.all() or not skipna:
return -1
i8 = i8.copy()
i8[mask] = 0
return i8.argmax()
# --------------------------------------------------------------------
# Rendering Methods
def _format_with_header(self, header, na_rep="NaT", **kwargs):
return header + list(self._format_native_types(na_rep, **kwargs))
@property
def _formatter_func(self):
raise AbstractMethodError(self)
def _format_attrs(self):
"""
Return a list of tuples of the (attr,formatted_value).
"""
attrs = super()._format_attrs()
for attrib in self._attributes:
if attrib == "freq":
freq = self.freqstr
if freq is not None:
freq = repr(freq)
attrs.append(("freq", freq))
return attrs
# --------------------------------------------------------------------
# Indexing Methods
def _validate_partial_date_slice(self, reso: str):
raise NotImplementedError
def _parsed_string_to_bounds(self, reso: str, parsed: datetime):
raise NotImplementedError
def _partial_date_slice(
self, reso: str, parsed: datetime, use_lhs: bool = True, use_rhs: bool = True
):
"""
Parameters
----------
reso : str
parsed : datetime
use_lhs : bool, default True
use_rhs : bool, default True
Returns
-------
slice or ndarray[intp]
"""
self._validate_partial_date_slice(reso)
t1, t2 = self._parsed_string_to_bounds(reso, parsed)
i8vals = self.asi8
unbox = self._data._unbox_scalar
if self.is_monotonic:
if len(self) and (
(use_lhs and t1 < self[0] and t2 < self[0])
or ((use_rhs and t1 > self[-1] and t2 > self[-1]))
):
# we are out of range
raise KeyError
# TODO: does this depend on being monotonic _increasing_?
# a monotonic (sorted) series can be sliced
# Use asi8.searchsorted to avoid re-validating Periods/Timestamps
left = i8vals.searchsorted(unbox(t1), side="left") if use_lhs else None
right = i8vals.searchsorted(unbox(t2), side="right") if use_rhs else None
return slice(left, right)
else:
lhs_mask = (i8vals >= unbox(t1)) if use_lhs else True
rhs_mask = (i8vals <= unbox(t2)) if use_rhs else True
# try to find the dates
return (lhs_mask & rhs_mask).nonzero()[0]
# --------------------------------------------------------------------
# Arithmetic Methods
__add__ = make_wrapped_arith_op("__add__")
__sub__ = make_wrapped_arith_op("__sub__")
__radd__ = make_wrapped_arith_op("__radd__")
__rsub__ = make_wrapped_arith_op("__rsub__")
__pow__ = make_wrapped_arith_op("__pow__")
__rpow__ = make_wrapped_arith_op("__rpow__")
__mul__ = make_wrapped_arith_op("__mul__")
__rmul__ = make_wrapped_arith_op("__rmul__")
__floordiv__ = make_wrapped_arith_op("__floordiv__")
__rfloordiv__ = make_wrapped_arith_op("__rfloordiv__")
__mod__ = make_wrapped_arith_op("__mod__")
__rmod__ = make_wrapped_arith_op("__rmod__")
__divmod__ = make_wrapped_arith_op("__divmod__")
__rdivmod__ = make_wrapped_arith_op("__rdivmod__")
__truediv__ = make_wrapped_arith_op("__truediv__")
__rtruediv__ = make_wrapped_arith_op("__rtruediv__")
def isin(self, values, level=None):
"""
Compute boolean array of whether each index value is found in the
passed set of values.
Parameters
----------
values : set or sequence of values
Returns
-------
is_contained : ndarray (boolean dtype)
"""
if level is not None:
self._validate_index_level(level)
if not isinstance(values, type(self)):
try:
values = type(self)(values)
except ValueError:
return self.astype(object).isin(values)
return algorithms.isin(self.asi8, values.asi8)
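    # Hedged usage sketch (added; illustrative only):
    #     >>> idx = pd.to_datetime(["2020-01-01", "2020-01-02"])
    #     >>> idx.isin([pd.Timestamp("2020-01-02")])
    #     array([False,  True])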
@Appender(Index.where.__doc__)
def where(self, cond, other=None):
values = self.view("i8")
try:
other = self._data._validate_where_value(other)
except (TypeError, ValueError) as err:
# Includes tzawareness mismatch and IncompatibleFrequencyError
oth = getattr(other, "dtype", other)
raise TypeError(f"Where requires matching dtype, not {oth}") from err
result = np.where(cond, values, other).astype("i8")
arr = type(self._data)._simple_new(result, dtype=self.dtype)
return type(self)._simple_new(arr, name=self.name)
def _summary(self, name=None) -> str:
"""
Return a summarized representation.
Parameters
----------
name : str
Name to use in the summary representation.
Returns
-------
str
Summarized representation of the index.
"""
formatter = self._formatter_func
if len(self) > 0:
index_summary = f", {formatter(self[0])} to {formatter(self[-1])}"
else:
index_summary = ""
if name is None:
name = type(self).__name__
result = f"{name}: {len(self)} entries{index_summary}"
if self.freq:
result += f"\nFreq: {self.freqstr}"
# display as values, not quoted
result = result.replace("'", "")
return result
def shift(self, periods=1, freq=None):
"""
Shift index by desired number of time frequency increments.
This method is for shifting the values of datetime-like indexes
by a specified time increment a given number of times.
Parameters
----------
periods : int, default 1
Number of periods (or increments) to shift by,
can be positive or negative.
.. versionchanged:: 0.24.0
freq : pandas.DateOffset, pandas.Timedelta or string, optional
Frequency increment to shift by.
If None, the index is shifted by its own `freq` attribute.
Offset aliases are valid strings, e.g., 'D', 'W', 'M' etc.
Returns
-------
pandas.DatetimeIndex
Shifted index.
See Also
--------
Index.shift : Shift values of Index.
PeriodIndex.shift : Shift values of PeriodIndex.
"""
arr = self._data.view()
arr._freq = self.freq
result = arr._time_shift(periods, freq=freq)
return type(self)(result, name=self.name)
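    # Hedged usage sketch (added; illustrative only): values shift by two
    # freq-steps while the frequency itself is preserved.
    #     >>> pd.date_range("2020-01-01", periods=3, freq="D").shift(2)
    #     DatetimeIndex(['2020-01-03', '2020-01-04', '2020-01-05'], dtype='datetime64[ns]', freq='D')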
# --------------------------------------------------------------------
# List-like Methods
def delete(self, loc):
new_i8s = np.delete(self.asi8, loc)
freq = None
if is_period_dtype(self.dtype):
freq = self.freq
elif is_integer(loc):
if loc in (0, -len(self), -1, len(self) - 1):
freq = self.freq
else:
if is_list_like(loc):
loc = lib.maybe_indices_to_slice(ensure_int64(np.array(loc)), len(self))
if isinstance(loc, slice) and loc.step in (1, None):
if loc.start in (0, None) or loc.stop in (len(self), None):
freq = self.freq
arr = type(self._data)._simple_new(new_i8s, dtype=self.dtype, freq=freq)
return type(self)._simple_new(arr, name=self.name)
# --------------------------------------------------------------------
# Join/Set Methods
def _wrap_joined_index(self, joined: np.ndarray, other):
assert other.dtype == self.dtype, (other.dtype, self.dtype)
name = get_op_result_name(self, other)
if is_period_dtype(self.dtype):
freq = self.freq
else:
self = cast(DatetimeTimedeltaMixin, self)
freq = self.freq if self._can_fast_union(other) else None
new_data = type(self._data)._simple_new(joined, dtype=self.dtype, freq=freq)
return type(self)._simple_new(new_data, name=name)
@doc(Index._convert_arr_indexer)
def _convert_arr_indexer(self, keyarr):
if lib.infer_dtype(keyarr) == "string":
# Weak reasoning that indexer is a list of strings
# representing datetime or timedelta or period
try:
extension_arr = pd_array(keyarr, self.dtype)
except (ValueError, DateParseError):
# Fail to infer keyarr from self.dtype
return keyarr
converted_arr = extract_array(extension_arr, extract_numpy=True)
else:
converted_arr = com.asarray_tuplesafe(keyarr)
return converted_arr
class DatetimeTimedeltaMixin(DatetimeIndexOpsMixin, Int64Index):
"""
Mixin class for methods shared by DatetimeIndex and TimedeltaIndex,
but not PeriodIndex
"""
# Compat for frequency inference, see GH#23789
_is_monotonic_increasing = Index.is_monotonic_increasing
_is_monotonic_decreasing = Index.is_monotonic_decreasing
_is_unique = Index.is_unique
def _with_freq(self, freq):
arr = self._data._with_freq(freq)
return type(self)._simple_new(arr, name=self.name)
def _shallow_copy(self, values=None, name: Label = lib.no_default):
name = self.name if name is lib.no_default else name
cache = self._cache.copy() if values is None else {}
if values is None:
values = self._data
if isinstance(values, np.ndarray):
# TODO: We would rather not get here
values = type(self._data)(values, dtype=self.dtype)
result = type(self)._simple_new(values, name=name)
result._cache = cache
return result
# --------------------------------------------------------------------
# Set Operation Methods
@Appender(Index.difference.__doc__)
def difference(self, other, sort=None):
new_idx = super().difference(other, sort=sort)._with_freq(None)
return new_idx
def intersection(self, other, sort=False):
"""
Specialized intersection for DatetimeIndex/TimedeltaIndex.
May be much faster than Index.intersection
Parameters
----------
other : Same type as self or array-like
sort : False or None, default False
Sort the resulting index if possible.
.. versionadded:: 0.24.0
.. versionchanged:: 0.24.1
Changed the default to ``False`` to match the behaviour
from before 0.24.0.
.. versionchanged:: 0.25.0
The `sort` keyword is added
Returns
-------
y : Index or same type as self
"""
self._validate_sort_keyword(sort)
self._assert_can_do_setop(other)
res_name = get_op_result_name(self, other)
if self.equals(other):
return self._get_reconciled_name_object(other)
if len(self) == 0:
return self.copy()
if len(other) == 0:
return other.copy()
if not isinstance(other, type(self)):
result = Index.intersection(self, other, sort=sort)
if isinstance(result, type(self)):
if result.freq is None:
# TODO: no tests rely on this; needed?
result = result._with_freq("infer")
assert result.name == res_name
return result
elif not self._can_fast_intersect(other):
result = Index.intersection(self, other, sort=sort)
assert result.name == res_name
# We need to invalidate the freq because Index.intersection
# uses _shallow_copy on a view of self._data, which will preserve
# self.freq if we're not careful.
result = result._with_freq(None)._with_freq("infer")
return result
# to make our life easier, "sort" the two ranges
if self[0] <= other[0]:
left, right = self, other
else:
left, right = other, self
# after sorting, the intersection always starts with the right index
        # and ends with the index whose last element is smallest
end = min(left[-1], right[-1])
start = right[0]
if end < start:
return type(self)(data=[], dtype=self.dtype, freq=self.freq, name=res_name)
else:
lslice = slice(*left.slice_locs(start, end))
left_chunk = left._values[lslice]
return type(self)._simple_new(left_chunk, name=res_name)
def _can_fast_intersect(self: _T, other: _T) -> bool:
if self.freq is None:
return False
elif other.freq != self.freq:
return False
elif not self.is_monotonic_increasing:
# Because freq is not None, we must then be monotonic decreasing
return False
elif self.freq.is_anchored():
# this along with matching freqs ensure that we "line up",
# so intersection will preserve freq
return True
elif isinstance(self.freq, Tick):
# We "line up" if and only if the difference between two of our points
# is a multiple of our freq
diff = self[0] - other[0]
remainder = diff % self.freq.delta
return remainder == Timedelta(0)
return True
def _can_fast_union(self: _T, other: _T) -> bool:
# Assumes that type(self) == type(other), as per the annotation
# The ability to fast_union also implies that `freq` should be
# retained on union.
if not isinstance(other, type(self)):
return False
freq = self.freq
if freq is None or freq != other.freq:
return False
if not self.is_monotonic_increasing:
# Because freq is not None, we must then be monotonic decreasing
# TODO: do union on the reversed indexes?
return False
if len(self) == 0 or len(other) == 0:
return True
# to make our life easier, "sort" the two ranges
if self[0] <= other[0]:
left, right = self, other
else:
left, right = other, self
right_start = right[0]
left_end = left[-1]
# Only need to "adjoin", not overlap
return (right_start == left_end + freq) or right_start in left
def _fast_union(self, other, sort=None):
if len(other) == 0:
return self.view(type(self))
if len(self) == 0:
return other.view(type(self))
# to make our life easier, "sort" the two ranges
if self[0] <= other[0]:
left, right = self, other
elif sort is False:
# TDIs are not in the "correct" order and we don't want
# to sort but want to remove overlaps
left, right = self, other
left_start = left[0]
loc = right.searchsorted(left_start, side="left")
right_chunk = right._values[:loc]
dates = concat_compat((left._values, right_chunk))
# With sort being False, we can't infer that result.freq == self.freq
# TODO: no tests rely on the _with_freq("infer"); needed?
result = self._shallow_copy(dates)._with_freq("infer")
return result
else:
left, right = other, self
left_end = left[-1]
right_end = right[-1]
# concatenate
if left_end < right_end:
loc = right.searchsorted(left_end, side="right")
right_chunk = right._values[loc:]
dates = concat_compat([left._values, right_chunk])
# The can_fast_union check ensures that the result.freq
# should match self.freq
dates = type(self._data)(dates, freq=self.freq)
result = type(self)._simple_new(dates, name=self.name)
return result
else:
return left
def _union(self, other, sort):
if not len(other) or self.equals(other) or not len(self):
return super()._union(other, sort=sort)
# We are called by `union`, which is responsible for this validation
assert isinstance(other, type(self))
this, other = self._maybe_utc_convert(other)
if this._can_fast_union(other):
result = this._fast_union(other, sort=sort)
if sort is None:
# In the case where sort is None, _can_fast_union
# implies that result.freq should match self.freq
assert result.freq == self.freq, (result.freq, self.freq)
elif result.freq is None:
# TODO: no tests rely on this; needed?
result = result._with_freq("infer")
return result
else:
i8self = Int64Index._simple_new(self.asi8, name=self.name)
i8other = Int64Index._simple_new(other.asi8, name=other.name)
i8result = i8self._union(i8other, sort=sort)
result = type(self)(i8result, dtype=self.dtype, freq="infer")
return result
# --------------------------------------------------------------------
# Join Methods
_join_precedence = 10
_inner_indexer = _join_i8_wrapper(libjoin.inner_join_indexer)
_outer_indexer = _join_i8_wrapper(libjoin.outer_join_indexer)
_left_indexer = _join_i8_wrapper(libjoin.left_join_indexer)
_left_indexer_unique = _join_i8_wrapper(
libjoin.left_join_indexer_unique, with_indexers=False
)
def join(
self, other, how: str = "left", level=None, return_indexers=False, sort=False
):
"""
See Index.join
"""
if self._is_convertible_to_index_for_join(other):
try:
other = type(self)(other)
except (TypeError, ValueError):
pass
this, other = self._maybe_utc_convert(other)
return Index.join(
this,
other,
how=how,
level=level,
return_indexers=return_indexers,
sort=sort,
)
def _maybe_utc_convert(self, other):
this = self
if not hasattr(self, "tz"):
return this, other
if isinstance(other, type(self)):
if self.tz is not None:
if other.tz is None:
raise TypeError("Cannot join tz-naive with tz-aware DatetimeIndex")
elif other.tz is not None:
raise TypeError("Cannot join tz-naive with tz-aware DatetimeIndex")
if not timezones.tz_compare(self.tz, other.tz):
this = self.tz_convert("UTC")
other = other.tz_convert("UTC")
return this, other
@classmethod
def _is_convertible_to_index_for_join(cls, other: Index) -> bool:
"""
return a boolean whether I can attempt conversion to a
DatetimeIndex/TimedeltaIndex
"""
if isinstance(other, cls):
return False
elif len(other) > 0 and other.inferred_type not in (
"floating",
"mixed-integer",
"integer",
"integer-na",
"mixed-integer-float",
"mixed",
):
return True
return False
# --------------------------------------------------------------------
# List-Like Methods
def insert(self, loc, item):
"""
Make new Index inserting new item at location
Parameters
----------
loc : int
item : object
            If the item is not either a Python datetime or a numpy integer-like, the
            returned Index dtype will be object rather than datetime.
Returns
-------
new_index : Index
"""
if isinstance(item, str):
# TODO: Why are strings special?
# TODO: Should we attempt _scalar_from_string?
return self.astype(object).insert(loc, item)
item = self._data._validate_insert_value(item)
freq = None
# check freq can be preserved on edge cases
if self.freq is not None:
if self.size:
if item is NaT:
pass
elif (loc == 0 or loc == -len(self)) and item + self.freq == self[0]:
freq = self.freq
elif (loc == len(self)) and item - self.freq == self[-1]:
freq = self.freq
else:
# Adding a single item to an empty index may preserve freq
if self.freq.is_on_offset(item):
freq = self.freq
item = self._data._unbox_scalar(item)
new_i8s = np.concatenate([self[:loc].asi8, [item], self[loc:].asi8])
arr = type(self._data)._simple_new(new_i8s, dtype=self.dtype, freq=freq)
return type(self)._simple_new(arr, name=self.name)
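    # Hedged usage sketch (added): inserting exactly one freq-step past the end
    # preserves the index frequency per the edge-case checks above.
    #     >>> idx = pd.date_range("2020-01-01", periods=2, freq="D")
    #     >>> idx.insert(2, pd.Timestamp("2020-01-03")).freq
    #     <Day>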
| TomAugspurger/pandas | pandas/core/indexes/datetimelike.py | Python | bsd-3-clause | 31,546 |
import math
import csv
from django.shortcuts import render, render_to_response, redirect
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse
from django.template import RequestContext
from django.contrib import messages
from django.core.context_processors import csrf
from django.core.exceptions import ObjectDoesNotExist
from django import forms
from go_http.optouts import OptOutsApiClient
import control.settings as settings
from models import Dashboard, UserDashboard
from subscription.models import (Message,
Subscription,
MessageSet)
from servicerating.models import Response
from subscription.forms import (MessageFindForm,
MessageUpdateForm,
MessageConfirmForm,
SubscriptionFindForm,
SubscriptionConfirmCancelForm,
SubscriptionConfirmOptOutForm,
SubscriptionConfirmBabyForm,
SubscriptionCancelForm,
SubscriptionOptOutForm,
SubscriptionBabyForm,
)
def get_user_dashboards(request):
if (request.user.has_perm('controlinterface.view_dashboard_private') or
request.user.has_perm('controlinterface.view_dashboard_summary')):
user_dashboards = UserDashboard.objects.get(user=request.user)
dashboards = {}
for dash in user_dashboards.dashboards.all():
dashboards[dash.id] = dash.name
return {"dashboards": dashboards}
else:
return {"dashboards": {}}
@login_required(login_url='/controlinterface/login/')
def index(request):
if (request.user.has_perm('controlinterface.view_dashboard_private') or
request.user.has_perm('controlinterface.view_dashboard_summary')):
user_dashboards = UserDashboard.objects.get(user=request.user)
return redirect('dashboard',
dashboard_id=user_dashboards.default_dashboard.id)
else:
return render(request,
'controlinterface/index_nodash.html')
@login_required(login_url='/controlinterface/login/')
def dashboard(request, dashboard_id):
context = get_user_dashboards(request)
if (request.user.has_perm('controlinterface.view_dashboard_private') or
request.user.has_perm('controlinterface.view_dashboard_summary')):
try:
access = Dashboard.objects.get(
id=dashboard_id).dashboards.filter(
user=request.user).count()
if access == 1:
dashboard = Dashboard.objects.get(id=dashboard_id)
dashboard_widgets = dashboard.widgets.all()
widgets = {}
for widget in dashboard_widgets:
widgets[widget.id] = {
"config": widget,
"data": widget.data.all()
}
context.update({
"widgets": widgets,
"dashboard_api_key": settings.DASHBOARD_API_KEY
})
return render(request,
'controlinterface/index.html',
context)
else:
return render(request,
'controlinterface/index_notdashallowed.html')
except ObjectDoesNotExist:
# User tried to access a dashboard they're not allowed to
return render(request,
'controlinterface/index_notdashallowed.html')
else:
return render(request,
'controlinterface/index_nodash.html')
@login_required(login_url='/controlinterface/login/')
def message_edit(request):
context = get_user_dashboards(request)
if request.method == "POST" and request.POST["messageaction"] == "find":
# Locate the record
form = MessageFindForm(request.POST)
if form.is_valid():
try:
message = Message.objects.get(
message_set_id=form.cleaned_data['message_set'],
sequence_number=form.cleaned_data['sequence_number'],
lang=form.cleaned_data['lang'])
updateform = MessageUpdateForm()
updateform.fields["message_id"].initial = message.id
updateform.fields["content"].initial = message.content
context.update({
"updateform": updateform,
"contentlength": len(message.content)
})
context.update(csrf(request))
except ObjectDoesNotExist:
messages.error(request,
"Message could not be found",
extra_tags="danger")
context = {"form": form}
context.update(csrf(request))
elif request.method == "POST" and \
request.POST["messageaction"] == "update":
# Update the record
updateform = MessageUpdateForm(request.POST)
if updateform.is_valid():
if len(updateform.cleaned_data['content']) > 160:
messages.error(request,
"SMS messages cannot be longer than 160 "
"characters. Please edit this message to be "
"under 160 characters in order to save your "
"changes",
extra_tags="danger")
context.update({"updateform": updateform})
else:
confirmform = MessageConfirmForm()
confirmform.fields[
"message_id"].initial = \
updateform.cleaned_data['message_id']
confirmform.fields[
"content"].initial = updateform.cleaned_data['content']
context.update({"confirmform": confirmform,
"content": updateform.cleaned_data['content']})
context.update(csrf(request))
else:
# Errors are handled by bootstrap form
context.update({"updateform": updateform})
context.update(csrf(request))
elif request.method == "POST" and \
request.POST["messageaction"] == "confirm":
# Update the record
confirmform = MessageConfirmForm(request.POST)
if confirmform.is_valid():
try:
message = Message.objects.get(
pk=confirmform.cleaned_data['message_id'])
message.content = confirmform.cleaned_data['content']
message.save()
messages.success(request,
"Message has been updated",
extra_tags="success")
# Load the blank find form again
form = MessageFindForm()
context.update({"form": form})
context.update(csrf(request))
except ObjectDoesNotExist:
messages.error(request,
"Message could not be found",
extra_tags="danger")
context.update({"confirmform": confirmform})
context.update(csrf(request))
else:
# Errors are handled by bootstrap form
context.update({"confirmform": confirmform})
context.update(csrf(request))
else:
form = MessageFindForm()
context.update({"form": form})
context.update(csrf(request))
return render_to_response("controlinterface/messages.html",
context,
context_instance=RequestContext(request))
@login_required(login_url='/controlinterface/login/')
def subscription_edit(request):
context = get_user_dashboards(request)
if request.method == "POST" and request.POST["subaction"] == "find":
# Locate the record
form = SubscriptionFindForm(request.POST)
if form.is_valid():
msisdn = form.cleaned_data['msisdn']
subscriptions = Subscription.objects.filter(
to_addr=msisdn)
if subscriptions.count() == 0:
confirmoptoutform = SubscriptionConfirmOptOutForm()
confirmoptoutform.fields["msisdn"].initial = msisdn
messages.error(request,
"No subscriptions found for " + msisdn,
extra_tags="danger")
context.update({"confirmoptoutform": confirmoptoutform})
context.update(csrf(request))
else:
confirmcancelform = SubscriptionConfirmCancelForm()
confirmcancelform.fields["msisdn"].initial = msisdn
confirmoptoutform = SubscriptionConfirmOptOutForm()
confirmoptoutform.fields["msisdn"].initial = msisdn
confirmbabyform = SubscriptionConfirmBabyForm()
confirmbabyform.fields["msisdn"].initial = msisdn
confirmbabyform.fields["existing_id"].initial = \
subscriptions[0].id
context.update({
"subscriptions": subscriptions,
"confirmcancelform": confirmcancelform,
"confirmoptoutform": confirmoptoutform,
"confirmbabyform": confirmbabyform,
})
context.update(csrf(request))
elif request.method == "GET" and 'msisdn' in request.GET:
        form = SubscriptionFindForm(request.GET)
msisdn = request.GET.get("msisdn")
subscriptions = Subscription.objects.filter(to_addr=msisdn)
if subscriptions.count() == 0:
confirmoptoutform = SubscriptionConfirmOptOutForm()
confirmoptoutform.fields["msisdn"].initial = msisdn
messages.error(request,
"No subscriptions found for " + msisdn,
extra_tags="danger")
context.update({"confirmoptoutform": confirmoptoutform})
context.update(csrf(request))
else:
confirmcancelform = SubscriptionConfirmCancelForm()
confirmcancelform.fields["msisdn"].initial = msisdn
confirmoptoutform = SubscriptionConfirmOptOutForm()
confirmoptoutform.fields["msisdn"].initial = msisdn
confirmbabyform = SubscriptionConfirmBabyForm()
confirmbabyform.fields["msisdn"].initial = msisdn
confirmbabyform.fields["existing_id"].initial = \
subscriptions[0].id
context.update({
"subscriptions": subscriptions,
"confirmcancelform": confirmcancelform,
"confirmoptoutform": confirmoptoutform,
"confirmbabyform": confirmbabyform,
})
context.update(csrf(request))
elif request.method == "POST" and \
request.POST["subaction"] == "confirmcancel":
# Confirm before update the record
confirmcancelform = SubscriptionConfirmCancelForm(request.POST)
if confirmcancelform.is_valid():
cancelform = SubscriptionCancelForm()
cancelform.fields["msisdn"].initial = \
confirmcancelform.cleaned_data['msisdn']
form = SubscriptionFindForm()
form.fields["msisdn"].widget = forms.HiddenInput()
form.fields["msisdn"].initial = \
confirmcancelform.cleaned_data['msisdn']
context.update({
"cancelform": cancelform,
"form": form
})
context.update(csrf(request))
elif request.method == "POST" and \
request.POST["subaction"] == "confirmoptout":
# Confirm before update the record
confirmoptoutform = SubscriptionConfirmOptOutForm(request.POST)
if confirmoptoutform.is_valid():
optoutform = SubscriptionOptOutForm()
optoutform.fields["msisdn"].initial = \
confirmoptoutform.cleaned_data['msisdn']
form = SubscriptionFindForm()
form.fields["msisdn"].widget = forms.HiddenInput()
form.fields["msisdn"].initial = \
confirmoptoutform.cleaned_data['msisdn']
context.update({
"optoutform": optoutform,
"form": form
})
context.update(csrf(request))
elif request.method == "POST" and \
request.POST["subaction"] == "confirmbaby":
# Confirm before update the record
confirmbabyform = SubscriptionConfirmBabyForm(request.POST)
if confirmbabyform.is_valid():
babyform = SubscriptionBabyForm()
babyform.fields["msisdn"].initial = \
confirmbabyform.cleaned_data['msisdn']
babyform.fields["existing_id"].initial = \
confirmbabyform.cleaned_data['existing_id']
form = SubscriptionFindForm()
form.fields["msisdn"].widget = forms.HiddenInput()
form.fields["msisdn"].initial = \
confirmbabyform.cleaned_data['msisdn']
context.update({
"babyform": babyform,
"form": form
})
context.update(csrf(request))
elif request.method == "POST" and \
request.POST["subaction"] == "cancel":
# Update the record
cancelform = SubscriptionCancelForm(request.POST)
if cancelform.is_valid():
subscriptions = Subscription.objects.filter(
to_addr=cancelform.cleaned_data['msisdn']).update(
active=False)
messages.success(request,
"All subscriptions for %s have been cancelled" %
cancelform.cleaned_data['msisdn'],
extra_tags="success")
form = SubscriptionFindForm()
form.fields[
"msisdn"].initial = cancelform.cleaned_data['msisdn']
context.update({"form": form})
context.update(csrf(request))
elif request.method == "POST" and \
request.POST["subaction"] == "optout":
# Update the record
optoutform = SubscriptionOptOutForm(request.POST)
if optoutform.is_valid():
# Deactivate subscriptions
subscriptions = Subscription.objects.filter(
to_addr=optoutform.cleaned_data['msisdn']).update(
active=False)
# Opt the user out
optout_client = OptOutsApiClient(
auth_token=settings.VUMI_GO_API_TOKEN)
optout_client.set_optout('msisdn',
optoutform.cleaned_data['msisdn'])
messages.success(request,
"All subscriptions for %s have been cancelled \
and user has been opted out" %
optoutform.cleaned_data['msisdn'],
extra_tags="success")
form = SubscriptionFindForm()
form.fields[
"msisdn"].initial = optoutform.cleaned_data['msisdn']
context.update({"form": form})
context.update(csrf(request))
elif request.method == "POST" and \
request.POST["subaction"] == "baby":
# Update the record
babyform = SubscriptionBabyForm(request.POST)
if babyform.is_valid():
# deactivate all
subscriptions = Subscription.objects.filter(
to_addr=babyform.cleaned_data['msisdn']).update(
active=False)
# load existing to clone
subscription = Subscription.objects.get(
pk=babyform.cleaned_data['existing_id'])
subscription.pk = None
subscription.process_status = 0 # Ready
subscription.active = True
subscription.completed = False
subscription.next_sequence_number = 1
newsub = subscription
baby_message_set = MessageSet.objects.get(short_name="baby1")
newsub.message_set = baby_message_set
newsub.schedule = (
baby_message_set.default_schedule)
newsub.save()
messages.success(request,
"All active subscriptions for %s have been "
"cancelled and baby subscription added" %
babyform.cleaned_data['msisdn'],
extra_tags="success")
# Load the blank find form again
form = SubscriptionFindForm()
form.fields[
"msisdn"].initial = babyform.cleaned_data['msisdn']
context.update({"form": form})
context.update(csrf(request))
else:
form = SubscriptionFindForm()
context.update({"form": form})
context.update(csrf(request))
return render_to_response("controlinterface/subscription.html",
context,
context_instance=RequestContext(request))
def empty_response_map():
response_map = {
'question_1_friendliness': {
'very-satisfied': 0,
'satisfied': 0,
'not-satisfied': 0,
'very-unsatisfied': 0
},
'question_2_waiting_times_feel': {
'very-satisfied': 0,
'satisfied': 0,
'not-satisfied': 0,
'very-unsatisfied': 0
},
'question_3_waiting_times_length': {
'less-than-an-hour': 0,
'between-1-and-3-hours': 0,
'more-than-4-hours': 0,
'all-day': 0
},
'question_4_cleanliness': {
'very-satisfied': 0,
'satisfied': 0,
'not-satisfied': 0,
'very-unsatisfied': 0
},
'question_5_privacy': {
'very-satisfied': 0,
'satisfied': 0,
'not-satisfied': 0,
'very-unsatisfied': 0
}
}
return response_map
@login_required(login_url='/controlinterface/login/')
def servicerating(request):
context = get_user_dashboards(request)
if (request.user.has_perm('controlinterface.view_dashboard_private') or
request.user.has_perm('controlinterface.view_dashboard_summary')):
averages = {}
all_responses = Response.objects.all()
num_questions = 5.0
total_responses = 0
response_map = empty_response_map()
for response in all_responses:
total_responses += 1
response_map[response.key][response.value] += 1
num_ratings = math.ceil(total_responses / num_questions)
averages_questions = [
'question_1_friendliness',
'question_2_waiting_times_feel',
'question_4_cleanliness',
'question_5_privacy'
]
question_3_map = response_map['question_3_waiting_times_length']
waiting_times = {
'less_than_an_hour': round(
(question_3_map['less-than-an-hour'] / num_ratings * 100),
1),
'between_1_and_3_hours': round(
(question_3_map['between-1-and-3-hours'] / num_ratings * 100),
1),
'more_than_4_hours': round(
(question_3_map['more-than-4-hours'] / num_ratings * 100),
1),
'all_day': round(
(question_3_map['all-day'] / num_ratings * 100),
1)
}
for question in averages_questions:
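            # weighted mean on a 4-point scale:
            # very-satisfied=4, satisfied=3, not-satisfied=2, very-unsatisfied=1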
averages[question] = round((
(response_map[question]['very-satisfied'] * 4) +
(response_map[question]['satisfied'] * 3) +
(response_map[question]['not-satisfied'] * 2) +
(response_map[question]['very-unsatisfied'] * 1)
) / num_ratings, 1)
context.update({
'averages': averages,
'waiting_times': waiting_times
})
return render(request, 'controlinterface/serviceratings.html', context)
@login_required(login_url='/controlinterface/login/')
def servicerating_report(request):
if (request.user.has_perm('controlinterface.view_dashboard_private') or
request.user.has_perm('controlinterface.view_dashboard_summary')):
qs = Response.objects.raw("""
SELECT servicerating_response.*, servicerating_extra.value
AS clinic_code from servicerating_response
INNER JOIN servicerating_extra ON
servicerating_response.contact_id = servicerating_extra.contact_id
WHERE servicerating_extra.key = 'clinic_code'""")
# Create the HttpResponse object with the appropriate CSV header.
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = (
'attachment; filename="servicerating_incl_clinic_code.csv"')
writer = csv.writer(response)
writer.writerow(["Rating ID", "Contact ID", "Key", "Value",
"Created At", "Updated At", "Clinic Code"])
for obj in qs:
writer.writerow([obj.id, obj.contact_id, obj.key, obj.value,
obj.created_at, obj.updated_at, obj.clinic_code])
return response
| praekelt/ndoh-control | controlinterface/views.py | Python | bsd-3-clause | 21,742 |
#!/bin/python
import os, subprocess
import logging
from autotest.client import test
from autotest.client.shared import error
class libsndfile(test.test):
"""
Autotest module for testing basic functionality
of libsndfile
    @author Anitha MallojiRao [email protected]
"""
version = 1
nfail = 0
path = ''
def initialize(self):
"""
Sets the overall failure counter for the test.
"""
self.nfail = 0
        logging.info('\n Test initialized successfully')
def run_once(self, test_path=''):
"""
Trigger test run
"""
try:
os.environ["LTPBIN"] = "%s/shared" %(test_path)
ret_val = subprocess.Popen(['./libsndfile.sh'], cwd="%s/libsndfile" %(test_path))
ret_val.communicate()
if ret_val.returncode != 0:
self.nfail += 1
except error.CmdError, e:
self.nfail += 1
logging.error("Test Failed: %s", e)
def postprocess(self):
if self.nfail != 0:
logging.info('\n nfails is non-zero')
raise error.TestError('\nTest failed')
else:
logging.info('\n Test completed successfully ')
| PoornimaNayak/autotest-client-tests | linux-tools/libsndfile/libsndfile.py | Python | gpl-2.0 | 1,253 |
from handlers.base_handler import BaseHandler
db = {
1:{'id':1, 'name':'Fido', 'image_url':'https://images-na.ssl-images-amazon.com/images/G/01/img15/pet-products/small-tiles/23695_pets_vertical_store_dogs_small_tile_8._CB312176604_.jpg'},
2:{'id':2, 'name': 'Cesar', 'image_url':'http://3.bp.blogspot.com/-NAJ179pS4VU/Up3IKdKSnVI/AAAAAAAAAvg/mGshsQ078Gk/s1600/bom-dia.jpg'}
}
class ShowDogHandler(BaseHandler):
def get(self, id=None):
dog = db.get(int(id))
if dog:
self.response.write(self.render("show_dog.html", **dog))
else:
self.response.write("No dog found for id {}".format(id))
class CreateDogHandler(BaseHandler):
def get(self):
self.response.write(self.render("create_dog.html", name="", image_url=""))
def post(self):
name = self.request.params['name']
url = self.request.params['image_url']
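        # NOTE: tutorial stub -- the submitted name/image_url are not yet persisted to `db`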
self.response.write("Creating dog {}".format(name))
class EditDogHandler(BaseHandler):
def get(self, id=None):
dog = db.get(int(id))
if dog:
self.response.write(self.render("create_dog.html", **dog))
else:
self.response.write("No dog found for id {}".format(id))
def post(self, id=None):
name = self.request.params['name']
url = self.request.params['image_url']
self.response.write("Editing dog {}".format(name))
| xstrengthofonex/code-live-tutorials | python_web_development/templating/handlers/dog_handlers.py | Python | mit | 1,402 |
# preliminary tests indicate that for a sum of consecutive primes to stay below a million,
# the run can contain at most 547 primes
import primes, sys
Pl = list(primes.sieve(1000000))
P = set(Pl)
for l in range(547, 1, -1):
    if l % 100 == 0:  # progress report (l is at most 547, so a larger modulus would never fire)
print("Testing " + str(l))
for i in range(len(Pl) - l + 1):
s = sum(Pl[i:i+l])
if s >= 1000000:
break
if s in P:
print(s)
sys.exit(0)
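# Hedged sketch (added) of the local `primes.sieve` helper this script relies
# on -- assumed to be a generator over all primes below the bound:
#
# def sieve(n):
#     flags = [True] * n
#     flags[0] = flags[1] = False
#     for i in range(2, n):
#         if flags[i]:
#             yield i
#             for j in range(i * i, n, i):
#                 flags[j] = False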
| firefly431/projecteuler | 50.py | Python | gpl-2.0 | 446 |
from a10sdk.common.A10BaseClass import A10BaseClass
class Oper(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param alloc_failed: {"type": "number", "format": "number"}
:param vrid: {"enum": ["default"], "type": "string", "format": "enum"}
:param ha_group_id: {"enum": ["default"], "type": "string", "format": "enum"}
:param ip: {"type": "string", "format": "ipv4-address"}
:param ports_consumed: {"type": "number", "format": "number"}
:param state: {"enum": ["Up", "Down", "Disabled", "Maintenance", "Unknown", "DIS-UP", "DIS-DOWN", "DIS-MAINTENANCE"], "type": "string", "format": "enum"}
:param ipv6: {"type": "string", "format": "ipv6-address"}
:param ports_freed_total: {"type": "number", "format": "number"}
:param ports_consumed_total: {"type": "number", "format": "number"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "oper"
self.DeviceProxy = ""
self.alloc_failed = ""
self.vrid = ""
self.ha_group_id = ""
self.ip = ""
self.ports_consumed = ""
self.state = ""
self.ipv6 = ""
self.ports_freed_total = ""
self.ports_consumed_total = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
class Port(A10BaseClass):
"""Class Description::
Operational Status for the object port.
Class port supports CRUD Operations and inherits from `common/A10BaseClass`.
This class is the `"PARENT"` class for this module.`
:param protocol: {"enum": ["tcp", "udp"], "description": "'tcp': TCP Port; 'udp': UDP Port; ", "format": "enum", "type": "string", "oid": "1002", "optional": false}
:param port_number: {"description": "Port Number", "format": "number", "optional": false, "oid": "1001", "maximum": 65534, "minimum": 0, "type": "number"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
URL for this object::
`https://<Hostname|Ip address>//axapi/v3/slb/server/{name}/port/{port_number}+{protocol}/oper`.
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.required=[]
self.b_key = "port"
self.a10_url="/axapi/v3/slb/server/{name}/port/{port_number}+{protocol}/oper"
self.DeviceProxy = ""
self.oper = {}
self.protocol = ""
self.port_number = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
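# Hedged usage sketch (added; the DeviceProxy wiring is an assumption based on
# the docstrings above):
#
# port_oper = Port(port_number=80, protocol="tcp")
# port_oper.DeviceProxy = device_proxy  # a configured a10sdk device proxy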
| amwelch/a10sdk-python | a10sdk/core/slb/slb_server_port_oper.py | Python | apache-2.0 | 2,709 |
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
"""
Important classes of Flink Batch API:
- :class:`ExecutionEnvironment`:
The ExecutionEnvironment is the context in which a batch program is executed.
"""
from pyflink.dataset.execution_environment import ExecutionEnvironment
__all__ = ['ExecutionEnvironment']
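# Hedged usage sketch (added):
#
# from pyflink.dataset import ExecutionEnvironment
# env = ExecutionEnvironment.get_execution_environment()
# env.set_parallelism(1)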
| hequn8128/flink | flink-python/pyflink/dataset/__init__.py | Python | apache-2.0 | 1,234 |
import json
class Student(object):
def __init__(self, name, age, score):
self.name = name
self.age = age
self.score = score
s = Student('Bob', 20, 88)
# json.dumps cannot serialize an arbitrary object; expose its attributes via __dict__
print(json.dumps(s, default=lambda obj: obj.__dict__))
# -*- coding: utf-8 -*-
__author__ = 'xuanwo'
from setuptools import setup, find_packages
import chineseregion
entry_points = {
"console_scripts": [
"chineseregion = chineseregion.main:main",
]
}
# with open("requirements.txt") as f:
# requires = [l for l in f.read().splitlines() if l]
setup(
name="chineseregion",
version=chineseregion.__version__,
description="a chinese region lib for python",
author="xuanwo",
author_email="[email protected]",
keywords="chinese, region, lib",
license="MIT License",
packages=find_packages(),
entry_points=entry_points,
# install_requires=requires,
include_package_data=True,
package_data={
'': ['*.json']
},
classifiers=[
        'Development Status :: 3 - Alpha',
'Environment :: Console',
'License :: OSI Approved :: MIT License',
'Operating System :: MacOS',
'Operating System :: POSIX',
'Operating System :: POSIX :: Linux',
'Operating System :: Microsoft',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
],
)
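
# The console_scripts entry point above installs a `chineseregion` command
# that dispatches to chineseregion.main:main; e.g. (illustrative):
#
#   $ pip install .
#   $ chineseregion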
| Xuanwo/chineseregion | setup.py | Python | mit | 1,230 |
from .fields import *
| leliel12/handy | handy/models/__init__.py | Python | bsd-3-clause | 21 |
# Copyright (c) 2017, Frappe Technologies Pvt. Ltd. and Contributors
# See license.txt
import unittest
import frappe
from frappe.utils import flt, nowdate
from erpnext.assets.doctype.asset.test_asset import (
create_asset,
create_asset_data,
set_depreciation_settings_in_company,
)
class TestAssetRepair(unittest.TestCase):
def setUp(self):
set_depreciation_settings_in_company()
create_asset_data()
frappe.db.sql("delete from `tabTax Rule`")
def test_update_status(self):
asset = create_asset(submit=1)
initial_status = asset.status
asset_repair = create_asset_repair(asset = asset)
if asset_repair.repair_status == "Pending":
asset.reload()
self.assertEqual(asset.status, "Out of Order")
asset_repair.repair_status = "Completed"
asset_repair.save()
asset_status = frappe.db.get_value("Asset", asset_repair.asset, "status")
self.assertEqual(asset_status, initial_status)
def test_stock_item_total_value(self):
asset_repair = create_asset_repair(stock_consumption = 1)
for item in asset_repair.stock_items:
total_value = flt(item.valuation_rate) * flt(item.consumed_quantity)
self.assertEqual(item.total_value, total_value)
def test_total_repair_cost(self):
asset_repair = create_asset_repair(stock_consumption = 1)
total_repair_cost = asset_repair.repair_cost
self.assertEqual(total_repair_cost, asset_repair.repair_cost)
for item in asset_repair.stock_items:
total_repair_cost += item.total_value
self.assertEqual(total_repair_cost, asset_repair.total_repair_cost)
def test_repair_status_after_submit(self):
asset_repair = create_asset_repair(submit = 1)
self.assertNotEqual(asset_repair.repair_status, "Pending")
def test_stock_items(self):
asset_repair = create_asset_repair(stock_consumption = 1)
self.assertTrue(asset_repair.stock_consumption)
self.assertTrue(asset_repair.stock_items)
def test_warehouse(self):
asset_repair = create_asset_repair(stock_consumption = 1)
self.assertTrue(asset_repair.stock_consumption)
self.assertTrue(asset_repair.warehouse)
def test_decrease_stock_quantity(self):
asset_repair = create_asset_repair(stock_consumption = 1, submit = 1)
stock_entry = frappe.get_last_doc('Stock Entry')
self.assertEqual(stock_entry.stock_entry_type, "Material Issue")
self.assertEqual(stock_entry.items[0].s_warehouse, asset_repair.warehouse)
self.assertEqual(stock_entry.items[0].item_code, asset_repair.stock_items[0].item)
self.assertEqual(stock_entry.items[0].qty, asset_repair.stock_items[0].consumed_quantity)
def test_increase_in_asset_value_due_to_stock_consumption(self):
asset = create_asset(calculate_depreciation = 1, submit=1)
initial_asset_value = get_asset_value(asset)
asset_repair = create_asset_repair(asset= asset, stock_consumption = 1, submit = 1)
asset.reload()
increase_in_asset_value = get_asset_value(asset) - initial_asset_value
self.assertEqual(asset_repair.stock_items[0].total_value, increase_in_asset_value)
def test_increase_in_asset_value_due_to_repair_cost_capitalisation(self):
asset = create_asset(calculate_depreciation = 1, submit=1)
initial_asset_value = get_asset_value(asset)
asset_repair = create_asset_repair(asset= asset, capitalize_repair_cost = 1, submit = 1)
asset.reload()
increase_in_asset_value = get_asset_value(asset) - initial_asset_value
self.assertEqual(asset_repair.repair_cost, increase_in_asset_value)
def test_purchase_invoice(self):
asset_repair = create_asset_repair(capitalize_repair_cost = 1, submit = 1)
self.assertTrue(asset_repair.purchase_invoice)
def test_gl_entries(self):
asset_repair = create_asset_repair(capitalize_repair_cost = 1, submit = 1)
gl_entry = frappe.get_last_doc('GL Entry')
self.assertEqual(asset_repair.name, gl_entry.voucher_no)
def test_increase_in_asset_life(self):
asset = create_asset(calculate_depreciation = 1, submit=1)
initial_num_of_depreciations = num_of_depreciations(asset)
create_asset_repair(asset= asset, capitalize_repair_cost = 1, submit = 1)
asset.reload()
self.assertEqual((initial_num_of_depreciations + 1), num_of_depreciations(asset))
self.assertEqual(asset.schedules[-1].accumulated_depreciation_amount, asset.finance_books[0].value_after_depreciation)
def get_asset_value(asset):
return asset.finance_books[0].value_after_depreciation
def num_of_depreciations(asset):
return asset.finance_books[0].total_number_of_depreciations
def create_asset_repair(**args):
from erpnext.accounts.doctype.purchase_invoice.test_purchase_invoice import make_purchase_invoice
from erpnext.stock.doctype.warehouse.test_warehouse import create_warehouse
args = frappe._dict(args)
if args.asset:
asset = args.asset
else:
asset = create_asset(is_existing_asset = 1, submit=1)
asset_repair = frappe.new_doc("Asset Repair")
asset_repair.update({
"asset": asset.name,
"asset_name": asset.asset_name,
"failure_date": nowdate(),
"description": "Test Description",
"repair_cost": 0,
"company": asset.company
})
if args.stock_consumption:
asset_repair.stock_consumption = 1
asset_repair.warehouse = create_warehouse("Test Warehouse", company = asset.company)
asset_repair.append("stock_items", {
"item": args.item or args.item_code or "_Test Item",
"valuation_rate": args.rate if args.get("rate") is not None else 100,
"consumed_quantity": args.qty or 1
})
asset_repair.insert(ignore_if_duplicate=True)
if args.submit:
asset_repair.repair_status = "Completed"
asset_repair.cost_center = "_Test Cost Center - _TC"
if args.stock_consumption:
stock_entry = frappe.get_doc({
"doctype": "Stock Entry",
"stock_entry_type": "Material Receipt",
"company": asset.company
})
stock_entry.append('items', {
"t_warehouse": asset_repair.warehouse,
"item_code": asset_repair.stock_items[0].item,
"qty": asset_repair.stock_items[0].consumed_quantity
})
stock_entry.submit()
if args.capitalize_repair_cost:
asset_repair.capitalize_repair_cost = 1
asset_repair.repair_cost = 1000
if asset.calculate_depreciation:
asset_repair.increase_in_asset_life = 12
asset_repair.purchase_invoice = make_purchase_invoice().name
asset_repair.submit()
return asset_repair
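
# Usage sketch (illustrative; mirrors the flows exercised above): a submitted
# repair that capitalizes its cost against a freshly created asset.
#
# repair = create_asset_repair(capitalize_repair_cost=1, submit=1)
# assert repair.repair_status == "Completed"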
| mhbu50/erpnext | erpnext/assets/doctype/asset_repair/test_asset_repair.py | Python | gpl-3.0 | 6,232 |
'''
Unit tests for oc route
'''
import os
import six
import sys
import unittest
import mock
# Removing invalid variable names for tests so that I can
# keep them brief
# pylint: disable=invalid-name,no-name-in-module
# Disable import-error b/c our libraries aren't loaded in jenkins
# pylint: disable=import-error,wrong-import-position
# place class in our python path
module_path = os.path.join('/'.join(os.path.realpath(__file__).split('/')[:-4]), 'library') # noqa: E501
sys.path.insert(0, module_path)
from oc_route import OCRoute, locate_oc_binary # noqa: E402
class OCRouteTest(unittest.TestCase):
    '''
    Test class for OCRoute
    '''
@mock.patch('oc_route.locate_oc_binary')
@mock.patch('oc_route.Utils.create_tmpfile_copy')
@mock.patch('oc_route.OCRoute._run')
def test_list_route(self, mock_cmd, mock_tmpfile_copy, mock_oc_binary):
''' Testing getting a route '''
# Arrange
# run_ansible input parameters
params = {
'kubeconfig': '/etc/origin/master/admin.kubeconfig',
'state': 'list',
'debug': False,
'name': 'test',
'namespace': 'default',
'tls_termination': 'passthrough',
'dest_cacert_path': None,
'cacert_path': None,
'cert_path': None,
'key_path': None,
'dest_cacert_content': None,
'cacert_content': None,
'cert_content': None,
'key_content': None,
'service_name': 'testservice',
'host': 'test.openshift.com',
'wildcard_policy': None,
'weight': None,
'port': None
}
route_result = '''{
"kind": "Route",
"apiVersion": "v1",
"metadata": {
"name": "test",
"namespace": "default",
"selfLink": "/oapi/v1/namespaces/default/routes/test",
"uid": "1b127c67-ecd9-11e6-96eb-0e0d9bdacd26",
"resourceVersion": "439182",
"creationTimestamp": "2017-02-07T01:59:48Z"
},
"spec": {
"host": "test.example",
"to": {
"kind": "Service",
"name": "test",
"weight": 100
},
"port": {
"targetPort": 8443
},
"tls": {
"termination": "passthrough"
},
"wildcardPolicy": "None"
},
"status": {
"ingress": [
{
"host": "test.example",
"routerName": "router",
"conditions": [
{
"type": "Admitted",
"status": "True",
"lastTransitionTime": "2017-02-07T01:59:48Z"
}
],
"wildcardPolicy": "None"
}
]
}
}'''
# Return values of our mocked function call. These get returned once per call.
mock_cmd.side_effect = [
# First call to mock
(0, route_result, ''),
]
mock_oc_binary.side_effect = [
'oc'
]
mock_tmpfile_copy.side_effect = [
'/tmp/mock.kubeconfig',
]
# Act
results = OCRoute.run_ansible(params, False)
# Assert
self.assertFalse(results['changed'])
self.assertEqual(results['state'], 'list')
self.assertEqual(results['results'][0]['metadata']['name'], 'test')
# Making sure our mock was called as we expected
mock_cmd.assert_has_calls([
mock.call(['oc', 'get', 'route', 'test', '-o', 'json', '-n', 'default'], None),
])
@mock.patch('oc_route.locate_oc_binary')
@mock.patch('oc_route.Utils.create_tmpfile_copy')
@mock.patch('oc_route.Yedit._write')
@mock.patch('oc_route.OCRoute._run')
def test_create_route(self, mock_cmd, mock_write, mock_tmpfile_copy, mock_oc_binary):
        ''' Testing creating a route '''
# Arrange
# run_ansible input parameters
params = {
'kubeconfig': '/etc/origin/master/admin.kubeconfig',
'state': 'present',
'debug': False,
'name': 'test',
'namespace': 'default',
'tls_termination': 'edge',
'dest_cacert_path': None,
'cacert_path': None,
'cert_path': None,
'key_path': None,
'dest_cacert_content': None,
'cacert_content': 'testing',
'cert_content': 'testing',
'key_content': 'testing',
'service_name': 'testservice',
'host': 'test.openshift.com',
'wildcard_policy': None,
'weight': None,
'port': None
}
route_result = '''{
"apiVersion": "v1",
"kind": "Route",
"metadata": {
"creationTimestamp": "2017-02-07T20:55:10Z",
"name": "test",
"namespace": "default",
"resourceVersion": "517745",
"selfLink": "/oapi/v1/namespaces/default/routes/test",
"uid": "b6f25898-ed77-11e6-9755-0e737db1e63a"
},
"spec": {
"host": "test.openshift.com",
"tls": {
"caCertificate": "testing",
"certificate": "testing",
"key": "testing",
"termination": "edge"
},
"to": {
"kind": "Service",
"name": "testservice",
"weight": 100
},
"wildcardPolicy": "None"
},
"status": {
"ingress": [
{
"conditions": [
{
"lastTransitionTime": "2017-02-07T20:55:10Z",
"status": "True",
"type": "Admitted"
}
],
"host": "test.openshift.com",
"routerName": "router",
"wildcardPolicy": "None"
}
]
}
}'''
test_route = '''\
kind: Route
spec:
tls:
caCertificate: testing
termination: edge
certificate: testing
key: testing
to:
kind: Service
name: testservice
weight: 100
host: test.openshift.com
wildcardPolicy: None
apiVersion: v1
metadata:
namespace: default
name: test
'''
# Return values of our mocked function call. These get returned once per call.
mock_cmd.side_effect = [
# First call to mock
(1, '', 'Error from server: routes "test" not found'),
(1, '', 'Error from server: routes "test" not found'),
(0, 'route "test" created', ''),
(0, route_result, ''),
]
mock_oc_binary.side_effect = [
'oc'
]
mock_tmpfile_copy.side_effect = [
'/tmp/mock.kubeconfig',
]
mock_write.assert_has_calls = [
# First call to mock
mock.call('/tmp/test', test_route)
]
# Act
results = OCRoute.run_ansible(params, False)
# Assert
self.assertTrue(results['changed'])
self.assertEqual(results['state'], 'present')
self.assertEqual(results['results']['results'][0]['metadata']['name'], 'test')
# Making sure our mock was called as we expected
mock_cmd.assert_has_calls([
mock.call(['oc', 'get', 'route', 'test', '-o', 'json', '-n', 'default'], None),
mock.call(['oc', 'create', '-f', mock.ANY, '-n', 'default'], None),
mock.call(['oc', 'get', 'route', 'test', '-o', 'json', '-n', 'default'], None),
])
@unittest.skipIf(six.PY3, 'py2 test only')
@mock.patch('os.path.exists')
@mock.patch('os.environ.get')
def test_binary_lookup_fallback(self, mock_env_get, mock_path_exists):
''' Testing binary lookup fallback '''
mock_env_get.side_effect = lambda _v, _d: ''
mock_path_exists.side_effect = lambda _: False
self.assertEqual(locate_oc_binary(), 'oc')
@unittest.skipIf(six.PY3, 'py2 test only')
@mock.patch('os.path.exists')
@mock.patch('os.environ.get')
def test_binary_lookup_in_path(self, mock_env_get, mock_path_exists):
''' Testing binary lookup in path '''
oc_bin = '/usr/bin/oc'
mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
mock_path_exists.side_effect = lambda f: f == oc_bin
self.assertEqual(locate_oc_binary(), oc_bin)
@unittest.skipIf(six.PY3, 'py2 test only')
@mock.patch('os.path.exists')
@mock.patch('os.environ.get')
def test_binary_lookup_in_usr_local(self, mock_env_get, mock_path_exists):
''' Testing binary lookup in /usr/local/bin '''
oc_bin = '/usr/local/bin/oc'
mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
mock_path_exists.side_effect = lambda f: f == oc_bin
self.assertEqual(locate_oc_binary(), oc_bin)
@unittest.skipIf(six.PY3, 'py2 test only')
@mock.patch('os.path.exists')
@mock.patch('os.environ.get')
def test_binary_lookup_in_home(self, mock_env_get, mock_path_exists):
''' Testing binary lookup in ~/bin '''
oc_bin = os.path.expanduser('~/bin/oc')
mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
mock_path_exists.side_effect = lambda f: f == oc_bin
self.assertEqual(locate_oc_binary(), oc_bin)
@unittest.skipIf(six.PY2, 'py3 test only')
@mock.patch('shutil.which')
@mock.patch('os.environ.get')
def test_binary_lookup_fallback_py3(self, mock_env_get, mock_shutil_which):
''' Testing binary lookup fallback '''
mock_env_get.side_effect = lambda _v, _d: ''
mock_shutil_which.side_effect = lambda _f, path=None: None
self.assertEqual(locate_oc_binary(), 'oc')
@unittest.skipIf(six.PY2, 'py3 test only')
@mock.patch('shutil.which')
@mock.patch('os.environ.get')
def test_binary_lookup_in_path_py3(self, mock_env_get, mock_shutil_which):
''' Testing binary lookup in path '''
oc_bin = '/usr/bin/oc'
mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
mock_shutil_which.side_effect = lambda _f, path=None: oc_bin
self.assertEqual(locate_oc_binary(), oc_bin)
@unittest.skipIf(six.PY2, 'py3 test only')
@mock.patch('shutil.which')
@mock.patch('os.environ.get')
def test_binary_lookup_in_usr_local_py3(self, mock_env_get, mock_shutil_which):
''' Testing binary lookup in /usr/local/bin '''
oc_bin = '/usr/local/bin/oc'
mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
mock_shutil_which.side_effect = lambda _f, path=None: oc_bin
self.assertEqual(locate_oc_binary(), oc_bin)
@unittest.skipIf(six.PY2, 'py3 test only')
@mock.patch('shutil.which')
@mock.patch('os.environ.get')
def test_binary_lookup_in_home_py3(self, mock_env_get, mock_shutil_which):
''' Testing binary lookup in ~/bin '''
oc_bin = os.path.expanduser('~/bin/oc')
mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
mock_shutil_which.side_effect = lambda _f, path=None: oc_bin
self.assertEqual(locate_oc_binary(), oc_bin)
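
# Usage sketch (illustrative): the helper mirrors the lookup order asserted
# above -- PATH, then /usr/local/bin and ~/bin, falling back to plain 'oc'.
#
# oc_bin = locate_oc_binary()
# subprocess.call([oc_bin, 'version'])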
| brenton/openshift-ansible | roles/lib_openshift/src/test/unit/test_oc_route.py | Python | apache-2.0 | 12,042 |
# encoding: UTF-8

'''
This module implements the market data recording engine: it aggregates TICK
data, builds minute bars (K-lines) from it and inserts both into the database.
DR_setting.json configures which contracts to record and the dominant
(active) contract codes.
'''

import json
import os
import copy
from datetime import datetime
# Empty is needed by the worker loop in run() when the queue times out
from Queue import Queue, Empty
from threading import Thread

from eventEngine import *
from vtGateway import VtSubscribeReq, VtLogData
from drBase import *
from vtFunction import todayDate


########################################################################
class DrEngine(object):
    """Data recording engine"""

    settingFileName = 'DR_setting.json'
    settingFileName = os.getcwd() + '/dataRecorder/' + settingFileName
    #----------------------------------------------------------------------
    def __init__(self, mainEngine, eventEngine):
        """Constructor"""
        self.mainEngine = mainEngine
        self.eventEngine = eventEngine

        # Current date
        self.today = todayDate()

        # Dominant contract map: key is the concrete contract code
        # (e.g. IF1604), value is the dominant contract code (e.g. IF0000)
        self.activeSymbolDict = {}

        # Tick object dictionary
        self.tickDict = {}

        # Bar object dictionary
        self.barDict = {}

        # State of the dedicated database-insert thread
        self.active = False                     # working flag
        self.queue = Queue()                    # task queue
        self.thread = Thread(target=self.run)   # worker thread

        # Load settings and subscribe to market data
        self.loadSetting()
    #----------------------------------------------------------------------
    def loadSetting(self):
        """Load settings and subscribe to market data"""
        with open(self.settingFileName) as f:
            drSetting = json.load(f)

            # If "working" is set to false, the recorder stays disabled
            working = drSetting['working']
            if not working:
                return

            if 'tick' in drSetting:
                l = drSetting['tick']

                for setting in l:
                    symbol = setting[0]
                    vtSymbol = symbol

                    req = VtSubscribeReq()
                    req.symbol = setting[0]

                    # The LTS and IB interfaces need the exchange code to subscribe
                    if len(setting) >= 3:
                        req.exchange = setting[2]
                        vtSymbol = '.'.join([symbol, req.exchange])

                    # The IB interface additionally needs currency and product class
                    if len(setting) >= 5:
                        req.currency = setting[3]
                        req.productClass = setting[4]

                    self.mainEngine.subscribe(req, setting[1])

                    drTick = DrTickData()   # this instance could cache partial data (currently unused)
                    self.tickDict[vtSymbol] = drTick

            if 'bar' in drSetting:
                l = drSetting['bar']

                for setting in l:
                    symbol = setting[0]
                    vtSymbol = symbol

                    req = VtSubscribeReq()
                    req.symbol = symbol

                    if len(setting) >= 3:
                        req.exchange = setting[2]
                        vtSymbol = '.'.join([symbol, req.exchange])

                    if len(setting) >= 5:
                        req.currency = setting[3]
                        req.productClass = setting[4]

                    self.mainEngine.subscribe(req, setting[1])

                    bar = DrBarData()
                    self.barDict[vtSymbol] = bar

            if 'active' in drSetting:
                d = drSetting['active']

                # For the IB and LTS interfaces vtSymbol must carry the ".exchange" suffix
                for activeSymbol, vtSymbol in d.items():
                    self.activeSymbolDict[vtSymbol] = activeSymbol

            # Start the database-insert thread
            self.start()

            # Register event handlers
            self.registerEvent()
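
    # Illustrative DR_setting.json layout, inferred from loadSetting above
    # (per entry: symbol, gateway name, then optional exchange, currency and
    # product class; the gateway name "CTP" is only an example):
    #
    # {
    #     "working": true,
    #     "tick": [["rb1610", "CTP"]],
    #     "bar": [["rb1610", "CTP"]],
    #     "active": {"rb0000": "rb1610"}
    # }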
    #----------------------------------------------------------------------
    def processTickEvent(self, event):
        """Process a market data (tick) push"""
        tick = event.dict_['data']
        vtSymbol = tick.vtSymbol

        # Convert the tick into the recorder's format
        drTick = DrTickData()
        d = drTick.__dict__
        for key in d.keys():
            if key != 'datetime':
                d[key] = tick.__getattribute__(key)
        drTick.datetime = datetime.strptime(' '.join([tick.date, tick.time]), '%Y%m%d %H:%M:%S.%f')

        # Products grouped by the closing time of their night session.
        # Note: treasury futures are not covered by this filter!
        ninetoeleven = ["bu", "rb", "hc", "ru"]                                 # night session until 23:00
        ninetohalfeleven = ["p", "j", "m", "y", "a", "b", "jm", "i", "SR",
                            "CF", "RM", "MA", "ZC", "FG", "OI"]                 # night session until 23:30
        ninetoone = ["cu", "al", "zn", "pb", "sn", "ni"]                        # night session until 01:00
        ninetohalftwo = ["ag", "au"]                                            # night session until 02:30

        # Extract the product code (the leading letters of the symbol) so the
        # membership test is exact and e.g. "b" cannot match "rb1610"
        product = ''
        for char in vtSymbol:
            if char.isalpha():
                product += char
            else:
                break

        # Decide whether the product is currently inside a trading session
        now = datetime.now()
        dayStart = datetime.today().replace(hour=9, minute=0, second=0)
        dayEnd = datetime.today().replace(hour=15, minute=0, second=0)
        nightStart = datetime.today().replace(hour=21, minute=0, second=0)
        inDaySession = dayStart <= now <= dayEnd

        if product in ninetoeleven:
            inSession = inDaySession or (nightStart <= now <= datetime.today().replace(hour=23, minute=0, second=0))
        elif product in ninetohalfeleven:
            inSession = inDaySession or (nightStart <= now <= datetime.today().replace(hour=23, minute=30, second=0))
        elif product in ninetoone:
            inSession = (inDaySession or now >= nightStart
                         or now <= datetime.today().replace(hour=1, minute=0, second=0))
        elif product in ninetohalftwo:
            inSession = (inDaySession or now >= nightStart
                         or now <= datetime.today().replace(hour=2, minute=30, second=0))
        else:
            # Products without a night session are only recorded during the day
            inSession = inDaySession

        if not inSession:
            return

        # Record the tick
        if vtSymbol in self.tickDict:
            self.insertData(TICK_DB_NAME, vtSymbol, drTick)

            if vtSymbol in self.activeSymbolDict:
                activeSymbol = self.activeSymbolDict[vtSymbol]
                self.insertData(TICK_DB_NAME, activeSymbol, drTick)

            # Log it
            self.writeDrLog(u'Recorded tick %s, time:%s, last:%s, bid:%s, ask:%s'
                            % (drTick.vtSymbol, drTick.time, drTick.lastPrice,
                               drTick.bidPrice1, drTick.askPrice1))

        # Update the minute bar
        if vtSymbol in self.barDict:
            bar = self.barDict[vtSymbol]

            # First tick ever, or first tick of a new minute: flush the
            # finished bar and start a fresh one
            if not bar.datetime or bar.datetime.minute != drTick.datetime.minute:
                if bar.vtSymbol:
                    newBar = copy.copy(bar)
                    self.insertData(MINUTE_DB_NAME, vtSymbol, newBar)

                    if vtSymbol in self.activeSymbolDict:
                        activeSymbol = self.activeSymbolDict[vtSymbol]
                        self.insertData(MINUTE_DB_NAME, activeSymbol, newBar)

                    self.writeDrLog(u'Recorded minute bar %s, time:%s, O:%s, H:%s, L:%s, C:%s'
                                    % (bar.vtSymbol, bar.time, bar.open, bar.high,
                                       bar.low, bar.close))

                bar.vtSymbol = drTick.vtSymbol
                bar.symbol = drTick.symbol
                bar.exchange = drTick.exchange

                bar.open = drTick.lastPrice
                bar.high = drTick.lastPrice
                bar.low = drTick.lastPrice
                bar.close = drTick.lastPrice

                bar.date = drTick.date
                bar.time = drTick.time
                bar.datetime = drTick.datetime
                bar.volume = drTick.volume
                bar.openInterest = drTick.openInterest
            # Otherwise keep accumulating into the current bar
            else:
                bar.high = max(bar.high, drTick.lastPrice)
                bar.low = min(bar.low, drTick.lastPrice)
                bar.close = drTick.lastPrice
    #----------------------------------------------------------------------
    def registerEvent(self):
        """Register event handlers"""
        self.eventEngine.register(EVENT_TICK, self.processTickEvent)

    #----------------------------------------------------------------------
    def insertData(self, dbName, collectionName, data):
        """Queue a record for database insertion (data may be CtaTickData or CtaBarData)"""
        self.queue.put((dbName, collectionName, data.__dict__))

    #----------------------------------------------------------------------
    def run(self):
        """Run the database-insert worker loop"""
        while self.active:
            try:
                dbName, collectionName, d = self.queue.get(block=True, timeout=1)
                self.mainEngine.dbInsert(dbName, collectionName, d)
            except Empty:
                pass

    #----------------------------------------------------------------------
    def start(self):
        """Start the worker thread"""
        self.active = True
        self.thread.start()

    #----------------------------------------------------------------------
    def stop(self):
        """Stop the worker thread"""
        if self.active:
            self.active = False
            self.thread.join()

    #----------------------------------------------------------------------
    def writeDrLog(self, content):
        """Emit a data recorder log event"""
        log = VtLogData()
        log.logContent = content

        event = Event(type_=EVENT_DATARECORDER_LOG)
        event.dict_['data'] = log
        self.eventEngine.put(event)
| yongfuyang/vnpy | vn.trader/dataRecorder/drEngine_kangseung.py | Python | mit | 22,549 |
# -*- coding: utf-8 -*-
"""Combatant self-serve views."""
# standard library imports
from datetime import datetime

# third-party imports
from flask import Blueprint, render_template, current_app

# application imports
from emol.models import Combatant, Discipline, UpdateRequest
from emol.models.faux_user import FauxUserSwitch
BLUEPRINT = Blueprint('combatant', __name__)
@BLUEPRINT.route('/card/<card_id>', methods=['GET'])
def view_card(card_id):
"""Handle requests to view a combatant's card.
Args:
card_id: A combatant card ID
Returns:
- The message view if the card ID is invalid
- The combatant's card if the card ID is valid
"""
current_app.logger.info('Card for {}'.format(card_id))
combatant = Combatant.query.filter(Combatant.card_id == card_id).one_or_none()
if combatant is None:
current_app.logger.error(
'No combatant record for card ID: {}'.format(card_id)
)
return render_template(
'message/message.html',
message='Could not find the specified combatant'
)
return render_template(
'combatant/card.html',
success=True,
disciplines=Discipline.query.all(),
combatant=combatant
)
@BLUEPRINT.route('/update/<token>', methods=['GET'])
def update_info(token):
"""Handle requests to consume a combatant update info request.
Args:
token: An info update request token
Returns:
- The message view if the token is invalid
- The self-serve info update view if the token is valid
"""
token_valid = True
update_request = UpdateRequest.query.filter(UpdateRequest.token == token).one_or_none()
if update_request is None:
token_valid = False
elif update_request.expiry < datetime.utcnow():
token_valid = False
elif update_request.consumed is not None:
token_valid = False
    if token_valid is False:
return render_template(
'message/message.html',
message='Invalid token provided'
)
# Use the FauxUserSwitch context manager to provide an
# "authorized user" for Jinja environment
with FauxUserSwitch():
return render_template(
'combatant/combatant_update_info.html',
combatant=update_request.combatant,
token=token,
is_self_serve=True
)
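
# Token-validation sketch (illustrative): the checks above accept a token
# only when it exists, has not expired, and was never consumed.
#
# ur = UpdateRequest.query.filter(UpdateRequest.token == token).one_or_none()
# valid = (ur is not None and ur.expiry >= datetime.utcnow()
#          and ur.consumed is None)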
| lrt512/emol | emol/emol/views/combatant/combatant.py | Python | mit | 2,510 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import logging
import sys
import os
import datetime
import numpy as np
from .. import core
from .. import util
def emulate_weather(initialdata, finaltimestamp=-1, mincloudcover=0, maxcloudcover=1, minambienttemperature=-5, maxambienttemperature=25):
    """
    Emulate weather conditions.

    Parameters
    ----------
    initialdata : dict
        Dictionary with initial data.
        Must have 'timestamp', 'cloudcover' and 'ambienttemperature'
        keys with array-like values.

    finaltimestamp : number
        Unix timestamp up to which the emulation runs; when negative the
        current UTC time is used.

    mincloudcover, maxcloudcover : number
        Bounds for the randomly varying cloud cover.

    minambienttemperature, maxambienttemperature : number
        Bounds steering the ambient temperature correction term.
    """
# generate timestep vector
timestep = 300
if finaltimestamp < 0:
dt_ref = datetime.datetime(1970, 1, 1)
dt_now = datetime.datetime.utcnow()
finaltimestamp = int( (dt_now-dt_ref).total_seconds() )
timestamp = np.arange( initialdata['timestamp'][-1],finaltimestamp,timestep )
if not timestamp[-1] == finaltimestamp:
timestamp = np.append(timestamp,finaltimestamp)
solar_azimuth = np.zeros(len(timestamp))
solar_altitude = np.zeros(len(timestamp))
I_direct_clearsky = np.zeros(len(timestamp))
I_diffuse_clearsky = np.zeros(len(timestamp))
I_direct_cloudy = np.zeros(len(timestamp))
I_diffuse_cloudy = np.zeros(len(timestamp))
cloudcover = np.zeros(len(timestamp))
ambienttemperature = np.zeros(len(timestamp))
I_total_horizontal = np.zeros(len(timestamp))
I_direct_horizontal = np.zeros(len(timestamp))
I_diffuse_horizontal = np.zeros(len(timestamp))
I_ground_horizontal = np.zeros(len(timestamp))
cloudcover[0] = initialdata['cloudcover'][-1]
ambienttemperature[0] = initialdata['ambienttemperature'][-1]
latitude = core.states['settings/location/latitude'].value
longitude = core.states['settings/location/longitude'].value
elevation = core.states['settings/location/elevation'].value
for i,ts in enumerate(timestamp):
solar_azimuth[i],solar_altitude[i] = util.weather.sunposition(latitude,longitude,elevation=elevation,timestamp=ts)
I_direct_clearsky[i],I_diffuse_clearsky[i] = util.weather.clearskyirrradiance(solar_azimuth[i],solar_altitude[i],timestamp=ts)
# random variation in cloud cover
if i < len(timestamp)-1:
delta_t = timestamp[i+1]-timestamp[i]
cloudcover[i+1] = min(maxcloudcover,max(mincloudcover, cloudcover[i] + 0.0001*(2*np.random.random()-1)*delta_t ))
I_direct_cloudy[i],I_diffuse_cloudy[i] = util.weather.cloudyskyirrradiance(I_direct_clearsky[i],I_diffuse_clearsky[i],cloudcover[i],solar_azimuth[i],solar_altitude[i],timestamp=ts)
I_total_horizontal[i], I_direct_horizontal[i], I_diffuse_horizontal[i], I_ground_horizontal[i] = util.weather.incidentirradiance(I_direct_cloudy[i],I_diffuse_cloudy[i],solar_azimuth[i],solar_altitude[i],0,0)
# ambient temperature dependent on horizontal irradiance and cloud cover
if i+1 < len(timestamp):
c_tot = 800e3
skytemperature = -18*(1-cloudcover[i]) -14*cloudcover[i]
U_sky = 8.5*(1-cloudcover[i]) + 3.0*cloudcover[i]
T_avg = ambienttemperature[i]
q_corr = 10*( np.exp(-(T_avg-minambienttemperature)) - np.exp(-(maxambienttemperature-T_avg)) )
delta_t = timestamp[i+1]-timestamp[i]
ambienttemperature[i+1] = ambienttemperature[i] + (skytemperature-ambienttemperature[i])*U_sky*delta_t/c_tot + I_total_horizontal[i]*delta_t/c_tot + q_corr*delta_t/c_tot
data = {
'timestamp': timestamp.tolist(),
'solar_azimuth': solar_azimuth,
'solar_altitude': solar_altitude,
'I_direct_clearsky': I_direct_clearsky,
'I_diffuse_clearsky': I_diffuse_clearsky,
'I_direct_cloudy': I_direct_cloudy,
'I_diffuse_cloudy': I_diffuse_cloudy,
'cloudcover': cloudcover,
'ambienttemperature': ambienttemperature,
'I_total_horizontal': I_total_horizontal,
'I_direct_horizontal': I_direct_horizontal,
'I_diffuse_horizontal': I_diffuse_horizontal,
'I_ground_horizontal': I_ground_horizontal,
}
return data
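
# Usage sketch (illustrative; requires a configured homecon core for the
# location settings): advance the emulation one hour past the last sample.
#
# initial = {'timestamp': [1483228800], 'cloudcover': [0.3],
#            'ambienttemperature': [12.0]}
# data = emulate_weather(initial, finaltimestamp=1483228800 + 3600)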
| BrechtBa/homeconn | homecon/demo/weather.py | Python | gpl-3.0 | 4,353 |
#-*- coding: UTF-8 -*-
from ctypes import POINTER, c_void_p, c_int, c_uint, c_char, c_float, Structure, c_char_p, c_double, c_ubyte, c_size_t, c_uint32
class Vector2D(Structure):
"""
See 'aiVector2D.h' for details.
"""
_fields_ = [
("x", c_float),("y", c_float),
]
class Matrix3x3(Structure):
"""
See 'aiMatrix3x3.h' for details.
"""
_fields_ = [
("a1", c_float),("a2", c_float),("a3", c_float),
("b1", c_float),("b2", c_float),("b3", c_float),
("c1", c_float),("c2", c_float),("c3", c_float),
]
class Texel(Structure):
"""
See 'aiTexture.h' for details.
"""
_fields_ = [
("b", c_ubyte),("g", c_ubyte),("r", c_ubyte),("a", c_ubyte),
]
class Color4D(Structure):
"""
See 'aiColor4D.h' for details.
"""
_fields_ = [
# Red, green, blue and alpha color values
("r", c_float),("g", c_float),("b", c_float),("a", c_float),
]
class Plane(Structure):
"""
See 'aiTypes.h' for details.
"""
_fields_ = [
# Plane equation
("a", c_float),("b", c_float),("c", c_float),("d", c_float),
]
class Color3D(Structure):
"""
See 'aiTypes.h' for details.
"""
_fields_ = [
# Red, green and blue color values
("r", c_float),("g", c_float),("b", c_float),
]
class String(Structure):
"""
See 'aiTypes.h' for details.
"""
MAXLEN = 1024
_fields_ = [
# Binary length of the string excluding the terminal 0. This is NOT the
# logical length of strings containing UTF-8 multibyte sequences! It's
# the number of bytes from the beginning of the string to its end.
("length", c_size_t),
# String buffer. Size limit is MAXLEN
("data", c_char*MAXLEN),
]
class MaterialPropertyString(Structure):
"""
See 'aiTypes.h' for details.
The size of length is truncated to 4 bytes on 64-bit platforms when used as a
material property (see MaterialSystem.cpp aiMaterial::AddProperty() for details).
"""
MAXLEN = 1024
_fields_ = [
# Binary length of the string excluding the terminal 0. This is NOT the
# logical length of strings containing UTF-8 multibyte sequences! It's
# the number of bytes from the beginning of the string to its end.
("length", c_uint32),
# String buffer. Size limit is MAXLEN
("data", c_char*MAXLEN),
]
class MemoryInfo(Structure):
"""
See 'aiTypes.h' for details.
"""
_fields_ = [
# Storage allocated for texture data
("textures", c_uint),
# Storage allocated for material data
("materials", c_uint),
# Storage allocated for mesh data
("meshes", c_uint),
# Storage allocated for node data
("nodes", c_uint),
# Storage allocated for animation data
("animations", c_uint),
# Storage allocated for camera data
("cameras", c_uint),
# Storage allocated for light data
("lights", c_uint),
# Total storage allocated for the full import.
("total", c_uint),
]
class Quaternion(Structure):
"""
See 'aiQuaternion.h' for details.
"""
_fields_ = [
# w,x,y,z components of the quaternion
("w", c_float),("x", c_float),("y", c_float),("z", c_float),
]
class Face(Structure):
"""
See 'aiMesh.h' for details.
"""
_fields_ = [
# Number of indices defining this face.
# The maximum value for this member is
#AI_MAX_FACE_INDICES.
("mNumIndices", c_uint),
# Pointer to the indices array. Size of the array is given in numIndices.
("mIndices", POINTER(c_uint)),
]
class VertexWeight(Structure):
"""
See 'aiMesh.h' for details.
"""
_fields_ = [
# Index of the vertex which is influenced by the bone.
("mVertexId", c_uint),
# The strength of the influence in the range (0...1).
# The influence from all bones at one vertex amounts to 1.
("mWeight", c_float),
]
class Matrix4x4(Structure):
"""
See 'aiMatrix4x4.h' for details.
"""
_fields_ = [
("a1", c_float),("a2", c_float),("a3", c_float),("a4", c_float),
("b1", c_float),("b2", c_float),("b3", c_float),("b4", c_float),
("c1", c_float),("c2", c_float),("c3", c_float),("c4", c_float),
("d1", c_float),("d2", c_float),("d3", c_float),("d4", c_float),
]
class Vector3D(Structure):
"""
See 'aiVector3D.h' for details.
"""
_fields_ = [
("x", c_float),("y", c_float),("z", c_float),
]
class MeshKey(Structure):
"""
See 'aiAnim.h' for details.
"""
_fields_ = [
# The time of this key
("mTime", c_double),
# Index into the aiMesh::mAnimMeshes array of the
# mesh corresponding to the
#aiMeshAnim hosting this
# key frame. The referenced anim mesh is evaluated
# according to the rules defined in the docs for
#aiAnimMesh.
("mValue", c_uint),
]
class Node(Structure):
    """
    See 'aiScene.h' for details.
    """

# The fields are assigned after the class definition so that the structure
# can point to itself (POINTER(Node) needs the name to exist already).
Node._fields_ = [
# The name of the node.
# The name might be empty (length of zero) but all nodes which
# need to be accessed afterwards by bones or anims are usually named.
# Multiple nodes may have the same name, but nodes which are accessed
# by bones (see
#aiBone and
#aiMesh::mBones) *must* be unique.
# Cameras and lights are assigned to a specific node name - if there
# are multiple nodes with this name, they're assigned to each of them.
# <br>
# There are no limitations regarding the characters contained in
# this text. You should be able to handle stuff like whitespace, tabs,
# linefeeds, quotation marks, ampersands, ... .
("mName", String),
# The transformation relative to the node's parent.
("mTransformation", Matrix4x4),
# Parent node. NULL if this node is the root node.
("mParent", POINTER(Node)),
# The number of child nodes of this node.
("mNumChildren", c_uint),
# The child nodes of this node. NULL if mNumChildren is 0.
("mChildren", POINTER(POINTER(Node))),
# The number of meshes of this node.
("mNumMeshes", c_uint),
# The meshes of this node. Each entry is an index into the mesh
("mMeshes", POINTER(c_uint)),
]
class Light(Structure):
"""
See 'aiLight.h' for details.
"""
_fields_ = [
# The name of the light source.
# There must be a node in the scenegraph with the same name.
# This node specifies the position of the light in the scene
# hierarchy and can be animated.
("mName", String),
# The type of the light source.
# aiLightSource_UNDEFINED is not a valid value for this member.
("mType", c_uint),
# Position of the light source in space. Relative to the
# transformation of the node corresponding to the light.
# The position is undefined for directional lights.
("mPosition", Vector3D),
# Direction of the light source in space. Relative to the
# transformation of the node corresponding to the light.
# The direction is undefined for point lights. The vector
# may be normalized, but it needn't.
("mDirection", Vector3D),
# Constant light attenuation factor.
# The intensity of the light source at a given distance 'd' from
# the light's position is
# @code
# Atten = 1/( att0 + att1
# d + att2
# d*d)
# @endcode
# This member corresponds to the att0 variable in the equation.
# Naturally undefined for directional lights.
("mAttenuationConstant", c_float),
# Linear light attenuation factor.
# The intensity of the light source at a given distance 'd' from
# the light's position is
# @code
# Atten = 1/( att0 + att1
# d + att2
# d*d)
# @endcode
# This member corresponds to the att1 variable in the equation.
# Naturally undefined for directional lights.
("mAttenuationLinear", c_float),
# Quadratic light attenuation factor.
# The intensity of the light source at a given distance 'd' from
# the light's position is
# @code
# Atten = 1/( att0 + att1
# d + att2
# d*d)
# @endcode
# This member corresponds to the att2 variable in the equation.
# Naturally undefined for directional lights.
("mAttenuationQuadratic", c_float),
# Diffuse color of the light source
# The diffuse light color is multiplied with the diffuse
# material color to obtain the final color that contributes
# to the diffuse shading term.
("mColorDiffuse", Color3D),
# Specular color of the light source
# The specular light color is multiplied with the specular
# material color to obtain the final color that contributes
# to the specular shading term.
("mColorSpecular", Color3D),
# Ambient color of the light source
# The ambient light color is multiplied with the ambient
# material color to obtain the final color that contributes
# to the ambient shading term. Most renderers will ignore
# this value it, is just a remaining of the fixed-function pipeline
# that is still supported by quite many file formats.
("mColorAmbient", Color3D),
# Inner angle of a spot light's light cone.
# The spot light has maximum influence on objects inside this
# angle. The angle is given in radians. It is 2PI for point
# lights and undefined for directional lights.
("mAngleInnerCone", c_float),
# Outer angle of a spot light's light cone.
# The spot light does not affect objects outside this angle.
# The angle is given in radians. It is 2PI for point lights and
# undefined for directional lights. The outer angle must be
# greater than or equal to the inner angle.
# It is assumed that the application uses a smooth
# interpolation between the inner and the outer cone of the
# spot light.
("mAngleOuterCone", c_float),
]
class Texture(Structure):
"""
See 'aiTexture.h' for details.
"""
_fields_ = [
# Width of the texture, in pixels
# If mHeight is zero the texture is compressed in a format
# like JPEG. In this case mWidth specifies the size of the
# memory area pcData is pointing to, in bytes.
("mWidth", c_uint),
# Height of the texture, in pixels
# If this value is zero, pcData points to an compressed texture
# in any format (e.g. JPEG).
("mHeight", c_uint),
# A hint from the loader to make it easier for applications
# to determine the type of embedded compressed textures.
# If mHeight != 0 this member is undefined. Otherwise it
# is set set to '\\0\\0\\0\\0' if the loader has no additional
# information about the texture file format used OR the
# file extension of the format without a trailing dot. If there
# are multiple file extensions for a format, the shortest
# extension is chosen (JPEG maps to 'jpg', not to 'jpeg').
# E.g. 'dds\\0', 'pcx\\0', 'jpg\\0'. All characters are lower-case.
# The fourth character will always be '\\0'.
("achFormatHint", c_char*4),
# Data of the texture.
# Points to an array of mWidth
# mHeight aiTexel's.
# The format of the texture data is always ARGB8888 to
# make the implementation for user of the library as easy
# as possible. If mHeight = 0 this is a pointer to a memory
# buffer of size mWidth containing the compressed texture
# data. Good luck, have fun!
("pcData", POINTER(Texel)),
]
class Ray(Structure):
"""
See 'aiTypes.h' for details.
"""
_fields_ = [
# Position and direction of the ray
("pos", Vector3D),("dir", Vector3D),
]
class UVTransform(Structure):
"""
See 'aiMaterial.h' for details.
"""
_fields_ = [
# Translation on the u and v axes.
# The default value is (0|0).
("mTranslation", Vector2D),
# Scaling on the u and v axes.
# The default value is (1|1).
("mScaling", Vector2D),
# Rotation - in counter-clockwise direction.
# The rotation angle is specified in radians. The
# rotation center is 0.5f|0.5f. The default value
# 0.f.
("mRotation", c_float),
]
class MaterialProperty(Structure):
"""
See 'aiMaterial.h' for details.
"""
_fields_ = [
# Specifies the name of the property (key)
# Keys are generally case insensitive.
("mKey", String),
# Textures: Specifies their exact usage semantic.
# For non-texture properties, this member is always 0
# (or, better-said,
#aiTextureType_NONE).
("mSemantic", c_uint),
# Textures: Specifies the index of the texture.
# For non-texture properties, this member is always 0.
("mIndex", c_uint),
# Size of the buffer mData is pointing to, in bytes.
# This value may not be 0.
("mDataLength", c_uint),
# Type information for the property.
# Defines the data layout inside the data buffer. This is used
# by the library internally to perform debug checks and to
# utilize proper type conversions.
# (It's probably a hacky solution, but it works.)
("mType", c_uint),
# Binary buffer to hold the property's value.
# The size of the buffer is always mDataLength.
("mData", POINTER(c_char)),
]
class Material(Structure):
"""
See 'aiMaterial.h' for details.
"""
_fields_ = [
# List of all material properties loaded.
("mProperties", POINTER(POINTER(MaterialProperty))),
# Number of properties in the data base
("mNumProperties", c_uint),
# Storage allocated
("mNumAllocated", c_uint),
]
class Bone(Structure):
"""
See 'aiMesh.h' for details.
"""
_fields_ = [
# The name of the bone.
("mName", String),
# The number of vertices affected by this bone
# The maximum value for this member is
#AI_MAX_BONE_WEIGHTS.
("mNumWeights", c_uint),
# The vertices affected by this bone
("mWeights", POINTER(VertexWeight)),
# Matrix that transforms from mesh space to bone space in bind pose
("mOffsetMatrix", Matrix4x4),
]
class Mesh(Structure):
"""
See 'aiMesh.h' for details.
"""
AI_MAX_FACE_INDICES = 0x7fff
AI_MAX_BONE_WEIGHTS = 0x7fffffff
AI_MAX_VERTICES = 0x7fffffff
AI_MAX_FACES = 0x7fffffff
AI_MAX_NUMBER_OF_COLOR_SETS = 0x8
AI_MAX_NUMBER_OF_TEXTURECOORDS = 0x8
_fields_ = [
# Bitwise combination of the members of the
#aiPrimitiveType enum.
# This specifies which types of primitives are present in the mesh.
# The "SortByPrimitiveType"-Step can be used to make sure the
# output meshes consist of one primitive type each.
("mPrimitiveTypes", c_uint),
# The number of vertices in this mesh.
# This is also the size of all of the per-vertex data arrays.
# The maximum value for this member is
#AI_MAX_VERTICES.
("mNumVertices", c_uint),
# The number of primitives (triangles, polygons, lines) in this mesh.
# This is also the size of the mFaces array.
# The maximum value for this member is
#AI_MAX_FACES.
("mNumFaces", c_uint),
# Vertex positions.
# This array is always present in a mesh. The array is
# mNumVertices in size.
("mVertices", POINTER(Vector3D)),
# Vertex normals.
# The array contains normalized vectors, NULL if not present.
# The array is mNumVertices in size. Normals are undefined for
# point and line primitives. A mesh consisting of points and
# lines only may not have normal vectors. Meshes with mixed
# primitive types (i.e. lines and triangles) may have normals,
# but the normals for vertices that are only referenced by
# point or line primitives are undefined and set to QNaN (WARN:
# qNaN compares to inequal to *everything*, even to qNaN itself.
# Using code like this to check whether a field is qnan is:
# @code
#define IS_QNAN(f) (f != f)
# @endcode
# still dangerous because even 1.f == 1.f could evaluate to false! (
# remember the subtleties of IEEE754 artithmetics). Use stuff like
# @c fpclassify instead.
# @note Normal vectors computed by Assimp are always unit-length.
# However, this needn't apply for normals that have been taken
# directly from the model file.
("mNormals", POINTER(Vector3D)),
# Vertex tangents.
# The tangent of a vertex points in the direction of the positive
# X texture axis. The array contains normalized vectors, NULL if
# not present. The array is mNumVertices in size. A mesh consisting
# of points and lines only may not have normal vectors. Meshes with
# mixed primitive types (i.e. lines and triangles) may have
# normals, but the normals for vertices that are only referenced by
# point or line primitives are undefined and set to qNaN. See
# the
#mNormals member for a detailed discussion of qNaNs.
# @note If the mesh contains tangents, it automatically also
# contains bitangents (the bitangent is just the cross product of
# tangent and normal vectors).
("mTangents", POINTER(Vector3D)),
# Vertex bitangents.
# The bitangent of a vertex points in the direction of the positive
# Y texture axis. The array contains normalized vectors, NULL if not
# present. The array is mNumVertices in size.
# @note If the mesh contains tangents, it automatically also contains
# bitangents.
("mBitangents", POINTER(Vector3D)),
# Vertex color sets.
# A mesh may contain 0 to
#AI_MAX_NUMBER_OF_COLOR_SETS vertex
# colors per vertex. NULL if not present. Each array is
# mNumVertices in size if present.
("mColors", POINTER(Color4D)*AI_MAX_NUMBER_OF_COLOR_SETS),
# Vertex texture coords, also known as UV channels.
# A mesh may contain 0 to AI_MAX_NUMBER_OF_TEXTURECOORDS per
# vertex. NULL if not present. The array is mNumVertices in size.
("mTextureCoords", POINTER(Vector3D)*AI_MAX_NUMBER_OF_TEXTURECOORDS),
# Specifies the number of components for a given UV channel.
# Up to three channels are supported (UVW, for accessing volume
# or cube maps). If the value is 2 for a given channel n, the
# component p.z of mTextureCoords[n][p] is set to 0.0f.
# If the value is 1 for a given channel, p.y is set to 0.0f, too.
# @note 4D coords are not supported
("mNumUVComponents", c_uint*AI_MAX_NUMBER_OF_TEXTURECOORDS),
# The faces the mesh is constructed from.
# Each face refers to a number of vertices by their indices.
# This array is always present in a mesh, its size is given
# in mNumFaces. If the
#AI_SCENE_FLAGS_NON_VERBOSE_FORMAT
# is NOT set each face references an unique set of vertices.
("mFaces", POINTER(Face)),
# The number of bones this mesh contains.
# Can be 0, in which case the mBones array is NULL.
("mNumBones", c_uint),
# The bones of this mesh.
# A bone consists of a name by which it can be found in the
# frame hierarchy and a set of vertex weights.
("mBones", POINTER(POINTER(Bone))),
# The material used by this mesh.
# A mesh does use only a single material. If an imported model uses
# multiple materials, the import splits up the mesh. Use this value
# as index into the scene's material list.
("mMaterialIndex", c_uint),
# Name of the mesh. Meshes can be named, but this is not a
# requirement and leaving this field empty is totally fine.
# There are mainly three uses for mesh names:
# - some formats name nodes and meshes independently.
# - importers tend to split meshes up to meet the
# one-material-per-mesh requirement. Assigning
# the same (dummy) name to each of the result meshes
# aids the caller at recovering the original mesh
# partitioning.
# - Vertex animations refer to meshes by their names.
("mName", String),
# NOT CURRENTLY IN USE. The number of attachment meshes
("mNumAnimMeshes", c_uint),
# NOT CURRENTLY IN USE. Attachment meshes for this mesh, for vertex-based animation.
# Attachment meshes carry replacement data for some of the
# mesh'es vertex components (usually positions, normals).
]
class Camera(Structure):
"""
See 'aiCamera.h' for details.
"""
_fields_ = [
# The name of the camera.
# There must be a node in the scenegraph with the same name.
# This node specifies the position of the camera in the scene
# hierarchy and can be animated.
("mName", String),
# Position of the camera relative to the coordinate space
# defined by the corresponding node.
# The default value is 0|0|0.
("mPosition", Vector3D),
# 'Up' - vector of the camera coordinate system relative to
# the coordinate space defined by the corresponding node.
# The 'right' vector of the camera coordinate system is
# the cross product of the up and lookAt vectors.
# The default value is 0|1|0. The vector
# may be normalized, but it needn't.
("mUp", Vector3D),
# 'LookAt' - vector of the camera coordinate system relative to
# the coordinate space defined by the corresponding node.
# This is the viewing direction of the user.
# The default value is 0|0|1. The vector
# may be normalized, but it needn't.
("mLookAt", Vector3D),
# Half horizontal field of view angle, in radians.
# The field of view angle is the angle between the center
# line of the screen and the left or right border.
# The default value is 1/4PI.
("mHorizontalFOV", c_float),
# Distance of the near clipping plane from the camera.
# The value may not be 0.f (for arithmetic reasons to prevent
# a division through zero). The default value is 0.1f.
("mClipPlaneNear", c_float),
# Distance of the far clipping plane from the camera.
# The far clipping plane must, of course, be further away than the
# near clipping plane. The default value is 1000.f. The ratio
# between the near and the far plane should not be too
# large (between 1000-10000 should be ok) to avoid floating-point
# inaccuracies which could lead to z-fighting.
("mClipPlaneFar", c_float),
# Screen aspect ratio.
# This is the ration between the width and the height of the
# screen. Typical values are 4/3, 1/2 or 1/1. This value is
# 0 if the aspect ratio is not defined in the source file.
# 0 is also the default value.
("mAspect", c_float),
]
class VectorKey(Structure):
"""
See 'aiAnim.h' for details.
"""
_fields_ = [
# The time of this key
("mTime", c_double),
# The value of this key
("mValue", Vector3D),
]
class QuatKey(Structure):
"""
See 'aiAnim.h' for details.
"""
_fields_ = [
# The time of this key
("mTime", c_double),
# The value of this key
("mValue", Quaternion),
]
class NodeAnim(Structure):
"""
See 'aiAnim.h' for details.
"""
_fields_ = [
# The name of the node affected by this animation. The node
# must exist and it must be unique.
("mNodeName", String),
# The number of position keys
("mNumPositionKeys", c_uint),
# The position keys of this animation channel. Positions are
# specified as 3D vector. The array is mNumPositionKeys in size.
# If there are position keys, there will also be at least one
# scaling and one rotation key.
("mPositionKeys", POINTER(VectorKey)),
# The number of rotation keys
("mNumRotationKeys", c_uint),
# The rotation keys of this animation channel. Rotations are
# given as quaternions, which are 4D vectors. The array is
# mNumRotationKeys in size.
# If there are rotation keys, there will also be at least one
# scaling and one position key.
("mRotationKeys", POINTER(QuatKey)),
# The number of scaling keys
("mNumScalingKeys", c_uint),
# The scaling keys of this animation channel. Scalings are
# specified as 3D vector. The array is mNumScalingKeys in size.
# If there are scaling keys, there will also be at least one
# position and one rotation key.
("mScalingKeys", POINTER(VectorKey)),
# Defines how the animation behaves before the first
# key is encountered.
# The default value is aiAnimBehaviour_DEFAULT (the original
# transformation matrix of the affected node is used).
("mPreState", c_uint),
# Defines how the animation behaves after the last
# key was processed.
# The default value is aiAnimBehaviour_DEFAULT (the original
# transformation matrix of the affected node is taken).
("mPostState", c_uint),
]
class Animation(Structure):
"""
See 'aiAnim.h' for details.
"""
_fields_ = [
# The name of the animation. If the modeling package this data was
# exported from does support only a single animation channel, this
# name is usually empty (length is zero).
("mName", String),
# Duration of the animation in ticks.
("mDuration", c_double),
# Ticks per second. 0 if not specified in the imported file
("mTicksPerSecond", c_double),
# The number of bone animation channels. Each channel affects
# a single node.
("mNumChannels", c_uint),
# The node animation channels. Each channel affects a single node.
# The array is mNumChannels in size.
("mChannels", POINTER(POINTER(NodeAnim))),
# The number of mesh animation channels. Each channel affects
# a single mesh and defines vertex-based animation.
("mNumMeshChannels", c_uint),
            # The mesh animation channels. Each channel affects a single mesh.
            # The array is mNumMeshChannels in size.
            # NOTE: the matching "mMeshChannels" field is not declared in this
            # binding, so mNumMeshChannels is the last field of this struct.
]
class Scene(Structure):
"""
See 'aiScene.h' for details.
"""
AI_SCENE_FLAGS_INCOMPLETE = 0x1
AI_SCENE_FLAGS_VALIDATED = 0x2
AI_SCENE_FLAGS_VALIDATION_WARNING = 0x4
AI_SCENE_FLAGS_NON_VERBOSE_FORMAT = 0x8
AI_SCENE_FLAGS_TERRAIN = 0x10
_fields_ = [
# Any combination of the AI_SCENE_FLAGS_XXX flags. By default
# this value is 0, no flags are set. Most applications will
# want to reject all scenes with the AI_SCENE_FLAGS_INCOMPLETE
# bit set.
("mFlags", c_uint),
# The root node of the hierarchy.
# There will always be at least the root node if the import
# was successful (and no special flags have been set).
# Presence of further nodes depends on the format and content
# of the imported file.
("mRootNode", POINTER(Node)),
# The number of meshes in the scene.
("mNumMeshes", c_uint),
# The array of meshes.
# Use the indices given in the aiNode structure to access
# this array. The array is mNumMeshes in size. If the
# AI_SCENE_FLAGS_INCOMPLETE flag is not set there will always
            # be at least ONE mesh.
("mMeshes", POINTER(POINTER(Mesh))),
# The number of materials in the scene.
("mNumMaterials", c_uint),
# The array of materials.
# Use the index given in each aiMesh structure to access this
# array. The array is mNumMaterials in size. If the
# AI_SCENE_FLAGS_INCOMPLETE flag is not set there will always
# be at least ONE material.
("mMaterials", POINTER(POINTER(Material))),
# The number of animations in the scene.
("mNumAnimations", c_uint),
# The array of animations.
# All animations imported from the given file are listed here.
# The array is mNumAnimations in size.
("mAnimations", POINTER(POINTER(Animation))),
# The number of textures embedded into the file
("mNumTextures", c_uint),
# The array of embedded textures.
# Not many file formats embed their textures into the file.
# An example is Quake's MDL format (which is also used by
# some GameStudio versions)
("mTextures", POINTER(POINTER(Texture))),
# The number of light sources in the scene. Light sources
# are fully optional, in most cases this attribute will be 0
("mNumLights", c_uint),
# The array of light sources.
# All light sources imported from the given file are
# listed here. The array is mNumLights in size.
("mLights", POINTER(POINTER(Light))),
# The number of cameras in the scene. Cameras
# are fully optional, in most cases this attribute will be 0
("mNumCameras", c_uint),
# The array of cameras.
# All cameras imported from the given file are listed here.
# The array is mNumCameras in size. The first camera in the
# array (if existing) is the default camera view into
# the scene.
("mCameras", POINTER(POINTER(Camera))),
]
assimp_structs_as_tuple = (Matrix4x4,
Matrix3x3,
Vector2D,
Vector3D,
Color3D,
Color4D,
Quaternion,
Plane,
Texel)
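# Illustrative sketch (added; not part of the original binding), showing
# how the structures above are typically traversed through ctypes. The
# `scene` argument is assumed to be a ctypes POINTER(Scene) obtained
# elsewhere from the assimp C API; only names defined in this module are
# used.
def _example_dump_animations(scene):
    contents = scene.contents
    for i in range(contents.mNumAnimations):
        # mAnimations is POINTER(POINTER(Animation)): index, then deref.
        anim = contents.mAnimations[i].contents
        print("animation %d: %f ticks long" % (i, anim.mDuration))
        for c in range(anim.mNumChannels):
            channel = anim.mChannels[c].contents
            print("  channel %d: %d position keys" % (c, channel.mNumPositionKeys))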
| xupei0610/ComputerGraphics-HW | hw4/lib/assimp/port/PyAssimp/pyassimp/structs.py | Python | mit | 34,579 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django.core.urlresolvers import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from openstack_dashboard import api
from openstack_dashboard.dashboards.admin.hypervisors.compute \
import forms as project_forms
class EvacuateHostView(forms.ModalFormView):
form_class = project_forms.EvacuateHostForm
template_name = 'admin/hypervisors/compute/evacuate_host.html'
context_object_name = 'compute_host'
success_url = reverse_lazy("horizon:admin:hypervisors:index")
page_title = _("Evacuate Host")
submit_label = page_title
def get_context_data(self, **kwargs):
context = super(EvacuateHostView, self).get_context_data(**kwargs)
context["compute_host"] = self.kwargs['compute_host']
return context
def get_active_compute_hosts_names(self, *args, **kwargs):
try:
services = api.nova.service_list(self.request,
binary='nova-compute')
return [service.host for service in services
if service.state == 'up']
except Exception:
redirect = reverse("horizon:admin:hypervisors:index")
msg = _('Unable to retrieve compute host information.')
exceptions.handle(self.request, msg, redirect=redirect)
def get_initial(self):
initial = super(EvacuateHostView, self).get_initial()
hosts = self.get_active_compute_hosts_names()
current_host = self.kwargs['compute_host']
initial.update({'current_host': current_host,
'hosts': hosts})
return initial
class DisableServiceView(forms.ModalFormView):
form_class = project_forms.DisableServiceForm
template_name = 'admin/hypervisors/compute/disable_service.html'
context_object_name = 'compute_host'
success_url = reverse_lazy("horizon:admin:hypervisors:index")
page_title = _("Disable Service")
submit_label = page_title
def get_context_data(self, **kwargs):
context = super(DisableServiceView, self).get_context_data(**kwargs)
context["compute_host"] = self.kwargs['compute_host']
return context
def get_initial(self):
initial = super(DisableServiceView, self).get_initial()
initial.update({'host': self.kwargs['compute_host']})
return initial
class MigrateHostView(forms.ModalFormView):
form_class = project_forms.MigrateHostForm
template_name = 'admin/hypervisors/compute/migrate_host.html'
context_object_name = 'compute_host'
success_url = reverse_lazy("horizon:admin:hypervisors:index")
page_title = _("Migrate Host")
submit_label = page_title
def get_context_data(self, **kwargs):
context = super(MigrateHostView, self).get_context_data(**kwargs)
context["compute_host"] = self.kwargs['compute_host']
return context
def get_initial(self):
initial = super(MigrateHostView, self).get_initial()
current_host = self.kwargs['compute_host']
initial.update({
'current_host': current_host,
'live_migrate': True,
'block_migration': False,
'disk_over_commit': False
})
return initial
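# Illustrative sketch (assumption; not from this file): these views are
# typically wired into the panel's urls.py along these lines.
#
#   from django.conf.urls import url
#   from openstack_dashboard.dashboards.admin.hypervisors.compute import views
#
#   urlpatterns = [
#       url(r'^(?P<compute_host>[^/]+)/evacuate_host$',
#           views.EvacuateHostView.as_view(), name='evacuate_host'),
#       url(r'^(?P<compute_host>[^/]+)/disable_service$',
#           views.DisableServiceView.as_view(), name='disable_service'),
#       url(r'^(?P<compute_host>[^/]+)/migrate_host$',
#           views.MigrateHostView.as_view(), name='migrate_host'),
#   ]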
| coreycb/horizon | openstack_dashboard/dashboards/admin/hypervisors/compute/views.py | Python | apache-2.0 | 3,885 |
# -*- tab-width: 4; indent-tabs-mode: nil; py-indent-offset: 4 -*-
#
# This file is part of the LibreOffice project.
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
import six
import gdb
from libreoffice.util import printing
class ImplSchedulerDataPrinter(object):
'''Prints the ImplSchedulerData linked list.
This can be used to dump the current state of the scheduler via:
p *ImplGetSVData()->mpFirstSchedulerData
'''
def __init__(self, typename, value):
self.typename = typename
self.value = value
self.timer_type_ptr = gdb.lookup_type("Timer").pointer()
self.idle_type_ptr = gdb.lookup_type("Idle").pointer()
def as_string(self, gdbobj):
if gdbobj['mpTask']:
task = gdbobj['mpTask'].dereference()
timer = gdbobj['mpTask'].dynamic_cast( self.timer_type_ptr )
idle = gdbobj['mpTask'].dynamic_cast( self.idle_type_ptr )
if idle:
task_type = "Idle"
elif timer:
task_type = "Timer"
else:
task_type = "Task"
res = "{:7s}{:10s} active: {:6s}".format( task_type, str(task['mePriority']), str(task['mbActive']) )
name = task['mpDebugName']
if not name:
res = res + " (task debug name not set)"
else:
res = "{} '{}' ({})".format(res, str(name.string()), str(task.dynamic_type))
val_type = gdb.lookup_type(str( task.dynamic_type )).pointer()
timer = gdbobj['mpTask'].cast( val_type )
if (task_type == "Timer"):
res = "{}: {}ms".format(res, timer['mnTimeout'])
else:
assert 1 == timer['mnTimeout'], "Idle with timeout == {}".format( timer['mnTimeout'] )
return res
else:
assert gdbobj['mbDelete'], "No task set and not marked for deletion!"
return "(no task)"
def to_string(self):
return self.typename
def children(self):
return self._iterator(self)
def display_hint(self):
return 'array'
class _iterator(six.Iterator):
def __init__(self, printer):
self.pos = 0
self.printer = printer
self.value = printer.value
def __iter__(self):
return self
def __next__(self):
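            # Note (added): iteration stops as soon as the current node's
            # mpNext is null, so the final node in the list is itself
            # never emitted.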
if not self.value['mpNext']:
raise StopIteration()
pos = str(self.pos)
name = "\n " + self.printer.as_string(self.value)
self.value = self.value['mpNext']
self.pos += 1
return (pos, name)
printer = None
def build_pretty_printers():
global printer
printer = printing.Printer("libreoffice/vcl")
printer.add('ImplSchedulerData', ImplSchedulerDataPrinter)
def register_pretty_printers(obj):
printing.register_pretty_printer(printer, obj)
build_pretty_printers()
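# Illustrative usage sketch (assumption): with the LibreOffice helper
# modules on gdb's Python path, something like
#
#   (gdb) python import libreoffice.vcl; \
#         libreoffice.vcl.register_pretty_printers(gdb.current_objfile())
#   (gdb) print *ImplGetSVData()->mpFirstSchedulerData
#
# would route the scheduler list through ImplSchedulerDataPrinter.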
# vim:set shiftwidth=4 softtabstop=4 expandtab:
| beppec56/core | solenv/gdb/libreoffice/vcl.py | Python | gpl-3.0 | 3,155 |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'TopicSkeleton.description'
db.add_column(u'detective_topicskeleton', 'description',
self.gf('tinymce.models.HTMLField')(null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'TopicSkeleton.description'
db.delete_column(u'detective_topicskeleton', 'description')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'detective.article': {
'Meta': {'object_name': 'Article'},
'content': ('tinymce.models.HTMLField', [], {'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '250'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'topic': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['detective.Topic']"})
},
u'detective.detectiveprofileuser': {
'Meta': {'object_name': 'DetectiveProfileUser'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'organization': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'plan': ('django.db.models.fields.CharField', [], {'default': "'free'", 'max_length': '10'}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True'})
},
u'detective.quoterequest': {
'Meta': {'object_name': 'QuoteRequest'},
'comment': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'domain': ('django.db.models.fields.TextField', [], {}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '100'}),
'employer': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'public': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'records': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'users': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
u'detective.searchterm': {
'Meta': {'object_name': 'SearchTerm'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_literal': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'label': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'subject': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250', 'null': 'True', 'blank': 'True'}),
'topic': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['detective.Topic']"})
},
u'detective.topic': {
'Meta': {'unique_together': "(('slug', 'author'),)", 'object_name': 'Topic'},
'about': ('tinymce.models.HTMLField', [], {'null': 'True', 'blank': 'True'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True'}),
'background': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'contributor_group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
'description': ('tinymce.models.HTMLField', [], {'null': 'True', 'blank': 'True'}),
'featured': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ontology_as_json': ('jsonfield.fields.JSONField', [], {'null': 'True', 'blank': 'True'}),
'ontology_as_mod': ('django.db.models.fields.SlugField', [], {'max_length': '250', 'blank': 'True'}),
'ontology_as_owl': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'skeleton_title': ('django.db.models.fields.CharField', [], {'default': "'No skeleton'", 'max_length': '250'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '250'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '250'})
},
u'detective.topicskeleton': {
'Meta': {'object_name': 'TopicSkeleton'},
'description': ('tinymce.models.HTMLField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ontology': ('jsonfield.fields.JSONField', [], {'null': 'True', 'blank': 'True'}),
'picture': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'picture_credits': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
'schema_picture': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'target_plans': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '250'})
},
u'detective.topictoken': {
'Meta': {'unique_together': "(('topic', 'email'),)", 'object_name': 'TopicToken'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '255', 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'token': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'topic': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['detective.Topic']"})
}
}
    complete_apps = ['detective']
| jplusplus/detective.io | app/detective/migrations/0031_auto__add_field_topicskeleton_description.py | Python | lgpl-3.0 | 10498 |
# Sketch - A Python-based interactive drawing program
# Copyright (C) 1996, 1997, 1998, 1999 by Bernhard Herzog
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# The layer classes.
#
from types import TupleType
from Sketch.const import LAYER_STATE, LAYER_COLOR
from Sketch import _, NullUndo, EmptyRect, InfinityRect, Point, config, _sketch
import color
import selinfo
from compound import EditableCompound
class Layer(EditableCompound):
can_be_empty = 1
is_Layer = 1
is_SpecialLayer = 0
is_GridLayer = 0
is_GuideLayer = 0
def __init__(self, name = _("New Layer"),
visible = 1, printable = 1, locked = 0,
outlined = 0, outline_color = (0, 0, 0)):
EditableCompound.__init__(self, [])
self.name = name
self.visible = visible
self.printable = printable
self.locked = locked
self.outlined = outlined
if type(outline_color) == TupleType:
self.outline_color = apply(color.CreateRGBColor, outline_color)
else:
self.outline_color = outline_color
def Draw(self, device, rect = None):
# Draw all objects on the device device. RECT, if provided,
# gives the bounding rect of the region to be drawn allowing to
# optimize the redisplay by drawing only those objects that
# overlap with this rect.
if device.draw_visible and self.visible \
or device.draw_printable and self.printable:
outlined = self.outlined or device.IsOutlineActive()
if outlined:
device.StartOutlineMode(self.outline_color)
EditableCompound.DrawShape(self, device, rect)
if outlined:
device.EndOutlineMode()
def SelectSubobject(self, p, rect, device, path = (), *rest):
if not self.CanSelect():
return None
if self.outlined:
device.StartOutlineMode()
try:
result = EditableCompound.SelectSubobject(self, p, rect, device,
path)
finally:
if self.outlined:
device.EndOutlineMode()
return result
def SelectRect(self, rect):
if not self.CanSelect():
return []
test = rect.contains_rect
build_info = selinfo.build_info
selected = []
objects = self.objects
for idx in range(len(objects)):
obj = objects[idx]
if test(obj.bounding_rect):
selected.append(build_info(idx, obj))
return selected
def SelectAll(self):
if self.CanSelect():
return selinfo.select_all(self.objects)
else:
return []
def SelectionInfo(self, child, cache = None):
info = selinfo.build_info(_sketch.IdIndex(self.objects, child), child)
return selinfo.prepend_idx(self.document.LayerIndex(self), info)
def PickObject(self, p, rect, device):
if not self.visible:
return None
if self.outlined:
device.StartOutlineMode()
result = EditableCompound.PickObject(self, p, rect, device)
if self.outlined:
device.EndOutlineMode()
return result
def SetName(self, name):
undo = (self.SetName, self.name)
self.name = name
return undo
def Name(self):
return self.name
def NumObjects(self):
return len(self.objects)
def Visible(self):
return self.visible
def Printable(self):
return self.printable
def Locked(self):
return self.locked
def CanSelect(self):
return not self.locked and self.visible
def get_state(self):
return (self.visible, self.printable, self.locked, self.outlined)
def SetState(self, visible, printable, locked, outlined):
# set the layer state. Return undo info.
#
# Side Effect:
# Queue a LAYER message with parameter LAYER_STATE + tuple.
# The tuple has the form
#
# (layer, visible_changed, printable_changed, outline_changed)
#
# We assume here that the receiver (usually SketchCanvas or
# SketchView) uses this to determine whether to repaint parts of
# the screen. If the receiver shows only visible layers and
# allows outline, it should use the following expression to
# determine whether to redraw or not:
#
# layer.NumObjects() and (visible_changed or
# (outlined_changed and layer.Visible()))
#
# If you only show printable layers:
#
# layer.NumObjects() and printable_changed
#
# (in that case outline mode should be ignored as it is only
# meant for quicker or clearer display while editing)
#
# The bounding rect of the now invalid region is
# layer.bounding_rect
oldstate = self.get_state()
visible_changed = self.visible != visible
self.visible = visible
printable_changed = self.printable != printable
self.printable = printable
locked_changed = self.locked != locked
self.locked = locked
outlined_changed = self.outlined != outlined
self.outlined = outlined
if oldstate != self.get_state():
undo = (self.SetState,) + oldstate
visibility = (self, visible_changed, printable_changed,
outlined_changed)
if self.document is not None:
self.document.queue_layer(LAYER_STATE, visibility)
if locked_changed:
self.document.update_active_layer()
return undo
return NullUndo
def SetOutlineColor(self, color):
undo = (self.SetOutlineColor, self.outline_color)
self.outline_color = color
if self.document is not None:
self.document.queue_layer(LAYER_COLOR, self)
return undo
def OutlineColor(self):
return self.outline_color
def Outlined(self):
return self.outlined
def SaveToFile(self, file):
file.BeginLayer(self.name, self.visible, self.printable, self.locked,
self.outlined, self.outline_color)
for obj in self.objects:
obj.SaveToFile(file)
file.EndLayer()
class SpecialLayer(Layer):
is_SpecialLayer = 1
def __none(self, *args):
return None
SelectSubobject = __none
PickObject = __none
def SelectRect(self, *rect):
return []
SelectAll = SelectRect
class GuideLayer(SpecialLayer):
is_GuideLayer = 1
def __init__(self, name = _("Guides"), visible = 1, printable = 0,
locked = 0, outlined = 1, outline_color = None):
if outline_color is None:
outline_color = config.preferences.guide_color
SpecialLayer.__init__(self, name, visible, 0, locked, 1,
outline_color)
def SetState(self, visible, printable, locked, outlined):
return SpecialLayer.SetState(self, visible, 0, locked, outlined)
def Draw(self, device, rect = None):
if device.draw_visible and self.visible \
or device.draw_printable and self.printable:
device.StartOutlineMode(self.outline_color)
SpecialLayer.DrawShape(self, device)
device.EndOutlineMode()
def SelectSubobject(self, p, rect, device, path = (), *rest):
if not self.CanSelect():
return None
device.StartOutlineMode()
try:
objects = self.objects
for obj_idx in range(len(objects) - 1, -1, -1):
obj = objects[obj_idx]
if obj.Hit(p, rect, device):
result = obj.SelectSubobject(p, rect, device)
return selinfo.prepend_idx(obj_idx, result)
return None
finally:
device.EndOutlineMode()
def SelectRect(self, rect):
if not self.CanSelect():
return []
test = rect.contains_rect
build_info = selinfo.build_info
selected = []
objects = self.objects
for idx in range(len(objects)):
obj = objects[idx]
if not obj.is_GuideLine and test(obj.bounding_rect):
selected.append(build_info(idx, obj))
return selected
def SelectAll(self):
return self.SelectRect(InfinityRect)
def compute_rects(self):
if self.objects:
self.bounding_rect = self.coord_rect = InfinityRect
else:
self.bounding_rect = self.coord_rect = EmptyRect
def SaveToFile(self, file):
file.BeginGuideLayer(self.name, self.visible, self.printable,
self.locked, self.outlined, self.outline_color)
for obj in self.objects:
obj.SaveToFile(file)
file.EndGuideLayer()
def Snap(self, p):
default = (1e100, p)
horizontal = [default]
vertical = [default]
result = [default]
for obj in self.objects:
dist, snapped = obj.Snap(p)
if type(snapped) == TupleType:
if snapped[0] is None:
horizontal.append((dist, snapped))
else:
vertical.append((dist, snapped))
else:
result.append((dist, snapped))
return min(horizontal), min(vertical), min(result)
def GuideLines(self):
result = self.objects[:]
for idx in range(len(result) - 1, -1, -1):
if not result[idx].is_GuideLine:
del result[idx]
return result
def issue_changed(self):
Layer.issue_changed(self)
if self.document is not None:
self.document.GuideLayerChanged(self)
class GridLayer(SpecialLayer):
is_GridLayer = 1
geometry = (0, 0, 20, 20)
def __init__(self, geometry = None, visible = None, outline_color = None,
name = _("Grid")):
# The grid is locked, outlined and not printable
if geometry is None:
geometry = config.preferences.grid_geometry
if visible is None:
visible = config.preferences.grid_visible
if outline_color is None:
outline_color = config.preferences.grid_color
SpecialLayer.__init__(self, name, visible, 0, 1, 1, outline_color)
if len(geometry) == 2:
self.geometry = (0, 0) + geometry
elif len(geometry) == 4:
self.geometry = geometry
else:
raise ValueError, "grid tuple must have length 2 or 4"
def Draw(self, device, rect = None):
if device.draw_visible and self.visible \
or device.draw_printable and self.printable:
device.StartOutlineMode(self.outline_color)
xorg, yorg, xwidth, ywidth = self.geometry
device.DrawGrid(xorg, yorg, xwidth, ywidth, rect)
device.EndOutlineMode()
def update_rects(self):
self.bounding_rect = self.coord_rect = InfinityRect
def SaveToFile(self, file):
file.BeginGridLayer(self.geometry, self.visible, self.outline_color,
self.name)
file.EndGridLayer()
def Snap(self, p):
xorg, yorg, xwidth, ywidth = self.geometry
sx = round((p.x - xorg) / xwidth) * xwidth + xorg
sy = round((p.y - yorg) / ywidth) * ywidth + yorg
result = Point(sx, sy)
return (abs(result - p), result)
def SetState(self, visible, printable, locked, outlined):
return SpecialLayer.SetState(self, visible, 0, 1, 1)
def Geometry(self):
return self.geometry
def SetGeometry(self, geometry):
undo = (self.SetGeometry, self.geometry)
self.geometry = geometry
if self.document:
# a hack...
self.document.queue_layer(LAYER_COLOR, self)
return undo
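# Illustrative sketch (added; not part of the original module): the grid
# snapping in GridLayer.Snap() above is plain rounding to the nearest
# lattice point. The same arithmetic, stand-alone:
#
#   def snap_to_grid(x, y, xorg, yorg, xwidth, ywidth):
#       sx = round((x - xorg) / xwidth) * xwidth + xorg
#       sy = round((y - yorg) / ywidth) * ywidth + yorg
#       return sx, sy
#
#   snap_to_grid(23.0, 7.0, 0.0, 0.0, 20.0, 20.0)  # -> (20.0, 0.0)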
| shumik/skencil-c | Sketch/Graphics/layer.py | Python | gpl-2.0 | 10,895 |
#!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import matplotlib.pyplot as plt
import numpy as np
import pyaudio
import wave
from sound_encoder import SoundEncoder
CHUNK = 1024
FORMAT = pyaudio.paInt16
CHANNELS = 2
RATE = 44100
RECORD_SECONDS = 10
def getAudioStream():
p = pyaudio.PyAudio()
return p.open(format=FORMAT,
channels=CHANNELS,
rate=RATE,
input=True,
frames_per_buffer=CHUNK)
def transformData(data, window):
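    # Note (added): len(data) is a byte count. paInt16 samples are two
    # bytes wide, so len(data)/CHANNELS equals the total number of
    # interleaved int16 samples here only because CHANNELS happens to be 2;
    # the general formula would divide by the sample width instead.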
return np.array(wave.struct.unpack("%dh"%(len(data)/CHANNELS),\
data))*window
def visualizeSDRs(sdrs):
sdrsToVisualize = []
for sdr in sdrs:
sdrsToVisualize.append([255 if x else 0 for x in sdr])
imageArray = np.rot90(np.array(sdrsToVisualize))
plt.imshow(imageArray, cmap='Greys', interpolation='nearest')
plt.show()
def recordAndEncode(stream, soundEncoder):
window = np.blackman(CHANNELS*CHUNK)
sdrs = []
print "---recording---"
for _ in range(0, (RATE/CHUNK * RECORD_SECONDS)):
data = stream.read(CHUNK)
transformedData = transformData(data, window)
sdr = soundEncoder.encode(transformedData)
sdrs.append(sdr)
stream.stop_stream()
stream.close()
print "---done---"
return sdrs
if __name__ == "__main__":
n = 300
w = 31
minval = 20
maxval = 10000
soundEncoder = SoundEncoder(n, w, RATE, CHUNK, minval, maxval)
stream = getAudioStream()
sdrs = recordAndEncode(stream, soundEncoder)
visualizeSDRs(sdrs)
| akhilaananthram/nupic.research | sound_encoder/live_sound_encoding_demo.py | Python | gpl-3.0 | 2,476 |
# Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from six.moves import http_client
from testtools import matchers
from keystone.tests import unit
from keystone.tests.unit import test_v3
class EndpointPolicyTestCase(test_v3.RestfulTestCase):
"""Test endpoint policy CRUD.
    In general, the controller layer of the endpoint policy extension just
    marshals data to and from the underlying manager calls. Given that
the manager layer is tested in depth by the backend tests, the tests we
execute here concentrate on ensuring we are correctly passing and
presenting the data.
"""
def setUp(self):
super(EndpointPolicyTestCase, self).setUp()
self.policy = unit.new_policy_ref()
self.policy_api.create_policy(self.policy['id'], self.policy)
self.service = unit.new_service_ref()
self.catalog_api.create_service(self.service['id'], self.service)
self.endpoint = unit.new_endpoint_ref(self.service['id'], enabled=True,
interface='public',
region_id=self.region_id)
self.catalog_api.create_endpoint(self.endpoint['id'], self.endpoint)
self.region = unit.new_region_ref()
self.catalog_api.create_region(self.region)
def assert_head_and_get_return_same_response(self, url, expected_status):
self.get(url, expected_status=expected_status)
self.head(url, expected_status=expected_status)
# endpoint policy crud tests
def _crud_test(self, url):
# Test when the resource does not exist also ensures
# that there is not a false negative after creation.
self.assert_head_and_get_return_same_response(
url,
expected_status=http_client.NOT_FOUND)
self.put(url)
# test that the new resource is accessible.
self.assert_head_and_get_return_same_response(
url,
expected_status=http_client.NO_CONTENT)
self.delete(url)
# test that the deleted resource is no longer accessible
self.assert_head_and_get_return_same_response(
url,
expected_status=http_client.NOT_FOUND)
def test_crud_for_policy_for_explicit_endpoint(self):
"""PUT, HEAD and DELETE for explicit endpoint policy."""
url = ('/policies/%(policy_id)s/OS-ENDPOINT-POLICY'
'/endpoints/%(endpoint_id)s') % {
'policy_id': self.policy['id'],
'endpoint_id': self.endpoint['id']}
self._crud_test(url)
def test_crud_for_policy_for_service(self):
"""PUT, HEAD and DELETE for service endpoint policy."""
url = ('/policies/%(policy_id)s/OS-ENDPOINT-POLICY'
'/services/%(service_id)s') % {
'policy_id': self.policy['id'],
'service_id': self.service['id']}
self._crud_test(url)
def test_crud_for_policy_for_region_and_service(self):
"""PUT, HEAD and DELETE for region and service endpoint policy."""
url = ('/policies/%(policy_id)s/OS-ENDPOINT-POLICY'
'/services/%(service_id)s/regions/%(region_id)s') % {
'policy_id': self.policy['id'],
'service_id': self.service['id'],
'region_id': self.region['id']}
self._crud_test(url)
def test_get_policy_for_endpoint(self):
"""GET /endpoints/{endpoint_id}/policy."""
self.put('/policies/%(policy_id)s/OS-ENDPOINT-POLICY'
'/endpoints/%(endpoint_id)s' % {
'policy_id': self.policy['id'],
'endpoint_id': self.endpoint['id']})
self.head('/endpoints/%(endpoint_id)s/OS-ENDPOINT-POLICY'
'/policy' % {
'endpoint_id': self.endpoint['id']},
expected_status=http_client.OK)
r = self.get('/endpoints/%(endpoint_id)s/OS-ENDPOINT-POLICY'
'/policy' % {
'endpoint_id': self.endpoint['id']})
self.assertValidPolicyResponse(r, ref=self.policy)
def test_list_endpoints_for_policy(self):
"""GET /policies/%(policy_id}/endpoints."""
self.put('/policies/%(policy_id)s/OS-ENDPOINT-POLICY'
'/endpoints/%(endpoint_id)s' % {
'policy_id': self.policy['id'],
'endpoint_id': self.endpoint['id']})
r = self.get('/policies/%(policy_id)s/OS-ENDPOINT-POLICY'
'/endpoints' % {
'policy_id': self.policy['id']})
self.assertValidEndpointListResponse(r, ref=self.endpoint)
self.assertThat(r.result.get('endpoints'), matchers.HasLength(1))
def test_endpoint_association_cleanup_when_endpoint_deleted(self):
url = ('/policies/%(policy_id)s/OS-ENDPOINT-POLICY'
'/endpoints/%(endpoint_id)s') % {
'policy_id': self.policy['id'],
'endpoint_id': self.endpoint['id']}
self.put(url)
self.head(url)
self.delete('/endpoints/%(endpoint_id)s' % {
'endpoint_id': self.endpoint['id']})
self.head(url, expected_status=http_client.NOT_FOUND)
def test_region_service_association_cleanup_when_region_deleted(self):
url = ('/policies/%(policy_id)s/OS-ENDPOINT-POLICY'
'/services/%(service_id)s/regions/%(region_id)s') % {
'policy_id': self.policy['id'],
'service_id': self.service['id'],
'region_id': self.region['id']}
self.put(url)
self.head(url)
self.delete('/regions/%(region_id)s' % {
'region_id': self.region['id']})
self.head(url, expected_status=http_client.NOT_FOUND)
def test_region_service_association_cleanup_when_service_deleted(self):
url = ('/policies/%(policy_id)s/OS-ENDPOINT-POLICY'
'/services/%(service_id)s/regions/%(region_id)s') % {
'policy_id': self.policy['id'],
'service_id': self.service['id'],
'region_id': self.region['id']}
self.put(url)
self.head(url)
self.delete('/services/%(service_id)s' % {
'service_id': self.service['id']})
self.head(url, expected_status=http_client.NOT_FOUND)
def test_service_association_cleanup_when_service_deleted(self):
url = ('/policies/%(policy_id)s/OS-ENDPOINT-POLICY'
'/services/%(service_id)s') % {
'policy_id': self.policy['id'],
'service_id': self.service['id']}
self.put(url)
self.get(url, expected_status=http_client.NO_CONTENT)
self.delete('/policies/%(policy_id)s' % {
'policy_id': self.policy['id']})
self.head(url, expected_status=http_client.NOT_FOUND)
def test_service_association_cleanup_when_policy_deleted(self):
url = ('/policies/%(policy_id)s/OS-ENDPOINT-POLICY'
'/services/%(service_id)s') % {
'policy_id': self.policy['id'],
'service_id': self.service['id']}
self.put(url)
self.get(url, expected_status=http_client.NO_CONTENT)
self.delete('/services/%(service_id)s' % {
'service_id': self.service['id']})
self.head(url, expected_status=http_client.NOT_FOUND)
class JsonHomeTests(test_v3.JsonHomeTestMixin):
EXTENSION_LOCATION = ('https://docs.openstack.org/api/openstack-identity/3'
'/ext/OS-ENDPOINT-POLICY/1.0/rel')
PARAM_LOCATION = ('https://docs.openstack.org/api/openstack-identity/3/'
'param')
JSON_HOME_DATA = {
EXTENSION_LOCATION + '/endpoint_policy': {
'href-template': '/endpoints/{endpoint_id}/OS-ENDPOINT-POLICY/'
'policy',
'href-vars': {
'endpoint_id': PARAM_LOCATION + '/endpoint_id',
},
},
EXTENSION_LOCATION + '/policy_endpoints': {
'href-template': '/policies/{policy_id}/OS-ENDPOINT-POLICY/'
'endpoints',
'href-vars': {
'policy_id': PARAM_LOCATION + '/policy_id',
},
},
EXTENSION_LOCATION + '/endpoint_policy_association': {
'href-template': '/policies/{policy_id}/OS-ENDPOINT-POLICY/'
'endpoints/{endpoint_id}',
'href-vars': {
'policy_id': PARAM_LOCATION + '/policy_id',
'endpoint_id': PARAM_LOCATION + '/endpoint_id',
},
},
EXTENSION_LOCATION + '/service_policy_association': {
'href-template': '/policies/{policy_id}/OS-ENDPOINT-POLICY/'
'services/{service_id}',
'href-vars': {
'policy_id': PARAM_LOCATION + '/policy_id',
'service_id': PARAM_LOCATION + '/service_id',
},
},
EXTENSION_LOCATION + '/region_and_service_policy_association': {
'href-template': '/policies/{policy_id}/OS-ENDPOINT-POLICY/'
'services/{service_id}/regions/{region_id}',
'href-vars': {
'policy_id': PARAM_LOCATION + '/policy_id',
'service_id': PARAM_LOCATION + '/service_id',
'region_id': PARAM_LOCATION + '/region_id',
},
},
}
| rajalokan/keystone | keystone/tests/unit/test_v3_endpoint_policy.py | Python | apache-2.0 | 10,097 |
# 334-increasing-triplet-subsequence.py
class Solution(object):
def increasingTriplet(self, nums):
"""
:type nums: List[int]
:rtype: bool
"""
if len(nums) < 3: return False
start = 0
minimal = nums[0]
        bound = 0 # Second-smallest value (must come after minimal)
for i, v in enumerate(nums):
if v < minimal:
minimal = v
continue
if v > minimal:
bound = v
start = i
break
if start == 0: return False
for i in xrange(start, len(nums)):
v = nums[i]
if v > bound: return True
if minimal < v < bound:
bound = v
elif v < minimal:
minimal = v
return False
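# Minimal sanity checks (added illustration; not part of the original
# submission). The solution runs in O(n) time and O(1) extra space.
if __name__ == "__main__":
    s = Solution()
    assert s.increasingTriplet([2, 1, 5, 0, 4, 6]) # 0 < 4 < 6
    assert not s.increasingTriplet([5, 4, 3, 2, 1]) # strictly decreasing
    assert not s.increasingTriplet([1, 1, 1]) # a strict increase is required
    print "all checks passed"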
| daicang/Leetcode-solutions | 334-increasing-triplet-subsequence.py | Python | mit | 870 |
"""
spring2.py
The rk4_two() routine in this program advances the two coupled
first-order equations (position and velocity) by one fourth-order
Runge-Kutta step, using an array method. The current x and xprime values
are kept in a global list named 'xxp', which the rate functions receive
as 'val':
val[0] = current position; val[1] = current velocity
The results are compared with analytically calculated values.
"""
from pylab import *
def accn(t, val):
force = -spring_const * val[0] - damping * val[1]
return force/mass
def vel(t, val):
return val[1]
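# Classical fourth-order Runge-Kutta step: with y = (x, xprime) and
# y' = f(t, y), one step of size h is
#     y(t+h) = y(t) + (h/6) * (k1 + 2*k2 + 2*k3 + k4)
# where k1 = f(t, y), k2 = f(t + h/2, y + (h/2)*k1),
#       k3 = f(t + h/2, y + (h/2)*k2), k4 = f(t + h, y + h*k3).
# rk4_two() below implements exactly this, component by component.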
def rk4_two(t, h): # Time and Step value
global xxp # x and xprime values in a 'xxp'
    k1 = [0,0]                  # initialize five 2-element work lists
k2 = [0,0]
k3 = [0,0]
k4 = [0,0]
tmp= [0,0]
k1[0] = vel(t,xxp)
k1[1] = accn(t,xxp)
for i in range(2): # value of functions at t + h/2
tmp[i] = xxp[i] + k1[i] * h/2
k2[0] = vel(t + h/2, tmp)
k2[1] = accn(t + h/2, tmp)
for i in range(2): # value of functions at t + h/2
tmp[i] = xxp[i] + k2[i] * h/2
k3[0] = vel(t + h/2, tmp)
k3[1] = accn(t + h/2, tmp)
for i in range(2): # value of functions at t + h
tmp[i] = xxp[i] + k3[i] * h
k4[0] = vel(t+h, tmp)
k4[1] = accn(t+h, tmp)
    for i in range(2):          # combine the four slopes for the final update
xxp[i] = xxp[i] + ( k1[i] + \
2.0*k2[i] + 2.0*k3[i] + k4[i]) * h/ 6.0
t = 0.0 # Stating time
h = 0.01 # Runge-Kutta step size, time increment
xxp = [2.0, 0.0] # initial position & velocity
spring_const = 100.0 # spring constant
mass = 2.0 # mass of the oscillating object
damping = 0.0
tm = [0.0] # Lists to store time, position & velocity
x = [xxp[0]]
xp = [xxp[1]]
xth = [xxp[0]]
while t < 5:
rk4_two(t,h) # Do one step RK integration
t = t + h
tm.append(t)
xp.append(xxp[1])
x.append(xxp[0])
th = 2.0 * cos(sqrt(spring_const/mass)* (t))
xth.append(th)
plot(tm,x)
plot(tm,xth,'+')
show()
| wavicles/pycode-browser | Code/Physics/spring2.py | Python | gpl-3.0 | 1,784 |
from __future__ import absolute_import
# #START_LICENSE###########################################################
#
#
# This file is part of the Environment for Tree Exploration program
# (ETE). http://etetoolkit.org
#
# ETE is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ETE is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
# License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ETE. If not, see <http://www.gnu.org/licenses/>.
#
#
# ABOUT THE ETE PACKAGE
# =====================
#
# ETE is distributed under the GPL copyleft license (2008-2015).
#
# If you make use of ETE in published work, please cite:
#
# Jaime Huerta-Cepas, Joaquin Dopazo and Toni Gabaldon.
# ETE: a python Environment for Tree Exploration. Jaime BMC
# Bioinformatics 2010,:24doi:10.1186/1471-2105-11-24
#
# Note that extra references to the specific methods implemented in
# the toolkit may be available in the documentation.
#
# More info at http://etetoolkit.org. Contact: [email protected]
#
#
# #END_LICENSE#############################################################
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'open_newick.ui'
#
# Created: Tue Jan 10 15:56:56 2012
# by: PyQt4 UI code generator 4.7.2
#
# WARNING! All changes made in this file will be lost!
from .qt import QtCore, QtGui
class Ui_OpenNewick(object):
def setupUi(self, OpenNewick):
OpenNewick.setObjectName("OpenNewick")
OpenNewick.resize(569, 353)
self.comboBox = QtGui.QComboBox(OpenNewick)
self.comboBox.setGeometry(QtCore.QRect(460, 300, 81, 23))
self.comboBox.setObjectName("comboBox")
self.widget = QtGui.QWidget(OpenNewick)
self.widget.setGeometry(QtCore.QRect(30, 10, 371, 321))
self.widget.setObjectName("widget")
self.retranslateUi(OpenNewick)
QtCore.QMetaObject.connectSlotsByName(OpenNewick)
def retranslateUi(self, OpenNewick):
OpenNewick.setWindowTitle(QtGui.QApplication.translate("OpenNewick", "Dialog", None, QtGui.QApplication.UnicodeUTF8))
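# Illustrative usage sketch (assumption; PyQt4-era API):
#
#   app = QtGui.QApplication([])
#   dialog = QtGui.QDialog()
#   ui = Ui_OpenNewick()
#   ui.setupUi(dialog)
#   dialog.show()
#   app.exec_()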
| Unode/ete | ete3/treeview/_open_newick.py | Python | gpl-3.0 | 2,521 |
from django.conf import settings
from django.contrib.auth.models import User
from rest_framework import authentication
from rest_framework import filters
from rest_framework import generics
from rest_framework import permissions
from rest_framework import viewsets
from user_api.serializers import UserSerializer, UserPreferenceSerializer
from user_api.models import UserPreference
class ApiKeyHeaderPermission(permissions.BasePermission):
def has_permission(self, request, view):
"""
Check for permissions by matching the configured API key and header
If settings.DEBUG is True and settings.EDX_API_KEY is not set or None,
then allow the request. Otherwise, allow the request if and only if
settings.EDX_API_KEY is set and the X-Edx-Api-Key HTTP header is
present in the request and matches the setting.
"""
api_key = getattr(settings, "EDX_API_KEY", None)
return (
(settings.DEBUG and api_key is None) or
(api_key is not None and request.META.get("HTTP_X_EDX_API_KEY") == api_key)
)
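# Illustrative request sketch (assumption; the endpoint path is not taken
# from this file): with EDX_API_KEY = "secret" in settings, a client
# would authenticate as
#
#   curl -H "X-Edx-Api-Key: secret" https://<host>/user_api/v1/users/
#
# Django exposes that header to has_permission() above as
# request.META["HTTP_X_EDX_API_KEY"].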
class UserViewSet(viewsets.ReadOnlyModelViewSet):
authentication_classes = (authentication.SessionAuthentication,)
permission_classes = (ApiKeyHeaderPermission,)
queryset = User.objects.all().prefetch_related("preferences")
serializer_class = UserSerializer
paginate_by = 10
paginate_by_param = "page_size"
class UserPreferenceViewSet(viewsets.ReadOnlyModelViewSet):
authentication_classes = (authentication.SessionAuthentication,)
permission_classes = (ApiKeyHeaderPermission,)
queryset = UserPreference.objects.all()
filter_backends = (filters.DjangoFilterBackend,)
filter_fields = ("key", "user")
serializer_class = UserPreferenceSerializer
paginate_by = 10
paginate_by_param = "page_size"
class PreferenceUsersListView(generics.ListAPIView):
authentication_classes = (authentication.SessionAuthentication,)
permission_classes = (ApiKeyHeaderPermission,)
serializer_class = UserSerializer
paginate_by = 10
paginate_by_param = "page_size"
def get_queryset(self):
return User.objects.filter(preferences__key=self.kwargs["pref_key"]).prefetch_related("preferences")
| hkawasaki/kawasaki-aio8-1 | common/djangoapps/user_api/views.py | Python | agpl-3.0 | 2,257 |
# -*- coding: utf-8 -*-
# See LICENSE.txt for licensing terms
#$HeadURL$
#$LastChangedDate$
#$LastChangedRevision$
import os
import sys
import re
from copy import copy
from types import *
from os.path import abspath, dirname, expanduser, join
import docutils.nodes
import reportlab
from reportlab.platypus import *
import reportlab.lib.colors as colors
import reportlab.lib.units as units
from reportlab.pdfbase.ttfonts import TTFont
from reportlab.lib.fonts import addMapping
from reportlab.lib.styles import *
from reportlab.lib.enums import *
from reportlab.pdfbase import pdfmetrics
import reportlab.lib.pagesizes as pagesizes
import reportlab.rl_config
from rst2pdf.rson import loads as rson_loads
import findfonts
from log import log
from opt_imports import ParagraphStyle, wordaxe, wordaxe_version
HAS_WORDAXE = wordaxe is not None
unit_separator = re.compile('(-?[0-9\.]*)')
class StyleSheet(object):
'''Class to handle a collection of stylesheets'''
@staticmethod
def stylepairs(data):
        ''' Allows pairs of style information to be expressed either in
            the canonical reportlab form (a list of two-item lists/tuples)
            or in a more human-readable dictionary.
'''
styles = data.get('styles', {})
try:
stylenames = styles.keys()
except AttributeError:
for style in styles:
yield style
return
# Traditional reportlab styles are in ordered (key, value)
# tuples. We also support dictionary lookup. This is not
# necessarily ordered.
# The only problem with dictionary lookup is that
        # we need to ensure that parents are processed before
# their children. This loop is a little ugly, but
# gets the job done.
while stylenames:
name = stylenames.pop()
parent = styles[name].get('parent')
if parent not in stylenames:
yield name, styles[name]
continue
names = [name]
while parent in stylenames:
stylenames.remove(parent)
names.append(parent)
parent = styles[names[-1]].get('parent')
while names:
name = names.pop()
yield name, styles[name]
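    # Example (added illustration): both inputs below yield equivalent
    # (name, dict) pairs from stylepairs(), with parents guaranteed to be
    # produced before their children in the dictionary form:
    #   {'styles': [['base', {...}], ['mystyle', {'parent': 'base'}]]}
    #   {'styles': {'mystyle': {'parent': 'base'}, 'base': {...}}}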
def __init__(self, flist, font_path=None, style_path=None, def_dpi=300):
log.info('Using stylesheets: %s' % ','.join(flist))
# find base path
if hasattr(sys, 'frozen'):
self.PATH = abspath(dirname(sys.executable))
else:
self.PATH = abspath(dirname(__file__))
# flist is a list of stylesheet filenames.
# They will be loaded and merged in order.
# but the two default stylesheets will always
# be loaded first
flist = [join(self.PATH, 'styles', 'styles.style'),
join(self.PATH, 'styles', 'default.style')] + flist
self.def_dpi=def_dpi
if font_path is None:
font_path=[]
font_path+=['.', os.path.join(self.PATH, 'fonts')]
self.FontSearchPath = map(os.path.expanduser, font_path)
if style_path is None:
style_path=[]
style_path+=['.', os.path.join(self.PATH, 'styles'),
'~/.rst2pdf/styles']
self.StyleSearchPath = map(os.path.expanduser, style_path)
self.FontSearchPath=list(set(self.FontSearchPath))
self.StyleSearchPath=list(set(self.StyleSearchPath))
log.info('FontPath:%s'%self.FontSearchPath)
log.info('StylePath:%s'%self.StyleSearchPath)
findfonts.flist = self.FontSearchPath
# Page width, height
self.pw = 0
self.ph = 0
# Page size [w,h]
self.ps = None
# Margins (top,bottom,left,right,gutter)
self.tm = 0
self.bm = 0
self.lm = 0
self.rm = 0
self.gm = 0
#text width
self.tw = 0
# Default emsize, later it will be the fontSize of the base style
self.emsize=10
self.languages = []
ssdata = self.readSheets(flist)
# Get pageSetup data from all stylessheets in order:
self.ps = pagesizes.A4
self.page={}
for data, ssname in ssdata:
page = data.get('pageSetup', {})
if page:
self.page.update(page)
pgs=page.get('size', None)
if pgs: # A standard size
pgs=pgs.upper()
if pgs in pagesizes.__dict__:
self.ps = list(pagesizes.__dict__[pgs])
self.psname = pgs
if 'width' in self.page: del(self.page['width'])
if 'height' in self.page: del(self.page['height'])
elif pgs.endswith('-LANDSCAPE'):
self.psname = pgs.split('-')[0]
self.ps = list(pagesizes.landscape(pagesizes.__dict__[self.psname]))
if 'width' in self.page: del(self.page['width'])
if 'height' in self.page: del(self.page['height'])
else:
log.critical('Unknown page size %s in stylesheet %s'%\
(page['size'], ssname))
continue
else: #A custom size
if 'size'in self.page:
del(self.page['size'])
# The sizes are expressed in some unit.
# For example, 2cm is 2 centimeters, and we need
# to do 2*cm (cm comes from reportlab.lib.units)
if 'width' in page:
self.ps[0] = self.adjustUnits(page['width'])
if 'height' in page:
self.ps[1] = self.adjustUnits(page['height'])
self.pw, self.ph = self.ps
if 'margin-left' in page:
self.lm = self.adjustUnits(page['margin-left'])
if 'margin-right' in page:
self.rm = self.adjustUnits(page['margin-right'])
if 'margin-top' in page:
self.tm = self.adjustUnits(page['margin-top'])
if 'margin-bottom' in page:
self.bm = self.adjustUnits(page['margin-bottom'])
if 'margin-gutter' in page:
self.gm = self.adjustUnits(page['margin-gutter'])
if 'spacing-header' in page:
self.ts = self.adjustUnits(page['spacing-header'])
if 'spacing-footer' in page:
self.bs = self.adjustUnits(page['spacing-footer'])
if 'firstTemplate' in page:
self.firstTemplate = page['firstTemplate']
# tw is the text width.
# We need it to calculate header-footer height
# and compress literal blocks.
self.tw = self.pw - self.lm - self.rm - self.gm
# Get page templates from all stylesheets
self.pageTemplates = {}
for data, ssname in ssdata:
templates = data.get('pageTemplates', {})
# templates is a dictionary of pageTemplates
for key in templates:
template = templates[key]
# template is a dict.
# template[´frames'] is a list of frames
if key in self.pageTemplates:
self.pageTemplates[key].update(template)
else:
self.pageTemplates[key] = template
# Get font aliases from all stylesheets in order
self.fontsAlias = {}
for data, ssname in ssdata:
self.fontsAlias.update(data.get('fontsAlias', {}))
embedded_fontnames = []
self.embedded = []
# Embed all fonts indicated in all stylesheets
for data, ssname in ssdata:
embedded = data.get('embeddedFonts', [])
for font in embedded:
try:
# Just a font name, try to embed it
if isinstance(font, unicode):
# See if we can find the font
fname, pos = findfonts.guessFont(font)
if font in embedded_fontnames:
pass
else:
fontList = findfonts.autoEmbed(font)
if fontList:
embedded_fontnames.append(font)
if not fontList:
if (fname, pos) in embedded_fontnames:
fontList = None
else:
fontList = findfonts.autoEmbed(fname)
if fontList is not None:
self.embedded += fontList
# Maybe the font we got is not called
# the same as the one we gave
# so check that out
suff = ["", "-Oblique", "-Bold", "-BoldOblique"]
if not fontList[0].startswith(font):
# We need to create font aliases, and use them
for fname, aliasname in zip(
fontList,
[font + suffix for suffix in suff]):
self.fontsAlias[aliasname] = fname
continue
# Each "font" is a list of four files, which will be
# used for regular / bold / italic / bold+italic
# versions of the font.
# If your font doesn't have one of them, just repeat
# the regular font.
# Example, using the Tuffy font from
# http://tulrich.com/fonts/
# "embeddedFonts" : [
# ["Tuffy.ttf",
# "Tuffy_Bold.ttf",
# "Tuffy_Italic.ttf",
# "Tuffy_Bold_Italic.ttf"]
# ],
# The fonts will be registered with the file name,
# minus the extension.
if font[0].lower().endswith('.ttf'): # A True Type font
for variant in font:
location=self.findFont(variant)
pdfmetrics.registerFont(
TTFont(str(variant.split('.')[0]),
location))
log.info('Registering font: %s from %s'%\
(str(variant.split('.')[0]),location))
self.embedded.append(str(variant.split('.')[0]))
# And map them all together
regular, bold, italic, bolditalic = [
variant.split('.')[0] for variant in font]
addMapping(regular, 0, 0, regular)
addMapping(regular, 0, 1, italic)
addMapping(regular, 1, 0, bold)
addMapping(regular, 1, 1, bolditalic)
else: # A Type 1 font
# For type 1 fonts we require
# [FontName,regular,italic,bold,bolditalic]
# where each variant is a (pfbfile,afmfile) pair.
# For example, for the URW palladio from TeX:
# ["Palatino",("uplr8a.pfb","uplr8a.afm"),
# ("uplri8a.pfb","uplri8a.afm"),
# ("uplb8a.pfb","uplb8a.afm"),
# ("uplbi8a.pfb","uplbi8a.afm")]
faceName = font[0]
regular = pdfmetrics.EmbeddedType1Face(*font[1])
italic = pdfmetrics.EmbeddedType1Face(*font[2])
bold = pdfmetrics.EmbeddedType1Face(*font[3])
bolditalic = pdfmetrics.EmbeddedType1Face(*font[4])
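                        # Note (added): these Type 1 faces are constructed
                        # here but are not registered with pdfmetrics in
                        # this branch.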
except Exception, e:
try:
if isinstance(font, list):
fname = font[0]
else:
fname = font
log.error("Error processing font %s: %s",
os.path.splitext(fname)[0], str(e))
log.error("Registering %s as Helvetica alias", fname)
self.fontsAlias[fname] = 'Helvetica'
except Exception, e:
log.critical("Error processing font %s: %s",
fname, str(e))
continue
# Go though all styles in all stylesheets and find all fontNames.
# Then decide what to do with them
for data, ssname in ssdata:
for [skey, style] in self.stylepairs(data):
for key in style:
if key == 'fontName' or key.endswith('FontName'):
# It's an alias, replace it
if style[key] in self.fontsAlias:
style[key] = self.fontsAlias[style[key]]
# Embedded already, nothing to do
if style[key] in self.embedded:
continue
# Standard font, nothing to do
if style[key] in (
"Courier",
"Courier-Bold",
"Courier-BoldOblique",
"Courier-Oblique",
"Helvetica",
"Helvetica-Bold",
"Helvetica-BoldOblique",
"Helvetica-Oblique",
"Symbol",
"Times-Bold",
"Times-BoldItalic",
"Times-Italic",
"Times-Roman",
"ZapfDingbats"):
continue
# Now we need to do something
# See if we can find the font
fname, pos = findfonts.guessFont(style[key])
if style[key] in embedded_fontnames:
pass
else:
fontList = findfonts.autoEmbed(style[key])
if fontList:
embedded_fontnames.append(style[key])
if not fontList:
if (fname, pos) in embedded_fontnames:
fontList = None
else:
fontList = findfonts.autoEmbed(fname)
if fontList:
embedded_fontnames.append((fname, pos))
if fontList:
self.embedded += fontList
# Maybe the font we got is not called
# the same as the one we gave so check that out
suff = ["", "-Bold", "-Oblique", "-BoldOblique"]
if not fontList[0].startswith(style[key]):
# We need to create font aliases, and use them
basefname=style[key].split('-')[0]
for fname, aliasname in zip(
fontList,
[basefname + suffix for
suffix in suff]):
self.fontsAlias[aliasname] = fname
style[key] = self.fontsAlias[basefname +\
suff[pos]]
else:
log.error("Unknown font: \"%s\","
"replacing with Helvetica", style[key])
style[key] = "Helvetica"
#log.info('FontList: %s'%self.embedded)
#log.info('FontAlias: %s'%self.fontsAlias)
# Get styles from all stylesheets in order
self.stylesheet = {}
self.styles = []
self.linkColor = 'navy'
# FIXME: linkColor should probably not be a global
# style, and tocColor should probably not
# be a special case, but for now I'm going
# with the flow...
self.tocColor = None
for data, ssname in ssdata:
self.linkColor = data.get('linkColor') or self.linkColor
self.tocColor = data.get('tocColor') or self.tocColor
for [skey, style] in self.stylepairs(data):
sdict = {}
# FIXME: this is done completely backwards
for key in style:
# Handle color references by name
if key == 'color' or key.endswith('Color') and style[key]:
style[key] = formatColor(style[key])
# Yet another workaround for the unicode bug in
# reportlab's toColor
elif key == 'commands':
style[key]=validateCommands(style[key])
#for command in style[key]:
#c=command[0].upper()
#if c=='ROWBACKGROUNDS':
#command[3]=[str(c) for c in command[3]]
#elif c in ['BOX','INNERGRID'] or c.startswith('LINE'):
#command[4]=str(command[4])
# Handle alignment constants
elif key == 'alignment':
style[key] = dict(TA_LEFT=0,
LEFT=0,
TA_CENTER=1,
CENTER=1,
TA_CENTRE=1,
CENTRE=1,
TA_RIGHT=2,
RIGHT=2,
TA_JUSTIFY=4,
JUSTIFY=4,
DECIMAL=8, )[style[key].upper()]
elif key == 'language':
if not style[key] in self.languages:
self.languages.append(style[key])
# Make keys str instead of unicode (required by reportlab)
sdict[str(key)] = style[key]
sdict['name'] = skey
# If the style already exists, update it
if skey in self.stylesheet:
self.stylesheet[skey].update(sdict)
else: # New style
self.stylesheet[skey] = sdict
self.styles.append(sdict)
# If the stylesheet has a style name docutils won't reach
# make a copy with a sanitized name.
# This may make name collisions possible but that should be
# rare (who would have custom_name and custom-name in the
# same stylesheet? ;-)
# Issue 339
styles2=[]
for s in self.styles:
if not re.match("^[a-z](-?[a-z0-9]+)*$", s['name']):
s2 = copy(s)
s2['name'] = docutils.nodes.make_id(s['name'])
log.warning('%s is an invalid docutils class name, adding alias %s'%(s['name'], s2['name']))
styles2.append(s2)
self.styles.extend(styles2)
# And create reportlabs stylesheet
self.StyleSheet = StyleSheet1()
# Patch to make the code compatible with reportlab from SVN 2.4+ and
# 2.4
if not hasattr(self.StyleSheet, 'has_key'):
self.StyleSheet.__class__.has_key = lambda s, k : k in s
for s in self.styles:
if 'parent' in s:
if s['parent'] is None:
if s['name'] != 'base':
s['parent'] = self.StyleSheet['base']
else:
del(s['parent'])
else:
s['parent'] = self.StyleSheet[s['parent']]
else:
if s['name'] != 'base':
s['parent'] = self.StyleSheet['base']
# If the style has no bulletFontName but it has a fontName, set it
if ('bulletFontName' not in s) and ('fontName' in s):
s['bulletFontName'] = s['fontName']
hasFS = True
# Adjust fontsize units
if 'fontSize' not in s:
s['fontSize'] = s['parent'].fontSize
s['trueFontSize']=None
hasFS = False
elif 'parent' in s:
# This means you can set the fontSize to
# "2cm" or to "150%" which will be calculated
# relative to the parent style
s['fontSize'] = self.adjustUnits(s['fontSize'],
s['parent'].fontSize)
s['trueFontSize']=s['fontSize']
else:
# If s has no parent, it's base, which has
# an explicit point size by default and %
# makes no sense, but guess it as % of 10pt
s['fontSize'] = self.adjustUnits(s['fontSize'], 10)
# If the leading is not set, but the size is, set it
if 'leading' not in s and hasFS:
s['leading'] = 1.2*s['fontSize']
# If the bullet font size is not set, set it as fontSize
if ('bulletFontSize' not in s) and ('fontSize' in s):
s['bulletFontSize'] = s['fontSize']
# If the borderPadding is a list and wordaxe <=0.3.2,
# convert it to an integer. Workaround for Issue
if 'borderPadding' in s and ((HAS_WORDAXE and \
wordaxe_version <='wordaxe 0.3.2') or
reportlab.Version < "2.3" )\
and isinstance(s['borderPadding'], list):
log.warning('Using a borderPadding list in '\
'style %s with wordaxe <= 0.3.2 or Reportlab < 2.3. That is not '\
'supported, so it will probably look wrong'%s['name'])
s['borderPadding']=s['borderPadding'][0]
self.StyleSheet.add(ParagraphStyle(**s))
self.emsize=self['base'].fontSize
# Make stdFont the basefont, for Issue 65
reportlab.rl_config.canvas_basefontname = self['base'].fontName
# Make stdFont the default font for table cell styles (Issue 65)
reportlab.platypus.tables.CellStyle1.fontname=self['base'].fontName
def __getitem__(self, key):
# This 'normalizes' the key.
# For example, if the key is todo_node (like sphinx uses), it will be
# converted to 'todo-node' which is a valid docutils class name.
if not re.match("^[a-z](-?[a-z0-9]+)*$", key):
key = docutils.nodes.make_id(key)
if self.StyleSheet.has_key(key):
return self.StyleSheet[key]
else:
if key.startswith('pygments'):
log.info("Using undefined style '%s'"
", aliased to style 'code'."%key)
newst=copy(self.StyleSheet['code'])
else:
log.warning("Using undefined style '%s'"
", aliased to style 'normal'."%key)
newst=copy(self.StyleSheet['normal'])
newst.name=key
self.StyleSheet.add(newst)
return newst
def readSheets(self, flist):
''' Read in the stylesheets. Return a list of
(sheetdata, sheetname) tuples.
Orders included sheets in front
of including sheets.
'''
# Process from end of flist
flist.reverse()
# Keep previously seen sheets in sheetdict
sheetdict = {}
result = []
while flist:
ssname = flist.pop()
data = sheetdict.get(ssname)
if data is None:
data = self.readStyle(ssname)
if data is None:
continue
sheetdict[ssname] = data
if 'options' in data and 'stylesheets' in data['options']:
flist.append(ssname)
newsheets = list(data['options']['stylesheets'])
newsheets.reverse()
flist.extend(newsheets)
continue
result.append((data, ssname))
return result
def readStyle(self, ssname):
# If callables are used, they should probably be subclassed
# strings, or something else that will print nicely for errors
if callable(ssname):
return ssname()
fname = self.findStyle(ssname)
if fname:
try:
return rson_loads(open(fname).read())
except ValueError, e: # Error parsing the JSON data
log.critical('Error parsing stylesheet "%s": %s'%\
(fname, str(e)))
except IOError, e: #Error opening the ssheet
log.critical('Error opening stylesheet "%s": %s'%\
(fname, str(e)))
def findStyle(self, fn):
"""Find the absolute file name for a given style filename.
Given a style filename, searches for it in StyleSearchPath
and returns the real file name.
"""
def innerFind(path, fn):
if os.path.isabs(fn):
if os.path.isfile(fn):
return fn
else:
for D in path:
tfn = os.path.join(D, fn)
if os.path.isfile(tfn):
return tfn
return None
for ext in ['', '.style', '.json']:
result = innerFind(self.StyleSearchPath, fn+ext)
if result:
break
if result is None:
log.warning("Can't find stylesheet %s"%fn)
return result
def findFont(self, fn):
"""Find the absolute font name for a given font filename.
Given a font filename, searches for it in FontSearchPath
and returns the real file name.
"""
if not os.path.isabs(fn):
for D in self.FontSearchPath:
tfn = os.path.join(D, fn)
if os.path.isfile(tfn):
return str(tfn)
return str(fn)
def styleForNode(self, node):
"""Return the right default style for any kind of node.
That usually means "bodytext", but for sidebars, for
example, it's sidebar.
"""
n= docutils.nodes
styles={n.sidebar: 'sidebar',
n.figure: 'figure',
n.tgroup: 'table',
n.table: 'table',
n.Admonition: 'admonition'
}
return self[styles.get(node.__class__, 'bodytext')]
def tstyleHead(self, rows=1):
"""Return a table style spec for a table header of `rows`.
The style will be based on the table-heading style from the stylesheet.
"""
# This alignment thing is exactly backwards from
# the alignment for paragraphstyles
        alignment = {0: 'LEFT', 1: 'CENTER', 2: 'RIGHT',
            4: 'JUSTIFY', 8: 'DECIMAL'}[self['table-heading'].alignment]
return [
('BACKGROUND',
(0, 0),
(-1, rows - 1),
self['table-heading'].backColor),
('ALIGN',
(0, 0),
(-1, rows - 1),
alignment),
('TEXTCOLOR',
(0, 0),
(-1, rows - 1),
self['table-heading'].textColor),
('FONT',
(0, 0),
(-1, rows - 1),
self['table-heading'].fontName,
self['table-heading'].fontSize,
self['table-heading'].leading),
('VALIGN',
(0, 0),
(-1, rows - 1),
self['table-heading'].valign)]
def adjustFieldStyle(self):
"""Merges fieldname and fieldvalue styles into the field table style"""
tstyle=self.tstyles['field']
extras=self.pStyleToTStyle(self['fieldname'], 0, 0)+\
self.pStyleToTStyle(self['fieldvalue'], 1, 0)
for e in extras:
tstyle.add(*e)
return tstyle
def pStyleToTStyle(self, style, x, y):
"""Return a table style similar to a given paragraph style.
Given a reportlab paragraph style, returns a spec for a table style
that adopts some of its features (for example, the background color).
"""
results = []
if style.backColor:
results.append(('BACKGROUND', (x, y), (x, y), style.backColor))
if style.borderWidth:
bw = style.borderWidth
del style.__dict__['borderWidth']
if style.borderColor:
bc = style.borderColor
del style.__dict__['borderColor']
else:
bc = colors.black
bc=str(bc)
results.append(('BOX', (x, y), (x, y), bw, bc))
if style.borderPadding:
if isinstance(style.borderPadding, list):
results.append(('TOPPADDING',
(x, y),
(x, y),
style.borderPadding[0]))
results.append(('RIGHTPADDING',
(x, y),
(x, y),
style.borderPadding[1]))
results.append(('BOTTOMPADDING',
(x, y),
(x, y),
style.borderPadding[2]))
results.append(('LEFTPADDING',
(x, y),
(x, y),
style.borderPadding[3]))
else:
results.append(('TOPPADDING',
(x, y),
(x, y),
style.borderPadding))
results.append(('RIGHTPADDING',
(x, y),
(x, y),
style.borderPadding))
results.append(('BOTTOMPADDING',
(x, y),
(x, y),
style.borderPadding))
results.append(('LEFTPADDING',
(x, y),
(x, y),
style.borderPadding))
return results
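    # Illustrative sketch (hypothetical style values, not part of the original
    # module): for a paragraph style with backColor=colors.white, no border and
    # borderPadding=3, pStyleToTStyle(style, 0, 0) yields a BACKGROUND command
    # plus TOP/RIGHT/BOTTOM/LEFT PADDING commands, all targeting cell (0, 0).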
def adjustUnits(self, v, total=None, default_unit='pt'):
if total is None:
total = self.tw
return adjustUnits(v, total,
self.def_dpi,
default_unit,
emsize=self.emsize)
def combinedStyle(self, styles):
'''Given a list of style names, it merges them (the existing ones)
and returns a new style.
The styles that don't exist are silently ignored.
For example, if called with styles=['style1','style2'] the returned
style will be called 'merged_style1_style2'.
The styles that are *later* in the list will have priority.
'''
validst = [x for x in styles if self.StyleSheet.has_key(x)]
newname = '_'.join(['merged']+validst)
validst = [self[x] for x in validst]
newst=copy(validst[0])
for st in validst[1:]:
newst.__dict__.update(st.__dict__)
newst.name=newname
return newst
def adjustUnits(v, total=None, dpi=300, default_unit='pt', emsize=10):
"""Takes something like 2cm and returns 2*cm.
If you use % as a unit, it returns the percentage of "total".
If total is not given, returns a percentage of the page width.
However, if you get to that stage, you are doing it wrong.
Example::
    >>> adjustUnits('50%',200)
    100.0
"""
if v is None or v=="":
return None
v = str(v)
l = re.split('(-?[0-9\.]*)', v)
n=l[1]
u=default_unit
if len(l) == 3 and l[2]:
u=l[2]
if u in units.__dict__:
return float(n) * units.__dict__[u]
else:
if u == '%':
return float(n) * total/100
elif u=='px':
return float(n) * units.inch / dpi
elif u=='pt':
return float(n)
elif u=='in':
return float(n) * units.inch
elif u=='em':
return float(n) * emsize
elif u=='ex':
return float(n) * emsize /2
elif u=='pc': # picas!
return float(n) * 12
log.error('Unknown unit "%s"' % u)
return float(n)
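# Illustrative sketch (not part of the original module; values assume
# reportlab's 72pt inch):
#   adjustUnits('2in') -> 144.0
#   adjustUnits('50%', total=200) -> 100.0
#   adjustUnits('1.5em', emsize=10) -> 15.0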
def formatColor(value, numeric=True):
"""Convert a color like "gray" or "0xf" or "ffff"
to something ReportLab will like."""
if value in colors.__dict__:
return colors.__dict__[value]
else: # Hopefully, a hex color:
c = value.strip()
if c[0] == '#':
c = c[1:]
while len(c) < 6:
c = '0' + c
if numeric:
r = int(c[:2], 16)/255.
g = int(c[2:4], 16)/255.
b = int(c[4:6], 16)/255.
if len(c) >= 8:
alpha = int(c[6:8], 16)/255.
return colors.Color(r, g, b, alpha=alpha)
return colors.Color(r, g, b)
else:
return str("#"+c)
# The values are:
# * Minimum number of arguments
# * Maximum number of arguments
# * Valid types of arguments.
#
# For example, if option FOO takes a list, a string and a number,
# but the number is optional:
#
# "FOO":(2,3,"list","string","number")
#
# The reportlab command could look like
#
# ["FOO",(0,0),(-1,-1),[1,2],"whatever",4]
#
# The (0,0) (-1,-1) are start and stop and are mandatory.
#
# Possible types of arguments are string, number, color, colorlist
validCommands={
# Cell format commands
"FONT":(1,3,"string","number","number"),
"FONTNAME":(1,1,"string"),
"FACE":(1,1,"string"),
"FONTSIZE":(1,1,"number"),
"SIZE":(1,1,"number"),
"LEADING":(1,1,"number"),
"TEXTCOLOR":(1,1,"color"),
"ALIGNMENT":(1,1,"string"),
"ALIGN":(1,1,"string"),
"LEFTPADDING":(1,1,"number"),
"RIGHTPADDING":(1,1,"number"),
"TOPPADDING":(1,1,"number"),
"BOTTOMPADDING":(1,1,"number"),
"BACKGROUND":(1,1,"color"),
"ROWBACKGROUNDS":(1,1,"colorlist"),
"COLBACKGROUNDS":(1,1,"colorlist"),
"VALIGN":(1,1,"string"),
# Line commands
"GRID":(2,2,"number","color"),
"BOX":(2,2,"number","color"),
"OUTLINE":(2,2,"number","color"),
"INNERGRID":(2,2,"number","color"),
"LINEBELOW":(2,2,"number","color"),
"LINEABOVE":(2,2,"number","color"),
"LINEBEFORE":(2,2,"number","color"),
"LINEAFTER":(2,2,"number","color"),
# You should NOT have span commands, man!
#"SPAN":(,,),
}
def validateCommands(commands):
'''Given a list of reportlab's table commands, it fixes some common errors
and/or removes commands that can't be fixed'''
fixed=[]
for command in commands:
command[0]=command[0].upper()
flag=False
# See if the command is valid
if command[0] not in validCommands:
log.error('Unknown table command %s in stylesheet',command[0])
continue
# See if start and stop are the right types
if type(command[1]) not in (ListType,TupleType):
log.error('Start cell in table command should be list or tuple, got %s [%s]',type(command[1]),command[1])
flag=True
if type(command[2]) not in (ListType,TupleType):
            log.error('Stop cell in table command should be list or tuple, got %s [%s]',type(command[2]),command[2])
flag=True
# See if the number of arguments is right
l=len(command)-3
if l>validCommands[command[0]][1]:
log.error('Too many arguments in table command: %s',command)
flag=True
if l<validCommands[command[0]][0]:
log.error('Too few arguments in table command: %s',command)
flag=True
# Validate argument types
for pos,arg in enumerate(command[3:]):
typ = validCommands[command[0]][pos+2]
if typ == "color":
# Convert all 'string' colors to numeric
command[3+pos]=formatColor(arg)
elif typ == "colorlist":
command[3+pos]=[ formatColor(c) for c in arg]
elif typ == "number":
pass
elif typ == "string":
# Force string, not unicode
command[3+pos]=str(arg)
else:
log.error("This should never happen: wrong type %s",typ)
if not flag:
fixed.append(command)
return fixed
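# Illustrative sketch (not part of the original module): a lowercase command
# with a named color is normalized in place, e.g.
#   validateCommands([['background', (0, 0), (-1, -1), 'gray']])
# returns [['BACKGROUND', (0, 0), (-1, -1), colors.gray]].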
class CallableStyleSheet(str):
''' Useful for programmatically generated stylesheets.
A generated stylesheet is a callable string (name),
which returns the pre-digested stylesheet data
when called.
'''
def __new__(cls, name, value=''):
self = str.__new__(cls, name)
self.value = value
return self
def __call__(self):
return rson_loads(self.value)
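# Illustrative sketch (hypothetical stylesheet data, not part of the original
# module):
#   sheet = CallableStyleSheet('inline-styles', '{"styles": []}')
#   str(sheet) -> 'inline-styles'
#   sheet() -> {'styles': []}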
| openpolis/rst2pdf-patched-docutils-0.8 | rst2pdf/styles.py | Python | mit | 38,322 |
from pylearn2.models.mlp import MLP
class Autoencoder(MLP):
"""
An MLP whose output domain is the same as its input domain.
"""
def get_target_source(self):
return 'features'
| CKehl/pylearn2 | pylearn2/scripts/tutorials/convolutional_network/autoencoder.py | Python | bsd-3-clause | 201 |
from django.utils import timezone
from django.views import generic
from django.http import HttpResponse
from django.shortcuts import render
from events.models import Event
# home page
class IndexView(generic.ListView):
template_name = 'mysite/index.html'
def get_queryset(self):
return Event.objects.filter(
event_time__gte=timezone.now()
)[:5]
# Holding page
def coming_soon(request):
event_list = Event.objects.filter(
event_time__gte=timezone.now()
)[:5]
return render(request, 'mysite/coming_soon.html', {
'event_list': event_list,
})
# google
def google(request):
return HttpResponse("google-site-verification: googlec35559684fb6219b.html") | cs98jrb/Trinity | mysite/mysite/views/index.py | Python | gpl-2.0 | 725 |
import unittest
from unittest.mock import MagicMock
import io
from snail import vlq
class TestVlq(unittest.TestCase):
    def setUp(self):
        pass
    def tearDown(self):
        pass
def test_read(self):
pass
def test_write(self):
pass
| sjzabel/snail | tests/test-vlq.py | Python | bsd-3-clause | 271 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# --- BEGIN_HEADER ---
#
# putfuncs - helpers for the put handler
# Copyright (C) 2003-2014 The MiG Project lead by Brian Vinter
#
# This file is part of MiG.
#
# MiG is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# MiG is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# -- END_HEADER ---
#
"""Functions in this file should only be called from 'put'.
If other scripts needs it then move the function to another file.
"""
import os
import time
import re
import shared.fileio as io
from shared.base import client_id_dir
from shared.defaults import job_output_dir
def template_fits_file(template, filename, allowed_time=3.0):
"""Test if regular expressions in template fits contents of
filename in a line by line comparison. Please note that an
empty template matches if filename doesn't exist.
"""
fit = True
msg = ''
# Allow comparison to take up to allowed_time seconds
start_time = time.time()
try:
comparelines = open(template, 'r').readlines()
except Exception, err:
msg = "Failed to read template file"
return (False, msg)
try:
filelines = open(filename, 'r').readlines()
except Exception, err:
if len(comparelines) == 0:
# No file matches an empty template
return (True, msg)
else:
msg = "Failed to read file to verify"
return (False, msg)
if len(filelines) != len(comparelines):
msg = "line count mismatch between template and file to verify"
return (False, msg)
i = 0
#print "start time:", start_time
while i < len(filelines):
compare_time = time.time() - start_time
if compare_time > allowed_time:
msg = "Template fit against file timed out after %d lines (%ds)" \
% (i, compare_time)
fit = False
break
line = filelines[i].strip()
compare = comparelines[i].strip()
# print line, "?~" , compare
i += 1
if not re.match(compare, line):
# print line, "!~" , compare
msg = "found mismatch: '%s' vs '%s' (%s)" % (line, compare, line==compare)
fit = False
break
#print "Comparison of %s against %s done in %.4f seconds" % (template, filename, compare_time)
return (fit, msg)
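# Illustrative sketch (hypothetical file names, not part of the original
# module): each template line is a regular expression matched against the
# corresponding line of the output file, so a template line 'result: [0-9]+'
# fits an output line 'result: 42', and
# template_fits_file('job.verify', 'job.stdout') then returns (True, '').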
def verify_results(job_dict, logger, configuration):
# Compares any verifyfiles against actual results and sets VERIFIED_X for each one
user_home = configuration.user_home
job_dict['VERIFIED'] = 'NO'
if job_dict.has_key('VERIFYFILES') and job_dict['VERIFYFILES']:
verified = True
job_dict['VERIFIED'] = ''
for verify in job_dict['VERIFYFILES']:
logger.debug('verifying %s against actual results', verify)
if verify.endswith('.status'):
check = 'status'
elif verify.endswith('.stdout'):
check = 'stdout'
elif verify.endswith('.stderr'):
check = 'stderr'
else:
logger.warning('unsupported verifyfile %s! must end in .{status,stdout,stderr}'
, verify)
job_dict['VERIFIED'] += ' %s: unknown file suffix!'\
% verify
verified = False
continue
logger.debug('preparing to do %s check', check)
client_id = job_dict.get('USER_CERT', '')
client_dir = client_id_dir(client_id)
logger.debug('owner: %s', client_id)
verifyname = os.path.join(user_home, client_dir, verify)
logger.debug('verify using %s', verifyname)
if not os.path.isfile(verifyname):
logger.warning('no such verifyfile %s! (%s)', verify,
verifyname)
job_dict['VERIFIED'] += ' %s: %s does not exist!'\
% (check, verify)
verified = False
continue
job_id = job_dict['JOB_ID']
filename = os.path.join(user_home, client_dir, job_output_dir,
job_id, job_id + '.' + check)
logger.info('Matching %s against %s', verifyname, filename)
(match, err) = template_fits_file(verifyname, filename)
if match:
job_dict['VERIFIED'] += ' %s: %s' % (check, 'OK')
else:
job_dict['VERIFIED'] += ' %s: %s (%s)' % (check, 'FAILED', err)
verified = False
logger.info('verified %s against actual results - match: %s (%s)'
% (verify, match, err))
if verified:
job_dict['VERIFIED'] = 'SUCCESS -' + job_dict['VERIFIED']
else:
job_dict['VERIFIED'] = 'FAILURE -' + job_dict['VERIFIED']
job_dict['VERIFIED_TIMESTAMP'] = time.gmtime()
else:
logger.info('No verifyfile entries to verify result against')
logger.info('VERIFIED : %s', job_dict['VERIFIED'])
def migrated_job(filename, client_id, configuration):
"""returns a tuple (bool status, str msg)"""
logger = configuration.logger
client_dir = client_id_dir(client_id)
job_path = os.path.abspath(os.path.join(configuration.server_home,
client_dir, filename))
# unpickle and enqueue received job file
job_path_spaces = job_path.replace('\\ ', '\\\\\\ ')
job = io.unpickle(job_path_spaces, configuration.logger)
# TODO: update any fields to mark migration?
if not job:
return (False,
'Fatal migration error: loading pickled job (%s) failed! ' % \
job_path_spaces)
job_id = job['JOB_ID']
# save file with other mRSL files
mrsl_filename = \
os.path.abspath(os.path.join(configuration.mrsl_files_dir,
client_dir, job_id + '.mRSL'))
if not io.pickle(job, mrsl_filename, configuration.logger):
return (False, 'Fatal error: Could not write ' + filename)
# tell 'grid_script'
message = 'SERVERJOBFILE ' + client_dir + '/' + job_id + '\n'
if not io.send_message_to_grid_script(message, logger, configuration):
return (False, 'Fatal error: Could not write to grid stdin')
# TODO: do we need to wait for grid_script to ack job reception?
# ... same question applies to new_job, btw.
    return (True, '%s successfully migrated.' % job_id)
| heromod/migrid | mig/shared/putfuncs.py | Python | gpl-2.0 | 7,108 |
# coding: utf-8
import argparse
from PIL import Image
import qrcode
import os
import re
parser = argparse.ArgumentParser()
parser.add_argument('query', nargs='?', default=None)
args = parser.parse_args()
query = args.query.split('bound')[0]
qr = qrcode.QRCode(
version=1,
error_correction=qrcode.constants.ERROR_CORRECT_Q,
)
qr.add_data(query)
qr.make(fit=True)
img = qr.make_image().save('ss.png')
# img = qrcode.main.make(query, error_correction=qrcode.constants.ERROR_CORRECT_Q).save('ss.png')
print os.path.join(os.getcwd(), 'ss.png')
| wizyoung/workflows.kyoyue | ss.py | Python | mit | 552 |
# -*- coding: utf-8 -*-
__author__ = """Raghavendra Prabhu"""
__email__ = "[email protected]"
__version__ = "0.1.2"
| ronin13/pyvolume | pyvolume/__init__.py | Python | mit | 115 |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.gradients."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import warnings
import tensorflow.python.platform
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
# pylint: disable=unused-import
from tensorflow.python.ops import array_grad
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import constant_op
from tensorflow.python.ops import data_flow_grad
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import gradients
from tensorflow.python.ops import math_grad
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_grad
from tensorflow.python.ops import state_grad
# pylint: enable=unused-import
from tensorflow.python.ops.constant_op import constant
from tensorflow.python.ops.nn_ops import bias_add
from tensorflow.python.platform import googletest
def _OpsBetween(graph, to_ops, from_ops):
"""Build the list of operations between two lists of Operations.
Args:
graph: a Graph.
to_ops: list of Operations.
from_ops: list of Operations.
Returns:
The list of operations between "from_ops" and "to_ops", sorted by
decreasing operation id. This list contains all elements of to_ops.
TODO(touts): Think about returning an empty list if from_ops are not
reachable from to_ops. Presently it returns to_ops in that case.
"""
# List of booleans, indexed by operation id, indicating if
# an op is reached from the output of "input_ops".
reached_ops = [False] * (graph._last_id + 1)
# We only care to reach up to "output_ops" so we mark the
# output ops as reached to avoid recursing past them.
for op in to_ops:
reached_ops[op._id] = True
gradients._MarkReachedOps(from_ops, reached_ops)
between_ops = gradients._GatherInputs(to_ops, reached_ops)
between_ops.sort(key=lambda x: -x._id)
return between_ops
class GradientsTest(test_util.TensorFlowTestCase):
def _OpNames(self, op_list):
return ["%s/%d" % (str(op.name), op._id) for op in op_list]
def _assertOpListEqual(self, ops1, ops2):
self.assertEquals(self._OpNames(ops1), self._OpNames(ops2))
def testOpsBetweenSimple(self):
with ops.Graph().as_default() as g:
t1 = constant(1.0)
t2 = constant(2.0)
t3 = array_ops.pack([t1, t2])
# Full graph
self._assertOpListEqual([t3.op, t2.op, t1.op],
_OpsBetween(g, [t3.op], [t1.op, t2.op]))
# Only t1, t3.
self._assertOpListEqual([t3.op, t1.op],
_OpsBetween(g, [t3.op], [t1.op]))
def testOpsBetweenUnreachable(self):
with ops.Graph().as_default() as g:
t1 = constant(1.0)
t2 = constant(2.0)
_ = array_ops.pack([t1, t2])
t4 = constant(1.0)
t5 = constant(2.0)
t6 = array_ops.pack([t4, t5])
# Elements of to_ops are always listed.
self._assertOpListEqual([t6.op], _OpsBetween(g, [t6.op], [t1.op]))
def testOpsBetweenCut(self):
with ops.Graph().as_default() as g:
t1 = constant(1.0)
t2 = constant(2.0)
t3 = array_ops.pack([t1, t2])
t4 = constant([1.0])
t5 = array_ops.concat(0, [t4, t3])
t6 = constant([2.0])
t7 = array_ops.concat(0, [t5, t6])
self._assertOpListEqual([t7.op, t5.op, t4.op],
_OpsBetween(g, [t7.op], [t4.op]))
def testOpsBetweenCycle(self):
with ops.Graph().as_default() as g:
t1 = constant(1.0)
t2 = constant(2.0)
t3 = array_ops.pack([t1, t2])
t4 = array_ops.concat(0, [t3, t3, t3])
t5 = constant([1.0])
t6 = array_ops.concat(0, [t4, t5])
t7 = array_ops.concat(0, [t6, t3])
self._assertOpListEqual([t6.op, t4.op, t3.op],
_OpsBetween(g, [t6.op], [t3.op]))
self._assertOpListEqual([t7.op, t6.op, t5.op, t4.op, t3.op, t1.op],
_OpsBetween(g, [t7.op], [t1.op, t5.op]))
self._assertOpListEqual([t6.op, t5.op, t4.op, t3.op, t2.op],
_OpsBetween(g, [t6.op], [t2.op, t5.op]))
def testGradients(self):
with ops.Graph().as_default():
inp = constant(1.0, shape=[32, 100], name="in")
w = constant(1.0, shape=[100, 10], name="w")
b = constant(1.0, shape=[10], name="b")
xw = math_ops.matmul(inp, w, name="xw")
h = bias_add(xw, b, name="h")
w_grad = gradients.gradients(h, w)[0]
self.assertEquals("MatMul", w_grad.op.type)
self.assertEquals(w_grad.op._original_op, xw.op)
self.assertTrue(w_grad.op.get_attr("transpose_a"))
self.assertFalse(w_grad.op.get_attr("transpose_b"))
def testUnusedOutput(self):
with ops.Graph().as_default():
w = constant(1.0, shape=[2, 2])
x = constant(1.0, shape=[2, 2])
wx = math_ops.matmul(w, x)
split_wx = array_ops.split(0, 2, wx)
c = math_ops.reduce_sum(split_wx[1])
gw = gradients.gradients(c, [w])[0]
self.assertEquals("MatMul", gw.op.type)
def testColocateGradients(self):
with ops.Graph().as_default() as g:
w = constant(1.0, shape=[1, 1])
x = constant(1.0, shape=[1, 2])
with g.device("/gpu:0"):
wx = math_ops.matmul(w, x)
gw = gradients.gradients(wx, [w], colocate_gradients_with_ops=True)[0]
self.assertEquals("/gpu:0", gw.device)
def testColocateGradientsWithAggregation(self):
with ops.Graph().as_default() as g:
with g.device("/gpu:1"):
w = constant(1.0, shape=[1, 1])
x = constant(1.0, shape=[1, 2])
y = constant(1.0, shape=[1, 2])
wx = math_ops.matmul(w, x)
wy = math_ops.matmul(w, y)
with g.device("/gpu:0"):
z = wx + wy
gw1 = gradients.gradients(z, [w], colocate_gradients_with_ops=True)[0]
self.assertEquals("/gpu:1", gw1.device)
gw2 = gradients.gradients(z, [w], colocate_gradients_with_ops=False)[0]
self.assertEquals(None, gw2.device)
def testBoundaryStop(self):
# Test that we don't differentiate 'x'. The gradient function for 'x' is
# set explicitly to None so we will get an exception if the gradient code
# tries to differentiate 'x'.
with ops.Graph().as_default() as g:
c = constant(1.0)
x = array_ops.identity(c)
y = x + 1.0
z = y + 1
grads = gradients.gradients(z, [x])
self.assertTrue(all([x for x in grads]))
def testBoundaryContinue(self):
# Test that we differentiate both 'x' and 'y' correctly when x is a
# predecessor of y.
with self.test_session():
x = constant(1.0)
y = x * 2.0
z = y * 3.0
grads = gradients.gradients(z, [x, y])
self.assertTrue(all([x for x in grads]))
self.assertEqual(6.0, grads[0].eval())
def testAggregationMethodAccumulateN(self):
with self.test_session():
x = constant(1.0)
y = x * 2.0
z = y + y + y + y + y + y + y + y + y + y
grads = gradients.gradients(
z,
[x, y],
aggregation_method=
gradients.AggregationMethod.EXPERIMENTAL_ACCUMULATE_N)
self.assertTrue(all([x for x in grads]))
self.assertEqual(20.0, grads[0].eval())
self.assertEqual(10.0, grads[1].eval())
def testAggregationMethodAddN(self):
with self.test_session():
x = constant(1.0)
y = x * 2.0
z = y + y + y + y + y + y + y + y + y + y
grads = gradients.gradients(
z,
[x, y],
aggregation_method=gradients.AggregationMethod.ADD_N)
self.assertTrue(all([x for x in grads]))
self.assertEqual(20.0, grads[0].eval())
self.assertEqual(10.0, grads[1].eval())
def testAggregationMethodTree(self):
with self.test_session():
x = constant(1.0)
y = x * 2.0
z = y + y + y + y + y + y + y + y + y + y
grads = gradients.gradients(
z,
[x, y],
aggregation_method=gradients.AggregationMethod.EXPERIMENTAL_TREE)
self.assertTrue(all([x for x in grads]))
self.assertEqual(20.0, grads[0].eval())
self.assertEqual(10.0, grads[1].eval())
def testNoGradientForStringOutputs(self):
with ops.Graph().as_default() as g:
@ops.RegisterGradient("TestOp")
def _TestOpGrad(op, float_grad, string_grad):
"""Gradient function for TestOp."""
self.assertEquals(float_grad.dtype, dtypes.float32)
self.assertFalse(string_grad)
return float_grad
ops.RegisterShape("TestOp")(None)
c = constant(1.0)
x, y = g.create_op("TestOp", [c], [dtypes.float32, dtypes.string]).outputs
z = x * 2.0
w = z * 3.0
grads = gradients.gradients(z, [c])
self.assertTrue(isinstance(grads[0], ops.Tensor))
class StopGradientTest(test_util.TensorFlowTestCase):
def testStopGradient(self):
with ops.Graph().as_default():
inp = constant(1.0, shape=[100, 32], name="in")
out = array_ops.stop_gradient(inp)
igrad = gradients.gradients(out, inp)[0]
assert igrad is None
class HessianVectorProductTest(test_util.TensorFlowTestCase):
def testHessianVectorProduct(self):
# Manually compute the Hessian explicitly for a low-dimensional problem
# and check that HessianVectorProduct matches multiplication by the
# explicit Hessian.
# Specifically, the Hessian of f(x) = x^T A x is
# H = A + A^T.
# We expect HessianVectorProduct(f(x), x, v) to be H v.
m = 4
rng = np.random.RandomState([1, 2, 3])
mat_value = rng.randn(m, m).astype("float32")
v_value = rng.randn(m, 1).astype("float32")
x_value = rng.randn(m, 1).astype("float32")
hess_value = mat_value + mat_value.T
hess_v_value = np.dot(hess_value, v_value)
for use_gpu in [False, True]:
with self.test_session(use_gpu=use_gpu):
mat = constant_op.constant(mat_value)
v = constant_op.constant(v_value)
x = constant_op.constant(x_value)
mat_x = math_ops.matmul(mat, x, name="Ax")
x_mat_x = math_ops.matmul(array_ops.transpose(x), mat_x, name="xAx")
hess_v = gradients._hessian_vector_product(x_mat_x, [x], [v])[0]
hess_v_actual = hess_v.eval()
self.assertAllClose(hess_v_value, hess_v_actual)
class IndexedSlicesToTensorTest(test_util.TensorFlowTestCase):
def testIndexedSlicesToTensor(self):
with self.test_session():
np_val = np.random.rand(4, 4, 4, 4).astype(np.float32)
c = constant_op.constant(np_val)
c_sparse = math_ops._as_indexed_slices(c)
self.assertAllEqual(np_val.shape, c_sparse.dense_shape.eval())
c_dense = math_ops.mul(c_sparse, 1.0)
self.assertAllClose(np_val, c_dense.eval())
def testInt64Indices(self):
with self.test_session():
np_val = np.random.rand(4, 4, 4, 4).astype(np.float32)
c = constant_op.constant(np_val)
c_sparse = math_ops._as_indexed_slices(c)
c_sparse = ops.IndexedSlices(
c_sparse.values, math_ops.cast(c_sparse.indices, dtypes.int64),
c_sparse.dense_shape)
self.assertAllEqual(np_val.shape, c_sparse.dense_shape.eval())
c_dense = math_ops.mul(c_sparse, 1.0)
self.assertAllClose(np_val, c_dense.eval())
def testWarnings(self):
# Smaller than the threshold: no warning.
c_sparse = ops.IndexedSlices(array_ops.placeholder(dtypes.float32),
array_ops.placeholder(dtypes.int32),
constant([4, 4, 4, 4]))
with warnings.catch_warnings(record=True) as w:
math_ops.mul(c_sparse, 1.0)
self.assertEqual(0, len(w))
# Greater than or equal to the threshold: warning.
c_sparse = ops.IndexedSlices(array_ops.placeholder(dtypes.float32),
array_ops.placeholder(dtypes.int32),
constant([100, 100, 100, 100]))
with warnings.catch_warnings(record=True) as w:
math_ops.mul(c_sparse, 1.0)
self.assertEqual(1, len(w))
self.assertTrue(
"with 100000000 elements. This may consume a large amount of memory."
in str(w[0].message))
# Unknown dense shape: warning.
c_sparse = ops.IndexedSlices(array_ops.placeholder(dtypes.float32),
array_ops.placeholder(dtypes.int32),
array_ops.placeholder(dtypes.int32))
with warnings.catch_warnings(record=True) as w:
math_ops.mul(c_sparse, 1.0)
self.assertEqual(1, len(w))
self.assertTrue(
"of unknown shape. This may consume a large amount of memory."
in str(w[0].message))
if __name__ == "__main__":
googletest.main()
| YanTangZhai/tf | tensorflow/python/ops/gradients_test.py | Python | apache-2.0 | 13,357 |
"""
WSGI config for YaoGlobal project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "YaoGlobal.settings")
application = get_wsgi_application()
| JasonYao/Yao-Global | YaoGlobal/wsgi.py | Python | gpl-2.0 | 395 |
#! /usr/bin/env python
import argparse
import operator
import os
import sys
from collections import namedtuple
from functools import lru_cache
from functools import reduce
from pathlib import Path
from typing import Any
from typing import Dict
from typing import List
import addict
import arrow
import googleapiclient.errors
import httplib2
import oauth2client.client
import oauth2client.file
import oauth2client.tools
import yaml
from apiclient.discovery import build # pylint: disable=import-error
from isodate import parse_duration
from isodate import strftime
from tqdm import tqdm
from xdg import XDG_CACHE_HOME
print = tqdm.write # pylint: disable=invalid-name,redefined-builtin
# The CLIENT_SECRETS_FILE variable specifies the name of a file that contains
# the OAuth 2.0 information for this application, including its client_id and
# client_secret. You can acquire an OAuth 2.0 client ID and client secret from
# the {{ Google Cloud Console }} at
# {{ https://cloud.google.com/console }}.
# Please ensure that you have enabled the YouTube Data API for your project.
# For more information about using OAuth2 to access the YouTube Data API, see:
# https://developers.google.com/youtube/v3/guides/authentication
# For more information about the client_secrets.json file format, see:
# https://developers.google.com/api-client-library/python/guide/aaa_client_secrets
CLIENT_SECRETS_FILE = 'client_secrets.json'
# This variable defines a message to display if the CLIENT_SECRETS_FILE is
# missing.
MISSING_CLIENT_SECRETS_MESSAGE = """
WARNING: Please configure OAuth 2.0
To make this sample run you will need to populate the client_secrets.json file
found at:
%s
with information from the {{ Cloud Console }}
{{ https://cloud.google.com/console }}
For more information about the client_secrets.json file format, please visit:
https://developers.google.com/api-client-library/python/guide/aaa_client_secrets
""" % os.path.abspath(
os.path.join(os.path.dirname(__file__), CLIENT_SECRETS_FILE)
)
# This OAuth 2.0 access scope allows for full read/write access to the
# authenticated user's account.
YOUTUBE_READ_WRITE_SCOPE = 'https://www.googleapis.com/auth/youtube'
YOUTUBE_API_SERVICE_NAME = 'youtube'
YOUTUBE_API_VERSION = 'v3'
VideoInfo = namedtuple('VideoInfo', ['channel_id', 'published_date', 'duration'])
JsonType = Dict[str, Any]
class YoutubeManager:
def __init__(self, dry_run: bool, args: List[str]) -> None:
self.youtube = self.get_youtube(args)
self.dry_run = dry_run
@staticmethod
def get_creds(args: List[str]) -> oauth2client.client.Credentials:
'''Authorize client with OAuth2.'''
flow = oauth2client.client.flow_from_clientsecrets(
CLIENT_SECRETS_FILE, message=MISSING_CLIENT_SECRETS_MESSAGE, scope=YOUTUBE_READ_WRITE_SCOPE
)
storage = oauth2client.file.Storage('{}-oauth2.json'.format(sys.argv[0]))
credentials = storage.get()
if credentials is None or credentials.invalid:
flags = oauth2client.tools.argparser.parse_args(args)
credentials = oauth2client.tools.run_flow(flow, storage, flags)
return credentials
def get_youtube(self, args: List[str]):
'''Get youtube data v3 object.'''
creds = self.get_creds(args)
return build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION, http=creds.authorize(httplib2.Http()))
def get_watchlater_playlist(self) -> str:
'''Get the id of the 'Sort Watch Later' playlist.
The 'Sort Watch Later' playlist is regular playlist and is not the same as the magical one that all
youtube users have by default.
'''
playlists = self.youtube.playlists().list(part='snippet', mine=True).execute()
        playlist_id = next((i['id'] for i in playlists['items'] if i['snippet']['title'] == 'Sort Watch Later'), None)
return playlist_id
def get_playlist_videos(self, watchlater_id: str) -> List[JsonType]:
'''Returns list of playlistItems from Sort Watch Later playlist'''
result: List[Dict] = []
request = self.youtube.playlistItems().list(part='snippet', playlistId=watchlater_id, maxResults=50)
# Iterate through all results pages
while request:
response: Dict[str, Dict] = request.execute()
result.extend(response['items'])
# Prepare next results page
request = self.youtube.playlistItems().list_next(request, response)
return result
def get_video_info(self, playlist_videos: List[JsonType]) -> Dict[str, VideoInfo]:
'''Returns a dict of VideoInfo for each video
The key is video id and the value is VideoInfo.
'''
result = {}
videos = [i['snippet']['resourceId']['videoId'] for i in playlist_videos]
# Partition videos due to max number of videos queryable with one api call
while videos:
to_query = videos[:50]
remaining = videos[50:]
response = (
self.youtube.videos()
.list(part='snippet,contentDetails', id=','.join(list(to_query)), maxResults=50)
.execute()
)
for i in response['items']:
video_id = i['id']
channel_id = i['snippet']['channelId']
published_date = i['snippet']['publishedAt']
duration = parse_duration(i['contentDetails']['duration'])
result[video_id] = VideoInfo(channel_id, published_date, duration)
videos = remaining
return result
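    # Illustrative shape of the returned mapping (hypothetical values):
    #   {'dQw4w9WgXcQ': VideoInfo(channel_id='UC123',
    #                             published_date='2020-01-05T00:00:00Z',
    #                             duration=timedelta(seconds=212))}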
def sort_playlist(self, playlist_videos: List[Dict], video_infos: JsonType) -> None:
'''Sorts a playlist and groups videos by channel.'''
def sort_key(playlist_item):
'''Groups together videos from the same channel, sorted by date in ascending order.'''
video_id = playlist_item['snippet']['resourceId']['videoId']
channel_name, published_date, _ = video_infos[video_id]
return '{}-{}'.format(channel_name, published_date)
sorted_playlist = sorted(playlist_videos, key=sort_key)
for index, i in enumerate(tqdm(sorted_playlist, unit='video')):
print('{} is being put in pos {}'.format(i['snippet']['title'], index))
if not self.dry_run:
i['snippet']['position'] = index
self.youtube.playlistItems().update(part='snippet', body=i).execute()
def get_subscribed_channels(self) -> List[Dict[str, str]]:
channels: List[Dict[str, str]] = []
next_page_token = None
request = self.youtube.subscriptions().list(part='snippet', mine=True, maxResults=50, pageToken=next_page_token)
while request:
response = request.execute()
response = addict.Dict(response)
channels.extend({'title': i.snippet.title, 'id': i.snippet.resourceId.channelId} for i in response['items'])
request = self.youtube.subscriptions().list_next(request, response)
return channels
def get_channel_details(self, channel_id: str) -> addict.Dict:
request = self.youtube.channels().list(part='contentDetails', id=channel_id)
# Only 1 item, since queried by id
channel_details = addict.Dict(request.execute()['items'][0])
return channel_details
    def add_channel_videos_watch_later(self, channel: str, uploaded_after: arrow.Arrow) -> None:
video_ids = []
channel_details = self.get_channel_details(channel)
uploaded_playlist = channel_details.contentDetails.relatedPlaylists.uploads
request = self.youtube.playlistItems().list(part='snippet', playlistId=uploaded_playlist, maxResults=50)
while request:
response = addict.Dict(request.execute())
recent_videos = [
{'id': i.snippet.resourceId.videoId, 'title': i.snippet.title}
for i in response['items']
if i.snippet.resourceId.kind == 'youtube#video' and arrow.get(i.snippet.publishedAt) >= uploaded_after
]
if not recent_videos:
break
video_ids.extend(recent_videos)
request = self.youtube.playlistItems().list_next(request, response)
for video_id in video_ids:
self.add_video_to_watch_later(video_id)
def add_video_to_watch_later(self, video_id: JsonType) -> None:
print('Adding video to playlist: {}'.format(video_id['title']))
if not self.dry_run:
try:
self.youtube.playlistItems().insert(
part='snippet',
body={
'snippet': {
'playlistId': self.get_watchlater_playlist(),
'resourceId': {'kind': 'youtube#video', 'videoId': video_id['id']},
}
},
).execute()
except googleapiclient.errors.HttpError as error:
if error.resp.status == 409:
print('Already in list, skipping!')
else:
raise
    def update(self, uploaded_after: arrow.Arrow, only_allowed: bool = False) -> None:
channels = self.get_subscribed_channels()
config = read_config()
auto_add = config.setdefault('auto_add', [])
if uploaded_after is None:
if 'last_updated' in config:
uploaded_after = arrow.get(config['last_updated'])
else:
uploaded_after = arrow.now().shift(weeks=-2)
allowed_channel_ids = {i['id'] for i in auto_add}
if not only_allowed and not self.dry_run:
unknown_channels = [i for i in channels if i['id'] not in allowed_channel_ids]
for channel in unknown_channels:
response = input('Want to auto-add videos from "{}"? y/n: '.format(channel['title']))
if response == 'y':
auto_add.append({'id': channel['id'], 'name': channel['id']})
allowed_channel_ids.add(channel['id'])
write_config(config)
allowed_channels = [i for i in channels if i['id'] in allowed_channel_ids]
for channel in tqdm(allowed_channels, unit='video'):
self.add_channel_videos_watch_later(channel['id'], uploaded_after)
if not self.dry_run:
config['last_updated'] = arrow.now().format()
write_config(config)
def sort(self) -> None:
'''Sort the 'Sort Watch Later' playlist.'''
watchlater_id = self.get_watchlater_playlist()
if not watchlater_id:
sys.exit("Oh noes, you don't have a playlist named Sort Watch Later")
playlist_videos = self.get_playlist_videos(watchlater_id)
if playlist_videos:
video_infos = self.get_video_info(playlist_videos)
self.sort_playlist(playlist_videos, video_infos)
self.print_duration(video_infos)
else:
sys.exit(
'Playlist is empty! '
"Did you remember to copy over Youtube's Watch Later "
'to your personal Sort Watch Later playlist?'
)
@staticmethod
def print_duration(video_infos: JsonType) -> None:
total_duration = reduce(operator.add, [video.duration for video in video_infos.values()])
print('\n' * 2)
print('Total duration of playlist is {}'.format(strftime(total_duration, '%H:%M')))
@lru_cache(1)
def read_config() -> JsonType:
config_dir = Path(XDG_CACHE_HOME) / 'youtube-sort-playlist'
config_dir.mkdir(parents=True, exist_ok=True)
config_file = config_dir / 'config.yaml'
config_file.touch()
with config_file.open('r') as config:
return yaml.safe_load(config) or {}
def write_config(config: JsonType) -> None:
with open(os.path.join(XDG_CACHE_HOME, 'youtube-sort-playlist', 'config.yaml'), 'w') as file:
yaml.safe_dump(config, stream=file, explicit_start=True, default_flow_style=False)
def parse_args() -> argparse.Namespace:
parser = argparse.ArgumentParser(
description='Tool to manage Youtube Watch Later playlist. Because they refuse to make it trivial.'
)
parser.add_argument('args', nargs=argparse.REMAINDER)
common_parser = argparse.ArgumentParser(add_help=False)
common_parser.add_argument('--dry-run', action='store_true')
subparser = parser.add_subparsers(title='sub-commands', dest='subcommand')
subparser.add_parser(
'sort',
help="Sort 'Watch Later' playlist.",
description="Sort the 'Sort Watch Later' playlist and group by channel.",
parents=[common_parser],
)
update_parser = subparser.add_parser(
'update',
help='Add recent videos to watch later playlist.',
description='Update the watch later playlist with recent videos from subscribed channels.',
parents=[common_parser],
)
update_parser.add_argument('--since', help='Start date to filter videos by.', type=arrow.get)
update_parser.add_argument(
'-f', '--only-allowed', help='Auto add videos from known and allowed channels.', action='store_true'
)
return parser.parse_args()
def main() -> None:
args = parse_args()
youtube_manager = YoutubeManager(args.dry_run, args.args)
if args.subcommand == 'sort':
youtube_manager.sort()
elif args.subcommand == 'update':
youtube_manager.update(args.since, args.only_allowed)
if __name__ == '__main__':
main()
| ipwnponies/youtube-sort-playlist | playlist_updates.py | Python | unlicense | 13,622 |
# -*- coding: utf-8 -*-
from pyglet.graphics import OrderedGroup
from pyglet.gl import glPushMatrix, glPopMatrix, glScalef
from GestureAgentsDemo.Utils import DynamicValue
from GestureAgentsDemo.Render import basegroup
class ShellAppGroup(OrderedGroup):
"""docstring for ShellAppGroup"""
def __init__(self, order, parent=None):
super(ShellAppGroup, self).__init__(order, parent=parent)
self.scale = DynamicValue(1)
def set_state(self):
glPushMatrix()
glScalef(self.scale(), self.scale(), 1)
def unset_state(self):
glPopMatrix()
from GestureAgents.AppRecognizer import AppRecognizer
from GestureAgentsTUIO.Gestures2D.RecognizerDoubleTap import RecognizerDoubleTap
class ShellApp(object):
"""docstring for ShellApp"""
def __init__(self, system):
self.possible_apps = [DemoApp]
self.running = {}
self.maxid = 0
AppRecognizer(system, RecognizerDoubleTap).newAgent.register(
ShellApp.newAgentDoubleTap, self)
self.minimized = False
self.system = system
def launch(self, App):
id = self.maxid
group = ShellAppGroup(id, parent=basegroup)
self.maxid += 1
app = App(self.system, group=group)
self.running[app] = dict(id=id, group=group, program=App, instance=app)
def newAgentDoubleTap(self, agent):
agent.newDoubleTap.register(ShellApp.newDoubleTap, self)
def newDoubleTap(self, DTap):
self.minimized = not self.minimized
s = 0 if self.minimized else 1
for app in self.running.itervalues():
app["group"].scale(s, .5)
from GestureAgentsTUIO.Tuio import TuioCursorEvents
from GestureAgentsDemo.Geometry import Circle
class DemoApp(object):
"""docstring for DemoApp"""
    def __init__(self, system, group=None):
        AppRecognizer(
            system, TuioCursorEvents).newAgent.register(DemoApp.newAgentPaint, self)
self.circles = {}
self.displaygroup = group
def newAgentPaint(self, agent):
agent.updateCursor.register(DemoApp.event_move, self)
agent.newCursor.register(DemoApp.event_new, self)
agent.removeCursor.register(DemoApp.event_remove, self)
def event_move(self, Point):
self.circles[Point.sessionid].getCentered(Point.pos)
self.circles[Point.sessionid].updateDisplay()
def event_new(self, Point):
self.circles[Point.sessionid] = Circle(25, 20, group=self.displaygroup)
self.event_move(Point)
def event_remove(self, Point):
self.circles[Point.sessionid].clear()
del self.circles[Point.sessionid]
if __name__ == '__main__':
from GestureAgentsDemo import System
from apps.Map import DemoMapApp
from apps.Shadows import FingerShadow
from apps.Pictures import AppPictureBrowser
from apps.Calibrator import CalibratorApp
# from apps.DebugRecognizers import DebugRecognizer
system = System()
shell = ShellApp(system)
shell.launch(DemoMapApp)
shell.launch(AppPictureBrowser)
shell.launch(FingerShadow)
shell.launch(CalibratorApp)
# shell.launch(DebugRecognizer)
# shell.launch(DemoApp)
system.run_apps()
| chaosct/GestureAgents | Apps/DemoApp/DemoApp.py | Python | mit | 3,194 |
from blackjack.cmake.ScriptBase import ScriptBase
from blackjack.cmake.storage.SetList import SetList
from .cmake_set import cmake_set
class add_executable(ScriptBase):
"""
CMake Command - Add Executable Target
"""
    def __init__(self, name: str, opts: str, srcs: list):
super().__init__()
self._Name = None
self.Name = name
"""Name of the target"""
self.Options = opts
"""Executable options"""
self.Srcs = srcs
"""List of Sources to include into the target"""
return
@property
def Name(self):
"""Name of the target"""
return self._Name
@Name.setter
def Name(self, value):
self._Name = value.replace(" ", "_")
def render_body(self):
ret = []
tmpline = "add_executable(" + self.Name
if self.Options:
tmpline += " " + self.Options
ret.append(tmpline)
for item in self.Srcs:
if isinstance(item, str):
ret.append(' "' + item + '" ')
if isinstance(item, SetList):
ret.append(' ' + item.Name)
ret[-1] += ")"
return ret
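# Illustrative sketch (hypothetical values, not part of the original module):
#   add_executable('my_app', 'WIN32', ['main.cpp']).render_body()
#   -> ['add_executable(my_app WIN32', ' "main.cpp" )']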
| grbd/GBD.Build.BlackJack | blackjack/cmake/cmd/add_executable.py | Python | apache-2.0 | 1,180 |
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
## @file
"""
from nupic.regions.ImageSensorFilters.BaseFilter import BaseFilter
class Crop(BaseFilter):
"""
Crop the image.
"""
def __init__(self, box):
"""
@param box -- 4-tuple specifying the left, top, right, and bottom coords.
"""
BaseFilter.__init__(self)
if box[2] <= box[0] or box[3] <= box[1]:
raise RuntimeError('Specified box has zero width or height')
self.box = box
def process(self, image):
"""
@param image -- The image to process.
Returns a single image, or a list containing one or more images.
"""
BaseFilter.process(self, image)
if self.box[2] > image.size[0] or self.box[3] > image.size[1]:
raise RuntimeError('Crop coordinates exceed image bounds')
return image.crop(self.box)
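# Illustrative sketch (hypothetical values, not part of the original module):
# box is (left, top, right, bottom), so Crop(box=(25, 25, 75, 75)) applied to
# a 100x100 image returns its central 50x50 region.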
| david-ragazzi/nupic | nupic/regions/ImageSensorFilters/Crop.py | Python | gpl-3.0 | 1,768 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Rackspace
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob.exc
from qonos.api import api
from qonos.api.v1 import api_utils
from qonos.common import exception
from qonos.common import utils
import qonos.db
from qonos.openstack.common._i18n import _
from qonos.openstack.common import wsgi
CONF = api.CONF
class WorkersController(object):
def __init__(self, db_api=None):
self.db_api = db_api or qonos.db.get_api()
def _get_request_params(self, request):
params = {}
params['limit'] = request.params.get('limit')
params['marker'] = request.params.get('marker')
return params
def list(self, request):
params = self._get_request_params(request)
try:
params = utils.get_pagination_limit(params)
except exception.Invalid as e:
raise webob.exc.HTTPBadRequest(explanation=str(e))
try:
workers = self.db_api.worker_get_all(params=params)
except exception.NotFound:
raise webob.exc.HTTPNotFound()
[utils.serialize_datetimes(worker) for worker in workers]
return {'workers': workers}
def create(self, request, body):
worker = self.db_api.worker_create(body.get('worker'))
utils.serialize_datetimes(worker)
return {'worker': worker}
def get(self, request, worker_id):
try:
worker = self.db_api.worker_get_by_id(worker_id)
except exception.NotFound:
msg = _('Worker %s could not be found.') % worker_id
raise webob.exc.HTTPNotFound(explanation=msg)
utils.serialize_datetimes(worker)
return {'worker': worker}
def delete(self, request, worker_id):
try:
self.db_api.worker_delete(worker_id)
except exception.NotFound:
msg = _('Worker %s could not be found.') % worker_id
raise webob.exc.HTTPNotFound(explanation=msg)
def get_next_job(self, request, worker_id, body):
action = body.get('action')
try:
# Check that worker exists
self.db_api.worker_get_by_id(worker_id)
except exception.NotFound:
msg = _('Worker %s could not be found.') % worker_id
raise webob.exc.HTTPNotFound(explanation=msg)
new_timeout = api_utils.get_new_timeout_by_action(action)
job = self.db_api.job_get_and_assign_next_by_action(
action, worker_id, new_timeout)
if job:
utils.serialize_datetimes(job)
api_utils.serialize_job_metadata(job)
return {'job': job}
def create_resource():
"""QonoS resource factory method."""
return wsgi.Resource(WorkersController())
| rackerlabs/qonos | qonos/api/v1/workers.py | Python | apache-2.0 | 3,305 |
"""
conttest
--------
This task uses ``conttest`` to monitor a directory for changes and executes the specified
task every time a change is made. The following configuration is supported::
config = {
'conttest': {
'task': 'registered_task',
'directory': './directory/to/monitor/'
}
}
The ``task`` parameter is the task to be executed and must be registered in ``boltfile.py``.
The ``directory`` parameter is the directory (including sub-directories) to monitor for
changes.
To use this task, you need to have ``conttest`` installed, which you can do by calling::
pip install conttest
"""
import logging
import os
import bolt
import bolt.api as api
class ExecuteConttest(api.Task):
def _configure(self):
self.task_name = self._require('task')
self.directory = self._optional('directory', os.getcwd())
self.continue_on_error = True
logging.info('Executing continously "{task}" at {directory}'.format(task=self.task_name, directory=self.directory))
def _execute(self):
import conttest.conttest as ct
try:
ct.watch_dir(self.directory, self._execute_assigned_task, method=ct.TIMES)
except KeyboardInterrupt:
logging.info('Exiting continuous execution')
def _execute_assigned_task(self):
bolt.run_task(self.task_name, self.continue_on_error)
logging.info('Press <ctrl+c> to exit')
def register_tasks(registry):
registry.register_task('conttest', ExecuteConttest())
| abantos/bolt | bolt/tasks/bolt_conttest.py | Python | mit | 1,545 |
def fact_iter(n):
    """Find the factorial of the given number by the iterative method.
    This function is coded in Python 3.5."""
    # check for integer
    if not isinstance(n, int):
        raise TypeError("Please enter an integer only")
    if n <= 0:
        raise ValueError("Please enter a positive integer only")
temp = 1
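    # temp += temp * num is equivalent to temp *= (num + 1), so the loop
    # accumulates 1 * 2 * ... * n, i.e. n factorial.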
for num in range(1,n):
temp += temp * num
return temp
def fact_recu(n):
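    """Find the factorial of the given number by the recursive method."""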
    # check for integer
    if not isinstance(n, int):
        raise TypeError("Please enter an integer only")
    if n <= 0:
        raise ValueError("Please enter a positive integer only")
if n == 1:
return 1
else:
return n * fact_recu(n-1)
if __name__ == "__main__":
print("""Enter your choice
1 - factorial Iterative
2 - factorial Recursive""")
choice = int(input())
print("Enter the number")
number = int(input())
if choice == 1:
number = fact_iter(number)
if choice == 2:
number = fact_recu(number)
print(number) | rvsingh011/NitK_Assignments | Sem1/Algorithm/Factorial_iter.py | Python | mit | 1,055 |