code stringlengths 3 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1
value | license stringclasses 15
values | size int64 3 1.05M |
|---|---|---|---|---|---|
from flask import render_template, flash, request, url_for, redirect, session, abort, g
from app import app, db, login_manager
## LOGIN imports ##
from flask_login import login_user, logout_user, current_user, login_required
from .models import User
from .forms import RegistrationForm
# Exceptions #
from sqlalchemy import exc
## CONTENT MANAGER imports ##
from .content_manager import Content
#send_from_directory
@login_manager.user_loader
def load_user(id):
    """Flask-Login callback: fetch the User row for a session's stored id.

    Note: the parameter name ``id`` (shadowing the builtin) is part of the
    original interface and is kept.
    """
    user_pk = int(id)
    return User.query.get(user_pk)
# The g global is setup by Flask as a place to store and share data during the life of a request. Logged in user stored there.
# Any functions that are decorated with before_request will run before the view function each time a request is received.
@app.before_request
def before_request():
    """Runs before every view: stash the logged-in user on Flask's ``g``
    so templates and views can reach it as ``g.user``."""
    g.user = current_user
# Module-level topic catalogue shared by every /topics/* view below.
TOPIC_DICT = Content()
# URL for website navigation
# URL for website navigation
@app.route('/')
@app.route('/home/')
def index():
    """Public landing page, served at both / and /home/."""
    return render_template("index.html")
# User profile
# User profile
@app.route("/user-profile/<username>")
@login_required
def user_profile(username):
    """Show the profile page for *username*.

    Redirects to the index with a flash message when no such user exists.
    """
    user = User.query.filter_by(username=username).first()
    if user is None:  # identity comparison is the idiomatic None test
        flash("User %s not found." % username)
        return redirect(url_for("index"))
    return render_template("user-profile.html", user=user)
@app.route('/topics/')
@login_required
def topics():
    """Topic index; requires an authenticated user."""
    return render_template("topics.html", TOPIC_DICT = TOPIC_DICT)
    # 1st TOPIC_DICT is used in html
    # 2nd TOPIC_DICT is corresponding to one declared on top
# --- Static tutorial pages ---------------------------------------------
# Each view renders one topic page; all pass TOPIC_DICT so the shared
# topic navigation can be rendered by the template.
@app.route('/topics/installing-components/')
@login_required
def installation():
    return render_template("/topics/installing-components.html", TOPIC_DICT = TOPIC_DICT)

@app.route('/topics/app-architecture/')
@login_required
def architecture():
    return render_template("/topics/app-architecture.html", TOPIC_DICT = TOPIC_DICT)

@app.route('/topics/coding-basics/')
@login_required
def basics():
    return render_template("/topics/coding-basics.html", TOPIC_DICT = TOPIC_DICT)

@app.route('/topics/app-setup-conclusion/')
@login_required
def basics_conclusion():
    return render_template("/topics/app-setup-conclusion.html", TOPIC_DICT = TOPIC_DICT)

@app.route('/topics/about-bootstrap/')
@login_required
def about_bootstrap():
    return render_template("/topics/about-bootstrap.html", TOPIC_DICT = TOPIC_DICT)

@app.route('/topics/bootstrap-conclusion/')
@login_required
def bootstrap_conclusion():
    return render_template("/topics/bootstrap-conclusion.html", TOPIC_DICT = TOPIC_DICT)

@app.route('/topics/what-is-jinja2/')
@login_required
def about_jinja():
    return render_template("/topics/what-is-jinja2.html", TOPIC_DICT = TOPIC_DICT)

@app.route('/topics/how-to-jinja/')
@login_required
def how_to_jinja():
    return render_template("/topics/how-to-jinja.html", TOPIC_DICT = TOPIC_DICT)

@app.route('/topics/jinja2-conclusion/')
@login_required
def jinja2_conclusion():
    return render_template("/topics/jinja2-conclusion.html", TOPIC_DICT = TOPIC_DICT)

@app.route('/topics/sqlite-alchemy/')
@login_required
def sqlite_alchemy():
    return render_template("/topics/sqlite-alchemy.html", TOPIC_DICT = TOPIC_DICT)

@app.route('/topics/db-models/')
@login_required
def db_models():
    return render_template("/topics/db-models.html", TOPIC_DICT = TOPIC_DICT)

@app.route('/topics/flask-forms/')
@login_required
def flask_form():
    return render_template("/topics/flask-forms.html", TOPIC_DICT = TOPIC_DICT)

@app.route('/topics/database-conclusion/')
@login_required
def database_conclusion():
    return render_template("/topics/database-conclusion.html", TOPIC_DICT = TOPIC_DICT)
# Code source: https://blog.openshift.com/use-flask-login-to-add-user-authentication-to-your-python-application/
@app.route('/login/',methods=['GET','POST'])
def login():
    """Log a user in: GET renders the form, POST checks credentials.

    SECURITY NOTE(review): the password is matched in plain text against
    the database column, which implies passwords are stored unhashed.
    Hash them (e.g. werkzeug.security) before production use.
    """
    if g.user is not None and g.user.is_authenticated:
        # Already logged in -- nothing to do here.
        return redirect(url_for('topics'))
    if request.method == 'GET':
        return render_template('login.html')
    username = request.form['username']
    password = request.form['password']
    # Plain-text credential match (see security note above).
    registered_user = User.query.filter_by(username=username, password=password).first()
    if registered_user is None:
        flash('Invalid username or password', 'error')
        return redirect(url_for('login'))
    login_user(registered_user)
    flash('Logged in successfully')
    return redirect(url_for('index'))
# need to handle(if debug mode set to true): sqlalchemy.exc.IntegrityError
# IntegrityError: (IntegrityError) column username is not unique u'UPDATE user SET nickname=?, about_me=? WHE
# (if debug mode false): HTTP error code 500
# Solution: try: ... except exc.IntegrityError as e: ... . Sqlalchemy exc library handles exceptions for us.
# Important! rollback() current db session if IntegrityError trigger excepted.
@app.route('/register/', methods = ['GET', 'POST'])
def register():
    """Register a new account.

    A duplicate username triggers an IntegrityError (UNIQUE column); the
    session is rolled back and the form re-rendered so the user can pick
    another name.
    """
    if g.user is not None and g.user.is_authenticated:
        return redirect(url_for('topics'))
    form = RegistrationForm(request.form)
    if request.method == 'POST' and form.validate():
        try:
            user = User(form.username.data,
                        form.password.data)
            db.session.add(user)
            db.session.commit()
            flash('Thanks for registering')
            return redirect(url_for('login'))
        # http://stackoverflow.com/questions/24522290/cannot-catch-sqlalchemy-integrityerror
        except exc.IntegrityError:
            flash("That user name is already taken... Try something else!")
            # Roll back the failed transaction. Use the scoped session
            # attribute directly (db.session.rollback()) for consistency
            # with the rest of this module, instead of db.session().
            db.session.rollback()
            return render_template('register.html', form=form)
    return render_template('register.html', form=form)
@app.route('/logout/')
def logout():
    """End the Flask-Login session and return to the landing page."""
    logout_user()
    return redirect(url_for('index'))
# 404 error handle that renders 404.html template
@app.errorhandler(404)
def page_not_found(e):
    """Render the custom 404 page with the correct HTTP status.

    Returning the template alone would send status 200; the explicit
    ``, 404`` keeps the response semantically correct for clients.
    """
    return render_template("404.html"), 404
@app.errorhandler(500)
def internal_error(error):
    """Render the 500 page; roll the session back first, since an
    unhandled database error may have left it in a broken state."""
    db.session.rollback()
    return render_template('500.html'), 500
| EddyCodeIt/SPA_Project_2016_Data_Rep-Quering | app/views.py | Python | apache-2.0 | 6,100 |
import curses
import logging
# Next free curses colour-pair id; each Color instance claims one.
_COLOR_UID = 1

BLACK = curses.COLOR_BLACK
WHITE = curses.COLOR_WHITE


class Color(object):
    """A curses colour pair with mutable foreground/background.

    Each instance reserves the next pair id from the module counter and
    (re-)registers its colours with ``init_pair`` on every flush().
    """

    def __init__(self, fg=WHITE, bg=BLACK, curses_lib=curses):
        global _COLOR_UID
        self.COLOR_UID = _COLOR_UID
        _COLOR_UID += 1
        self._fg = fg
        self._bg = bg
        self._curses = curses_lib  # injectable for testing
        self.flush()

    def change_color(self, fg=None, bg=None, flush=True):
        """Update either colour component; ``None`` leaves it unchanged.

        Pass ``flush=False`` to batch several changes before one flush().
        """
        self._fg = fg if fg is not None else self._fg
        self._bg = bg if bg is not None else self._bg
        if flush:
            self.flush()

    def flush(self):
        """Push the current colours to curses via ``init_pair``."""
        # This is tracing, not a failure: log at DEBUG rather than the
        # ERROR level the original used.
        logging.debug('Assigning %d, %d, %d',
                      self.COLOR_UID, self._fg, self._bg)
        self._curses.init_pair(self.COLOR_UID, self._fg, self._bg)
# 256-colour terminal grey; both spellings kept for caller convenience.
LIGHT_GRAY = 237
LIGHT_GREY = LIGHT_GRAY
| ElegantBadger/splutter | splutter/colors.py | Python | mit | 818 |
"""
Copyright 2013 Shine Wang
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import urllib
import re
from HTMLParser import HTMLParser
from courseClasses import Course, Lecture, Tutorial, Reserve
class CustomHTMLParser(HTMLParser):
    """this class reads a HTML stream, then parses out the "data" fields"""

    def __init__(self, webData):
        # webData is a list owned by the caller; every text node found
        # while feeding HTML is appended to it (stripped).
        HTMLParser.__init__(self)
        self.webData = webData

    def handle_data(self, data):
        """takes out the data"""
        self.webData.append(data.strip())
class WebParser:
    """"A WebParser is created for each and every course,
    to parse the corresponding web page"""

    # POST endpoint of the UW course-schedule CGI script (Python 2 urllib).
    requestURL = "http://www.adm.uwaterloo.ca/cgi-bin/" \
        "cgiwrap/infocour/salook.pl"

    def __init__(self):
        self.webData = []      # flat list of table-cell strings from the page
        self.index = -1        # cursor into webData while scanning the table
        self.session = None    # 4-char session code, e.g. "1159" (see parseSession)
        self.thisCourse = None

    def run(self, courseString, sessionString):
        """this is the method that the main class can call
        if successful, returns the Course class
        if not, returns an error message"""
        self.session = self.parseSession(sessionString)
        if self.session is None:
            return "SessionNameWrongError"
        courseString = map(lambda x: x.upper(), courseString.split())
        try:
            self.thisCourse = Course(self.session, courseString[0],
                                     courseString[1])
        except:
            # e.g. only one word supplied -> IndexError
            return "CourseNameWrongError"
        if self.getWebData(self.thisCourse):
            return "WebPageError"
        elif self.parseWebData():
            return "CourseNotFoundError"
        else:
            self.processCourseInfo()
            self.postProcess(self.thisCourse)
            return self.thisCourse

    def parseSession(self, sessionString):
        # Build the session code: "1" + 2-digit year + term digit
        # (fall=9, winter=1, spring=5); None on malformed input.
        try:
            ret = "1"
            ret += sessionString.split()[1][-2:]  # last 2 digits of year
            tempMap = (("fall", "9"), ("winter", "1"), ("spring", "5"))
            for season in tempMap:
                if season[0] in sessionString.lower():
                    ret += season[1]
            return ret
        except:
            return None

    def getWebData(self, course):
        """submits a POST query, initializes HTMLParser"""
        try:
            params = urllib.urlencode({"sess": course.session,
                                       "subject": course.subject,
                                       "cournum": course.catalogNumber})
            page = urllib.urlopen(WebParser.requestURL, params)
            parser = CustomHTMLParser(self.webData)
            # we use .replace() because HTMLParser ignores " ",
            # which would screwn up our table
            # NOTE(review): the first replace() argument is likely a
            # non-breaking space that was mangled in transit -- confirm.
            parser.feed(page.read().replace(" ", " "))
        except:
            return "WebPageError"

    def parseWebData(self):
        """We try to find the beginning of the desired table"""
        # now, we find the start index and pass that on along
        # with the webData
        for i in xrange(len(self.webData)-3):
            if self.webData[i] == self.thisCourse.subject \
                    and self.webData[i+2] == self.thisCourse.catalogNumber:
                self.index = i
                break
        if self.index == -1:  # website not found
            return "CourseNotFound"

    def processCourseInfo(self):
        """now, we do the heavy-duty processing of the data table"""
        # sets basic attrs of thisCourse
        self.thisCourse.units = self.webData[self.index+4]
        self.thisCourse.title = self.webData[self.index+6]
        # Skip ahead to the column headers, then walk row by row.
        while self.webData[self.index] != "Instructor":
            self.index += 1
        # processing row-by-row
        while not self.endOfRow(self.webData[self.index]):
            if self.webData[self.index] != "":
                self.processSlot()
            self.index += 1
            if self.index == len(self.webData):
                return

    def processSlot(self):
        """we check to see if this is the BEGINNING of a valid row"""
        if (self.webData[self.index+1][:3].upper() == "LEC"
                or self.webData[self.index+1][:3].upper() == "LAB") \
                and "ONLINE" not in self.webData[self.index+2]:
            # we don't want online classes!
            # processing a lecture row
            lec = Lecture()
            if self.processClass(lec, self.index, self.webData):
                return
            self.thisCourse.lectures.append(lec)
        elif self.webData[self.index+1][:3].upper() == "TUT":
            # processing a tutorial row
            tut = Tutorial()
            if self.processClass(tut, self.index, self.webData):
                return
            self.thisCourse.tutorials.append(tut)
        elif self.webData[self.index][:7].upper() == "RESERVE":
            # processing a reserve row; attach it to the latest lecture
            res = Reserve()
            self.processReserve(res, self.index, self.webData)
            if self.thisCourse.lectures:
                self.thisCourse.lectures[-1].reserves.append(res)
        # note: we leave out the TST (exam?) times for now

    def processReserve(self, res, index, webData):
        """processing reservations for certain types of students"""
        res.name = webData[index][9:]
        # we remove the "only" suffix (which is annoyingly pointless)
        if "only" in res.name:
            res.name = res.name[:-5]
        # also, the "students" suffx
        if "students" in res.name or "Students" in res.name:
            res.name = res.name[:-9]
        # now, we merge the match list
        while not webData[index].isdigit():
            index += 1
        # retriving enrollment numbers
        res.enrlCap = int(webData[index])
        res.enrlTotal = int(webData[index+1])

    def processClass(self, lec, index, webData):
        """we process a typical lecture or tutorial row"""
        attr1 = ["classNumber", "compSec", "campusLocation"]
        for i in xrange(len(attr1)):
            setattr(lec, attr1[i], webData[index+i].strip())
        index += 6
        attr2 = ["enrlCap", "enrlTotal", "waitCap", "waitTotal"]
        for i in xrange(len(attr2)):
            setattr(lec, attr2[i], int(webData[index+i]))
        index += 4
        # parsing the "Times Days/Date" field
        match = re.search(r"([:\d]+)-([:\d]+)(\w+)", webData[index])
        if not match:
            # we return an error message in the "TBA" case
            return "NoTimeError"
        attr3 = ["startTime", "endTime", "days"]
        for i in xrange(len(attr3)):
            setattr(lec, attr3[i], match.group(i+1).strip())
        index += 1
        if len(webData[index].split()) == 2:
            # sometimes, no building, room, and instructor will be given
            # this is mostly for Laurier courses
            lec.building, lec.room = webData[index].split()
            lec.instructor = webData[index+1].strip()

    def endOfRow(self, data):
        """returns true if the current data-cell is the last cell
        of this course; else - false"""
        # the last cell is of the form: ##/##-##/## or
        # "Information last updated
        if re.search(r"\d+/\d+-\d+/\d+", data) or \
                "Information last updated" in data:
            return True
        else:
            return False

    def postProcess(self, course):
        """this function will convert the class times to minutes-past-
        the-previous-midnight, and converts the days to numbers.
        Also, some reservation-postprocessing"""
        map(lambda x: x.calcMiscSeats(), course.lectures)
        for lec in course.lectures:
            lec.courseID = course.subject + " " + course.catalogNumber
        for tut in course.tutorials:
            tut.courseID = course.subject + " " + course.catalogNumber
        for slot in course.lectures + course.tutorials:
            # first, we convert time to 24hr time
            # earliest start time for a class is 8:30am
            # night classes start at/before 7:00pm
            if 1 <= int(slot.startTime.split(":")[0]) <= 7:
                slot.startTime, slot.endTime = \
                    map(lambda x: "{}:{}".format(str(int(x.split(":")[0])
                        + 12), x[-2:]), [slot.startTime,
                                         slot.endTime])
            elif int(slot.startTime.split(":")[0]) > int(
                    slot.endTime.split(":")[0]):
                # e.g. 12:00 to 1:00
                slot.endTime = "{}:{}".format(str(int(
                    slot.endTime.split(":")[0])+12), slot.endTime[-2:])
            # now, we write to slot.sTime, slot.eTime
            # (minutes-past-midnight...)
            slot.sTime, slot.eTime = map(lambda x: int(x[:2]) * 60 +
                                         int(x[-2:]),
                                         [slot.startTime, slot.endTime])
            # we write to slot.ndays, where ndays is a string of numbers,
            # 0->4
            if "M" in slot.days:
                slot.ndays += "0"
            i = slot.days.find("T")
            if i != -1 and (i == len(slot.days) - 1 or
                            slot.days[i+1] != 'h'):
                # basically, if not Th (for Thursday)
                slot.ndays += "1"
            # now, for the rest of the days...
            for i in [("W", "2"), ("Th", "3"), ("F", "4")]:
                if i[0] in slot.days:
                    slot.ndays += i[1]
            # we make a small adjustment to campusLocation,
            # removing whitespace
            slot.campusLocation = slot.campusLocation.split()[0]
            # we make the prof name "first last" instead of
            # "last,first middle"
            if slot.instructor != "":
                s = slot.instructor.split(" ")
                for i in s:
                    if "," in i:
                        # we want the 2 words connected by the ","
                        slot.instructor = " ".join(reversed(list(
                            i.split(","))))
| shinexwang/Classy | Main/webParser.py | Python | apache-2.0 | 10,545 |
import threading
import time
import pyotherside
# If you try to instantiate a QObject, it's unbound
unbound = pyotherside.QObject()
print(unbound)
try:
    # Attribute assignment on an unbound QObject is expected to fail.
    unbound.a = 1
except Exception as e:
    print('Got exception:', e)
def do_something(bar):
    """Poll bar.dynamicFunction(1, 2, 3) once per second, forever.

    Runs on the daemon-less background thread started at the end of foo().
    """
    while True:
        print('got: ', bar.dynamicFunction(1, 2, 3))
        time.sleep(1)
def foo(bar, py):
    """Demo: exercise property access, method calls and error paths on a
    QObject reference (*bar*) and the PyOtherSide QML element (*py*).

    The exact print sequence is the demo's observable output; returns
    *bar* to show QObject references survive the Python->QML round trip.
    """
    # Printing the objects will give some info on the
    # QObject class and memory address
    print('got:', bar, py)
    # Ok, this is pretty wicked - we can now call into
    # the PyOtherSide QML element from within Python
    # (not that it's a good idea to do this, mind you..)
    print(py.evaluate)
    print(py.evaluate('3*3'))
    try:
        bar.i_am_pretty_sure_this_attr_does_not_exist = 147
    except Exception as e:
        print('Got exception (as expected):', e)
    try:
        bar.x = 'i dont think i can set this to a string'
    except Exception as e:
        print('Got exception (as expected):', e)
    # This doesn't work yet, because we can't convert a bound
    # member function to a Qt/QML type yet (fallback to None)
    try:
        bar.dynamicFunction(bar.dynamicFunction, 2, 3)
    except Exception as e:
        print('Got exception (as expected):', e)
    # Property access works just like expected
    print(bar.x, bar.color, bar.scale)
    bar.x *= 3
    # Printing a member function gives a bound method
    print(bar.dynamicFunction)
    # Calling a member function is just as easy
    result = bar.dynamicFunction(1, 2, 3)
    print('result:', result)
    try:
        # Unexpected keyword arguments are rejected by the bridge.
        bar.dynamicFunction(1, 2, 3, unexpected=123)
    except Exception as e:
        print('Got exception (as expected):', e)
    threading.Thread(target=do_something, args=[bar]).start()
    # Returning QObject references from Python also works
    return bar
| blueghost56/pyotherside | examples/qobject_reference.py | Python | isc | 1,837 |
#!/usr/bin/env python
import re,sys,os,subprocess,shlex,Queue
from threading import Thread
import urllib2, socket,sys,base64,ssl
from xml.dom.minidom import parse, parseString
# Path to the nikto executable used to build scan commands.
niktoPath = "/usr/bin/nikto"
#Bing Search API Account Key
account_key = ""
# Reverse-IP lookups via Bing; toggled off by the -nobing flag.
runBing = True
#CA certs http://curl.haxx.se/ca/cacert.pem
class Consumer(Thread):
    """Daemon worker thread: pops shell-command strings off *queue* and
    runs each one to completion (used to run the queued nikto scans)."""

    def __init__(self, queue=None):
        super(Consumer, self).__init__()
        self.daemon = True  # do not block interpreter exit
        self.queue = queue

    def run(self):
        # Loop forever; queue.join() in the producer is what ends the run.
        while True:
            cmd = self.queue.get()
            args = shlex.split(cmd)
            p = subprocess.Popen(args, stdout=subprocess.PIPE)
            retcode = p.wait()
            #p.communicate()
            self.queue.task_done()
def isOpen(ip, port, timeout=5):
    """Return True when a TCP connection to ip:port succeeds.

    *timeout* (seconds, new optional parameter) prevents a filtered port
    from hanging the scan; the default keeps existing calls working.
    The socket is always closed, even when connect() raises.
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.settimeout(timeout)
    try:
        s.connect((ip, int(port)))
        return True
    except (socket.error, OverflowError, ValueError):
        # Unresolvable host, refused/filtered port, or a bad port value.
        return False
    finally:
        s.close()
def getSSLCommonName(HOST, PORT):
    """Return the commonName from the TLS certificate at HOST:PORT,
    or "" when the handshake fails or no commonName is present.

    Always returns a string: the original could fall off the end and
    return None (crashing callers that take len() of the result) and
    only caught ssl.SSLError, so plain socket failures or a missing
    cacert.pem file propagated.
    """
    try:
        HOST = socket.getaddrinfo(HOST, PORT)[0][4][0]
        sock = socket.socket()
        sock.connect((HOST, PORT))
        sock = ssl.wrap_socket(sock,
                               #cert_reqs=ssl.CERT_REQUIRED,
                               cert_reqs=ssl.CERT_OPTIONAL,
                               #cert_reqs=ssl.CERT_NOT_REQUIRED,
                               ca_certs="cacert.pem"
                               )
        cert = sock.getpeercert()
        for field in cert['subject']:
            if field[0][0] == 'commonName':
                return field[0][1]
        return ""
    except (ssl.SSLError, socket.error, IOError, IndexError, KeyError):
        # Degrade to "" on any network/CA-file/cert-shape problem.
        return ""
def bing(account_key, ip):
    """Reverse-IP lookup via the Bing Search API (Python 2).

    *ip* may be a single IP string or a list of IPs.  Up to 200 results
    are paged 50 at a time; the operator interactively selects which
    discovered virtual hosts to keep.  When nothing is selected/found we
    fall back to the commonName of the SSL certificate on port 443.
    Returns the list of chosen hostnames.

    NOTE(review): indentation was reconstructed from a whitespace-mangled
    source.  In the list branch the loop variable ``count`` (the current
    IP) is clobbered by the display counter ``count = 1`` below, so the
    port-443 fallback there operates on the counter, not the IP --
    confirm against the upstream repository.
    """
    if(isinstance(ip,list)):
        for count in ip:
            count = count.strip()
            sites = []
            skip = 0
            top = 50
            while skip < 200:
                url = "https://api.datamarket.azure.com/Data.ashx/Bing/Search/v1/Web?Query='ip:%s'&$top=%s&$skip=%s&$format=Atom"% (count,top,skip)
                request = urllib2.Request(url)
                auth = base64.encodestring("%s:%s" % (account_key, account_key)).replace("\n", "")
                request.add_header("Authorization", "Basic %s" % auth)
                res = urllib2.urlopen(request)
                data = res.read()
                tempDomainList = []
                xmldoc = parseString(data)
                site_list = xmldoc.getElementsByTagName('d:Url')
                for site in site_list:
                    domain = site.childNodes[0].nodeValue
                    domain = domain.split("/")[2]
                    if domain not in sites:
                        if domain not in tempDomainList:
                            tempDomainList.append(domain)
                            #sites.append(domain)
                count = 1
                if len(tempDomainList)>1:
                    for i in tempDomainList:
                        print "("+str(count)+")\t"+i
                        count+=1
                    #print tempDomainList
                    print "[*] Enter the number followed by comma E.g. 1, 4, 10"
                    print "[*] To select all, key in 'ALL'. Leave it blank or key in 'NONE' to ignore all."
                    listInput = raw_input()
                    listInput = listInput.strip()
                    listInput = listInput.lower()
                    if len(listInput)>0:
                        if listInput == "all" and listInput != "none":
                            for x in tempDomainList:
                                sites.append(x)
                        elif listInput != "all" and listInput != "none":
                            inputList = listInput.split(",")
                            for x in inputList:
                                print tempDomainList[int(x)-1]
                                sites.append(tempDomainList[int(x)-1])
                skip += 50
            if(len(sites)==0):
                # Fallback: read the SSL certificate's commonName.
                if isOpen(count,443):
                    commonName=""
                    commonName=getSSLCommonName(count,443)
                    if(len(commonName)>0):
                        sites.append(commonName)
        return sites
    elif(isinstance(ip,str)):
        sites = []
        skip = 0
        top = 50
        while skip < 200:
            url = "https://api.datamarket.azure.com/Data.ashx/Bing/Search/v1/Web?Query='ip:%s'&$top=%s&$skip=%s&$format=Atom"% (ip,top,skip)
            request = urllib2.Request(url)
            auth = base64.encodestring("%s:%s" % (account_key, account_key)).replace("\n", "")
            request.add_header("Authorization", "Basic %s" % auth)
            res = urllib2.urlopen(request)
            data = res.read()
            xmldoc = parseString(data)
            site_list = xmldoc.getElementsByTagName('d:Url')
            tempDomainList = []
            for site in site_list:
                domain = site.childNodes[0].nodeValue
                domain = domain.split("/")[2]
                if domain not in sites:
                    if domain not in tempDomainList:
                        tempDomainList.append(domain)
                        #sites.append(domain)
            count = 1
            if len(tempDomainList)>1:
                for i in tempDomainList:
                    print "("+str(count)+")\t"+i
                    count+=1
                print "[*] Enter the number followed by comma E.g. 1, 4, 10"
                print "[*] To select all, key in 'ALL'. Leave it blank or key in 'NONE' to ignore all."
                listInput = raw_input()
                listInput = listInput.strip()
                listInput = listInput.lower()
                if len(listInput)>0:
                    if listInput == "all" and listInput != "none":
                        for x in tempDomainList:
                            sites.append(x)
                    elif listInput != "all" and listInput != "none":
                        inputList = listInput.split(",")
                        for x in inputList:
                            print tempDomainList[int(x)-1]
                            sites.append(tempDomainList[int(x)-1])
            #if len(tempDomainList)>1:
            #    print tempDomainList
            skip += 50
        if(len(sites)==0):
            # Fallback: read the SSL certificate's commonName.
            if isOpen(ip,443):
                commonName=""
                commonName=getSSLCommonName(ip,443)
                if(len(commonName)>0):
                    sites.append(commonName)
        return sites
def parseNmap(fname, child, displayOnly):
    """Walk an nmap .gnmap file and queue one nikto command per open
    http/https port; optionally resolve virtual hosts via Bing first.

    *child* is the worker-thread count, *displayOnly* just prints the
    commands without running them.

    NOTE(review): indentation was reconstructed from a whitespace-mangled
    source; per-host processing is nested under the duplicate-IP check so
    each host is handled once -- confirm against the upstream repository.
    """
    queue = Queue.Queue()
    ipList = []
    with open(fname) as f:
        count = 0
        content = f.readlines()
        for i in content:
            count += 1
            if(count>2):  # skip the .gnmap header lines
                i = i.strip()
                if 'http' in i:
                    result = re.search('[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}', i)
                    host = result.group(0).strip()
                    if host not in ipList:
                        ipList.append(host)
                        #Perform a reverse DNS lookup on Bing.com
                        sites = []
                        global runBing
                        if runBing==True:
                            try:
                                sites = bing(account_key,host)
                            except urllib2.HTTPError:
                                print "[*] Please check your Bing API Key"
                                sys.exit(0)
                        if len(sites)>0:
                            # One scan per discovered virtual host.
                            for site in sites:
                                strStart = i.index('Ports: ')+7
                                strEnd = len(i)
                                portString = i[strStart:strEnd]
                                portStringList = portString.split(",")
                                for port in portStringList:
                                    portNo = port.split("/")[0].strip()
                                    if "ssl|http" in port:
                                        if "open" in port:
                                            currentDir = os.getcwd()
                                            savePath = currentDir+"/nikto-"+host+"-port"+portNo+"-"+site+".txt"
                                            cmd = "/usr/bin/perl "+niktoPath+" -vhost "+site+" -maxtime 7200 -Cgidirs all -ssl -host "+host+" -port "+portNo+" -output "+savePath
                                            print cmd
                                            queue.put(cmd)
                                    elif "http" in port:
                                        if "open" in port:
                                            currentDir = os.getcwd()
                                            savePath = currentDir+"/nikto-"+host+"-port"+portNo+"-"+site+".txt"
                                            cmd = "/usr/bin/perl "+niktoPath+" -vhost "+site+" -maxtime 7200 -Cgidirs all -host "+host+" -port "+portNo+" -output "+savePath
                                            print cmd
                                            queue.put(cmd)
                        else:
                            # No virtual hosts known: scan the bare IP.
                            strStart = i.index('Ports: ')+7
                            strEnd = len(i)
                            portString = i[strStart:strEnd]
                            portStringList = portString.split(",")
                            for port in portStringList:
                                currentDir = os.getcwd()
                                portNo = port.split("/")[0].strip()
                                savePath = currentDir+"/nikto-"+host+"-port"+portNo+".txt"
                                if "ssl|http" in port:
                                    if "open" in port:
                                        cmd = "/usr/bin/perl "+niktoPath+" -maxtime 7200 -Cgidirs all -ssl -host "+host+" -port "+portNo+" -output "+savePath
                                        print cmd
                                        queue.put(cmd)
                                elif "http" in port:
                                    if "open" in port:
                                        cmd = "/usr/bin/perl "+niktoPath+" -maxtime 7200 -Cgidirs all -host "+host+" -port "+portNo+" -output "+savePath
                                        print cmd
                                        queue.put(cmd)
    if displayOnly==False:
        # Start the worker pool and block until every queued scan is done.
        for i in range(int(child)):
            consumer = Consumer(queue)
            consumer.start()
        queue.join()
def options(arguments):
    """Parse the flag/value command line (sys.argv) and run parseNmap."""
    count = 0
    child = 0
    displayOnly = False
    filename = ""
    for arg in arguments:
        # Each flag's value, where present, is the following argv entry.
        if arg == "-child":
            child = arguments[count+1]
        if arg == "-file":
            filename = arguments[count+1]
        if arg == "-nobing":
            global runBing
            runBing = False
        if arg == "-display":
            displayOnly = True
        count+=1
    print filename
    parseNmap(filename,child,displayOnly)
def showhelp():
    """Print the usage banner (user-facing text reproduced verbatim)."""
    print """
#####################################################
# niktoHelper.py #
# Run Nikto against http/https services in .gnmap #
# visit morgoroth.com/blog/pentest-scripts#
#####################################################
Usage: python niktoHelper.py [OPTIONS]
[OPTIONS]
-file [Nmap .gnmap File]
-child [Num of Threads]
-nobing [Do not run Bing reverse IP]
-display[Print only to screen. Do not run Nikto]
"""
if __name__ == '__main__':
    # Need at least one flag and its value; otherwise show usage.
    if len(sys.argv) <= 2:
        showhelp()
        sys.exit()
    else:
        options(sys.argv)
| morgoroth/pentest-scripts | scanners/niktohelper/niktohelper.py | Python | apache-2.0 | 9,571 |
#!/usr/bin/env python3
import os
import re
import mimetypes
import subprocess
import urllib
import random
import json
import requests
import io
import PIL.Image
import PIL.ImageChops
from PIL import ImageDraw
from PIL import ImageFont
import numpy as np
import lxml.html
# Institute proxy required for outbound HTTP(S) from the intranet.
os.environ[ 'http_proxy' ] = 'http://proxy.ncbs.res.in:3128'
os.environ[ 'https_proxy' ] = 'http://proxy.ncbs.res.in:3128'
base_url_ = 'https://intranet.ncbs.res.in/photography'
# Downloaded wallpapers land here; created on first run.
background_dir_ = './_backgrounds'
if not os.path.exists( background_dir_ ):
    os.makedirs( background_dir_ )
def log(msg):
    """Print *msg* to stdout.

    The original also appended to /tmp/a.txt, but that path was disabled
    by an early ``return`` making it unreachable; the dead code has been
    removed. Behavior is unchanged.
    """
    print(msg)
def is_url_image(url):
    """True when the URL's file extension maps to an image/* MIME type.

    Returns None (falsy) when the type cannot be guessed at all.
    """
    mimetype, _encoding = mimetypes.guess_type(url)
    if not mimetype:
        return mimetype
    return mimetype.startswith('image')
def is_image_and_ready(url):
    """Alias kept for readability at the call sites; simply defers to
    is_url_image()."""
    return is_url_image(url)
def writeOnImage(img, caption, copyright='(c) NCBS Photography Club'):
    """Stamp *caption* (first 80 chars) and *copyright* onto *img*.

    Text colour is black or white, chosen to contrast with the mean
    brightness sampled from the caption region; returns the mutated img.
    """
    draw = ImageDraw.Draw(img)
    # font = ImageFont.truetype(<font-file>, <font-size>)
    small_font = ImageFont.truetype("./data/OpenSans-Regular.ttf", 12)
    caption_font = ImageFont.truetype("./data/OpenSans-Regular.ttf", 20)
    # Sample the background to pick a contrasting text colour.
    # NOTE(review): numpy indexes [row, col]; the slice is kept exactly as
    # the original sampled it -- confirm it matches the text box at (10,15).
    pixels = np.asarray(img)
    brightness = np.mean(pixels[10:300, 15:100, :])
    text_color = (0, 0, 0) if brightness > 125 else (255, 255, 255)
    draw.text((10, 15), caption[0:80], text_color, font=caption_font)
    draw.text((10, 50), copyright, text_color, font=small_font)
    return img
def crop_surrounding_whitespace(image):
    """Remove surrounding empty space around an image.

    This implemenation assumes that the surrounding space has the same
    colour as the top leftmost pixel.

    :param image: PIL image
    :rtype: PIL image
    """
    corner_color = image.getpixel((0, 0))
    background = PIL.Image.new(image.mode, image.size, corner_color)
    bbox = PIL.ImageChops.difference(image, background).getbbox()
    if bbox:
        return image.crop(bbox)
    # No difference found: nothing to trim, hand back the original.
    return image
def download_url(url, caption, copyright='(c) NCBS Photography Club', outpath=None):
    """Fetch *url*, trim its borders, resize to 800px wide, stamp the
    caption/copyright and save to *outpath*.

    When *outpath* is None a name is derived from the URL inside
    background_dir_. Existing files are skipped; any failure is printed
    and swallowed so one bad image does not abort the batch.
    """
    if outpath is None:
        outpath = os.path.join(background_dir_, os.path.basename(url) + '.jpg')
    print('[INFO] Downloading %s -> %s' % (url, outpath))
    if os.path.exists(outpath):
        print('File already downloaded')
        return
    try:
        response = requests.get(url)
        img = PIL.Image.open(io.BytesIO(response.content))
        img = crop_surrounding_whitespace(img)
        width = 800
        # Preserve the aspect ratio at the fixed 800px width.
        height = int(float(img.size[1]) * width / float(img.size[0]))
        img = img.resize((width, height), PIL.Image.ANTIALIAS)
        writeOnImage(img, caption, copyright)
        img.save(outpath)
    except Exception as e:
        # Best effort: report and continue with the next image.
        print(e)
def get_images_from_intranet( ):
    """Scrape the intranet photography page and download every image
    linked from its tables, captioned with the link text.

    Returns 1 when the page cannot be fetched; otherwise None.
    """
    global base_url_
    html = None
    try:
        r = requests.get( base_url_ )
        html = r.text
    except Exception as e:
        log( 'failed to open %s' % e )
        return 1
    doc = lxml.html.fromstring( html )
    tables = doc.xpath( '//table' )
    images = [ ]
    # Each table row may carry one caption link and one image link.
    for table in tables:
        trs = table.xpath( './/tr' )
        for tr in trs:
            image = {}
            tds = tr.xpath( './/td' )
            for td in tds:
                links = td.xpath( './/a' )
                if links:
                    for l in links:
                        if l.text:
                            image[ 'caption' ] = l.text
                        if is_url_image( l.attrib[ 'href' ] ):
                            image[ 'url' ] = l.attrib[ 'href' ]
            images.append( image )
    for im in images:
        if not im:
            continue
        # NOTE(review): a row with a caption but no image link would
        # raise KeyError here -- confirm the page always pairs them.
        url = im[ 'url' ]
        caption = im.get( 'caption', '' )
        if is_image_and_ready( url ):
            download_url( url, caption )
def img_to_fname(img):
    """Build a filesystem-safe '<author>_<title>.jpg' file name."""
    raw = '{}_{}'.format(img['author'], img['title'])
    # Drop every non-word character (whitespace included).
    safe = re.sub(r'\W+|(\s+)', '', raw)
    return safe + '.jpg'
def get_images_from_dropbox( ):
    """Run the PhotographyCompetition submodule to regenerate
    output.json, then download every attributed, sufficiently voted
    entry into background_dir_."""
    log( 'Fetching from dropbox' )
    data = None
    # Run submodule to get the data.
    try:
        subprocess.run( [ 'python3', 'main.py', '-t', 'json' ], shell = False
                , cwd = './PhotographyCompetition/'
                )
        with open( os.path.join( './PhotographyCompetition', 'output.json' )) as f:
            data = json.load( f )
    except Exception as e:
        # NOTE(review): data stays None here, so the loop below would
        # raise TypeError -- confirm whether that crash is acceptable.
        log( 'Failed to read JSON' )
        print( e )
    for k in data:
        img = data[k]
        author = img['author']
        url = img[ 'photo_url']
        caption = img[ 'title' ]
        fname = img_to_fname( img )
        outfile = os.path.join( background_dir_, fname )
        # print( img[ 'author' ] )
        if img['author'] in [ 'N/A', 'NA', None ]:
            # Skip anonymous / hidden authors.
            print( '[WARN] No author or hidden' )
            continue
        if img['average_votes'] is None:
            continue
        if float( img[ 'average_votes'] ) < 3.0:
            print( '[INFO] Not enough votes. Ignoring' )
            # Delete this if it was already there.
            if os.path.exists( outfile ):
                print( ' ... deleting' )
                os.remove( outfile )
            continue
        if is_image_and_ready( url ):
            download_url( url, caption, '(c) %s' % author, outfile )
def main():
    """Entry point: pick the image source and fetch wallpapers.

    The intranet-scraping path is kept as a fallback but is currently
    disabled in favour of the dropbox/JSON pipeline.
    """
    use_json = True
    if use_json:
        log('using json')
        get_images_from_dropbox()
    else:
        log('using intranet')
        get_images_from_intranet()
if __name__ == '__main__':
    # Script entry point: fetch the latest competition images.
    log( 'running' )
    main()
    log( 'Finished' )
| dilawar/ncbs-hippo | fetch_backgrounds.py | Python | mit | 5,685 |
import unittest
import docker
from .. import helpers
from .base import TEST_API_VERSION
class ServiceTest(unittest.TestCase):
    """Integration tests for swarm services (require a live docker daemon)."""

    @classmethod
    def setUpClass(cls):
        # Start every run from a clean single-node swarm.
        client = docker.from_env(version=TEST_API_VERSION)
        helpers.force_leave_swarm(client)
        client.swarm.init('127.0.0.1', listen_addr=helpers.swarm_listen_addr())

    @classmethod
    def tearDownClass(cls):
        helpers.force_leave_swarm(docker.from_env(version=TEST_API_VERSION))
    def test_create(self):
        """services.create routes service-level and container-level
        arguments to the right parts of the service spec."""
        client = docker.from_env(version=TEST_API_VERSION)
        name = helpers.random_name()
        service = client.services.create(
            # create arguments
            name=name,
            labels={'foo': 'bar'},
            # ContainerSpec arguments
            image="alpine",
            command="sleep 300",
            container_labels={'container': 'label'}
        )
        assert service.name == name
        assert service.attrs['Spec']['Labels']['foo'] == 'bar'
        container_spec = service.attrs['Spec']['TaskTemplate']['ContainerSpec']
        assert "alpine" in container_spec['Image']
        assert container_spec['Labels'] == {'container': 'label'}

    def test_create_with_network(self):
        """A service created with networks=[id] records that network as a
        target in its task template."""
        client = docker.from_env(version=TEST_API_VERSION)
        name = helpers.random_name()
        network = client.networks.create(
            helpers.random_name(), driver='overlay'
        )
        service = client.services.create(
            # create arguments
            name=name,
            # ContainerSpec arguments
            image="alpine",
            command="sleep 300",
            networks=[network.id]
        )
        assert 'Networks' in service.attrs['Spec']['TaskTemplate']
        networks = service.attrs['Spec']['TaskTemplate']['Networks']
        assert len(networks) == 1
        assert networks[0]['Target'] == network.id
    def test_get(self):
        """services.get(id) returns the same service that was created."""
        client = docker.from_env(version=TEST_API_VERSION)
        name = helpers.random_name()
        service = client.services.create(
            name=name,
            image="alpine",
            command="sleep 300"
        )
        service = client.services.get(service.id)
        assert service.name == name

    def test_list_remove(self):
        """A service appears in list() until remove() is called."""
        client = docker.from_env(version=TEST_API_VERSION)
        service = client.services.create(
            name=helpers.random_name(),
            image="alpine",
            command="sleep 300"
        )
        assert service in client.services.list()
        service.remove()
        assert service not in client.services.list()

    def test_tasks(self):
        """tasks() returns only the tasks belonging to each service."""
        client = docker.from_env(version=TEST_API_VERSION)
        service1 = client.services.create(
            name=helpers.random_name(),
            image="alpine",
            command="sleep 300"
        )
        service2 = client.services.create(
            name=helpers.random_name(),
            image="alpine",
            command="sleep 300"
        )
        # Busy-wait: the swarm scheduler creates tasks asynchronously.
        tasks = []
        while len(tasks) == 0:
            tasks = service1.tasks()
        assert len(tasks) == 1
        assert tasks[0]['ServiceID'] == service1.id
        tasks = []
        while len(tasks) == 0:
            tasks = service2.tasks()
        assert len(tasks) == 1
        assert tasks[0]['ServiceID'] == service2.id
def test_update(self):
client = docker.from_env(version=TEST_API_VERSION)
service = client.services.create(
# create arguments
name=helpers.random_name(),
# ContainerSpec arguments
image="alpine",
command="sleep 300"
)
service.update(
# create argument
name=service.name,
# ContainerSpec argument
command="sleep 600"
)
service.reload()
container_spec = service.attrs['Spec']['TaskTemplate']['ContainerSpec']
assert container_spec['Command'] == ["sleep", "600"]
def test_update_retains_service_labels(self):
client = docker.from_env(version=TEST_API_VERSION)
service = client.services.create(
# create arguments
name=helpers.random_name(),
labels={'service.label': 'SampleLabel'},
# ContainerSpec arguments
image="alpine",
command="sleep 300"
)
service.update(
# create argument
name=service.name,
# ContainerSpec argument
command="sleep 600"
)
service.reload()
labels = service.attrs['Spec']['Labels']
assert labels == {'service.label': 'SampleLabel'}
    def test_update_retains_container_labels(self):
        """update() without container_labels= must preserve container labels."""
        client = docker.from_env(version=TEST_API_VERSION)
        service = client.services.create(
            # create arguments
            name=helpers.random_name(),
            # ContainerSpec arguments
            image="alpine",
            command="sleep 300",
            container_labels={'container.label': 'SampleLabel'}
        )
        service.update(
            # create argument
            name=service.name,
            # ContainerSpec argument
            command="sleep 600"
        )
        service.reload()
        container_spec = service.attrs['Spec']['TaskTemplate']['ContainerSpec']
        assert container_spec['Labels'] == {'container.label': 'SampleLabel'}
    def test_update_remove_service_labels(self):
        """Passing labels={} to update() explicitly clears service labels."""
        client = docker.from_env(version=TEST_API_VERSION)
        service = client.services.create(
            # create arguments
            name=helpers.random_name(),
            labels={'service.label': 'SampleLabel'},
            # ContainerSpec arguments
            image="alpine",
            command="sleep 300"
        )
        service.update(
            # create argument
            name=service.name,
            labels={},
            # ContainerSpec argument
            command="sleep 600"
        )
        service.reload()
        assert not service.attrs['Spec'].get('Labels')
    def test_scale_service(self):
        """Scaling to 2 replicas adds a task without clobbering ContainerSpec."""
        client = docker.from_env(version=TEST_API_VERSION)
        service = client.services.create(
            # create arguments
            name=helpers.random_name(),
            # ContainerSpec arguments
            image="alpine",
            command="sleep 300"
        )
        # NOTE(review): these polling loops have no timeout; a scheduler
        # failure would hang the test run indefinitely.
        tasks = []
        while len(tasks) == 0:
            tasks = service.tasks()
        assert len(tasks) == 1
        service.update(
            mode=docker.types.ServiceMode('replicated', replicas=2),
        )
        while len(tasks) == 1:
            tasks = service.tasks()
        assert len(tasks) >= 2
        # check that the container spec is not overridden with None
        service.reload()
        spec = service.attrs['Spec']['TaskTemplate']['ContainerSpec']
        assert spec.get('Command') == ['sleep', '300']
    @helpers.requires_api_version('1.25')
    def test_restart_service(self):
        """A force_update via update() bumps the service version."""
        client = docker.from_env(version=TEST_API_VERSION)
        service = client.services.create(
            # create arguments
            name=helpers.random_name(),
            # ContainerSpec arguments
            image="alpine",
            command="sleep 300"
        )
        initial_version = service.version
        service.update(
            # create argument
            name=service.name,
            # task template argument
            force_update=10,
            # ContainerSpec argument
            command="sleep 600"
        )
        service.reload()
        assert service.version > initial_version
| vpetersson/docker-py | tests/integration/models_services_test.py | Python | apache-2.0 | 7,604 |
import logging
from urllib.parse import urlparse, parse_qs
from flask import current_app, request, url_for
from os_api_cache import get_os_cache
def service_for_path(path, query):
    """Map a request path (plus query string) to ``(package_id, service)``.

    Returns the dataset/package id and the bare service name (e.g.
    ``'aggregate'``) for known API endpoints, or ``(None, None)`` when the
    path matches no known service marker.
    """
    # Tuple, not set: deterministic matching order when a path could
    # contain more than one marker.
    markers = ('/aggregate', '/members/', '/facts', '/package', '/model', '/loader')
    for marker in markers:
        if marker in path:
            package_id = path.split(marker)[0].split('/')[-1]
            if package_id == '2':
                # Legacy v2-style URL ('/api/2/<service>?dataset=...'):
                # the dataset id lives in the query string, not the path.
                # NOTE(review): the '2' sentinel presumably matches the API
                # version path segment — confirm against the routing layer.
                qs = parse_qs(query)
                # parse_qs values are lists; take the first entry. The old
                # code did str(qs.get('dataset')) which stringified the
                # whole list (e.g. "['x']") or produced the string 'None'.
                values = qs.get('dataset')
                package_id = values[0] if values else None
            service = marker.replace('/', '')
            return package_id, service
    return None, None
def return_cached():
    """Flask before_request hook: count the request and, when possible,
    answer it straight from the OS cache. Returning a response object here
    short-circuits the normal view function; returning None (implicitly)
    lets the request proceed."""
    cache = current_app.extensions.get('cache')
    loader = current_app.extensions.get('loader')
    stats = current_app.extensions.get('stats')
    o = urlparse(request.url)
    stats.increment('openspending.api.requests')
    package_id, service = service_for_path(o.path, o.query)
    if service is not None:
        stats.increment('openspending.api.requests.' + service)
        # Never serve loader requests from cache; url_for is only evaluated
        # when a loader extension is actually installed.
        if cache is not None \
                and not (loader and o.path.startswith(url_for('FDPLoader.load'))):
            response = cache.get_from_cache(package_id, o.query, o.path)
            if response:
                stats.increment('openspending.api.cache.hits')
                # Marker attribute so cache_response() won't re-store it.
                response.from_cache = True
                response.headers.add('X-OpenSpending-Cache', 'true')
                response.headers.add('X-OpenSpending-PackageId', package_id)
                return response
            stats.increment('openspending.api.cache.misses')
def cache_response(response):
    """Flask after_request hook: record response stats and store cacheable
    200 responses (those not already served from cache) in the OS cache."""
    extensions = current_app.extensions
    cache = extensions.get('cache')
    stats = extensions.get('stats')
    parsed = urlparse(request.url)
    stats.increment('openspending.api.responses.%d' % response.status_code)
    should_store = (
        cache is not None
        and response.status_code == 200
        and not hasattr(response, 'from_cache')
    )
    if should_store:
        package_id, _ = service_for_path(parsed.path, parsed.query)
        if package_id is not None:
            try:
                cache.put_in_cache(package_id, parsed.query, parsed.path, response)
            except Exception:
                # Best-effort caching: log and count, never fail the request.
                logging.exception('There was a problem caching the response')
                stats.increment('openspending.api.cache.fail')
            response.headers.add('X-OpenSpending-Cache', 'false')
            response.headers.add('X-OpenSpending-PackageId', package_id)
    return response
def setup_caching(app):
    """Install the OS cache hooks on *app* when a cache backend exists."""
    os_cache = get_os_cache()
    if os_cache is None:
        return
    app.extensions['cache'] = os_cache
    app.before_request(return_cached)
    app.after_request(cache_response)
| openspending/os-api | os_api/cache.py | Python | mit | 2,544 |
###############################################################################
# lazyflow: data flow based lazy parallel computation framework
#
# Copyright (C) 2011-2014, the ilastik developers
# <[email protected]>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the Lesser GNU General Public License
# as published by the Free Software Foundation; either version 2.1
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# See the files LICENSE.lgpl2 and LICENSE.lgpl3 for full text of the
# GNU Lesser General Public License version 2.1 and 3 respectively.
# This information is also available on the ilastik web site at:
# http://ilastik.org/license/
###############################################################################
import numpy
import vigra
from lazyflow.roi import TinyVector
from lazyflow.graph import Operator, InputSlot, OutputSlot
class OpCrosshairMarkers( Operator ):
    """
    Given a list of 3D coordinates and a volume with up to 5 dimensions,
    produces a volume with 3D 'crosshair' markers centered at each
    coordinate, with zeros elsewhere.
    """
    Input = InputSlot() # Used for meta-data
    PointList = InputSlot() # list of 3d coordinates (dtype=object)
    CrosshairRadius = InputSlot(value=5)
    Output = OutputSlot()
    def setupOutputs(self):
        # Same shape/axes as Input, but the marker volume is uint8 (0/1).
        self.Output.meta.assignFrom( self.Input.meta )
        self.Output.meta.dtype = numpy.uint8
    def execute(self, slot, subindex, roi, result):
        """Fill *result* with 1s along the three axis-aligned crosshair
        segments of each point that intersects the requested roi."""
        assert slot == self.Output, "Unknown slot: {}".format( slot.name )
        radius = self.CrosshairRadius.value
        points = map(TinyVector, self.PointList.value)
        result[:] = 0
        # View the output buffer with the correct axistags so it can be
        # reordered into a fixed x,y,z layout for the drawing code below.
        result_view = result.view(vigra.VigraArray)
        result_view.axistags = self.Output.meta.axistags
        result_3d = result_view.withAxes(*'xyz')
        axiskeys = self.Output.meta.getAxisKeys()
        roi_start_3d = TinyVector(roi.start)
        roi_stop_3d = TinyVector(roi.stop)
        # Drop channel/time coordinates (if present) so the roi offset
        # matches the 3D point coordinates.
        try:
            roi_start_3d.pop( axiskeys.index('c') )
            roi_stop_3d.pop( axiskeys.index('c') )
        except ValueError:
            pass
        try:
            roi_start_3d.pop( axiskeys.index('t') )
            roi_stop_3d.pop( axiskeys.index('t') )
        except ValueError:
            pass
        for point3d in points:
            # Translate the point into roi-local coordinates.
            point3d -= roi_start_3d
            cross_min = point3d - radius
            cross_max = point3d + radius+1
            # If the cross would be entirely out-of-view, skip it.
            if (cross_max < [0,0,0]).any() or \
               (cross_min >= result_3d.shape).any():
                continue
            # Clip the crosshair extents to the roi window.
            cross_min = numpy.maximum(cross_min, (0,0,0))
            cross_max = numpy.minimum(cross_max, result_3d.shape)
            x,y,z = point3d
            x1,y1,z1 = cross_min
            x2,y2,z2 = cross_max
            # Draw each of the three segments only if its fixed coordinates
            # fall inside the view (the varying coordinate is already clipped).
            if 0 <= y < result_3d.shape[1] and 0 <= z < result_3d.shape[2]:
                result_3d[x1:x2, y, z ] = 1
            if 0 <= x < result_3d.shape[0] and 0 <= z < result_3d.shape[2]:
                result_3d[x, y1:y2, z ] = 1
            if 0 <= x < result_3d.shape[0] and 0 <= y < result_3d.shape[1]:
                result_3d[x, y, z1:z2] = 1
        return result
    def propagateDirty(self, slot, subindex, roi):
        # Any change of points or radius can move markers anywhere: mark
        # the whole output dirty rather than computing affected regions.
        if slot == self.PointList or slot == self.CrosshairRadius:
            self.Output.setDirty()
        else:
            assert slot == self.Input, "Unknown slot: {}".format( slot.name )
| stuarteberg/lazyflow | lazyflow/operators/opCrosshairMarkers.py | Python | lgpl-3.0 | 3,883 |
# Copyright (C) 2017 Johnny Vestergaard <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
from heralding.capabilities.handlerbase import HandlerBase
from heralding.capabilities.http import Http
logger = logging.getLogger(__name__)
class https(Http, HandlerBase):
    """HTTPS capability.

    Behaves exactly like the plain Http capability; the framework wraps it
    in SSL because, by convention, every capability class whose name ends
    with the letter 's' gets wrapped in SSL."""
| johnnykv/heralding | heralding/capabilities/https.py | Python | gpl-3.0 | 1,044 |
# -*- coding: utf-8 -*-
#
# This file is part of PyBuilder
#
# Copyright 2011-2020 PyBuilder Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ast
from unittest import TestCase
from pybuilder.plugins.python.vendorize_plugin import ImportTransformer
__author__ = "Arcadiy Ivanov"
class ImportTransformerTests(TestCase):
    """Verify that ImportTransformer rewrites imports of vendorized packages
    into relative imports, with dot-depth matching the module's nesting."""
    def test_absolute_imports(self):
        """'import b' becomes a relative 'from . import b' (depth grows
        with package nesting; __init__.py counts as one level deeper)."""
        self.assertEqual(self.get_transformed_source("""import b
""", "/vendor/a.py", "/vendor", ["a", "b"]), """from . import b
""")
        self.assertEqual(self.get_transformed_source("""import b
""", "/vendor/a/__init__.py", "/vendor", ["a", "b"]), """from .. import b
""")
        self.assertEqual(self.get_transformed_source("""import b
""", "/vendor/a/x.py", "/vendor", ["a", "b"]), """from .. import b
""")
        self.assertEqual(self.get_transformed_source("""import b
""", "/vendor/a/x/__init__.py", "/vendor", ["a", "b"]), """from ... import b
""")
    def test_relative_imports(self):
        """'from b import x' gets leading dots prepended to the package."""
        self.assertEqual(self.get_transformed_source("""from b import x
""", "/vendor/a.py", "/vendor", ["a", "b"]), """from .b import x
""")
        self.assertEqual(self.get_transformed_source("""from b import x
""", "/vendor/a/__init__.py", "/vendor", ["a", "b"]), """from ..b import x
""")
        self.assertEqual(self.get_transformed_source("""from b import x
""", "/vendor/a/x.py", "/vendor", ["a", "b"]), """from ..b import x
""")
        self.assertEqual(self.get_transformed_source("""from b import x
""", "/vendor/a/x/__init__.py", "/vendor", ["a", "b"]), """from ...b import x
""")
    def get_transformed_source(self, source, source_path, vendorized_path, vendorized_packages):
        """Parse *source*, run ImportTransformer over it, and return the
        rewritten source text."""
        parsed_ast = ast.parse(source, filename=source_path)
        it = ImportTransformer(source_path, source, vendorized_path, vendorized_packages, [])
        it.visit(parsed_ast)
        return it.transformed_source
| pybuilder/pybuilder | src/unittest/python/plugins/python/vendorize_plugin_tests.py | Python | apache-2.0 | 2,534 |
import importlib
import importlib.abc
import importlib.util
import os
from platform import python_version
import re
import string
import sys
from tkinter import *
import tkinter.simpledialog as tkSimpleDialog
import tkinter.messagebox as tkMessageBox
import traceback
import webbrowser
from idlelib.MultiCall import MultiCallCreator
from idlelib import idlever
from idlelib import WindowList
from idlelib import SearchDialog
from idlelib import GrepDialog
from idlelib import ReplaceDialog
from idlelib import PyParse
from idlelib.configHandler import idleConf
from idlelib import aboutDialog, textView, configDialog
from idlelib import macosxSupport
# The default tab setting for a Text widget, in average-width characters.
TK_TABWIDTH_DEFAULT = 8
def _sphinx_version():
"Format sys.version_info to produce the Sphinx version string used to install the chm docs"
major, minor, micro, level, serial = sys.version_info
release = '%s%s' % (major, minor)
release += '%s' % (micro,)
if level == 'candidate':
release += 'rc%s' % (serial,)
elif level != 'final':
release += '%s%s' % (level[0], serial)
return release
class HelpDialog(object):
    """Modeless singleton window that displays help.txt."""
    def __init__(self):
        self.parent = None # parent of help window
        self.dlg = None # the help window itself
    def display(self, parent, near=None):
        """ Display the help dialog.

            parent - parent widget for the help window

            near - a Toplevel widget (e.g. EditorWindow or PyShell)
                   to use as a reference for placing the help window
        """
        # Reuse the existing window if one is already open.
        if self.dlg is None:
            self.show_dialog(parent)
        if near:
            self.nearwindow(near)
    def show_dialog(self, parent):
        """Create the (modeless) help window showing help.txt."""
        self.parent = parent
        fn=os.path.join(os.path.abspath(os.path.dirname(__file__)),'help.txt')
        self.dlg = dlg = textView.view_file(parent,'Help',fn, modal=False)
        dlg.bind('<Destroy>', self.destroy, '+')
    def nearwindow(self, near):
        # Place the help dialog near the window specified by parent.
        # Note - this may not reposition the window in Metacity
        # if "/apps/metacity/general/disable_workarounds" is enabled
        dlg = self.dlg
        geom = (near.winfo_rootx() + 10, near.winfo_rooty() + 10)
        dlg.withdraw()
        dlg.geometry("=+%d+%d" % geom)
        dlg.deiconify()
        dlg.lift()
    def destroy(self, ev=None):
        # Forget references when the Tk window is destroyed so display()
        # will create a fresh window next time.
        self.dlg = None
        self.parent = None
helpDialog = HelpDialog() # singleton instance
def _Help_dialog(parent):  # wrapper for htest
    """Show the singleton Help dialog (signature required by htest)."""
    helpDialog.show_dialog(parent)
class EditorWindow(object):
from idlelib.Percolator import Percolator
from idlelib.ColorDelegator import ColorDelegator
from idlelib.UndoDelegator import UndoDelegator
from idlelib.IOBinding import IOBinding, filesystemencoding, encoding
from idlelib import Bindings
from tkinter import Toplevel
from idlelib.MultiStatusBar import MultiStatusBar
help_url = None
def __init__(self, flist=None, filename=None, key=None, root=None):
if EditorWindow.help_url is None:
dochome = os.path.join(sys.base_prefix, 'Doc', 'index.html')
if sys.platform.count('linux'):
# look for html docs in a couple of standard places
pyver = 'python-docs-' + '%s.%s.%s' % sys.version_info[:3]
if os.path.isdir('/var/www/html/python/'): # "python2" rpm
dochome = '/var/www/html/python/index.html'
else:
basepath = '/usr/share/doc/' # standard location
dochome = os.path.join(basepath, pyver,
'Doc', 'index.html')
elif sys.platform[:3] == 'win':
chmfile = os.path.join(sys.base_prefix, 'Doc',
'Python%s.chm' % _sphinx_version())
if os.path.isfile(chmfile):
dochome = chmfile
elif sys.platform == 'darwin':
# documentation may be stored inside a python framework
dochome = os.path.join(sys.base_prefix,
'Resources/English.lproj/Documentation/index.html')
dochome = os.path.normpath(dochome)
if os.path.isfile(dochome):
EditorWindow.help_url = dochome
if sys.platform == 'darwin':
# Safari requires real file:-URLs
EditorWindow.help_url = 'file://' + EditorWindow.help_url
else:
EditorWindow.help_url = "http://docs.python.org/%d.%d" % sys.version_info[:2]
currentTheme=idleConf.CurrentTheme()
self.flist = flist
root = root or flist.root
self.root = root
try:
sys.ps1
except AttributeError:
sys.ps1 = '>>> '
self.menubar = Menu(root)
self.top = top = WindowList.ListedToplevel(root, menu=self.menubar)
if flist:
self.tkinter_vars = flist.vars
#self.top.instance_dict makes flist.inversedict available to
#configDialog.py so it can access all EditorWindow instances
self.top.instance_dict = flist.inversedict
else:
self.tkinter_vars = {} # keys: Tkinter event names
# values: Tkinter variable instances
self.top.instance_dict = {}
self.recent_files_path = os.path.join(idleConf.GetUserCfgDir(),
'recent-files.lst')
self.text_frame = text_frame = Frame(top)
self.vbar = vbar = Scrollbar(text_frame, name='vbar')
self.width = idleConf.GetOption('main', 'EditorWindow',
'width', type='int')
text_options = {
'name': 'text',
'padx': 5,
'wrap': 'none',
'width': self.width,
'height': idleConf.GetOption('main', 'EditorWindow',
'height', type='int')}
if TkVersion >= 8.5:
# Starting with tk 8.5 we have to set the new tabstyle option
# to 'wordprocessor' to achieve the same display of tabs as in
# older tk versions.
text_options['tabstyle'] = 'wordprocessor'
self.text = text = MultiCallCreator(Text)(text_frame, **text_options)
self.top.focused_widget = self.text
self.createmenubar()
self.apply_bindings()
self.top.protocol("WM_DELETE_WINDOW", self.close)
self.top.bind("<<close-window>>", self.close_event)
if macosxSupport.isAquaTk():
# Command-W on editorwindows doesn't work without this.
text.bind('<<close-window>>', self.close_event)
# Some OS X systems have only one mouse button,
# so use control-click for pulldown menus there.
# (Note, AquaTk defines <2> as the right button if
# present and the Tk Text widget already binds <2>.)
text.bind("<Control-Button-1>",self.right_menu_event)
else:
# Elsewhere, use right-click for pulldown menus.
text.bind("<3>",self.right_menu_event)
text.bind("<<cut>>", self.cut)
text.bind("<<copy>>", self.copy)
text.bind("<<paste>>", self.paste)
text.bind("<<center-insert>>", self.center_insert_event)
text.bind("<<help>>", self.help_dialog)
text.bind("<<python-docs>>", self.python_docs)
text.bind("<<about-idle>>", self.about_dialog)
text.bind("<<open-config-dialog>>", self.config_dialog)
text.bind("<<open-module>>", self.open_module)
text.bind("<<do-nothing>>", lambda event: "break")
text.bind("<<select-all>>", self.select_all)
text.bind("<<remove-selection>>", self.remove_selection)
text.bind("<<find>>", self.find_event)
text.bind("<<find-again>>", self.find_again_event)
text.bind("<<find-in-files>>", self.find_in_files_event)
text.bind("<<find-selection>>", self.find_selection_event)
text.bind("<<replace>>", self.replace_event)
text.bind("<<goto-line>>", self.goto_line_event)
text.bind("<<smart-backspace>>",self.smart_backspace_event)
text.bind("<<newline-and-indent>>",self.newline_and_indent_event)
text.bind("<<smart-indent>>",self.smart_indent_event)
text.bind("<<indent-region>>",self.indent_region_event)
text.bind("<<dedent-region>>",self.dedent_region_event)
text.bind("<<comment-region>>",self.comment_region_event)
text.bind("<<uncomment-region>>",self.uncomment_region_event)
text.bind("<<tabify-region>>",self.tabify_region_event)
text.bind("<<untabify-region>>",self.untabify_region_event)
text.bind("<<toggle-tabs>>",self.toggle_tabs_event)
text.bind("<<change-indentwidth>>",self.change_indentwidth_event)
text.bind("<Left>", self.move_at_edge_if_selection(0))
text.bind("<Right>", self.move_at_edge_if_selection(1))
text.bind("<<del-word-left>>", self.del_word_left)
text.bind("<<del-word-right>>", self.del_word_right)
text.bind("<<beginning-of-line>>", self.home_callback)
if flist:
flist.inversedict[self] = key
if key:
flist.dict[key] = self
text.bind("<<open-new-window>>", self.new_callback)
text.bind("<<close-all-windows>>", self.flist.close_all_callback)
text.bind("<<open-class-browser>>", self.open_class_browser)
text.bind("<<open-path-browser>>", self.open_path_browser)
self.set_status_bar()
vbar['command'] = text.yview
vbar.pack(side=RIGHT, fill=Y)
text['yscrollcommand'] = vbar.set
fontWeight = 'normal'
if idleConf.GetOption('main', 'EditorWindow', 'font-bold', type='bool'):
fontWeight='bold'
text.config(font=(idleConf.GetOption('main', 'EditorWindow', 'font'),
idleConf.GetOption('main', 'EditorWindow',
'font-size', type='int'),
fontWeight))
text_frame.pack(side=LEFT, fill=BOTH, expand=1)
text.pack(side=TOP, fill=BOTH, expand=1)
text.focus_set()
# usetabs true -> literal tab characters are used by indent and
# dedent cmds, possibly mixed with spaces if
# indentwidth is not a multiple of tabwidth,
# which will cause Tabnanny to nag!
# false -> tab characters are converted to spaces by indent
# and dedent cmds, and ditto TAB keystrokes
# Although use-spaces=0 can be configured manually in config-main.def,
# configuration of tabs v. spaces is not supported in the configuration
# dialog. IDLE promotes the preferred Python indentation: use spaces!
usespaces = idleConf.GetOption('main', 'Indent',
'use-spaces', type='bool')
self.usetabs = not usespaces
# tabwidth is the display width of a literal tab character.
# CAUTION: telling Tk to use anything other than its default
# tab setting causes it to use an entirely different tabbing algorithm,
# treating tab stops as fixed distances from the left margin.
# Nobody expects this, so for now tabwidth should never be changed.
self.tabwidth = 8 # must remain 8 until Tk is fixed.
# indentwidth is the number of screen characters per indent level.
# The recommended Python indentation is four spaces.
self.indentwidth = self.tabwidth
self.set_notabs_indentwidth()
# If context_use_ps1 is true, parsing searches back for a ps1 line;
# else searches for a popular (if, def, ...) Python stmt.
self.context_use_ps1 = False
# When searching backwards for a reliable place to begin parsing,
# first start num_context_lines[0] lines back, then
# num_context_lines[1] lines back if that didn't work, and so on.
# The last value should be huge (larger than the # of lines in a
# conceivable file).
# Making the initial values larger slows things down more often.
self.num_context_lines = 50, 500, 5000000
self.per = per = self.Percolator(text)
self.undo = undo = self.UndoDelegator()
per.insertfilter(undo)
text.undo_block_start = undo.undo_block_start
text.undo_block_stop = undo.undo_block_stop
undo.set_saved_change_hook(self.saved_change_hook)
# IOBinding implements file I/O and printing functionality
self.io = io = self.IOBinding(self)
io.set_filename_change_hook(self.filename_change_hook)
self.good_load = False
self.set_indentation_params(False)
self.color = None # initialized below in self.ResetColorizer
if filename:
if os.path.exists(filename) and not os.path.isdir(filename):
if io.loadfile(filename):
self.good_load = True
is_py_src = self.ispythonsource(filename)
self.set_indentation_params(is_py_src)
else:
io.set_filename(filename)
self.good_load = True
self.ResetColorizer()
self.saved_change_hook()
self.update_recent_files_list()
self.load_extensions()
menu = self.menudict.get('windows')
if menu:
end = menu.index("end")
if end is None:
end = -1
if end >= 0:
menu.add_separator()
end = end + 1
self.wmenu_end = end
WindowList.register_callback(self.postwindowsmenu)
# Some abstractions so IDLE extensions are cross-IDE
self.askyesno = tkMessageBox.askyesno
self.askinteger = tkSimpleDialog.askinteger
self.showerror = tkMessageBox.showerror
self._highlight_workaround() # Fix selection tags on Windows
def _highlight_workaround(self):
# On Windows, Tk removes painting of the selection
# tags which is different behavior than on Linux and Mac.
# See issue14146 for more information.
if not sys.platform.startswith('win'):
return
text = self.text
text.event_add("<<Highlight-FocusOut>>", "<FocusOut>")
text.event_add("<<Highlight-FocusIn>>", "<FocusIn>")
def highlight_fix(focus):
sel_range = text.tag_ranges("sel")
if sel_range:
if focus == 'out':
HILITE_CONFIG = idleConf.GetHighlight(
idleConf.CurrentTheme(), 'hilite')
text.tag_config("sel_fix", HILITE_CONFIG)
text.tag_raise("sel_fix")
text.tag_add("sel_fix", *sel_range)
elif focus == 'in':
text.tag_remove("sel_fix", "1.0", "end")
text.bind("<<Highlight-FocusOut>>",
lambda ev: highlight_fix("out"))
text.bind("<<Highlight-FocusIn>>",
lambda ev: highlight_fix("in"))
    def _filename_to_unicode(self, filename):
        """convert filename to unicode in order to display it in Tk"""
        # str (or empty/None) passes through unchanged; only bytes need work.
        if isinstance(filename, str) or not filename:
            return filename
        else:
            # Try the filesystem encoding first, then the IO encoding.
            try:
                return filename.decode(self.filesystemencoding)
            except UnicodeDecodeError:
                # XXX
                try:
                    return filename.decode(self.encoding)
                except UnicodeDecodeError:
                    # byte-to-byte conversion (latin-1 never fails)
                    return filename.decode('iso8859-1')
def new_callback(self, event):
dirname, basename = self.io.defaultfilename()
self.flist.new(dirname)
return "break"
    def home_callback(self, event):
        """Smart Home: jump to end of prompt / first non-blank character,
        toggling with column 0, optionally extending the selection."""
        if (event.state & 4) != 0 and event.keysym == "Home":
            # state&4==Control. If <Control-Home>, use the Tk binding.
            return
        if self.text.index("iomark") and \
           self.text.compare("iomark", "<=", "insert lineend") and \
           self.text.compare("insert linestart", "<=", "iomark"):
            # In Shell on input line, go to just after prompt
            insertpt = int(self.text.index("iomark").split(".")[1])
        else:
            # Find the first non-blank character on the line.
            line = self.text.get("insert linestart", "insert lineend")
            for insertpt in range(len(line)):
                if line[insertpt] not in (' ','\t'):
                    break
            else:
                insertpt=len(line)
        # Toggle: if already at the smart-home column, go to column 0.
        lineat = int(self.text.index("insert").split('.')[1])
        if insertpt == lineat:
            insertpt = 0
        dest = "insert linestart+"+str(insertpt)+"c"
        if (event.state&1) == 0:
            # shift was not pressed
            self.text.tag_remove("sel", "1.0", "end")
        else:
            # Shift-Home: extend (or start) the selection to the new column.
            if not self.text.index("sel.first"):
                # there was no previous selection
                self.text.mark_set("my_anchor", "insert")
            else:
                if self.text.compare(self.text.index("sel.first"), "<",
                                     self.text.index("insert")):
                    self.text.mark_set("my_anchor", "sel.first") # extend back
                else:
                    self.text.mark_set("my_anchor", "sel.last") # extend forward
            first = self.text.index(dest)
            last = self.text.index("my_anchor")
            if self.text.compare(first,">",last):
                first,last = last,first
            self.text.tag_remove("sel", "1.0", "end")
            self.text.tag_add("sel", first, last)
        self.text.mark_set("insert", dest)
        self.text.see("insert")
        return "break"
def set_status_bar(self):
self.status_bar = self.MultiStatusBar(self.top)
if sys.platform == "darwin":
# Insert some padding to avoid obscuring some of the statusbar
# by the resize widget.
self.status_bar.set_label('_padding1', ' ', side=RIGHT)
self.status_bar.set_label('column', 'Col: ?', side=RIGHT)
self.status_bar.set_label('line', 'Ln: ?', side=RIGHT)
self.status_bar.pack(side=BOTTOM, fill=X)
self.text.bind("<<set-line-and-column>>", self.set_line_and_column)
self.text.event_add("<<set-line-and-column>>",
"<KeyRelease>", "<ButtonRelease>")
self.text.after_idle(self.set_line_and_column)
def set_line_and_column(self, event=None):
line, column = self.text.index(INSERT).split('.')
self.status_bar.set_label('column', 'Col: %s' % column)
self.status_bar.set_label('line', 'Ln: %s' % line)
menu_specs = [
("file", "_File"),
("edit", "_Edit"),
("format", "F_ormat"),
("run", "_Run"),
("options", "_Options"),
("windows", "_Windows"),
("help", "_Help"),
]
if sys.platform == "darwin":
menu_specs[-2] = ("windows", "_Window")
def createmenubar(self):
mbar = self.menubar
self.menudict = menudict = {}
for name, label in self.menu_specs:
underline, label = prepstr(label)
menudict[name] = menu = Menu(mbar, name=name)
mbar.add_cascade(label=label, menu=menu, underline=underline)
if macosxSupport.isCarbonTk():
# Insert the application menu
menudict['application'] = menu = Menu(mbar, name='apple')
mbar.add_cascade(label='IDLE', menu=menu)
self.fill_menus()
self.recent_files_menu = Menu(self.menubar)
self.menudict['file'].insert_cascade(3, label='Recent Files',
underline=0,
menu=self.recent_files_menu)
self.base_helpmenu_length = self.menudict['help'].index(END)
self.reset_help_menu_entries()
def postwindowsmenu(self):
# Only called when Windows menu exists
menu = self.menudict['windows']
end = menu.index("end")
if end is None:
end = -1
if end > self.wmenu_end:
menu.delete(self.wmenu_end+1, end)
WindowList.add_windows_to_menu(menu)
rmenu = None
def right_menu_event(self, event):
self.text.mark_set("insert", "@%d,%d" % (event.x, event.y))
if not self.rmenu:
self.make_rmenu()
rmenu = self.rmenu
self.event = event
iswin = sys.platform[:3] == 'win'
if iswin:
self.text.config(cursor="arrow")
for item in self.rmenu_specs:
try:
label, eventname, verify_state = item
except ValueError: # see issue1207589
continue
if verify_state is None:
continue
state = getattr(self, verify_state)()
rmenu.entryconfigure(label, state=state)
rmenu.tk_popup(event.x_root, event.y_root)
if iswin:
self.text.config(cursor="ibeam")
rmenu_specs = [
# ("Label", "<<virtual-event>>", "statefuncname"), ...
("Close", "<<close-window>>", None), # Example
]
def make_rmenu(self):
rmenu = Menu(self.text, tearoff=0)
for item in self.rmenu_specs:
label, eventname = item[0], item[1]
if label is not None:
def command(text=self.text, eventname=eventname):
text.event_generate(eventname)
rmenu.add_command(label=label, command=command)
else:
rmenu.add_separator()
self.rmenu = rmenu
    def rmenu_check_cut(self):
        """Cut is enabled exactly when copy is (a selection exists)."""
        return self.rmenu_check_copy()
def rmenu_check_copy(self):
try:
indx = self.text.index('sel.first')
except TclError:
return 'disabled'
else:
return 'normal' if indx else 'disabled'
def rmenu_check_paste(self):
try:
self.text.tk.call('tk::GetSelection', self.text, 'CLIPBOARD')
except TclError:
return 'disabled'
else:
return 'normal'
    def about_dialog(self, event=None):
        """Show the About IDLE dialog."""
        aboutDialog.AboutDialog(self.top,'About IDLE')
    def config_dialog(self, event=None):
        """Open the IDLE Settings (preferences) dialog."""
        configDialog.ConfigDialog(self.top,'Settings')
    def help_dialog(self, event=None):
        """Show the IDLE Help window, placed near this editor window."""
        if self.root:
            parent = self.root
        else:
            parent = self.top
        helpDialog.display(parent, near=self.top)
    def python_docs(self, event=None):
        """Open the Python docs: local chm/html on Windows, browser elsewhere."""
        if sys.platform[:3] == 'win':
            try:
                os.startfile(self.help_url)
            except OSError as why:
                tkMessageBox.showerror(title='Document Start Failure',
                                       message=str(why), parent=self.text)
        else:
            webbrowser.open(self.help_url)
        return "break"
    def cut(self,event):
        """Cut the selection to the clipboard via the Tk <<Cut>> event."""
        self.text.event_generate("<<Cut>>")
        return "break"
    def copy(self,event):
        """Copy the selection; no-op (event may propagate) when nothing is selected."""
        if not self.text.tag_ranges("sel"):
            # There is no selection, so do nothing and maybe interrupt.
            return
        self.text.event_generate("<<Copy>>")
        return "break"
    def paste(self,event):
        """Paste from the clipboard and scroll the insert point into view."""
        self.text.event_generate("<<Paste>>")
        self.text.see("insert")
        return "break"
def select_all(self, event=None):
self.text.tag_add("sel", "1.0", "end-1c")
self.text.mark_set("insert", "1.0")
self.text.see("insert")
return "break"
    def remove_selection(self, event=None):
        """Clear the selection tag and scroll the insert cursor into view."""
        self.text.tag_remove("sel", "1.0", "end")
        self.text.see("insert")
    def move_at_edge_if_selection(self, edge_index):
        """Cursor move begins at start or end of selection

        When a left/right cursor key is pressed create and return to Tkinter a
        function which causes a cursor move from the associated edge of the
        selection.
        """
        # Bind lookups once; the returned closure runs on every keypress.
        self_text_index = self.text.index
        self_text_mark_set = self.text.mark_set
        edges_table = ("sel.first+1c", "sel.last-1c")
        def move_at_edge(event):
            if (event.state & 5) == 0: # no shift(==1) or control(==4) pressed
                try:
                    self_text_index("sel.first")   # raises TclError if no selection
                    self_text_mark_set("insert", edges_table[edge_index])
                except TclError:
                    pass
        return move_at_edge
    def del_word_left(self, event):
        """Delete the word left of the cursor (Tk <Meta-Delete> behavior)."""
        self.text.event_generate('<Meta-Delete>')
        return "break"
    def del_word_right(self, event):
        """Delete the word right of the cursor (Tk <Meta-d> behavior)."""
        self.text.event_generate('<Meta-d>')
        return "break"
    def find_event(self, event):
        """Open the Find dialog."""
        SearchDialog.find(self.text)
        return "break"
    def find_again_event(self, event):
        """Repeat the last search."""
        SearchDialog.find_again(self.text)
        return "break"
    def find_selection_event(self, event):
        """Search for the next occurrence of the current selection."""
        SearchDialog.find_selection(self.text)
        return "break"
    def find_in_files_event(self, event):
        """Open the Find-in-Files (grep) dialog."""
        GrepDialog.grep(self.text, self.io, self.flist)
        return "break"
    def replace_event(self, event):
        """Open the Find/Replace dialog."""
        ReplaceDialog.replace(self.text)
        return "break"
def goto_line_event(self, event):
text = self.text
lineno = tkSimpleDialog.askinteger("Goto",
"Go to line number:",parent=text)
if lineno is None:
return "break"
if lineno <= 0:
text.bell()
return "break"
text.mark_set("insert", "%d.0" % lineno)
text.see("insert")
def open_module(self, event=None):
# XXX Shouldn't this be in IOBinding?
try:
name = self.text.get("sel.first", "sel.last")
except TclError:
name = ""
else:
name = name.strip()
name = tkSimpleDialog.askstring("Module",
"Enter the name of a Python module\n"
"to search on sys.path and open:",
parent=self.text, initialvalue=name)
if name:
name = name.strip()
if not name:
return
# XXX Ought to insert current file's directory in front of path
try:
spec = importlib.util.find_spec(name)
except (ValueError, ImportError) as msg:
tkMessageBox.showerror("Import error", str(msg), parent=self.text)
return
if spec is None:
tkMessageBox.showerror("Import error", "module not found",
parent=self.text)
return
if not isinstance(spec.loader, importlib.abc.SourceLoader):
tkMessageBox.showerror("Import error", "not a source-based module",
parent=self.text)
return
try:
file_path = spec.loader.get_filename(name)
except AttributeError:
tkMessageBox.showerror("Import error",
"loader does not support get_filename",
parent=self.text)
return
if self.flist:
self.flist.open(file_path)
else:
self.io.loadfile(file_path)
def open_class_browser(self, event=None):
    """Open a Class Browser for the current buffer's file, if it has one."""
    filename = self.io.filename
    if not filename:
        tkMessageBox.showerror(
            "No filename",
            "This buffer has no associated filename",
            master=self.text)
        self.text.focus_set()
        return None
    head, tail = os.path.split(filename)
    base, ext = os.path.splitext(tail)
    from idlelib import ClassBrowser
    ClassBrowser.ClassBrowser(self.flist, base, [head])
def open_path_browser(self, event=None):
    """Open a browser over the directories on sys.path."""
    from idlelib import PathBrowser
    PathBrowser.PathBrowser(self.flist)
def gotoline(self, lineno):
    """Move to line *lineno* (1-based), select it, and center the view."""
    if lineno is not None and lineno > 0:
        self.text.mark_set("insert", "%d.0" % lineno)
        self.text.tag_remove("sel", "1.0", "end")
        self.text.tag_add("sel", "insert", "insert +1l")
        self.center()
def ispythonsource(self, filename):
if not filename or os.path.isdir(filename):
return True
base, ext = os.path.splitext(os.path.basename(filename))
if os.path.normcase(ext) in (".py", ".pyw"):
return True
line = self.text.get('1.0', '1.0 lineend')
return line.startswith('#!') and 'python' in line
def close_hook(self):
    """Default close hook: unregister from the file list (may terminate)."""
    if self.flist:
        self.flist.unregister_maybe_terminate(self)
        self.flist = None
def set_close_hook(self, close_hook):
    """Replace the close hook called at the end of _close()."""
    self.close_hook = close_hook
def filename_change_hook(self):
    """React to a filename change: update lists, title, and colorizer."""
    if self.flist:
        self.flist.filename_changed_edit(self)
    self.saved_change_hook()
    self.top.update_windowlist_registry(self)
    self.ResetColorizer()
def _addcolorizer(self):
    """Install a syntax colorizer filter if the buffer is Python source."""
    if self.color:
        return
    if self.ispythonsource(self.io.filename):
        self.color = self.ColorDelegator()
    # can add more colorizers here...
    if self.color:
        # Insert the colorizer below the undo delegator in the chain.
        self.per.removefilter(self.undo)
        self.per.insertfilter(self.color)
        self.per.insertfilter(self.undo)
def _rmcolorizer(self):
    """Remove the syntax colorizer filter, if one is installed."""
    if not self.color:
        return
    self.color.removecolors()
    self.per.removefilter(self.color)
    self.color = None
def ResetColorizer(self):
    "Update the colour theme"
    # Called from self.filename_change_hook and from configDialog.py
    self._rmcolorizer()
    self._addcolorizer()
    theme = idleConf.GetOption('main', 'Theme', 'name')
    normal_colors = idleConf.GetHighlight(theme, 'normal')
    cursor_color = idleConf.GetHighlight(theme, 'cursor', fgBg='fg')
    select_colors = idleConf.GetHighlight(theme, 'hilite')
    self.text.config(
        foreground=normal_colors['foreground'],
        background=normal_colors['background'],
        insertbackground=cursor_color,
        selectforeground=select_colors['foreground'],
        selectbackground=select_colors['background'],
        )
# Characters considered part of an identifier when widening an error tag.
IDENTCHARS = string.ascii_letters + string.digits + "_"

def colorize_syntax_error(self, text, pos):
    """Tag the character (or identifier) at *pos* with "ERROR" and move there."""
    text.tag_add("ERROR", pos)
    char = text.get(pos)
    if char and char in self.IDENTCHARS:
        # Extend the tag back to the start of the identifier.
        text.tag_add("ERROR", pos + " wordstart", pos)
    if '\n' == text.get(pos):   # error at line end
        text.mark_set("insert", pos)
    else:
        text.mark_set("insert", pos + "+1c")
    text.see(pos)
def ResetFont(self):
    "Update the text widgets' font if it is changed"
    # Called from configDialog.py
    fontWeight = 'normal'
    if idleConf.GetOption('main', 'EditorWindow', 'font-bold', type='bool'):
        fontWeight = 'bold'
    self.text.config(font=(idleConf.GetOption('main', 'EditorWindow', 'font'),
                           idleConf.GetOption('main', 'EditorWindow', 'font-size',
                                              type='int'),
                           fontWeight))
def RemoveKeybindings(self):
    "Remove the keybindings before they are changed."
    # Called from configDialog.py
    self.Bindings.default_keydefs = keydefs = idleConf.GetCurrentKeySet()
    for event, keylist in keydefs.items():
        self.text.event_delete(event, *keylist)
    # Also remove bindings contributed by extensions.
    for extensionName in self.get_standard_extension_names():
        xkeydefs = idleConf.GetExtensionBindings(extensionName)
        if xkeydefs:
            for event, keylist in xkeydefs.items():
                self.text.event_delete(event, *keylist)
def ApplyKeybindings(self):
    "Update the keybindings after they are changed"
    # Called from configDialog.py
    self.Bindings.default_keydefs = keydefs = idleConf.GetCurrentKeySet()
    self.apply_bindings()
    for extensionName in self.get_standard_extension_names():
        xkeydefs = idleConf.GetExtensionBindings(extensionName)
        if xkeydefs:
            self.apply_bindings(xkeydefs)
    # update menu accelerators
    # Build {menu-name: {label: virtual-event}} from the menu definitions.
    menuEventDict = {}
    for menu in self.Bindings.menudefs:
        menuEventDict[menu[0]] = {}
        for item in menu[1]:
            if item:
                menuEventDict[menu[0]][prepstr(item[0])[1]] = item[1]
    for menubarItem in self.menudict:
        menu = self.menudict[menubarItem]
        end = menu.index(END)
        if end is None:
            # Skip empty menus
            continue
        end += 1
        for index in range(0, end):
            if menu.type(index) == 'command':
                accel = menu.entrycget(index, 'accelerator')
                if accel:
                    itemName = menu.entrycget(index, 'label')
                    event = ''
                    if menubarItem in menuEventDict:
                        if itemName in menuEventDict[menubarItem]:
                            event = menuEventDict[menubarItem][itemName]
                    if event:
                        # Refresh the displayed accelerator from keydefs.
                        accel = get_accelerator(keydefs, event)
                        menu.entryconfig(index, accelerator=accel)
def set_notabs_indentwidth(self):
    "Update the indentwidth if changed and not using tabs in this window"
    # Called from configDialog.py
    if not self.usetabs:
        self.indentwidth = idleConf.GetOption('main', 'Indent', 'num-spaces',
                                              type='int')
def reset_help_menu_entries(self):
    "Update the additional help entries on the Help menu"
    help_list = idleConf.GetAllExtraHelpSourcesList()
    helpmenu = self.menudict['help']
    # first delete the extra help entries, if any
    helpmenu_length = helpmenu.index(END)
    if helpmenu_length > self.base_helpmenu_length:
        helpmenu.delete((self.base_helpmenu_length + 1), helpmenu_length)
    # then rebuild them
    if help_list:
        helpmenu.add_separator()
        for entry in help_list:
            cmd = self.__extra_help_callback(entry[1])
            helpmenu.add_command(label=entry[0], command=cmd)
    # and update the menu dictionary
    self.menudict['help'] = helpmenu
def __extra_help_callback(self, helpfile):
    "Create a callback with the helpfile value frozen at definition time"
    def display_extra_help(helpfile=helpfile):
        # URLs go to the web browser; local paths are normalized first.
        if not helpfile.startswith(('www', 'http')):
            helpfile = os.path.normpath(helpfile)
        if sys.platform[:3] == 'win':
            try:
                os.startfile(helpfile)
            except OSError as why:
                tkMessageBox.showerror(title='Document Start Failure',
                    message=str(why), parent=self.text)
        else:
            webbrowser.open(helpfile)
    return display_extra_help
def update_recent_files_list(self, new_file=None):
    "Load and update the recent files list and menus"
    rf_list = []
    if os.path.exists(self.recent_files_path):
        with open(self.recent_files_path, 'r',
                  encoding='utf_8', errors='replace') as rf_list_file:
            rf_list = rf_list_file.readlines()
    if new_file:
        new_file = os.path.abspath(new_file) + '\n'
        if new_file in rf_list:
            rf_list.remove(new_file)  # move to top
        rf_list.insert(0, new_file)
    # clean and save the recent files list
    bad_paths = []
    for path in rf_list:
        # Entries keep a trailing '\n'; strip it before the existence check.
        if '\0' in path or not os.path.exists(path[0:-1]):
            bad_paths.append(path)
    rf_list = [path for path in rf_list if path not in bad_paths]
    ulchars = "1234567890ABCDEFGHIJK"
    rf_list = rf_list[0:len(ulchars)]
    try:
        with open(self.recent_files_path, 'w',
                  encoding='utf_8', errors='replace') as rf_file:
            rf_file.writelines(rf_list)
    except OSError as err:
        # Warn only once per session about an unwritable list file.
        if not getattr(self.root, "recentfilelist_error_displayed", False):
            self.root.recentfilelist_error_displayed = True
            tkMessageBox.showerror(title='IDLE Error',
                message='Unable to update Recent Files list:\n%s'
                    % str(err),
                parent=self.text)
    # for each edit window instance, construct the recent files menu
    for instance in self.top.instance_dict:
        menu = instance.recent_files_menu
        menu.delete(0, END)  # clear, and rebuild:
        for i, file_name in enumerate(rf_list):
            file_name = file_name.rstrip()  # zap \n
            # make unicode string to display non-ASCII chars correctly
            ufile_name = self._filename_to_unicode(file_name)
            callback = instance.__recent_file_callback(file_name)
            menu.add_command(label=ulchars[i] + " " + ufile_name,
                             command=callback,
                             underline=0)
def __recent_file_callback(self, file_name):
    """Return a menu callback that opens *file_name* (frozen via default)."""
    def open_recent_file(fn_closure=file_name):
        self.io.open(editFile=fn_closure)
    return open_recent_file
def saved_change_hook(self):
    """Refresh the window title and icon name from the buffer's state.

    Unsaved buffers are marked by surrounding the title with asterisks.
    """
    short = self.short_title()
    long = self.long_title()
    if short and long:
        title = short + " - " + long
    elif short:
        title = short
    elif long:
        title = long
    else:
        title = "Untitled"
    icon = short or long or title
    if not self.get_saved():
        title = "*%s*" % title
        icon = "*%s" % icon
    self.top.wm_title(title)
    self.top.wm_iconname(icon)
def get_saved(self):
    """Return True if the buffer has no unsaved changes (per undo stack)."""
    return self.undo.get_saved()
def set_saved(self, flag):
    """Mark the buffer's saved state on the undo delegator."""
    self.undo.set_saved(flag)
def reset_undo(self):
    """Clear the undo history."""
    self.undo.reset_undo()
def short_title(self):
    """Return 'Python <version>: <basename>' for the title bar."""
    pyversion = "Python " + python_version() + ": "
    filename = self.io.filename
    if filename:
        filename = os.path.basename(filename)
    else:
        filename = "Untitled"
    # return unicode string to display non-ASCII chars correctly
    return pyversion + self._filename_to_unicode(filename)
def long_title(self):
    """Return the buffer's full path, or '' if it has no filename."""
    # return unicode string to display non-ASCII chars correctly
    return self._filename_to_unicode(self.io.filename or "")
def center_insert_event(self, event):
    """Scroll so the insert point is vertically centered."""
    self.center()
def center(self, mark="insert"):
    """Scroll so the line holding *mark* sits in the middle of the view."""
    text = self.text
    top, bot = self.getwindowlines()
    lineno = self.getlineno(mark)
    height = bot - top
    newtop = max(1, lineno - height//2)
    text.yview(float(newtop))
def getwindowlines(self):
    """Return (top, bottom) line numbers currently visible in the widget."""
    text = self.text
    top = self.getlineno("@0,0")
    bot = self.getlineno("@0,65535")
    if top == bot and text.winfo_height() == 1:
        # Geometry manager hasn't run yet; fall back to the configured height.
        height = int(text['height'])
        bot = top + height - 1
    return top, bot
def getlineno(self, mark="insert"):
    """Return the 1-based line number of *mark* in the text widget."""
    index = self.text.index(mark)          # "line.col" string
    return int(float(index))               # truncate the column part
def get_geometry(self):
    "Return (width, height, x, y)"
    geom = self.top.wm_geometry()
    # Tk geometry strings look like "800x600+10+-20".
    m = re.match(r"(\d+)x(\d+)\+(-?\d+)\+(-?\d+)", geom)
    return list(map(int, m.groups()))
def close_event(self, event):
    """Event handler: close this window (prompting to save if needed)."""
    self.close()
def maybesave(self):
    """Offer to save an unsaved buffer; return the IOBinding's reply."""
    if self.io:
        if not self.get_saved():
            # Bring the window forward so the user sees what is being saved.
            if self.top.state() != 'normal':
                self.top.deiconify()
            self.top.lower()
            self.top.lift()
        return self.io.maybesave()
def close(self):
    """Close the window unless the user cancels the save prompt."""
    reply = self.maybesave()
    if str(reply) != "cancel":
        self._close()
    return reply
def _close(self):
    """Tear down the window: release delegators, widgets, and callbacks."""
    if self.io.filename:
        self.update_recent_files_list(new_file=self.io.filename)
    WindowList.unregister_callback(self.postwindowsmenu)
    self.unload_extensions()
    self.io.close()
    self.io = None
    self.undo = None
    if self.color:
        self.color.close(False)
        self.color = None
    self.text = None
    self.tkinter_vars = None
    self.per.close()
    self.per = None
    self.top.destroy()
    if self.close_hook:
        # unless override: unregister from flist, terminate if last window
        self.close_hook()
def load_extensions(self):
    """Initialize the extension registry and load the configured set."""
    self.extensions = {}
    self.load_standard_extensions()
def unload_extensions(self):
    """Close every loaded extension (if it supports close) and forget it."""
    for ins in list(self.extensions.values()):
        if hasattr(ins, "close"):
            ins.close()
    self.extensions = {}
def load_standard_extensions(self):
    """Load every configured editor extension, reporting failures.

    A broken extension must not take the editor down, so per-extension
    errors are printed with a traceback and loading continues.  Catch
    Exception rather than a bare ``except:`` so SystemExit and
    KeyboardInterrupt still propagate.
    """
    for name in self.get_standard_extension_names():
        try:
            self.load_extension(name)
        except Exception:
            print("Failed to load extension", repr(name))
            traceback.print_exc()
def get_standard_extension_names(self):
    """Return the names of extensions configured for editor windows."""
    return idleConf.GetExtensions(editor_only=True)
def load_extension(self, name):
    """Import extension *name*, instantiate it, and wire menus/bindings."""
    try:
        try:
            # Prefer a package-relative import of the extension module.
            mod = importlib.import_module('.' + name, package=__package__)
        except (ImportError, TypeError):
            mod = importlib.import_module(name)
    except ImportError:
        print("\nFailed to import extension: ", name)
        raise
    # The extension class must share the module's name.
    cls = getattr(mod, name)
    keydefs = idleConf.GetExtensionBindings(name)
    if hasattr(cls, "menudefs"):
        self.fill_menus(cls.menudefs, keydefs)
    ins = cls(self)
    self.extensions[name] = ins
    if keydefs:
        self.apply_bindings(keydefs)
        for vevent in keydefs:
            # "<<foo-bar>>" -> method name "foo_bar_event".
            methodname = vevent.replace("-", "_")
            while methodname[:1] == '<':
                methodname = methodname[1:]
            while methodname[-1:] == '>':
                methodname = methodname[:-1]
            methodname = methodname + "_event"
            if hasattr(ins, methodname):
                self.text.bind(vevent, getattr(ins, methodname))
def apply_bindings(self, keydefs=None):
    """Register virtual-event key bindings on the text widget."""
    if keydefs is None:
        keydefs = self.Bindings.default_keydefs
    text = self.text
    text.keydefs = keydefs
    for event, keylist in keydefs.items():
        if keylist:
            text.event_add(event, *keylist)
def fill_menus(self, menudefs=None, keydefs=None):
    """Add appropriate entries to the menus and submenus

    Menus that are absent or None in self.menudict are ignored.
    """
    if menudefs is None:
        menudefs = self.Bindings.menudefs
    if keydefs is None:
        keydefs = self.Bindings.default_keydefs
    menudict = self.menudict
    text = self.text
    for mname, entrylist in menudefs:
        menu = menudict.get(mname)
        if not menu:
            continue
        for entry in entrylist:
            if not entry:
                menu.add_separator()
            else:
                label, eventname = entry
                # A leading '!' marks a checkbutton entry.
                checkbutton = (label[:1] == '!')
                if checkbutton:
                    label = label[1:]
                underline, label = prepstr(label)
                accelerator = get_accelerator(keydefs, eventname)
                # Freeze eventname per entry via default arguments.
                def command(text=text, eventname=eventname):
                    text.event_generate(eventname)
                if checkbutton:
                    var = self.get_var_obj(eventname, BooleanVar)
                    menu.add_checkbutton(label=label, underline=underline,
                        command=command, accelerator=accelerator,
                        variable=var)
                else:
                    menu.add_command(label=label, underline=underline,
                                     command=command,
                                     accelerator=accelerator)
def getvar(self, name):
    """Return the value of Tkinter variable *name*; NameError if unknown."""
    var = self.get_var_obj(name)
    if var:
        value = var.get()
        return value
    else:
        raise NameError(name)
def setvar(self, name, value, vartype=None):
    """Set Tkinter variable *name* (creating it if *vartype* is given)."""
    var = self.get_var_obj(name, vartype)
    if var:
        var.set(value)
    else:
        raise NameError(name)
def get_var_obj(self, name, vartype=None):
    """Return the Tkinter variable for *name*, creating one on demand."""
    var = self.tkinter_vars.get(name)
    if not var and vartype:
        # create a Tkinter variable object with self.text as master:
        self.tkinter_vars[name] = var = vartype(self.text)
    return var
# Tk implementations of "virtual text methods" -- each platform
# reusing IDLE's support code needs to define these for its GUI's
# flavor of widget.

# Is character at text_index in a Python string?  Return 0 for
# "guaranteed no", true for anything else.  This info is expensive
# to compute ab initio, but is probably already known by the
# platform's colorizer.

def is_char_in_string(self, text_index):
    """Return falsy only when the char at *text_index* is surely not in a string."""
    if self.color:
        # Return true iff colorizer hasn't (re)gotten this far
        # yet, or the character is tagged as being in a string
        return self.text.tag_prevrange("TODO", text_index) or \
               "STRING" in self.text.tag_names(text_index)
    else:
        # The colorizer is missing: assume the worst
        return 1
# If a selection is defined in the text widget, return (start,
# end) as Tkinter text indices, otherwise return (None, None)
def get_selection_indices(self):
    """Return the selection's (first, last) indices, or (None, None)."""
    try:
        first = self.text.index("sel.first")
        last = self.text.index("sel.last")
        return first, last
    except TclError:
        return None, None
# Return the text widget's current view of what a tab stop means
# (equivalent width in spaces).
def get_tk_tabwidth(self):
    """Return the widget's tab width (falling back to the default)."""
    current = self.text['tabs'] or TK_TABWIDTH_DEFAULT
    return int(current)
# Set the text widget's current view of what a tab stop means.
def set_tk_tabwidth(self, newtabwidth):
    """Set the widget's tab stops to *newtabwidth* character widths."""
    text = self.text
    if self.get_tk_tabwidth() != newtabwidth:
        # Set text widget tab width (measured in pixels for the current font).
        pixels = text.tk.call("font", "measure", text["font"],
                              "-displayof", text.master,
                              "n" * newtabwidth)
        text.configure(tabs=pixels)
### begin autoindent code ### (configuration was moved to beginning of class)

def set_indentation_params(self, is_py_src, guess=True):
    """Configure indent width for this buffer, optionally guessing from content."""
    if is_py_src and guess:
        i = self.guess_indent()
        # Only trust guesses in a sane range.
        if 2 <= i <= 8:
            self.indentwidth = i
        if self.indentwidth != self.tabwidth:
            self.usetabs = False
    self.set_tk_tabwidth(self.tabwidth)
def smart_backspace_event(self, event):
    """Backspace that removes whitespace back to the previous indent stop."""
    text = self.text
    first, last = self.get_selection_indices()
    if first and last:
        # A selection exists: plain delete.
        text.delete(first, last)
        text.mark_set("insert", first)
        return "break"
    # Delete whitespace left, until hitting a real char or closest
    # preceding virtual tab stop.
    chars = text.get("insert linestart", "insert")
    if chars == '':
        if text.compare("insert", ">", "1.0"):
            # easy: delete preceding newline
            text.delete("insert-1c")
        else:
            text.bell()     # at start of buffer
        return "break"
    if chars[-1] not in " \t":
        # easy: delete preceding real char
        text.delete("insert-1c")
        return "break"
    # Ick.  It may require *inserting* spaces if we back up over a
    # tab character!  This is written to be clear, not fast.
    tabwidth = self.tabwidth
    have = len(chars.expandtabs(tabwidth))
    assert have > 0
    want = ((have - 1) // self.indentwidth) * self.indentwidth
    # Debug prompt is multilined....
    if self.context_use_ps1:
        last_line_of_prompt = sys.ps1.split('\n')[-1]
    else:
        last_line_of_prompt = ''
    ncharsdeleted = 0
    while 1:
        if chars == last_line_of_prompt:
            break
        chars = chars[:-1]
        ncharsdeleted = ncharsdeleted + 1
        have = len(chars.expandtabs(tabwidth))
        if have <= want or chars[-1] not in " \t":
            break
    text.undo_block_start()
    text.delete("insert-%dc" % ncharsdeleted, "insert")
    if have < want:
        text.insert("insert", ' ' * (want - have))
    text.undo_block_stop()
    return "break"
def smart_indent_event(self, event):
    """Tab key handler: delete/indent selection or indent one level.

    # if intraline selection:
    #     delete it
    # elif multiline selection:
    #     do indent-region
    # else:
    #     indent one level
    """
    text = self.text
    first, last = self.get_selection_indices()
    text.undo_block_start()
    try:
        if first and last:
            if index2line(first) != index2line(last):
                return self.indent_region_event(event)
            text.delete(first, last)
            text.mark_set("insert", first)
        prefix = text.get("insert linestart", "insert")
        raw, effective = classifyws(prefix, self.tabwidth)
        if raw == len(prefix):
            # only whitespace to the left
            self.reindent_to(effective + self.indentwidth)
        else:
            # tab to the next 'stop' within or to right of line's text:
            if self.usetabs:
                pad = '\t'
            else:
                effective = len(prefix.expandtabs(self.tabwidth))
                n = self.indentwidth
                pad = ' ' * (n - effective % n)
            text.insert("insert", pad)
        text.see("insert")
        return "break"
    finally:
        text.undo_block_stop()
def newline_and_indent_event(self, event):
    """Insert a newline and auto-indent the new line.

    Uses PyParse to classify the statement being continued (string,
    bracket, backslash continuation, or a fresh statement) and indents
    accordingly; block openers/closers adjust by one level.
    """
    text = self.text
    first, last = self.get_selection_indices()
    text.undo_block_start()
    try:
        if first and last:
            text.delete(first, last)
            text.mark_set("insert", first)
        line = text.get("insert linestart", "insert")
        i, n = 0, len(line)
        while i < n and line[i] in " \t":
            i = i+1
        if i == n:
            # the cursor is in or at leading indentation in a continuation
            # line; just inject an empty line at the start
            text.insert("insert linestart", '\n')
            return "break"
        indent = line[:i]
        # strip whitespace before insert point unless it's in the prompt
        i = 0
        last_line_of_prompt = sys.ps1.split('\n')[-1]
        while line and line[-1] in " \t" and line != last_line_of_prompt:
            line = line[:-1]
            i = i+1
        if i:
            text.delete("insert - %d chars" % i, "insert")
        # strip whitespace after insert point
        while text.get("insert") in " \t":
            text.delete("insert")
        # start new line
        text.insert("insert", '\n')
        # adjust indentation for continuations and block
        # open/close first need to find the last stmt
        lno = index2line(text.index('insert'))
        y = PyParse.Parser(self.indentwidth, self.tabwidth)
        if not self.context_use_ps1:
            # Editor window: scan back a bounded number of lines for a
            # safe parse start.
            for context in self.num_context_lines:
                startat = max(lno - context, 1)
                startatindex = repr(startat) + ".0"
                rawtext = text.get(startatindex, "insert")
                y.set_str(rawtext)
                bod = y.find_good_parse_start(
                          self.context_use_ps1,
                          self._build_char_in_string_func(startatindex))
                if bod is not None or startat == 1:
                    break
            y.set_lo(bod or 0)
        else:
            # Shell window: parse from the last console prompt.
            r = text.tag_prevrange("console", "insert")
            if r:
                startatindex = r[1]
            else:
                startatindex = "1.0"
            rawtext = text.get(startatindex, "insert")
            y.set_str(rawtext)
            y.set_lo(0)
        c = y.get_continuation_type()
        if c != PyParse.C_NONE:
            # The current stmt hasn't ended yet.
            if c == PyParse.C_STRING_FIRST_LINE:
                # after the first line of a string; do not indent at all
                pass
            elif c == PyParse.C_STRING_NEXT_LINES:
                # inside a string which started before this line;
                # just mimic the current indent
                text.insert("insert", indent)
            elif c == PyParse.C_BRACKET:
                # line up with the first (if any) element of the
                # last open bracket structure; else indent one
                # level beyond the indent of the line with the
                # last open bracket
                self.reindent_to(y.compute_bracket_indent())
            elif c == PyParse.C_BACKSLASH:
                # if more than one line in this stmt already, just
                # mimic the current indent; else if initial line
                # has a start on an assignment stmt, indent to
                # beyond leftmost =; else to beyond first chunk of
                # non-whitespace on initial line
                if y.get_num_lines_in_stmt() > 1:
                    text.insert("insert", indent)
                else:
                    self.reindent_to(y.compute_backslash_indent())
            else:
                assert 0, "bogus continuation type %r" % (c,)
            return "break"
        # This line starts a brand new stmt; indent relative to
        # indentation of initial line of closest preceding
        # interesting stmt.
        indent = y.get_base_indent_string()
        text.insert("insert", indent)
        if y.is_block_opener():
            self.smart_indent_event(event)
        elif indent and y.is_block_closer():
            self.smart_backspace_event(event)
        return "break"
    finally:
        text.see("insert")
        text.undo_block_stop()
# Our editwin provides a is_char_in_string function that works
# with a Tk text index, but PyParse only knows about offsets into
# a string. This builds a function for PyParse that accepts an
# offset.

def _build_char_in_string_func(self, startindex):
    """Return an offset-based wrapper around is_char_in_string for PyParse."""
    def inner(offset, _startindex=startindex,
              _icis=self.is_char_in_string):
        return _icis(_startindex + "+%dc" % offset)
    return inner
def indent_region_event(self, event):
    """Indent every line in the selected region by one indent level."""
    head, tail, chars, lines = self.get_region()
    for pos in range(len(lines)):
        line = lines[pos]
        if line:
            raw, effective = classifyws(line, self.tabwidth)
            effective = effective + self.indentwidth
            lines[pos] = self._make_blanks(effective) + line[raw:]
    self.set_region(head, tail, chars, lines)
    return "break"
def dedent_region_event(self, event):
    """Dedent every line in the selected region by one indent level."""
    head, tail, chars, lines = self.get_region()
    for pos in range(len(lines)):
        line = lines[pos]
        if line:
            raw, effective = classifyws(line, self.tabwidth)
            effective = max(effective - self.indentwidth, 0)
            lines[pos] = self._make_blanks(effective) + line[raw:]
    self.set_region(head, tail, chars, lines)
    return "break"
def comment_region_event(self, event):
    """Prefix each line of the region with '##'."""
    head, tail, chars, lines = self.get_region()
    # The last element is the text after the region's final newline;
    # deliberately leave it uncommented.
    for pos in range(len(lines) - 1):
        line = lines[pos]
        lines[pos] = '##' + line
    self.set_region(head, tail, chars, lines)
def uncomment_region_event(self, event):
    """Strip a leading '##' or '#' from each line of the region."""
    head, tail, chars, lines = self.get_region()
    for pos in range(len(lines)):
        line = lines[pos]
        if not line:
            continue
        if line[:2] == '##':
            line = line[2:]
        elif line[:1] == '#':
            line = line[1:]
        lines[pos] = line
    self.set_region(head, tail, chars, lines)
def tabify_region_event(self, event):
    """Convert leading whitespace in the region to tabs (plus spaces)."""
    head, tail, chars, lines = self.get_region()
    tabwidth = self._asktabwidth()
    if tabwidth is None: return
    for pos in range(len(lines)):
        line = lines[pos]
        if line:
            raw, effective = classifyws(line, tabwidth)
            ntabs, nspaces = divmod(effective, tabwidth)
            lines[pos] = '\t' * ntabs + ' ' * nspaces + line[raw:]
    self.set_region(head, tail, chars, lines)
def untabify_region_event(self, event):
    """Expand all tabs in the region to spaces at the chosen tab width."""
    head, tail, chars, lines = self.get_region()
    tabwidth = self._asktabwidth()
    if tabwidth is None: return
    for pos in range(len(lines)):
        lines[pos] = lines[pos].expandtabs(tabwidth)
    self.set_region(head, tail, chars, lines)
def toggle_tabs_event(self, event):
    """Toggle tab-based indentation after confirmation; reset width to 8."""
    if self.askyesno(
          "Toggle tabs",
          "Turn tabs " + ("on", "off")[self.usetabs] +
          "?\nIndent width " +
          ("will be", "remains at")[self.usetabs] + " 8." +
          "\n Note: a tab is always 8 columns",
          parent=self.text):
        self.usetabs = not self.usetabs
        # Try to prevent inconsistent indentation.
        # User must change indent width manually after using tabs.
        self.indentwidth = 8
    return "break"
# XXX this isn't bound to anything -- see tabwidth comments
## def change_tabwidth_event(self, event):
## new = self._asktabwidth()
## if new != self.tabwidth:
## self.tabwidth = new
## self.set_indentation_params(0, guess=0)
## return "break"
def change_indentwidth_event(self, event):
    """Prompt for a new indent width (ignored while tabs are in use)."""
    new = self.askinteger(
              "Indent width",
              "New indent width (2-16)\n(Always use 8 when using tabs)",
              parent=self.text,
              initialvalue=self.indentwidth,
              minvalue=2,
              maxvalue=16)
    if new and new != self.indentwidth and not self.usetabs:
        self.indentwidth = new
    return "break"
def get_region(self):
    """Return (head, tail, chars, lines) for the selection or current line."""
    text = self.text
    first, last = self.get_selection_indices()
    if first and last:
        head = text.index(first + " linestart")
        tail = text.index(last + "-1c lineend +1c")
    else:
        head = text.index("insert linestart")
        tail = text.index("insert lineend +1c")
    chars = text.get(head, tail)
    lines = chars.split("\n")
    return head, tail, chars, lines
def set_region(self, head, tail, chars, lines):
    """Replace the region [head, tail) with *lines*, reselecting it."""
    text = self.text
    newchars = "\n".join(lines)
    if newchars == chars:
        # Nothing changed: just signal the user.
        text.bell()
        return
    text.tag_remove("sel", "1.0", "end")
    text.mark_set("insert", head)
    text.undo_block_start()
    text.delete(head, tail)
    text.insert(head, newchars)
    text.undo_block_stop()
    text.tag_add("sel", head, "insert")
# Make string that displays as n leading blanks.
def _make_blanks(self, n):
if self.usetabs:
ntabs, nspaces = divmod(n, self.tabwidth)
return '\t' * ntabs + ' ' * nspaces
else:
return ' ' * n
# Delete from beginning of line to insert point, then reinsert
# column logical (meaning use tabs if appropriate) spaces.
def reindent_to(self, column):
    """Replace indentation before the insert point with *column* blanks."""
    text = self.text
    text.undo_block_start()
    if text.compare("insert linestart", "!=", "insert"):
        text.delete("insert linestart", "insert")
    if column:
        text.insert("insert", self._make_blanks(column))
    text.undo_block_stop()
def _asktabwidth(self):
    """Prompt for a tab width in [2, 16]; return None if cancelled."""
    return self.askinteger(
        "Tab width",
        "Columns per tab? (2-16)",
        parent=self.text,
        initialvalue=self.indentwidth,
        minvalue=2,
        maxvalue=16)
# Guess indentwidth from text content.
# Return guessed indentwidth.  This should not be believed unless
# it's in a reasonable range (e.g., it will be 0 if no indented
# blocks are found).
def guess_indent(self):
    """Return the indent step between a block opener and its first stmt."""
    opener, indented = IndentSearcher(self.text, self.tabwidth).run()
    if opener and indented:
        raw, indentsmall = classifyws(opener, self.tabwidth)
        raw, indentlarge = classifyws(indented, self.tabwidth)
    else:
        indentsmall = indentlarge = 0
    return indentlarge - indentsmall
# "line.col" -> line, as an int
def index2line(index):
    """Return the line number of a Tk "line.col" index string."""
    line_part = index.partition(".")[0]
    return int(line_part)
# Look at the leading whitespace in s.
# Return pair (# of leading ws characters,
#              effective # of leading blanks after expanding
#              tabs to width tabwidth)
def classifyws(s, tabwidth):
    """Return (raw, effective) widths of s's leading whitespace.

    raw counts the literal space/tab characters; effective is the
    displayed width once tabs are expanded to *tabwidth* columns.
    """
    stripped = s.lstrip(' \t')
    raw = len(s) - len(stripped)
    effective = len(s[:raw].expandtabs(tabwidth))
    return raw, effective
# Keep a private alias so nothing else accidentally uses the module name.
import tokenize
_tokenize = tokenize
del tokenize

class IndentSearcher(object):
    """Scan a Text widget for the first block opener and its indented stmt."""

    # .run() chews over the Text widget, looking for a block opener
    # and the stmt following it.  Returns a pair,
    #     (line containing block opener, line containing stmt)
    # Either or both may be None.

    def __init__(self, text, tabwidth):
        self.text = text
        self.tabwidth = tabwidth
        self.i = self.finished = 0
        self.blkopenline = self.indentedline = None

    def readline(self):
        """Feed the tokenizer one widget line at a time; '' at the end."""
        if self.finished:
            return ""
        i = self.i = self.i + 1
        mark = repr(i) + ".0"
        if self.text.compare(mark, ">=", "end"):
            return ""
        return self.text.get(mark, mark + " lineend+1c")

    def tokeneater(self, type, token, start, end, line,
                   INDENT=_tokenize.INDENT,
                   NAME=_tokenize.NAME,
                   OPENERS=('class', 'def', 'for', 'if', 'try', 'while')):
        """Record the first opener line and the INDENT line that follows it."""
        if self.finished:
            pass
        elif type == NAME and token in OPENERS:
            self.blkopenline = line
        elif type == INDENT and self.blkopenline:
            self.indentedline = line
            self.finished = 1

    def run(self):
        """Tokenize the buffer and return (opener line, indented line)."""
        save_tabsize = _tokenize.tabsize
        _tokenize.tabsize = self.tabwidth
        try:
            try:
                tokens = _tokenize.generate_tokens(self.readline)
                for token in tokens:
                    self.tokeneater(*token)
            except (_tokenize.TokenError, SyntaxError):
                # since we cut off the tokenizer early, we can trigger
                # spurious errors
                pass
        finally:
            _tokenize.tabsize = save_tabsize
        return self.blkopenline, self.indentedline
### end autoindent code ###
def prepstr(s):
    """Remove the first '_' from s and report its position.

    Returns (index, cleaned): index is where the underscore was (used as
    the menu-entry underline position), or -1 when s has none, in which
    case s is returned unchanged.  e.g. prepstr("Co_py") == (2, "Copy").
    """
    idx = s.find('_')
    if idx < 0:
        return idx, s
    return idx, s[:idx] + s[idx + 1:]
# Map Tk keysym names to the printable characters shown in accelerators.
keynames = {
 'bracketleft': '[',
 'bracketright': ']',
 'slash': '/',
}
def get_accelerator(keydefs, eventname):
    """Return a human-readable accelerator string for *eventname*.

    Takes the first key sequence bound to the event and rewrites it into
    menu notation (e.g. "Control-Key-z" -> "Ctrl+Z").  The order of the
    substitutions below matters.
    """
    keylist = keydefs.get(eventname)
    # issue10940: temporary workaround to prevent hang with OS X Cocoa Tk 8.5
    # if not keylist:
    if (not keylist) or (macosxSupport.isCocoaTk() and eventname in {
                            "<<open-module>>",
                            "<<goto-line>>",
                            "<<change-indentwidth>>"}):
        return ""
    s = keylist[0]
    s = re.sub(r"-[a-z]\b", lambda m: m.group().upper(), s)
    s = re.sub(r"\b\w+\b", lambda m: keynames.get(m.group(), m.group()), s)
    s = re.sub("Key-", "", s)
    s = re.sub("Cancel", "Ctrl-Break", s)   # [email protected]
    s = re.sub("Control-", "Ctrl-", s)
    s = re.sub("-", "+", s)
    s = re.sub("><", " ", s)
    s = re.sub("<", "", s)
    s = re.sub(">", "", s)
    return s
def fixwordbreaks(root):
    # Make sure that Tk's double-click and next/previous word
    # operations use our definition of a word (i.e. an identifier)
    tk = root.tk
    tk.call('tcl_wordBreakAfter', 'a b', 0)     # make sure word.tcl is loaded
    tk.call('set', 'tcl_wordchars', '[a-zA-Z0-9_]')
    tk.call('set', 'tcl_nonwordchars', '[^a-zA-Z0-9_]')
def _Editor_window(parent):
    """htest helper: open a standalone EditorWindow under *parent*."""
    root = parent
    fixwordbreaks(root)
    root.withdraw()
    if sys.argv[1:]:
        filename = sys.argv[1]
    else:
        filename = None
    macosxSupport.setupApp(root, None)
    edit = EditorWindow(root=root, filename=filename)
    edit.set_close_hook(root.quit)
    edit.text.bind("<<close-all-windows>>", edit.close_event)
if __name__ == '__main__':
    from idlelib.idle_test.htest import run
    # NOTE(review): _Help_dialog is not defined in this portion of the file;
    # presumably it is defined earlier — confirm, else this raises NameError.
    if len(sys.argv) <= 1:
        run(_Help_dialog)
    run(_Editor_window)
| PennartLoettring/Poettrix | rootfs/usr/lib/python3.4/idlelib/EditorWindow.py | Python | gpl-2.0 | 66,506 |
"""Stuff to parse AIFF-C and AIFF files.
Unless explicitly stated otherwise, the description below is true
both for AIFF-C files and AIFF files.
An AIFF-C file has the following structure.
+-----------------+
| FORM |
+-----------------+
| <size> |
+----+------------+
| | AIFC |
| +------------+
| | <chunks> |
| | . |
| | . |
| | . |
+----+------------+
An AIFF file has the string "AIFF" instead of "AIFC".
A chunk consists of an identifier (4 bytes) followed by a size (4 bytes,
big endian order), followed by the data. The size field does not include
the size of the 8 byte header.
The following chunk types are recognized.
FVER
<version number of AIFF-C defining document> (AIFF-C only).
MARK
<# of markers> (2 bytes)
list of markers:
<marker ID> (2 bytes, must be > 0)
<position> (4 bytes)
<marker name> ("pstring")
COMM
<# of channels> (2 bytes)
<# of sound frames> (4 bytes)
<size of the samples> (2 bytes)
<sampling frequency> (10 bytes, IEEE 80-bit extended
floating point)
in AIFF-C files only:
<compression type> (4 bytes)
<human-readable version of compression type> ("pstring")
SSND
<offset> (4 bytes, not used by this program)
<blocksize> (4 bytes, not used by this program)
<sound data>
A pstring consists of 1 byte length, a string of characters, and 0 or 1
byte pad to make the total length even.
Usage.
Reading AIFF files:
f = aifc.open(file, 'r')
where file is either the name of a file or an open file pointer.
The open file pointer must have methods read(), seek(), and close().
In some types of audio files, if the setpos() method is not used,
the seek() method is not necessary.
This returns an instance of a class with the following public methods:
getnchannels() -- returns number of audio channels (1 for
mono, 2 for stereo)
getsampwidth() -- returns sample width in bytes
getframerate() -- returns sampling frequency
getnframes() -- returns number of audio frames
getcomptype() -- returns compression type ('NONE' for AIFF files)
getcompname() -- returns human-readable version of
compression type ('not compressed' for AIFF files)
getparams() -- returns a namedtuple consisting of all of the
above in the above order
getmarkers() -- get the list of marks in the audio file or None
if there are no marks
getmark(id) -- get mark with the specified id (raises an error
if the mark does not exist)
readframes(n) -- returns at most n frames of audio
rewind() -- rewind to the beginning of the audio stream
setpos(pos) -- seek to the specified position
tell() -- return the current position
close() -- close the instance (make it unusable)
The position returned by tell(), the position given to setpos() and
the position of marks are all compatible and have nothing to do with
the actual position in the file.
The close() method is called automatically when the class instance
is destroyed.
Writing AIFF files:
f = aifc.open(file, 'w')
where file is either the name of a file or an open file pointer.
The open file pointer must have methods write(), tell(), seek(), and
close().
This returns an instance of a class with the following public methods:
aiff() -- create an AIFF file (AIFF-C default)
aifc() -- create an AIFF-C file
setnchannels(n) -- set the number of channels
setsampwidth(n) -- set the sample width
setframerate(n) -- set the frame rate
setnframes(n) -- set the number of frames
setcomptype(type, name)
-- set the compression type and the
human-readable compression type
setparams(tuple)
-- set all parameters at once
setmark(id, pos, name)
-- add specified mark to the list of marks
tell() -- return current position in output file (useful
in combination with setmark())
writeframesraw(data)
            -- write audio frames without patching up the
file header
writeframes(data)
-- write audio frames and patch up the file header
close() -- patch up the file header and close the
output file
You should set the parameters before the first writeframesraw or
writeframes. The total number of frames does not need to be set,
but when it is set to the correct value, the header does not have to
be patched up.
It is best to first set all parameters, except possibly the
compression type, and then write audio frames using writeframesraw.
When all frames have been written, either call writeframes(b'') or
close() to patch up the sizes in the header.
Marks can be added anytime. If there are any marks, you must call
close() after all frames have been written.
The close() method is called automatically when the class instance
is destroyed.
When a file is opened with the extension '.aiff', an AIFF file is
written, otherwise an AIFF-C file is written. This default can be
changed by calling aiff() or aifc() before the first writeframes or
writeframesraw.
"""
import struct
import builtins
import warnings
__all__ = ["Error", "open", "openfp"]  # public API; everything else is a private helper
class Error(Exception):
    """Raised for malformed AIFF/AIFF-C files and for invalid use of this API."""
    pass
_AIFC_version = 0xA2805140     # Version 1 of AIFF-C; written verbatim into the FVER chunk
def _read_long(file):
    """Read a big-endian signed 32-bit integer; raise EOFError if truncated."""
    raw = file.read(4)
    try:
        (value,) = struct.unpack('>l', raw)
    except struct.error:
        raise EOFError from None
    return value
def _read_ulong(file):
    """Read a big-endian unsigned 32-bit integer; raise EOFError if truncated."""
    raw = file.read(4)
    try:
        (value,) = struct.unpack('>L', raw)
    except struct.error:
        raise EOFError from None
    return value
def _read_short(file):
    """Read a big-endian signed 16-bit integer; raise EOFError if truncated."""
    raw = file.read(2)
    try:
        (value,) = struct.unpack('>h', raw)
    except struct.error:
        raise EOFError from None
    return value
def _read_ushort(file):
    """Read a big-endian unsigned 16-bit integer; raise EOFError if truncated."""
    raw = file.read(2)
    try:
        (value,) = struct.unpack('>H', raw)
    except struct.error:
        raise EOFError from None
    return value
def _read_string(file):
    """Read a Pascal-style string: 1 length byte, data, pad byte if length is even.

    The pad byte keeps the total field size (length byte + data) even.
    """
    length = ord(file.read(1))
    data = file.read(length) if length else b''
    if length % 2 == 0:
        file.read(1)  # consume the pad byte
    return data
_HUGE_VAL = 1.79769313486231e+308 # See <limits.h>; DBL_MAX, used as a stand-in for infinity/NaN
def _read_float(f):
    """Read an 80-bit IEEE 754 extended-precision float (10 bytes, big-endian).

    Layout: 1 sign bit + 15-bit biased exponent in the first word, then a
    64-bit mantissa with an explicit integer bit.  An all-ones exponent
    (infinity or NaN) maps to +/- _HUGE_VAL.
    """
    exponent = _read_short(f)           # 2 bytes: sign bit + 15-bit exponent
    sign = 1
    if exponent < 0:
        sign = -1
        exponent += 0x8000              # strip the sign bit
    himant = _read_ulong(f)             # 4 bytes: high half of the mantissa
    lomant = _read_ulong(f)             # 4 bytes: low half of the mantissa
    if exponent == himant == lomant == 0:
        value = 0.0
    elif exponent == 0x7FFF:
        value = _HUGE_VAL               # infinity or NaN
    else:
        # Unbias the exponent (16383) and scale the 64-bit integer mantissa (2**63).
        value = (himant * 0x100000000 + lomant) * pow(2.0, exponent - 16383 - 63)
    return sign * value
def _write_short(f, x):
    """Write *x* to *f* as a big-endian signed 16-bit integer."""
    packed = struct.pack('>h', x)
    f.write(packed)
def _write_ushort(f, x):
    """Write *x* to *f* as a big-endian unsigned 16-bit integer."""
    packed = struct.pack('>H', x)
    f.write(packed)
def _write_long(f, x):
    """Write *x* to *f* as a big-endian signed 32-bit integer."""
    packed = struct.pack('>l', x)
    f.write(packed)
def _write_ulong(f, x):
    """Write *x* to *f* as a big-endian unsigned 32-bit integer."""
    packed = struct.pack('>L', x)
    f.write(packed)
def _write_string(f, s):
    """Write a Pascal-style string: 1 length byte, data, pad byte if length is even.

    Raises ValueError when *s* is longer than 255 bytes (the length must
    fit in a single byte).
    """
    length = len(s)
    if length > 255:
        raise ValueError("string exceeds maximum pstring length")
    f.write(struct.pack('B', length))
    f.write(s)
    if length % 2 == 0:
        f.write(b'\x00')  # pad so the total field size stays even
def _write_float(f, x):
    """Write *x* as an 80-bit IEEE 754 extended-precision float (10 bytes).

    This is the representation the AIFF COMM chunk uses for the sample
    rate: a sign/exponent word followed by a 64-bit mantissa with an
    explicit integer bit.
    """
    import math
    if x < 0:
        sign = 0x8000          # the sign occupies the top bit of the exponent word
        x = x * -1
    else:
        sign = 0
    if x == 0:
        # Zero encodes as all-zero exponent and mantissa.
        expon = 0
        himant = 0
        lomant = 0
    else:
        fmant, expon = math.frexp(x)
        if expon > 16384 or fmant >= 1 or fmant != fmant: # Infinity or NaN
            expon = sign|0x7FFF
            himant = 0
            lomant = 0
        else: # Finite
            expon = expon + 16382   # bias the exponent
            if expon < 0: # denormalized
                fmant = math.ldexp(fmant, expon)
                expon = 0
            expon = expon | sign
            # Peel off the mantissa 32 bits at a time.
            fmant = math.ldexp(fmant, 32)
            fsmant = math.floor(fmant)
            himant = int(fsmant)    # high 32 bits of the mantissa
            fmant = math.ldexp(fmant - fsmant, 32)
            fsmant = math.floor(fmant)
            lomant = int(fsmant)    # low 32 bits of the mantissa
    _write_ushort(f, expon)
    _write_ulong(f, himant)
    _write_ulong(f, lomant)
from chunk import Chunk
from collections import namedtuple
# Immutable record returned by getparams() on both the reader and writer.
_aifc_params = namedtuple('_aifc_params',
                          'nchannels sampwidth framerate nframes comptype compname')
# Per-field docstrings so the fields are documented in help() output.
_aifc_params.nchannels.__doc__ = 'Number of audio channels (1 for mono, 2 for stereo)'
_aifc_params.sampwidth.__doc__ = 'Sample width in bytes'
_aifc_params.framerate.__doc__ = 'Sampling frequency'
_aifc_params.nframes.__doc__ = 'Number of audio frames'
_aifc_params.comptype.__doc__ = 'Compression type ("NONE" for AIFF files)'
_aifc_params.compname.__doc__ = ("""\
A human-readable version of the compression type
('not compressed' for AIFF files)""")
class Aifc_read:
    """Reader for AIFF and AIFF-C audio files.

    Normally constructed through the module-level open() with mode
    'r'/'rb'.  The audio parameters are parsed from the COMM chunk at
    construction time; frames are read on demand from the SSND chunk by
    readframes().  Compressed AIFF-C data (ulaw/alaw/G722) is converted
    to 16-bit linear samples on the fly via the audioop module.
    """
    # Variables used in this class:
    #
    # These variables are available to the user through appropriate
    # methods of this class:
    # _file -- the open file with methods read(), close(), and seek()
    #       set through the __init__() method
    # _nchannels -- the number of audio channels
    #       available through the getnchannels() method
    # _nframes -- the number of audio frames
    #       available through the getnframes() method
    # _sampwidth -- the number of bytes per audio sample
    #       available through the getsampwidth() method
    # _framerate -- the sampling frequency
    #       available through the getframerate() method
    # _comptype -- the AIFF-C compression type ('NONE' if AIFF)
    #       available through the getcomptype() method
    # _compname -- the human-readable AIFF-C compression type
    #       available through the getcompname() method
    # _markers -- the marks in the audio file
    #       available through the getmarkers() and getmark()
    #       methods
    # _soundpos -- the position in the audio stream
    #       available through the tell() method, set through the
    #       setpos() method
    #
    # These variables are used internally only:
    # _version -- the AIFF-C version number
    # _decomp -- the decompressor from builtin module cl
    # _comm_chunk_read -- 1 iff the COMM chunk has been read
    # _aifc -- 1 iff reading an AIFF-C file
    # _ssnd_seek_needed -- 1 iff positioned correctly in audio
    #       file for readframes()
    # _ssnd_chunk -- instantiation of a chunk class for the SSND chunk
    # _framesize -- size of one frame in the file

    # Class-level default so cleanup is safe even when __init__ failed before
    # assigning _file.  (NOTE(review): the original comment said "__del__
    # checks it", but no __del__ is defined on this class here -- confirm.)
    _file = None

    def initfp(self, file):
        """Parse the FORM container of *file*, locating the COMM, SSND,
        FVER and MARK chunks.  Raises Error on malformed input."""
        self._version = 0
        self._convert = None
        self._markers = []
        self._soundpos = 0
        self._file = file
        chunk = Chunk(file)
        if chunk.getname() != b'FORM':
            raise Error('file does not start with FORM id')
        formdata = chunk.read(4)
        if formdata == b'AIFF':
            self._aifc = 0
        elif formdata == b'AIFC':
            self._aifc = 1
        else:
            raise Error('not an AIFF or AIFF-C file')
        self._comm_chunk_read = 0
        self._ssnd_chunk = None
        # Walk the sub-chunks of the FORM container until EOF.
        while 1:
            self._ssnd_seek_needed = 1
            try:
                chunk = Chunk(self._file)
            except EOFError:
                break
            chunkname = chunk.getname()
            if chunkname == b'COMM':
                self._read_comm_chunk(chunk)
                self._comm_chunk_read = 1
            elif chunkname == b'SSND':
                self._ssnd_chunk = chunk
                dummy = chunk.read(8)  # skip the offset and blocksize fields
                self._ssnd_seek_needed = 0
            elif chunkname == b'FVER':
                self._version = _read_ulong(chunk)
            elif chunkname == b'MARK':
                self._readmark(chunk)
            chunk.skip()
        if not self._comm_chunk_read or not self._ssnd_chunk:
            raise Error('COMM chunk and/or SSND chunk missing')

    def __init__(self, f):
        """Open *f*, either a filename or an already-open binary file object."""
        if isinstance(f, str):
            file_object = builtins.open(f, 'rb')
            try:
                self.initfp(file_object)
            except:
                # Don't leak the file we opened ourselves.
                file_object.close()
                raise
        else:
            # assume it is an open file object already
            self.initfp(f)

    def __enter__(self):
        return self

    def __exit__(self, *args):
        self.close()

    #
    # User visible methods.
    #
    def getfp(self):
        """Return the underlying file object."""
        return self._file

    def rewind(self):
        """Reset the read position back to the first audio frame."""
        self._ssnd_seek_needed = 1
        self._soundpos = 0

    def close(self):
        """Close the underlying file.  Safe to call more than once."""
        file = self._file
        if file is not None:
            self._file = None
            file.close()

    def tell(self):
        """Return the current frame position in the audio stream."""
        return self._soundpos

    def getnchannels(self):
        """Return the number of audio channels."""
        return self._nchannels

    def getnframes(self):
        """Return the total number of audio frames."""
        return self._nframes

    def getsampwidth(self):
        """Return the sample width in bytes (after decompression, if any)."""
        return self._sampwidth

    def getframerate(self):
        """Return the sampling frequency in frames per second."""
        return self._framerate

    def getcomptype(self):
        """Return the 4-byte compression type (b'NONE' for AIFF)."""
        return self._comptype

    def getcompname(self):
        """Return the human-readable compression name."""
        return self._compname

##  def getversion(self):
##      return self._version

    def getparams(self):
        """Return all parameters as an _aifc_params namedtuple."""
        return _aifc_params(self.getnchannels(), self.getsampwidth(),
                            self.getframerate(), self.getnframes(),
                            self.getcomptype(), self.getcompname())

    def getmarkers(self):
        """Return the list of (id, pos, name) marks, or None if there are none."""
        if len(self._markers) == 0:
            return None
        return self._markers

    def getmark(self, id):
        """Return the (id, pos, name) mark with the given id; raise Error if absent."""
        for marker in self._markers:
            if id == marker[0]:
                return marker
        raise Error('marker {0!r} does not exist'.format(id))

    def setpos(self, pos):
        """Seek to frame number *pos*; raise Error when out of range."""
        if pos < 0 or pos > self._nframes:
            raise Error('position not in range')
        self._soundpos = pos
        # Defer the actual file seek until the next readframes() call.
        self._ssnd_seek_needed = 1

    def readframes(self, nframes):
        """Read and return at most *nframes* frames of audio data as bytes.

        Returns b'' at end of stream.  Compressed data is converted to
        16-bit linear samples before being returned.
        """
        if self._ssnd_seek_needed:
            self._ssnd_chunk.seek(0)
            dummy = self._ssnd_chunk.read(8)  # skip offset/blocksize header
            pos = self._soundpos * self._framesize
            if pos:
                self._ssnd_chunk.seek(pos + 8)
            self._ssnd_seek_needed = 0
        if nframes == 0:
            return b''
        data = self._ssnd_chunk.read(nframes * self._framesize)
        if self._convert and data:
            data = self._convert(data)
        # Advance by the number of frames actually decoded; the divisor uses
        # the post-conversion sample width.
        self._soundpos = self._soundpos + len(data) // (self._nchannels
                                                        * self._sampwidth)
        return data

    #
    # Internal methods.
    #
    def _alaw2lin(self, data):
        """Convert A-law data to 16-bit linear samples."""
        import audioop
        return audioop.alaw2lin(data, 2)

    def _ulaw2lin(self, data):
        """Convert u-law data to 16-bit linear samples."""
        import audioop
        return audioop.ulaw2lin(data, 2)

    def _adpcm2lin(self, data):
        """Convert ADPCM (G722) data to 16-bit linear samples, keeping codec state."""
        import audioop
        if not hasattr(self, '_adpcmstate'):
            # first time
            self._adpcmstate = None
        data, self._adpcmstate = audioop.adpcm2lin(data, 2, self._adpcmstate)
        return data

    def _read_comm_chunk(self, chunk):
        """Parse the COMM chunk: channel count, frame count, sample width,
        frame rate and (for AIFF-C) the compression type and name."""
        self._nchannels = _read_short(chunk)
        self._nframes = _read_long(chunk)
        # Stored as bits per sample; round up to whole bytes.
        self._sampwidth = (_read_short(chunk) + 7) // 8
        self._framerate = int(_read_float(chunk))
        if self._sampwidth <= 0:
            raise Error('bad sample width')
        if self._nchannels <= 0:
            raise Error('bad # of channels')
        self._framesize = self._nchannels * self._sampwidth
        if self._aifc:
            #DEBUG: SGI's soundeditor produces a bad size :-(
            kludge = 0
            if chunk.chunksize == 18:
                kludge = 1
                warnings.warn('Warning: bad COMM chunk size')
                chunk.chunksize = 23
            #DEBUG end
            self._comptype = chunk.read(4)
            #DEBUG start
            if kludge:
                # Extend the chunk size by the (padded) pstring length that the
                # broken writer omitted, then rewind the length byte we peeked.
                length = ord(chunk.file.read(1))
                if length & 1 == 0:
                    length = length + 1
                chunk.chunksize = chunk.chunksize + length
                chunk.file.seek(-1, 1)
            #DEBUG end
            self._compname = _read_string(chunk)
            if self._comptype != b'NONE':
                if self._comptype == b'G722':
                    self._convert = self._adpcm2lin
                elif self._comptype in (b'ulaw', b'ULAW'):
                    self._convert = self._ulaw2lin
                elif self._comptype in (b'alaw', b'ALAW'):
                    self._convert = self._alaw2lin
                else:
                    raise Error('unsupported compression type')
                # All supported codecs decode to 16-bit linear samples.
                self._sampwidth = 2
        else:
            # Plain AIFF is always uncompressed.
            self._comptype = b'NONE'
            self._compname = b'not compressed'

    def _readmark(self, chunk):
        """Parse the MARK chunk into the _markers list of (id, pos, name)."""
        nmarkers = _read_short(chunk)
        # Some files appear to contain invalid counts.
        # Cope with this by testing for EOF.
        try:
            for i in range(nmarkers):
                id = _read_short(chunk)
                pos = _read_long(chunk)
                name = _read_string(chunk)
                if pos or name:
                    # some files appear to have
                    # dummy markers consisting of
                    # a position 0 and name ''
                    self._markers.append((id, pos, name))
        except EOFError:
            w = ('Warning: MARK chunk contains only %s marker%s instead of %s' %
                 (len(self._markers), '' if len(self._markers) == 1 else 's',
                  nmarkers))
            warnings.warn(w)
class Aifc_write:
    """Writer for AIFF and AIFF-C audio files.

    Normally constructed through the module-level open() with mode
    'w'/'wb'.  All parameters must be set before the first frame is
    written; the FORM/FVER/COMM/SSND headers are emitted lazily on the
    first write and patched on close() when the final sizes were not
    known up front.
    """
    # Variables used in this class:
    #
    # These variables are user settable through appropriate methods
    # of this class:
    # _file -- the open file with methods write(), close(), tell(), seek()
    #       set through the __init__() method
    # _comptype -- the AIFF-C compression type ('NONE' in AIFF)
    #       set through the setcomptype() or setparams() method
    # _compname -- the human-readable AIFF-C compression type
    #       set through the setcomptype() or setparams() method
    # _nchannels -- the number of audio channels
    #       set through the setnchannels() or setparams() method
    # _sampwidth -- the number of bytes per audio sample
    #       set through the setsampwidth() or setparams() method
    # _framerate -- the sampling frequency
    #       set through the setframerate() or setparams() method
    # _nframes -- the number of audio frames written to the header
    #       set through the setnframes() or setparams() method
    # _aifc -- whether we're writing an AIFF-C file or an AIFF file
    #       set through the aifc() method, reset through the
    #       aiff() method
    #
    # These variables are used internally only:
    # _version -- the AIFF-C version number
    # _comp -- the compressor from builtin module cl
    # _nframeswritten -- the number of audio frames actually written
    # _datalength -- the size of the audio samples written to the header
    # _datawritten -- the size of the audio samples actually written

    _file = None  # Set here since __del__ checks it even if __init__ failed

    def __init__(self, f):
        """Open *f*, either a filename or an already-open binary file object."""
        if isinstance(f, str):
            file_object = builtins.open(f, 'wb')
            try:
                self.initfp(file_object)
            except:
                # Don't leak the file we opened ourselves.
                file_object.close()
                raise

            # treat .aiff file extensions as non-compressed audio
            if f.endswith('.aiff'):
                self._aifc = 0
        else:
            # assume it is an open file object already
            self.initfp(f)

    def initfp(self, file):
        """Initialize writer state over an open file object."""
        self._file = file
        self._version = _AIFC_version
        self._comptype = b'NONE'
        self._compname = b'not compressed'
        self._convert = None
        self._nchannels = 0
        self._sampwidth = 0
        self._framerate = 0
        self._nframes = 0
        self._nframeswritten = 0
        self._datawritten = 0
        self._datalength = 0
        self._markers = []
        self._marklength = 0
        self._aifc = 1      # AIFF-C is default

    def __del__(self):
        self.close()

    def __enter__(self):
        return self

    def __exit__(self, *args):
        self.close()

    #
    # User visible methods.
    #
    def aiff(self):
        """Switch to writing a plain (uncompressed) AIFF file."""
        if self._nframeswritten:
            raise Error('cannot change parameters after starting to write')
        self._aifc = 0

    def aifc(self):
        """Switch to writing an AIFF-C file (the default)."""
        if self._nframeswritten:
            raise Error('cannot change parameters after starting to write')
        self._aifc = 1

    def setnchannels(self, nchannels):
        """Set the number of audio channels (must be >= 1)."""
        if self._nframeswritten:
            raise Error('cannot change parameters after starting to write')
        if nchannels < 1:
            raise Error('bad # of channels')
        self._nchannels = nchannels

    def getnchannels(self):
        """Return the number of channels; raise Error if not yet set."""
        if not self._nchannels:
            raise Error('number of channels not set')
        return self._nchannels

    def setsampwidth(self, sampwidth):
        """Set the sample width in bytes (1-4)."""
        if self._nframeswritten:
            raise Error('cannot change parameters after starting to write')
        if sampwidth < 1 or sampwidth > 4:
            raise Error('bad sample width')
        self._sampwidth = sampwidth

    def getsampwidth(self):
        """Return the sample width; raise Error if not yet set."""
        if not self._sampwidth:
            raise Error('sample width not set')
        return self._sampwidth

    def setframerate(self, framerate):
        """Set the sampling frequency (must be > 0)."""
        if self._nframeswritten:
            raise Error('cannot change parameters after starting to write')
        if framerate <= 0:
            raise Error('bad frame rate')
        self._framerate = framerate

    def getframerate(self):
        """Return the frame rate; raise Error if not yet set."""
        if not self._framerate:
            raise Error('frame rate not set')
        return self._framerate

    def setnframes(self, nframes):
        """Pre-declare the number of frames so the header needn't be patched."""
        if self._nframeswritten:
            raise Error('cannot change parameters after starting to write')
        self._nframes = nframes

    def getnframes(self):
        """Return the number of frames actually written so far."""
        return self._nframeswritten

    def setcomptype(self, comptype, compname):
        """Set the 4-byte compression type and its human-readable name."""
        if self._nframeswritten:
            raise Error('cannot change parameters after starting to write')
        if comptype not in (b'NONE', b'ulaw', b'ULAW',
                            b'alaw', b'ALAW', b'G722'):
            raise Error('unsupported compression type')
        self._comptype = comptype
        self._compname = compname

    def getcomptype(self):
        """Return the 4-byte compression type."""
        return self._comptype

    def getcompname(self):
        """Return the human-readable compression name."""
        return self._compname

##  def setversion(self, version):
##      if self._nframeswritten:
##          raise Error, 'cannot change parameters after starting to write'
##      self._version = version

    def setparams(self, params):
        """Set all parameters at once from a 6-tuple
        (nchannels, sampwidth, framerate, nframes, comptype, compname)."""
        nchannels, sampwidth, framerate, nframes, comptype, compname = params
        if self._nframeswritten:
            raise Error('cannot change parameters after starting to write')
        if comptype not in (b'NONE', b'ulaw', b'ULAW',
                            b'alaw', b'ALAW', b'G722'):
            raise Error('unsupported compression type')
        self.setnchannels(nchannels)
        self.setsampwidth(sampwidth)
        self.setframerate(framerate)
        self.setnframes(nframes)
        self.setcomptype(comptype, compname)

    def getparams(self):
        """Return all parameters as an _aifc_params namedtuple.

        Raises Error unless channels, sample width and frame rate are set.
        """
        if not self._nchannels or not self._sampwidth or not self._framerate:
            raise Error('not all parameters set')
        return _aifc_params(self._nchannels, self._sampwidth, self._framerate,
                            self._nframes, self._comptype, self._compname)

    def setmark(self, id, pos, name):
        """Add or replace the mark with the given id."""
        if id <= 0:
            raise Error('marker ID must be > 0')
        if pos < 0:
            raise Error('marker position must be >= 0')
        if not isinstance(name, bytes):
            raise Error('marker name must be bytes')
        for i in range(len(self._markers)):
            if id == self._markers[i][0]:
                self._markers[i] = id, pos, name
                return
        self._markers.append((id, pos, name))

    def getmark(self, id):
        """Return the (id, pos, name) mark with the given id; raise Error if absent."""
        for marker in self._markers:
            if id == marker[0]:
                return marker
        raise Error('marker {0!r} does not exist'.format(id))

    def getmarkers(self):
        """Return the list of marks, or None if there are none."""
        if len(self._markers) == 0:
            return None
        return self._markers

    def tell(self):
        """Return the current frame position in the output (frames written)."""
        return self._nframeswritten

    def writeframesraw(self, data):
        """Write audio frames without patching up the file header afterwards."""
        if not isinstance(data, (bytes, bytearray)):
            data = memoryview(data).cast('B')
        self._ensure_header_written(len(data))
        nframes = len(data) // (self._sampwidth * self._nchannels)
        if self._convert:
            data = self._convert(data)
        self._file.write(data)
        self._nframeswritten = self._nframeswritten + nframes
        self._datawritten = self._datawritten + len(data)

    def writeframes(self, data):
        """Write audio frames and patch the header if the sizes changed."""
        self.writeframesraw(data)
        if self._nframeswritten != self._nframes or \
              self._datalength != self._datawritten:
            self._patchheader()

    def close(self):
        """Flush markers, patch up the header, and close the output file.

        Safe to call more than once.
        """
        if self._file is None:
            return
        try:
            self._ensure_header_written(0)
            if self._datawritten & 1:
                # quick pad to even size
                self._file.write(b'\x00')
                self._datawritten = self._datawritten + 1
            self._writemarkers()
            if self._nframeswritten != self._nframes or \
                  self._datalength != self._datawritten or \
                  self._marklength:
                self._patchheader()
        finally:
            # Prevent ref cycles
            self._convert = None
            f = self._file
            self._file = None
            f.close()

    #
    # Internal methods.
    #
    def _lin2alaw(self, data):
        """Convert 16-bit linear samples to A-law."""
        import audioop
        return audioop.lin2alaw(data, 2)

    def _lin2ulaw(self, data):
        """Convert 16-bit linear samples to u-law."""
        import audioop
        return audioop.lin2ulaw(data, 2)

    def _lin2adpcm(self, data):
        """Convert 16-bit linear samples to ADPCM (G722), keeping codec state."""
        import audioop
        if not hasattr(self, '_adpcmstate'):
            self._adpcmstate = None
        data, self._adpcmstate = audioop.lin2adpcm(data, 2, self._adpcmstate)
        return data

    def _ensure_header_written(self, datasize):
        """Validate parameters and write the file header once, before any data."""
        if not self._nframeswritten:
            if self._comptype in (b'ULAW', b'ulaw', b'ALAW', b'alaw', b'G722'):
                if not self._sampwidth:
                    self._sampwidth = 2
                if self._sampwidth != 2:
                    raise Error('sample width must be 2 when compressing '
                                'with ulaw/ULAW, alaw/ALAW or G7.22 (ADPCM)')
            if not self._nchannels:
                raise Error('# channels not specified')
            if not self._sampwidth:
                raise Error('sample width not specified')
            if not self._framerate:
                raise Error('sampling rate not specified')
            self._write_header(datasize)

    def _init_compression(self):
        """Select the conversion routine matching the compression type."""
        if self._comptype == b'G722':
            self._convert = self._lin2adpcm
        elif self._comptype in (b'ulaw', b'ULAW'):
            self._convert = self._lin2ulaw
        elif self._comptype in (b'alaw', b'ALAW'):
            self._convert = self._lin2alaw

    def _write_header(self, initlength):
        """Write the FORM/FVER/COMM/SSND headers, remembering the positions of
        the fields that may need patching later."""
        if self._aifc and self._comptype != b'NONE':
            self._init_compression()
        self._file.write(b'FORM')
        if not self._nframes:
            # Estimate the frame count from the first write's size.
            self._nframes = initlength // (self._nchannels * self._sampwidth)
        self._datalength = self._nframes * self._nchannels * self._sampwidth
        if self._datalength & 1:
            self._datalength = self._datalength + 1
        if self._aifc:
            # Compressed data occupies less space than the linear input.
            if self._comptype in (b'ulaw', b'ULAW', b'alaw', b'ALAW'):
                self._datalength = self._datalength // 2
                if self._datalength & 1:
                    self._datalength = self._datalength + 1
            elif self._comptype == b'G722':
                self._datalength = (self._datalength + 3) // 4
                if self._datalength & 1:
                    self._datalength = self._datalength + 1
        try:
            self._form_length_pos = self._file.tell()
        except (AttributeError, OSError):
            # Unseekable stream: the header can never be patched afterwards.
            self._form_length_pos = None
        commlength = self._write_form_length(self._datalength)
        if self._aifc:
            self._file.write(b'AIFC')
            self._file.write(b'FVER')
            _write_ulong(self._file, 4)
            _write_ulong(self._file, self._version)
        else:
            self._file.write(b'AIFF')
        self._file.write(b'COMM')
        _write_ulong(self._file, commlength)
        _write_short(self._file, self._nchannels)
        if self._form_length_pos is not None:
            self._nframes_pos = self._file.tell()
        _write_ulong(self._file, self._nframes)
        if self._comptype in (b'ULAW', b'ulaw', b'ALAW', b'alaw', b'G722'):
            _write_short(self._file, 8)
        else:
            _write_short(self._file, self._sampwidth * 8)
        _write_float(self._file, self._framerate)
        if self._aifc:
            self._file.write(self._comptype)
            _write_string(self._file, self._compname)
        self._file.write(b'SSND')
        if self._form_length_pos is not None:
            self._ssnd_length_pos = self._file.tell()
        _write_ulong(self._file, self._datalength + 8)
        _write_ulong(self._file, 0)
        _write_ulong(self._file, 0)

    def _write_form_length(self, datalength):
        """Write the FORM chunk length for *datalength* bytes of audio and
        return the COMM chunk length."""
        if self._aifc:
            commlength = 18 + 5 + len(self._compname)
            if commlength & 1:
                commlength = commlength + 1
            verslength = 12
        else:
            commlength = 18
            verslength = 0
        _write_ulong(self._file, 4 + verslength + self._marklength + \
                     8 + commlength + 16 + datalength)
        return commlength

    def _patchheader(self):
        """Rewrite the FORM length, frame count and SSND length fields to match
        what was actually written, then restore the file position."""
        curpos = self._file.tell()
        if self._datawritten & 1:
            datalength = self._datawritten + 1
            self._file.write(b'\x00')
        else:
            datalength = self._datawritten
        if datalength == self._datalength and \
              self._nframes == self._nframeswritten and \
              self._marklength == 0:
            self._file.seek(curpos, 0)
            return
        self._file.seek(self._form_length_pos, 0)
        dummy = self._write_form_length(datalength)
        self._file.seek(self._nframes_pos, 0)
        _write_ulong(self._file, self._nframeswritten)
        self._file.seek(self._ssnd_length_pos, 0)
        _write_ulong(self._file, datalength + 8)
        self._file.seek(curpos, 0)
        self._nframes = self._nframeswritten
        self._datalength = datalength

    def _writemarkers(self):
        """Append the MARK chunk, if any marks were set."""
        if len(self._markers) == 0:
            return
        self._file.write(b'MARK')
        # First pass: compute the chunk length (2-byte count plus, per mark,
        # 6 bytes of id/pos and an even-padded pstring).
        length = 2
        for marker in self._markers:
            id, pos, name = marker
            length = length + len(name) + 1 + 6
            if len(name) & 1 == 0:
                length = length + 1
        _write_ulong(self._file, length)
        self._marklength = length + 8
        _write_short(self._file, len(self._markers))
        # Second pass: write the marks themselves.
        for marker in self._markers:
            id, pos, name = marker
            _write_short(self._file, id)
            _write_ulong(self._file, pos)
            _write_string(self._file, name)
def open(f, mode=None):
    """Open an AIFF/AIFF-C file and return a reader or writer object.

    *f* may be a filename or an open file object.  When *mode* is None it
    defaults to the file object's own mode attribute, or to 'rb' when it
    has none.  Raises Error for any other mode.
    """
    if mode is None:
        mode = getattr(f, 'mode', 'rb')
    if mode in ('r', 'rb'):
        return Aifc_read(f)
    if mode in ('w', 'wb'):
        return Aifc_write(f)
    raise Error("mode must be 'r', 'rb', 'w', or 'wb'")
def openfp(f, mode=None):
    """Deprecated alias for open(); emits a DeprecationWarning and delegates."""
    warnings.warn("aifc.openfp is deprecated since Python 3.7. "
                  "Use aifc.open instead.", DeprecationWarning, stacklevel=2)
    return open(f, mode=mode)
if __name__ == '__main__':
    # Command-line smoke test: print the parameters of an AIFF file and,
    # when a second filename is given, copy its audio to that file.
    import sys
    if not sys.argv[1:]:
        # Default demo file when no argument is given (SGI demo path).
        sys.argv.append('/usr/demos/data/audio/bach.aiff')
    fn = sys.argv[1]
    with open(fn, 'r') as f:  # module-level open(), not the builtin
        print("Reading", fn)
        print("nchannels =", f.getnchannels())
        print("nframes =", f.getnframes())
        print("sampwidth =", f.getsampwidth())
        print("framerate =", f.getframerate())
        print("comptype =", f.getcomptype())
        print("compname =", f.getcompname())
        if sys.argv[2:]:
            gn = sys.argv[2]
            print("Writing", gn)
            with open(gn, 'w') as g:
                # Copy parameters, then stream the audio in 1024-frame chunks.
                g.setparams(f.getparams())
                while 1:
                    data = f.readframes(1024)
                    if not data:
                        break
                    g.writeframes(data)
                print("Done.")
| prefetchnta/questlab | bin/x64bin/python/37/Lib/aifc.py | Python | lgpl-2.1 | 33,765 |
"""This module is contains the Vehicle class, which is the main entry point for
using the Python library to access vehicle data programmatically. Most users will
want to interact with an instance of Vehicle, and won't need to deal with other
parts of the library directly (besides measurement types).
"""
from .measurements import Measurement
from .sinks import MeasurementNotifierSink
class Vehicle(object):
    """The Vehicle class is the main entry point for the OpenXC Python library.

    A Vehicle represents a connection to at least one vehicle data source and
    zero or one vehicle controllers, which can accept commands to send back to
    the vehicle. A Vehicle instance can have more than one data source (e.g. if
    the computer using this library has a secondary GPS data source).

    Most applications will either request synchronous vehicle data measurements
    using the ``get`` method or subscribe with a callback function passed to
    ``listen``.

    More advanced applications that want access to all raw vehicle data may
    want to register a ``DataSink`` with a Vehicle.
    """
    def __init__(self, interface=None):
        """Construct a new Vehicle instance, optionally providing a vehicle
        interface from ``openxc.interface`` to use for I/O.
        """
        self.sources = set()
        self.sinks = set()
        # Maps a measurement name to the most recent raw message of that name.
        self.measurements = {}
        if interface is not None:
            self.add_source(interface)
        # NOTE(review): the controller is assigned even when interface is None.
        self.controller = interface
        # Built-in sink that fans received messages out to listen() callbacks.
        self.notifier = MeasurementNotifierSink()
        self.sinks.add(self.notifier)
    def get(self, measurement_class):
        """Return the latest measurement for the given class or None if nothing
        has been received from the vehicle.
        """
        name = Measurement.name_from_class(measurement_class)
        return self._construct_measurement(name)
    def listen(self, measurement_class, callback):
        """Register the callback function to be called whenever a new
        measurement of the given class is received from the vehicle data
        sources.

        If the callback is already registered for measurements of the given
        type, this method will have no effect.
        """
        self.notifier.register(measurement_class, callback)
    def unlisten(self, measurement_class, callback):
        """Stop notifying the given callback of new values of the measurement
        type.

        If the callback was not previously registered as a listener, this
        method will have no effect.
        """
        self.notifier.unregister(measurement_class, callback)
    def add_source(self, source):
        """Add a vehicle data source to the instance.

        The Vehicle instance will be set as the callback of the source, and
        the source will be started if it is startable (i.e. it has a
        ``start()`` method).
        """
        if source is not None:
            self.sources.add(source)
            # All incoming messages from this source flow through _receive().
            source.callback = self._receive
            if hasattr(source, 'start'):
                source.start()
    def add_sink(self, sink):
        """Add a vehicle data sink to the instance. ``sink`` should be a
        sub-class of ``DataSink`` or at least have a ``receive(message,
        **kwargs)`` method.

        The sink will be started if it is startable (i.e. it has a
        ``start()`` method).
        """
        if sink is not None:
            self.sinks.add(sink)
            if hasattr(sink, 'start'):
                sink.start()
    def _receive(self, message, **kwargs):
        # Record the latest message per name and fan it out to every sink.
        # Messages without a 'name' key are treated as raw CAN messages.
        name = message.get('name', 'can_message')
        self.measurements[name] = message
        for sink in self.sinks:
            sink.receive(message, **kwargs)
    def _construct_measurement(self, measurement_id):
        # Build a Measurement from the latest raw message with this name;
        # returns None (implicitly) when nothing has been received yet.
        raw_measurement = self.measurements.get(measurement_id, None)
        if raw_measurement is not None:
            return Measurement.from_dict(raw_measurement)
| openxc/openxc-python | openxc/vehicle.py | Python | bsd-3-clause | 3,945 |
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2022 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
"""
| Database (Sherrill) of interaction energies for dissociation curves of dispersion-bound bimolecular complexes.
| Geometries and Reference interaction energies from the following articles:
| Benzene Dimers from Sherrill et al. JPCA 113 10146 (2009).
| Benzene-Hydrogen Sulfide from Sherrill et al. JPCA 113 10146 (2009).
| Benzene-Methane from Sherrill et al. JPCA 113 10146 (2009).
| Methane Dimer from Takatani et al. PCCP 9 6106 (2007).
| Pyridine Dimers from Hohenstein et al. JPCA 113 878 (2009).
| Collection into NBC10 from Burns et al. JCP 134 084107 (2011).
| Revised reference interaction energies (NBC10A) from Marshall et al. JCP 135 194102 (2011).
- **cp** ``'off'`` || ``'on'``
- **rlxd** ``'off'``
- **benchmark**
- ``'NBC100'`` Burns et al. JCP 134 084107 (2011).
- |dl| ``'NBC10A'`` |dr| Marshall et al. JCP 135 194102 (2011).
- **subset**
- ``'small'``
- ``'large'``
- ``'equilibrium'``
- ``'BzBz_S'`` dissociation curve for benzene dimer, sandwich
- ``'BzBz_T'`` dissociation curve for benzene dimer, t-shaped
- ``'BzBz_PD34'`` dissociation curve for benzene dimer, parallel displaced by 3.4A
- ``'BzH2S'`` dissociation curve for benzene-H2S
- ``'BzMe'`` dissociation curve for benzene-methane
- ``'MeMe'`` dissociation curve for methane dimer
- ``'PyPy_S2'`` dissociation curve for pyridine dimer, sandwich
- ``'PyPy_T3'`` dissociation curve for pyridine dimer, t-shaped
- ``'BzBz_PD32'`` dissociation curve for benzene dimer, parallel displaced by 3.2A
- ``'BzBz_PD36'`` dissociation curve for benzene dimer, parallel displaced by 3.6A
"""
import re
import qcdb
# <<< NBC10 Database Module >>>
dbse = 'NBC1'

# <<< Database Members >>>
# Each dissociation curve is a list of reaction names 'SYSTEM-R', where R is
# the intermonomer separation (or, for the PD curves, the horizontal
# displacement) in Angstroms.


def _curve(system, distances):
    """Return reaction names '<system>-<R>' for each R in *distances*."""
    return ['%s-%s' % (system, d) for d in distances]


BzBz_S = _curve('BzBz_S', [3.2, 3.3, 3.4, 3.5, 3.6, 3.7, 3.8, 3.9, 4.0, 4.1, 4.2, 4.5, 5.0, 5.5, 6.0, 6.5, 10.0])
BzBz_T = _curve('BzBz_T', [4.4, 4.5, 4.6, 4.7, 4.8, 4.9, 5.0, 5.1, 5.2, 5.3, 5.4, 5.5, 5.6, 6.0, 6.5, 7.0, 7.5, 8.0])
BzBz_PD34 = _curve('BzBz_PD34', [0.2, 0.4, 0.6, 0.8, 1.0, 1.2, 1.4, 1.5, 1.6, 1.7, 1.8, 1.9, 2.0, 2.2, 2.4, 2.6, 2.8, 3.0])
BzH2S = _curve('BzH2S', [3.2, 3.4, 3.5, 3.6, 3.7, 3.8, 3.9, 4.0, 4.1, 4.2, 4.5, 4.75, 5.0, 5.25, 5.5, 6.0, 6.5, 7.0, 7.5])
BzMe = _curve('BzMe', [3.2, 3.3, 3.4, 3.5, 3.6, 3.7, 3.8, 3.9, 4.0, 4.1, 4.2, 4.4, 4.6, 4.8, 5.0, 5.2, 5.4, 5.6, 6.0])
MeMe = _curve('MeMe', [3.2, 3.3, 3.4, 3.5, 3.6, 3.7, 3.8, 3.9, 4.0, 4.1, 4.2, 4.3, 4.4, 4.6, 4.8, 5.0, 5.4, 5.8])
PyPy_S2 = _curve('PyPy_S2', [3.1, 3.3, 3.4, 3.5, 3.6, 3.7, 3.8, 3.9, 4.0, 4.1, 4.2, 4.3, 4.4, 4.5, 4.7, 5.0, 5.5, 6.0, 6.5, 7.0])
PyPy_T3 = _curve('PyPy_T3', [4.1, 4.3, 4.5, 4.6, 4.7, 4.8, 4.9, 5.0, 5.1, 5.2, 5.3, 5.4, 5.5, 5.7, 6.0, 6.5, 7.0, 8.0, 9.0])
BzBz_PD32 = _curve('BzBz_PD32', [0.2, 0.4, 0.6, 0.8, 1.0, 1.2, 1.4, 1.5, 1.6, 1.7, 1.8, 1.9, 2.0, 2.2, 2.4, 2.6, 2.8, 3.0])
BzBz_PD36 = _curve('BzBz_PD36', [0.2, 0.4, 0.6, 0.8, 1.0, 1.2, 1.4, 1.5, 1.6, 1.7, 1.8, 1.9, 2.0, 2.2, 2.4, 2.6, 2.8, 3.0])

# Full reaction list: all ten curves, in the canonical order used throughout
# this module.
HRXN = (BzBz_S + BzBz_T + BzBz_PD34 + BzH2S + BzMe + MeMe
        + PyPy_S2 + PyPy_T3 + BzBz_PD32 + BzBz_PD36)
# Small, large, and equilibrium-geometry subsets.
HRXN_SM = ['BzMe-6.0', 'MeMe-5.0']
HRXN_LG = ['BzBz_T-5.0']
HRXN_EQ = ['BzBz_S-3.9', 'BzBz_T-5.0', 'BzBz_PD34-1.8', 'BzH2S-3.8', 'BzMe-3.8',
           'MeMe-3.6', 'PyPy_S2-3.7', 'PyPy_T3-4.9', 'BzBz_PD32-1.9', 'BzBz_PD36-1.7']
# <<< Chemical Systems Involved >>>
RXNM = {}       # reaction matrix of reagent contributions per reaction
ACTV = {}       # order of active reagents per reaction
ACTV_CP = {}    # order of active reagents per counterpoise-corrected reaction
ACTV_SA = {}    # order of active reagents for non-supramolecular calculations


def _build_reaction(rxn, cp_monos, uncp_monos):
    """Fill the RXNM/ACTV/ACTV_CP/ACTV_SA entries for reaction *rxn*.

    cp_monos:   (ghost-monomer label, RXNM weight) pairs for the
                counterpoise-corrected monomers, e.g. [('monoA', -2)] for a
                symmetric dimer whose single unique monomer counts twice.
    uncp_monos: (isolated-monomer name, RXNM weight) pairs for the
                uncorrected monomers; each name is listed once in ACTV
                regardless of its weight.
    """
    index = '%s-%s' % (dbse, rxn)
    dimer = '%s-dimer' % index
    weights = {dimer: +1}
    for mono, wt in cp_monos:
        weights['%s-%s-CP' % (index, mono)] = wt
    for mono, wt in uncp_monos:
        weights['%s-%s-mono-unCP' % (dbse, mono)] = wt
    RXNM[index] = weights
    ACTV_SA[index] = [dimer]
    ACTV_CP[index] = [dimer] + ['%s-%s-CP' % (index, mono) for mono, _ in cp_monos]
    ACTV[index] = [dimer] + ['%s-%s-mono-unCP' % (dbse, mono) for mono, _ in uncp_monos]


for rxn in HRXN:
    if (rxn in BzBz_S) or (rxn in BzBz_PD34) or (rxn in BzBz_PD32) or (rxn in BzBz_PD36):
        # symmetric benzene dimers: one unique monomer, counted twice
        _build_reaction(rxn, [('monoA', -2)], [('Bz', -2)])
    elif rxn in BzBz_T:
        # t-shaped benzene dimer: two distinct monomer orientations, one species
        _build_reaction(rxn, [('monoA', -1), ('monoB', -1)], [('Bz', -2)])
    elif rxn in BzH2S:
        _build_reaction(rxn, [('monoA', -1), ('monoB', -1)], [('Bz', -1), ('H2S', -1)])
    elif rxn in BzMe:
        # uses the alternate benzene geometry 'Bz2'
        _build_reaction(rxn, [('monoA', -1), ('monoB', -1)], [('Bz2', -1), ('Me', -1)])
    elif rxn in MeMe:
        _build_reaction(rxn, [('monoA', -2)], [('Me', -2)])
    elif rxn in PyPy_S2:
        _build_reaction(rxn, [('monoA', -2)], [('Py', -2)])
    elif rxn in PyPy_T3:
        _build_reaction(rxn, [('monoA', -1), ('monoB', -1)], [('Py', -2)])
# <<< Reference Values >>>
# Reference interaction energies in kcal/mol (negative = bound); keys are the
# reaction indices built above. BIND is the active benchmark dict; it is
# reassigned to the default benchmark after all candidate sets are defined.
BIND = {}
# Original publication
# NBC100 benchmark: Burns et al. JCP 134 084107 (2011).
BIND_NBC100 = {}
BIND_NBC100['%s-BzBz_S-3.2' % (dbse)] = 3.522
BIND_NBC100['%s-BzBz_S-3.3' % (dbse)] = 1.535
BIND_NBC100['%s-BzBz_S-3.4' % (dbse)] = 0.189
BIND_NBC100['%s-BzBz_S-3.5' % (dbse)] = -0.689
BIND_NBC100['%s-BzBz_S-3.6' % (dbse)] = -1.231
BIND_NBC100['%s-BzBz_S-3.7' % (dbse)] = -1.535
BIND_NBC100['%s-BzBz_S-3.8' % (dbse)] = -1.674
BIND_NBC100['%s-BzBz_S-3.9' % (dbse)] = -1.701 # BzBz_S minimum
BIND_NBC100['%s-BzBz_S-4.0' % (dbse)] = -1.655
BIND_NBC100['%s-BzBz_S-4.1' % (dbse)] = -1.565
BIND_NBC100['%s-BzBz_S-4.2' % (dbse)] = -1.448
BIND_NBC100['%s-BzBz_S-4.5' % (dbse)] = -1.058
BIND_NBC100['%s-BzBz_S-5.0' % (dbse)] = -0.542
BIND_NBC100['%s-BzBz_S-5.5' % (dbse)] = -0.248
BIND_NBC100['%s-BzBz_S-6.0' % (dbse)] = -0.099
BIND_NBC100['%s-BzBz_S-6.5' % (dbse)] = -0.028
BIND_NBC100['%s-BzBz_S-10.0' % (dbse)] = 0.018
BIND_NBC100['%s-BzBz_T-4.4' % (dbse)] = 0.626
BIND_NBC100['%s-BzBz_T-4.5' % (dbse)] = -0.760
BIND_NBC100['%s-BzBz_T-4.6' % (dbse)] = -1.673
BIND_NBC100['%s-BzBz_T-4.7' % (dbse)] = -2.239
BIND_NBC100['%s-BzBz_T-4.8' % (dbse)] = -2.552
BIND_NBC100['%s-BzBz_T-4.9' % (dbse)] = -2.687
BIND_NBC100['%s-BzBz_T-5.0' % (dbse)] = -2.698 # BzBz_T minimum
BIND_NBC100['%s-BzBz_T-5.1' % (dbse)] = -2.627
BIND_NBC100['%s-BzBz_T-5.2' % (dbse)] = -2.503
BIND_NBC100['%s-BzBz_T-5.3' % (dbse)] = -2.349
BIND_NBC100['%s-BzBz_T-5.4' % (dbse)] = -2.179
BIND_NBC100['%s-BzBz_T-5.5' % (dbse)] = -2.005
BIND_NBC100['%s-BzBz_T-5.6' % (dbse)] = -1.833
BIND_NBC100['%s-BzBz_T-6.0' % (dbse)] = -1.242
BIND_NBC100['%s-BzBz_T-6.5' % (dbse)] = -0.752
BIND_NBC100['%s-BzBz_T-7.0' % (dbse)] = -0.468
BIND_NBC100['%s-BzBz_T-7.5' % (dbse)] = -0.302
BIND_NBC100['%s-BzBz_T-8.0' % (dbse)] = -0.203
BIND_NBC100['%s-BzBz_PD34-0.2' % (dbse)] = 0.070
BIND_NBC100['%s-BzBz_PD34-0.4' % (dbse)] = -0.257
BIND_NBC100['%s-BzBz_PD34-0.6' % (dbse)] = -0.728
BIND_NBC100['%s-BzBz_PD34-0.8' % (dbse)] = -1.260
BIND_NBC100['%s-BzBz_PD34-1.0' % (dbse)] = -1.766
BIND_NBC100['%s-BzBz_PD34-1.2' % (dbse)] = -2.179
BIND_NBC100['%s-BzBz_PD34-1.4' % (dbse)] = -2.466
BIND_NBC100['%s-BzBz_PD34-1.5' % (dbse)] = -2.557
BIND_NBC100['%s-BzBz_PD34-1.6' % (dbse)] = -2.614
BIND_NBC100['%s-BzBz_PD34-1.7' % (dbse)] = -2.640
BIND_NBC100['%s-BzBz_PD34-1.8' % (dbse)] = -2.643 # BzBz_PD34 minimum
BIND_NBC100['%s-BzBz_PD34-1.9' % (dbse)] = -2.624
BIND_NBC100['%s-BzBz_PD34-2.0' % (dbse)] = -2.587
BIND_NBC100['%s-BzBz_PD34-2.2' % (dbse)] = -2.479
BIND_NBC100['%s-BzBz_PD34-2.4' % (dbse)] = -2.356
BIND_NBC100['%s-BzBz_PD34-2.6' % (dbse)] = -2.242
BIND_NBC100['%s-BzBz_PD34-2.8' % (dbse)] = -2.147
BIND_NBC100['%s-BzBz_PD34-3.0' % (dbse)] = -2.079
BIND_NBC100['%s-BzH2S-3.2' % (dbse)] = 1.250
BIND_NBC100['%s-BzH2S-3.4' % (dbse)] = -1.570
BIND_NBC100['%s-BzH2S-3.5' % (dbse)] = -2.256
BIND_NBC100['%s-BzH2S-3.6' % (dbse)] = -2.638
BIND_NBC100['%s-BzH2S-3.7' % (dbse)] = -2.808
BIND_NBC100['%s-BzH2S-3.8' % (dbse)] = -2.834 # BzH2S minimum
BIND_NBC100['%s-BzH2S-3.9' % (dbse)] = -2.766
BIND_NBC100['%s-BzH2S-4.0' % (dbse)] = -2.639
BIND_NBC100['%s-BzH2S-4.1' % (dbse)] = -2.478
BIND_NBC100['%s-BzH2S-4.2' % (dbse)] = -2.301
BIND_NBC100['%s-BzH2S-4.5' % (dbse)] = -1.770
BIND_NBC100['%s-BzH2S-4.75' % (dbse)] = -1.393
BIND_NBC100['%s-BzH2S-5.0' % (dbse)] = -1.093
BIND_NBC100['%s-BzH2S-5.25' % (dbse)] = -0.861
BIND_NBC100['%s-BzH2S-5.5' % (dbse)] = -0.684
BIND_NBC100['%s-BzH2S-6.0' % (dbse)] = -0.446
BIND_NBC100['%s-BzH2S-6.5' % (dbse)] = -0.302
BIND_NBC100['%s-BzH2S-7.0' % (dbse)] = -0.214
BIND_NBC100['%s-BzH2S-7.5' % (dbse)] = -0.155
BIND_NBC100['%s-BzMe-3.2' % (dbse)] = 0.717
BIND_NBC100['%s-BzMe-3.3' % (dbse)] = -0.183
BIND_NBC100['%s-BzMe-3.4' % (dbse)] = -0.774
BIND_NBC100['%s-BzMe-3.5' % (dbse)] = -1.135
BIND_NBC100['%s-BzMe-3.6' % (dbse)] = -1.337
BIND_NBC100['%s-BzMe-3.7' % (dbse)] = -1.432
BIND_NBC100['%s-BzMe-3.8' % (dbse)] = -1.439 # BzMe minimum
BIND_NBC100['%s-BzMe-3.9' % (dbse)] = -1.414
BIND_NBC100['%s-BzMe-4.0' % (dbse)] = -1.327
BIND_NBC100['%s-BzMe-4.1' % (dbse)] = -1.232
BIND_NBC100['%s-BzMe-4.2' % (dbse)] = -1.138
BIND_NBC100['%s-BzMe-4.4' % (dbse)] = -0.950
BIND_NBC100['%s-BzMe-4.6' % (dbse)] = -0.760
BIND_NBC100['%s-BzMe-4.8' % (dbse)] = -0.606
BIND_NBC100['%s-BzMe-5.0' % (dbse)] = -0.475
BIND_NBC100['%s-BzMe-5.2' % (dbse)] = -0.370
BIND_NBC100['%s-BzMe-5.4' % (dbse)] = -0.286
BIND_NBC100['%s-BzMe-5.6' % (dbse)] = -0.230
BIND_NBC100['%s-BzMe-6.0' % (dbse)] = -0.141
BIND_NBC100['%s-MeMe-3.2' % (dbse)] = 0.069
BIND_NBC100['%s-MeMe-3.3' % (dbse)] = -0.239
BIND_NBC100['%s-MeMe-3.4' % (dbse)] = -0.417
BIND_NBC100['%s-MeMe-3.5' % (dbse)] = -0.508
BIND_NBC100['%s-MeMe-3.6' % (dbse)] = -0.541 # MeMe minimum
BIND_NBC100['%s-MeMe-3.7' % (dbse)] = -0.539
BIND_NBC100['%s-MeMe-3.8' % (dbse)] = -0.515
BIND_NBC100['%s-MeMe-3.9' % (dbse)] = -0.480
BIND_NBC100['%s-MeMe-4.0' % (dbse)] = -0.439
BIND_NBC100['%s-MeMe-4.1' % (dbse)] = -0.396
BIND_NBC100['%s-MeMe-4.2' % (dbse)] = -0.354
BIND_NBC100['%s-MeMe-4.3' % (dbse)] = -0.315
BIND_NBC100['%s-MeMe-4.4' % (dbse)] = -0.279
BIND_NBC100['%s-MeMe-4.6' % (dbse)] = -0.217
BIND_NBC100['%s-MeMe-4.8' % (dbse)] = -0.168
BIND_NBC100['%s-MeMe-5.0' % (dbse)] = -0.130
BIND_NBC100['%s-MeMe-5.4' % (dbse)] = -0.080
BIND_NBC100['%s-MeMe-5.8' % (dbse)] = -0.050
BIND_NBC100['%s-PyPy_S2-3.1' % (dbse)] = 2.442
BIND_NBC100['%s-PyPy_S2-3.3' % (dbse)] = -1.125
BIND_NBC100['%s-PyPy_S2-3.4' % (dbse)] = -2.016
BIND_NBC100['%s-PyPy_S2-3.5' % (dbse)] = -2.534
BIND_NBC100['%s-PyPy_S2-3.6' % (dbse)] = -2.791
BIND_NBC100['%s-PyPy_S2-3.7' % (dbse)] = -2.870 # PyPy_S2 minimum
BIND_NBC100['%s-PyPy_S2-3.8' % (dbse)] = -2.832
BIND_NBC100['%s-PyPy_S2-3.9' % (dbse)] = -2.719
BIND_NBC100['%s-PyPy_S2-4.0' % (dbse)] = -2.561
BIND_NBC100['%s-PyPy_S2-4.1' % (dbse)] = -2.381
BIND_NBC100['%s-PyPy_S2-4.2' % (dbse)] = -2.192
BIND_NBC100['%s-PyPy_S2-4.3' % (dbse)] = -2.005
BIND_NBC100['%s-PyPy_S2-4.4' % (dbse)] = -1.824
BIND_NBC100['%s-PyPy_S2-4.5' % (dbse)] = -1.655
BIND_NBC100['%s-PyPy_S2-4.7' % (dbse)] = -1.354
BIND_NBC100['%s-PyPy_S2-5.0' % (dbse)] = -0.999
BIND_NBC100['%s-PyPy_S2-5.5' % (dbse)] = -0.618
BIND_NBC100['%s-PyPy_S2-6.0' % (dbse)] = -0.402
BIND_NBC100['%s-PyPy_S2-6.5' % (dbse)] = -0.277
BIND_NBC100['%s-PyPy_S2-7.0' % (dbse)] = -0.200
BIND_NBC100['%s-PyPy_T3-4.1' % (dbse)] = 9.340
BIND_NBC100['%s-PyPy_T3-4.3' % (dbse)] = 1.991
BIND_NBC100['%s-PyPy_T3-4.5' % (dbse)] = -1.377
BIND_NBC100['%s-PyPy_T3-4.6' % (dbse)] = -2.203
BIND_NBC100['%s-PyPy_T3-4.7' % (dbse)] = -2.673
BIND_NBC100['%s-PyPy_T3-4.8' % (dbse)] = -2.897
BIND_NBC100['%s-PyPy_T3-4.9' % (dbse)] = -2.954 # PyPy_T3 minimum
BIND_NBC100['%s-PyPy_T3-5.0' % (dbse)] = -2.903
BIND_NBC100['%s-PyPy_T3-5.1' % (dbse)] = -2.784
BIND_NBC100['%s-PyPy_T3-5.2' % (dbse)] = -2.625
BIND_NBC100['%s-PyPy_T3-5.3' % (dbse)] = -2.447
BIND_NBC100['%s-PyPy_T3-5.4' % (dbse)] = -2.263
BIND_NBC100['%s-PyPy_T3-5.5' % (dbse)] = -2.080
BIND_NBC100['%s-PyPy_T3-5.7' % (dbse)] = -1.742
BIND_NBC100['%s-PyPy_T3-6.0' % (dbse)] = -1.324
BIND_NBC100['%s-PyPy_T3-6.5' % (dbse)] = -0.853
BIND_NBC100['%s-PyPy_T3-7.0' % (dbse)] = -0.574
BIND_NBC100['%s-PyPy_T3-8.0' % (dbse)] = -0.296
BIND_NBC100['%s-PyPy_T3-9.0' % (dbse)] = -0.175
BIND_NBC100['%s-BzBz_PD32-0.2' % (dbse)] = 3.301
BIND_NBC100['%s-BzBz_PD32-0.4' % (dbse)] = 2.678
BIND_NBC100['%s-BzBz_PD32-0.6' % (dbse)] = 1.783
BIND_NBC100['%s-BzBz_PD32-0.8' % (dbse)] = 0.781
BIND_NBC100['%s-BzBz_PD32-1.0' % (dbse)] = -0.171
BIND_NBC100['%s-BzBz_PD32-1.2' % (dbse)] = -0.954
BIND_NBC100['%s-BzBz_PD32-1.4' % (dbse)] = -1.508
BIND_NBC100['%s-BzBz_PD32-1.5' % (dbse)] = -1.695
BIND_NBC100['%s-BzBz_PD32-1.6' % (dbse)] = -1.827
BIND_NBC100['%s-BzBz_PD32-1.7' % (dbse)] = -1.911
BIND_NBC100['%s-BzBz_PD32-1.8' % (dbse)] = -1.950
BIND_NBC100['%s-BzBz_PD32-1.9' % (dbse)] = -1.957 # BzBz_PD32 minimum
BIND_NBC100['%s-BzBz_PD32-2.0' % (dbse)] = -1.937
BIND_NBC100['%s-BzBz_PD32-2.2' % (dbse)] = -1.860
BIND_NBC100['%s-BzBz_PD32-2.4' % (dbse)] = -1.767
BIND_NBC100['%s-BzBz_PD32-2.6' % (dbse)] = -1.702
BIND_NBC100['%s-BzBz_PD32-2.8' % (dbse)] = -1.680
BIND_NBC100['%s-BzBz_PD32-3.0' % (dbse)] = -1.705
BIND_NBC100['%s-BzBz_PD36-0.2' % (dbse)] = -1.293
BIND_NBC100['%s-BzBz_PD36-0.4' % (dbse)] = -1.462
BIND_NBC100['%s-BzBz_PD36-0.6' % (dbse)] = -1.708
BIND_NBC100['%s-BzBz_PD36-0.8' % (dbse)] = -1.984
BIND_NBC100['%s-BzBz_PD36-1.0' % (dbse)] = -2.248
BIND_NBC100['%s-BzBz_PD36-1.2' % (dbse)] = -2.458
BIND_NBC100['%s-BzBz_PD36-1.4' % (dbse)] = -2.597
BIND_NBC100['%s-BzBz_PD36-1.5' % (dbse)] = -2.635
BIND_NBC100['%s-BzBz_PD36-1.6' % (dbse)] = -2.652
BIND_NBC100['%s-BzBz_PD36-1.7' % (dbse)] = -2.654 # BzBz_PD36 minimum
BIND_NBC100['%s-BzBz_PD36-1.8' % (dbse)] = -2.642
BIND_NBC100['%s-BzBz_PD36-1.9' % (dbse)] = -2.615
BIND_NBC100['%s-BzBz_PD36-2.0' % (dbse)] = -2.575
BIND_NBC100['%s-BzBz_PD36-2.2' % (dbse)] = -2.473
BIND_NBC100['%s-BzBz_PD36-2.4' % (dbse)] = -2.356
BIND_NBC100['%s-BzBz_PD36-2.6' % (dbse)] = -2.240
BIND_NBC100['%s-BzBz_PD36-2.8' % (dbse)] = -2.130
BIND_NBC100['%s-BzBz_PD36-3.0' % (dbse)] = -2.035
# Current revision
# NBC10A benchmark: revised reference interaction energies (kcal/mol) from
# Marshall et al. JCP 135 194102 (2011); same reaction keys as BIND_NBC100.
BIND_NBC10A = {}
BIND_NBC10A['%s-BzBz_S-3.2' % (dbse)] = 3.462
BIND_NBC10A['%s-BzBz_S-3.3' % (dbse)] = 1.484
BIND_NBC10A['%s-BzBz_S-3.4' % (dbse)] = 0.147
BIND_NBC10A['%s-BzBz_S-3.5' % (dbse)] = -0.724
BIND_NBC10A['%s-BzBz_S-3.6' % (dbse)] = -1.259
BIND_NBC10A['%s-BzBz_S-3.7' % (dbse)] = -1.558
BIND_NBC10A['%s-BzBz_S-3.8' % (dbse)] = -1.693
BIND_NBC10A['%s-BzBz_S-3.9' % (dbse)] = -1.717 # BzBz_S minimum
BIND_NBC10A['%s-BzBz_S-4.0' % (dbse)] = -1.669
BIND_NBC10A['%s-BzBz_S-4.1' % (dbse)] = -1.577
BIND_NBC10A['%s-BzBz_S-4.2' % (dbse)] = -1.459
BIND_NBC10A['%s-BzBz_S-4.5' % (dbse)] = -1.066
BIND_NBC10A['%s-BzBz_S-5.0' % (dbse)] = -0.546
BIND_NBC10A['%s-BzBz_S-5.5' % (dbse)] = -0.251
BIND_NBC10A['%s-BzBz_S-6.0' % (dbse)] = -0.101
BIND_NBC10A['%s-BzBz_S-6.5' % (dbse)] = -0.029
BIND_NBC10A['%s-BzBz_S-10.0' % (dbse)] = 0.018
BIND_NBC10A['%s-BzBz_T-4.4' % (dbse)] = 0.617
BIND_NBC10A['%s-BzBz_T-4.5' % (dbse)] = -0.769
BIND_NBC10A['%s-BzBz_T-4.6' % (dbse)] = -1.682
BIND_NBC10A['%s-BzBz_T-4.7' % (dbse)] = -2.246
BIND_NBC10A['%s-BzBz_T-4.8' % (dbse)] = -2.559
BIND_NBC10A['%s-BzBz_T-4.9' % (dbse)] = -2.693
BIND_NBC10A['%s-BzBz_T-5.0' % (dbse)] = -2.703 # BzBz_T minimum
BIND_NBC10A['%s-BzBz_T-5.1' % (dbse)] = -2.630
BIND_NBC10A['%s-BzBz_T-5.2' % (dbse)] = -2.506
BIND_NBC10A['%s-BzBz_T-5.3' % (dbse)] = -2.351
BIND_NBC10A['%s-BzBz_T-5.4' % (dbse)] = -2.181
BIND_NBC10A['%s-BzBz_T-5.5' % (dbse)] = -2.006
BIND_NBC10A['%s-BzBz_T-5.6' % (dbse)] = -1.834
BIND_NBC10A['%s-BzBz_T-6.0' % (dbse)] = -1.242
BIND_NBC10A['%s-BzBz_T-6.5' % (dbse)] = -0.752
BIND_NBC10A['%s-BzBz_T-7.0' % (dbse)] = -0.468
BIND_NBC10A['%s-BzBz_T-7.5' % (dbse)] = -0.302
BIND_NBC10A['%s-BzBz_T-8.0' % (dbse)] = -0.203
BIND_NBC10A['%s-BzBz_PD34-0.2' % (dbse)] = 0.029
BIND_NBC10A['%s-BzBz_PD34-0.4' % (dbse)] = -0.298
BIND_NBC10A['%s-BzBz_PD34-0.6' % (dbse)] = -0.768
BIND_NBC10A['%s-BzBz_PD34-0.8' % (dbse)] = -1.298
BIND_NBC10A['%s-BzBz_PD34-1.0' % (dbse)] = -1.802
BIND_NBC10A['%s-BzBz_PD34-1.2' % (dbse)] = -2.213
BIND_NBC10A['%s-BzBz_PD34-1.4' % (dbse)] = -2.497
BIND_NBC10A['%s-BzBz_PD34-1.5' % (dbse)] = -2.586
BIND_NBC10A['%s-BzBz_PD34-1.6' % (dbse)] = -2.643
BIND_NBC10A['%s-BzBz_PD34-1.7' % (dbse)] = -2.668
BIND_NBC10A['%s-BzBz_PD34-1.8' % (dbse)] = -2.670 # BzBz_PD34 minimum
BIND_NBC10A['%s-BzBz_PD34-1.9' % (dbse)] = -2.649
BIND_NBC10A['%s-BzBz_PD34-2.0' % (dbse)] = -2.611
BIND_NBC10A['%s-BzBz_PD34-2.2' % (dbse)] = -2.501
BIND_NBC10A['%s-BzBz_PD34-2.4' % (dbse)] = -2.377
BIND_NBC10A['%s-BzBz_PD34-2.6' % (dbse)] = -2.260
BIND_NBC10A['%s-BzBz_PD34-2.8' % (dbse)] = -2.163
BIND_NBC10A['%s-BzBz_PD34-3.0' % (dbse)] = -2.093
BIND_NBC10A['%s-BzH2S-3.2' % (dbse)] = 1.236
BIND_NBC10A['%s-BzH2S-3.4' % (dbse)] = -1.584
BIND_NBC10A['%s-BzH2S-3.5' % (dbse)] = -2.269
BIND_NBC10A['%s-BzH2S-3.6' % (dbse)] = -2.649
BIND_NBC10A['%s-BzH2S-3.7' % (dbse)] = -2.818
BIND_NBC10A['%s-BzH2S-3.8' % (dbse)] = -2.843 # BzH2S minimum
BIND_NBC10A['%s-BzH2S-3.9' % (dbse)] = -2.773
BIND_NBC10A['%s-BzH2S-4.0' % (dbse)] = -2.645
BIND_NBC10A['%s-BzH2S-4.1' % (dbse)] = -2.483
BIND_NBC10A['%s-BzH2S-4.2' % (dbse)] = -2.305
BIND_NBC10A['%s-BzH2S-4.5' % (dbse)] = -1.771
BIND_NBC10A['%s-BzH2S-4.75' % (dbse)] = -1.393
BIND_NBC10A['%s-BzH2S-5.0' % (dbse)] = -1.092
BIND_NBC10A['%s-BzH2S-5.25' % (dbse)] = -0.859
BIND_NBC10A['%s-BzH2S-5.5' % (dbse)] = -0.682
BIND_NBC10A['%s-BzH2S-6.0' % (dbse)] = -0.444
BIND_NBC10A['%s-BzH2S-6.5' % (dbse)] = -0.301
BIND_NBC10A['%s-BzH2S-7.0' % (dbse)] = -0.212
BIND_NBC10A['%s-BzH2S-7.5' % (dbse)] = -0.154
BIND_NBC10A['%s-BzMe-3.2' % (dbse)] = 0.686
BIND_NBC10A['%s-BzMe-3.3' % (dbse)] = -0.213
BIND_NBC10A['%s-BzMe-3.4' % (dbse)] = -0.805
BIND_NBC10A['%s-BzMe-3.5' % (dbse)] = -1.173
BIND_NBC10A['%s-BzMe-3.6' % (dbse)] = -1.378
BIND_NBC10A['%s-BzMe-3.7' % (dbse)] = -1.470
BIND_NBC10A['%s-BzMe-3.8' % (dbse)] = -1.484 # BzMe minimum
BIND_NBC10A['%s-BzMe-3.9' % (dbse)] = -1.445
BIND_NBC10A['%s-BzMe-4.0' % (dbse)] = -1.374
BIND_NBC10A['%s-BzMe-4.1' % (dbse)] = -1.284
BIND_NBC10A['%s-BzMe-4.2' % (dbse)] = -1.185
BIND_NBC10A['%s-BzMe-4.4' % (dbse)] = -0.984
BIND_NBC10A['%s-BzMe-4.6' % (dbse)] = -0.800
BIND_NBC10A['%s-BzMe-4.8' % (dbse)] = -0.643
BIND_NBC10A['%s-BzMe-5.0' % (dbse)] = -0.515
BIND_NBC10A['%s-BzMe-5.2' % (dbse)] = -0.413
BIND_NBC10A['%s-BzMe-5.4' % (dbse)] = -0.332
BIND_NBC10A['%s-BzMe-5.6' % (dbse)] = -0.268
BIND_NBC10A['%s-BzMe-6.0' % (dbse)] = -0.177
BIND_NBC10A['%s-MeMe-3.2' % (dbse)] = 0.069
BIND_NBC10A['%s-MeMe-3.3' % (dbse)] = -0.239
BIND_NBC10A['%s-MeMe-3.4' % (dbse)] = -0.417
BIND_NBC10A['%s-MeMe-3.5' % (dbse)] = -0.508
BIND_NBC10A['%s-MeMe-3.6' % (dbse)] = -0.541 # MeMe minimum
BIND_NBC10A['%s-MeMe-3.7' % (dbse)] = -0.539
BIND_NBC10A['%s-MeMe-3.8' % (dbse)] = -0.515
BIND_NBC10A['%s-MeMe-3.9' % (dbse)] = -0.480
BIND_NBC10A['%s-MeMe-4.0' % (dbse)] = -0.439
BIND_NBC10A['%s-MeMe-4.1' % (dbse)] = -0.396
BIND_NBC10A['%s-MeMe-4.2' % (dbse)] = -0.354
BIND_NBC10A['%s-MeMe-4.3' % (dbse)] = -0.315
BIND_NBC10A['%s-MeMe-4.4' % (dbse)] = -0.279
BIND_NBC10A['%s-MeMe-4.6' % (dbse)] = -0.217
BIND_NBC10A['%s-MeMe-4.8' % (dbse)] = -0.168
BIND_NBC10A['%s-MeMe-5.0' % (dbse)] = -0.130
BIND_NBC10A['%s-MeMe-5.4' % (dbse)] = -0.080
BIND_NBC10A['%s-MeMe-5.8' % (dbse)] = -0.050
BIND_NBC10A['%s-PyPy_S2-3.1' % (dbse)] = 2.387
BIND_NBC10A['%s-PyPy_S2-3.3' % (dbse)] = -1.165
BIND_NBC10A['%s-PyPy_S2-3.4' % (dbse)] = -2.050
BIND_NBC10A['%s-PyPy_S2-3.5' % (dbse)] = -2.562
BIND_NBC10A['%s-PyPy_S2-3.6' % (dbse)] = -2.815
BIND_NBC10A['%s-PyPy_S2-3.7' % (dbse)] = -2.890 # PyPy_S2 minimum
BIND_NBC10A['%s-PyPy_S2-3.8' % (dbse)] = -2.849
BIND_NBC10A['%s-PyPy_S2-3.9' % (dbse)] = -2.733
BIND_NBC10A['%s-PyPy_S2-4.0' % (dbse)] = -2.573
BIND_NBC10A['%s-PyPy_S2-4.1' % (dbse)] = -2.391
BIND_NBC10A['%s-PyPy_S2-4.2' % (dbse)] = -2.201
BIND_NBC10A['%s-PyPy_S2-4.3' % (dbse)] = -2.012
BIND_NBC10A['%s-PyPy_S2-4.4' % (dbse)] = -1.830
BIND_NBC10A['%s-PyPy_S2-4.5' % (dbse)] = -1.660
BIND_NBC10A['%s-PyPy_S2-4.7' % (dbse)] = -1.357
BIND_NBC10A['%s-PyPy_S2-5.0' % (dbse)] = -1.002
BIND_NBC10A['%s-PyPy_S2-5.5' % (dbse)] = -0.619
BIND_NBC10A['%s-PyPy_S2-6.0' % (dbse)] = -0.402
BIND_NBC10A['%s-PyPy_S2-6.5' % (dbse)] = -0.276
BIND_NBC10A['%s-PyPy_S2-7.0' % (dbse)] = -0.200
BIND_NBC10A['%s-PyPy_T3-4.1' % (dbse)] = 9.341
BIND_NBC10A['%s-PyPy_T3-4.3' % (dbse)] = 1.991
BIND_NBC10A['%s-PyPy_T3-4.5' % (dbse)] = -1.377
BIND_NBC10A['%s-PyPy_T3-4.6' % (dbse)] = -2.203
BIND_NBC10A['%s-PyPy_T3-4.7' % (dbse)] = -2.673
BIND_NBC10A['%s-PyPy_T3-4.8' % (dbse)] = -2.896
BIND_NBC10A['%s-PyPy_T3-4.9' % (dbse)] = -2.954 # PyPy_T3 minimum
BIND_NBC10A['%s-PyPy_T3-5.0' % (dbse)] = -2.903
BIND_NBC10A['%s-PyPy_T3-5.1' % (dbse)] = -2.783
BIND_NBC10A['%s-PyPy_T3-5.2' % (dbse)] = -2.625
BIND_NBC10A['%s-PyPy_T3-5.3' % (dbse)] = -2.447
BIND_NBC10A['%s-PyPy_T3-5.4' % (dbse)] = -2.262
BIND_NBC10A['%s-PyPy_T3-5.5' % (dbse)] = -2.080
BIND_NBC10A['%s-PyPy_T3-5.7' % (dbse)] = -1.741
BIND_NBC10A['%s-PyPy_T3-6.0' % (dbse)] = -1.323
BIND_NBC10A['%s-PyPy_T3-6.5' % (dbse)] = -0.852
BIND_NBC10A['%s-PyPy_T3-7.0' % (dbse)] = -0.573
BIND_NBC10A['%s-PyPy_T3-8.0' % (dbse)] = -0.296
BIND_NBC10A['%s-PyPy_T3-9.0' % (dbse)] = -0.174
BIND_NBC10A['%s-BzBz_PD32-0.2' % (dbse)] = 3.241
BIND_NBC10A['%s-BzBz_PD32-0.4' % (dbse)] = 2.619
BIND_NBC10A['%s-BzBz_PD32-0.6' % (dbse)] = 1.726
BIND_NBC10A['%s-BzBz_PD32-0.8' % (dbse)] = 0.726
BIND_NBC10A['%s-BzBz_PD32-1.0' % (dbse)] = -0.222
BIND_NBC10A['%s-BzBz_PD32-1.2' % (dbse)] = -1.002
BIND_NBC10A['%s-BzBz_PD32-1.4' % (dbse)] = -1.553
BIND_NBC10A['%s-BzBz_PD32-1.5' % (dbse)] = -1.738
BIND_NBC10A['%s-BzBz_PD32-1.6' % (dbse)] = -1.868
BIND_NBC10A['%s-BzBz_PD32-1.7' % (dbse)] = -1.949
BIND_NBC10A['%s-BzBz_PD32-1.8' % (dbse)] = -1.988
BIND_NBC10A['%s-BzBz_PD32-1.9' % (dbse)] = -1.992 # BzBz_PD32 minimum
BIND_NBC10A['%s-BzBz_PD32-2.0' % (dbse)] = -1.971
BIND_NBC10A['%s-BzBz_PD32-2.2' % (dbse)] = -1.891
BIND_NBC10A['%s-BzBz_PD32-2.4' % (dbse)] = -1.795
BIND_NBC10A['%s-BzBz_PD32-2.6' % (dbse)] = -1.727
BIND_NBC10A['%s-BzBz_PD32-2.8' % (dbse)] = -1.702
BIND_NBC10A['%s-BzBz_PD32-3.0' % (dbse)] = -1.725
BIND_NBC10A['%s-BzBz_PD36-0.2' % (dbse)] = -1.321
BIND_NBC10A['%s-BzBz_PD36-0.4' % (dbse)] = -1.490
BIND_NBC10A['%s-BzBz_PD36-0.6' % (dbse)] = -1.735
BIND_NBC10A['%s-BzBz_PD36-0.8' % (dbse)] = -2.011
BIND_NBC10A['%s-BzBz_PD36-1.0' % (dbse)] = -2.273
BIND_NBC10A['%s-BzBz_PD36-1.2' % (dbse)] = -2.482
BIND_NBC10A['%s-BzBz_PD36-1.4' % (dbse)] = -2.619
BIND_NBC10A['%s-BzBz_PD36-1.5' % (dbse)] = -2.657
BIND_NBC10A['%s-BzBz_PD36-1.6' % (dbse)] = -2.674
BIND_NBC10A['%s-BzBz_PD36-1.7' % (dbse)] = -2.675 # BzBz_PD36 minimum
BIND_NBC10A['%s-BzBz_PD36-1.8' % (dbse)] = -2.662
BIND_NBC10A['%s-BzBz_PD36-1.9' % (dbse)] = -2.633
BIND_NBC10A['%s-BzBz_PD36-2.0' % (dbse)] = -2.593
BIND_NBC10A['%s-BzBz_PD36-2.2' % (dbse)] = -2.489
BIND_NBC10A['%s-BzBz_PD36-2.4' % (dbse)] = -2.371
BIND_NBC10A['%s-BzBz_PD36-2.6' % (dbse)] = -2.253
BIND_NBC10A['%s-BzBz_PD36-2.8' % (dbse)] = -2.143
BIND_NBC10A['%s-BzBz_PD36-3.0' % (dbse)] = -2.046
# Set default
# NBC10A is the module's active benchmark (matches the docstring's |dl|/|dr| marking).
BIND = BIND_NBC10A
# <<< Comment Lines >>>
# TAGL maps each reaction/reagent index to a human-readable description.
TAGL = {}
# Splits a member name 'SYSTEM-R' into system (group 1) and distance R (group 2).
rxnpattern = re.compile(r'^(.+)-(.+)$')
for item in BzBz_S:
    distance = rxnpattern.match(item)
    TAGL['%s-%s' % (dbse, item)] = 'Sandwich Benzene Dimer at %s A' % (distance.group(2))
    TAGL['%s-%s-dimer' % (dbse, item)] = 'Sandwich Benzene Dimer at %s A' % (distance.group(2))
    TAGL['%s-%s-monoA-CP' % (dbse, item)] = 'Benzene from Sandwich Benzene Dimer at %s A' % (distance.group(2))
for item in BzBz_T:
    distance = rxnpattern.match(item)
    TAGL['%s-%s' % (dbse, item)] = 'T-shaped Benzene Dimer at %s A' % (distance.group(2))
    TAGL['%s-%s-dimer' % (dbse, item)] = 'T-shaped Benzene Dimer at %s A' % (distance.group(2))
    TAGL['%s-%s-monoA-CP' % (dbse, item)] = 'Benzene from T-shaped Benzene Dimer at %s A' % (distance.group(2))
    TAGL['%s-%s-monoB-CP' % (dbse, item)] = 'Benzene from T-shaped Benzene Dimer at %s A' % (distance.group(2))
for item in BzBz_PD34:
    distance = rxnpattern.match(item)
    TAGL['%s-%s' % (dbse, item)] = 'Parallel Displaced Benzene Dimer Interplane 3.4 at %s A' % (distance.group(2))
    TAGL['%s-%s-dimer' % (dbse, item)] = 'Parallel Displaced Benzene Dimer Interplane 3.4 at %s A' % (distance.group(2))
    TAGL['%s-%s-monoA-CP' % (dbse, item)] = 'Benzene from Parallel Displaced Benzene Dimer Interplane 3.4 at %s A' % (distance.group(2))
for item in BzH2S:
    distance = rxnpattern.match(item)
    TAGL['%s-%s' % (dbse, item)] = 'Benzene-H2S at %s A' % (distance.group(2))
    TAGL['%s-%s-dimer' % (dbse, item)] = 'Benzene-H2S at %s A' % (distance.group(2))
    # BUGFIX: these two monomer tags formerly said 'from Benzene-Methane',
    # a copy-paste from the BzMe loop below; they describe the BzH2S curve.
    TAGL['%s-%s-monoA-CP' % (dbse, item)] = 'Benzene from Benzene-H2S at %s A' % (distance.group(2))
    TAGL['%s-%s-monoB-CP' % (dbse, item)] = 'Hydrogen Sulfide from Benzene-H2S at %s A' % (distance.group(2))
for item in BzMe:
    distance = rxnpattern.match(item)
    TAGL['%s-%s' % (dbse, item)] = 'Benzene-Methane at %s A' % (distance.group(2))
    TAGL['%s-%s-dimer' % (dbse, item)] = 'Benzene-Methane at %s A' % (distance.group(2))
    TAGL['%s-%s-monoA-CP' % (dbse, item)] = 'Benzene from Benzene-Methane at %s A' % (distance.group(2))
    TAGL['%s-%s-monoB-CP' % (dbse, item)] = 'Methane from Benzene-Methane at %s A' % (distance.group(2))
for item in MeMe:
    distance = rxnpattern.match(item)
    TAGL['%s-%s' % (dbse, item)] = 'Methane Dimer at %s A' % (distance.group(2))
    TAGL['%s-%s-dimer' % (dbse, item)] = 'Methane Dimer at %s A' % (distance.group(2))
    TAGL['%s-%s-monoA-CP' % (dbse, item)] = 'Methane from Methane Dimer at %s A' % (distance.group(2))
for item in PyPy_S2:
    distance = rxnpattern.match(item)
    TAGL['%s-%s' % (dbse, item)] = 'Pyridine Dimer S2 Configuration at %s A' % (distance.group(2))
    TAGL['%s-%s-dimer' % (dbse, item)] = 'Pyridine Dimer S2 Configuration at %s A' % (distance.group(2))
    TAGL['%s-%s-monoA-CP' % (dbse, item)] = 'Pyridine from Pyridine Dimer S2 Configuration at %s A' % (distance.group(2))
for item in PyPy_T3:
    distance = rxnpattern.match(item)
    TAGL['%s-%s' % (dbse, item)] = 'Pyridine Dimer T3 Configuration at %s A' % (distance.group(2))
    TAGL['%s-%s-dimer' % (dbse, item)] = 'Pyridine Dimer T3 Configuration at %s A' % (distance.group(2))
    TAGL['%s-%s-monoA-CP' % (dbse, item)] = 'Pyridine from Pyridine Dimer T3 Configuration at %s A' % (distance.group(2))
    TAGL['%s-%s-monoB-CP' % (dbse, item)] = 'Pyridine from Pyridine Dimer T3 Configuration at %s A' % (distance.group(2))
# Isolated (non-counterpoise) monomer tags.
TAGL['%s-Bz-mono-unCP' % (dbse)] = 'Benzene'
TAGL['%s-H2S-mono-unCP' % (dbse)] = 'Hydrogen Sulfide'
TAGL['%s-Bz2-mono-unCP' % (dbse)] = 'Benzene (alt. geometry)'
TAGL['%s-Me-mono-unCP' % (dbse)] = 'Methane'
TAGL['%s-Py-mono-unCP' % (dbse)] = 'Pyridine'
#<<< Geometry Specification Strings >>>
GEOS = {}
for rxn in BzBz_S:
molname = rxnpattern.match(rxn)
Rval = float(molname.group(2))
GEOS['%s-%s-%s' % (dbse, rxn, 'dimer')] = qcdb.Molecule("""
0 1
X
X 1 RXX
X 2 RXX 1 90.0
C 3 RCC 2 90.0 1 0.0
C 3 RCC 2 90.0 1 60.0
C 3 RCC 2 90.0 1 120.0
C 3 RCC 2 90.0 1 180.0
C 3 RCC 2 90.0 1 240.0
C 3 RCC 2 90.0 1 300.0
H 3 RCH 2 90.0 1 0.0
H 3 RCH 2 90.0 1 60.0
H 3 RCH 2 90.0 1 120.0
H 3 RCH 2 90.0 1 180.0
H 3 RCH 2 90.0 1 240.0
H 3 RCH 2 90.0 1 300.0
--
0 1
X 3 RXX 2 90.0 1 0.0
X 3 R 16 90.0 2 180.0
X 3 DRXX 16 90.0 2 180.0
X 18 RXX 17 90.0 16 0.0
C 17 RCC 18 90.0 19 0.0
C 17 RCC 18 90.0 19 60.0
C 17 RCC 18 90.0 19 120.0
C 17 RCC 18 90.0 19 180.0
C 17 RCC 18 90.0 19 240.0
C 17 RCC 18 90.0 19 300.0
H 17 RCH 18 90.0 19 0.0
H 17 RCH 18 90.0 19 60.0
H 17 RCH 18 90.0 19 120.0
H 17 RCH 18 90.0 19 180.0
H 17 RCH 18 90.0 19 240.0
H 17 RCH 18 90.0 19 300.0
RXX = 1.0
DRXX = 12.0
RCC = 1.3915
RCH = 2.4715
R = %(Rval)s
units angstrom
""" % vars())
for rxn in BzBz_T:
molname = rxnpattern.match(rxn)
Rval = float(molname.group(2))
GEOS['%s-%s-%s' % (dbse, rxn, 'dimer')] = qcdb.Molecule("""
0 1
X
X 1 RXX
X 2 RXX 1 90.0
C 3 RCC 2 90.0 1 0.0
C 3 RCC 2 90.0 1 60.0
C 3 RCC 2 90.0 1 120.0
C 3 RCC 2 90.0 1 180.0
C 3 RCC 2 90.0 1 240.0
C 3 RCC 2 90.0 1 300.0
H 3 RCH 2 90.0 1 0.0
H 3 RCH 2 90.0 1 60.0
H 3 RCH 2 90.0 1 120.0
H 3 RCH 2 90.0 1 180.0
H 3 RCH 2 90.0 1 240.0
H 3 RCH 2 90.0 1 300.0
--
0 1
X 3 RXX 2 90.0 1 0.0
X 3 R 16 90.0 1 0.0
X 17 RXX 3 90.0 16 180.0
X 18 RXX 17 90.0 3 0.0
C 17 RCC 18 90.0 19 0.0
C 17 RCC 18 90.0 19 60.0
C 17 RCC 18 90.0 19 120.0
C 17 RCC 18 90.0 19 180.0
C 17 RCC 18 90.0 19 240.0
C 17 RCC 18 90.0 19 300.0
H 17 RCH 18 90.0 19 0.0
H 17 RCH 18 90.0 19 60.0
H 17 RCH 18 90.0 19 120.0
H 17 RCH 18 90.0 19 180.0
H 17 RCH 18 90.0 19 240.0
H 17 RCH 18 90.0 19 300.0
RXX = 1.0
RCC = 1.3915
RCH = 2.4715
R = %(Rval)s
units angstrom
""" % vars())
for rxn in sum([BzBz_PD32, BzBz_PD34, BzBz_PD36], []):
molname = rxnpattern.match(rxn)
Rval = float(molname.group(2))
if rxn in BzBz_PD32:
R2val = 3.2
elif rxn in BzBz_PD34:
R2val = 3.4
elif rxn in BzBz_PD36:
R2val = 3.6
GEOS['%s-%s-%s' % (dbse, rxn, 'dimer')] = qcdb.Molecule("""
0 1
X
X 1 RXX
X 2 R2 1 90.0
C 3 RCC 2 90.0 1 0.0
C 3 RCC 2 90.0 1 60.0
C 3 RCC 2 90.0 1 120.0
C 3 RCC 2 90.0 1 180.0
C 3 RCC 2 90.0 1 240.0
C 3 RCC 2 90.0 1 300.0
H 3 RCH 2 90.0 1 0.0
H 3 RCH 2 90.0 1 60.0
H 3 RCH 2 90.0 1 120.0
H 3 RCH 2 90.0 1 180.0
H 3 RCH 2 90.0 1 240.0
H 3 RCH 2 90.0 1 300.0
--
0 1
X 3 RXX 2 90.0 1 0.0
X 2 R 3 90.0 16 90.0
X 17 RXX 2 90.0 1 90.0
X 18 RXX 17 90.0 2 90.0
C 17 RCC 18 90.0 19 0.0
C 17 RCC 18 90.0 19 60.0
C 17 RCC 18 90.0 19 120.0
C 17 RCC 18 90.0 19 180.0
C 17 RCC 18 90.0 19 240.0
C 17 RCC 18 90.0 19 300.0
H 17 RCH 18 90.0 19 0.0
H 17 RCH 18 90.0 19 60.0
H 17 RCH 18 90.0 19 120.0
H 17 RCH 18 90.0 19 180.0
H 17 RCH 18 90.0 19 240.0
H 17 RCH 18 90.0 19 300.0
RXX = 1.0
RCC = 1.3915
RCH = 2.4715
R = %(Rval)s
R2 = %(R2val)s
units angstrom
""" % vars())
for rxn in BzH2S:
molname = rxnpattern.match(rxn)
Rval = float(molname.group(2))
GEOS['%s-%s-%s' % (dbse, rxn, 'dimer')] = qcdb.Molecule("""
0 1
X
X 1 1.0
C 2 BZCX 1 90.0
C 2 BZCX 1 90.0 3 60.0
C 2 BZCX 1 90.0 4 60.0
C 2 BZCX 1 90.0 5 60.0
C 2 BZCX 1 90.0 6 60.0
C 2 BZCX 1 90.0 7 60.0
X 3 1.0 2 90.0 1 0.0
H 3 BZHC 9 90.0 2 180.0
H 4 BZHC 3 120.0 2 180.0
H 5 BZHC 4 120.0 2 180.0
H 6 BZHC 5 120.0 2 180.0
H 7 BZHC 6 120.0 2 180.0
H 8 BZHC 7 120.0 2 180.0
--
0 1
S 2 R 3 90.0 4 90.0
H 16 HS 2 HHSH 9 180.0
H 16 HS 2 HHSH 9 0.0
BZCX = 1.3915
BZHC = 1.0800
HS = 1.3356
HHSH = 46.06
R = %(Rval)s
units angstrom
""" % vars())
# Benzene...methane dimers: one Z-matrix Molecule per reaction, separation R
# parsed from the reaction label (group(2) of `rxnpattern` -- presumably the
# distance in Angstroms; TODO confirm against the regex definition above).
for rxn in BzMe:
    molname = rxnpattern.match(rxn)
    Rval = float(molname.group(2))  # separation distance for this point on the curve
    # %(Rval)s below is filled from module scope by vars(); keep the local
    # name `Rval` in sync with the placeholder.
    GEOS['%s-%s-%s' % (dbse, rxn, 'dimer')] = qcdb.Molecule("""
0 1
X
X 1 1.0
C 2 CQ 1 90.0
C 3 CQ 2 60.0 1 90.0
C 4 CQ 2 60.0 1 90.0
C 5 CQ 2 60.0 1 90.0
C 6 CQ 2 60.0 1 90.0
C 7 CQ 2 60.0 1 90.0
X 3 1.0 2 90.0 1 0.0
H 3 CH1 9 90.0 2 180.0
H 4 CH1 3 120.0 2 180.0
H 5 CH1 4 120.0 2 180.0
H 6 CH1 5 120.0 2 180.0
H 7 CH1 6 120.0 2 180.0
H 8 CH1 7 120.0 2 180.0
--
0 1
C 2 R 3 90.0 9 0.0
H 16 CH2 2 0.0 3 0.0
H 16 CH2 2 HCH 3 0.0
H 16 CH2 17 HCH 18 120.0
H 16 CH2 17 HCH 18 240.0
CQ = 1.405731
CH1 = 1.095210
CH2 = 1.099503
HCH = 109.471209
R = %(Rval)s
units angstrom
""" % vars())
# Methane dimers: one Z-matrix Molecule per reaction, C-C separation R parsed
# from the reaction label (group(2) of `rxnpattern`).
for rxn in MeMe:
    molname = rxnpattern.match(rxn)
    Rval = float(molname.group(2))  # separation distance for this point on the curve
    # %(Rval)s in the template is substituted via vars(); the local name
    # `Rval` must match the placeholder.
    GEOS['%s-%s-%s' % (dbse, rxn, 'dimer')] = qcdb.Molecule("""
0 1
C
H 1 CH2
H 1 CH2 2 HCH
H 1 CH2 2 HCH 3 120.0
H 1 CH2 2 HCH 3 240.0
--
0 1
C 1 R 2 180.0 4 120.0
H 6 CH2 2 180.0 4 120.0
H 6 CH2 7 HCH 3 180.0
H 6 CH2 7 HCH 4 180.0
H 6 CH2 7 HCH 5 180.0
CH2 = 1.099503
HCH = 109.471209
R = %(Rval)s
units angstrom
""" % vars())
# Pyridine dimers (S2 / sandwich-like arrangement, theta fixed at 180):
# one Z-matrix Molecule per reaction, ring-ring separation R parsed from the
# reaction label (group(2) of `rxnpattern`).
for rxn in PyPy_S2:
    molname = rxnpattern.match(rxn)
    Rval = float(molname.group(2))  # separation distance for this point on the curve
    # %(Rval)s below is filled from module scope by vars(); keep the local
    # name `Rval` in sync with the placeholder.
    GEOS['%s-%s-%s' % (dbse, rxn, 'dimer')] = qcdb.Molecule("""
0 1
X
X 1 R
N 1 1.3980380 2 90.0
C 1 1.3371053 2 90.0 3 -58.504950
C 1 1.3822904 2 90.0 4 -61.640500
C 1 1.4067471 2 90.0 5 -59.854550
C 1 1.3822904 2 90.0 6 -59.854550
C 1 1.3371053 2 90.0 7 -61.640500
H 4 1.08650 3 116.01 8 180.0
H 5 1.08260 4 120.12 3 180.0
H 6 1.08180 3 180.00 4 0.0
H 7 1.08260 8 120.12 3 180.0
H 8 1.08650 3 116.01 4 180.0
--
0 1
N 2 1.3980380 1 90.0 3 theta
C 2 1.3371053 1 90.0 14 -58.504950
C 2 1.3822904 1 90.0 15 -61.640500
C 2 1.4067471 1 90.0 16 -59.854550
C 2 1.3822904 1 90.0 17 -59.854550
C 2 1.3371053 1 90.0 18 -61.640500
H 15 1.08650 14 116.01 19 180.0
H 16 1.08260 15 120.12 14 180.0
H 17 1.08180 14 180.00 15 0.0
H 18 1.08260 19 120.12 14 180.0
H 19 1.08650 14 116.01 15 180.0
theta = 180.0
R = %(Rval)s
units angstrom
""" % vars())
# Pyridine dimers (T3 / T-shaped arrangement: theta = 90, updown = 270):
# one Z-matrix Molecule per reaction, separation R parsed from the reaction
# label (group(2) of `rxnpattern`).
for rxn in PyPy_T3:
    molname = rxnpattern.match(rxn)
    Rval = float(molname.group(2))  # separation distance for this point on the curve
    # %(Rval)s below is filled from module scope by vars(); keep the local
    # name `Rval` in sync with the placeholder.
    GEOS['%s-%s-%s' % (dbse, rxn, 'dimer')] = qcdb.Molecule("""
0 1
X
X 1 R
N 1 1.3980380 2 90.0
C 1 1.3371053 2 90.0 3 -58.504950
C 1 1.3822904 2 90.0 4 -61.640500
C 1 1.4067471 2 90.0 5 -59.854550
C 1 1.3822904 2 90.0 6 -59.854550
C 1 1.3371053 2 90.0 7 -61.640500
H 4 1.08650 3 116.01 8 180.0
H 5 1.08260 4 120.12 3 180.0
H 6 1.08180 3 180.00 4 0.0
H 7 1.08260 8 120.12 3 180.0
H 8 1.08650 3 116.01 4 180.0
--
0 1
X 2 2.0000000 1 90.0 3 theta
N 2 1.3980380 14 90.0 1 updown
C 2 1.3371053 14 90.0 15 -58.504950
C 2 1.3822904 14 90.0 16 -61.640500
C 2 1.4067471 14 90.0 17 -59.854550
C 2 1.3822904 14 90.0 18 -59.854550
C 2 1.3371053 14 90.0 19 -61.640500
H 16 1.08650 15 116.01 20 180.0
H 17 1.08260 16 120.12 15 180.0
H 18 1.08180 15 180.00 16 0.0
H 19 1.08260 20 120.12 15 180.0
H 20 1.08650 15 116.01 16 180.0
theta = 90.0
updown = 270.0
R = %(Rval)s
units angstrom
""" % vars())
# Isolated (uncounterpoised) monomer geometries: benzene, H2S, the benzene
# used in the Bz-Me curves (Bz2), methane, and pyridine.
# NOTE(review): unlike the dimer templates above, these literals contain no
# %(...)s conversion specifiers, so the original trailing "% vars()" was a
# no-op -- and a latent hazard, since any literal '%' added to a string later
# would raise ValueError at import time.  It has been dropped; the string
# contents are unchanged.
GEOS['%s-%s-%s' % (dbse, 'Bz', 'mono-unCP')] = qcdb.Molecule("""
0 1
X
X 1 RXX
X 2 RXX 1 90.0
C 3 RCC 2 90.0 1 0.0
C 3 RCC 2 90.0 1 60.0
C 3 RCC 2 90.0 1 120.0
C 3 RCC 2 90.0 1 180.0
C 3 RCC 2 90.0 1 240.0
C 3 RCC 2 90.0 1 300.0
H 3 RCH 2 90.0 1 0.0
H 3 RCH 2 90.0 1 60.0
H 3 RCH 2 90.0 1 120.0
H 3 RCH 2 90.0 1 180.0
H 3 RCH 2 90.0 1 240.0
H 3 RCH 2 90.0 1 300.0
RXX = 1.0
RCC = 1.3915
RCH = 2.4715
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'H2S', 'mono-unCP')] = qcdb.Molecule("""
0 1
S
H 1 HS
H 1 HS 2 HSH
HS = 1.3356
HSH = 92.12
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'Bz2', 'mono-unCP')] = qcdb.Molecule("""
0 1
X
X 1 1.0
C 2 CQ 1 90.0
C 3 CQ 2 60.0 1 90.0
C 4 CQ 2 60.0 1 90.0
C 5 CQ 2 60.0 1 90.0
C 6 CQ 2 60.0 1 90.0
C 7 CQ 2 60.0 1 90.0
X 3 1.0 2 90.0 1 0.0
H 3 CH1 9 90.0 2 180.0
H 4 CH1 3 120.0 2 180.0
H 5 CH1 4 120.0 2 180.0
H 6 CH1 5 120.0 2 180.0
H 7 CH1 6 120.0 2 180.0
H 8 CH1 7 120.0 2 180.0
CQ = 1.405731
CH1 = 1.095210
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'Me', 'mono-unCP')] = qcdb.Molecule("""
0 1
C
H 1 CH2
H 1 CH2 2 HCH
H 1 CH2 2 HCH 3 120.0
H 1 CH2 2 HCH 3 240.0
CH2 = 1.099503
HCH = 109.471209
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'Py', 'mono-unCP')] = qcdb.Molecule("""
0 1
X
X 1 RXX
N 1 1.3980380 2 90.0
C 1 1.3371053 2 90.0 3 -58.504950
C 1 1.3822904 2 90.0 4 -61.640500
C 1 1.4067471 2 90.0 5 -59.854550
C 1 1.3822904 2 90.0 6 -59.854550
C 1 1.3371053 2 90.0 7 -61.640500
H 4 1.08650 3 116.01 8 180.0
H 5 1.08260 4 120.12 3 180.0
H 6 1.08180 3 180.00 4 0.0
H 7 1.08260 8 120.12 3 180.0
H 8 1.08650 3 116.01 4 180.0
RXX = 1.0
units angstrom
""")
# <<< Derived Geometry Strings >>>
# For every reaction, derive the counterpoise-corrected monomer geometries
# from the dimer: extract_fragments(1, 2) keeps fragment 1 with fragment 2
# as ghost atoms (monoA-CP), and vice versa for monoB-CP.
for rxn in HRXN:
    GEOS['%s-%s-monoA-CP' % (dbse, rxn)] = GEOS['%s-%s-dimer' % (dbse, rxn)].extract_fragments(1, 2)
    GEOS['%s-%s-monoB-CP' % (dbse, rxn)] = GEOS['%s-%s-dimer' % (dbse, rxn)].extract_fragments(2, 1)
#########################################################################
# <<< Supplementary Quantum Chemical Results >>>
DATA = {}
DATA['NUCLEAR REPULSION ENERGY'] = {}
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_S-3.2-dimer' ] = 652.58240326
DATA['NUCLEAR REPULSION ENERGY']['NBC1-Bz-mono-unCP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_S-3.3-dimer' ] = 647.08083072
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_S-3.4-dimer' ] = 641.79881504
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_S-3.5-dimer' ] = 636.72435401
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_S-3.6-dimer' ] = 631.84627841
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_S-3.7-dimer' ] = 627.15417831
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_S-3.8-dimer' ] = 622.63833806
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_S-3.9-dimer' ] = 618.28967853
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_S-4.0-dimer' ] = 614.09970566
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_S-4.1-dimer' ] = 610.06046424
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_S-4.2-dimer' ] = 606.16449631
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_S-4.5-dimer' ] = 595.26834684
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_S-5.0-dimer' ] = 579.39688238
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_S-5.5-dimer' ] = 565.87021271
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_S-6.0-dimer' ] = 554.22625379
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_S-6.5-dimer' ] = 544.11253672
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_S-10.0-dimer' ] = 499.16037479
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-4.4-dimer' ] = 613.04854518
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-4.5-dimer' ] = 608.81636557
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-4.6-dimer' ] = 604.74550671
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-4.7-dimer' ] = 600.82787505
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-4.8-dimer' ] = 597.05577907
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-4.9-dimer' ] = 593.42192782
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-5.0-dimer' ] = 589.91942332
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-5.1-dimer' ] = 586.54174882
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-5.2-dimer' ] = 583.28275414
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-5.3-dimer' ] = 580.13663931
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-5.4-dimer' ] = 577.09793714
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-5.5-dimer' ] = 574.16149552
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-5.6-dimer' ] = 571.32245963
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-6.0-dimer' ] = 560.85272572
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-6.5-dimer' ] = 549.47925556
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-7.0-dimer' ] = 539.65622514
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-7.5-dimer' ] = 531.09189940
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-8.0-dimer' ] = 523.56205991
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD34-0.2-dimer' ] = 641.59153721
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD34-0.4-dimer' ] = 640.97218086
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD34-0.6-dimer' ] = 639.94808010
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD34-0.8-dimer' ] = 638.53114770
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD34-1.0-dimer' ] = 636.73745247
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD34-1.2-dimer' ] = 634.58670201
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD34-1.4-dimer' ] = 632.10168144
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD34-1.5-dimer' ] = 630.74164257
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD34-1.6-dimer' ] = 629.30768985
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD34-1.7-dimer' ] = 627.80329032
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD34-1.8-dimer' ] = 626.23200316
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD34-1.9-dimer' ] = 624.59746513
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD34-2.0-dimer' ] = 622.90337667
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD34-2.2-dimer' ] = 619.35158842
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD34-2.4-dimer' ] = 615.60701452
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD34-2.6-dimer' ] = 611.70022314
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD34-2.8-dimer' ] = 607.66157487
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD34-3.0-dimer' ] = 603.52082284
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-3.2-dimer' ] = 332.50866690
DATA['NUCLEAR REPULSION ENERGY']['NBC1-H2S-mono-unCP' ] = 12.95382185
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-3.4-dimer' ] = 326.76493049
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-3.5-dimer' ] = 324.08312886
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-3.6-dimer' ] = 321.51823084
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-3.7-dimer' ] = 319.06348175
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-3.8-dimer' ] = 316.71257239
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-3.9-dimer' ] = 314.45961051
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-4.0-dimer' ] = 312.29909326
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-4.1-dimer' ] = 310.22588084
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-4.2-dimer' ] = 308.23517159
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-4.5-dimer' ] = 302.71463310
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-4.75-dimer' ] = 298.57449040
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-5.0-dimer' ] = 294.79763877
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-5.25-dimer' ] = 291.34045574
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-5.5-dimer' ] = 288.16568982
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-6.0-dimer' ] = 282.54011405
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-6.5-dimer' ] = 277.71464354
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-7.0-dimer' ] = 273.53417452
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-7.5-dimer' ] = 269.88029141
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-3.2-dimer' ] = 277.70122037
DATA['NUCLEAR REPULSION ENERGY']['NBC1-Bz2-mono-unCP' ] = 201.83853774
DATA['NUCLEAR REPULSION ENERGY']['NBC1-Me-mono-unCP' ] = 13.31926457
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-3.3-dimer' ] = 276.14505886
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-3.4-dimer' ] = 274.65657480
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-3.5-dimer' ] = 273.23211647
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-3.6-dimer' ] = 271.86820659
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-3.7-dimer' ] = 270.56154682
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-3.8-dimer' ] = 269.30901798
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-3.9-dimer' ] = 268.10767718
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-4.0-dimer' ] = 266.95475267
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-4.1-dimer' ] = 265.84763738
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-4.2-dimer' ] = 264.78388141
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-4.4-dimer' ] = 262.77738579
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-4.6-dimer' ] = 260.91850385
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-4.8-dimer' ] = 259.19247204
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-5.0-dimer' ] = 257.58628148
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-5.2-dimer' ] = 256.08845607
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-5.4-dimer' ] = 254.68885527
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-5.6-dimer' ] = 253.37850109
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-6.0-dimer' ] = 250.99455064
DATA['NUCLEAR REPULSION ENERGY']['NBC1-MeMe-3.2-dimer' ] = 42.94051671
DATA['NUCLEAR REPULSION ENERGY']['NBC1-MeMe-3.3-dimer' ] = 42.46449704
DATA['NUCLEAR REPULSION ENERGY']['NBC1-MeMe-3.4-dimer' ] = 42.01471911
DATA['NUCLEAR REPULSION ENERGY']['NBC1-MeMe-3.5-dimer' ] = 41.58914043
DATA['NUCLEAR REPULSION ENERGY']['NBC1-MeMe-3.6-dimer' ] = 41.18591734
DATA['NUCLEAR REPULSION ENERGY']['NBC1-MeMe-3.7-dimer' ] = 40.80338247
DATA['NUCLEAR REPULSION ENERGY']['NBC1-MeMe-3.8-dimer' ] = 40.44002498
DATA['NUCLEAR REPULSION ENERGY']['NBC1-MeMe-3.9-dimer' ] = 40.09447330
DATA['NUCLEAR REPULSION ENERGY']['NBC1-MeMe-4.0-dimer' ] = 39.76547998
DATA['NUCLEAR REPULSION ENERGY']['NBC1-MeMe-4.1-dimer' ] = 39.45190844
DATA['NUCLEAR REPULSION ENERGY']['NBC1-MeMe-4.2-dimer' ] = 39.15272123
DATA['NUCLEAR REPULSION ENERGY']['NBC1-MeMe-4.3-dimer' ] = 38.86696980
DATA['NUCLEAR REPULSION ENERGY']['NBC1-MeMe-4.4-dimer' ] = 38.59378540
DATA['NUCLEAR REPULSION ENERGY']['NBC1-MeMe-4.6-dimer' ] = 38.08199453
DATA['NUCLEAR REPULSION ENERGY']['NBC1-MeMe-4.8-dimer' ] = 37.61171219
DATA['NUCLEAR REPULSION ENERGY']['NBC1-MeMe-5.0-dimer' ] = 37.17815187
DATA['NUCLEAR REPULSION ENERGY']['NBC1-MeMe-5.4-dimer' ] = 36.40542136
DATA['NUCLEAR REPULSION ENERGY']['NBC1-MeMe-5.8-dimer' ] = 35.73746090
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_S2-3.1-dimer' ] = 664.74968142
DATA['NUCLEAR REPULSION ENERGY']['NBC1-Py-mono-unCP' ] = 206.21910131
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_S2-3.3-dimer' ] = 653.28897360
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_S2-3.4-dimer' ] = 647.90584891
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_S2-3.5-dimer' ] = 642.73711461
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_S2-3.6-dimer' ] = 637.77107423
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_S2-3.7-dimer' ] = 632.99683541
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_S2-3.8-dimer' ] = 628.40424073
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_S2-3.9-dimer' ] = 623.98380628
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_S2-4.0-dimer' ] = 619.72666684
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_S2-4.1-dimer' ] = 615.62452662
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_S2-4.2-dimer' ] = 611.66961499
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_S2-4.3-dimer' ] = 607.85464633
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_S2-4.4-dimer' ] = 604.17278378
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_S2-4.5-dimer' ] = 600.61760611
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_S2-4.7-dimer' ] = 593.86352067
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_S2-5.0-dimer' ] = 584.54275675
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_S2-5.5-dimer' ] = 570.86466240
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_S2-6.0-dimer' ] = 559.10620798
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_S2-6.5-dimer' ] = 548.90465922
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_S2-7.0-dimer' ] = 539.98032943
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-4.1-dimer' ] = 631.74018099
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-4.3-dimer' ] = 622.28221702
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-4.5-dimer' ] = 613.57422251
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-4.6-dimer' ] = 609.47520868
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-4.7-dimer' ] = 605.53368830
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-4.8-dimer' ] = 601.74111111
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-4.9-dimer' ] = 598.08951503
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-5.0-dimer' ] = 594.57147649
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-5.1-dimer' ] = 591.18006603
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-5.2-dimer' ] = 587.90880856
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-5.3-dimer' ] = 584.75164753
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-5.4-dimer' ] = 581.70291245
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-5.5-dimer' ] = 578.75728949
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-5.7-dimer' ] = 573.15574951
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-6.0-dimer' ] = 565.41165299
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-6.5-dimer' ] = 554.01089095
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-7.0-dimer' ] = 544.16644693
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-8.0-dimer' ] = 528.04095562
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-9.0-dimer' ] = 515.40150653
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD32-0.2-dimer' ] = 652.35026383
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD32-0.4-dimer' ] = 651.65685475
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD32-0.6-dimer' ] = 650.51106101
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD32-0.8-dimer' ] = 648.92723975
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD32-1.0-dimer' ] = 646.92462020
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD32-1.2-dimer' ] = 644.52659143
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD32-1.4-dimer' ] = 641.75995892
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD32-1.5-dimer' ] = 640.24755050
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD32-1.6-dimer' ] = 638.65423207
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD32-1.7-dimer' ] = 636.98400901
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD32-1.8-dimer' ] = 635.24097954
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD32-1.9-dimer' ] = 633.42931896
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD32-2.0-dimer' ] = 631.55326486
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD32-2.2-dimer' ] = 627.62515488
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD32-2.4-dimer' ] = 623.49127864
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD32-2.6-dimer' ] = 619.18640729
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD32-2.8-dimer' ] = 614.74502815
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD32-3.0-dimer' ] = 610.20089775
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD36-0.2-dimer' ] = 631.66053374
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD36-0.4-dimer' ] = 631.10536715
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD36-0.6-dimer' ] = 630.18691177
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD36-0.8-dimer' ] = 628.91516711
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD36-1.0-dimer' ] = 627.30369102
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD36-1.2-dimer' ] = 625.36921338
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD36-1.4-dimer' ] = 623.13120361
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD36-1.5-dimer' ] = 621.90509666
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD36-1.6-dimer' ] = 620.61142042
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD36-1.7-dimer' ] = 619.25317914
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD36-1.8-dimer' ] = 617.83346514
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD36-1.9-dimer' ] = 616.35544587
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD36-2.0-dimer' ] = 614.82235130
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD36-2.2-dimer' ] = 611.60409513
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD36-2.4-dimer' ] = 608.20532569
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD36-2.6-dimer' ] = 604.65291019
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD36-2.8-dimer' ] = 600.97358989
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD36-3.0-dimer' ] = 597.19362514
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_S-3.2-dimer' ] = 652.58240326
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_S-3.2-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_S-3.3-dimer' ] = 647.08083072
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_S-3.3-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_S-3.4-dimer' ] = 641.79881504
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_S-3.4-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_S-3.5-dimer' ] = 636.72435401
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_S-3.5-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_S-3.6-dimer' ] = 631.84627841
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_S-3.6-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_S-3.7-dimer' ] = 627.15417831
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_S-3.7-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_S-3.8-dimer' ] = 622.63833806
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_S-3.8-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_S-3.9-dimer' ] = 618.28967853
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_S-3.9-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_S-4.0-dimer' ] = 614.09970566
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_S-4.0-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_S-4.1-dimer' ] = 610.06046424
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_S-4.1-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_S-4.2-dimer' ] = 606.16449631
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_S-4.2-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_S-4.5-dimer' ] = 595.26834684
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_S-4.5-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_S-5.0-dimer' ] = 579.39688238
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_S-5.0-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_S-5.5-dimer' ] = 565.87021271
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_S-5.5-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_S-6.0-dimer' ] = 554.22625379
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_S-6.0-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_S-6.5-dimer' ] = 544.11253672
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_S-6.5-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_S-10.0-dimer' ] = 499.16037479
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_S-10.0-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-4.4-dimer' ] = 613.04854518
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-4.4-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-4.4-monoB-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-4.5-dimer' ] = 608.81636557
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-4.5-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-4.5-monoB-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-4.6-dimer' ] = 604.74550671
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-4.6-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-4.6-monoB-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-4.7-dimer' ] = 600.82787505
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-4.7-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-4.7-monoB-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-4.8-dimer' ] = 597.05577907
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-4.8-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-4.8-monoB-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-4.9-dimer' ] = 593.42192782
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-4.9-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-4.9-monoB-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-5.0-dimer' ] = 589.91942332
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-5.0-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-5.0-monoB-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-5.1-dimer' ] = 586.54174882
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-5.1-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-5.1-monoB-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-5.2-dimer' ] = 583.28275414
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-5.2-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-5.2-monoB-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-5.3-dimer' ] = 580.13663931
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-5.3-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-5.3-monoB-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-5.4-dimer' ] = 577.09793714
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-5.4-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-5.4-monoB-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-5.5-dimer' ] = 574.16149552
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-5.5-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-5.5-monoB-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-5.6-dimer' ] = 571.32245963
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-5.6-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-5.6-monoB-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-6.0-dimer' ] = 560.85272572
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-6.0-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-6.0-monoB-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-6.5-dimer' ] = 549.47925556
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-6.5-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-6.5-monoB-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-7.0-dimer' ] = 539.65622514
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-7.0-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-7.0-monoB-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-7.5-dimer' ] = 531.09189940
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-7.5-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-7.5-monoB-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-8.0-dimer' ] = 523.56205991
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-8.0-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_T-8.0-monoB-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD34-0.2-dimer' ] = 641.59153721
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD34-0.2-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD34-0.4-dimer' ] = 640.97218086
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD34-0.4-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD34-0.6-dimer' ] = 639.94808010
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD34-0.6-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD34-0.8-dimer' ] = 638.53114770
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD34-0.8-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD34-1.0-dimer' ] = 636.73745247
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD34-1.0-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD34-1.2-dimer' ] = 634.58670201
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD34-1.2-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD34-1.4-dimer' ] = 632.10168144
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD34-1.4-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD34-1.5-dimer' ] = 630.74164257
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD34-1.5-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD34-1.6-dimer' ] = 629.30768985
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD34-1.6-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD34-1.7-dimer' ] = 627.80329032
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD34-1.7-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD34-1.8-dimer' ] = 626.23200316
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD34-1.8-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD34-1.9-dimer' ] = 624.59746513
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD34-1.9-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD34-2.0-dimer' ] = 622.90337667
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD34-2.0-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD34-2.2-dimer' ] = 619.35158842
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD34-2.2-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD34-2.4-dimer' ] = 615.60701452
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD34-2.4-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD34-2.6-dimer' ] = 611.70022314
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD34-2.6-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD34-2.8-dimer' ] = 607.66157487
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD34-2.8-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD34-3.0-dimer' ] = 603.52082284
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD34-3.0-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-3.2-dimer' ] = 332.50866690
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-3.2-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-3.2-monoB-CP' ] = 12.95382185
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-3.4-dimer' ] = 326.76493049
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-3.4-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-3.4-monoB-CP' ] = 12.95382185
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-3.5-dimer' ] = 324.08312886
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-3.5-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-3.5-monoB-CP' ] = 12.95382185
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-3.6-dimer' ] = 321.51823084
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-3.6-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-3.6-monoB-CP' ] = 12.95382185
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-3.7-dimer' ] = 319.06348175
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-3.7-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-3.7-monoB-CP' ] = 12.95382185
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-3.8-dimer' ] = 316.71257239
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-3.8-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-3.8-monoB-CP' ] = 12.95382185
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-3.9-dimer' ] = 314.45961051
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-3.9-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-3.9-monoB-CP' ] = 12.95382185
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-4.0-dimer' ] = 312.29909326
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-4.0-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-4.0-monoB-CP' ] = 12.95382185
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-4.1-dimer' ] = 310.22588084
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-4.1-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-4.1-monoB-CP' ] = 12.95382185
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-4.2-dimer' ] = 308.23517159
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-4.2-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-4.2-monoB-CP' ] = 12.95382185
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-4.5-dimer' ] = 302.71463310
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-4.5-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-4.5-monoB-CP' ] = 12.95382185
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-4.75-dimer' ] = 298.57449040
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-4.75-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-4.75-monoB-CP' ] = 12.95382185
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-5.0-dimer' ] = 294.79763877
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-5.0-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-5.0-monoB-CP' ] = 12.95382185
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-5.25-dimer' ] = 291.34045574
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-5.25-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-5.25-monoB-CP' ] = 12.95382185
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-5.5-dimer' ] = 288.16568982
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-5.5-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-5.5-monoB-CP' ] = 12.95382185
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-6.0-dimer' ] = 282.54011405
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-6.0-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-6.0-monoB-CP' ] = 12.95382185
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-6.5-dimer' ] = 277.71464354
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-6.5-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-6.5-monoB-CP' ] = 12.95382185
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-7.0-dimer' ] = 273.53417452
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-7.0-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-7.0-monoB-CP' ] = 12.95382185
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-7.5-dimer' ] = 269.88029141
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-7.5-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzH2S-7.5-monoB-CP' ] = 12.95382185
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-3.2-dimer' ] = 277.70122037
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-3.2-monoA-CP' ] = 201.83853774
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-3.2-monoB-CP' ] = 13.31926457
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-3.3-dimer' ] = 276.14505886
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-3.3-monoA-CP' ] = 201.83853774
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-3.3-monoB-CP' ] = 13.31926457
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-3.4-dimer' ] = 274.65657480
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-3.4-monoA-CP' ] = 201.83853774
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-3.4-monoB-CP' ] = 13.31926457
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-3.5-dimer' ] = 273.23211647
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-3.5-monoA-CP' ] = 201.83853774
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-3.5-monoB-CP' ] = 13.31926457
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-3.6-dimer' ] = 271.86820659
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-3.6-monoA-CP' ] = 201.83853774
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-3.6-monoB-CP' ] = 13.31926457
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-3.7-dimer' ] = 270.56154682
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-3.7-monoA-CP' ] = 201.83853774
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-3.7-monoB-CP' ] = 13.31926457
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-3.8-dimer' ] = 269.30901798
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-3.8-monoA-CP' ] = 201.83853774
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-3.8-monoB-CP' ] = 13.31926457
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-3.9-dimer' ] = 268.10767718
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-3.9-monoA-CP' ] = 201.83853774
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-3.9-monoB-CP' ] = 13.31926457
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-4.0-dimer' ] = 266.95475267
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-4.0-monoA-CP' ] = 201.83853774
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-4.0-monoB-CP' ] = 13.31926457
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-4.1-dimer' ] = 265.84763738
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-4.1-monoA-CP' ] = 201.83853774
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-4.1-monoB-CP' ] = 13.31926457
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-4.2-dimer' ] = 264.78388141
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-4.2-monoA-CP' ] = 201.83853774
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-4.2-monoB-CP' ] = 13.31926457
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-4.4-dimer' ] = 262.77738579
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-4.4-monoA-CP' ] = 201.83853774
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-4.4-monoB-CP' ] = 13.31926457
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-4.6-dimer' ] = 260.91850385
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-4.6-monoA-CP' ] = 201.83853774
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-4.6-monoB-CP' ] = 13.31926457
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-4.8-dimer' ] = 259.19247204
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-4.8-monoA-CP' ] = 201.83853774
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-4.8-monoB-CP' ] = 13.31926457
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-5.0-dimer' ] = 257.58628148
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-5.0-monoA-CP' ] = 201.83853774
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-5.0-monoB-CP' ] = 13.31926457
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-5.2-dimer' ] = 256.08845607
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-5.2-monoA-CP' ] = 201.83853774
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-5.2-monoB-CP' ] = 13.31926457
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-5.4-dimer' ] = 254.68885527
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-5.4-monoA-CP' ] = 201.83853774
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-5.4-monoB-CP' ] = 13.31926457
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-5.6-dimer' ] = 253.37850109
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-5.6-monoA-CP' ] = 201.83853774
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-5.6-monoB-CP' ] = 13.31926457
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-6.0-dimer' ] = 250.99455064
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-6.0-monoA-CP' ] = 201.83853774
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzMe-6.0-monoB-CP' ] = 13.31926457
DATA['NUCLEAR REPULSION ENERGY']['NBC1-MeMe-3.2-dimer' ] = 42.94051671
DATA['NUCLEAR REPULSION ENERGY']['NBC1-MeMe-3.2-monoA-CP' ] = 13.31926457
DATA['NUCLEAR REPULSION ENERGY']['NBC1-MeMe-3.3-dimer' ] = 42.46449704
DATA['NUCLEAR REPULSION ENERGY']['NBC1-MeMe-3.3-monoA-CP' ] = 13.31926457
DATA['NUCLEAR REPULSION ENERGY']['NBC1-MeMe-3.4-dimer' ] = 42.01471911
DATA['NUCLEAR REPULSION ENERGY']['NBC1-MeMe-3.4-monoA-CP' ] = 13.31926457
DATA['NUCLEAR REPULSION ENERGY']['NBC1-MeMe-3.5-dimer' ] = 41.58914043
DATA['NUCLEAR REPULSION ENERGY']['NBC1-MeMe-3.5-monoA-CP' ] = 13.31926457
DATA['NUCLEAR REPULSION ENERGY']['NBC1-MeMe-3.6-dimer' ] = 41.18591734
DATA['NUCLEAR REPULSION ENERGY']['NBC1-MeMe-3.6-monoA-CP' ] = 13.31926457
DATA['NUCLEAR REPULSION ENERGY']['NBC1-MeMe-3.7-dimer' ] = 40.80338247
DATA['NUCLEAR REPULSION ENERGY']['NBC1-MeMe-3.7-monoA-CP' ] = 13.31926457
DATA['NUCLEAR REPULSION ENERGY']['NBC1-MeMe-3.8-dimer' ] = 40.44002498
DATA['NUCLEAR REPULSION ENERGY']['NBC1-MeMe-3.8-monoA-CP' ] = 13.31926457
DATA['NUCLEAR REPULSION ENERGY']['NBC1-MeMe-3.9-dimer' ] = 40.09447330
DATA['NUCLEAR REPULSION ENERGY']['NBC1-MeMe-3.9-monoA-CP' ] = 13.31926457
DATA['NUCLEAR REPULSION ENERGY']['NBC1-MeMe-4.0-dimer' ] = 39.76547998
DATA['NUCLEAR REPULSION ENERGY']['NBC1-MeMe-4.0-monoA-CP' ] = 13.31926457
DATA['NUCLEAR REPULSION ENERGY']['NBC1-MeMe-4.1-dimer' ] = 39.45190844
DATA['NUCLEAR REPULSION ENERGY']['NBC1-MeMe-4.1-monoA-CP' ] = 13.31926457
DATA['NUCLEAR REPULSION ENERGY']['NBC1-MeMe-4.2-dimer' ] = 39.15272123
DATA['NUCLEAR REPULSION ENERGY']['NBC1-MeMe-4.2-monoA-CP' ] = 13.31926457
DATA['NUCLEAR REPULSION ENERGY']['NBC1-MeMe-4.3-dimer' ] = 38.86696980
DATA['NUCLEAR REPULSION ENERGY']['NBC1-MeMe-4.3-monoA-CP' ] = 13.31926457
DATA['NUCLEAR REPULSION ENERGY']['NBC1-MeMe-4.4-dimer' ] = 38.59378540
DATA['NUCLEAR REPULSION ENERGY']['NBC1-MeMe-4.4-monoA-CP' ] = 13.31926457
DATA['NUCLEAR REPULSION ENERGY']['NBC1-MeMe-4.6-dimer' ] = 38.08199453
DATA['NUCLEAR REPULSION ENERGY']['NBC1-MeMe-4.6-monoA-CP' ] = 13.31926457
DATA['NUCLEAR REPULSION ENERGY']['NBC1-MeMe-4.8-dimer' ] = 37.61171219
DATA['NUCLEAR REPULSION ENERGY']['NBC1-MeMe-4.8-monoA-CP' ] = 13.31926457
DATA['NUCLEAR REPULSION ENERGY']['NBC1-MeMe-5.0-dimer' ] = 37.17815187
DATA['NUCLEAR REPULSION ENERGY']['NBC1-MeMe-5.0-monoA-CP' ] = 13.31926457
DATA['NUCLEAR REPULSION ENERGY']['NBC1-MeMe-5.4-dimer' ] = 36.40542136
DATA['NUCLEAR REPULSION ENERGY']['NBC1-MeMe-5.4-monoA-CP' ] = 13.31926457
DATA['NUCLEAR REPULSION ENERGY']['NBC1-MeMe-5.8-dimer' ] = 35.73746090
DATA['NUCLEAR REPULSION ENERGY']['NBC1-MeMe-5.8-monoA-CP' ] = 13.31926457
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_S2-3.1-dimer' ] = 664.74968142
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_S2-3.1-monoA-CP' ] = 206.21910131
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_S2-3.3-dimer' ] = 653.28897360
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_S2-3.3-monoA-CP' ] = 206.21910131
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_S2-3.4-dimer' ] = 647.90584891
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_S2-3.4-monoA-CP' ] = 206.21910131
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_S2-3.5-dimer' ] = 642.73711461
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_S2-3.5-monoA-CP' ] = 206.21910131
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_S2-3.6-dimer' ] = 637.77107423
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_S2-3.6-monoA-CP' ] = 206.21910131
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_S2-3.7-dimer' ] = 632.99683541
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_S2-3.7-monoA-CP' ] = 206.21910131
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_S2-3.8-dimer' ] = 628.40424073
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_S2-3.8-monoA-CP' ] = 206.21910131
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_S2-3.9-dimer' ] = 623.98380628
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_S2-3.9-monoA-CP' ] = 206.21910131
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_S2-4.0-dimer' ] = 619.72666684
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_S2-4.0-monoA-CP' ] = 206.21910131
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_S2-4.1-dimer' ] = 615.62452662
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_S2-4.1-monoA-CP' ] = 206.21910131
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_S2-4.2-dimer' ] = 611.66961499
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_S2-4.2-monoA-CP' ] = 206.21910131
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_S2-4.3-dimer' ] = 607.85464633
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_S2-4.3-monoA-CP' ] = 206.21910131
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_S2-4.4-dimer' ] = 604.17278378
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_S2-4.4-monoA-CP' ] = 206.21910131
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_S2-4.5-dimer' ] = 600.61760611
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_S2-4.5-monoA-CP' ] = 206.21910131
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_S2-4.7-dimer' ] = 593.86352067
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_S2-4.7-monoA-CP' ] = 206.21910131
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_S2-5.0-dimer' ] = 584.54275675
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_S2-5.0-monoA-CP' ] = 206.21910131
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_S2-5.5-dimer' ] = 570.86466240
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_S2-5.5-monoA-CP' ] = 206.21910131
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_S2-6.0-dimer' ] = 559.10620798
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_S2-6.0-monoA-CP' ] = 206.21910131
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_S2-6.5-dimer' ] = 548.90465922
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_S2-6.5-monoA-CP' ] = 206.21910131
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_S2-7.0-dimer' ] = 539.98032943
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_S2-7.0-monoA-CP' ] = 206.21910131
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-4.1-dimer' ] = 631.74018099
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-4.1-monoA-CP' ] = 206.21910131
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-4.1-monoB-CP' ] = 206.21910131
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-4.3-dimer' ] = 622.28221702
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-4.3-monoA-CP' ] = 206.21910131
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-4.3-monoB-CP' ] = 206.21910131
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-4.5-dimer' ] = 613.57422251
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-4.5-monoA-CP' ] = 206.21910131
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-4.5-monoB-CP' ] = 206.21910131
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-4.6-dimer' ] = 609.47520868
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-4.6-monoA-CP' ] = 206.21910131
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-4.6-monoB-CP' ] = 206.21910131
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-4.7-dimer' ] = 605.53368830
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-4.7-monoA-CP' ] = 206.21910131
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-4.7-monoB-CP' ] = 206.21910131
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-4.8-dimer' ] = 601.74111111
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-4.8-monoA-CP' ] = 206.21910131
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-4.8-monoB-CP' ] = 206.21910131
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-4.9-dimer' ] = 598.08951503
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-4.9-monoA-CP' ] = 206.21910131
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-4.9-monoB-CP' ] = 206.21910131
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-5.0-dimer' ] = 594.57147649
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-5.0-monoA-CP' ] = 206.21910131
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-5.0-monoB-CP' ] = 206.21910131
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-5.1-dimer' ] = 591.18006603
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-5.1-monoA-CP' ] = 206.21910131
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-5.1-monoB-CP' ] = 206.21910131
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-5.2-dimer' ] = 587.90880856
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-5.2-monoA-CP' ] = 206.21910131
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-5.2-monoB-CP' ] = 206.21910131
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-5.3-dimer' ] = 584.75164753
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-5.3-monoA-CP' ] = 206.21910131
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-5.3-monoB-CP' ] = 206.21910131
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-5.4-dimer' ] = 581.70291245
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-5.4-monoA-CP' ] = 206.21910131
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-5.4-monoB-CP' ] = 206.21910131
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-5.5-dimer' ] = 578.75728949
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-5.5-monoA-CP' ] = 206.21910131
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-5.5-monoB-CP' ] = 206.21910131
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-5.7-dimer' ] = 573.15574951
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-5.7-monoA-CP' ] = 206.21910131
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-5.7-monoB-CP' ] = 206.21910131
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-6.0-dimer' ] = 565.41165299
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-6.0-monoA-CP' ] = 206.21910131
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-6.0-monoB-CP' ] = 206.21910131
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-6.5-dimer' ] = 554.01089095
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-6.5-monoA-CP' ] = 206.21910131
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-6.5-monoB-CP' ] = 206.21910131
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-7.0-dimer' ] = 544.16644693
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-7.0-monoA-CP' ] = 206.21910131
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-7.0-monoB-CP' ] = 206.21910131
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-8.0-dimer' ] = 528.04095562
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-8.0-monoA-CP' ] = 206.21910131
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-8.0-monoB-CP' ] = 206.21910131
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-9.0-dimer' ] = 515.40150653
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-9.0-monoA-CP' ] = 206.21910131
DATA['NUCLEAR REPULSION ENERGY']['NBC1-PyPy_T3-9.0-monoB-CP' ] = 206.21910131
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD32-0.2-dimer' ] = 652.35026383
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD32-0.2-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD32-0.4-dimer' ] = 651.65685475
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD32-0.4-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD32-0.6-dimer' ] = 650.51106101
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD32-0.6-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD32-0.8-dimer' ] = 648.92723975
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD32-0.8-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD32-1.0-dimer' ] = 646.92462020
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD32-1.0-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD32-1.2-dimer' ] = 644.52659143
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD32-1.2-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD32-1.4-dimer' ] = 641.75995892
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD32-1.4-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD32-1.5-dimer' ] = 640.24755050
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD32-1.5-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD32-1.6-dimer' ] = 638.65423207
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD32-1.6-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD32-1.7-dimer' ] = 636.98400901
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD32-1.7-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD32-1.8-dimer' ] = 635.24097954
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD32-1.8-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD32-1.9-dimer' ] = 633.42931896
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD32-1.9-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD32-2.0-dimer' ] = 631.55326486
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD32-2.0-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD32-2.2-dimer' ] = 627.62515488
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD32-2.2-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD32-2.4-dimer' ] = 623.49127864
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD32-2.4-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD32-2.6-dimer' ] = 619.18640729
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD32-2.6-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD32-2.8-dimer' ] = 614.74502815
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD32-2.8-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD32-3.0-dimer' ] = 610.20089775
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD32-3.0-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD36-0.2-dimer' ] = 631.66053374
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD36-0.2-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD36-0.4-dimer' ] = 631.10536715
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD36-0.4-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD36-0.6-dimer' ] = 630.18691177
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD36-0.6-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD36-0.8-dimer' ] = 628.91516711
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD36-0.8-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD36-1.0-dimer' ] = 627.30369102
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD36-1.0-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD36-1.2-dimer' ] = 625.36921338
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD36-1.2-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD36-1.4-dimer' ] = 623.13120361
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD36-1.4-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD36-1.5-dimer' ] = 621.90509666
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD36-1.5-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD36-1.6-dimer' ] = 620.61142042
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD36-1.6-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD36-1.7-dimer' ] = 619.25317914
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD36-1.7-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD36-1.8-dimer' ] = 617.83346514
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD36-1.8-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD36-1.9-dimer' ] = 616.35544587
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD36-1.9-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD36-2.0-dimer' ] = 614.82235130
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD36-2.0-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD36-2.2-dimer' ] = 611.60409513
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD36-2.2-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD36-2.4-dimer' ] = 608.20532569
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD36-2.4-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD36-2.6-dimer' ] = 604.65291019
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD36-2.6-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD36-2.8-dimer' ] = 600.97358989
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD36-2.8-monoA-CP' ] = 204.01997321
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD36-3.0-dimer' ] = 597.19362514
DATA['NUCLEAR REPULSION ENERGY']['NBC1-BzBz_PD36-3.0-monoA-CP' ] = 204.01997321
| psi4/psi4 | psi4/share/psi4/databases/NBC10.py | Python | lgpl-3.0 | 98,454 |
import _plotly_utils.basevalidators
class TickprefixValidator(_plotly_utils.basevalidators.StringValidator):
    """String validator for the ``tickprefix`` property of
    ``scatterternary.marker.colorbar``."""

    def __init__(
        self,
        plotly_name="tickprefix",
        parent_name="scatterternary.marker.colorbar",
        **kwargs
    ):
        # Default the edit type unless the caller supplied one explicitly.
        kwargs.setdefault("edit_type", "colorbars")
        super(TickprefixValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
| plotly/plotly.py | packages/python/plotly/plotly/validators/scatterternary/marker/colorbar/_tickprefix.py | Python | mit | 471 |
"""
The GeometryColumns and SpatialRefSys models for the SpatiaLite backend.
"""
from django.db import models
from django.contrib.gis.db.backends.base import SpatialRefSysMixin
class GeometryColumns(models.Model):
    """
    Maps SpatiaLite's 'geometry_columns' metadata table.
    """
    f_table_name = models.CharField(max_length=256)
    f_geometry_column = models.CharField(max_length=256)
    type = models.CharField(max_length=30)
    coord_dimension = models.IntegerField()
    srid = models.IntegerField(primary_key=True)
    spatial_index_enabled = models.IntegerField()

    class Meta:
        db_table = 'geometry_columns'
        managed = False

    @classmethod
    def table_name_col(cls):
        """
        Returns the name of the metadata column holding the feature
        table name.
        """
        return 'f_table_name'

    @classmethod
    def geom_col_name(cls):
        """
        Returns the name of the metadata column holding the feature
        geometry column.
        """
        return 'f_geometry_column'

    def __unicode__(self):
        return "%s.%s - %dD %s field (SRID: %d)" % (
            self.f_table_name,
            self.f_geometry_column,
            self.coord_dimension,
            self.type,
            self.srid,
        )
class SpatialRefSys(models.Model, SpatialRefSysMixin):
    """
    Maps SpatiaLite's 'spatial_ref_sys' table.
    """
    srid = models.IntegerField(primary_key=True)
    auth_name = models.CharField(max_length=256)
    auth_srid = models.IntegerField()
    ref_sys_name = models.CharField(max_length=256)
    proj4text = models.CharField(max_length=2048)

    @property
    def wkt(self):
        # Derive OGC WKT from the stored PROJ.4 definition via GDAL.
        from django.contrib.gis.gdal import SpatialReference
        return SpatialReference(self.proj4text).wkt

    class Meta:
        db_table = 'spatial_ref_sys'
        managed = False
| ychen820/microblog | y/google-cloud-sdk/platform/google_appengine/lib/django-1.4/django/contrib/gis/db/backends/spatialite/models.py | Python | bsd-3-clause | 1,847 |
"""
Django ID mapper
Modified for Evennia by making sure that no model references
leave caching unexpectedly (no use of WeakRefs).
Also adds cache_size() for monitoring the size of the cache.
"""
import os, threading
#from twisted.internet import reactor
#from twisted.internet.threads import blockingCallFromThread
from twisted.internet.reactor import callFromThread
from django.core.exceptions import ObjectDoesNotExist, FieldError
from django.db.models.base import Model, ModelBase
from django.db.models.signals import post_save, pre_delete, post_syncdb
from src.utils.utils import dbref, get_evennia_pids, to_str
from manager import SharedMemoryManager
# Placeholders for per-field cached accessors (currently unused).
_FIELD_CACHE_GET = None
_FIELD_CACHE_SET = None
# Raw object accessors; used throughout to bypass any overloaded
# __getattribute__/__setattr__/__delattr__ on the model classes below.
_GA = object.__getattribute__
_SA = object.__setattr__
_DA = object.__delattr__
# determine if our current pid is different from the server PID (i.e.
# if we are in a subprocess or not)
from src import PROC_MODIFIED_OBJS
# get info about the current process and thread
_SELF_PID = os.getpid()
_SERVER_PID, _PORTAL_PID = get_evennia_pids()
# True only when running in a spawned subprocess of the server/portal pair.
_IS_SUBPROCESS = (_SERVER_PID and _PORTAL_PID) and not _SELF_PID in (_SERVER_PID, _PORTAL_PID)
_IS_MAIN_THREAD = threading.currentThread().getName() == "MainThread"
#_SERVER_PID = None
#_PORTAL_PID = None
# #global _SERVER_PID, _PORTAL_PID, _IS_SUBPROCESS, _SELF_PID
# if not _SERVER_PID and not _PORTAL_PID:
#     _IS_SUBPROCESS = (_SERVER_PID and _PORTAL_PID) and not _SELF_PID in (_SERVER_PID, _PORTAL_PID)
class SharedMemoryModelBase(ModelBase):
    """
    Metaclass providing class-wide instance caching for models.

    On class creation it also auto-generates property wrappers for every
    `db_*` field (db_key -> key etc.) so that attribute access reads and
    writes the underlying database field transparently.
    """
    # CL: upstream had a __new__ method that skipped ModelBase's __new__ if
    # SharedMemoryModelBase was not in the model class's ancestors. It's not
    # clear what was the intended purpose, but skipping ModelBase.__new__
    # broke things; in particular, default manager inheritance.
    def __call__(cls, *args, **kwargs):
        """
        this method will either create an instance (by calling the default implementation)
        or try to retrieve one from the class-wide cache by infering the pk value from
        args and kwargs. If instance caching is enabled for this class, the cache is
        populated whenever possible (ie when it is possible to infer the pk value).
        """
        def new_instance():
            return super(SharedMemoryModelBase, cls).__call__(*args, **kwargs)
        instance_key = cls._get_cache_key(args, kwargs)
        # depending on the arguments, we might not be able to infer the PK,
        # so in that case we create a new instance
        if instance_key is None:
            return new_instance()
        cached_instance = cls.get_cached_instance(instance_key)
        if cached_instance is None:
            cached_instance = new_instance()
            cls.cache_instance(cached_instance)
        return cached_instance

    def _prepare(cls):
        # One cache dict per model class. Deliberately a plain dict, not a
        # WeakValueDictionary: cached models must not drop out unexpectedly.
        cls.__instance_cache__ = {} #WeakValueDictionary()
        super(SharedMemoryModelBase, cls)._prepare()

    def __new__(cls, classname, bases, classdict, *args, **kwargs):
        """
        Field shortcut creation:
        Takes field names db_* and creates property wrappers named without the db_ prefix. So db_key -> key
        This wrapper happens on the class level, so there is no overhead when creating objects. If a class
        already has a wrapper of the given name, the automatic creation is skipped. Note: Remember to
        document this auto-wrapping in the class header, this could seem very much like magic to the user otherwise.
        """
        def create_wrapper(cls, fieldname, wrappername, editable=True, foreignkey=False):
            "Helper method to create property wrappers with unique names (must be in separate call)"
            def _get(cls, fname):
                "Wrapper for getting database field"
                return _GA(cls, fieldname)
            def _get_foreign(cls, fname):
                "Wrapper for returning foreignkey fields"
                value = _GA(cls, fieldname)
                # Fall back to the raw FK value if it has no typeclass.
                # (was a bare `except:`; narrowed to Exception so that
                # system-exiting exceptions are not swallowed)
                try:
                    return _GA(value, "typeclass")
                except Exception:
                    return value
            def _set_nonedit(cls, fname, value):
                "Wrapper for blocking editing of field"
                raise FieldError("Field %s cannot be edited." % fname)
            def _set(cls, fname, value):
                "Wrapper for setting database field"
                _SA(cls, fname, value)
                # only use explicit update_fields in save if we actually have a
                # primary key assigned already (won't be set when first creating object)
                update_fields = [fname] if _GA(cls, "_get_pk_val")(_GA(cls, "_meta")) is not None else None
                _GA(cls, "save")(update_fields=update_fields)
            def _set_foreign(cls, fname, value):
                "Setter only used on foreign key relations, allows setting with #dbref"
                try:
                    value = _GA(value, "dbobj")
                except AttributeError:
                    pass
                if isinstance(value, (basestring, int)):
                    value = to_str(value, force_string=True)
                    if (value.isdigit() or value.startswith("#")):
                        # we also allow setting using dbrefs, if so we try to load the matching object.
                        # (we assume the object is of the same type as the class holding the field, if
                        # not a custom handler must be used for that field)
                        dbid = dbref(value, reqhash=False)
                        if dbid:
                            model = _GA(cls, "_meta").get_field(fname).model
                            try:
                                value = model._default_manager.get(id=dbid)
                            except ObjectDoesNotExist:
                                # maybe it is just a name that happens to look like a dbid
                                pass
                _SA(cls, fname, value)
                # only use explicit update_fields in save if we actually have a
                # primary key assigned already (won't be set when first creating object)
                update_fields = [fname] if _GA(cls, "_get_pk_val")(_GA(cls, "_meta")) is not None else None
                _GA(cls, "save")(update_fields=update_fields)
            def _del_nonedit(cls, fname):
                "wrapper for not allowing deletion"
                raise FieldError("Field %s cannot be edited." % fname)
            def _del(cls, fname):
                "Wrapper for clearing database field - sets it to None"
                _SA(cls, fname, None)
                update_fields = [fname] if _GA(cls, "_get_pk_val")(_GA(cls, "_meta")) is not None else None
                _GA(cls, "save")(update_fields=update_fields)
            # wrapper factories
            fget = lambda cls: _get(cls, fieldname)
            if not editable:
                fset = lambda cls, val: _set_nonedit(cls, fieldname, val)
            elif foreignkey:
                fget = lambda cls: _get_foreign(cls, fieldname)
                fset = lambda cls, val: _set_foreign(cls, fieldname, val)
            else:
                fset = lambda cls, val: _set(cls, fieldname, val)
            fdel = lambda cls: _del(cls, fieldname) if editable else _del_nonedit(cls, fieldname)
            # assigning
            classdict[wrappername] = property(fget, fset, fdel)
        # exclude some models that should not auto-create wrapper fields.
        # BUGFIX: this previously tested `cls.__name__`, which is always the
        # *metaclass* name ("SharedMemoryModelBase") and so never matched;
        # `classname` is the class actually being created. It also returned
        # None, which would have broken class creation had it ever fired -
        # we now return the properly constructed class instead.
        if classname in ("ServerConfig", "TypeNick"):
            return super(SharedMemoryModelBase, cls).__new__(cls, classname, bases, classdict, *args, **kwargs)
        # dynamically create the wrapper properties for all fields not already handled (manytomanyfields are always handlers)
        for fieldname, field in ((fname, field) for fname, field in classdict.items()
                                 if fname.startswith("db_") and type(field).__name__ != "ManyToManyField"):
            foreignkey = type(field).__name__ == "ForeignKey"
            wrappername = "dbid" if fieldname == "id" else fieldname.replace("db_", "", 1)
            if wrappername not in classdict:
                # makes sure not to overload manually created wrappers on the model
                create_wrapper(cls, fieldname, wrappername, editable=field.editable, foreignkey=foreignkey)
        return super(SharedMemoryModelBase, cls).__new__(cls, classname, bases, classdict, *args, **kwargs)
    # (A long commented-out legacy __init__ duplicating the wrapper-creation
    #  logic above was removed here as dead code.)
class SharedMemoryModel(Model):
    """Django model base whose instances are cached by primary key
    ("idmapper"): retrieving the same pk twice yields the same Python object.

    NOTE: ``__metaclass__`` assignment is Python 2 syntax; this module
    predates Python 3.
    """
    # CL: setting abstract correctly to allow subclasses to inherit the default
    # manager.
    __metaclass__ = SharedMemoryModelBase
    objects = SharedMemoryManager()
    class Meta:
        abstract = True
    def _get_cache_key(cls, args, kwargs):
        """
        This method is used by the caching subsystem to infer the PK value from the constructor arguments.
        It is used to decide if an instance has to be built or is already in the cache.
        """
        result = None
        # Quick hack for my composites work for now.
        if hasattr(cls._meta, 'pks'):
            pk = cls._meta.pks[0]
        else:
            pk = cls._meta.pk
        # get the index of the pk in the class fields. this should be calculated *once*, but isn't atm
        pk_position = cls._meta.fields.index(pk)
        if len(args) > pk_position:
            # if it's in the args, we can get it easily by index
            result = args[pk_position]
        elif pk.attname in kwargs:
            # retrieve the pk value. Note that we use attname instead of name, to handle the case where the pk is a
            # a ForeignKey.
            result = kwargs[pk.attname]
        elif pk.name != pk.attname and pk.name in kwargs:
            # ok we couldn't find the value, but maybe it's a FK and we can find the corresponding object instead
            result = kwargs[pk.name]
        if result is not None and isinstance(result, Model):
            # if the pk value happens to be a model instance (which can happen wich a FK), we'd rather use its own pk as the key
            result = result._get_pk_val()
        return result
    # pre-decorator classmethod registration, kept throughout for consistency
    _get_cache_key = classmethod(_get_cache_key)
    def _flush_cached_by_key(cls, key):
        # Drop a single cached instance; a missing key is ignored on purpose.
        try:
            del cls.__instance_cache__[key]
        except KeyError:
            pass
    _flush_cached_by_key = classmethod(_flush_cached_by_key)
    def get_cached_instance(cls, id):
        """
        Method to retrieve a cached instance by pk value. Returns None when not found
        (which will always be the case when caching is disabled for this class). Please
        note that the lookup will be done even when instance caching is disabled.
        """
        return cls.__instance_cache__.get(id)
    get_cached_instance = classmethod(get_cached_instance)
    def cache_instance(cls, instance):
        """
        Method to store an instance in the cache.
        """
        # Unsaved instances (pk is None) are never cached.
        if instance._get_pk_val() is not None:
            cls.__instance_cache__[instance._get_pk_val()] = instance
    cache_instance = classmethod(cache_instance)
    def get_all_cached_instances(cls):
        "return the objects so far cached by idmapper for this class."
        return cls.__instance_cache__.values()
    get_all_cached_instances = classmethod(get_all_cached_instances)
    def flush_cached_instance(cls, instance):
        """
        Method to flush an instance from the cache. The instance will always be flushed from the cache,
        since this is most likely called from delete(), and we want to make sure we don't cache dead objects.
        """
        cls._flush_cached_by_key(instance._get_pk_val())
    flush_cached_instance = classmethod(flush_cached_instance)
    def flush_instance_cache(cls):
        # Reset the whole per-class cache in one go.
        cls.__instance_cache__ = {} #WeakValueDictionary()
    flush_instance_cache = classmethod(flush_instance_cache)
    def save(cls, *args, **kwargs):
        "save method tracking process/thread issues"
        # NOTE: despite its name, ``cls`` here is an ordinary instance
        # (``self``); this is a regular instance method, not a classmethod.
        if _IS_SUBPROCESS:
            # we keep a store of objects modified in subprocesses so
            # we know to update their caches in the central process
            PROC_MODIFIED_OBJS.append(cls)
        if _IS_MAIN_THREAD:
            # in main thread - normal operation
            super(SharedMemoryModel, cls).save(*args, **kwargs)
        else:
            # in another thread; make sure to save in reactor thread
            def _save_callback(cls, *args, **kwargs):
                super(SharedMemoryModel, cls).save(*args, **kwargs)
            #blockingCallFromThread(reactor, _save_callback, cls, *args, **kwargs)
            callFromThread(_save_callback, cls, *args, **kwargs)
# Use a signal so we make sure to catch cascades.
def flush_cache(**kwargs):
    """Signal handler: empty the idmapper cache of SharedMemoryModel and
    every (direct or indirect) subclass of it."""
    # Iterative pre-order walk of the class hierarchy; children are pushed
    # reversed so the visit order matches a recursive traversal.
    pending = [SharedMemoryModel]
    while pending:
        model = pending.pop()
        model.flush_instance_cache()
        pending.extend(reversed(model.__subclasses__()))
#request_finished.connect(flush_cache)
post_syncdb.connect(flush_cache)
def flush_cached_instance(sender, instance, **kwargs):
    """pre_delete hook: drop the deleted instance from the idmapper cache."""
    # XXX: Is this the best way to make sure we can flush?
    if hasattr(instance, 'flush_cached_instance'):
        sender.flush_cached_instance(instance)
pre_delete.connect(flush_cached_instance)
def update_cached_instance(sender, instance, **kwargs):
    """post_save hook: (re)insert the saved instance into the idmapper cache."""
    if hasattr(instance, 'cache_instance'):
        sender.cache_instance(instance)
post_save.connect(update_cached_instance)
def cache_size(mb=True):
    """
    Returns a dictionary with estimates of the
    cache size of each subclass.

    mb - if True, return sizes in MB, otherwise in raw bytes.

    Only leaf classes of the SharedMemoryModel hierarchy get their own
    entry; the "_total" key holds the aggregated (count, size) tuple.
    """
    import sys
    sizedict = {"_total": [0, 0]}
    def getsize(model):
        "Return (instance count, cache size) for one model class."
        instances = model.get_all_cached_instances()
        linst = len(instances)
        # generator avoids building a throwaway list just to sum it
        size = sum(sys.getsizeof(o) for o in instances)
        if mb:
            # Bug fix: the old code divided by 1024 only, which yields KiB
            # even though the docstring and the ``mb`` flag promise MB.
            size = size / (1024.0 * 1024.0)
        return (linst, size)
    def get_recurse(submodels):
        "Walk the subclass tree, tallying sizes for leaf classes only."
        for submodel in submodels:
            subclasses = submodel.__subclasses__()
            if not subclasses:
                tup = getsize(submodel)
                sizedict["_total"][0] += tup[0]
                sizedict["_total"][1] += tup[1]
                sizedict[submodel.__name__] = tup
            else:
                get_recurse(subclasses)
    get_recurse(SharedMemoryModel.__subclasses__())
    sizedict["_total"] = tuple(sizedict["_total"])
    return sizedict
| tectronics/evennia | src/utils/idmapper/base.py | Python | bsd-3-clause | 19,327 |
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 25 21:34:15 2017
@author: rohankoodli
"""
from sklearn.svm import SVC
from watson_developer_cloud import ToneAnalyzerV3, NaturalLanguageClassifierV1
import json, os, pickle
# NOTE(review): live service credentials are hard-coded and committed here;
# they should be rotated and loaded from the environment or a config file.
tone_analyzer = ToneAnalyzerV3(
    username='26db2c5b-2eda-46d0-9c66-438d943713d8',
    password='O3UAXtApBXOQ',
    version='2016-05-19')
# Seed vocabulary: the first seven words are negative, the last eight positive
# (the label lists built below rely on this ordering).
# NOTE(review): 'terribe' looks like a typo for 'terrible'; confirm before
# changing it, since the string is sent to the Tone Analyzer as-is.
words = ['suck','hate','terribe','worst','bad','ugly','dumb',
        'nice','thanks','kind','safe','appreciate','yay','beautiful','adore']
#print json.dumps(tone_analyzer.tone(text='yayyyy'),indent=2)
def iterate_tone_analysis(jsonfile):
    """Send one piece of text to the Watson Tone Analyzer and return the
    raw JSON response (despite the name, *jsonfile* is the input text)."""
    return tone_analyzer.tone(text=jsonfile)
def get_features(json_text):
    """Flatten a Tone Analyzer response into a flat list of tone scores,
    in category order, then tone order within each category."""
    categories = json_text['document_tone']['tone_categories']
    return [tone['score'] for category in categories for tone in category['tones']]
# Score every seed word with the Tone Analyzer; each row of word_analysis is
# the flattened tone-score vector for one word.
word_analysis = []
for i in words:
    tone = iterate_tone_analysis(i)
    features = get_features(tone)
    word_analysis.append(features)
# Labels line up with the word list above: 7 negative words, then 8 positive.
good_labels = [1]*8
bad_labels = [0]*7
labels = bad_labels + good_labels
# Train an SVM on the tone features and persist it for later classification.
svc = SVC()
svc.fit(word_analysis,labels)
# NOTE(review): the file handle passed to pickle.dump is never closed.
pickle.dump(svc,open(os.getcwd()+'/svm-cyber.p','wb'))
| ProtonHackers/Incredible-Filter | antibully.py | Python | lgpl-2.1 | 1,172 |
from math import isnan
import os
from pathlib import Path
import numpy as np
import openmc.data
from openmc.data import K_BOLTZMANN
from openmc.stats import Uniform
import pytest
def make_fake_cross_section():
    """Create fake U235 nuclide
    This nuclide is designed to have k_inf=1 at 300 K, k_inf=2 at 600 K, and
    k_inf=1 at 900 K. The absorption cross section is also constant with
    temperature so as to make the true k-effective go linear with temperature.
    """
    def isotropic_angle(E_min, E_max):
        # Uniform scattering cosine on [-1, 1] at both energy endpoints.
        return openmc.data.AngleDistribution(
            [E_min, E_max],
            [Uniform(-1., 1.), Uniform(-1., 1.)]
        )
    def cross_section(value):
        # Energy-independent (flat) cross section over the shared grid.
        return openmc.data.Tabulated1D(
            energy,
            value*np.ones_like(energy)
        )
    temperatures = (300, 600, 900)
    u235_fake = openmc.data.IncidentNeutron(
        'U235', 92, 235, 0, 233.0248, [T*K_BOLTZMANN for T in temperatures]
    )
    # Create energy grids
    E_min, E_max = 1e-5, 20.0e6
    energy = np.logspace(np.log10(E_min), np.log10(E_max))
    for T in temperatures:
        u235_fake.energy['{}K'.format(T)] = energy
    # Create elastic scattering
    elastic = openmc.data.Reaction(2)
    for T in temperatures:
        elastic.xs['{}K'.format(T)] = cross_section(1.0)
    elastic_dist = openmc.data.UncorrelatedAngleEnergy(isotropic_angle(E_min, E_max))
    product = openmc.data.Product()
    product.distribution.append(elastic_dist)
    elastic.products.append(product)
    u235_fake.reactions[2] = elastic
    # Create fission
    fission = openmc.data.Reaction(18)
    fission.center_of_mass = False
    # NOTE(review): fission sets ``Q_value`` (capital Q) while capture below
    # sets ``q_value``; one spelling is likely not the real attribute of
    # openmc.data.Reaction -- confirm against the openmc version in use.
    fission.Q_value = 193.0e6
    # Fission xs doubles at 600 K (2, 4, 2), driving the expected k swing.
    fission_xs = (2., 4., 2.)
    for T, xs in zip(temperatures, fission_xs):
        fission.xs['{}K'.format(T)] = cross_section(xs)
    a = openmc.data.Tabulated1D([E_min, E_max], [0.988e6, 0.988e6])
    b = openmc.data.Tabulated1D([E_min, E_max], [2.249e-6, 2.249e-6])
    fission_dist = openmc.data.UncorrelatedAngleEnergy(
        isotropic_angle(E_min, E_max),
        openmc.data.WattEnergy(a, b, -E_max)
    )
    product = openmc.data.Product()
    product.distribution.append(fission_dist)
    product.yield_ = openmc.data.Polynomial((2.0,))
    fission.products.append(product)
    u235_fake.reactions[18] = fission
    # Create capture
    capture = openmc.data.Reaction(102)
    capture.q_value = 6.5e6
    # Capture (2, 0, 2) mirrors fission so total absorption stays 4 at all T.
    capture_xs = (2., 0., 2.)
    for T, xs in zip(temperatures, capture_xs):
        capture.xs['{}K'.format(T)] = cross_section(xs)
    u235_fake.reactions[102] = capture
    # Export HDF5 file
    u235_fake.export_to_hdf5('U235_fake.h5', 'w')
    lib = openmc.data.DataLibrary()
    lib.register_file('U235_fake.h5')
    lib.export_to_xml('cross_sections_fake.xml')
@pytest.fixture(scope='module')
def model(tmp_path_factory):
    """Module-scoped OpenMC model: one U235 material inside a reflecting
    sphere, using the fake cross-section library built above.  Runs inside
    a temporary working directory for the whole module."""
    tmp_path = tmp_path_factory.mktemp("temp_interp")
    orig = Path.cwd()
    os.chdir(tmp_path)
    make_fake_cross_section()
    model = openmc.model.Model()
    mat = openmc.Material()
    mat.add_nuclide('U235', 1.0)
    model.materials.append(mat)
    model.materials.cross_sections = str(Path('cross_sections_fake.xml').resolve())
    sph = openmc.Sphere(r=100.0, boundary_type='reflective')
    cell = openmc.Cell(fill=mat, region=-sph)
    model.geometry = openmc.Geometry([cell])
    model.settings.particles = 1000
    model.settings.inactive = 0
    model.settings.batches = 10
    tally = openmc.Tally()
    tally.scores = ['absorption', 'fission', 'scatter', 'nu-fission']
    model.tallies = [tally]
    try:
        yield model
    finally:
        # Always restore the original working directory, even on failure.
        os.chdir(orig)
@pytest.mark.parametrize(
    ["method", "temperature", "fission_expected"],
    [
        ("nearest", 300.0, 0.5),
        ("nearest", 600.0, 1.0),
        ("nearest", 900.0, 0.5),
        ("interpolation", 360.0, 0.6),
        ("interpolation", 450.0, 0.75),
        ("interpolation", 540.0, 0.9),
        ("interpolation", 660.0, 0.9),
        ("interpolation", 750.0, 0.75),
        ("interpolation", 840.0, 0.6),
    ]
)
def test_interpolation(model, method, temperature, fission_expected):
    """Run the model at one temperature and check the tallied reaction
    fractions and k-effective against the analytically expected values
    for the fake (piecewise-linear in T) cross sections."""
    model.settings.temperature = {'method': method, 'default': temperature}
    sp_filename = model.run()
    with openmc.StatePoint(sp_filename) as sp:
        t = sp.tallies[model.tallies[0].id]
        absorption_mean, fission_mean, scatter_mean, nu_fission_mean = t.mean.ravel()
        absorption_unc, fission_unc, scatter_unc, nu_fission_unc = t.std_dev.ravel()
        # Constant fission yield polynomial (2.0,) => nu = 2 everywhere.
        nu = 2.0
        assert abs(absorption_mean - 1) < 3*absorption_unc
        assert abs(fission_mean - fission_expected) < 3*fission_unc
        assert abs(scatter_mean - 1/4) < 3*scatter_unc
        assert abs(nu_fission_mean - nu*fission_expected) < 3*nu_fission_unc
        # Check that k-effective value matches expected
        k = sp.k_combined
        if isnan(k.s):
            assert k.n == pytest.approx(nu*fission_expected)
        else:
            assert abs(k.n - nu*fission_expected) <= 3*k.s
| nelsonag/openmc | tests/unit_tests/test_temp_interp.py | Python | mit | 5,000 |
#File: sentiment_mod.py
import nltk
import random
#from nltk.corpus import movie_reviews
from nltk.classify.scikitlearn import SklearnClassifier
import pickle
from sklearn.naive_bayes import MultinomialNB, BernoulliNB
from sklearn.linear_model import LogisticRegression, SGDClassifier
from sklearn.svm import SVC, LinearSVC, NuSVC
from nltk.classify import ClassifierI
from statistics import mode
from nltk.tokenize import word_tokenize
class VoteClassifier(ClassifierI):
    """Majority-vote ensemble over an arbitrary set of NLTK classifiers."""
    def __init__(self, *classifiers):
        self._classifiers = classifiers
    def _votes(self, features):
        # Poll every wrapped classifier exactly once.
        return [clf.classify(features) for clf in self._classifiers]
    def classify(self, features):
        """Return the label chosen by the majority of the classifiers."""
        return mode(self._votes(features))
    def confidence(self, features):
        """Return the fraction of classifiers agreeing with the winner."""
        votes = self._votes(features)
        winner = mode(votes)
        return votes.count(winner) / len(votes)
documents_f = open("pickled_algos/documents.pickle", "rb")
documents = pickle.load(documents_f)
documents_f.close()
word_features5k_f = open("pickled_algos/word_features5k.pickle", "rb")
word_features = pickle.load(word_features5k_f)
word_features5k_f.close()
def find_features(document):
    """Map every known feature word to whether it occurs in *document*."""
    # A set makes the per-word membership test O(1) instead of O(tokens).
    tokens = set(word_tokenize(document))
    return {word: word in tokens for word in word_features}
featuresets_f = open("pickled_algos/featuresets.pickle", "rb")
featuresets = pickle.load(featuresets_f)
featuresets_f.close()
random.shuffle(featuresets)
print(len(featuresets))
testing_set = featuresets[10000:]
training_set = featuresets[:10000]
open_file = open("pickled_algos/originalnaivebayes5k.pickle", "rb")
classifier = pickle.load(open_file)
open_file.close()
open_file = open("pickled_algos/MNB_classifier5k.pickle", "rb")
MNB_classifier = pickle.load(open_file)
open_file.close()
open_file = open("pickled_algos/BernoulliNB_classifier5k.pickle", "rb")
BernoulliNB_classifier = pickle.load(open_file)
open_file.close()
open_file = open("pickled_algos/LogisticRegression_classifier5k.pickle", "rb")
LogisticRegression_classifier = pickle.load(open_file)
open_file.close()
open_file = open("pickled_algos/LinearSVC_classifier5k.pickle", "rb")
LinearSVC_classifier = pickle.load(open_file)
open_file.close()
open_file = open("pickled_algos/SGDC_classifier5k.pickle", "rb")
SGDC_classifier = pickle.load(open_file)
open_file.close()
voted_classifier = VoteClassifier(
classifier,
LinearSVC_classifier,
MNB_classifier,
BernoulliNB_classifier,
LogisticRegression_classifier)
def sentiment(text):
    """Classify *text* with the voting ensemble.

    Returns a (label, confidence) tuple, where confidence is the fraction
    of ensemble members that agreed with the winning label."""
    feats = find_features(text)
    return voted_classifier.classify(feats),voted_classifier.confidence(feats)
import os
import tempfile
import uuid
from wsgiref.util import FileWrapper
import zipfile
from django.conf import settings
from django.http import StreamingHttpResponse
from django.views.generic import View
from django_transfer import TransferHttpResponse
from corehq.util.view_utils import set_file_download
CHUNK_SIZE = 8192
def make_zip_file(files, compress=True, path=None):
    """Write (archive_name, data) pairs into a zip file and return its path.

    files    -- iterable of (path_inside_zip, data) tuples
    compress -- DEFLATE-compress entries when True, store them otherwise
    path     -- target file path; a temp file is created when omitted
    """
    compression = zipfile.ZIP_DEFLATED if compress else zipfile.ZIP_STORED
    fpath = path
    if not fpath:
        # Bug fix: mkstemp returns an *open* OS-level descriptor; the old
        # code discarded it, leaking one file descriptor per call.
        fd, fpath = tempfile.mkstemp()
        os.close(fd)
    with open(fpath, 'wb') as tmp:
        with zipfile.ZipFile(tmp, "w", compression) as z:
            # renamed loop variable: it previously shadowed the ``path`` arg
            for name, data in files:
                z.writestr(name, data)
    return fpath
class DownloadZip(View):
    """Base view that streams a zip archive built from ``iter_files()``.

    Subclasses must set ``compress_zip`` / ``zip_name`` and implement the
    three hook methods below.
    """
    compress_zip = None
    zip_name = None
    @property
    def zip_mimetype(self):
        if self.compress_zip:
            return 'application/zip'
        else:
            return 'application/x-zip-compressed'
    def log_errors(self, errors):
        """Record errors reported by iter_files(); must be overridden."""
        raise NotImplementedError()
    def iter_files(self):
        """Return (files, errors): (path, data) pairs plus a list of errors."""
        raise NotImplementedError()
    def check_before_zipping(self):
        """Return an error response to short-circuit, or a falsy value."""
        raise NotImplementedError()
    def get(self, request, *args, **kwargs):
        error_response = self.check_before_zipping()
        if error_response:
            return error_response
        path = None
        transfer_enabled = settings.SHARED_DRIVE_CONF.transfer_enabled
        if transfer_enabled:
            # Build the archive on the shared drive so the front-end server
            # can serve it directly via django-transfer.
            path = os.path.join(settings.SHARED_DRIVE_CONF.transfer_dir, uuid.uuid4().hex)
        files, errors = self.iter_files()
        fpath = make_zip_file(files, compress=self.compress_zip, path=path)
        if errors:
            self.log_errors(errors)
        if transfer_enabled:
            return TransferHttpResponse(fpath, mimetype=self.zip_mimetype)
        else:
            # Bug fix: the archive is binary, so it must be opened in 'rb'
            # mode; text mode corrupts/breaks the streamed zip bytes.
            response = StreamingHttpResponse(FileWrapper(open(fpath, 'rb'), CHUNK_SIZE), mimetype=self.zip_mimetype)
            response['Content-Length'] = os.path.getsize(fpath)
            set_file_download(response, self.zip_name)
            return response
| puttarajubr/commcare-hq | corehq/util/zip_utils.py | Python | bsd-3-clause | 2,092 |
def extractMudandironWordpressCom(item):
    '''
    Parser for 'mudandiron.wordpress.com'
    '''
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    # Ignore items with no chapter/volume info, and preview posts.
    if not (chp or vol) or 'preview' in item['title'].lower():
        return None
    release_map = (
        ('PRC', 'PRC', 'translated'),
        ('Loiterous', 'Loiterous', 'oel'),
    )
    item_tags = item['tags']
    for tag, series_name, release_type in release_map:
        if tag in item_tags:
            return buildReleaseMessageWithType(item, series_name, vol, chp,
                                               frag=frag, postfix=postfix,
                                               tl_type=release_type)
    # No recognised series tag.
    return False
from cdo_api_py import Client
import pandas as pd
from datetime import datetime
from pprint import pprint
# initialize a client with a developer token ,
# note 5 calls per second and 1000 calls per day limit for each token
token = "my token here!"
my_client = Client(token, default_units=None, default_limit=1000)
# the other valid option for units is 'standard', and default_limit maxes out at 1000
# first lets see what endpoints are associated with the API
# you can read more about this from NOAAs NCDC at
# https://www.ncdc.noaa.gov/cdo-web/webservices/v2#gettingStarted
pprint(my_client.list_endpoints())
# request a list of available datasets (about 11) with
pprint(my_client.list_datasets())
# there are more than 1000 datatypes, but you can see them all with
pprint(my_client.list_datatypes())
# define the extent we are interested in. in this case the DC metro area.
extent = {
"north": 39.14,
"south": 38.68,
"east": -76.65,
"west": -77.35,
}
# lets define the date range we're interested in as well, December 2016
startdate = datetime(2016, 12, 1)
enddate = datetime(2016, 12, 31)
# after examining the available datasets, we decided 'GHCND' is the one we want,
# and that we really want daily min and max temperatures
datasetid='GHCND'
datatypeid=['TMIN', 'TMAX', 'PRCP']
# lets find stations that meet all our criteria
stations = my_client.find_stations(
datasetid=datasetid,
extent=extent,
startdate=startdate,
enddate=enddate,
datatypeid=datatypeid,
return_dataframe=True)
pprint(stations)
# we can get big lists of station data with
big_df = pd.DataFrame()
for rowid, station in stations.iterrows(): # remember this is a pandas dataframe!
    station_data = my_client.get_data_by_station(
        datasetid=datasetid,
        stationid=station['id'], # remember this is a pandas dataframe
        startdate=startdate,
        enddate=enddate,
        return_dataframe=True, # this defaults to True
        include_station_meta=True # flatten station metadata with ghcnd readings
    )
    pprint(station_data)
    # NOTE(review): concatenating inside the loop is quadratic; collecting
    # the frames in a list and calling pd.concat once would scale better.
    big_df = pd.concat([big_df, station_data], sort=False)
# Now we can do whatever we want with our big dataframe. Lets sort it by date and save it
print(big_df)
big_df = big_df.sort_values(by='date').reset_index()
big_df.to_csv('dc_ghcnd_example_output.csv')
| Jwely/cdo-api-py | docs/example/dc_weather_data.py | Python | mit | 2,346 |
# Copyright (c) 2014-2016 Red Hat, Inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests the Console Security Proxy Framework."""
import six
import mock
from nova.console.rfb import auth
from nova.console.rfb import authnone
from nova.console.securityproxy import rfb
from nova import exception
from nova import test
class RFBSecurityProxyTestCase(test.NoDBTestCase):
    """Test case for the base RFBSecurityProxy."""
    def setUp(self):
        super(RFBSecurityProxyTestCase, self).setUp()
        self.manager = mock.Mock()
        self.tenant_sock = mock.Mock()
        self.compute_sock = mock.Mock()
        # recv() return values are queued per-test via the _expect_* helpers.
        self.tenant_sock.recv.side_effect = []
        self.compute_sock.recv.side_effect = []
        self.expected_manager_calls = []
        self.expected_tenant_calls = []
        self.expected_compute_calls = []
        self.proxy = rfb.RFBSecurityProxy()
    def _assert_expected_calls(self):
        # Verify the exact sequence of sendall()/recv() calls on each mock.
        self.assertEqual(self.expected_manager_calls,
                         self.manager.mock_calls)
        self.assertEqual(self.expected_tenant_calls,
                         self.tenant_sock.mock_calls)
        self.assertEqual(self.expected_compute_calls,
                         self.compute_sock.mock_calls)
    def _version_handshake(self):
        # Queue a successful RFB 3.8 version exchange on both sockets.
        full_version_str = "RFB 003.008\n"
        self._expect_compute_recv(auth.VERSION_LENGTH, full_version_str)
        self._expect_compute_send(full_version_str)
        self._expect_tenant_send(full_version_str)
        self._expect_tenant_recv(auth.VERSION_LENGTH, full_version_str)
    def _to_binary(self, val):
        # The proxy speaks bytes; encode str inputs for convenience.
        if not isinstance(val, six.binary_type):
            val = six.binary_type(val, 'utf-8')
        return val
    def _expect_tenant_send(self, val):
        """Record that the proxy should sendall() *val* to the tenant."""
        val = self._to_binary(val)
        self.expected_tenant_calls.append(mock.call.sendall(val))
    def _expect_compute_send(self, val):
        """Record that the proxy should sendall() *val* to the compute node."""
        val = self._to_binary(val)
        self.expected_compute_calls.append(mock.call.sendall(val))
    def _expect_tenant_recv(self, amt, ret_val):
        """Record an expected recv(amt) from the tenant and queue its reply."""
        ret_val = self._to_binary(ret_val)
        self.expected_tenant_calls.append(mock.call.recv(amt))
        self.tenant_sock.recv.side_effect = (
            list(self.tenant_sock.recv.side_effect) + [ret_val])
    def _expect_compute_recv(self, amt, ret_val):
        """Record an expected recv(amt) from compute and queue its reply."""
        ret_val = self._to_binary(ret_val)
        self.expected_compute_calls.append(mock.call.recv(amt))
        self.compute_sock.recv.side_effect = (
            list(self.compute_sock.recv.side_effect) + [ret_val])
    def test_fail(self):
        """Validate behavior for invalid initial message from tenant.
        The spec defines the sequence that should be used in the handshaking
        process. Anything outside of this is invalid.
        """
        self._expect_tenant_send("\x00\x00\x00\x01\x00\x00\x00\x04blah")
        self.proxy._fail(self.tenant_sock, None, 'blah')
        self._assert_expected_calls()
    def test_fail_server_message(self):
        """Validate behavior for invalid initial message from server.
        The spec defines the sequence that should be used in the handshaking
        process. Anything outside of this is invalid.
        """
        self._expect_tenant_send("\x00\x00\x00\x01\x00\x00\x00\x04blah")
        self._expect_compute_send("\x00")
        self.proxy._fail(self.tenant_sock, self.compute_sock, 'blah')
        self._assert_expected_calls()
    def test_parse_version(self):
        """Validate behavior of version parser."""
        res = self.proxy._parse_version("RFB 012.034\n")
        self.assertEqual(12.34, res)
    def test_fails_on_compute_version(self):
        """Validate behavior for unsupported compute RFB version.
        We only support RFB protocol version 3.8.
        """
        for full_version_str in ["RFB 003.007\n", "RFB 003.009\n"]:
            self._expect_compute_recv(auth.VERSION_LENGTH, full_version_str)
            ex = self.assertRaises(exception.SecurityProxyNegotiationFailed,
                                   self.proxy.connect,
                                   self.tenant_sock,
                                   self.compute_sock)
            self.assertIn('version 3.8, but server', six.text_type(ex))
            self._assert_expected_calls()
    def test_fails_on_tenant_version(self):
        """Validate behavior for unsupported tenant RFB version.
        We only support RFB protocol version 3.8.
        """
        full_version_str = "RFB 003.008\n"
        for full_version_str_invalid in ["RFB 003.007\n", "RFB 003.009\n"]:
            self._expect_compute_recv(auth.VERSION_LENGTH, full_version_str)
            self._expect_compute_send(full_version_str)
            self._expect_tenant_send(full_version_str)
            self._expect_tenant_recv(auth.VERSION_LENGTH,
                                     full_version_str_invalid)
            ex = self.assertRaises(exception.SecurityProxyNegotiationFailed,
                                   self.proxy.connect,
                                   self.tenant_sock,
                                   self.compute_sock)
            self.assertIn('version 3.8, but tenant', six.text_type(ex))
            self._assert_expected_calls()
    def test_fails_on_sec_type_cnt_zero(self):
        """Validate behavior if a server returns 0 supported security types.
        This indicates a random issue and the cause of that issues should be
        decoded and reported in the exception.
        """
        self.proxy._fail = mock.Mock()
        self._version_handshake()
        # Zero security types, followed by a length-prefixed reason string.
        self._expect_compute_recv(1, "\x00")
        self._expect_compute_recv(4, "\x00\x00\x00\x06")
        self._expect_compute_recv(6, "cheese")
        self._expect_tenant_send("\x00\x00\x00\x00\x06cheese")
        ex = self.assertRaises(exception.SecurityProxyNegotiationFailed,
                               self.proxy.connect,
                               self.tenant_sock,
                               self.compute_sock)
        self.assertIn('cheese', six.text_type(ex))
        self._assert_expected_calls()
    @mock.patch.object(authnone.RFBAuthSchemeNone, "security_handshake")
    def test_full_run(self, mock_handshake):
        """Validate correct behavior."""
        new_sock = mock.MagicMock()
        mock_handshake.return_value = new_sock
        self._version_handshake()
        # Server offers two security types (None=1, VNC auth=2); the proxy
        # offers only None to the tenant and selects None with the server.
        self._expect_compute_recv(1, "\x02")
        self._expect_compute_recv(2, "\x01\x02")
        self._expect_tenant_send("\x01\x01")
        self._expect_tenant_recv(1, "\x01")
        self._expect_compute_send("\x01")
        self.assertEqual(new_sock, self.proxy.connect(
            self.tenant_sock, self.compute_sock))
        mock_handshake.assert_called_once_with(self.compute_sock)
        self._assert_expected_calls()
    def test_client_auth_invalid_fails(self):
        """Validate behavior if no security types are supported."""
        self.proxy._fail = self.manager.proxy._fail
        self.proxy.security_handshake = self.manager.proxy.security_handshake
        self._version_handshake()
        self._expect_compute_recv(1, "\x02")
        self._expect_compute_recv(2, "\x01\x02")
        self._expect_tenant_send("\x01\x01")
        # Tenant answers with type 2 (VNC auth), which the proxy rejects.
        self._expect_tenant_recv(1, "\x02")
        self.expected_manager_calls.append(
            mock.call.proxy._fail(self.tenant_sock,
                                  self.compute_sock,
                                  "Only the security type "
                                  "None (1) is supported"))
        self.assertRaises(exception.SecurityProxyNegotiationFailed,
                          self.proxy.connect,
                          self.tenant_sock,
                          self.compute_sock)
        self._assert_expected_calls()
    def test_exception_in_choose_security_type_fails(self):
        """Validate behavior if a given security type isn't supported."""
        self.proxy._fail = self.manager.proxy._fail
        self.proxy.security_handshake = self.manager.proxy.security_handshake
        self._version_handshake()
        # Server offers types 2 and 5 only -- None (1) is unavailable.
        self._expect_compute_recv(1, "\x02")
        self._expect_compute_recv(2, "\x02\x05")
        self._expect_tenant_send("\x01\x01")
        self._expect_tenant_recv(1, "\x01")
        self.expected_manager_calls.extend([
            mock.call.proxy._fail(
                self.tenant_sock, self.compute_sock,
                'Unable to negotiate security with server')])
        self.assertRaises(exception.SecurityProxyNegotiationFailed,
                          self.proxy.connect,
                          self.tenant_sock,
                          self.compute_sock)
        self._assert_expected_calls()
    @mock.patch.object(authnone.RFBAuthSchemeNone, "security_handshake")
    def test_exception_security_handshake_fails(self, mock_auth):
        """Validate behavior if the security handshake fails for any reason."""
        self.proxy._fail = self.manager.proxy._fail
        self._version_handshake()
        self._expect_compute_recv(1, "\x02")
        self._expect_compute_recv(2, "\x01\x02")
        self._expect_tenant_send("\x01\x01")
        self._expect_tenant_recv(1, "\x01")
        self._expect_compute_send("\x01")
        ex = exception.RFBAuthHandshakeFailed(reason="crackers")
        mock_auth.side_effect = ex
        self.expected_manager_calls.extend([
            mock.call.proxy._fail(self.tenant_sock, None,
                                  'Unable to negotiate security with server')])
        self.assertRaises(exception.SecurityProxyNegotiationFailed,
                          self.proxy.connect,
                          self.tenant_sock,
                          self.compute_sock)
        mock_auth.assert_called_once_with(self.compute_sock)
        self._assert_expected_calls()
| rahulunair/nova | nova/tests/unit/console/securityproxy/test_rfb.py | Python | apache-2.0 | 10,400 |
from Tkinter import *
from tkColorChooser import askcolor
import json
from string import maketrans
import re
import ttk
import tkFont
"""
standard save + load options functions, from item_tracker.py. save_options modified a bit to take a parameter
"""
def load_options():
    """Read the tracker options from options.json and return them as a dict."""
    with open("options.json", "r") as json_file:
        return json.load(json_file)
def save_options(options):
    """Persist *options* to options.json, pretty-printed with sorted keys."""
    with open("options.json", "w") as json_file:
        json.dump(options, json_file, indent=3, sort_keys=True)
"""
callbacks
"""
def color_callback(source):
    # prompt a color picker, set the options and the background/foreground of the button
    # *source* is the option key ("background_color" or "text_color").
    global buttons
    global options
    nums, hex_color = askcolor(color=options.get(source),
                               title = "Color Chooser")
    # askcolor returns (None, None) when the dialog is cancelled.
    if hex_color:
        # Use the inverted color for the label so it stays readable.
        opposite = opposite_color(hex_color)
        options[source] = hex_color.upper()
        buttons[source].configure(bg=hex_color, fg=opposite)
def checkbox_callback():
    # just for the "show decription" checkbox -- to disable the message duration entry
    # (message duration is meaningless while descriptions are hidden)
    global checks
    global entries
    if not checks.get("show_description").get():
        entries["message_duration"].configure(state=DISABLED)
    else:
        entries["message_duration"].configure(state=NORMAL)
def save():
    # callback for the "save" option -- rejiggers options and saves to options.json, then quits
    global root
    global options
    global numeric_entry_keys
    # Entry widgets hold strings; convert the numeric options back to int.
    for key, value in entries.iteritems():
        if key in numeric_entry_keys:
            options[key] = int(value.get())
        else:
            options[key] = value.get()
    # Checkbutton IntVars (0/1) become proper booleans in the JSON file.
    for key, value in checks.iteritems():
        options[key] = True if value.get() else False
    save_options(options)
    root.quit()
# taken from http://code.activestate.com/recipes/527747-invert-css-hex-colors/
def opposite_color(color):
    # get the opposite color of a hex color, just to make text on buttons readable
    # e.g. "#0012AB" -> "#FFED54"; non-hex characters such as '#' pass through.
    # NOTE: uses string.maketrans, which only exists on Python 2 (this file
    # imports Tkinter, so it is Python 2 code).
    color = color.lower()
    table = maketrans(
        '0123456789abcdef',
        'fedcba9876543210')
    return str(color).translate(table).upper()
def pretty_name(s):
    """Turn a snake_case option key into a Title Case label for the UI."""
    return s.replace("_", " ").title()
# from http://stackoverflow.com/questions/4140437/interactively-validating-entry-widget-content-in-tkinter
def OnValidate(d, i, P, s, S, v, V, W):
    """Tk 'key' validator: accept the edit when the post-edit value P is
    empty or a plain decimal number (digits, optional '.' plus digits)."""
    if P == "":
        return True
    return re.search("^\d+(\.\d*)?$", P) is not None
# load options, create root
options = load_options()
root = Tk()
root.wm_title("Item Tracker Options")
root.resizable(False,False)
# generate numeric options by looping over option types
numeric_entry_keys = ["message_duration", "min_spacing", "default_spacing", "framerate_limit", "size_multiplier"]
entries = {}
nextrow = 0
vcmd = (root.register(OnValidate),
'%d', '%i', '%P', '%s', '%S', '%v', '%V', '%W')
for index, opt in enumerate(["message_duration", "min_spacing", "default_spacing", "framerate_limit", "size_multiplier"]):
Label(root, text=pretty_name(opt)).grid(row=nextrow)
entries[opt] = Entry(root,validate="key",validatecommand=vcmd)
entries[opt].grid(row=nextrow,column=1)
entries[opt].insert(0,options.get(opt))
nextrow += 1
for index, opt in enumerate(["show_font"]):
Label(root, text=pretty_name(opt)).grid(row=nextrow)
fonts = tkFont.families()
initialvar = StringVar()
initialvar.set(options.get(opt))
entries[opt] = ttk.Combobox(root, values=sorted(fonts), textvariable=initialvar, state='readonly')
entries[opt].pack()
entries[opt].grid(row=nextrow,column=1)
nextrow +=1
# generate text options by looping over option types
for index, opt in enumerate(["item_details_link", "custom_message"]):
Label(root, text=pretty_name(opt)).grid(row=nextrow)
entries[opt] = Entry(root)
entries[opt].grid(row=nextrow,column=1)
entries[opt].insert(0,options.get(opt))
nextrow += 1
# generate buttons by looping over option types
buttons = {}
for index, opt in enumerate(["background_color","text_color"]):
buttons[opt] = Button(root,
text=pretty_name(opt),
bg=options.get(opt),
fg=opposite_color(options.get(opt)),
# command=lambda: color_callback(opt))
command=lambda opt=opt: color_callback(opt))
buttons[opt].grid(row=len(entries),column=index)
# generate checkboxes, with special exception for show_description for message duration
checks = {}
for index, opt in enumerate(["show_description", "show_custom_message", "show_seed", "show_guppy_count", "show_floors", "show_rerolled_items", "show_health_ups", "show_space_items", "show_blind_icon", "word_wrap"]):
    checks[opt] = IntVar()
    c = Checkbutton(root, text=pretty_name(opt), variable=checks[opt])
    # NOTE: index/2 is Python 2 integer division (file imports Tkinter);
    # under Python 3 this would pass a float row to grid().
    c.grid(row=len(entries)+1+index/2,column=index%2) # 2 checkboxes per row
    if options.get(opt):
        c.select()
    # Disable letting the user set the message duration if the show description option is disabled.
    if opt=="show_description":
        c.configure(command=checkbox_callback)
if not options.get("show_description"):
    entries["message_duration"].configure(state=DISABLED)
# save and cancel buttons
cancel = Button(root,
text="Cancel",
command=root.quit)
save = Button(root,
text="Save",
command=save)
cancel.grid(row=len(entries)+len(buttons)+len(checks),column=1)
save.grid(row=len(entries)+len(buttons)+len(checks),column=0)
# start the main loop eyyy
mainloop()
| Brett824/RebirthItemTracker | option_picker.py | Python | bsd-2-clause | 5,464 |
# airplay2sonos is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import select
import socket
from rtp_packet import RTPPacket, ConnectionClosed, UnknownPayload
class DataEndpoints(object):
    """Poll-driven dispatcher for the RTP data/control/timing sockets.

    Owns a select.poll() loop plus a self-pipe ("trigger") used to wake the
    loop when a socket is registered or the loop must shut down.  Sockets
    are tracked in self.sockets keyed by file descriptor.  (Python 2 code.)
    """
    def __init__(self, port):
        self.active = True
        self.sockets = {}
        self.poll = select.poll()
        # Self-pipe trick: writing one byte to trigger[1] wakes poll() so
        # newly registered sockets are picked up.  Unbuffered (0) on purpose.
        trigger = os.pipe()
        self.trigger = os.fdopen(trigger[0], "r", 0), os.fdopen(trigger[1], "w", 0)
        self.sockets[trigger[0]] = self.trigger[0]
        self.poll.register(self.trigger[0], select.POLLIN)
        # Data ports are handed out sequentially starting just above `port`.
        self.port_number = port + 1
    def handle(self):
        """Run the event loop until stop() clears self.active."""
        while self.active:
            sockets = self.poll.poll()
            if not self.active:
                return
            for (s, events) in sockets:
                if s == self.trigger[0].fileno():
                    # Wake-up byte written by open_socket(); just drain it.
                    self.trigger[0].read(1)
                else:
                    sobj = self.sockets[s]
                    # TCPListener exposes process() (accept); plain sockets
                    # are dispatched to the handler bound in open_socket().
                    if hasattr(sobj, "process"):
                        sobj.process()
                    else:
                        sobj.func(sobj, sobj.client)
    def stop(self):
        """Close every tracked socket and shut the loop down."""
        self.active = False
        # NOTE(review): mutates self.sockets while iterating items(); safe
        # only because Python 2's dict.items() returns a list copy.
        for s in self.sockets.items():
            self.poll.unregister(s[0])
            s[1].close()
            del self.sockets[s[0]]
        self.trigger[1].close()
    def open_socket(self, func, client):
        """Bind a new data socket for `client` and return its port number.

        `func` names one of the handler methods below ("server", "control"
        or "timing").  Retries on errno 98 (EADDRINUSE) with the next port.
        """
        while True:
            port = self.port_number
            self.port_number += 1
            try:
                s = TCPListener(port, self) if client.is_tcp else UDPSocket(port)
            except socket.error, e:
                if e[0] == 98: # Address already in use
                    continue
                else:
                    raise
            else:
                break
        s.func = getattr(self, func)
        s.client = client
        self.sockets[s.fileno()] = s
        self.poll.register(s, select.POLLIN)
        # Wake the poll loop so it starts watching the new descriptor.
        self.trigger[1].write("a")
        return port
    def server(self, s, client):
        """Read one RTP packet from `s` and hand it to the client."""
        try:
            client.audio_packet(RTPPacket(s))
        except ConnectionClosed:
            self.poll.unregister(s.fileno())
            del self.sockets[s.fileno()]
            s.close()
        except UnknownPayload, e:
            # Unknown payload types are deliberately ignored (best effort).
            #print e
            pass
    def control(self, s, client):
        """Debug stub: dump raw control-channel traffic."""
        print "control", repr(s.recv(128))
    def timing(self, s, client):
        """Debug stub: dump raw timing-channel traffic."""
        print "timing", repr(s.recv(128))
class UDPSocket(socket.socket):
    """An IPv4 UDP socket pre-bound to `port` on every local interface."""
    def __init__(self, port):
        super(UDPSocket, self).__init__(socket.AF_INET, socket.SOCK_DGRAM)
        local_address = ("", port)
        self.bind(local_address)
class TCPListener(socket.socket):
    """Listening IPv4 TCP socket; each accepted connection is wrapped in a
    TCPSocket and registered with the owning DataEndpoints instance."""
    def __init__(self, port, de):
        socket.socket.__init__(self, socket.AF_INET, socket.SOCK_STREAM)
        self.bind(("", port))
        self.listen(1)
        self.de = de
    def process(self):
        """Accept one pending connection and register it for polling."""
        s, addr = self.accept()
        # self.func/self.client are attached by DataEndpoints.open_socket().
        s = TCPSocket(s, self.func, self.client)
        print "accepted", s.fileno()
        self.de.sockets[s.fileno()] = s
        self.de.poll.register(s, select.POLLIN)
class TCPSocket(object):
    """Thin wrapper pairing an accepted TCP connection with its handler
    function and client, exposing only what the poll loop needs."""
    def __init__(self, s, func, client):
        self.s = s
        self.func = func
        self.client = client
    def fileno(self):
        # Lets poll.register()/poll events identify this object by fd.
        return self.s.fileno()
    def close(self):
        return self.s.close()
    def recv(self, buf):
        return self.s.recv(buf)
| andrewjw/airplay2sonos | airplay2sonos/data_endpoints.py | Python | gpl-2.0 | 3,929 |
#!/usr/bin/python2
#
# Copyright (C) 2014 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation; either version 2.1 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Author: David Shea <[email protected]>
#
"""
Check that widgets that implement GtkScrollable are not placed within a
GtkViewport. If a widget knows how to scroll itself we probably don't want
to add an extra layer.
"""
import argparse
import sys
# lxml is required for parsing the glade XML; bail out early with a clear
# message when it is missing.
try:
    from lxml import etree
except ImportError:
    # Fixed: the original message named check_pw_visibility.py, a copy-paste
    # leftover from a sibling checker script.
    print("You need to install the python-lxml package to use check_viewport.py")
    sys.exit(1)
# I guess we could look at the introspected classes and see if they implement the Scrollable
# interface but that sounds like kind of a pain
SCROLLABLES = ["GtkIconView", "GtkLayout", "GtkTextView", "GtkToolPalette",
"GtkTreeView", "GtkViewport"]
def check_glade_file(glade_file_path):
    """Scan one glade file; return False when any scrollable widget is
    nested inside a GtkViewport, True otherwise."""
    success = True
    with open(glade_file_path) as glade_file:
        tree = etree.parse(glade_file)
        # Flag structures of the form:
        #   <object class="GtkViewport">
        #     <child>
        #       <object class="GtkTreeView">
        for widget in SCROLLABLES:
            xpath = ".//object[@class='GtkViewport']/child/object[@class='%s']" % widget
            for match in tree.xpath(xpath):
                success = False
                print("%s contained in GtkViewport at %s:%d" % (widget, glade_file_path,
                                                                match.sourceline))
    return success
if __name__ == "__main__":
    # Fixed: the original text described the password-visibility checker (a
    # copy-paste leftover) and was passed positionally, which argparse treats
    # as `prog`, not `description`.
    parser = argparse.ArgumentParser(
        description="Check that GtkScrollable widgets are not placed within a GtkViewport")

    # Ignore translation arguments (passed uniformly to all glade checkers)
    parser.add_argument("-t", "--translate", action='store_true',
            help=argparse.SUPPRESS)
    parser.add_argument("-p", "--podir", action='store', type=str,
            metavar='PODIR', help=argparse.SUPPRESS, default='./po')

    parser.add_argument("glade_files", nargs="+", metavar="GLADE-FILE",
            help='The glade file to check')
    args = parser.parse_args(args=sys.argv[1:])

    success = True
    for file_path in args.glade_files:
        if not check_glade_file(file_path):
            success = False

    # Exit status reflects whether every file passed.
    sys.exit(0 if success else 1)
| vojtechtrefny/anaconda | tests/glade/viewport/check_viewport.py | Python | gpl-2.0 | 2,834 |
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: ecs_cluster
short_description: create or terminate ecs clusters
notes:
- When deleting a cluster, the information returned is the state of the cluster prior to deletion.
- It will also wait for a cluster to have instances registered to it.
description:
- Creates or terminates ecs clusters.
version_added: "2.0"
author: Mark Chance(@Java1Guy)
requirements: [ json, time, boto, boto3 ]
options:
state:
description:
- The desired state of the cluster
required: true
choices: ['present', 'absent', 'has_instances']
name:
description:
- The cluster name
required: true
delay:
description:
- Number of seconds to wait
required: false
repeat:
description:
- The number of times to wait for the cluster to have an instance
required: false
extends_documentation_fragment:
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Cluster creation
- ecs_cluster:
name: default
state: present
# Cluster deletion
- ecs_cluster:
name: default
state: absent
- name: Wait for register
ecs_cluster:
name: "{{ new_cluster }}"
state: has_instances
delay: 10
repeat: 10
register: task_output
'''
RETURN = '''
activeServicesCount:
description: how many services are active in this cluster
returned: 0 if a new cluster
type: int
clusterArn:
description: the ARN of the cluster just created
type: string (ARN)
sample: arn:aws:ecs:us-west-2:172139249013:cluster/test-cluster-mfshcdok
clusterName:
description: name of the cluster just created (should match the input argument)
type: string
sample: test-cluster-mfshcdok
pendingTasksCount:
description: how many tasks are waiting to run in this cluster
returned: 0 if a new cluster
type: int
registeredContainerInstancesCount:
description: how many container instances are available in this cluster
returned: 0 if a new cluster
type: int
runningTasksCount:
description: how many tasks are running in this cluster
returned: 0 if a new cluster
type: int
status:
description: the status of the new cluster
returned: ACTIVE
type: string
'''
try:
import json, time
import boto
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
try:
import boto3
HAS_BOTO3 = True
except ImportError:
HAS_BOTO3 = False
class EcsClusterManager:
    """Handles ECS Clusters"""
    def __init__(self, module):
        # module: the AnsibleModule instance; used for auth failure reporting.
        self.module = module
        try:
            # self.ecs = boto3.client('ecs')
            region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
            if not region:
                module.fail_json(msg="Region must be specified as a parameter, in EC2_REGION or AWS_REGION environment variables or in boto configuration file")
            self.ecs = boto3_conn(module, conn_type='client', resource='ecs', region=region, endpoint=ec2_url, **aws_connect_kwargs)
        except boto.exception.NoAuthHandlerFound, e:
            self.module.fail_json(msg="Can't authorize connection - "+str(e))
    def find_in_array(self, array_of_clusters, cluster_name, field_name='clusterArn'):
        # Return the first dict whose field_name value ends with cluster_name
        # (ARNs end with the cluster name), or None when absent.
        for c in array_of_clusters:
            if c[field_name].endswith(cluster_name):
                return c
        return None
    def describe_cluster(self, cluster_name):
        """Return the cluster description dict, or None if it does not exist."""
        response = self.ecs.describe_clusters(clusters=[
            cluster_name
        ])
        if len(response['failures'])>0:
            c = self.find_in_array(response['failures'], cluster_name, 'arn')
            if c and c['reason']=='MISSING':
                return None
            # fall thru and look through found ones
        if len(response['clusters'])>0:
            c = self.find_in_array(response['clusters'], cluster_name)
            if c:
                return c
        # NOTE(review): StandardError exists on Python 2 only.
        raise StandardError("Unknown problem describing cluster %s." % cluster_name)
    def create_cluster(self, clusterName = 'default'):
        """Create the cluster and return its description dict."""
        response = self.ecs.create_cluster(clusterName=clusterName)
        return response['cluster']
    def delete_cluster(self, clusterName):
        """Delete the cluster; returns the raw API response."""
        return self.ecs.delete_cluster(cluster=clusterName)
def main():
    """Ansible entry point: create, delete, or wait on an ECS cluster.

    state=present creates (or confirms) the cluster, state=absent deletes
    it, and state=has_instances polls until at least one container instance
    is registered (delay seconds between up to repeat attempts).
    """
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        state=dict(required=True, choices=['present', 'absent', 'has_instances'] ),
        name=dict(required=True, type='str' ),
        delay=dict(required=False, type='int', default=10),
        repeat=dict(required=False, type='int', default=10)
    ))
    required_together = ( ['state', 'name'] )
    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, required_together=required_together)

    if not HAS_BOTO:
        module.fail_json(msg='boto is required.')

    if not HAS_BOTO3:
        module.fail_json(msg='boto3 is required.')

    cluster_mgr = EcsClusterManager(module)
    try:
        existing = cluster_mgr.describe_cluster(module.params['name'])
    except Exception as e:
        module.fail_json(msg="Exception describing cluster '"+module.params['name']+"': "+str(e))

    results = dict(changed=False)
    if module.params['state'] == 'present':
        if existing and 'status' in existing and existing['status']=="ACTIVE":
            # Already active: nothing to do.
            results['cluster']=existing
        else:
            if not module.check_mode:
                # doesn't exist. create it.
                results['cluster'] = cluster_mgr.create_cluster(module.params['name'])
            results['changed'] = True

    # delete the cluster
    elif module.params['state'] == 'absent':
        if not existing:
            pass
        else:
            # it exists, so we should delete it and mark changed.
            # return info about the cluster deleted
            results['cluster'] = existing
            if 'status' in existing and existing['status']=="INACTIVE":
                results['changed'] = False
            else:
                if not module.check_mode:
                    cluster_mgr.delete_cluster(module.params['name'])
                results['changed'] = True
    elif module.params['state'] == 'has_instances':
        if not existing:
            module.fail_json(msg="Cluster '"+module.params['name']+" not found.")
            return
        # Poll until at least one container instance registers with the
        # cluster, waiting `delay` seconds between up to `repeat` checks.
        delay = module.params['delay']
        repeat = module.params['repeat']
        time.sleep(delay)
        count = 0
        for i in range(repeat):
            existing = cluster_mgr.describe_cluster(module.params['name'])
            count = existing['registeredContainerInstancesCount']
            if count > 0:
                results['changed'] = True
                break
            time.sleep(delay)
        # Fixed: was `i is repeat-1`, an identity comparison on ints that
        # only works by accident of CPython's small-int caching.
        if count == 0 and i == repeat - 1:
            module.fail_json(msg="Cluster instance count still zero after "+str(repeat)+" tries of "+str(delay)+" seconds each.")
            return

    module.exit_json(**results)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
if __name__ == '__main__':
main()
| fdupoux/ansible-modules-extras | cloud/amazon/ecs_cluster.py | Python | gpl-3.0 | 8,030 |
# -*- coding: utf-8 -*-
"""
The goal of this file is to load the MNIST Dataset
@author: niboshi (https://github.com/niboshi/mnist_loader/blob/master/mnist_loader.py)
"""
import os
import urllib
import urllib2
import StringIO
import gzip
import struct
import numpy as np
def open_data_url(url, cache_dir=None):
    """
    Opens an URL as a file-like object.

    Downloads are cached on disk (one file per URL-quoted name under
    cache_dir, defaulting to ./_cache next to this module); later calls
    for the same URL read from the cache instead of the network.
    """
    if cache_dir is None:
        cache_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), '_cache')
    # Quote the full URL (safe='') so it can serve as a flat file name.
    cache_path = os.path.join(cache_dir, urllib.quote(url, safe=''))
    if os.path.isfile(cache_path):
        return open(cache_path, 'rb')
    else:
        request = urllib2.Request(url)
        response = urllib2.urlopen(request)
        buf = StringIO.StringIO()
        block = 1024
        while True:
            data = response.read(block)
            if data is None:
                # NOTE(review): read() returns '' at EOF, never None; this
                # branch looks unreachable -- confirm before relying on it.
                break
            if len(data) == 0:
                break
            buf.write(data)
        # Only populate the cache once the download completed in full.
        if not os.path.isdir(os.path.dirname(cache_path)):
            os.makedirs(os.path.dirname(cache_path))
        with open(cache_path, 'wb') as fo:
            fo.write(buf.getvalue())
        buf.seek(0)
        return buf
def read_data(file_in):
    """
    Parses the IDX file format (http://yann.lecun.com/exdb/mnist/).

    Layout: two zero bytes, a type code, a dimension count, the big-endian
    uint32 dimensions, then the raw element data.  Returns a writable numpy
    array of the decoded shape and dtype.

    Raises RuntimeError on a malformed magic number, KeyError on an
    unsupported type code.
    """
    # Magic code.  bytearray() yields ints on both Python 2 and 3, unlike
    # the previous ord()-per-character loop which broke on Python 3 bytes.
    magic = bytearray(file_in.read(4))
    if len(magic) != 4 or magic[0] != 0 or magic[1] != 0:
        raise RuntimeError("Invalid magic number: [{}]".format('-'.join(['{:02x}'.format(_) for _ in magic])))

    # Type code.  IDX stores multi-byte values MSB-first (big-endian), so
    # the multi-byte dtypes must be explicitly '>'; the old native dtypes
    # silently byte-swapped int16/int32/float data on little-endian hosts.
    type_code = magic[2]
    dtype_map = {
        0x08: np.uint8,
        0x09: np.int8,
        0x0B: np.dtype('>i2'),
        0x0C: np.dtype('>i4'),
        0x0D: np.dtype('>f4'),
        0x0E: np.dtype('>f8'),
    }
    dtype = dtype_map[type_code]

    # Dimensions
    ndim = magic[3]
    dims = []
    for idim in range(ndim):
        dim, = struct.unpack('>I', file_in.read(4))
        dims.append(dim)

    # Data.  frombuffer replaces the removed/deprecated fromstring; copy()
    # keeps the returned array writable, matching the old behavior.
    data = file_in.read()
    data = np.frombuffer(data, dtype=dtype).reshape(tuple(dims)).copy()
    return data
def read_data_from_url(url, cache_dir=None):
    """
    Fetch (or load from cache) the gzipped IDX file at `url` and parse it
    into a numpy array.
    """
    raw = open_data_url(url, cache_dir=cache_dir)
    return read_data(gzip.GzipFile(fileobj=raw))
def load(set_name='train', cache_dir=None):
    """
    Loads the MNIST data set.

    set_name selects the 'train' or 'test' split; returns (images, labels).
    """
    url_map = {
        'train': (
            'http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz',
            'http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz',
        ),
        'test': (
            'http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz',
            'http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz',
        ),
    }
    assert set_name in url_map, "Invalid set name: {}".format(set_name)
    data_url, labels_url = url_map[set_name]
    data = read_data_from_url(data_url, cache_dir=cache_dir)
    labels = read_data_from_url(labels_url, cache_dir=cache_dir)
    return data, labels
| NeoBoy/STSP_IIUI-Spring2016 | Task2/mnist_load.py | Python | bsd-2-clause | 3,124 |
"""Support for Xiaomi Miio."""
| fbradyirl/home-assistant | homeassistant/components/xiaomi_miio/__init__.py | Python | apache-2.0 | 31 |
from django.test import TestCase
from lingcod.features.tests import TestMpa, TestFolder
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from lingcod.common.utils import enable_sharing
class HeatmapTest(TestCase):
    """Auth and success-path tests for the heatmap geotiff/kmz endpoints."""
    fixtures = ['example_data']
    def setUp(self):
        # user1 owns an MPA inside a folder; user2 owns nothing and is used
        # to verify that other authenticated users are denied access.
        enable_sharing()
        self.user1 = User.objects.create_user(
            'user1', '[email protected]', password='pword')
        self.user2 = User.objects.create_user(
            'user2', '[email protected]', password='pword')
        self.mpa1 = TestMpa(user=self.user1, name="My Mpa")
        self.mpa1.save()
        self.folder1 = TestFolder(user=self.user1, name="My Folder")
        self.folder1.save()
        self.mpa1.add_to_collection(self.folder1)
        self.tif_url = reverse("heatmap-collection-geotiff", kwargs={'collection_uids': self.folder1.uid})
        self.kmz_url = reverse("heatmap-collection-kmz", kwargs={'collection_uids': self.folder1.uid})
    def test_noauth(self):
        # Anonymous requests get 401; an authenticated non-owner gets 403.
        response = self.client.get(self.tif_url)
        self.assertEqual(response.status_code, 401)
        self.client.login(username=self.user2.username, password='pword')
        response = self.client.get(self.tif_url)
        self.assertEqual(response.status_code, 403)
    def test_urls(self):
        # The owner can fetch both export formats.
        self.client.login(username=self.user1.username, password='pword')
        response = self.client.get(self.tif_url)
        self.assertEqual(response.status_code, 200)
        response = self.client.get(self.kmz_url)
        self.assertEqual(response.status_code, 200)
| Alwnikrotikz/marinemap | lingcod/heatmap/tests.py | Python | bsd-3-clause | 1,616 |
# Copyright 2022 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
from django.core.files.storage import default_storage
from course_grader.dao import GradeImportSource, current_datetime
from course_grader.dao.person import person_from_netid
from course_grader.exceptions import InvalidCSV
from restclients_core.exceptions import InvalidNetID, DataFailureException
from logging import getLogger
import chardet
import csv
import os
logger = getLogger(__name__)
STUDENT_NUM_LEN = 7
class InsensitiveDict(dict):
    """
    Dict whose get() normalizes each candidate key (strip + lowercase) and
    strips the returned value.  Several keys may be given; the first one
    present wins.  A non-string stored value falls back to `default`.
    """
    def get(self, *keys, default=None):
        for candidate in keys:
            normalized = candidate.strip().lower()
            if normalized in self:
                value = super().get(normalized)
                try:
                    return value.strip()
                except AttributeError:
                    # Stored value is not a string (e.g. None): bail out.
                    break
        return default
class InsensitiveDictReader(csv.DictReader):
    """
    csv.DictReader variant that normalizes header names (strip + lowercase)
    and yields rows as InsensitiveDict so lookups tolerate the same
    variations in key spelling.
    """
    @property
    def fieldnames(self):
        raw_fields = super().fieldnames
        return [name.strip().lower() for name in raw_fields]

    def __next__(self):
        row = super().__next__()
        return InsensitiveDict(row)
class GradeImportCSV(GradeImportSource):
    """Grade-import backend that parses an uploaded CSV file."""
    def __init__(self):
        # Detected character encoding; populated lazily by decode_file().
        self.encoding = None
    def decode_file(self, csvfile):
        # Detect the encoding once (chardet) and reuse it for later reads.
        if not self.encoding:
            result = chardet.detect(csvfile)
            self.encoding = result["encoding"]
        return csvfile.decode(self.encoding)
    def validate(self, fileobj):
        """Sniff the CSV dialect and require grade and student headers.

        Raises InvalidCSV when a required column is missing; rewinds
        fileobj so it can be re-read by grades_for_section().
        """
        # Read the first line of the file to validate the header
        decoded_file = self.decode_file(fileobj.readline())
        self.has_header = csv.Sniffer().has_header(decoded_file)
        self.dialect = csv.Sniffer().sniff(decoded_file)
        reader = InsensitiveDictReader(decoded_file.splitlines(),
                                       dialect=self.dialect)
        if ("import grade" not in reader.fieldnames and
                "importgrade" not in reader.fieldnames):
            raise InvalidCSV("Missing header: grade")
        if ("uwregid" not in reader.fieldnames and
                "sis user id" not in reader.fieldnames and
                "studentno" not in reader.fieldnames):
            raise InvalidCSV("Missing header: student")
        fileobj.seek(0, 0)
    def grades_for_section(self, section, instructor, **kwargs):
        """
        Convert CSV file object into normalized JSON
        Supported column names are:
        "UWRegID" OR "SIS User ID" OR "StudentNo" (required),
        "Import Grade" OR "ImportGrade" (required),
        "Incomplete" (optional),
        "Writing Credit" OR "WritingCredit" (optional)
        All other field names are ignored.
        """
        fileobj = kwargs.get("fileobj")
        self.validate(fileobj)
        decoded_file = self.decode_file(fileobj.read()).splitlines()
        grade_data = []
        for row in InsensitiveDictReader(decoded_file, dialect=self.dialect):
            student_number = row.get("StudentNo")
            student_data = {
                "student_reg_id": row.get("UWRegID", "SIS User ID"),
                # Left-pad student numbers to the canonical 7 digits.
                "student_number": student_number.zfill(STUDENT_NUM_LEN) if (
                    student_number is not None) else student_number,
                "grade": row.get("Import Grade", "ImportGrade"),
                "is_incomplete": self.is_true(row.get("Incomplete")),
                "is_writing": self.is_true(
                    row.get("Writing Credit", "WritingCredit")),
            }
            # Skip rows that carry no student identifier at all.
            if (student_data["student_reg_id"] or
                    student_data["student_number"]):
                grade_data.append(student_data)
        try:
            self._write_file(section, instructor, fileobj)
        except Exception as ex:
            # Archiving the upload is best effort; a failed copy must not
            # block the grade import itself.
            logger.error("WRITE upload file {} for {} failed: {}".format(
                fileobj.name, section.section_label(), ex))
        return {"grades": grade_data}
    def _write_file(self, section, instructor, fileobj):
        """
        Writes a copy of the uploaded file to the default storage backend.
        The path format is:
        [term_id]/[section_id]/[uwnetid]/[timestamp]/[original_file_name]
        Ex: 2013-spring/CHEM-101-A/javerage/20131018T083055/grades.csv
        """
        self.filepath = os.path.join(
            section.term.canvas_sis_id(),
            "-".join([section.curriculum_abbr.upper().replace(" ", "_"),
                      section.course_number,
                      section.section_id.upper()]),
            instructor.uwnetid,
            current_datetime().strftime("%Y%m%dT%H%M%S"),
            os.path.basename(fileobj.name).replace("/", "-"))
        fileobj.seek(0, 0)
        decoded_file = self.decode_file(fileobj.read()).splitlines()
        with default_storage.open(self.filepath, mode="w") as f:
            for line in decoded_file:
                f.write(line + "\n")
| uw-it-aca/gradepage | course_grader/dao/csv.py | Python | apache-2.0 | 5,006 |
# From 3.7.3 dataclasses.py
# Bug was handling precedence. Need parenthesis before IfExp.
#
# RUNNABLE!
def _hash_add(fields):
    # Keep the fields that are truthy under the substituted conditional:
    # a None field makes the IfExp yield 4 (truthy), so None survives while
    # False is filtered out.  The parentheses around the IfExp inside the
    # comprehension are the point of this decompiler regression test
    # (operator-precedence handling) -- do not "simplify" this expression.
    flds = [f for f in fields if (4 if f is None else f)]
    return flds
assert _hash_add([None, True, False, 3]) == [None, True, 3]
| rocky/python-uncompyle6 | test/simple_source/bug26/04_ifelse_parens.py | Python | gpl-3.0 | 262 |
#coding=utf-8
"""
Django settings for PyTweet project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '_y!6un*9hq*4=i7r@yw^0_%zr7w=2a_$+-af-!zs9h+oaf-4^a'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'PyTweetApp',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'PyTweet.urls'
WSGI_APPLICATION = 'PyTweet.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'fr-FR'
TIME_ZONE = 'Europe/Paris'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
ADMINS = (
('Clément Fasquel', '[email protected]'),
)
APPEND_SLASH = True # Ajoute un slash en fin d'URL
LOGIN_URL = '/' | cfasquel/PyTweet | PyTweet/settings.py | Python | unlicense | 2,152 |
from pycp2k.inputsection import InputSection
class _initial_vibration1(InputSection):
    """Auto-generated wrapper for the CP2K INITIAL_VIBRATION input section.

    Attributes map one-to-one onto CP2K keywords via self._keywords.
    """
    def __init__(self):
        InputSection.__init__(self)
        self.Vib_eigs_file_name = None  # VIB_EIGS_FILE_NAME keyword
        self.Phase = None  # PHASE keyword
        self._name = "INITIAL_VIBRATION"
        self._keywords = {'Vib_eigs_file_name': 'VIB_EIGS_FILE_NAME', 'Phase': 'PHASE'}
#
# FileIO.py
#
# Description: All FILE utilities goes here
#
import os
def readFile(filePath):
    """
    Read the file at filePath in binary mode.

    Returns (error, response): on success error is None and response['raw']
    holds the file's bytes; on IOError error is the OS error message and
    response is an empty dict.
    """
    error = None
    response = {}
    try:
        with open(filePath, 'rb') as fh:
            response['raw'] = fh.read()
    except IOError as err:
        error = err.strerror
    return error, response
### MDR, June 2017 ###
### utilities for deep learning work ###
import os
def set_gpu_fan_speed(speed=0):
    """Drive the GPU fan via nvidia-settings.

    Speeds of 80-100 switch the fan to manual control at that speed
    (values above 100 are clamped to 100); any speed below 80 returns
    the fan to automatic mode.
    """
    speed = min(int(speed), 100)
    if speed >= 80:
        # Manual mode plus an explicit target speed.
        command = ('nvidia-settings -a [gpu:0]/GPUFanControlState=1'
                   ' -a [fan:0]/GPUTargetFanSpeed=' + str(speed) + ' -c :0.0')
    else:
        # Hand control back to the driver.
        command = 'nvidia-settings -a [gpu:0]/GPUFanControlState=0'
    os.system(command)
    return
#!/usr/bin/env python
# encoding: utf-8
#
# Copyright (c) 2010 Doug Hellmann. All rights reserved.
#
"""Count the most common letters in words.
"""
#end_pymotw_header
import collections
# Tally every character (lowercased) across the system word list.
c = collections.Counter()
with open('/usr/share/dict/words', 'rt') as f:
    for line in f:
        c.update(line.rstrip().lower())
# Report the three most frequent characters (Python 2 print statements).
print 'Most common:'
for letter, count in c.most_common(3):
    print '%s: %7d' % (letter, count)
| qilicun/python | python2/PyMOTW-1.132/PyMOTW/collections/collections_counter_most_common.py | Python | gpl-3.0 | 420 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: redefine SupportedResourceType.fhir_history
    # as a non-null boolean defaulting to False, labelled "_history".
    dependencies = [
        ('fhir', '0003_auto_20160109_1919'),
    ]
    operations = [
        migrations.AlterField(
            model_name='supportedresourcetype',
            name='fhir_history',
            field=models.BooleanField(default=False, verbose_name=b'_history'),
        ),
    ]
| videntity/django-fhir | fhir/migrations/0004_auto_20160109_2017.py | Python | gpl-2.0 | 449 |
#from __future__ import print_function
import numpy as np
import random
shapes = {
'T': [(0, 0), (-1, 0), (1, 0), (0, -1)],
'J': [(0, 0), (-1, 0), (0, -1), (0, -2)],
'L': [(0, 0), (1, 0), (0, -1), (0, -2)],
'Z': [(0, 0), (-1, 0), (0, -1), (1, -1)],
'S': [(0, 0), (-1, -1), (0, -1), (1, 0)],
'I': [(0, 0), (0, -1), (0, -2), (0, -3)],
'O': [(0, 0), (0, -1), (-1, 0), (-1, -1)],
}
shape_names = ['T', 'J', 'L', 'Z', 'S', 'I', 'O']
def rotated(shape, cclk=False):
    """Rotate `shape` (a list of (x, y) offsets) by 90 degrees;
    counter-clockwise when cclk is True, clockwise otherwise."""
    if cclk:
        return [(-j, i) for i, j in shape]
    else:
        return [(j, -i) for i, j in shape]


def is_occupied(shape, anchor, board):
    """True when `shape` placed at `anchor` hits a side wall, the floor,
    or an already-filled board cell."""
    width, height = board.shape
    for dx, dy in shape:
        x, y = anchor[0] + dx, anchor[1] + dy
        if y < 0:
            # Cells above the top edge never collide.
            continue
        if not (0 <= x < width) or y >= height or board[x, y]:
            return True
    return False


def _attempt(shape, anchor, board, dx, dy):
    # Shared helper: shift the anchor by (dx, dy) unless that collides.
    candidate = (anchor[0] + dx, anchor[1] + dy)
    if is_occupied(shape, candidate, board):
        return shape, anchor
    return shape, candidate


def left(shape, anchor, board):
    return _attempt(shape, anchor, board, -1, 0)


def right(shape, anchor, board):
    return _attempt(shape, anchor, board, 1, 0)


def soft_drop(shape, anchor, board):
    return _attempt(shape, anchor, board, 0, 1)


def hard_drop(shape, anchor, board):
    """Drop the piece straight down as far as it will go."""
    while True:
        _, lower = soft_drop(shape, anchor, board)
        if lower == anchor:
            return shape, lower
        anchor = lower


def rotate_left(shape, anchor, board):
    candidate = rotated(shape, cclk=False)
    return (shape, anchor) if is_occupied(candidate, anchor, board) else (candidate, anchor)


def rotate_right(shape, anchor, board):
    candidate = rotated(shape, cclk=True)
    return (shape, anchor) if is_occupied(candidate, anchor, board) else (candidate, anchor)


def idle(shape, anchor, board):
    """No-op action; returns the state unchanged."""
    return (shape, anchor)
class TetrisEngine:
    """Minimal Tetris board with a step() API suitable for RL experiments.

    The board is a (width, height) array of 0/1 values; x indexes columns
    and y grows downward.  Integer actions map to the movement helpers
    defined above via value_action_map.
    """
    def __init__(self, width, height):
        self.width = width
        self.height = height
        # Fixed: np.float was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin float is the same dtype (float64).
        self.board = np.zeros(shape=(width, height), dtype=float)

        # actions are triggered by letters
        self.value_action_map = {
            0: left,
            1: right,
            2: hard_drop,
            3: soft_drop,
            4: rotate_left,
            5: rotate_right,
            6: idle,
        }
        self.action_value_map = dict([(j, i) for i, j in self.value_action_map.items()])
        self.nb_actions = len(self.value_action_map)

        # for running the engine
        self.time = -1
        self.score = -1
        self.anchor = None
        self.shape = None
        self.n_deaths = 0

        # used for generating shapes
        self._shape_counts = [0] * len(shapes)

        # clear after initializing
        self.clear()

    def _choose_shape(self):
        # Weighted random pick that favours the least-used shapes so the
        # piece distribution stays roughly even over time.
        maxm = max(self._shape_counts)
        m = [5 + maxm - x for x in self._shape_counts]
        r = random.randint(1, sum(m))
        for i, n in enumerate(m):
            r -= n
            if r <= 0:
                self._shape_counts[i] += 1
                return shapes[shape_names[i]]

    def _new_piece(self):
        # Spawn at the horizontal centre of the board.
        # Fixed: was `self.width / 2`, which yields a float anchor under
        # Python 3 and only worked because step() cast it back to int.
        self.anchor = (self.width // 2, 0)
        self.shape = self._choose_shape()

    def _has_dropped(self):
        # The piece has landed when the cell below is blocked.
        return is_occupied(self.shape, (self.anchor[0], self.anchor[1] + 1), self.board)

    def _clear_lines(self):
        """Remove full rows, bump the score, and return how many cleared."""
        can_clear = [np.all(self.board[:, i]) for i in range(self.height)]
        new_board = np.zeros_like(self.board)
        # Compact the surviving rows towards the bottom of the board.
        j = self.height - 1
        for i in range(self.height - 1, -1, -1):
            if not can_clear[i]:
                new_board[:, j] = self.board[:, i]
                j -= 1
        self.score += sum(can_clear)
        self.board = new_board

        return sum(can_clear)

    def valid_action_count(self):
        """Number of actions that would actually change (shape, anchor)."""
        valid_action_sum = 0
        for value, fn in self.value_action_map.items():
            # If they're equal, it is not a valid action
            if fn(self.shape, self.anchor, self.board) != (self.shape, self.anchor):
                valid_action_sum += 1
        return valid_action_sum

    def step(self, action):
        """Apply one action plus gravity; return (board_copy, reward, done)."""
        self.anchor = (int(self.anchor[0]), int(self.anchor[1]))
        self.shape, self.anchor = self.value_action_map[action](self.shape, self.anchor, self.board)
        # Gravity: the piece also drops one cell every step.
        self.shape, self.anchor = soft_drop(self.shape, self.anchor, self.board)

        # Update time and reward
        self.time += 1
        reward = self.valid_action_count()

        done = False
        if self._has_dropped():
            self._set_piece(True)
            reward += 10 * self._clear_lines()
            if np.any(self.board[:, 0]):
                # Stack reached the top row: game over, reset the board.
                self.clear()
                self.n_deaths += 1
                done = True
                reward = -10
            else:
                self._new_piece()

        # Render the active piece into the returned state only.
        self._set_piece(True)
        state = np.copy(self.board)
        self._set_piece(False)
        return state, reward, done

    def clear(self):
        """Reset clock, score and board; spawn a fresh piece."""
        self.time = 0
        self.score = 0
        self._new_piece()
        self.board = np.zeros_like(self.board)

        return self.board

    def _set_piece(self, on=False):
        # Stamp (on=True) or erase (on=False) the current piece in place.
        for i, j in self.shape:
            x, y = i + self.anchor[0], j + self.anchor[1]
            if x < self.width and x >= 0 and y < self.height and y >= 0:
                self.board[int(self.anchor[0] + i), int(self.anchor[1] + j)] = on

    def __repr__(self):
        self._set_piece(True)
        s = 'o' + '-' * self.width + 'o\n'
        s += '\n'.join(['|' + ''.join(['X' if j else ' ' for j in i]) + '|' for i in self.board.T])
        s += '\no' + '-' * self.width + 'o'
        self._set_piece(False)
        return s
| jaybutera/tetrisRL | engine.py | Python | mit | 6,018 |
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
import math
from mathutils import Vector, Matrix
import bpy
from bpy.props import EnumProperty, IntProperty
from sverchok.node_tree import SverchCustomTreeNode
from sverchok.data_structure import updateNode, match_long_repeat
from sverchok.utils.sv_bmesh_utils import bmesh_from_pydata, pydata_from_bmesh
class Vertices(object):
    """Splits a bmesh's vertices by topology (wire/boundary/interior) into
    "Yes"/"No" vertex sets, a boolean mask, and the edges/faces whose
    vertices fall entirely within one of the two sets."""
    outputs = [
        ('VerticesSocket', 'YesVertices'),
        ('VerticesSocket', 'NoVertices'),
        ('StringsSocket', 'VerticesMask'),
        ('StringsSocket', 'YesEdges'),
        ('StringsSocket', 'NoEdges'),
        ('StringsSocket', 'YesFaces'),
        ('StringsSocket', 'NoFaces'),
    ]
    submodes = [
        ("Wire", "Wire", "Wire", 1),
        ("Boundary", "Boundary", "Boundary", 2),
        ("Interior", "Interior", "Interior", 3)
    ]
    default_submode = "Interior"
    @staticmethod
    def on_update_submode(node):
        # Wire vertices belong to no face, so hide the face sockets then.
        node.outputs['YesFaces'].hide = node.submode == "Wire"
        node.outputs['NoFaces'].hide = node.submode == "Wire"
    @staticmethod
    def process(bm, submode):
        """Partition bm.verts by `submode`.

        Returns [good_verts, bad_verts, mask, good_edges, bad_edges,
        good_faces, bad_faces], where edge/face vertex indices are
        renumbered into their own partition.
        """
        def find_new_idxs(data, old_idxs):
            # Map original vertex indices to their position within the
            # partition; None when any vertex is missing from it.
            # NOTE(review): linear scan per lookup makes this quadratic
            # overall; a {old: new} dict would be O(1) per lookup.
            new_idxs = []
            for old_idx in old_idxs:
                new = [n for (co, o, n) in data if o == old_idx]
                if not new:
                    return None
                new_idxs.append(new[0])
            return new_idxs
        def is_good(v):
            if submode == "Wire":
                return v.is_wire
            if submode == "Boundary":
                return v.is_boundary
            if submode == "Interior":
                return (v.is_manifold and not v.is_boundary)
        good = []
        bad = []
        good_idx = 0
        bad_idx = 0
        mask = []
        # Each entry is (coordinate, original index, new index in partition).
        for v in bm.verts:
            co = tuple(v.co)
            ok = is_good(v)
            if ok:
                good.append((co, v.index, good_idx))
                good_idx += 1
            else:
                bad.append((co, v.index, bad_idx))
                bad_idx += 1
            mask.append(ok)
        good_vertices = [x[0] for x in good]
        bad_vertices = [x[0] for x in bad]
        # An edge/face is kept only when ALL its vertices are in the same
        # partition; mixed edges/faces are dropped from both outputs.
        good_edges = []
        bad_edges = []
        for e in bm.edges:
            sv1, sv2 = e.verts[0].index, e.verts[1].index
            good_edge = find_new_idxs(good, [sv1, sv2])
            if good_edge:
                good_edges.append(good_edge)
            bad_edge = find_new_idxs(bad, [sv1, sv2])
            if bad_edge:
                bad_edges.append(bad_edge)
        good_faces = []
        bad_faces = []
        for f in bm.faces:
            vs = [v.index for v in f.verts]
            good_face = find_new_idxs(good, vs)
            if good_face:
                good_faces.append(good_face)
            bad_face = find_new_idxs(bad, vs)
            if bad_face:
                bad_faces.append(bad_face)
        return [good_vertices, bad_vertices, mask,
                good_edges, bad_edges,
                good_faces, bad_faces]
class Edges(object):
    """Filter bmesh edges by topological category (wire, boundary, ...)."""
    outputs = [
        ('StringsSocket', 'YesEdges'),
        ('StringsSocket', 'NoEdges'),
        ('StringsSocket', 'Mask'),
    ]
    submodes = [
        ("Wire", "Wire", "Wire", 1),
        ("Boundary", "Boundary", "Boundary", 2),
        ("Interior", "Interior", "Interior", 3),
        ("Convex", "Convex", "Convex", 4),
        ("Concave", "Concave", "Concave", 5),
        ("Contiguous", "Contiguous", "Contiguous", 6),
    ]
    default_submode = "Interior"
    @staticmethod
    def process(bm, submode):
        """Return [matching_edges, other_edges, mask].

        Edges are reported as (start_index, end_index) vertex pairs;
        the mask holds one boolean per edge of ``bm``.
        """
        def matches(edge):
            # Each submode maps onto a bmesh edge predicate.
            if submode == "Wire":
                return edge.is_wire
            if submode == "Boundary":
                return edge.is_boundary
            if submode == "Interior":
                return (edge.is_manifold and not edge.is_boundary)
            if submode == "Convex":
                return edge.is_convex
            if submode == "Concave":
                return edge.is_contiguous and not edge.is_convex
            if submode == "Contiguous":
                return edge.is_contiguous
        matching = []
        rest = []
        mask = []
        for edge in bm.edges:
            pair = (edge.verts[0].index, edge.verts[1].index)
            hit = matches(edge)
            (matching if hit else rest).append(pair)
            mask.append(hit)
        return [matching, rest, mask]
class Faces(object):
    """Split bmesh faces into interior and boundary groups."""
    outputs = [
        ('StringsSocket', 'Interior'),
        ('StringsSocket', 'Boundary'),
        ('StringsSocket', 'BoundaryMask'),
    ]
    @staticmethod
    def process(bm, submode):
        """Return [interior_faces, boundary_faces, mask].

        Faces are lists of vertex indices; ``submode`` is accepted for
        interface symmetry with Vertices/Edges but unused.
        """
        interior = []
        boundary = []
        mask = []
        for face in bm.faces:
            vert_idxs = [v.index for v in face.verts]
            # A face touching at least one boundary edge counts as boundary.
            on_boundary = any(e.is_boundary for e in face.edges)
            target = boundary if on_boundary else interior
            target.append(vert_idxs)
            mask.append(int(on_boundary))
        return [interior, boundary, mask]
class SvMeshFilterNode(bpy.types.Node, SverchCustomTreeNode):
    ''' Filter mesh elements: manifold vs boundary etc.

    The active ``mode`` names one of the helper classes in this module
    (Vertices / Edges / Faces); that class supplies the output sockets,
    the available submodes and the actual filtering logic.
    '''
    bl_idname = 'SvMeshFilterNode'
    bl_label = 'Mesh filter'
    bl_icon = 'OUTLINER_OB_EMPTY'
    # (identifier, label, description, number) tuples for the mode enum;
    # the identifier doubles as the helper class name looked up in globals().
    modes = [
        ("Vertices", "Vertices", "Filter vertices", 0),
        ("Edges", "Edges", "Filter edges", 1),
        ("Faces", "Faces", "Filter faces", 2)
    ]
    def update_mode(self, context):
        # Rebuild output sockets from the helper class of the new mode.
        cls = globals()[self.mode]
        while len(self.outputs) > 0:
            self.outputs.remove(self.outputs[0])
        for ocls, oname in cls.outputs:
            self.outputs.new(ocls, oname)
        if hasattr(cls, "default_submode"):
            self.submode = cls.default_submode
        else:
            # NOTE(review): assigning None to an EnumProperty (Faces mode
            # has no submodes) looks suspicious -- confirm Blender accepts
            # this without raising.
            self.submode = None
        updateNode(self, context)
    def update_submode(self, context):
        # Give the helper class a chance to react (e.g. hide face sockets).
        cls = globals()[self.mode]
        if hasattr(cls, "on_update_submode"):
            cls.on_update_submode(self)
        updateNode(self, context)
    mode = EnumProperty(name="Mode",
                        items=modes,
                        default='Vertices',
                        update=update_mode)
    def get_submodes(self, context):
        # Dynamic items callback: the submode list depends on the mode.
        cls = globals()[self.mode]
        if hasattr(cls, "submodes"):
            return cls.submodes
        else:
            return []
    submode = EnumProperty(name="Filter",
                           items = get_submodes,
                           update = update_submode)
    def draw_buttons(self, context, layout):
        # UI: mode selector always shown, submode only when it exists.
        layout.prop(self, 'mode', expand=True)
        cls = globals()[self.mode]
        if hasattr(cls, "submodes"):
            layout.prop(self, 'submode', expand=False)
    def sv_init(self, context):
        # Input sockets are fixed; outputs come from update_mode().
        self.inputs.new('VerticesSocket', "Vertices")
        self.inputs.new('StringsSocket', "Edges")
        self.inputs.new('StringsSocket', "Polygons")
        self.update_mode(context)
        self.update_submode(context)
    def process(self):
        # Skip all work when nothing is connected downstream.
        if not any(output.is_linked for output in self.outputs):
            return
        vertices_s = self.inputs['Vertices'].sv_get(default=[[]])
        edges_s = self.inputs['Edges'].sv_get(default=[[]])
        faces_s = self.inputs['Polygons'].sv_get(default=[[]])
        cls = globals()[self.mode]
        results = []
        # Broadcast the three input lists to a common length, then
        # filter each mesh independently via the helper class.
        meshes = match_long_repeat([vertices_s, edges_s, faces_s])
        for vertices, edges, faces in zip(*meshes):
            bm = bmesh_from_pydata(vertices, edges, faces)
            bm.normal_update()
            outs = cls.process(bm, self.submode)
            results.append(outs)
        # Transpose: per-mesh rows -> per-socket columns.
        results = zip(*results)
        for (ocls,oname), result in zip(cls.outputs, results):
            if self.outputs[oname].is_linked:
                self.outputs[oname].sv_set(result)
def register():
    # Called by Sverchok/Blender when the add-on is enabled.
    bpy.utils.register_class(SvMeshFilterNode)
def unregister():
    # Called when the add-on is disabled; removes the node class again.
    bpy.utils.unregister_class(SvMeshFilterNode)
| taxpon/sverchok | nodes/analyzer/mesh_filter.py | Python | gpl-3.0 | 8,899 |
# -*- coding: utf-8 -*-
#
# Tempest documentation build configuration file, created by
# sphinx-quickstart on Tue May 21 17:43:32 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import subprocess
# Build a tempest sample config file:
def build_sample_config(app):
    """Generate the sample tempest.conf into doc/source/_static/."""
    # The repository root is three directory levels above this conf.py.
    here = os.path.abspath(__file__)
    repo_root = os.path.dirname(os.path.dirname(os.path.dirname(here)))
    cmd = [
        "oslo-config-generator",
        "--config-file", "tools/config/config-generator.tempest.conf",
        "--output-file", "doc/source/_static/tempest.conf",
    ]
    subprocess.call(cmd, cwd=repo_root)
def setup(app):
    # Sphinx extension hook: regenerate the sample config whenever the
    # builder is initialized, so docs always ship a current tempest.conf.
    app.connect('builder-inited', build_sample_config)
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinx.ext.viewcode',
'oslosphinx'
]
todo_include_todos = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Tempest'
copyright = u'2013, OpenStack QA Team'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = False
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
modindex_common_prefix = ['tempest.']
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'nature'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# NOTE(review): os.popen() swallows errors -- if git is unavailable this
# silently becomes ''; subprocess.check_output would fail loudly instead.
# Confirm which behavior is wanted before changing.
git_cmd = "git log --pretty=format:'%ad, commit %h' --date=local -n1"
html_last_updated_fmt = os.popen(git_cmd).read()
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_domain_indices = False
# If false, no index is generated.
html_use_index = False
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Tempestdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Tempest.tex', u'Tempest Documentation',
u'OpenStack QA Team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'tempest', u'Tempest Documentation',
[u'OpenStack QA Team'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Tempest', u'Tempest Documentation',
u'OpenStack QA Team', 'Tempest', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'Tempest'
epub_author = u'Sean Dague'
epub_publisher = u'OpenStack QA Team'
epub_copyright = u'2013, OpenStack QA Team'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
| hayderimran7/tempest | doc/source/conf.py | Python | apache-2.0 | 9,459 |
from pizzashare import db, models
# NOTE(review): ``models`` looks unused but is presumably imported for its
# side effect of registering table classes on db.Base -- confirm.
if __name__ == '__main__':
    # Create every declared table on the configured engine.
    db.Base.metadata.create_all(db.engine)
| emef/pizzashare | scripts/sync_db.py | Python | gpl-2.0 | 105 |
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2009,2011,2012,2013 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" DNS CNAME records """
from sqlalchemy import Column, Integer, ForeignKey, Index
from sqlalchemy.orm import relation, backref, column_property
from sqlalchemy.ext.associationproxy import association_proxy
from sqlalchemy.sql import select, func
from aquilon.aqdb.model import DnsRecord, Fqdn
_TN = 'alias'
MAX_ALIAS_DEPTH = 4
class Alias(DnsRecord):
    """ Aliases a.k.a. CNAMES

    Joined-table inheritance subclass of DnsRecord; rows in the 'alias'
    table point their FQDN at another FQDN (the CNAME target).
    """
    __tablename__ = _TN
    # Primary key doubles as the foreign key into the parent dns_record row.
    dns_record_id = Column(Integer, ForeignKey('dns_record.id',
                                               name='%s_dns_record_fk' % _TN,
                                               ondelete='CASCADE'),
                           primary_key=True)
    # FQDN this alias resolves to (the right-hand side of the CNAME).
    target_id = Column(Integer, ForeignKey('fqdn.id',
                                           name='%s_target_fk' % _TN),
                       nullable=False)
    target = relation(Fqdn, foreign_keys=target_id, backref=backref('aliases'))
    # The same name may resolve to multiple RRs
    target_rrs = association_proxy('target', 'dns_records')
    __table_args__ = (Index('%s_target_idx' % _TN, target_id),)
    __mapper_args__ = {'polymorphic_identity': _TN}
    @property
    def alias_depth(self):
        # Length of the longest alias->alias chain below this record;
        # an alias pointing only at non-alias records has depth 1.
        depth = 0
        for tgt in self.target_rrs:
            if not isinstance(tgt, Alias):
                continue
            depth = max(depth, tgt.alias_depth)
        return depth + 1
    def __init__(self, **kwargs):
        super(Alias, self).__init__(**kwargs)
        # Enforce the chain-length limit (MAX_ALIAS_DEPTH) at creation time.
        if self.alias_depth > MAX_ALIAS_DEPTH:
            raise ValueError("Maximum alias depth exceeded")
alias = Alias.__table__  # pylint: disable=C0103
# Metadata consumed by the generic search/display machinery.
alias.info['unique_fields'] = ['fqdn']
alias.info['extra_search_fields'] = ['target', 'dns_environment']
# Most addresses will not have aliases. This bulk loadable property allows the
# formatter to avoid querying the alias table for every displayed DNS record
# See http://www.sqlalchemy.org/trac/ticket/2139 about why we need the .alias()
DnsRecord.alias_cnt = column_property(
    select([func.count()], DnsRecord.fqdn_id == alias.alias().c.target_id)
    .label("alias_cnt"), deferred=True)
| jrha/aquilon | lib/python2.6/aquilon/aqdb/model/alias.py | Python | apache-2.0 | 2,814 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Jul 12 10:22:59 2020
@author: gtucker
"""
import numpy as np
from numpy.testing import (
assert_array_almost_equal,
assert_array_equal,
assert_equal,
assert_raises,
)
from landlab import HexModelGrid, RadialModelGrid, RasterModelGrid
from landlab.components import TidalFlowCalculator
def test_constant_depth_deeper_than_tidal_amplitude():
    r"""Test velocity calculation under the following conditions:

    r = 1  # tidal range in m
    T = 4.0e4  # tidal period in s
    n = 0.01  # roughness, s/m^1/3
    chi = 1  # scale velocity, m/s
    h = 8  # tidal mean depth, m

    Under these conditions, the key factors are:

    velocity coefficient, Cv = h^(4/3) / n^2 chi = 1.6e5 m/s
    diffusion coefficient, D = h^(7/3) / n^2 chi = 1.28e6 m2/s
    inundation rate (I) = 2 r / T = 5e-5 m/s

    Domain, L: 300 m, 1D

    Analytical solution for water surface height, ebb tide:

    $\eta = (IL^2/D) ((x/L) - (1/2) (x/L)^2)$

    Solution for x-directed velocity. Note that in this case $I$ and $h$ are
    uniform, so

    $u h = I x$  # output = input

    $u(x) = I x / h$

    So with the above parameters, $I$ is 5e-5 m/s. In a grid with 5 columns,
    $x$ is effectively 100, 200, and 300 m, with the adjacent open boundary
    node at 350 m. So the velocity in m/s should be:

    0.000625, 0.00125, 0.001875
    """
    grid = RasterModelGrid((3, 5), xy_spacing=100.0)
    z = grid.add_zeros("topographic__elevation", at="node")
    z[:] = -8.0  # bed 8 m down: depth always exceeds the tidal amplitude
    # Only one edge stays open (fixed value) -> effectively a 1D channel.
    grid.set_closed_boundaries_at_grid_edges(False, True, True, True)
    tfc = TidalFlowCalculator(grid, tidal_period=4.0e4)
    tfc.run_one_step()
    # Links 10:13 carry the 1D along-channel flow (see docstring values).
    assert_array_almost_equal(
        grid.at_link["ebb_tide_flow__velocity"][10:13], [0.000625, 0.00125, 0.001875]
    )
    assert_array_almost_equal(
        grid.at_link["flood_tide_flow__velocity"][10:13],
        [-0.000625, -0.00125, -0.001875],
    )
def test_constant_depth_deeper_than_tidal_amplitude_alt_grid():
    """Test velocity calculation with different grid orientation."""
    # Same physical setup as the previous test, but the 1D domain runs
    # vertically (5 rows x 3 columns) instead of horizontally.
    mg = RasterModelGrid((5, 3), xy_spacing=100.0)
    elev = mg.add_zeros("topographic__elevation", at="node")
    elev[:] = -8.0
    mg.set_closed_boundaries_at_grid_edges(True, False, True, True)
    calc = TidalFlowCalculator(mg, tidal_period=4.0e4)
    calc.run_one_step()
    check_links = [8, 13, 18]
    expected = [0.000625, 0.00125, 0.001875]
    assert_array_almost_equal(
        mg.at_link["ebb_tide_flow__velocity"][check_links], expected
    )
    assert_array_almost_equal(
        mg.at_link["flood_tide_flow__velocity"][check_links],
        [-v for v in expected],
    )
def test_constant_depth_shallower_than_tidal_amplitude():
    r"""Test velocity calculation under the following conditions:

    r = 1  # tidal range in m
    T = 4.0e4  # tidal period in s
    n = 0.01  # roughness, s/m^1/3
    chi = 1  # scale velocity, m/s
    h_m = 0.25  # water depth at mean sea level, m

    Under these conditions, the key factors are:

    inundation rate (I) =
    $I = [r/2 - max(-r/2, min(z, r/2))]/(T/2)$
    $= (0.5 - max(-0.25, min(-0.25, 0.5))) / 20,000$
    $= (0.5 - (-0.25)) / 20,000 = 3.75\times 10^{-5}$ m/s

    Domain, L: 300 m, 1D

    Analytical solution for water surface height, ebb tide:

    $\eta = (IL^2/D) ((x/L) - (1/2) (x/L)^2)$

    Solution for x-directed velocity. Note that in this case $I$ and $h$ are
    uniform, so

    $u h = I x$  # output = input

    $u(x) = I x / h$

    Water depth $h$ is calculated as the average of high-tide and low-tide
    depth. Because low-tide depth is zero,

    $h = (0.25 + r/2) / 2 = 0.375$ m

    In a grid with 5 columns, $x$ is effectively 100, 200, and 300 m, with the
    adjacent open boundary node at 350 m. So the velocity in m/s should be:

    0.01, 0.02, 0.03
    """
    grid = RasterModelGrid((3, 5), xy_spacing=100.0)
    z = grid.add_zeros("topographic__elevation", at="node")
    z[:] = -0.25  # bed shallower than tidal amplitude: cells partially dry
    grid.set_closed_boundaries_at_grid_edges(False, True, True, True)
    tfc = TidalFlowCalculator(grid, tidal_period=4.0e4)
    tfc.run_one_step()
    assert_array_almost_equal(
        grid.at_link["ebb_tide_flow__velocity"][10:13], [0.01, 0.02, 0.03]
    )
    assert_array_almost_equal(
        grid.at_link["flood_tide_flow__velocity"][10:13], [-0.01, -0.02, -0.03]
    )
def test_with_hex_grid():
    """Test mass balance with a hex grid.

    The test here is based on a simple mass balance: the computed velocity at
    the open boundaries, when multiplied by the depth and the width of all open
    cell faces, should equal the total inflow or outflow rate, which is the
    inundation rate times the total area.

    The test configuration is a hex grid with 5 rows and a maximum width of 5
    columns. The bottom 3 nodes are open (fixed value) boundaries; the rest are
    closed. The tidal range is 1 meter, the mean depth is 5 meters, and the
    tidal period is 40,000 seconds. Node spacing will be 2 meters.

    Inundation rate = I = tidal range / tidal half period
    = 1 / 20,000 = 5 x 10^-5 m/s

    Area of one cell = (3^0.5 / 2) dx^2 ~ 3.4641
    Width of one face = dx / 3^0.5

    Inundation volume rate = I x cell area x 7 cells
    = ~0.0012124 m3/s

    Outflow volume = velocity at one the edge of any one of the lower active
    links x (solved) depth at that link x width of face x 4 faces. Call the
    velocity-depth product q. Then the predicted q should be:

    q = inundation volume rate / (face width x 4 faces)
    = (7 I (3^0.5 / 2) dx^2) / (4 dx / 3^0.5)
    = (21/8) I dx = (21/4) r dx / T = 0.0002625
    """
    grid = HexModelGrid((5, 3), spacing=2.0)
    z = grid.add_zeros("topographic__elevation", at="node")
    z[:] = -5.0  # 5 m mean depth everywhere
    # Close all boundary nodes except the bottom row
    grid.status_at_node[
        grid.status_at_node != grid.BC_NODE_IS_CORE
    ] = grid.BC_NODE_IS_CLOSED
    grid.status_at_node[0:3] = grid.BC_NODE_IS_FIXED_VALUE
    tfc = TidalFlowCalculator(grid, tidal_period=4.0e4)
    tfc.run_one_step()
    # q = velocity * depth on the links crossing the open boundary.
    q = grid.at_link["flood_tide_flow__velocity"] * tfc._water_depth_at_links
    assert_array_almost_equal(q[3:7], [0.0002625, 0.0002625, 0.0002625, 0.0002625])
def test_wrong_grid_type_error():
    # Construction must fail with TypeError for an unsupported grid type.
    assert_raises(TypeError, TidalFlowCalculator, RadialModelGrid(2))
def test_getters_and_setters():
    """Check property round-trips and the derived half-range/half-period."""
    grid = RasterModelGrid((3, 5))
    grid.add_zeros("topographic__elevation", at="node")
    tfc = TidalFlowCalculator(grid)
    # The roughness setter broadcasts a scalar to a per-link array.
    tfc.roughness = 0.01
    assert_array_equal(tfc.roughness, 0.01 + np.zeros(grid.number_of_links))
    tfc.tidal_range = 4.0
    assert_equal(tfc.tidal_range, 4.0)
    assert_equal(tfc._tidal_half_range, 2.0)
    tfc.tidal_period = 40000.0
    assert_equal(tfc.tidal_period, 40000.0)
    assert_equal(tfc._tidal_half_period, 20000.0)
    tfc.mean_sea_level = 1.0
    assert_equal(tfc.mean_sea_level, 1.0)
| cmshobe/landlab | tests/components/tidal_flow_calculator/test_tidal_flow_calculator.py | Python | mit | 6,994 |
from .chrome import *
| crckyl/pixplus | test/chrome/__init__.py | Python | mit | 22 |
import pandas as pd
def add_full_name(path_to_csv, path_to_new_csv):
    """Read a Lahman-style CSV, add a 'nameFull' column, write a new CSV.

    The input at *path_to_csv* must contain 'nameFirst' and 'nameLast'
    columns; 'nameFull' is "<nameFirst> <nameLast>" (e.g. "Hank Aaron").
    Rows where either part is missing get NaN (NaN + str propagates NaN).
    The result is written to *path_to_new_csv*.
    """
    df = pd.read_csv(path_to_csv)
    df['nameFull'] = df['nameFirst'] + ' ' + df['nameLast']
    # index=False keeps the output schema identical to the input instead of
    # prepending a spurious "Unnamed: 0" index column on every re-save.
    df.to_csv(path_to_new_csv, index=False)
if __name__ == "__main__":
# For local use only
# If you are running this on your own machine add the path to the
# Lahman baseball csv and a path for the new csv.
# The dataset can be downloaded from this website: http://www.seanlahman.com/baseball-archive/statistics
# We are using the file Master.csv
path_to_csv = "baseballdatabank-2017.1/core/Master.csv"
path_to_new_csv = "baseballdatabank-2017.1/core/new_Master.csv"
add_full_name(path_to_csv, path_to_new_csv)
| angelmtenor/IDSFC | L2_Data_Wrangling/A_CSV_exercise.py | Python | mit | 1,277 |
"""
Twitter platform for notify component.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/notify.twitter/
"""
import logging
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.notify import (
PLATFORM_SCHEMA, BaseNotificationService)
from homeassistant.const import CONF_ACCESS_TOKEN, CONF_USERNAME
REQUIREMENTS = ['TwitterAPI==2.4.4']
_LOGGER = logging.getLogger(__name__)
# Configuration keys specific to this platform (Twitter OAuth credentials).
CONF_CONSUMER_KEY = 'consumer_key'
CONF_CONSUMER_SECRET = 'consumer_secret'
CONF_ACCESS_TOKEN_SECRET = 'access_token_secret'
# All four credentials are required; the optional 'username' switches
# delivery from a public tweet to a direct message to that user.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_ACCESS_TOKEN): cv.string,
    vol.Required(CONF_ACCESS_TOKEN_SECRET): cv.string,
    vol.Required(CONF_CONSUMER_KEY): cv.string,
    vol.Required(CONF_CONSUMER_SECRET): cv.string,
    vol.Optional(CONF_USERNAME): cv.string,
})
def get_service(hass, config, discovery_info=None):
    """Get the Twitter notification service."""
    consumer_key = config[CONF_CONSUMER_KEY]
    consumer_secret = config[CONF_CONSUMER_SECRET]
    token = config[CONF_ACCESS_TOKEN]
    token_secret = config[CONF_ACCESS_TOKEN_SECRET]
    # username is optional: absent means "post public status updates".
    return TwitterNotificationService(
        consumer_key, consumer_secret, token, token_secret,
        config.get(CONF_USERNAME))
class TwitterNotificationService(BaseNotificationService):
    """Implementation of a notification service for the Twitter service."""
    def __init__(self, consumer_key, consumer_secret, access_token_key,
                 access_token_secret, username):
        """Initialize the service.

        If *username* is set, send_message delivers direct messages to
        that user; otherwise it posts public status updates.
        """
        from TwitterAPI import TwitterAPI
        self.user = username
        self.api = TwitterAPI(consumer_key, consumer_secret, access_token_key,
                              access_token_secret)
    def send_message(self, message="", **kwargs):
        """Tweet a message."""
        if self.user:
            # NOTE(review): Twitter's direct_messages/new endpoint documents
            # 'screen_name' / 'user_id' parameters; confirm 'user' is
            # actually accepted here.
            resp = self.api.request(
                'direct_messages/new', {'text': message, 'user': self.user})
        else:
            resp = self.api.request('statuses/update', {'status': message})
        if resp.status_code != 200:
            # Surface the first error object returned by the Twitter API.
            import json
            obj = json.loads(resp.text)
            error_message = obj['errors'][0]['message']
            error_code = obj['errors'][0]['code']
            _LOGGER.error("Error %s : %s (Code %s)", resp.status_code,
                          error_message,
                          error_code)
| kyvinh/home-assistant | homeassistant/components/notify/twitter.py | Python | apache-2.0 | 2,402 |
# https://app.codesignal.com/arcade/intro/level-3/JKKuHJknZNj4YGL32
from collections import Counter
def commonCharacterCount(s1, s2):
    """Return the number of common characters between *s1* and *s2*.

    Each occurrence counts separately: for every character the
    contribution is min(count in s1, count in s2).
    """
    # Counter intersection (&) keeps, for each key, the minimum of the two
    # multiplicities -- exactly the "common characters" multiset -- so the
    # manual per-key min() loop is unnecessary.
    return sum((Counter(s1) & Counter(s2)).values())
| zubie7a/Algorithms | CodeSignal/Arcade/Intro/Level_03/02_Common_Character_Count.py | Python | mit | 637 |
# Built-in
import os
import re
import itertools as itt
import warnings
# Common
import numpy as np
from scipy.interpolate import RectBivariateSpline as scpRectSpl
__all__ = ['step03_read', 'step03_read_all']
_DTYPES = {'adf11': ['acd', 'ccd', 'scd', 'plt', 'prb'],
'adf15': None}
_DEG = 1
_PECASFUNC = True
# #############################################################################
# #############################################################################
# Utility functions
# #############################################################################
def _get_PATH_LOCAL():
pfe = os.path.join(os.path.expanduser('~'), '.tofu', 'openadas2tofu')
if os.path.isdir(pfe):
return pfe
else:
return None
def _get_subdir_from_pattern(path, pattern, mult=None):
""" Get list of files matching patterns in path
If no match => Exception
If multiple matches
=> mult = True: pass
=> mult = 'warn': warning
=> mult = 'err': Exception
"""
# Check inputs
if mult is None:
mult = 'err'
if mult not in [True, 'warn', 'err']:
msg = (
"Arg mult must be in [True, 'warn', 'err']!\n"
+ "\t- provided: {}".format(mult)
)
raise Exception(msg)
if isinstance(pattern, str):
pattern = [pattern]
# Get matches
ld = [
dd for dd in os.listdir(path)
if os.path.isdir(os.path.join(path, dd))
and all([pp in dd for pp in pattern])
]
if len(ld) != 1:
msg = ("You have no / many directories in your local "
+ "~/.tofu/openadas2tofu/ matching the desired file type:\n"
+ "\t- path: {}\n".format(path)
+ "\t- provided (all): {}\n".format(pattern)
+ "\t- available: {}\n".format(ld)
+ " => download the data with "
+ "tf.openadas2tofu.step02_download()")
if len(ld) == 0:
raise Exception(msg)
else:
if mult == 'err':
raise Exception(msg)
elif mult == 'warn':
warnings.warn(msg)
return [os.path.join(path, dd) for dd in ld]
def _get_available_elements_from_path(path=None, typ1=None):
# Check inputs
if not os.path.isdir(path):
msg = (
"Provided path is not an existing directory!\n"
+ "\t- provided: {}".format(path)
)
raise Exception(msg)
ltyp = ['adf15']
if typ1 not in ltyp:
msg = (
"Only the following types of files are handled up to now:\n"
+ "\t- handled: {}\n".format(ltyp)
+ "\t- provided: {}".format(typ1)
)
raise Exception(msg)
if typ1 == 'adf15':
lf = [
ff for ff in os.listdir(path)
if all([ss in ff for ss in ['pec', '][']])
]
element = [ff[ff.index('][')+2:] for ff in lf]
return element
def _format_for_DataCollection_adf15(
    dout,
    dsource0=None,
    dref0=None,
    ddata0=None,
    dlines0=None,
):
    """
    Format dout from step03_read_all() for SPectralLines object
    (separated te, ne, ions, sources, lines)

    Parameters
    ----------
    dout : dict
        Output of step03_read_all() for adf15 files, one entry per line
    dsource0, dref0, ddata0, dlines0 : dict, optional
        Pre-existing source / ref / data / lines dicts (presumably from an
        existing DataCollection) used to re-use keys and avoid duplicates
        -- TODO confirm against caller

    Returns
    -------
    dne, dte, dpec, lion, dsource, dlines
        Dicts of unique ne / Te vectors, pec tables, sorted list of ions,
        sources and spectral lines
    """

    # Remove already known lines if dlines0 provided
    if dlines0 is not None:

        # Check for mistakes: known lines matching on exactly one of
        # (ion, transition) / (ion, symbol) => suspected openadas redundancy
        dk0 = {
            k0: [
                k1 for k1, v1 in dlines0.items()
                if np.sum([(
                    v1['ion'] == v0['ion']
                    and v1['transition'] == v0['transition'],
                    v1['ion'] == v0['ion'] and v1['symbol'] == v0['symbol']
                )]) == 1
            ]
            for k0, v0 in dout.items()
        }
        dk0 = {k0: v0 for k0, v0 in dk0.items() if len(v0) > 0}
        if len(dk0) > 0:
            msg = (
                "\nPossible error in openadas detected,\n"
                + "the following lines have same ion and transition but "
                + "different symbol (typ0typ1-isoel):\n"
                + "\n".join([
                    "\t- {}: {}".format(k0, v0) for k0, v0 in dk0.items()
                ])
                + "\n\n => There might be redundancy / errors in openadas"
            )
            warnings.warn(msg)

        # Keep only lines whose (ion, transition) is not already known
        dout = {
            k0: v0 for k0, v0 in dout.items()
            if not any([
                v1['ion'] == v0['ion'] and v1['transition'] == v0['transition']
                for v1 in dlines0.values()
            ])
        }

    # Get dict of unique sources
    lsource = sorted(set([v0['source'] for v0 in dout.values()]))
    dsource = {}
    if dsource0 is None:
        dsource = {
            'oa-adf15-{:02}'.format(ii): {'long': ss}
            for ii, ss in enumerate(lsource)
        }
    else:
        # Check against existing sources, continue numbering after the
        # highest existing 'oa-adf15-XX' index
        nmax = int(np.max([
            int(k0.split('-')[-1])
            for k0 in dsource0.keys() if 'oa-adf15' in k0
        ])) + 1
        for ii, ss in enumerate(lsource):
            lk0 = [k0 for k0, v0 in dsource0.items() if v0['long'] == ss]
            if len(lk0) == 0:
                k0 = 'oa-adf15-{:02}'.format(nmax)
                nmax += 1
            elif len(lk0) == 1:
                k0 = lk0[0]
            else:
                msg = (
                    "\nMultiple possible matches for source {}".format(ss)
                )
                raise Exception(msg)
            dsource[k0] = {'long': ss}

    # Get dict of unique Te and ne
    dte, dne = {}, {}
    if dref0 is None:
        ite, ine = 0, 0
    else:
        # Continue numbering after existing openadas Te / ne references
        ite = int(np.max([
            int(k0.split('-')[-1]) for k0 in dref0.keys()
            if 'Te-' in k0 and 'oa-adf15' in ddata0[k0]['source']
        ])) + 1
        ine = int(np.max([
            int(k0.split('-')[-1]) for k0 in dref0.keys()
            if 'ne-' in k0 and 'oa-adf15' in ddata0[k0]['source']
        ])) + 1

    for k0, v0 in dout.items():

        # Get key of this line's source
        sour = [
            k1 for k1, v1 in dsource.items() if v1['long'] == v0['source']
        ][0]

        # fill dte: keys of already-registered Te vectors identical to this one
        kte = [
            kk for kk, vv in dte.items()
            if v0['te'].shape == vv['data'].shape
            and np.allclose(v0['te'], vv['data'])
            and sour == vv['source']
        ]
        normal = dref0 is None
        if normal is False:
            # Check vs existing Te
            lk0 = [
                k1 for k1, v1 in dref0.items()
                if ddata0[k1]['source'] == sour
                and v0['te'].shape == ddata0[k1]['data'].shape
                and np.allclose(v0['te'], ddata0[k1]['data'])
            ]
            if len(lk0) == 0:
                normal = True
            elif len(lk0) == 1:
                keyte = lk0[0]
                dte[keyte] = {
                    'data': ddata0[lk0[0]]['data'],
                    'units': v0['te_units'],
                    'source': sour,
                    'dim': 'temperature',
                    'quant': 'Te',
                    'name': 'Te',
                    'group': 'Te',
                }
            elif len(lk0) > 1:
                msg = (
                    "Multiple matches for dout[{}] in dref0:\n".format(k0)
                    + "\t- {}".format(lk0)
                )
                raise Exception(msg)
        if normal is True:
            if len(kte) == 0:
                keyte = 'Te-{:02}'.format(ite)
                dte[keyte] = {
                    'data': v0['te'],
                    'units': v0['te_units'],
                    'source': sour,
                    'dim': 'temperature',
                    'quant': 'Te',
                    'name': 'Te',
                    'group': 'Te',
                }
                ite += 1
            elif len(kte) == 1:
                # BUGFIX: re-use the matching key; previously this branch was
                # 'pass', leaving keyte pointing at whatever key the previous
                # iteration set, which could tag this line with the wrong ref
                keyte = kte[0]
            else:
                msg = (
                    "len(kte) != 1:\n"
                    + "\t- kte = {}\n".format(kte)
                )
                raise Exception(msg)
        dout[k0]['keyte'] = keyte

        # fill dne: same logic as for Te above
        kne = [
            kk for kk, vv in dne.items()
            if v0['ne'].shape == vv['data'].shape
            and np.allclose(v0['ne'], vv['data'])
            and sour == vv['source']
        ]
        normal = dref0 is None
        if normal is False:
            # Check vs existing ne
            lk0 = [
                k1 for k1, v1 in dref0.items()
                if ddata0[k1]['source'] == sour
                and v0['ne'].shape == ddata0[k1]['data'].shape
                and np.allclose(v0['ne'], ddata0[k1]['data'])
            ]
            if len(lk0) == 0:
                normal = True
            elif len(lk0) == 1:
                keyne = lk0[0]
                dne[keyne] = {
                    'data': ddata0[lk0[0]]['data'],
                    'units': v0['ne_units'],
                    'source': sour,
                    'dim': 'density',
                    'quant': 'ne',
                    'name': 'ne',
                    'group': 'ne',
                }
            elif len(lk0) > 1:
                msg = (
                    "Multiple matches for dout[{}] in dref0:\n".format(k0)
                    + "\t- {}".format(lk0)
                )
                raise Exception(msg)
        if normal is True:
            if len(kne) == 0:
                keyne = 'ne-{:02}'.format(ine)
                dne[keyne] = {
                    'data': v0['ne'],
                    'units': v0['ne_units'],
                    'source': sour,
                    'dim': 'density',
                    'quant': 'ne',
                    'name': 'ne',
                    'group': 'ne',
                }
                ine += 1
            elif len(kne) == 1:
                # BUGFIX: same as for Te above
                keyne = kne[0]
            else:
                msg = (
                    "len(kne) != 1:\n"
                    + "\t- kne = {}\n".format(kne)
                )
                raise Exception(msg)
        dout[k0]['keyne'] = keyne

    # Get dict of pec tables, one per line, referencing its (ne, Te) keys
    dpec = {
        '{}-pec'.format(k0): {
            'data': v0['pec'], 'units': v0['pec_units'],
            'ref': (v0['keyne'], v0['keyte']),
            'source': [
                k1 for k1, v1 in dsource.items()
                if v1['long'] == v0['source']
            ][0],
            'dim': '<sigma v>',
            'quant': 'pec',
        }
        for k0, v0 in dout.items()
    }

    # dlines, ordered by increasing wavelength
    inds = np.argsort([v0['lambda0'] for v0 in dout.values()])
    lk0 = np.array(list(dout.keys()), dtype=str)[inds]
    dlines = {
        k0: {
            'ion': dout[k0]['ion'],
            'source': [
                k1 for k1, v1 in dsource.items()
                if v1['long'] == dout[k0]['source']
            ][0],
            'lambda0': dout[k0]['lambda0'],
            'pec': '{}-pec'.format(k0),
            'symbol': dout[k0]['symbol'],
            'type': dout[k0]['type'],
            'transition': dout[k0]['transition'],
        }
        for k0 in lk0
    }

    # Get dict of unique ions
    lion = sorted(set([v0['ion'] for v0 in dout.values()]))
    return dne, dte, dpec, lion, dsource, dlines
# #############################################################################
# #############################################################################
# Main functions
# #############################################################################
def step03_read(
    adas_path,
    pec_as_func=None,
    **kwdargs,
):
    """ Read openadas-formatted files and return a dict with the data

    Provide the full adas file name
    The result is returned as a dict

    pec_as_func is forwarded to the adf15 reader (return pec as a function
    instead of a table)

    example
    -------
        >>> import tofu as tf
        >>> fn = '/adf11/scd74/scd74_ar.dat'
        >>> out = tf.openadas2tofu.step03_read(fn)
        >>> fn = '/adf15/pec40][ar/pec40][ar_ca][ar16.dat'
        >>> out = tf.openadas2tofu.step03_read(fn)
    """
    path_local = _get_PATH_LOCAL()
    # Check whether the local .tofu repo exists, if not recommend tofu-custom
    if path_local is None:
        path = os.path.join(os.path.expanduser('~'), '.tofu', 'openadas2tofu')
        msg = ("You do not seem to have a local ./tofu repository\n"
               + "tofu uses that local repository to store all user-specific "
               + "data and downloads\n"
               + "In particular, openadas files are downloaded and saved in:\n"
               + "\t{}\n".format(path)
               + " => to set-up your local .tofu repo, run in a terminal:\n"
               + "\ttofu-custom")
        raise Exception(msg)

    # Determine whether adas_path is an absolute path or an adas full name
    if os.path.isfile(adas_path):
        pfe = adas_path
    else:
        # make sure adas_path is not understood as absolute local path
        if adas_path[0] == '/':
            adas_path = adas_path[1:]
        # Check file was downloaded locally
        pfe = os.path.join(path_local, adas_path)
        if not os.path.isfile(pfe):
            msg = ("Provided file does not seem to exist:\n"
                   + "\t{}\n".format(pfe)
                   + " => Search it online with "
                   + "tofu.openadas2tofu.step01_search_online()\n"
                   + " => Download it with "
                   + "tofu.openadas2tofu.step02_download()")
            raise FileNotFoundError(msg)

    # Infer the file type (adf11, adf15, ...) from the path
    lc = [ss for ss in _DTYPES.keys() if ss in pfe]
    if not len(lc) == 1:
        msg = ("File type could not be derived from absolute path:\n"
               + "\t- provided: {}\n".format(pfe)
               + "\t- supported: {}".format(sorted(_DTYPES.keys())))
        raise Exception(msg)

    # BUGFIX: forward pec_as_func to the adf15 reader (it was previously
    # accepted but silently ignored), consistent with step03_read_all()
    if lc[0] == 'adf15':
        kwdargs['pec_as_func'] = pec_as_func

    # Dispatch to the matching module-level reader; name lookup instead of
    # eval() (same resolution, no arbitrary-code evaluation)
    func = globals()['_read_{}'.format(lc[0])]
    return func(pfe, **kwdargs)
def step03_read_all(
    element=None, charge=None, typ1=None, typ2=None,
    pec_as_func=None,
    format_for_DataCollection=None,
    dsource0=None,
    dref0=None,
    ddata0=None,
    dlines0=None,
    verb=None, **kwdargs,
):
    """ Read all relevant openadas files for chosen typ1

    Please specify:
        - typ1:
            - 'adf11': ionisation / recombination data
            - 'adf15': pec data
        - element: the symbol of the element

    If typ1 = 'adf11', you can also provide typ2 to specify the coefficients:
        - 'scd': effective ionisation coefficients
        - 'acd': effective electron-impact recombination coefficients
        - 'ccd': effective hydrogen-impact recombination coefficients
        - 'plt': line power due to electron-impact excitation
        - 'prc': line power due to hydrogen-impact excitation
        - 'prb': rad. recombination and bremmstrahlung due to electron-impact

    If typ1 = 'adf15', you can optionally provide a min/max wavelength

    element may be a str / list (inclusion) or a tuple (exclusion);
    charge may be an int / list (inclusion) or a tuple (exclusion)

    The result is returned as a dict

    examples
    --------
        >>> import tofu as tf
        >>> dout = tf.openadas2tofu.step03_read_all(element='ar', typ1='adf11')
        >>> dout = tf.openadas2tofu.step03_read_all(element='ar', typ1='adf15',
                                                    charge=16,
                                                    lambmin=3.94e-10,
                                                    lambmax=4.e-10)
    """
    # --------------------
    # Check whether the local .tofu repo exists, if not recommend tofu-custom
    path_local = _get_PATH_LOCAL()
    if path_local is None:
        path = os.path.join(os.path.expanduser('~'), '.tofu', 'openadas2tofu')
        msg = ("You do not seem to have a local ./tofu repository\n"
               + "tofu uses that local repository to store all user-specific "
               + "data and downloads\n"
               + "In particular, openadas files are downloaded and saved in:\n"
               + "\t{}\n".format(path)
               + " => to set-up your local .tofu repo, run in a terminal:\n"
               + "\ttofu-custom")
        raise Exception(msg)

    # --------------------
    # Check / format input
    if typ1 is None:
        typ1 = 'adf15'
    if not isinstance(typ1, str) or typ1.lower() not in _DTYPES.keys():
        msg = ("Please choose a valid adas file type:\n"
               + "\t- allowed: {}\n".format(_DTYPES.keys())
               + "\t- provided: {}".format(typ1))
        raise Exception(msg)
    typ1 = typ1.lower()
    if typ1 == 'adf11' and typ2 is None:
        # Default: latest (lexicographically last) version on disk of each
        # coefficient type
        typ2 = _DTYPES[typ1]
        fd = os.listdir(os.path.join(path_local, typ1))
        typ2 = [sorted([ss for ss in fd if tt in ss])[-1] for tt in typ2]
    if isinstance(typ2, str):
        typ2 = [typ2]
    if (_DTYPES[typ1] is not None
            and (not isinstance(typ2, list)
                 or not all([any([s1 in ss for s1 in _DTYPES[typ1]])
                             for ss in typ2]))):
        msg = ("typ2 must be a list of valid openadas file types for typ1:\n"
               + "\t- provided: {}\n".format(typ2)
               + "\t- available for {}: {}".format(typ1, _DTYPES[typ1]))
        raise Exception(msg)

    # --------------------
    # Get relevant directory
    # Level 1: Type
    path = _get_subdir_from_pattern(path_local, typ1, mult='err')[0]

    # --------------------
    # element: None (all), str, list (inclusion) or tuple (exclusion)
    c0 = (
        element is None
        or isinstance(element, str)
        or (
            isinstance(element, list)
            and all([isinstance(ee, str) for ee in element])
        )
        or (
            isinstance(element, tuple)
            and all([isinstance(ee, str) for ee in element])
        )
    )
    if not c0:
        msg = "Please choose an element!"
        raise Exception(msg)
    if element is None or isinstance(element, tuple):
        el = _get_available_elements_from_path(path=path, typ1=typ1)
        if element is None:
            element = el
        else:
            # tuple => exclude the listed elements
            element = tuple([ee.lower() for ee in element])
            element = [ee for ee in el if ee not in element]
    if isinstance(element, str):
        element = [element]
    element = [ee.lower() for ee in element]

    # --------------------
    # charge: int, list (inclusion) or tuple (exclusion)
    if charge is not None:
        c0 = (
            isinstance(charge, int)
            or (
                isinstance(charge, list)
                and all([isinstance(cc, int) for cc in charge])
            )
            or (
                isinstance(charge, tuple)
                and all([isinstance(cc, int) for cc in charge])
            )
        )
        if not c0:
            msg = ("Arg charge must be a int or list (e.g.: 16 or [0])\n"
                   + "\t- provided: {}".format(charge))
            raise Exception(msg)
        if isinstance(charge, int):
            charge = [charge]
        # normalize to the '<charge>.dat' file-name suffix used below
        if isinstance(charge, list):
            charge = ['{}.dat'.format(cc) for cc in charge]
        elif isinstance(charge, tuple):
            charge = tuple(['{}.dat'.format(cc) for cc in charge])

    if format_for_DataCollection is None:
        format_for_DataCollection = False
    if verb is None:
        verb = True

    # --------------------
    # Get list of relevant directories per element
    # Level 2: element or typ2
    if typ1 == 'adf11':
        lpath = [
            _get_subdir_from_pattern(path, tt, mult='err')[0]
            for tt in typ2
        ]
    elif typ1 == 'adf15':
        lpath = np.concatenate([
            _get_subdir_from_pattern(
                path, ['pec', '][{}'.format(ee)], mult=True,
            )
            for ee in element
        ]).tolist()

    # --------------------
    # Get list of relevant files pfe
    lpfe = list(itt.chain.from_iterable([[
        os.path.join(path, ff) for ff in os.listdir(path)
        if (
            os.path.isfile(os.path.join(path, ff))
            and ff[-4:] == '.dat'
            and any(['][{}'.format(ee) in ff for ee in element])
        )]
        for path in lpath
    ]))
    if typ1 == 'adf15':
        kwdargs['pec_as_func'] = pec_as_func
        if isinstance(charge, list):
            # inclusion filter on the charge digits in the file name
            lpfe = [
                ff for ff in lpfe
                if any([
                    ''.join([ss for ss in ff.split('][')[-1] if ss.isdigit()])
                    + '.dat'
                    == cc
                    for cc in charge
                ])
            ]
        elif isinstance(charge, tuple):
            # exclusion filter
            lpfe = [
                ff for ff in lpfe
                if not any([
                    ''.join([ss for ss in ff.split('][')[-1] if ss.isdigit()])
                    + '.dat'
                    == cc
                    for cc in charge
                ])
            ]

    # --------------------
    # Extract data from each file, accumulating into a single dout
    func = eval('_read_{}'.format(typ1))
    dout = {}
    for pfe in lpfe:
        if verb is True:
            msg = "\tLoading data from {}".format(pfe)
            print(msg)
        dout = func(pfe, dout=dout, **kwdargs)
    if typ1 == 'adf15' and format_for_DataCollection is True:
        return _format_for_DataCollection_adf15(
            dout,
            dsource0=dsource0,
            dref0=dref0,
            ddata0=ddata0,
            dlines0=dlines0,
        )
    else:
        return dout
# #############################################################################
# Specialized functions for ADF 11
# #############################################################################
def _read_adf11(pfe, deg=None, dout=None):
    """ Read an openadas adf11 file and fill dout with one entry per charge

    Data are tabulated as log10 values on a (log10(ne), log10(te)) grid; a
    2d spline of degree deg is fitted for each charge state and stored as
    a 'func' entry under the relevant coefficient name.

    NOTE(review): only ['acd', 'ccd', 'scd', 'plt', 'prb'] are parsed here;
    other adf11 sub-types fall through and dout is returned unchanged --
    confirm whether this is intentional.
    """
    if deg is None:
        deg = _DEG
    if dout is None:
        dout = {}

    # Get second order file type
    typ1 = [vv for vv in _DTYPES['adf11'] if vv in pfe]
    if len(typ1) != 1:
        msg = ("Second order file type could not be inferred from file name!\n"
               + "\t- available: {}\n".format(_DTYPES['adf11'])
               + "\t- provided: {}".format(pfe))
        raise Exception(msg)
    typ1 = typ1[0]

    # Get element from the file name (e.g. 'scd74_ar.dat' -> 'ar')
    elem = pfe[:-4].split('_')[1]

    # Separator lines used in adf11 files
    comline = '-'*60
    comline2 = 'C'+comline

    if typ1 in ['acd', 'ccd', 'scd', 'plt', 'prb']:
        # read blocks
        with open(pfe) as search:
            for ii, line in enumerate(search):

                # 'C-----...' marks the end of the data section
                if comline2 in line:
                    break

                # Get atomic number (transitions) stored in this file
                if ii == 0:
                    lstr = line.split('/')
                    lin = [ss for ss in lstr[0].strip().split(' ')
                           if ss.strip() != '']
                    lc = [
                        len(lin) == 5 and all([ss.isdigit() for ss in lin]),
                        elem.upper() in lstr[1],
                        # 'ADF11' in lstr[-1],
                    ]
                    if not all(lc):
                        msg = ("File header format seems to have changed!\n"
                               + "\t- pfe: {}\n".format(pfe)
                               + "\t- lc = {}\n".format(lc)
                               + "\t- lstr = {}".format(lstr))
                        raise Exception(msg)
                    # Z, nb of ne points, nb of te points, first/last index
                    Z, nne, nte, q0, qend = map(int, lin)
                    nelog10 = np.array([])
                    telog10 = np.array([])
                    in_ne = True
                    continue

                # plain '-----' separator lines are skipped
                if comline in line:
                    continue

                # Get nelog10 (accumulate until nne values are read)
                if in_ne:
                    li = [ss for ss in line.strip().split(' ')
                          if ss.strip() != '']
                    nelog10 = np.append(nelog10, np.array(li, dtype=float))
                    if nelog10.size == nne:
                        in_ne = False
                        in_te = True

                # Get telog10 (accumulate until nte values are read)
                elif in_te is True:
                    li = [ss for ss in line.strip().split(' ')
                          if ss.strip() != '']
                    telog10 = np.append(telog10, np.array(li, dtype=float))
                    if telog10.size == nte:
                        in_te = False
                        in_ion = True

                # Get ion block header => charge state of the coming table
                # NOTE(review): assumes a 'Z1=' header appears before any
                # coefficient line (otherwise 'charge' is unbound) -- this
                # matches the adf11 format as far as visible here
                elif (in_ion is True and 'Z1=' in line
                      and ('------/' in line and 'DATE=' in line)):
                    nion = int(
                        line[line.index('Z1=')+len('Z1='):].split('/')[0])
                    # scd/plt tables are indexed by the initial charge state
                    if typ1 in ['scd', 'plt']:
                        charge = nion - 1
                    else:
                        charge = nion
                    coefslog10 = np.array([])

                # Accumulate the coefficient table for the current charge
                elif in_ion is True and charge is not None:
                    li = [ss for ss in line.strip().split(' ')
                          if ss.strip() != '']
                    coefslog10 = np.append(coefslog10,
                                           np.array(li, dtype=float))
                    if coefslog10.size == nne*nte:
                        # Table complete => fit spline and store
                        key = '{}{}'.format(elem, charge)
                        tkv = [('element', elem), ('Z', Z), ('charge', charge)]
                        if key in dout.keys():
                            assert all([dout[key][ss] == vv for ss, vv in tkv])
                        else:
                            dout[key] = {ss: vv for ss, vv in tkv}
                        if typ1 == 'scd':
                            # nelog10+6 to convert /cm3 -> /m3
                            # coefslog10-6 to convert cm3/s -> m3/s
                            func = scpRectSpl(nelog10+6, telog10,
                                              coefslog10.reshape((nne, nte))-6,
                                              kx=deg, ky=deg)
                            dout[key]['ionis'] = {'func': func,
                                                  'type': 'log10_nete',
                                                  'units': 'log10(m3/s)',
                                                  'source': pfe}
                        elif typ1 == 'acd':
                            # nelog10+6 to convert /cm3 -> /m3
                            # coefslog10-6 to convert cm3/s -> m3/s
                            func = scpRectSpl(nelog10+6, telog10,
                                              coefslog10.reshape((nne, nte))-6,
                                              kx=deg, ky=deg)
                            dout[key]['recomb'] = {'func': func,
                                                   'type': 'log10_nete',
                                                   'units': 'log10(m3/s)',
                                                   'source': pfe}
                        elif typ1 == 'ccd':
                            # nelog10+6 to convert /cm3 -> /m3
                            # coefslog10-6 to convert cm3/s -> m3/s
                            func = scpRectSpl(nelog10+6, telog10,
                                              coefslog10.reshape((nne, nte))-6,
                                              kx=deg, ky=deg)
                            dout[key]['recomb_ce'] = {'func': func,
                                                      'type': 'log10_nete',
                                                      'units': 'log10(m3/s)',
                                                      'source': pfe}
                        elif typ1 == 'plt':
                            # nelog10+6 to convert /cm3 -> /m3
                            # coefslog10+6 to convert W.cm3 -> W.m3
                            func = scpRectSpl(nelog10+6, telog10,
                                              coefslog10.reshape((nne, nte))+6,
                                              kx=deg, ky=deg)
                            dout[key]['rad_bb'] = {'func': func,
                                                   'type': 'log10_nete',
                                                   'units': 'log10(W.m3)',
                                                   'source': pfe}
                        elif typ1 == 'prb':
                            # nelog10+6 to convert /cm3 -> /m3
                            # coefslog10+6 to convert W.cm3 -> W.m3
                            func = scpRectSpl(nelog10+6, telog10,
                                              coefslog10.reshape((nne, nte))+6,
                                              kx=deg, ky=deg)
                            dout[key]['rad_fffb'] = {'func': func,
                                                     'type': 'log10_nete',
                                                     'units': 'log10(W.m3)',
                                                     'source': pfe}
                        # stop once the last charge state has been read
                        if nion == Z:
                            break
    return dout
# #############################################################################
# Specialized functions for ADF 15
# #############################################################################
def _get_adf15_key(elem, charge, isoel, typ0, typ1):
    """Build the unique dout key for one adf15 transition."""
    parts = (elem, charge, isoel, typ0, typ1)
    return '{}{}_{}_oa_{}_{}'.format(*parts)
def _read_adf15(
    pfe,
    dout=None,
    lambmin=None,
    lambmax=None,
    pec_as_func=None,
    deg=None,
):
    """
    Here lambmin and lambmax are provided in m

    Read an openadas adf15 (photon emissivity coefficient) file and add one
    entry per transition to dout, keyed by _get_adf15_key().  When
    pec_as_func is True the pec is stored as an interpolating function of
    (Te, ne) instead of the raw (nne, nte) table.
    """
    if deg is None:
        deg = _DEG
    if pec_as_func is None:
        pec_as_func = _PECASFUNC
    if dout is None:
        dout = {}

    # Get summary of transitions
    flagblock = '/isel ='
    flag0 = 'superstage partition information'

    # Get file markers from name (elem, charge, typ0, typ1)
    # e.g.: .../pec40][ar/pec40][ar_ca][ar16.dat
    typ0, typ1, elemq = pfe.split('][')[1:]
    ind = re.search(r'\d', elemq).start()
    elem = elemq[:ind].title()
    charge = int(elemq[ind:-4])
    assert elem.lower() in typ0[:2]
    assert elem.lower() == typ1.split('_')[0]
    typ0 = typ0[len(elem)+1:]
    typ1 = typ1.split('_')[1]

    # Extract data from file (simple line-by-line state machine)
    nlines, nblock = None, 0
    in_ne, in_te, in_pec, in_tab, itab = False, False, False, False, np.inf
    skip = False
    with open(pfe) as search:
        for ii, line in enumerate(search):

            # Get number of lines (transitions) stored in this file
            if ii == 0:
                lstr = line.split('/')
                nlines = int(lstr[0].replace(' ', ''))
                continue

            # Get info about the transition being scanned (block)
            if flagblock in line and 'C' not in line and nblock < nlines:
                lstr = [kk for kk in line.rstrip().split(' ') if len(kk) > 0]
                # wavelength is given in Angstrom in the file => m
                lamb = float(lstr[0])*1.e-10
                isoel = nblock + 1
                nblock += 1
                # skip blocks outside the requested wavelength interval
                c0 = ((lambmin is not None and lamb < lambmin)
                      or (lambmax is not None and lamb > lambmax))
                if c0:
                    skip = True
                    continue
                skip = False
                nne, nte = int(lstr[1]), int(lstr[2])
                typ = [ss[ss.index('type=')+len('type='):ss.index('/ispb')]
                       for ss in lstr[3:] if 'type=' in ss]
                assert len(typ) == 1
                # To be updated : proper reading from line
                in_ne = True
                ne = np.array([])
                te = np.array([])
                pec = np.full((nne*nte,), np.nan)
                ind = 0
                # il = 0
                continue
            if 'root partition information' in line and skip is True:
                skip = False

            # Check lamb is ok
            if skip is True:
                continue

            # Get ne for the transition being scanned (block)
            if in_ne is True:
                ne = np.append(ne,
                               np.array(line.rstrip().strip().split(' '),
                                        dtype=float))
                if ne.size == nne:
                    in_ne = False
                    in_te = True

            # Get te for the transition being scanned (block)
            elif in_te is True:
                te = np.append(te,
                               np.array(line.rstrip().strip().split(' '),
                                        dtype=float))
                if te.size == nte:
                    in_te = False
                    in_pec = True

            # Get pec for the transition being scanned (block)
            elif in_pec is True:
                data = np.array(line.rstrip().strip().split(' '),
                                dtype=float)
                pec[ind:ind+data.size] = data
                # pec[il, :] = data
                ind += data.size
                # il += 1
                if ind == pec.size:
                    # block complete => store the transition
                    in_pec = False
                    key = _get_adf15_key(elem, charge, isoel, typ0, typ1)
                    # PEC reshaping and conversion to cm3/s -> m3/s
                    pec = pec.reshape((nne, nte)) * 1e-6
                    # log(ne)+6 to convert /cm3 -> /m3
                    ne = ne*1e6
                    if pec_as_func is True:
                        # fit a log-log spline, exposed as pec(Te=..., ne=...)
                        pec_rec = scpRectSpl(
                            np.log(ne),
                            np.log(te),
                            np.log(pec),
                            kx=deg,
                            ky=deg,
                        )
                        # NOTE: the name 'pec' is deliberately re-bound from
                        # the array to this callable => dout stores the func
                        def pec(Te=None, ne=None, pec_rec=pec_rec):
                            return np.exp(pec_rec(np.log(ne), np.log(Te)))
                    dout[key] = {
                        'lambda0': lamb,
                        'ion': '{}{}+'.format(elem, charge),
                        'charge': charge,
                        'element': elem,
                        'symbol': '{}{}-{}'.format(typ0, typ1, isoel),
                        'source': pfe,
                        'type': typ[0],
                        'ne': ne,
                        'ne_units': '/m3',
                        'te': te,
                        'te_units': 'eV',
                        'pec': pec,
                        'pec_type': 'f(ne, Te)',
                        'pec_units': 'm3/s',
                    }

            # Get transitions from table at the end (starts 6 lines after
            # the 'photon emissivity atomic transitions' marker)
            if 'photon emissivity atomic transitions' in line:
                itab = ii + 6
            if ii == itab:
                in_tab = True
            if in_tab is True:
                lstr = [kk for kk in line.rstrip().split(' ') if len(kk) > 0]
                isoel = int(lstr[1])
                lamb = float(lstr[2])*1.e-10
                key = _get_adf15_key(elem, charge, isoel, typ0, typ1)
                c0 = ((lambmin is None or lambmin < lamb)
                      and (lambmax is None or lambmax > lamb))
                if c0 and key not in dout.keys():
                    # NOTE(review): .format(key) has no placeholder here, so
                    # key is not actually shown in the message
                    msg = ("Inconsistency in file {}:\n".format(pfe)
                           + "\t- line should be present".format(key))
                    raise Exception(msg)
                if key in dout.keys():
                    # cross-check wavelength and transition type vs block
                    if dout[key]['lambda0'] != lamb:
                        msg = "Inconsistency in file {}".format(pfe)
                        raise Exception(msg)
                    c0 = (dout[key]['type'] not in lstr
                          or lstr.index(dout[key]['type']) < 4)
                    if c0:
                        msg = ("Inconsistency in table, type not found:\n"
                               + "\t- expected: {}\n".format(dout[key]['type'])
                               + "\t- line: {}".format(line))
                        raise Exception(msg)
                    trans = lstr[3:lstr.index(dout[key]['type'])]
                    dout[key]['transition'] = ''.join(trans)
                if isoel == nlines:
                    in_tab = False
    assert all(['transition' in vv.keys() for vv in dout.values()])
    return dout
| ToFuProject/tofu | tofu/openadas2tofu/_read_files.py | Python | mit | 35,622 |
from django.core.management.base import BaseCommand, CommandError
from django.core.exceptions import ValidationError
from dateutil.parser import parse
from django.utils import dateparse
from emails.models import Email, EmailReference
import time
import os
import sys
# Location where the archive email files are stored
# (resolved relative to this module's directory)
ARCHIVE_DIR = os.path.dirname(os.path.dirname(__file__))
ARCHIVE_DIR = os.path.join(ARCHIVE_DIR, '../../archive/files')

# Indicates the end of an email's contents
EMAIL_END_TEXT = '-------------- next part --------------'

# NOTE(review): this module-level list appears unused -- handle() shadows it
# with Command.file_names(); confirm before removing
fnames = ['2007-December.txt', '2008-January.txt', '2008-February.txt',]
class Command(BaseCommand):
    """Load mailman-style email archive files into the Email/EmailReference models."""

    help = 'Load email data from archive file(s) into the model'

    def handle(self, *args, **options):
        """Entry point: load every archive file that exists on disk."""
        fnames = self.file_names()
        for f in fnames:
            fname = os.path.join(ARCHIVE_DIR, f)
            if os.path.isfile(fname):
                self.stdout.write('Loading ' + f)
                self.load_data(fname)

    def load_data(self, fname):
        """Parse one archive file line by line and persist each email found."""
        with open(fname, 'r') as afile:
            is_beginning_of_file = True
            are_headers_over = False
            is_body_over = False
            email = ''
            name = ''
            e_date = ''
            subject = ''
            body = ''
            msg_id = ''
            in_reply = None
            all_references = None
            for line in afile:
                if is_beginning_of_file:
                    is_beginning_of_file = False
                if are_headers_over:
                    # Email body begins
                    if line.startswith(EMAIL_END_TEXT):
                        are_headers_over = False
                        is_body_over = True
                    else:
                        body += line
                else:
                    is_body_over = False
                    if line.startswith('From:'):
                        email, name = self.get_email_and_name(line)
                    if line.startswith('From '):
                        # The date mentioned in this field does not use
                        # local timezone. Hence, this date would give the
                        # correct time ordering.
                        # TODO: Incorporate time zone with the email date
                        e_date = self.get_date(line)
                    if line.startswith('Subject:'):
                        # split(..., 1) so subjects containing ':' (e.g.
                        # 'Re: foo') are not truncated (bugfix)
                        subject = line.split(':', 1)[1].strip(). \
                            replace('?UTF-8?Q?', '').replace('?=', '')
                    if line.startswith('In-Reply-To:'):
                        in_reply = line.split(':', 1)[1].strip() \
                            .replace('<', '').replace('>', '')
                    if line.startswith('References:'):
                        all_references = self.get_all_references(line)
                    if line.startswith('Message-ID:'):
                        msg_id = line.split(':', 1)[1].strip() \
                            .replace('<', '').replace('>', '')
                        # Message ID is the last header that appears in an email
                        are_headers_over = True
                if is_body_over:
                    # Save this email into the model (database)
                    if email and msg_id:
                        # If there is any multi-part message (perhaps in case
                        # of a message digest?), the 'From ' part is missing.
                        # So, can't obtain the (timezone free) date in this case
                        # TODO: Fix this
                        if e_date:
                            new_email = self.save_email(email, name, e_date,
                                                        subject, body,
                                                        msg_id, in_reply,
                                                        all_references)
                            self.save_references(new_email, all_references)
                    # Reset all the fields to store the next entry
                    email = ''
                    name = ''
                    e_date = ''
                    subject = ''
                    body = ''
                    msg_id = ''
                    in_reply = None
                    all_references = None
            # Save the last email from the file -- this didn't end with the
            # EMAIL_END_TEXT
            if email and msg_id:
                # Same multi-part caveat as above: no 'From ' line => no date
                if e_date:
                    new_email = self.save_email(email, name, e_date, subject,
                                                body, msg_id, in_reply,
                                                all_references)
                    self.save_references(new_email, all_references)

    def get_email_and_name(self, text):
        """Extract (email, name) from a 'From: addr at host (Name)' header."""
        # split(..., 1): strip only the header name, keep the value intact
        email_and_name = text.split(':', 1)[1].split('(')
        # A particular email address was in this format
        email = email_and_name[0].strip().replace(' at ', '@'). \
            replace('?UTF-8?Q?', '').replace('?=', '')
        name = None
        if len(email_and_name) > 1:
            name = email_and_name[1].strip().replace(')', '')
        return (email, name)

    def get_date(self, text):
        """Parse the date from a 'From sender weekday month day ...' line."""
        e_date = text.strip().split(' ')[4:]
        e_date = ' '.join(e_date)
        e_date = parse(e_date)
        return e_date

    def get_all_references(self, text):
        """Return the list of message-ids from a 'References:' header."""
        all_references = text.split(':', 1)[1].strip().replace('<', '')
        # There could be multiple references
        all_references = all_references.split('>')
        # Skip the last empty item
        all_references = all_references[:len(all_references)-1]
        return all_references

    def save_email(self, email, name, e_date, subject, body, msg_id, in_reply, all_references):
        """Validate and save one Email row; return the (possibly unsaved) instance."""
        new_email = Email(email_from=email, email_from_name=name,
                          email_date=e_date, subject=subject, body=body,
                          message_id=msg_id, in_reply_to=in_reply)
        try:
            new_email.full_clean()
            new_email.save()
        except ValidationError as e:
            # Do something based on the errors contained in e.message_dict.
            # Display them to a user, or handle them programatically.
            self.stderr.write('Failed to save email! ' + str(e))
        return new_email

    def save_references(self, email, all_references):
        """Save an EmailReference row for each reference that resolves to an Email."""
        if all_references:
            for ref in all_references:
                try:
                    new_reference = EmailReference(
                        email_id=email,
                        reference_id=Email.objects.get(
                            message_id__exact=ref.strip()))
                except Email.DoesNotExist as dne:
                    self.stderr.write('Error: ' + str(dne) + ' ' + str(email) +
                                      ' ' + str(ref))
                    new_reference = None
                if new_reference:
                    try:
                        new_reference.full_clean()
                        new_reference.save()
                    except ValidationError as refe:
                        self.stderr.write('Failed to save reference(s) '
                                          + str(refe))

    def file_names(self):
        """Return 'YYYY-Month.txt' names for every month of the configured years."""
        years = [2014,]
        months = ['January', 'February', 'March', 'April', 'May', 'June',
                  'July', 'August', 'September', 'October', 'November',
                  'December',]
        names = [str(y) + '-' + m + '.txt' for y in years for m in months]
        return names
| barun-saha/django_mailman_search | emails/management/commands/load_archive.py | Python | gpl-2.0 | 6,276 |
from django.contrib import admin
from shares_count.models import Share
@admin.register(Share)
class ShareAdmin(admin.ModelAdmin):
    """Admin change-list configuration for Share counters."""

    # Columns shown in the change list
    list_display = ('created', 'modified', 'shares', 'content_object')
| vlinhart/django_shares_count | shares_count/admin.py | Python | mit | 221 |
'''
The built-in open / file functions are used to create, open and edit
files, as shown in the example below.
The os module provides the functions needed to rename and delete files.
'''
import os
import string
def replace(file, search_for, replace_with):
    """Replace all occurrences of search_for by replace_with in a text file.

    The original file is kept as a ``.bak`` backup; the new content is
    written to a ``.tmp`` file which then takes the original's place.
    """
    back = os.path.splitext(file)[0] + ".bak"
    temp = os.path.splitext(file)[0] + ".tmp"
    try:
        # remove old temp file, if any
        os.remove(temp)
    except os.error:
        pass
    # write the modified copy to the temp file (context managers ensure the
    # handles are closed even on error)
    with open(file) as fi, open(temp, "w") as fo:
        for s in fi.readlines():
            # str.replace instead of string.replace (removed in Python 3)
            fo.write(s.replace(search_for, replace_with))
    try:
        # remove old backup file, if any
        os.remove(back)
    except os.error:
        pass
    # rename original to backup...
    os.rename(file, back)
    # ...and temporary to original (BUGFIX: this rename previously existed
    # only inside a comment, so the replacement was never installed)
    os.rename(temp, file)
# # try it out!
# Exercise replace() on the bundled sample file; the call on the next line
# (outside this span) swaps the words back so the sample ends up unchanged.
file = "samples/sample.txt"   # NOTE(review): shadows the builtin name 'file'
replace(file, "hello", "tjena")
replace(file, "tjena", "hello") | iamweilee/pylearn | os-example-1.py | Python | mit | 933 |
# Copyright (c) 2011 Yubico AB
# See the file COPYING for licence statement.
import sys
import unittest
import pyhsm
import test_common
class TestUtil(test_common.YHSM_TestCase):
    """Exercise YubiHSM communication with debug output enabled, while
    silencing the debug chatter that gets written to stderr."""

    def setUp(self):
        # Keep the real stderr so tearDown can restore it
        self.saved_stderr = sys.stderr
        # Discard everything written to stderr from these tests (i.e. debug output
        # from YubiHSM communication routines with debugging enabled).
        sys.stderr = DiscardOutput()
        DontChange = True # we test debug output from YubiHSM communication here
        test_common.YHSM_TestCase.setUp(self, debug = DontChange)

    def test_debug_output(self):
        """ Test debug output of YubiHSM communication. """
        self.assertTrue(self.hsm.echo('testing'))
        self.assertTrue(self.hsm.drain())

    def tearDown(self):
        # Close YubiHSM interface before restoring stderr, to avoid output
        # when it is closed.
        self.hsm = None
        sys.stderr = self.saved_stderr
class DiscardOutput(object):
    """Minimal file-like sink that swallows everything written to it.

    Used as a stand-in for sys.stderr so debug output is discarded.
    """

    def write(self, text):
        # Intentionally drop the text
        pass

    def flush(self):
        # File-like API completeness: stderr replacements may receive
        # flush() calls (e.g. from flushing prints or at interpreter
        # shutdown); without this an AttributeError could be raised
        pass
| Yubico/python-pyhsm | test/test_stick.py | Python | bsd-2-clause | 1,029 |
#!/usr/bin/env python
# add paths
import os, sys
for p in os.environ['PATH'].split(':'): sys.path.append(p)
# import modules
from re import findall
from itertools import product
from netCDF4 import Dataset as nc
from optparse import OptionParser
from numpy.ma import masked_array
from filespecs import BiasCorrectFile
from biascorrecter import BiasCorrecter
from os.path import split, splitext, sep
from numpy import intersect1d, zeros, ones
import ruamel.yaml
# NOTE(review): Python 2 script (print statements below) -- do not run
# under Python 3 without porting.

# Command-line interface
parser = OptionParser()
parser.add_option("-i", "--infile", dest = "infile", default = "", type = "string",
                  help = "Input aggregated file", metavar = "FILE")
parser.add_option("-r", "--reffile", dest = "reffile", default = "", type = "string",
                  help = "Reference data netcdf file", metavar = "FILE")
parser.add_option("-a", "--agglvl", dest = "agglvl", default = "gadm0", type = "string",
                  help = "Aggregation level (e.g., gadm0, fpu, kg)")
parser.add_option("-o", "--outdir", dest = "outdir", default = "", type = "string",
                  help = "Output directory to save results")
parser.add_option("-p", "--params", dest = "params", default = "", type = "string",
                  help = "YAML params file")
options, args = parser.parse_args()

infile = options.infile
reffile = options.reffile
agglvl = options.agglvl
outdir = options.outdir

# Bias-correction configuration from the YAML params file:
# dt = detrend methods, mp = moving-average flags, cr = correction methods
params = ruamel.yaml.load(open(options.params, 'r'), ruamel.yaml.RoundTripLoader)
dt = params['dt']
mp = params['mp']
cr = params['cr']
ndt, nmp, ncr = len(dt), len(mp), len(cr)

crop = split(infile)[1].split('_')[3] # pull crop name from file name

with nc(reffile) as fref: # pull reference data
    aref = fref.variables[agglvl][:]
    aggunits = fref.variables[agglvl].units
    agglongname = fref.variables[agglvl].long_name
    tref = fref.variables['time'][:]
    tref_units = fref.variables['time'].units
    # reference yields are taken without detrending, with moving average
    dtidx = fref.variables['dt'].long_name.split(', ').index('none')
    mpidx = fref.variables['mp'].long_name.split(', ').index('true')
    var = 'yield_' + crop
    if var in fref.variables:
        yield_ref = fref.variables[var][:, :, dtidx, mpidx]
    else:
        print 'Crop %s unavailable in reference file %s. Exiting . . .' % (crop, reffile)
        sys.exit()

with nc(infile) as fin: # pull input data
    ain = fin.variables[agglvl][:]
    tin = fin.variables['time'][:]
    tin_units = fin.variables['time'].units
    scen = fin.variables['scen'].long_name.split(', ')
    # simulated yields are taken summed over irrigation settings
    sum_idx = fin.variables['irr'].long_name.split(', ').index('sum')
    var = 'yield_' + agglvl
    if var in fin.variables:
        yield_in = fin.variables[var][:, :, :, sum_idx]
    else:
        print 'Yield variable not found in file %s. Exiting . . .' % infile
        sys.exit()

# Convert relative time axes to absolute years using the units strings
tref += int(findall(r'\d+', tref_units)[0]) # get reference time
tin += int(findall(r'\d+', tin_units)[0]) - 1 # get simulation time

aggs = intersect1d(ain, aref) # find common gadm indices
naggs, ntime, nscen = len(aggs), len(tin), len(scen)
if not naggs: raise Exception('No common aggregates')

# Restrict simulated and reference yields to the common aggregates
yield_sim_common = masked_array(zeros((naggs, len(tin), nscen)), mask = ones((naggs, len(tin), nscen)))
yield_ref_common = masked_array(zeros((naggs, len(tref))), mask = ones((naggs, len(tref))))
for i in range(naggs):
    yield_sim_common[i] = yield_in[list(ain).index(aggs[i])]
    yield_ref_common[i] = yield_ref[list(aref).index(aggs[i])]

# Bias-correct each (aggregate, scenario) series for every combination of
# detrend method x moving-average flag x correction method
sh = (naggs, ntime, nscen, ndt, nmp, ncr)
yield_detr = masked_array(zeros(sh), mask = ones(sh))
yield_retr = masked_array(zeros(sh), mask = ones(sh))
for g, s in product(range(naggs), range(nscen)):
    yref, ysim = yield_ref_common[g], yield_sim_common[g, :, s]
    # skip fully-masked series
    if not yref.mask.all() and not ysim.mask.all():
        for d, m, c in product(range(ndt), range(nmp), range(ncr)):
            bc = BiasCorrecter(dt[d], mp[m], cr[c])
            detr, retr = bc.correct(ysim, yref, tin, tref)
            yield_detr[g, :, s, d, m, c] = detr
            yield_retr[g, :, s, d, m, c] = retr

# Write the detrended / retrended yields to the output netcdf file
fn = outdir + sep + splitext(split(infile)[1])[0] + '.biascorr.nc4' # create file
fout = BiasCorrectFile(fn, aggs, agglvl, aggunits, agglongname, tin, scen, dt, mp, cr)
fout.append('yield_detrend', yield_detr, (agglvl, 'time', 'scen', 'dt', 'mp', 'cr'), 't ha-1 yr-1', 'average detrended yield') # append to file
fout.append('yield_retrend', yield_retr, (agglvl, 'time', 'scen', 'dt', 'mp', 'cr'), 't ha-1 yr-1', 'average retrended yield')
| RDCEP/ggcmi | bin/biascorr/biascorrect.py | Python | agpl-3.0 | 4,488 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
# Sphinx configuration for the rf-django-envconfig documentation.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.autosummary',
    'sphinx.ext.coverage',
    'sphinx.ext.doctest',
    'sphinx.ext.extlinks',
    'sphinx.ext.ifconfig',
    'sphinx.ext.napoleon',
    'sphinx.ext.todo',
    'sphinx.ext.viewcode',
]
# Spell checking is opt-in (set the SPELLCHECK environment variable) because
# sphinxcontrib-spelling is an optional dependency.
if os.getenv('SPELLCHECK'):
    extensions += 'sphinxcontrib.spelling',
    spelling_show_suggestions = True
    spelling_lang = 'en_US'
source_suffix = '.rst'
master_doc = 'index'
project = 'rf-django-envconfig'
year = '2016'
author = 'René Fleschenberg'
copyright = '{0}, {1}'.format(year, author)
version = release = '0.1.0'
pygments_style = 'trac'
templates_path = ['.']
# Shortcut roles: :issue:`12` / :pr:`34` link straight to GitHub.
extlinks = {
    'issue': ('https://github.com/rfleschenberg/rf-django-envconfig/issues/%s', '#'),
    'pr': ('https://github.com/rfleschenberg/rf-django-envconfig/pull/%s', 'PR #'),
}
import sphinx_py3doc_enhanced_theme
html_theme = "sphinx_py3doc_enhanced_theme"
html_theme_path = [sphinx_py3doc_enhanced_theme.get_html_theme_path()]
html_theme_options = {
    'githuburl': 'https://github.com/rfleschenberg/rf-django-envconfig/'
}
html_use_smartypants = True
html_last_updated_fmt = '%b %d, %Y'
html_split_index = False
html_sidebars = {
    '**': ['searchbox.html', 'globaltoc.html', 'sourcelink.html'],
}
html_short_title = '%s-%s' % (project, version)
# Napoleon (Google/NumPy docstring) rendering options.
napoleon_use_ivar = True
napoleon_use_rtype = False
napoleon_use_param = False
| rfleschenberg/rf-django-envconfig | docs/conf.py | Python | bsd-2-clause | 1,450 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from openerp.osv import fields, osv, orm
from openerp.exceptions import UserError
from openerp.tools.translate import _
class make_procurement(osv.osv_memory):
    """Transient wizard (legacy OpenERP osv API) that creates and confirms a
    procurement.order for a selected product/warehouse."""
    _name = 'make.procurement'
    _description = 'Make Procurements'
    def onchange_product_id(self, cr, uid, ids, prod_id):
        """ On Change of Product ID getting the value of related UoM.
        @param self: The object pointer.
        @param cr: A database cursor
        @param uid: ID of the user currently logged in
        @param ids: List of IDs selected
        @param prod_id: Changed ID of Product
        @return: A dictionary which gives the UoM of the changed Product
        """
        product = self.pool.get('product.product').browse(cr, uid, prod_id)
        return {'value': {'uom_id': product.uom_id.id}}
    # Legacy-API field declarations for the wizard form.
    _columns = {
        'qty': fields.float('Quantity', digits=(16,2), required=True),
        'product_id': fields.many2one('product.product', 'Product', required=True, readonly=1),
        'uom_id': fields.many2one('product.uom', 'Unit of Measure', required=True),
        'warehouse_id': fields.many2one('stock.warehouse', 'Warehouse', required=True),
        'date_planned': fields.date('Planned Date', required=True),
        'route_ids': fields.many2many('stock.location.route', string='Preferred Routes'),
    }
    _defaults = {
        'date_planned': fields.date.context_today,
        'qty': lambda *args: 1.0,
    }
    def make_procurement(self, cr, uid, ids, context=None):
        """ Creates procurement order for selected product.
        @param self: The object pointer.
        @param cr: A database cursor
        @param uid: ID of the user currently logged in
        @param ids: List of IDs selected
        @param context: A standard dictionary
        @return: A dictionary which loads Procurement form view.
        """
        user = self.pool.get('res.users').browse(cr, uid, uid, context=context).login
        wh_obj = self.pool.get('stock.warehouse')
        procurement_obj = self.pool.get('procurement.order')
        data_obj = self.pool.get('ir.model.data')
        for proc in self.browse(cr, uid, ids, context=context):
            wh = wh_obj.browse(cr, uid, proc.warehouse_id.id, context=context)
            procure_id = procurement_obj.create(cr, uid, {
                'name':'INT: '+str(user),
                'date_planned': proc.date_planned,
                'product_id': proc.product_id.id,
                'product_qty': proc.qty,
                'product_uom': proc.uom_id.id,
                'warehouse_id': proc.warehouse_id.id,
                'location_id': wh.lot_stock_id.id,
                'company_id': wh.company_id.id,
                # (6, 0, ids) replaces the whole many2many relation.
                'route_ids': [(6, 0, proc.route_ids.ids)],
            })
            # Confirm the freshly created procurement right away.
            procurement_obj.signal_workflow(cr, uid, [procure_id], 'button_confirm')
        # Resolve the tree/form views used to display the result.
        id2 = data_obj._get_id(cr, uid, 'procurement', 'procurement_tree_view')
        id3 = data_obj._get_id(cr, uid, 'procurement', 'procurement_form_view')
        if id2:
            id2 = data_obj.browse(cr, uid, id2, context=context).res_id
        if id3:
            id3 = data_obj.browse(cr, uid, id3, context=context).res_id
        return {
            'view_type': 'form',
            'view_mode': 'tree,form',
            'res_model': 'procurement.order',
            'res_id' : procure_id,
            'views': [(id3,'form'),(id2,'tree')],
            'type': 'ir.actions.act_window',
        }
    def default_get(self, cr, uid, fields, context=None):
        """ To get default values for the object.
        @param self: The object pointer.
        @param cr: A database cursor
        @param uid: ID of the user currently logged in
        @param fields: List of fields for which we want default values
        @param context: A standard dictionary
        @return: A dictionary which of fields with values.
        """
        if context is None:
            context = {}
        record_id = context.get('active_id')
        # When launched from a product template, resolve it to its single
        # variant; templates with several variants are rejected.
        if context.get('active_model') == 'product.template':
            product_ids = self.pool.get('product.product').search(cr, uid, [('product_tmpl_id', '=', context.get('active_id'))], context=context)
            if len(product_ids) == 1:
                record_id = product_ids[0]
            else:
                raise UserError(_('Please use the Product Variant view to request a procurement.'))
        res = super(make_procurement, self).default_get(cr, uid, fields, context=context)
        if record_id and 'product_id' in fields:
            proxy = self.pool.get('product.product')
            product_ids = proxy.search(cr, uid, [('id', '=', record_id)], context=context, limit=1)
            if product_ids:
                product_id = product_ids[0]
                product = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
                res['product_id'] = product.id
                res['uom_id'] = product.uom_id.id
        if 'warehouse_id' in fields:
            # Default to the first warehouse found (if any).
            warehouse_id = self.pool.get('stock.warehouse').search(cr, uid, [], context=context)
            res['warehouse_id'] = warehouse_id[0] if warehouse_id else False
        return res
| tvtsoft/odoo8 | addons/stock/wizard/make_procurement_product.py | Python | agpl-3.0 | 5,307 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2011 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from keystoneclient.v2_0 import client as keystone_client
from novaclient.v1_1 import client as nova_client
from horizon import api
from horizon import test
# Canned fixture values shared by the API test cases in this module.
TEST_CONSOLE_KIND = 'vnc'
TEST_EMAIL = '[email protected]'
TEST_HOSTNAME = 'hostname'
TEST_INSTANCE_ID = '2'
TEST_PASSWORD = '12345'
TEST_PORT = 8000
TEST_RETURN = 'retValue'
TEST_TENANT_DESCRIPTION = 'tenantDescription'
TEST_TENANT_ID = '1234'
TEST_TENANT_NAME = 'foo'
TEST_TOKEN = 'aToken'
TEST_TOKEN_ID = 'userId'
TEST_URL = 'http://%s:%s/something/v1.0' % (TEST_HOSTNAME, TEST_PORT)
TEST_USERNAME = 'testUser'
class APIResource(api.APIResourceWrapper):
    """Minimal APIResourceWrapper subclass used as a test fixture."""
    _attrs = ['foo', 'bar', 'baz']
    @staticmethod
    def get_instance(innerObject=None):
        """Returns an APIResource wrapping innerObject (or a fresh stub)."""
        if innerObject is None:
            # No inner object supplied: fabricate an empty stand-in class
            # and instantiate it on the fly.
            innerObject = type('InnerAPIResource', (object,), {})()
        innerObject.foo = 'foo'
        innerObject.bar = 'bar'
        return APIResource(innerObject)
class APIDict(api.APIDictWrapper):
    """Minimal APIDictWrapper subclass used as a test fixture."""
    _attrs = ['foo', 'bar', 'baz']
    @staticmethod
    def get_instance(innerDict=None):
        """Returns an APIDict wrapping innerDict (or a default stub dict)."""
        if innerDict is None:
            innerDict = dict(foo='foo', bar='bar')
        return APIDict(innerDict)
class APITestCase(test.TestCase):
    """TestCase that swaps the keystone/nova client factories for mox stubs.

    setUp monkey-patches api.keystone.keystoneclient and api.nova.novaclient;
    tearDown restores the originals.
    """
    def setUp(self):
        def fake_keystoneclient(request, username=None, password=None,
                                tenant_id=None, token_id=None, endpoint=None):
            # All credentials are ignored; every caller gets the shared stub.
            return self.stub_keystoneclient()
        super(APITestCase, self).setUp()
        # Remember the real factories so tearDown can restore them.
        self._original_keystoneclient = api.keystone.keystoneclient
        self._original_novaclient = api.nova.novaclient
        api.keystone.keystoneclient = fake_keystoneclient
        api.nova.novaclient = lambda request: self.stub_novaclient()
    def stub_novaclient(self):
        """Returns a lazily-created mox mock of nova_client.Client."""
        if not hasattr(self, "novaclient"):
            self.mox.StubOutWithMock(nova_client, 'Client')
            self.novaclient = self.mox.CreateMock(nova_client.Client)
        return self.novaclient
    def stub_keystoneclient(self):
        """Returns a lazily-created mox mock of keystone_client.Client."""
        if not hasattr(self, "keystoneclient"):
            self.mox.StubOutWithMock(keystone_client, 'Client')
            self.keystoneclient = self.mox.CreateMock(keystone_client.Client)
        return self.keystoneclient
    def tearDown(self):
        super(APITestCase, self).tearDown()
        # Restore the real client factories monkey-patched in setUp.
        api.nova.novaclient = self._original_novaclient
        api.keystone.keystoneclient = self._original_keystoneclient
| usc-isi/horizon-old | horizon/horizon/tests/api_tests/utils.py | Python | apache-2.0 | 3,363 |
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A Minception model with learned scale changes and balanced paths.
This model is like Minception, except:
1) Downscales and upscales are always learned, using striding convolutions
or deconvolutions.
2) Layer depths are set so that all Minception model_util.modules require
approximately the same number of floating point operations to compute (so as
layer rows and columns decrease, the depth increases).
3) All paths through the network, from input to output, have the same length.
It appears to be at least as accurate as the original Minception model, and
at the time of writing is still converging.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
# pylint: disable=g-bad-import-order
from isl import tensorcheck
from isl import util
from isl.models import model_util
logging = tf.logging
# The standard convolution sizes for in-scale Minception model_util.modules.
IN_SCALE_EXPANSION_SIZE = 3  # expansion convolution size
IN_SCALE_REDUCTION_SIZE = 1  # reduction convolution size
@tensorcheck.well_defined()
def core(base_depth: int, is_train: bool, input_op: tf.Tensor,
         name: str = None) -> tf.Tensor:
  """A Minception model with learned scale changes and balanced paths.

  Args:
    base_depth: The depth of a 1x1 layer.
      Used as a multiplier when computing layer depths from size.
    is_train: Whether we're training.
    input_op: The input.
    name: Optional op name.

  Returns:
    The output of the core model as an embedding layer.
    Network heads should take this layer as input.
  """
  with tf.name_scope(name, 'fovea_core', [input_op]) as scope:
    # Ensure the input data is in the range [0.0, 1.0].
    input_op = tensorcheck.bounds_unlabeled(0.0, 1.0, input_op)
    # Layer sizes (size == width == height) for the lower part of the
    # network.  Row s is the tower operating at scale s, i.e. a rescaling
    # factor of 2 ** s: it spends its first s steps downscaling and its
    # last s steps upscaling back to the native scale, with in-scale
    # Minception modules in between.
    lls = [
        [36, 34, 32, 30, 28, 26, 24, 22, 20, 18, 16],
        [50, 24, 22, 20, 18, 16, 14, 12, 10, 8, 16],
        [70, 34, 16, 14, 12, 10, 8, 6, 4, 8, 16],
        [94, 46, 22, 10, 8, 6, 4, 2, 4, 8, 16],
        [110, 54, 26, 12, 5, 3, 1, 2, 4, 8, 16],
    ]
    # Layer sizes for the upper (post-merge) part of the network: a plain
    # stack of in-scale Minception modules.
    uls = [16, 14, 12, 10, 8]
    [_, num_rows, num_columns, _] = input_op.shape_as_list()
    assert num_rows == lls[4][0]
    assert num_rows == num_columns
    # The number of in-scale Minception modules for each lower-tower scale.
    num_lower_recursive_stacks = lambda s: 10 - 2 * s
    num_upper_recursive_stacks = len(uls) - 1
    num_scales = len(lls)

    def foveate(final_size: int, op: tf.Tensor, name: str) -> tf.Tensor:
      return model_util.learned_fovea(is_train, base_depth, False, final_size,
                                      op, name)

    # Build each tower's input: crop to the tower's receptive field, then
    # downscale s times to reach scale s.
    scale_ops = []
    for s in range(num_scales):
      if s == num_scales - 1:
        # The deepest tower consumes the full input; there's no need to crop.
        scale_op = input_op
      else:
        scale_op = util.crop_center_unlabeled(lls[s][0], input_op)
      for step in range(s):
        scale_op = foveate(lls[s][step + 1], scale_op,
                           'downscale_%d_%d' % (s, step))
      logging.info('scale %d tower input shape: %s', s,
                   str(scale_op.shape_as_list()))
      scale_ops.append(scale_op)

    # Run each tower's stack of in-scale Minception modules.
    multiscale_tower_ops = []
    for s in range(num_scales):
      recursive_op = scale_ops[s]
      for r in range(num_lower_recursive_stacks(s)):
        final_size = recursive_op.shape_as_list()[1] - 2
        recursive_op = model_util.module(
            IN_SCALE_EXPANSION_SIZE,
            IN_SCALE_REDUCTION_SIZE,
            model_util.size_to_depth(base_depth, final_size, True),
            model_util.size_to_depth(base_depth, final_size, False),
            is_deconv=False,
            add_bias=False,
            min_depth_from_residual=True,
            is_train=is_train,
            input_op=recursive_op,
            name='lower_scale_%d_recursion_%d' % (s, r))
      num_recursive_rows = recursive_op.shape_as_list()[1]
      # Scale s still has s upscaling steps to go, so it should now sit
      # s entries before the end of its size schedule.
      assert num_recursive_rows == lls[s][-(s + 1)], num_recursive_rows
      multiscale_tower_ops.append(recursive_op)

    def defoveate(final_size: int, op: tf.Tensor, name: str) -> tf.Tensor:
      return model_util.learned_defovea(is_train, base_depth, False, False,
                                        final_size, op, name)

    # Upscale each tower back to the native scale (s upscaling steps for
    # the tower at scale s).
    deconv_ops = []
    for s in range(num_scales):
      deconv_op = multiscale_tower_ops[s]
      for step in range(s):
        deconv_op = defoveate(lls[s][step - s], deconv_op,
                              'upscale_%d_%d' % (s, step))
      deconv_ops.append(deconv_op)

    # Merge the towers along depth and run the upper in-scale modules.
    recursive_op = tf.concat(deconv_ops, 3)
    assert recursive_op.shape_as_list()[1] == uls[0]
    for r in range(num_upper_recursive_stacks):
      final_size = recursive_op.shape_as_list()[1] - 2
      recursive_op = model_util.module(
          IN_SCALE_EXPANSION_SIZE,
          IN_SCALE_REDUCTION_SIZE,
          model_util.size_to_depth(base_depth, final_size, True),
          model_util.size_to_depth(base_depth, final_size, False),
          is_deconv=False,
          add_bias=False,
          min_depth_from_residual=True,
          is_train=is_train,
          input_op=recursive_op,
          name='upper_recursion_%d' % (r))
    assert recursive_op.shape_as_list()[1] == uls[-1]

    return tf.identity(recursive_op, name=scope)
| google/in-silico-labeling | isl/models/fovea.py | Python | apache-2.0 | 8,969 |
import sys
from typing import List, Optional
from data import warehouse
from puzzle.heuristics import analyze_number
from puzzle.problems import problem
_OFFSETS = None
class NumberProblem(problem.Problem):
  """Puzzle problem whose clue is a single number or a list of digits."""
  def __init__(
      self, name: str, lines: List[str], allow_offsets: bool=True, **kwargs
  ) -> None:
    super(NumberProblem, self).__init__(name, lines, **kwargs)
    # Parsed numeric form of `lines`; None when the lines are not numeric.
    self._digits = _parse(lines)
    self._allow_offsets = allow_offsets
  @staticmethod
  def score(lines: List[str]) -> float:
    """Returns a heuristic in [0, 1] for how number-like `lines` is."""
    if not lines:
      return 0
    parsed = _parse(lines)
    if not parsed:
      return 0
    if any(isinstance(digit, float) for digit in parsed):
      return .5  # Unsure how to score a float.
    if len(parsed) > 5:
      return 1  # Enough digits to be interesting.
    # Weakest score returned for 0 ('epsilon').
    max_information = max(parsed) ** len(parsed)
    return max(sys.float_info.epsilon, min(max_information / 0xAAAA, 1))
  def _solve(self) -> dict:
    """Returns {solution_str: weight} for interpretations of the digits."""
    # TODO: Much optimization needed here.
    result = {}
    required_weight = self._solution_constraints.weight_threshold
    # A lone number may also be tried with small additive offsets.
    if self._allow_offsets and len(self._digits) == 1:
      offsets = _get_offsets()
    else:
      offsets = [0]
    for i, offset in enumerate(offsets):
      # Later offsets are increasingly unlikely, so damp their weights.
      scale_factor = 1 - i / len(offsets)
      digits = self._digits[:]
      if offset:
        digits[0] += offset
      for (
          solution, weight), notes in analyze_number.digit_solutions_with_notes(
              digits):
        scaled_weight = weight * scale_factor
        if scaled_weight < required_weight:
          continue
        if offset:
          solution_str = '%s +%s' % (solution, offset)
        else:
          solution_str = solution
        result[solution_str] = scaled_weight
        if offset:
          self._notes[solution_str].append('offset +%s' % offset)
        self._notes[solution_str] += notes
    return result
def _parse(lines: List[str]) -> Optional[List[int]]:
"""Converts a space-separated list of digits into 1 number."""
if not lines:
return None
segments = ' '.join(lines).split() # Merge into one line.
if not segments:
return None
digits = []
bases = []
segment_lengths = []
max_digit = 0
binary_heuristic = True
for segment in segments:
if not all(c in '01' for c in segment):
binary_heuristic = False
break
for segment in segments:
segment_length = len(segment)
try:
if binary_heuristic:
base = 2
elif segment.startswith('0x'):
base = 16
segment_length -= 2
elif segment.startswith('0'):
base = 8
segment_length -= 1
else:
base = 0 # Autodetect.
segment_lengths.append(segment_length)
bases.append(base)
parsed = int(segment, base)
digits.append(parsed)
if parsed > max_digit:
max_digit = parsed + 1
except:
# TODO: Support float, base64.
return None
if not digits:
return None
if len(digits) == 1:
return digits
if len(digits) >= 30: # Chosen arbitrarily.
return None
return digits
def _get_offsets() -> List[int]:
  """Lazily builds [0] plus the 1-based alphabet positions of the letters
  returned by the '/letter/frequency' warehouse entry; cached in _OFFSETS."""
  global _OFFSETS
  if _OFFSETS is None:
    frequent_letters = warehouse.get('/letter/frequency')
    positions = [ord(letter) - ord('a') + 1 for letter in frequent_letters]
    _OFFSETS = [0] + positions
  return _OFFSETS
| PhilHarnish/forge | src/puzzle/problems/number_problem.py | Python | mit | 3,296 |
#!/usr/bin/python
#
# Copyright (C) 2006, 2007, 2010, 2011, 2012 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
"""Script for testing ganeti.utils.x509"""
import distutils.version
import os
import shutil
import string
import tempfile
import time
import unittest
import warnings

import OpenSSL
from ganeti import constants
from ganeti import utils
from ganeti import compat
from ganeti import errors
import testutils
class TestParseAsn1Generalizedtime(unittest.TestCase):
  """Tests for utils.x509._ParseAsn1Generalizedtime (ASN.1 time -> epoch)."""
  def setUp(self):
    self._Parse = utils.x509._ParseAsn1Generalizedtime
  def test(self):
    # UTC
    self.assertEqual(self._Parse("19700101000000Z"), 0)
    self.assertEqual(self._Parse("20100222174152Z"), 1266860512)
    self.assertEqual(self._Parse("20380119031407Z"), (2**31) - 1)
    # With offset
    self.assertEqual(self._Parse("20100222174152+0000"), 1266860512)
    self.assertEqual(self._Parse("20100223131652+0000"), 1266931012)
    self.assertEqual(self._Parse("20100223051808-0800"), 1266931088)
    self.assertEqual(self._Parse("20100224002135+1100"), 1266931295)
    self.assertEqual(self._Parse("19700101000000-0100"), 3600)
    # Leap seconds are not supported by datetime.datetime
    self.assertRaises(ValueError, self._Parse, "19841231235960+0000")
    self.assertRaises(ValueError, self._Parse, "19920630235960+0000")
    # Errors
    self.assertRaises(ValueError, self._Parse, "")
    self.assertRaises(ValueError, self._Parse, "invalid")
    self.assertRaises(ValueError, self._Parse, "20100222174152")
    self.assertRaises(ValueError, self._Parse, "Mon Feb 22 17:47:02 UTC 2010")
    self.assertRaises(ValueError, self._Parse, "2010-02-22 17:42:02")
class TestGetX509CertValidity(testutils.GanetiTestCase):
  """Tests for utils.GetX509CertValidity.

  Extracting notBefore/notAfter needs pyOpenSSL >= 0.7; on older versions
  GetX509CertValidity returns (None, None), which is what is asserted below.
  """
  def setUp(self):
    testutils.GanetiTestCase.setUp(self)
    pyopenssl_version = distutils.version.LooseVersion(OpenSSL.__version__)
    # Test whether we have pyOpenSSL 0.7 or above
    self.pyopenssl0_7 = (pyopenssl_version >= "0.7")
    if not self.pyopenssl0_7:
      # FIX: "warnings" was used without being imported, so this branch
      # raised NameError instead of emitting a warning (import added at
      # module level).
      warnings.warn("This test requires pyOpenSSL 0.7 or above to"
                    " function correctly")
  def _LoadCert(self, name):
    """Loads a PEM certificate from the test data directory."""
    return OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM,
                                           testutils.ReadTestData(name))
  def test(self):
    validity = utils.GetX509CertValidity(self._LoadCert("cert1.pem"))
    if self.pyopenssl0_7:
      self.assertEqual(validity, (1266919967, 1267524767))
    else:
      self.assertEqual(validity, (None, None))
class TestSignX509Certificate(unittest.TestCase):
  """Tests for utils.SignX509Certificate / utils.LoadSignedX509Certificate.

  NOTE(review): Python 2-only constructs are used below (string.letters and
  str.encode("hex")).
  """
  KEY = "My private key!"
  KEY_OTHER = "Another key"
  def test(self):
    # Generate certificate valid for 5 minutes
    (_, cert_pem) = utils.GenerateSelfSignedX509Cert(None, 300)
    cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM,
                                           cert_pem)
    # No signature at all
    self.assertRaises(errors.GenericError,
                      utils.LoadSignedX509Certificate, cert_pem, self.KEY)
    # Invalid input
    self.assertRaises(errors.GenericError, utils.LoadSignedX509Certificate,
                      "", self.KEY)
    self.assertRaises(errors.GenericError, utils.LoadSignedX509Certificate,
                      "X-Ganeti-Signature: \n", self.KEY)
    self.assertRaises(errors.GenericError, utils.LoadSignedX509Certificate,
                      "X-Ganeti-Sign: $1234$abcdef\n", self.KEY)
    self.assertRaises(errors.GenericError, utils.LoadSignedX509Certificate,
                      "X-Ganeti-Signature: $1234567890$abcdef\n", self.KEY)
    self.assertRaises(errors.GenericError, utils.LoadSignedX509Certificate,
                      "X-Ganeti-Signature: $1234$abc\n\n" + cert_pem, self.KEY)
    # Invalid salt
    for salt in list("-_@$,:;/\\ \t\n"):
      self.assertRaises(errors.GenericError, utils.SignX509Certificate,
                        cert_pem, self.KEY, "foo%sbar" % salt)
    # Valid salts of various shapes; each signed blob must round-trip.
    for salt in ["HelloWorld", "salt", string.letters, string.digits,
                 utils.GenerateSecret(numbytes=4),
                 utils.GenerateSecret(numbytes=16),
                 "{123:456}".encode("hex")]:
      signed_pem = utils.SignX509Certificate(cert, self.KEY, salt)
      self._Check(cert, salt, signed_pem)
      # The signature must survive unrelated surrounding text.
      self._Check(cert, salt, "X-Another-Header: with a value\n" + signed_pem)
      self._Check(cert, salt, (10 * "Hello World!\n") + signed_pem)
      self._Check(cert, salt, (signed_pem + "\n\na few more\n"
                               "lines----\n------ at\nthe end!"))
  def _Check(self, cert, salt, pem):
    """Asserts *pem* loads back with the same salt and certificate digest."""
    (cert2, salt2) = utils.LoadSignedX509Certificate(pem, self.KEY)
    self.assertEqual(salt, salt2)
    self.assertEqual(cert.digest("sha1"), cert2.digest("sha1"))
    # Other key
    self.assertRaises(errors.GenericError, utils.LoadSignedX509Certificate,
                      pem, self.KEY_OTHER)
class TestCertVerification(testutils.GanetiTestCase):
  """Tests for utils.VerifyX509Certificate, including clock-skew handling."""
  def setUp(self):
    testutils.GanetiTestCase.setUp(self)
    self.tmpdir = tempfile.mkdtemp()
  def tearDown(self):
    shutil.rmtree(self.tmpdir)
  def testVerifyCertificate(self):
    cert_pem = testutils.ReadTestData("cert1.pem")
    cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM,
                                           cert_pem)
    # Not checking return value as this certificate is expired
    utils.VerifyX509Certificate(cert, 30, 7)
  @staticmethod
  def _GenCert(key, before, validity):
    """Creates a self-signed cert whose notBefore lies *before* seconds
    in the future (0 means now) and which stays valid for *validity* s."""
    # Urgh... mostly copied from x509.py :(
    # Create self-signed certificate
    cert = OpenSSL.crypto.X509()
    cert.set_serial_number(1)
    if before != 0:
      cert.gmtime_adj_notBefore(int(before))
    cert.gmtime_adj_notAfter(validity)
    cert.set_issuer(cert.get_subject())
    cert.set_pubkey(key)
    cert.sign(key, constants.X509_CERT_SIGN_DIGEST)
    return cert
  def testClockSkew(self):
    SKEW = constants.NODE_MAX_CLOCK_SKEW
    # Create private and public key
    key = OpenSSL.crypto.PKey()
    key.generate_key(OpenSSL.crypto.TYPE_RSA, constants.RSA_KEY_BITS)
    validity = 7 * 86400
    # skew small enough, accepting cert; note that this is a timed
    # test, and could fail if the machine is so loaded that the next
    # few lines take more than NODE_MAX_CLOCK_SKEW / 2
    for before in [-1, 0, SKEW / 4, SKEW / 2]:
      cert = self._GenCert(key, before, validity)
      result = utils.VerifyX509Certificate(cert, 1, 2)
      self.assertEqual(result, (None, None))
    # skew too great, not accepting certs
    for before in [SKEW * 2, SKEW * 10]:
      cert = self._GenCert(key, before, validity)
      (status, msg) = utils.VerifyX509Certificate(cert, 1, 2)
      self.assertEqual(status, utils.CERT_WARNING)
      self.assertTrue(msg.startswith("Certificate not yet valid"))
class TestVerifyCertificateInner(unittest.TestCase):
  """Tests for utils.x509._VerifyCertificateInner.

  NOTE(review): judging by the call sites below, the arguments appear to be
  (expired, not_before, not_after, now, warn_days, error_days) and the
  return value is (errcode, msg) -- confirm against x509.py.
  """
  def test(self):
    vci = utils.x509._VerifyCertificateInner
    # Valid
    self.assertEqual(vci(False, 1263916313, 1298476313, 1266940313, 30, 7),
                     (None, None))
    # Not yet valid
    (errcode, msg) = vci(False, 1266507600, 1267544400, 1266075600, 30, 7)
    self.assertEqual(errcode, utils.CERT_WARNING)
    # Expiring soon
    (errcode, msg) = vci(False, 1266507600, 1267544400, 1266939600, 30, 7)
    self.assertEqual(errcode, utils.CERT_ERROR)
    (errcode, msg) = vci(False, 1266507600, 1267544400, 1266939600, 30, 1)
    self.assertEqual(errcode, utils.CERT_WARNING)
    (errcode, msg) = vci(False, 1266507600, None, 1266939600, 30, 7)
    self.assertEqual(errcode, None)
    # Expired
    (errcode, msg) = vci(True, 1266507600, 1267544400, 1266939600, 30, 7)
    self.assertEqual(errcode, utils.CERT_ERROR)
    (errcode, msg) = vci(True, None, 1267544400, 1266939600, 30, 7)
    self.assertEqual(errcode, utils.CERT_ERROR)
    (errcode, msg) = vci(True, 1266507600, None, 1266939600, 30, 7)
    self.assertEqual(errcode, utils.CERT_ERROR)
    (errcode, msg) = vci(True, None, None, 1266939600, 30, 7)
    self.assertEqual(errcode, utils.CERT_ERROR)
class TestGenerateSelfSignedX509Cert(unittest.TestCase):
  """Tests for utils.GenerateSelfSignedX509Cert and the legacy SSL wrapper.

  NOTE(review): assert_ and failIf are deprecated Python 2 unittest aliases.
  """
  def setUp(self):
    self.tmpdir = tempfile.mkdtemp()
  def tearDown(self):
    shutil.rmtree(self.tmpdir)
  def _checkRsaPrivateKey(self, key):
    """Returns whether *key* contains a PEM private-key block."""
    lines = key.splitlines()
    return (("-----BEGIN RSA PRIVATE KEY-----" in lines and
             "-----END RSA PRIVATE KEY-----" in lines) or
            ("-----BEGIN PRIVATE KEY-----" in lines and
             "-----END PRIVATE KEY-----" in lines))
  def _checkCertificate(self, cert):
    """Returns whether *cert* contains a PEM certificate block."""
    lines = cert.splitlines()
    return ("-----BEGIN CERTIFICATE-----" in lines and
            "-----END CERTIFICATE-----" in lines)
  def test(self):
    for common_name in [None, ".", "Ganeti", "node1.example.com"]:
      (key_pem, cert_pem) = utils.GenerateSelfSignedX509Cert(common_name, 300)
      self._checkRsaPrivateKey(key_pem)
      self._checkCertificate(cert_pem)
      key = OpenSSL.crypto.load_privatekey(OpenSSL.crypto.FILETYPE_PEM,
                                           key_pem)
      self.assert_(key.bits() >= 1024)
      self.assertEqual(key.bits(), constants.RSA_KEY_BITS)
      self.assertEqual(key.type(), OpenSSL.crypto.TYPE_RSA)
      x509 = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM,
                                             cert_pem)
      self.failIf(x509.has_expired())
      self.assertEqual(x509.get_issuer().CN, common_name)
      self.assertEqual(x509.get_subject().CN, common_name)
      self.assertEqual(x509.get_pubkey().bits(), constants.RSA_KEY_BITS)
  def testLegacy(self):
    # GenerateSelfSignedSslCert writes key and certificate into one file.
    cert1_filename = os.path.join(self.tmpdir, "cert1.pem")
    utils.GenerateSelfSignedSslCert(cert1_filename, validity=1)
    cert1 = utils.ReadFile(cert1_filename)
    self.assert_(self._checkRsaPrivateKey(cert1))
    self.assert_(self._checkCertificate(cert1))
class TestCheckNodeCertificate(testutils.GanetiTestCase):
  """Tests for utils.CheckNodeCertificate.

  NOTE(review): this class uses Python 2 "except X, err" syntax.
  """
  def setUp(self):
    testutils.GanetiTestCase.setUp(self)
    self.tmpdir = tempfile.mkdtemp()
  def tearDown(self):
    testutils.GanetiTestCase.tearDown(self)
    shutil.rmtree(self.tmpdir)
  def testMismatchingKey(self):
    other_cert = testutils.TestDataFilename("cert1.pem")
    node_cert = testutils.TestDataFilename("cert2.pem")
    cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM,
                                           utils.ReadFile(other_cert))
    try:
      utils.CheckNodeCertificate(cert, _noded_cert_file=node_cert)
    except errors.GenericError, err:
      self.assertEqual(str(err),
                       "Given cluster certificate does not match local key")
    else:
      self.fail("Exception was not raised")
  def testMatchingKey(self):
    cert_filename = testutils.TestDataFilename("cert2.pem")
    # Extract certificate
    cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM,
                                           utils.ReadFile(cert_filename))
    cert_pem = OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM,
                                               cert)
    utils.CheckNodeCertificate(cert, _noded_cert_file=cert_filename)
  def testMissingFile(self):
    # A missing noded certificate file must be ignored and not be created.
    cert_path = testutils.TestDataFilename("cert1.pem")
    nodecert = utils.PathJoin(self.tmpdir, "does-not-exist")
    utils.CheckNodeCertificate(NotImplemented, _noded_cert_file=nodecert)
    self.assertFalse(os.path.exists(nodecert))
  def testInvalidCertificate(self):
    tmpfile = utils.PathJoin(self.tmpdir, "cert")
    utils.WriteFile(tmpfile, data="not a certificate")
    self.assertRaises(errors.X509CertError, utils.CheckNodeCertificate,
                      NotImplemented, _noded_cert_file=tmpfile)
  def testNoPrivateKey(self):
    cert = testutils.TestDataFilename("cert1.pem")
    self.assertRaises(errors.X509CertError, utils.CheckNodeCertificate,
                      NotImplemented, _noded_cert_file=cert)
  def testMismatchInNodeCert(self):
    cert1_path = testutils.TestDataFilename("cert1.pem")
    cert2_path = testutils.TestDataFilename("cert2.pem")
    tmpfile = utils.PathJoin(self.tmpdir, "cert")
    # Extract certificate
    cert1 = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM,
                                            utils.ReadFile(cert1_path))
    cert1_pem = OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM,
                                                cert1)
    # Extract mismatching key
    key2 = OpenSSL.crypto.load_privatekey(OpenSSL.crypto.FILETYPE_PEM,
                                          utils.ReadFile(cert2_path))
    key2_pem = OpenSSL.crypto.dump_privatekey(OpenSSL.crypto.FILETYPE_PEM,
                                              key2)
    # Write to file
    utils.WriteFile(tmpfile, data=cert1_pem + key2_pem)
    try:
      utils.CheckNodeCertificate(cert1, _noded_cert_file=tmpfile)
    except errors.X509CertError, err:
      self.assertEqual(err.args,
                       (tmpfile, "Certificate does not match with private key"))
    else:
      self.fail("Exception was not raised")
# Run the test suite via Ganeti's common test runner when executed directly.
if __name__ == "__main__":
  testutils.GanetiTestProgram()
| damoxc/ganeti | test/py/ganeti.utils.x509_unittest.py | Python | gpl-2.0 | 13,768 |
# -*- coding: utf-8 -*-
from fuel.datasets import H5PYDataset
from fuel.transformers.defaults import uint8_pixels_to_floatX
from fuel.utils import find_in_data_path
class MNIST(H5PYDataset):
    u"""MNIST dataset.

    MNIST (Mixed National Institute of Standards and Technology) [LBBH] is
    a database of handwritten digits. It is one of the most famous
    datasets in machine learning and consists of 60,000 training images
    and 10,000 testing images. The images are grayscale and 28 x 28 pixels
    large. It is accessible through Yann LeCun's website [LECUN].

    .. [LECUN] http://yann.lecun.com/exdb/mnist/

    Parameters
    ----------
    which_sets : tuple of str
        Which split to load. Valid values are 'train' and 'test',
        corresponding to the training set (60,000 examples) and the test
        set (10,000 examples).

    """
    # HDF5 file looked up via Fuel's configured data path.
    filename = 'mnist.hdf5'
    # Rescale uint8 pixels in [0, 255] to floatX values in [0, 1].
    default_transformers = uint8_pixels_to_floatX(('features',))

    def __init__(self, which_sets, **kwargs):
        # The whole dataset is small, so keep it in memory by default;
        # callers may still override with load_in_memory=False.
        kwargs.setdefault('load_in_memory', True)
        super(MNIST, self).__init__(
            file_or_path=find_in_data_path(self.filename),
            which_sets=which_sets, **kwargs)
| glewis17/fuel | fuel/datasets/mnist.py | Python | mit | 1,193 |
# -*- coding: utf-8 -*-
#Copyright (c) 2007-8, Playful Invention Company.
#Copyright (c) 2008-10 Walter Bender
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#The above copyright notice and this permission notice shall be included in
#all copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#THE SOFTWARE.
"""
sprites.py is a simple sprites library for managing graphics objects,
'sprites', on a canvas. It manages multiple sprites with methods such
as move, hide, set_layer, etc.
There are two classes:
class Sprites maintains a collection of sprites.
class Sprite manages individual sprites within the collection.
Example usage:
# Import the classes into your program.
from sprites import Sprites, Sprite
# In your expose callback event handler, call refresh
def _expose_cb(self, win, event):
self.sprite_list.refresh(event)
return True
# Create a new sprite collection for a gtk Drawing Area.
my_drawing_area = gtk.DrawingArea()
self.sprite_list = Sprites(my_drawing_area)
# Create a "pixbuf" (in this example, from SVG).
my_pixbuf = svg_str_to_pixbuf("<svg>...some svg code...</svg>")
# Create a sprite at position x1, y1.
my_sprite = sprites.Sprite(self.sprite_list, x1, y1, my_pixbuf)
# Move the sprite to a new position.
my_sprite.move((x1+dx, y1+dy))
# Create another "pixbuf".
your_pixbuf = svg_str_to_pixbuf("<svg>...some svg code...</svg>")
# Create a sprite at position x2, y2.
your_sprite = sprites.Sprite(self.sprite_list, x2, y2, my_pixbuf)
# Assign the sprites to layers.
# In this example, your_sprite will be on top of my_sprite.
my_sprite.set_layer(100)
your_sprite.set_layer(200)
# Now put my_sprite on top of your_sprite.
my_sprite.set_layer(300)
# method for converting SVG to a gtk pixbuf
def svg_str_to_pixbuf(svg_string):
pl = gtk.gdk.PixbufLoader('svg')
pl.write(svg_string)
pl.close()
pixbuf = pl.get_pixbuf()
return pixbuf
"""
import pygtk
pygtk.require('2.0')
import gtk
import pango
class Sprites:
    """ A class for the list of sprites and everything they share in common """

    def __init__(self, canvas, area=None, gc=None):
        """ Initialize an empty array of sprites """
        self.canvas = canvas
        if area is None:
            # Default to drawing directly into the canvas window.
            self.area = self.canvas.window
            self.gc = self.area.new_gc()
        else:
            self.area = area
            self.gc = gc
        self.cm = self.gc.get_colormap()
        self.list = []

    def get_sprite(self, i):
        """ Return the sprite at index i, or None when out of range """
        if 0 <= i < len(self.list):
            return self.list[i]
        return None

    def length_of_list(self):
        """ How many sprites are there? """
        return len(self.list)

    def append_to_list(self, spr):
        """ Append a new sprite to the end of the list. """
        self.list.append(spr)

    def insert_in_list(self, spr, i):
        """ Insert a sprite at position i (clamped to the list bounds). """
        if i < 0:
            self.list.insert(0, spr)
        elif i > len(self.list) - 1:
            self.list.append(spr)
        else:
            self.list.insert(i, spr)

    def remove_from_list(self, spr):
        """ Remove a sprite from the list. """
        if spr in self.list:
            self.list.remove(spr)

    def find_sprite(self, pos, alpha=True):
        """ Search based on (x, y) position. Return the 'top/first' one. """
        # Walk from the top of the stacking order downwards.  (The previous
        # version copied the list into a local named `list`, shadowing the
        # builtin; reversed() avoids both the copy and the shadowing.)
        for spr in reversed(self.list):
            if spr.hit(pos):
                # With alpha checking enabled, only fully opaque pixels
                # count as hits.
                if not alpha or spr.get_pixel(pos)[3] == 255:
                    return spr
        return None

    def refresh(self, event):
        """ Handle expose event refresh """
        self.redraw_sprites(event.area)

    def redraw_sprites(self, area=None):
        """ Redraw the sprites that intersect area (all sprites if None). """
        for spr in self.list:
            if area is None:
                spr.draw()
            else:
                intersection = spr.rect.intersect(area)
                if intersection.width > 0 or intersection.height > 0:
                    spr.draw()
class Sprite:
    """ A class for the individual sprites """

    def __init__(self, sprites, x, y, image):
        """ Initialize an individual sprite """
        self._sprites = sprites
        self.rect = gtk.gdk.Rectangle(int(x), int(y), 0, 0)
        # Per-label text attributes; index 0 holds the defaults that get
        # copied when the label arrays are extended.
        self._scale = [12]
        self._rescale = [True]
        self._horiz_align = ["center"]
        self._vert_align = ["middle"]
        self._fd = None
        self._bold = False
        self._italic = False
        self._color = None
        self._margins = [0, 0, 0, 0]
        self.layer = 100
        self.labels = []
        self.images = []
        self._dx = []  # image offsets
        self._dy = []
        self.set_image(image)
        if self._sprites is not None:
            self._sprites.append_to_list(self)

    def set_image(self, image, i=0, dx=0, dy=0):
        """ Add an image to the sprite. """
        while len(self.images) < i + 1:
            self.images.append(None)
            self._dx.append(0)
            self._dy.append(0)
        self.images[i] = image
        self._dx[i] = dx
        self._dy[i] = dy
        if isinstance(self.images[i], gtk.gdk.Pixbuf):
            w = self.images[i].get_width()
            h = self.images[i].get_height()
        else:
            w, h = self.images[i].get_size()
        if i == 0:  # Always reset width and height when base image changes.
            self.rect.width = w + dx
            self.rect.height = h + dy
        else:
            # Secondary images can only grow the bounding rectangle.
            if w + dx > self.rect.width:
                self.rect.width = w + dx
            if h + dy > self.rect.height:
                self.rect.height = h + dy

    def move(self, pos, visible=True):
        """ Move to new (x, y) position """
        # Invalidate both the old and the new rectangle so gtk repaints
        # the vacated area as well.
        if visible:
            self.inval()
        self.rect.x, self.rect.y = int(pos[0]), int(pos[1])
        if visible:
            self.inval()

    def move_relative(self, pos, visible=True):
        """ Move to new (x+dx, y+dy) position """
        if visible:
            self.inval()
        self.rect.x += int(pos[0])
        self.rect.y += int(pos[1])
        if visible:
            self.inval()

    def get_xy(self):
        """ Return current (x, y) position """
        return (self.rect.x, self.rect.y)

    def get_dimensions(self):
        """ Return current size """
        return (self.rect.width, self.rect.height)

    def get_layer(self):
        """ Return current layer """
        return self.layer

    def set_shape(self, image, i=0):
        """ Set the current image associated with the sprite """
        self.inval()
        self.set_image(image, i)
        self.inval()

    def set_layer(self, layer):
        """ Set the layer for a sprite """
        if self._sprites is None:
            return
        # Remove and re-insert so the shared list stays sorted by layer.
        self._sprites.remove_from_list(self)
        self.layer = layer
        for i in range(self._sprites.length_of_list()):
            if layer < self._sprites.get_sprite(i).layer:
                self._sprites.insert_in_list(self, i)
                self.inval()
                return
        # New highest layer: goes on top.
        self._sprites.append_to_list(self)
        self.inval()

    def set_label(self, new_label, i=0):
        """ Set the label drawn on the sprite """
        self._extend_labels_array(i)
        if type(new_label) is str or type(new_label) is unicode:
            # pango doesn't like nulls
            self.labels[i] = new_label.replace("\0", " ")
        else:
            self.labels[i] = str(new_label)
        self.inval()

    def set_margins(self, l=0, t=0, r=0, b=0):
        """ Set the margins for drawing the label """
        self._margins = [l, t, r, b]

    def _extend_labels_array(self, i):
        """ Append to the labels attribute list """
        if self._fd is None:
            self.set_font('Sans')
        if self._color is None:
            self._color = self._sprites.cm.alloc_color('black')
        while len(self.labels) < i + 1:
            self.labels.append(" ")
            # New labels inherit the attributes of label 0.
            self._scale.append(self._scale[0])
            self._rescale.append(self._rescale[0])
            self._horiz_align.append(self._horiz_align[0])
            self._vert_align.append(self._vert_align[0])

    def set_font(self, font):
        """ Set the font for a label """
        self._fd = pango.FontDescription(font)

    def set_label_color(self, rgb):
        """ Set the font color for a label """
        self._color = self._sprites.cm.alloc_color(rgb)

    def set_label_attributes(self, scale, rescale=True, horiz_align="center",
                             vert_align="middle", i=0):
        """ Set the various label attributes """
        self._extend_labels_array(i)
        self._scale[i] = scale
        self._rescale[i] = rescale
        self._horiz_align[i] = horiz_align
        self._vert_align[i] = vert_align

    def hide(self):
        """ Hide a sprite """
        if self._sprites is None:
            return
        self.inval()
        self._sprites.remove_from_list(self)

    def inval(self):
        """ Force a region redraw by gtk """
        if self._sprites is None:
            return
        self._sprites.area.invalidate_rect(self.rect, False)

    def draw(self):
        """ Draw the sprite (and label) """
        if self._sprites is None:
            return
        # Draw every image layer, then the text labels on top.
        for i, img in enumerate(self.images):
            if isinstance(img, gtk.gdk.Pixbuf):
                self._sprites.area.draw_pixbuf(self._sprites.gc, img, 0, 0,
                                               self.rect.x + self._dx[i],
                                               self.rect.y + self._dy[i])
            elif img is not None:
                self._sprites.area.draw_drawable(self._sprites.gc, img, 0, 0,
                                                 self.rect.x + self._dx[i],
                                                 self.rect.y + self._dy[i],
                                                 -1, -1)
        if len(self.labels) > 0:
            self.draw_label()

    def hit(self, pos):
        """ Is (x, y) on top of the sprite? """
        x, y = pos
        if x < self.rect.x:
            return False
        if x > self.rect.x + self.rect.width:
            return False
        if y < self.rect.y:
            return False
        if y > self.rect.y + self.rect.height:
            return False
        return True

    def draw_label(self):
        """ Draw the label based on its attributes """
        if self._sprites is None:
            return
        # Usable area inside the margins.
        my_width = self.rect.width - self._margins[0] - self._margins[2]
        if my_width < 0:
            my_width = 0
        my_height = self.rect.height - self._margins[1] - self._margins[3]
        for i in range(len(self.labels)):
            pl = self._sprites.canvas.create_pango_layout(str(self.labels[i]))
            self._fd.set_size(int(self._scale[i] * pango.SCALE))
            pl.set_font_description(self._fd)
            w = pl.get_size()[0] / pango.SCALE
            if w > my_width:
                if self._rescale[i]:
                    # Shrink the font so the text fits the safe width.
                    self._fd.set_size(
                        int(self._scale[i] * pango.SCALE * my_width / w))
                    pl.set_font_description(self._fd)
                    w = pl.get_size()[0] / pango.SCALE
                else:
                    # Keep the font size but elide from the left with "…".
                    j = len(self.labels[i]) - 1
                    while(w > my_width and j > 0):
                        pl = self._sprites.canvas.create_pango_layout(
                            "…" + self.labels[i][len(self.labels[i]) - j:])
                        self._fd.set_size(int(self._scale[i] * pango.SCALE))
                        pl.set_font_description(self._fd)
                        w = pl.get_size()[0] / pango.SCALE
                        j -= 1
            if self._horiz_align[i] == "center":
                x = int(self.rect.x + self._margins[0] + (my_width - w) / 2)
            elif self._horiz_align[i] == 'left':
                x = int(self.rect.x + self._margins[0])
            else:  # right
                x = int(self.rect.x + self.rect.width - w - self._margins[2])
            h = pl.get_size()[1] / pango.SCALE
            if self._vert_align[i] == "middle":
                y = int(self.rect.y + self._margins[1] + (my_height - h) / 2)
            elif self._vert_align[i] == "top":
                y = int(self.rect.y + self._margins[1])
            else:  # bottom
                y = int(self.rect.y + self.rect.height - h - self._margins[3])
            self._sprites.gc.set_foreground(self._color)
            self._sprites.area.draw_layout(self._sprites.gc, x, y, pl)

    def label_width(self):
        """ Calculate the width of a label """
        # NOTE(review): `max` shadows the builtin within this method.
        max = 0
        for i in range(len(self.labels)):
            pl = self._sprites.canvas.create_pango_layout(self.labels[i])
            self._fd.set_size(int(self._scale[i] * pango.SCALE))
            pl.set_font_description(self._fd)
            w = pl.get_size()[0] / pango.SCALE
            if w > max:
                max = w
        return max

    def label_safe_width(self):
        """ Return maximum width for a label """
        return self.rect.width - self._margins[0] - self._margins[2]

    def label_safe_height(self):
        """ Return maximum height for a label """
        return self.rect.height - self._margins[1] - self._margins[3]

    def label_left_top(self):
        """ Return the upper-left corner of the label safe zone """
        return(self._margins[0], self._margins[1])

    def get_pixel(self, pos, i=0, mode='888'):
        """ Return the pixel at (x, y) """
        x, y = pos
        # Convert to sprite-local coordinates.
        x = x - self.rect.x
        y = y - self.rect.y
        if isinstance(self.images[i], gtk.gdk.Pixbuf):
            if y > self.images[i].get_height() - 1:
                return(-1, -1, -1, -1)
            array = self.images[i].get_pixels()
            if array is not None:
                try:
                    # NOTE(review): the offsets assume the pixbuf row stride
                    # equals width * n_channels (no row padding) — confirm
                    # for the pixbufs used here.
                    if self.images[i].get_has_alpha():
                        offset = (y * self.images[i].get_width() + x) * 4
                        a = ord(array[offset + 3])
                    else:
                        offset = (y * self.images[i].get_width() + x) * 3
                        a = 255
                    r = ord(array[offset])
                    g = ord(array[offset + 1])
                    b = ord(array[offset + 2])
                    return(r, g, b, a)
                except IndexError:
                    print "Index Error: %d %d (%d, %d) (w: %d, h: %d) (%dx%d)"\
                        % (len(array), offset, x, y,
                           self.images[i].get_width(),
                           self.images[i].get_height(),
                           self.rect.width, self.rect.height)
            # Out-of-range or unreadable pixel.
            return(-1, -1, -1, -1)
        else:
            w, h = self.images[i].get_size()
            if x < 0 or x > (w - 1) or y < 0 or y > (h - 1):
                return(-1, -1, -1, -1)
            image = self.images[i].get_image(x, y, 1, 1)
            pixel = image.get_pixel(0, 0)
            visual = self.images[i].get_visual()
            r = int((pixel & visual.red_mask) >> visual.red_shift)
            g = int((pixel & visual.green_mask) >> visual.green_shift)
            b = int((pixel & visual.blue_mask) >> visual.blue_shift)
            # Rescale to 8 bits
            if mode == '565':
                r = r << 3
                g = g << 2
                b = b << 3
            return(r, g, b, 0)
| max630/turtleart-hacks | TurtleArt/sprites.py | Python | mit | 16,552 |
# -*- coding: utf-8 -*-
"""Created on Wed Mar 15 10:09:24 2017"""
import math
import os
import numpy as np
from skimage import io
from skimage import transform as tf
from keras.models import Sequential
from keras.layers.convolutional import Conv2D
from keras.layers.convolutional import MaxPooling2D
from keras.layers.core import Flatten
from keras.layers.core import Dense
from keras.optimizers import SGD
from keras.utils import np_utils
from keras.preprocessing.image import ImageDataGenerator
from matplotlib import pyplot as plt
def load_images(folder):
    """Read every .png in *folder* (as greyscale) and derive its label.

    Label scheme: 1 for filenames containing "einstein", 2 for "curie",
    the number itself for purely numeric filenames, 0 otherwise.
    Returns (images, labels) as two parallel lists.
    """
    images = []
    labels = []
    for fname in os.listdir(folder):
        if not fname.endswith(".png"):
            continue
        images.append(io.imread(folder + fname, as_grey=True))
        stem = os.path.splitext(fname)[0]
        if fname.find("einstein") > -1:
            labels.append(1)
        elif fname.find("curie") > -1:
            labels.append(2)
        elif stem.isdigit():
            labels.append(int(stem))
        else:
            labels.append(0)
    return images, labels
def deshear(filename):
    """Undo the horizontal shear of the image stored in *filename*.

    The shear angle is estimated from how much wider the image is than
    it is tall; the sheared-in left margin is cropped from the result.
    """
    image = io.imread(filename)
    extra_width = image.shape[1] - image.shape[0]
    angle = math.atan(extra_width / image.shape[0])
    transform = tf.AffineTransform(shear=angle)
    return tf.warp(image, transform)[:, extra_width:]
def normalize_images(images):
    """Crop each image to at most 100x100 and scale intensities to [0, 1].

    The input list is modified in place; a stacked numpy array of the
    processed images is returned.
    """
    for idx, img in enumerate(images):
        cropped = img[0:100, 0:100]
        images[idx] = cropped / np.amax(cropped)
    return np.array(images)
class LeNet:
    """LeNet-style convolutional network built with Keras.

    Parameters
    ----------
    input_shape : tuple
        Shape of one input sample, channels last (e.g. (100, 100, 1)).
    conv_1, conv_2 : tuple
        (number of filters, kernel size) for each convolutional layer.
    pool_1, pool_2 : tuple
        (pool size, stride) for each max-pooling layer.
    hidden : int
        Number of neurons in the fully connected hidden layer.
    classes : int
        Number of output classes for the softmax layer.

    The built (uncompiled) model is exposed as ``self.model``.
    """
    def __init__(self, input_shape, conv_1, pool_1, conv_2, pool_2, hidden,
                 classes):
        self.model = Sequential()
        # first set of CONV => RELU => POOL
        self.model.add(Conv2D(*conv_1, padding='same', activation='relu',
                              data_format='channels_last',
                              input_shape=input_shape))
        self.model.add(MaxPooling2D(pool_1[0], pool_1[1]))
        # second set of CONV => RELU => POOL
        self.model.add(Conv2D(*conv_2, padding='same', activation='relu',
                              data_format='channels_last'))
        self.model.add(MaxPooling2D(pool_2[0], pool_2[1]))
        # set of FC => RELU layers
        self.model.add(Flatten())
        self.model.add(Dense(hidden, activation='relu'))
        # softmax classifier
        self.model.add(Dense(classes, activation='softmax'))
# Loading image data sets and normalizing color scale
training_set, training_labels = load_images("images/train/")
test_set, test_labels = load_images("images/test/")
rw_set, rw_file_labels = load_images("images/real_world/")
training_set = normalize_images(training_set)
training_set = training_set[..., np.newaxis]  # add channel axis for Keras
test_set = normalize_images(test_set)
test_set = test_set[..., np.newaxis]
rw_set = normalize_images(rw_set)
rw_set = rw_set[..., np.newaxis]
# Sort the real-world images by their numeric filename label so they line
# up with the labels read from labels.txt below.
rw_set = np.array([x for (y, x) in sorted(zip(rw_file_labels, rw_set))])
# Getting labels for real world set from file
f = open('images/real_world/labels.txt', "r")
lines = f.readlines()
rw_labels = []
for x in lines:
    # Each line is "<index> <label>"; keep the label as int.
    rw_labels.append(int((x.split(' ')[1]).replace('\n', '')))
f.close()
# Parameters for LeNet convolutional network
classes = 3  # number of classes to identify
hidden = 500  # number of neurons in hidden layer
conv_1 = (20, (15, 15))  # (num of filters in first layer, filter size)
conv_2 = (50, (15, 15))  # (num of filters in second layer, filter size)
pool_1 = ((6, 6), (6, 6))  # (size of pool matrix, stride)
pool_2 = ((6, 6), (6, 6))  # (size of pool matrix, stride)
# Converting integer labels to categorical (one-hot) labels
training_labels = np_utils.to_categorical(training_labels, classes)
test_labels = np_utils.to_categorical(test_labels, classes)
rw_labels = np_utils.to_categorical(rw_labels, classes)
# Creating LeNet from basic training data
# NOTE(review): training_set[1].shape is the shape of one sample —
# assumes at least two training images exist.
aps = LeNet(training_set[1].shape, conv_1, pool_1, conv_2, pool_2, hidden,
            classes)
aps.model.compile(loss='categorical_crossentropy', optimizer='adam',
                  metrics=["accuracy"])
print('\nTraining LeNet with basic training data\n')
aps.model.fit(training_set, training_labels, batch_size=10, epochs=50,
              verbose=1)
# Testing basic model on both sets
test_probs = aps.model.predict(test_set)
test_prediction = test_probs.argmax(axis=1)
rw_probs = aps.model.predict(rw_set)
rw_prediction = rw_probs.argmax(axis=1)
(loss, accuracy) = aps.model.evaluate(test_set, test_labels, verbose=0)
print('\nAccuracy in test set: {}\n'.format(accuracy))
(loss, accuracy) = aps.model.evaluate(rw_set, rw_labels, verbose=0)
print('Accuracy in real world set: {}\n'.format(accuracy))
# Augmenting basic data set to improve performance in real world set
datagen = ImageDataGenerator(rotation_range=10, shear_range=0.3,
                             zoom_range=0.2, width_shift_range=0.15,
                             height_shift_range=0.15, fill_mode='constant',
                             cval=1)
# Creating LeNet with augmented data
aps_aug = LeNet(training_set[1].shape, conv_1, pool_1, conv_2, pool_2, hidden,
                classes)
aps_aug.model.compile(loss='categorical_crossentropy', optimizer='adam',
                      metrics=["accuracy"])
print('Training LeNet with augmented training data\n')
aps_aug.model.fit_generator(datagen.flow(training_set, training_labels,
                                         batch_size=10),
                            steps_per_epoch=len(training_set), epochs=50,
                            verbose=1)
# Testing augmented model
test_probs_aug = aps_aug.model.predict(test_set)
test_prediction_aug = test_probs_aug.argmax(axis=1)
rw_probs_aug = aps_aug.model.predict(rw_set)
rw_prediction_aug = rw_probs_aug.argmax(axis=1)
(loss, accuracy) = aps_aug.model.evaluate(rw_set, rw_labels, verbose=0)
print('\nAccuracy in real world set: {}'.format(accuracy))
| jdnz/qml-rg | Meeting 5/wheres_wally.py | Python | gpl-3.0 | 5,856 |
# Pipe -- API gateway
# Copyright (C) 2015 Jan Karásek
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import sys
sys.path.append('../Gate')
import pytest
import Gate.Api
import redis
class TestClass:
    """Basic integration tests for Gate API request handling."""

    def setup(self):
        """Create a Redis connection and a well-formed request payload."""
        self.redis = redis.StrictRedis()
        self.req_data = {
            "public_token": "fgagasd",
            "timestamp": 12354564641,
            "pipe": "basic-test"
        }

    def test_new_request_valid(self):
        """A well-formed request is accepted."""
        request = Gate.Api.Request(self.redis)
        assert request.new(self.req_data)

    def test_new_request_invalid(self):
        """A request with a non-string pipe value is rejected."""
        request = Gate.Api.Request(self.redis)
        # NOTE(review): invalid_data aliases self.req_data, so mutating it
        # also mutates the payload passed to new() below.
        invalid_data = self.req_data
        invalid_data['pipe'] = 12345
        assert request.new(self.req_data) == False
"""Plugin which add a copyright to the image.
Settings:
- ``copyright``: the copyright text.
- ``copyright_text_font``: the copyright text font - either system/user
font-name or absolute path to font.ttf file. If no font is specified, or
specified font is not found, the default font is used.
- ``copyright_text_font_size``: the copyright text font-size. If no font is
specified, this setting is ignored.
- ``copyright_text_color``: the copyright text color in a tuple (R, G, B)
with decimal RGB code, e.g. ``(255, 255, 255)`` is white.
- ``copyright_text_position``: the copyright text position in 2 tuple (left,
top). By default text would be positioned at bottom-left corner.
"""
import logging
from PIL import ImageDraw, ImageFont
from sigal import signals
logger = logging.getLogger(__name__)
def add_copyright(img, settings=None):
    """Stamp the configured copyright text onto *img* and return it.

    Position, color, font and size are read from *settings*; by default
    the text is placed near the bottom-left corner of the image.
    """
    logger.debug('Adding copyright to %r', img)
    canvas = ImageDraw.Draw(img)
    text = settings['copyright']
    color = settings.get('copyright_text_color', (0, 0, 0))
    font_size = settings.get('copyright_text_font_size', 10)
    assert font_size >= 0
    font = settings.get('copyright_text_font', None)
    bottom_margin = 3  # space kept below the text
    text_height = bottom_margin + 12  # default text height (of 15)
    if font:
        try:
            font = ImageFont.truetype(font, font_size)
            text_height = font.getsize(text)[1] + bottom_margin
        except Exception:
            # Fall back to the default font on any failure (e.g. the
            # requested font file cannot be located).
            logger.debug("Exception: Couldn't locate font %s, using "
                         "default font", font)
            font = ImageFont.load_default()
    else:
        font = ImageFont.load_default()
    left, top = settings.get('copyright_text_position',
                             (5, img.size[1] - text_height))
    canvas.text((left, top), text, fill=color, font=font)
    return img
def register(settings):
    """Hook the plugin into sigal when a copyright text is configured."""
    if not settings.get('copyright'):
        logger.warning('Copyright text is not set')
        return
    signals.img_resized.connect(add_copyright)
| kontza/sigal | sigal/plugins/copyright.py | Python | mit | 2,076 |
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
import json
import pymongo
from scrapy.settings import Settings
from scrapy.exceptions import DropItem
from scrapy import log
from pymongo import ReturnDocument
class YellowPipeline(object):
    """Persist scraped contact items into a MongoDB collection.

    Items are upserted keyed on their 'key' field, so re-crawling the
    same contact updates the stored document instead of duplicating it.
    """

    def __init__(self, mongo_uri, mongo_db, collection_name):
        self.mongo_uri = mongo_uri
        self.mongo_db = mongo_db
        self.collection_name = collection_name

    @classmethod
    def from_crawler(cls, crawler):
        """Build the pipeline from the crawler's MongoDB settings."""
        return cls(
            mongo_uri=crawler.settings.get('MONGODB_SERVER'),
            mongo_db=crawler.settings.get('MONGODB_DB'),
            collection_name=crawler.settings.get('MONGODB_COLLECTION')
        )

    def open_spider(self, spider):
        """Open the MongoDB connection when the spider starts."""
        log.msg("Open client", level=log.DEBUG, spider=spider)
        self.client = pymongo.MongoClient(self.mongo_uri)
        self.db = self.client[self.mongo_db]

    def close_spider(self, spider):
        """Close the MongoDB connection when the spider finishes."""
        log.msg("Close client", level=log.DEBUG, spider=spider)
        self.client.close()

    def process_item(self, item, spider):
        """Upsert the item into the collection, keyed on item['key']."""
        self.db[self.collection_name].find_one_and_update(
            { 'key': item['key'] },
            { '$set': dict(item) },
            upsert=True)
        log.msg("Contact added to MongoDB database!", level=log.DEBUG, spider=spider)
        return item
class DuplicatesPipeline(object):
    """Drop items whose 'id' has already been seen during this crawl."""

    def __init__(self):
        # ids of every item processed so far
        self.ids_seen = set()

    def process_item(self, item, spider):
        """Pass the item through; raise DropItem on a repeated id."""
        item_id = item['id']
        if item_id in self.ids_seen:
            raise DropItem("Duplicate item found: %s" % item)
        self.ids_seen.add(item_id)
        return item
| diegordzr/YellowSpider | yellow/yellow/pipelines.py | Python | mit | 1,714 |
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#
# This file is part of the E-Cell System
#
# Copyright (C) 1996-2016 Keio University
# Copyright (C) 2008-2016 RIKEN
# Copyright (C) 2005-2009 The Molecular Sciences Institute
#
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#
#
# E-Cell System is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# E-Cell System is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public
# License along with E-Cell System -- see the file COPYING.
# If not, write to the Free Software Foundation, Inc.,
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
#END_HEADER
import os
# Constants for ConfirmWindow
OK_MODE = 0
OKCANCEL_MODE = 1
YESNO_MODE = 2
CUSTOM_MODE = 3
# Constans for result
OK_PRESSED = 0
CANCEL_PRESSED = -1
YES_PRESSED = OK_PRESSED
NO_PRESSED = CANCEL_PRESSED
ME_DESIGN_MODE = "Design"
ME_RUN_MODE = "Run"
# Layout constants
#Shape Plugin Constants
SHAPE_PLUGIN_NAME='Default Shape'
SHAPE_PLUGIN_TYPE='Default Type'
DEFAULT_SHAPE_NAME='Default'
# object types:
LE_OBJECT_SYSTEM = 0
LE_OBJECT_PROCESS = 1
LE_OBJECT_VARIABLE = 2
LE_OBJECT_TEXTBOX = 3
LE_OBJECT_CUSTOM = 4
LE_OBJECT_CONNECTION = 5
# Default Root System Stepper
DE_DEFAULT_STEPPER_CLASS = 'ODEStepper'
DE_DEFAULT_STEPPER_NAME = 'Default_Stepper'
DE_DEFAULT_PROCESS_CLASS = 'ExpressionFluxProcess'
#CL_PATH='ME_PATH'
# ModelStore constants
MS_STEPPER_CLASS = 0
MS_STEPPER_PROPERTYLIST = 1
MS_STEPPER_INFO = 2
MS_STEPPER_SYSTEMLIST = 'SystemList'
MS_STEPPER_PROCESSLIST = 'ProcessList'
MS_VARIABLE_PROCESSLIST = 'ProcessList'
MS_PROCESS_VARREFLIST = 'VariableReferenceList'
MS_PROPERTY_VALUE = 0
MS_PROPERTY_FLAGS = 1
MS_PROPERTY_TYPE = 2
MS_PROPERTY_COLOR = 3
#MS_SETTABLE_FLAG = 0
#MS_GETTABLE_FLAG = 1
AVOGADRO = 6.0221367e+23
MS_SETTABLE_FLAG = 0
MS_GETTABLE_FLAG = 1
MS_LOADABLE_FLAG = 2
MS_SAVEABLE_FLAG = 3
MS_DELETEABLE_FLAG = 4
MS_CHANGED_FLAG = 5
MS_VARREF_NAME = 0
MS_VARREF_FULLID = 1
MS_VARREF_COEF = 2
MS_VARIABLE_VALUE = "Value"
MS_VARIABLE_NUMCONC = "NumberConc"
MS_VARIABLE_MOLARCONC = "MolarConc"
MS_SIZE = "SIZE"
MS_SYSTEM_STEPPERID = 'StepperID'
MS_PROCESS_STEPPERID = 'StepperID'
DND_PROPERTYLIST_TYPE = "propertylist"
DND_PROPERTYVALUELIST_TYPE = "propertyvaluelist"
MS_ENTITY_CLASS = 0
MS_ENTITY_PROPERTYLIST = 1
MS_ENTITY_PARENT = 2
MS_ENTITY_CHILD_SYSTEMLIST = 3
MS_ENTITY_CHILD_PROCESSLIST = 4
MS_ENTITY_CHILD_VARIABLELIST = 5
MS_ENTITY_INFO = 6
MS_SYSTEM_ROOT = 'System::/'
# DM constants
ECELL_PROPERTY_SETTABLE_FLAG = 0
DM_DESCRIPTION = "Description"
DM_ACCEPTNEWPROPERTY = "AcceptNewProperty"
DM_PROPERTYLIST ="PropertyList"
DM_BASECLASS = "Baseclass"
DM_BUILTIN = "Builtin"
DM_PROPERTY_DEFAULTVALUE = "DEFAULTVALUE"
DM_PROPERTY_SETTABLE_FLAG = "SETTABLEFLAG"
DM_PROPERTY_GETTABLE_FLAG = "GETTABLEFLAG"
DM_PROPERTY_SAVEABLE_FLAG = "SAVEABLEFLAG"
DM_PROPERTY_LOADABLE_FLAG = "LOADABLEFLAG"
DM_PROPERTY_DELETEABLE_FLAG = "DELETEABLEFLAG"
DM_PROPERTY_LOADABLE_FLAG = "LOADABLEFLAG"
DM_PROPERTY_SAVEABLE_FLAG = "SAVEABLEFLAG"
DM_PROPERTY_TYPE = "TYPE"
DM_PROPERTY_STRING = "STRING"
DM_PROPERTY_MULTILINE = "MULTILINE"
DM_PROPERTY_POLYMORPH = "POLYMORPH"
DM_PROPERTY_INTEGER = "INTEGER"
DM_PROPERTY_FLOAT = "FLOAT"
DM_SYSTEM_CLASS = 'System'
DM_VARIABLE_CLASS = 'Variable'
DM_CAN_INSTANTIATE = 0
DM_CAN_LOADINFO = 1
DM_CAN_ADDPROPERTY = 2
# message types
ME_PLAINMESSAGE = 0
ME_OKCANCEL = 1
ME_YESNO = 2
ME_WARNING = 3
ME_ERROR = 4
ME_STATUSBAR = 5
ME_RESULT_OK = OK_PRESSED
ME_RESULT_CANCEL = CANCEL_PRESSED
# propertyattributes
ME_GETTABLE_FLAG = MS_GETTABLE_FLAG
ME_SETTABLE_FLAG = MS_SETTABLE_FLAG
ME_LOADABLE_FLAG = MS_LOADABLE_FLAG
ME_SAVEABLE_FLAG = MS_SAVEABLE_FLAG
ME_DELETEABLE_FLAG = MS_DELETEABLE_FLAG
ME_CHANGED_FLAG = MS_CHANGED_FLAG
ME_ROOTID = 'System::/'
# varref indexes
ME_VARREF_NAME = 0
ME_VARREF_FULLID = 1
ME_VARREF_COEF = 2
# entity types
ME_SYSTEM_TYPE = "System"
ME_VARIABLE_TYPE = "Variable"
ME_PROCESS_TYPE = "Process"
ME_STEPPER_TYPE = "Stepper"
ME_PROPERTY_TYPE = "Property"
# special properties
ME_STEPPER_SYSTEMLIST = MS_STEPPER_SYSTEMLIST
ME_STEPPER_PROCESSLIST = MS_STEPPER_PROCESSLIST
ME_VARIABLE_PROCESSLIST = MS_VARIABLE_PROCESSLIST
ME_PROCESS_VARREFLIST = MS_PROCESS_VARREFLIST
ME_STEPPERID = MS_SYSTEM_STEPPERID
#special editor type
ME_ENTITY_EDITOR = 'ME_ENTITY_EDITOR'
ME_CONNNECTION_OBJ_EDITOR = 'ME_CONNNECTION_OBJ_EDITOR'
# undo
MAX_REDOABLE_COMMAND = 100
# ADCP flags
ME_ADD_FLAG = 0
ME_DELETE_FLAG = 1
ME_COPY_FLAG = 2
ME_PASTE_FLAG = 3
ME_EDIT_FLAG = 4
ME_BROWSE_FLAG = 5
ME_APPEND_FLAG = 6
ME_FLAGS_NO = 7
# varrefs
ME_VARREF_FULLID = 1
### PATHWAYEDITOR CONSTANTS
# direction
# Bitmask-style direction codes; diagonals are the sum of their components
# (e.g. DIRECTION_TOP_LEFT = UP(4) + LEFT(1) = 5).
DIRECTION_UP = 4
DIRECTION_DOWN = 8
DIRECTION_LEFT = 1
DIRECTION_RIGHT = 2
DIRECTION_BOTTOM_RIGHT=10
DIRECTION_BOTTOM_LEFT=9
DIRECTION_TOP_RIGHT=6
DIRECTION_TOP_LEFT=5
# CURSOR types
CU_POINTER = 0
CU_MOVE = 1
CU_ADD = 2
CU_RESIZE_TOP_LEFT = 3
CU_RESIZE_TOP = 4
CU_RESIZE_TOP_RIGHT = 5
CU_RESIZE_RIGHT = 6
CU_RESIZE_BOTTOM_RIGHT = 7
CU_RESIZE_BOTTOM = 8
CU_RESIZE_BOTTOM_LEFT = 9
CU_RESIZE_LEFT = 10
CU_CONN_INIT = 11
CU_CROSS=12
#4- up, 5, down,
# object types
OB_TYPE_PROCESS = "Process"
OB_TYPE_VARIABLE = "Variable"
OB_TYPE_SYSTEM = "System"
OB_TYPE_TEXT = "Text"
OB_TYPE_CONNECTION = "Connection"
OB_NOTHING = "NOTHING"
OB_SHOW_LABEL=1
# Minimum pixel dimensions for editor objects.
# NOTE: "HEIGTH" typo preserved -- the name is part of the public interface.
OB_MIN_WIDTH = 80
OB_MIN_HEIGTH = 40
# object properties
OB_POS_X = 'x' #omitted
OB_POS_Y = 'y' #omitted
OB_FULLID = 'FullID' #rename action
OB_LABEL = 'Label'
OB_MINLABEL='Min Label Length'
OB_STEPPERID = 'StepperID' #omitted
OB_TYPE = 'Type' #omitted
OB_DIMENSION_X = 'DimensionX' #omitted in Editor Object, resize action in SystemObject
OB_DIMENSION_Y = 'DimensionY' #omitted in Editor Object, resize action in SystemObject
OB_HASFULLID = 'HasFullID' #omitted
OB_OUTLINE_COLOR = "OutlineColor" #outline change action
OB_FILL_COLOR = "FillColor" #fill change action
OB_TEXT_COLOR = "TextColor" # textcolot change action
OB_OUTLINE_WIDTH = "Outline" #outline change action
OB_SHAPE_TYPE = "ShapeType" #shapetype change action
OB_SHAPEDESCRIPTORLIST = "ShapeDescriptorList" # cannot change: omitted
# connection constants
# directions
PROCESS_TO_VARIABLE = 0
VARIABLE_TO_PROCESS = 1
# connection properties
CO_PROCESS_ATTACHED = "ProcessAttached" # this is an ID or None! #omitted
CO_VARIABLE_ATTACHED = "VariableAttached" # this is an ID or None! #omitted
CO_PROCESS_RING = "ProcessRing" #omitted
CO_VARIABLE_RING = "VariableRing" #omitted
CO_NAME = "Name" #rename action
CO_COEF = "Coefficient" # change coef action
CO_ISRELATIVE = "Isrelative" # omitted
# lower level connection properties
CO_LINETYPE = "LineType" # change linetype action
CO_LINEWIDTH = "LineWidth" # change linewidth
CO_CONTROL_POINTS = "ControlPoints"
CO_ENDPOINT1 = "Endpoint1" # omitted
CO_ENDPOINT2 = "Endpoint2" # omitted
CO_DIRECTION1 = "Direction1" #omitted
CO_DIRECTION2 = "Direction2" #omitted
CO_HASARROW1 = "Hasarrow1" #change arrow action
CO_HASARROW2 = "Hasarrow2" #change arrow action
CO_ATTACHMENT1TYPE = "Attachment1Type" # attached thing to endpoint 1 ( Process, Variable, Nothing ) #omitted
CO_ATTACHMENT2TYPE = "Attachment2Type" #omitted
CO_USEROVERRIDEARROW = "UserSetArrow" #omitted
# process properties
PR_CONNECTIONLIST = "ConnectionList" #omitted
# variable properties
VR_CONNECTIONLIST = "ConnectionList" #omitted
# system properties
SY_INSIDE_DIMENSION_X = "InsideDimensionX" #omitted
SY_INSIDE_DIMENSION_Y = "InsideDimensionY" #omitted
SY_PASTE_CONNECTIONLIST = "PasteConnectionList" #omitted
# selector variable
PE_SELECTOR = "Selector"
PE_VARIABLE ="Variable"
PE_PROCESS = "Process"
PE_SYSTEM= "System"
PE_CUSTOM = "Custom"
PE_TEXT = "Text"
# local/global properties
# Properties that can be edited globally vs. only on the model layer.
GLOBALPROPERTYSET = [ CO_COEF, CO_NAME, OB_SHAPE_TYPE, CO_LINETYPE ]
MODELPROPERTYSET = [ CO_COEF, CO_NAME ]
### CANVAS CONSTANTS
# shape types
SHAPE_TYPE_SQUARE = "Square"
SHAPE_TYPE_VARIABLE = "Variable"
SHAPE_TYPE_PROCESS = "Process"
SHAPE_TYPE_SYSTEM = "System"
SHAPE_TYPE_TEXTBOX = "TextBox"
SHAPE_TYPE_CUSTOM = "Custom"
SHAPE_TYPE_STRAIGHT_LINE = "Straight"
SHAPE_TYPE_CORNERED_LINE = "Cornered"
SHAPE_TYPE_CURVED_LINE = "Curved"
SHAPE_TYPE_MULTIBCURVE_LINE = "MultiBezierCurve"
# layout properties
LO_SCROLL_REGION = "ScrollRegion" # list of int
LO_ZOOM_RATIO ="Zoom ratio"
LO_ROOT_SYSTEM = "Rootsystem"
#SHAPEDESCRIPTOR properties
# Index positions within a shape-descriptor tuple/list.
SD_NAME = 0
SD_TYPE = 1
SD_FUNCTION = 2
SD_COLOR = 3
SD_Z = 4
SD_SPECIFIC = 5
SD_PROPERTIES = 6
#specific descriptors
# Index positions for rectangle coordinate components (relative + absolute).
RECT_RELX1 = 0
RECT_RELY1 = 1
RECT_RELX2 = 2
RECT_RELY2 = 3
RECT_ABSX1 = 4
RECT_ABSY1 = 5
RECT_ABSX2 = 6
RECT_ABSY2 = 7
#for rect. line, ellipse specific
# NOTE(review): SPEC_WIDTH_RATIO and SPEC_LABEL share index 1 -- presumably
# they apply to different shape kinds; confirm before changing either.
SPEC_POINTS=0
SPEC_WIDTH_RATIO = 1
SPEC_LABEL = 1
LINE_POINTS = 0 # [ x1abs, x1rel, y1abs, y1rel,... ] for connection lines it x1, y1, x2, y2
LINE_WIDTH = 1
TEXT_TEXT = 0
TEXT_RELX = 1
TEXT_RELY = 2
TEXT_ABSX = 3
TEXT_ABSY = 4
BPATH_PATHDEF=0
BPATH_WIDTH = 1
# parameters for SD_FUNCTION and SD_COLOR
SD_OUTLINE = 0
SD_FILL = 1
SD_TEXT = 2
SD_RING = 3 # initates connectionobject by mousedrag
SD_NONE = 4 # does nothing by mousedrag
SD_SYSTEM_CANVAS = 5
SD_ARROWHEAD = 6
SD_FIXEDLINE = 7
SD_MOVINGLINE = 8
IMG_FILENAME = 1
#gnomecanvasobjects:
# Canvas item type names used when instantiating canvas primitives.
CV_RECT = "CanvasRect"
CV_ELL = "CanvasEllipse"
CV_TEXT = "CanvasText"
CV_LINE = "CanvasLine"
CV_BPATH = "CanvasBPath"
CV_IMG = "CanvasWidget"
# parameters for minimum SYSTEM_TYPE dimensions
SYS_MINWIDTH=230
SYS_MINHEIGHT=200
SYS_MINLABEL=29
# parameters for minimum VARIABLE_TYPE dimensions
VAR_MINWIDTH=10
VAR_MINHEIGHT=10
VAR_MINLABEL=5
# parameters for minimum PROCESS_TYPE dimensions
PRO_MINWIDTH=10
PRO_MINHEIGHT=10
PRO_MINLABEL=5
# parameters for minimum TEXT_TYPE dimensions
TEXT_MINWIDTH=205
TEXT_MINHEIGHT=27
# attachment points
RING_TOP = "RingTop"
RING_BOTTOM = "RingBottom"
RING_LEFT = "RingLeft"
RING_RIGHT = "RingRight"
# Angle (degrees) of each attachment ring around an object's center.
RINGANGLES = { RING_TOP:90, RING_BOTTOM :270, RING_LEFT:180, RING_RIGHT:0 }
# line parts
ARROWHEAD1 = "arrowhead1"
ARROWHEAD2 = "arrowhead2"
EXTEND1 = "extendline1"
EXTEND2 = "extendline2"
# Length of connection arrowheads, in pixels.
ARROWHEAD_LENGTH = 10
| ecell/ecell3 | ecell/frontend/model-editor/ecell/ui/model_editor/Constants.py | Python | lgpl-3.0 | 10,477 |
# Copyright (c) 2013 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import falcon
import six
from marconi.openstack.common.gettextutils import _
import marconi.openstack.common.log as logging
from marconi.queues.storage import errors as storage_errors
from marconi.queues.transport import utils
from marconi.queues.transport import validation
from marconi.queues.transport.wsgi import errors as wsgi_errors
from marconi.queues.transport.wsgi import utils as wsgi_utils
# Module-level logger for the messages WSGI resources.
LOG = logging.getLogger(__name__)
# Fields accepted in a message POST body: 'ttl' must be an int,
# 'body' may be any JSON value ('*' = no type restriction).
MESSAGE_POST_SPEC = (('ttl', int), ('body', '*'))
class CollectionResource(object):

    """WSGI resource handling the message collection of a queue.

    Supports enqueueing (POST), listing / bulk-get (GET) and
    bulk-delete (DELETE) of messages.

    Attributes:
        _wsgi_conf: WSGI transport configuration options.
        _validate: Validator enforcing API limits (paging, sizes, ...).
        message_controller: Storage controller used to read and write
            messages.
    """

    __slots__ = ('message_controller', '_wsgi_conf', '_validate')

    def __init__(self, wsgi_conf, validate, message_controller):
        self._wsgi_conf = wsgi_conf
        self._validate = validate
        self.message_controller = message_controller

    #-----------------------------------------------------------------------
    # Helpers
    #-----------------------------------------------------------------------

    def _get_by_id(self, base_path, project_id, queue_name, ids):
        """Returns one or more messages from the queue by ID.

        :param base_path: request path, used to build message hrefs
        :param project_id: project scope of the queue
        :param queue_name: name of the queue holding the messages
        :param ids: list of message IDs to fetch
        :returns: list of message dicts, or None if nothing was found
        :raises: HTTPBadRequestAPI, HTTPServiceUnavailable
        """
        try:
            self._validate.message_listing(limit=len(ids))
            messages = self.message_controller.bulk_get(
                queue_name,
                message_ids=ids,
                project=project_id)
        except validation.ValidationFailed as ex:
            LOG.debug(ex)
            raise wsgi_errors.HTTPBadRequestAPI(six.text_type(ex))
        except Exception as ex:
            LOG.exception(ex)
            description = _(u'Message could not be retrieved.')
            raise wsgi_errors.HTTPServiceUnavailable(description)

        # Prepare response
        messages = list(messages)
        if not messages:
            return None

        # Replace each internal ID with a client-facing href.
        base_path += '/'
        for each_message in messages:
            each_message['href'] = base_path + each_message['id']
            del each_message['id']

        return messages

    def _get(self, req, project_id, queue_name):
        """List messages, paging via marker/limit/echo query params.

        :returns: dict with 'messages' and a 'next' link, or None when
            the listing produced no messages
        :raises: HTTPBadRequestAPI, HTTPNotFound, HTTPServiceUnavailable
        """
        client_uuid = wsgi_utils.get_client_uuid(req)
        kwargs = {}

        # NOTE(kgriffs): This syntax ensures that
        # we don't clobber default values with None.
        req.get_param('marker', store=kwargs)
        req.get_param_as_int('limit', store=kwargs)
        req.get_param_as_bool('echo', store=kwargs)
        req.get_param_as_bool('include_claimed', store=kwargs)

        try:
            self._validate.message_listing(**kwargs)
            results = self.message_controller.list(
                queue_name,
                project=project_id,
                client_uuid=client_uuid,
                **kwargs)

            # Buffer messages
            cursor = next(results)
            messages = list(cursor)
        except validation.ValidationFailed as ex:
            LOG.debug(ex)
            raise wsgi_errors.HTTPBadRequestAPI(six.text_type(ex))
        except storage_errors.DoesNotExist as ex:
            LOG.debug(ex)
            raise falcon.HTTPNotFound()
        except Exception as ex:
            LOG.exception(ex)
            description = _(u'Messages could not be listed.')
            raise wsgi_errors.HTTPServiceUnavailable(description)

        if not messages:
            return None

        # Found some messages, so prepare the response;
        # the second item yielded by the cursor is the next-page marker.
        kwargs['marker'] = next(results)
        for each_message in messages:
            each_message['href'] = req.path + '/' + each_message['id']
            del each_message['id']

        return {
            'messages': messages,
            'links': [
                {
                    'rel': 'next',
                    'href': req.path + falcon.to_query_str(kwargs)
                }
            ]
        }

    #-----------------------------------------------------------------------
    # Interface
    #-----------------------------------------------------------------------

    def on_post(self, req, resp, project_id, queue_name):
        """Enqueue one or more messages; responds 201 with their hrefs."""
        LOG.debug(u'Messages collection POST - queue: %(queue)s, '
                  u'project: %(project)s',
                  {'queue': queue_name, 'project': project_id})

        client_uuid = wsgi_utils.get_client_uuid(req)

        try:
            # Place JSON size restriction before parsing
            self._validate.message_length(req.content_length)
        except validation.ValidationFailed as ex:
            LOG.debug(ex)
            raise wsgi_errors.HTTPBadRequestAPI(six.text_type(ex))

        # Pull out just the fields we care about
        messages = wsgi_utils.filter_stream(
            req.stream,
            req.content_length,
            MESSAGE_POST_SPEC,
            doctype=wsgi_utils.JSONArray)

        # Enqueue the messages
        partial = False

        try:
            self._validate.message_posting(messages)

            message_ids = self.message_controller.post(
                queue_name,
                messages=messages,
                project=project_id,
                client_uuid=client_uuid)
        except validation.ValidationFailed as ex:
            LOG.debug(ex)
            raise wsgi_errors.HTTPBadRequestAPI(six.text_type(ex))
        except storage_errors.DoesNotExist as ex:
            LOG.debug(ex)
            raise falcon.HTTPNotFound()
        except storage_errors.MessageConflict as ex:
            # Some (or all) messages failed; report success for the rest.
            LOG.exception(ex)
            partial = True
            message_ids = ex.succeeded_ids

            if not message_ids:
                # TODO(kgriffs): Include error code that is different
                # from the code used in the generic case, below.
                description = _(u'No messages could be enqueued.')
                raise wsgi_errors.HTTPServiceUnavailable(description)
        except Exception as ex:
            LOG.exception(ex)
            description = _(u'Messages could not be enqueued.')
            raise wsgi_errors.HTTPServiceUnavailable(description)

        # Prepare the response
        ids_value = ','.join(message_ids)
        resp.location = req.path + '?ids=' + ids_value

        # NOTE: loop variable renamed from `id` to avoid shadowing the
        # `id()` builtin.
        hrefs = [req.path + '/' + message_id for message_id in message_ids]
        body = {'resources': hrefs, 'partial': partial}
        resp.body = utils.to_json(body)

        resp.status = falcon.HTTP_201

    def on_get(self, req, resp, project_id, queue_name):
        """List messages, or bulk-get them when `ids` is given."""
        LOG.debug(u'Messages collection GET - queue: %(queue)s, '
                  u'project: %(project)s',
                  {'queue': queue_name, 'project': project_id})

        resp.content_location = req.relative_uri

        ids = req.get_param_as_list('ids')

        if ids is None:
            response = self._get(req, project_id, queue_name)
        else:
            response = self._get_by_id(req.path, project_id, queue_name, ids)

        if response is None:
            # No messages to return; empty body with 204.
            resp.status = falcon.HTTP_204
            return

        resp.body = utils.to_json(response)
        # status defaults to 200

    def on_delete(self, req, resp, project_id, queue_name):
        """Bulk-delete messages by ID; responds 204."""
        # NOTE(zyuan): Attempt to delete the whole message collection
        # (without an "ids" parameter) is not allowed
        ids = req.get_param_as_list('ids', required=True)

        try:
            self._validate.message_listing(limit=len(ids))
            self.message_controller.bulk_delete(
                queue_name,
                message_ids=ids,
                project=project_id)
        except validation.ValidationFailed as ex:
            LOG.debug(ex)
            raise wsgi_errors.HTTPBadRequestAPI(six.text_type(ex))
        except Exception as ex:
            LOG.exception(ex)
            description = _(u'Messages could not be deleted.')
            raise wsgi_errors.HTTPServiceUnavailable(description)

        resp.status = falcon.HTTP_204
class ItemResource(object):

    """WSGI resource for a single message, addressed by its ID.

    Attributes:
        message_controller: Storage controller used to read and delete
            messages.
    """

    # NOTE: the trailing comma matters. The original bare string
    # ('message_controller') only worked because __slots__ also accepts a
    # single string; a one-element tuple states the intent and matches the
    # style of CollectionResource.__slots__.
    __slots__ = ('message_controller',)

    def __init__(self, message_controller):
        self.message_controller = message_controller

    def on_get(self, req, resp, project_id, queue_name, message_id):
        """Fetch a single message and return it as JSON (200)."""
        LOG.debug(u'Messages item GET - message: %(message)s, '
                  u'queue: %(queue)s, project: %(project)s',
                  {'message': message_id,
                   'queue': queue_name,
                   'project': project_id})
        try:
            message = self.message_controller.get(
                queue_name,
                message_id,
                project=project_id)
        except storage_errors.DoesNotExist as ex:
            LOG.debug(ex)
            raise falcon.HTTPNotFound()
        except Exception as ex:
            LOG.exception(ex)
            description = _(u'Message could not be retrieved.')
            raise wsgi_errors.HTTPServiceUnavailable(description)

        # Prepare response: expose an href instead of the internal ID.
        message['href'] = req.path
        del message['id']

        resp.content_location = req.relative_uri
        resp.body = utils.to_json(message)
        # status defaults to 200

    def on_delete(self, req, resp, project_id, queue_name, message_id):
        """Delete one message, honoring an optional claim_id (204)."""
        LOG.debug(u'Messages item DELETE - message: %(message)s, '
                  u'queue: %(queue)s, project: %(project)s',
                  {'message': message_id,
                   'queue': queue_name,
                   'project': project_id})
        try:
            self.message_controller.delete(
                queue_name,
                message_id=message_id,
                project=project_id,
                claim=req.get_param('claim_id'))
        except storage_errors.NotPermitted as ex:
            # Claimed messages may only be deleted with their claim_id.
            LOG.exception(ex)
            title = _(u'Unable to delete')
            description = _(u'This message is claimed; it cannot be '
                            u'deleted without a valid claim_id.')
            raise falcon.HTTPForbidden(title, description)
        except Exception as ex:
            LOG.exception(ex)
            description = _(u'Message could not be deleted.')
            raise wsgi_errors.HTTPServiceUnavailable(description)

        # Alles guete
        resp.status = falcon.HTTP_204
| rackerlabs/marconi | marconi/queues/transport/wsgi/v1_1/messages.py | Python | apache-2.0 | 10,604 |
#!/usr/bin/env python
"""
Re-write config file and optionally convert to python
"""
__revision__ = "$Id: writeCfg.py,v 1.1 2011/09/19 21:41:44 paus Exp $"
__version__ = "$Revision: 1.1 $"
import getopt
import imp
import os
import pickle
import sys
import xml.dom.minidom
from random import SystemRandom
from ProdCommon.CMSConfigTools.ConfigAPI.CfgInterface import CfgInterface
import FWCore.ParameterSet.Types as CfgTypes
# Cryptographically strong RNG.
# NOTE(review): MyRandom is not referenced anywhere in this file's visible
# code -- possibly used by imported/legacy code; confirm before removing.
MyRandom = SystemRandom()
class ConfigException(Exception):
    """
    Exception type raised by writeCfg on configuration problems.
    """
    def __init__(self, msg):
        # Delegate to Exception so str()/args behave normally, and keep
        # a private copy of the message for __str__.
        Exception.__init__(self, msg)
        self._msg = msg

    def __str__(self):
        # Render as the plain message text.
        return self._msg
def main(argv) :
    """
    writeCfg
    - Read in existing, user supplied pycfg or pickled pycfg file
    - Modify job specific parameters based on environment variables and arguments.xml
    - Write out pickled pycfg file
    required parameters: none
    optional parameters:
    --help : help
    --debug : debug statements
    """
    # defaults
    inputFileNames = None
    parentFileNames = None
    debug = False
    # NOTE(review): _MAXINT is never used in this function -- leftover?
    _MAXINT = 900000000
    try:
        opts, args = getopt.getopt(argv, "", ["debug", "help"])
    except getopt.GetoptError:
        print main.__doc__
        sys.exit(2)
    # Determine the CMSSW release from the environment, e.g. CMSSW_5_3_14.
    try:
        CMSSW = os.environ['CMSSW_VERSION']
        parts = CMSSW.split('_')
        CMSSW_major = int(parts[1])
        CMSSW_minor = int(parts[2])
        CMSSW_patch = int(parts[3])
    except (KeyError, ValueError):
        msg = "Your environment doesn't specify the CMSSW version or specifies it incorrectly"
        raise ConfigException(msg)
    # Parse command line options
    for opt, arg in opts :
        if opt == "--help" :
            print main.__doc__
            sys.exit()
        elif opt == "--debug" :
            debug = True
    # Parse remaining parameters
    try:
        fileName = args[0]
        outFileName = args[1]
    except IndexError:
        print main.__doc__
        sys.exit()
    # Read in Environment, XML and get optional Parameters
    nJob = int(os.environ.get('NJob', '0'))
    preserveSeeds = os.environ.get('PreserveSeeds','')
    incrementSeeds = os.environ.get('IncrementSeeds','')
    # Defaults
    maxEvents = 0
    skipEvents = 0
    firstEvent = -1
    compHEPFirstEvent = 0
    firstRun = 0
    # FUTURE: Remove firstRun
    firstLumi = 0
    # Pick up per-job parameters from the CRAB arguments.xml file.
    dom = xml.dom.minidom.parse(os.environ['RUNTIME_AREA']+'/arguments.xml')
    for elem in dom.getElementsByTagName("Job"):
        if nJob == int(elem.getAttribute("JobID")):
            if elem.getAttribute("MaxEvents"):
                maxEvents = int(elem.getAttribute("MaxEvents"))
            if elem.getAttribute("SkipEvents"):
                skipEvents = int(elem.getAttribute("SkipEvents"))
            if elem.getAttribute("FirstEvent"):
                firstEvent = int(elem.getAttribute("FirstEvent"))
            if elem.getAttribute("FirstRun"):
                firstRun = int(elem.getAttribute("FirstRun"))
            if elem.getAttribute("FirstLumi"):
                firstLumi = int(elem.getAttribute("FirstLumi"))
            # NOTE(review): if no <Job> matches nJob, the four names below
            # stay unbound and their later use raises NameError -- confirm
            # that arguments.xml always contains an entry for every NJob.
            generator = str(elem.getAttribute('Generator'))
            inputFiles = str(elem.getAttribute('InputFiles'))
            parentFiles = str(elem.getAttribute('ParentFiles'))
            lumis = str(elem.getAttribute('Lumis'))
    # Read Input python config file
    handle = open(fileName, 'r')
    try: # Nested form for Python < 2.5
        try:
            print "Importing .py file"
            cfo = imp.load_source("pycfg", fileName, handle)
            cmsProcess = cfo.process
        except Exception, ex:
            msg = "Your pycfg file is not valid python: %s" % str(ex)
            raise ConfigException(msg)
    finally:
        handle.close()
    cfg = CfgInterface(cmsProcess)
    # Set parameters for job
    print "Setting parameters"
    inModule = cfg.inputSource
    if maxEvents:
        cfg.maxEvents.setMaxEventsInput(maxEvents)
    if skipEvents:
        inModule.setSkipEvents(skipEvents)
    # Set "skip events" for various generators
    if generator == 'comphep':
        cmsProcess.source.CompHEPFirstEvent = CfgTypes.int32(firstEvent)
    elif generator == 'lhe':
        cmsProcess.source.skipEvents = CfgTypes.untracked(CfgTypes.uint32(firstEvent))
        cmsProcess.source.firstEvent = CfgTypes.untracked(CfgTypes.uint32(firstEvent+1))
    elif firstEvent != -1: # (Old? Madgraph)
        cmsProcess.source.firstEvent = CfgTypes.untracked(CfgTypes.uint32(firstEvent))
    if inputFiles:
        inputFileNames = inputFiles.split(',')
        inModule.setFileNames(*inputFileNames)
    # handle parent files if needed
    if parentFiles:
        parentFileNames = parentFiles.split(',')
        inModule.setSecondaryFileNames(*parentFileNames)
    if lumis:
        if CMSSW_major < 3: # FUTURE: Can remove this check
            print "Cannot skip lumis for CMSSW 2_x"
        else:
            lumiRanges = lumis.split(',')
            inModule.setLumisToProcess(*lumiRanges)
    # Pythia parameters
    if (firstRun):
        inModule.setFirstRun(firstRun)
    if (firstLumi):
        inModule.setFirstLumi(firstLumi)
    # Check if there are random #'s to deal with
    if cfg.data.services.has_key('RandomNumberGeneratorService'):
        print "RandomNumberGeneratorService found, will attempt to change seeds"
        from IOMC.RandomEngine.RandomServiceHelper import RandomNumberServiceHelper
        ranGenerator = cfg.data.services['RandomNumberGeneratorService']
        randSvc = RandomNumberServiceHelper(ranGenerator)
        incrementSeedList = []
        preserveSeedList = []
        if incrementSeeds:
            incrementSeedList = incrementSeeds.split(',')
        if preserveSeeds:
            preserveSeedList = preserveSeeds.split(',')
        # Increment requested seed sets
        for seedName in incrementSeedList:
            curSeeds = randSvc.getNamedSeed(seedName)
            newSeeds = [x+nJob for x in curSeeds]
            randSvc.setNamedSeed(seedName, *newSeeds)
            preserveSeedList.append(seedName)
        # Randomize remaining seeds
        randSvc.populate(*preserveSeedList)
    # Write out new config file: a small python wrapper that unpickles
    # the modified process object at load time.
    outFile = open(outFileName,"w")
    outFile.write("import FWCore.ParameterSet.Config as cms\n")
    outFile.write("import pickle\n")
    outFile.write("pickledCfg=\"\"\"%s\"\"\"\n" % pickle.dumps(cmsProcess))
    outFile.write("process = pickle.loads(pickledCfg)\n")
    outFile.close()
    if (debug):
        print "writeCfg output (May not be exact):"
        print "import FWCore.ParameterSet.Config as cms"
        print cmsProcess.dumpPython()
# Script entry point. main() has no explicit return statement, so
# exit_status is always None and sys.exit(None) reports success (code 0).
if __name__ == '__main__' :
    exit_status = main(sys.argv[1:])
    sys.exit(exit_status)
| cpausmit/Kraken | filefi/024/writeCfg.py | Python | mit | 6,916 |
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2016 Florian Bruhin (The Compiler) <[email protected]>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Setting sections used for qutebrowser."""
import collections
from qutebrowser.config import value as confvalue
class Section:

    """Common behavior shared by the KeyValue and ValueList sections.

    Attributes:
        _readonly: True if this section must not be modified.
        values: Mapping of key (string) -> value (SettingValue),
                populated by subclasses.
        descriptions: Mapping of key -> description string.
    """

    def __init__(self):
        self.values = None
        self.descriptions = {}
        self._readonly = False

    def __getitem__(self, key):
        """Return the value stored under *key* (a value-class instance)."""
        return self.values[key]

    def __iter__(self):
        """Iterate over all keys that have a value set."""
        return iter(self.values)

    def __bool__(self):
        """A section is truthy when it holds at least one value."""
        return bool(self.values)

    def __contains__(self, key):
        """Check whether *key* has a value in this section."""
        return key in self.values

    def items(self):
        """Return a (key, value) item view of the stored values."""
        return self.values.items()

    def keys(self):
        """Return a view of the stored keys."""
        return self.values.keys()

    def delete(self, key):
        """Remove the entry stored under *key*."""
        del self.values[key]

    def setv(self, layer, key, value, interpolated):
        """Set a value on a layer (must be implemented by subclasses).

        Args:
            layer: Name of the layer to set the value on.
            key: Key of the element to set.
            value: The value to set.
            interpolated: The interpolated value, for checking, or None.
        """
        raise NotImplementedError

    def dump_userconfig(self):
        """Return the user-changed part as (key, valuestr) tuples.

        Must be implemented by subclasses.
        """
        raise NotImplementedError
class KeyValue(Section):

    """Section with ordinary "key = value" pairs and a fixed key set.

    If constructed without defaults, ``values`` stays None and the section
    is effectively empty.
    """

    def __init__(self, *defaults, readonly=False):
        """Constructor.

        Args:
            *defaults: (key, value, description) triples, one per key.
            readonly: Whether this config is readonly.
        """
        super().__init__()
        self._readonly = readonly
        if not defaults:
            return
        self.values = collections.OrderedDict()
        for (key, value, description) in defaults:
            assert key not in self.values, key
            self.values[key] = value
            self.descriptions[key] = description

    def setv(self, layer, key, value, interpolated):
        """Delegate the set to the stored value, refusing when read-only."""
        if self._readonly:
            raise ValueError("Trying to modify a read-only config!")
        self.values[key].setv(layer, value, interpolated)

    def dump_userconfig(self):
        """Collect values overridden on the temp or conf layer."""
        changed = []
        for key, value in self.items():
            layers = value.values
            temp, conf, default = (layers['temp'], layers['conf'],
                                   layers['default'])
            if temp is not None and temp != default:
                changed.append((key, temp))
            elif conf is not None and conf != default:
                changed.append((key, conf))
        return changed
class ValueList(Section):
    """This class represents a section with a list key-value settings.
    These are settings inside sections which don't have fixed keys, but instead
    have a dynamic list of "key = value" pairs, like key bindings or
    searchengines.
    They basically consist of two different SettingValues.
    Attributes:
        layers: An OrderedDict of the config layers.
        keytype: The type to use for the key (only used for validating)
        valtype: The type to use for the value.
        _ordered_value_cache: A ChainMap-like OrderedDict of all values.
        _readonly: Whether this section is read-only.
    """
    def __init__(self, keytype, valtype, *defaults, readonly=False):
        """Wrap types over default values. Take care when overriding this.
        Args:
            keytype: The type instance to be used for keys.
            valtype: The type instance to be used for values.
            *defaults: A (key, value) list of default values.
            readonly: Whether this config is readonly.
        """
        super().__init__()
        self._readonly = readonly
        # Cache for _ordered_values(); None means "needs rebuilding".
        self._ordered_value_cache = None
        self.keytype = keytype
        self.valtype = valtype
        # Lowest-priority layer first; the ChainMap below reverses this
        # so that 'temp' shadows 'conf' which shadows 'default'.
        self.layers = collections.OrderedDict([
            ('default', collections.OrderedDict()),
            ('conf', collections.OrderedDict()),
            ('temp', collections.OrderedDict()),
        ])
        defaultlayer = self.layers['default']
        for key, value in defaults:
            assert key not in defaultlayer, key
            defaultlayer[key] = confvalue.SettingValue(valtype, value)
        self.values = collections.ChainMap(
            self.layers['temp'], self.layers['conf'], self.layers['default'])
    def _ordered_values(self):
        """Get ordered values in layers.
        This is more expensive than the ChainMap, but we need this for
        iterating/items/etc. when order matters.
        """
        if self._ordered_value_cache is None:
            self._ordered_value_cache = collections.OrderedDict()
            for layer in self.layers.values():
                self._ordered_value_cache.update(layer)
        return self._ordered_value_cache
    def setv(self, layer, key, value, interpolated):
        """Set key/value on the given layer, creating the entry if needed.
        Invalidates the ordered-value cache on every call.
        """
        if self._readonly:
            raise ValueError("Trying to modify a read-only config!")
        self.keytype.validate(key)
        if key in self.layers[layer]:
            self.layers[layer][key].setv(layer, value, interpolated)
        else:
            val = confvalue.SettingValue(self.valtype)
            val.setv(layer, value, interpolated)
            self.layers[layer][key] = val
        self._ordered_value_cache = None
    def dump_userconfig(self):
        """Return (key, value) tuples that differ from the defaults.
        Keys only present on the temp/conf layers (no default at all)
        are reported via the KeyError branch.
        """
        changed = []
        mapping = collections.ChainMap(self.layers['temp'],
                                       self.layers['conf'])
        for k, v in mapping.items():
            try:
                if v.value() != self.layers['default'][k].value():
                    changed.append((k, v.value()))
            except KeyError:
                changed.append((k, v.value()))
        return changed
    def __iter__(self):
        """Iterate over all set values."""
        return self._ordered_values().__iter__()
    def items(self):
        """Get dict items."""
        return self._ordered_values().items()
    def keys(self):
        """Get value keys."""
        return self._ordered_values().keys()
| Konubinix/qutebrowser | qutebrowser/config/sections.py | Python | gpl-3.0 | 7,668 |
"""Tests for the loqusdb executable extension"""
import subprocess
import pytest
from scout.exceptions.config import ConfigError
from scout.server.app import create_app
from scout.server.extensions import LoqusDB, loqus_extension, loqusdb
def test_set_coordinates_no_variant_type():
    """set_coordinates must leave a variant without a variant_type untouched."""
    # GIVEN a variant carrying only plain coordinates
    variant = {
        "_id": "1_10_A_C",
        "pos": 10,
        "end": 10,
        "length": 1,
    }
    # WHEN updating the coordinates
    LoqusDB.set_coordinates(variant)
    # THEN every coordinate field keeps its original value
    assert variant["pos"] == 10
    assert variant["end"] == 10
    assert variant["length"] == 1
def test_set_coordinates_ins():
    """set_coordinates must extend the end coordinate of an insertion."""
    # GIVEN an INS variant whose end equals its start
    variant = {
        "_id": "1_10_INS",
        "pos": 10,
        "end": 10,
        "length": 10,
        "variant_type": "INS",
    }
    # WHEN updating the coordinates
    LoqusDB.set_coordinates(variant)
    # THEN the end is pushed forward by the insertion length,
    # while pos and length stay the same
    assert variant["pos"] == 10
    assert variant["end"] == 20
    assert variant["length"] == 10
def test_set_coordinates_unknown_ins():
    """set_coordinates must not move coordinates when the INS length is unknown."""
    # GIVEN an INS variant with the sentinel length -1 (unknown)
    variant = {
        "_id": "1_10_INS",
        "pos": 10,
        "end": 10,
        "length": -1,
        "variant_type": "INS",
    }
    # WHEN updating the coordinates
    LoqusDB.set_coordinates(variant)
    # THEN nothing changes, since the insertion size cannot be applied
    assert variant["pos"] == 10
    assert variant["end"] == 10
    assert variant["length"] == -1
def test_get_bin_path_wrong_instance(loqus_exe_app):
    """Test get_bin_path when the requested loqusdb instance is not configured."""
    # When the get_bin_path function is invoked for a non-existing loqus instance
    with loqus_exe_app.app_context():
        # THEN it should raise ConfigError
        with pytest.raises(ConfigError):
            assert loqusdb.get_bin_path(loqusdb_id="FOO")
def test_get_config_path_wrong_instance(loqus_exe_app):
    """Test get_config_path when the requested loqusdb instance is not configured."""
    # When the get_config_path function is invoked for a non-existing loqus instance
    with loqus_exe_app.app_context():
        # THEN it should raise ConfigError
        with pytest.raises(ConfigError):
            assert loqusdb.get_config_path(loqusdb_id="FOO")
def test_get_exec_loqus_version_CalledProcessError(loqus_exe_app, monkeypatch):
    """Test the error triggered when retrieving the version of a LoqusDB instance not properly configured"""
    # GIVEN a mocked subprocess that gives error
    def mocksubprocess(*args, **kwargs):
        raise subprocess.CalledProcessError(None, None)
    monkeypatch.setattr(subprocess, "check_output", mocksubprocess)
    # When the get_exec_loqus_version function is invoked
    with loqus_exe_app.app_context():
        # It will trigger the same error and return -1
        # (the extension's fallback version string "-1.0")
        assert loqusdb.get_exec_loqus_version("default") == "-1.0"
def test_loqusdb_exe_variant(loqus_exe_app, monkeypatch, loqus_exe_variant):
    """Test fetching a variant from a loqusdb executable instance."""
    # GIVEN a mocked subprocess command returning the canned variant fixture
    def mockcommand(*args):
        return loqus_exe_variant
    monkeypatch.setattr(loqus_extension, "execute_command", mockcommand)
    with loqus_exe_app.app_context():
        # WHEN fetching the variant info
        var_info = loqusdb.get_variant({"_id": "a variant"})
        # THEN assert the info was parsed correct
        assert var_info["total"] == 3
def test_loqusdb_exe_variant_CalledProcessError(loqus_exe_app, monkeypatch):
    """Test fetching a variant from loqusdb executable that raises an exception"""
    # GIVEN replacing subprocess.check_output to raise CalledProcessError
    def mockcommand(*args):
        raise subprocess.CalledProcessError(123, "case_count")
    monkeypatch.setattr(loqus_extension, "execute_command", mockcommand)
    with loqus_exe_app.app_context():
        # THEN raised exception is caught and an empty dict is returned
        assert {} == loqusdb.get_variant({"_id": "a variant"})
def test_loqusdb_exe_cases(loqus_exe_app, monkeypatch):
    """Test the case count function in loqus executable extension"""
    nr_cases = 15
    # GIVEN a return value from loqusdb using a mocker
    # (loqusdb output arrives as raw bytes from the subprocess)
    def mockcommand(*args):
        return_value = b"%d" % nr_cases
        return return_value
    monkeypatch.setattr(loqus_extension, "execute_command", mockcommand)
    with loqus_exe_app.app_context():
        # WHEN fetching the number of cases
        res = loqusdb.case_count(variant_category="snv")
        # THEN assert the output is parsed correct
        assert res == nr_cases
def test_loqusdb_exe_cases_ValueError(loqus_exe_app, monkeypatch):
    """Test the case count function when loqusdb returns unparseable output."""
    # GIVEN a return value from loqusdb which is not an int
    def mockcommand(*args):
        return "nonsense"
    monkeypatch.setattr(loqus_extension, "execute_command", mockcommand)
    with loqus_exe_app.app_context():
        # THEN assert a value error is raised, but passed, and 0 is returned
        assert loqusdb.case_count(variant_category="snv") == 0
def test_loqusdb_exe_case_count_CalledProcessError(loqus_exe_app, monkeypatch):
    """Test the case count function when the loqusdb subprocess fails."""
    # GIVEN replacing subprocess.check_output to raise CalledProcessError
    def mockcommand(*args):
        raise subprocess.CalledProcessError(123, "case_count")
    monkeypatch.setattr(loqus_extension, "execute_command", mockcommand)
    with loqus_exe_app.app_context():
        # THEN assert exception is caught and the value 0 is returned
        assert 0 == loqusdb.case_count(variant_category="snv")
def test_init_app_loqus_list(monkeypatch, loqus_exe, loqus_config):
    """Test creating a Loqus extension from a list of config params"""
    # GIVEN a mocked loqus exe instance returning a supported loqus version
    def mockcommand(*args):
        return "2.5"
    monkeypatch.setattr(loqus_extension, "execute_command", mockcommand)
    # The app should be created by providing LoqusDB params as a list
    app = create_app(
        config=dict(LOQUSDB_SETTINGS=[{"binary_path": loqus_exe, "loqusdb_config": loqus_config}])
    )
    assert app
def test_init_app_loqus_dict(monkeypatch, loqus_exe, loqus_config):
    """Test creating a Loqus extension from dictionary settings"""
    # GIVEN a mocked loqus exe instance returning a supported loqus version
    def mockcommand(*args):
        return "2.5"
    monkeypatch.setattr(loqus_extension, "execute_command", mockcommand)
    # The app should be created by providing LoqusDB params as a dictionary
    app = create_app(
        config=dict(
            LOQUSDB_SETTINGS={
                "id": "default",
                "binary_path": loqus_exe,
                "loqusdb_config": loqus_config,
            }
        )
    )
    assert app
def test_loqusdb_settings_list_to_dict():
    """Deprecated list-of-settings input becomes a dict keyed by instance id."""
    cfg_1 = {"id": "default", "binary_path": "test_binary"}
    cfg_2 = {"id": "test_exe", "api_url": "test_url"}
    # GIVEN the extension fed the deprecated list format
    loqusdb.settings_list_to_dict([cfg_1, cfg_2])
    # THEN settings is a dict with one entry per original list element,
    # each keyed by that element's "id"
    assert len(loqusdb.loqusdb_settings) == 2
    for cfg in (cfg_1, cfg_2):
        assert cfg["id"] in loqusdb.loqusdb_settings
| Clinical-Genomics/scout | tests/server/extensions/test_loqusdb_exe_extension.py | Python | bsd-3-clause | 7,957 |
# NOTE(review): ADMIN_MEDIA_PREFIX was deprecated in Django 1.4 in favour of
# serving admin assets through staticfiles - relevant only for older Django.
ADMIN_MEDIA_PREFIX = '/static/admin/'
# Apps enabled for the example project; 'categories' plus its editor and
# django-mptt are the packages under demonstration here.
INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.staticfiles',
    'django.contrib.flatpages',
    'categories',
    'categories.editor',
    'mptt',
    'simpletext',
    # 'south',
)
| gavinhodge/django-categories | example/settings13.py | Python | apache-2.0 | 363 |
# -*- coding: utf-8 -*-
# ###
# Copyright (c) 2013, Rice University
# This software is subject to the provisions of the GNU Affero General
# Public License version 3 (AGPLv3).
# See LICENCE.txt for details.
# ###
import os
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
__name__ = 'cnxpublishing'
def find_migrations_directory():  # pragma: no cover
    """Return the absolute path of the package's database migrations directory.

    This function is used from a setuptools entry-point for db-migrator.
    """
    package_dir = os.path.dirname(os.path.abspath(__file__))
    return os.path.join(package_dir, 'sql/migrations')
def make_wsgi_app(global_config, **settings):  # pragma: no cover
    """Application factory: configure the app from ``settings`` and return it."""
    from .config import configure
    env = configure(settings)
    return env.make_wsgi_app()
| Connexions/cnx-publishing | cnxpublishing/main.py | Python | agpl-3.0 | 838 |
# Generated by Django 2.2.17 on 2021-01-08 14:30
import django.contrib.auth.models
from django.db import migrations, models
def update_is_staff(apps, schema_editor):
    # Mirror superuser status onto the newly added is_staff flag.
    User = apps.get_model("users", "User")
    User.objects.filter(is_superuser=True).update(is_staff=True)
def empty_reverse(apps, schema_editor):
    # Intentional no-op: the data migration needs no undo logic, but
    # supplying a reverse callable keeps the migration reversible.
    pass
class Migration(migrations.Migration):
    # Adds the standard Django ``is_staff`` flag to the custom User model
    # and backfills it to True for existing superusers.

    dependencies = [
        ('users', '0030_auto_20201119_1031'),
    ]
    operations = [
        migrations.AlterModelManagers(
            name='user',
            managers=[
                ('objects', django.contrib.auth.models.UserManager()),
            ],
        ),
        migrations.AddField(
            model_name='user',
            name='is_staff',
            field=models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status'),
        ),
        # Data migration runs after the column exists; reverse is a no-op.
        migrations.RunPython(update_is_staff, empty_reverse)
    ]
| taigaio/taiga-back | taiga/users/migrations/0031_auto_20210108_1430.py | Python | agpl-3.0 | 980 |
import pytest
from sqlcomplete.parseutils.tables import extract_tables
from sqlcomplete.parseutils.utils import find_prev_keyword, is_open_quote
# extract_tables() reports each table reference as a 4-tuple; judging from
# the expectations below the layout is (schema, table, alias, is_function) -
# confirm against sqlcomplete.parseutils.tables.
def test_empty_string():
    tables = extract_tables('')
    assert tables == ()
def test_simple_select_single_table():
    tables = extract_tables('select * from abc')
    assert tables == ((None, 'abc', None, False),)
@pytest.mark.parametrize('sql', [
    'select * from "abc"."def"',
    'select * from abc."def"',
])
def test_simple_select_single_table_schema_qualified_quoted_table(sql):
    # Quoted identifiers keep their quotes in the alias position.
    tables = extract_tables(sql)
    assert tables == (('abc', 'def', '"def"', False),)
@pytest.mark.parametrize('sql', [
    'select * from abc.def',
    'select * from "abc".def',
])
def test_simple_select_single_table_schema_qualified(sql):
    tables = extract_tables(sql)
    assert tables == (('abc', 'def', None, False),)
def test_simple_select_single_table_double_quoted():
    tables = extract_tables('select * from "Abc"')
    assert tables == ((None, 'Abc', None, False),)
def test_simple_select_multiple_tables():
    tables = extract_tables('select * from abc, def')
    assert set(tables) == set([(None, 'abc', None, False),
                               (None, 'def', None, False)])
def test_simple_select_multiple_tables_double_quoted():
    tables = extract_tables('select * from "Abc", "Def"')
    assert set(tables) == set([(None, 'Abc', None, False),
                               (None, 'Def', None, False)])
def test_simple_select_single_table_deouble_quoted_aliased():
    tables = extract_tables('select * from "Abc" a')
    assert tables == ((None, 'Abc', 'a', False),)
def test_simple_select_multiple_tables_deouble_quoted_aliased():
    tables = extract_tables('select * from "Abc" a, "Def" d')
    assert set(tables) == set([(None, 'Abc', 'a', False),
                               (None, 'Def', 'd', False)])
def test_simple_select_multiple_tables_schema_qualified():
    tables = extract_tables('select * from abc.def, ghi.jkl')
    assert set(tables) == set([('abc', 'def', None, False),
                               ('ghi', 'jkl', None, False)])
def test_simple_select_with_cols_single_table():
    tables = extract_tables('select a,b from abc')
    assert tables == ((None, 'abc', None, False),)
def test_simple_select_with_cols_single_table_schema_qualified():
    tables = extract_tables('select a,b from abc.def')
    assert tables == (('abc', 'def', None, False),)
def test_simple_select_with_cols_multiple_tables():
    tables = extract_tables('select a,b from abc, def')
    assert set(tables) == set([(None, 'abc', None, False),
                               (None, 'def', None, False)])
def test_simple_select_with_cols_multiple_qualified_tables():
    tables = extract_tables('select a,b from abc.def, def.ghi')
    assert set(tables) == set([('abc', 'def', None, False),
                               ('def', 'ghi', None, False)])
# Incomplete statements (as typed mid-edit) must still yield the tables.
def test_select_with_hanging_comma_single_table():
    tables = extract_tables('select a, from abc')
    assert tables == ((None, 'abc', None, False),)
def test_select_with_hanging_comma_multiple_tables():
    tables = extract_tables('select a, from abc, def')
    assert set(tables) == set([(None, 'abc', None, False),
                               (None, 'def', None, False)])
def test_select_with_hanging_period_multiple_tables():
    tables = extract_tables('SELECT t1. FROM tabl1 t1, tabl2 t2')
    assert set(tables) == set([(None, 'tabl1', 't1', False),
                               (None, 'tabl2', 't2', False)])
# --- INSERT / UPDATE / JOIN extraction ---
def test_simple_insert_single_table():
    tables = extract_tables('insert into abc (id, name) values (1, "def")')
    # sqlparse mistakenly treats the parenthesised column list as an alias
    # for the table, so the expected value documents that known parser
    # quirk (alias 'abc') rather than the ideal result of a None alias.
    assert tables == ((None, 'abc', 'abc', False),)
@pytest.mark.xfail
def test_simple_insert_single_table_schema_qualified():
    tables = extract_tables('insert into abc.def (id, name) values (1, "def")')
    assert tables == (('abc', 'def', None, False),)
def test_simple_update_table_no_schema():
    tables = extract_tables('update abc set id = 1')
    assert tables == ((None, 'abc', None, False),)
def test_simple_update_table_with_schema():
    tables = extract_tables('update abc.def set id = 1')
    assert tables == (('abc', 'def', None, False),)
@pytest.mark.parametrize('join_type', ['', 'INNER', 'LEFT', 'RIGHT OUTER'])
def test_join_table(join_type):
    sql = 'SELECT * FROM abc a {0} JOIN def d ON a.id = d.num'.format(join_type)
    tables = extract_tables(sql)
    assert set(tables) == set([(None, 'abc', 'a', False),
                               (None, 'def', 'd', False)])
def test_join_table_schema_qualified():
    tables = extract_tables('SELECT * FROM abc.def x JOIN ghi.jkl y ON x.id = y.num')
    assert set(tables) == set([('abc', 'def', 'x', False),
                               ('ghi', 'jkl', 'y', False)])
def test_incomplete_join_clause():
    sql = '''select a.x, b.y
             from abc a join bcd b
             on a.id = '''
    tables = extract_tables(sql)
    assert tables == ((None, 'abc', 'a', False),
                      (None, 'bcd', 'b', False))
def test_join_as_table():
    tables = extract_tables('SELECT * FROM my_table AS m WHERE m.a > 5')
    assert tables == ((None, 'my_table', 'm', False),)
def test_multiple_joins():
    sql = '''select * from t1
            inner join t2 ON
            t1.id = t2.t1_id
            inner join t3 ON
            t2.id = t3.'''
    tables = extract_tables(sql)
    assert tables == (
        (None, 't1', None, False),
        (None, 't2', None, False),
        (None, 't3', None, False))
# --- subselects, functions-as-tables, and low-level parse helpers ---
def test_subselect_tables():
    sql = 'SELECT * FROM (SELECT FROM abc'
    tables = extract_tables(sql)
    assert tables == ((None, 'abc', None, False),)
@pytest.mark.parametrize('text', ['SELECT * FROM foo.', 'SELECT 123 AS foo'])
def test_extract_no_tables(text):
    tables = extract_tables(text)
    assert tables == tuple()
# The final True flag marks a set-returning function used as a table.
@pytest.mark.parametrize('arg_list', ['', 'arg1', 'arg1, arg2, arg3'])
def test_simple_function_as_table(arg_list):
    tables = extract_tables('SELECT * FROM foo({0})'.format(arg_list))
    assert tables == ((None, 'foo', None, True),)
@pytest.mark.parametrize('arg_list', ['', 'arg1', 'arg1, arg2, arg3'])
def test_simple_schema_qualified_function_as_table(arg_list):
    tables = extract_tables('SELECT * FROM foo.bar({0})'.format(arg_list))
    assert tables == (('foo', 'bar', None, True),)
@pytest.mark.parametrize('arg_list', ['', 'arg1', 'arg1, arg2, arg3'])
def test_simple_aliased_function_as_table(arg_list):
    tables = extract_tables('SELECT * FROM foo({0}) bar'.format(arg_list))
    assert tables == ((None, 'foo', 'bar', True),)
def test_simple_table_and_function():
    tables = extract_tables('SELECT * FROM foo JOIN bar()')
    assert set(tables) == set([(None, 'foo', None, False),
                               (None, 'bar', None, True)])
def test_complex_table_and_function():
    tables = extract_tables('''SELECT * FROM foo.bar baz
                               JOIN bar.qux(x, y, z) quux''')
    assert set(tables) == set([('foo', 'bar', 'baz', False),
                               ('bar', 'qux', 'quux', True)])
# find_prev_keyword returns (keyword_token, text_up_to_and_including_it).
def test_find_prev_keyword_using():
    q = 'select * from tbl1 inner join tbl2 using (col1, '
    kw, q2 = find_prev_keyword(q)
    assert kw.value == '(' and q2 == 'select * from tbl1 inner join tbl2 using ('
@pytest.mark.parametrize('sql', [
    'select * from foo where bar',
    'select * from foo where bar = 1 and baz or ',
    'select * from foo where bar = 1 and baz between qux and ',
])
def test_find_prev_keyword_where(sql):
    kw, stripped = find_prev_keyword(sql)
    assert kw.value == 'where' and stripped == 'select * from foo where'
@pytest.mark.parametrize('sql', [
    'create table foo (bar int, baz ',
    'select * from foo() as bar (baz '
])
def test_find_prev_keyword_open_parens(sql):
    kw, _ = find_prev_keyword(sql)
    assert kw.value == '('
# is_open_quote detects unterminated quotes / dollar-quoted strings.
@pytest.mark.parametrize('sql', [
    '',
    '$$ foo $$',
    "$$ 'foo' $$",
    '$$ "foo" $$',
    '$$ $a$ $$',
    '$a$ $$ $a$',
    'foo bar $$ baz $$',
])
def test_is_open_quote__closed(sql):
    assert not is_open_quote(sql)
@pytest.mark.parametrize('sql', [
    '$$',
    ';;;$$',
    'foo $$ bar $$; foo $$',
    '$$ foo $a$',
    "foo 'bar baz",
    "$a$ foo ",
    '$$ "foo" ',
    '$$ $a$ ',
    'foo bar $$ baz',
])
def test_is_open_quote__open(sql):
    assert is_open_quote(sql)
| dbcli/sqlcomplete | tests/parseutils/test_parseutils.py | Python | bsd-3-clause | 8,627 |
# -*- coding: utf-8 -*-
# pytils - russian-specific string utils
# Copyright (C) 2006-2009 Yury Yurevich
#
# http://pyobject.ru/projects/pytils/
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation, version 2
# of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
"""
Helpers for templatetags' unit tests in Django webframework
"""
from django.conf import settings
# Charset shared by settings and the pstr() helper below.
encoding = 'utf-8'
# Configure a minimal in-memory Django settings module.  This must happen
# BEFORE the django.template / loader imports below, which require settings
# to already be configured.
settings.configure(
    TEMPLATE_DIRS=(),
    TEMPLATE_CONTEXT_PROCESSORS=(),
    TEMPLATE_LOADERS=(),
    INSTALLED_APPS=('pytils',),
    DEFAULT_CHARSET=encoding,
)
from django import template
from django.template import loader
from pytils.templatetags import pseudo_str
import unittest
def pstr(ustr):
    """
    Pseudo-unicode helper: run ``ustr`` through ``pseudo_str`` with the
    module-level ``encoding`` (third argument is passed as None).
    """
    return pseudo_str(ustr, encoding, None)
class TemplateTagTestCase(unittest.TestCase):
    """
    TestCase for testing template tags and filters
    """

    def check_template_tag(self, template_name, template_string, context, result_string):
        """
        Render ``template_string`` and assert its output equals ``result_string``.

        @param template_name: name of template
        @type template_name: C{str}

        @param template_string: contents of template
        @type template_string: C{str} or C{unicode}

        @param context: rendering context
        @type context: C{dict}

        @param result_string: reference output
        @type result_string: C{str} or C{unicode}
        """
        def test_template_loader(template_name, template_dirs=None):
            # Serve ``template_string`` for any requested name, so the test
            # never touches the filesystem.
            return pstr(template_string), template_name

        # Monkeypatch Django's template machinery to use our in-memory loader.
        loader.template_source_loaders = [test_template_loader,]

        output = loader.get_template(template_name).render(template.Context(context))
        # assertEquals is a deprecated alias (removed in Python 3.12);
        # use assertEqual.
        self.assertEqual(output, pstr(result_string))
| mosquito/TelePY | pytils/test/templatetags/helpers.py | Python | gpl-3.0 | 2,137 |
# -*- test-case-name: axiom.test.test_slotmachine -*-

import six  # was imported twice; the duplicate import has been removed

# Historical alias for the builtin; prefer super() directly in new code.
hyper = super

# Sentinel distinguishing "no value supplied" from an explicit None.
_NOSLOT = object()
class Allowed(object):
    """
    Data descriptor for an attribute that may be freely read, written and
    deleted.  Values live in the instance ``__dict__`` under ``name``; an
    optional ``default`` is returned while no value has been stored.
    """

    def __init__(self, name, default=_NOSLOT):
        self.name = name
        self.default = default

    def __get__(self, oself, otype=None):
        # Class-level access yields the descriptor itself.
        if otype is not None and oself is None:
            return self
        try:
            return oself.__dict__[self.name]
        except KeyError:
            pass
        if self.default is not _NOSLOT:
            return self.default
        raise AttributeError("%r object did not have attribute %r" % (
            oself.__class__.__name__, self.name))

    def __delete__(self, oself):
        # member_descriptor silently ignores deletion of an unset slot, and
        # Axiom relies on that behaviour, so mirror it instead of raising.
        oself.__dict__.pop(self.name, None)

    def __set__(self, oself, value):
        oself.__dict__[self.name] = value
class _SlotMetaMachine(type):
    """
    Metaclass that turns each name produced by ``determineSchema`` into an
    ``Allowed`` data descriptor on the new class, inheriting a default value
    from the first base class that defines the same name.
    """
    def __new__(meta, name, bases, dictionary):
        dictionary['__name__'] = name
        slots = list(meta.determineSchema(dictionary))
        for slot in slots:
            # Pick up a default from the first base that has this attribute.
            for base in bases:
                defval = getattr(base, slot, _NOSLOT)
                if defval is not _NOSLOT:
                    break
            dictionary[slot] = Allowed(slot, defval)
        nt = type.__new__(meta, name, bases, dictionary)
        return nt

    def determineSchema(meta, dictionary):
        # Default policy: slots are listed explicitly under "slots".
        # SchemaMetaMachine overrides this to derive them from Attributes.
        return dictionary.get("slots", [])
    determineSchema = classmethod(determineSchema)
class DescriptorWithDefault(object):
    """
    Data descriptor that proxies reads, writes and deletes to another
    instance attribute (``original``), returning ``default`` while that
    attribute is unset.  Class-level access yields the default itself.
    """

    def __init__(self, default, original):
        self.original = original
        self.default = default

    def __get__(self, oself, type=None):
        # Quirk preserved from the original: with no owner type supplied
        # this returns None.
        if type is None:
            return None
        if oself is None:
            # Accessed on the class rather than an instance.
            return self.default
        return getattr(oself, self.original, self.default)

    def __set__(self, oself, value):
        setattr(oself, self.original, value)

    def __delete__(self, oself):
        delattr(oself, self.original)
class Attribute(object):
    """
    Base class for declarative schema attributes.

    ``requiredSlots`` reports which instance slots the attribute needs; the
    metaclass replaces the class-level attribute with those slots, so by the
    time instances exist this descriptor should only be reachable through
    the class (hence the assertion in ``__get__``).
    """
    def __init__(self, doc=''):
        self.doc = doc

    def requiredSlots(self, modname, classname, attrname):
        # Remember our own attribute name; the base implementation needs
        # exactly one slot, named after the attribute itself.
        self.name = attrname
        yield attrname

    def __get__(self, oself, type=None):
        # Instance access means the metaclass failed to mask this attribute.
        assert oself is None, "{}: should be masked".format(self.name)
        return self


# Sentinel meaning "no default": reading an unset attribute then raises.
_RAISE = object()


class SetOnce(Attribute):
    """
    A write-once attribute: the first assignment sticks, later assignments
    raise ``AttributeError``.  An optional ``default`` is returned until
    the attribute has been set.
    """
    def __init__(self, doc='', default=_RAISE):
        # BUG FIX: ``doc`` was previously discarded (Attribute.__init__ was
        # called without it); forward it so the docstring is retained.
        Attribute.__init__(self, doc)
        # Store the default as a 0- or 1-tuple so it can be splatted into
        # getattr(): an empty tuple means "raise AttributeError when unset".
        if default is _RAISE:
            self.default = ()
        else:
            self.default = (default,)

    def requiredSlots(self, modname, classname, attrname):
        self.name = attrname
        # The real value hides in a single underscore-prefixed slot.
        t = self.trueattr = ('_' + self.name)
        yield t

    def __set__(self, iself, value):
        if not hasattr(iself, self.trueattr):
            setattr(iself, self.trueattr, value)
        else:
            raise AttributeError('{}.{} may only be set once'.format(
                type(iself).__name__, self.name))

    def __get__(self, iself, type=None):
        if type is not None and iself is None:
            return self
        return getattr(iself, self.trueattr, *self.default)
class SchemaMetaMachine(_SlotMetaMachine):
    """
    Metaclass deriving the slot list from ``Attribute`` instances declared
    on the class body, rather than from an explicit ``slots`` list.
    """
    def determineSchema(meta, dictionary):
        # Collect (name, Attribute) pairs sorted by name for determinism and
        # record them on the class as __attributes__; each Attribute then
        # reports the slots it needs.  If an attribute reuses its own name
        # as a slot, the declaration is removed so the Allowed descriptor
        # created by the base metaclass can take its place.
        attrs = dictionary['__attributes__'] = []
        name = dictionary['__name__']
        moduleName = dictionary['__module__']
        dictitems = list(dictionary.items())
        dictitems.sort()
        for k, v in dictitems:
            if isinstance(v, Attribute):
                attrs.append((k, v))
                for slot in v.requiredSlots(moduleName, name, k):
                    if slot == k:
                        del dictionary[k]
                    yield slot
    determineSchema = classmethod(determineSchema)
class _Strict(object):
    """
    I disallow all attributes from being set that do not have an explicit
    data descriptor.
    """
    def __setattr__(self, name, value):
        """
        Like PyObject_GenericSetAttr, but call descriptors only.
        """
        # A per-class cache maps attribute name -> bound __set__ of the data
        # descriptor that handles it, or None for anything non-settable.  It
        # is stored under the name-mangled key '_Strict__setattr__allowed'.
        try:
            allowed = type(self).__dict__['_Strict__setattr__allowed']
        except KeyError:
            allowed = type(self)._Strict__setattr__allowed = {}
            for cls in type(self).__mro__:
                for attrName, slot in six.iteritems(cls.__dict__):
                    if attrName in allowed:
                        # It was found earlier in the mro, overriding
                        # whatever this is. Ignore it and move on.
                        continue
                    setter = getattr(slot, '__set__', _NOSLOT)
                    if setter is not _NOSLOT:
                        # It is a data descriptor, so remember the setter
                        # for it in the cache.
                        allowed[attrName] = setter
                    else:
                        # It is something else, so remember None for it in
                        # the cache to indicate it cannot have its value
                        # set.
                        allowed[attrName] = None
        try:
            setter = allowed[name]
        except KeyError:
            pass
        else:
            if setter is not None:
                setter(self, value)
                return
        # It wasn't found in the setter cache or it was found to be None,
        # indicating a non-data descriptor which cannot be set.
        raise AttributeError(
            "{!r} can't set attribute {!r}".format(self.__class__.__name__, name))
class SchemaMachine(six.with_metaclass(SchemaMetaMachine, _Strict)):
    # Strict base class whose schema comes from declared Attribute instances.
    pass
class SlotMachine(six.with_metaclass(_SlotMetaMachine, _Strict)):
    # Strict base class whose schema comes from an explicit ``slots`` list.
    pass
| twisted/axiom | axiom/slotmachine.py | Python | mit | 5,990 |
#!/usr/bin/env python3
from setuptools import setup
# Package metadata and runtime dependencies for the MeanRecipes Flask app.
setup(
    name='MeanRecipes',
    packages=['meanrecipes'],
    install_requires=[
        'flask',
        'requests',
        'beautifulsoup4',
    ],
)
| kkelk/MeanRecipes | setup.py | Python | bsd-2-clause | 242 |
import nengo
import nengo_pushbot
import numpy as np
import tag
# Motor scale factors; tag.get_dir() flips the sign depending on the
# robot's configured direction.  NOTE(review): motor_speed_factor is never
# referenced below - only sheep_motor_speed_factor and input_factor are.
motor_speed_factor = -0.2 * tag.get_dir()
sheep_motor_speed_factor=0.5 * tag.get_dir()
# how fast can the bot go as a maximum in each direction
input_factor=0.9
# Build the neural model: ensembles for motor drive plus tracker inputs.
model = nengo.Network(label='pushbot')
with model:
    l1 = nengo.Ensemble(100, dimensions=1, label='l1')
    r1 = nengo.Ensemble(100, dimensions=1, label='r1')
    # speed cannot go faster than one - want it to be forward and right and it goes double the speed.
    combo1 = nengo.Ensemble(200, dimensions=2, label='combo1', radius = 1.4)
    control = nengo.Ensemble(200, dimensions=2, label='control', radius = 1.4)
    #connect to populations that control the motors
    # NOTE(review): l2/r2 appear unused in the rest of this script.
    l2 = nengo.Ensemble(100, dimensions=1, label='l2')
    r2 = nengo.Ensemble(100, dimensions=1, label='r2')
    # replica of what is sent to the motor so that we can see the behavior and allows plotting - this is also what we are probing
    bot1 = nengo_pushbot.PushBotNetwork(tag.get_addr())
    #bot1.laser(tag.get_self_freq())
    bot1.led(tag.get_self_freq())
    #bot1.laser(0)
    #bot1.track_freqs([200, 300])
    bot1.track_freqs([tag.get_good_freq(), tag.get_bad_freq()], certainty_scale=10000)
    # Half of the 128-pixel retina; used to normalise coordinates below.
    half_size = 64.0
    # Vertical / horizontal band boundaries (pixels) used by orient().
    y_limit = list()
    y_limit.append(0.0)
    y_limit.append(35.0)
    y_limit.append(40.0)
    y_limit.append(115.0)
    y_limit.append(127.0)
    x_limit=list()
    x_limit.append(0.0)
    x_limit.append(18.0)
    x_limit.append(36.0)
    x_limit.append(54.0)
    x_limit.append(73.0)
    x_limit.append(91.0)
    x_limit.append(109.0)
    x_limit.append(127.0)
    # 3 dimensions are x y and a confidence level for how sure there is something there.
    pos0 = nengo.Ensemble(100, 3, label='pos_good')
    # only want 2 of the dimensions - removing the 3rd component for tracking.
    nengo.Connection(bot1.tracker_0, pos0)
    pos1 = nengo.Ensemble(100, 3, label='pos_bad')
    nengo.Connection(bot1.tracker_1, pos1)
    # Concatenate both trackers into one 6-D state for orient().
    select = nengo.Ensemble(600,6, label='select')
    nengo.Connection(pos0, select[0:3])
    nengo.Connection(pos1, select[3:6])
    def normalize_coord(x):
        # Map a pixel coordinate in [0, 127] onto roughly [-1, 1].
        return (x-half_size)/half_size
    # divides the vision field into 9 positions - says that if the interest area is not in the middle then look up or down and turn either left / right to put the stimuli in the middle. This also allows distance to be maintained.
    def orient(x):
        """
        Map the 6-D ``select`` state onto [forward_drive, turn_drive].

        ``x`` packs both trackers' outputs; x[2] and x[5] are the certainty
        components of the 'good' and 'bad' trackers respectively (the first
        two components of each appear to be image coordinates - confirm
        against the tracker output convention).
        """
        if (x[2] > x[5] and x[2] > 0.2):
            # 'Good' target is the more certain one: head towards it,
            # steering to bring x[1] into the central x_limit band.
            y_ret = 1
            if x[1] < normalize_coord(x_limit[3]): #rotate left/right
                x_ret = 0.07
            elif x[1] >= normalize_coord(x_limit[3]) and x[1] <= normalize_coord(x_limit[4]):
                x_ret = 0
            else:
                x_ret = -0.12
        else:
            # No confident 'good' target: creep forward slowly, no turning.
            x_ret = 0.05
            y_ret = 0
        return [y_ret, x_ret]
#pos1 = nengo.Ensemble(100, 2, label='pos1')
#nengo.Connection(bot1.tracker_1, pos1)
# this is what controls the robot - from control to position through the function orient - transform is just a one to one mapping of a 2D matrix.
nengo.Connection(select, control, function=orient, transform=[[1,0],[0,1]], synapse=0.002)
# duplication so that both robots are controlled - one with the keyboard and the other with the stimuli.
nengo.Connection(control, l1, transform=[[input_factor, 0]])
nengo.Connection(control, r1, transform=[[input_factor, 0]])
nengo.Connection(control, l1, transform=[[0, input_factor]])
nengo.Connection(control, r1, transform=[[0, -input_factor]])
nengo.Connection(l1, bot1.motor, synapse=0.002, transform=[[sheep_motor_speed_factor], [0]])
nengo.Connection(r1, bot1.motor, synapse=0.002, transform=[[0], [sheep_motor_speed_factor]])
nengo.Connection(l1, combo1, synapse=0.002, transform=[[sheep_motor_speed_factor], [0]])
nengo.Connection(r1, combo1, synapse=0.002, transform=[[0], [sheep_motor_speed_factor]])
# Build the simulator and run indefinitely when executed as a script.
if __name__ == '__main__':
    sim = nengo.Simulator(model)
    while True:
sim.run(5000) | tcstewar/telluride2014 | tag/tag_sheep.py | Python | gpl-3.0 | 4,266 |
from datetime import datetime
from status import is_status_down, get_last_status_update
import os
from google.appengine.ext.webapp import template
import version
def convert_time(s):
    '''Parse an "HH:MM" time string into a datetime object.'''
    return datetime.strptime(s, "%H:%M")
def split_weeks(weeks):
    '''Expand a comma-separated week specification into a list of ints.

    Ranges are written with a dash, e.g. "1,5-7" ---> [1, 5, 6, 7].
    '''
    result = []
    for field in weeks.split(','):
        bounds = field.split('-')
        if len(bounds) > 1:
            # "a-b" range: inclusive of both endpoints.
            first, last = int(bounds[0]), int(bounds[1])
            result.extend(range(first, last + 1))
        else:
            result.append(int(field))
    return result
def is_course_mnemo_valid(course_mnemo):
    """True for non-empty, purely alphanumeric mnemonics under 32 chars."""
    return len(course_mnemo) < 32 and course_mnemo.isalnum()
def render_course_notfound_page(request_handler, mnemo, resource_type):
    """Render the 'course not found' page for ``mnemo`` onto the handler's response."""
    template_values = {'gehol_is_down': is_status_down(),
                       'last_status_update': get_last_status_update(),
                       'mnemo':mnemo,
                       'resource_type':resource_type,
                       'version':version.VERSION
                       }
    path = os.path.join(os.path.dirname(__file__), 'templates/course_notfound.html')
    request_handler.response.out.write(template.render(path, template_values))
def render_deadline_exceeded_page(request_handler):
    """Render the 'deadline exceeded' page onto the handler's response."""
    template_values = {'gehol_is_down': is_status_down(),
                       'last_status_update': get_last_status_update(),
                       'version':version.VERSION
                       }
    path = os.path.join(os.path.dirname(__file__), 'templates/deadline_exceeded.html')
request_handler.response.out.write(template.render(path, template_values)) | sevas/geholimportapp | utils.py | Python | mit | 1,642 |
from django.contrib import admin
# Register your models here.
from datatable.models import Serveur
# Register your models here.
class ServeurAdmin(admin.ModelAdmin):
    # Changelist columns, sidebar filters and searchable fields for Serveur.
    list_display = ('In_Type', 'In_Nom', 'In_IP', 'statut')
    list_filter = ('In_Type', 'In_Nom', 'In_IP', 'statut')
    search_fields = ['In_Type', 'In_Nom', 'In_IP' ]
admin.site.register(Serveur, ServeurAdmin)
| chrislyon/dj_ds1 | datatable/admin.py | Python | gpl-2.0 | 375 |
# -*- coding: utf-8 -*-
# Copyright 2015, 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tests import unittest
from twisted.internet import defer
from mock import Mock
from tests.utils import (
MockHttpResource, DeferredMockCallable, setup_test_homeserver
)
from synapse.api.filtering import Filter
from synapse.events import FrozenEvent
user_localpart = "test_user"
def MockEvent(**kwargs):
    """Build a FrozenEvent from kwargs, filling in fake id/type defaults."""
    kwargs.setdefault("event_id", "fake_event_id")
    kwargs.setdefault("type", "fake_type")
    return FrozenEvent(kwargs)
class FilteringTestCase(unittest.TestCase):
    @defer.inlineCallbacks
    def setUp(self):
        # Stand up a test homeserver with the network-facing pieces mocked
        # out, so filtering can be exercised without real HTTP/federation.
        self.mock_federation_resource = MockHttpResource()
        self.mock_http_client = Mock(spec=[])
        self.mock_http_client.put_json = DeferredMockCallable()
        hs = yield setup_test_homeserver(
            handlers=None,
            http_client=self.mock_http_client,
            keyring=Mock(),
        )
        self.filtering = hs.get_filtering()
        self.datastore = hs.get_datastore()
    # Each "definition" dict below is a filter specification; Filter.check()
    # decides whether a MockEvent passes it.  "not_*" entries always take
    # priority over their positive counterparts.

    # --- "types" / "not_types" matching (wildcards allowed) ---
    def test_definition_types_works_with_literals(self):
        definition = {
            "types": ["m.room.message", "org.matrix.foo.bar"]
        }
        event = MockEvent(
            sender="@foo:bar",
            type="m.room.message",
            room_id="!foo:bar"
        )
        self.assertTrue(
            Filter(definition).check(event)
        )
    def test_definition_types_works_with_wildcards(self):
        definition = {
            "types": ["m.*", "org.matrix.foo.bar"]
        }
        event = MockEvent(
            sender="@foo:bar",
            type="m.room.message",
            room_id="!foo:bar"
        )
        self.assertTrue(
            Filter(definition).check(event)
        )
    def test_definition_types_works_with_unknowns(self):
        definition = {
            "types": ["m.room.message", "org.matrix.foo.bar"]
        }
        event = MockEvent(
            sender="@foo:bar",
            type="now.for.something.completely.different",
            room_id="!foo:bar"
        )
        self.assertFalse(
            Filter(definition).check(event)
        )
    def test_definition_not_types_works_with_literals(self):
        definition = {
            "not_types": ["m.room.message", "org.matrix.foo.bar"]
        }
        event = MockEvent(
            sender="@foo:bar",
            type="m.room.message",
            room_id="!foo:bar"
        )
        self.assertFalse(
            Filter(definition).check(event)
        )
    def test_definition_not_types_works_with_wildcards(self):
        definition = {
            "not_types": ["m.room.message", "org.matrix.*"]
        }
        event = MockEvent(
            sender="@foo:bar",
            type="org.matrix.custom.event",
            room_id="!foo:bar"
        )
        self.assertFalse(
            Filter(definition).check(event)
        )
    def test_definition_not_types_works_with_unknowns(self):
        definition = {
            "not_types": ["m.*", "org.*"]
        }
        event = MockEvent(
            sender="@foo:bar",
            type="com.nom.nom.nom",
            room_id="!foo:bar"
        )
        self.assertTrue(
            Filter(definition).check(event)
        )
    def test_definition_not_types_takes_priority_over_types(self):
        definition = {
            "not_types": ["m.*", "org.*"],
            "types": ["m.room.message", "m.room.topic"]
        }
        event = MockEvent(
            sender="@foo:bar",
            type="m.room.topic",
            room_id="!foo:bar"
        )
        self.assertFalse(
            Filter(definition).check(event)
        )

    # --- "senders" / "not_senders" matching ---
    def test_definition_senders_works_with_literals(self):
        definition = {
            "senders": ["@flibble:wibble"]
        }
        event = MockEvent(
            sender="@flibble:wibble",
            type="com.nom.nom.nom",
            room_id="!foo:bar"
        )
        self.assertTrue(
            Filter(definition).check(event)
        )
    def test_definition_senders_works_with_unknowns(self):
        definition = {
            "senders": ["@flibble:wibble"]
        }
        event = MockEvent(
            sender="@challenger:appears",
            type="com.nom.nom.nom",
            room_id="!foo:bar"
        )
        self.assertFalse(
            Filter(definition).check(event)
        )
    def test_definition_not_senders_works_with_literals(self):
        definition = {
            "not_senders": ["@flibble:wibble"]
        }
        event = MockEvent(
            sender="@flibble:wibble",
            type="com.nom.nom.nom",
            room_id="!foo:bar"
        )
        self.assertFalse(
            Filter(definition).check(event)
        )
    def test_definition_not_senders_works_with_unknowns(self):
        definition = {
            "not_senders": ["@flibble:wibble"]
        }
        event = MockEvent(
            sender="@challenger:appears",
            type="com.nom.nom.nom",
            room_id="!foo:bar"
        )
        self.assertTrue(
            Filter(definition).check(event)
        )
    def test_definition_not_senders_takes_priority_over_senders(self):
        definition = {
            "not_senders": ["@misspiggy:muppets"],
            "senders": ["@kermit:muppets", "@misspiggy:muppets"]
        }
        event = MockEvent(
            sender="@misspiggy:muppets",
            type="m.room.topic",
            room_id="!foo:bar"
        )
        self.assertFalse(
            Filter(definition).check(event)
        )

    # --- "rooms" / "not_rooms" matching ---
    def test_definition_rooms_works_with_literals(self):
        definition = {
            "rooms": ["!secretbase:unknown"]
        }
        event = MockEvent(
            sender="@foo:bar",
            type="m.room.message",
            room_id="!secretbase:unknown"
        )
        self.assertTrue(
            Filter(definition).check(event)
        )
    def test_definition_rooms_works_with_unknowns(self):
        definition = {
            "rooms": ["!secretbase:unknown"]
        }
        event = MockEvent(
            sender="@foo:bar",
            type="m.room.message",
            room_id="!anothersecretbase:unknown"
        )
        self.assertFalse(
            Filter(definition).check(event)
        )
    def test_definition_not_rooms_works_with_literals(self):
        definition = {
            "not_rooms": ["!anothersecretbase:unknown"]
        }
        event = MockEvent(
            sender="@foo:bar",
            type="m.room.message",
            room_id="!anothersecretbase:unknown"
        )
        self.assertFalse(
            Filter(definition).check(event)
        )
    def test_definition_not_rooms_works_with_unknowns(self):
        definition = {
            "not_rooms": ["!secretbase:unknown"]
        }
        event = MockEvent(
            sender="@foo:bar",
            type="m.room.message",
            room_id="!anothersecretbase:unknown"
        )
        self.assertTrue(
            Filter(definition).check(event)
        )
    def test_definition_not_rooms_takes_priority_over_rooms(self):
        definition = {
            "not_rooms": ["!secretbase:unknown"],
            "rooms": ["!secretbase:unknown"]
        }
        event = MockEvent(
            sender="@foo:bar",
            type="m.room.message",
            room_id="!secretbase:unknown"
        )
        self.assertFalse(
            Filter(definition).check(event)
        )

    # --- all clauses combined: every clause must pass ---
    def test_definition_combined_event(self):
        definition = {
            "not_senders": ["@misspiggy:muppets"],
            "senders": ["@kermit:muppets"],
            "rooms": ["!stage:unknown"],
            "not_rooms": ["!piggyshouse:muppets"],
            "types": ["m.room.message", "muppets.kermit.*"],
            "not_types": ["muppets.misspiggy.*"]
        }
        event = MockEvent(
            sender="@kermit:muppets", # yup
            type="m.room.message", # yup
            room_id="!stage:unknown" # yup
        )
        self.assertTrue(
            Filter(definition).check(event)
        )
    def test_definition_combined_event_bad_sender(self):
        definition = {
            "not_senders": ["@misspiggy:muppets"],
            "senders": ["@kermit:muppets"],
            "rooms": ["!stage:unknown"],
            "not_rooms": ["!piggyshouse:muppets"],
            "types": ["m.room.message", "muppets.kermit.*"],
            "not_types": ["muppets.misspiggy.*"]
        }
        event = MockEvent(
            sender="@misspiggy:muppets", # nope
            type="m.room.message", # yup
            room_id="!stage:unknown" # yup
        )
        self.assertFalse(
            Filter(definition).check(event)
        )
    def test_definition_combined_event_bad_room(self):
        definition = {
            "not_senders": ["@misspiggy:muppets"],
            "senders": ["@kermit:muppets"],
            "rooms": ["!stage:unknown"],
            "not_rooms": ["!piggyshouse:muppets"],
            "types": ["m.room.message", "muppets.kermit.*"],
            "not_types": ["muppets.misspiggy.*"]
        }
        event = MockEvent(
            sender="@kermit:muppets", # yup
            type="m.room.message", # yup
            room_id="!piggyshouse:muppets" # nope
        )
        self.assertFalse(
            Filter(definition).check(event)
        )
    def test_definition_combined_event_bad_type(self):
        definition = {
            "not_senders": ["@misspiggy:muppets"],
            "senders": ["@kermit:muppets"],
            "rooms": ["!stage:unknown"],
            "not_rooms": ["!piggyshouse:muppets"],
            "types": ["m.room.message", "muppets.kermit.*"],
            "not_types": ["muppets.misspiggy.*"]
        }
        event = MockEvent(
            sender="@kermit:muppets", # yup
            type="muppets.misspiggy.kisses", # nope
            room_id="!stage:unknown" # yup
        )
        self.assertFalse(
            Filter(definition).check(event)
        )
@defer.inlineCallbacks
def test_filter_presence_match(self):
user_filter_json = {
"presence": {
"types": ["m.*"]
}
}
filter_id = yield self.datastore.add_user_filter(
user_localpart=user_localpart,
user_filter=user_filter_json,
)
event = MockEvent(
sender="@foo:bar",
type="m.profile",
)
events = [event]
user_filter = yield self.filtering.get_user_filter(
user_localpart=user_localpart,
filter_id=filter_id,
)
results = user_filter.filter_presence(events=events)
self.assertEquals(events, results)
@defer.inlineCallbacks
def test_filter_presence_no_match(self):
user_filter_json = {
"presence": {
"types": ["m.*"]
}
}
filter_id = yield self.datastore.add_user_filter(
user_localpart=user_localpart + "2",
user_filter=user_filter_json,
)
event = MockEvent(
event_id="$asdasd:localhost",
sender="@foo:bar",
type="custom.avatar.3d.crazy",
)
events = [event]
user_filter = yield self.filtering.get_user_filter(
user_localpart=user_localpart + "2",
filter_id=filter_id,
)
results = user_filter.filter_presence(events=events)
self.assertEquals([], results)
@defer.inlineCallbacks
def test_filter_room_state_match(self):
user_filter_json = {
"room": {
"state": {
"types": ["m.*"]
}
}
}
filter_id = yield self.datastore.add_user_filter(
user_localpart=user_localpart,
user_filter=user_filter_json,
)
event = MockEvent(
sender="@foo:bar",
type="m.room.topic",
room_id="!foo:bar"
)
events = [event]
user_filter = yield self.filtering.get_user_filter(
user_localpart=user_localpart,
filter_id=filter_id,
)
results = user_filter.filter_room_state(events=events)
self.assertEquals(events, results)
@defer.inlineCallbacks
def test_filter_room_state_no_match(self):
user_filter_json = {
"room": {
"state": {
"types": ["m.*"]
}
}
}
filter_id = yield self.datastore.add_user_filter(
user_localpart=user_localpart,
user_filter=user_filter_json,
)
event = MockEvent(
sender="@foo:bar",
type="org.matrix.custom.event",
room_id="!foo:bar"
)
events = [event]
user_filter = yield self.filtering.get_user_filter(
user_localpart=user_localpart,
filter_id=filter_id,
)
results = user_filter.filter_room_state(events)
self.assertEquals([], results)
def test_filter_rooms(self):
definition = {
"rooms": ["!allowed:example.com", "!excluded:example.com"],
"not_rooms": ["!excluded:example.com"],
}
room_ids = [
"!allowed:example.com", # Allowed because in rooms and not in not_rooms.
"!excluded:example.com", # Disallowed because in not_rooms.
"!not_included:example.com", # Disallowed because not in rooms.
]
filtered_room_ids = list(Filter(definition).filter_rooms(room_ids))
self.assertEquals(filtered_room_ids, ["!allowed:example.com"])
@defer.inlineCallbacks
def test_add_filter(self):
user_filter_json = {
"room": {
"state": {
"types": ["m.*"]
}
}
}
filter_id = yield self.filtering.add_user_filter(
user_localpart=user_localpart,
user_filter=user_filter_json,
)
self.assertEquals(filter_id, 0)
self.assertEquals(user_filter_json, (
yield self.datastore.get_user_filter(
user_localpart=user_localpart,
filter_id=0,
)
))
@defer.inlineCallbacks
def test_get_filter(self):
user_filter_json = {
"room": {
"state": {
"types": ["m.*"]
}
}
}
filter_id = yield self.datastore.add_user_filter(
user_localpart=user_localpart,
user_filter=user_filter_json,
)
filter = yield self.filtering.get_user_filter(
user_localpart=user_localpart,
filter_id=filter_id,
)
self.assertEquals(filter.get_filter_json(), user_filter_json)
self.assertRegexpMatches(repr(filter), r"<FilterCollection \{.*\}>")
| TribeMedia/synapse | tests/api/test_filtering.py | Python | apache-2.0 | 15,549 |
import random
from datetime import datetime
from urlparse import urlparse
from django.http import HttpResponse,HttpResponseBadRequest,HttpResponseForbidden,HttpResponseGone
from django.shortcuts import get_object_or_404
from django.views.decorators.http import require_GET,require_POST,require_http_methods
from django.utils.translation import ugettext as _
from django.core.urlresolvers import resolve
from gonzo.api import utils as api_utils
from gonzo.api.decorators import api_function
from gonzo.hunt.forms import *
from gonzo.hunt.models import *
from gonzo.hunt.utils import *
def _ensure_current(request,hunt):
now = datetime.utcnow()
if now < hunt.start_time:
return api_utils.api_error(request, "Hunt hasn't started yet")
if now >= hunt.end_time:
return api_utils.api_error(request, "Hunt has ended")
def _ensure_vote_current(request,hunt):
now = datetime.utcnow()
if now < hunt.start_time:
return api_utils.api_error(request, "Hunt hasn't started yet")
if now >= hunt.vote_end_time:
return api_utils.api_error(request, "Hunt has ended")
def _slice(request,set):
limit = request.REQUEST.get('limit')
offset = request.REQUEST.get('offset')
if offset:
set = set[offset:]
if limit:
set = set[:limit]
return set
def _get_hunts(request, queryset):
    """Serialize a (sliced) hunt queryset as ``{'hunts': [...]}`` JSON."""
    # TODO: BAD! Don't use list() on a QuerySet. We don't know how large it is!
    # We should use pagination for this.
    hunts = list(_slice(request, queryset))
    return api_utils.to_json(request, {'hunts': hunts})
def _get_photos(request, queryset):
    """Serialize a (sliced) submission queryset as ``{'submissions': [...]}``."""
    # TODO: BAD! Don't use list() on a QuerySet. We don't know how large it is!
    # We should use pagination for this.
    photos = list(_slice(request, queryset))
    return api_utils.to_json(request, {'submissions': photos})
def _get_comments(request, queryset):
    """Serialize a (sliced) comment queryset as ``{'comments': [...]}``."""
    # TODO: BAD! Don't use list() on a QuerySet. We don't know how large it is!
    # We should use pagination for this.
    comments = list(_slice(request, queryset))
    return api_utils.to_json(request, {'comments': comments})
def _new_hunt(request):
    """Create a new hunt (POST handler for the index).  Placeholder:
    currently a no-op that returns None."""
    # TODO: new-hunt requires a logged-in user with the appropriate permissions
    pass
@api_function
def index(request):
    """GET: list every hunt.  POST: create a new hunt."""
    if request.method == 'POST':
        return _new_hunt(request)
    if request.method == 'GET':
        return _get_hunts(request, Hunt.objects.all())
    return HttpResponseBadRequest()
@require_GET
@api_function
def current_hunts(request):
    """List hunts whose submission window has opened and whose voting
    window has not yet closed."""
    now = datetime.utcnow()
    return _get_hunts(request,
                      Hunt.objects.filter(start_time__lte=now, vote_end_time__gt=now))
@require_GET
@api_function
def hunt_by_id(request,slug):
    """Return the JSON representation of the hunt at *slug*, or 404."""
    return api_utils.get_json_or_404(Hunt,request,slug=slug)
def _get_ballot(request,hunt):
    """Return two randomly chosen, non-removed submissions to vote between.

    random.sample raises ValueError while the hunt has fewer than two
    submissions; that case is reported as a friendly API error.
    """
    try:
        return _get_photos(request,
                           random.sample(hunt.submission_set.filter(is_removed=False), 2))
    except ValueError:
        return api_utils.api_error(request, "We're still waiting on photos. Check back later, or add your own!")
def _submit_vote(request,hunt):
    """Record a vote for the submission named by the POSTed 'url', then
    return a fresh ballot.  Errors: 400 when 'url' is missing, API errors
    for unparsable URLs or photos from another hunt, 404 for unknown ids.
    """
    url = request.POST.get("url")
    if not url:
        return HttpResponseBadRequest()
    # resolve the URL for the slug and object_id
    try:
        view, args, kwargs = resolve(urlparse(url)[2])
    except:
        return api_utils.api_error(request, "Invalid photo URL: "+str(url))
    slug = kwargs['slug']
    object_id = kwargs['object_id']
    if hunt.slug != slug:
        return api_utils.api_error(request, "Photo isn't a part of this hunt")
    submission = get_object_or_404(Submission,pk=object_id)
    vote = Vote(hunt=hunt,
                submission=submission,
                ip_address=request.META.get('REMOTE_ADDR'))
    # TODO: Some users may have a vote of more value.
    vote.value = 1
    if request.user.is_authenticated():
        vote.user = request.user
    else:
        # Anonymous voters are tracked by a per-client source token instead.
        vote.anon_source = get_anon_source(request)
    vote.save()
    return _get_ballot(request,hunt)
@api_function
def hunt_ballot(request,slug):
    """GET a two-photo ballot, or POST a vote, for the hunt at *slug*."""
    hunt = get_object_or_404(Hunt,slug=slug)
    error = _ensure_vote_current(request, hunt)
    if error:
        return error
    handlers = {'GET': _get_ballot, 'POST': _submit_vote}
    handler = handlers.get(request.method)
    if handler is None:
        return HttpResponseBadRequest()
    return handler(request, hunt)
def _submit_comment(request, hunt, submission):
    """Validate and store a comment on *hunt* (and optionally *submission*).

    Returns a 201 response with the serialized comment and a
    Content-Location header, or an API error with the form errors.
    """
    f = CommentForm(request.POST)
    if not f.is_valid():
        return api_utils.api_error(request,str(f.errors))
    # You can leave a comment at any time
    comment = f.save(commit=False)
    if request.user.is_authenticated():
        comment.user = request.user
    else:
        # Anonymous commenters are tracked by a per-client source token.
        comment.anon_source = get_anon_source(request)
    comment.ip_address = request.META.get('REMOTE_ADDR')
    comment.hunt = hunt
    comment.submission = submission
    comment.save()
    response = HttpResponse(api_utils.to_json(request,comment),
                            status=201,
                            content_type=api_utils.JSON_TYPE)
    response['Content-Location'] = request.build_absolute_uri(comment.get_api_url())
    return response
@api_function
def _comment_by_id(request,slug,comment_id,object_id=None):
    """Fetch (GET) or delete (DELETE) a single comment by primary key.

    NOTE(review): the DELETE branch currently returns 200 without actually
    removing the comment -- see the TODO below.
    """
    comment = get_object_or_404(Comment,pk=comment_id)
    if request.method == 'GET':
        return api_utils.to_json(request, comment)
    elif request.method == 'DELETE':
        # TODO: Only allow deleting comments from the source
        # or from someone who has permission to do so
        return HttpResponse()
    else:
        return HttpResponseBadRequest()
@api_function
def hunt_comments(request,slug):
    """List (GET) or create (POST) top-level comments on a hunt."""
    hunt = get_object_or_404(Hunt,slug=slug)
    if request.method == 'POST':
        return _submit_comment(request, hunt, None)
    if request.method == 'GET':
        top_level = hunt.comment_set.filter(submission=None,is_removed=False)
        return _get_comments(request, top_level)
    return HttpResponseBadRequest()
hunt_comment_by_id = _comment_by_id
@require_GET
@api_function
def hunt_comment_stream(request,slug):
    """Streaming feed of hunt comments -- not implemented yet."""
    pass
# TODO: We should probably generate the via from the API key, when we have one.
def _submit_photo(request,hunt):
    """Validate and store a photo submission for *hunt*.

    Returns a 201 response whose body is the serialized submission and whose
    Content-Location header points at the new resource; form/validation
    failures and out-of-window submissions produce API error responses.
    """
    f = SubmissionForm(request.POST, request.FILES)
    if not f.is_valid():
        return api_utils.api_error(request,str(f.errors))
    # Ensure the time is within the hunt
    response = _ensure_current(request, hunt)
    if response:
        return response
    photo = f.save(commit=False)
    if request.user.is_authenticated():
        photo.user = request.user
    else:
        photo.anon_source = get_anon_source(request)
    photo.ip_address = request.META.get('REMOTE_ADDR')
    photo.hunt = hunt
    photo.submit()
    # response_content_type is an idiotic hack to work around some
    # weird interaction between JSONView and ajaxSubmit().
    response = HttpResponse(api_utils.to_json(request,photo),
                            status=201,
                            content_type=request.POST.get('response_content_type',
                                                          api_utils.JSON_TYPE))
    response['Content-Location'] = request.build_absolute_uri(photo.get_api_url())
    # (dropped the stray trailing semicolon from the original return)
    return response
def _can_delete_photo(user,submission):
return (user.is_authenticated() and
(request.user == submission.user or user.has_perm('hunt.delete_submission')))
def _delete_photo(request, submission):
    """Remove *submission* if the requesting user is allowed to; 403 otherwise."""
    if not _can_delete_photo(request.user, submission):
        return HttpResponseForbidden()
    submission.remove("api")
    return HttpResponseGone()
@api_function
def photo_index(request,slug):
    """List (GET) or submit (POST) photos for the hunt at *slug*."""
    hunt = get_object_or_404(Hunt,slug=slug)
    if request.method == 'POST':
        return _submit_photo(request, hunt)
    if request.method == 'GET':
        visible = hunt.submission_set.filter(is_removed=False)
        return _get_photos(request, visible)
    return HttpResponseBadRequest()
@api_function
def photo_by_id(request,slug,object_id):
    """Fetch (GET) or delete (DELETE) one submission by primary key."""
    submission = get_object_or_404(Submission,pk=object_id)
    handlers = {
        'GET': lambda: api_utils.to_json(request, submission),
        'DELETE': lambda: _delete_photo(request, submission),
    }
    handler = handlers.get(request.method)
    return handler() if handler else HttpResponseBadRequest()
@api_function
def photo_mark_inappropriate(request,slug,object_id):
    """Flag the given submission as inappropriate (removes it from listings).

    Bug fix: the view previously fell off the end and returned None, which
    Django rejects ("view didn't return an HttpResponse"); return an empty
    200 response after removal instead.
    """
    submission = get_object_or_404(Submission,pk=object_id)
    submission.remove("inappropriate")
    return HttpResponse()
# TWTR.Widget.jsonP = function(url, callback) {
# var script = document.createElement('script');
# script.type = 'text/javascript';
# script.src = url;
# document.getElementsByTagName('head')[0].appendChild(script);
# callback(script);
# return script;
# };
@api_function
def photo_stream(request,slug):
    """Streaming feed of photo submissions -- not implemented yet."""
    pass
@api_function
def photo_comments(request,slug,object_id):
    """List (GET) or create (POST) comments attached to one submission."""
    photo = get_object_or_404(Submission,pk=object_id)
    if request.method == 'POST':
        return _submit_comment(request, photo.hunt, photo)
    if request.method == 'GET':
        visible = photo.comment_set.filter(is_removed=False)
        return _get_comments(request, visible)
    return HttpResponseBadRequest()
photo_comment_by_id = _comment_by_id
@api_function
def photo_comment_stream(request,slug,object_id):
    """Streaming feed of a submission's comments -- not implemented yet."""
    pass
| paulcwatts/1hph | gonzo/api/hunt/views.py | Python | bsd-3-clause | 9,295 |
#! /usr/bin/env python
#
# This file is part of khmer, https://github.com/dib-lab/khmer/, and is
# Copyright (C) Michigan State University, 2009-2015. It is licensed under
# the three-clause BSD license; see LICENSE.
# Contact: [email protected]
#
from __future__ import print_function
import glob
# For every paired-end R1 file in the cwd, emit (to stdout) a shell snippet
# that runs Trimmomatic, interleaves the surviving pairs, gzips the results,
# and write-protects the outputs.
filelist = glob.glob('*R1*.fastq.gz')
for r1 in filelist:
    # The mate filename and output names are derived from the R1 name;
    # [:-9] strips the '.fastq.gz' suffix.
    r2 = r1.replace('R1', 'R2')
    final_pe = r1[:-9] + '.pe.fq.gz'
    final_se = r1[:-9] + '.se.fq.gz'
    print("""\
mkdir trim
cd trim
java -jar /usr/local/bin/trimmomatic-0.30.jar PE ../%s ../%s s1_pe s1_se s2_pe s2_se ILLUMINACLIP:/usr/local/share/adapters/TruSeq3-PE.fa:2:30:10
/usr/local/share/khmer/scripts/interleave-reads.py s1_pe s2_pe | gzip -9c > ../%s
cat s1_se s2_se | gzip -9c > ../%s
cd ..
rm -r ./trim/
chmod u-w %s %s
""" % (r1, r2, final_pe, final_se, final_pe, final_se))
| Winterflower/khmer | sandbox/write-trimmomatic.py | Python | bsd-3-clause | 861 |
from django.contrib.contenttypes.models import ContentType
from django.test import TestCase
from hamcrest import assert_that, has_length, has_item, has_property, none
from river.models import TransitionApproval, APPROVED, PENDING
from river.tests.models import BasicTestModel
# noinspection PyMethodMayBeStatic
from rivertest.flowbuilder import RawState, FlowBuilder, AuthorizationPolicyBuilder
class TransitionApprovalMetaModelTest(TestCase):
    """Deleting a TransitionApprovalMeta must not cascade to APPROVED
    approvals (their meta FK is nulled), but does delete PENDING ones."""
    def test_shouldNotDeleteApprovedTransitionWhenDeleted(self):
        """APPROVED approvals survive meta deletion with meta set to None."""
        content_type = ContentType.objects.get_for_model(BasicTestModel)
        state1 = RawState("state_1")
        state2 = RawState("state_2")
        authorization_policies = [AuthorizationPolicyBuilder().build()]
        flow = FlowBuilder("my_field", content_type) \
            .with_transition(state1, state2, authorization_policies) \
            .build()
        TransitionApproval.objects.filter(workflow=flow.workflow).update(status=APPROVED)
        approvals = TransitionApproval.objects.filter(workflow=flow.workflow)
        assert_that(approvals, has_length(1))
        assert_that(approvals, has_item(has_property("meta", flow.transitions_approval_metas[0])))
        flow.transitions_approval_metas[0].delete()
        approvals = TransitionApproval.objects.filter(workflow=flow.workflow)
        assert_that(approvals, has_length(1))
        assert_that(approvals, has_item(has_property("meta", none())))
    def test_shouldNotDeletePendingTransitionWhenDeleted(self):
        """PENDING approvals are removed along with their meta.

        NOTE(review): the method name says "should NOT delete" but the
        assertions verify the approval IS deleted -- the name looks stale.
        """
        content_type = ContentType.objects.get_for_model(BasicTestModel)
        state1 = RawState("state_1")
        state2 = RawState("state_2")
        authorization_policies = [AuthorizationPolicyBuilder().build()]
        flow = FlowBuilder("my_field", content_type) \
            .with_transition(state1, state2, authorization_policies) \
            .build()
        TransitionApproval.objects.filter(workflow=flow.workflow).update(status=PENDING)
        assert_that(TransitionApproval.objects.filter(workflow=flow.workflow), has_length(1))
        flow.transitions_approval_metas[0].delete()
        assert_that(TransitionApproval.objects.filter(workflow=flow.workflow), has_length(0))
| javrasya/django-river | river/tests/models/test__transition_approval_meta.py | Python | bsd-3-clause | 2,209 |
# coding=utf-8
from emft.core.logging import make_logger
from emft.core.providers.github import GHRepo, GHRepoList
from emft.core.singleton import Singleton
from .gh_anon import GHAnonymousSession
from .gh_errors import GHSessionError, NotFoundError
from .gh_objects.gh_mail import GHMail, GHMailList
from .gh_objects.gh_user import GHUser
LOGGER = make_logger(__name__)
# TODO: https://github.com/github/choosealicense.com/tree/gh-pages/_licenses
# TODO https://developer.github.com/v3/licenses/
class GHSession(GHAnonymousSession, metaclass=Singleton):
    """Authenticated Github API session (singleton).

    ``user`` holds the authenticated login on success, None when anonymous,
    and False when the supplied token was rejected by Github.
    """
    # symbolic status values; not referenced in this class's visible code
    session_status = dict(
        not_connected=0,
        connected=1,
        wrong_token=-1,
    )
    def __init__(self, token=None):
        """Authenticate with *token*, falling back to the GH_TOKEN env var."""
        GHAnonymousSession.__init__(self)
        self.gh_user = None
        self.user = None
        if token is None:
            LOGGER.debug('no token given, trying local environment')
            import os
            token = os.environ.get('GH_TOKEN', None)
            if token:
                LOGGER.debug('GH token found in local environment')
            else:
                LOGGER.debug('no GH token found in local environment')
        self.authenticate(token)
        if self.user is False:
            LOGGER.error('Token was invalidated; please create a new one')
        elif self.user is None:
            LOGGER.info('No user')
        else:
            LOGGER.info('authenticated as: {}'.format(self.user))
    def authenticate(self, token):
        """Install the token auth header and resolve the current user.

        Sets ``self.user`` to the login on success, None for no token, or
        False when Github rejects the token.  Returns self for chaining.
        """
        if token is None:
            LOGGER.debug('no token, staying anonymous')
            self.user = None
        else:
            self.headers.update(
                {
                    'Authorization': 'token {}'.format(token)
                }
            )
            self.build_req('user')
            try:
                self.gh_user = GHUser(self._get_json())
                self.user = self.gh_user.login
            except GHSessionError:
                # token rejected by Github
                self.user = False
        return self
    def check_authentication(self, _raise=True):
        """Ensure the session is authenticated; raise GHSessionError (or
        return False when *_raise* is falsy) otherwise."""
        if not isinstance(self.user, str):
            if _raise:
                raise GHSessionError('unauthenticated')
            return False
    @property
    def rate_limit(self):
        # remaining "core" API calls allowed for this session
        self.build_req('rate_limit')
        req = self._get()
        return req.json().get('resources', {}).get('core', {}).get('remaining', 0)
    @property
    def email_addresses(self) -> GHMailList:
        # all email addresses registered with the authenticated account
        self.build_req('user', 'emails')
        return GHMailList(self._get_json())
    @property
    def primary_email(self) -> GHMail or None:
        """First address that is both primary and verified, else None."""
        for mail in self.email_addresses:
            assert isinstance(mail, GHMail)
            if mail.primary and mail.verified:
                return mail
        return None
    def create_repo(self,
                    name: str,
                    description: str = None,
                    homepage: str = None,
                    auto_init: bool = False,
                    # license_template: str = None
                    ):
        """Create a repository under the authenticated user."""
        self.build_req('user', 'repos')
        json = dict(
            name=name,
            description=description,
            homepage=homepage,
            auto_init=auto_init
        )
        self._post(json=json)
    def edit_repo(self,
                  user, repo,
                  new_name: str = None,
                  description: str = None,
                  homepage: str = None,
                  auto_init: bool = False,
                  # license_template: str = None # TODO GH licenses
                  ):
        """Patch repository metadata; keeps the old name when *new_name* is None.

        NOTE(review): the description is sent under the key 'body'; the
        Github edit-repository API documents 'description' -- confirm.
        """
        if new_name is None:
            new_name = repo
        self.build_req('repos', user, repo)
        json = dict(name=new_name)
        if description:
            json['body'] = description
        if homepage:
            json['homepage'] = homepage
        if auto_init:
            json['auto_init'] = auto_init
        return self._patch(json=json)
    def delete_repo(self, name: str):
        """Delete one of the authenticated user's repositories."""
        self.check_authentication()
        self.build_req('repos', self.user, name)
        self._delete()
    def list_own_repos(self):
        """Return the authenticated user's repositories as a GHRepoList."""
        self.build_req('user', 'repos')
        return GHRepoList(self._get_json())
    def get_repo(self, repo_name: str, user: str = None, **_):
        """Fetch a repo (defaults to the authenticated user's namespace);
        raises FileNotFoundError when it does not exist."""
        if user is None:
            self.check_authentication()
            user = self.user
        self.build_req('repos', user, repo_name)
        try:
            return GHRepo(self._get_json())
        except NotFoundError:
            raise FileNotFoundError('repository does not exist')
    def create_pull_request(
        self,
        title: str,
        user, repo,
        description: str = None,
        head: str = None, base: str = 'master'
    ):
        """Open a PR against user/repo; *head* defaults to '<me>:master'."""
        self.check_authentication()
        if head is None:
            head = '{}:master'.format(self.user)
        json = dict(
            title=title,
            head=head,
            base=base
        )
        if description:
            json['body'] = description
        self.build_req('repos', user, repo, 'pulls')
        self._post(json=json)
    # TODO this is just for the lulz
    def create_status(
        self,
        repo: str,
        sha: str,
        state: str,
        target_url: str = None,
        description: str = None,
        context: str = None
    ):
        """Attach a commit status (*state*) to *sha* in the user's *repo*."""
        self.check_authentication()
        self.build_req('repos', self.user, repo, 'statuses', sha)
        json = dict(state=state)
        if target_url:
            json['target_url'] = target_url
        if description:
            json['description'] = description
        if context:
            json['context'] = context
        self._post(json=json)
| 132nd-etcher/EMFT | emft/core/providers/github/gh_session.py | Python | gpl-3.0 | 5,661 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2008-2019 Edgewall Software
# Copyright (C) 2008 Eli Carter
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.com/license.html.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/.
import argparse
import getpass
import sys
from trac.util import salt
from trac.util.compat import crypt, wait_for_file_mtime_change
from trac.util.text import printerr
if crypt is None:
printerr("The crypt module is not found. Install the passlib package "
"from PyPI.", newline=True)
sys.exit(1)
def ask_pass():
    """Prompt twice for a new password; exit(1) if the two entries differ."""
    first = getpass.getpass('New password: ')
    second = getpass.getpass('Re-type new password: ')
    if first != second:
        printerr("htpasswd: password verification error")
        sys.exit(1)
    return first
class HtpasswdFile(object):
    """In-memory representation of an Apache-style htpasswd file.

    ``entries`` is a list of ``[username, pwhash]`` pairs in file order.
    """
    def __init__(self, filename, create=False):
        """Load *filename* unless *create* is set (start empty instead)."""
        self.entries = []
        self.filename = filename
        if not create:
            self.load()
    def load(self):
        """Read the htpasswd file into memory.

        Robustness fixes: blank lines are skipped (previously they crashed
        the unpack), and the line is split on the first ':' only, since some
        hash formats can themselves contain ':' characters.
        """
        self.entries = []
        with open(self.filename, 'r') as f:
            for line in f:
                line = line.rstrip()
                if not line:
                    continue
                username, pwhash = line.split(':', 1)
                self.entries.append([username, pwhash])
    def save(self):
        """Write the htpasswd file to disk"""
        wait_for_file_mtime_change(self.filename)
        with open(self.filename, 'w') as f:
            f.writelines("%s:%s\n" % (entry[0], entry[1])
                         for entry in self.entries)
    def update(self, username, password):
        """Replace the entry for the given user, or add it if new."""
        pwhash = crypt(password, salt())
        matching_entries = [entry for entry in self.entries
                            if entry[0] == username]
        if matching_entries:
            matching_entries[0][1] = pwhash
        else:
            self.entries.append([username, pwhash])
    def delete(self, username):
        """Remove the entry for the given user."""
        self.entries = [entry for entry in self.entries
                        if entry[0] != username]
def main():
    """
    %(prog)s [-c] passwordfile username
    %(prog)s -b[c] passwordfile username password
    %(prog)s -D passwordfile username\
    """
    # NOTE: the docstring above doubles as the argparse usage string at
    # runtime (usage=main.__doc__); do not edit it for style.
    parser = argparse.ArgumentParser(usage=main.__doc__)
    parser.add_argument('-b', action='store_true', dest='batch',
                        help="batch mode; password is passed on the command "
                             "line IN THE CLEAR")
    # -c (create) and -D (delete) are mutually exclusive operations.
    parser_group = parser.add_mutually_exclusive_group()
    parser_group.add_argument('-c', action='store_true', dest='create',
                              help="create a new htpasswd file, overwriting "
                                   "any existing file")
    parser_group.add_argument('-D', action='store_true', dest='delete_user',
                              help="remove the given user from the password "
                                   "file")
    parser.add_argument('passwordfile', help=argparse.SUPPRESS)
    parser.add_argument('username', help=argparse.SUPPRESS)
    parser.add_argument('password', nargs='?', help=argparse.SUPPRESS)
    args = parser.parse_args()
    password = args.password
    # Validate the password argument against the chosen mode: -D takes no
    # password, -b requires one, interactive mode forbids one.
    if args.delete_user:
        if password is not None:
            parser.error("too many arguments")
    else:
        if args.batch and password is None:
            parser.error("too few arguments")
        elif not args.batch and password is not None:
            parser.error("too many arguments")
    try:
        passwdfile = HtpasswdFile(args.passwordfile, create=args.create)
    except EnvironmentError:
        printerr("File not found.")
        sys.exit(1)
    else:
        if args.delete_user:
            passwdfile.delete(args.username)
        else:
            # No password on the command line: prompt interactively.
            if password is None:
                password = ask_pass()
            passwdfile.update(args.username, password)
        passwdfile.save()
if __name__ == '__main__':
main()
| rbaumg/trac | contrib/htpasswd.py | Python | bsd-3-clause | 4,406 |
from django.conf.urls import patterns, url
from views import *
urlpatterns = patterns('',
url(r"^$", IndexView.as_view(), name="index"),
) | Core2Duo/django-engagements | engagements/urls.py | Python | bsd-3-clause | 143 |
# -*- coding: utf-8 -*-
# The MIT License (MIT) - Copyright (c) 2016-2021 Dave Vandenbout.
import pytest
from skidl import TEMPLATE, Net, Network, Part, tee
from .setup_teardown import setup_function, teardown_function
def test_ntwk_1():
    """A common-emitter amplifier."""
    r1, r2 = Part("Device", "R", dest=TEMPLATE) * 2
    q1 = Part("Device", "Q_NPN_EBC")
    # Collector load: 5V -- R1 -- OUTPUT -- collector/emitter of Q1 -- GND.
    Net("5V") & r1 & Net("OUTPUT") & q1["C,E"] & Net("GND")
    # Base bias: 5V -- R2 -- base, fed from INPUT.
    Net.fetch("5V") & r2 & q1.B & Net("INPUT")
    assert len(default_circuit.get_nets()) == 4
    assert len(q1.C.get_nets()[0]) == 2
    assert len(q1.B.get_nets()[0]) == 2
    assert len(q1.E.get_nets()[0]) == 1
    assert len(Net.fetch("5V")) == 2
    assert len(Net.fetch("GND")) == 1
def test_ntwk_2():
    """A resistor + diode in parallel with another resistor."""
    r1, r2 = Part("Device", "R", dest=TEMPLATE) * 2
    d1 = Part("Device", "D")
    # 5V -- (R1 in series with D1) in parallel with R2 -- GND.
    Net("5V") & ((r1 & d1["A,K"]) | r2) & Net("GND")
    assert len(default_circuit.get_nets()) == 3
    assert len(d1.A.get_nets()[0]) == 2
    assert len(d1.K.get_nets()[0]) == 2
    assert len(r1.p2.get_nets()[0]) == 2
    assert len(Net.fetch("5V")) == 2
    assert len(Net.fetch("GND")) == 2
def test_ntwk_3():
    """Cascaded resistor dividers."""
    def r_div():
        # One divider stage: series R whose output node also taps to GND
        # through a second R; [0] selects the series port of the network.
        r1, r2 = Part("Device", "R", dest=TEMPLATE) * 2
        return r1 & (r2 & Net.fetch("GND"))[0]
    Net("inp") & r_div() & r_div() & r_div() & Net("outp")
    assert len(default_circuit.get_nets()) == 5
    assert len(Net.fetch("inp")) == 1
    assert len(Net.fetch("outp")) == 2
def test_ntwk_4():
    """Test limit on network length."""
    q1 = Part("Device", "Q_NPN_EBC")
    # A three-pin part cannot be converted to a two-port Network directly.
    with pytest.raises(ValueError):
        Network(q1)
def test_ntwk_5():
    """Test limit on network length."""
    q1 = Part("Device", "Q_NPN_EBC")
    # Selecting all three pins still exceeds the two-pin network limit.
    with pytest.raises(ValueError):
        Network(q1[:])
def test_ntwk_6():
    """Test limit on network length."""
    r1, r2 = Part("Device", "R", dest=TEMPLATE) * 2
    q1 = Part("Device", "Q_NPN_EBC")
    # Combining a parallel network with a three-pin part must also fail.
    with pytest.raises(ValueError):
        (r1 | r2) & q1
def test_ntwk_7():
    """Test tee() function."""
    r1, r2, r3, r4, r5 = Part("Device", "R", dest=TEMPLATE) * 5
    vi, gnd = Net("VI"), Net("GND")
    # The r3-r4 branch tees off the node between r2 and r5, so that node
    # carries three pins (r2, r3, r5).
    ntwk = vi & r1 & r2 & tee(r3 & r4 & gnd) & r5 & gnd
    assert len(r3[1].get_nets()[0]) == 3
    assert len(r2[2].get_nets()[0]) == 3
    assert len(r5[1].get_nets()[0]) == 3
    assert len(gnd.get_pins()) == 2
| xesscorp/skidl | tests/test_network.py | Python | mit | 2,436 |
from pyfem.util.BaseModule import BaseModule
#------------------------------------------------------------------------------
#
#------------------------------------------------------------------------------
class MeshWriter ( BaseModule ):
  """Writes the mesh and selected nodal output of each cycle to VTK
  unstructured-grid (.vtu) files, indexed by a ParaView .pvd collection."""
  def __init__( self , props , globdat ):
    self.prefix = globdat.prefix
    # self.elementGroup = "All"
    self.elementGroup = "Actived"
    # k counts snapshots written so far; interval = cycles between writes
    self.k = 0
    self.interval = 1
    BaseModule.__init__( self , props )
  def run( self , props , globdat ):
    """Write one <prefix>-<k>.vtu snapshot for the current cycle (only
    every *interval* cycles) and regenerate the <prefix>.pvd index."""
    if not globdat.cycle%self.interval == 0:
      return
    vtkfile = open( self.prefix + '-' + str(self.k) + '.vtu' ,'w' )
    # --- XML header and piece dimensions ---
    vtkfile.write('<?xml version="1.0"?>\n')
    vtkfile.write('<VTKFile type="UnstructuredGrid" version="0.1" byte_order="LittleEndian" compressor="vtkZLibDataCompressor">\n')
    vtkfile.write('<UnstructuredGrid>\n')
    vtkfile.write('<Piece NumberOfPoints="'+str(len(globdat.nodes))+'" NumberOfCells="')
    vtkfile.write(str(globdat.elements.elementGroupCount( self.elementGroup))+'">\n')
    # --- point data: displacement vector per node (padded to 3 components) ---
    vtkfile.write('<PointData>\n')
    vtkfile.write('<DataArray type="Float64" Name="displacement" NumberOfComponents="3" format="ascii" >\n')
    for nodeID in globdat.nodes.keys():
      for dofType in globdat.dofs.dofTypes:
        vtkfile.write(str(globdat.state[globdat.dofs.getForType(nodeID,dofType)])+' ')
      vtkfile.write(' 0.\n')
    vtkfile.write('</DataArray>\n')
    # --- point data: any extra output fields requested via props.outlabel ---
    for label in props.outlabel:
      outdata = globdat.getData(label , range(len(globdat.nodes)) )
      column = len(outdata[0])
      vtkfile.write('<DataArray type="Float64" Name="'+label+'" NumberOfComponents="'+str(column)+'" format="ascii" >\n')
      for i in range(len(globdat.nodes)):
        for j in range(column):
          vtkfile.write( str(outdata[i][j]) + ' ' )
        vtkfile.write(" \n")
      vtkfile.write('</DataArray>\n')
    vtkfile.write('</PointData>\n')
    vtkfile.write('<CellData>\n')
    vtkfile.write('</CellData>\n')
    # --- node coordinates (2D mesh; z is written as 0) ---
    vtkfile.write('<Points>\n')
    vtkfile.write('<DataArray type="Float64" Name="Points" NumberOfComponents="3" format="ascii">\n')
    for nodeID in globdat.nodes.keys():
      crd = globdat.nodes.getNodeCoords(nodeID)
      vtkfile.write( str(crd[0]) + ' ' + str(crd[1]) + " 0.0\n" )
    vtkfile.write('</DataArray>\n')
    vtkfile.write('</Points>\n')
    vtkfile.write('<Cells>\n')
    vtkfile.write('<DataArray type="Int64" Name="connectivity" format="ascii">\n')
    #--Store elements-----------------------------
    for element in globdat.elements.iterElementGroup( self.elementGroup ):
      el_nodes = globdat.nodes.getIndices(element.getNodes())
      if len(el_nodes) == 3 or len(el_nodes) == 4:
        for node in el_nodes:
          vtkfile.write(str(node)+' ')
      elif len(el_nodes) == 6 or len(el_nodes) == 8:
        # quadratic elements: keep only the corner nodes (every other one)
        for node in el_nodes[::2]:
          vtkfile.write(str(node)+' ')
      vtkfile.write('\n')
    vtkfile.write('</DataArray>\n')
    # offsets assume a constant node count per element within the group
    vtkfile.write('<DataArray type="Int64" Name="offsets" format="ascii">\n')
    for i,element in enumerate(globdat.elements.iterElementGroup( self.elementGroup )):
      el_nodes = globdat.nodes.getIndices(element.getNodes())
      vtkfile.write(str(len(el_nodes)*(i+1))+'\n')
    vtkfile.write('</DataArray>\n')
    # NOTE(review): cell type 9 is VTK_QUAD; 3-node elements would need
    # type 5 (VTK_TRIANGLE) -- confirm all exported elements are quads.
    vtkfile.write('<DataArray type="UInt8" Name="types" format="ascii" RangeMin="9" RangeMax="9">\n')
    for i in range(globdat.elements.elementGroupCount( self.elementGroup)):
      vtkfile.write('9\n')
    vtkfile.write('</DataArray>\n')
    vtkfile.write('</Cells>\n')
    vtkfile.write('</Piece>\n')
    vtkfile.write('</UnstructuredGrid>\n')
    vtkfile.write('</VTKFile>\n')
    #--Write pvd file
    f = open( self.prefix + '.pvd' ,'w' )
    f.write("<VTKFile byte_order='LittleEndian' type='Collection' version='0.1'>\n")
    f.write("<Collection>\n")
    for i in range(self.k+1):
      f.write("<DataSet file='"+self.prefix+'-'+str(i)+".vtu' groups='' part='0' timestep='"+str(i)+"'/>\n")
    f.write("</Collection>\n")
    f.write("</VTKFile>\n")
    f.close()
    self.k = self.k+1
| shiyifuchen/PyFem | pyfem/pyfem/io/MeshWriter.py | Python | gpl-3.0 | 4,108 |
from __future__ import absolute_import
from .base import *
from .local import *
# Production overrides: Redis cache on localhost (15 s socket timeout),
# debugging disabled.
CACHE_BACKEND = 'redis_cache.cache://127.0.0.1:6379/?timeout=15'
DEBUG = False
| almet/whiskerboard | settings/live.py | Python | mit | 161 |
from django.conf.urls import patterns, include, url
import settings
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()
urlpatterns = patterns('',
    # Examples:
    # url(r'^$', 'xueba.views.home', name='home'),
    # url(r'^xueba/', include('xueba.foo.urls')),
    # Serve collected static assets via Django itself (development only;
    # a real web server should handle /static/ in production).
    url( r'^static/(?P<path>.*)$', 'django.views.static.serve',
        {'document_root':settings.STATIC_ROOT}),
    url(r'^shudu/', include('shudu.shuDuUrls')),
    # Uncomment the admin/doc line below to enable admin documentation:
    # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
    # Uncomment the next line to enable the admin:
    # url(r'^admin/', include(admin.site.urls)),
)
| qzxx-syzz/study | python/sudoku3/xueba/xueba/urls.py | Python | gpl-3.0 | 765 |
# Copyright NuoBiT Solutions - Eric Antones <[email protected]>
# Copyright 2021 NuoBiT Solutions - Kilian Niubo <[email protected]>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl)
from odoo import fields, models
class Project(models.Model):
    _inherit = "project.project"
    # When set, opening the project's tasks shows the timeline view first.
    default_timeline_view = fields.Boolean(string="Default timeline view")
    def action_view_tasks(self):
        """Extend the standard tasks action so the timeline view comes
        first when ``default_timeline_view`` is enabled on the project."""
        action = super(Project, self).action_view_tasks()
        if self.default_timeline_view:
            action[
                "view_mode"
            ] = "timeline,kanban,tree,form,calendar,pivot,graph,activity"
            # (False, mode) pairs let Odoo pick the default view of each type
            action["views"] = [
                (False, "timeline"),
                (False, "kanban"),
                (False, "tree"),
                (False, "form"),
                (False, "calendar"),
                (False, "pivot"),
                (False, "graph"),
                (False, "activity"),
            ]
        return action
| nuobit/odoo-addons | project_timeline_default_view/models/project.py | Python | agpl-3.0 | 972 |
#
# Copyright 2014-2021 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# https://github.com/easybuilders/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
#
"""
Shared module for vsc software testing
TestCase: use instead of unittest TestCase
from easybuild.base.testing import TestCase
:author: Stijn De Weirdt (Ghent University)
:author: Kenneth Hoste (Ghent University)
"""
import difflib
import pprint
import re
import sys
from contextlib import contextmanager
try:
from cStringIO import StringIO # Python 2
except ImportError:
from io import StringIO # Python 3
from unittest import TestCase as OrigTestCase
from easybuild.tools.py2vs3 import string_type
def nicediff(txta, txtb, offset=5):
    """
    generate unified diff style output
    ndiff has nice indicators what is different, but prints the whole content
    each line that is interesting starts with non-space
    unified diff only prints changes and some offset around it
    return list with diff (one per line) (not a generator like ndiff or unified_diff)
    """
    diff = list(difflib.ndiff(txta.splitlines(1), txtb.splitlines(1)))
    different_idx = [idx for idx, line in enumerate(diff) if not line.startswith(' ')]
    # Collect the window of `offset` lines around every change.  Using a set
    # replaces the previous O(n^2) "append if not already present" scan.
    res_idx = set()
    for didx in different_idx:
        res_idx.update(range(max(didx - offset, 0), min(didx + offset, len(diff) - 1)))
    # NOTE(review): the exclusive upper bound of len(diff)-1 means the very
    # last diff line is never included; kept as-is to preserve output.
    # insert linenumbers too? what are the linenumbers in ndiff?
    return [diff[idx] for idx in sorted(res_idx)]
class TestCase(OrigTestCase):
    """Enhanced test case, provides extra functionality (e.g. an assertErrorRegex method)."""

    longMessage = True  # print both standard message and custom message
    # Maximum number of diff lines embedded in an assertEqual failure message.
    ASSERT_MAX_DIFF = 100
    DIFF_OFFSET = 5  # lines of text around changes

    def is_string(self, x):
        """Return True if the variable x is a string."""
        try:
            # string_type comes from easybuild.tools.py2vs3 (imported at module level)
            return isinstance(x, string_type)
        except NameError:
            return isinstance(x, str)

    # pylint: disable=arguments-differ
    def assertEqual(self, a, b, msg=None):
        """Make assertEqual always print useful messages"""
        try:
            super(TestCase, self).assertEqual(a, b)
        except AssertionError as e:
            # Combine the caller-supplied message (if any) with the standard one,
            # then append a trimmed line-based diff of the two values.
            if msg is None:
                msg = str(e)
            else:
                msg = "%s: %s" % (msg, e)
            # Non-string values are pretty-printed so the diff is line-based.
            if self.is_string(a):
                txta = a
            else:
                txta = pprint.pformat(a)
            if self.is_string(b):
                txtb = b
            else:
                txtb = pprint.pformat(b)
            diff = nicediff(txta, txtb, offset=self.DIFF_OFFSET)
            if len(diff) > self.ASSERT_MAX_DIFF:
                limit = ' (first %s lines)' % self.ASSERT_MAX_DIFF
            else:
                limit = ''
            raise AssertionError("%s:\nDIFF%s:\n%s" % (msg, limit, ''.join(diff[:self.ASSERT_MAX_DIFF])))

    def setUp(self):
        """Prepare test case."""
        super(TestCase, self).setUp()
        self.maxDiff = None  # never let unittest truncate its own diffs
        self.longMessage = True
        # Keep references to the real streams so mock_stdout/mock_stderr can restore them.
        self.orig_sys_stdout = sys.stdout
        self.orig_sys_stderr = sys.stderr

    def convert_exception_to_str(self, err):
        """Convert an Exception instance to a string."""
        msg = err
        if hasattr(err, 'msg'):
            msg = err.msg
        elif hasattr(err, 'message'):
            msg = err.message
            if not msg:
                # rely on str(msg) in case err.message is empty
                msg = err
        elif hasattr(err, 'args'):  # KeyError in Python 2.4 only provides message via 'args' attribute
            msg = err.args[0]
        else:
            msg = err
        try:
            res = str(msg)
        except UnicodeEncodeError:
            # last resort: return a bytes representation rather than failing
            res = msg.encode('utf8', 'replace')
        return res

    def assertErrorRegex(self, error, regex, call, *args, **kwargs):
        """
        Convenience method to match regex with the expected error message.
        Example: self.assertErrorRegex(OSError, "No such file or directory", os.remove, '/no/such/file')
        """
        try:
            call(*args, **kwargs)
            # call() returned without raising: build a readable repr of the call and fail
            str_kwargs = ['='.join([k, str(v)]) for (k, v) in kwargs.items()]
            str_args = ', '.join(list(map(str, args)) + str_kwargs)
            self.assertTrue(False, "Expected errors with %s(%s) call should occur" % (call.__name__, str_args))
        except error as err:
            msg = self.convert_exception_to_str(err)
            if self.is_string(regex):
                regex = re.compile(regex)
            # NOTE(review): this failure message reads as if the pattern *was* found,
            # but it is shown when the search fails; consider rewording to "not found in".
            self.assertTrue(regex.search(msg), "Pattern '%s' is found in '%s'" % (regex.pattern, msg))

    def mock_stdout(self, enable):
        """Enable/disable mocking stdout."""
        sys.stdout.flush()
        if enable:
            sys.stdout = StringIO()
        else:
            # restore the stream saved in setUp()
            sys.stdout = self.orig_sys_stdout

    def mock_stderr(self, enable):
        """Enable/disable mocking stderr."""
        sys.stderr.flush()
        if enable:
            sys.stderr = StringIO()
        else:
            # restore the stream saved in setUp()
            sys.stderr = self.orig_sys_stderr

    def get_stdout(self):
        """Return output captured from stdout until now."""
        return sys.stdout.getvalue()

    def get_stderr(self):
        """Return output captured from stderr until now."""
        return sys.stderr.getvalue()

    @contextmanager
    def mocked_stdout_stderr(self, mock_stdout=True, mock_stderr=True):
        """Context manager to mock stdout and stderr"""
        if mock_stdout:
            self.mock_stdout(True)
        if mock_stderr:
            self.mock_stderr(True)
        try:
            # yield the mocked stream(s) so callers can inspect captured output
            if mock_stdout and mock_stderr:
                yield sys.stdout, sys.stderr
            elif mock_stdout:
                yield sys.stdout
            else:
                yield sys.stderr
        finally:
            # always restore the real streams, even if the body raised
            if mock_stdout:
                self.mock_stdout(False)
            if mock_stderr:
                self.mock_stderr(False)

    def tearDown(self):
        """Cleanup after running a test."""
        self.mock_stdout(False)
        self.mock_stderr(False)
        super(TestCase, self).tearDown()
| hpcugent/easybuild-framework | easybuild/base/testing.py | Python | gpl-2.0 | 7,168 |
import re
from autotest.client.shared import error
from autotest.client import utils
from virttest import virsh
from virttest import utils_libvirtd
def run(test, params, env):
    """
    Test the command virsh nodecpustats

    (1) Call the virsh nodecpustats command for all cpu host cpus
        separately
    (2) Get the output
    (3) Check the against /proc/stat output(o) for respective cpu
        user: o[0] + o[1]
        system: o[2] + o[5] + o[6]
        idle: o[3]
        iowait: o[4]
    (4) Call the virsh nodecpustats command with an unexpected option
    (5) Call the virsh nodecpustats command with libvirtd service stop
    """

    def virsh_check_nodecpustats_percpu(actual_stats):
        """
        Check the actual nodecpustats output value for one CPU:
        total time <= system uptime
        """
        # Normalise to seconds from nano seconds
        total = float((actual_stats['system'] + actual_stats['user'] +
                       actual_stats['idle'] + actual_stats['iowait']) / (10 ** 9))
        uptime = float(utils.get_uptime())
        if not total <= uptime:
            raise error.TestFail("Commands 'virsh nodecpustats' not succeeded"
                                 " as total time: %f is more"
                                 " than uptime: %f" % (total, uptime))
        return True

    def virsh_check_nodecpustats(actual_stats, cpu_count):
        """
        Check the actual aggregated nodecpustats output value:
        total time (averaged per CPU) <= system uptime
        """
        # Normalise to seconds from nano seconds and get for one cpu
        total = float(((actual_stats['system'] + actual_stats['user'] +
                        actual_stats['idle'] + actual_stats['iowait']) / (10 ** 9)) / (
                      cpu_count))
        uptime = float(utils.get_uptime())
        if not total <= uptime:
            raise error.TestFail("Commands 'virsh nodecpustats' not succeeded"
                                 " as total time: %f is more"
                                 " than uptime: %f" % (total, uptime))
        return True

    def virsh_check_nodecpustats_percentage(actual_per):
        """
        Check the actual nodecpustats percentage adds up to 100%
        """
        total = int(round(actual_per['user'] + actual_per['system'] +
                          actual_per['idle'] + actual_per['iowait']))
        if not total == 100:
            raise error.TestFail("Commands 'virsh nodecpustats' not succeeded"
                                 " as the total percentage value: %d"
                                 " is not equal 100" % total)

    def parse_output(output):
        """
        To get the output parsed into a dictionary
        :param virsh command output
        :return: dict of user,system,idle,iowait times
        """
        # From the beginning of a line, group 1 is one or more word-characters,
        # followed by zero or more whitespace characters and a ':',
        # then one or more whitespace characters,
        # followed by group 2, which is one or more digit characters,
        # e.g as below
        # user:                  6163690000000
        #
        regex_obj = re.compile(r"^(\w+)\s*:\s+(\d+)")
        actual = {}

        for line in output.stdout.split('\n'):
            match_obj = regex_obj.search(line)
            # Due to the extra space in the list
            if match_obj is not None:
                name = match_obj.group(1)
                value = match_obj.group(2)
                actual[name] = int(value)
        return actual

    def parse_percentage_output(output):
        """
        To get the output parsed into a dictionary
        :param virsh command output
        :return: dict of user,system,idle,iowait percentages
        """
        # From the beginning of a line, group 1 is one or more word-characters,
        # followed by zero or more whitespace characters and a ':',
        # then one or more whitespace characters,
        # followed by group 2, which is one or more digit characters,
        # e.g as below
        # user:             1.5%
        #
        # NOTE(review): the '.' between the digit groups is unescaped, so it
        # matches any character; it works for the expected "1.5" style values,
        # but r"(\d+\.\d+)" would be stricter.
        regex_obj = re.compile(r"^(\w+)\s*:\s+(\d+.\d+)")
        actual_percentage = {}

        for line in output.stdout.split('\n'):
            match_obj = regex_obj.search(line)
            # Due to the extra space in the list
            if match_obj is not None:
                name = match_obj.group(1)
                value = match_obj.group(2)
                actual_percentage[name] = float(value)
        return actual_percentage

    # Initialize the variables
    itr = int(params.get("inner_test_iterations"))
    option = params.get("virsh_cpunodestats_options")
    invalid_cpunum = params.get("invalid_cpunum")
    status_error = params.get("status_error")
    libvirtd = params.get("libvirtd", "on")

    # Prepare libvirtd service
    if libvirtd == "off":
        utils_libvirtd.libvirtd_stop()

    # Get the host cpu list
    host_cpus_list = utils.cpu_online_map()

    # Run test case for 5 iterations default can be changed in subtests.cfg
    # file
    for i in range(itr):

        if status_error == "yes":
            if invalid_cpunum == "yes":
                # CPUs are indexed from 0, so len(host_cpus_list) alone would
                # already be out of range; '+ 1' keeps it safely invalid.
                option = "--cpu %s" % (len(host_cpus_list) + 1)
            output = virsh.nodecpustats(ignore_status=True, option=option)
            status = output.exit_status

            if status == 0:
                if libvirtd == "off":
                    # restart the service before failing so later tests are not impacted
                    utils_libvirtd.libvirtd_start()
                    raise error.TestFail("Command 'virsh nodecpustats' "
                                         "succeeded with libvirtd service "
                                         "stopped, incorrect")
                else:
                    raise error.TestFail("Command 'virsh nodecpustats %s' "
                                         "succeeded (incorrect command)" % option)

        elif status_error == "no":
            # Run the testcase for each cpu to get the cpu stats
            for cpu in host_cpus_list:
                option = "--cpu %s" % cpu
                output = virsh.nodecpustats(ignore_status=True, option=option)
                status = output.exit_status

                if status == 0:
                    actual_value = parse_output(output)
                    virsh_check_nodecpustats_percpu(actual_value)
                else:
                    raise error.TestFail("Command 'virsh nodecpustats %s'"
                                         "not succeeded" % option)

            # Run the test case for each cpu to get the cpu stats in percentage
            for cpu in host_cpus_list:
                option = "--cpu %s --percent" % cpu
                output = virsh.nodecpustats(ignore_status=True, option=option)
                status = output.exit_status

                if status == 0:
                    actual_value = parse_percentage_output(output)
                    virsh_check_nodecpustats_percentage(actual_value)
                else:
                    raise error.TestFail("Command 'virsh nodecpustats %s'"
                                         " not succeeded" % option)

            option = ''
            # Run the test case for total cpus to get the cpus stats
            output = virsh.nodecpustats(ignore_status=True, option=option)
            status = output.exit_status

            if status == 0:
                actual_value = parse_output(output)
                virsh_check_nodecpustats(actual_value, len(host_cpus_list))
            else:
                raise error.TestFail("Command 'virsh nodecpustats %s'"
                                     " not succeeded" % option)

            # Run the test case for the total cpus to get the stats in
            # percentage
            option = "--percent"
            output = virsh.nodecpustats(ignore_status=True, option=option)
            status = output.exit_status

            if status == 0:
                actual_value = parse_percentage_output(output)
                virsh_check_nodecpustats_percentage(actual_value)
            else:
                raise error.TestFail("Command 'virsh nodecpustats %s'"
                                     " not succeeded" % option)

    # Recover libvirtd service start
    if libvirtd == "off":
        utils_libvirtd.libvirtd_start()
| svirt/tp-libvirt | libvirt/tests/src/virsh_cmd/host/virsh_nodecpustats.py | Python | gpl-2.0 | 8,299 |
#!/usr/bin/env python3
#
# Copyright (C) 2014 INRA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
__author__ = 'Frederic Escudie - Plateforme bioinformatique Toulouse - Maria Bernard - Sigenae Jouy en Josas'
__copyright__ = 'Copyright (C) 2015 INRA'
__license__ = 'GNU General Public License'
__version__ = '0.7.2'
__email__ = '[email protected]'
__status__ = 'prod'
import os
import sys
import time
import argparse
import subprocess
from subprocess import Popen, PIPE
import threading
import multiprocessing
from multiprocessing import Queue
CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
LIB_DIR = os.path.abspath(os.path.join(os.path.dirname(CURRENT_DIR), "lib"))
sys.path.append(LIB_DIR)
if os.getenv('PYTHONPATH') is None: os.environ['PYTHONPATH'] = LIB_DIR
else: os.environ['PYTHONPATH'] = os.environ['PYTHONPATH'] + os.pathsep + LIB_DIR
from frogsBiom import BiomIO
from frogsSequenceIO import *
from frogsUtils import *
##################################################################################################################################################
#
# FUNCTIONS
#
##################################################################################################################################################
def get_sample_resuts( log_file ):
    """
    @summary: Reads a per-sample chimera log file and returns its metrics.
    @param log_file: [str] Path to a sample log file.
    @return: [dict] Metrics with keys 'nb_chimera', 'chimera_abundance' and
             'chimera_max_abundance'.
    """
    metrics = dict()
    with open(log_file) as log_fh:
        for raw_line in log_fh:
            stripped = raw_line.strip()
            if stripped.startswith('nb_chimera: '):
                metrics["nb_chimera"] = int(stripped.split(':')[1].strip())
            elif stripped.startswith('chimera_abun: '):
                metrics["chimera_abundance"] = int(stripped.split(':')[1].strip())
            elif stripped.startswith('max_chimera_abun: '):
                metrics["chimera_max_abundance"] = int(stripped.split(':')[1].strip())
    return metrics
def write_summary( samples_names, sample_logs, log_remove_global, log_remove_spl, out_file ):
    """
    @summary: Writes the summary file: a global metrics section followed by a
              per-sample metrics section (tab-separated).
    @param samples_names: [list] The samples names.
    @param sample_logs: [list] List of sample logs files (parsed by get_sample_resuts).
    @param log_remove_global: [dict] The global remove metrics.
    @param log_remove_spl: [dict] The remove metrics by sample.
    @param out_file: [str] Path to the summary file.
    """
    # Gather per-sample detection metrics from the individual log files
    detection_results = {name: get_sample_resuts(sample_logs[idx]) for idx, name in enumerate(samples_names)}

    summary_fh = open(out_file, "wt")
    # Global section
    summary_fh.write( '##Metrics global\n' )
    global_headers = ['#Nb removed', 'Nb kept', 'Abundance removed', 'Abundance kept', 'Nb ambiguous', 'Abundance ambiguous']
    summary_fh.write( "\t".join(global_headers) + "\n" )
    global_values = [ log_remove_global['nb_removed'], log_remove_global['nb_kept'],
                      log_remove_global['abundance_removed'], log_remove_global['abundance_kept'],
                      log_remove_global['nb_ambiguous'], log_remove_global['abundance_ambiguous'] ]
    summary_fh.write( "\t".join(map(str, global_values)) + "\n" )
    summary_fh.write( "\n" )
    # Per-sample section
    summary_fh.write( '##Metrics by sample\n' )
    sample_headers = ['#Sample name', "Clusters kept", "Cluster abundance kept", "Chimeric clusters removed", "Chimeric abundance removed", "Abundance of the most abundant chimera removed", "Individual chimera detected", "Individual chimera abundance detected", "Abundance of the most abundant individual chimera detected"]
    summary_fh.write( "\t".join(sample_headers) + "\n" )
    for name in sorted(samples_names):
        removal = log_remove_spl[name]
        detection = detection_results[name]
        row = [ name,
                removal['nb_kept'],
                removal['kept_abundance'],
                removal['nb_removed'],
                removal['removed_abundance'],
                removal['removed_max_abundance'],
                detection['nb_chimera'],
                detection['chimera_abundance'],
                detection['chimera_max_abundance'] ]
        summary_fh.write( "\t".join(map(str, row)) + "\n" )
    summary_fh.write( "\n" )
    summary_fh.close()
def get_obs_from_biom( in_biom ):
    """
    @summary: Returns the counts by observation from a BIOM file.
    @param in_biom: Path to the BIOM.
    @return: [dict] Returns the counts by observation.
    """
    biom = BiomIO.from_json(in_biom)
    counts_by_obs = {obs_name: biom.get_observation_count(obs_name)
                     for obs_name in biom.get_observations_names()}
    del biom  # free the parsed BIOM structure as soon as possible
    return counts_by_obs
def get_obs_from_count( in_count ):
    """
    @summary: Returns the counts by observation from a COUNT file.
    @param in_count: [str] Path to the COUNT file (TSV: observation name then
                     one count column per sample; the first line is a header).
    @return: [dict] The total count (summed over all samples) by observation name.
    """
    observ_dict = dict()
    with open(in_count) as in_count_fh:  # 'with' guarantees the handle is closed
        in_count_fh.readline()  # skip the header line
        for line in in_count_fh:
            line_fields = line.strip().split()
            if not line_fields:  # tolerate blank lines
                continue
            observ_dict[line_fields[0]] = sum(int(count) for count in line_fields[1:])
    return observ_dict
def submit_cmd( cmd, stdout_path, stderr_path):
    """
    @summary: Submits a command on system.
    @param cmd: [list] The command.
    @param stdout_path: The path to the file where the standard outputs will be written.
    @param stderr_path: The path to the file where the error outputs will be written.
    """
    process = Popen(cmd, stdout=PIPE, stderr=PIPE)
    out_text, err_text = process.communicate()
    # persist the captured standard output
    with open(stdout_path, "wt") as out_fh:
        out_fh.write(out_text.decode('utf-8'))
    # persist the captured error output
    with open(stderr_path, "wt") as err_fh:
        err_fh.write(err_text.decode('utf-8'))
    # check error status
    if process.returncode != 0:
        with open(stderr_path, 'rt') as err_fh:
            error_msg = "".join( map(str, err_fh.readlines()) )
        raise_exception( Exception( "\n\n#ERROR : " + error_msg + "\n\n" ))
def remove_chimera_fasta( in_fasta, out_fasta, kept_observ, user_size_separator ):
    """
    @summary: Writes to out_fasta only the records whose (size-stripped) ID is
              in kept_observ, refreshing the size suffix from kept_observ.
    @param in_fasta: [str] Path to the FASTA to filter.
    @param out_fasta: [str] Path to the filtered FASTA.
    @param kept_observ: [dict] Count by observation name for the kept observations.
    @param user_size_separator: [str] Separator between the ID and the abundance
           in sequence IDs (may be None).
    """
    reader = FastaIO( in_fasta )
    writer = FastaIO( out_fasta, "wt")
    for seq_record in reader:
        # strip the trailing abundance suffix to recover the observation name
        base_id = seq_record.id
        if user_size_separator is not None and user_size_separator in seq_record.id:
            base_id = seq_record.id.rsplit(user_size_separator, 1)[0]
        if base_id in kept_observ:
            if user_size_separator is None:
                seq_record.id = base_id
            else:
                # re-append the up-to-date abundance
                seq_record.id = base_id + user_size_separator + str(kept_observ[base_id])
            writer.write(seq_record)
    reader.close()
    writer.close()
def remove_chimera_biom( samples, chimera_files, in_biom_file, out_biom_file, lenient_filter, global_report, bySample_report, log_file ):
    """
    @summary: Removes the chimera observation from BIOM.
    @param samples: [list] samples name list
    @param chimera_files : [list] samples chimera files
    @param in_biom_file: [str] The path to the BIOM file to filter.
    @param out_biom_file: [str] The path to the BIOM after filter.
    @param lenient_filter: [bool] True: removes one sequence in all samples
                           only if it is detected as chimera in all samples
                           where it is present. With False removes one
                           sequence in all samples if it is detected as chimera
                           in at least one sample.
    @param global_report: [dict] This dictionary is updated with the global
                          number of removed observations, the global removed
                          abundance, ...
    @param bySample_report: [dict] This dictionary is updated with, by sample, the
                            number of removed observations, the removed
                            abundance, ...
    @param log_file : [path] Path to general log output file
    """
    FH_log = Logger(log_file)
    FH_log.write("## Removes the chimera observation from BIOM.\n")
    # Number of samples in which each observation was flagged as chimera.
    nb_sample_by_chimera = dict()

    # Init bySample_report
    for sample_name in samples:
        bySample_report[sample_name] = {
            'nb_kept': 0,
            'kept_abundance': 0,
            'nb_removed': 0,
            'removed_abundance': 0,
            'removed_max_abundance': 0
        }

    # Retrieve chimera: each chimera file lists one flagged observation name per line
    for chimera_file in chimera_files:
        chimera_fh = open( chimera_file)
        for line in chimera_fh:
            observation_name = line.strip()
            if observation_name not in nb_sample_by_chimera:
                nb_sample_by_chimera[observation_name] = 0
            nb_sample_by_chimera[observation_name] += 1
        chimera_fh.close()

    # Remove chimera
    removed_chimera = list()
    biom = BiomIO.from_json(in_biom_file)
    for chimera_name in list(nb_sample_by_chimera.keys()):
        is_always_chimera = True
        nb_sample_with_obs = sum( 1 for sample in biom.get_samples_by_observation(chimera_name) )
        observation_abundance = biom.get_observation_count(chimera_name)
        # 'ambiguous': flagged as chimera in some but not all samples carrying it
        if nb_sample_with_obs != nb_sample_by_chimera[chimera_name]:
            is_always_chimera = False
            global_report['nb_ambiguous'] += 1
            global_report['abundance_ambiguous'] += observation_abundance
            FH_log.write("'" + chimera_name + "' is not interpreted as chimera in all samples where it is present.\n")
        # strict mode removes any flagged observation; lenient mode only the unanimous ones
        if not lenient_filter or is_always_chimera:
            removed_chimera.append(chimera_name)
            # Global metrics
            global_report['nb_removed'] += 1
            global_report['abundance_removed'] += observation_abundance
            # By sample metrics
            for sample in biom.get_samples_by_observation(chimera_name):
                sample_count = biom.get_count(chimera_name, sample['id'])
                bySample_report[sample['id']]['nb_removed'] += 1
                bySample_report[sample['id']]['removed_abundance'] += sample_count
                bySample_report[sample['id']]['removed_max_abundance'] = max(bySample_report[sample['id']]['removed_max_abundance'], sample_count)
    biom.remove_observations(removed_chimera)

    # Nb non-chimera: everything still in the BIOM after removal is kept
    for observation_name in biom.get_observations_names():
        global_report['nb_kept'] += 1
        global_report['abundance_kept'] += biom.get_observation_count(observation_name)
        # By sample metrics
        for sample in biom.get_samples_by_observation(observation_name):
            sample_count = biom.get_count(observation_name, sample['id'])
            bySample_report[sample['id']]['nb_kept'] += 1
            bySample_report[sample['id']]['kept_abundance'] += sample_count
    BiomIO.write(out_biom_file, biom)
    FH_log.close()
def remove_chimera_count( samples, chimera_files, in_count_file, out_count_file, lenient_filter, global_report, bySample_report, log_file ):
    """
    @summary: Removes the chimera observation from TSV.
    @param samples: [list] samples name list
    @param chimera_files : [list] samples chimera files
    @param in_count_file: [str] The path to the COUNT file to filter.
    @param out_count_file: [str] The path to the COUNT after filter.
    @param lenient_filter: [bool] True: removes one sequence in all samples
                           only if it is detected as chimera in all samples
                           where it is present. With False removes one
                           sequence in all samples if it is detected as chimera
                           in at least one sample.
    @param global_report: [dict] This dictionary is updated with the global
                          number of removed observations, the global removed
                          abundance, ...
    @param bySample_report: [dict] This dictionary is updated with, by sample, the
                            number of removed observations, the removed
                            abundance, ...
    @param log_file : [path] Path to general log output file
    """
    FH_log = Logger(log_file)
    FH_log.write("Removes the chimera observation from TSV.\n")
    # chimera[observation_name][sample_name] is True when the observation was
    # flagged as chimera in that sample.
    chimera = dict()

    # Retrieve chimera
    for idx, sample_name in enumerate(samples):
        chimera_fh = open( chimera_files[idx] )
        for line in chimera_fh:
            observation_name = line.strip()
            if observation_name not in chimera:
                chimera[observation_name] = dict()
            chimera[observation_name][sample_name] = True
        chimera_fh.close()

    # Remove chimera
    in_count_fh = open( in_count_file )
    out_count_fh = open( out_count_file, "wt" )
    samples_pos = dict()  # column index (0-based, counts only) by sample name
    # header
    header = in_count_fh.readline()
    out_count_fh.write(header)
    for idx, sample_name in enumerate(header.strip().split()[1:]):
        samples_pos[sample_name] = idx
        if sample_name not in bySample_report:
            bySample_report[sample_name] = {
                'nb_kept': 0,
                'kept_abundance': 0,
                'nb_removed': 0,
                'removed_abundance': 0,
                'removed_max_abundance': 0
            }
    # body
    for line in in_count_fh:
        line_fields = line.strip().split()
        observation_name = line_fields[0]
        observation_counts = [int(sample_count) for sample_count in line_fields[1:]]
        if observation_name not in chimera:
            # never flagged anywhere: keep as-is
            out_count_fh.write( line )
            global_report['nb_kept'] += 1
            global_report['abundance_kept'] += sum(observation_counts)
            # By sample metrics
            for sample_name in list(samples_pos.keys()):
                sample_count = int(observation_counts[samples_pos[sample_name]])
                if sample_count > 0:
                    bySample_report[sample_name]['nb_kept'] += 1
                    bySample_report[sample_name]['kept_abundance'] += sample_count
        else: # is chimera in at least one sample
            # unanimous only if flagged in every sample with a non-zero count
            is_always_chimera = True
            for sample_name in list(samples_pos.keys()):
                if sample_name not in chimera[observation_name] and int(observation_counts[samples_pos[sample_name]]) != 0:
                    is_always_chimera = False
            if not is_always_chimera: # is not chimera in all samples where it is find
                global_report['nb_ambiguous'] += 1
                global_report['abundance_ambiguous'] += sum(observation_counts)
                FH_log.write( "'" + observation_name + "' is not interpreted as chimera in all samples where it is present.\n")
            if is_always_chimera or not lenient_filter:
                # removed: strict mode removes any flagged observation
                global_report['nb_removed'] += 1
                global_report['abundance_removed'] += sum(observation_counts)
                # By sample metrics
                for sample_name in list(samples_pos.keys()):
                    sample_count = int(observation_counts[samples_pos[sample_name]])
                    if sample_count > 0:
                        bySample_report[sample_name]['nb_removed'] += 1
                        bySample_report[sample_name]['removed_abundance'] += sample_count
                        bySample_report[sample_name]['removed_max_abundance'] = max(bySample_report[sample_name]['removed_max_abundance'], sample_count)
            else:
                # lenient mode keeps non-unanimous chimera
                global_report['nb_kept'] += 1
                global_report['abundance_kept'] += sum(observation_counts)
                out_count_fh.write( line )
                # By sample metrics
                for sample_name in list(samples_pos.keys()):
                    sample_count = int(observation_counts[samples_pos[sample_name]])
                    if sample_count > 0:
                        bySample_report[sample_name]['nb_kept'] += 1
                        bySample_report[sample_name]['kept_abundance'] += sample_count
    in_count_fh.close()
    out_count_fh.close()
    FH_log.close()
def chimera( sample_names, input_fasta, input_abund, outputs_fasta, outputs_chimera, log_chimera, user_size_separator ):
    """
    @summary: Runs chimera detection sequentially for every sample assigned to this worker.
    """
    for sample_idx, current_sample in enumerate(sample_names):
        chimera_by_sample( current_sample, input_fasta, input_abund, outputs_fasta[sample_idx], outputs_chimera[sample_idx], log_chimera[sample_idx], user_size_separator )
        time.sleep(0.5) # Wait to fix 'Exception in thread QueueFeederThread' in slow systems
def chimera_by_sample( sample_name, input_fasta, input_abund, output_fasta, output_chimera, log_chimera, user_size_separator ):
    """
    @summary: Detects chimera in one sample with 'vsearch --uchime_denovo' and
              writes the flagged observation names plus a metrics log.
    @param sample_name: [str] The sample to process (must be a column of input_abund).
    @param input_fasta: [str] Path to the cluster sequences (FASTA).
    @param input_abund: [str] Path to the count table (TSV, header then one row per observation).
    @param output_fasta: [str] Path to the non-chimeric sequences for this sample.
    @param output_chimera: [str] Path to the file receiving flagged observation names (one per line).
    @param log_chimera: [str] Path to the per-sample metrics log.
    @param user_size_separator: [str] Separator between ID and abundance in input IDs (may be None).
    """
    tmp_fasta = output_fasta + ".tmp"
    tmp_log = output_fasta + ".log"
    tmp_stderr = output_fasta + ".stderr"
    tmp_stdout = output_fasta + ".stdout"
    size_separator = ";size="  # vsearch abundance annotation convention
    count_by_obs = dict()

    try:
        FH_log = open(log_chimera,"wt")
        FH_log.write("##Sample : " + sample_name + "\n")
        # Get count by obs: only observations with a non-zero count in this sample
        in_obs_fh = open( input_abund )
        header_line = in_obs_fh.readline().strip()
        sample_idx = header_line.split().index( sample_name )
        for line in in_obs_fh:
            line_fields = line.strip().split()
            if int(line_fields[sample_idx]) != 0:
                count_by_obs[line_fields[0]] = line_fields[sample_idx]
        in_obs_fh.close()
        # Write fasta with observation size (';size=N' suffix expected by vsearch)
        nb_seq_sample = 0
        in_fasta_fh = FastaIO( input_fasta )
        tmp_fasta_fh = FastaIO( tmp_fasta, "wt")
        for record in in_fasta_fh:
            real_id = record.id
            if user_size_separator is not None and user_size_separator in record.id:
                real_id = record.id.rsplit(user_size_separator, 1)[0]
            if real_id in count_by_obs:
                nb_seq_sample += 1
                record.id = real_id + size_separator + str(count_by_obs[real_id])
                tmp_fasta_fh.write(record)
        in_fasta_fh.close()
        tmp_fasta_fh.close()
        # Chimera cleanning
        if nb_seq_sample != 0:
            FH_log.write("## Vsearch command: " + " ".join(["vsearch", "--uchime_denovo", tmp_fasta, "--nonchimeras", output_fasta, "--uchimeout", tmp_log]) + "\n" )
            submit_cmd( ["vsearch", "--uchime_denovo", tmp_fasta, "--nonchimeras", output_fasta, "--uchimeout", tmp_log], tmp_stdout, tmp_stderr )
        else: # The sample is empty
            FH_log.write("## Empty sample, no chimera research\n")
            # create empty outputs so the downstream parsing still works
            open( output_fasta, "wt" ).close()
            open( tmp_log, "wt" ).close()
        # Log: parse the uchimeout report (2nd column is 'ID;size=N',
        # last column is the chimera verdict flag)
        nb_chimera = 0
        chimera_abun = 0
        max_chimera_abun = 0
        nb_non_chimera = 0
        non_chimera_abun = 0
        in_log_fh = open( tmp_log )
        out_chimera_fh = open( output_chimera, "wt" )
        for line in in_log_fh:
            line_fields = line.strip().split()
            observation_name, size = line_fields[1].rsplit( size_separator, 1 )
            size = int(size)
            if line_fields[-1] == "Y":  # "Y" marks a chimera
                out_chimera_fh.write( observation_name + "\n" )
                nb_chimera += 1
                chimera_abun += size
                max_chimera_abun = max(max_chimera_abun, size)
            else:
                nb_non_chimera += 1
                non_chimera_abun += size
        in_log_fh.close()
        out_chimera_fh.close()
        # These 'key: value' lines are parsed back by get_sample_resuts()
        FH_log.write("##Results\n")
        FH_log.write("sample_name: " + sample_name + "\n" + \
                     "nb_chimera: " + str(nb_chimera) + "\n" + \
                     "chimera_abun: " + str(chimera_abun) + "\n" + \
                     "max_chimera_abun: " + str(max_chimera_abun) + "\n" + \
                     "nb_non_chimera: " + str(nb_non_chimera) + "\n" + \
                     "non_chimera_abun: " + str(non_chimera_abun) + "\n" )
        FH_log.close()
    finally:
        # always clean up the per-sample temporary files, even on failure
        if os.path.exists(tmp_fasta): os.remove(tmp_fasta)
        if os.path.exists(tmp_log): os.remove(tmp_log)
        if os.path.exists(tmp_stdout): os.remove(tmp_stdout)
        if os.path.exists(tmp_stderr): os.remove(tmp_stderr)
def log_append_files( log_file, appended_files ):
    """
    @summary: Append content of several log files in one log file.
    @param log_file: [str] The log file where contents of others are appended.
    @param appended_files: [list] List of log files to append.
    """
    FH_log = Logger( log_file )
    FH_log.write( "\n" )
    for appended_path in appended_files:
        # copy the file line by line, then separate it from the next one
        with open(appended_path) as FH_input:
            for content_line in FH_input:
                FH_log.write( content_line )
        FH_log.write( "\n" )
    FH_log.write( "\n" )
    FH_log.close()
def main_process(args):
    """
    @summary: Runs the per-sample chimera detection in parallel, then removes
              the detected chimera from the sequence and abundance files and
              writes the summary.
    @param args: [Namespace] Parsed command-line arguments (see the __main__ section).
    """
    tmp_files = TmpFiles(os.path.split(args.non_chimera)[0])

    try:
        # Default output abundance file keeps the format of the input abundance
        if args.out_abundance is None:
            args.out_abundance = "count.tsv"
            if args.biom is not None:
                args.out_abundance = "abundance.biom"

        # Chimera detection works on a count table: convert a BIOM input if needed
        count_table = args.count
        if args.biom is not None:
            count_table = tmp_files.add("tmp_count.tsv")
            biom = BiomIO.from_json( args.biom )
            BiomIO.write_count_table( count_table, biom )
            del biom

        # Get samples from the count table header, with one set of temporary
        # working files per sample
        samples = list()
        fasta_files = list()
        chimera_files = list()
        sample_logs = list()
        in_count_fh = open( count_table )
        header_line = in_count_fh.readline().strip()
        for sample_name in header_line.split()[1:]:
            samples.append(sample_name)
            fasta_files.append(tmp_files.add(sample_name + ".fasta"))
            chimera_files.append(tmp_files.add(sample_name + ".chimera"))
            sample_logs.append(tmp_files.add(sample_name + "_log.txt"))
        in_count_fh.close()

        # Find chimera
        nb_processses_used = min( len(samples), args.nb_cpus )
        processes = [{'process': None, 'in_file': [], 'out_file': [], 'sample_name': [], 'log': []} for idx in range(nb_processses_used)]
        # Set processes: round-robin the samples over the workers
        for idx, sample_name in enumerate(samples):
            process_idx = idx % nb_processses_used
            processes[process_idx]['sample_name'].append( sample_name )
            processes[process_idx]['in_file'].append( fasta_files[idx] )
            processes[process_idx]['out_file'].append( chimera_files[idx] )
            processes[process_idx]['log'].append( sample_logs[idx] )
        # Launch processes.
        # BUGFIX: the original tested 'idx == 0' with the stale loop variable left
        # over from the 'enumerate(samples)' loop above, so the first worker only
        # ran threaded in the parent when there was exactly one sample; enumerate
        # the workers themselves instead.
        for process_idx, current_process in enumerate(processes):
            if process_idx == 0:  # First process is threaded with parent job
                current_process['process'] = threading.Thread( target=chimera,
                    args=(current_process['sample_name'], args.sequences, count_table, current_process['in_file'], current_process['out_file'], current_process['log'], args.size_separator) )
            else:  # Others processes are processed on different CPU
                current_process['process'] = multiprocessing.Process( target=chimera,
                    args=(current_process['sample_name'], args.sequences, count_table, current_process['in_file'], current_process['out_file'], current_process['log'], args.size_separator) )
            current_process['process'].start()
        # Wait processes end
        for current_process in processes:
            current_process['process'].join()
        # Check processes status (only real subprocesses expose an exitcode)
        for current_process in processes:
            if issubclass(current_process['process'].__class__, multiprocessing.Process) and current_process['process'].exitcode != 0:
                sys.exit(1)

        # Append independant log files
        log_append_files( args.log_file, sample_logs )

        # Remove chimera
        log_remove_global = { 'nb_kept': 0,
                              'abundance_kept': 0,
                              'nb_removed': 0,
                              'abundance_removed': 0,
                              'nb_ambiguous': 0,
                              'abundance_ambiguous': 0}
        log_remove_spl = {}
        if args.biom is not None:
            remove_chimera_biom( samples, chimera_files, args.biom, args.out_abundance, args.lenient_filter, log_remove_global, log_remove_spl, args.log_file )
            remove_chimera_fasta( args.sequences, args.non_chimera, get_obs_from_biom(args.out_abundance), args.size_separator )
        else:
            remove_chimera_count( samples, chimera_files, args.count, args.out_abundance, args.lenient_filter, log_remove_global, log_remove_spl, args.log_file )
            remove_chimera_fasta( args.sequences, args.non_chimera, get_obs_from_count(args.out_abundance), args.size_separator )

        # Summary
        write_summary( samples, sample_logs, log_remove_global, log_remove_spl, args.summary )
    finally:
        if not args.debug:
            tmp_files.deleteAll()
def get_vsearch_version():
    """
    @summary: Return the vsearch version.
    @return: [str] The vsearch version, or "unknown" when vsearch is not
             available or its banner cannot be parsed.
    """
    version = None
    try:
        cmd = ["vsearch", "--version"]
        p = Popen(cmd, stdout=PIPE, stderr=PIPE)
        stdout, stderr = p.communicate()
        # vsearch prints its banner on stderr, e.g.:
        # vsearch v1.1.3_linux_x86_64, 126.0GB RAM, 32 cores
        version = stderr.decode('utf-8').split(",")[0].split()[1]
    except Exception:  # was a bare 'except:': keep best-effort, but stop swallowing SystemExit/KeyboardInterrupt
        version = "unknown"
    return version
##################################################################################################################################################
#
# MAIN
#
##################################################################################################################################################
if __name__ == "__main__":
    # Manage parameters
    parser = argparse.ArgumentParser(
        description='Removes PCR chimera by samples.'
    )
    parser.add_argument( '--size-separator', help="The size separator if the cluster IDs contain the number of represented sequence (format: '<ID_IN_ABUND_FILE><size_separator><NB_SEQ>'" )
    parser.add_argument( '-l', '--lenient-filter', default=False, action='store_true', help="Removes one sequence in all samples only if it is detected as chimera in all samples where it is present. Without this option the program removes one sequence in all samples if it is detected as chimera in at least one sample." )
    parser.add_argument( '-p', '--nb-cpus', type=int, default=1, help="The maximum number of CPUs used. [Default: %(default)s]" )
    parser.add_argument( '--debug', default=False, action='store_true', help="Keep temporary files to debug program." )
    # The vsearch version is embedded in the --version output for reproducibility.
    parser.add_argument( '-v', '--version', action='version', version=__version__ + " [vsearch " + get_vsearch_version() + "]" )
    group_input = parser.add_argument_group( 'Inputs' ) # Inputs
    group_input.add_argument( '-s', '--sequences', required=True, help='The cluster sequences.' )
    # The abundance is provided either as a BIOM file or as a count TSV, never both.
    group_exclusion_abundance = group_input.add_mutually_exclusive_group()
    group_exclusion_abundance.add_argument( '-b', '--biom', help='The abundance file for clusters by sample (format: BIOM).' )
    group_exclusion_abundance.add_argument( '-c', '--count', help='The abundance file for clusters by sample (format: count).' )
    group_output = parser.add_argument_group( 'Outputs' ) # Outputs
    group_output.add_argument( '-n', '--non-chimera', default='non_chimera.fasta', help='Fasta without chimera. [Default: %(default)s]' )
    group_output.add_argument( '-a', '--out-abundance', default=None, help='Abundance file without chimera.' )
    group_output.add_argument( '--summary', default='summary.tsv', help='Summary file. [Default: %(default)s]' )
    group_output.add_argument( '--log-file', default=sys.stdout, help='This output file will contain several information on executed commands.' )
    args = parser.parse_args()

    # Process
    main_process(args)
| geraldinepascal/FROGS | libexec/parallelChimera.py | Python | gpl-3.0 | 29,179 |
from oscar.apps.voucher.abstract_models import (
AbstractVoucher, AbstractVoucherApplication)
class Voucher(AbstractVoucher):
    """Concrete voucher model; all fields/behaviour come from AbstractVoucher.

    Kept empty so projects can fork this app and customise the model
    (standard django-oscar overriding pattern).
    """
    pass
class VoucherApplication(AbstractVoucherApplication):
    """Concrete voucher-application model; behaviour inherited unchanged.

    Kept empty so projects can fork this app and customise the model
    (standard django-oscar overriding pattern).
    """
    pass
| elliotthill/django-oscar | oscar/apps/voucher/models.py | Python | bsd-3-clause | 206 |
"""Module for get scene list frame classes."""
from pyvlx.const import Command
from pyvlx.exception import PyVLXException
from pyvlx.string_helper import bytes_to_string, string_to_bytes
from .frame import FrameBase
class FrameGetSceneListRequest(FrameBase):
    """Frame for get scene list request."""

    # The request carries no data beyond the command byte itself.
    PAYLOAD_LEN = 0

    def __init__(self):
        """Init Frame."""
        super().__init__(Command.GW_GET_SCENE_LIST_REQ)
class FrameGetSceneListConfirmation(FrameBase):
    """Confirmation frame answering a scene list request.

    The payload is a single byte: the number of scenes the gateway
    will report.
    """

    PAYLOAD_LEN = 1

    def __init__(self, count_scenes=0):
        """Initialize confirmation frame with the advertised scene count."""
        super().__init__(Command.GW_GET_SCENE_LIST_CFM)
        self.count_scenes = count_scenes

    def get_payload(self):
        """Serialize the scene count into the one-byte payload."""
        return bytes((self.count_scenes,))

    def from_payload(self, payload):
        """Read the scene count back from raw payload bytes."""
        self.count_scenes = payload[0]

    def __str__(self):
        """Return human readable string."""
        return f'<{type(self).__name__} count_scenes="{self.count_scenes}"/>'
class FrameGetSceneListNotification(FrameBase):
    """Notification frame carrying a (possibly partial) list of scenes.

    Each scene record occupies 65 bytes: one scene-id byte followed by a
    64-byte zero-padded name. A trailing byte reports how many scenes are
    still to come in follow-up notifications.
    """

    def __init__(self):
        """Initialize an empty scene list notification."""
        super().__init__(Command.GW_GET_SCENE_LIST_NTF)
        self.scenes = []
        self.remaining_scenes = 0

    def get_payload(self):
        """Serialize scene count, scene records and the remaining counter."""
        chunks = [bytes([len(self.scenes)])]
        for scene_id, scene_name in self.scenes:
            chunks.append(bytes([scene_id]))
            chunks.append(string_to_bytes(scene_name, 64))
        chunks.append(bytes([self.remaining_scenes]))
        return b"".join(chunks)

    def from_payload(self, payload):
        """Populate attributes by parsing raw payload bytes."""
        record_count = payload[0]
        self.remaining_scenes = payload[-1]
        # Expected layout: 1 count byte + N 65-byte records + 1 trailing byte.
        if len(payload) != record_count * 65 + 2:
            raise PyVLXException("scene_list_notification_wrong_length")
        self.scenes = []
        offset = 1
        for _ in range(record_count):
            record = payload[offset:offset + 65]
            self.scenes.append((record[0], bytes_to_string(record[1:])))
            offset += 65

    def __str__(self):
        """Return human readable string."""
        return (
            f'<{type(self).__name__} scenes="{self.scenes}" '
            f'remaining_scenes="{self.remaining_scenes}">'
        )
| Julius2342/pyvlx | pyvlx/api/frames/frame_get_scene_list.py | Python | lgpl-3.0 | 2,458 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.