code stringlengths 3 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1
value | license stringclasses 15
values | size int64 3 1.05M |
|---|---|---|---|---|---|
"""Functionality for manipulating multiple grism exposures simultaneously
"""
import os
import time
import glob
from collections import OrderedDict
import multiprocessing as mp
import scipy.ndimage as nd
import numpy as np
import matplotlib.pyplot as plt
from astropy.table import Table
import astropy.io.fits as pyfits
import astropy.wcs as pywcs
import astropy.units as u
## local imports
from . import utils
from . import model
#from . import stack
from .fitting import GroupFitter
from .utils_c import disperse
from .utils_c import interp
from .utils import GRISM_COLORS, GRISM_MAJOR, GRISM_LIMITS, DEFAULT_LINE_LIST
def test():
    """Development scratch routine (not an automated unit test).

    NOTE(review): depends on names that are undefined in this module
    (`reload`, `grizlidev`, `ds9`, `i`, `mb`) and on the Python-2 builtin
    `reload`; it cannot run as written and is kept only as a usage sketch
    for `GroupFLT` / `MultiBeam`.
    """
    import glob
    from grizlidev import utils
    import grizlidev.multifit

    # Python-2 builtin `reload`; NameError on Python 3
    reload(utils)
    reload(grizlidev.model)
    reload(grizlidev.multifit)

    files=glob.glob('i*flt.fits')
    output_list, filter_list = utils.parse_flt_files(files, uniquename=False)

    # grism_files = filter_list['G141'][164]
    # #grism_files.extend(filter_list['G141'][247])
    #
    # direct_files = filter_list['F140W'][164][:4]
    #direct_files.extend(filter_list['F140W'][247][:4])

    # grp = grizlidev.multifit.GroupFLT(grism_files=grism_files, direct_files=direct_files)
    #
    #
    # grp = grizlidev.multifit.GroupFLT(grism_files=grism_files, direct_files=direct_files, ref_file=ref)
    # ref = 'MACS0416-F140W_drz_sci_filled.fits'
    # seg = 'hff_m0416_v0.1_bkg_detection_seg_grow.fits'
    # catalog = 'hff_m0416_v0.1_f140w.cat'
    #
    # key = 'cl1301-11.3-122.5-g102'
    # seg = 'cl1301-11.3-14-122-f105w_seg.fits'
    # catalog = 'cl1301-11.3-14-122-f105w.cat'
    # #ref = 'cl1301-11.3-14-122-f105w_drz_sci.fits'
    # grism_files = output_list[key]
    # direct_files = output_list[key.replace('f105w','g102')]

    grism_files = filter_list['G141'][1]
    grism_files.extend(filter_list['G141'][33])
    grism_files = glob.glob('*cmb.fits')

    ref = 'F160W_mosaic.fits'
    seg = 'F160W_seg_blot.fits'
    catalog = '/Users/brammer/3DHST/Spectra/Work/3DHST_Detection/GOODS-N_IR.cat'

    direct_files = []

    reload(utils)
    reload(grizlidev.model)
    reload(grizlidev.multifit)

    grp = grizlidev.multifit.GroupFLT(grism_files=grism_files[:8], direct_files=direct_files, ref_file=ref, seg_file=seg, catalog=catalog)

    self = grp

    fit_info = {3286: {'mag':-99, 'spec': None},
                3279: {'mag':-99, 'spec': None}}

    # Build fit_info from all catalog objects brighter than mag 25
    fit_info = OrderedDict()
    bright = self.catalog['MAG_AUTO'] < 25
    ids = self.catalog['NUMBER'][bright]
    mags = self.catalog['MAG_AUTO'][bright]
    for id, mag in zip(ids, mags):
        fit_info[id] = {'mag':mag, 'spec': None}

    # Fast?
    #fit_info = {3212: {'mag':-99, 'spec': None}}
    #self.compute_single_model(3212)

    ### parallel full-field contamination model
    self.compute_full_model(fit_info, store=False)

    ## Refine the brightest objects with polynomial continuum fits
    bright = (self.catalog['MAG_AUTO'] < 22) & (self.catalog['MAG_AUTO'] > 16)
    ids = self.catalog['NUMBER'][bright]*1
    mags = self.catalog['MAG_AUTO'][bright]*1
    so = np.argsort(mags)
    ids, mags = ids[so], mags[so]

    # `ds9` assumed to be an open DS9 display; not defined here
    self.refine_list(ids, mags, ds9=ds9, poly_order=1)

    # bright = (self.catalog['MAG_AUTO'] < 22) & (self.catalog['MAG_AUTO'] > 16)
    # ids = self.catalog['NUMBER'][bright]*1
    # mags = self.catalog['MAG_AUTO'][bright]*1
    # so = np.argsort(mags)
    #
    # self.refine_list(ids, mags, ds9=ds9, poly_order=5)

    beams = self.get_beams(3212)

    ### serial timing check; NOTE(review): `i` is undefined here
    t0 = time.time()
    out = _compute_model(0, self.FLTs[i], fit_info, False, False)
    t1 = time.time()
    #print t1-t0

    id = 3219
    fwhm = 1200
    zr = [0.58,2.4]

    beams = grp.get_beams(id, size=30)
    mb = grizlidev.multifit.MultiBeam(beams)
    fit, fig = mb.fit_redshift(fwhm=fwhm, zr=zr, poly_order=3, dz=[0.003, 0.003])

    A, out_coeffs, chi2, modelf = mb.fit_at_z(poly_order=1)
    m2d = mb.reshape_flat(modelf)
def _loadFLT(grism_file, sci_extn, direct_file, pad, ref_file,
             ref_ext, seg_file, verbose, catalog, ix):
    """Helper for loading `~grizli.model.GrismFLT` objects with `multiprocessing`.

    Parameters
    ----------
    grism_file : str
        Grism exposure filename.
    sci_extn : int
        Science extension number.
    direct_file : str
        Direct-image exposure filename ('' to rely on `ref_file`).
    pad : int
        Padding, in pixels, around the edge of the detector.
    ref_file : str or None
        Undistorted reference image filename.
    ref_ext : int
        FITS extension of `ref_file`.
    seg_file : str or None
        Segmentation image filename.
    verbose : bool
        Print status messages.
    catalog : `~astropy.table.Table` or None
        Object catalog to blot to the exposure frame.
    ix : int
        Index of this file in the caller's list (bookkeeping only).

    Returns
    -------
    flt : `~grizli.model.GrismFLT`
        Restored from a saved ``*.GrismFLT.fits``/``.pkl`` pair when one
        exists, otherwise initialized from the raw exposure files.
    """
    import time
    try:
        import cPickle as pickle  # Python 2
    except ImportError:
        import pickle  # Python 3

    # Saved products are named like 'rootname.01.GrismFLT.fits'
    new_root = '.{0:02d}.GrismFLT.fits'.format(sci_extn)
    save_file = grism_file.replace('_flt.fits', new_root)
    save_file = save_file.replace('_flc.fits', new_root)
    save_file = save_file.replace('_cmb.fits', new_root)
    save_file = save_file.replace('_rate.fits', new_root)

    # Files that don't follow the '<root>_<suffix>.fits' convention get a
    # sentinel name so the os.path.exists test below always fails.
    if (grism_file.find('_') < 0) & ('GrismFLT' not in grism_file):
        save_file = 'xxxxxxxxxxxxxxxxxxx'

    if os.path.exists(save_file):
        print('Load {0}!'.format(save_file))

        # Restore the pickled object first, then the FITS data arrays.
        # Context manager guarantees the file handle is closed even if
        # unpickling raises.
        with open(save_file.replace('GrismFLT.fits', 'GrismFLT.pkl'),
                  'rb') as fp:
            flt = pickle.load(fp)

        status = flt.load_from_fits(save_file)
    else:
        flt = model.GrismFLT(grism_file=grism_file, sci_extn=sci_extn,
                             direct_file=direct_file, pad=pad,
                             ref_file=ref_file, ref_ext=ref_ext,
                             seg_file=seg_file, shrink_segimage=True,
                             verbose=verbose)

    # WCS can come back with a PC rather than CD matrix after unpickling;
    # regenerate so downstream code that expects CD keywords works.
    if flt.direct.wcs.wcs.has_pc():
        for obj in [flt.grism, flt.direct]:
            obj.get_wcs()

    if catalog is not None:
        flt.catalog = flt.blot_catalog(catalog,
                           sextractor=('X_WORLD' in catalog.colnames))
        flt.catalog_file = catalog
    else:
        flt.catalog = None

    if flt.grism.instrument in ['NIRISS', 'NIRCAM']:
        flt.transform_NIRISS()

    return flt  # , out_cat
def _fit_at_z(self, zgrid, i, templates, fitter, fit_background, poly_order):
"""
For parallel processing
"""
# self, z=0., templates={}, fitter='nnls',
# fit_background=True, poly_order=0
print(i, zgrid[i])
out = self.fit_at_z(z=zgrid[i], templates=templates,
fitter=fitter, poly_order=poly_order,
fit_background=fit_background)
data = {'out':out, 'i':i}
return data
#A, coeffs[i,:], chi2[i], model_2d = out
def test_parallel():
    """Development scratch for parallel redshift fitting (not a unit test).

    NOTE(review): depends on `mb` and `self`, which are not defined in
    this scope, so it cannot run as written; kept as a usage sketch for
    `_fit_at_z` with `multiprocessing`.
    """
    zgrid = np.linspace(1.1,1.3,10)
    templates = mb.load_templates(fwhm=800)
    fitter = 'nnls'
    fit_background = True
    poly_order = 0

    self.FLTs = []
    t0_pool = time.time()

    # Fan the redshift grid out over 4 worker processes
    pool = mp.Pool(processes=4)
    results = [pool.apply_async(_fit_at_z, (mb, zgrid, i, templates, fitter, fit_background, poly_order)) for i in range(len(zgrid))]

    pool.close()
    pool.join()

    chi = zgrid*0.

    # Collect asynchronous results; grid order restored via data['i']
    for res in results:
        data = res.get(timeout=1)
        A, coeffs, chi[data['i']], model_2d = data['out']
        #flt_i.catalog = cat_i

    t1_pool = time.time()
def _compute_model(i, flt, fit_info, is_cgs, store):
"""Helper function for computing model orders.
"""
for id in fit_info:
try:
status = flt.compute_model_orders(id=id, compute_size=True,
mag=fit_info[id]['mag'], in_place=True, store=store,
spectrum_1d = fit_info[id]['spec'], is_cgs=is_cgs,
verbose=False)
except:
print('Failed: {0} {1}'.format(flt.grism.parent_file, id))
continue
print('{0}: _compute_model Done'.format(flt.grism.parent_file))
return i, flt.model, flt.object_dispersers
class GroupFLT():
    """Container for modeling a set of grism exposures together."""
    def __init__(self, grism_files=[], sci_extn=1, direct_files=[],
                 pad=200, group_name='group',
                 ref_file=None, ref_ext=0, seg_file=None,
                 shrink_segimage=True, verbose=True, cpu_count=0,
                 catalog='', polyx=[0.3, 2.35],
                 MW_EBV=0.):
        """Main container for handling multiple grism exposures together

        Parameters
        ----------
        grism_files : list
            List of grism exposures (typically WFC3/IR "FLT" or ACS/UVIS "FLC"
            files). These can be from different grisms and/or orients.

        sci_extn : int
            Science extension to extract from the files in `grism_files`. For
            WFC3/IR this can only be 1, though for the two-chip instruments
            WFC3/UVIS and ACS/WFC3 this can be 1 or 2.

        direct_files : list
            List of direct exposures (typically WFC3/IR "FLT" or ACS/UVIS
            "FLC" files). This list should either be empty or should
            correspond one-to-one with entries in the `grism_files` list,
            i.e., from an undithered pair of direct and grism exposures. If
            such pairs weren't obtained or if you simply wish to ignore them
            and just use the `ref_file` reference image, set to an empty list
            (`[]`).

        pad : int
            Padding in pixels to apply around the edge of the detector to
            allow modeling of sources that fall off of the nominal FOV. For
            this to work requires using a `ref_file` reference image that
            covers this extra area.

        group_name : str
            Name to apply to products produced by this group.

        ref_file : `None` or str
            Undistorted reference image filename, e.g., a drizzled mosaic
            covering the area around a given grism exposure.

        ref_ext : 0
            FITS extension of the reference file where to find the image
            itself.

        seg_file : `None` or str
            Segmentation image filename.

        shrink_segimage : bool
            Do some preprocessing on the segmentation image to speed up the
            blotting to the distorted frame of the grism exposures.

        verbose : bool
            Print verbose information.

        cpu_count : int
            Use parallelization if > 0. If equal to zero, then use the
            maximum number of available cores.  Negative values force
            serial loading.

        catalog : str
            Catalog filename assocated with `seg_file`. These are typically
            generated with "SExtractor", but the source of the files
            themselves isn't critical.

        polyx : [float, float]
            Wavelength limits, in microns, used for polynomial continuum
            fits (see `compute_full_model`).

        MW_EBV : float
            Milky Way foreground extinction E(B-V).
            NOTE(review): accepted but not referenced in this constructor —
            confirm whether it should be propagated to the FLT objects.

        Attributes
        ----------
        catalog : `~astropy.table.Table`
            The table read in with from the above file specified in `catalog`.

        FLTs : list
            List of `~grizli.model.GrismFLT` objects generated from each of
            the files in the `grism_files` list.

        grp.N : int
            Number of grism files (i.e., `len(FLTs)`.)
        """
        self.N = len(grism_files)

        # If direct images don't pair one-to-one with the grism files,
        # ignore them entirely and rely on `ref_file`
        if len(direct_files) != len(grism_files):
            direct_files = ['']*self.N

        self.grism_files = grism_files
        self.direct_files = direct_files
        self.group_name = group_name

        # Wavelengths for polynomial fits
        self.polyx = polyx

        ### Read catalog
        if catalog:
            if isinstance(catalog, str):
                self.catalog = utils.GTable.gread(catalog)
            else:
                self.catalog = catalog

            # necessary columns from SExtractor / photutils: alias
            # photutils-style names to the SExtractor names used throughout
            pairs = [['NUMBER','id'],
                     ['MAG_AUTO', 'mag'],
                     ['MAGERR_AUTO', 'mag_err']]
            cols = self.catalog.colnames
            for pair in pairs:
                if (pair[0] not in cols) & (pair[1] in cols):
                    self.catalog[pair[0]] = self.catalog[pair[1]]
        else:
            self.catalog = None

        if cpu_count == 0:
            cpu_count = mp.cpu_count()

        if cpu_count < 0:
            ### Serial loading
            self.FLTs = []
            t0_pool = time.time()

            for i in range(self.N):
                flt = _loadFLT(self.grism_files[i], sci_extn, self.direct_files[i], pad, ref_file, ref_ext, seg_file, verbose, self.catalog, i)
                self.FLTs.append(flt)

            t1_pool = time.time()
        else:
            ### Read files in parallel
            self.FLTs = []
            t0_pool = time.time()

            pool = mp.Pool(processes=cpu_count)
            results = [pool.apply_async(_loadFLT, (self.grism_files[i], sci_extn, self.direct_files[i], pad, ref_file, ref_ext, seg_file, verbose, self.catalog, i)) for i in range(self.N)]

            pool.close()
            pool.join()

            for res in results:
                flt_i = res.get(timeout=1)
                #flt_i.catalog = cat_i

                # somehow WCS getting flipped from cd to pc in res.get()???
                if flt_i.direct.wcs.wcs.has_pc():
                    for obj in [flt_i.grism, flt_i.direct]:
                        obj.get_wcs()

                self.FLTs.append(flt_i)

            t1_pool = time.time()

        # Parse grisms & PAs: count exposures per grism element
        self.Ngrism = {}
        for i in range(self.N):
            # NIRISS carries the dispersing element in the pupil wheel
            if self.FLTs[i].grism.instrument == 'NIRISS':
                grism = self.FLTs[i].grism.pupil
            else:
                grism = self.FLTs[i].grism.filter

            if grism in self.Ngrism:
                self.Ngrism[grism] += 1
            else:
                self.Ngrism[grism] = 1

        self.grisms = list(self.Ngrism.keys())

        # Index exposure numbers by grism and dispersion position angle
        self.PA = {}
        for g in self.Ngrism:
            self.PA[g] = {}

        for i in range(self.N):
            if self.FLTs[i].grism.instrument == 'NIRISS':
                grism = self.FLTs[i].grism.pupil
            else:
                grism = self.FLTs[i].grism.filter

            PA = self.FLTs[i].get_dispersion_PA(decimals=0)
            if PA in self.PA[grism]:
                self.PA[grism][PA].append(i)
            else:
                self.PA[grism][PA] = [i]

        if verbose:
            print('Files loaded - {0:.2f} sec.'.format(t1_pool - t0_pool))
def save_full_data(self, warn=True):
"""Save models and data files for fast regeneration.
The filenames of the outputs are generated from the input grism
exposure filenames with the following:
>>> file = 'ib3701ryq_flt.fits'
>>> sci_extn = 1
>>> new_root = '.{0:02d}.GrismFLT.fits'.format(sci_extn)
>>>
>>> save_file = file.replace('_flt.fits', new_root)
>>> save_file = save_file.replace('_flc.fits', new_root)
>>> save_file = save_file.replace('_cmb.fits', new_root)
>>> save_file = save_file.replace('_rate.fits', new_root)
It will also save data to a `~pickle` file:
>>> pkl_file = save_file.replace('.fits', '.pkl')
Parameters
----------
warn : bool
Print a warning and skip if an output file is already found to
exist.
Notes
-----
The save filename format was changed May 9, 2017 to the format like
`ib3701ryq.01.GrismFLT.fits` from `ib3701ryq_GrismFLT.fits` to both
allow easier filename parsing and also to allow for instruments that
have multiple `SCI` extensions in a single calibrated file
(e.g., ACS and WFC3/UVIS).
"""
for i in range(self.N):
file = self.FLTs[i].grism_file
if self.FLTs[i].grism.data is None:
if warn:
print('{0}: Looks like data already saved!'.format(file))
continue
new_root = '.{0:02d}.GrismFLT.fits'.format(self.FLTs[i].grism.sci_extn)
save_file = file.replace('_flt.fits', new_root)
save_file = save_file.replace('_flc.fits', new_root)
save_file = save_file.replace('_cmb.fits', new_root)
save_file = save_file.replace('_rate.fits', new_root)
print('Save {0}'.format(save_file))
self.FLTs[i].save_full_pickle()
### Reload initialized data
self.FLTs[i].load_from_fits(save_file)
def extend(self, new, verbose=True):
"""Add another `GroupFLT` instance to `self`
This function appends the exposures if a separate `GroupFLT` instance
to the current instance. You might do this, for example, if you
generate separate `GroupFLT` instances for different grisms and
reference images with different filters.
"""
import copy
self.FLTs.extend(new.FLTs)
self.N = len(self.FLTs)
direct_files = copy.copy(self.direct_files)
direct_files.extend(new.direct_files)
self.direct_files = direct_files
grism_files = copy.copy(self.grism_files)
grism_files.extend(new.grism_files)
self.grism_files = grism_files
# self.direct_files.extend(new.direct_files)
# self.grism_files.extend(new.grism_files)
if verbose:
print('Now we have {0:d} FLTs'.format(self.N))
def compute_single_model(self, id, center_rd=None, mag=-99, size=-1, store=False, spectrum_1d=None, is_cgs=False, get_beams=None, in_place=True, psf_param_dict={}):
"""Compute model spectrum in all exposures
TBD
Parameters
----------
id : type
center_rd : None
mag : type
size : type
store : type
spectrum_1d : type
get_beams : type
in_place : type
Returns
-------
TBD
"""
out_beams = []
for flt in self.FLTs:
if flt.grism.parent_file in psf_param_dict:
psf_params = psf_param_dict[flt.grism.parent_file]
else:
psf_params = None
if center_rd is None:
x = y = None
else:
x, y = flt.direct.wcs.all_world2pix(np.array(center_rd)[None,:], 0).flatten()
status = flt.compute_model_orders(id=id, x=x, y=y, verbose=False,
size=size, compute_size=(size < 0),
mag=mag, in_place=in_place, store=store,
spectrum_1d=spectrum_1d, is_cgs=is_cgs,
get_beams=get_beams, psf_params=psf_params)
out_beams.append(status)
if get_beams:
return out_beams
else:
return True
    def compute_full_model(self, fit_info=None, verbose=True, store=False,
                           mag_limit=25, coeffs=[1.2, -0.5], cpu_count=0,
                           is_cgs=False):
        """Compute the contamination model for all exposures in parallel.

        Parameters
        ----------
        fit_info : dict, optional
            ``{id: {'mag': float, 'spec': None or [wave, flux]}}``.  When
            `None`, built automatically from catalog objects brighter than
            `mag_limit` using a polynomial continuum defined by `coeffs`.
        verbose : bool
            Print timing information.
        store : bool
            Passed through to `compute_model_orders`.
        mag_limit : float
            Faint magnitude limit for the automatically-generated object
            list.
        coeffs : list
            Polynomial coefficients, in powers of ``x = lam/1.e4 - 1``, of
            the default continuum spectrum.
        cpu_count : int
            Number of worker processes; 0 means use all available cores.
        is_cgs : bool
            Spectra in `fit_info` are cgs f-lambda.
            NOTE(review): forced to False when `fit_info` is built
            internally here.
        """
        if cpu_count == 0:
            cpu_count = mp.cpu_count()

        if fit_info is None:
            bright = self.catalog['MAG_AUTO'] < mag_limit
            ids = self.catalog['NUMBER'][bright]
            mags = self.catalog['MAG_AUTO'][bright]

            # Polynomial continuum evaluated across self.polyx (microns)
            #xspec = np.arange(0.3, 5.35, 0.05)-1
            xspec = np.arange(self.polyx[0], self.polyx[1], 0.05)-1
            yspec = [xspec**o*coeffs[o] for o in range(len(coeffs))]
            xspec = (xspec+1)*1.e4
            yspec = np.sum(yspec, axis=0)

            fit_info = OrderedDict()
            for id, mag in zip(ids, mags):
                fit_info[id] = {'mag':mag, 'spec': [xspec, yspec]}

            is_cgs = False

        # Farm each exposure out to its own worker process
        t0_pool = time.time()

        pool = mp.Pool(processes=cpu_count)
        results = [pool.apply_async(_compute_model, (i, self.FLTs[i], fit_info, is_cgs, store)) for i in range(self.N)]

        pool.close()
        pool.join()

        # Copy the computed models back into the local FLT objects
        # (workers operate on pickled copies)
        for res in results:
            i, model, dispersers = res.get(timeout=1)
            self.FLTs[i].object_dispersers = dispersers
            self.FLTs[i].model = model

        t1_pool = time.time()
        if verbose:
            print('Models computed - {0:.2f} sec.'.format(t1_pool - t0_pool))
def get_beams(self, id, size=10, center_rd=None, beam_id='A',
min_overlap=0.1, min_valid_pix=10, min_mask=0.01,
min_sens=0.08, get_slice_header=True):
"""Extract 2D spectra "beams" from the GroupFLT exposures.
Parameters
----------
id : int
Catalog ID of the object to extract.
size : int
Half-size of the 2D spectrum to extract, along cross-dispersion
axis.
center_rd : optional, (float, float)
Extract based on RA/Dec rather than catalog ID.
beam_id : type
Name of the order to extract.
min_overlap : float
Fraction of the spectrum along wavelength axis that has one
or more valid pixels.
min_valid_pix : int
Minimum number of valid pixels (`beam.fit_mask == True`) in 2D
spectrum.
min_mask : float
Minimum factor relative to the maximum pixel value of the flat
f-lambda model where the 2D cutout data are considered good.
Passed through to `~grizli.model.BeamCutout`.
min_sens : float
See `~grizli.model.BeamCutout`.
get_slice_header : bool
Passed to `~grizli.model.BeamCutout`.
Returns
-------
beams : list
List of `~grizli.model.BeamCutout` objects.
"""
beams = self.compute_single_model(id, center_rd=center_rd, size=size, store=False, get_beams=[beam_id])
out_beams = []
for flt, beam in zip(self.FLTs, beams):
try:
out_beam = model.BeamCutout(flt=flt, beam=beam[beam_id],
conf=flt.conf, min_mask=min_mask,
min_sens=min_sens,
get_slice_header=get_slice_header)
except:
#print('Except: get_beams')
continue
valid = (out_beam.grism['SCI'] != 0)
valid &= out_beam.fit_mask.reshape(out_beam.sh)
hasdata = (valid.sum(axis=0) > 0).sum()
if hasdata*1./out_beam.model.shape[1] < min_overlap:
continue
# Empty direct image?
if out_beam.beam.total_flux == 0:
continue
if out_beam.fit_mask.sum() < min_valid_pix:
continue
out_beams.append(out_beam)
return out_beams
def refine_list(self, ids=[], mags=[], poly_order=3, mag_limits=[16,24],
max_coeff=5, ds9=None, verbose=True, fcontam=0.5,
wave=np.linspace(0.2, 2.5e4, 100)):
"""Refine contamination model for list of objects. Loops over `refine`.
Parameters
----------
ids : list
List of object IDs
mags : list
Magnitudes to to along with IDs. If `ids` and `mags` not
specified, then get the ID list from `self.catalog['MAG_AUTO']`.
poly_order : int
Order of the polynomial fit to the spectra.
mag_limits : [float, float]
Magnitude limits of objects to fit from `self.catalog['MAG_AUTO']`
when `ids` and `mags` not set.
max_coeff : float
Fit is considered bad when one of the coefficients is greater
than this value. See `refine`.
ds9 : `~grizli.ds9.DS9`, optional
Display the refined models to DS9 as they are computed.
verbose : bool
Print fit coefficients.
fcontam : float
Contamination weighting parameter.
wave : `~numpy.array`
Wavelength array for the polynomial fit.
Returns
-------
Updates `self.model` in place.
"""
if (len(ids) == 0) | (len(ids) != len(mags)):
bright = ((self.catalog['MAG_AUTO'] < mag_limits[1]) &
(self.catalog['MAG_AUTO'] > mag_limits[0]))
ids = self.catalog['NUMBER'][bright]*1
mags = self.catalog['MAG_AUTO'][bright]*1
so = np.argsort(mags)
ids, mags = ids[so], mags[so]
#wave = np.linspace(0.2,5.4e4,100)
poly_templates = utils.polynomial_templates(wave, order=poly_order, line=False)
for id, mag in zip(ids, mags):
self.refine(id, mag=mag, poly_order=poly_order,
max_coeff=max_coeff, size=30, ds9=ds9,
verbose=verbose, fcontam=fcontam,
templates=poly_templates)
    def refine(self, id, mag=-99, poly_order=3, size=30, ds9=None, verbose=True, max_coeff=2.5, fcontam=0.5, templates=None):
        """Fit polynomial to extracted spectrum of single object to use for contamination model.

        Parameters
        ----------
        id : int
            Object ID to extract.

        mag : float
            Object magnitude. Determines which orders to extract; see
            `~grizli.model.GrismFLT.compute_model_orders`.

        poly_order : int
            Order of the polynomial to fit.

        size : int
            Size of cutout to extract.

        ds9 : `~grizli.ds9.DS9`, optional
            Display the refined models to DS9 as they are computed.

        verbose : bool
            Print information about the fit

        max_coeff : float
            The script computes the implied flux of the polynomial template
            at the pivot wavelength of the direct image filters. If this
            flux is greater than `max_coeff` times the *observed* flux in the
            direct image, then the polynomal fit is considered bad.

        fcontam : float
            Contamination weighting parameter.

        templates : dict, optional
            Precomputed template dictionary. If `None` then compute
            polynomial templates with order `poly_order`.

        Returns
        -------
        Updates `self.model` in place.
        """
        beams = self.get_beams(id, size=size, min_overlap=0.5, get_slice_header=False)
        if len(beams) == 0:
            return True

        mb = MultiBeam(beams, fcontam=fcontam)

        if templates is None:
            # Polynomial templates spanning the observed wavelength range
            wave = np.linspace(0.9*mb.wavef.min(),1.1*mb.wavef.max(),100)
            templates = utils.polynomial_templates(wave, order=poly_order,
                                                   line=False)

        try:
            tfit = mb.template_at_z(z=0, templates=templates, fit_background=True, fitter='lstsq', get_uncertainties=2)
        except:
            # NOTE(review): bare except keeps the `refine_list` loop alive
            # on any fitting failure; consider narrowing to Exception
            return False

        scale_coeffs = [tfit['cfit']['poly {0}'.format(i)][0] for i in range(1+poly_order)]
        xspec, ypoly = tfit['cont1d'].wave, tfit['cont1d'].flux

        # Check where templates inconsistent with broad-band fluxes
        xb = [beam.direct.ref_photplam if beam.direct['REF'] is not None else beam.direct.photplam for beam in beams]
        obs_flux = np.array([beam.beam.total_flux for beam in beams])
        mod_flux = np.polyval(scale_coeffs[::-1], np.array(xb)/1.e4-1)
        nonz = obs_flux != 0

        # Reject the fit if the polynomial badly over-predicts the direct
        # photometry, or is non-finite / negative anywhere
        if (np.abs(mod_flux/obs_flux)[nonz].max() > max_coeff) | ((~np.isfinite(mod_flux/obs_flux)[nonz]).sum() > 0) | (np.min(mod_flux[nonz]) < 0) | ((~np.isfinite(ypoly)).sum() > 0):
            if verbose:
                cstr = ' '.join(['{0:9.2e}'.format(c) for c in scale_coeffs])
                print('{0:>5d} mag={1:6.2f} {2} xx'.format(id, mag, cstr))

            return True

        # Put the refined model into the full-field model
        self.compute_single_model(id, mag=mag, size=-1, store=False, spectrum_1d=[xspec, ypoly], is_cgs=True, get_beams=None, in_place=True)

        # Display the result?
        if ds9:
            flt = self.FLTs[0]
            mask = flt.grism['SCI'] != 0
            ds9.view((flt.grism['SCI'] - flt.model)*mask,
                     header=flt.grism.header)

        if verbose:
            cstr = ' '.join(['{0:9.2e}'.format(c) for c in scale_coeffs])
            print('{0:>5d} mag={1:6.2f} {2}'.format(id, mag, cstr))

        return True
        #m2d = mb.reshape_flat(modelf)
#m2d = mb.reshape_flat(modelf)
############
def old_refine(self, id, mag=-99, poly_order=1, size=30, ds9=None, verbose=True, max_coeff=2.5):
"""TBD
"""
# Extract and fit beam spectra
beams = self.get_beams(id, size=size, min_overlap=0.5, get_slice_header=False)
if len(beams) == 0:
return True
mb = MultiBeam(beams)
try:
A, out_coeffs, chi2, modelf = mb.fit_at_z(poly_order=poly_order, fit_background=True, fitter='lstsq')
except:
return False
# Poly template
scale_coeffs = out_coeffs[mb.N*mb.fit_bg:mb.N*mb.fit_bg+mb.n_poly]
xspec, yfull = mb.eval_poly_spec(out_coeffs)
# Check where templates inconsistent with broad-band fluxes
xb = [beam.direct.ref_photplam if beam.direct['REF'] is not None else beam.direct.photplam for beam in beams]
fb = [beam.beam.total_flux for beam in beams]
mb = np.polyval(scale_coeffs[::-1], np.array(xb)/1.e4-1)
if (np.abs(mb/fb).max() > max_coeff) | (~np.isfinite(mb/fb).sum() > 0) | (np.min(mb) < 0):
if verbose:
print('{0} mag={1:6.2f} {2} xx'.format(id, mag, scale_coeffs))
return True
# Put the refined model into the full-field model
self.compute_single_model(id, mag=mag, size=-1, store=False, spectrum_1d=[(xspec+1)*1.e4, yfull], is_cgs=True, get_beams=None, in_place=True)
# Display the result?
if ds9:
flt = self.FLTs[0]
mask = flt.grism['SCI'] != 0
ds9.view((flt.grism['SCI'] - flt.model)*mask,
header=flt.grism.header)
if verbose:
print('{0} mag={1:6.2f} {2}'.format(id, mag, scale_coeffs))
return True
#m2d = mb.reshape_flat(modelf)
def make_stack(self, id, size=20, target='grism', skip=True, fcontam=1., scale=1, save=True, kernel='point', pixfrac=1, diff=True):
"""Make drizzled 2D stack for a given object
Parameters
----------
id : int
Object ID number.
target : str
Rootname for output files.
skip : bool
If True and the stack PNG file already exists, don't proceed.
fcontam : float
Contamination weighting parameter.
save : bool
Save the figure and FITS HDU to files with names like
>>> img_file = '{0}_{1:05d}.stack.png'.format(target, id)
>>> fits_file = '{0}_{1:05d}.stack.fits'.format(target, id)
diff : bool
Plot residual in final stack panel.
Returns
-------
hdu : `~astropy.io.fits.HDUList`
FITS HDU of the stacked spectra.
fig : `~matplotlib.figure.Figure`
Stack figure object.
"""
print(target, id)
if os.path.exists('{0}_{1:05d}.stack.png'.format(target, id)) & skip:
return True
beams = self.get_beams(id, size=size, beam_id='A')
if len(beams) == 0:
print('id = {0}: No beam cutouts available.'.format(id))
return None
mb = MultiBeam(beams, fcontam=fcontam, group_name=target)
hdu, fig = mb.drizzle_grisms_and_PAs(fcontam=fcontam, flambda=False,
size=size, scale=scale,
kernel=kernel, pixfrac=pixfrac,
diff=diff)
if save:
fig.savefig('{0}_{1:05d}.stack.png'.format(target, id))
hdu.writeto('{0}_{1:05d}.stack.fits'.format(target, id),
clobber=True)
return hdu, fig
    def drizzle_grism_models(self, root='grism_model', kernel='square', scale=0.1, pixfrac=1):
        """
        Make model-subtracted drizzled images of each grism / PA

        Writes '{root}-{grism}-{pa}_grism_sci.fits' and
        '{root}-{grism}-{pa}_grism_clean.fits' (exposure minus model)
        mosaics for every grism / position-angle combination in `self.PA`.

        Parameters
        ----------
        root : str
            Rootname of the output files.

        kernel : str
            Drizzle kernel e.g., ('square', 'point').

        scale : float
            Drizzle `scale` parameter, pixel scale in arcsec.

        pixfrac : float
            Drizzle "pixfrac".
        """
        try:
            from .utils import drizzle_array_groups
        except:
            from grizli.utils import drizzle_array_groups

        # Loop through grisms and PAs
        for g in self.PA:
            for pa in self.PA[g]:
                idx = self.PA[g][pa]
                N = len(idx)
                sci_list = [self.FLTs[i].grism['SCI'] for i in idx]
                clean_list = [self.FLTs[i].grism['SCI']-self.FLTs[i].model
                              for i in idx]

                # Inverse-variance weights; zero out non-finite values
                wht_list = [1/self.FLTs[i].grism['ERR']**2 for i in idx]
                for i in range(N):
                    mask = ~np.isfinite(wht_list[i])
                    wht_list[i][mask] = 0

                wcs_list = [self.FLTs[i].grism.wcs for i in idx]
                for i, ix in enumerate(idx):
                    # Restore WCS array dimensions lost in (de)serialization
                    if wcs_list[i]._naxis[0] == 0:
                        wcs_list[i]._naxis = self.FLTs[ix].grism.sh

                # Science array
                outfile='{0}-{1}-{2}_grism_sci.fits'.format(root, g.lower(),
                                                            pa)
                print(outfile)
                out = drizzle_array_groups(sci_list, wht_list, wcs_list,
                                           scale=scale, kernel=kernel,
                                           pixfrac=pixfrac)

                outsci, _, _, header, outputwcs = out
                header['FILTER'] = g
                header['PA'] = pa
                pyfits.writeto(outfile, data=outsci, header=header,
                               overwrite=True, output_verify='fix')

                # Model-subtracted
                outfile='{0}-{1}-{2}_grism_clean.fits'.format(root, g.lower(),
                                                              pa)
                print(outfile)
                out = drizzle_array_groups(clean_list, wht_list, wcs_list,
                                           scale=scale, kernel=kernel,
                                           pixfrac=pixfrac)

                outsci, _, _, header, outputwcs = out
                header['FILTER'] = g
                header['PA'] = pa
                pyfits.writeto(outfile, data=outsci, header=header,
                               overwrite=True, output_verify='fix')
    def drizzle_full_wavelength(self, wave=1.4e4, ref_header=None,
                                kernel='point', pixfrac=1., verbose=True,
                                offset=[0,0], fcontam=0.):
        """Drizzle FLT frames recentered at a specified wavelength

        Script computes polynomial coefficients that define the dx and dy
        offsets to a specific dispersed wavelengh relative to the reference
        position and adds these to the SIP distortion keywords before
        drizzling the input exposures to the output frame.

        Parameters
        ----------
        wave : float
            Reference wavelength to center the output products

        ref_header : `~astropy.io.fits.Header`
            Reference header for setting the output WCS and image dimensions.

        kernel : str, ('square' or 'point')
            Drizzle kernel to use

        pixfrac : float
            Drizzle PIXFRAC (for `kernel` = 'point')

        verbose : bool
            Print information to terminal

        offset : [float, float]
            Additional (x, y) pixel offset applied to the computed trace
            offsets.

        fcontam : float
            Downweight contaminated pixels by ``exp(-fcontam*|model|/err)``.

        Returns
        -------
        sci, wht : `~np.ndarray`
            Drizzle science and weight arrays with dimensions set in
            `ref_header`.
        """
        from astropy.modeling import models, fitting
        import astropy.wcs as pywcs

        # Prefer the forked 'drizzle' package that accepts arbitrary
        # input/output arrays; otherwise fall back to drizzlepac
        try:
            import drizzle
            if drizzle.__version__ != '1.12.99':
                # Not the fork that works for all input/output arrays
                raise(ImportError)

            #print('drizzle!!')
            from drizzle.dodrizzle import dodrizzle
            drizzler = dodrizzle
            dfillval = '0'
        except:
            from drizzlepac.astrodrizzle import adrizzle
            adrizzle.log.setLevel('ERROR')
            drizzler = adrizzle.do_driz
            dfillval = 0

        ## Quick check now for which grism exposures we should use
        if wave < 1.1e4:
            use_grism = 'G102'
        else:
            use_grism = 'G141'

        # Get the configuration file
        conf = None
        for i in range(self.N):
            if self.FLTs[i].grism.filter == use_grism:
                conf = self.FLTs[i].conf

        # Grism not found in list
        if conf is None:
            return False

        # Compute field-dependent dispersion parameters
        dydx_0_p = conf.conf['DYDX_A_0']
        dydx_1_p = conf.conf['DYDX_A_1']

        dldp_0_p = conf.conf['DLDP_A_0']
        dldp_1_p = conf.conf['DLDP_A_1']

        yp, xp = np.indices((1014,1014)) # hardcoded for WFC3/IR
        sk = 10 # don't need to evaluate at every pixel

        dydx_0 = conf.field_dependent(xp[::sk,::sk], yp[::sk,::sk], dydx_0_p)
        dydx_1 = conf.field_dependent(xp[::sk,::sk], yp[::sk,::sk], dydx_1_p)

        dldp_0 = conf.field_dependent(xp[::sk,::sk], yp[::sk,::sk], dldp_0_p)
        dldp_1 = conf.field_dependent(xp[::sk,::sk], yp[::sk,::sk], dldp_1_p)

        # Inverse pixel offsets from the specified wavelength
        dp = (wave - dldp_0)/dldp_1
        i_x, i_y = 1, 0 # indexing offsets
        dx = dp/np.sqrt(1+dydx_1) + i_x
        dy = dydx_0 + dydx_1*dx + i_y

        dx += offset[0]
        dy += offset[1]

        # Compute polynomial coefficients of the offsets, relative to the
        # detector center (507, 507)
        p_init = models.Polynomial2D(degree=4)
        #fit_p = fitting.LevMarLSQFitter()
        fit_p = fitting.LinearLSQFitter()
        p_dx = fit_p(p_init, xp[::sk,::sk]-507, yp[::sk,::sk]-507, -dx)
        p_dy = fit_p(p_init, xp[::sk,::sk]-507, yp[::sk,::sk]-507, -dy)

        # Output WCS
        out_wcs = pywcs.WCS(ref_header, relax=True)
        out_wcs.pscale = utils.get_wcs_pscale(out_wcs)

        # Initialize outputs
        shape = (ref_header['NAXIS2'], ref_header['NAXIS1'])
        outsci = np.zeros(shape, dtype=np.float32)
        outwht = np.zeros(shape, dtype=np.float32)
        outctx = np.zeros(shape, dtype=np.int32)

        # Loop through exposures
        for i in range(self.N):
            flt = self.FLTs[i]
            if flt.grism.filter != use_grism:
                continue

            h = flt.grism.header.copy()

            # Add the offset polynomial to the SIP distortion keywords
            # (A_* for x, B_* for y)
            for j, p in enumerate(p_dx.param_names):
                key = 'A_'+p[1:]
                if key in h:
                    h[key] += p_dx.parameters[j]
                else:
                    h[key] = p_dx.parameters[j]

            for j, p in enumerate(p_dy.param_names):
                key = 'B_'+p[1:]
                if key in h:
                    h[key] += p_dy.parameters[j]
                else:
                    h[key] = p_dy.parameters[j]

            line_wcs = pywcs.WCS(h, relax=True)
            line_wcs.pscale = utils.get_wcs_pscale(line_wcs)

            # Science and wht arrays: model-subtracted data with
            # contamination-downweighted inverse-variance weights
            sci = flt.grism['SCI'] - flt.model
            wht = 1/(flt.grism['ERR']**2)
            scl = np.exp(-(fcontam*np.abs(flt.model)/flt.grism['ERR']))
            wht *= scl
            wht[~np.isfinite(wht)] = 0

            # Drizzle it
            if verbose:
                print('Drizzle {0} to wavelength {1:.2f}'.format(flt.grism.parent_file, wave))

            drizzler(sci, line_wcs, wht, out_wcs,
                     outsci, outwht, outctx, 1., 'cps', 1,
                     wcslin_pscale=line_wcs.pscale, uniqid=1,
                     pixfrac=pixfrac, kernel=kernel, fillval=dfillval)

        # Done!
        return outsci, outwht
class MultiBeam(GroupFitter):
    def __init__(self, beams, group_name='group', fcontam=0., psf=False, polyx=[0.3, 2.5], MW_EBV=0., min_mask=0.01, min_sens=0.08, sys_err=0.0, verbose=True):
        """Tools for dealing with multiple `~.model.BeamCutout` instances

        Parameters
        ----------
        beams : list
            List of `~.model.BeamCutout` objects.  May also be a filename
            (or list of filenames) of saved "beams.fits" / "beam.fits"
            files, which are loaded instead.

        group_name : str
            Rootname to use for saved products

        fcontam : float
            Factor to use to downweight contaminated pixels.  The pixel
            inverse variances are scaled by the following weight factor when
            evaluating chi-squared of a 2D fit,

            `weight = np.exp(-(fcontam*np.abs(contam)*np.sqrt(ivar)))`

            where `contam` is the contaminating flux and `ivar` is the initial
            pixel inverse variance.

        psf : bool
            Fit an ePSF model to the direct image to use as the morphological
            reference.

        polyx : [float, float]
            Wavelength limits, in microns, for polynomial fits.

        MW_EBV : float
            Milky way foreground extinction.  Negative values trigger an
            automatic IRSA dust query (best-effort).

        min_mask : float
            Minimum factor relative to the maximum pixel value of the flat
            f-lambda model where the 2D cutout data are considered good.
            Passed through to `~grizli.model.BeamCutout`.

        min_sens : float
            See `~grizli.model.BeamCutout`.

        sys_err : float
            Systematic error added in quadrature to the pixel variances:

            `var_total = var_initial + (beam.sci*sys_err)**2`

        Attributes
        ----------
        TBD : type
        """
        self.group_name = group_name
        self.fcontam = fcontam
        self.polyx = polyx
        self.min_mask = min_mask
        self.min_sens = min_sens

        if isinstance(beams, str):
            # Single master "beams.fits" filename
            self.load_master_fits(beams, verbose=verbose)
        else:
            if isinstance(beams[0], str):
                ### `beams` is list of strings
                if 'beams.fits' in beams[0]:
                    # Master beam files: load the first, then merge the
                    # rest in as separate MultiBeam objects
                    self.load_master_fits(beams[0], verbose=verbose)
                    for i in range(1, len(beams)):
                        b_i = MultiBeam(beams[i], group_name=group_name, fcontam=fcontam, psf=psf, polyx=polyx, MW_EBV=np.maximum(MW_EBV, 0), sys_err=sys_err, verbose=verbose)
                        self.extend(b_i)
                else:
                    # List of individual beam.fits files
                    self.load_beam_fits(beams)
            else:
                self.beams = beams

        # minimum error: add systematic term in quadrature to the variance
        self.sys_err = sys_err
        for beam in self.beams:
            beam.ivarf = 1./(1/beam.ivarf + (sys_err*beam.scif)**2)
            beam.ivarf[~np.isfinite(beam.ivarf)] = 0
            beam.ivar = beam.ivarf.reshape(beam.sh)

        self.ra, self.dec = self.beams[0].get_sky_coords()

        if MW_EBV < 0:
            ### Try to get MW_EBV from hsaquery.utils
            try:
                import hsaquery.utils
                MW_EBV = hsaquery.utils.get_irsa_dust(self.ra, self.dec)
            except:
                # best-effort query; fall back to no extinction
                MW_EBV = 0

        self.MW_EBV = MW_EBV
        self._set_MW_EBV(MW_EBV)

        self._parse_beams(psf=psf)

        self.apply_trace_shift()

        self.Nphot = 0
        self.is_spec = 1
def _set_MW_EBV(self, MW_EBV, R_V=utils.MW_RV):
"""
Initialize Galactic extinction
Parameters
----------
MW_EBV : float
Local E(B-V)
R_V : float
Relation between specific and total extinction,
``a_v = r_v * ebv``.
"""
for b in self.beams:
beam = b.beam
if beam.MW_EBV != MW_EBV:
beam.MW_EBV = MW_EBV
beam.init_galactic_extinction(MW_EBV, R_V=R_V)
beam.process_config()
b.flat_flam = b.compute_model(in_place=False, is_cgs=True)
def _parse_beams(self, psf=False):
self.N = len(self.beams)
self.Ngrism = {}
for i in range(self.N):
if self.beams[i].grism.instrument == 'NIRISS':
grism = self.beams[i].grism.pupil
else:
grism = self.beams[i].grism.filter
if grism in self.Ngrism:
self.Ngrism[grism] += 1
else:
self.Ngrism[grism] = 1
self.grisms = list(self.Ngrism.keys())
self.PA = {}
for g in self.Ngrism:
self.PA[g] = {}
for i in range(self.N):
if self.beams[i].grism.instrument == 'NIRISS':
grism = self.beams[i].grism.pupil
else:
grism = self.beams[i].grism.filter
PA = self.beams[i].get_dispersion_PA(decimals=0)
if PA in self.PA[grism]:
self.PA[grism][PA].append(i)
else:
self.PA[grism][PA] = [i]
self.id = self.beams[0].id
# Use WFC3 ePSF for the fit
self.psf_param_dict = None
if (psf > 0) & (self.beams[i].grism.instrument in ['WFC3', 'ACS']):
self.psf_param_dict = OrderedDict()
for ib, beam in enumerate(self.beams):
if (beam.direct.data['REF'] is not None):
# Use REF extension. scale factors might be wrong
beam.direct.data['SCI'] = beam.direct.data['REF']
new_err = np.ones_like(beam.direct.data['ERR'])
new_err *= utils.nmad(beam.direct.data['SCI'])
beam.direct.data['ERR'] = new_err
beam.direct.filter = beam.direct.ref_filter #'F160W'
beam.direct.photflam = beam.direct.ref_photflam
beam.init_epsf(yoff=0.0, skip=psf*1, N=4, get_extended=True)
#beam.compute_model = beam.compute_model_psf
#beam.beam.compute_model = beam.beam.compute_model_psf
beam.compute_model(use_psf=True)
m = beam.compute_model(in_place=False)
#beam.modelf = beam.model.flatten()
#beam.model = beam.modelf.reshape(beam.beam.sh_beam)
beam.flat_flam = beam.compute_model(in_place=False, is_cgs=True) #/self.beam.total_flux
self.psf_param_dict[beam.grism.parent_file] = beam.beam.psf_params
self._parse_beam_arrays()
def _parse_beam_arrays(self):
"""
"""
self.poly_order = None
self.shapes = [beam.model.shape for beam in self.beams]
self.Nflat = [np.product(shape) for shape in self.shapes]
self.Ntot = np.sum(self.Nflat)
### Big array of normalized wavelengths (wave / 1.e4 - 1)
self.xpf = np.hstack([np.dot(np.ones((b.beam.sh_beam[0],1)),
b.beam.lam[None,:]).flatten()/1.e4
for b in self.beams]) - 1
### Flat-flambda model spectra
self.flat_flam = np.hstack([b.flat_flam for b in self.beams])
self.fit_mask = np.hstack([b.fit_mask*b.contam_mask
for b in self.beams])
self.DoF = self.fit_mask.sum()
self.ivarf = np.hstack([b.ivarf for b in self.beams])
self.fit_mask &= (self.ivarf >= 0)
self.scif = np.hstack([b.scif for b in self.beams])
#self.ivarf = 1./(1/self.ivarf + (self.sys_err*self.scif)**2)
self.ivarf[~np.isfinite(self.ivarf)] = 0
self.sivarf = np.sqrt(self.ivarf)
self.wavef = np.hstack([b.wavef for b in self.beams])
self.contamf = np.hstack([b.contam.flatten() for b in self.beams])
self.weightf = np.exp(-(self.fcontam*np.abs(self.contamf)*np.sqrt(self.ivarf)))
self.weightf[~np.isfinite(self.weightf)] = 0
self.fit_mask *= self.weightf > 0
self.DoF = int((self.weightf*self.fit_mask).sum())
self.Nmask = np.sum([b.fit_mask.sum() for b in self.beams])
### Initialize background fit array
# self.A_bg = np.zeros((self.N, self.Ntot))
# i0 = 0
# for i in range(self.N):
# self.A_bg[i, i0:i0+self.Nflat[i]] = 1.
# i0 += self.Nflat[i]
self.slices = self._get_slices(masked=False)
self.A_bg = self._init_background(masked=False)
self._update_beam_mask()
self.A_bgm = self._init_background(masked=True)
self.init_poly_coeffs(poly_order=1)
self.ra, self.dec = self.beams[0].get_sky_coords()
def compute_exptime(self):
exptime = {}
for beam in self.beams:
if beam.grism.instrument == 'NIRISS':
grism = beam.grism.pupil
else:
grism = beam.grism.filter
if grism in exptime:
exptime[grism] += beam.grism.exptime
else:
exptime[grism] = beam.grism.exptime
return exptime
def extend(self, new, verbose=True):
"""Concatenate `~grizli.multifit.MultiBeam` objects
Parameters
----------
new : `~grizli.multifit.MultiBeam`
Beam object containing new beams to add.
verbose : bool
Print summary of the change.
"""
self.beams.extend(new.beams)
self._parse_beams()
if verbose:
print('Add beams: {0}\n Now: {1}'.format(new.Ngrism, self.Ngrism))
def write_master_fits(self, verbose=True, get_hdu=False):
"""Store all beams in a single HDU
TBD
"""
hdu = pyfits.HDUList([pyfits.PrimaryHDU()])
rd = self.beams[0].get_sky_coords()
hdu[0].header['ID'] = (self.id, 'Object ID')
hdu[0].header['RA'] = (rd[0], 'Right Ascension')
hdu[0].header['DEC'] = (rd[1], 'Declination')
exptime = {}
for g in self.Ngrism:
exptime[g] = 0.
count = []
for ib, beam in enumerate(self.beams):
hdu_i = beam.write_fits(get_hdu=True, strip=True)
hdu.extend(hdu_i[1:])
count.append(len(hdu_i)-1)
hdu[0].header['FILE{0:04d}'.format(ib)] = (beam.grism.parent_file, 'Grism parent file')
hdu[0].header['GRIS{0:04d}'.format(ib)] = (beam.grism.filter, 'Grism element')
hdu[0].header['NEXT{0:04d}'.format(ib)] = (count[-1], 'Number of extensions')
try:
exptime[beam.grism.filter] += beam.grism.header['EXPTIME']
except:
exptime[beam.grism.pupil] += beam.grism.header['EXPTIME']
hdu[0].header['COUNT'] = (self.N, ' '.join(['{0}'.format(c) for c in count]))
for g in self.Ngrism:
hdu[0].header['T_{0}'.format(g)] = (exptime[g], 'Exposure time in grism {0}'.format(g))
if get_hdu:
return hdu
outfile = '{0}_{1:05d}.beams.fits'.format(self.group_name, self.id)
if verbose:
print(outfile)
hdu.writeto(outfile, clobber=True)
def load_master_fits(self, beam_file, verbose=True):
import copy
try:
utils.fetch_acs_wcs_files(beam_file)
except:
pass
hdu = pyfits.open(beam_file, lazy_load_hdus=False)
N = hdu[0].header['COUNT']
Next = np.cast[int](hdu[0].header.comments['COUNT'].split())
i0 = 1
self.beams = []
for i in range(N):
key = 'NEXT{0:04d}'.format(i)
if key in hdu[0].header:
Next_i = hdu[0].header[key]
else:
Next_i = 6 # Assume doesn't have direct SCI/ERR cutouts
# Testing for multiprocessing
if True:
hducopy = hdu[i0:i0+Next_i]
else:
#print('Copy!')
hducopy = pyfits.HDUList([hdu[i].__class__(data=hdu[i].data*1, header=copy.deepcopy(hdu[i].header), name=hdu[i].name) for i in range(i0, i0+Next_i)])
beam = model.BeamCutout(fits_file=hducopy, min_mask=self.min_mask,
min_sens=self.min_sens)
self.beams.append(beam)
if verbose:
print('{0} {1} {2}'.format(i+1, beam.grism.parent_file, beam.grism.filter))
i0 += Next_i #6#Next[i]
hdu.close()
def write_beam_fits(self, verbose=True):
"""TBD
"""
outfiles = []
for beam in self.beams:
root = beam.grism.parent_file.split('.fits')[0]
outfile = beam.write_fits(root)
if verbose:
print('Wrote {0}'.format(outfile))
outfiles.append(outfile)
return outfiles
def load_beam_fits(self, beam_list, conf=None, verbose=True):
"""TBD
"""
self.beams = []
for file in beam_list:
if verbose:
print(file)
beam = model.BeamCutout(fits_file=file, conf=conf,
min_mask=self.min_mask,
min_sens=self.min_sens)
self.beams.append(beam)
def reshape_flat(self, flat_array):
"""TBD
"""
out = []
i0 = 0
for ib in range(self.N):
im2d = flat_array[i0:i0+self.Nflat[ib]].reshape(self.shapes[ib])
out.append(im2d)
i0 += self.Nflat[ib]
return out
def init_poly_coeffs(self, flat=None, poly_order=1):
"""TBD
"""
### Already done?
if poly_order < 0:
ok_poly = False
poly_order = 0
else:
ok_poly = True
if poly_order == self.poly_order:
return None
self.poly_order = poly_order
if flat is None:
flat = self.flat_flam
### Polynomial continuum arrays
self.A_poly = np.array([self.xpf**order*flat
for order in range(poly_order+1)])
self.A_poly *= ok_poly
self.n_poly = poly_order + 1
self.x_poly = np.array([(self.beams[0].beam.lam/1.e4-1)**order
for order in range(poly_order+1)])
def eval_poly_spec(self, coeffs_full):
"""Evaluate polynomial spectrum
"""
xspec = np.arange(self.polyx[0], self.polyx[1], 0.05)-1
i0 = self.N*self.fit_bg
scale_coeffs = coeffs_full[i0:i0+self.n_poly]
#yspec = [xspec**o*scale_coeffs[o] for o in range(self.poly_order+1)]
yfull = np.polyval(scale_coeffs[::-1], xspec)
return xspec, yfull
def compute_model(self, id=None, spectrum_1d=None, is_cgs=False):
"""TBD
"""
for beam in self.beams:
beam.beam.compute_model(id=id, spectrum_1d=spectrum_1d,
is_cgs=is_cgs)
beam.modelf = beam.beam.modelf
beam.model = beam.beam.modelf.reshape(beam.beam.sh_beam)
def compute_model_psf(self, id=None, spectrum_1d=None, is_cgs=False):
"""TBD
"""
for beam in self.beams:
beam.beam.compute_model_psf(id=id, spectrum_1d=spectrum_1d,
is_cgs=is_cgs)
beam.modelf = beam.beam.modelf
beam.model = beam.beam.modelf.reshape(beam.beam.sh_beam)
    def fit_at_z(self, z=0., templates={}, fitter='nnls',
                 fit_background=True, poly_order=0):
        """Fit a set of templates to the 2D spectra at a fixed redshift.

        Parameters
        ----------
        z : float
            Redshift at which the templates are evaluated.

        templates : dict
            Dictionary of `~grizli.utils.SpectrumTemplate` objects.

        fitter : str
            'lstsq' or 'nnls' for the corresponding least-squares
            solvers; any other value falls back to
            `sklearn.linear_model.LinearRegression`.

        fit_background : bool
            Include a constant background component for each beam.

        poly_order : int
            Order of the polynomial continuum component.

        Returns
        -------
        A : `~numpy.ndarray`
            Design matrix (backgrounds + polynomial terms + templates).

        out_coeffs : `~numpy.ndarray`
            Fit coefficients; zero for rows of `A` excluded from the fit.

        chi2 : float
            Contamination-weighted chi-squared of the fit.

        modelf : `~numpy.ndarray`
            Flattened best-fit 2D model, ``np.dot(out_coeffs, A)``.
        """
        import sklearn.linear_model
        import numpy.linalg
        import scipy.optimize

        #print 'xxx Init poly'
        self.init_poly_coeffs(poly_order=poly_order)

        #print 'xxx Init bg'
        if fit_background:
            self.fit_bg = True
            A = np.vstack((self.A_bg, self.A_poly))
        else:
            self.fit_bg = False
            A = self.A_poly*1

        NTEMP = len(templates)
        A_temp = np.zeros((NTEMP, self.Ntot))

        #print 'xxx Load templates'
        for i, key in enumerate(templates.keys()):
            # NOTE(review): NTEMP is already len(templates); this
            # increment double-counts, but NTEMP is only used for the
            # "> 0" test below, so it appears harmless — confirm upstream.
            NTEMP += 1
            temp = templates[key]#.zscale(z, 1.)
            # Redshifted template, conserving observed-frame flux density
            spectrum_1d = [temp.wave*(1+z), temp.flux/(1+z)]

            if z > 4:
                # Attenuate rest-UV flux with the Inoue (2014) IGM model
                # when the optional `eazy` package is available
                try:
                    import eazy.igm
                    igm = eazy.igm.Inoue14()
                    igmz = igm.full_IGM(z, spectrum_1d[0])
                    spectrum_1d[1]*=igmz
                    #print('IGM')
                except:
                    # No IGM
                    pass

            i0 = 0
            for ib in range(self.N):
                beam = self.beams[ib]
                lam_beam = beam.beam.lam_beam
                # Skip beams with no wavelength overlap with the template
                if ((temp.wave.min()*(1+z) > lam_beam.max()) |
                    (temp.wave.max()*(1+z) < lam_beam.min())):
                    tmodel = 0.
                else:
                    tmodel = beam.compute_model(spectrum_1d=spectrum_1d,
                                                in_place=False, is_cgs=True) #/beam.beam.total_flux

                A_temp[i, i0:i0+self.Nflat[ib]] = tmodel#.flatten()
                i0 += self.Nflat[ib]

        if NTEMP > 0:
            A = np.vstack((A, A_temp))

        # Rows of A that are identically zero (e.g., templates with no
        # overlap) are excluded from the fit and get zero coefficients
        ok_temp = np.sum(A, axis=1) > 0
        out_coeffs = np.zeros(A.shape[0])

        ### LSTSQ coefficients
        #print 'xxx Fitter'
        fit_functions = {'lstsq':np.linalg.lstsq, 'nnls':scipy.optimize.nnls}

        if fitter in fit_functions:
            #'lstsq':

            Ax = A[:, self.fit_mask][ok_temp,:].T
            ### Weight by ivar
            Ax *= np.sqrt(self.ivarf[self.fit_mask][:, np.newaxis])

            #print 'xxx lstsq'
            #out = numpy.linalg.lstsq(Ax,y)
            if fitter == 'lstsq':
                y = self.scif[self.fit_mask]
                ### Weight by ivar
                y *= np.sqrt(self.ivarf[self.fit_mask])

                try:
                    out = np.linalg.lstsq(Ax,y, rcond=None)
                except:
                    print(A.min(), Ax.min(), self.fit_mask.sum(), y.min())
                    raise ValueError

                lstsq_coeff, residuals, rank, s = out
                coeffs = lstsq_coeff

            if fitter == 'nnls':
                if fit_background:
                    # Offset the data so the per-beam backgrounds can
                    # effectively go negative under the NNLS constraint.
                    # NOTE(review): `off` is added to the data before the
                    # ivar weighting *and* again (unweighted) in the nnls
                    # call, and the subtraction uses the literal 0.04
                    # rather than `off` — verify intended behavior.
                    off = 0.04
                    y = self.scif[self.fit_mask]+off
                    y *= np.sqrt(self.ivarf[self.fit_mask])

                    coeffs, rnorm = scipy.optimize.nnls(Ax, y+off)
                    coeffs[:self.N] -= 0.04
                else:
                    y = self.scif[self.fit_mask]
                    y *= np.sqrt(self.ivarf[self.fit_mask])

                    coeffs, rnorm = scipy.optimize.nnls(Ax, y)

            # if fitter == 'bounded':
            #     if fit_background:
            #         off = 0.04
            #         y = self.scif[self.fit_mask]+off
            #         y *= self.ivarf[self.fit_mask]
            #
            #         coeffs, rnorm = scipy.optimize.nnls(Ax, y+off)
            #         coeffs[:self.N] -= 0.04
            #     else:
            #         y = self.scif[self.fit_mask]
            #         y *= np.sqrt(self.ivarf[self.fit_mask])
            #
            #         coeffs, rnorm = scipy.optimize.nnls(Ax, y)
            #
            # out = scipy.optimize.minimize(self.eval_trace_shift, shifts, bounds=bounds, args=args, method='Powell', tol=tol)

        else:
            # Fallback: unconstrained linear regression
            Ax = A[:, self.fit_mask][ok_temp,:].T
            y = self.scif[self.fit_mask]

            ### Wieght by ivar
            Ax *= np.sqrt(self.ivarf[self.fit_mask][:, np.newaxis])
            y *= np.sqrt(self.ivarf[self.fit_mask])

            clf = sklearn.linear_model.LinearRegression()
            status = clf.fit(Ax, y)
            coeffs = clf.coef_

        out_coeffs[ok_temp] = coeffs
        modelf = np.dot(out_coeffs, A)
        # Contamination-weighted chi-squared over the masked pixels
        chi2 = np.sum((self.weightf*(self.scif - modelf)**2*self.ivarf)[self.fit_mask])

        if fit_background:
            poly_coeffs = out_coeffs[self.N:self.N+self.n_poly]
        else:
            poly_coeffs = out_coeffs[:self.n_poly]

        self.y_poly = np.dot(poly_coeffs, self.x_poly)
        # x_poly = self.x_poly[1,:]+1 = self.beams[0].beam.lam/1.e4

        return A, out_coeffs, chi2, modelf
    def parse_fit_outputs(self, z, templates, coeffs_full, A):
        """Parse output from `fit_at_z`.

        Parameters
        ----------
        z : float
            Redshift at which to evaluate the fits.

        templates : list of `~grizli.utils.SpectrumTemplate` objects
            Generated with, e.g., `~grizli.utils.load_templates`.

        coeffs_full : `~np.ndarray`
            Template fit coefficients

        A : `~np.ndarray`
            Matrix generated for fits and used for computing model 2D spectra:

                >>> model_flat = np.dot(coeffs_full, A)
                >>> # mb = MultiBeam(...)
                >>> all_models = mb.reshape_flat(model_flat)
                >>> m0 = all_models[0] # model for mb.beams[0]

        Returns
        -------
        line_flux : dict
            Line fluxes and uncertainties, in cgs units (erg/s/cm2)

        covar : `~np.ndarray`
            Covariance matrix for the fit coefficients

        cont1d, line1d, model1d : `~grizli.utils.SpectrumTemplate`
            Best-fit continuum, line, and full (continuum + line) templates

        model_continuum : `~np.ndarray`
            Flat array of the best fit 2D continuum
        """
        from collections import OrderedDict

        ## Covariance matrix for line flux uncertainties
        # Restrict to rows that both contribute to the model and were
        # assigned a nonzero coefficient
        Ax = A[:,self.fit_mask]
        ok_temp = (np.sum(Ax, axis=1) > 0) & (coeffs_full != 0)
        Ax = Ax[ok_temp,:].T*1 #A[:, self.fit_mask][ok_temp,:].T
        Ax *= np.sqrt(self.ivarf[self.fit_mask][:, np.newaxis])

        try:
            # NOTE(review): `np.matrix(...).I` is discouraged in modern
            # NumPy; `np.linalg.inv` on a plain 2D array would be the
            # modern equivalent — confirm the .diagonal() shape handling
            # before changing.
            covar = np.matrix(np.dot(Ax.T, Ax)).I
            covard = np.sqrt(covar.diagonal())
        except:
            # Singular matrix: return zero uncertainties
            N = ok_temp.sum()
            covar = np.zeros((N,N))
            covard = np.zeros(N)#-1.

        covar_full = utils.fill_masked_covar(covar, ok_temp)

        ## Random draws from covariance matrix
        # draws = np.random.multivariate_normal(coeffs_full[ok_temp], covar, size=500)

        line_flux_err = coeffs_full*0.
        line_flux_err[ok_temp] = covard

        ## Continuum fit
        # Zero out the line templates to isolate the continuum model
        mask = np.isfinite(coeffs_full)
        for i, key in enumerate(templates.keys()):
            if key.startswith('line'):
                mask[self.N*self.fit_bg+self.n_poly+i] = False

        model_continuum = np.dot(coeffs_full*mask, A)
        self.model_continuum = self.reshape_flat(model_continuum)
        #model_continuum.reshape(self.beam.sh_beam)

        ### 1D spectrum
        # Polynomial component
        xspec, yspec = self.eval_poly_spec(coeffs_full)
        model1d = utils.SpectrumTemplate((xspec+1)*1.e4, yspec)

        cont1d = model1d*1

        # Template coefficients start after backgrounds + polynomial
        i0 = self.fit_bg*self.N + self.n_poly

        line_flux = OrderedDict()
        fscl = 1. #self.beams[0].beam.total_flux/1.e-17
        line1d = OrderedDict()
        for i, key in enumerate(templates.keys()):
            temp_i = templates[key].zscale(z, coeffs_full[i0+i])
            model1d += temp_i
            if not key.startswith('line'):
                cont1d += temp_i
            else:
                # Line template keys are like 'line Ha'; index by name
                line1d[key.split()[1]] = temp_i
                line_flux[key.split()[1]] = np.array([coeffs_full[i0+i]*fscl,
                                                      line_flux_err[i0+i]*fscl])

        return line_flux, covar_full, cont1d, line1d, model1d, model_continuum
    def fit_stars(self, poly_order=1, fitter='nnls', fit_background=True,
                  verbose=True, make_figure=True, zoom=None,
                  delta_chi2_threshold=0.004, zr=0, dz=0, fwhm=0,
                  prior=None, templates={}, figsize=[8,5],
                  fsps_templates=False):
        """Fit stellar templates at z=0, one template at a time.

        Fits each stellar template from `utils.load_templates(stars=True)`
        individually and keeps the one with the lowest chi-squared.
        Several parameters (`zoom`, `delta_chi2_threshold`, `zr`, `dz`,
        `prior`, `templates`, `figsize`, `fsps_templates`) are accepted
        for signature compatibility with `fit_redshift` but are not used
        here.

        Returns
        -------
        fit_data : dict
            Fit results; note that 'zbest' and 'zgrid' hold template
            *indices*, not redshifts.

        fig : `~matplotlib.figure.Figure` or None
            Diagnostic figure if ``make_figure=True``.
        """
        ## Polynomial fit (baseline chi-squared with no templates)
        out = self.fit_at_z(z=0., templates={}, fitter='lstsq',
                            poly_order=3,
                            fit_background=fit_background)

        A, coeffs, chi2_poly, model_2d = out

        ### Star templates
        templates = utils.load_templates(fwhm=fwhm, stars=True)
        NTEMP = len(templates)

        # One throwaway fit to learn the coefficient-array size
        key = list(templates)[0]
        temp_i = {key:templates[key]}
        out = self.fit_at_z(z=0., templates=temp_i, fitter=fitter,
                            poly_order=poly_order,
                            fit_background=fit_background)

        A, coeffs, chi2, model_2d = out

        # Re-used as per-template result arrays
        chi2 = np.zeros(NTEMP)
        coeffs = np.zeros((NTEMP, coeffs.shape[0]))

        chi2min = 1e30
        iz = 0
        best = key
        for i, key in enumerate(list(templates)):
            # Fit each stellar template on its own
            temp_i = {key:templates[key]}
            out = self.fit_at_z(z=0., templates=temp_i,
                                fitter=fitter, poly_order=poly_order,
                                fit_background=fit_background)

            A, coeffs[i,:], chi2[i], model_2d = out
            if chi2[i] < chi2min:
                iz = i
                chi2min = chi2[i]
                best = key

            if verbose:
                print(utils.NO_NEWLINE + ' {0} {1:9.1f} ({2})'.format(key, chi2[i], best))

        ## Best-fit: re-fit with the winning template
        temp_i = {best:templates[best]}
        out = self.fit_at_z(z=0., templates=temp_i,
                            fitter=fitter, poly_order=poly_order,
                            fit_background=fit_background)

        A, coeffs_full, chi2_best, model_full = out

        ## Continuum fit
        mask = np.isfinite(coeffs_full)
        for i, key in enumerate(templates.keys()):
            if key.startswith('line'):
                mask[self.N*self.fit_bg+self.n_poly+i] = False

        model_continuum = np.dot(coeffs_full*mask, A)
        self.model_continuum = self.reshape_flat(model_continuum)
        #model_continuum.reshape(self.beam.sh_beam)

        ### 1D spectrum
        # xspec = np.arange(0.3, 2.35, 0.05)-1
        # scale_coeffs = coeffs_full[self.N*self.fit_bg:
        #                           self.N*self.fit_bg+self.n_poly]
        #
        # yspec = [xspec**o*scale_coeffs[o] for o in range(self.poly_order+1)]
        xspec, yspec = self.eval_poly_spec(coeffs_full)
        model1d = utils.SpectrumTemplate((xspec+1)*1.e4, yspec)

        cont1d = model1d*1

        i0 = self.fit_bg*self.N + self.n_poly

        line_flux = OrderedDict()
        fscl = 1. #self.beams[0].beam.total_flux/1.e-17

        # Add the best-fit stellar template at z=0
        temp_i = templates[best].zscale(0, coeffs_full[i0])
        model1d += temp_i
        cont1d += temp_i

        fit_data = OrderedDict()
        fit_data['poly_order'] = poly_order
        fit_data['fwhm'] = 0
        # NOTE: index of the best template, not a redshift
        fit_data['zbest'] = np.argmin(chi2)
        fit_data['chibest'] = chi2_best
        fit_data['chi_poly'] = chi2_poly
        # NOTE: template indices stand in for the redshift grid
        fit_data['zgrid'] = np.arange(NTEMP)
        fit_data['prior'] = 1
        fit_data['A'] = A
        fit_data['coeffs'] = coeffs
        fit_data['chi2'] = chi2
        fit_data['DoF'] = self.DoF
        fit_data['model_full'] = model_full
        fit_data['coeffs_full'] = coeffs_full
        fit_data['line_flux'] = {}
        #fit_data['templates_full'] = templates
        fit_data['model_cont'] = model_continuum
        fit_data['model1d'] = model1d
        fit_data['cont1d'] = cont1d

        #return fit_data

        fig = None
        if make_figure:
            fig = self.show_redshift_fit(fit_data)
            #fig.savefig('fit.pdf')

        return fit_data, fig
def fit_redshift(self, prior=None, poly_order=1, fwhm=1200,
make_figure=True, zr=None, dz=None, verbose=True,
fit_background=True, fitter='nnls',
delta_chi2_threshold=0.004, zoom=True,
line_complexes=True, templates={}, figsize=[8,5],
fsps_templates=False):
"""TBD
"""
from scipy import polyfit, polyval
if zr is None:
zr = [0.65, 1.6]
if dz is None:
dz = [0.005, 0.0004]
# if True:
# beams = grp.get_beams(id, size=30)
# mb = grizlidev.multifit.MultiBeam(beams)
# self = mb
if zr is 0:
stars = True
zr = [0, 0.01]
fitter='nnls'
else:
stars = False
zgrid = utils.log_zgrid(zr, dz=dz[0])
NZ = len(zgrid)
## Polynomial fit
out = self.fit_at_z(z=0., templates={}, fitter='lstsq',
poly_order=3,
fit_background=fit_background)
A, coeffs, chi2_poly, model_2d = out
### Set up for template fit
if templates == {}:
templates = utils.load_templates(fwhm=fwhm, stars=stars, line_complexes=line_complexes, fsps_templates=fsps_templates)
else:
if verbose:
print('User templates! N={0} \n'.format(len(templates)))
NTEMP = len(templates)
out = self.fit_at_z(z=0., templates=templates, fitter=fitter,
poly_order=poly_order,
fit_background=fit_background)
A, coeffs, chi2, model_2d = out
chi2 = np.zeros(NZ)
coeffs = np.zeros((NZ, coeffs.shape[0]))
chi2min = 1e30
iz = 0
for i in range(NZ):
out = self.fit_at_z(z=zgrid[i], templates=templates,
fitter=fitter, poly_order=poly_order,
fit_background=fit_background)
A, coeffs[i,:], chi2[i], model_2d = out
if chi2[i] < chi2min:
iz = i
chi2min = chi2[i]
if verbose:
print(utils.NO_NEWLINE + ' {0:.4f} {1:9.1f} ({2:.4f})'.format(zgrid[i], chi2[i], zgrid[iz]))
print('First iteration: z_best={0:.4f}\n'.format(zgrid[iz]))
# peaks
import peakutils
# chi2nu = (chi2.min()-chi2)/self.DoF
# indexes = peakutils.indexes((chi2nu+delta_chi2_threshold)*(chi2nu > -delta_chi2_threshold), thres=0.3, min_dist=20)
chi2_rev = (chi2_poly - chi2)/self.DoF
if chi2_poly < (chi2.min() + 9):
chi2_rev = (chi2.min() + 16 - chi2)/self.DoF
chi2_rev[chi2_rev < 0] = 0
indexes = peakutils.indexes(chi2_rev, thres=0.4, min_dist=8)
num_peaks = len(indexes)
if False:
plt.plot(zgrid, (chi2-chi2.min())/ self.DoF)
plt.scatter(zgrid[indexes], (chi2-chi2.min())[indexes]/ self.DoF, color='r')
# delta_chi2 = (chi2.max()-chi2.min())/self.DoF
# if delta_chi2 > delta_chi2_threshold:
if (num_peaks > 0) & (not stars) & zoom:
zgrid_zoom = []
for ix in indexes:
if (ix > 0) & (ix < len(chi2)-1):
c = polyfit(zgrid[ix-1:ix+2], chi2[ix-1:ix+2], 2)
zi = -c[1]/(2*c[0])
chi_i = polyval(c, zi)
zgrid_zoom.extend(np.arange(zi-2*dz[0],
zi+2*dz[0]+dz[1]/10., dz[1]))
# zgrid_zoom = utils.zoom_zgrid(zgrid, chi2/self.DoF,
# threshold=delta_chi2_threshold,
# factor=dz[0]/dz[1])
NZOOM = len(zgrid_zoom)
chi2_zoom = np.zeros(NZOOM)
coeffs_zoom = np.zeros((NZOOM, coeffs.shape[1]))
iz = 0
chi2min = 1.e30
for i in range(NZOOM):
out = self.fit_at_z(z=zgrid_zoom[i], templates=templates,
fitter=fitter, poly_order=poly_order,
fit_background=fit_background)
A, coeffs_zoom[i,:], chi2_zoom[i], model_2d = out
if chi2_zoom[i] < chi2min:
chi2min = chi2_zoom[i]
iz = i
if verbose:
print(utils.NO_NEWLINE+'- {0:.4f} {1:9.1f} ({2:.4f}) {3:d}/{4:d}'.format(zgrid_zoom[i], chi2_zoom[i], zgrid_zoom[iz], i+1, NZOOM))
zgrid = np.append(zgrid, zgrid_zoom)
chi2 = np.append(chi2, chi2_zoom)
coeffs = np.append(coeffs, coeffs_zoom, axis=0)
so = np.argsort(zgrid)
zgrid = zgrid[so]
chi2 = chi2[so]
coeffs=coeffs[so,:]
if prior is not None:
#print('\n\nPrior!\n\n', chi2.min(), prior[1].min())
interp_prior = np.interp(zgrid, prior[0], prior[1])
chi2 += interp_prior
else:
interp_prior = None
print(' Zoom iteration: z_best={0:.4f}\n'.format(zgrid[np.argmin(chi2)]))
### Best redshift
if not stars:
templates = utils.load_templates(line_complexes=False, fwhm=fwhm, fsps_templates=fsps_templates)
zbest = zgrid[np.argmin(chi2)]
ix = np.argmin(chi2)
chibest = chi2.min()
## Fit parabola
if (ix > 0) & (ix < len(chi2)-1):
c = polyfit(zgrid[ix-1:ix+2], chi2[ix-1:ix+2], 2)
zbest = -c[1]/(2*c[0])
chibest = polyval(c, zbest)
out = self.fit_at_z(z=zbest, templates=templates,
fitter=fitter, poly_order=poly_order,
fit_background=fit_background)
A, coeffs_full, chi2_best, model_full = out
# Parse results
out2 = self.parse_fit_outputs(zbest, templates, coeffs_full, A)
line_flux, covar, cont1d, line1d, model1d, model_continuum = out2
# Output dictionary with fit parameters
fit_data = OrderedDict()
fit_data['poly_order'] = poly_order
fit_data['fwhm'] = fwhm
fit_data['zbest'] = zbest
fit_data['chibest'] = chibest
fit_data['chi_poly'] = chi2_poly
fit_data['zgrid'] = zgrid
fit_data['prior'] = interp_prior
fit_data['A'] = A
fit_data['coeffs'] = coeffs
fit_data['chi2'] = chi2
fit_data['DoF'] = self.DoF
fit_data['model_full'] = model_full
fit_data['coeffs_full'] = coeffs_full
fit_data['covar'] = covar
fit_data['line_flux'] = line_flux
#fit_data['templates_full'] = templates
fit_data['model_cont'] = model_continuum
fit_data['model1d'] = model1d
fit_data['cont1d'] = cont1d
fit_data['line1d'] = line1d
#return fit_data
fig = None
if make_figure:
fig = self.show_redshift_fit(fit_data, figsize=figsize)
#fig.savefig('fit.pdf')
return fit_data, fig
def run_individual_fits(self, z=0, templates={}):
"""Run template fits on each *exposure* individually to evaluate
variance in line and continuum fits.
Parameters
----------
z : float
Redshift at which to evaluate the fit
templates : list of `~grizli.utils.SpectrumTemplate` objects
Generated with, e.g., `load_templates`.
Returns
-------
line_flux, line_err : dict
Dictionaries with the measured line fluxes and uncertainties for
each exposure fit.
coeffs_list : `~np.ndarray` [Nbeam x Ntemplate]
Raw fit coefficients
chi2_list, DoF_list : `~np.ndarray` [Nbeam]
Chi-squared and effective degrees of freedom for each separate fit
"""
# Fit on the full set of beams
out = self.fit_at_z(z=z, templates=templates,
fitter='nnls', poly_order=self.poly_order,
fit_background=self.fit_bg)
A, coeffs_full, chi2_best, model_full = out
out2 = self.parse_fit_outputs(z, templates, coeffs_full, A)
line, covar, cont1d, line1d, model1d, model_continuum = out2
NB, NTEMP = len(self.beams), len(templates)
# Outputs
coeffs_list = np.zeros((NB, NTEMP))
chi2_list = np.zeros(NB)
DoF_list = np.zeros(NB)
line_flux = OrderedDict()
line_err = OrderedDict()
line_keys = list(line.keys())
for k in line_keys:
line_flux[k] = np.zeros(NB)
line_err[k] = np.zeros(NB)
# Generate separate MultiBeam objects for each individual beam
for i, b in enumerate(self.beams):
b_i = MultiBeam([b], fcontam=self.fcontam,
group_name=self.group_name)
out_i = b_i.fit_at_z(z=z, templates=templates,
fitter='nnls', poly_order=self.poly_order,
fit_background=self.fit_bg)
A_i, coeffs_i, chi2_i, model_full_i = out_i
# Parse fit information from individual fits
out2 = b_i.parse_fit_outputs(z, templates, coeffs_i, A_i)
line_i, covar_i, cont1d_i, line1d_i, model1d_i, model_continuum_i = out2
for k in line_keys:
line_flux[k][i] = line_i[k][0]
line_err[k][i] = line_i[k][1]
coeffs_list[i,:] = coeffs_i[-NTEMP:]
chi2_list[i] = chi2_i
DoF_list[i] = b_i.DoF
return line_flux, line_err, coeffs_list, chi2_list, DoF_list
    def show_redshift_fit(self, fit_data, plot_flambda=True, figsize=[8,5]):
        """Make a two-panel diagnostic figure for a redshift fit.

        Top panel: delta-chi-squared vs. redshift.  Bottom panel:
        optimally-extracted 1D spectra (per beam and binned per grism)
        with the best-fit model overplotted.

        Parameters
        ----------
        fit_data : dict
            Output dictionary from `fit_redshift` / `fit_stars`.

        plot_flambda : bool
            Show the spectra in f_lambda units (scaled by the
            flat-spectrum extraction); otherwise raw e-/s.

        figsize : [float, float]
            Figure size in inches.

        Returns
        -------
        fig : `~matplotlib.figure.Figure`
        """
        import matplotlib.gridspec
        gs = matplotlib.gridspec.GridSpec(2, 1, height_ratios=[0.6,1])

        fig = plt.figure(figsize=figsize)
        ax = fig.add_subplot(gs[0])

        c2min = fit_data['chi2'].min()

        # Display delta-chi2 scaled by the reduced chi-squared
        scale_pz = True
        if scale_pz:
            scale_nu = c2min/self.DoF
            scl_label = '_s'
        else:
            scale_nu = 1.
            scl_label = ''

        #axz.plot(z, (chi2-chi2.min())/scale_nu, color='k')
        #ax.plot(fit_data['zgrid'], fit_data['chi2']/self.DoF)
        ax.plot(fit_data['zgrid'], (fit_data['chi2']-c2min)/scale_nu)
        ax.set_xlabel('z')
        ax.set_ylabel(r'$\chi^2_\nu$, $\nu$={0:d}'.format(self.DoF))
        ax.set_ylim(-4,27)
        # The `{2}` field picks up `scl_label` to tag the scaled axis
        ax.set_ylabel(r'$\Delta\chi^2{2}$ ({0:.0f}/$\nu$={1:d})'.format(c2min, self.DoF, scl_label))
        ax.set_yticks([1,4,9,16,25])

        # for delta in [1,4,9]:
        #     ax.plot(fit_data['zgrid'],
        #             fit_data['zgrid']*0.+(c2min+delta)/self.DoF,
        #             color='{0:.2f}'.format(1-delta*1./10))

        # Polynomial-only baseline chi-squared for reference
        ax.plot(fit_data['zgrid'], (fit_data['chi2']*0+fit_data['chi_poly']-c2min)/scale_nu, color='b', linestyle='--', alpha=0.8)

        ax.set_xlim(fit_data['zgrid'].min(), fit_data['zgrid'].max())
        ax.grid()
        ax.set_title(r'ID = {0:d}, $z_\mathrm{{grism}}$={1:.4f}'.format(self.beams[0].id, fit_data['zbest']))

        ax = fig.add_subplot(gs[1])

        ymax = 0
        ymin = 1e10

        continuum_fit = self.reshape_flat(fit_data['model_cont'])
        line_fit = self.reshape_flat(fit_data['model_full'])

        grisms = self.Ngrism.keys()
        wfull = {}
        ffull = {}
        efull = {}
        for grism in grisms:
            wfull[grism] = []
            ffull[grism] = []
            efull[grism] = []

        for ib in range(self.N):
            beam = self.beams[ib]

            # Contamination-subtracted science data
            clean = beam.grism['SCI'] - beam.contam
            if self.fit_bg:
                bg_i = fit_data['coeffs_full'][ib]
                clean -= bg_i # background
            else:
                bg_i = 0.

            #ivar = 1./(1./beam.ivar + self.fcontam*beam.contam)
            #ivar[~np.isfinite(ivar)] = 0

            ## New weight scheme: down-weight contaminated pixels
            ivar = beam.ivar
            weight = np.exp(-(self.fcontam*np.abs(beam.contam)*np.sqrt(ivar)))

            wave, flux, err = beam.beam.optimal_extract(clean,
                                                        ivar=ivar,
                                                        weight=weight)

            mwave, mflux, merr = beam.beam.optimal_extract(line_fit[ib]-bg_i,
                                                           ivar=ivar,
                                                           weight=weight)

            # Flat-spectrum extraction used as the f_lambda normalization
            flat = beam.flat_flam.reshape(beam.beam.sh_beam)
            wave, fflux, ferr = beam.beam.optimal_extract(flat, ivar=ivar,
                                                          weight=weight)

            if plot_flambda:
                # Keep only pixels with reasonable sensitivity
                ok = beam.beam.sensitivity > 0.1*beam.beam.sensitivity.max()
                wave = wave[ok]
                fscl = 1./1.e-19 #beam.beam.total_flux/1.e-17
                flux = (flux*fscl/fflux)[ok]*beam.beam.scale
                err = (err*fscl/fflux)[ok]
                mflux = (mflux*fscl/fflux)[ok]*beam.beam.scale

                ylabel = r'$f_\lambda\,/\,10^{-19}\,\mathrm{cgs}$'
            else:
                ylabel = 'flux (e-/s)'

            scl_region = np.isfinite(mflux)
            if scl_region.sum() == 0:
                continue

            # try:
            #     okerr = np.isfinite(err) #& (np.abs(flux/err) > 0.2) & (err != 0)
            #     med_err = np.median(err[okerr])
            #
            #     ymax = np.maximum(ymax,
            #                 (mflux[scl_region][2:-2] + med_err).max())
            #     ymin = np.minimum(ymin,
            #                 (mflux[scl_region][2:-2] - med_err).min())
            # except:
            #     continue

            #okerr = (err != 0) & (np.abs(flux/err) > 0.2)
            okerr = np.isfinite(err)
            # Per-beam points in gray, model in red
            ax.errorbar(wave[okerr]/1.e4, flux[okerr], err[okerr], alpha=0.15+0.2*(self.N <= 2), linestyle='None', marker='.', color='{0:.2f}'.format(ib*0.5/self.N), zorder=1)
            ax.plot(wave[okerr]/1.e4, mflux[okerr], color='r', alpha=0.5, zorder=3)

            if beam.grism.instrument == 'NIRISS':
                grism = beam.grism.pupil
            else:
                grism = beam.grism.filter

            #for grism in grisms:
            # Accumulate per-grism points for the binned display below
            wfull[grism] = np.append(wfull[grism], wave[okerr])
            ffull[grism] = np.append(ffull[grism], flux[okerr])
            efull[grism] = np.append(efull[grism], err[okerr])

            ## Scatter direct image flux
            if beam.direct.ref_photplam is None:
                ax.scatter(beam.direct.photplam/1.e4, beam.beam.total_flux/1.e-19, marker='s', edgecolor='k', color=GRISM_COLORS[grism], alpha=0.2, zorder=100, s=100)
            else:
                ax.scatter(beam.direct.ref_photplam/1.e4, beam.beam.total_flux/1.e-19, marker='s', edgecolor='k', color=GRISM_COLORS[grism], alpha=0.2, zorder=100, s=100)

        for grism in grisms:
            if self.Ngrism[grism] > 1:
                ## binned: inverse-variance-weighted boxcar, decimated
                okb = (np.isfinite(wfull[grism]) & np.isfinite(ffull[grism]) &
                       np.isfinite(efull[grism]))

                so = np.argsort(wfull[grism][okb])
                var = efull[grism]**2

                N = int(np.ceil(self.Ngrism[grism]/2)*2)*2
                kernel = np.ones(N, dtype=float)/N
                wht = 1/var[okb][so]
                fbin = nd.convolve(ffull[grism][okb][so]*wht, kernel)[N//2::N]
                wbin = nd.convolve(wfull[grism][okb][so]*wht, kernel)[N//2::N]
                #vbin = nd.convolve(var[okb][so], kernel**2)[N//2::N]
                wht_bin = nd.convolve(wht, kernel)[N//2::N]
                vbin = nd.convolve(wht, kernel**2)[N//2::N]/wht_bin**2

                fbin /= wht_bin
                wbin /= wht_bin
                #vbin = 1./wht_bin

                ax.errorbar(wbin/1.e4, fbin, np.sqrt(vbin), alpha=0.8,
                            linestyle='None', marker='.',
                            color=GRISM_COLORS[grism], zorder=2)

                med_err = np.median(np.sqrt(vbin))
                ymin = np.minimum(ymin, (fbin-2*med_err).min())
                ymax = np.maximum(ymax, (fbin+2*med_err).max())

        ymin = np.maximum(0, ymin)
        ax.set_ylim(ymin - 0.2*np.abs(ymax), 1.3*ymax)

        # x-limits from the union of the plotted grism bandpasses
        xmin, xmax = 1.e5, 0
        for g in GRISM_LIMITS:
            if g in grisms:
                xmin = np.minimum(xmin, GRISM_LIMITS[g][0])
                xmax = np.maximum(xmax, GRISM_LIMITS[g][1])
                #print g, xmin, xmax

        ax.set_xlim(xmin, xmax)
        # NOTE(review): the `subsx` keyword was deprecated/renamed in
        # matplotlib (now `subs`) — confirm against the pinned version.
        ax.semilogx(subsx=[xmax])

        #axc.set_xticklabels([])
        #axc.set_xlabel(r'$\lambda$')
        #axc.set_ylabel(r'$f_\lambda \times 10^{-19}$')

        from matplotlib.ticker import MultipleLocator
        ax.xaxis.set_major_locator(MultipleLocator(0.1))
        labels = np.arange(np.ceil(xmin*10), np.ceil(xmax*10))/10.
        ax.set_xticks(labels)
        ax.set_xticklabels(labels)
        ax.grid()

        ### Label
        ax.text(0.03, 1.03, ('{0}'.format(self.Ngrism)).replace('\'','').replace('{','').replace('}',''), ha='left', va='bottom', transform=ax.transAxes, fontsize=10)

        #ax.plot(wave/1.e4, wave/1.e4*0., linestyle='--', color='k')
        ax.hlines(0, xmin, xmax, linestyle='--', color='k')

        ax.set_xlabel(r'$\lambda$')
        ax.set_ylabel(ylabel)

        gs.tight_layout(fig, pad=0.1)
        return fig
    def redshift_fit_twod_figure(self, fit, spatial_scale=1, dlam=46., NY=10,
                                 figsize=[8,3.5], **kwargs):
        """Make figure of the drizzled 2D spectrum for a redshift fit.

        Drizzles the science data, the best-fit continuum and the full
        model to a common wavelength/spatial grid and shows the cleaned
        data, the model, and the line residual.

        Parameters
        ----------
        fit : dict
            Output dictionary from `fit_redshift` (uses 'model_cont',
            'model_full', 'zbest').

        spatial_scale, dlam, NY : float, float, int
            Passed through to `drizzle_2d_spectrum`.

        figsize : [float, float]
            Figure size in inches.

        Returns
        -------
        fig : `~matplotlib.figure.Figure`

        hdu_sci : `~astropy.io.fits.HDUList`
            Drizzled science spectrum with CONTINUUM and FULL model
            extensions appended.
        """
        ### xlimits from the grisms present
        xmin, xmax = 1.e5, 0
        for g in GRISM_LIMITS:
            if g in self.Ngrism:
                xmin = np.minimum(xmin, GRISM_LIMITS[g][0])
                xmax = np.maximum(xmax, GRISM_LIMITS[g][1])

        hdu_sci = drizzle_2d_spectrum(self.beams, ds9=None, NY=NY,
                                      spatial_scale=spatial_scale, dlam=dlam,
                                      kernel='point', pixfrac=0.6,
                                      wlimit=[xmin, xmax],
                                      fcontam=self.fcontam)

        ### Continuum model
        cont = self.reshape_flat(fit['model_cont'])
        hdu_con = drizzle_2d_spectrum(self.beams, data=cont, ds9=None, NY=NY,
                                      spatial_scale=spatial_scale, dlam=dlam,
                                      kernel='point', pixfrac=0.6,
                                      wlimit=[xmin, xmax],
                                      fcontam=self.fcontam)

        full = self.reshape_flat(fit['model_full'])
        hdu_full = drizzle_2d_spectrum(self.beams, data=full, ds9=None, NY=NY,
                                       spatial_scale=spatial_scale, dlam=dlam,
                                       kernel='point', pixfrac=0.6,
                                       wlimit=[xmin, xmax],
                                       fcontam=self.fcontam)

        # Display stretch from the well-exposed pixels
        clip = hdu_full['WHT'].data > np.percentile(hdu_full['WHT'].data, 30)
        #vmax = np.maximum(1.1*np.percentile(hdu_full['SCI'].data[clip], 98), 0.04)
        avg_rms = 1/np.median(np.sqrt(hdu_full['WHT'].data[clip]))
        vmax = np.maximum(1.1*np.percentile(hdu_full['SCI'].data[clip], 98), 5*avg_rms)

        #print 'VMAX: %f\n\n' %vmax

        sh = hdu_full[1].data.shape
        extent = [hdu_full[0].header['WMIN'], hdu_full[0].header['WMAX'],
                  0, sh[0]]

        fig = plt.figure(figsize=figsize)
        show = [hdu_sci[1].data, hdu_full[1].data,
                hdu_sci[1].data-hdu_con[1].data]

        desc = [r'$Contam$'+'\n'+r'$Cleaned$', r'$Model$', r'$Line$'+'\n'+r'$Residual$']

        i=0
        for data_i, desc_i in zip(show, desc):
            # Stacked panels, e.g. subplot codes 311, 312, 313
            ax = fig.add_subplot(11+i+100*len(show))
            ax.imshow(data_i, origin='lower',
                      interpolation='Nearest', vmin=-0.1*vmax, vmax=vmax,
                      extent=extent, cmap = plt.cm.viridis_r,
                      aspect='auto')

            ax.set_yticklabels([])
            ax.set_ylabel(desc_i)

            i+=1

        for ax in fig.axes[:-1]:
            ax.set_xticklabels([])

        fig.axes[-1].set_xlabel(r'$\lambda$')
        fig.tight_layout(pad=0.2)

        ## Label
        label = 'ID={0:6d}, z={1:.4f}'.format(self.beams[0].id, fit['zbest'])
        fig.axes[-1].text(0.97, -0.27, label, ha='right', va='top',
                          transform=fig.axes[-1].transAxes, fontsize=10)

        label2 = ('{0}'.format(self.Ngrism)).replace('\'', '').replace('{', '').replace('}', '')
        fig.axes[-1].text(0.03, -0.27, label2, ha='left', va='top',
                          transform=fig.axes[-1].transAxes, fontsize=10)

        # Append the model planes to the science HDU list
        hdu_sci.append(hdu_con[1])
        hdu_sci[-1].name = 'CONTINUUM'
        hdu_sci.append(hdu_full[1])
        hdu_sci[-1].name = 'FULL'

        return fig, hdu_sci
def drizzle_segmentation(self, wcsobj=None, kernel='square', pixfrac=1, verbose=False):
"""
Drizzle segmentation image from individual `MultiBeam.beams`.
Parameters
----------
wcsobj: `~astropy.wcs.WCS` or `~astropy.io.fits.Header`
Output WCS.
kernel: e.g., 'square', 'point', 'gaussian'
Drizzle kernel, see `~drizzlepac.adrizzle.drizzle`.
pixfrac: float
Drizzle 'pixfrac', see `~drizzlepac.adrizzle.drizzle`.
verbose: bool
Print status messages.
Returns
----------
drizzled_segm: `~numpy.ndarray`, type `~numpy.int64`.
Drizzled segmentation image, with image dimensions and
WCS defined in `wcsobj`.
"""
import numpy as np
import astropy.wcs as pywcs
import astropy.io.fits as pyfits
try:
from . import utils
except:
from grizli import multifit, utils
all_ids = [np.unique(beam.beam.seg) for beam in self.beams]
all_ids = np.unique(np.hstack(all_ids))[1:]
if isinstance(wcsobj, pyfits.Header):
wcs = pywcs.WCS(wcsobj)
wcs.pscale = utils.get_wcs_pscale(wcs)
else:
wcs = wcsobj
if not hasattr(wcs, 'pscale'):
wcs.pscale = utils.get_wcs_pscale(wcs)
if verbose:
print('Drizzle ID={0:.0f} (primary)'.format(self.id))
drizzled_segm = self.drizzle_segmentation_id(id=self.id, wcsobj=wcsobj, kernel=kernel, pixfrac=pixfrac, verbose=verbose)
for id in all_ids:
if int(id) == self.id:
continue
if verbose:
print('Drizzle ID={0:.0f}'.format(id))
dseg_i = self.drizzle_segmentation_id(id=id, wcsobj=wcsobj, kernel=kernel, pixfrac=pixfrac, verbose=False)
new_seg = drizzled_segm == 0
drizzled_segm[new_seg] = dseg_i[new_seg]
return drizzled_segm
def drizzle_segmentation_id(self, id=None, wcsobj=None, kernel='square', pixfrac=1, verbose=True):
"""
Drizzle segmentation image for a single ID
"""
import numpy as np
import astropy.wcs as pywcs
import astropy.io.fits as pyfits
try:
from . import utils
except:
from grizli import multifit, utils
# Can be either a header or WCS object
if isinstance(wcsobj, pyfits.Header):
wcs = pywcs.WCS(wcsobj)
wcs.pscale = utils.get_wcs_pscale(wcs)
else:
wcs = wcsobj
if not hasattr(wcs, 'pscale'):
wcs.pscale = utils.get_wcs_pscale(wcs)
if id is None:
id = self.id
sci_list = [(beam.beam.seg == id)*1. for beam in self.beams]
wht_list = [np.isfinite(beam.beam.seg)*1. for beam in self.beams]
wcs_list = [beam.direct.wcs for beam in self.beams]
out = utils.drizzle_array_groups(sci_list, wht_list, wcs_list, outputwcs=wcs, scale=0.1, kernel=kernel, pixfrac=pixfrac, verbose=verbose)
drizzled_segm = (out[0] > 0)*id
return drizzled_segm
    def drizzle_fit_lines(self, fit, pline, force_line=['Ha', 'OIII', 'Hb', 'OII'], save_fits=True, mask_lines=True, mask_sn_limit=3, mask_4959=True, verbose=True, include_segmentation=True, get_ir_psfs=True):
        """
        Drizzle 2D emission-line maps for the lines found in a redshift fit.

        For each line in the fit that is either significant (S/N > 4) or
        listed in `force_line`, drizzle a narrow-band map at the observed
        line wavelength with `drizzle_to_wavelength`, optionally masking
        pixels contaminated by *other* lines, and collect everything into a
        single multi-extension FITS HDU list.

        Parameters
        ----------
        fit : dict
            Redshift fit output; may be the old-style (keys 'zbest',
            'line_flux', 'line1d') or the fitting.GroupFitter-style
            ('z', 'cfit', 'templates') dictionary.
        pline : dict
            Drizzle parameters passed to `drizzle_to_wavelength`
            (size, pixscale, pixfrac, kernel, wcs).
        force_line : list
            Lines to drizzle regardless of their measured significance.
        save_fits : bool
            Write the result to `{group_name}_{id:05d}.line.fits`.
        mask_lines : bool
            Zero out the inverse variance of pixels where other lines are
            significant contaminants.
        mask_sn_limit : float
            Contamination threshold relative to the target line model.
        mask_4959 : bool
            Also subtract a scaled [OIII]4959 component when drizzling
            [OIII] (cfit-style fits only).
        verbose : bool
            Print status messages.
        include_segmentation : bool
            Insert a drizzled SEG extension.
        get_ir_psfs : bool
            Append drizzled WFC3/IR PSF (DPSF) extensions.

        Returns
        -------
        hdu_full : `~astropy.io.fits.HDUList`
            Line maps plus continuum/contamination/weight extensions.
        """
        line_wavelengths, line_ratios = utils.get_line_wavelengths()
        hdu_full = []
        saved_lines = []

        # Template for the [OIII]4959 component, needed for cfit-style fits
        # where only the 5007 line is in the template set
        if ('cfit' in fit) & mask_4959:
            if 'line OIII' in fit['templates']:
                t_o3 = utils.load_templates(fwhm=fit['templates']['line OIII'].fwhm, line_complexes=False, stars=False, full_line_list=['OIII-4959'], continuum_list=[], fsps_templates=False)

        # Redshift key differs between the two fit dictionary flavors
        if 'zbest' in fit:
            z_driz = fit['zbest']
        else:
            z_driz = fit['z']

        # Line fluxes: either given directly or collected from 'cfit'
        if 'line_flux' in fit:
            line_flux_dict = fit['line_flux']
        else:
            line_flux_dict = OrderedDict()
            for key in fit['cfit']:
                if key.startswith('line'):
                    line_flux_dict[key.replace('line ','')] = fit['cfit'][key]

        # Compute continuum model
        if 'cfit' in fit:
            if 'bg {0:03d}'.format(self.N-1) in fit['cfit']:
                for ib, beam in enumerate(self.beams):
                    key = 'bg {0:03d}'.format(ib)
                    self.beams[ib].background = fit['cfit'][key][0]

        cont = fit['cont1d']
        for beam in self.beams:
            beam.compute_model(spectrum_1d=[cont.wave, cont.flux],
                               is_cgs=True)

            # Optional wavelength-dependent flux rescaling of the models
            if hasattr(self, 'pscale'):
                if (self.pscale is not None):
                    scale = self.compute_scale_array(self.pscale, beam.wavef)
                    beam.beam.pscale_array = scale.reshape(beam.sh)
                else:
                    beam.beam.pscale_array = 1.
            else:
                beam.beam.pscale_array = 1.

        for line in line_flux_dict:
            line_flux, line_err = line_flux_dict[line]
            if line_err == 0:
                continue

            # Drizzle significant lines plus any in `force_line`
            if (line_flux/line_err > 4) | (line in force_line):
                if verbose:
                    print('Drizzle line -> {0:4s} ({1:.2f} {2:.2f})'.format(line, line_flux/1.e-17, line_err/1.e-17))

                line_wave_obs = line_wavelengths[line][0]*(1+z_driz)

                if mask_lines:
                    for beam in self.beams:
                        # Save the original ivar so it can be restored below
                        beam.oivar = beam.ivar*1
                        lam = beam.beam.lam_beam

                        if hasattr(beam.beam, 'pscale_array'):
                            pscale_array = beam.beam.pscale_array
                        else:
                            pscale_array = 1.

                        ### another idea, compute a model for the line itself
                        ### and mask relatively "contaminated" pixels from
                        ### other lines
                        try:
                            lm = fit['line1d'][line]
                            sp = [lm.wave, lm.flux]
                        except:
                            # cfit-style fit: scale the rest-frame template
                            key = 'line '+ line
                            lm = fit['templates'][key]
                            scl = fit['cfit'][key][0]/(1+z_driz)
                            sp = [lm.wave*(1+z_driz), lm.flux*scl]

                        #lm = fit['line1d'][line]
                        # Skip beams whose bandpass misses the line
                        if ((lm.wave.max() < lam.min()) |
                            (lm.wave.min() > lam.max())):
                            continue

                        #sp = [lm.wave, lm.flux]
                        m = beam.compute_model(spectrum_1d=sp,
                                               in_place=False, is_cgs=True)
                        lmodel = m.reshape(beam.beam.sh_beam)*pscale_array
                        if lmodel.max() == 0:
                            continue

                        if 'cfit' in fit:
                            keys = fit['cfit']
                        else:
                            keys = fit['line1d']

                        # Accumulate models of *other* lines and mask pixels
                        # where they dominate over the target line
                        beam.extra_lines = beam.contam*0.

                        for lkey in keys:
                            if not lkey.startswith('line'):
                                continue

                            key = lkey.replace('line ', '')
                            lf, le = line_flux_dict[key]
                            ### Don't mask if the line missing or undetected
                            if (lf == 0):# | (lf < mask_sn_limit*le):
                                continue

                            if key != line:
                                try:
                                    lm = fit['line1d'][lkey]
                                    sp = [lm.wave, lm.flux]
                                except:
                                    lm = fit['templates'][lkey]
                                    scl = fit['cfit'][lkey][0]/(1+z_driz)
                                    sp = [lm.wave*(1+z_driz), lm.flux*scl]

                                if ((lm.wave.max() < lam.min()) |
                                    (lm.wave.min() > lam.max())):
                                    continue

                                m = beam.compute_model(spectrum_1d=sp,
                                                       in_place=False,
                                                       is_cgs=True)

                                lcontam = m.reshape(beam.beam.sh_beam)
                                lcontam *= pscale_array
                                if lcontam.max() == 0:
                                    #print beam.grism.parent_file, lkey
                                    continue

                                beam.extra_lines += lcontam

                                # Mask pixels where the contaminating line
                                # exceeds `mask_sn_limit` times the target
                                beam.ivar[lcontam > mask_sn_limit*lmodel] *= 0

                        # Subtract 4959
                        if (line == 'OIII') & ('cfit' in fit) & mask_4959:
                            lm = t_o3['line OIII-4959']
                            scl = fit['cfit']['line OIII'][0]/(1+z_driz)
                            # Fixed 5007/4959 ratio of 2.98
                            scl *= 1./(2.98+1)
                            sp = [lm.wave*(1+z_driz), lm.flux*scl]

                            if ((lm.wave.max() < lam.min()) |
                                (lm.wave.min() > lam.max())):
                                continue

                            m = beam.compute_model(spectrum_1d=sp,
                                                   in_place=False,
                                                   is_cgs=True)

                            lcontam = m.reshape(beam.beam.sh_beam)
                            lcontam *= pscale_array
                            if lcontam.max() == 0:
                                continue

                            #print('Mask 4959!')
                            beam.extra_lines += lcontam

                hdu = drizzle_to_wavelength(self.beams, ra=self.ra,
                                            dec=self.dec, wave=line_wave_obs,
                                            fcontam=self.fcontam,
                                            **pline)

                # Restore the unmasked inverse variance
                if mask_lines:
                    for beam in self.beams:
                        beam.ivar = beam.oivar*1
                        delattr(beam, 'oivar')

                hdu[0].header['REDSHIFT'] = (z_driz, 'Redshift used')
                #for e in [3,4,5,6]:
                # The last four extensions are the line-specific products
                for e in [-4,-3,-2,-1]:
                    hdu[e].header['EXTVER'] = line
                    hdu[e].header['REDSHIFT'] = (z_driz, 'Redshift used')
                    hdu[e].header['RESTWAVE'] = (line_wavelengths[line][0],
                                                 'Line rest wavelength')

                saved_lines.append(line)

                if len(hdu_full) == 0:
                    hdu_full = hdu
                    hdu_full[0].header['NUMLINES'] = (1,
                                            "Number of lines in this file")
                else:
                    hdu_full.extend(hdu[-4:])
                    hdu_full[0].header['NUMLINES'] += 1
                    # Make sure DSCI extension is filled.  Can be empty for
                    # lines at the edge of the grism throughput
                    for f_i in range(hdu[0].header['NDFILT']):
                        filt_i = hdu[0].header['DFILT{0:02d}'.format(f_i+1)]
                        if hdu['DWHT',filt_i].data.max() != 0:
                            hdu_full['DSCI',filt_i] = hdu['DSCI',filt_i]
                            hdu_full['DWHT',filt_i] = hdu['DWHT',filt_i]

                li = hdu_full[0].header['NUMLINES']
                hdu_full[0].header['LINE{0:03d}'.format(li)] = line
                hdu_full[0].header['FLUX{0:03d}'.format(li)] = (line_flux,
                                                'Line flux, 1e-17 erg/s/cm2')
                hdu_full[0].header['ERR{0:03d}'.format(li)] = (line_err,
                                            'Line flux err, 1e-17 erg/s/cm2')

        if len(hdu_full) > 0:
            hdu_full[0].header['HASLINES'] = (' '.join(saved_lines),
                                              'Lines in this file')
        else:
            # No lines drizzled: still produce the direct-image/continuum
            # extensions at the median wavelength
            hdu = drizzle_to_wavelength(self.beams, ra=self.ra,
                                        dec=self.dec,
                                        wave=np.median(self.beams[0].wave),
                                        fcontam=self.fcontam,
                                        **pline)
            hdu_full = hdu[:-4]
            hdu_full[0].header['REDSHIFT'] = (z_driz, 'Redshift used')
            hdu_full[0].header['NUMLINES'] = 0
            hdu_full[0].header['HASLINES'] = ' '

        if include_segmentation:
            # Drizzled segmentation image matching the line-map WCS
            line_wcs = pywcs.WCS(hdu_full[1].header)
            segm = self.drizzle_segmentation(wcsobj=line_wcs)
            seg_hdu = pyfits.ImageHDU(data=segm.astype(np.int32), name='SEG')
            hdu_full.insert(1, seg_hdu)

        if get_ir_psfs:
            # Drizzled PSFs for the WFC3/IR imaging filters
            import grizli.galfit.psf
            ir_beams = []
            gr_filters = {'G102':['F105W'], 'G141':['F105W','F125W','F140W','F160W']}
            show_filters = []

            for gr in ['G102','G141']:
                if gr in self.PA:
                    show_filters.extend(gr_filters[gr])
                    for pa in self.PA[gr]:
                        for i in self.PA[gr][pa]:
                            ir_beams.append(self.beams[i])

            if len(ir_beams) > 0:
                dp = grizli.galfit.psf.DrizzlePSF(driz_hdu=hdu_full['DSCI'],
                                                  beams=self.beams)

                for filt in np.unique(show_filters):
                    if verbose:
                        print('Get linemap PSF: {0}'.format(filt))

                    psf = dp.get_psf(ra=dp.driz_wcs.wcs.crval[0],
                                     dec=dp.driz_wcs.wcs.crval[1],
                                     filter=filt,
                                     pixfrac=dp.driz_header['PIXFRAC'],
                                     kernel=dp.driz_header['DRIZKRNL'],
                                     wcs_slice=dp.driz_wcs, get_extended=True,
                                     verbose=False, get_weight=False)

                    psf[1].header['EXTNAME'] = 'DPSF'
                    psf[1].header['EXTVER'] = filt
                    hdu_full.append(psf[1])

        if save_fits:
            hdu_full.writeto('{0}_{1:05d}.line.fits'.format(self.group_name, self.id), clobber=True, output_verify='silentfix')

        return hdu_full
def run_full_diagnostics(self, pzfit={}, pspec2={}, pline={},
force_line=['Ha', 'OIII', 'Hb', 'OII'], GroupFLT=None,
prior=None, zoom=True, verbose=True):
"""TBD
size=20, pixscale=0.1,
pixfrac=0.2, kernel='square'
"""
import copy
## Defaults
pzfit_def, pspec2_def, pline_def = get_redshift_fit_defaults()
if pzfit == {}:
pzfit = pzfit_def
if pspec2 == {}:
pspec2 = pspec2_def
if pline == {}:
pline = pline_def
### Check that keywords allowed
for d, default in zip([pzfit, pspec2, pline],
[pzfit_def, pspec2_def, pline_def]):
for key in d:
if key not in default:
p = d.pop(key)
### Auto generate FWHM (in km/s) to use for line fits
if 'fwhm' in pzfit:
fwhm = pzfit['fwhm']
if pzfit['fwhm'] == 0:
fwhm = 700
if 'G141' in self.Ngrism:
fwhm = 1200
if 'G800L' in self.Ngrism:
fwhm = 1400
#
if 'G280' in self.Ngrism:
fwhm = 1500
# WFIRST
if 'GRISM' in self.Ngrism:
fwhm = 350
### Auto generate delta-wavelength of 2D spectrum
if 'dlam' in pspec2:
dlam = pspec2['dlam']
if dlam == 0:
dlam = 25
if 'G141' in self.Ngrism:
dlam = 45
if 'G800L' in self.Ngrism:
dlam = 40
if 'G280' in self.Ngrism:
dlam = 18
if 'GRISM' in self.Ngrism:
dlam = 11
### Redshift fit
zfit_in = copy.copy(pzfit)
zfit_in['fwhm'] = fwhm
zfit_in['prior'] = prior
zfit_in['zoom'] = zoom
zfit_in['verbose'] = verbose
if zfit_in['zr'] is 0:
fit, fig = self.fit_stars(**zfit_in)
else:
fit, fig = self.fit_redshift(**zfit_in)
### Make sure model attributes are set to the continuum model
models = self.reshape_flat(fit['model_cont'])
for j in range(self.N):
self.beams[j].model = models[j]*1
### 2D spectrum
spec_in = copy.copy(pspec2)
spec_in['fit'] = fit
spec_in['dlam'] = dlam
#fig2, hdu2 = self.redshift_fit_twod_figure(**spec_in)#, kwargs=spec2) #dlam=dlam, spatial_scale=spatial_scale, NY=NY)
fig2 = hdu2 = None
### Update master model
if GroupFLT is not None:
try:
ix = GroupFLT.catalog['NUMBER'] == self.beams[0].id
mag = GroupFLT.catalog['MAG_AUTO'][ix].data[0]
except:
mag = 22
sp = fit['cont1d']
GroupFLT.compute_single_model(id, mag=mag, size=-1, store=False,
spectrum_1d=[sp.wave, sp.flux],
is_cgs=True,
get_beams=None, in_place=True)
## 2D lines to drizzle
hdu_full = self.drizzle_fit_lines(fit, pline, force_line=force_line,
save_fits=True)
fit['id'] = self.id
fit['fit_bg'] = self.fit_bg
fit['grism_files'] = [b.grism.parent_file for b in self.beams]
for item in ['A','coeffs','model_full','model_cont']:
if item in fit:
p = fit.pop(item)
#p = fit.pop('coeffs')
np.save('{0}_{1:05d}.zfit.npy'.format(self.group_name, self.id), [fit])
fig.savefig('{0}_{1:05d}.zfit.png'.format(self.group_name, self.id))
#fig2.savefig('{0}_{1:05d}.zfit.2D.png'.format(self.group_name, self.id))
#hdu2.writeto('{0}_{1:05d}.zfit.2D.fits'.format(self.group_name, self.id), clobber=True, output_verify='silentfix')
label = '# id ra dec zbest '
data = '{0:7d} {1:.6f} {2:.6f} {3:.5f}'.format(self.id, self.ra, self.dec,
fit['zbest'])
for grism in ['G800L', 'G280', 'G102', 'G141', 'GRISM']:
label += ' N{0}'.format(grism)
if grism in self.Ngrism:
data += ' {0:2d}'.format(self.Ngrism[grism])
else:
data += ' {0:2d}'.format(0)
label += ' chi2 DoF '
data += ' {0:14.1f} {1:d} '.format(fit['chibest'], self.DoF)
for line in ['SII', 'Ha', 'OIII', 'Hb', 'Hg', 'OII']:
label += ' {0} {0}_err'.format(line)
if line in fit['line_flux']:
flux = fit['line_flux'][line][0]
err = fit['line_flux'][line][1]
data += ' {0:10.3e} {1:10.3e}'.format(flux, err)
fp = open('{0}_{1:05d}.zfit.dat'.format(self.group_name, self.id),'w')
fp.write(label+'\n')
fp.write(data+'\n')
fp.close()
fp = open('{0}_{1:05d}.zfit.beams.dat'.format(self.group_name, self.id),'w')
fp.write('# file filter origin_x origin_y size pad bg\n')
for ib, beam in enumerate(self.beams):
data = '{0:40s} {1:s} {2:5d} {3:5d} {4:5d} {5:5d}'.format(beam.grism.parent_file, beam.grism.filter,
beam.direct.origin[0],
beam.direct.origin[1],
beam.direct.sh[0],
beam.direct.pad)
if self.fit_bg:
data += ' {0:8.4f}'.format(fit['coeffs_full'][ib])
else:
data += ' {0:8.4f}'.format(0.0)
fp.write(data + '\n')
fp.close()
## Save figures
plt_status = plt.rcParams['interactive']
# if not plt_status:
# plt.close(fig)
# plt.close(fig2)
return fit, fig, fig2, hdu2, hdu_full
def apply_trace_shift(self, set_to_zero=False):
"""
Set beam.yoffset back to zero
"""
indices = [[i] for i in range(self.N)]
if set_to_zero:
s0 = np.zeros(len(indices))
else:
s0 = [beam.beam.yoffset for beam in self.beams]
args = (self, indices, 0, False, False, True)
self.eval_trace_shift(s0, *args)
### Reset model profile for optimal extractions
for b in self.beams:
#b._parse_from_data()
b._parse_from_data(contam_sn_mask=b.contam_sn_mask,
min_mask=b.min_mask, min_sens=b.min_sens)
self._parse_beam_arrays()
def fit_trace_shift(self, split_groups=True, max_shift=5, tol=1.e-2, verbose=True, lm=False, fit_with_psf=False):
"""TBD
"""
from scipy.optimize import leastsq, minimize
if split_groups:
indices = []
for g in self.PA:
for p in self.PA[g]:
indices.append(self.PA[g][p])
else:
indices = [[i] for i in range(self.N)]
s0 = np.zeros(len(indices))
bounds = np.array([[-max_shift,max_shift]]*len(indices))
args = (self, indices, 0, lm, verbose, fit_with_psf)
if lm:
out = leastsq(self.eval_trace_shift, s0, args=args, Dfun=None, full_output=0, col_deriv=0, ftol=1.49012e-08, xtol=1.49012e-08, gtol=0.0, maxfev=0, epsfcn=None, factor=100, diag=None)
shifts = out[0]
else:
out = minimize(self.eval_trace_shift, s0, bounds=bounds, args=args, method='Powell', tol=tol)
if out.x.shape == ():
shifts = [float(out.x)]
else:
shifts = out.x
# Apply to PSF if necessary
args = (self, indices, 0, lm, verbose, True)
self.eval_trace_shift(shifts, *args)
### Reset model profile for optimal extractions
for b in self.beams:
#b._parse_from_data()
b._parse_from_data(contam_sn_mask=b.contam_sn_mask,
min_mask=b.min_mask, min_sens=b.min_sens)
# Needed for background modeling
if hasattr(b, 'xp'):
delattr(b, 'xp')
self._parse_beam_arrays()
self.initialize_masked_arrays()
return shifts, out
    @staticmethod
    def eval_trace_shift(shifts, self, indices, poly_order, lm, verbose, fit_with_psf):
        """Objective function for fitting y-trace shifts.

        Applies one shift per index group to the beam traces, recomputes the
        beam models, fits a global polynomial model with least squares and
        returns either the weighted residual vector (`lm=True`) or the
        reduced chi-squared.

        Declared static so it can be passed directly to `scipy.optimize`
        routines; the `MultiBeam` instance is supplied explicitly as the
        second argument via `args`.
        """
        import scipy.ndimage as nd

        for il, l in enumerate(indices):
            for i in l:
                beam = self.beams[i]
                beam.beam.add_ytrace_offset(shifts[il])

                if hasattr(self.beams[i].beam, 'psf') & fit_with_psf:
                    #beam.model = nd.shift(beam.modelf.reshape(beam.sh_beam), (shifts[il], 0))

                    # This is slow, so run with fit_with_psf=False if possible
                    beam.init_epsf(yoff=0, #shifts[il],
                                   psf_params=beam.beam.psf_params)

                    beam.compute_model(use_psf=True)
                    m = beam.compute_model(in_place=False)
                    #beam.modelf = beam.model.flatten()
                    #beam.model = beam.modelf.reshape(beam.beam.sh_beam)

                    beam.flat_flam = beam.compute_model(in_place=False, is_cgs=True)
                else:
                    #self.beams[i].beam.add_ytrace_offset(shifts[il])
                    #self.beams[i].compute_model(is_cgs=True)
                    beam.compute_model(use_psf=False)

                # NOTE(review): debugging block; `ds9` is not defined in this
                # scope, so this would raise NameError if ever executed
                if __name__ == '__main__':
                    print(self.beams[i].beam.yoffset, shifts[il])
                    ds9.view(self.beams[i].model)

        # Refit the polynomial continuum model with the shifted traces
        self.flat_flam = np.hstack([b.beam.model.flatten() for b in self.beams])
        self.poly_order=-1
        self.init_poly_coeffs(poly_order=poly_order)

        self.fit_bg = False

        A = self.A_poly*1
        ok_temp = np.sum(A, axis=1) != 0
        out_coeffs = np.zeros(A.shape[0])

        y = self.scif
        out = np.linalg.lstsq(A.T, y, rcond=None)
        lstsq_coeff, residuals, rank, s = out
        coeffs = lstsq_coeff

        out_coeffs = np.zeros(A.shape[0])
        # NOTE(review): `coeffs` has length A.shape[0] since the full design
        # matrix was fit; this assignment assumes `ok_temp` is all True
        out_coeffs[ok_temp] = coeffs
        modelf = np.dot(out_coeffs, A)

        if lm:
            # L-M, return residuals
            if verbose:
                print('{0} [{1}]'.format(utils.NO_NEWLINE, ' '.join(['{0:5.2f}'.format(s) for s in shifts])))

            return ((self.scif-modelf)*self.sivarf)[self.fit_mask]

        chi2 = np.sum(((self.scif - modelf)**2*self.ivarf)[self.fit_mask])

        if verbose:
            print('{0} [{1}] {2:6.2f}'.format(utils.NO_NEWLINE, ' '.join(['{0:5.2f}'.format(s) for s in shifts]), chi2/self.DoF))

        return chi2/self.DoF
    def drizzle_grisms_and_PAs(self, size=10, fcontam=0, flambda=False, scale=1, pixfrac=0.5, kernel='square', make_figure=True, usewcs=False, zfit=None, diff=True, grism_list=['G800L','G102','G141','F090W','F115W','F150W','F200W','F356W','F410M','F444W']):
        """Make figure showing spectra at different orients/grisms

        Drizzles the 2D spectra separately for each grism and each position
        angle (PA), and also produces a combined stack per grism.  For every
        drizzled cutout, companion CONTAM, MODEL and KERNEL extensions are
        generated.

        Parameters
        ----------
        size : int
            Spatial half-size of the cutouts, output pixels.
        fcontam : float
            Contamination weighting parameter used for the per-grism stacks.
        flambda : bool
            Convert pixels to f-lambda flux densities.
        scale : float
            Relative pixel scale of the output.
        pixfrac : float
            Drizzle PIXFRAC.
        kernel : str
            Drizzle kernel.
        make_figure : bool
            Also make the diagnostic figure with `show_drizzle_HDU`.
        usewcs : bool
            Use the WCS-based drizzler (`drizzle_2d_spectrum_wcs`) rather
            than `drizzle_2d_spectrum`.
        zfit : dict, optional
            Fit results used for background levels and the continuum model;
            if None, a polynomial background is fit on the fly.
        diff : bool
            Passed to `show_drizzle_HDU`.
        grism_list : list
            Ordering/selection of grism names to process.

        Returns
        -------
        output_hdu : `~astropy.io.fits.HDUList`
            Drizzled products for all grisms and PAs.
        fig : `matplotlib.figure.Figure`
            Only returned if `make_figure` is True.
        """
        from matplotlib.ticker import MultipleLocator
        #import pysynphot as S

        # Select the drizzle implementation
        if usewcs:
            drizzle_function = drizzle_2d_spectrum_wcs
        else:
            drizzle_function = drizzle_2d_spectrum

        NX = len(self.PA)
        NY = 0
        for g in self.PA:
            NY = np.maximum(NY, len(self.PA[g]))

        NY += 1

        # keys = list(self.PA)
        # keys.sort()
        # Keep the ordering defined by `grism_list`
        keys = []
        for key in grism_list:
            if key in self.PA:
                keys.append(key)

        # Per-beam background levels: from `zfit` if available, otherwise
        # from a quick polynomial fit
        if zfit is not None:
            if 'coeffs_full' in zfit:
                bg = zfit['coeffs_full'][:self.N]
                z_cont = zfit['zbest']
            else:
                # fitting.GroupFitter
                z_cont = zfit['z']
                bg = []
                for k in zfit['cfit']:
                    if k.startswith('bg '):
                        bg.append(zfit['cfit'][k][0])

                bg = np.array(bg)
        else:
            # Fit background
            try:
                out = self.xfit_at_z(z=0, templates={}, fitter='lstsq',
                                     poly_order=3, fit_background=True)
                bg = out[-3][:self.N]
            except:
                bg = [0]*self.N

        for ib, beam in enumerate(self.beams):
            beam.bg = bg[ib]

        # Global metadata on the primary HDU
        prim = pyfits.PrimaryHDU()
        h0 = prim.header
        h0['ID'] = (self.id, 'Object ID')
        h0['RA'] = (self.ra, 'Right ascension')
        h0['DEC'] = (self.dec, 'Declination')
        h0['ISFLAM'] = (flambda, 'Pixels in f-lam units')
        h0['FCONTAM'] = (fcontam, 'Contamination parameter')
        h0['NGRISM'] = (len(keys), 'Number of grisms')

        all_hdus = []
        for ig, g in enumerate(keys):
            all_beams = []
            hdus = []

            pas = list(self.PA[g].keys())
            pas.sort()

            h0['GRISM{0:03d}'.format(ig+1)] = (g, 'Grism name')

            h0['N'+g] = (len(pas), 'Number of PAs for grism '+g)

            for ipa, pa in enumerate(pas):
                h0[g+'{0:02d}'.format(ipa+1)] = (pa, 'PA')

                beams = [self.beams[i] for i in self.PA[g][pa]]
                all_beams.extend(beams)

                #dlam = np.ceil(np.diff(beams[0].beam.lam)[0])*scale
                dlam = GRISM_LIMITS[g][2]*scale

                # Background- and contamination-subtracted science data
                data = [beam.grism['SCI']-beam.contam-beam.bg
                           for beam in beams]

                hdu = drizzle_function(beams, data=data,
                                       wlimit=GRISM_LIMITS[g], dlam=dlam,
                                       spatial_scale=scale, NY=size,
                                       pixfrac=pixfrac,
                                       kernel=kernel,
                                       convert_to_flambda=flambda,
                                       fcontam=0, ds9=None)

                hdu[0].header['RA'] = (self.ra, 'Right ascension')
                hdu[0].header['DEC'] = (self.dec, 'Declination')
                hdu[0].header['GRISM'] = (g, 'Grism')
                hdu[0].header['PA'] = (pa, 'Dispersion PA')
                hdu[0].header['ISFLAM'] = (flambda, 'Pixels in f-lam units')
                hdu[0].header['CONF'] = (beams[0].beam.conf.conf_file,
                                         'Configuration file')
                hdu[0].header['DLAM0'] = (np.median(np.diff(beams[0].wave)),
                                          'Native dispersion per pix')

                ## Contam
                data = [beam.contam for beam in beams]
                hdu_contam = drizzle_function(beams, data=data,
                                       wlimit=GRISM_LIMITS[g], dlam=dlam,
                                       spatial_scale=scale, NY=size,
                                       pixfrac=pixfrac,
                                       kernel=kernel,
                                       convert_to_flambda=flambda,
                                       fcontam=0, ds9=None)

                hdu_contam[1].header['EXTNAME'] = 'CONTAM'
                hdu.append(hdu_contam[1])

                ## Continuum model
                if zfit is not None:
                    m = zfit['cont1d']
                    for beam in beams:
                        beam.compute_model(spectrum_1d=[m.wave, m.flux],
                                           is_cgs=True)
                else:
                    # simple flat spectrum
                    for beam in beams:
                        beam.compute_model()

                data = [beam.beam.model for beam in beams]
                hdu_model = drizzle_function(beams, data=data,
                                       wlimit=GRISM_LIMITS[g], dlam=dlam,
                                       spatial_scale=scale, NY=size,
                                       pixfrac=pixfrac,
                                       kernel=kernel,
                                       convert_to_flambda=flambda,
                                       fcontam=0, ds9=None)

                hdu_model[1].header['EXTNAME'] = 'MODEL'
                if zfit is not None:
                    hdu_model[1].header['CONTIN1D'] = (True, 'Model is fit continuum')
                    hdu_model[1].header['REDSHIFT'] = (z_cont, 'Redshift of the continuum spectrum')
                else:
                    hdu_model[1].header['CONTIN1D'] = (False, 'Model is fit continuum')

                hdu.append(hdu_model[1])

                # Line kernel: drizzle the response to a delta-function-like
                # emission line at the reference wavelength
                if not usewcs:
                    h = hdu[1].header
                    #gau = S.GaussianSource(1.e-17, h['CRVAL1'], h['CD1_1']*1)
                    # header keywords scaled to um
                    toA = 1.e4
                    #toA = 1.
                    #gau = S.GaussianSource(1., h['CRVAL1']*toA, h['CD1_1']*toA)
                    gau = utils.SpectrumTemplate(central_wave=h['CRVAL1']*toA, fwhm=h['CD1_1']*toA)

                    #print('XXX', h['CRVAL1'], h['CD1_1'], h['CRPIX1'], toA, gau.wave[np.argmax(gau.flux)])

                    for beam in beams:
                        beam.compute_model(spectrum_1d=[gau.wave, gau.flux],
                                           is_cgs=True)

                    data = [beam.beam.model for beam in beams]

                    h_kern = drizzle_function(beams, data=data,
                                       wlimit=GRISM_LIMITS[g],
                                       dlam=dlam,
                                       spatial_scale=scale, NY=size,
                                       pixfrac=pixfrac,
                                       kernel=kernel,
                                       convert_to_flambda=flambda,
                                       fcontam=0, fill_wht=True,
                                       ds9=None)

                    # Cutout centered on the reference pixel
                    kern = h_kern[1].data[:,h['CRPIX1']-1-size:h['CRPIX1']-1+size]
                    #print('XXX', kern.max(), h_kern[1].data.max())

                    hdu_kern = pyfits.ImageHDU(data=kern, header=h_kern[1].header, name='KERNEL')
                    hdu.append(hdu_kern)
                else:
                    hdu['DSCI'].header['EXTNAME'] = 'KERNEL'

                ## Pull out zeroth extension
                for k in hdu[0].header:
                    hdu[1].header[k] = hdu[0].header[k]

                for e in hdu[1:]:
                    e.header['EXTVER'] = '{0},{1}'.format(g, pa)

                hdus.append(hdu[1:])

            ### Stack of each grism
            data = [beam.grism['SCI']-beam.contam-beam.bg
                       for beam in all_beams]

            hdu = drizzle_function(all_beams, data=data,
                                   wlimit=GRISM_LIMITS[g], dlam=dlam,
                                   spatial_scale=scale, NY=size,
                                   pixfrac=pixfrac,
                                   kernel=kernel,
                                   convert_to_flambda=flambda,
                                   fcontam=fcontam, ds9=None)

            hdu[0].header['RA'] = (self.ra, 'Right ascension')
            hdu[0].header['DEC'] = (self.dec, 'Declination')
            hdu[0].header['GRISM'] = (g, 'Grism')
            hdu[0].header['ISFLAM'] = (flambda, 'Pixels in f-lam units')
            hdu[0].header['CONF'] = (beams[0].beam.conf.conf_file,
                                     'Configuration file')
            hdu[0].header['DLAM0'] = (np.median(np.diff(beams[0].wave)),
                                      'Native dispersion per pix')

            ## Full continuum model
            if zfit is not None:
                m = zfit['cont1d']
                for beam in all_beams:
                    beam.compute_model(spectrum_1d=[m.wave, m.flux],
                                       is_cgs=True)
            else:
                for beam in all_beams:
                    beam.compute_model()

            data = [beam.beam.model for beam in all_beams]
            hdu_model = drizzle_function(all_beams, data=data,
                                   wlimit=GRISM_LIMITS[g], dlam=dlam,
                                   spatial_scale=scale, NY=size,
                                   pixfrac=pixfrac,
                                   kernel=kernel,
                                   convert_to_flambda=flambda,
                                   fcontam=fcontam, ds9=None)

            hdu_model[1].header['EXTNAME'] = 'MODEL'
            if zfit is not None:
                hdu_model[1].header['CONTIN1D'] = (True, 'Model is fit continuum')
                hdu_model[1].header['REDSHIFT'] = (z_cont, 'Redshift of the continuum spectrum')
            else:
                hdu_model[1].header['CONTIN1D'] = (False, 'Model is fit continuum')

            hdu.append(hdu_model[1])

            ## Full kernel
            h = hdu[1].header
            #gau = S.GaussianSource(1.e-17, h['CRVAL1'], h['CD1_1']*1)
            toA = 1.e4
            #gau = S.GaussianSource(1., h['CRVAL1']*toA, h['CD1_1']*toA)
            gau = utils.SpectrumTemplate(central_wave=h['CRVAL1']*toA, fwhm=h['CD1_1']*toA)

            for beam in all_beams:
                beam.compute_model(spectrum_1d=[gau.wave, gau.flux],
                                   is_cgs=True)

            data = [beam.beam.model for beam in all_beams]
            h_kern = drizzle_function(all_beams, data=data,
                                   wlimit=GRISM_LIMITS[g], dlam=dlam,
                                   spatial_scale=scale, NY=size,
                                   pixfrac=pixfrac,
                                   kernel=kernel,
                                   convert_to_flambda=flambda,
                                   fcontam=0, fill_wht=True, ds9=None)

            kern = h_kern[1].data[:,h['CRPIX1']-1-size:h['CRPIX1']-1+size]
            hdu_kern = pyfits.ImageHDU(data=kern, header=h_kern[1].header, name='KERNEL')
            hdu.append(hdu_kern)

            ## Pull out zeroth extension
            for k in hdu[0].header:
                hdu[1].header[k] = hdu[0].header[k]

            for e in hdu[1:]:
                e.header['EXTVER'] = '{0}'.format(g)

            hdus.append(hdu[1:])
            all_hdus.extend(hdus)

        output_hdu = pyfits.HDUList([prim])
        for hdu in all_hdus:
            output_hdu.extend(hdu)

        if make_figure:
            fig = show_drizzle_HDU(output_hdu, diff=diff)
            return output_hdu, fig
        else:
            return output_hdu #all_hdus
    def flag_with_drizzled(self, hdul, sigma=4, update=True, interp='nearest', verbose=True):
        """
        Update `MultiBeam` masks based on the blotted drizzled combined image

        [in progress ... xxx]

        Parameters
        ----------
        hdul : `~astropy.io.fits.HDUList`
            FITS HDU list output from `drizzle_grisms_and_PAs` or read from a
            `stack.fits` file.

        sigma : float
            Residual threshold to flag.

        update : bool
            Update the mask.

        interp : str
            Interpolation method for `~drizzlepac.astrodrizzle.ablot`.

        verbose : bool
            Print the number of newly masked pixels per beam.

        Returns
        -------
        Updates the individual `fit_mask` attributes of the individual beams
        if `update==True`.
        """
        # Prefer the standalone `drizzle` package; fall back to drizzlepac
        try:
            from drizzle.doblot import doblot
            blotter = doblot
        except:
            from drizzlepac.astrodrizzle import ablot
            blotter = ablot.do_blot

        # Read the drizzled arrays
        Ng = hdul[0].header['NGRISM']
        ref_wcs = {}
        ref_data = {}
        flag_grism = {}

        for i in range(Ng):
            g = hdul[0].header['GRISM{0:03d}'.format(i+1)]
            ref_wcs[g] = pywcs.WCS(hdul['SCI',g].header)
            ref_wcs[g].pscale = utils.get_wcs_pscale(ref_wcs[g])
            ref_data[g] = hdul['SCI',g].data

            # Only flag grisms whose stack combines more than one PA
            flag_grism[g] = hdul[0].header['N{0}'.format(g)] > 1

        # Do the masking
        for i, beam in enumerate(self.beams):
            g = beam.grism.filter
            if not flag_grism[g]:
                continue

            # Blot the drizzled stack back onto the frame of this beam
            beam_header, flt_wcs = beam.full_2d_wcs()
            blotted = blotter(ref_data[g], ref_wcs[g],
                              flt_wcs, 1,
                              coeffs=True, interp=interp, sinscl=1.0,
                              stepsize=10, wcsmap=None)

            # Keep pixels where the blot is defined and the residual
            # significance is below `sigma`
            resid = (beam.grism['SCI'] - beam.contam - blotted)
            resid *= np.sqrt(beam.ivar)
            blot_mask = (blotted != 0) & (np.abs(resid) < sigma)
            if verbose:
                print('Beam {0:>3d}: {1:>4d} new masked pixels'.format(i, beam.fit_mask.sum() - (beam.fit_mask & blot_mask.flatten()).sum()))

            if update:
                beam.fit_mask &= blot_mask.flatten()

        if update:
            self._parse_beams()
            self.initialize_masked_arrays()
    def oned_spectrum(self, tfit=None, **kwargs):
        """Compute full 1D spectrum with optional best-fit model

        Parameters
        ----------
        bin : float / int
            Bin factor relative to the size of the native spectral bins of a
            given grism.

        tfit : dict
            Output of `~grizli.fitting.mb.template_at_z`.  If supplied, the
            background is subtracted and 'line'/'cont' model columns are
            added to the output tables.

        Returns
        -------
        sp : dict
            Dictionary of the extracted 1D spectra.  Keys are the grism
            names and the values are `~astropy.table.Table` objects.
        """
        import astropy.units as u

        # "Flat" spectrum to perform flux calibration
        # (drop any trailing photometry elements from the fit mask)
        if self.Nphot > 0:
            flat_data = self.flat_flam[self.fit_mask[:-self.Nphotbands]]
        else:
            flat_data = self.flat_flam[self.fit_mask]

        sp_flat = self.optimal_extract(flat_data, **kwargs)

        # Best-fit line and continuum models, with background fit
        if tfit is not None:
            bg_model = self.get_flat_background(tfit['coeffs'],
                                                apply_mask=True)

            line_model = self.get_flat_model([tfit['line1d'].wave,
                                              tfit['line1d'].flux])
            cont_model = self.get_flat_model([tfit['line1d'].wave,
                                              tfit['cont1d'].flux])

            sp_line = self.optimal_extract(line_model, **kwargs)
            sp_cont = self.optimal_extract(cont_model, **kwargs)
        else:
            bg_model = 0.

        # Optimal spectral extraction
        sp = self.optimal_extract(self.scif_mask[:self.Nspec]-bg_model, **kwargs)

        # Loop through grisms, change units and add fit columns
        # NB: setting units to "count / s" to comply with FITS standard,
        # where count / s = electron / s
        for k in sp:
            sp[k]['flat'] = sp_flat[k]['flux']
            flat_unit = (u.count / u.s) / (u.erg / u.s / u.cm**2 / u.AA)
            sp[k]['flat'].unit = flat_unit

            sp[k]['flux'].unit = u.count / u.s
            sp[k]['err'].unit = u.count / u.s

            if tfit is not None:
                sp[k]['line'] = sp_line[k]['flux']
                sp[k]['line'].unit = u.count / u.s
                sp[k]['cont'] = sp_cont[k]['flux']
                sp[k]['cont'].unit = u.count / u.s

            sp[k].meta['GRISM'] = (k, 'Grism name')

            # Metadata: total exposure time and parent files for this grism
            exptime = count = 0
            for pa in self.PA[k]:
                for i in self.PA[k][pa]:
                    exptime += self.beams[i].grism.header['EXPTIME']
                    count += 1
                    parent = (self.beams[i].grism.parent_file, 'Parent file')
                    sp[k].meta['FILE{0:04d}'.format(count)] = parent

            sp[k].meta['NEXP'] = (count, 'Number of exposures')
            sp[k].meta['EXPTIME'] = (exptime, 'Total exposure time')
            sp[k].meta['NPA'] = (len(self.PA[k]), 'Number of PAs')

        return sp
def oned_spectrum_to_hdu(self, sp=None, outputfile=None, units=None, **kwargs):
"""Generate 1D spectra fits HDUList
Parameters
----------
sp : optional, dict
Output of `~grizli.multifit.MultiBeam.oned_spectrum`. If None,
then run that function with `**kwargs`.
outputfile : None, str
If a string supplied, then write the `~astropy.io.fits.HDUList` to
a file.
Returns
-------
hdul : `~astropy.io.fits.HDUList`
FITS version of the 1D spectrum tables.
"""
from astropy.io.fits.convenience import table_to_hdu
# Generate the spectrum if necessary
if sp is None:
sp = self.oned_spectrum(**kwargs)
# Metadata in PrimaryHDU
prim = pyfits.PrimaryHDU()
prim.header['ID'] = (self.id, 'Object ID')
prim.header['RA'] = (self.ra, 'Right Ascension')
prim.header['DEC'] = (self.dec, 'Declination')
prim.header['TARGET'] = (self.group_name, 'Target Name')
prim.header['MW_EBV'] = (self.MW_EBV, 'Galactic extinction E(B-V)')
for g in ['G102', 'G141', 'G800L']:
if g in sp:
prim.header['N_{0}'.format(g)] = sp[g].meta['NEXP']
prim.header['T_{0}'.format(g)] = sp[g].meta['EXPTIME']
prim.header['PA_{0}'.format(g)] = sp[g].meta['NPA']
else:
prim.header['N_{0}'.format(g)] = (0, 'Number of exposures')
prim.header['T_{0}'.format(g)] = (0, 'Total exposure time')
prim.header['PA_{0}'.format(g)] = (0, 'Number of PAs')
for i, k in enumerate(sp):
prim.header['GRISM{0:03d}'.format(i+1)] = (k, 'Grism name')
# Generate HDUList
hdul = [prim]
for k in sp:
hdu = table_to_hdu(sp[k])
hdu.header['EXTNAME'] = k
hdul.append(hdu)
# Outputs
hdul = pyfits.HDUList(hdul)
if outputfile is None:
return hdul
else:
hdul.writeto(outputfile, overwrite=True)
return hdul
    def check_for_bad_PAs(self, poly_order=1, chi2_threshold=1.5, fit_background=True, reinit=True):
        """Flag position angles with anomalously poor polynomial fits.

        For each grism/PA group, fit polynomial templates and compare the
        reduced chi-squared to the minimum over the PAs of that grism; PAs
        whose ratio exceeds `chi2_threshold` are flagged as bad.

        Parameters
        ----------
        poly_order : int
            Order of the polynomial templates.
        chi2_threshold : float
            Maximum allowed ratio of a PA's reduced chi-squared to the
            per-grism minimum.
        fit_background : bool
            Fit a background component along with the polynomials.
        reinit : bool
            Re-initialize `self.beams` keeping only the good PAs.

        Returns
        -------
        fit_log : `~collections.OrderedDict`
            Per-grism/PA chi-squared statistics.
        keep_dict : dict
            PAs retained for each grism.
        has_bad : bool
            True if any PA was flagged.
        """
        wave = np.linspace(2000,2.5e4,100)
        poly_templates = utils.polynomial_templates(wave, order=poly_order)

        fit_log = OrderedDict()
        keep_dict = {}
        has_bad = False

        keep_beams = []

        for g in self.PA:
            fit_log[g] = OrderedDict()
            keep_dict[g] = []

            for pa in self.PA[g]:
                # Fit the polynomial model to this grism/PA subset alone
                beams = [self.beams[i] for i in self.PA[g][pa]]
                mb_i = MultiBeam(beams, fcontam=self.fcontam,
                                 sys_err=self.sys_err)
                try:
                    chi2, _, _, _ = mb_i.xfit_at_z(z=0,
                                                   templates=poly_templates,
                                                   fit_background=fit_background)
                except:
                    # Failed fit: force a huge chi2 so the PA is rejected
                    chi2 = 1e30

                if False:
                    p_i = mb_i.template_at_z(z=0, templates=poly_templates, fit_background=fit_background, fitter='lstsq', fwhm=1400, get_uncertainties=2)

                fit_log[g][pa] = {'chi2': chi2, 'DoF': mb_i.DoF,
                                  'chi_nu': chi2/np.maximum(mb_i.DoF, 1)}

            # Best (lowest) reduced chi-squared over PAs of this grism
            min_chinu = 1e30
            for pa in self.PA[g]:
                min_chinu = np.minimum(min_chinu, fit_log[g][pa]['chi_nu'])

            fit_log[g]['min_chinu'] = min_chinu

            for pa in self.PA[g]:
                fit_log[g][pa]['chinu_ratio'] = fit_log[g][pa]['chi_nu']/min_chinu

                if fit_log[g][pa]['chinu_ratio'] < chi2_threshold:
                    keep_dict[g].append(pa)
                    keep_beams.extend([self.beams[i] for i in self.PA[g][pa]])
                else:
                    has_bad = True

        if reinit:
            self.beams = keep_beams
            self._parse_beams(psf=self.psf_param_dict is not None)

        return fit_log, keep_dict, has_bad
def get_redshift_fit_defaults():
    """Default parameter dictionaries for the redshift-fitting machinery.

    Returns
    -------
    pzfit_def : dict
        Defaults for the redshift fit itself: redshift range/steps (`zr`,
        `dz`), template/fitter options and chi2 threshold.

    pspec2_def : dict
        Defaults for drizzling the 2D spectrum.

    pline_def : dict
        Defaults for drizzling emission-line maps.
    """
    pzfit_def = {'zr': [0.5, 1.6], 'dz': [0.005, 0.0004], 'fwhm': 0,
                 'poly_order': 0, 'fit_background': True,
                 'delta_chi2_threshold': 0.004, 'fitter': 'nnls',
                 'prior': None, 'templates': {}, 'figsize': [8, 5],
                 'fsps_templates': False}

    pspec2_def = {'dlam': 0, 'spatial_scale': 1, 'NY': 20,
                  'figsize': [8, 3.5]}

    pline_def = {'size': 20, 'pixscale': 0.1, 'pixfrac': 0.2,
                 'kernel': 'square', 'wcs': None}

    return pzfit_def, pspec2_def, pline_def
def drizzle_2d_spectrum(beams, data=None, wlimit=[1.05, 1.75], dlam=50,
                        spatial_scale=1, NY=10, pixfrac=0.6, kernel='square',
                        convert_to_flambda=True, fcontam=0.2, fill_wht=False,
                        ds9=None):
    """Drizzle 2D spectrum from a list of beams

    Parameters
    ----------
    beams : list of `~.model.BeamCutout` objects

    data : None or list
        optionally, drizzle data specified in this list rather than the
        contamination-subtracted arrays from each beam.

    wlimit : [float, float]
        Limits on the wavelength array to drizzle ([wlim, wmax])

    dlam : float
        Delta wavelength per pixel

    spatial_scale : float
        Relative scaling of the spatial axis (1 = native pixels)

    NY : int
        Size of the cutout in the spatial dimension, in output pixels

    pixfrac : float
        Drizzle PIXFRAC (for `kernel` = 'point')

    kernel : str, ('square' or 'point')
        Drizzle kernel to use

    convert_to_flambda : bool, float
        Convert the 2D spectrum to physical units using the sensitivity curves
        and if float provided, scale the flux densities by that value

    fcontam: float
        Factor by which to scale the contamination arrays and add to the
        pixel variances.

    fill_wht: bool
        Fill `wht==0` pixels of the beam weights with the median nonzero
        value.

    ds9: `~grizli.ds9.DS9`
        Show intermediate steps of the drizzling

    Returns
    -------
    hdu : `~astropy.io.fits.HDUList`
        FITS HDUList with the drizzled 2D spectrum and weight arrays

    """
    # NOTE(review): the list defaults (`wlimit`) are shared across calls but
    # never mutated here, so they are harmless in practice.
    from astropy import log

    # Prefer the patched "drizzle" fork (version string 1.12.99) that accepts
    # arbitrary input/output arrays; otherwise fall back to drizzlepac.
    try:
        import drizzle
        if drizzle.__version__ != '1.12.99':
            # Not the fork that works for all input/output arrays
            raise(ImportError)

        #print('drizzle!!')

        from drizzle.dodrizzle import dodrizzle
        drizzler = dodrizzle
        dfillval = '0'
    except:
        from drizzlepac.astrodrizzle import adrizzle
        adrizzle.log.setLevel('ERROR')
        drizzler = adrizzle.do_driz
        dfillval = 0

    log.setLevel('ERROR')
    #log.disable_warnings_logging()

    # Output spectral grid: NX is the half-width in pixels
    NX = int(np.round(np.diff(wlimit)[0]*1.e4/dlam)) // 2
    center = np.mean(wlimit[:2])*1.e4

    out_header, output_wcs = utils.full_spectrum_wcsheader(center_wave=center,
                                 dlam=dlam, NX=NX,
                                 spatial_scale=spatial_scale, NY=NY)

    sh = (out_header['NAXIS2'], out_header['NAXIS1'])

    # Accumulators for the science drizzle ...
    outsci = np.zeros(sh, dtype=np.float32)
    outwht = np.zeros(sh, dtype=np.float32)
    outctx = np.zeros(sh, dtype=np.int32)

    # ... and for the drizzled contamination weight, used for the variance
    outvar = np.zeros(sh, dtype=np.float32)
    outwv = np.zeros(sh, dtype=np.float32)
    outcv = np.zeros(sh, dtype=np.int32)

    if data is None:
        data = []
        for i, beam in enumerate(beams):
            ### Contamination-subtracted
            beam_data = beam.grism.data['SCI'] - beam.contam
            data.append(beam_data)

    for i, beam in enumerate(beams):
        ## Get specific WCS for each beam
        beam_header, beam_wcs = beam.full_2d_wcs()

        # Downweight contamination
        # wht = 1/beam.ivar + (fcontam*beam.contam)**2
        # wht = np.cast[np.float32](1/wht)
        # wht[~np.isfinite(wht)] = 0.

        # Exponential suppression of contaminated pixels, scaled by their S/N
        contam_weight = np.exp(-(fcontam*np.abs(beam.contam)*np.sqrt(beam.ivar)))
        wht = beam.ivar*contam_weight
        wht[~np.isfinite(wht)] = 0.
        contam_weight[beam.ivar == 0] = 0

        if fill_wht:
            # Avoid holes in the drizzled weight map
            wht_mask = wht == 0
            med_wht = np.median(wht[~wht_mask])
            wht[wht_mask] = med_wht
            #print('xx Fill weight: {0}'.format(med_wht))

        data_i = data[i]*1.
        scl = 1.
        if convert_to_flambda:
            #data_i *= convert_to_flambda/beam.beam.sensitivity
            #wht *= (beam.beam.sensitivity/convert_to_flambda)**2

            # Per-column sensitivity from the flat-spectrum model's
            # column sums
            scl = convert_to_flambda#/1.e-17
            scl *= 1./beam.flat_flam.reshape(beam.beam.sh_beam).sum(axis=0)
            #scl = convert_to_flambda/beam.beam.sensitivity

        data_i *= scl
        wht *= (1/scl)**2
        #contam_weight *= scl

        # Mask pixels where either data or scale factor is non-finite
        wht[~np.isfinite(data_i+scl)] = 0
        contam_weight[~np.isfinite(data_i+scl)] = 0
        data_i[~np.isfinite(data_i+scl)] = 0

        ###### Go drizzle

        ### Contamination-cleaned
        drizzler(data_i, beam_wcs, wht, output_wcs,
                 outsci, outwht, outctx, 1., 'cps', 1,
                 wcslin_pscale=1., uniqid=1,
                 pixfrac=pixfrac, kernel=kernel, fillval=dfillval)

        # For variance
        drizzler(contam_weight, beam_wcs, wht, output_wcs,
                 outvar, outwv, outcv, 1., 'cps', 1,
                 wcslin_pscale=1., uniqid=1,
                 pixfrac=pixfrac, kernel=kernel, fillval=dfillval)

        if ds9 is not None:
            ds9.view(outsci/output_wcs.pscale**2, header=out_header)

        # if False:
        #     # Plot the spectra for testing
        #     w, f, e = beam.beam.trace_extract(data_i, ivar=wht, r=3)
        #     clip = (f/e > 0.5)
        #     clip &= (e < 2*np.median(e[clip]))
        #     plt.errorbar(w[clip], f[clip], e[clip], marker='.', color='k', alpha=0.5, ecolor='0.8', linestyle='None')
        #     dw = np.median(np.diff(w))

    ### Correct for drizzle scaling
    area_ratio = 1./output_wcs.pscale**2

    ### Preserve flux (has to preserve aperture flux along spatial axis but
    ### average in spectral axis).
    #area_ratio *= spatial_scale

    # preserve flux density
    flux_density_scale = spatial_scale**2

    # science
    outsci *= area_ratio*flux_density_scale

    # variance: normalize the drizzled contamination weight by its own
    # weight map, then invert to get the output inverse variance
    outvar *= area_ratio/outwv*flux_density_scale**2
    outwht = 1/outvar
    outwht[(outvar == 0) | (~np.isfinite(outwht))] = 0

    # if True:
    #     # Plot for testing....
    #     yp, xp = np.indices(outsci.shape)
    #     mask = np.abs(yp-NY) <= 3/spatial_scale
    #     fl = (outsci*mask).sum(axis=0)
    #     flv = (1/outwht*mask).sum(axis=0)
    #
    #     wi = grizli.stack.StackedSpectrum.get_wavelength_from_header(out_header)
    #
    #     plt.errorbar(wi[:-1], fl[1:], np.sqrt(flv)[1:], alpha=0.8) #*area_ratio)

    #return outwht, outsci, outvar, outwv, output_wcs.pscale

    # Assemble the output HDUList with bookkeeping keywords per input beam
    p = pyfits.PrimaryHDU()
    p.header['ID'] = (beams[0].id, 'Object ID')
    p.header['WMIN'] = (wlimit[0], 'Minimum wavelength')
    p.header['WMAX'] = (wlimit[1], 'Maximum wavelength')
    p.header['DLAM'] = (dlam, 'Delta wavelength')
    p.header['SSCALE'] = (spatial_scale, 'Spatial scale factor w.r.t native')
    p.header['FCONTAM'] = (fcontam, 'Contamination weight')
    p.header['PIXFRAC'] = (pixfrac, 'Drizzle PIXFRAC')
    p.header['DRIZKRNL'] = (kernel, 'Drizzle kernel')
    p.header['BEAM'] = (beams[0].beam.beam, 'Grism order')

    p.header['NINPUT'] = (len(beams), 'Number of drizzled beams')
    exptime = 0.
    for i, beam in enumerate(beams):
        p.header['FILE{0:04d}'.format(i+1)] = (beam.grism.parent_file,
                                             'Parent filename')
        p.header['GRIS{0:04d}'.format(i+1)] = (beam.grism.filter,
                                             'Beam grism element')
        p.header['PA{0:04d}'.format(i+1)] = (beam.get_dispersion_PA(),
                                             'PA of dispersion axis')
        exptime += beam.grism.exptime

    p.header['EXPTIME'] = (exptime, 'Total exposure time [s]')

    h = out_header.copy()

    grism_sci = pyfits.ImageHDU(data=outsci, header=h, name='SCI')
    grism_wht = pyfits.ImageHDU(data=outwht, header=h, name='WHT')

    hdul = pyfits.HDUList([p, grism_sci, grism_wht])

    return hdul
def drizzle_to_wavelength(beams, wcs=None, ra=0., dec=0., wave=1.e4, size=5,
                          pixscale=0.1, pixfrac=0.6, kernel='square',
                          direct_extension='REF', fcontam=0.2, ds9=None):
    """Drizzle a cutout at a specific wavelength from a list of `BeamCutout`s

    Parameters
    ----------
    beams : list of `~.model.BeamCutout` objects.

    wcs : `~astropy.wcs.WCS` or None
        Pre-determined WCS.  If not specified, generate one based on `ra`,
        `dec` and `pixscale`

    ra, dec, wave : float
        Sky coordinates and central wavelength

    size : float
        Size of the output thumbnail, in arcsec

    pixscale : float
        Pixel scale of the output thumbnail, in arcsec

    pixfrac : float
        Drizzle PIXFRAC (for `kernel` = 'point')

    kernel : str, ('square' or 'point')
        Drizzle kernel to use

    direct_extension : str, ('SCI' or 'REF')
        Extension of `self.direct.data` to drizzle for the thumbnail

    fcontam: float
        Factor by which to scale the contamination arrays and add to the
        pixel variances.

    ds9 : `~grizli.ds9.DS9`, optional
        Display each step of the drizzling to an open DS9 window

    Returns
    -------
    hdu : `~astropy.io.fits.HDUList`
        FITS HDUList with the drizzled thumbnail (DSCI/DWHT per direct
        filter), line (LINE/LINEWHT), continuum (CONTINUUM) and
        contamination (CONTAM) cutouts.  Returns False if `beams` is empty.
    """
    # Prefer the patched "drizzle" fork (version string 1.12.99) that accepts
    # arbitrary input/output arrays; otherwise fall back to drizzlepac.
    try:
        import drizzle
        if drizzle.__version__ != '1.12.99':
            # Not the fork that works for all input/output arrays
            raise(ImportError)

        #print('drizzle!!')

        from drizzle.dodrizzle import dodrizzle
        drizzler = dodrizzle
        dfillval = '0'
    except:
        from drizzlepac.astrodrizzle import adrizzle
        adrizzle.log.setLevel('ERROR')
        drizzler = adrizzle.do_driz
        dfillval = 0

    # Nothing to do
    if len(beams) == 0:
        return False

    ### Get output header and WCS
    if wcs is None:
        header, output_wcs = utils.make_wcsheader(ra=ra, dec=dec, size=size, pixscale=pixscale, get_hdu=False)
    else:
        output_wcs = wcs.copy()
        if not hasattr(output_wcs, 'pscale'):
            output_wcs.pscale = utils.get_wcs_pscale(output_wcs)

        header = utils.to_header(output_wcs, relax=True)

    ### Initialize data
    sh = (header['NAXIS2'], header['NAXIS1'])

    # Line map accumulators
    outsci = np.zeros(sh, dtype=np.float32)
    outwht = np.zeros(sh, dtype=np.float32)
    outctx = np.zeros(sh, dtype=np.int32)

    # Continuum model accumulators
    coutsci = np.zeros(sh, dtype=np.float32)
    coutwht = np.zeros(sh, dtype=np.float32)
    coutctx = np.zeros(sh, dtype=np.int32)

    # Contamination accumulators
    xoutsci = np.zeros(sh, dtype=np.float32)
    xoutwht = np.zeros(sh, dtype=np.float32)
    xoutctx = np.zeros(sh, dtype=np.int32)

    #direct_filters = np.unique([b.direct.filter for b in self.beams])
    # Per-beam direct filter; if any beam has no REF image, fall back to
    # SCI for it and all subsequent beams
    all_direct_filters = []
    for beam in beams:
        if direct_extension == 'REF':
            if beam.direct['REF'] is None:
                filt_i = beam.direct.ref_filter
                direct_extension = 'SCI'
            else:
                filt_i = beam.direct.filter

        all_direct_filters.append(filt_i)

    direct_filters = np.unique(all_direct_filters)

    # One direct thumbnail accumulator set per unique filter
    doutsci, doutwht, doutctx = {}, {}, {}
    for f in direct_filters:
        doutsci[f] = np.zeros(sh, dtype=np.float32)
        doutwht[f] = np.zeros(sh, dtype=np.float32)
        doutctx[f] = np.zeros(sh, dtype=np.int32)

    # doutsci = np.zeros(sh, dtype=np.float32)
    # doutwht = np.zeros(sh, dtype=np.float32)
    # doutctx = np.zeros(sh, dtype=np.int32)

    ## Loop through beams and run drizzle
    for i, beam in enumerate(beams):
        ## Get specific wavelength WCS for each beam
        beam_header, beam_wcs = beam.get_wavelength_wcs(wave)

        ## Make sure CRPIX set correctly for the SIP header
        for j in [0,1]:
            # if beam_wcs.sip is not None:
            #     beam_wcs.sip.crpix[j] = beam_wcs.wcs.crpix[j]
            if beam.direct.wcs.sip is not None:
                beam.direct.wcs.sip.crpix[j] = beam.direct.wcs.wcs.crpix[j]

            for wcs_ext in [beam_wcs.sip]:
                if wcs_ext is not None:
                    wcs_ext.crpix[j] = beam_wcs.wcs.crpix[j]

        # ACS requires additional wcs attributes: shift the lookup-table
        # distortions by the offset of CRPIX from the nominal chip center
        ACS_CRPIX = [4096/2,2048/2]
        dx_crpix = beam_wcs.wcs.crpix[0] - ACS_CRPIX[0]
        dy_crpix = beam_wcs.wcs.crpix[1] - ACS_CRPIX[1]
        for wcs_ext in [beam_wcs.cpdis1, beam_wcs.cpdis2, beam_wcs.det2im1, beam_wcs.det2im2]:
            if wcs_ext is not None:
                wcs_ext.crval[0] += dx_crpix
                wcs_ext.crval[1] += dy_crpix

        # Science with contamination, background and extra lines removed
        beam_data = beam.grism.data['SCI'] - beam.contam
        if hasattr(beam, 'background'):
            beam_data -= beam.background

        if hasattr(beam, 'extra_lines'):
            beam_data -= beam.extra_lines

        beam_continuum = beam.beam.model*1
        if hasattr(beam.beam, 'pscale_array'):
            beam_continuum *= beam.beam.pscale_array

        # Downweight contamination
        if fcontam > 0:
            # wht = 1/beam.ivar + (fcontam*beam.contam)**2
            # wht = np.cast[np.float32](1/wht)
            # wht[~np.isfinite(wht)] = 0.

            contam_weight = np.exp(-(fcontam*np.abs(beam.contam)*np.sqrt(beam.ivar)))
            wht = beam.ivar*contam_weight
            wht[~np.isfinite(wht)] = 0.
        else:
            wht = beam.ivar*1

        ### Convert to f_lambda integrated line fluxes:
        ### (Inverse of the aXe sensitivity) x (size of pixel in \AA)
        sens = np.interp(wave, beam.beam.lam, beam.beam.sensitivity,
                         left=0, right=0)

        dlam = np.interp(wave, beam.beam.lam[1:], np.diff(beam.beam.lam))
        # 1e-17 erg/s/cm2 #, scaling closer to e-/s
        sens *= 1.e-17
        sens *= 1./dlam

        if sens == 0:
            # Requested wavelength is outside this beam's coverage
            continue
        else:
            wht *= sens**2
            beam_data /= sens
            beam_continuum /= sens

        ###### Go drizzle

        ### Contamination-cleaned
        drizzler(beam_data, beam_wcs, wht, output_wcs,
                 outsci, outwht, outctx, 1., 'cps', 1,
                 wcslin_pscale=beam.grism.wcs.pscale, uniqid=1,
                 pixfrac=pixfrac, kernel=kernel, fillval=dfillval)

        ### Continuum
        drizzler(beam_continuum, beam_wcs, wht, output_wcs,
                 coutsci, coutwht, coutctx, 1., 'cps', 1,
                 wcslin_pscale=beam.grism.wcs.pscale, uniqid=1,
                 pixfrac=pixfrac, kernel=kernel, fillval=dfillval)

        ### Contamination
        drizzler(beam.contam, beam_wcs, wht, output_wcs,
                 xoutsci, xoutwht, xoutctx, 1., 'cps', 1,
                 wcslin_pscale=beam.grism.wcs.pscale, uniqid=1,
                 pixfrac=pixfrac, kernel=kernel, fillval=dfillval)

        ### Direct thumbnail
        filt_i = all_direct_filters[i]

        if direct_extension == 'REF':
            thumb = beam.direct['REF']
            thumb_wht = np.cast[np.float32]((thumb != 0)*1)
        else:
            thumb = beam.direct[direct_extension]#/beam.direct.photflam
            thumb_wht = 1./(beam.direct.data['ERR']/beam.direct.photflam)**2
            thumb_wht[~np.isfinite(thumb_wht)] = 0

        drizzler(thumb, beam.direct.wcs, thumb_wht, output_wcs,
                 doutsci[filt_i], doutwht[filt_i], doutctx[filt_i],
                 1., 'cps', 1,
                 wcslin_pscale=beam.direct.wcs.pscale, uniqid=1,
                 pixfrac=pixfrac, kernel=kernel, fillval=dfillval)

        ## Show in ds9
        if ds9 is not None:
            ds9.view((outsci-coutsci), header=header)

    ## Scaling of drizzled outputs
    #print 'Pscale: ', output_wcs.pscale
    #outsci /= (output_wcs.pscale)**2
    #coutsci /= (output_wcs.pscale)**2
    # doutsci /= (output_wcs.pscale)**2

    outwht *= (beams[0].grism.wcs.pscale/output_wcs.pscale)**4
    coutwht *= (beams[0].grism.wcs.pscale/output_wcs.pscale)**4
    xoutwht *= (beams[0].grism.wcs.pscale/output_wcs.pscale)**4

    # Iterate over the *unique* filters here: looping over
    # `all_direct_filters` (one entry per beam) would re-apply the scaling
    # once per beam sharing the same filter.
    for filt_i in direct_filters:
        doutwht[filt_i] *= (beams[0].direct.wcs.pscale/output_wcs.pscale)**4

    ### Make output FITS products
    p = pyfits.PrimaryHDU()
    p.header['ID'] = (beams[0].id, 'Object ID')
    p.header['RA'] = (ra, 'Central R.A.')
    p.header['DEC'] = (dec, 'Central Decl.')
    p.header['PIXFRAC'] = (pixfrac, 'Drizzle PIXFRAC')
    p.header['DRIZKRNL'] = (kernel, 'Drizzle kernel')

    p.header['NINPUT'] = (len(beams), 'Number of drizzled beams')
    for i, beam in enumerate(beams):
        p.header['FILE{0:04d}'.format(i+1)] = (beam.grism.parent_file,
                                             'Parent filename')
        p.header['GRIS{0:04d}'.format(i+1)] = (beam.grism.filter,
                                             'Beam grism element')
        p.header['PA{0:04d}'.format(i+1)] = (beam.get_dispersion_PA(),
                                             'PA of dispersion axis')

    h = header.copy()
    # NOTE(review): `beam` here is the last beam of the loop above
    h['ID'] = (beam.id, 'Object ID')
    h['PIXFRAC'] = (pixfrac, 'Drizzle PIXFRAC')
    h['DRIZKRNL'] = (kernel, 'Drizzle kernel')

    p.header['NDFILT'] = len(direct_filters), 'Number of direct image filters'
    for i, filt_i in enumerate(direct_filters):
        p.header['DFILT{0:02d}'.format(i+1)] = filt_i
        p.header['NFILT{0:02d}'.format(i+1)] = all_direct_filters.count(filt_i), 'Number of beams with this direct filter'

    HDUL = [p]

    # One DSCI/DWHT extension pair per direct filter, keyed by EXTVER
    for i, filt_i in enumerate(direct_filters):
        h['FILTER'] = (filt_i, 'Direct image filter')

        thumb_sci = pyfits.ImageHDU(data=doutsci[filt_i], header=h,
                                    name='DSCI')
        thumb_wht = pyfits.ImageHDU(data=doutwht[filt_i], header=h,
                                    name='DWHT')

        thumb_sci.header['EXTVER'] = filt_i
        thumb_wht.header['EXTVER'] = filt_i

        HDUL += [thumb_sci, thumb_wht]

    #thumb_seg = pyfits.ImageHDU(data=seg_slice, header=h, name='DSEG')

    h['FILTER'] = (beam.grism.filter, 'Grism filter')
    h['WAVELEN'] = (wave, 'Central wavelength')

    # LINE is the continuum-subtracted science image
    grism_sci = pyfits.ImageHDU(data=outsci-coutsci, header=h, name='LINE')
    grism_cont = pyfits.ImageHDU(data=coutsci, header=h, name='CONTINUUM')
    grism_contam = pyfits.ImageHDU(data=xoutsci, header=h, name='CONTAM')
    grism_wht = pyfits.ImageHDU(data=outwht, header=h, name='LINEWHT')

    #HDUL = [p, thumb_sci, thumb_wht, grism_sci, grism_cont, grism_contam, grism_wht]
    HDUL += [grism_sci, grism_cont, grism_contam, grism_wht]

    return pyfits.HDUList(HDUL)
def show_drizzle_HDU(hdu, diff=True):
    """Make a figure from the multiple extensions in the drizzled grism file.

    Parameters
    ----------
    hdu : `~astropy.io.fits.HDUList`
        HDU list output by `drizzle_grisms_and_PAs`.

    diff : bool
        If True, then plot the stacked spectrum minus the model.

    Returns
    -------
    fig : `~matplotlib.figure.Figure`
        The figure.
    """
    from collections import OrderedDict
    from matplotlib.gridspec import GridSpec
    from matplotlib.ticker import MultipleLocator

    h0 = hdu[0].header
    NX = h0['NGRISM']
    NY = 0

    # Number of PAs per grism; the figure gets one row per PA plus a
    # bottom row for the stacked spectra
    grisms = OrderedDict()

    for ig in range(NX):
        g = h0['GRISM{0:03d}'.format(ig+1)]
        NY = np.maximum(NY, h0['N'+g])
        grisms[g] = h0['N'+g]

    NY += 1

    fig = plt.figure(figsize=(5*NX, 1*NY))

    # Two columns per grism: narrow kernel thumbnail + wide 2D spectrum
    widths = []
    for i in range(NX):
        widths.extend([0.2, 1])

    gs = GridSpec(NY, NX*2, height_ratios=[1]*NY, width_ratios=widths)

    for ig, g in enumerate(grisms):
        sci_i = hdu['SCI',g]
        wht_i = hdu['WHT',g]
        model_i = hdu['MODEL',g]
        kern_i = hdu['KERNEL',g]
        h_i = sci_i.header

        clip = wht_i.data > 0
        if clip.sum() == 0:
            clip = np.isfinite(wht_i.data)

        # Display stretch from the stack's weight map and 98th-percentile
        # science level; the same stretch is reused for the per-PA rows
        avg_rms = 1/np.median(np.sqrt(wht_i.data[clip]))
        vmax = np.maximum(1.1*np.percentile(sci_i.data[clip],98),
                          5*avg_rms)

        vmax_kern = 1.1*np.percentile(kern_i.data,99.5)

        # Kernel (stacked) in the bottom row
        ax = fig.add_subplot(gs[NY-1, ig*2+0])
        sh = kern_i.data.shape
        extent = [0, sh[1], 0, sh[0]]
        ax.imshow(kern_i.data, origin='lower', interpolation='Nearest',
                  vmin=-0.1*vmax_kern, vmax=vmax_kern, cmap=plt.cm.viridis_r,
                  extent=extent, aspect='auto')
        ax.set_xticklabels([]); ax.set_yticklabels([])
        ax.xaxis.set_tick_params(length=0)
        ax.yaxis.set_tick_params(length=0)

        # Spectrum (stacked), optionally minus the model
        sh = sci_i.data.shape
        extent = [h_i['WMIN'], h_i['WMAX'], 0, sh[0]]

        ax = fig.add_subplot(gs[NY-1, ig*2+1])

        if diff:
            #print('xx DIFF!')
            m = model_i.data
        else:
            m = 0

        ax.imshow(sci_i.data-m, origin='lower',
                  interpolation='Nearest', vmin=-0.1*vmax, vmax=vmax,
                  extent=extent, cmap = plt.cm.viridis_r,
                  aspect='auto')
        ax.set_yticklabels([])
        ax.set_xlabel(r'$\lambda$ ($\mu$m) - '+g)
        ax.xaxis.set_major_locator(MultipleLocator(GRISM_MAJOR[g]))

        # One row per individual PA of this grism
        for ip in range(grisms[g]):
            #print(ip, ig)
            pa = h0['{0}{1:02d}'.format(g, ip+1)]
            sci_i = hdu['SCI','{0},{1}'.format(g, pa)]
            wht_i = hdu['WHT','{0},{1}'.format(g, pa)]
            kern_i = hdu['KERNEL','{0},{1}'.format(g, pa)]
            h_i = sci_i.header

            # Kernel
            ax = fig.add_subplot(gs[ip, ig*2+0])
            sh = kern_i.data.shape
            extent = [0, sh[1], 0, sh[0]]
            ax.imshow(kern_i.data, origin='lower', interpolation='Nearest',
                      vmin=-0.1*vmax_kern, vmax=vmax_kern, extent=extent,
                      cmap=plt.cm.viridis_r, aspect='auto')
            ax.set_xticklabels([]); ax.set_yticklabels([])
            ax.xaxis.set_tick_params(length=0)
            ax.yaxis.set_tick_params(length=0)

            # Spectrum
            sh = sci_i.data.shape
            extent = [h_i['WMIN'], h_i['WMAX'], 0, sh[0]]

            ax = fig.add_subplot(gs[ip, ig*2+1])
            ax.imshow(sci_i.data, origin='lower',
                      interpolation='Nearest', vmin=-0.1*vmax, vmax=vmax,
                      extent=extent, cmap = plt.cm.viridis_r,
                      aspect='auto')
            ax.set_yticklabels([]); ax.set_xticklabels([])
            ax.xaxis.set_major_locator(MultipleLocator(GRISM_MAJOR[g]))

            # Label each row with its PA; label the object ID once, in the
            # top-right panel of the last grism
            ax.text(0.015, 0.94, '{0:3.0f}'.format(pa), ha='left',
                    va='top',
                    transform=ax.transAxes, fontsize=8,
                    backgroundcolor='w')

            if (ig == (NX-1)) & (ip == 0):
                ax.text(0.98, 0.94, 'ID = {0}'.format(h0['ID']),
                        ha='right', va='top', transform=ax.transAxes,
                        fontsize=8, backgroundcolor='w')

    gs.tight_layout(fig, pad=0.1)
    return fig
def drizzle_2d_spectrum_wcs(beams, data=None, wlimit=[1.05, 1.75], dlam=50,
                            spatial_scale=1, NY=10, pixfrac=0.6, kernel='square',
                            convert_to_flambda=True, fcontam=0.2, fill_wht=False,
                            ds9=None):
    """Drizzle 2D spectrum from a list of beams

    Parameters
    ----------
    beams : list of `~.model.BeamCutout` objects

    data : None or list
        optionally, drizzle data specified in this list rather than the
        contamination-subtracted arrays from each beam.

    wlimit : [float, float]
        Limits on the wavelength array to drizzle ([wlim, wmax])

    dlam : float
        Delta wavelength per pixel

    spatial_scale : float
        Relative scaling of the spatial axis (1 = native pixels)

    NY : int
        Size of the cutout in the spatial dimension, in output pixels

    pixfrac : float
        Drizzle PIXFRAC (for `kernel` = 'point')

    kernel : str, ('square' or 'point')
        Drizzle kernel to use

    convert_to_flambda : bool, float
        Convert the 2D spectrum to physical units using the sensitivity curves
        and if float provided, scale the flux densities by that value

    fcontam: float
        Factor by which to scale the contamination arrays and add to the
        pixel variances.

    fill_wht: bool
        Accepted for signature parity with `drizzle_2d_spectrum` but not
        used in this function.

    ds9: `~grizli.ds9.DS9`
        Show intermediate steps of the drizzling

    Returns
    -------
    hdu : `~astropy.io.fits.HDUList`
        FITS HDUList with the drizzled 2D spectrum and weight arrays
    """
    # Prefer the patched "drizzle" fork (version string 1.12.99) that accepts
    # arbitrary input/output arrays; otherwise fall back to drizzlepac.
    try:
        import drizzle
        if drizzle.__version__ != '1.12.99':
            # Not the fork that works for all input/output arrays
            raise(ImportError)

        #print('drizzle!!')

        from drizzle.dodrizzle import dodrizzle
        drizzler = dodrizzle
        dfillval = '0'
    except:
        from drizzlepac.astrodrizzle import adrizzle
        adrizzle.log.setLevel('ERROR')
        drizzler = adrizzle.do_driz
        dfillval = 0

    from stwcs import distortion
    from astropy import log
    log.setLevel('ERROR')
    #log.disable_warnings_logging()
    # NOTE(review): `adrizzle` is only bound when the drizzlepac fallback
    # above is taken; this line would raise NameError if the "drizzle" fork
    # import succeeded -- confirm which branch is exercised in practice.
    adrizzle.log.setLevel('ERROR')

    NX = int(np.round(np.diff(wlimit)[0]*1.e4/dlam)) // 2
    center = np.mean(wlimit[:2])*1.e4

    # NOTE(review): this header/WCS is recomputed in the `if True:` block
    # below and effectively discarded.
    out_header, output_wcs = utils.make_spectrum_wcsheader(center_wave=center,
                                 dlam=dlam, NX=NX,
                                 spatial_scale=spatial_scale, NY=NY)

    pixscale = 0.128*spatial_scale

    # # Get central RA, reference pixel of beam[0]
    # #rd = beams[0].get_sky_coords()
    # x0 = beams[0].beam.x0.reshape((1,2))
    # #x0[0,1] += beam.direct.origin[1]-beam.grism.origin[1]
    # rd = beam.grism.wcs.all_pix2world(x0,1)[0]
    # theta = 270-beams[0].get_dispersion_PA()
    #out_header, output_wcs = utils.make_wcsheader(ra=rd[0], dec=rd[1], size=[50,10], pixscale=pixscale, get_hdu=False, theta=theta)

    if True:
        # Build an undistorted output WCS rotated by the trace slope of the
        # first beam
        theta = -np.arctan2(np.diff(beams[0].beam.ytrace)[0], 1)

        undist_wcs = distortion.utils.output_wcs([beams[0].grism.wcs],undistort=True)
        undist_wcs = utils.transform_wcs(undist_wcs, rotation=theta, scale=undist_wcs.pscale/pixscale)

        output_wcs = undist_wcs.copy()
        out_header = utils.to_header(output_wcs)

        # Direct image
        d_undist_wcs = distortion.utils.output_wcs([beams[0].direct.wcs],undistort=True)
        d_undist_wcs = utils.transform_wcs(d_undist_wcs, rotation=0., scale=d_undist_wcs.pscale/pixscale)

        d_output_wcs = d_undist_wcs.copy()

        # Make square
        # NOTE(review): `_naxis1`/`_naxis2` are private astropy.wcs
        # attributes; this code is tied to older astropy versions.
        dx = d_output_wcs._naxis1-d_output_wcs._naxis2
        d_output_wcs._naxis1 = d_output_wcs._naxis2
        d_output_wcs.wcs.crpix[0] -= dx/2.

        d_out_header = utils.to_header(d_output_wcs)

    #delattr(output_wcs, 'orientat')
    #beam_header = utils.to_header(beam_wcs)
    #output_wcs = beam_wcs
    #output_wcs = pywcs.WCS(beam_header, relax=True)
    #output_wcs.pscale = utils.get_wcs_pscale(output_wcs)

    # shift CRPIX to reference position of beam[0]

    sh = (out_header['NAXIS2'], out_header['NAXIS1'])
    sh_d = (d_out_header['NAXIS2'], d_out_header['NAXIS1'])

    # Spectrum accumulators
    outsci = np.zeros(sh, dtype=np.float32)
    outwht = np.zeros(sh, dtype=np.float32)
    outctx = np.zeros(sh, dtype=np.int32)

    # Direct-image accumulators
    doutsci = np.zeros(sh_d, dtype=np.float32)
    doutwht = np.zeros(sh_d, dtype=np.float32)
    doutctx = np.zeros(sh_d, dtype=np.int32)

    # Contamination-weight accumulators for the variance
    outvar = np.zeros(sh, dtype=np.float32)
    outwv = np.zeros(sh, dtype=np.float32)
    outcv = np.zeros(sh, dtype=np.int32)

    # Drizzled wavelength map, used to recover the output wavelength grid
    outls = np.zeros(sh, dtype=np.float32)
    outlw = np.zeros(sh, dtype=np.float32)
    outlc = np.zeros(sh, dtype=np.int32)

    if data is None:
        data = []
        for i, beam in enumerate(beams):
            ### Contamination-subtracted
            beam_data = beam.grism.data['SCI'] - beam.contam
            data.append(beam_data)

    for i, beam in enumerate(beams):
        ## Get specific WCS for each beam
        beam_header, beam_wcs = beam.get_2d_wcs()
        beam_wcs = beam.grism.wcs.deepcopy()

        # Shift SIP reference by the grism/direct origin offset
        dx_sip = beam.grism.origin[1] - beam.direct.origin[1]
        #beam_wcs.sip.crpix[0] += dx_sip
        for wcs_ext in [beam_wcs.sip]:
            if wcs_ext is not None:
                wcs_ext.crpix[0] += dx_sip

        for wcs_ext in [beam_wcs.cpdis1, beam_wcs.cpdis2, beam_wcs.det2im1, beam_wcs.det2im2]:
            if wcs_ext is not None:
                wcs_ext.crval[0] += dx_sip

        # Shift y for trace: align the spectral trace with the output rows
        xy0 = beam.grism.wcs.all_world2pix(output_wcs.wcs.crval.reshape((1,2)),0)[0]
        dy = np.interp(xy0[0], np.arange(beam.beam.sh_beam[1]), beam.beam.ytrace)
        #beam_wcs.sip.crpix[1] += dy
        beam_wcs.wcs.crpix[1] += dy
        for wcs_ext in [beam_wcs.sip]:
            if wcs_ext is not None:
                wcs_ext.crpix[1] += dy

        for wcs_ext in [beam_wcs.cpdis1, beam_wcs.cpdis2, beam_wcs.det2im1, beam_wcs.det2im2]:
            if wcs_ext is not None:
                wcs_ext.crval[1] += dy

        # Direct-image input: ERR-weighted SCI, or uniformly-weighted REF
        d_beam_wcs = beam.direct.wcs
        if beam.direct['REF'] is None:
            d_wht = 1./beam.direct['ERR']**2
            d_wht[~np.isfinite(d_wht)] = 0
            d_sci = beam.direct['SCI']*1
        else:
            d_sci = beam.direct['REF']*1
            d_wht = d_sci*0.+1

        # Only drizzle the object's own segmentation pixels
        d_sci *= (beam.beam.seg == beam.id)

        # Downweight contamination
        # wht = 1/beam.ivar + (fcontam*beam.contam)**2
        # wht = np.cast[np.float32](1/wht)
        # wht[~np.isfinite(wht)] = 0.

        contam_weight = np.exp(-(fcontam*np.abs(beam.contam)*np.sqrt(beam.ivar)))
        wht = beam.ivar*contam_weight
        wht[~np.isfinite(wht)] = 0.
        contam_weight[beam.ivar == 0] = 0

        data_i = data[i]*1.
        scl = 1.
        if convert_to_flambda:
            #data_i *= convert_to_flambda/beam.beam.sensitivity
            #wht *= (beam.beam.sensitivity/convert_to_flambda)**2

            # Per-column sensitivity from the flat-spectrum model's
            # column sums
            scl = convert_to_flambda#/1.e-17
            scl *= 1./beam.flat_flam.reshape(beam.beam.sh_beam).sum(axis=0)
            #scl = convert_to_flambda/beam.beam.sensitivity

        data_i *= scl
        wht *= (1/scl)**2
        #contam_weight *= scl

        # Mask pixels where either data or scale factor is non-finite
        wht[~np.isfinite(data_i+scl)] = 0
        contam_weight[~np.isfinite(data_i+scl)] = 0
        data_i[~np.isfinite(data_i+scl)] = 0

        ###### Go drizzle

        # Wavelength image (constant along rows) to recover the grid later
        data_wave = np.dot(np.ones(beam.beam.sh_beam[0])[:,None], beam.beam.lam[None,:])
        drizzler(data_wave, beam_wcs, wht*0.+1, output_wcs,
                 outls, outlw, outlc, 1., 'cps', 1,
                 wcslin_pscale=1., uniqid=1,
                 pixfrac=1, kernel='square', fillval=dfillval)

        ### Direct image
        drizzler(d_sci, d_beam_wcs, d_wht, d_output_wcs,
                 doutsci, doutwht, doutctx, 1., 'cps', 1,
                 wcslin_pscale=d_beam_wcs.pscale, uniqid=1,
                 pixfrac=pixfrac, kernel=kernel, fillval=dfillval)

        ### Contamination-cleaned
        drizzler(data_i, beam_wcs, wht, output_wcs,
                 outsci, outwht, outctx, 1., 'cps', 1,
                 wcslin_pscale=beam_wcs.pscale, uniqid=1,
                 pixfrac=pixfrac, kernel=kernel, fillval=dfillval)

        # For variance
        drizzler(contam_weight, beam_wcs, wht, output_wcs,
                 outvar, outwv, outcv, 1., 'cps', 1,
                 wcslin_pscale=beam_wcs.pscale, uniqid=1,
                 pixfrac=pixfrac, kernel=kernel, fillval=dfillval)

        if ds9 is not None:
            ds9.view(outsci, header=out_header)

        # if True:
        #     w, f, e = beam.beam.optimal_extract(data_i, ivar=beam.ivar)
        #     plt.scatter(w, f, marker='.', color='k', alpha=0.5)

    ### Correct for drizzle scaling
    #outsci /= output_wcs.pscale**2
    outls /= output_wcs.pscale**2
    # Median along the spatial axis gives the output wavelength grid
    wave = np.median(outls, axis=0)

    # # Testing
    # fl = (sp[1].data*mask).sum(axis=0)

    # variance: normalize the drizzled contamination weight by its own
    # weight map, then invert to get the output inverse variance
    outvar /= outwv#*output_wcs.pscale**2
    outwht = 1/outvar
    outwht[(outvar == 0) | (~np.isfinite(outwht))] = 0

    #return outwht, outsci, outvar, outwv, output_wcs.pscale

    # Assemble the output HDUList with bookkeeping keywords per input beam
    p = pyfits.PrimaryHDU()
    p.header['ID'] = (beams[0].id, 'Object ID')
    p.header['WMIN'] = (wave[0], 'Minimum wavelength')
    p.header['WMAX'] = (wave[-1], 'Maximum wavelength')
    p.header['DLAM'] = ((wave[-1]-wave[0])/wave.size, 'Delta wavelength')

    p.header['FCONTAM'] = (fcontam, 'Contamination weight')
    p.header['PIXFRAC'] = (pixfrac, 'Drizzle PIXFRAC')
    p.header['DRIZKRNL'] = (kernel, 'Drizzle kernel')

    p.header['NINPUT'] = (len(beams), 'Number of drizzled beams')
    for i, beam in enumerate(beams):
        p.header['FILE{0:04d}'.format(i+1)] = (beam.grism.parent_file,
                                             'Parent filename')
        p.header['GRIS{0:04d}'.format(i+1)] = (beam.grism.filter,
                                             'Beam grism element')

    h = out_header.copy()
    for k in p.header:
        h[k] = p.header[k]

    direct_sci = pyfits.ImageHDU(data=doutsci, header=d_out_header, name='DSCI')
    grism_sci = pyfits.ImageHDU(data=outsci, header=h, name='SCI')
    grism_wht = pyfits.ImageHDU(data=outwht, header=h, name='WHT')

    hdul = pyfits.HDUList([p, grism_sci, grism_wht, direct_sci])

    return hdul
| albertfxwang/grizli | grizli/multifit.py | Python | mit | 180,455 |
# Copyright 2015-2021 D.G. MacCarthy <https://dmaccarthy.github.io/sc8pr>
#
# This file is part of "sc8pr".
#
# "sc8pr" is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# "sc8pr" is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with "sc8pr". If not, see <http://www.gnu.org/licenses/>.
from zipfile import ZipFile
from json import loads, dumps
from sc8pr import PixelData, version
class S8Vfile:
    """Read and append compressed `PixelData` binary frames in an S8V ZIP file.

    Frames are stored as ZIP members named "0", "1", ... plus a "metadata"
    member containing a JSON dict.  A frame identical to the previous one is
    stored as an *empty* member to save space; `read` and `clip` resolve
    empty members back to the most recent non-empty frame.
    """

    @staticmethod
    def info(fn):
        "Return the JSON metadata dict of an S8V file ({} if absent or invalid)"
        with ZipFile(fn, "r") as zf:
            try: meta = loads(str(zf.read("metadata"), encoding="utf-8"))
            except Exception: meta = {}
        return meta

    def __init__(self, fn, mode="r", **meta):
        self._zf = ZipFile(fn, mode)
        self.frames = 0
        try:
            # Existing archive: load its stored metadata
            data = self._zf.read("metadata")
            self.meta = loads(str(data, encoding="utf-8"))
        except Exception:
            # New archive (or missing/corrupt metadata): write metadata
            # built from the keyword arguments
            metadata = {"Saved By": "sc8pr{}".format(version)}
            metadata.update(meta)
            self.meta = metadata
            data = bytes(dumps(metadata, ensure_ascii=False), encoding="utf-8")
            self._zf.writestr("metadata", data)
        # Count the members with integer names (the frames)
        names = self._zf.namelist()
        for name in names:
            try:
                name = int(name)
                self.frames += 1
            except ValueError: pass
        # Remember the last stored frame so duplicate appends can be elided
        self._last = bytes(self.read(self.frames - 1)) if self.frames else None

    def append(self, data):
        "Append a PixelData instance to the file"
        if type(data) is PixelData: data.compress()
        else: data = PixelData(data, True)
        data = bytes(data)
        # Store an empty member when the frame repeats the previous one
        if data == self._last: data = b""
        else: self._last = data
        self._zf.writestr(str(self.frames), data)
        self.frames += 1

    def read(self, frame, allowEmpty=False, compress=True):
        """Read one frame as a compressed PixelData instance.

        With `allowEmpty=False`, an empty member (a repeated frame) is
        resolved by walking backwards to the most recent non-empty frame;
        with `allowEmpty=True`, an empty member yields None.
        """
        data = self._zf.read(str(frame))
        if not allowEmpty:
            while not data:
                # Decrement *before* re-reading so the same empty member
                # is not fetched twice
                frame -= 1
                data = self._zf.read(str(frame))
        return PixelData(data, compress) if data else None

    def clip(self, start=0, end=None):
        "Generate a sequence of consecutive frames as PixelData instances"
        last = None
        for i in range(start, end if end else self.frames):
            pxd = self.read(i, True)
            if pxd: last = pxd
            else: pxd = last   # repeated frame: reuse the previous one
            yield pxd

    def __enter__(self): return self

    def __exit__(self, *args): self._zf.close()

    close = __exit__
    capture = append
| dmaccarthy/sc8pr | sc8pr/misc/s8v.py | Python | gpl-3.0 | 3,010 |
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class ItemReorder(Document):
    """Frappe controller for the "Item Reorder" DocType.

    No custom server-side behavior; all functionality is inherited from
    the base `Document` class.
    """
    pass
from .views import (ManageOrganizationList, ManageOrganizationDetail, ManageOrganizationAddressDetail,
ManageOrganizationAddressList, ManageOrganizationDocumentList, ManageOrganizationDocumentDetail)
from django.conf.urls import patterns, url, include
from surlex.dj import surl
from .views import OrganizationDetail, OrganizationList
# URL routing for the organizations API: public read-only endpoints plus
# "manage" endpoints for organizations, their addresses and documents.
# `surl` routes use surlex syntax (`<pk:#>` appears to capture a numeric
# pk -- see the surlex docs).
urlpatterns = patterns('',
    # Public, read-only organization endpoints
    url(r'^$', OrganizationList.as_view(), name='organization-list'),
    surl(r'^<pk:#>$', OrganizationDetail.as_view(), name='organization-detail'),

    # Management endpoints for organizations
    surl(r'^manage/$', ManageOrganizationList.as_view(), name='manage-organization-list'),
    surl(r'^manage/<pk:#>$', ManageOrganizationDetail.as_view(), name='manage-organization-detail'),

    # Management endpoints for organization addresses
    url(r'^addresses/manage/$', ManageOrganizationAddressList.as_view(), name='manage-organization-address-list'),
    surl(r'^addresses/manage/<pk:#>$', ManageOrganizationAddressDetail.as_view(), name='manage-organization-address-detail'),

    # Management endpoints for organization documents
    url(r'^documents/manage/$', ManageOrganizationDocumentList.as_view(), name='manage-organization-document-list'),
    surl(r'^documents/manage/<pk:#>$', ManageOrganizationDocumentDetail.as_view(), name='manage-organization-document-detail'),
)
| gannetson/sportschooldeopenlucht | apps/organizations/urlsapi.py | Python | bsd-3-clause | 1,217 |
from rest_framework.pagination import LimitOffsetPagination
from rest_framework import status
from rest_framework.response import Response
class AdministratorPagination(LimitOffsetPagination):
    """Limit/offset paginator that wraps page results in the API envelope.

    The response body carries an explicit `status` flag and HTTP `code`
    alongside the standard pagination fields (`next`, `previous`, `count`,
    `results`).
    """

    def get_paginated_response(self, data):
        # Assemble the envelope, then hand it to DRF's Response
        payload = {
            "status": True,
            "code": status.HTTP_200_OK,
            'next': self.get_next_link(),
            'previous': self.get_previous_link(),
            'count': self.count,
            'results': data,
        }
        return Response(payload)
| belatrix/BackendAllStars | administrator/pagination.py | Python | apache-2.0 | 497 |
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation
# All rights reserved.
#
# MIT License:
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#--------------------------------------------------------------------------
import unittest
import pandas as pd
from pandas.util.testing import assert_frame_equal
from azure.storage import BlobService
from azureml import (
BytesIO,
Workspace,
DataTypeIds,
)
from tests import (
id_generator,
load_test_settings,
)
settings = load_test_settings()
class RoundTripTests(unittest.TestCase):
    """Round-trip tests: download reference blobs, parse them with pandas,
    upload the frames as Azure ML Studio datasets, then read the datasets
    back and check they equal the originals. Requires live credentials in
    the test settings file."""
    def setUp(self):
        # Workspace client under test; the blob client only supplies the
        # reference input files.
        self.workspace = Workspace(
            settings.workspace.id,
            settings.workspace.token,
            settings.workspace.endpoint
        )
        self.blob = BlobService(
            settings.storage.account_name,
            settings.storage.account_key
        )
    def _write_blob_contents(self, filename, data):
        """Dump the raw downloaded blob bytes to disk (diagnostics only)."""
        if settings.diagnostics.write_blob_contents:
            with open('original-blob-' + filename, 'wb') as data_file:
                data_file.write(data)
    def _write_serialized_frame(self, filename, data):
        """Dump the re-serialized dataset bytes to disk (diagnostics only)."""
        if settings.diagnostics.write_serialized_frame:
            with open('serialized-frame-' + filename, 'wb') as data_file:
                data_file.write(data)
    def test_download_blob_then_upload_as_dataframe_then_read_dataset(self):
        """Full round trip for every blob listed in settings.storage.blobs."""
        def datatypeid_from_header_and_format(header, format):
            # Map the blob naming convention (header/format) onto the
            # service's dataset type ids.
            if format == 'csv':
                if header == 'wh':
                    return DataTypeIds.GenericCSV
                else:
                    return DataTypeIds.GenericCSVNoHeader
            elif format == 'tsv':
                if header == 'wh':
                    return DataTypeIds.GenericTSV
                else:
                    return DataTypeIds.GenericTSVNoHeader
            elif format == 'txt':
                return DataTypeIds.PlainText
            else:
                self.assertTrue(False, 'Unexpected format')
        def split_blob_name(blob_name):
            # blob naming convention:
            # name_<header>.<format>
            # <header>: WH: with header
            #           NH: no header
            # <format>: CSV: comma separated
            #           TSV: tab separated
            #           TXT: newline separated
            name, format = blob_name.lower().split('.')
            if format != 'txt':
                name, header = name.split('_')
            else:
                header = 'nh'
            return name, format, header
        for blob_name in settings.storage.blobs:
            print(blob_name)
            name, format, header = split_blob_name(blob_name)
            # Read the data from blob storage
            original_data = self.blob.get_blob_to_bytes(settings.storage.container, blob_name)
            self._write_blob_contents(blob_name, original_data)
            # Parse the data to a dataframe using Pandas
            # NOTE(review): sep='\n' for txt treats each line as one field —
            # presumably intentional for "plain text" fixtures; confirm.
            original_dataframe = pd.read_csv(
                BytesIO(original_data),
                header=0 if header == 'wh' else None,
                sep=',' if format == 'csv' else '\t' if format == 'tsv' else '\n',
                encoding='utf-8-sig'
            )
            # Upload the dataframe as a new dataset
            dataset_name = 'unittest' + name + id_generator()
            description = 'safe to be deleted - ' + dataset_name
            data_type_id = datatypeid_from_header_and_format(header, format)
            self.workspace.datasets.add_from_dataframe(
                original_dataframe,
                data_type_id,
                dataset_name,
                description,
            )
            # Get the new dataset
            dataset = self.workspace.datasets[dataset_name]
            self.assertIsNotNone(dataset)
            # Read the dataset as a dataframe
            result_data = dataset.read_as_binary()
            self._write_serialized_frame(blob_name, result_data)
            result_dataframe = dataset.to_dataframe()
            # Verify that the dataframes are equal
            assert_frame_equal(original_dataframe, result_dataframe)
    def test_azureml_example_datasets(self):
        """Download each supported example dataset and re-upload it, skipping
        oversized or known-problematic ones."""
        max_size = 10 * 1024 * 1024
        skip = [
            'Restaurant feature data',
            'IMDB Movie Titles',
            'Book Reviews from Amazon',
        ]
        for dataset in self.workspace.example_datasets:
            if not hasattr(dataset, 'to_dataframe'):
                print('skipped (unsupported format): {0}'.format(dataset.name))
                continue
            if dataset.size > max_size:
                print('skipped (max size): {0}'.format(dataset.name))
                continue
            if dataset.name in skip:
                print('skipped: {0}'.format(dataset.name))
                continue
            print('downloading: ' + dataset.name)
            frame = dataset.to_dataframe()
            print('uploading: ' + dataset.name)
            dataset_name = 'unittest' + dataset.name + id_generator()
            description = 'safe to be deleted - ' + dataset_name
            self.workspace.datasets.add_from_dataframe(frame, dataset.data_type_id, dataset_name, description)
# Allow running this module directly: executes all RoundTripTests.
if __name__ == '__main__':
    unittest.main()
| kod3r/Azure-MachineLearning-ClientLibrary-Python | tests/roundtriptests.py | Python | mit | 6,355 |
# coding=utf-8
# Copyright 2017 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from pants.base.deprecated import deprecated_module
from pants.java.jar.exclude import Exclude
deprecated_module('1.5.0.dev0', 'Use pants.backend.jvm.exclude instead')
# Re-export under the historical name so existing `from ...targets.exclude
# import Exclude` imports keep working while the deprecation above fires;
# the real definition now lives in pants.java.jar.exclude.
Exclude = Exclude
| landism/pants | src/python/pants/backend/jvm/targets/exclude.py | Python | apache-2.0 | 489 |
"""
Django settings for code4sa project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = os.environ.get('DJANGO_DEBUG', 'true') == 'true'
# SECURITY WARNING: keep the secret key used in production secret!
if DEBUG:
SECRET_KEY = '-r&cjf5&l80y&(q_fiidd$-u7&o$=gv)s84=2^a2$o^&9aco0o'
else:
SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY')
# XXX set me: Google Analytics property ID (e.g. 'UA-XXXXXXXX-1').
# The previous placeholder was bare prose ("set this to something"), which
# made this settings module a SyntaxError; read it from the environment so
# the module always imports and each deployment configures its own ID.
GOOGLE_ANALYTICS_ID = os.environ.get('GOOGLE_ANALYTICS_ID', '')

ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'pipeline',
'django_extensions',
'code4sa',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'code4sa.urls'
WSGI_APPLICATION = 'code4sa.wsgi.application'
SESSION_ENGINE = 'django.contrib.sessions.backends.signed_cookies'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
import dj_database_url
db_config = dj_database_url.config(default='sqlite:///db.sqlite3')
db_config['ATOMIC_REQUESTS'] = True
DATABASES = {
'default': db_config,
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Templates
TEMPLATE_DEBUG = DEBUG
TEMPLATE_CONTEXT_PROCESSORS = (
"django.contrib.auth.context_processors.auth",
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.media",
"django.core.context_processors.static",
"django.core.context_processors.tz",
"django.contrib.messages.context_processors.messages",
"code4sa.context_processors.google_analytics",
)
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
ASSETS_DEBUG = DEBUG
ASSETS_URL_EXPIRE = False
# assets must be placed in the 'static' dir of your Django app
# where the compiled assets go
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
# the URL for assets
STATIC_URL = '/static/'
STATICFILES_FINDERS = (
"django.contrib.staticfiles.finders.FileSystemFinder",
"django.contrib.staticfiles.finders.AppDirectoriesFinder",
"pipeline.finders.PipelineFinder",
)
PYSCSS_LOAD_PATHS = [
os.path.join(BASE_DIR, 'code4sa', 'static'),
os.path.join(BASE_DIR, 'code4sa', 'static', 'bower_components'),
]
PIPELINE_CSS = {
'css': {
'source_filenames': (
'bower_components/fontawesome/css/font-awesome.css',
'stylesheets/app.scss',
),
'output_filename': 'app.css',
},
}
PIPELINE_JS = {
'js': {
'source_filenames': (
'bower_components/jquery/dist/jquery.min.js',
'javascript/app.js',
),
'output_filename': 'app.js',
},
}
PIPELINE_CSS_COMPRESSOR = None
PIPELINE_JS_COMPRESSOR = None
PIPELINE_COMPILERS = (
'code4sa.pipeline.PyScssCompiler',
)
# Simplified static file serving.
# https://warehouse.python.org/project/whitenoise/
STATICFILES_STORAGE = 'code4sa.pipeline.GzipManifestPipelineStorage'
# Logging
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'formatters': {
'simple': {
'format': '%(asctime)s %(levelname)s %(module)s %(process)d %(thread)d %(message)s'
}
},
'handlers': {
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'simple'
}
},
'root': {
'handlers': ['console'],
'level': 'ERROR'
},
'loggers': {
# put any custom loggers here
# 'your_package_name': {
# 'level': 'DEBUG' if DEBUG else 'INFO',
# },
'django': {
'level': 'DEBUG' if DEBUG else 'INFO',
}
}
}
| Code4SA/django-template | code4sa/settings.py | Python | mit | 4,773 |
#!/usr/bin/python
#
# Copyright 2012 Anthony Campbell (anthonycampbell.co.uk)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Required imports
import os, getopt, sys, re, subprocess, exceptions
# Constants
_default_output_file = "./output.txt"
_script_directory = os.path.dirname(os.path.realpath(__file__))
# Help
_help = """
Clojure IMDB Parser
__file__ [options]
Simple wrapper script for the Clojure IMDB parser.
Options:
-q --query Option which specifies the query text.
-f --file Option which writes the search result to the defailt output file: __default_output_file__
-o --output [path/to/file] If specified, writes the search result to the a file.
-v --verbose Option to enable verbose output.
-h -? --help Option to display this text.
Examples:
__file__ -q "Clash of the Titans" -o output.txt
__file__ --query "Clash of the Titans" --output output.txt
"""
_help = _help.replace("__file__", __file__)
_help = _help.replace("__default_output_file__", _default_output_file)
# Main method
def main():
# Initialise variables
verbose = False
output = ""
query_term = ""
output_file = ""
latest_jar = ""
try:
opts, args = getopt.getopt(sys.argv[1:], "q:fo:?hv", ["query=", "file", "output=", "help", "verbose"])
except getopt.GetoptError as error:
# Print help information and exit:
print "\n " + str(error)
print _help
sys.exit(2)
for option, argument in opts:
if option in ("-q", "--query"):
query_term = str(argument)
elif option in ("-f", "--file"):
output_file = _default_output_file
elif option in ("-o", "--output"):
output_file = str(argument)
elif option in ("-v", "--verbose"):
verbose = True
elif option in ("-h", "--help"):
print _help
sys.exit(0)
# Check we're good to go
if query_term == None or query_term == "":
print _help
sys.exit(2)
if verbose:
print "\n Clojure IMDB Parser"
try:
# Determine newest parser
process = subprocess.Popen(["ls -r " + _script_directory + "/release | grep \"clojure-imdb-parser.*.jar\" | head -n 1"],
stdout=subprocess.PIPE, shell=True)
latest_jar, stderr = process.communicate()
process.wait()
except exceptions.Exception as error:
print "\n Unable to find latest clojure-imdb-parser.jar:"
print "\n " + str(error)
sys.exit(1)
if latest_jar != None and str(latest_jar) != "":
latest_jar = _script_directory + "/release/" + str(latest_jar)
# Clean up path
pattern = re.compile(r'\n')
latest_jar = pattern.sub(" ", latest_jar).strip()
if verbose:
print "\n Latest clojure-imdb-parser.jar:"
print "\n " + latest_jar + "\n"
try:
# Execute the parser
process = subprocess.Popen(["java", "-jar", latest_jar, query_term, output_file, str(verbose)],
stdout=subprocess.PIPE)
output, stderr = process.communicate()
process.wait()
except exceptions.Exception as error:
print "\n Unable to execute clojure-imdb-parser.jar!"
print "\n " + str(error)
sys.exit(1)
else:
print "\n Unable to find latest clojure-imdb-parser.jar!"
sys.exit(1)
# Where we at?
print output
# If we're being run directly
if __name__ == "__main__":
main()
| acampbell3000/clojure-imdb-parser | run.py | Python | apache-2.0 | 4,140 |
'''
@input: python3 generate_data.py <user_size> <item_size> <factor_size> <user_item_density> <user_factor_density> <folder_name>
@output:
    rating_table.csv
    effect_table.csv
    conversion_table.csv
    rating_list.csv, formats: user_id, item_id, rating_value
    conversion_list.csv, formats: user_id, factor_id
@process:
    1. generate effect matrix with random value from N(u,sigma^2)
    2. generate conversion matrix with random value from N(u,sigma^2)
    3. filter conversion matrix with user_factor_density
    4. generate rating matrix based on effect and conversion matrix
    5. filter rating matrix based on user_item_density
'''
import sys
import numpy
# Command-line parameters, with small demo-sized defaults.
user_size = int(sys.argv[1]) if len(sys.argv)>1 else 20
item_size = int(sys.argv[2]) if len(sys.argv)>2 else 4
factor_size = int(sys.argv[3]) if len(sys.argv)>3 else 5
user_item_density = float(sys.argv[4]) if len(sys.argv)>4 else 0.1
user_factor_density = float(sys.argv[5]) if len(sys.argv)>5 else 0.1
folder_name = sys.argv[6] if len(sys.argv)>6 else "./sample/"
# Gaussian parameters for both generated matrices: N(mu, sigma^2).
mu = 5
sigma = 1
rating = numpy.zeros([user_size,item_size])
# Steps 1-2: draw effect (factor x item) and conversion (user x factor) values.
effect = sigma * numpy.random.randn(factor_size * item_size) + mu
conversion = sigma * numpy.random.randn(user_size * factor_size) + mu
effect = effect.reshape(factor_size,item_size)
# Step 3: keep each user-factor entry with probability user_factor_density.
for i in range(len(conversion)):
    if numpy.random.random() >= user_factor_density:
        conversion[i] = 0.
conversion = conversion.reshape(user_size,factor_size)
# Step 4: rating[i][j] = conversion row i  dot  effect column j.
for i in range(len(rating)):
    for j in range(len(rating[0])):
        rating[i][j] = numpy.dot(conversion[i,:],effect[:,j])
# Step 5: flatten, keep each rating with probability user_item_density, restore shape.
rating = rating.reshape(user_size * item_size)
for i in range(len(rating)):
    if numpy.random.random() >= user_item_density:
        rating[i] = 0.
rating = rating.reshape(user_size, item_size)
print ("rating table: ", rating.shape)
print ("conversion table: ",conversion.shape)
print ("effect table: ",effect.shape)
# Dense table outputs.
numpy.savetxt(folder_name+"rating_table.csv",rating,delimiter=',', fmt ='%.8f')
numpy.savetxt(folder_name+"effect_table.csv",effect,delimiter=',', fmt ='%.8f')
numpy.savetxt(folder_name+"conversion_table.csv",conversion,delimiter=',', fmt ='%.8f')
# Sparse list outputs: only strictly-positive entries are written.
f = open(folder_name+"rating_list.csv","w")
for i in range(len(rating)):
    for j in range(len(rating[0])):
        if rating[i][j] > 0.:
            f.write(str(i)+","+str(j)+","+str(rating[i][j])+"\n")
f.close()
f = open(folder_name+"conversion_list.csv","w")
for i in range(len(conversion)):
    for j in range(len(conversion[0])):
        if conversion[i][j] > 0. :
            f.write(str(i)+","+str(j)+"\n")
f.close()
| JalexChang/cross-media-attribution | data/generator.py | Python | bsd-2-clause | 2,583 |
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
    """South data migration: backfill Commission.issue_datetime from the
    legacy string field issue_time."""
    def forwards(self, orm):
        # issue_time is stored as a string like '2013-01-01T12:00:00.000000';
        # parse it into a real datetime and save it on the new column.
        parse = lambda s: datetime.datetime.strptime(s, '%Y-%m-%dT%H:%M:%S.%f')
        commissions = orm.Commission.objects.all()
        for commission in commissions:
            dt = parse(commission.issue_time)
            commission.issue_datetime = dt
            commission.save()
    def backwards(self, orm):
        "Write your backwards methods here."
    # Frozen model definitions used by South to build the fake ORM above.
    # Do not edit by hand: regenerated by `schemamigration`.
    models = {
        'quotaholder_app.commission': {
            'Meta': {'object_name': 'Commission'},
            'clientkey': ('django.db.models.fields.CharField', [], {'max_length': '4096'}),
            'issue_datetime': ('django.db.models.fields.DateTimeField', [], {}),
            'issue_time': ('django.db.models.fields.CharField', [], {'max_length': '24'}),
            'name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '4096'}),
            'serial': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        'quotaholder_app.holding': {
            'Meta': {'unique_together': "(('holder', 'source', 'resource'),)", 'object_name': 'Holding'},
            'holder': ('django.db.models.fields.CharField', [], {'max_length': '4096', 'db_index': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'limit': ('snf_django.lib.db.fields.IntDecimalField', [], {'max_digits': '38', 'decimal_places': '0'}),
            'resource': ('django.db.models.fields.CharField', [], {'max_length': '4096'}),
            'source': ('django.db.models.fields.CharField', [], {'max_length': '4096', 'null': 'True'}),
            'usage_max': ('snf_django.lib.db.fields.IntDecimalField', [], {'default': '0', 'max_digits': '38', 'decimal_places': '0'}),
            'usage_min': ('snf_django.lib.db.fields.IntDecimalField', [], {'default': '0', 'max_digits': '38', 'decimal_places': '0'})
        },
        'quotaholder_app.provision': {
            'Meta': {'object_name': 'Provision'},
            'holder': ('django.db.models.fields.CharField', [], {'max_length': '4096', 'db_index': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'quantity': ('snf_django.lib.db.fields.IntDecimalField', [], {'max_digits': '38', 'decimal_places': '0'}),
            'resource': ('django.db.models.fields.CharField', [], {'max_length': '4096'}),
            'serial': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'provisions'", 'to': "orm['quotaholder_app.Commission']"}),
            'source': ('django.db.models.fields.CharField', [], {'max_length': '4096', 'null': 'True'})
        },
        'quotaholder_app.provisionlog': {
            'Meta': {'object_name': 'ProvisionLog'},
            'delta_quantity': ('snf_django.lib.db.fields.IntDecimalField', [], {'max_digits': '38', 'decimal_places': '0'}),
            'holder': ('django.db.models.fields.CharField', [], {'max_length': '4096'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'issue_time': ('django.db.models.fields.CharField', [], {'max_length': '4096'}),
            'limit': ('snf_django.lib.db.fields.IntDecimalField', [], {'max_digits': '38', 'decimal_places': '0'}),
            'log_time': ('django.db.models.fields.CharField', [], {'max_length': '4096'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '4096'}),
            'reason': ('django.db.models.fields.CharField', [], {'max_length': '4096'}),
            'resource': ('django.db.models.fields.CharField', [], {'max_length': '4096'}),
            'serial': ('django.db.models.fields.BigIntegerField', [], {}),
            'source': ('django.db.models.fields.CharField', [], {'max_length': '4096', 'null': 'True'}),
            'usage_max': ('snf_django.lib.db.fields.IntDecimalField', [], {'max_digits': '38', 'decimal_places': '0'}),
            'usage_min': ('snf_django.lib.db.fields.IntDecimalField', [], {'max_digits': '38', 'decimal_places': '0'})
        }
    }
    complete_apps = ['quotaholder_app']
| grnet/synnefo | snf-astakos-app/astakos/quotaholder_app/migrations/old/0006_datetime.py | Python | gpl-3.0 | 4,257 |
"""Build and install scmtiles."""
# Copyright 2016 Andrew Dawson
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup
import versioneer
# Python packages to install; version and build commands come from
# versioneer, which derives them from the SCM (git) metadata.
packages = ['scmtiles']
setup(name='scmtiles',
      version=versioneer.get_version(),
      cmdclass=versioneer.get_cmdclass(),
      description='Toolkit for running a single-column model over a grid',
      author='Andrew Dawson',
      packages=packages,
      )
| aopp-pred/scmtiles | setup.py | Python | apache-2.0 | 921 |
#!/usr/bin/env python
import matplotlib
matplotlib.use('agg')
import sys
import yt
import numpy as np
from yt.visualization.volume_rendering.api import \
Scene, \
VolumeSource
# this is for the wdconvect problem
def doit(plotfile):
    """Volume-render the (log) density field of one Castro plotfile with yt.

    Writes two PNGs named after the plotfile number: the raw render and an
    annotated version carrying the simulation time and credits.
    """
    ds = yt.load(plotfile)
    ds.periodicity = (True, True, True)
    field = ('boxlib', 'density')
    ds._get_field_info(field).take_log = True
    sc = Scene()
    # add a volume: select a sphere
    vol = VolumeSource(ds, field=field)
    vol.use_ghost_zones = True
    sc.add_source(vol)
    # transfer function: sample log10(density) values; faint alpha below
    # 10^3 so only the dense stellar material stands out.
    vals = [-1, 0, 1, 2, 3, 4, 5, 6, 7]
    #vals = [0.1, 1.0, 10, 100., 1.e4, 1.e5, 1.e6, 1.e7]
    sigma = 0.1
    tf = yt.ColorTransferFunction((min(vals), max(vals)))
    tf.clear()
    cm = "coolwarm"
    cm = "spectral"  # NOTE(review): overrides the line above; "coolwarm" is dead
    for v in vals:
        if v < 3:
            alpha = 0.1
        else:
            alpha = 0.5
        tf.sample_colormap(v, sigma**2, colormap=cm, alpha=alpha)
    sc.get_source(0).transfer_function = tf
    cam = sc.add_camera(ds, lens_type="perspective")
    cam.resolution = (1920, 1080)
    # Camera sits outside the octant along +y/+z, at 1.5x a fixed offset.
    cam.position = 1.5*ds.arr(np.array([0.0, 5.e9, 5.e9]), 'cm')
    # look toward the center -- we are dealing with an octant
    center = 0.5*(ds.domain_left_edge + ds.domain_right_edge)
    normal = (center - cam.position)
    normal /= np.sqrt(normal.dot(normal))
    cam.switch_orientation(normal_vector=normal,
                           north_vector=[0., 0., 1.])
    cam.set_width(ds.domain_width)
    #sc.annotate_axes()
    #sc.annotate_domain(ds)
    # Plotfile number (text after "plt") used to tag the output images.
    pid = plotfile.split("plt")[1]
    sc.render()
    sc.save("wdmerger_{}_new.png".format(pid), sigma_clip=6.0)
    sc.save_annotated("wdmerger_annotated_{}_new.png".format(pid),
                      text_annotate=[[(0.05, 0.05),
                                      "t = {:.3f} s".format(float(ds.current_time.d)),
                                      dict(horizontalalignment="left")],
                                     [(0.5,0.95),
                                      "Castro simulation of merging white dwarfs (0.6 $M_\odot$ + 0.9 $M_\odot$)",
                                      dict(color="y", fontsize="22",
                                           horizontalalignment="center")],
                                     [(0.95,0.05),
                                      "M. Katz et al.",
                                      dict(color="w", fontsize="16",
                                           horizontalalignment="right")]])
if __name__ == "__main__":
    # Render every plotfile named on the command line.
    # Fixed: the old code wrapped sys.argv[1] in a bare `except:` (which
    # swallowed every exception type) and assigned a throwaway `plotfile`
    # before re-reading the arguments in the loop; an explicit length check
    # expresses the same "at least one argument" requirement directly.
    if len(sys.argv) < 2:
        sys.exit("ERROR: no plotfile specified")
    for plotfile in sys.argv[1:]:
        doit(plotfile)
| BoxLib-Codes/wdmerger | analysis/vol-wd.py | Python | mit | 2,763 |
"""This module defines the class DesiForest to represent DESI forests"""
from picca.delta_extraction.astronomical_objects.forest import Forest
from picca.delta_extraction.errors import AstronomicalObjectError
class DesiForest(Forest):
    """Forest Object
    Methods
    -------
    __gt__ (from AstronomicalObject)
    __eq__ (from AstronomicalObject)
    class_variable_check (from Forest)
    consistency_check (from Forest)
    get_data (from Forest)
    rebin (from Forest)
    __init__
    coadd
    get_header
    Class Attributes
    ----------------
    delta_lambda: float or None (from Forest)
        Variation of the wavelength (in Angs) between two pixels. This should not
        be None if wave_solution is "lin". Ignored if wave_solution is "log".
    delta_log_lambda: float or None (from Forest)
        Variation of the logarithm of the wavelength (in Angs) between two pixels.
        This should not be None if wave_solution is "log". Ignored if wave_solution
        is "lin".
    lambda_max: float or None (from Forest)
        Maximum wavelength (in Angs) to be considered in a forest. This should not
        be None if wave_solution is "lin". Ignored if wave_solution is "log".
    lambda_max_rest_frame: float or None (from Forest)
        As wavelength_max but for rest-frame wavelength. This should not
        be None if wave_solution is "lin". Ignored if wave_solution is "log".
    lambda_min: float or None (from Forest)
        Minimum wavelength (in Angs) to be considered in a forest. This should not
        be None if wave_solution is "lin". Ignored if wave_solution is "log".
    lambda_min_rest_frame: float or None (from Forest)
        As wavelength_min but for rest-frame wavelength. This should not
        be None if wave_solution is "lin". Ignored if wave_solution is "log".
    log_lambda_max: float or None (from Forest)
        Logarithm of the maximum wavelength (in Angs) to be considered in a forest.
        This should not be None if wave_solution is "log". Ignored if wave_solution
        is "lin".
    log_lambda_max_rest_frame: float or None (from Forest)
        As log_lambda_max but for rest-frame wavelength. This should not be None if
        wave_solution is "log". Ignored if wave_solution is "lin".
    log_lambda_min: float or None (from Forest)
        Logarithm of the minimum wavelength (in Angs) to be considered in a forest.
        This should not be None if wave_solution is "log". Ignored if wave_solution
        is "lin".
    log_lambda_min_rest_frame: float or None (from Forest)
        As log_lambda_min but for rest-frame wavelength. This should not be None if
        wave_solution is "log". Ignored if wave_solution is "lin".
    mask_fields: list of str (from Forest)
        Names of the fields that are affected by masking. In general it will
        be "flux" and "ivar" but some child classes might add more.
    wave_solution: "lin" or "log" (from Forest)
        Determines whether the wavelength solution has linear spacing ("lin") or
        logarithmic spacing ("log").
    Attributes
    ----------
    dec: float (from AstronomicalObject)
        Declination (in rad)
    healpix: int (from AstronomicalObject)
        Healpix number associated with (ra, dec)
    los_id: longint (from AstronomicalObject)
        Line-of-sight id. Same as targetid
    ra: float (from AstronomicalObject)
        Right ascention (in rad)
    z: float (from AstronomicalObject)
        Redshift
    bad_continuum_reason: str or None
        Reason as to why the continuum fit is not acceptable. None for acceptable
        contiuum.
    continuum: array of float or None (from Forest)
        Quasar continuum. None for no information
    deltas: array of float or None (from Forest)
        Flux-transmission field (delta field). None for no information
    flux: array of float (from Forest)
        Flux
    ivar: array of float (from Forest)
        Inverse variance
    lambda_: array of float or None (from Forest)
        Wavelength (in Angstroms)
    log_lambda: array of float or None (from Forest)
        Logarithm of the wavelength (in Angstroms)
    mean_snr: float (from Forest)
        Mean signal-to-noise of the forest
    transmission_correction: array of float (from Forest)
        Transmission correction.
    weights: array of float or None (from Forest)
        Weights associated to the delta field. None for no information
    night: list of int
        Identifier of the night where the observation was made. None for no info
    petal: list of int
        Identifier of the spectrograph used in the observation. None for no info
    targetid: int
        Targetid of the object
    tile: list of int
        Identifier of the tile used in the observation. None for no info
    """
    def __init__(self, **kwargs):
        """Initialize instance
        Arguments
        ---------
        **kwargs: dict
            Dictionary containing the information
        Raise
        -----
        AstronomicalObjectError if there are missing variables
        """
        # NOTE: for "night", "petal" and "tile" the key must be PRESENT in
        # kwargs (its value may be None): the `del` below runs
        # unconditionally and raises KeyError otherwise.
        self.night = []
        if kwargs.get("night") is not None:
            self.night.append(kwargs.get("night"))
        del kwargs["night"]
        self.petal = []
        if kwargs.get("petal") is not None:
            self.petal.append(kwargs.get("petal"))
        del kwargs["petal"]
        self.targetid = kwargs.get("targetid")
        if self.targetid is None:
            raise AstronomicalObjectError("Error constructing DesiForest. "
                                          "Missing variable 'targetid'")
        del kwargs["targetid"]
        self.tile = []
        if kwargs.get("tile") is not None:
            self.tile.append(kwargs.get("tile"))
        del kwargs["tile"]
        # call parent constructor; the targetid doubles as the line-of-sight id
        kwargs["los_id"] = self.targetid
        super().__init__(**kwargs)
    def coadd(self, other):
        """Coadd the information of another forest.
        Forests are coadded by calling the coadd function from Forest.
        DESI night, petal and night from other are added to the current list
        Arguments
        ---------
        other: DesiForest
            The forest instance to be coadded.
        Raise
        -----
        AstronomicalObjectError if other is not a DesiForest instance
        """
        if not isinstance(other, DesiForest):
            raise AstronomicalObjectError("Error coadding DesiForest. Expected "
                                          "DesiForest instance in other. Found: "
                                          f"{type(other)}")
        # Accumulate observation metadata before the parent coadds the spectra.
        self.night += other.night
        self.petal += other.petal
        self.tile += other.tile
        super().coadd(other)
    def get_header(self):
        """Return line-of-sight data to be saved as a fits file header
        Adds specific DESI keys to general header (defined in class Forest)
        Return
        ------
        header : list of dict
            A list of dictionaries containing 'name', 'value' and 'comment' fields
        """
        header = super().get_header()
        # Multi-observation lists are serialized as dash-joined strings,
        # e.g. NIGHT='20200101-20200102'.
        header += [
            {
                'name': 'TARGETID',
                'value': self.targetid,
                'comment': 'Object identification'
            },
            {
                'name': 'NIGHT',
                'value': "-".join(str(night) for night in self.night),
                'comment': "Observation night(s)"
            },
            {
                'name': 'PETAL',
                'value': "-".join(str(petal) for petal in self.petal),
                'comment': 'Observation petal(s)'
            },
            {
                'name': 'TILE',
                'value': "-".join(str(tile) for tile in self.tile),
                'comment': 'Observation tile(s)'
            },
        ]
        return header
| igmhub/picca | py/picca/delta_extraction/astronomical_objects/desi_forest.py | Python | gpl-3.0 | 7,693 |
#!/usr/bin/env python3
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2014 Thomas Voegtlin
#
# Electron Cash - lightweight Bitcoin Cash client
# Copyright (C) 2019 The Electron Cash Developers
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys, traceback, queue
from xmlrpc.client import ServerProxy, Transport
import http.client
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
from electroncash import bitcoin, util, keystore
from electroncash import transaction
from electroncash.plugins import BasePlugin, hook
from electroncash.i18n import _
from electroncash.wallet import Multisig_Wallet
from electroncash.util import bh2u, bfh, Weak, InvalidPassword, print_error
from electroncash_gui.qt.transaction_dialog import show_transaction, TxDialog
# Workarounds to the fact that xmlrpc.client doesn't take a timeout= arg.
class TimeoutTransport(Transport):
    """xmlrpc.client Transport whose HTTP connections carry a timeout.

    Works around the fact that xmlrpc.client does not accept a timeout=
    argument directly.
    """

    def __init__(self, timeout=2.0, *l, **kw):
        """Remember *timeout* (seconds) and defer everything else to Transport."""
        super().__init__(*l, **kw)
        self.timeout = timeout

    def make_connection(self, host):
        """Return an HTTPConnection to *host* honoring the stored timeout."""
        connection = http.client.HTTPConnection(host, timeout=self.timeout)
        return connection
class TimeoutServerProxy(ServerProxy):
    """ServerProxy wired to a TimeoutTransport so XML-RPC calls cannot hang."""

    def __init__(self, uri, timeout=2.0, *l, **kw):
        """Build the proxy for *uri*, installing a transport with *timeout*."""
        use_datetime = kw.get('use_datetime', False)
        kw['transport'] = TimeoutTransport(timeout=timeout,
                                           use_datetime=use_datetime)
        super().__init__(uri, *l, **kw)
# /end timeout= Workarounds
PORT = 8081  # TCP port of the public cosigner-pool xmlrpc relay server
HOST = 'sync.imaginary.cash'  # hostname of the public cosigner-pool relay server
class Listener(util.DaemonThread):
    """Background thread that polls the cosigner-pool server for messages
    addressed to any of our key hashes, and emits a Qt signal on the owning
    State object when one arrives."""
    def __init__(self, state):
        super().__init__()
        self.daemon = True
        # Weak ref back to the owning State to avoid a reference cycle.
        self.state_ref = Weak.ref(state)
        # Key hashes we have already seen a message for (suppresses repeats).
        self.received = set()
        self.keyhashes = []
        self.timeoutQ = queue.Queue() # this queue's sole purpose is to provide an interruptible sleep
    def diagnostic_name(self):
        # Append the owning window's name (if still alive) for log clarity.
        wname = str(self.state_ref() and self.state_ref().window_ref() and self.state_ref().window_ref().diagnostic_name())
        return super().diagnostic_name() + "@" + wname
    def set_keyhashes(self, keyhashes):
        """Set the list of key hashes this listener polls the server for."""
        self.keyhashes = keyhashes
    def clear(self, keyhash):
        """Delete keyhash's message from the server and forget we saw it."""
        state = self.state_ref()
        if state: state.server.delete(keyhash)
        try: self.received.remove(keyhash)
        except (ValueError, KeyError): pass
    def run(self):
        """Thread main loop: poll the server for each keyhash every 10s."""
        self.print_error("started.")
        while self.running:
            try:
                if not self.keyhashes:
                    self.timeoutQ.get(timeout=2.0) # this shouldn't ever happen but.. poll until ready.
                    continue
                for keyhash in self.keyhashes:
                    if keyhash in self.received:
                        # already seen.. avoids popup window spam
                        continue
                    try:
                        message = self.state_ref() and self.state_ref().server.get(keyhash)
                    except Exception as e:
                        self.print_error("cannot contact cosigner pool", repr(e))
                        break
                    if message:
                        self.received.add(keyhash)
                        self.print_error("received message for", keyhash)
                        # Emit into the Qt main thread via the State's signal.
                        self.state_ref() and self.state_ref().cosigner_receive_signal.emit(keyhash, message)
                # poll every 10 seconds
                self.timeoutQ.get(timeout=10.0)
            except queue.Empty:
                # timed out, continue
                continue
        self.print_error("exiting.")
    def stop(self):
        # extends DaemonThread by also writing to the timeoutQ to wake up the sleeping thread, if any
        super().stop()
        self.timeoutQ.put(None) # wake up sleeper, if any
    def start(self):
        # overrides DaemonThread -- clears queue on (re)start
        if not self.is_running():
            self.timeoutQ = queue.Queue() # clear queue in case it had stale data.
            super().start()
    def stop_join(self):
        """Stop the thread and wait for it to exit (no-op if never started)."""
        self.stop()
        try: self.join()
        except RuntimeError: pass # was never started
class State(QObject):
    ''' Window-specific state. Gets inserted into cosigner_pool_state attribute
    for window. '''
    cosigner_receive_signal = pyqtSignal(object, object)  # (keyhash, message), emitted from the Listener thread
    listener = None
    keys = []
    cosigner_list = []
    plugin_ref = None # Weak.ref to plugin object
    window_ref = None # Weak.ref to window object
    server = None
    def __init__(self, plugin, window):
        super().__init__() # top-level QObject, no parent()
        self.server = TimeoutServerProxy('http://%s:%d'%(HOST,PORT), allow_none=True, timeout = 2.0)
        self.listener = Listener(self)
        # Weak refs avoid keeping the plugin/window alive after they close.
        self.plugin_ref = Weak.ref(plugin)
        self.window_ref = Weak.ref(window)
        self.cosigner_receive_signal.connect(self.on_receive)
    def on_receive(self, k, m):
        """Signal handler (runs in the Qt main thread): forward a received
        message to the plugin if both plugin and window are still alive."""
        plugin = self.plugin_ref()
        window = self.window_ref()
        if plugin and window:
            plugin.on_receive(window, k, m)
class _Dead:
    # Dummy referent so Plugin.Instance_ref can default to an always-dead
    # weak reference (the instance is garbage-collected immediately).
    pass
class Plugin(BasePlugin):
    """Cosigner Pool plugin: shares partially-signed multisig transactions
    between cosigners via a simple xmlrpc relay server."""
    Instance_ref = Weak.ref(_Dead()) # Make sure Instance_ref is always defined, defaults to dead object
    def __init__(self, parent, config, name):
        BasePlugin.__init__(self, parent, config, name)
        # Windows (with multisig wallets) this plugin is attached to.
        self.windows = []
        self.initted = False
    @hook
    def init_qt(self, gui):
        """One-time Qt init: attach to all already-open wallet windows."""
        if self.initted: return # already initted
        self.print_error("Initializing...")
        for window in gui.windows:
            self.on_new_window(window)
        Plugin.Instance_ref = Weak.ref(self)
        self.initted = True
    @hook
    def on_new_window(self, window):
        """Register a window; only multisig wallets get a State + listener."""
        try: wallet = window.wallet
        except AttributeError:
            # this can happen if wallet is not started up properly
            self.print_error("WARNING: Window {} lacks a wallet -- startup race condition likely. FIXME!".format(window.diagnostic_name()))
            return
        if isinstance(wallet, Multisig_Wallet):
            window.cosigner_pool_state = state = State(self, window)
            self.windows.append(window)
            self.update(window)
            # un-gray-out buttons for tx dialogs left around related to this window
            for b in Plugin.get_all_cosigner_buttons():
                if b.wallet_ref() == wallet:
                    b.setEnabled(True)
    @hook
    def on_close_window(self, window):
        """Unregister a window and shut down its listener thread."""
        if window in self.windows:
            state = getattr(window, 'cosigner_pool_state', None)
            if state:
                if state.listener:
                    self.print_error("shutting down listener for",window.diagnostic_name())
                    state.listener.stop_join()
                state.deleteLater()
                delattr(window, 'cosigner_pool_state')
                self.print_error("unregistered for window",window.diagnostic_name())
            self.windows.remove(window)
            # gray out buttons for tx dialogs left around related to this window
            for b in Plugin.get_all_cosigner_buttons():
                if b.wallet_ref() == window.wallet:
                    b.setEnabled(False)
    @staticmethod
    def get_all_cosigner_buttons():
        """Return every 'Send to cosigner' button across all open TxDialogs."""
        ret = []
        app = QApplication.instance()
        for w in app.topLevelWidgets():
            if isinstance(w, TxDialog):
                but = getattr(w, 'cosigner_send_button', None)
                if but: ret.append(but)
        return ret
    def is_available(self):
        """This plugin has no external requirements; always available."""
        return True
    def on_close(self):
        """Plugin shutdown: detach from every registered window."""
        for w in self.windows.copy():
            self.on_close_window(w)
        self.windows = []
        self.initted = False
        super().on_close()
    def update(self, window):
        """(Re)compute this wallet's own key hashes and its cosigner list,
        then point the listener at our hashes and ensure it is running."""
        wallet = window.wallet
        state = window.cosigner_pool_state
        if not state:
            self.print_error("No cosigner pool state object for window", window.diagnostic_name())
            return
        listener = state.listener
        state.keys = []
        state.cosigner_list = []
        for key, keystore in wallet.keystores.items():
            xpub = keystore.get_master_public_key()
            K = bitcoin.deserialize_xpub(xpub)[-1]
            _hash = bh2u(bitcoin.Hash(K))
            if not keystore.is_watching_only():
                # A keystore we can sign with: listen on its hash.
                state.keys.append((key, _hash))
            else:
                # Somebody else's keystore: a potential message recipient.
                state.cosigner_list.append((xpub, K, _hash))
        listener.set_keyhashes([t[1] for t in state.keys])
        if not listener.is_running():
            self.print_error("Starting listener for", window.diagnostic_name())
            listener.start()
    @hook
    def transaction_dialog(self, d):
        """Add the 'Send to cosigner' button to a newly-created TxDialog."""
        window, state = self._find_window_and_state_for_wallet(d.wallet)
        if window and state:
            d.cosigner_send_button = b = QPushButton(_("Send to cosigner"))
            b.wallet_ref = Weak.ref(window.wallet)
            # Static slot so a stopped/restarted plugin doesn't leave a
            # stale bound-method reference behind.
            b.clicked.connect(lambda: Plugin.do_send_static(d))
            d.buttons.insert(0, b)
        self.transaction_dialog_update(d)
    @hook
    def transaction_dialog_update(self, d):
        """Show the send button only when some cosigner could still sign."""
        window, state = self._find_window_and_state_for_wallet(d.wallet)
        but = getattr(d, 'cosigner_send_button', None)
        if not but or not window or not state or d.tx.is_complete() or d.wallet.can_sign(d.tx):
            but and but.hide()
            return
        for xpub, K, _hash in state.cosigner_list:
            if self.cosigner_can_sign(d.tx, xpub):
                but and but.show()
                break
        else:
            # for..else: no cosigner can sign this tx.
            but and but.hide()
    def _find_window_and_state_for_wallet(self, wallet):
        """Return (window, state) for the given wallet, or (None, None)."""
        for window in self.windows:
            if window.wallet == wallet:
                return window, window.cosigner_pool_state
        return None, None
    def cosigner_can_sign(self, tx, cosigner_xpub):
        """Return True if any of tx's inputs reference cosigner_xpub."""
        from electroncash.keystore import is_xpubkey, parse_xpubkey
        xpub_set = set([])
        for txin in tx.inputs():
            for x_pubkey in txin['x_pubkeys']:
                if is_xpubkey(x_pubkey):
                    xpub, s = parse_xpubkey(x_pubkey)
                    xpub_set.add(xpub)
        return cosigner_xpub in xpub_set
    @staticmethod
    def do_send_static(d):
        ''' Decouples button slot from running instance in case user stops/restarts the plugin while TxDialogs are up. '''
        plugin = Plugin.Instance_ref()
        if plugin:
            plugin.do_send(d)
        else:
            print_error("[cosigner_pool] No plugin.")
    def do_send(self, d):
        """Encrypt the raw tx to each cosigner who still needs to sign and
        upload it to the pool server under that cosigner's key hash."""
        tx = d.tx
        window, state = self._find_window_and_state_for_wallet(d.wallet)
        if not tx or not window or not state:
            self.print_error("Missing tx or window or state")
            return
        for xpub, K, _hash in state.cosigner_list:
            if not self.cosigner_can_sign(tx, xpub):
                continue
            # Encrypt the raw tx to the cosigner's pubkey K.
            message = bitcoin.encrypt_message(bfh(tx.raw), bh2u(K)).decode('ascii')
            try:
                state.server.put(_hash, message)
            except Exception as e:
                traceback.print_exc(file=sys.stdout)
                window.show_error(_("Failed to send transaction to cosigning pool."))
                return
        d.show_message(_("Your transaction was sent to the cosigning pool.") + '\n' +
                       _("Open your cosigner wallet to retrieve it."))
    def on_receive(self, window, keyhash, message):
        """Handle a message from the pool: decrypt it (asking for the wallet
        password if needed) and open the transaction in a dialog."""
        self.print_error("signal arrived for", keyhash, "@", window.diagnostic_name())
        state = getattr(window, 'cosigner_pool_state', None)
        if not state:
            self.print_error("Error: state object not found")
            return
        keys = state.keys
        for key, _hash in keys:
            if _hash == keyhash:
                break
        else:
            # for..else: message was not addressed to any of our keys.
            self.print_error("keyhash not found")
            return
        wallet = window.wallet
        if isinstance(wallet.keystore, keystore.Hardware_KeyStore):
            window.show_warning(_('An encrypted transaction was retrieved from cosigning pool.') + '\n' +
                                _('However, hardware wallets do not support message decryption, '
                                  'which makes them not compatible with the current design of cosigner pool.'))
            return
        password = None
        if wallet.has_password():
            password = window.password_dialog(_('An encrypted transaction was retrieved from cosigning pool.') + '\n' +
                                              _('Please enter your password to decrypt it.'))
            if not password:
                return
        else:
            details = (_("If you choose 'Yes', it will be decrypted and a transaction window will be shown, giving you the opportunity to sign the transaction.")
                       + "\n\n" + _("If you choose 'No', you will be asked again later (the next time this wallet window is opened)."))
            ret = window.msg_box(icon = QMessageBox.Question, parent = None, title=_("Cosigner Pool"), buttons=QMessageBox.Yes|QMessageBox.No,
                                 text = _("An encrypted transaction was retrieved from cosigning pool.") + '\n' + _("Do you want to open it now?"),
                                 detail_text = details)
            if ret != QMessageBox.Yes:
                return
        err, badpass = "Unknown Error", False
        try:
            xprv = wallet.keystore.get_master_private_key(password)
        except InvalidPassword as e:
            err, badpass = str(e), True
            xprv = None
        if not xprv:
            window.show_error(err)
            if badpass:
                self.on_receive(window, keyhash, message) # try again
            return
        try:
            # Decrypt with the EC key derived from our master private key.
            k = bh2u(bitcoin.deserialize_xprv(xprv)[-1])
            EC = bitcoin.EC_KEY(bfh(k))
            message = bh2u(EC.decrypt_message(message))
        except Exception as e:
            traceback.print_exc(file=sys.stdout)
            window.show_error(repr(e))
            return
        # Success: remove the message from the server and show the tx.
        state.listener.clear(keyhash)
        tx = transaction.Transaction(message)
        show_transaction(tx, window, prompt_if_unsaved=True)
| fyookball/electrum | plugins/cosigner_pool/qt.py | Python | mit | 15,181 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import hmac

import openerp
import openerp.exceptions
def login(db, login, password):
    """Attempt to authenticate ``login``/``password`` against database ``db``.

    Delegates to the res.users model's _login() and returns its result.
    """
    registry = openerp.registry(db)
    return registry['res.users']._login(db, login, password)
def check_super(passwd):
    """Check ``passwd`` against the server-wide admin (master) password.

    Returns True when it matches; raises AccessDenied otherwise.

    Fixes over the original: the plain ``==`` comparison leaked timing
    information about the admin password, and an empty supplied password
    matched an empty/unset admin password.
    """
    expected = openerp.tools.config['admin_passwd']
    try:
        # Constant-time comparison avoids a timing side-channel.
        matches = hmac.compare_digest(passwd, expected)
    except TypeError:
        # Mixed/unsupported operand types: fall back to ordinary equality.
        matches = (passwd == expected)
    if passwd and matches:
        return True
    raise openerp.exceptions.AccessDenied()
def check(db, uid, passwd):
    """Verify ``passwd`` for user ``uid`` on database ``db``.

    Delegates to the res.users model's check() and returns its result.
    """
    users_model = openerp.registry(db)['res.users']
    return users_model.check(db, uid, passwd)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| diogocs1/comps | web/openerp/service/security.py | Python | apache-2.0 | 1,510 |
#!/usr/bin/env python3
"""
Demo of reading raw Bayer 10-bit data from Raspberry Pi camera chip using PiCamera module.
Notes:
1) can only read full chip, no binning or ROI: 2592x1944 pixel image with current imaging chip
2) captures a single image
3) sudo apt-get install python3-picamera python3-scipy
Basis from Michael Hirsch https://scivision.co
Inspiration from Camera Particle Detector (Magdalen College School)
Pieter Kuiper
"""
from __future__ import division,absolute_import
from time import sleep,time
from scipy.misc import bytescale,imsave
from matplotlib.pyplot import figure,draw,pause
from numpy import unravel_index
#
from picamera import PiCamera
#
from params import getparams,setparams
from rawbayer import grabframe
def pibayerraw(fn,exposure_sec,bit8):
    """Continuously grab raw Bayer frames from the Pi camera and save a
    50x50-pixel crop around the brightest pixel whenever its value exceeds 50.

    fn:           output filename prefix; a counter and '.png' are appended
    exposure_sec: exposure time passed to setparams()
    bit8:         if truthy, linearly rescale 10-bit data to 8-bit
    Returns the last image captured (only if the loop is ever broken).
    """
    with PiCamera() as cam: #load camera driver
        print('camera startup gain autocal')
        #LED automatically turns on, this turns it off
        cam.led = False
        sleep(0.75) # somewhere between 0.5..0.75 seconds to let camera settle to final gain value.
        setparams(cam,exposure_sec) #wait till after sleep() so that gains settle before turning off auto
        getparams(cam)
        counter = 1
        #%% main loop
        while True:
#            tic = time()
            img10 = grabframe(cam)
#            print('{:.1f} sec. to grab frame'.format(time()-tic))
#%% linear scale 10-bit to 8-bit
            if bit8:
                # NOTE(review): upper input bound 1024, but 10-bit data maxes
                # at 1023 -- confirm whether 1023 was intended.
                img = bytescale(img10,0,1024,255,0)
            else:
                img = img10
#%% write to PNG or JPG or whatever based on file extension
            max_value = img.max()
            print(max_value)
            if max_value > 50:
                # Locate the brightest pixel and crop a window around it,
                # clamped to the 1944x2592 sensor dimensions.
                idx = unravel_index(img.argmax(), img.shape)
                xidx = idx[0]
                yidx = idx[1]
                print(xidx, yidx)
                xlow = max(0, xidx-25)
                ylow = max(0, yidx-25)
                xhi = min(1944, xidx+25)
                yhi = min(2592, yidx+25)
                imsave(fn+'%03d' % counter + '.png',img[xlow:xhi,ylow:yhi])
                counter = counter + 1
#                break
    return img
# Command-line entry point: parse options and run the capture loop until
# the user interrupts with Ctrl-C.
if __name__ == '__main__':
    from argparse import ArgumentParser
    p = ArgumentParser(description='Raspberry Pi Picamera demo with raw Bayer data')
    p.add_argument('-e','--exposure',help='exposure time [seconds]',type=float)
    p.add_argument('-8','--bit8',help="convert output to 8-bit",action='store_true')
    p.add_argument('filename',help='output filename to write [png,jpg]',nargs='?')
    p = p.parse_args()
    try:
        print('press Ctrl c to end program')
        img = pibayerraw(p.filename, p.exposure,p.bit8)
    except KeyboardInterrupt:
        # Ctrl-C is the normal way to stop the capture loop.
        pass
| pietkuip/raspberrypi_muon_microscope | getrawimage.py | Python | mit | 2,714 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# ReGraph documentation build configuration file, created by
# sphinx-quickstart on Mon Dec 4 16:51:07 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.doctest',
    'sphinx.ext.todo',
    'sphinx.ext.coverage',
    'sphinx.ext.ifconfig',
    'sphinx.ext.viewcode',
    'sphinx.ext.githubpages',
    'sphinx.ext.autosummary',
    # numpydoc is a third-party extension (not bundled with Sphinx).
    'numpydoc',
    'sphinx.ext.mathjax'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'ReGraph'
copyright = '2017, Eugenia Oshurko, Yves-Stan Le Cornec'
author = 'Eugenia Oshurko, Yves-Stan Le Cornec'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
# NOTE(review): newer Sphinx releases warn on language = None; 'en' may be
# required when upgrading.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
    '**': [
        'about.html',
        'navigation.html',
        'relations.html',  # needs 'show_related': True theme option to display
        'searchbox.html',
        'donate.html',
    ]
}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'ReGraphdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',
    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'ReGraph.tex', 'ReGraph Documentation',
     'Eugenia Oshurko, Yves-Stan Le Cornec', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'regraph', 'ReGraph Documentation',
     [author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'ReGraph', 'ReGraph Documentation',
     author, 'ReGraph', 'One line description of project.',
     'Miscellaneous'),
]
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
| eugeniashurko/ReGraph | docs/source/conf.py | Python | mit | 5,928 |
"""`List` provider example."""
import dataclasses
from typing import List
from dependency_injector import containers, providers
@dataclasses.dataclass
class Module:
    """A pluggable module, identified by its name."""
    name: str
@dataclasses.dataclass
class Dispatcher:
    """Holds the list of modules it dispatches to."""
    modules: List[Module]
class Container(containers.DeclarativeContainer):
    """Declarative DI container wiring two Module factories into Dispatcher."""
    dispatcher_factory = providers.Factory(
        Dispatcher,
        # providers.List assembles the two Module factories into a plain list
        # each time dispatcher_factory is called.
        modules=providers.List(
            providers.Factory(Module, name='m1'),
            providers.Factory(Module, name='m2'),
        ),
    )
# Demo: build the container and verify the List provider produced real
# Module instances in order.
if __name__ == '__main__':
    container = Container()
    dispatcher = container.dispatcher_factory()
    assert isinstance(dispatcher.modules, list)
    assert dispatcher.modules[0].name == 'm1'
    assert dispatcher.modules[1].name == 'm2'
    # Call "dispatcher = container.dispatcher_factory()" is equivalent to:
    # dispatcher = Dispatcher(
    #     modules=[
    #         Module(name='m1'),
    #         Module(name='m2'),
    #     ],
    # )
| rmk135/objects | examples/providers/list.py | Python | bsd-3-clause | 979 |
""" Simple implementation of mutinomial Naive Bayes for text classfification.
TODO: Apply to 20 Newsgroups, Reuters-21578 datasets
"""
__author__ = 'Duong Nguyen'
__version__ = '0.0'
import math
import sys
from collections import defaultdict
class NaiveBayes(object):
    """Multinomial Naive Bayes text classifier with Laplace (add-one) smoothing.

    Training documents are sequences whose first element is the category
    label and whose remaining elements are the words of the document.
    """
    def __init__(self):
        self.categories = set()    # category labels seen in training
        self.vocabularies = set()  # all words seen in training
        self.wordcount = {}        # wordcount[cat][word] -> count of word in cat
        self.catcount = {}         # catcount[cat] -> number of docs in cat
        self.denom = {}            # denom[cat] -> smoothing denominator

    def train(self, data):
        """Estimate model counts from training data.

        data: iterable of [category, word1, word2, ...] sequences.
        """
        for d in data:
            self.categories.add(d[0])
        for cat in self.categories:
            self.wordcount[cat] = defaultdict(int)
            self.catcount[cat] = 0
        for d in data:
            cat, doc = d[0], d[1:]
            self.catcount[cat] += 1
            for word in doc:
                self.vocabularies.add(word)
                self.wordcount[cat][word] += 1
        # Laplace denominator: total words in cat + |V| (one pseudo-count
        # per vocabulary word).
        for cat in self.categories:
            self.denom[cat] = sum(self.wordcount[cat].values()) + len(self.vocabularies)

    def wordProb(self, word, cat):
        """ Compute P(word|cat) with Laplace smoothing.
        """
        return float(self.wordcount[cat][word] + 1) / self.denom[cat]

    def docProb(self, doc, cat):
        """ Compute log P(cat|doc) = log P(cat) + sum_i log P(word_i|cat)
        """
        total = sum(self.catcount.values()) # number of docs in training data
        score = math.log(float(self.catcount[cat])/total) # log P(cat)
        for word in doc:
            score += math.log(self.wordProb(word, cat)) # + sum_i log P(word_i|cat)
        return score

    def classify(self, doc):
        """ Classify doc by argmax_cat log P(cat|doc).

        Bug fix: the original seeded the running maximum with -sys.maxint,
        which does not exist on Python 3; -inf works on both 2 and 3 and is
        the mathematically correct identity for a max over log-probs.
        """
        best = None
        maxP = float('-inf')
        for cat in self.categories:
            p = self.docProb(doc, cat)
            if p > maxP:
                maxP = p
                best = cat
        return best
if __name__ == '__main__':
pass | ntduong/ML | Misc/naivebayes.py | Python | mit | 2,081 |
# -*- coding: utf-8 -*-
import os
import shutil
from jinja2 import Environment, FileSystemLoader
from webassets import Environment as AssetsEnvironment
from webassets.ext.jinja2 import AssetsExtension
from webassets.loaders import YAMLLoader
class TemplateBuilder(object):
    """Renders the Jinja2 templates under ``path`` into ``output``, wiring up
    a webassets environment (optionally loaded from a YAML config file) for
    the ``{% assets %}`` template tag."""

    def __init__(self, path, output,
                 static_path='static', static_url='static',
                 asset_config='config.yml'):
        self.path = path
        self.output = output
        self.output_path = os.path.join(path, output)
        self.env = Environment(loader=FileSystemLoader(path),
                               extensions=[AssetsExtension])
        try:
            config_path = os.path.join(self.path, asset_config)
            # Renamed local (was `asset_config`): no longer shadows the
            # parameter of the same name.
            config_loader = YAMLLoader(config_path)
            self.assets_env = config_loader.load_environment()
        except IOError:
            # Missing/unreadable YAML config: fall back to a default env.
            self.assets_env = AssetsEnvironment()
        # Fill in defaults the config file did not provide.
        if 'directory' not in self.assets_env.config:
            self.assets_env.directory = self.output_path
        if 'url' not in self.assets_env.config:
            self.assets_env.url = static_url
        self.assets_env.load_path = [self.path]
        self.env.assets_environment = self.assets_env

    def build_template(self, template, context=None):
        """Render ``template`` and write it into the output directory.

        ``context`` is currently unused (kept for API compatibility); its
        default was changed from the mutable ``{}`` to ``None``.
        """
        tmpl = self.env.get_template(template)
        dump_path = os.path.join(self.output_path, template)
        tmpl.stream().dump(dump_path)

    def list_files(self):
        """Walk ``path`` and classify files.

        Returns a (templates, bundles, other) triple of sorted lists of
        '/'-separated paths relative to ``path``; the output directory and
        bundle source files are excluded from templates/other.
        """
        templates, other = set(), set()
        if getattr(self.assets_env, '_named_bundles', None):
            # BUG FIX: dict.iteritems() does not exist on Python 3;
            # items() behaves equivalently here on both Python 2 and 3.
            bundles = [fp for name, bundle in self.assets_env._named_bundles.items()
                       for fp in bundle.contents]
        else:
            bundles = []
        for dirpath, dirnames, filenames in os.walk(self.path):
            for filename in filenames:
                filepath = os.path.join(dirpath, filename) \
                    [len(self.path):].strip(os.path.sep).replace(os.path.sep, '/')
                if filepath[:2] == './':
                    filepath = filepath[2:]
                if self.output in filepath or filepath in bundles:
                    continue
                elif '.html' in filepath:
                    templates.add(filepath)
                else:
                    other.add(filepath)
        return sorted(templates), sorted(bundles), sorted(other)
class SiteBuilder(object):
    """Builds a static site: renders every template found by the template
    builder and mirrors all other (non-bundle) files into the output dir."""

    def __init__(self, path, output='public', tmpl_builder_class=TemplateBuilder, **kwargs):
        self.path = path
        self.output_path = os.path.join(path, output)
        self.tmpl_builder = tmpl_builder_class(self.path, output, **kwargs)

    def build(self):
        """Render all templates and copy static files to the output path."""
        if not os.path.exists(self.output_path):
            os.mkdir(self.output_path)
        templates, bundles, others = self.tmpl_builder.list_files()
        # XXX: for now we are not handling contexts
        for tmpl_name in templates:
            self.tmpl_builder.build_template(tmpl_name)
        for rel_path in others:
            target_dir = os.path.join(self.output_path, os.path.dirname(rel_path))
            if not os.path.exists(target_dir):
                os.makedirs(target_dir)
            src = os.path.join(self.path, rel_path)
            dst = os.path.join(self.output_path, rel_path)
            shutil.copyfile(src, dst)
| regadas/presstatic | presstatic/builder.py | Python | mit | 3,269 |
# -*- coding: iso-8859-1 -*-
##############################################################################
#
# Copyright (c) 2001 Zope Corporation and Contributors. All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
"""
Package wrapper for Page Templates
This wrapper allows the Page Template modules to be segregated in a
separate package.
"""
# Placeholder for Zope Product data
misc_ = {}
def initialize (context):
    """Zope product initializer; delegates to ZopePageTemplate.initialize."""
    # Import lazily, and defer initialization to the module
    import ZopePageTemplate
    ZopePageTemplate.initialize(context)
| HomeRad/TorCleaner | wc/webgui/PageTemplates/__init__.py | Python | gpl-2.0 | 997 |
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
class Analysis(object):
  """Parsed representation of an analysis for some JVM language.

  An analysis provides information on the src -> class product mappings
  and on the src -> {src|class|jar} file dependency mappings.
  """

  @classmethod
  def merge(cls, analyses):
    """Merge multiple analysis instances into one. Abstract."""
    raise NotImplementedError()

  def split(self, splits, catchall=False):
    """Split this analysis according to ``splits``, a list of K iterables
    of source files.

    With catchall=False, returns K analysis objects, one per split, in
    order.  With catchall=True, returns K+1 objects: the extra, final one
    holds the analysis for any remainder sources not named in the splits.
    Abstract.
    """
    raise NotImplementedError()

  def write_to_path(self, outfile_path, rebasings=None):
    """Serialize this analysis to the file named by ``outfile_path``."""
    with open(outfile_path, 'w') as f:
      self.write(f, rebasings)

  def write(self, outfile, rebasings=None):
    """Serialize this analysis to the open file ``outfile``.

    rebasings: list of [from_prefix, to_prefix] path-prefix pairs to
    rewrite; a to_prefix of None removes matching paths entirely. Abstract.
    """
    raise NotImplementedError()
| tejal29/pants | src/python/pants/backend/jvm/tasks/jvm_compile/analysis.py | Python | apache-2.0 | 1,537 |
#!/usr/bin/env python
#!/usr/bin/env python3
# download_ipc
# Written in 2013 by Chris Pavlina
# CC0 1.0 Universal
# This script downloads the IPC libraries from FreePCB and converts them to
# KiCad format using freepcb2kicad.
import zipfile
import tempfile
import shutil
import imp
import os
import shutil
VERSION = "1.0"
FREEPCB2KICAD_ARGS = ["--blurb", "--rounded-pads", "--strip-lmn"]
# Py2/3 imports
try:
from urllib.request import urlopen
except ImportError:
from urllib2 import urlopen
try:
from io import BytesIO
except ImportError:
from StringIO import StringIO as BytesIO
try:
raw_input
except NameError:
raw_input = input
try:
bytes
except NameError:
bytes = str
# Confirm the FreePCB license
CONFIRMLICENSE_MSG="""\
IPC libraries must be downloaded from FreePCB (www.freepcb.com).
They are covered by the GNU General Public License, version 2 or
later."""
class LicenseException (Exception):
    """Raised when the user declines the FreePCB library license."""
    pass
class ConfirmLicense (object):
    """Callable that asks the user to accept the GPL at most once.

    Raises LicenseException if the user declines; subsequent calls after an
    acceptance are no-ops.
    """

    def __init__ (self):
        self.already_confirmed = False

    def __call__ (self):
        # Only prompt on the first call.
        if self.already_confirmed:
            return
        print (CONFIRMLICENSE_MSG)
        answer = raw_input ("Do you accept the license? (y/n) ")
        if answer.lower () in ("y", "yes"):
            self.already_confirmed = True
        else:
            raise LicenseException ("License not accepted.")
confirm_license = ConfirmLicense ()
def main ():
    """Parse command-line arguments, fetch the FreePCB IPC zipfile (from a
    URL or a local path) and convert it to KiCad format via freepcb2kicad,
    cleaning up the temporary working directory on the way out."""
    # Get args
    from argparse import ArgumentParser
    description = "Download FreePCB IPC libraries and convert to KiCad " + \
            "format."
    p = ArgumentParser (description=description)
    p.add_argument ("-v", "--version", action="version",
            version="%(prog)s " + VERSION)
    p.add_argument ("src", metavar="SRC", type=str,
            help="URL or path to IPC FreePCB zipfile")
    p.add_argument ("dest", metavar="DEST", type=str,
            help="Path to KiCad output")
    p.add_argument ("fp2kicad", metavar="FP2KICAD", type=str,
            help="Path to freepcb2kicad.py")
    p.add_argument ("--no-confirm-license", dest="no_confirm_license",
            action="store_const", const=True, default=False,
            help="Do not ask the user to accept the GPL")
    p.add_argument ("--3dmap", dest="threedmap", type=str,
            help="Module-3D model map. See freepcb2kicad.py documentation.")
    p.add_argument ("--rounded-pads", dest="roundedpads", action="store_const", const="all", default=None)
    p.add_argument ("--rounded-except-1", dest="roundedpads", action="store_const", const="allbut1", default=None)
    p.add_argument ("--rounded-pad-exceptions", dest="rpexcept", type=str,
            help="Rounded pad exception file. See freepcb2kicad.py for " + \
                    "documentation.")
    p.add_argument ("--rounded-center-exceptions", dest="rcexcept", type=str,
            help="Rounded center pad exception file. See freepcb2kicad.py for " + \
                    "documentation.")
    p.add_argument ("--add-courtyard", dest="courtyard", type=str,
            default=None,
            help="Add a courtyard a fixed number of mm outside the bounding box")
    p.add_argument ("--hash-time", dest="hashtime", action="store_const",
            const=True, default=False,
            help="Set a fake edit time on the footprints using a hash")
    args = p.parse_args ()

    # Translate the parsed options into freepcb2kicad arguments.
    if args.threedmap is not None:
        FREEPCB2KICAD_ARGS.extend (["--3dmap", args.threedmap])
    if args.rpexcept is not None:
        FREEPCB2KICAD_ARGS.extend (["--rounded-pad-exceptions", args.rpexcept])
    if args.rcexcept is not None:
        FREEPCB2KICAD_ARGS.extend (["--rounded-center-exceptions", args.rcexcept])
    if args.courtyard is not None:
        FREEPCB2KICAD_ARGS.extend (["--add-courtyard", args.courtyard])
    if args.roundedpads == "all":
        FREEPCB2KICAD_ARGS.append ("--rounded-pads")
    elif args.roundedpads == "allbut1":
        FREEPCB2KICAD_ARGS.append ("--rounded-except-1")
    if args.hashtime:
        FREEPCB2KICAD_ARGS.append ("--hash-time")

    # Download, if necessary, then open file.
    # BUG FIX: the original tested only "http:/", so https:// URLs fell
    # through and were wrongly treated as local file paths. str.startswith
    # accepts a tuple of prefixes, so test both schemes.
    if args.src.startswith (("http://", "https://")):
        if not args.no_confirm_license:
            confirm_license ()
        url = urlopen (args.src)
        print ("Downloading FreePCB library...")
        try:
            data = url.read ()
        except Exception:
            url.close ()
            raise
        else:
            url.close ()
        ipc_f = BytesIO (data) # data is bytes in Py3
    else:
        ipc_f = open (args.src, 'rb')
    ipc_zip = zipfile.ZipFile (ipc_f)

    # Create a temporary working directory, and extract the IPC files
    # into it.
    tempdir = tempfile.mkdtemp ()

    # Wrap the rest of the code in an exception catcher so we can clean up
    # the files.
    try:
        main_2 (args, tempdir, ipc_zip)
    except:
        # Best-effort cleanup, then re-raise the original error.
        try:
            ipc_f.close ()
        except Exception as e:
            print (e)
        try:
            shutil.rmtree (tempdir)
        except Exception as e:
            print (e)
        raise
    else:
        exceptions = []
        try:
            ipc_f.close ()
        except Exception as e:
            exceptions.append (e)
        try:
            shutil.rmtree (tempdir)
        except Exception as e:
            exceptions.append (e)
        for exc in exceptions:
            print (exc)
        if exceptions:
            raise Exception ("Errors occurred.")
def main_2 (args, tempdir, zipfile):
    """Second stage of the conversion.

    Loads the freepcb2kicad converter module from the path given in
    ``args.fp2kicad`` and runs it over the opened library zip.  Any
    exception is allowed to propagate: the caller wraps this call and
    cleans up ``tempdir`` and the open file handles on failure.
    """
    # Import the converter from an arbitrary filesystem path given on the
    # command line.
    converter = imp.load_source ("freepcb2kicad", args.fp2kicad)
    # Forward the accumulated converter options plus the destination
    # directory to the converter's own entry point.
    converter.main (FREEPCB2KICAD_ARGS + [args.dest], zipfile=zipfile)
# Script entry point: run the downloader/converter only when invoked
# directly, not when imported as a module.
if __name__ == "__main__":
    main ()
| iromero91/kicad-pcblib | download_ipc.py | Python | cc0-1.0 | 5,859 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""MetaGraph and related functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import os.path
import re
import six
from google.protobuf.any_pb2 import Any
from google.protobuf import text_format
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import op_def_pb2
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.core.protobuf import saver_pb2
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.eager import context
from tensorflow.python.framework import graph_io
from tensorflow.python.framework import importer
from tensorflow.python.framework import op_def_registry
from tensorflow.python.framework import ops
from tensorflow.python.framework import versions
from tensorflow.python.lib.io import file_io
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import compat
# Prefix to be added to unbound input names so they are easily identifiable.
_UNBOUND_INPUT_PREFIX = "$unbound_inputs_"

# Collections that didn't register proto functions, so in a previously
# exported meta_graph their items may be stored with a different data type;
# importing such a collection triggers a compatibility warning.
_COMPAT_COLLECTION_LIST = [ops.GraphKeys.LOCAL_VARIABLES,
                           ops.GraphKeys.MODEL_VARIABLES]
def _node_def(from_node_def, export_scope, unbound_inputs, clear_devices=False):
  """Create a `NodeDef` proto with export_scope stripped.

  Args:
    from_node_def: A `node_def_pb2.NodeDef` protocol buffer.
    export_scope: A `string` representing the name scope to remove.
    unbound_inputs: An array of unbound input names if they exist.  Mutated
      in place: any input outside `export_scope` is appended here.
    clear_devices: Boolean which controls whether to clear device information
      from node_def. Default false.

  Returns:
    A `node_def_pb2.NodeDef` protocol buffer.
  """
  node_def = copy.deepcopy(from_node_def)
  # Inputs that lie outside the export scope become "unbound" and get the
  # special prefix; inputs inside the scope just have the scope stripped.
  for i, v in enumerate(node_def.input):
    if (export_scope and
        not node_def.input[i].lstrip("^").startswith(export_scope)):
      # Adds "$unbound_inputs_" prefix to the unbound name so they are easily
      # identifiable.  The regex keeps a leading "^" (control-dependency
      # marker) in group 1 and prefixes only the name in group 2.
      node_def.input[i] = re.sub(r"([\^]|^)(.*)",
                                 r"\1" + _UNBOUND_INPUT_PREFIX + r"\2",
                                 compat.as_str(v))
      unbound_inputs.append(node_def.input[i])
    else:
      node_def.input[i] = ops.strip_name_scope(v, export_scope)
  node_def.name = compat.as_bytes(
      ops.strip_name_scope(from_node_def.name, export_scope))
  for k, v in six.iteritems(from_node_def.attr):
    if k == "_class":
      # Colocation constraints (entries presumably of the form b"loc:@op" —
      # the code assumes an "@" is present).  Keep only entries whose target
      # op (the part after "@") lies inside the export scope, and strip the
      # scope from the kept entries.
      new_s = [compat.as_bytes(
          ops.strip_name_scope(s, export_scope)) for s in v.list.s
               if not export_scope or
               compat.as_str(s).split("@")[1].startswith(export_scope)]
      node_def.attr[k].CopyFrom(attr_value_pb2.AttrValue(
          list=attr_value_pb2.AttrValue.ListValue(s=new_s)))
    elif node_def.op in ("Enter", "RefEnter") and k == "frame_name":
      # Control-flow frame names also carry the name scope; strip it when
      # the frame lies inside the export scope.
      if not export_scope or compat.as_str(v.s).startswith(export_scope):
        new_s = compat.as_bytes(ops.strip_name_scope(v.s, export_scope))
        node_def.attr[k].CopyFrom(attr_value_pb2.AttrValue(s=new_s))
    else:
      node_def.attr[k].CopyFrom(v)
  if clear_devices:
    node_def.device = ""
  return node_def
def _read_file(filename):
  """Reads a file containing `GraphDef` and returns the protocol buffer.

  Args:
    filename: `graph_def` filename including the path.

  Returns:
    A `GraphDef` protocol buffer.

  Raises:
    IOError: If the file doesn't exist, or cannot be successfully parsed.
  """
  graph_def = graph_pb2.GraphDef()
  if not file_io.file_exists(filename):
    raise IOError("File %s does not exist." % filename)
  # Read the whole file once, closing the handle promptly instead of leaking
  # it until garbage collection.
  f = file_io.FileIO(filename, "rb")
  try:
    file_content = f.read()
  finally:
    f.close()
  # First try to read it as a binary (wire-format) proto.
  try:
    graph_def.ParseFromString(file_content)
    return graph_def
  except Exception:  # pylint: disable=broad-except
    pass
  # Next try to read it as a text-format proto.  Decode the raw bytes first
  # so this also works under Python 3 (same approach as
  # read_meta_graph_file).
  try:
    text_format.Merge(file_content.decode("utf-8"), graph_def)
  except text_format.ParseError as e:
    raise IOError("Cannot parse file %s: %s." % (filename, str(e)))
  return graph_def
def ops_used_by_graph_def(graph_def):
  """Collect the list of ops used by a graph.

  Does not validate that the ops are all registered.

  Args:
    graph_def: A `GraphDef` proto, as from `graph.as_graph_def()`.

  Returns:
    A list of strings, each naming an op used by the graph.
  """
  # Index function definitions by name so op references can be resolved to
  # function bodies.
  name_to_function = {fn.signature.name: fn
                      for fn in graph_def.library.function}

  # Since functions can reference other functions, we do a recursive
  # (worklist-based) traversal.
  used_ops = set()        # every op name seen: primitive ops and functions
  pending_functions = []  # functions whose bodies still need scanning

  def _mark(op_name):
    if op_name not in used_ops and op_name in name_to_function:
      pending_functions.append(name_to_function[op_name])
    used_ops.add(op_name)

  for node in graph_def.node:
    _mark(node.op)
  while pending_functions:
    fn = pending_functions.pop()
    for node in fn.node_def:
      _mark(node.op)

  # Report only primitive ops; function names are internal to the graph.
  return [op for op in used_ops if op not in name_to_function]
def stripped_op_list_for_graph(graph_def):
  """Collect the stripped OpDefs for ops used by a graph.

  This function computes the `stripped_op_list` field of `MetaGraphDef` and
  similar protos.  The result can be communicated from the producer to the
  consumer, which can then use the C++ function
  `RemoveNewDefaultAttrsFromGraphDef` to improve forwards compatibility.

  Args:
    graph_def: A `GraphDef` proto, as from `graph.as_graph_def()`.

  Returns:
    An `OpList` of ops used by the graph.

  Raises:
    ValueError: If an unregistered op is used.
  """
  # Python analogue of StrippedOpListForGraph in C++.  The Python op
  # registry can differ from the C++ one, so the duplication can't easily
  # be removed via swig.
  # TODO(irving): Support taking graphs directly.
  used_ops = ops_used_by_graph_def(graph_def)

  registered_ops = op_def_registry.get_registered_ops()
  # Internal ops used by functions are not registered, so whitelist them.
  # TODO(irving): Do something better here.
  op_whitelist = ("_Arg", "_Retval", "_ListToArray", "_ArrayToList")
  # Verify that every used op is either registered or whitelisted.
  for op in used_ops:
    if op not in registered_ops and op not in op_whitelist:
      raise ValueError("Op %s is used by the graph, but is not registered" % op)

  # Build the stripped op list in sorted order, skipping whitelisted
  # internal ops (they have no registered OpDef).
  stripped = [registered_ops[op] for op in sorted(used_ops)
              if op in registered_ops]
  return op_def_pb2.OpList(op=stripped)
def _get_kind_name(item):
  """Returns the kind name in CollectionDef.

  Args:
    item: A data item.

  Returns:
    The string representation of the kind in CollectionDef.
  """
  # Check order matters: strings/bytes must be classified before integers
  # and floats, and anything unrecognized falls through to "node_list".
  type_to_kind = (
      ((six.string_types, six.binary_type), "bytes_list"),
      (six.integer_types, "int64_list"),
      (float, "float_list"),
      (Any, "any_list"),
  )
  for classinfo, kind in type_to_kind:
    if isinstance(item, classinfo):
      return kind
  return "node_list"
# Op types that implement checkpoint saving and restoring.  Used by
# _find_extraneous_saver_nodes below to recognize Saver-related nodes in a
# GraphDef.
SAVE_AND_RESTORE_OPS = ["SaveV2",
                        "Save", "SaveSlice",
                        "LegacySave", "LegacySaveSlice",
                        "RestoreV2",
                        "Restore", "RestoreSlice",
                        "LegacyRestore", "LegacyRestoreSlice"]
def _op_name(tensor_name):
  """Extract the Op name from a Tensor name.

  The Op name is everything before the first colon, if present,
  not including any ^ prefix denoting a control dependency.

  Args:
    tensor_name: the full name of a Tensor in the graph.

  Returns:
    The name of the Op of which the given Tensor is an output.

  Raises:
    ValueError: if tensor_name is None or empty.
  """
  if not tensor_name:
    raise ValueError("Tensor name cannot be empty or None.")

  # Control dependency inputs start with ^.
  if tensor_name.startswith("^"):
    tensor_name = tensor_name[1:]
  # `partition` (rather than `split` + tuple unpack) keeps everything before
  # the first colon and cannot raise if a malformed name happens to contain
  # more than one colon.
  return tensor_name.partition(":")[0]
def _get_scope(node_name):
  """Extract the scope name from a node name.

  The scope name is everything before the final slash,
  not including any ^ prefix denoting a control dependency.

  Args:
    node_name: the full name of an Op or a Tensor in the graph.

  Returns:
    The deepest named scope containing the node.

  Raises:
    ValueError: if node_name is None or empty.
  """
  if not node_name:
    raise ValueError("Node name cannot be empty or None.")

  # Strip a leading control-dependency marker.
  if node_name.startswith("^"):
    node_name = node_name[1:]
  # Everything before the last "/" is the scope; a top-level node (no
  # slash) lives in the empty scope, which rpartition yields naturally.
  scope, _, _ = node_name.rpartition("/")
  return scope
def _find_extraneous_saver_nodes(graph_def, saver_def):
  """Identifies any nodes in the graph_def related to unused Savers.

  This approach assumes that each Saver is cleanly isolated in its own name
  scope, so we need only identify the scopes associated with extraneous Savers
  and return all the nodes in those scopes.

  Args:
    graph_def: a GraphDef proto to evaluate.
    saver_def: a SaverDef proto referencing Save/Restore ops to be retained.

  Returns:
    An iterable of node names that may be safely omitted.
  """
  # TODO(soergel): confirm that the assumption of scope isolation is valid.
  # If not, we need to walk up the graph from any restore_all nodes, and walk
  # down the graph from any Save/Restore nodes. I drafted that approach too,
  # but it seems unnecessarily complex given the name scope solution.

  # load the graph DAG in minimal form, without initializing a full Graph object
  # Maps node name -> (set of input op names, op type).  Note only the op
  # type is consulted below; the input sets are currently unused.
  nodes = {node_def.name:
           (set([_op_name(x) for x in node_def.input]), node_def.op)
           for node_def in graph_def.node}

  retain_scope_save = None
  retain_scope_restore = None
  # It's possible to have no saver if the graph has no Variables
  if saver_def is not None:
    save_op_name = _op_name(saver_def.save_tensor_name)
    restore_op_name = _op_name(saver_def.restore_op_name)

    # The save and restore scopes should always be the same, but if they differ
    # for some reason, we retain them both to be safe.
    # The trailing "/" makes the later startswith() prefix tests stop at
    # scope boundaries (e.g. "save/" does not match "save_1/...").
    retain_scope_restore = _get_scope(restore_op_name) + "/"
    retain_scope_save = _get_scope(save_op_name) + "/"

  # All nodes whose op type is a Save/Restore op, and the scopes they live in.
  all_saver_node_names = set([name for name, (_, op) in nodes.items()
                              if op in SAVE_AND_RESTORE_OPS])

  all_saver_scopes = (set([_get_scope(x) for x in all_saver_node_names])
                      - all_saver_node_names)
  all_saver_scopes = set([x + "/" for x in all_saver_scopes])

  # Saver scopes not referenced by the provided saver_def are extraneous.
  extraneous_scopes = all_saver_scopes - set([retain_scope_save,
                                              retain_scope_restore])

  # Every node under an extraneous scope may be omitted from the export.
  extraneous_node_names = set()
  for name, _ in nodes.items():
    for extraneous_scope in extraneous_scopes:
      if name.startswith(extraneous_scope):
        extraneous_node_names.add(name)
        break

  return extraneous_node_names
def _should_include_node(node_or_node_name, export_scope, exclude_nodes):
  """Returns `True` if a node should be included.

  Args:
    node_or_node_name: A node or `string` node name.
    export_scope: `string`. Name scope under which to extract the subgraph. The
      scope name will be stripped from the node definitions for easy import
      later into new name scopes.
    exclude_nodes: An iterable of nodes or `string` node names to omit from the
      export, or None.  Note no sanity-checking is done, so this list must be
      carefully constructed to avoid producing an invalid graph.

  Returns:
    `True` if the node should be included.
  """
  # Normalize to a string name; objects without a .name attribute are
  # conservatively kept.
  if not isinstance(node_or_node_name, six.string_types):
    try:
      node_name = node_or_node_name.name
    except AttributeError:
      # Keep the object that we don't know how to process.
      return True
  else:
    node_name = node_or_node_name

  # The exclusion list may contain either node objects or plain name
  # strings, so check membership under both forms.
  if exclude_nodes and (node_or_node_name in exclude_nodes
                        or node_name in exclude_nodes):
    return False

  # Unbound-input placeholders are always kept; otherwise keep nodes inside
  # the export scope (or everything when no scope was given).
  return (node_name.startswith(_UNBOUND_INPUT_PREFIX) or
          (not export_scope or node_name.startswith(export_scope)))
def add_collection_def(meta_graph_def, key, graph=None,
                       export_scope=None, exclude_nodes=None,
                       override_contents=None):
  """Adds a collection to MetaGraphDef protocol buffer.

  Serialization is best-effort: if the collection's items cannot be
  serialized, a warning is logged and the (possibly partial) entry is
  removed from `meta_graph_def` rather than aborting the export.

  Args:
    meta_graph_def: MetaGraphDef protocol buffer.
    key: One of the GraphKeys or user-defined string.
    graph: The `Graph` from which to get collections.
    export_scope: Optional `string`. Name scope to remove.
    exclude_nodes: An iterable of nodes or `string` node names to omit from the
      collection, or None.
    override_contents: An iterable of values to place in the collection,
      ignoring the current values (if set).
  """
  if graph and not isinstance(graph, ops.Graph):
    raise TypeError("graph must be of type Graph, not %s", type(graph))
  if not isinstance(key, six.string_types) and not isinstance(key, bytes):
    logging.warning("Only collections with string type keys will be "
                    "serialized. This key has %s", type(key))
    return

  # Sets graph to default graph if it's not passed in.
  graph = graph or ops.get_default_graph()

  if override_contents:
    collection_list = override_contents
  else:
    collection_list = graph.get_collection(key)

  # Remove nodes that should not be exported from the collection list.
  collection_list = [x for x in collection_list if
                     _should_include_node(x, export_scope, exclude_nodes)]
  if not collection_list:
    return

  try:
    col_def = meta_graph_def.collection_def[key]
    to_proto = ops.get_to_proto_function(key)
    proto_type = ops.get_collection_proto_type(key)
    if to_proto:
      # Collections with a registered to_proto function are stored as
      # serialized protos in a bytes_list.
      kind = "bytes_list"
      for x in collection_list:
        # Additional type check to make sure the returned proto is indeed
        # what we expect.
        proto = to_proto(x, export_scope=export_scope)
        if proto:
          assert isinstance(proto, proto_type)
          getattr(col_def, kind).value.append(proto.SerializeToString())
    else:
      # No proto function: infer the storage kind from the first item.
      kind = _get_kind_name(collection_list[0])
      if kind == "node_list":
        for x in collection_list:
          if not export_scope or x.name.startswith(export_scope):
            getattr(col_def, kind).value.append(
                ops.strip_name_scope(x.name, export_scope))
      elif kind == "bytes_list":
        # NOTE(opensource): This force conversion is to work around the fact
        # that Python3 distinguishes between bytes and strings.
        getattr(col_def, kind).value.extend(
            [compat.as_bytes(x) for x in collection_list])
      else:
        getattr(col_def, kind).value.extend([x for x in collection_list])
  except Exception as e:  # pylint: disable=broad-except
    logging.warning("Error encountered when serializing %s.\n"
                    "Type is unsupported, or the types of the items don't "
                    "match field type in CollectionDef.\n%s", key, str(e))
    if key in meta_graph_def.collection_def:
      del meta_graph_def.collection_def[key]
    return
def _is_default_attr_value(op_def, attr_name, attr_value):
  """Checks if given attribute matches the default value in the op def."""
  for attr_def in op_def.attr:
    if attr_def.name != attr_name:
      continue
    if not attr_def.HasField("default_value"):
      # The op declares no default for this attribute, so nothing matches.
      return False
    # pywrap_tensorflow.EqualAttrValueWrapper returns an empty string
    # if both arguments represent an equivalent AttrValue instance.
    return not pywrap_tensorflow.EqualAttrValueWrapper(
        attr_value.SerializeToString(),
        attr_def.default_value.SerializeToString())
  # The attribute is not declared on this op at all.
  return False
def _strip_graph_default_valued_attrs(meta_graph_def):
  """Strips default valued attributes for node defs in given MetaGraphDef.

  This method also sets `meta_info_def.stripped_default_attrs` in the given
  `MetaGraphDef` proto to True.

  Args:
    meta_graph_def: `MetaGraphDef` protocol buffer

  Returns:
    None.
  """
  # Map function op names to their function definitions.
  op_name_to_function = {}
  for function_def in meta_graph_def.graph_def.library.function:
    op_name_to_function[function_def.signature.name] = function_def

  # Get all registered ops.
  registered_ops = op_def_registry.get_registered_ops()

  def _strip_node_default_valued_attrs(node_def):
    """Removes default valued attributes from a single node def."""
    # Function calls and unregistered ops have no known OpDef to compare
    # against, so leave them untouched.
    if node_def.op in op_name_to_function or node_def.op not in registered_ops:
      return
    op_def = registered_ops[node_def.op]

    # Collect first, then delete, to avoid mutating the attr map while
    # iterating over it.
    attrs_to_strip = set()
    for attr_name, attr_value in node_def.attr.items():
      if _is_default_attr_value(op_def, attr_name, attr_value):
        attrs_to_strip.add(attr_name)

    for attr in attrs_to_strip:
      del node_def.attr[attr]

  # Process all NodeDef instances in graph_def.
  for node_def in meta_graph_def.graph_def.node:
    _strip_node_default_valued_attrs(node_def)

  # Process all NodeDef instances in graph_def.library.function.
  for function_def in meta_graph_def.graph_def.library.function:
    for function_node_def in function_def.node_def:
      _strip_node_default_valued_attrs(function_node_def)

  # Tell consumers of this graph that default valued attrs have been stripped.
  meta_graph_def.meta_info_def.stripped_default_attrs = True
def create_meta_graph_def(meta_info_def=None,
                          graph_def=None,
                          saver_def=None,
                          collection_list=None,
                          graph=None,
                          export_scope=None,
                          exclude_nodes=None,
                          clear_extraneous_savers=False,
                          strip_default_attrs=False):
  # pylint: disable=line-too-long
  """Construct and returns a `MetaGraphDef` protocol buffer.

  Args:
    meta_info_def: `MetaInfoDef` protocol buffer.
    graph_def: `GraphDef` protocol buffer.
    saver_def: `SaverDef` protocol buffer.
    collection_list: List of string keys to collect.
    graph: The `Graph` to create `MetaGraphDef` out of.
    export_scope: Optional `string`. Name scope to remove.
    exclude_nodes: An iterable of nodes or `string` node names to omit from all
      collection, or None.
    clear_extraneous_savers: Remove any preexisting SaverDefs from the SAVERS
        collection.  Note this method does not alter the graph, so any
        extraneous Save/Restore ops should have been removed already, as needed.
    strip_default_attrs: Boolean. If `True`, default-valued attributes will be
        removed from the NodeDefs. For a detailed guide, see
        [Stripping Default-Valued Attributes](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/saved_model/README.md#stripping-default-valued-attributes).

  Returns:
    MetaGraphDef protocol buffer.

  Raises:
    TypeError: If the arguments are not of the correct proto buffer type.
  """
  # pylint: enable=line-too-long
  # Type check.
  if graph and not isinstance(graph, ops.Graph):
    raise TypeError("graph must be of type Graph, not %s", type(graph))
  if meta_info_def and not isinstance(meta_info_def,
                                      meta_graph_pb2.MetaGraphDef.MetaInfoDef):
    raise TypeError("meta_info_def must be of type MetaInfoDef, not %s",
                    type(meta_info_def))
  if graph_def and not isinstance(graph_def, graph_pb2.GraphDef):
    raise TypeError("graph_def must be of type GraphDef, not %s",
                    type(graph_def))
  if saver_def and not isinstance(saver_def, saver_pb2.SaverDef):
    raise TypeError("saver_def must be of type SaverDef, not %s",
                    type(saver_def))

  # Sets graph to default graph if it's not passed in.
  graph = graph or ops.get_default_graph()

  # Creates a MetaGraphDef proto.
  meta_graph_def = meta_graph_pb2.MetaGraphDef()
  # Adds meta_info_def.
  if not meta_info_def:
    meta_info_def = meta_graph_pb2.MetaGraphDef.MetaInfoDef()

  # Set the tf version strings to the current tf build.
  meta_info_def.tensorflow_version = versions.__version__
  meta_info_def.tensorflow_git_version = versions.__git_version__
  meta_graph_def.meta_info_def.MergeFrom(meta_info_def)

  # Adds graph_def or the default.
  if not graph_def:
    meta_graph_def.graph_def.MergeFrom(graph.as_graph_def(add_shapes=True))
  else:
    meta_graph_def.graph_def.MergeFrom(graph_def)

  # Fills in meta_info_def.stripped_op_list using the ops from graph_def.
  # pylint: disable=g-explicit-length-test
  if len(meta_graph_def.meta_info_def.stripped_op_list.op) == 0:
    meta_graph_def.meta_info_def.stripped_op_list.MergeFrom(
        stripped_op_list_for_graph(meta_graph_def.graph_def))
  # pylint: enable=g-explicit-length-test

  # Strip default valued attributes in graph_def.
  if strip_default_attrs:
    _strip_graph_default_valued_attrs(meta_graph_def)

  # Adds saver_def.
  if saver_def:
    meta_graph_def.saver_def.MergeFrom(saver_def)

  # Adds collection_list.
  if collection_list is not None:
    clist = collection_list
  else:
    clist = graph.get_all_collection_keys()

  # Serialize each requested collection into the proto.  When clearing
  # extraneous savers, the SAVERS collection is overridden to contain only
  # the provided saver_def.
  for ctype in clist:
    if clear_extraneous_savers and ctype == ops.GraphKeys.SAVERS:
      # Avoid importing Saver here
      from_proto = ops.get_from_proto_function(ctype)
      add_collection_def(meta_graph_def, ctype,
                         graph=graph,
                         export_scope=export_scope,
                         exclude_nodes=exclude_nodes,
                         override_contents=[from_proto(saver_def)])
    else:
      add_collection_def(meta_graph_def, ctype,
                         graph=graph,
                         export_scope=export_scope,
                         exclude_nodes=exclude_nodes)
  return meta_graph_def
def read_meta_graph_file(filename):
  """Reads a file containing `MetaGraphDef` and returns the protocol buffer.

  Args:
    filename: `meta_graph_def` filename including the path.

  Returns:
    A `MetaGraphDef` protocol buffer.

  Raises:
    IOError: If the file doesn't exist, or cannot be successfully parsed.
  """
  meta_graph_def = meta_graph_pb2.MetaGraphDef()
  if not file_io.file_exists(filename):
    raise IOError("File %s does not exist." % filename)
  # Read the whole file once, closing the handle promptly instead of leaking
  # it until garbage collection (matches the fix in _read_file above).
  f = file_io.FileIO(filename, "rb")
  try:
    file_content = f.read()
  finally:
    f.close()
  # First try to read it as a binary (wire-format) proto.
  try:
    meta_graph_def.ParseFromString(file_content)
    return meta_graph_def
  except Exception:  # pylint: disable=broad-except
    pass
  # Next try to read it as a text-format proto; decode the raw bytes first
  # so this also works under Python 3.
  try:
    text_format.Merge(file_content.decode("utf-8"), meta_graph_def)
  except text_format.ParseError as e:
    raise IOError("Cannot parse file %s: %s." % (filename, str(e)))
  return meta_graph_def
def import_scoped_meta_graph(meta_graph_or_file,
                             clear_devices=False,
                             graph=None,
                             import_scope=None,
                             input_map=None,
                             unbound_inputs_col_name="unbound_inputs",
                             restore_collections_predicate=(lambda key: True)):
  """Recreates a `Graph` saved in a `MetaGraphDef` proto.

  This function takes a `MetaGraphDef` protocol buffer as input. If
  the argument is a file containing a `MetaGraphDef` protocol buffer ,
  it constructs a protocol buffer from the file content. The function
  then adds all the nodes from the `graph_def` field to the
  current graph, recreates the desired collections, and returns a dictionary of
  all the Variables imported into the name scope.

  In combination with `export_scoped_meta_graph()`, this function can be used to

  * Serialize a graph along with other Python objects such as `QueueRunner`,
    `Variable` into a `MetaGraphDef`.

  * Restart training from a saved graph and checkpoints.

  * Run inference from a saved graph and checkpoints.

  Args:
    meta_graph_or_file: `MetaGraphDef` protocol buffer or filename (including
      the path) containing a `MetaGraphDef`.
    clear_devices: Boolean which controls whether to clear device information
      from graph_def. Default false.
    graph: The `Graph` to import into. If `None`, use the default graph.
    import_scope: Optional `string`. Name scope into which to import the
      subgraph. If `None`, the graph is imported to the root name scope.
    input_map: A dictionary mapping input names (as strings) in `graph_def` to
      `Tensor` objects. The values of the named input tensors in the imported
      graph will be re-mapped to the respective `Tensor` values.
    unbound_inputs_col_name: Collection name for looking up unbound inputs.
    restore_collections_predicate: a predicate on collection names. A collection
      named c (i.e whose key is c) will be restored iff
      1) `restore_collections_predicate(c)` is True, and
      2) `c != unbound_inputs_col_name`.

  Returns:
    A dictionary of all the `Variables` imported into the name scope.

  Raises:
    ValueError: If the graph_def contains unbound inputs.
  """
  if context.executing_eagerly():
    raise ValueError("Exporting/importing meta graphs is not supported when "
                     "eager execution is enabled.")
  if isinstance(meta_graph_or_file, meta_graph_pb2.MetaGraphDef):
    meta_graph_def = meta_graph_or_file
  else:
    meta_graph_def = read_meta_graph_file(meta_graph_or_file)

  # Fail early if the exported graph has unbound inputs that the caller's
  # input_map does not cover.
  if unbound_inputs_col_name:
    for key, col_def in meta_graph_def.collection_def.items():
      if key == unbound_inputs_col_name:
        kind = col_def.WhichOneof("kind")
        field = getattr(col_def, kind)
        if field.value and (
            not input_map or
            sorted([compat.as_str(v) for v in field.value]) !=
            sorted(input_map)):
          raise ValueError("Graph contains unbound inputs: %s. Must "
                           "provide these inputs through input_map." %
                           ",".join([compat.as_str(v) for v in field.value
                                     if not input_map or v not in input_map]))
        break

  # Sets graph to default graph if it's not passed in.
  graph = graph or ops.get_default_graph()

  # Gathers the list of nodes we are interested in.
  with graph.as_default():
    producer_op_list = None
    if meta_graph_def.meta_info_def.HasField("stripped_op_list"):
      producer_op_list = meta_graph_def.meta_info_def.stripped_op_list
    input_graph_def = meta_graph_def.graph_def
    # Remove all the explicit device specifications for this node. This helps to
    # make the graph more portable.
    if clear_devices:
      for node in input_graph_def.node:
        node.device = ""

    scope_to_prepend_to_names = graph.unique_name(
        import_scope or "", mark_as_used=False)

    importer.import_graph_def(
        input_graph_def, name=(import_scope or ""), input_map=input_map,
        producer_op_list=producer_op_list)

    # Restores all the other collections.
    # Cache deserialized Variables keyed by their serialized bytes so a
    # Variable shared across several collections is reconstructed only once.
    variable_objects = {}
    for key, col_def in sorted(meta_graph_def.collection_def.items()):
      # Don't add unbound_inputs to the new graph.
      if key == unbound_inputs_col_name:
        continue
      if not restore_collections_predicate(key):
        continue

      kind = col_def.WhichOneof("kind")
      if kind is None:
        logging.error("Cannot identify data type for collection %s. Skipping.",
                      key)
        continue
      from_proto = ops.get_from_proto_function(key)
      if from_proto and kind == "bytes_list":
        proto_type = ops.get_collection_proto_type(key)
        if key in ops.GraphKeys._VARIABLE_COLLECTIONS:  # pylint: disable=protected-access
          for value in col_def.bytes_list.value:
            variable = variable_objects.get(value, None)
            if variable is None:
              proto = proto_type()
              proto.ParseFromString(value)
              variable = from_proto(
                  proto, import_scope=scope_to_prepend_to_names)
              variable_objects[value] = variable
            graph.add_to_collection(key, variable)
        else:
          for value in col_def.bytes_list.value:
            proto = proto_type()
            proto.ParseFromString(value)
            graph.add_to_collection(
                key, from_proto(
                    proto, import_scope=scope_to_prepend_to_names))
      else:
        field = getattr(col_def, kind)
        if key in _COMPAT_COLLECTION_LIST:
          logging.warning(
              "The saved meta_graph is possibly from an older release:\n"
              "'%s' collection should be of type 'byte_list', but instead "
              "is of type '%s'.", key, kind)
        if kind == "node_list":
          for value in field.value:
            col_op = graph.as_graph_element(
                ops.prepend_name_scope(value, scope_to_prepend_to_names))
            graph.add_to_collection(key, col_op)
        elif kind == "int64_list":
          # NOTE(opensource): This force conversion is to work around the fact
          # that Python2 distinguishes between int and long, while Python3 has
          # only int.
          for value in field.value:
            graph.add_to_collection(key, int(value))
        else:
          for value in field.value:
            graph.add_to_collection(
                key, ops.prepend_name_scope(value, scope_to_prepend_to_names))

    # Collect the imported Variables, keyed by their scope-stripped names.
    var_list = {}
    variables = graph.get_collection(ops.GraphKeys.GLOBAL_VARIABLES,
                                     scope=scope_to_prepend_to_names)
    for v in variables:
      var_list[ops.strip_name_scope(v.name, scope_to_prepend_to_names)] = v

  return var_list
def export_scoped_meta_graph(filename=None,
                             graph_def=None,
                             graph=None,
                             export_scope=None,
                             as_text=False,
                             unbound_inputs_col_name="unbound_inputs",
                             clear_devices=False,
                             saver_def=None,
                             clear_extraneous_savers=False,
                             strip_default_attrs=False,
                             **kwargs):
  """Returns `MetaGraphDef` proto. Optionally writes it to filename.

  This function exports the graph, saver, and collection objects into
  `MetaGraphDef` protocol buffer with the intention of it being imported
  at a later time or location to restart training, run inference, or be
  a subgraph.

  Args:
    filename: Optional filename including the path for writing the
      generated `MetaGraphDef` protocol buffer.
    graph_def: `GraphDef` protocol buffer.
    graph: The `Graph` to export. If `None`, use the default graph.
    export_scope: Optional `string`. Name scope under which to extract
      the subgraph. The scope name will be stripped from the node definitions
      for easy import later into new name scopes. If `None`, the whole graph
      is exported.
    as_text: If `True`, writes the `MetaGraphDef` as an ASCII proto.
    unbound_inputs_col_name: Optional `string`. If provided, a string collection
      with the given name will be added to the returned `MetaGraphDef`,
      containing the names of tensors that must be remapped when importing the
      `MetaGraphDef`.
    clear_devices: Boolean which controls whether to clear device information
      before exporting the graph.
    saver_def: `SaverDef` protocol buffer.
    clear_extraneous_savers: Remove any Saver-related information from the
      graph (both Save/Restore ops and SaverDefs) that are not associated
      with the provided SaverDef.
    strip_default_attrs: Set to true if default valued attributes must be
      removed while exporting the GraphDef.
    **kwargs: Optional keyed arguments, including meta_info_def and
      collection_list.

  Returns:
    A `MetaGraphDef` proto and dictionary of `Variables` in the exported
    name scope.

  Raises:
    ValueError: When the `GraphDef` is larger than 2GB.
  """
  if context.executing_eagerly():
    raise ValueError("Exporting/importing meta graphs is not supported when "
                     "Eager Execution is enabled.")
  graph = graph or ops.get_default_graph()

  exclude_nodes = None
  unbound_inputs = []
  if export_scope or clear_extraneous_savers or clear_devices:
    if graph_def:
      # Rebuild the caller-supplied graph_def keeping only the nodes that
      # survive scope filtering / saver pruning.
      new_graph_def = graph_pb2.GraphDef()
      new_graph_def.versions.CopyFrom(graph_def.versions)
      new_graph_def.library.CopyFrom(graph_def.library)

      if clear_extraneous_savers:
        exclude_nodes = _find_extraneous_saver_nodes(graph_def, saver_def)

      for node_def in graph_def.node:
        if _should_include_node(node_def.name, export_scope, exclude_nodes):
          new_node_def = _node_def(node_def, export_scope, unbound_inputs,
                                   clear_devices=clear_devices)
          new_graph_def.node.extend([new_node_def])
      graph_def = new_graph_def
    else:
      # Only do this complicated work if we want to remove a name scope.
      graph_def = graph_pb2.GraphDef()
      # pylint: disable=protected-access
      graph_def.versions.CopyFrom(graph.graph_def_versions)
      bytesize = 0

      if clear_extraneous_savers:
        exclude_nodes = _find_extraneous_saver_nodes(graph.as_graph_def(),
                                                     saver_def)

      for key in sorted(graph._nodes_by_id):
        if _should_include_node(graph._nodes_by_id[key].name,
                                export_scope,
                                exclude_nodes):
          value = graph._nodes_by_id[key]
          # pylint: enable=protected-access
          node_def = _node_def(value.node_def, export_scope, unbound_inputs,
                               clear_devices=clear_devices)
          graph_def.node.extend([node_def])
          if value.outputs:
            assert "_output_shapes" not in graph_def.node[-1].attr
            graph_def.node[-1].attr["_output_shapes"].list.shape.extend([
                output.get_shape().as_proto() for output in value.outputs])
          # Track the accumulated proto size: serialized protos are capped
          # at 2GB, so fail fast with a clear error instead of overflowing.
          bytesize += value.node_def.ByteSize()
          if bytesize >= (1 << 31) or bytesize < 0:
            raise ValueError("GraphDef cannot be larger than 2GB.")

      graph._copy_functions_to_graph_def(graph_def, bytesize)  # pylint: disable=protected-access

    # It's possible that not all the inputs are in the export_scope.
    # If we would like such information included in the exported meta_graph,
    # add them to a special unbound_inputs collection.
    if unbound_inputs_col_name:
      # Clears the unbound_inputs collections.
      graph.clear_collection(unbound_inputs_col_name)
      for k in unbound_inputs:
        graph.add_to_collection(unbound_inputs_col_name, k)

  # Collect the Variables visible in the exported scope, keyed by their
  # scope-stripped names.
  var_list = {}
  variables = graph.get_collection(ops.GraphKeys.GLOBAL_VARIABLES,
                                   scope=export_scope)
  for v in variables:
    if _should_include_node(v, export_scope, exclude_nodes):
      var_list[ops.strip_name_scope(v.name, export_scope)] = v

  scoped_meta_graph_def = create_meta_graph_def(
      graph_def=graph_def,
      graph=graph,
      export_scope=export_scope,
      exclude_nodes=exclude_nodes,
      clear_extraneous_savers=clear_extraneous_savers,
      saver_def=saver_def,
      strip_default_attrs=strip_default_attrs,
      **kwargs)

  if filename:
    graph_io.write_graph(
        scoped_meta_graph_def,
        os.path.dirname(filename),
        os.path.basename(filename),
        as_text=as_text)

  return scoped_meta_graph_def, var_list
def copy_scoped_meta_graph(from_scope, to_scope,
                           from_graph=None, to_graph=None):
  """Copies a sub-meta_graph from one scope to another.

  Args:
    from_scope: `String` name scope containing the subgraph to be copied.
    to_scope: `String` name scope under which the copied subgraph will reside.
    from_graph: Optional `Graph` from which to copy the subgraph. If `None`, the
      default graph is used.
    to_graph: Optional `Graph` to which to copy the subgraph. If `None`, the
      default graph is used.

  Returns:
    A dictionary of `Variables` that has been copied into `to_scope`.

  Raises:
    ValueError: If `from_scope` and `to_scope` are the same while
      `from_graph` and `to_graph` are also the same.
  """
  from_graph = from_graph or ops.get_default_graph()
  to_graph = to_graph or ops.get_default_graph()
  # Copying a scope onto itself within a single graph would collide with the
  # nodes already present there, so reject that combination up front.
  if from_graph == to_graph and from_scope == to_scope:
    raise ValueError("'from_scope' and 'to_scope' need to be different "
                     "when performing copy in the same graph.")
  # Export only the sub-meta_graph under `from_scope`.  The variable map
  # produced by the export refers to the *source* scope and is not needed;
  # the import below builds the map for the copied (`to_scope`) variables.
  orig_meta_graph, _ = export_scoped_meta_graph(
      export_scope=from_scope, graph=from_graph)
  var_list = import_scoped_meta_graph(orig_meta_graph,
                                      graph=to_graph,
                                      import_scope=to_scope)
  return var_list
| Xeralux/tensorflow | tensorflow/python/framework/meta_graph.py | Python | apache-2.0 | 38,066 |
# Copyright 2013 Embrane, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from heleosapi import backend_operations as h_op
from heleosapi import constants as h_con
from heleosapi import exceptions as h_exc
from oslo.config import cfg
from sqlalchemy.orm import exc
from neutron.common import constants as l3_constants
from neutron.common import exceptions as neutron_exc
from neutron.db import extraroute_db
from neutron.db import l3_db
from neutron.db import models_v2
from neutron.extensions import l3
from neutron.i18n import _LE
from neutron.openstack.common import log as logging
from neutron.plugins.embrane.agent import dispatcher
from neutron.plugins.embrane.common import config # noqa
from neutron.plugins.embrane.common import constants as p_con
from neutron.plugins.embrane.common import contexts as embrane_ctx
from neutron.plugins.embrane.common import operation
from neutron.plugins.embrane.common import utils
LOG = logging.getLogger(__name__)
conf = cfg.CONF.heleos
class EmbranePlugin(object):
    """Embrane Neutron plugin.
    uses the heleos(c) platform and a support L2 plugin to leverage networking
    in cloud environments.

    Every L3 operation first goes through the stock Neutron DB mixin
    (``_l3super``) and is then forwarded to the heleos backend through an
    asynchronous dispatcher, which updates the router's DB status as the
    backend operation progresses.
    """
    # Stock Neutron L3 + extraroute DB mixin.  Its methods are invoked
    # unbound (passing ``self`` explicitly) so each DB operation can be
    # wrapped with the ESM/dispatcher orchestration below.
    _l3super = extraroute_db.ExtraRoute_db_mixin

    def __init__(self):
        pass

    def _run_embrane_config(self):
        """Read the heleos config section and build the ESM API client
        and the (optionally asynchronous) operation dispatcher."""
        # read configurations
        config_esm_mgmt = conf.esm_mgmt
        config_admin_username = conf.admin_username
        config_admin_password = conf.admin_password
        config_router_image_id = conf.router_image
        # Security zone ids keyed by zone type (in-band, out-of-band,
        # management, dummy).
        config_security_zones = {h_con.SzType.IB: conf.inband_id,
                                 h_con.SzType.OOB: conf.oob_id,
                                 h_con.SzType.MGMT: conf.mgmt_id,
                                 h_con.SzType.DUMMY: conf.dummy_utif_id}
        config_resource_pool = conf.resource_pool_id
        self._embrane_async = conf.async_requests
        self._esm_api = h_op.BackendOperations(
            esm_mgmt=config_esm_mgmt,
            admin_username=config_admin_username,
            admin_password=config_admin_password,
            router_image_id=config_router_image_id,
            security_zones=config_security_zones,
            resource_pool=config_resource_pool)
        self._dispatcher = dispatcher.Dispatcher(self, self._embrane_async)

    def _make_router_dict(self, *args, **kwargs):
        # Plain delegation to the DB mixin's router serializer.
        return self._l3super._make_router_dict(self, *args, **kwargs)

    def _delete_router(self, context, router_id):
        # DB-only deletion (no backend call); used by the dispatcher once the
        # backend DVA has been torn down.
        self._l3super.delete_router(self, context, router_id)

    def _update_db_router_state(self, context, neutron_router, dva_state):
        """Map a backend DVA power state onto a Neutron router status and
        persist it.  Returns the new status."""
        if not dva_state:
            new_state = p_con.Status.ERROR
        elif dva_state == h_con.DvaState.POWER_ON:
            new_state = p_con.Status.ACTIVE
        else:
            new_state = p_con.Status.READY
        self._set_db_router_state(context, neutron_router, new_state)
        return new_state

    def _set_db_router_state(self, context, neutron_router, new_state):
        return utils.set_db_item_state(context, neutron_router, new_state)

    def _update_db_interfaces_state(self, context, neutron_router):
        """Refresh the status of every port owned by the router from the
        backend, then persist the new statuses."""
        router_ports = self.get_ports(context,
                                      {"device_id": [neutron_router["id"]]})
        # update_ports_status mutates router_ports in place with the
        # backend-reported status for each port.
        self._esm_api.update_ports_status(neutron_router["id"], router_ports)
        for port in router_ports:
            db_port = self._get_port(context, port["id"])
            db_port["status"] = port["status"]
            context.session.merge(db_port)

    def _update_neutron_state(self, context, neutron_router, state):
        """Best-effort interface refresh, then set the router state."""
        try:
            self._update_db_interfaces_state(context, neutron_router)
        except Exception:
            # Interface refresh is non-critical: log and still record the
            # router state below.
            LOG.exception(_LE("Unhandled exception occurred"))
        return self._set_db_router_state(context, neutron_router, state)

    def _retrieve_prefix_from_port(self, context, neutron_port):
        """Return the CIDR prefix length (as a string) of the subnet backing
        the port's first fixed IP."""
        subnet_id = neutron_port["fixed_ips"][0]["subnet_id"]
        subnet = utils.retrieve_subnet(context, subnet_id)
        prefix = subnet["cidr"].split("/")[1]
        return prefix

    # L3 extension
    def create_router(self, context, router):
        """Create the router in the DB, then dispatch the backend DVA
        creation; the router starts in CREATING state."""
        r = router["router"]
        self._get_tenant_id_for_create(context, r)
        db_router = self._l3super.create_router(self, context, router)
        neutron_router = self._get_router(context, db_router['id'])
        gw_port = neutron_router.gw_port
        # For now, only small flavor is used
        # NOTE(review): _plugin_support is assumed to be provided by the
        # concrete plugin that mixes this class in -- confirm.
        utif_info = (self._plugin_support.retrieve_utif_info(context,
                                                             gw_port)
                     if gw_port else None)
        ip_allocation_info = (utils.retrieve_ip_allocation_info(context,
                                                                gw_port)
                              if gw_port else None)
        neutron_router = self._l3super._get_router(self, context,
                                                   neutron_router["id"])
        neutron_router["status"] = p_con.Status.CREATING
        self._dispatcher.dispatch_l3(
            d_context=embrane_ctx.DispatcherContext(
                p_con.Events.CREATE_ROUTER, neutron_router, context, None),
            args=(h_con.Flavor.SMALL, utif_info, ip_allocation_info))
        return self._make_router_dict(neutron_router)

    def update_router(self, context, id, router):
        """Update the router in the DB, then dispatch the backend update
        (gateway, IP allocation and extra routes)."""
        db_router = self._l3super.update_router(self, context, id, router)
        neutron_router = self._get_router(context, db_router['id'])
        gw_port = neutron_router.gw_port
        utif_info = (self._plugin_support.retrieve_utif_info(context,
                                                             gw_port)
                     if gw_port else None)
        ip_allocation_info = (utils.retrieve_ip_allocation_info(context,
                                                                gw_port)
                              if gw_port else None)
        routes_info = router["router"].get("routes")
        neutron_router = self._l3super._get_router(self, context, id)
        # State is flipped to UPDATING only when the dispatcher actually
        # starts the backend operation.
        state_change = operation.Operation(
            self._set_db_router_state,
            args=(context, neutron_router, p_con.Status.UPDATING))
        self._dispatcher.dispatch_l3(
            d_context=embrane_ctx.DispatcherContext(
                p_con.Events.UPDATE_ROUTER, neutron_router, context,
                state_change),
            args=(utif_info, ip_allocation_info, routes_info))
        return self._make_router_dict(neutron_router)

    def get_router(self, context, id, fields=None):
        """Ensures that id does exist in the ESM."""
        neutron_router = self._get_router(context, id)
        try:
            # Routers still being created may legitimately have no DVA yet.
            if neutron_router["status"] != p_con.Status.CREATING:
                self._esm_api.get_dva(id)
        except h_exc.DvaNotFound:
            # DB row without a backend counterpart: mark it as ERROR.
            LOG.error(_LE("The following routers have not physical match: %s"),
                      id)
            self._set_db_router_state(context, neutron_router,
                                      p_con.Status.ERROR)
        LOG.debug("Requested router: %s", neutron_router)
        return self._make_router_dict(neutron_router, fields)

    def get_routers(self, context, filters=None, fields=None, sorts=None,
                    limit=None, marker=None, page_reverse=False):
        """Retrieves the router list defined by the incoming filters."""
        router_query = self._apply_filters_to_query(
            self._model_query(context, l3_db.Router),
            l3_db.Router, filters)
        # Routers still in CREATING are excluded from the backend check.
        id_list = [x["id"] for x in router_query
                   if x["status"] != p_con.Status.CREATING]
        try:
            self._esm_api.get_dvas(id_list)
        except h_exc.DvaNotFound:
            LOG.error(_LE("The following routers have not physical match: %s"),
                      repr(id_list))
            error_routers = []
            for id in id_list:
                try:
                    error_routers.append(self._get_router(context, id))
                except l3.RouterNotFound:
                    # Router removed concurrently: nothing to flag.
                    pass
            for error_router in error_routers:
                self._set_db_router_state(context, error_router,
                                          p_con.Status.ERROR)
        return [self._make_router_dict(router, fields)
                for router in router_query]

    def delete_router(self, context, id):
        """Deletes the DVA with the specific router id."""
        # Copy of the parent validation code, shouldn't the base modules
        # provide functions for validating operations?
        device_owner_router_intf = l3_constants.DEVICE_OWNER_ROUTER_INTF
        fips = self.get_floatingips_count(context.elevated(),
                                          filters={"router_id": [id]})
        if fips:
            raise l3.RouterInUse(router_id=id)
        device_filter = {"device_id": [id],
                         "device_owner": [device_owner_router_intf]}
        ports = self.get_ports_count(context.elevated(),
                                     filters=device_filter)
        if ports:
            raise l3.RouterInUse(router_id=id)
        neutron_router = self._get_router(context, id)
        state_change = operation.Operation(self._set_db_router_state,
                                           args=(context, neutron_router,
                                                 p_con.Status.DELETING))
        # DB deletion happens later, driven by the dispatcher, once the
        # backend DVA has been removed.
        self._dispatcher.dispatch_l3(
            d_context=embrane_ctx.DispatcherContext(
                p_con.Events.DELETE_ROUTER, neutron_router, context,
                state_change), args=())
        LOG.debug("Deleting router=%s", neutron_router)
        return neutron_router

    def add_router_interface(self, context, router_id, interface_info):
        """Grows DVA interface in the specified subnet."""
        neutron_router = self._get_router(context, router_id)
        rport_qry = context.session.query(models_v2.Port)
        ports = rport_qry.filter_by(
            device_id=router_id).all()
        # The backend appliance supports a bounded number of interfaces.
        if len(ports) >= p_con.UTIF_LIMIT:
            raise neutron_exc.BadRequest(
                resource=router_id,
                msg=("this router doesn't support more than "
                     + str(p_con.UTIF_LIMIT) + " interfaces"))
        neutron_router_iface = self._l3super.add_router_interface(
            self, context, router_id, interface_info)
        port = self._get_port(context, neutron_router_iface["port_id"])
        utif_info = self._plugin_support.retrieve_utif_info(context, port)
        ip_allocation_info = utils.retrieve_ip_allocation_info(context,
                                                               port)
        state_change = operation.Operation(self._set_db_router_state,
                                           args=(context, neutron_router,
                                                 p_con.Status.UPDATING))
        self._dispatcher.dispatch_l3(
            d_context=embrane_ctx.DispatcherContext(
                p_con.Events.GROW_ROUTER_IF, neutron_router, context,
                state_change),
            args=(utif_info, ip_allocation_info))
        return neutron_router_iface

    def remove_router_interface(self, context, router_id, interface_info):
        """Shrink the DVA by removing the interface identified either by
        port id or by subnet id."""
        port_id = None
        if "port_id" in interface_info:
            port_id = interface_info["port_id"]
        elif "subnet_id" in interface_info:
            # Resolve the router port attached to the given subnet.
            subnet_id = interface_info["subnet_id"]
            subnet = utils.retrieve_subnet(context, subnet_id)
            rport_qry = context.session.query(models_v2.Port)
            ports = rport_qry.filter_by(
                device_id=router_id,
                device_owner=l3_constants.DEVICE_OWNER_ROUTER_INTF,
                network_id=subnet["network_id"])
            for p in ports:
                if p["fixed_ips"][0]["subnet_id"] == subnet_id:
                    port_id = p["id"]
                    break
        neutron_router = self._get_router(context, router_id)
        self._l3super.remove_router_interface(self, context, router_id,
                                              interface_info)
        state_change = operation.Operation(self._set_db_router_state,
                                           args=(context, neutron_router,
                                                 p_con.Status.UPDATING))
        self._dispatcher.dispatch_l3(
            d_context=embrane_ctx.DispatcherContext(
                p_con.Events.SHRINK_ROUTER_IF, neutron_router, context,
                state_change),
            args=(port_id,))

    def create_floatingip(self, context, floatingip):
        """Create the floating IP; when it is already associated to a port,
        also push the NAT rule to the backend."""
        result = self._l3super.create_floatingip(
            self, context, floatingip)
        if result["port_id"]:
            neutron_router = self._get_router(context, result["router_id"])
            db_fixed_port = self._get_port(context, result["port_id"])
            fixed_prefix = self._retrieve_prefix_from_port(context,
                                                           db_fixed_port)
            db_floating_port = neutron_router["gw_port"]
            floating_prefix = self._retrieve_prefix_from_port(
                context, db_floating_port)
            nat_info = utils.retrieve_nat_info(context, result,
                                               fixed_prefix,
                                               floating_prefix,
                                               neutron_router)
            state_change = operation.Operation(
                self._set_db_router_state,
                args=(context, neutron_router, p_con.Status.UPDATING))
            self._dispatcher.dispatch_l3(
                d_context=embrane_ctx.DispatcherContext(
                    p_con.Events.SET_NAT_RULE, neutron_router, context,
                    state_change),
                args=(nat_info,))
        return result

    def update_floatingip(self, context, id, floatingip):
        """Update the floating IP.  Removes the old NAT rule when the
        association changed, and installs a new rule when the result is
        associated to a port."""
        db_fip = self._l3super.get_floatingip(self, context, id)
        result = self._l3super.update_floatingip(self, context, id,
                                                 floatingip)
        # Association moved away from the previous port: reset the old rule.
        if db_fip["port_id"] and db_fip["port_id"] != result["port_id"]:
            neutron_router = self._get_router(context, db_fip["router_id"])
            fip_id = db_fip["id"]
            state_change = operation.Operation(
                self._set_db_router_state,
                args=(context, neutron_router, p_con.Status.UPDATING))
            self._dispatcher.dispatch_l3(
                d_context=embrane_ctx.DispatcherContext(
                    p_con.Events.RESET_NAT_RULE, neutron_router, context,
                    state_change),
                args=(fip_id,))
        # New (or unchanged) association: install the NAT rule.
        if result["port_id"]:
            neutron_router = self._get_router(context, result["router_id"])
            db_fixed_port = self._get_port(context, result["port_id"])
            fixed_prefix = self._retrieve_prefix_from_port(context,
                                                           db_fixed_port)
            db_floating_port = neutron_router["gw_port"]
            floating_prefix = self._retrieve_prefix_from_port(
                context, db_floating_port)
            nat_info = utils.retrieve_nat_info(context, result,
                                               fixed_prefix,
                                               floating_prefix,
                                               neutron_router)
            state_change = operation.Operation(
                self._set_db_router_state,
                args=(context, neutron_router, p_con.Status.UPDATING))
            self._dispatcher.dispatch_l3(
                d_context=embrane_ctx.DispatcherContext(
                    p_con.Events.SET_NAT_RULE, neutron_router, context,
                    state_change),
                args=(nat_info,))
        return result

    def disassociate_floatingips(self, context, port_id, do_notify=True):
        """Disassociate floating IPs from a port, resetting the backend NAT
        rule for the affected router."""
        try:
            fip_qry = context.session.query(l3_db.FloatingIP)
            # NOTE(review): .one() raises MultipleResultsFound if more than
            # one floating IP points at the port; that case is not handled
            # here -- confirm it cannot occur.
            floating_ip = fip_qry.filter_by(fixed_port_id=port_id).one()
            router_id = floating_ip["router_id"]
        except exc.NoResultFound:
            # NOTE(review): early return yields None instead of the set of
            # router ids the base class returns -- callers are presumably
            # tolerant of this; verify.
            return
        router_ids = self._l3super.disassociate_floatingips(
            self, context, port_id, do_notify=do_notify)
        if router_id:
            neutron_router = self._get_router(context, router_id)
            fip_id = floating_ip["id"]
            state_change = operation.Operation(
                self._set_db_router_state,
                args=(context, neutron_router, p_con.Status.UPDATING))
            self._dispatcher.dispatch_l3(
                d_context=embrane_ctx.DispatcherContext(
                    p_con.Events.RESET_NAT_RULE, neutron_router, context,
                    state_change),
                args=(fip_id,))
        return router_ids
| projectcalico/calico-neutron | neutron/plugins/embrane/base_plugin.py | Python | apache-2.0 | 17,449 |
from nose.tools import assert_equal, assert_raises, assert_not_equal
import networkx as nx
import io
import tempfile
import os
from networkx.readwrite.p2g import *
from networkx.testing import *
class TestP2G:
    """Round-trip tests for the p2g read/write functions."""

    def setUp(self):
        # Undirected 6-cycle a-b-c-d-e-f plus an isolated node 'g';
        # self.DG is its directed (symmetric-edge) counterpart.
        self.G = nx.Graph(name="test")
        e = [('a', 'b'), ('b', 'c'), ('c', 'd'), ('d', 'e'), ('e', 'f'), ('a', 'f')]
        self.G.add_edges_from(e)
        self.G.add_node('g')
        self.DG = nx.DiGraph(self.G)

    def test_read_p2g(self):
        # p2g payload: graph name, "<n_nodes> <n_edges>", then for each node
        # its name followed by a line of successor indices.
        s = b"""\
name
3 4
a
1 2
b
c
0 2
"""
        bytesIO = io.BytesIO(s)
        G = read_p2g(bytesIO)
        assert_equal(G.name, 'name')
        assert_equal(sorted(G), ['a', 'b', 'c'])
        # NOTE(review): 'edges' is computed but never used -- candidate for
        # removal, or the following assertion was meant to use it.
        edges = [(str(u), str(v)) for u, v in G.edges()]
        assert_edges_equal(G.edges(), [('a', 'c'), ('a', 'b'), ('c', 'a'), ('c', 'c')])

    def test_write_p2g(self):
        # Expected serialization of the 3-node path 1->2->3.
        s = b"""foo
3 2
1
1
2
2
3
"""
        fh = io.BytesIO()
        # OrderedDiGraph keeps node insertion order deterministic so the
        # serialized byte string can be compared exactly.
        G = nx.OrderedDiGraph()
        G.name = 'foo'
        G.add_edges_from([(1, 2), (2, 3)])
        write_p2g(G, fh)
        fh.seek(0)
        r = fh.read()
        assert_equal(r, s)

    def test_write_read_p2g(self):
        # Write then read back; the edge set must survive the round trip.
        fh = io.BytesIO()
        G = nx.DiGraph()
        G.name = 'foo'
        G.add_edges_from([('a', 'b'), ('b', 'c')])
        write_p2g(G, fh)
        fh.seek(0)
        H = read_p2g(fh)
        assert_edges_equal(G.edges(), H.edges())
| kenshay/ImageScript | ProgramData/SystemFiles/Python/Lib/site-packages/networkx/readwrite/tests/test_p2g.py | Python | gpl-3.0 | 1,394 |
from hkns.scraper import HKitem, max_id, get_bests, get_news, get_tops, log
from hkns.config import db
from celery import Celery
# Celery application; broker/result-backend settings are read from the
# project's hkns.config module.
app = Celery()
app.config_from_object('hkns.config')
@app.on_after_configure.connect
def setup(sender, **kwargs):
    """Register the periodic tasks once the Celery app is configured:
    rankings every 301s, story scraping every 151s."""
    sender.add_periodic_task(301.0, get_ranking.s())
    sender.add_periodic_task(151.0, scrape_story.s())
@app.task(name="ranking")
def get_ranking():
bests = get_bests()
tops = get_tops()
if bests and tops:
return "Got bests and top"
else:
log("error", {"bt": "no bests or tops"})
return "Error best"
@app.task(name="item")
def get_item(_id):
item = HKitem(_id)
item.get_data()
if item.data:
return "Got {}".format(_id)
@app.task(name='scrape_story')
def scrape_story():
    """
    Every once in a while, we scrape the max_id,
    we would get all of the item from that max_id to our current latest item.
    :return: None
    """
    d = max_id()  # newest item id known remotely (falsy on scrape failure)
    if d:
        lid = d
        # Newest item already stored locally (sort descending on id).
        mid = db['items'].find_one(sort=[("id", -1)])
        if mid:
            # Enqueue one fetch task per id between the remote max and the
            # local max (exclusive), newest first.
            while int(lid) > int(mid['id']):
                get_item.delay(lid)
                lid -= 1
        else:
            # Empty collection: bootstrap by fetching just the newest item.
            get_item.delay(lid)
    return
if __name__ == "__main__":
_max = max_id()
print(_max)
# a = HKitem(_max)
# a.get_data()
# print(a.data)
| zuik/stuff | hkns/run.py | Python | mit | 1,351 |
from django.db import models
from django.db.models import Max
from questionnaire.models.base import BaseModel
from questionnaire.utils.model_utils import map_question_type_with, profiles_that_can_edit
class QuestionGroup(BaseModel):
    """A named group of questions within a subsection.

    Groups can be nested (``parent``/``sub_group``), rendered as grids
    (``grid``, ``display_all``, hybrid grids) and repeated per answer
    (``allow_multiples``).  Question ordering lives in the related
    ``orders`` rows, not on the M2M itself.
    """
    # Questions belonging to this group (ordering tracked separately).
    question = models.ManyToManyField("Question", blank=False, null=False, related_name="question_group")
    subsection = models.ForeignKey("SubSection", blank=False, null=False, related_name="question_group")
    name = models.CharField(max_length=200, blank=False, null=True)
    instructions = models.TextField(blank=False, null=True)
    # Self-reference: a non-null parent makes this a sub-group.
    parent = models.ForeignKey("QuestionGroup", null=True, related_name="sub_group")
    # Position of the group within its subsection.
    order = models.PositiveIntegerField(null=True, blank=False)
    allow_multiples = models.BooleanField(default=False)
    grid = models.BooleanField(default=False)
    display_all = models.BooleanField(default=False)
    hybrid = models.BooleanField(default=False)
    is_core = models.BooleanField(default=False)

    @property
    def region(self):
        """Region of the first region-bound question in the group, or None."""
        questions_with_region = self.all_questions().exclude(region=None)
        if questions_with_region.exists():
            return questions_with_region[0].region
        return None

    class Meta:
        ordering = ('order',)
        app_label = 'questionnaire'

    def all_questions(self):
        return self.question.all()

    def swap_order(self, other_group):
        """Exchange the `order` values of this group and `other_group`."""
        self_order = self.order
        other_order = other_group.order
        self.order = other_order
        other_group.order = self_order
        self.save()
        other_group.save()

    def contains_or_sub_group_contains(self, question):
        # ordered_questions() is driven by the parent's orders when this is
        # a sub-group, so membership is checked against the effective list.
        return question in self.ordered_questions()

    def sub_groups(self):
        """All descendant groups (recursive), depth-first."""
        all_groups = list(self.sub_group.all())
        for group in self.sub_group.all():
            all_groups.extend(list(group.sub_groups()))
        return all_groups

    def is_in_grid(self):
        """True when this group or its direct parent is a grid."""
        if self.parent is not None:
            return self.grid or self.parent.grid
        return self.grid

    def parent_group(self):
        """The direct parent, or self for a top-level group."""
        if self.parent:
            return self.parent
        return self

    def parent_group_id(self):
        return self.parent_group().id

    def is_in_hybrid_grid(self):
        """True when this group or its direct parent is a hybrid grid."""
        if self.parent:
            return self.hybrid or self.parent.hybrid
        return self.hybrid

    def remove_question(self, question):
        """Detach `question` (and its order row) from this group and,
        recursively, from every sub-group."""
        self.orders.filter(question=question).delete()
        if question in self.question.all():
            self.question.remove(question)
        # NOTE(review): under Python 3 `map` is lazy, so the recursive
        # removal below would never execute -- this codebase appears to be
        # Python 2; confirm before porting.
        map(lambda sub_group: sub_group.remove_question(question), self.sub_group.all())

    @classmethod
    def next_order_in(cls, subsection):
        """Next free `order` value among the subsection's top-level groups
        (1 when the subsection has none)."""
        first_order = 1
        max_orders = cls.objects.filter(subsection=subsection, parent__isnull=True).aggregate(Max('order')).get('order__max')
        return max_orders + 1 if max_orders else first_order

    @classmethod
    def delete_empty_groups(cls, subsection):
        """Delete every group in `subsection` that has no questions."""
        groups = cls.objects.filter(subsection=subsection)
        for group in groups:
            if not group.question.exists():
                group.delete()

    def add_question(self, question, order):
        """Attach `question` to this group at the given order position."""
        self.question.add(question)
        question.orders.create(question_group=self, order=order)

    def remove_question_and_reorder(self, question):
        """Remove `question`, then renumber the remaining orders 1..n."""
        self.remove_question(question)
        for i, q in enumerate(self.orders.order_by('order')):
            q.order = i + 1
            q.save()

    def is_grid_or_has_less_than_two_question(self):
        return self.grid or (len(self.and_sub_group_questions()) <= 1)

    def and_sub_group_questions(self):
        """This group's questions plus those of all descendants."""
        questions = list(self.all_questions())
        for sub_group in self.sub_groups():
            questions.extend(sub_group.and_sub_group_questions())
        return questions

    def ordered_questions(self):
        return [order.question for order in self.question_orders()]

    def question_orders(self):
        """Order rows for this group's questions; a sub-group reads them
        from its parent's orders, filtered to its own questions."""
        related = ['question']
        query = self.orders.order_by('order')
        if self.parent:
            query = self.parent.orders.order_by('order').filter(question__in=self.all_questions())
        return query.select_related(*related)

    def has_at_least_two_questions(self):
        return self.question.count() > 1

    def primary_question(self):
        """The question flagged is_primary, falling back to the first
        question by order; None when the group is empty."""
        by_attribute = self.question.filter(is_primary=True)
        if by_attribute.exists():
            return by_attribute[0]
        by_order = self.orders.order_by('order').select_related('question')
        if by_order.exists():
            return by_order[0].question
        return None

    def all_non_primary_questions(self):
        # NOTE(review): list.remove raises ValueError if the primary
        # question is not in ordered_questions() (e.g. empty group returns
        # None) -- confirm callers only use this on populated groups.
        non_primary_questions = self.ordered_questions()
        non_primary_questions.remove(self.primary_question())
        return non_primary_questions

    def has_subgroups(self):
        return self.sub_group.exists()

    def max_questions_order(self):
        """Largest question order in the group, or 0 when empty."""
        group_orders = self.orders.order_by('-order')
        if group_orders.exists():
            return group_orders[0].order
        return 0

    def map_orders_with_answer_type(self, mapped_orders):
        """Populate `mapped_orders` with this group's orders keyed by answer
        type; display-all grids get one mapping per primary-question option."""
        orders = self.orders.order_by('order').select_related('question')
        if self.primary_question() and self.grid and self.display_all:
            for option in self.primary_question().options.all():
                map_question_type_with(orders, mapped_orders, option)
        else:
            map_question_type_with(orders, mapped_orders)
map_question_type_with(orders, mapped_orders) | eJRF/ejrf | questionnaire/models/question_groups.py | Python | bsd-3-clause | 5,459 |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import logging
import os
from dateutil.relativedelta import relativedelta
from datetime import datetime, date
import click
from werkzeug.security import generate_password_hash
import newspipe.models
from newspipe.bootstrap import application, db
from newspipe.controllers import UserController, ArticleController
logger = logging.getLogger("commands")
@application.cli.command("db_empty")
def db_empty():
"Will drop every datas stocked in db."
with application.app_context():
newspipe.models.db_empty(db)
@application.cli.command("db_create")
def db_create():
"Will create the database from conf parameters."
with application.app_context():
try:
db.create_all()
except Exception as e:
print(e)
@application.cli.command("create_admin")
@click.option("--nickname", default="admin", help="Nickname")
@click.option("--password", default="password", help="Password")
def create_admin(nickname, password):
"Will create an admin user."
admin = {
"is_admin": True,
"is_api": True,
"is_active": True,
"nickname": nickname,
"pwdhash": generate_password_hash(password),
}
with application.app_context():
try:
UserController(ignore_context=True).create(**admin)
except Exception as e:
print(e)
@application.cli.command("delete_user")
@click.option("--user-id", required=True, help="Id of the user to delete.")
def delete_user(user_id=None):
"Delete the user with the id specified in the command line."
try:
user = UserController().delete(user_id)
print("User {} deleted".format(user.nickname))
except Exception as e:
print(e)
@application.cli.command("delete_inactive_users")
@click.option("--last-seen", default=6, help="Number of months since last seen.")
def delete_inactive_users(last_seen):
"Delete inactive users (inactivity is given in parameter and specified in number of months)."
filter = {}
filter["last_seen__lt"] = date.today() - relativedelta(months=last_seen)
users = UserController().read(**filter)
for user in users:
db.session.delete(user)
try:
print("Deleting user {}...".format(user.nickname))
db.session.commit()
except:
db.session.rollback()
print("Inactive users deleted.")
@application.cli.command("disable_inactive_users")
@click.option("--last-seen", default=6, help="Number of months since last seen.")
def disable_inactive_users(last_seen):
"Disable inactive users (inactivity is given in parameter and specified in number of months)."
filter = {}
filter["last_seen__lt"] = date.today() - relativedelta(months=last_seen)
users = UserController().read(**filter)
for user in users:
user.is_active = False
user.is_public_profile = False
user.automatic_crawling = False
try:
print("Updating user {}...".format(user.nickname))
db.session.commit()
except:
db.session.rollback()
print("Inactive users disabled.")
@application.cli.command("delete_read_articles")
def delete_read_articles():
"Delete read articles (and not liked) retrieved since more than 60 days ago."
filter = {}
filter["user_id__ne"] = 1
filter["readed"] = True
filter["like"] = False
filter["retrieved_date__lt"] = date.today() - relativedelta(days=60)
articles = ArticleController().read(**filter).limit(5000)
for article in articles:
try:
db.session.delete(article)
db.session.commit()
except:
db.session.rollback()
print("Read articles deleted.")
@application.cli.command("fix_article_entry_id")
def fix_article_entry_id():
filter = {}
filter["entry_id"] = None
articles = ArticleController().read(**filter).limit(50)
for article in articles:
try:
article.entry_id = str(article.id)
db.session.commit()
except:
db.session.rollback()
@application.cli.command("fetch_asyncio")
@click.option("--user-id", default=None, help="Id of the user")
@click.option("--feed-id", default=None, help="If of the feed")
def fetch_asyncio(user_id=None, feed_id=None):
"Crawl the feeds with asyncio."
import asyncio
with application.app_context():
from newspipe.crawler import default_crawler
filters = {}
filters["is_active"] = True
filters["automatic_crawling"] = True
if None is not user_id:
filters["id"] = user_id
users = UserController().read(**filters).all()
try:
feed_id = int(feed_id)
except:
feed_id = None
loop = asyncio.get_event_loop()
queue = asyncio.Queue(maxsize=3, loop=loop)
producer_coro = default_crawler.retrieve_feed(queue, users, feed_id)
consumer_coro = default_crawler.insert_articles(queue, 1)
logger.info("Starting crawler.")
start = datetime.now()
loop.run_until_complete(asyncio.gather(producer_coro, consumer_coro))
end = datetime.now()
loop.close()
logger.info("Crawler finished in {} seconds.".format((end - start).seconds))
| JARR-aggregator/JARR | newspipe/commands.py | Python | agpl-3.0 | 5,322 |
# -*- coding: utf8 -*-
import re
import requests
# copied from stackoverflow
# Full-match pattern: the whole query string must consist of Han/CJK
# characters (radicals, unified ideographs, ideographic numerals and
# compatibility forms).
HANUNI = re.compile(ur'^[⺀-⺙⺛-⻳⼀-⿕々〇〡-〩〸-〺〻㐀-䶵一-鿃豈-鶴侮-頻並-龎]+$', re.UNICODE)
def guoyu(uid, txt):
    """Look up `txt` in the Mandarin (國語) MoeDict and render the entry
    as a plain-text reply; returns an error message string on failure."""
    print u'UID %s 查國語萌典: %s' % (uid, txt)
    r = 'undefined'
    # Only all-Han queries are sent to the API.
    if HANUNI.match(txt):
        get = requests.get('https://www.moedict.tw/a/%s.json' % txt)
        if get.status_code == 200:
            j = get.json()
            # Header line: title plus radical/stroke count when available.
            if 'r' in j and 'n' in j:
                r = u'%s (%s部%d劃)\n' % (stripWordSeg(j['t']), stripWordSeg(j['r']), j['n'])
            else:
                r = stripWordSeg(j['t']) + '\n'
            # One section per heteronym: pronunciation, numbered definitions
            # with examples, then synonyms.
            for h in j['h']:
                i = 1
                if 'b' in h and 'p' in h:
                    r = r + u'%s %s\n' % (h['b'], h['p'])
                for d in h['d']:
                    if 'type' in d:
                        word_class = u'[%s詞]' % stripWordSeg(d['type'])
                    else:
                        word_class = ''
                    r = r + '%d. %s %s\n' % (i, word_class, stripWordSeg(d['f']))
                    if 'e' in d:
                        for ex in d['e']:
                            r = r + u' %s\n' % stripWordSeg(ex)
                    i = i + 1
                if 's' in h:
                    r = r + u'相似詞: %s' % stripWordSeg(h['s'])
            return r
        elif get.status_code == 404:
            return u'查無此字。'
        else:
            # NOTE(review): `app` is not defined in this module -- this
            # branch would raise NameError; presumably a Flask app logger
            # was intended.  Confirm/import before relying on it.
            app.logger.warn(str(get.status_code))
            app.logger.warn(str(get.text))
            return u'系統錯誤,請稍候再試。'
    else:
        return u'查詢字串內含非漢字的字元,請重新輸入。'
    # NOTE(review): unreachable -- every branch above returns.
    return r
def taigi(uid, txt):
    """Look up `txt` in the Taiwanese (台語) MoeDict and render the entry
    as a plain-text reply; returns an error message string on failure."""
    print u'UID %s 查台語萌典: %s' % (uid, txt)
    r = 'undefined'
    # Only all-Han queries are sent to the API.
    if HANUNI.match(txt):
        get = requests.get('https://www.moedict.tw/t/%s.json' % txt)
        if get.status_code == 200:
            j = get.json()
            # Header line: title plus radical/stroke count when available.
            if 'r' in j and 'n' in j:
                r = u'%s (%s部%d劃)\n' % (stripWordSeg(j['t']), stripWordSeg(j['r']), j['n'])
            else:
                r = stripWordSeg(j['t']) + '\n'
            # One section per heteronym: reading label + romanization,
            # numbered definitions with rendered examples, then synonyms.
            for h in j['h']:
                i = 1
                reading = stripWordSeg(h.get('reading', u'發'))
                if 'T' in h:
                    r = r + u'%s音: %s\n' % (reading, h['T'])
                for d in h['d']:
                    if 'type' in d:
                        word_class = u'[%s詞] ' % stripWordSeg(d['type'])
                    else:
                        word_class = ''
                    r = r + '%d. %s%s\n' % (i, word_class, stripWordSeg(d['f']))
                    if 'e' in d:
                        for ex in d['e']:
                            r = r + u'%s\n' % renderMoeExample(stripWordSeg(ex))
                    i = i + 1
                if 's' in h:
                    r = r + u'相似詞: %s' % stripWordSeg(h['s'])
            return r
        # MP3 in https://1763c5ee9859e0316ed6-db85b55a6a3fbe33f09b9245992383bd.ssl.cf1.rackcdn.com/04208.mp3
        # j['h'][0]['_'] left pad 0 to 5 digits
        elif get.status_code == 404:
            return u'查無此字。'
        else:
            # NOTE(review): `app` is not defined in this module -- this
            # branch would raise NameError; confirm/import before use.
            app.logger.warn(str(get.status_code))
            app.logger.warn(str(get.text))
            return u'系統錯誤,請稍候再試。'
    else:
        return u'查詢字串內含非漢字的字元,請重新輸入。'
    # NOTE(review): unreachable -- every branch above returns.
    return
def hakkafa(uid, txt):
    """Look up `txt` in the Hakka (客語) MoeDict and render the entry
    as a plain-text reply; returns an error message string on failure."""
    print u'UID %s 查客語萌典: %s' % (uid, txt)
    r = 'undefined'
    # Only all-Han queries are sent to the API.
    if HANUNI.match(txt):
        get = requests.get('https://www.moedict.tw/h/%s.json' % txt)
        if get.status_code == 200:
            j = get.json()
            r = stripWordSeg(j['t']) + '\n'
            # One section per heteronym: pronunciation line, numbered
            # definitions with rendered examples, then synonyms.
            for h in j['h']:
                i = 1
                reading = stripWordSeg(h.get('reading', u'發'))
                if 'p' in h:
                    r = r + h['p'].replace(u'\u20de', '') + '\n' # 不要四方框
                for d in h['d']:
                    if 'type' in d and d['type'] != '':
                        word_class = u'[%s詞] ' % stripWordSeg(d['type'])
                    else:
                        word_class = ''
                    r = r + '%d. %s%s\n' % (i, word_class, stripWordSeg(d['f']))
                    if 'e' in d:
                        for ex in d['e']:
                            r = r + u'%s\n' % renderMoeExample(stripWordSeg(ex))
                    i = i + 1
                if 's' in h:
                    r = r + u'相似詞: %s' % stripWordSeg(h['s'])
            return r
        # MP3 in https://1763c5ee9859e0316ed6-db85b55a6a3fbe33f09b9245992383bd.ssl.cf1.rackcdn.com/04208.mp3
        # j['h'][0]['='] left pad 0 to 5 digits
        elif get.status_code == 404:
            return u'查無此字。'
        else:
            # NOTE(review): neither `pprint` nor `app` is defined/imported
            # in this module -- this branch would raise NameError; confirm
            # the intended imports before relying on it.
            pprint.pprint(txt)
            app.logger.warn(str(get.status_code))
            app.logger.warn(str(get.text))
            return u'系統錯誤,請稍候再試。'
    else:
        return u'查詢字串內含非漢字的字元,請重新輸入。'
    # NOTE(review): unreachable -- every branch above returns.
    return
def renderMoeExample(s):
    """Render a MoeDict example that uses the Unicode interlinear
    annotation characters (U+FFF9..U+FFFB): the base text is indented on
    its own line and the annotation is wrapped in parentheses; an empty
    trailing annotation is dropped entirely."""
    markers = ((u'\ufff9', u' '),
               (u'\ufffa', u'\n '),
               (u'\ufffb', u'\n ('))
    rendered = s
    for marker, replacement in markers:
        rendered = rendered.replace(marker, replacement)
    rendered += u')'
    # Remove the empty parentheses produced when nothing follows U+FFFB.
    return rendered.replace(u'\n ()', u'')
def stripWordSeg(s):
    """Strip the word-segmentation markers (backtick and tilde) that
    MoeDict embeds in its text fields."""
    for marker in (u'`', u'~'):
        s = s.replace(marker, u'')
    return s
| miaoski/amis-linebot | moe.py | Python | mit | 5,510 |
# Fermat's Last Theorem says that there are no integers a, b, and c such that
# a^n + b^n = c^n
# for any values of n greater than 2.
# 1. Write a function named check_fermat that takes four parameters-a, b, c
# and n-and that checks to see if Fermat's theorem holds. If n is greater than
# 2 and it turns out to be true that a^n + b^n = c^n
# the program should print, "Holy smokes, Fermat was wrong!" Otherwise the
# program should print, "No, that doesn't work."
# 2. Write a function that prompts the user to input values for a, b, c and n,
# converts them to integers, and uses check_fermat to check whether they
# violate Fermat's theorem.
# Current Status: Complete
# Gather the four integers interactively (Python 2: raw_input returns a
# str).  int() raises ValueError on non-numeric input -- no validation here.
a = int(raw_input('What value to use for a?\n'))
b = int(raw_input('What value to use for b?\n'))
c = int(raw_input('What value to use for c?\n'))
n = int(raw_input('What value to use for n?\n'))
def check_fermat(a, b, c, n):
    """Check whether a, b, c, n form a counterexample to Fermat's Last
    Theorem, which concerns only exponents n > 2.

    Returns one of three message strings:
    - n == 2: the Pythagorean case, explicitly not Fermat's claim;
    - n > 2 and a**n + b**n == c**n: a (supposed) counterexample;
    - otherwise: no counterexample.
    """
    if n == 2:
        return 'Pythagoras got that one already.'
    # The n > 2 guard fixes the original behavior, which also reported a
    # "counterexample" for n < 2 (e.g. 1 + 2 == 3 with n == 1) even though
    # the theorem says nothing about those exponents.
    if n > 2 and a**n + b**n == c**n:
        return 'Holy Smokes, Fermat was Wrong!'
    return "No, that doesn't work."
# Report the verdict for the user-supplied values (Python 2 print statement).
print check_fermat(a, b, c, n)
# OfflineIMAP initialization code
# Copyright (C) 2002-2007 John Goerzen
# <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import imaplib
from offlineimap import imapserver, repository, folder, mbnames, threadutil, version, syncmaster, accounts
from offlineimap.localeval import LocalEval
from offlineimap.threadutil import InstanceLimitedThread, ExitNotifyThread
from offlineimap.ui import UIBase
import re, os, os.path, offlineimap, sys
from offlineimap.CustomConfig import CustomConfigParser
from threading import *
import threading, socket
from getopt import getopt
# Probe for fcntl: it exists only on POSIX platforms.  hasfcntl records
# whether flock()-based locking is available; lock() is a no-op without it.
try:
    import fcntl
    hasfcntl = 1
except:
    hasfcntl = 0
# File object holding this instance's exclusive lock (set by lock()).
lockfd = None
def lock(config, ui):
    """Take an exclusive, non-blocking flock on <metadatadir>/lock.

    If another OfflineIMAP instance already holds the lock, notify the
    UI and terminate.  On platforms without fcntl this silently does
    nothing.  The open file object is kept in the module-global lockfd
    so the lock lives for the whole process.
    """
    global lockfd, hasfcntl
    if not hasfcntl:
        return
    lockpath = config.getmetadatadir() + "/lock"
    lockfd = open(lockpath, "w")
    try:
        fcntl.flock(lockfd, fcntl.LOCK_EX | fcntl.LOCK_NB)
    except IOError:
        # Somebody else owns the lock -- report it and bail out.
        ui.locked()
        ui.terminate(1)
def startup(versionno):
    """Parse the command line, load the config, and run the whole sync.

    ``versionno`` must match the library's own version string (guards
    against a mismatched PYTHONPATH).  Recognized options: -h/--help,
    -c configfile, -P profiledir (requires -1), -k sec:key=value config
    override, -u UI name, -l logfile, -d debugtypes, -o one-shot,
    -q quick sync, -f folder list, -a active accounts, -1 single-thread.
    Blocks in the exit-notify monitor loop until syncing finishes.
    """
    assert versionno == version.versionstr, "Revision of main program (%s) does not match that of library (%s). Please double-check your PYTHONPATH and installation locations." % (versionno, version.versionstr)
    options = {}
    if '--help' in sys.argv[1:]:
        sys.stdout.write(version.getcmdhelp() + "\n")
        sys.exit(0)
    # getopt()[0] is the list of (flag, value) pairs; later duplicates win.
    for optlist in getopt(sys.argv[1:], 'P:1oqa:c:d:l:u:hk:f:')[0]:
        options[optlist[0]] = optlist[1]
    if options.has_key('-h'):
        sys.stdout.write(version.getcmdhelp())
        sys.stdout.write("\n")
        sys.exit(0)
    configfilename = os.path.expanduser("~/.offlineimaprc")
    if options.has_key('-c'):
        configfilename = options['-c']
    # Profiling mode: only sane single-threaded, hence the -1 requirement.
    if options.has_key('-P'):
        if not options.has_key('-1'):
            sys.stderr.write("FATAL: profile mode REQUIRES -1\n")
            sys.exit(100)
        profiledir = options['-P']
        os.mkdir(profiledir)
        threadutil.setprofiledir(profiledir)
        sys.stderr.write("WARNING: profile mode engaged;\nPotentially large data will be created in " + profiledir + "\n")
    config = CustomConfigParser()
    if not os.path.exists(configfilename):
        sys.stderr.write(" *** Config file %s does not exist; aborting!\n" % configfilename)
        sys.exit(1)
    config.read(configfilename)
    # override config values with option '-k'
    for option in options.keys():
        if option == '-k':
            (key, value) = options['-k'].split('=', 1)
            if ':' in key:
                (secname, key) = key.split(':', 1)
                # Section names use spaces; '_' is the CLI-safe spelling.
                section = secname.replace("_", " ")
            else:
                section = "general"
            config.set(section, key, value)
    # The UI must exist before anything below can report errors.
    ui = offlineimap.ui.detector.findUI(config, options.get('-u'))
    UIBase.setglobalui(ui)
    if options.has_key('-l'):
        ui.setlogfd(open(options['-l'], 'wt'))
    ui.init_banner()
    if options.has_key('-d'):
        for debugtype in options['-d'].split(','):
            ui.add_debug(debugtype.strip())
            if debugtype == 'imap':
                imaplib.Debug = 5
            if debugtype == 'thread':
                threading._VERBOSE = 1
    if options.has_key('-o'):
        # FIXME: maybe need a better
        for section in accounts.getaccountlist(config):
            config.remove_option('Account ' + section, "autorefresh")
    if options.has_key('-q'):
        for section in accounts.getaccountlist(config):
            config.set('Account ' + section, "quick", '-1')
    # -f: restrict syncing to the named folders on every repository by
    # injecting a folderfilter expression (evaluated later by LocalEval).
    if options.has_key('-f'):
        foldernames = options['-f'].replace(" ", "").split(",")
        folderfilter = "lambda f: f in %s" % foldernames
        folderincludes = "[]"
        for accountname in accounts.getaccountlist(config):
            account_section = 'Account ' + accountname
            remote_repo_section = 'Repository ' + \
                config.get(account_section, 'remoterepository')
            local_repo_section = 'Repository ' + \
                config.get(account_section, 'localrepository')
            for section in [remote_repo_section, local_repo_section]:
                config.set(section, "folderfilter", folderfilter)
                config.set(section, "folderincludes", folderincludes)
    lock(config, ui)
    # Best-effort pid file; failure to write it is deliberately ignored.
    try:
        pidfd = open(config.getmetadatadir() + "/pid", "w")
        pidfd.write(str(os.getpid()) + "\n")
        pidfd.close()
    except:
        pass
    try:
        if options.has_key('-l'):
            sys.stderr = ui.logfile
        socktimeout = config.getdefaultint("general", "socktimeout", 0)
        if socktimeout > 0:
            socket.setdefaulttimeout(socktimeout)
        activeaccounts = config.get("general", "accounts")
        if options.has_key('-a'):
            activeaccounts = options['-a']
        activeaccounts = activeaccounts.replace(" ", "")
        activeaccounts = activeaccounts.split(",")
        allaccounts = accounts.AccountHashGenerator(config)
        syncaccounts = {}
        for account in activeaccounts:
            if account not in allaccounts:
                if len(allaccounts) == 0:
                    errormsg = 'The account "%s" does not exist because no accounts are defined!'%account
                else:
                    errormsg = 'The account "%s" does not exist. Valid accounts are:'%account
                    for name in allaccounts.keys():
                        errormsg += '\n%s'%name
                ui.terminate(1, errortitle = 'Unknown Account "%s"'%account, errormsg = errormsg)
            syncaccounts[account] = allaccounts[account]
        server = None
        remoterepos = None
        localrepos = None
        # Concurrency limits: -1 forces one of everything, otherwise the
        # configured per-account / per-repository maxima apply.
        if options.has_key('-1'):
            threadutil.initInstanceLimit("ACCOUNTLIMIT", 1)
        else:
            threadutil.initInstanceLimit("ACCOUNTLIMIT",
                                         config.getdefaultint("general", "maxsyncaccounts", 1))
        for reposname in config.getsectionlist('Repository'):
            for instancename in ["FOLDER_" + reposname,
                                 "MSGCOPY_" + reposname]:
                if options.has_key('-1'):
                    threadutil.initInstanceLimit(instancename, 1)
                else:
                    threadutil.initInstanceLimit(instancename,
                                                 config.getdefaultint('Repository ' + reposname, "maxconnections", 1))
        threadutil.initexitnotify()
        # The actual sync runs in a daemon thread; this thread only monitors.
        t = ExitNotifyThread(target=syncmaster.syncitall,
                             name='Sync Runner',
                             kwargs = {'accounts': syncaccounts,
                                       'config': config})
        t.setDaemon(1)
        t.start()
    except:
        ui.mainException()
    try:
        threadutil.exitnotifymonitorloop(threadutil.threadexited)
    except SystemExit:
        raise
    except:
        ui.mainException() # Also expected to terminate.
| avsm/lifedb-plugins | IMAP/offlineimap/offlineimap/init.py | Python | gpl-2.0 | 7,684 |
from unittest.case import TestCase
from zsl import Config, inject
from zsl.application.containers.container import IoCContainer
from zsl.testing.db import IN_MEMORY_DB_SETTINGS
from zsl.testing.zsl import ZslTestCase, ZslTestConfiguration
from zsl.utils.redis_helper import Keymaker
from zsl.utils.testing import set_test_responder
__author__ = 'peter'
set_test_responder()
class TestKeymaker(ZslTestCase, TestCase):
    """Tests for ``zsl.utils.redis_helper.Keymaker`` key generation."""

    ZSL_TEST_CONFIGURATION = ZslTestConfiguration(
        'test_redis_helper',
        container=IoCContainer,
        config_object=IN_MEMORY_DB_SETTINGS
    )

    def test_empty(self):
        # A Keymaker built from an empty mapping must expose no methods.
        keymaker = Keymaker({})
        with self.assertRaises(AttributeError):
            keymaker.a()
        self.assertEqual(len(keymaker.__dict__), 0, "There should be no method in keymaker")

    def test_normal(self):
        # Each mapping entry becomes a method joining its prefix and the
        # call arguments with ':'.  None arguments are dropped, while
        # other falsy values (0, False) are kept in the key.
        keymaker = Keymaker({'a': 'AAA', 'b': 'XX'})
        self.assertEqual(keymaker.a(), 'AAA', "Pure method")
        self.assertEqual(keymaker.b('x', 'y'), 'XX:x:y', "Method with arguments")
        self.assertEqual(keymaker.b('x', None, 'y'), 'XX:x:y', "Method with a None argument")
        self.assertEqual(keymaker.b('x', None, 0, False, 'y'),
                         'XX:x:0:False:y',
                         "Method with a None and falsified arguments")

    def test_with_prefix(self):
        # A local prefix is prepended to every generated key.
        keymaker = Keymaker({'a': 'AAA', 'b': 'XX'}, prefix="testing")
        self.assertEqual(keymaker.a(), 'testing:AAA', "Pure method with prefix")
        self.assertEqual(keymaker.b('x', 'y'), 'testing:XX:x:y', "Method with arguments and prefix")

    @inject(config=Config)
    def test_with_global_prefix(self, config):
        # type: (Config)->None
        # The REDIS 'prefix' config value goes before the local prefix;
        # it is reset at the end so other tests are unaffected.
        config.setdefault('REDIS', {'prefix': 'global_prefix'})
        keymaker = Keymaker({'a': 'AAA', 'b': 'XX'}, prefix="testing")
        self.assertEqual(keymaker.a(), 'global_prefix:testing:AAA', "Pure method with global and local prefix")
        self.assertEqual(keymaker.b('x', 'y'),
                         'global_prefix:testing:XX:x:y',
                         "Method with arguments and global and local prefix")
        config['REDIS']['prefix'] = None
| AtteqCom/zsl | tests/utils/redis_helper_test.py | Python | mit | 2,174 |
#!/usr/bin/python -tt
# -*- coding: utf-8 -*-
import logging, logging.handlers
import pttlib

# Log-line format shared by every handler below.
log_line_format = '%(asctime)s %(name)s %(levelname)s: %(message)s'
# Two handlers: INFO to the console, DEBUG to a daily-rotated file
# keeping 30 days of history (UTC timestamps, file opened lazily).
log_handlers = [
    {
        'log_level': logging.INFO,
        'class': logging.StreamHandler,
        'config': {'stream': None},
    },
    {
        'log_level': logging.DEBUG,
        'class': logging.handlers.TimedRotatingFileHandler,
        'config': {
            'filename': 'sr0wx.log',
            'when': 'D',
            'interval': 1,
            'backupCount': 30,
            'delay': True,
            'utc': True,
        }
    }
]
# There are three ways for PTT with sr0wx.
#
# This is the "null" option where your transmitter is turn on with VOX:
#
ptt = pttlib.vox()
#
# The other way is to use pySerial and PTT on one of two pins: DTR or RTS
#
#ptt = pttlib.serial('/dev/ttyUSB0', signal='DTR')
#
# The third way is to use GPIO from Raspberry PI:
# ptt = pttlib.gpio(17)
import pl_microsoft.pl_microsoft as pl_microsoft
lang = "pl_microsoft"
pygame_bug = 0
# Sample names played before/after the weather announcement.
hello_msg = ["tu_eksperymentalna_automatyczna_stacja_pogodowa", "sr0wx", ]
goodbye_msg = ["_", "tu_sr0wx"]
#
# Modules configuration
#
# List of activated modules is at the very bottom of this file
#
# world weather online
from world_weather_online import WorldWeatherOnline
worldweatheronline = WorldWeatherOnline(
    api_key="CHANGEME",
    latitude=52.71,
    longitude=19.11,
    language=pl_microsoft,
    message_template="""\
stan_pogody_z_godziny {OBSERVATION_TIME}
_ {CURRENT_WEATHER}
temperatura {CURRENT_TEMP_C} wilgotnosc {CURRENT_HUMIDITY}
_ kierunek_wiatru {CURRENT_WIND_DIR}
{CURRENT_WIND_DIR_DEG} predkosc_wiatru {CURRENT_WIND_SPEED_MPS}
{CURRENT_WIND_SPEED_KMPH} _ cisnienie {CURRENT_PRESSURE}
pokrywa_chmur {CURRENT_CLOUDCOVER} _
prognoza_na_nastepne trzy godziny
{FCAST_WEATHER} temperatura {FCAST_TEMP_C} stopni_celsjusza
kierunek_wiatru {FCAST_WIND_DIR} {FCAST_WIND_DIR_DEG} predkosc_wiatru
{FCAST_WIND_SPEED_MPS} {FCAST_WIND_SPEED_KMPH}""")
# -------------
# activity_map
# ------------
from activity_map import ActivityMap
activitymap = ActivityMap(
    service_url="http://test.ostol.pl/?base=",
    callsign=None,
    latitude=0,
    longitude=0,
    hour_quarter=5,
    above_sea_level=118,
    above_ground_level=20,
    station_range=30,
    additional_info="",
)
# List of modules to query on program run
modules = [activitymap, worldweatheronline, ]
| sq6jnx/sr0wx.py | config.py | Python | apache-2.0 | 2,439 |
from pageobject import *
from context import *
from util import *
| imsardine/pyuia | pyuia/appium/__init__.py | Python | mit | 67 |
'''
LineByLineMerge.py

Merges or extracts text files, either line-by-line, top-to-bottom,
or by extracting the line following a matching prefix.
'''
import sys
import os
import codecs

# FILE_MODE selects input discovery: 0 = single folder, 1 = folder tree,
# 2 = command-line arguments (see __main__ at the bottom).
FILE_MODE = 0
# PROCESS_MODE selects the operation: 0 = line-by-line merge,
# 1 = top-to-bottom merge, 2 = extract-subsequent-line.
PROCESS_MODE = 0
class TxtFileProcessor:
    """Collects a set of text files and merges/extracts their contents.

    Note (Python 2 code): input files opened by the processing methods
    are never explicitly closed; they are left for the GC to reclaim.
    """

    def __init__(self):
        # Paths of the files to process, in discovery order.
        self.filePaths = []
        # Result file name; load_* methods may replace it with a name
        # derived from the first input, processing methods prefix it.
        self.output_filename = "output.txt"

    def load_from_folder_tree(self, expression = '.txt', folder_path = './'):
        """Recursively collect files whose NAME contains ``expression``.

        Returns the number of collected paths.  Unlike load_from_folder,
        this REPLACES any previously loaded paths and does not touch
        output_filename.
        """
        self.filePaths = [os.path.join(root, name)
                          for root, dirs, files in os.walk(folder_path)
                          for name in files
                          if (name.find(expression) != -1)]
        return len(self.filePaths)

    def load_from_folder(self, expression = '.txt', folder_path = './'):
        """Collect matching files from one folder (non-recursive).

        Appends to any previously loaded paths and returns the new total.
        NOTE(review): folder_path is concatenated directly, so it must
        end with a separator; raises IndexError when nothing matches
        (all_files_matching[0]) -- confirm both are acceptable.
        """
        print os.listdir(folder_path)
        all_files_in_folder = [f for f in os.listdir(folder_path) if os.path.isfile((folder_path + f))]
        all_files_matching = [f for f in all_files_in_folder if f.find(expression) != -1]
        all_file_paths = [folder_path + file_name for file_name in all_files_matching]
        for f in all_file_paths:
            self.filePaths.append(f)
        self.output_filename = all_files_matching[0]
        return len(self.filePaths)

    def load_from_args(self):
        """Load the file list from sys.argv (exits if no args given)."""
        fileNames = []
        if len(sys.argv) < 2:
            print "Not enough args"
            sys.exit(1)
        else:
            print str(len(sys.argv)) + " arguments"
            # remove the path to this python file
            sys.argv.pop(0)
        print fileNames
        # put argv arguments into filePaths
        for argument in sys.argv:
            self.filePaths.append(argument)
        self.output_filename = sys.argv[0]
        return len(self.filePaths)

    def extract_subsequent_line(self, start = ''):
        """Write out every line that FOLLOWS a line beginning with ``start``.

        Output goes to 'xtracted_' + output_filename; input files are
        processed one after another with the match flag carrying over.
        """
        if len(self.filePaths) == 0:
            raise Exception("No files loaded")
        fileHandlers = []
        self.output_filename = 'xtracted_' + self.output_filename
        output_this = False
        with codecs.open(self.output_filename, 'w+') as out:
            print "Writing to " + self.output_filename
            for argument in self.filePaths:
                try:
                    f = codecs.open(argument, 'r+')
                    fileHandlers.append(f)
                    print f.closed
                except:
                    print "open " + argument + "failed"
            for handler in fileHandlers:
                for line in handler:
                    if output_this:
                        out.write(line)
                        output_this = False
                    else:
                        if line.startswith(start):
                            # get next line
                            output_this = True

    def top_bottom_merge(self):
        """Concatenate all loaded files, separated by single newlines."""
        if len(self.filePaths) == 0:
            raise Exception("No files loaded")
        fileHandlers = []
        self.output_filename = 'merged_' + self.output_filename
        with codecs.open(self.output_filename, 'w+') as out:
            print "Writing to " + self.output_filename
            for argument in self.filePaths:
                try:
                    f = codecs.open(argument, 'r+')
                    fileHandlers.append(f)
                except:
                    print "open " + argument + "failed"
            print fileHandlers
            for handler in fileHandlers:
                fileContents = handler.read()
                print fileContents
                out.write(fileContents)
                out.write('\n')

    def line_by_line_merge(self, seperator = ''):
        """Interleave the files row by row: row i of the output is row i
        of every input joined together (separator written only after the
        first file's contribution).  Stops at the shortest file.
        """
        if len(self.filePaths) == 0:
            raise Exception("No files loaded")
        fileHandlers = []
        self.output_filename = 'merged_' + self.output_filename
        exitFlag = True
        i = 1
        with codecs.open(self.output_filename, 'w+') as out:
            print "Writing to " + self.output_filename
            for argument in self.filePaths:
                try:
                    f = codecs.open(argument, 'r+')
                    fileHandlers.append(f)
                except:
                    print "open " + argument + "failed"
            while(exitFlag):
                print "Appending row " + str(i)
                for index, handler in enumerate(fileHandlers):
                    # get just one row from each file
                    currentLine = handler.readline()
                    if not currentLine:
                        # determined by shortest file
                        exitFlag = False
                        break
                    else:
                        # need to remove the newlines
                        currentLine = currentLine.rstrip()
                        print currentLine
                        out.write(currentLine)
                        if index == 0:
                            out.write(seperator)
                # add a newline to indicate done with row
                if exitFlag:
                    out.write('\n')
                i += 1

    def unload(self):
        """Reset the processor to its freshly-constructed state."""
        self.filePaths = []
        self.output_filename = "output.txt"
if __name__ == "__main__":
    # Optional single CLI argument overrides the folder to scan.
    path = './'
    if len(sys.argv) == 2:
        path = sys.argv[1]
    # Empty expression matches every file; empty start matches every line.
    expression = ''
    start = ''
    myProcessor = TxtFileProcessor()
    # Input discovery according to FILE_MODE (see top of file).
    if FILE_MODE == 0:
        myProcessor.load_from_folder(expression, path)
    elif FILE_MODE == 1:
        myProcessor.load_from_folder_tree(expression, path)
    elif FILE_MODE == 2:
        myProcessor.load_from_args()
    # Processing according to PROCESS_MODE (see top of file).
    if PROCESS_MODE == 0:
        myProcessor.line_by_line_merge()
    elif PROCESS_MODE == 1:
        myProcessor.top_bottom_merge()
    elif PROCESS_MODE == 2:
myProcessor.extract_subsequent_line(start) | resolutedreamer/LineByLineMerge | src/LineByLineMerge.py | Python | apache-2.0 | 5,640 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
K-means Clustering
=========================================================
The plots display firstly what a K-means algorithm would yield
using three clusters. It is then shown what the effect of a bad
initialization is on the classification process:
By setting n_init to only 1 (default is 10), the amount of
times that the algorithm will be run with different centroid
seeds is reduced.
The next plot displays what using eight clusters would deliver
and finally the ground truth.
"""
print(__doc__)
# Code source: Gael Varoqueux
# Modified for Documentation merge by Jaques Grobler
# License: BSD
import numpy as np
import pylab as pl
from mpl_toolkits.mplot3d import Axes3D
from sklearn.cluster import KMeans
from sklearn import datasets

# Fixed seed so the "bad init" run is reproducible.
np.random.seed(5)
centers = [[1, 1], [-1, -1], [1, -1]]
iris = datasets.load_iris()
X = iris.data
y = iris.target
# Three estimators: a 3-cluster fit, an 8-cluster fit, and a
# deliberately under-initialized 3-cluster fit (n_init=1, random init).
estimators = {'k_means_iris_3': KMeans(n_clusters=3),
              'k_means_iris_8': KMeans(n_clusters=8),
              'k_means_iris_bad_init': KMeans(n_clusters=3, n_init=1,
                                              init='random')}
fignum = 1
# One 3-D scatter plot per estimator, colored by predicted cluster label.
# NOTE: dict.iteritems() makes this script Python 2 only.
for name, est in estimators.iteritems():
    fig = pl.figure(fignum, figsize=(4, 3))
    pl.clf()
    ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)
    pl.cla()
    est.fit(X)
    labels = est.labels_
    ax.scatter(X[:, 3], X[:, 0], X[:, 2], c=labels.astype(np.float))
    ax.w_xaxis.set_ticklabels([])
    ax.w_yaxis.set_ticklabels([])
    ax.w_zaxis.set_ticklabels([])
    ax.set_xlabel('Petal width')
    ax.set_ylabel('Sepal length')
    ax.set_zlabel('Petal length')
    fignum = fignum + 1
# Plot the ground truth
fig = pl.figure(fignum, figsize=(4, 3))
pl.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)
pl.cla()
# Label each species cluster at its centroid.
for name, label in [('Setosa', 0),
                    ('Versicolour', 1),
                    ('Virginica', 2)]:
    ax.text3D(X[y == label, 3].mean(),
              X[y == label, 0].mean() + 1.5,
              X[y == label, 2].mean(), name,
              horizontalalignment='center',
              bbox=dict(alpha=.5, edgecolor='w', facecolor='w'))
# Reorder the labels to have colors matching the cluster results
y = np.choose(y, [1, 2, 0]).astype(np.float)
ax.scatter(X[:, 3], X[:, 0], X[:, 2], c=y)
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
ax.set_xlabel('Petal width')
ax.set_ylabel('Sepal length')
ax.set_zlabel('Petal length')
pl.show()
| florian-f/sklearn | examples/cluster/plot_cluster_iris.py | Python | bsd-3-clause | 2,573 |
"""Tests for local fab commands."""
from django.test import TestCase
from ..fabfile.local import check, flake8, jshint
class JshintTestCase(TestCase):
    """Smoke test: the ``jshint`` fab task runs without raising."""
    def test_command(self):
        jshint()
class Flake8TestCase(TestCase):
    """Smoke test: the ``flake8`` fab task runs without raising."""
    def test_command(self):
        flake8()
class CheckTestCase(TestCase):
    """Smoke test: the ``check`` fab task terminates via SystemExit."""
    def test_command(self):
        with self.assertRaises(SystemExit):
            check()
| bitmazk/django-development-fabfile | development_fabfile/tests/local_tests.py | Python | mit | 403 |
# coding: utf8
# try something like
from gluon.tools import Service

# JSON-RPC dispatcher bound to this controller's functions (web2py).
service = Service(globals())


def call():
    """Entry point web2py routes all service requests through."""
    session.forget()  # RPC calls are stateless; skip the session file.
    return service()


response.generic_patterns = ['*.json', '*.html']
def get_project_id(project_name):
    """Return the id of the named project, creating it if it doesn't exist."""
    row = db(db.psp_project.name == project_name).select().first()
    if row:
        return row.project_id
    # Unknown project: register it and use the freshly assigned id.
    return db.psp_project.insert(name=project_name)
@service.jsonrpc
def get_projects():
    """Return the name of every stored project (exposed via JSON-RPC)."""
    return [row.name for row in db(db.psp_project.project_id > 0).select()]
@service.jsonrpc
def save_project(project_name, defects, time_summaries, comments):
    """Replace the stored defects, time summaries and comments of a project.

    Each record dict is cleaned in place: client-side ids are dropped (the
    database assigns fresh ones) and project_id is forced to the resolved
    project.  Returns True when all three record sets are stored.
    """
    project_id = get_project_id(project_name)
    # clean and store defects:
    db(db.psp_defect.project_id == project_id).delete()
    for defect in defects:
        defect['project_id'] = project_id
        defect.pop("id", None)
        defect.pop("defect_id", None)
        # JSON seems adding time ("2014-11-12 00:00:00"), remove it
        if ' ' in defect['date']:
            defect['date'] = defect['date'].split(' ')[0]
        db.psp_defect.insert(**defect)
    # clean and store time summaries (same id-stripping idiom as defects;
    # previously written as "if 'id' in ...: del ..." -- unified for
    # consistency, behavior is identical):
    db(db.psp_time_summary.project_id == project_id).delete()
    for time_summary in time_summaries:
        time_summary['project_id'] = project_id
        time_summary.pop('id', None)
        db.psp_time_summary.insert(**time_summary)
    # clean and store comments:
    db(db.psp_comment.project_id == project_id).delete()
    for comment in comments:
        comment['project_id'] = project_id
        comment.pop('id', None)
        db.psp_comment.insert(**comment)
    return True
@service.jsonrpc
def load_project(project_name):
    """Return (defects, time_summaries, comments) for the named project."""
    project_id = get_project_id(project_name)
    stored_defects = db(db.psp_defect.project_id == project_id).select()
    stored_times = db(db.psp_time_summary.project_id == project_id).select()
    stored_comments = db(db.psp_comment.project_id == project_id).select()
    return stored_defects, stored_times, stored_comments
@service.jsonrpc
def update_project(project_name, actual_loc, reuse_library_entries):
    "Update counted LOC and reuse library entries (postmortem)"
    project_id = get_project_id(project_name)
    # BUG FIX: the original guarded both updates with "if project:", but
    # no name "project" exists in this scope (get_project_id returns only
    # the id), so every call raised NameError.  get_project_id always
    # yields a valid id (creating the project when missing), so guard on
    # the id instead.
    if project_id:
        # update total loc counted:
        db(db.psp_project.project_id == project_id).update(actual_loc=actual_loc)
        # clean and store reuse library entries:
        db(db.psp_reuse_library.project_id == project_id).delete()
        for entry in reuse_library_entries:
            entry['project_id'] = project_id
            db.psp_reuse_library.insert(**entry)
    return True
| plcode7/rad2py | psp2py/controllers/services.py | Python | gpl-3.0 | 2,763 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
from Tkinter import *
from r515.projector import *
import time
import ConfigParser
class Application(Frame):
    """Tkinter control panel for a Barco R515 projector.

    Shows playback status, remaining times for the current playlist (SPL)
    and composition (DCP/CPL), and offers transport, douser, picture
    format, lamp power and "instant logo" controls.  Relies on the
    module-level ``prj``, ``settings``, ``library``, ``conn`` and
    ``config`` objects created at the bottom of this file.
    """

    def _ms_to_tc(self, ms):
        """Convert milliseconds to an HH:MM:SS timecode string.

        Uses Python 2 integer division, so ``ms`` is expected to be int.
        """
        #ms = int(ms)
        seconds = ms / 1000
        hours = seconds/3600
        minutes = (seconds%3600)/60
        seconds = seconds%60
        return '%(hours)02d:%(minutes)02d:%(seconds)02d' % {"hours": hours, "minutes": minutes, "seconds": seconds}

    def sync_time(self):
        """Refresh the remaining-time labels from the cached show info."""
        info = self._values
        if info["uuid"] != None:
            if info["typ"] == "CPL":
                # Single composition playing: no playlist context.
                self._remaining_cpl = int(info["time"]["remaining"])
                self.cpl_time["text"] = "DCP verbleibend: " + self._ms_to_tc(self._remaining_cpl)
                self.spl_time["text"] = "Playlist verbleibend: 00:00:00"
                self.spl_name["text"] = "Keine Playlist ausgewählt"
            elif info["typ"] == "SPL":
                self._remaining_spl = int(info["time"]["remaining"])
                self.spl_time["text"] = "Playlist verbleibend: " + self._ms_to_tc(self._remaining_spl)
                if self._cpl != None:
                    # Remaining CPL time = its duration minus how far the
                    # playlist has progressed past the CPL's offset.
                    self._remaining_cpl = self._cpl["duration"] - (int(info["time"]["elapsed"]) - self._cpl["offset"])
                    self.cpl_time["text"] = "DCP verbleibend: " + self._ms_to_tc(self._remaining_cpl)

    def sync_playlist(self):
        """Resolve the current playlist and the CPL playing inside it."""
        playlist = Playlist(conn, self._values["uuid"])
        self._cpl = playlist.get_cpl_at_offset(self._values["time"]["elapsed"])
        self.spl_name["text"] = playlist.get_title()
        if self._cpl != None:
            self.cpl_name["text"] = library.get_cpl_info(self._cpl["uuid"])["title"]

    def update_time(self):
        """Local 1 Hz countdown between the 30 s server syncs."""
        if self._playing:
            self._remaining_cpl = self._remaining_cpl - 1000
            self.cpl_time["text"] = "DCP verbleibend: " + self._ms_to_tc(self._remaining_cpl)
            if self._values["typ"] == "SPL":
                self._remaining_spl = self._remaining_spl - 1000
                self.spl_time["text"] = "Playlist verbleibend: " + self._ms_to_tc(self._remaining_spl)
            if self._remaining_cpl - 1000 <= 100:
                # Current CPL is about to end: re-query the projector.
                if self._values["typ"] == "SPL":
                    self._values = prj.get_show_info()
                    self.sync_playlist()
                    self.sync_time()
                else:
                    self.sync_status(False)
        self.after(1000, self.update_time)

    def sync_status(self, loop = True):
        """Pull full status from the projector and refresh all widgets.

        With loop=True (default) reschedules itself every 30 s.
        """
        current_settings = settings.get_projector_settings()
        self._values = prj.get_show_info()
        self.sync_time()
        values = self._values
        self.status["text"] = values["status"]
        if values["status"] == "STOPPED" or values["status"] == "PAUSED" or values["status"] == "COMPLETED":
            self.play["text"] = "Play"
            self._playing = False
        elif values["status"] == "PLAYING":
            self.play["text"] = "Pause"
            self._playing = True
        # picture_mode comes back as a string index 0..3.
        if current_settings["picture_mode"] == "0":
            self.format["text"] = "DCI Flat"
        elif current_settings["picture_mode"] == "1":
            self.format["text"] = "DCI CS"
        elif current_settings["picture_mode"] == "2":
            self.format["text"] = "HDMI Flat"
        elif current_settings["picture_mode"] == "3":
            self.format["text"] = "HDMI CS"
        if values["typ"] == "CPL":
            self.cpl_name["text"] = library.get_cpl_info(values["uuid"])["title"]
        elif values["typ"] == "SPL":
            self.sync_playlist()
        if loop:
            self.after(30000, self.sync_status)

    def update_power(self):
        """Poll lamp power state every 40 s and relabel the power button."""
        status = prj.get_power_status()
        if status == "STANDBY":
            self.power["text"] = "Lampe anschalten"
        elif status == "ON":
            self.power["text"] = "Lampe ausschalten"
        else:
            # Transitional states (e.g. WARM_UP/COOLING) shown verbatim.
            self.power["text"] = status
        self.after(40000, self.update_power)

    def play_toggle(self):
        """Play/pause/resume depending on the cached transport state."""
        status = self._values["status"]
        if status == "STOPPED" or status == "NONE" or status == "COMPLETED":
            prj.play()
        elif status == "PLAYING":
            prj.pause()
        elif status == "PAUSED":
            prj.resume()
        # NOTE(review): this calls sync_status(False) immediately and
        # passes its return value (None) to after() -- probably meant
        # self.after(800, lambda: self.sync_status(False)); confirm.
        self.after(800, self.sync_status(False))

    def power_toggle(self):
        """Toggle lamp power and show the expected transitional state."""
        status = prj.get_power_status()
        if status == "STANDBY":
            prj.power_on()
            self.power["text"] = "WARM_UP"
        elif status == "ON":
            prj.power_standby()
            self.power["text"] = "COOLING"
        else:
            self.power["text"] = status

    def stop_command(self):
        """Stop playback and refresh the UI shortly after."""
        prj.stop()
        self.after(2000, self.sync_status(False))

    def play_logo(self):
        """Play the configured logo CPL (douser closed during the switch).

        Reads UUID/Format/Loop from the [Logo] section of r515.cfg; with
        Loop=False the logo is paused on a frame after 4 seconds.
        """
        prj.close_douser()
        prj.stop()
        # Switch picture format if it differs from the configured one
        # (config stores 1-based values, the projector reports 0-based).
        if settings.get_projector_settings()["picture_mode"] != (int(config.get('Logo', 'Format'))-1):
            settings.load_format(int(config.get('Logo', 'Format')))
            print "Format", (int(config.get('Logo', 'Format'))-1)
        #time.sleep(3)
        # Busy-wait until the projector reports the transport has stopped.
        status = prj.get_show_info()["status"]
        while not (status == "STOPPED" or status == "NONE" or status == "COMPLETED"):
            status = prj.get_show_info()["status"]
            time.sleep(0.8)
        prj.play(config.get('Logo', 'UUID'), "CPL", 0)
        if config.get('Logo', 'Loop') == "False":
            time.sleep(4)
            prj.pause()
        prj.open_douser()
        self.after(800, self.sync_status(False))

    def toggle_douser(self):
        """Alternate between opening and closing the douser (space key)."""
        if self._douser:
            prj.open_douser()
        else:
            prj.close_douser()
        self._douser = not self._douser

    def createWidgets(self):
        """Build the status labels and control buttons, top to bottom."""
        self.status = Label(self)
        self.status["text"] = "Stopped"
        self.status["justify"] = "center"
        self.status["padx"] = 10
        self.status.pack({"anchor": "n", "side": "top", "expand": 1, "fill":"x"})
        self.format = Label(self)
        self.format["text"] = "DCI Flat"
        self.format["justify"] = "center"
        self.format["padx"] = 10
        self.format.pack({"anchor": "n", "side": "top", "expand": 1, "fill":"x"})
        self.spl_name = Label(self)
        self.spl_name["text"] = "Keine Playlist ausgewählt"
        self.spl_name["justify"] = "center"
        self.spl_name["padx"] = 10
        self.spl_name.pack({"anchor": "n", "side": "top", "expand": 1, "fill":"x"})
        self.spl_time = Label(self)
        self.spl_time["text"] = "Playlist verbleibend: 00:00:00"
        self.spl_time["justify"] = "center"
        self.spl_time["padx"] = 10
        self.spl_time.pack({"anchor": "n", "side": "top", "expand": 1, "fill":"x"})
        self.cpl_name = Label(self)
        self.cpl_name["text"] = "Keine DCP ausgewählt"
        self.cpl_name["justify"] = "center"
        self.cpl_name["padx"] = 10
        self.cpl_name.pack({"anchor": "n", "side": "top", "expand": 1, "fill":"x"})
        self.cpl_time = Label(self)
        self.cpl_time["text"] = "DCP verbleibend: 00:00:00"
        self.cpl_time["justify"] = "center"
        self.cpl_time["padx"] = 10
        self.cpl_time.pack({"anchor": "n", "side": "top", "expand": 1, "fill":"x"})
        self.play = Button(self)
        self.play["text"] = "Play"
        self.play["command"] = self.play_toggle
        self.play.pack({"anchor": "n", "side": "top", "expand": 1, "fill":"x"})
        self.stop = Button(self)
        self.stop["text"] = "Stop"
        self.stop["command"] = self.stop_command
        self.stop.pack({"anchor": "n", "side": "top", "expand": 1, "fill":"x"})
        self.open_douser = Button(self)
        self.open_douser["text"] = "Klappe öffnen"
        self.open_douser["command"] = prj.open_douser
        self.open_douser.pack({"anchor": "n", "side": "top", "expand": 1, "fill":"x"})
        self.close_douser = Button(self)
        self.close_douser["text"] = "Klappe schließen"
        self.close_douser["command"] = prj.close_douser
        self.close_douser.pack({"anchor": "n", "side": "top", "expand": 1, "fill":"x"})
        self.dci_flat = Button(self)
        self.dci_flat["text"] = "DCI Flat"
        self.dci_flat["command"] = lambda: settings.load_format(1)
        self.dci_flat.pack({"anchor": "n", "side": "top", "expand": 1, "fill":"x"})
        self.dci_cs = Button(self)
        self.dci_cs["text"] = "DCI CS"
        self.dci_cs["command"] = lambda: settings.load_format(2)
        self.dci_cs.pack({"anchor": "n", "side": "top", "expand": 1, "fill":"x"})
        self.hdmi_flat = Button(self)
        self.hdmi_flat["text"] = "HDMI Flat"
        self.hdmi_flat["command"] = lambda: settings.load_format(3)
        self.hdmi_flat.pack({"anchor": "n", "side": "top", "expand": 1, "fill":"x"})
        self.hdmi_cs = Button(self)
        self.hdmi_cs["text"] = "HDMI CS"
        self.hdmi_cs["command"] = lambda: settings.load_format(4)
        self.hdmi_cs.pack({"anchor": "n", "side": "top", "expand": 1, "fill":"x"})
        self.power = Button(self)
        self.power["text"] = "Lampe anschalten"
        self.power["command"] = self.power_toggle
        self.power.pack({"anchor": "n", "side": "top", "expand": 1, "fill":"x"})
        self.logo = Button(self)
        self.logo["text"] = "Instant Logo"
        self.logo["command"] = self.play_logo
        self.logo.pack({"anchor": "n", "side": "top", "expand": 1, "fill":"x"})

    def __init__(self, master=None):
        self._cpl = None
        Frame.__init__(self, master)
        self.pack({"side": "top", "expand": 1, "fill":"both"})
        self.createWidgets()
        # Keyboard shortcuts: p=play/pause, l=logo, s=stop, 1-4=formats,
        # space=douser toggle.
        # NOTE(review): "s" binds self.stop, which is the Stop *Button*
        # widget, not a callable command -- likely meant self.stop_command;
        # confirm.
        master.bind("p", lambda e: self.play_toggle())
        master.bind("l", lambda e: self.play_logo())
        master.bind("s", lambda e: self.stop())
        master.bind("1", lambda e: settings.load_format(1))
        master.bind("2", lambda e: settings.load_format(2))
        master.bind("3", lambda e: settings.load_format(3))
        master.bind("4", lambda e: settings.load_format(4))
        master.bind("<space>", lambda e: self.toggle_douser())
        self._douser = False
        # Kick off the periodic sync/countdown callbacks.
        self.after(0, self.sync_status)
        self.after(0, self.sync_time)
        self.after(0, self.update_power)
        self.after(1000, self.update_time)
class Library_Window(Frame):
    """Secondary window listing the projector's DCPs and playlists.

    Double-clicking an entry is wired to ``play``; the DCP/Playlist
    buttons switch which list is shown.
    """

    def load_dcp(self):
        """Fill the listbox with all CPL titles from the library."""
        self.dcp["state"] = DISABLED
        self.spl["state"] = NORMAL
        self.table.delete(0, self.table.size()-1)
        self.table.bind("<Double-Button-1>", (lambda e: self.play(e, "CPL")))
        self._list = library.get_cpl_list()
        for cpl in self._list:
            self.table.insert(END, cpl['title'])

    def load_spl(self):
        """Fill the listbox with all playlist titles from the library."""
        self.spl["state"] = DISABLED
        self.dcp["state"] = NORMAL
        self.table.delete(0, self.table.size()-1)
        self.table.bind("<Double-Button-1>", (lambda e: self.play(e, "SPL")))
        self._list = library.get_spl_list()
        for cpl in self._list:
            self.table.insert(END, cpl['title'])

    def play(self, event, typ):
        """Double-click handler for the selected CPL/SPL entry.

        NOTE(review): the actual playback sequence is commented out;
        currently only the selected item's UUID is printed -- confirm
        whether this is intentional debug state.
        """
        #prj.close_douser()
        #prj.stop()
        #status = prj.get_show_info()["status"]
        #while not (status == "STOPPED" or status == "NONE" or status == "COMPLETED"):
        #    status = prj.get_show_info()["status"]
        #time.sleep(0.5)
        #prj.play(self._list[int(self.table.curselection()[0])]["uuid"], typ, 0)
        print self._list[int(self.table.curselection()[0])]["uuid"]
        #prj.open_douser()
        #self.after(800, app.sync_status(False))

    def createWidgets(self):
        """Build the paned layout: mode buttons above a scrollable list."""
        self.pane = PanedWindow(self)
        self.pane["showhandle"] = True
        self.pane.pack({"anchor": "n", "side": "top", "expand": 1, "fill":"both"})
        self.button_frame = Frame(self.pane)
        self.button_frame.pack({"anchor": "n", "side": "top"})
        self.pane.add(self.button_frame)
        self.dcp = Button(self.button_frame)
        self.dcp["text"] = "DCP"
        self.dcp["justify"] = "center"
        self.dcp["padx"] = 10
        self.dcp["command"] = self.load_dcp
        self.dcp.pack({"anchor": "n", "side": "top", "expand": 0, "fill":"x"})
        self.spl = Button(self.button_frame)
        self.spl["text"] = "Playlist"
        self.spl["justify"] = "center"
        self.spl["padx"] = 10
        self.spl["command"] = self.load_spl
        self.button_frame["height"] = 50
        self.spl.pack({"anchor": "n", "side": "top", "expand": 0, "fill":"x"})
        self.table_frame = LabelFrame(self.pane)
        self.table_frame["text"] = "DCP Auswahl (Start durch Doppelklick)"
        self.table_frame.pack({"anchor": "n", "side": "top", "expand": 1, "fill":"both"})
        self.pane.add(self.table_frame)
        self.scrollbar = Scrollbar(self.table_frame)
        self.scrollbar.pack({"side": "right", "fill": "y"})
        self.table = Listbox(self.table_frame)
        self.table["yscrollcommand"] = self.scrollbar.set
        self.scrollbar["command"] = self.table.yview
        self.table.pack({"anchor": "n", "side": "left", "expand": 1, "fill":"both"})

    def __init__(self, master=None):
        Frame.__init__(self, master)
        self.pack({"side": "top", "expand": 1, "fill":"both"})
        self.createWidgets()
#Load settings: IP Adresse Benutzer Passwort
config = ConfigParser.ConfigParser()
config.read(['r515.cfg'])
# Shared connection and API facades used by both windows above.
conn = Connection(config.get('Connection', 'IP'), config.get('Connection', 'USR'), config.get('Connection', 'PWD'))
prj = BasicFunctions(conn)
settings = BasicSettings(conn)
library = Library(conn)
# Main control window plus a secondary library browser window.
root = Tk()
root.wm_title("R515 Control")
app = Application(master=root)
t = Toplevel(root)
t.wm_title("R515 Library")
l = Library_Window(t)
app.mainloop()
root.destroy()
| DirkWilhelmi/mikro_tms | gui.py | Python | gpl-2.0 | 13,886 |
#!/usr/bin/python2
# Copyright (c) 2011 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Process ppapi header files, e.g.
../ppapi/c/ppp_*h and
../ppapi/c/ppb_*h
And check whether they could cause pnacl calling convention problems
"""
import sys
import re
# NOTE: there is an extra white space at the end
# which distinguishes them from pointer types
# like struct PP_Var*
BAD_TYPES = ["struct PP_CompletionCallback ",
"struct PP_Var ",
"struct PP_Point ",
"struct PP_FloatPoint ",
"struct PP_Context3DTrustedState ",
]
RE_INTERFACE_STRUCT_START = re.compile(r"^struct +PP[PB]_[_0-9A-Za-z]+ +{$")
def ProcessHeader(filename):
    """Extract interface struct bodies from one ppapi header file.

    Returns the lines of every block from a 'struct PPB_*/PPP_* {'
    header up to and including its closing '};', concatenated in file
    order.
    """
    result = []
    found_struct = 0
    # FIX: use a context manager so the header file is closed promptly
    # (the original never closed the handle, leaving it to the GC).
    with open(filename) as header:
        for line in header:
            if found_struct:
                result.append(line)
                if line.startswith("};"):
                    found_struct = 0
            else:
                if RE_INTERFACE_STRUCT_START.search(line.strip()):
                    result.append(line)
                    found_struct = 1
    return result
def StripComments(lines):
    """Drop blank lines and comment lines from extracted struct bodies.

    Recognizes only the indentation used inside interface structs:
    line comments starting with '  //', and block comments opened by a
    line starting with '  /*' and closed by one starting with ' */'
    (both delimiter lines are dropped as well).
    """
    kept = []
    in_block_comment = False
    for line in lines:
        if not line.strip():
            continue
        if in_block_comment:
            # Swallow lines until the closing marker (inclusive).
            in_block_comment = not line.startswith(" */")
            continue
        if line.startswith("  //"):
            continue
        if line.startswith("  /*"):
            in_block_comment = True
            continue
        kept.append(line)
    return kept
def MakeSingleLineProtos(lines):
    """Join multi-line function prototypes into single lines.

    A line ending in ',' or '(' continues on the next line, so its
    trailing newline is stripped and the following line (left-stripped)
    is glued onto it.  Whether the last emitted line ends with a newline
    tells us if it is still open for continuation.
    """
    merged = []
    for line in lines:
        if line.endswith(",\n") or line.endswith("(\n"):
            # Continuation line: drop the newline so the next piece joins.
            line = line.rstrip()
        if not merged or merged[-1].endswith("\n"):
            merged.append(line)
        else:
            merged[-1] += line.lstrip()
    return merged
def ExtractInterfaces(lines):
    """Split a flat line list into one list per interface struct.

    A new group starts at every line matching the interface-struct
    header; every line (the header included) is appended to the most
    recent group.  Raises IndexError if the very first line is not a
    struct header, since there is then no current group.
    """
    groups = []
    for line in lines:
        if RE_INTERFACE_STRUCT_START.search(line.strip()):
            groups.append([])
        groups[-1].append(line)
    return groups
######################################################################
#
######################################################################
def AnalyzeInterface(lines):
  """Report interface members whose result or argument uses a BAD_TYPE.

  ``lines`` is one interface region: the "struct ... {" opener, one
  prototype per line, and the "};" closer.  Returns the names of the
  offending member functions (a name may appear more than once when both
  its result and an argument are problematic).
  """
  bad_functions = []
  # Skip the "struct ... {" opener and the "};" closer.
  for line in lines[1:-1]:
    # functions look like:
    #  void (*PostMessage)(PP_Instance instance, struct PP_Var message);
    result, rest = line.split("(*", 1)
    name, rest = rest.split(")(", 1)
    args, rest = rest.split(");", 1)
    args = args.split(",")
    # print result, name, repr(args)
    result = result.strip()
    # BAD_TYPES entries carry a trailing space (to exclude pointers), so
    # strip it when matching the return type, which has no trailing text.
    for bad in BAD_TYPES:
      bad = bad.strip()
      if result.startswith(bad):
        print "@PROBLEM: [result %s] in:" % result, line.strip()
        bad_functions.append(name)
    for a in args:
      a = a.strip()
      for bad in BAD_TYPES:
        if a.startswith(bad):
          print "@PROBLEM: [%s] in:" % a, line.strip()
          bad_functions.append(name)
  return bad_functions
######################################################################
#
######################################################################
bad_interfaces = []
bad_functions = []
for filename in sys.argv:
lines = MakeSingleLineProtos(StripComments(ProcessHeader(filename)))
# a number of files contain multiple interfacess
for interface in ExtractInterfaces(lines):
print
print
print filename
print "".join(interface),
errors = AnalyzeInterface(interface)
if len(errors) > 0:
bad_functions += errors
# the first line looks like:
# struct PPB_URLLoader {
tokens = interface[0].split()
bad_interfaces.append(tokens[1])
print "\nBAD INTERFACES (%d):" % len(bad_interfaces)
for b in bad_interfaces:
print b
print
print "BAD FUNCTIONS (%d):" % len(bad_functions)
for b in bad_functions:
print b
| Lind-Project/native_client | pnacl/unsupported/interface_checker.py | Python | bsd-3-clause | 4,077 |
# -*- coding: utf-8 -*-
import logging
log = logging.getLogger(__name__)
class Light(object):
    """A single Philips Hue light addressed through a bridge.

    Every read goes through ``_update`` (a fresh GET against the bridge),
    so attribute values always reflect the bridge's current state; every
    write is a PUT followed by a refresh.
    """

    def __init__(self, light_id, bridge):
        self.bridge = bridge
        self.id = int(light_id)
        self.json = None  # cached /lights/<id> document, set by _update()
        self._update()

    def __str__(self):
        return '<%s %s "%s">' % (self.__class__.__name__, self.id,
                                 self.name)

    def _update(self):
        # Refresh the cached JSON document for this light from the bridge.
        j = self.bridge.get('/lights/%s' % self.id)
        self.json = j

    def __getattr__(self, name):
        # Fallback lookup: expose top-level keys of the light's JSON
        # document (e.g. ``modelid``) as attributes.
        self._update()
        if name in self.json:
            return self.json[name]
        else:
            raise AttributeError("'%s' object has no attribute '%s'" %
                                 (self.__class__.__name__, name))

    def _put(self, path, data):
        # PUT ``data`` to /lights/<id>/<path> on the bridge.
        return self.bridge.put('/lights/%s/%s' % (self.id, path), data)

    @property
    def name(self):
        self._update()
        return self.json['name']

    @name.setter
    def name(self, name):
        self._put('', {"name": name})
        self._update()

    def off(self):
        """Turn the light off."""
        self.set_state({"on": False})

    def on(self, state=None):
        """Turn the light on, optionally merging extra state attributes.

        BUG FIX: the previous signature ``on(self, state={})`` used a
        mutable default that ``state.update`` mutated, sharing the dict
        across calls and also mutating any dict the caller passed in.
        We now copy the caller's dict instead.
        """
        state = dict(state) if state else {}
        state["on"] = True
        self.set_state(state)

    def get_state(self, state):
        """Return one attribute from the light's ``state`` sub-document."""
        self._update()
        return self.json['state'][state]

    def set_state(self, state):
        """PUT a state-attribute dict and refresh the local cache."""
        self._put('state', state)
        self._update()

    @property
    def alert(self):
        # BUG FIX: the ``return`` was missing, so this always yielded None.
        return self.get_state('alert')

    @alert.setter
    def alert(self, state):
        self.set_state({"alert": state})

    @property
    def bri(self):
        return self.get_state('bri')

    @bri.setter
    def bri(self, brightness):
        # Hue brightness range; out-of-range values are silently ignored,
        # matching the original behavior.
        if 0 <= brightness <= 255:
            self.set_state({"bri": brightness})

    @property
    def ct(self):
        return self.get_state('ct')

    @ct.setter
    def ct(self, ct):
        # (setter return values are discarded by Python; no ``return``)
        self.set_state({"ct": ct})

    @property
    def effect(self):
        return self.get_state('effect')

    @effect.setter
    def effect(self, effect):
        self.set_state({"effect": effect})

    @property
    def hue(self):
        return self.get_state('hue')

    @hue.setter
    def hue(self, hue):
        if 0 <= hue <= 65535:
            self.set_state({"hue": hue})

    @property
    def sat(self):
        return self.get_state('sat')

    @sat.setter
    def sat(self, saturation):
        if 0 <= saturation <= 255:
            self.set_state({"sat": saturation})

    @property
    def xy(self):
        return self.get_state('xy')

    @xy.setter
    def xy(self, xy):
        # CIE xy color coordinates: a pair with each component in [0, 1].
        if len(xy) == 2 and 0 <= xy[0] <= 1 and 0 <= xy[1] <= 1:
            self.set_state({"xy": xy})
| sebastianw/hue | hue/light.py | Python | mit | 2,671 |
from sympy import (
Abs, Dummy, Eq, Gt, Function, Mod,
LambertW, Piecewise, Poly, Rational, S, Symbol, Matrix,
asin, acos, acsc, asec, atan, atanh, cos, csc, erf, erfinv, erfc, erfcinv,
exp, log, pi, sin, sinh, sec, sqrt, symbols,
tan, tanh, atan2, arg,
Lambda, imageset, cot, acot, I, EmptySet, Union, E, Interval, Intersection,
oo)
from sympy.core.function import nfloat
from sympy.core.relational import Unequality as Ne
from sympy.functions.elementary.complexes import im, re
from sympy.functions.elementary.hyperbolic import HyperbolicFunction
from sympy.functions.elementary.trigonometric import TrigonometricFunction
from sympy.polys.rootoftools import CRootOf
from sympy.sets import (FiniteSet, ConditionSet, Complement, ImageSet)
from sympy.utilities.pytest import XFAIL, raises, skip, slow
from sympy.utilities.randtest import verify_numerically as tn
from sympy.physics.units import cm
from sympy.solvers.solveset import (
solveset_real, domain_check, solveset_complex, linear_eq_to_matrix,
linsolve, _is_function_class_equation, invert_real, invert_complex,
solveset)
# Real-valued symbols shared by the tests below; tests that need complex
# or otherwise-constrained symbols re-declare their own locally.
a = Symbol('a', real=True)
b = Symbol('b', real=True)
c = Symbol('c', real=True)
x = Symbol('x', real=True)
y = Symbol('y', real=True)
z = Symbol('z', real=True)
q = Symbol('q', real=True)
m = Symbol('m', real=True)
n = Symbol('n', real=True)
def test_invert_real():
    """invert_real over elementary functions; inverses are returned as sets.

    NOTE: x, y, n are deliberately rebound several times to change their
    assumptions (real/positive/unconstrained); statement order matters.
    """
    x = Symbol('x', real=True)
    y = Symbol('y')
    n = Symbol('n')

    def ireal(x, s=S.Reals):
        # Restrict a candidate inverse-image set to the reals.
        return Intersection(s, x)

    minus_n = Intersection(Interval(-oo, 0), FiniteSet(-n))
    plus_n = Intersection(Interval(0, oo), FiniteSet(n))
    assert solveset(abs(x) - n, x, S.Reals) == Union(minus_n, plus_n)

    assert invert_real(exp(x), y, x) == (x, ireal(FiniteSet(log(y))))

    y = Symbol('y', positive=True)
    n = Symbol('n', real=True)
    assert invert_real(x + 3, y, x) == (x, FiniteSet(y - 3))
    assert invert_real(x*3, y, x) == (x, FiniteSet(y / 3))

    assert invert_real(exp(x), y, x) == (x, FiniteSet(log(y)))
    assert invert_real(exp(3*x), y, x) == (x, FiniteSet(log(y) / 3))
    assert invert_real(exp(x + 3), y, x) == (x, FiniteSet(log(y) - 3))

    assert invert_real(exp(x) + 3, y, x) == (x, ireal(FiniteSet(log(y - 3))))
    assert invert_real(exp(x)*3, y, x) == (x, FiniteSet(log(y / 3)))

    assert invert_real(log(x), y, x) == (x, FiniteSet(exp(y)))
    assert invert_real(log(3*x), y, x) == (x, FiniteSet(exp(y) / 3))
    assert invert_real(log(x + 3), y, x) == (x, FiniteSet(exp(y) - 3))

    minus_y = Intersection(Interval(-oo, 0), FiniteSet(-y))
    plus_y = Intersection(Interval(0, oo), FiniteSet(y))
    assert invert_real(Abs(x), y, x) == (x, Union(minus_y, plus_y))

    assert invert_real(2**x, y, x) == (x, FiniteSet(log(y)/log(2)))
    assert invert_real(2**exp(x), y, x) == (x, ireal(FiniteSet(log(log(y)/log(2)))))

    assert invert_real(x**2, y, x) == (x, FiniteSet(sqrt(y), -sqrt(y)))
    assert invert_real(x**Rational(1, 2), y, x) == (x, FiniteSet(y**2))

    raises(ValueError, lambda: invert_real(x, x, x))
    raises(ValueError, lambda: invert_real(x**pi, y, x))
    raises(ValueError, lambda: invert_real(S.One, y, x))

    assert invert_real(x**31 + x, y, x) == (x**31 + x, FiniteSet(y))

    y_1 = Intersection(Interval(-1, oo), FiniteSet(y - 1))
    y_2 = Intersection(Interval(-oo, -1), FiniteSet(-y - 1))
    assert invert_real(Abs(x**31 + x + 1), y, x) == (x**31 + x,
                                                     Union(y_1, y_2))

    assert invert_real(sin(x), y, x) == \
        (x, imageset(Lambda(n, n*pi + (-1)**n*asin(y)), S.Integers))

    assert invert_real(sin(exp(x)), y, x) == \
        (x, imageset(Lambda(n, log((-1)**n*asin(y) + n*pi)), S.Integers))

    assert invert_real(csc(x), y, x) == \
        (x, imageset(Lambda(n, n*pi + (-1)**n*acsc(y)), S.Integers))

    assert invert_real(csc(exp(x)), y, x) == \
        (x, imageset(Lambda(n, log((-1)**n*acsc(y) + n*pi)), S.Integers))

    assert invert_real(cos(x), y, x) == \
        (x, Union(imageset(Lambda(n, 2*n*pi + acos(y)), S.Integers), \
                  imageset(Lambda(n, 2*n*pi - acos(y)), S.Integers)))

    assert invert_real(cos(exp(x)), y, x) == \
        (x, Union(imageset(Lambda(n, log(2*n*pi + Mod(acos(y), 2*pi))), S.Integers), \
                  imageset(Lambda(n, log(2*n*pi + Mod(-acos(y), 2*pi))), S.Integers)))

    assert invert_real(sec(x), y, x) == \
        (x, Union(imageset(Lambda(n, 2*n*pi + asec(y)), S.Integers), \
                  imageset(Lambda(n, 2*n*pi - asec(y)), S.Integers)))

    assert invert_real(sec(exp(x)), y, x) == \
        (x, Union(imageset(Lambda(n, log(2*n*pi + Mod(asec(y), 2*pi))), S.Integers), \
                  imageset(Lambda(n, log(2*n*pi + Mod(-asec(y), 2*pi))), S.Integers)))

    assert invert_real(tan(x), y, x) == \
        (x, imageset(Lambda(n, n*pi + atan(y) % pi), S.Integers))

    assert invert_real(tan(exp(x)), y, x) == \
        (x, imageset(Lambda(n, log(n*pi + atan(y) % pi)), S.Integers))

    assert invert_real(cot(x), y, x) == \
        (x, imageset(Lambda(n, n*pi + acot(y) % pi), S.Integers))

    assert invert_real(cot(exp(x)), y, x) == \
        (x, imageset(Lambda(n, log(n*pi + acot(y) % pi)), S.Integers))

    assert invert_real(tan(tan(x)), y, x) == \
        (tan(x), imageset(Lambda(n, n*pi + atan(y) % pi), S.Integers))

    x = Symbol('x', positive=True)
    assert invert_real(x**pi, y, x) == (x, FiniteSet(y**(1/pi)))

    # Test for ``set_h`` containing information about the domain
    n = Dummy('n')
    x = Symbol('x')
    h1 = Intersection(Interval(-oo, -3), FiniteSet(-a + b - 3),
                      imageset(Lambda(n, n - a - 3), Interval(0, oo)))
    h2 = Intersection(Interval(-3, oo), FiniteSet(a - b - 3),
                      imageset(Lambda(n, -n + a - 3), Interval(0, oo)))
    assert invert_real(Abs(Abs(x + 3) - a) - b, 0, x) == (x, Union(h1, h2))
def test_invert_complex():
    """invert_complex: exp inverts to a log-branch image set; bad args raise."""
    assert invert_complex(x + 3, y, x) == (x, FiniteSet(y - 3))
    assert invert_complex(x*3, y, x) == (x, FiniteSet(y / 3))
    assert invert_complex(exp(x), y, x) == \
        (x, imageset(Lambda(n, I*(2*pi*n + arg(y)) + log(Abs(y))), S.Integers))
    assert invert_complex(log(x), y, x) == (x, FiniteSet(exp(y)))
    raises(ValueError, lambda: invert_real(1, y, x))
    raises(ValueError, lambda: invert_complex(x, x, x))
    raises(ValueError, lambda: invert_complex(x, x, 1))
def test_domain_check():
    """domain_check: True only for points finite and inside the domain."""
    # A removable-singularity point and infinities are all rejected.
    assert domain_check(x**2, x, 0) is True
    assert domain_check(1/(1 + (1/(x+1))**2), x, -1) is False
    assert domain_check(0, x, oo) is False
    assert domain_check(x, x, oo) is False
def test_is_function_class_equation():
    """_is_function_class_equation: detect equations that are (only) in a
    given function class (trig/hyperbolic) of the target variable."""
    from sympy.abc import x, a
    assert _is_function_class_equation(TrigonometricFunction,
                                       tan(x), x) is True
    assert _is_function_class_equation(TrigonometricFunction,
                                       tan(x) - 1, x) is True
    assert _is_function_class_equation(TrigonometricFunction,
                                       tan(x) + sin(x), x) is True
    assert _is_function_class_equation(TrigonometricFunction,
                                       tan(x) + sin(x) - a, x) is True
    assert _is_function_class_equation(TrigonometricFunction,
                                       sin(x)*tan(x) + sin(x), x) is True
    assert _is_function_class_equation(TrigonometricFunction,
                                       sin(x)*tan(x + a) + sin(x), x) is True
    assert _is_function_class_equation(TrigonometricFunction,
                                       sin(x)*tan(x*a) + sin(x), x) is True
    assert _is_function_class_equation(TrigonometricFunction,
                                       a*tan(x) - 1, x) is True
    assert _is_function_class_equation(TrigonometricFunction,
                                       tan(x)**2 + sin(x) - 1, x) is True
    # Polynomial terms, non-linear inner arguments, symbolic exponents and
    # composed arguments all disqualify an equation from the class.
    assert _is_function_class_equation(TrigonometricFunction,
                                       tan(x) + x, x) is False
    assert _is_function_class_equation(TrigonometricFunction,
                                       tan(x**2), x) is False
    assert _is_function_class_equation(TrigonometricFunction,
                                       tan(x**2) + sin(x), x) is False
    assert _is_function_class_equation(TrigonometricFunction,
                                       tan(x)**sin(x), x) is False
    assert _is_function_class_equation(TrigonometricFunction,
                                       tan(sin(x)) + sin(x), x) is False
    assert _is_function_class_equation(HyperbolicFunction,
                                       tanh(x), x) is True
    assert _is_function_class_equation(HyperbolicFunction,
                                       tanh(x) - 1, x) is True
    assert _is_function_class_equation(HyperbolicFunction,
                                       tanh(x) + sinh(x), x) is True
    assert _is_function_class_equation(HyperbolicFunction,
                                       tanh(x) + sinh(x) - a, x) is True
    assert _is_function_class_equation(HyperbolicFunction,
                                       sinh(x)*tanh(x) + sinh(x), x) is True
    assert _is_function_class_equation(HyperbolicFunction,
                                       sinh(x)*tanh(x + a) + sinh(x), x) is True
    assert _is_function_class_equation(HyperbolicFunction,
                                       sinh(x)*tanh(x*a) + sinh(x), x) is True
    assert _is_function_class_equation(HyperbolicFunction,
                                       a*tanh(x) - 1, x) is True
    assert _is_function_class_equation(HyperbolicFunction,
                                       tanh(x)**2 + sinh(x) - 1, x) is True
    assert _is_function_class_equation(HyperbolicFunction,
                                       tanh(x) + x, x) is False
    assert _is_function_class_equation(HyperbolicFunction,
                                       tanh(x**2), x) is False
    assert _is_function_class_equation(HyperbolicFunction,
                                       tanh(x**2) + sinh(x), x) is False
    assert _is_function_class_equation(HyperbolicFunction,
                                       tanh(x)**sinh(x), x) is False
    assert _is_function_class_equation(HyperbolicFunction,
                                       tanh(sinh(x)) + sinh(x), x) is False
def test_garbage_input():
    """Non-Symbol targets and non-Expr equations must raise ValueError."""
    raises(ValueError, lambda: solveset_real(x, 1))
    raises(ValueError, lambda: solveset_real([x], x))
    raises(ValueError, lambda: solveset_real(x, pi))
    raises(ValueError, lambda: solveset_real(x, x**2))

    raises(ValueError, lambda: solveset_complex([x], x))
    raises(ValueError, lambda: solveset_complex(x, pi))
def test_solve_mul():
    """Products: solution set is the union over factors (minus exclusions)."""
    assert solveset_real((a*x + b)*(exp(x) - 3), x) == \
        FiniteSet(-b/a, log(3))
    assert solveset_real((2*x + 8)*(8 + exp(x)), x) == FiniteSet(S(-4))
    # x/log(x) has no real zero: x=0 is outside log's domain.
    assert solveset_real(x/log(x), x) == EmptySet()
def test_solve_invert():
    """Equations solved purely by inverting exp/log/powers."""
    assert solveset_real(exp(x) - 3, x) == FiniteSet(log(3))
    assert solveset_real(log(x) - 3, x) == FiniteSet(exp(3))

    assert solveset_real(3**(x + 2), x) == FiniteSet()
    assert solveset_real(3**(2 - x), x) == FiniteSet()

    assert solveset_real(y - b*exp(a/x), x) == Intersection(S.Reals, FiniteSet(a/log(y/b)))

    # issue 4504
    assert solveset_real(2**x - 10, x) == FiniteSet(log(10)/log(2))
def test_errorinverses():
    """erf/erfc and their inverses invert each other in solveset_real."""
    assert solveset_real(erf(x) - S.One/2, x) == \
        FiniteSet(erfinv(S.One/2))
    assert solveset_real(erfinv(x) - 2, x) == \
        FiniteSet(erf(2))
    assert solveset_real(erfc(x) - S.One, x) == \
        FiniteSet(erfcinv(S.One))
    assert solveset_real(erfcinv(x) - 2, x) == FiniteSet(erfc(2))
def test_solve_polynomial():
    """Real roots of polynomials and fractional-power equations."""
    assert solveset_real(3*x - 2, x) == FiniteSet(Rational(2, 3))

    assert solveset_real(x**2 - 1, x) == FiniteSet(-S(1), S(1))
    assert solveset_real(x - y**3, x) == FiniteSet(y ** 3)

    a11, a12, a21, a22, b1, b2 = symbols('a11, a12, a21, a22, b1, b2')

    assert solveset_real(x**3 - 15*x - 4, x) == FiniteSet(
        -2 + 3 ** Rational(1, 2),
        S(4),
        -2 - 3 ** Rational(1, 2))

    assert solveset_real(sqrt(x) - 1, x) == FiniteSet(1)
    assert solveset_real(sqrt(x) - 2, x) == FiniteSet(4)
    assert solveset_real(x**Rational(1, 4) - 2, x) == FiniteSet(16)
    assert solveset_real(x**Rational(1, 3) - 3, x) == FiniteSet(27)
    assert len(solveset_real(x**5 + x**3 + 1, x)) == 1
    assert len(solveset_real(-2*x**3 + 4*x**2 - 2*x + 6, x)) > 0
def test_return_root_of():
    """Unsolvable-by-radicals polynomials come back as CRootOf objects."""
    f = x**5 - 15*x**3 - 5*x**2 + 10*x + 20
    s = list(solveset_complex(f, x))
    for root in s:
        assert root.func == CRootOf

    # if one uses solve to get the roots of a polynomial that has a CRootOf
    # solution, make sure that the use of nfloat during the solve process
    # doesn't fail. Note: if you want numerical solutions to a polynomial
    # it is *much* faster to use nroots to get them than to solve the
    # equation only to get CRootOf solutions which are then numerically
    # evaluated. So for eq = x**5 + 3*x + 7 do Poly(eq).nroots() rather
    # than [i.n() for i in solve(eq)] to get the numerical roots of eq.
    assert nfloat(list(solveset_complex(x**5 + 3*x**3 + 7, x))[0],
                  exponent=False) == CRootOf(x**5 + 3*x**3 + 7, 0).n()

    sol = list(solveset_complex(x**6 - 2*x + 2, x))
    assert all(isinstance(i, CRootOf) for i in sol) and len(sol) == 6

    f = x**5 - 15*x**3 - 5*x**2 + 10*x + 20
    s = list(solveset_complex(f, x))
    for root in s:
        assert root.func == CRootOf

    s = x**5 + 4*x**3 + 3*x**2 + S(7)/4
    assert solveset_complex(s, x) == \
        FiniteSet(*Poly(s*4, domain='ZZ').all_roots())

    # Refer issue #7876
    eq = x*(x - 1)**2*(x + 1)*(x**6 - x + 1)
    assert solveset_complex(eq, x) == \
        FiniteSet(-1, 0, 1, CRootOf(x**6 - x + 1, 0),
                  CRootOf(x**6 - x + 1, 1),
                  CRootOf(x**6 - x + 1, 2),
                  CRootOf(x**6 - x + 1, 3),
                  CRootOf(x**6 - x + 1, 4),
                  CRootOf(x**6 - x + 1, 5))
def test__has_rational_power():
    """_has_rational_power: (flag, lcm of fractional-power denominators)."""
    from sympy.solvers.solveset import _has_rational_power
    assert _has_rational_power(sqrt(2), x)[0] is False
    assert _has_rational_power(x*sqrt(2), x)[0] is False

    assert _has_rational_power(x**2*sqrt(x), x) == (True, 2)
    assert _has_rational_power(sqrt(2)*x**(S(1)/3), x) == (True, 3)
    assert _has_rational_power(sqrt(x)*x**(S(1)/3), x) == (True, 6)
def test_solveset_sqrt_1():
    """Simple radical equations with a single sqrt term."""
    assert solveset_real(sqrt(5*x + 6) - 2 - x, x) == \
        FiniteSet(-S(1), S(2))
    assert solveset_real(sqrt(x - 1) - x + 7, x) == FiniteSet(10)
    assert solveset_real(sqrt(x - 2) - 5, x) == FiniteSet(27)
    assert solveset_real(sqrt(x) - 2 - 5, x) == FiniteSet(49)
    assert solveset_real(sqrt(x**3), x) == FiniteSet(0)
    assert solveset_real(sqrt(x - 1), x) == FiniteSet(1)
def test_solveset_sqrt_2():
    """Multi-radical equations; extraneous roots must be filtered out."""
    # http://tutorial.math.lamar.edu/Classes/Alg/SolveRadicalEqns.aspx#Solve_Rad_Ex2_a
    assert solveset_real(sqrt(2*x - 1) - sqrt(x - 4) - 2, x) == \
        FiniteSet(S(5), S(13))
    assert solveset_real(sqrt(x + 7) + 2 - sqrt(3 - x), x) == \
        FiniteSet(-6)

    # http://www.purplemath.com/modules/solverad.htm
    assert solveset_real(sqrt(17*x - sqrt(x**2 - 5)) - 7, x) == \
        FiniteSet(3)

    eq = x + 1 - (x**4 + 4*x**3 - x)**Rational(1, 4)
    assert solveset_real(eq, x) == FiniteSet(-S(1)/2, -S(1)/3)

    eq = sqrt(2*x + 9) - sqrt(x + 1) - sqrt(x + 4)
    assert solveset_real(eq, x) == FiniteSet(0)

    eq = sqrt(x + 4) + sqrt(2*x - 1) - 3*sqrt(x - 1)
    assert solveset_real(eq, x) == FiniteSet(5)

    eq = sqrt(x)*sqrt(x - 7) - 12
    assert solveset_real(eq, x) == FiniteSet(16)

    eq = sqrt(x - 3) + sqrt(x) - 3
    assert solveset_real(eq, x) == FiniteSet(4)

    eq = sqrt(2*x**2 - 7) - (3 - x)
    assert solveset_real(eq, x) == FiniteSet(-S(8), S(2))

    # others
    eq = sqrt(9*x**2 + 4) - (3*x + 2)
    assert solveset_real(eq, x) == FiniteSet(0)

    assert solveset_real(sqrt(x - 3) - sqrt(x) - 3, x) == FiniteSet()

    eq = (2*x - 5)**Rational(1, 3) - 3
    assert solveset_real(eq, x) == FiniteSet(16)

    assert solveset_real(sqrt(x) + sqrt(sqrt(x)) - 4, x) == \
        FiniteSet((-S.Half + sqrt(17)/2)**4)

    eq = sqrt(x) - sqrt(x - 1) + sqrt(sqrt(x))
    assert solveset_real(eq, x) == FiniteSet()

    eq = (sqrt(x) + sqrt(x + 1) + sqrt(1 - x) - 6*sqrt(5)/5)
    ans = solveset_real(eq, x)
    ra = S('''-1484/375 - 4*(-1/2 + sqrt(3)*I/2)*(-12459439/52734375 +
    114*sqrt(12657)/78125)**(1/3) - 172564/(140625*(-1/2 +
    sqrt(3)*I/2)*(-12459439/52734375 + 114*sqrt(12657)/78125)**(1/3))''')
    rb = S(4)/5
    # ra is a real root written in a complex (casus irreducibilis) form;
    # compare numerically rather than structurally.
    assert all(abs(eq.subs(x, i).n()) < 1e-10 for i in (ra, rb)) and \
        len(ans) == 2 and \
        set([i.n(chop=True) for i in ans]) == \
        set([i.n(chop=True) for i in (ra, rb)])

    assert solveset_real(sqrt(x) + x**Rational(1, 3) +
                                 x**Rational(1, 4), x) == FiniteSet(0)

    assert solveset_real(x/sqrt(x**2 + 1), x) == FiniteSet(0)

    eq = (x - y**3)/((y**2)*sqrt(1 - y**2))
    assert solveset_real(eq, x) == FiniteSet(y**3)

    # issue 4497
    assert solveset_real(1/(5 + x)**(S(1)/5) - 9, x) == \
        FiniteSet(-295244/S(59049))
@XFAIL
def test_solve_sqrt_fail():
    """Known failure: checksol does not try real_root on the candidate."""
    # this only works if we check real_root(eq.subs(x, S(1)/3))
    # but checksol doesn't work like that
    eq = (x**3 - 3*x**2)**Rational(1, 3) + 1 - x
    assert solveset_real(eq, x) == FiniteSet(S(1)/3)
@slow
def test_solve_sqrt_3():
    """Harder radical equations; one yields an unevaluated ConditionSet."""
    R = Symbol('R')
    eq = sqrt(2)*R*sqrt(1/(R + 1)) + (R + 1)*(sqrt(2)*sqrt(1/(R + 1)) - 1)
    sol = solveset_complex(eq, R)

    assert sol == FiniteSet(*[S(5)/3 + 4*sqrt(10)*cos(atan(3*sqrt(111)/251)/3)/3,
        -sqrt(10)*cos(atan(3*sqrt(111)/251)/3)/3 + 40*re(1/((-S(1)/2 -
        sqrt(3)*I/2)*(S(251)/27 + sqrt(111)*I/9)**(S(1)/3)))/9 +
        sqrt(30)*sin(atan(3*sqrt(111)/251)/3)/3 + S(5)/3 +
        I*(-sqrt(30)*cos(atan(3*sqrt(111)/251)/3)/3 -
        sqrt(10)*sin(atan(3*sqrt(111)/251)/3)/3 + 40*im(1/((-S(1)/2 -
        sqrt(3)*I/2)*(S(251)/27 + sqrt(111)*I/9)**(S(1)/3)))/9)])

    # the number of real roots will depend on the value of m: for m=1 there are 4
    # and for m=-1 there are none.
    eq = -sqrt((m - q)**2 + (-m/(2*q) + S(1)/2)**2) + sqrt((-m**2/2 - sqrt(
        4*m**4 - 4*m**2 + 8*m + 1)/4 - S(1)/4)**2 + (m**2/2 - m - sqrt(
        4*m**4 - 4*m**2 + 8*m + 1)/4 - S(1)/4)**2)
    unsolved_object = ConditionSet(q, Eq((-2*sqrt(4*q**2*(m - q)**2 +
        (-m + q)**2) + sqrt((-2*m**2 - sqrt(4*m**4 - 4*m**2 + 8*m + 1) -
        1)**2 + (2*m**2 - 4*m - sqrt(4*m**4 - 4*m**2 + 8*m + 1) - 1)**2
        )*Abs(q))/Abs(q), 0), S.Reals)
    assert solveset_real(eq, q) == unsolved_object
def test_solve_polynomial_symbolic_param():
    """Symbolic-parameter polynomials; denominators excluded via set difference."""
    assert solveset_complex((x**2 - 1)**2 - a, x) == \
        FiniteSet(sqrt(1 + sqrt(a)), -sqrt(1 + sqrt(a)),
                  sqrt(1 - sqrt(a)), -sqrt(1 - sqrt(a)))

    # issue 4507
    assert solveset_complex(y - b/(1 + a*x), x) == \
        FiniteSet((b/y - 1)/a) - FiniteSet(-1/a)

    # issue 4508
    assert solveset_complex(y - b*x/(a + x), x) == \
        FiniteSet(-a*y/(y - b)) - FiniteSet(-a)
def test_solve_rational():
    """Rational functions: poles removed, numerator zeros kept."""
    assert solveset_real(1/x + 1, x) == FiniteSet(-S.One)
    assert solveset_real(1/exp(x) - 1, x) == FiniteSet(0)
    assert solveset_real(x*(1 - 5/x), x) == FiniteSet(5)
    assert solveset_real(2*x/(x + 2) - 1, x) == FiniteSet(2)
    assert solveset_real((x**2/(7 - x)).diff(x), x) == \
        FiniteSet(S(0), S(14))
def test_solveset_real_gen_is_pow():
    """A constant nonzero expression written as a power has no solution."""
    expr = sqrt(1) + 1
    assert solveset_real(expr, x) == EmptySet()
def test_no_sol():
    """Equations with no real solution yield the empty set."""
    assert solveset_real(4, x) == EmptySet()
    assert solveset_real(exp(x), x) == EmptySet()
    assert solveset_real(x**2 + 1, x) == EmptySet()
    assert solveset_real(-3*a/sqrt(x), x) == EmptySet()
    assert solveset_real(1/x, x) == EmptySet()
    assert solveset_real(-(1 + x)/(2 + x)**2 + 1/(2 + x), x) == \
        EmptySet()
def test_sol_zero_real():
    """Identically-zero equations are satisfied on the whole domain."""
    assert solveset_real(0, x) == S.Reals
    assert solveset(0, x, Interval(1, 2)) == Interval(1, 2)
    assert solveset_real(-x**2 - 2*x + (x + 1)**2 - 1, x) == S.Reals
def test_no_sol_rational_extragenous():
    """Candidates cancelled by the denominator are extraneous -> empty set."""
    assert solveset_real((x/(x + 1) + 3)**(-2), x) == EmptySet()
    assert solveset_real((x - 1)/(1 + 1/(x - 1)), x) == EmptySet()
def test_solve_polynomial_cv_1a():
    """
    Test for solving on equations that can be converted to
    a polynomial equation using the change of variable y -> x**Rational(p, q)
    """
    assert solveset_real(sqrt(x) - 1, x) == FiniteSet(1)
    assert solveset_real(sqrt(x) - 2, x) == FiniteSet(4)
    assert solveset_real(x**Rational(1, 4) - 2, x) == FiniteSet(16)
    assert solveset_real(x**Rational(1, 3) - 3, x) == FiniteSet(27)
    assert solveset_real(x*(x**(S(1) / 3) - 3), x) == \
        FiniteSet(S(0), S(27))
def test_solveset_real_rational():
    """Test solveset_real for rational functions"""
    assert solveset_real((x - y**3) / ((y**2)*sqrt(1 - y**2)), x) \
        == FiniteSet(y**3)
    # issue 4486
    assert solveset_real(2*x/(x + 2) - 1, x) == FiniteSet(2)
def test_solveset_real_log():
    """log(f(x)) = 0 reduces to f(x) = 1 within log's domain."""
    assert solveset_real(log((x-1)*(x+1)), x) == \
        FiniteSet(sqrt(2), -sqrt(2))
def test_poly_gens():
    """Exponential equation reducible to a polynomial in 4**x-style gens."""
    assert solveset_real(4**(2*(x**2) + 2*x) - 8, x) == \
        FiniteSet(-Rational(3, 2), S.Half)
@XFAIL
def test_uselogcombine_1():
    """Known failure: solveset does not yet apply logcombine."""
    assert solveset_real(log(x - 3) + log(x + 3), x) == \
        FiniteSet(sqrt(10))
    assert solveset_real(log(x + 1) - log(2*x - 1), x) == FiniteSet(2)
    assert solveset_real(log(x + 3) + log(1 + 3/x) - 3) == FiniteSet(
        -3 + sqrt(-12 + exp(3))*exp(S(3)/2)/2 + exp(3)/2,
        -sqrt(-12 + exp(3))*exp(S(3)/2)/2 - 3 + exp(3)/2)
@XFAIL
def test_uselogcombine_2():
    """Known failure: logcombine needed before inverting."""
    eq = z - log(x) + log(y/(x*(-1 + y**2/x**2)))
    assert solveset_real(eq, x) == \
        FiniteSet(-sqrt(y*(y - exp(z))), sqrt(y*(y - exp(z))))
def test_solve_abs():
    """Equations and inequalities involving Abs."""
    assert solveset_real(Abs(x) - 2, x) == FiniteSet(-2, 2)
    assert solveset_real(Abs(x + 3) - 2*Abs(x - 3), x) == \
        FiniteSet(1, 9)
    assert solveset_real(2*Abs(x) - Abs(x - 1), x) == \
        FiniteSet(-1, Rational(1, 3))

    assert solveset_real(Abs(x - 7) - 8, x) == FiniteSet(-S(1), S(15))

    # issue 9565
    assert solveset_real(Abs((x - 1)/(x - 5)) <= S(1)/3, x) == Interval(-1, 2)

    # issue #10069
    eq = abs(1/(x - 1)) - 1 > 0
    u = Union(Interval.open(0, 1), Interval.open(1, 2))
    assert solveset_real(eq, x) == u
    assert solveset(eq, x, domain=S.Reals) == u

    raises(ValueError, lambda: solveset(abs(x) - 1, x))
@XFAIL
def test_rewrite_trigh():
    """Known failure: sech needs a rewrite step before solving."""
    # if this import passes then the test below should also pass
    from sympy import sech
    assert solveset_real(sinh(x) + sech(x), x) == FiniteSet(
        2*atanh(-S.Half + sqrt(5)/2 - sqrt(-2*sqrt(5) + 2)/2),
        2*atanh(-S.Half + sqrt(5)/2 + sqrt(-2*sqrt(5) + 2)/2),
        2*atanh(-sqrt(5)/2 - S.Half + sqrt(2 + 2*sqrt(5))/2),
        2*atanh(-sqrt(2 + 2*sqrt(5))/2 - sqrt(5)/2 - S.Half))
def test_real_imag_splitting():
    """Real-symbol assumptions let sqrt(a**2 - b**2) be solved for a."""
    a, b = symbols('a b', real=True, finite=True)
    assert solveset_real(sqrt(a**2 - b**2) - 3, a) == \
        FiniteSet(-sqrt(b**2 + 9), sqrt(b**2 + 9))
    assert solveset_real(sqrt(a**2 + b**2) - 3, a) != \
        S.EmptySet
def test_units():
    """Physical units (cm) pass through the solver unchanged."""
    expected = FiniteSet(2*cm)
    assert solveset_real(1/x - 1/(2*cm), x) == expected
def test_solve_only_exp_1():
    """Pure-exponential equations solvable by inversion/substitution."""
    y = Symbol('y', positive=True, finite=True)
    assert solveset_real(exp(x) - y, x) == FiniteSet(log(y))
    assert solveset_real(exp(x) + exp(-x) - 4, x) == \
        FiniteSet(log(-sqrt(3) + 2), log(sqrt(3) + 2))
    assert solveset_real(exp(x) + exp(-x) - y, x) != S.EmptySet
@XFAIL
def test_solve_only_exp_2():
    """Known failure: exponential ratios/sqrt-of-exp not yet handled."""
    assert solveset_real(exp(x/y)*exp(-z/y) - 2, y) == \
        FiniteSet((x - z)/log(2))
    assert solveset_real(sqrt(exp(x)) + sqrt(exp(-x)) - 4, x) == \
        FiniteSet(2*log(-sqrt(3) + 2), 2*log(sqrt(3) + 2))
def test_atan2():
    # The .inverse() method on atan2 works only if x.is_real is True and the
    # second argument is a real constant
    assert solveset_real(atan2(x, 2) - pi/3, x) == FiniteSet(2*sqrt(3))
def test_piecewise():
    """Piecewise equations: solutions collected piece by piece."""
    eq = Piecewise((x - 2, Gt(x, 2)), (2 - x, True)) - 3
    assert set(solveset_real(eq, x)) == set(FiniteSet(-1, 5))

    absxm3 = Piecewise(
        (x - 3, S(0) <= x - 3),
        (3 - x, S(0) > x - 3))
    y = Symbol('y', positive=True)
    assert solveset_real(absxm3 - y, x) == FiniteSet(-y + 3, y + 3)

    f = Piecewise(((x - 2)**2, x >= 0), (0, True))
    assert solveset(f, x, domain=S.Reals) == Union(FiniteSet(2), Interval(-oo, 0, True, True))

    assert solveset(Piecewise((x + 1, x > 0), (I, True)) - I, x) == \
        Interval(-oo, 0)
def test_solveset_complex_polynomial():
    """Quadratic formula, cube roots of x, and a reciprocal quadratic."""
    from sympy.abc import x, a, b, c
    assert solveset_complex(a*x**2 + b*x + c, x) == \
        FiniteSet(-b/(2*a) - sqrt(-4*a*c + b**2)/(2*a),
                  -b/(2*a) + sqrt(-4*a*c + b**2)/(2*a))

    assert solveset_complex(x - y**3, y) == FiniteSet(
        (-x**Rational(1, 3))/2 + I*sqrt(3)*x**Rational(1, 3)/2,
        x**Rational(1, 3),
        (-x**Rational(1, 3))/2 - I*sqrt(3)*x**Rational(1, 3)/2)

    assert solveset_complex(x + 1/x - 1, x) == \
        FiniteSet(Rational(1, 2) + I*sqrt(3)/2, Rational(1, 2) - I*sqrt(3)/2)
def test_sol_zero_complex():
    """The zero equation is satisfied on the whole complex plane."""
    result = solveset_complex(0, x)
    assert result == S.Complexes
def test_solveset_complex_rational():
    """Complex rational functions: numerator zeros minus denominator poles."""
    assert solveset_complex((x - 1)*(x - I)/(x - 3), x) == \
        FiniteSet(1, I)

    assert solveset_complex((x - y**3)/((y**2)*sqrt(1 - y**2)), x) == \
        FiniteSet(y**3)
    assert solveset_complex(-x**2 - I, x) == \
        FiniteSet(-sqrt(2)/2 + sqrt(2)*I/2, sqrt(2)/2 - sqrt(2)*I/2)
def test_solve_quintics():
    """Quintic roots verified numerically (skipped: too slow)."""
    skip("This test is too slow")
    f = x**5 - 110*x**3 - 55*x**2 + 2310*x + 979
    s = solveset_complex(f, x)
    for root in s:
        res = f.subs(x, root.n()).n()
        assert tn(res, 0)

    f = x**5 + 15*x + 12
    s = solveset_complex(f, x)
    for root in s:
        res = f.subs(x, root.n()).n()
        assert tn(res, 0)
def test_solveset_complex_exp():
    """exp equations over C produce periodic image sets."""
    from sympy.abc import x, n
    assert solveset_complex(exp(x) - 1, x) == \
        imageset(Lambda(n, I*2*n*pi), S.Integers)
    assert solveset_complex(exp(x) - I, x) == \
        imageset(Lambda(n, I*(2*n*pi + pi/2)), S.Integers)
    assert solveset_complex(1/exp(x), x) == S.EmptySet
    assert solveset_complex(sinh(x).rewrite(exp), x) == \
        imageset(Lambda(n, n*pi*I), S.Integers)
def test_solve_complex_log():
    """log equations over C invert to exp."""
    assert solveset_complex(log(x), x) == FiniteSet(1)
    assert solveset_complex(1 - log(a + 4*x**2), x) == \
        FiniteSet(-sqrt(-a/4 + E/4), sqrt(-a/4 + E/4))
def test_solve_complex_sqrt():
    """sqrt equations over C keep roots that would be extraneous over R."""
    assert solveset_complex(sqrt(5*x + 6) - 2 - x, x) == \
        FiniteSet(-S(1), S(2))
    assert solveset_complex(sqrt(5*x + 6) - (2 + 2*I) - x, x) == \
        FiniteSet(-S(2), 3 - 4*I)
    assert solveset_complex(4*x*(1 - a * sqrt(x)), x) == \
        FiniteSet(S(0), 1 / a ** 2)
def test_solveset_complex_tan():
    """tan (as exp) over C: zeros of sin minus poles at odd multiples of pi/2."""
    s = solveset_complex(tan(x).rewrite(exp), x)
    assert s == imageset(Lambda(n, pi*n), S.Integers) - \
        imageset(Lambda(n, pi*n + pi/2), S.Integers)
def test_solve_trig():
    """Trigonometric equations produce (unions of) periodic image sets."""
    from sympy.abc import n
    assert solveset_real(sin(x), x) == \
        Union(imageset(Lambda(n, 2*pi*n), S.Integers),
              imageset(Lambda(n, 2*pi*n + pi), S.Integers))

    assert solveset_real(sin(x) - 1, x) == \
        imageset(Lambda(n, 2*pi*n + pi/2), S.Integers)

    assert solveset_real(cos(x), x) == \
        Union(imageset(Lambda(n, 2*pi*n - pi/2), S.Integers),
              imageset(Lambda(n, 2*pi*n + pi/2), S.Integers))

    assert solveset_real(sin(x) + cos(x), x) == \
        Union(imageset(Lambda(n, 2*n*pi - pi/4), S.Integers),
              imageset(Lambda(n, 2*n*pi + 3*pi/4), S.Integers))

    assert solveset_real(sin(x)**2 + cos(x)**2, x) == S.EmptySet

    assert solveset_complex(cos(x) - S.Half, x) == \
        Union(imageset(Lambda(n, 2*n*pi + pi/3), S.Integers),
              imageset(Lambda(n, 2*n*pi - pi/3), S.Integers))

    y, a = symbols('y,a')
    assert solveset(sin(y + a) - sin(y), a, domain=S.Reals) == \
        imageset(Lambda(n, 2*n*pi), S.Integers)
@XFAIL
def test_solve_trig_abs():
    """Known failure: trig of Abs(x) not yet solved."""
    assert solveset(Eq(sin(Abs(x)), 1), x, domain=S.Reals) == \
        Union(ImageSet(Lambda(n, n*pi + (-1)**n*pi/2), S.Naturals0),
              ImageSet(Lambda(n, -n*pi - (-1)**n*pi/2), S.Naturals0))
def test_solve_invalid_sol():
    """x = 0 is outside the domain of sin(x)/x and (exp(x)-1)/x."""
    assert 0 not in solveset_real(sin(x)/x, x)
    assert 0 not in solveset_complex((exp(x) - 1)/x, x)
@XFAIL
def test_solve_trig_simplified():
    """Known failure: trig solutions are not returned in simplified form."""
    from sympy.abc import n
    assert solveset_real(sin(x), x) == \
        imageset(Lambda(n, n*pi), S.Integers)

    assert solveset_real(cos(x), x) == \
        imageset(Lambda(n, n*pi + pi/2), S.Integers)

    assert solveset_real(cos(x) + sin(x), x) == \
        imageset(Lambda(n, n*pi - pi/4), S.Integers)
@XFAIL
def test_solve_lambert():
    """Known failure: LambertW-solvable transcendental equations.

    NOTE: ``a`` is rebound twice (unconstrained, then real); order matters.
    """
    assert solveset_real(x*exp(x) - 1, x) == FiniteSet(LambertW(1))
    assert solveset_real(x + 2**x, x) == \
        FiniteSet(-LambertW(log(2))/log(2))

    # issue 4739
    assert solveset_real(exp(log(5)*x) - 2**x, x) == FiniteSet(0)

    ans = solveset_real(3*x + 5 + 2**(-5*x + 3), x)
    assert ans == FiniteSet(-Rational(5, 3) +
                            LambertW(-10240*2**(S(1)/3)*log(2)/3)/(5*log(2)))

    eq = 2*(3*x + 4)**5 - 6*7**(3*x + 9)
    result = solveset_real(eq, x)
    ans = FiniteSet((log(2401) +
                     5*LambertW(-log(7**(7*3**Rational(1, 5)/5))))/(3*log(7))/-1)
    assert result == ans
    assert solveset_real(eq.expand(), x) == result

    assert solveset_real(5*x - 1 + 3*exp(2 - 7*x), x) == \
        FiniteSet(Rational(1, 5) + LambertW(-21*exp(Rational(3, 5))/5)/7)

    assert solveset_real(2*x + 5 + log(3*x - 2), x) == \
        FiniteSet(Rational(2, 3) + LambertW(2*exp(-Rational(19, 3))/3)/2)

    assert solveset_real(3*x + log(4*x), x) == \
        FiniteSet(LambertW(Rational(3, 4))/3)

    assert solveset_complex(x**z*y**z - 2, z) == \
        FiniteSet(log(2)/(log(x) + log(y)))

    assert solveset_real(x**x - 2) == FiniteSet(exp(LambertW(log(2))))

    a = Symbol('a')
    assert solveset_real(-a*x + 2*x*log(x), x) == FiniteSet(exp(a/2))
    a = Symbol('a', real=True)
    assert solveset_real(a/x + exp(x/2), x) == \
        FiniteSet(2*LambertW(-a/2))
    assert solveset_real((a/x + exp(x/2)).diff(x), x) == \
        FiniteSet(4*LambertW(sqrt(2)*sqrt(a)/4))

    assert solveset_real(1/(1/x - y + exp(y)), x) == EmptySet()
    # coverage test
    p = Symbol('p', positive=True)
    w = Symbol('w')
    assert solveset_real((1/p + 1)**(p + 1), p) == EmptySet()
    assert solveset_real(tanh(x + 3)*tanh(x - 3) - 1, x) == EmptySet()
    assert solveset_real(2*x**w - 4*y**w, w) == \
        solveset_real((x/y)**w - 2, w)

    assert solveset_real((x**2 - 2*x + 1).subs(x, log(x) + 3*x), x) == \
        FiniteSet(LambertW(3*S.Exp1)/3)
    assert solveset_real((x**2 - 2*x + 1).subs(x, (log(x) + 3*x)**2 - 1), x) == \
        FiniteSet(LambertW(3*exp(-sqrt(2)))/3, LambertW(3*exp(sqrt(2)))/3)
    assert solveset_real((x**2 - 2*x - 2).subs(x, log(x) + 3*x), x) == \
        FiniteSet(LambertW(3*exp(1 + sqrt(3)))/3, LambertW(3*exp(-sqrt(3) + 1))/3)
    assert solveset_real(x*log(x) + 3*x + 1, x) == \
        FiniteSet(exp(-3 + LambertW(-exp(3))))
    eq = (x*exp(x) - 3).subs(x, x*exp(x))
    assert solveset_real(eq, x) == \
        FiniteSet(LambertW(3*exp(-LambertW(3))))

    assert solveset_real(3*log(a**(3*x + 5)) + a**(3*x + 5), x) == \
        FiniteSet(-((log(a**5) + LambertW(S(1)/3))/(3*log(a))))
    p = symbols('p', positive=True)
    assert solveset_real(3*log(p**(3*x + 5)) + p**(3*x + 5), x) == \
        FiniteSet(
        log((-3**(S(1)/3) - 3**(S(5)/6)*I)*LambertW(S(1)/3)**(S(1)/3)/(2*p**(S(5)/3)))/log(p),
        log((-3**(S(1)/3) + 3**(S(5)/6)*I)*LambertW(S(1)/3)**(S(1)/3)/(2*p**(S(5)/3)))/log(p),
        log((3*LambertW(S(1)/3)/p**5)**(1/(3*log(p)))),)  # checked numerically
    # check collection
    b = Symbol('b')
    eq = 3*log(a**(3*x + 5)) + b*log(a**(3*x + 5)) + a**(3*x + 5)
    assert solveset_real(eq, x) == FiniteSet(
        -((log(a**5) + LambertW(1/(b + 3)))/(3*log(a))))

    # issue 4271
    assert solveset_real((a/x + exp(x/2)).diff(x, 2), x) == FiniteSet(
        6*LambertW((-1)**(S(1)/3)*a**(S(1)/3)/3))

    assert solveset_real(x**3 - 3**x, x) == \
        FiniteSet(-3/log(3)*LambertW(-log(3)/3))
    assert solveset_real(x**2 - 2**x, x) == FiniteSet(2)
    assert solveset_real(-x**2 + 2**x, x) == FiniteSet(2)
    assert solveset_real(3**cos(x) - cos(x)**3) == FiniteSet(
        acos(-3*LambertW(-log(3)/3)/log(3)))

    assert solveset_real(4**(x/2) - 2**(x/3), x) == FiniteSet(0)
    assert solveset_real(5**(x/2) - 2**(x/3), x) == FiniteSet(0)
    b = sqrt(6)*sqrt(log(2))/sqrt(log(5))
    assert solveset_real(5**(x/2) - 2**(3/x), x) == FiniteSet(-b, b)
def test_solveset():
    """Smoke tests for solveset(): argument validation, trivial/boolean
    input, and the default complex domain vs. an explicit real domain."""
    x = Symbol('x')
    raises(ValueError, lambda: solveset(x + y))
    raises(ValueError, lambda: solveset(x, 1))
    assert solveset(0, domain=S.Reals) == S.Reals
    assert solveset(1) == S.EmptySet
    assert solveset(True, domain=S.Reals) == S.Reals  # issue 10197
    assert solveset(False, domain=S.Reals) == S.EmptySet
    assert solveset(exp(x) - 1, domain=S.Reals) == FiniteSet(0)
    assert solveset(exp(x) - 1, x, S.Reals) == FiniteSet(0)
    assert solveset(Eq(exp(x), 1), x, S.Reals) == FiniteSet(0)
    assert solveset(x - 1 >= 0, x, S.Reals) == Interval(1, oo)
    assert solveset(exp(x) - 1 >= 0, x, S.Reals) == Interval(0, oo)
    # Over the complex domain the solutions form an image set of 2*I*pi*n.
    assert solveset(exp(x) - 1, x) == imageset(Lambda(n, 2*I*pi*n), S.Integers)
    assert solveset(Eq(exp(x), 1), x) == imageset(Lambda(n, 2*I*pi*n),
                                                  S.Integers)


def test_conditionset():
    """Equations solveset cannot (fully) solve must come back wrapped in a
    ConditionSet over the requested domain."""
    assert solveset(Eq(sin(x)**2 + cos(x)**2, 1), x, domain=S.Reals) == \
        ConditionSet(x, True, S.Reals)
    assert solveset(Eq(x**2 + x*sin(x), 1), x, domain=S.Reals) == \
        ConditionSet(x, Eq(x*(x + sin(x)) - 1, 0), S.Reals)
    assert solveset(Eq(sin(Abs(x)), x), x, domain=S.Reals) == \
        ConditionSet(x, Eq(-x + sin(Abs(x)), 0), Interval(-oo, oo))
    assert solveset(Eq(-I*(exp(I*x) - exp(-I*x))/2, 1), x) == \
        imageset(Lambda(n, 2*n*pi + pi/2), S.Integers)
    assert solveset(x + sin(x) > 1, x, domain=S.Reals) == \
        ConditionSet(x, x + sin(x) > 1, S.Reals)


@XFAIL
def test_conditionset_equality():
    ''' Checking equality of different representations of ConditionSet'''
    assert solveset(Eq(tan(x), y), x) == ConditionSet(x, Eq(tan(x), y), S.Complexes)


def test_solveset_domain():
    """Solutions must be restricted to the given interval domain."""
    x = Symbol('x')
    assert solveset(x**2 - x - 6, x, Interval(0, oo)) == FiniteSet(3)
    assert solveset(x**2 - 1, x, Interval(0, oo)) == FiniteSet(1)
    assert solveset(x**4 - 16, x, Interval(0, 10)) == FiniteSet(2)
def test_improve_coverage():
    """Coverage tests: ConditionSet fallback for an unsolvable exponential
    equation, plus the private _has_rational_power helper."""
    from sympy.solvers.solveset import _has_rational_power
    x = Symbol('x')
    y = exp(x+1/x**2)
    solution = solveset(y**2+y, x, S.Reals)
    unsolved_object = ConditionSet(x, Eq((exp((x**3 + 1)/x**2) + 1)*exp((x**3 + 1)/x**2), 0), S.Reals)
    assert solution == unsolved_object
    assert _has_rational_power(sin(x)*exp(x) + 1, x) == (False, S.One)
    assert _has_rational_power((sin(x)**2)*(exp(x) + 1)**3, x) == (False, S.One)


def test_issue_9522():
    """Equations that reduce to an identity at undefined points must not
    yield spurious solutions (issue 9522)."""
    x = Symbol('x')
    expr1 = Eq(1/(x**2 - 4) + x, 1/(x**2 - 4) + 2)
    expr2 = Eq(1/x + x, 1/x)
    assert solveset(expr1, x, S.Reals) == EmptySet()
    assert solveset(expr2, x, S.Reals) == EmptySet()


def test_linear_eq_to_matrix():
    """linear_eq_to_matrix(): numeric systems, fully symbolic systems, and
    the ValueError raised when no symbols are supplied."""
    x, y, z = symbols('x, y, z')
    eqns1 = [2*x + y - 2*z - 3, x - y - z, x + y + 3*z - 12]
    eqns2 = [Eq(3*x + 2*y - z, 1), Eq(2*x - 2*y + 4*z, -2), -2*x + y - 2*z]

    A, b = linear_eq_to_matrix(eqns1, x, y, z)
    assert A == Matrix([[2, 1, -2], [1, -1, -1], [1, 1, 3]])
    assert b == Matrix([[3], [0], [12]])

    A, b = linear_eq_to_matrix(eqns2, x, y, z)
    assert A == Matrix([[3, 2, -1], [2, -2, 4], [-2, 1, -2]])
    assert b == Matrix([[1], [-2], [0]])

    # Pure symbolic coefficients
    from sympy.abc import a, b, c, d, e, f, g, h, i, j, k, l
    eqns3 = [a*x + b*y + c*z - d, e*x + f*y + g*z - h, i*x + j*y + k*z - l]
    A, B = linear_eq_to_matrix(eqns3, x, y, z)
    assert A == Matrix([[a, b, c], [e, f, g], [i, j, k]])
    assert B == Matrix([[d], [h], [l]])

    # raise ValueError if no symbols are given
    raises(ValueError, lambda: linear_eq_to_matrix(eqns3))
def test_linsolve():
    """linsolve(): matrix/equation/tuple input forms, symbolic systems,
    Dummy symbols (issue 9667), inconsistent systems, and free-variable
    assignment (issues 10056 and 10121)."""
    x, y, z, u, v, w = symbols("x, y, z, u, v, w")
    x1, x2, x3, x4 = symbols('x1, x2, x3, x4')

    # Test for different input forms
    M = Matrix([[1, 2, 1, 1, 7], [1, 2, 2, -1, 12], [2, 4, 0, 6, 4]])
    system1 = A, b = M[:, :-1], M[:, -1]
    Eqns = [x1 + 2*x2 + x3 + x4 - 7, x1 + 2*x2 + 2*x3 - x4 - 12,
            2*x1 + 4*x2 + 6*x4 - 4]

    sol = FiniteSet((-2*x2 - 3*x4 + 2, x2, 2*x4 + 5, x4))
    assert linsolve(M, (x1, x2, x3, x4)) == sol
    assert linsolve(Eqns, (x1, x2, x3, x4)) == sol
    assert linsolve(system1, (x1, x2, x3, x4)) == sol

    # raise ValueError if no symbols are given
    raises(ValueError, lambda: linsolve(system1))

    # raise ValueError if, A & b is not given as tuple
    raises(ValueError, lambda: linsolve(A, b, x1, x2, x3, x4))

    # raise ValueError for garbage value
    raises(ValueError, lambda: linsolve(Eqns[0], x1, x2, x3, x4))

    # Fully symbolic test
    a, b, c, d, e, f = symbols('a, b, c, d, e, f')
    A = Matrix([[a, b], [c, d]])
    B = Matrix([[e], [f]])
    system2 = (A, B)
    sol = FiniteSet(((-b*f + d*e)/(a*d - b*c), (a*f - c*e)/(a*d - b*c)))
    assert linsolve(system2, [x, y]) == sol

    # Test for Dummy Symbols issue #9667
    x1 = Dummy('x1')
    x2 = Dummy('x2')
    x3 = Dummy('x3')
    x4 = Dummy('x4')
    assert linsolve(system1, x1, x2, x3, x4) == FiniteSet((-2*x2 - 3*x4 + 2, x2, 2*x4 + 5, x4))

    # No solution
    A = Matrix([[1, 2, 3], [2, 4, 6], [3, 6, 9]])
    b = Matrix([0, 0, 1])
    assert linsolve((A, b), (x, y, z)) == EmptySet()

    # Issue #10056
    A, B, J1, J2 = symbols('A B J1 J2')
    Augmatrix = Matrix([
        [2*I*J1, 2*I*J2, -2/J1],
        [-2*I*J2, -2*I*J1, 2/J2],
        [0, 2, 2*I/(J1*J2)],
        [2, 0, 0],
    ])
    assert linsolve(Augmatrix, A, B) == FiniteSet((0, I/(J1*J2)))

    # Issue #10121 - Assignment of free variables
    a, b, c, d, e = symbols('a, b, c, d, e')
    Augmatrix = Matrix([[0, 1, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0]])
    assert linsolve(Augmatrix, a, b, c, d, e) == FiniteSet((a, 0, c, 0, e))
def test_issue_9556():
    """Abs(x) plus a positive quantity has no real roots."""
    x = Symbol('x')
    b = Symbol('b', positive=True)
    assert solveset(Abs(x) + 1, x, S.Reals) == EmptySet()
    assert solveset(Abs(x) + b, x, S.Reals) == EmptySet()
    assert solveset(Eq(b, -1), b, S.Reals) == EmptySet()


def test_issue_9611():
    """An identity in the solve variable is satisfied on the whole domain."""
    x = Symbol('x')
    a = Symbol('a')
    y = Symbol('y')
    assert solveset(Eq(x - x + a, a), x, S.Reals) == S.Reals
    assert solveset(Eq(y - y + a, a), y) == S.Complexes


def test_issue_9557():
    """Symbolic roots must be intersected with the real domain."""
    x = Symbol('x')
    a = Symbol('a')
    assert solveset(x**2 + a, x, S.Reals) == Intersection(S.Reals,
        FiniteSet(-sqrt(-a), sqrt(-a)))


def test_issue_9778():
    """Real roots of odd powers, including fractional exponents."""
    assert solveset(x**3 + 1, x, S.Reals) == FiniteSet(-1)
    assert solveset(x**(S(3)/5) + 1, x, S.Reals) == S.EmptySet
    assert solveset(x**3 + y, x, S.Reals) == Intersection(Interval(-oo, oo), \
        FiniteSet((-y)**(S(1)/3)*Piecewise((1, Ne(-im(y), 0)), ((-1)**(S(2)/3), -y < 0), (1, True))))


@XFAIL
def test_issue_failing_pow():
    assert solveset(x**(S(3)/2) + 4, x, S.Reals) == S.EmptySet


def test_issue_9849():
    """|sin(x)| + 1 is strictly positive, so no real roots."""
    assert solveset(Abs(sin(x)) + 1, x, S.Reals) == S.EmptySet


def test_issue_9953():
    """linsolve of an empty system is the empty set."""
    assert linsolve([ ], x) == S.EmptySet


def test_issue_9913():
    """Cubic with a rational-function term (issue 9913)."""
    assert solveset(2*x + 1/(x - 10)**2, x, S.Reals) == \
        FiniteSet(-(3*sqrt(24081)/4 + S(4027)/4)**(S(1)/3)/3 - 100/
                  (3*(3*sqrt(24081)/4 + S(4027)/4)**(S(1)/3)) + S(20)/3)


def test_issue_10397():
    """sqrt(x) = 0 has the single complex solution x = 0."""
    assert solveset(sqrt(x), x, S.Complexes) == FiniteSet(0)


def test_simplification():
    """Coefficients should be simplified before solving."""
    eq = x + (a - b)/(-2*a + 2*b)
    assert solveset(eq, x) == FiniteSet(S.Half)
    assert solveset(eq, x, S.Reals) == FiniteSet(S.Half)


def test_issue_10555():
    """Undefined functions yield a ConditionSet rather than an error."""
    f = Function('f')
    assert solveset(f(x) - pi/2, x, S.Reals) == \
        ConditionSet(x, Eq(2*f(x) - pi, 0), S.Reals)


def test_issue_8715():
    """Inequalities with removable singularities (issue 8715)."""
    eq = x + 1/x > -2 + 1/x
    assert solveset(eq, x, S.Reals) == \
        (Interval.open(-2, oo) - FiniteSet(0))
    assert solveset(eq.subs(x,log(x)), x, S.Reals) == \
        Interval.open(exp(-2), oo) - FiniteSet(1)
| ChristinaZografou/sympy | sympy/solvers/tests/test_solveset.py | Python | bsd-3-clause | 40,985 |
# -*- coding: utf-8 -*-
"""
A number of generic default fixtures to use with tests.
All model-related fixtures defined here require the database, and should imply as much by
including ``db`` fixture in the function resolution scope.
"""
from __future__ import absolute_import, print_function, unicode_literals
import os
import re
import sys
import yaml
import sentry
import pytest
import six
from datetime import datetime
# These chars cannot be used in Windows paths so replace them:
# https://docs.microsoft.com/en-us/windows/desktop/FileIO/naming-a-file#naming-conventions
# Bug fix: the pipe entry was written as " | " (with surrounding spaces), so a
# literal '|' in a test node name was never treated as unsafe.
UNSAFE_PATH_CHARS = ("<", ">", ":", '"', "|", "?", "*")
# Test-id separators that are turned into subdirectories for snapshot files.
DIRECTORY_GROUPING_CHARS = ("::", "-", "[", "]", "\\")
# Canonical, fully-populated fake event payload (extra/request/stacktrace
# sections) for tests that need realistic event data.  The two stacktrace
# frames differ only in path/in_app/lineno: one "application" frame and one
# "library" (raven) frame.
DEFAULT_EVENT_DATA = {
    "extra": {
        "loadavg": [0.97607421875, 0.88330078125, 0.833984375],
        "sys.argv": [
            "/Users/dcramer/.virtualenvs/sentry/bin/raven",
            "test",
            "https://ebc35f33e151401f9deac549978bda11:[email protected]/1",
        ],
        "user": "dcramer",
    },
    "modules": {"raven": "3.1.13"},
    "request": {
        "cookies": {},
        "data": {},
        "env": {},
        "headers": {},
        "method": "GET",
        "query_string": "",
        "url": "http://example.com",
    },
    "stacktrace": {
        "frames": [
            {
                "abs_path": "www/src/sentry/models/foo.py",
                "context_line": " string_max_length=self.string_max_length)",
                "filename": "sentry/models/foo.py",
                "function": "build_msg",
                "in_app": True,
                "lineno": 29,
                "module": "raven.base",
                "post_context": [
                    " },",
                    " })",
                    "",
                    " if 'stacktrace' in data:",
                    " if self.include_paths:",
                ],
                "pre_context": [
                    "",
                    " data.update({",
                    " 'stacktrace': {",
                    " 'frames': get_stack_info(frames,",
                    " list_max_length=self.list_max_length,",
                ],
                "vars": {
                    "culprit": "raven.scripts.runner",
                    "date": "datetime.datetime(2013, 2, 14, 20, 6, 33, 479471)",
                    "event_id": "598fb19363e745ec8be665e6ba88b1b2",
                    "event_type": "raven.events.Message",
                    "frames": "<generator object iter_stack_frames at 0x103fef050>",
                    "handler": "<raven.events.Message object at 0x103feb710>",
                    "k": "logentry",
                    "public_key": None,
                    "result": {
                        "logentry": "{'message': 'This is a test message generated using ``raven test``', 'params': []}"
                    },
                    "self": "<raven.base.Client object at 0x104397f10>",
                    "stack": True,
                    "tags": None,
                    "time_spent": None,
                },
            },
            {
                "abs_path": "/Users/dcramer/.virtualenvs/sentry/lib/python2.7/site-packages/raven/base.py",
                "context_line": " string_max_length=self.string_max_length)",
                "filename": "raven/base.py",
                "function": "build_msg",
                "in_app": False,
                "lineno": 290,
                "module": "raven.base",
                "post_context": [
                    " },",
                    " })",
                    "",
                    " if 'stacktrace' in data:",
                    " if self.include_paths:",
                ],
                "pre_context": [
                    "",
                    " data.update({",
                    " 'stacktrace': {",
                    " 'frames': get_stack_info(frames,",
                    " list_max_length=self.list_max_length,",
                ],
                "vars": {
                    "culprit": "raven.scripts.runner",
                    "date": "datetime.datetime(2013, 2, 14, 20, 6, 33, 479471)",
                    "event_id": "598fb19363e745ec8be665e6ba88b1b2",
                    "event_type": "raven.events.Message",
                    "frames": "<generator object iter_stack_frames at 0x103fef050>",
                    "handler": "<raven.events.Message object at 0x103feb710>",
                    "k": "logentry",
                    "public_key": None,
                    "result": {
                        "logentry": "{'message': 'This is a test message generated using ``raven test``', 'params': []}"
                    },
                    "self": "<raven.base.Client object at 0x104397f10>",
                    "stack": True,
                    "tags": None,
                    "time_spent": None,
                },
            },
        ]
    },
    "tags": [],
    "platform": "python",
}
@pytest.mark.django_db
@pytest.fixture
def factories():
    """Expose the test-object factory helpers as a fixture."""
    # XXX(dcramer): hack to prevent recursive imports
    from sentry.testutils.factories import Factories

    return Factories


@pytest.mark.django_db
@pytest.fixture
def project(team, factories):
    """A project named/slugged "bar" attached to the `team` fixture."""
    return factories.create_project(name="bar", slug="bar", teams=[team])


@pytest.fixture
def task_runner():
    """Expose the TaskRunner helper class (imported lazily)."""
    from sentry.testutils.helpers.task_runner import TaskRunner

    return TaskRunner
@pytest.fixture(scope="function")
def session(factories):
    """A fresh test session created through the factories helpers.

    Bug fix: `factories` must be requested as a fixture argument; the
    previous version referenced a global name that does not exist, raising
    NameError whenever the fixture was used.
    """
    return factories.create_session()
@pytest.mark.django_db
@pytest.fixture(scope="function")
def default_user(factories):
    """A superuser with email admin@localhost."""
    return factories.create_user(email="admin@localhost", is_superuser=True)


@pytest.mark.django_db
@pytest.fixture(scope="function")
def default_organization(factories, default_user):
    """Organization "baz" owned by `default_user`."""
    # XXX(dcramer): ensure that your org slug doesnt match your team slug
    # and the same for your project slug
    return factories.create_organization(name="baz", slug="baz", owner=default_user)


@pytest.mark.django_db
@pytest.fixture(scope="function")
def default_team(factories, default_organization):
    """Team "foo" in the default organization; all existing organization
    members are added to it for backwards compatibility."""
    from sentry.models import OrganizationMember, OrganizationMemberTeam

    team = factories.create_team(organization=default_organization, name="foo", slug="foo")
    # XXX: handle legacy team fixture
    queryset = OrganizationMember.objects.filter(organization=default_organization)
    for om in queryset:
        OrganizationMemberTeam.objects.create(team=team, organizationmember=om, is_active=True)
    return team


@pytest.mark.django_db
@pytest.fixture(scope="function")
def default_project(factories, default_team):
    """Project "Bar" owned by the default team."""
    return factories.create_project(name="Bar", slug="bar", teams=[default_team])


@pytest.mark.django_db
@pytest.fixture(scope="function")
def default_projectkey(factories, default_project):
    """A client key (DSN) for the default project."""
    return factories.create_project_key(project=default_project)


@pytest.mark.django_db
@pytest.fixture(scope="function")
def default_environment(factories, default_project):
    """The "development" environment of the default project."""
    return factories.create_environment(name="development", project=default_project)


@pytest.mark.django_db
@pytest.fixture(scope="function")
def default_group(factories, default_project):
    """A group in the default project; message is Japanese "konnichiwa"
    to exercise non-ASCII handling."""
    return factories.create_group(project=default_project, message="\u3053\u3093\u306b\u3061\u306f")


@pytest.mark.django_db
@pytest.fixture(scope="function")
def default_event(factories, default_group):
    """An event inside `default_group` with a fixed event id."""
    return factories.create_event(
        group=default_group, event_id="a" * 32, message="\u3053\u3093\u306b\u3061\u306f"
    )


@pytest.mark.django_db
@pytest.fixture(scope="function")
def default_activity(default_group, default_project, default_user):
    """A NOTE activity by the default user on the default group."""
    from sentry.models import Activity

    return Activity.objects.create(
        group=default_group, project=default_project, type=Activity.NOTE, user=default_user, data={}
    )
# Snapshot write-back mode from SENTRY_SNAPSHOTS_WRITEBACK:
#   "overwrite" (any of "true"/"1"/"overwrite") - update snapshot files in place
#   "new"                                       - write changes beside the file as *.new
#   None (default)                              - never write, only compare
_snapshot_writeback = os.environ.get("SENTRY_SNAPSHOTS_WRITEBACK") or "0"
if _snapshot_writeback in ("true", "1", "overwrite"):
    _snapshot_writeback = "overwrite"
elif _snapshot_writeback != "new":
    _snapshot_writeback = None

# Repository root (three directory levels above the sentry package); used to
# relativize the "source" path recorded in snapshot headers.
_test_base = os.path.realpath(
    os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(sentry.__file__))))
)
# Matches a snapshot file: a YAML front-matter header between `---` markers,
# then the reference value.  Bug fix: the global `(?s)` (DOTALL) flag must be
# at the start of the pattern; a trailing global flag is deprecated since
# Python 3.6 and raises re.error from Python 3.11 on.
_yaml_snap_re = re.compile(r"(?s)^---\r?\n(.*?)\r?\n---\r?\n(.*)$")
@pytest.fixture
def log():
    """Fixture returning a simple line printer (writes to stdout)."""
    def inner(x):
        return sys.stdout.write(x + "\n")

    return inner


class ReadableYamlDumper(yaml.dumper.SafeDumper):
    """Disable pyyaml aliases for identical object references"""

    def ignore_aliases(self, data):
        # Always inline repeated values instead of emitting &anchor/*alias
        # pairs, which keeps snapshot files human-readable.
        return True
@pytest.fixture
def insta_snapshot(request, log):
    """Snapshot-testing fixture.

    The returned callable compares `output` (YAML-serialized if it is not
    already a string) against a stored `.pysnap` reference file derived from
    the test's node id, and either asserts equality or rewrites the snapshot
    depending on the module-level `_snapshot_writeback` mode.
    """
    def inner(output, reference_file=None, subname=None):
        # NOTE(review): `subname` is validated below but never used when
        # building the file name — presumably it was meant to suffix `name`;
        # verify against newer versions of this fixture.
        if reference_file is None:
            # Derive a filesystem-safe snapshot path from the test node id:
            # unsafe chars -> '@', grouping chars ('::', '[', ...) -> '/'.
            name = request.node.name
            for c in UNSAFE_PATH_CHARS:
                name = name.replace(c, "@")
            for c in DIRECTORY_GROUPING_CHARS:
                name = name.replace(c, "/")
            name = name.strip("/")

            reference_file = os.path.join(
                os.path.dirname(six.text_type(request.node.fspath)),
                "snapshots",
                os.path.splitext(os.path.basename(request.node.parent.name))[0],
                name + ".pysnap",
            )
        elif subname is not None:
            raise ValueError(
                "subname only works if you don't provide your own entire reference_file"
            )

        if not isinstance(output, six.string_types):
            output = yaml.dump(
                output, indent=2, default_flow_style=False, Dumper=ReadableYamlDumper
            )

        # Read the stored reference value; a missing or malformed file is
        # treated as an empty reference.
        # NOTE(review): f.read().decode("utf-8") only works where read()
        # returns bytes (py2 semantics) — confirm if this still runs on py3.
        try:
            with open(reference_file) as f:
                match = _yaml_snap_re.match(f.read().decode("utf-8"))
                if match is None:
                    raise IOError()
                _header, refval = match.groups()
        except IOError:
            refval = ""

        refval = refval.rstrip()
        output = output.rstrip()

        if _snapshot_writeback is not None and refval != output:
            # Write-back mode: (re)create the snapshot with a YAML header
            # recording creation time, creator and relative source path.
            if not os.path.isdir(os.path.dirname(reference_file)):
                os.makedirs(os.path.dirname(reference_file))
            source = os.path.realpath(six.text_type(request.node.fspath))
            if source.startswith(_test_base + os.path.sep):
                source = source[len(_test_base) + 1 :]
            if _snapshot_writeback == "new":
                reference_file += ".new"
            with open(reference_file, "w") as f:
                f.write(
                    "---\n%s\n---\n%s\n"
                    % (
                        yaml.safe_dump(
                            {
                                "created": datetime.utcnow().isoformat() + "Z",
                                "creator": "sentry",
                                "source": source,
                            },
                            indent=2,
                            default_flow_style=False,
                        ).rstrip(),
                        output,
                    )
                )
        else:
            # Compare mode: fail the test if the output drifted.
            log("Run with SENTRY_SNAPSHOTS_WRITEBACK=1 to update snapshots.")
            assert refval == output

    yield inner
| mvaled/sentry | src/sentry/utils/pytest/fixtures.py | Python | bsd-3-clause | 11,418 |
#!/usr/bin/python2.4
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module contains various formatters which can help format a chart
object. To use these, add them to your chart's list of formatters. For
example:
chart.formatters.append(InlineLegend)
chart.formatters.append(LabelSeparator(right=8))
Feel free to write your own formatter. Formatters are just callables that
modify the chart in some (hopefully useful) way. For example, the AutoColor
formatter makes sure each DataSeries has a color applied to it. The formatter
should take the chart to format as its only argument.
(The formatters work on a deepcopy of the user's chart, so modifications
shouldn't leak back into the user's original chart)
"""
def AutoLegend(chart):
  """Fill in the chart's legend from its series labels.

  The legend is enabled only when at least one series carries a label;
  unlabeled series contribute an empty string so legend entries stay
  aligned with series order.
  """
  chart._show_legend = any(series.label is not None for series in chart.data)
  if chart._show_legend:
    chart._legend_labels = [
        '' if series.label is None else series.label for series in chart.data]
class AutoColor(object):
  """Assign a default color to every series that lacks one.

  Object attributes:
    colors: list of hex color strings handed out in order (wrapping around
      when exhausted).  Modify this list to change the defaults.
  """

  def __init__(self):
    # TODO: Add a few more default colors.
    # TODO: Add a default styles too, so if you don't specify color or
    # style, you get a unique set of colors & styles for your data.
    self.colors = ['0000ff', 'ff0000', '00dd00', '000000']

  def __call__(self, chart):
    assigned = 0
    for series in chart.data:
      if series.style.color is not None:
        continue  # Respect explicitly chosen colors.
      series.style.color = self.colors[assigned % len(self.colors)]
      assigned += 1
class AutoScale(object):
  """Fill in missing min/max values on the dependent axes.

  Bounds are computed from the chart's data, padded by a configurable
  margin.  Any min/max the user has already set on *some* dependent axis
  takes precedence and is propagated to the axes that lack one; if
  different axes carry conflicting explicit bounds the outcome is
  undefined, so don't do that.
  """

  def __init__(self, buffer=0.05):
    """Create a new AutoScale formatter.

    Args:
      buffer: fraction of the data range added as padding on each side.
    """
    self.buffer = buffer

  def __call__(self, chart):
    """Set min/max on every dependent axis of `chart` that lacks them."""
    if not chart.data:
      return  # Nothing to do.

    lo, hi = chart.GetMinMaxValues()
    if lo is None or hi is None:
      return  # No data. Nothing to do.

    # Explicit user-specified bounds win over the computed ones.
    for axis in chart.GetDependentAxes():
      if axis.min is not None:
        lo = axis.min
      if axis.max is not None:
        hi = axis.max

    margin = (hi - lo) * self.buffer  # Keep data away from the chart edge.
    for axis in chart.GetDependentAxes():
      if axis.min is None:
        axis.min = lo - margin
      if axis.max is None:
        axis.max = hi + margin
class LabelSeparator(object):
  """Adjust the label positions to avoid having them overlap. This happens for
  any axis with minimum_label_spacing set.
  """

  def __init__(self, left=None, right=None, bottom=None):
    # Per-axis minimum spacing; None disables adjustment for that axis.
    self.left = left
    self.right = right
    self.bottom = bottom

  def __call__(self, chart):
    self.AdjustLabels(chart.left, self.left)
    self.AdjustLabels(chart.right, self.right)
    self.AdjustLabels(chart.bottom, self.bottom)

  def AdjustLabels(self, axis, minimum_label_spacing):
    """Spread the axis labels so adjacent ones are at least
    minimum_label_spacing apart, clamping to [axis.min, axis.max].
    Rewrites axis.labels / axis.label_positions sorted by descending
    position.
    """
    if minimum_label_spacing is None:
      return
    if len(axis.labels) <= 1:  # Nothing to adjust
      return
    if axis.max is not None and axis.min is not None:
      # Find the spacing required to fit all labels evenly.
      # Don't try to push them farther apart than that.
      maximum_possible_spacing = (axis.max - axis.min) / (len(axis.labels) - 1)
      if minimum_label_spacing > maximum_possible_spacing:
        minimum_label_spacing = maximum_possible_spacing

    # Pair up positions with labels, highest position first.
    labels = [list(x) for x in zip(axis.label_positions, axis.labels)]
    labels = sorted(labels, reverse=True)

    # First pass from the top, moving colliding labels downward
    for i in range(1, len(labels)):
      if labels[i - 1][0] - labels[i][0] < minimum_label_spacing:
        new_position = labels[i - 1][0] - minimum_label_spacing
        if axis.min is not None and new_position < axis.min:
          new_position = axis.min
        labels[i][0] = new_position

    # Second pass from the bottom, moving colliding labels upward
    for i in range(len(labels) - 2, -1, -1):
      if labels[i][0] - labels[i + 1][0] < minimum_label_spacing:
        new_position = labels[i + 1][0] + minimum_label_spacing
        if axis.max is not None and new_position > axis.max:
          new_position = axis.max
        labels[i][0] = new_position

    # Separate positions and labels
    label_positions, labels = zip(*labels)
    axis.labels = labels
    axis.label_positions = label_positions
def InlineLegend(chart):
  """Label each line at its right-hand end instead of drawing a legend box.

  Attaches every series label (empty string for unlabeled series) to the
  right axis at the height of that series' final data point, then disables
  the regular legend.  Does nothing unless at least one series is labeled.
  """
  labels = ['' if s.label is None else s.label for s in chart.data]
  positions = [s.data[-1] for s in chart.data]
  if not any(s.label is not None for s in chart.data):
    return

  # Mirror the left axis range so label positions line up with the data.
  chart.right.min = chart.left.min
  chart.right.max = chart.left.max
  chart.right.labels = labels
  chart.right.label_positions = positions
  chart._show_legend = False  # Suppress the regular legend.
| ychen820/microblog | y/google-cloud-sdk/platform/google_appengine/lib/graphy/graphy/formatters.py | Python | bsd-3-clause | 6,836 |
'''
Created on 03.02.2016
@author: fabian
'''
import os
import collections
import csv
class ResultParser(object):
    '''
    Scans a result folder that contains one subdirectory per analyzed commit,
    named ``<number>-<hash>``, and reports which commits produced JaCoCo or
    PIT errors (marked by ``jacoco_errors.txt`` / ``pit_errors.txt`` files).
    '''

    def __init__(self, resultFolder):
        '''
        Constructor

        :param resultFolder: folder with one subdirectory per analyzed commit
        '''
        self.resultFolder = resultFolder

    def _getCommitsWithErrorFile(self, errorFileName):
        """Return all commit directories containing the given marker file.

        :param errorFileName: marker file name, e.g. ``jacoco_errors.txt``
        """
        commits = []
        for commit in self.getImmediateSubdirectories(self.resultFolder):
            errorFile = os.path.join(self.resultFolder, commit, errorFileName)
            # isfile() already implies existence, no separate exists() needed.
            if os.path.isfile(errorFile):
                commits.append(commit)
        return commits

    def getAllCommitsWithJacocoErrors(self):
        """Return the commits whose JaCoCo run produced errors."""
        return self._getCommitsWithErrorFile("jacoco_errors.txt")

    def getAllCommitsWithPitErrors(self):
        """Return the commits whose PIT run produced errors."""
        return self._getCommitsWithErrorFile("pit_errors.txt")

    def createCSVFile(self, outputFile):
        """Write a CSV summary (number, hash, error flags) of every commit
        folder that produced any output, ordered by commit number.

        :param outputFile: path of the CSV file to create
        """
        jacocoErrors = set(self.getAllCommitsWithJacocoErrors())
        pitErrors = set(self.getAllCommitsWithPitErrors())
        result = {}
        for commit in self.getImmediateSubdirectories(self.resultFolder):
            # Skip commit folders that produced no output at all.
            if os.listdir(os.path.join(self.resultFolder, commit)):
                parts = commit.split("-")
                result[int(parts[0])] = {
                    'hash': parts[1],
                    'jacocoError': commit in jacocoErrors,
                    'pitError': commit in pitErrors,
                }
        # Bug fix: the output file handle was never closed; also pass
        # newline='' so the csv module controls line endings itself.
        with open(outputFile, 'w', newline='') as csvFile:
            writer = csv.writer(csvFile)
            writer.writerow(['Number', 'Hash', 'HasJacocoError', 'HasPitError'])
            for key in sorted(result):
                value = result[key]
                writer.writerow([key, value['hash'], value['jacocoError'], value['pitError']])

    def getImmediateSubdirectories(self, a_dir):
        """ Helper method, which gets the **immediate** subdirectories of a path. Is helpful, if one wants to create a
        parser, which looks if certain folders are there.

        :param a_dir: directory from which **immediate** subdirectories should be listed """
        return [name for name in os.listdir(a_dir)
                if os.path.isdir(os.path.join(a_dir, name))]
if __name__ == "__main__":
    # Ad-hoc manual experiment left by the author: reads a checked-out source
    # file via hard-coded absolute paths.  Not part of the library API.
    #resultParser = ResultParser("/home/fabian/Arbeit/testEvolution/results/checkstyle")
    #print(resultParser.getAllCommitsWithJacocoErrors())
    #resultParser.createCSVFile("/home/fabian/test.csv")
    with open("/home/fabian/Arbeit/testEvolution/tmp/checkstyle_working/src/checkstyle/com/puppycrawl/tools/checkstyle/gui/ParseTreeInfoPanel.java", "rb") as sourceFile:
        data = sourceFile.readlines()
        # NOTE(review): close() is redundant inside a `with` block.
        sourceFile.close()
| ftrautsch/testEvolution | parser/resultparser.py | Python | apache-2.0 | 2,937 |
# Imperialism remake
# Copyright (C) 2014-16 Trilarion
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
"""
Processes sound effects. (Not yet implemented.)
wavfile (taken from scipy) requires numpy
"""
import wavfile
# read sound effects configuration
# NOTE(review): processing is not implemented yet; the bare `pass` keeps the
# module importable as a placeholder.
pass
#
# Instant Python
# $Id: tkCommonDialog.py 32140 2003-04-06 09:01:11Z rhettinger $
#
# base class for tk common dialogues
#
# this module provides a base class for accessing the common
# dialogues available in Tk 4.2 and newer. use tkFileDialog,
# tkColorChooser, and tkMessageBox to access the individual
# dialogs.
#
# written by Fredrik Lundh, May 1997
#
from Tkinter import *
class Dialog:
    # Base class for the Tk common dialogs (file/color/message choosers).
    # Subclasses set `command` to the Tk command name and may override the
    # _fixoptions/_fixresult hooks.  (Python 2 / old Tkinter code.)

    # Tk command executed by show(); must be set by subclasses.
    command = None

    def __init__(self, master=None, **options):
        # FIXME: should this be placed on the module level instead?
        if TkVersion < 4.2:
            raise TclError, "this module requires Tk 4.2 or newer"

        self.master = master
        self.options = options
        # Fall back to the 'parent' option as the master widget.
        if not master and options.get('parent'):
            self.master = options['parent']

    def _fixoptions(self):
        # Hook: subclasses may massage self.options before the Tk call.
        pass # hook

    def _fixresult(self, widget, result):
        # Hook: subclasses may convert the raw Tk result.
        return result # hook

    def show(self, **options):
        # Display the dialog and return its (possibly converted) result.

        # update instance options
        for k, v in options.items():
            self.options[k] = v

        self._fixoptions()

        # we need a dummy widget to properly process the options
        # (at least as long as we use Tkinter 1.63)
        w = Frame(self.master)

        try:

            s = w.tk.call(self.command, *w._options(self.options))

            s = self._fixresult(w, s)

        finally:

            try:
                # get rid of the widget
                w.destroy()
            except:
                # Best-effort cleanup; the widget may already be gone.
                pass

        return s
| xbmc/atv2 | xbmc/lib/libPython/Python/Lib/lib-tk/tkCommonDialog.py | Python | gpl-2.0 | 1,504 |
# -*- coding: utf-8 -*-
"""
shellstreaming.core.batch_queue
~~~~~~~~~~~~~~~~~~~~~~~~~~
:synopsis: Provides queue of output batch
Simple wrapper of internal queue class
"""
import Queue as q
import threading
class BatchQueue(object):
    """Queue of output batch.

    A ``None`` batch is the end-of-stream sentinel: once popped, the queue
    is marked finished and the sentinel is re-queued so that every consumer
    eventually observes it.
    """
    # [todo] - use ActiveMQ for performance?

    def __init__(self):
        """Constructor"""
        self._q = q.Queue()
        # The queue can be pushed and popped concurrently, so the record
        # counter is only ever updated while holding the lock.
        self._records = 0
        self._lock = threading.Lock()
        self._finished = False

    def push(self, batch):
        """Enqueue `batch`; pushing ``None`` marks the stream as finished."""
        self._q.put(batch)
        if batch is not None:
            # Idiom fix: `with` releases the lock even if len() raises.
            with self._lock:
                self._records += len(batch)

    def pop(self):
        """Dequeue and return the next batch (``None`` once finished)."""
        # Short timeout is a workaround to keep Ctrl-C responsive:
        # http://bugs.python.org/issue1360
        batch = self._q.get(timeout=0.01)
        if batch is None:
            self._finished = True
            self.push(None)  # re-supply the sentinel for other consumers
            return None
        with self._lock:
            self._records -= len(batch)
        return batch

    def records(self):
        """Number of records currently queued, or ``None`` after the stream
        has finished."""
        if self._finished:
            return None
        return self._records
| laysakura/shellstreaming | shellstreaming/core/batch_queue.py | Python | apache-2.0 | 1,347 |
# coding=utf-8
#
# This file is part of Dragonfly.
# (c) Copyright 2007, 2008 by Christo Butcher
# Licensed under the LGPL.
#
# Dragonfly is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Dragonfly is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with Dragonfly. If not, see
# <http://www.gnu.org/licenses/>.
#
"""
Arabic (ar) language implementations of Integer and Digits classes
============================================================================
"""
from ..base.integer_internal import (MapIntBuilder, CollectionIntBuilder,
MagnitudeIntBuilder, IntegerContentBase)
from ..base.digits_internal import DigitsContentBase
#---------------------------------------------------------------------------
# Builders mapping spoken Arabic number words to integer values.
# NOTE(review): the magnitude words "hundred", "thousand" and "million" in the
# specs below are still English — presumably untranslated leftovers from the
# base/English module; verify against the intended Arabic forms.
int_0 = MapIntBuilder({
    "صفر": 0,
})
# Units 1-9.
int_1_9 = MapIntBuilder({
    "واحد": 1,
    "اثنان": 2,
    "ثلاثة": 3,
    "اربعة": 4,
    "خمسة": 5,
    "ستة": 6,
    "سبعة": 7,
    "ثمانية": 8,
    "تسعة": 9,
})
# Ten through nineteen are irregular and enumerated directly.
int_10_19 = MapIntBuilder({
    "عشرة": 10,
    "احدى عشر": 11,
    "اثنا عشر": 12,
    "ثلاثة عشر": 13,
    "اربعة عشر": 14,
    "خمسة عشر": 15,
    "ستة عشر": 16,
    "سبعة عشر": 17,
    "ثمانية عشر": 18,
    "تسعة عشر": 19,
})
# Tens words map to their multiplier of 10 (twenty -> 2, thirty -> 3, ...).
int_20_90_10 = MapIntBuilder({
    "عشرون": 2,
    "ثلاثون": 3,
    "اربعون": 4,
    "خمسون": 5,
    "ستون": 6,
    "سبعون": 7,
    "ثمانون": 8,
    "تسعون": 9,
})
# 20-99: a tens word optionally followed by a unit remainder.
int_20_99 = MagnitudeIntBuilder(
    factor = 10,
    spec = "<multiplier> [<remainder>]",
    multipliers = [int_20_90_10],
    remainders = [int_1_9],
    )
# 1-99 with an optional leading "و" (and), used as remainder after hundreds.
int_and_1_99 = CollectionIntBuilder(
    spec = "[و] <element>",
    set = [int_1_9, int_10_19, int_20_99],
    )
# 100-999 with a single-digit hundreds multiplier.
int_100s = MagnitudeIntBuilder(
    factor = 100,
    spec = "[<multiplier>] hundred [<remainder>]",
    multipliers = [int_1_9],
    remainders = [int_and_1_99],
    )
# Hundreds with a 10-99 multiplier (e.g. "twelve hundred" style forms).
int_100big = MagnitudeIntBuilder(
    factor = 100,
    spec = "[<multiplier>] hundred [<remainder>]",
    multipliers = [int_10_19, int_20_99],
    remainders = [int_and_1_99]
    )
# Thousands.
int_1000s = MagnitudeIntBuilder(
    factor = 1000,
    spec = "[<multiplier>] thousand [<remainder>]",
    multipliers = [int_1_9, int_10_19, int_20_99, int_100s],
    remainders = [int_and_1_99, int_100s]
    )
# Millions.
int_1000000s = MagnitudeIntBuilder(
    factor = 1000000,
    spec = "[<multiplier>] million [<remainder>]",
    multipliers = [int_1_9, int_10_19, int_20_99, int_100s, int_1000s],
    remainders = [int_and_1_99, int_100s, int_1000s],
    )
#---------------------------------------------------------------------------
class IntegerContent(IntegerContentBase):
    # All builders combined, smallest magnitudes first, so any supported
    # integer utterance can be parsed.
    builders = [int_0, int_1_9, int_10_19, int_20_99,
                int_100s, int_100big, int_1000s, int_1000000s]


class DigitsContent(DigitsContentBase):
    # Spoken forms for the single digits 0-9; zero has two alternatives
    # (zero / "oh"-style).
    digits = [("صفر", "اووه"), "واحد", "اثنان", "ثلاثة", "اربعة",
              "خمسة", "ستة", "سبعة", "ثمانية", "تسعة"]
| tylercal/dragonfly | dragonfly/language/ar/number.py | Python | lgpl-3.0 | 5,178 |
import unittest
import datetime
import json
from garage.formatters.json import encode_datetime
from garage.formatters.json import encode_mapping
from garage.formatters.json import join_encoders
from garage.timezones import TimeZone
from tests.utils import make_sorted_ordered_dict
class JsonTest(unittest.TestCase):
    """Tests for the garage.formatters.json encoder helpers."""

    def test_encoders(self):
        """Each encoder handles its own type; join_encoders chains them so
        either order falls through to the encoder that matches."""
        dt = datetime.datetime(2000, 1, 2, 3, 4, 5, 6, TimeZone.UTC)
        dt_json = '"2000-01-02T03:04:05.000006+0000"'

        mapping = make_sorted_ordered_dict(c=3, a=1, b=2)
        mapping_json = '{"a": 1, "b": 2, "c": 3}'

        # Without a default= hook, datetime is not JSON-serializable.
        with self.assertRaises(TypeError):
            json.dumps(dt)
        self.assertEqual(
            dt_json,
            json.dumps(dt, default=encode_datetime),
        )
        self.assertEqual(
            dt_json,
            json.dumps(
                dt, default=join_encoders(encode_mapping, encode_datetime)
            ),
        )

        self.assertEqual(
            mapping_json,
            json.dumps(mapping, default=encode_mapping),
        )
        self.assertEqual(
            mapping_json,
            json.dumps(
                mapping, default=join_encoders(encode_datetime, encode_mapping)
            ),
        )


if __name__ == '__main__':
    unittest.main()
| clchiou/garage | py/garage/tests/formatters/test_json.py | Python | mit | 1,288 |
#!/usr/bin/env python3
#Copyright 2014 Carl Johnson IV
'''
command line rpn calculator
'''
#TODO: allow for function definition inline (ephemeral) and in config (persistent)
import argparse
import sys
from decimal import Decimal
from command import Command
from stack import Stack
# RPN operator table: token -> Command(pops, pushes, fn).
# The first lambda argument is always the stack itself; the remaining
# arguments are the popped operands, most recently pushed first.
COMMAND_DEFINITIONS = {
    'p': Command(0, 0, lambda x: print(x.peek())),  # print top of stack
    'f': Command(0, 0, lambda x: print('\n'.join([str(a) for a in x.walk()]))),  # print whole stack
    '+': Command(2, 1, lambda _,x,y: x+y),
    '-': Command(2, 1, lambda _,x,y: y-x),  # y was pushed before x
    '*': Command(2, 1, lambda _,x,y: x*y),
    '/': Command(2, 1, lambda _,x,y: y/x)
}
def getargs():
    """Parse the command line: one or more RPN tokens (values and operators)."""
    arg_parser = argparse.ArgumentParser(description=__doc__)
    arg_parser.add_argument('commands', nargs='+',
                            help='commands and data to process')
    return arg_parser.parse_args()
def main(args):
    """Evaluate the RPN tokens in args.commands against a fresh stack."""
    stack = Stack()
    for command_key in args.commands:
        if command_key in COMMAND_DEFINITIONS:
            COMMAND_DEFINITIONS[command_key](stack)
        else:
            stack.push(Decimal(command_key)) #it's a value
    # Dump whatever remains on the stack so the user sees the result.
    if stack.peek() is not None:
        COMMAND_DEFINITIONS['f'](stack)
if __name__=='__main__':
    try:
        main(getargs())
    except KeyboardInterrupt:
        # Ctrl-C: exit quietly instead of dumping a traceback.
        print('exiting gracefully', file=sys.stderr)
| soshtolsus/repoca | main.py | Python | gpl-3.0 | 1,181 |
import random
import numpy
from rdkit.ML.DecTree import ID3
def GenRandomExamples(nVars=10, randScale=0.3, bitProb=0.5, nExamples=500, seed=(0, 0),
                      addResults=1):
    """Build a random boolean data set for decision-tree experiments.

    Each example is a list of nVars random booleans; when addResults is
    truthy, a boolean class label (weighted sum >= 1.0) is appended.
    Returns (examples, attrs, nPossibleVals).
    """
    random.seed(seed[0])
    # One random weight per variable, scaled down by randScale.
    weights = randScale * numpy.array([random.random() for _ in range(nVars)])
    examples = []
    for _ in range(nExamples):
        row = [random.random() > bitProb for _ in range(nVars)]
        score = sum(numpy.array(row) * weights)
        if addResults:
            row.append(score >= 1.)
        examples.append(row)
    # NOTE(review): sized from nExamples to match the original code, although
    # only the first nVars+1 entries appear meaningful -- confirm intent.
    nPossibleVals = [2] * (nExamples + 1)
    attrs = list(range(nVars))
    return (examples, attrs, nPossibleVals)
if __name__ == '__main__':  # pragma: nocover
    from rdkit.six.moves import cPickle
    # Generate a data set, persist it, then train and pickle an ID3 tree.
    examples, attrs, nPossibleVals = GenRandomExamples()
    outF = open('random.dat.pkl', 'wb+')
    cPickle.dump(examples, outF)
    cPickle.dump(attrs, outF)
    cPickle.dump(nPossibleVals, outF)
    tree = ID3.ID3Boot(examples, attrs, nPossibleVals)
    tree.Pickle('save.pkl')
| rvianello/rdkit | rdkit/ML/DecTree/randomtest.py | Python | bsd-3-clause | 1,028 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
{
'name': 'Point of Sale',
'version': '1.0.1',
'category': 'Point Of Sale',
'sequence': 20,
'summary': 'Touchscreen Interface for Shops',
'description': """
Quick and Easy sale process
===========================
This module allows you to manage your shop sales very easily with a fully web based touchscreen interface.
It is compatible with all PC tablets and the iPad, offering multiple payment methods.
Product selection can be done in several ways:
* Using a barcode reader
* Browsing through categories of products or via a text search.
Main Features
-------------
* Fast encoding of the sale
* Choose one payment method (the quick way) or split the payment between several payment methods
* Computation of the amount of money to return
* Create and confirm the picking list automatically
* Allows the user to create an invoice automatically
* Refund previous sales
""",
'depends': ['stock_account', 'barcodes'],
'data': [
'security/point_of_sale_security.xml',
'security/ir.model.access.csv',
'data/default_barcode_patterns.xml',
'wizard/pos_box.xml',
'wizard/pos_details.xml',
'wizard/pos_discount.xml',
'wizard/pos_open_statement.xml',
'wizard/pos_payment.xml',
'views/pos_templates.xml',
'views/point_of_sale_template.xml',
'views/point_of_sale_report.xml',
'views/point_of_sale_view.xml',
'views/pos_order_view.xml',
'views/product_view.xml',
'views/pos_category_view.xml',
'views/account_journal_view.xml',
'views/pos_config_view.xml',
'views/pos_session_view.xml',
'views/point_of_sale_sequence.xml',
'data/point_of_sale_data.xml',
'views/pos_order_report_view.xml',
'views/account_statement_view.xml',
'views/account_statement_report.xml',
'views/res_users_view.xml',
'views/res_partner_view.xml',
'views/res_config_view.xml',
'views/report_statement.xml',
'views/report_userlabel.xml',
'views/report_saledetails.xml',
'views/point_of_sale.xml',
'views/point_of_sale_dashboard.xml',
],
'demo': [
'data/point_of_sale_demo.xml',
],
'installable': True,
'application': True,
'qweb': ['static/src/xml/pos.xml'],
'website': 'https://www.odoo.com/page/point-of-sale',
}
| chienlieu2017/it_management | odoo/addons/point_of_sale/__manifest__.py | Python | gpl-3.0 | 2,501 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Triangle Project Code.
# Triangle analyzes the lengths of the sides of a triangle
# (represented by a, b and c) and returns the type of triangle.
#
# It returns:
# 'equilateral' if all sides are equal
# 'isosceles' if exactly 2 sides are equal
# 'scalene' if no sides are equal
#
# The tests for this method can be found in
# about_triangle_project.py
# and
# about_triangle_project_2.py
#
def triangle(a, b, c):
    """Classify a triangle by its three side lengths.

    Returns 'equilateral', 'isosceles' or 'scalene'; raises TriangleError
    for non-positive sides or sides that violate the triangle inequality.
    """
    side_set = {a, b, c}
    if any(side <= 0 for side in side_set):
        raise TriangleError(AttributeError('Looking for actual triangles, kids'))
    # Degenerate check: the longest side must be shorter than the sum of
    # the other two, i.e. the perimeter must exceed twice the longest side.
    if a + b + c <= 2 * max(a, b, c):
        raise TriangleError(AttributeError('There is no triangle, or Arizona'))
    return {1: 'equilateral', 2: 'isosceles', 3: 'scalene'}[len(side_set)]
# Error class used in part 2. No need to change this code.
class TriangleError(Exception):
    """Raised when the given side lengths cannot form a real triangle."""
    pass
| lorimccurry/python_koans | python3/koans/triangle.py | Python | mit | 1,034 |
#!/usr/bin/env python3
# Package marker for day8; declares the author, no runtime behavior.
__author__ = 'DSOWASP'
| dianshen/python_day | day8/__init__.py | Python | apache-2.0 | 46 |
import plugnhack_extension
import datetime
import json
def enum(**named_values):
    """
    Simulate the 'enum' data type of languages such as C, C++ and Java.

    A built-in enum type exists only from Python 3.4 (backported to 2.7 as
    the pypi 'enum34' package, installable via: pip install enum34); this
    helper builds an equivalent class on the fly from keyword arguments.
    """
    members = dict(named_values)
    return type('Enum', (), members)
class ClientMessage(object):
    """
    Represents a message exchanged with a monitored client page.

    Tracks the message payload (a parsed JSON object), the time it was
    received, its lifecycle state and whether its content was modified,
    and provides accessors for the well-known payload fields ('from',
    'to', 'type', 'data', 'target', 'messageId', 'endpointId') plus a
    to_map() view suitable for display.
    """
    def __init__(self, client_id=None, json_obj=None):
        # id of the monitored page
        self._client_id = client_id or None
        # payload to inspect/update, parsed from its JSON string form.
        # BUGFIX: the original evaluated json.loads(json_obj) before the
        # 'or None' fallback, so the default json_obj=None raised TypeError.
        self._json_obj = json.loads(json_obj) if json_obj is not None else None
        # time at which the message was received
        self._received = datetime.datetime.now()
        # lifecycle states a message can pass through
        self.State = enum(pending="pending", received="received", resent="resent",
                          dropped="dropped", oraclehit="oraclehit")
        # a new message starts out received and unmodified
        self._state = self.State.received
        self._changed = False
        self._index = -1
        self._extra_fields = dict()
        # payload fields copied verbatim into to_map() when present
        self._reflect_fields = ["eventData", "originalEventTarget"]

    # Position of this message in the containing table/list.
    @property
    def index(self):
        return self._index

    @index.setter
    def index(self, index):
        self._index = index

    # JSON string form of the payload.
    @property
    def json_obj(self):
        return json.dumps(self._json_obj)

    @json_obj.setter
    def json_obj(self, json_obj):
        self._json_obj = json.loads(json_obj)

    # Time at which the message was received.
    @property
    def received(self):
        return self._received

    @received.setter
    def received(self, received):
        self._received = received

    # Field accessors.  'x or None' preserves the original behaviour of
    # mapping falsy payload values (e.g. "" or 0) to None as well.
    def get_from(self):
        return self._json_obj.get("from") or None

    def set_from(self, msg_from):
        self._json_obj["from"] = msg_from

    def get_to(self):
        return self._json_obj.get("to") or None

    def set_to(self, msg_to):
        self._json_obj["to"] = msg_to

    def get_type(self):
        # Example of type: "type":"setConfig"
        return self._json_obj.get("type") or None

    def set_type(self, type_p):
        self._json_obj["type"] = type_p

    def get_data(self):
        return self._json_obj.get("data") or None

    def set_data(self, data):
        self._json_obj["data"] = data

    def get_endpoint_id(self):
        return self._json_obj.get("endpointId") or None

    def set_endpoint_id(self, endpoint_id):
        self._json_obj["endpointId"] = endpoint_id

    def to_map(self):
        """Return a dict of the populated message fields."""
        h_map = dict()
        for key, getter in (("to", self.get_to),
                            ("from", self.get_from),
                            ("type", self.get_type),
                            ("target", self.get_target),
                            ("data", self.get_data),
                            ("messageId", self.get_message_id),
                            ("endpointId", self.get_endpoint_id)):
            value = getter()
            if value is not None:
                h_map[key] = value
        for field in self._reflect_fields:
            # BUGFIX: the original read self._json_obs (typo), which raised
            # AttributeError on every to_map() call.
            data = self._json_obj.get(field)
            if data is not None:
                h_map[field] = data
        # .items() instead of the Python2-only .iteritems()
        for key, value in self._extra_fields.items():
            h_map[key] = value
        if self._changed:
            h_map["changed"] = True
        return h_map

    def get_target(self):
        return self._json_obj.get("target") or None

    def set_target(self, target):
        self._json_obj["target"] = target

    def get_message_id(self):
        return self._json_obj.get("messageId") or None

    def set_message_id(self, msg_id):
        self._json_obj["messageId"] = msg_id

    # Id of the monitored client page.
    @property
    def client_id(self):
        return self._client_id

    @client_id.setter
    def client_id(self, client_id):
        self._client_id = client_id

    def is_in_scope(self):
        # Scope checks are not implemented for client messages.
        return False

    def is_force_intercept(self):
        return False

    # Whether the message content was modified after receipt.
    @property
    def changed(self):
        return self._changed

    @changed.setter
    def changed(self, changed):
        self._changed = changed

    # Current lifecycle state (one of self.State's values).
    @property
    def state(self):
        return self._state

    @state.setter
    def state(self, state):
        self._state = state

    def set_key_value(self, key, value):
        """Set key in both the payload and the extras; value=None removes it."""
        if value is None:
            self._extra_fields.pop(key, None)
            self._json_obj.pop(key, None)
        else:
            self._extra_fields[key] = value
            self._json_obj[key] = value

    def get_json(self, key):
        """Return the payload value of 'key' re-encoded as JSON."""
        return json.dumps(self._json_obj.get(key))

    def get_bool(self, key):
        # 'key in dict' instead of the Python2-only has_key()
        return key in self._json_obj

    # Extra (non-payload) fields merged into to_map().
    @property
    def extra_fields(self):
        return self._extra_fields
| DePierre/owtf | framework/http/proxy/plugnhack/client_message.py | Python | bsd-3-clause | 7,671 |
#!/usr/bin/env python3
# ./upload.py
# Upload archives to Google Drive.
# NOTE(review): the click coordinates and workspace hotkeys below are
# hard-coded for one specific desktop layout -- adapt before running.

import pyautogui, time, sys

# Refuse to run unless explicitly forced with -f.
if len(sys.argv) <= 1 or sys.argv[1] != '-f':
    print('Adapt the script to the local context before running.'); exit()

# Pause 1.5s between every pyautogui action.
pyautogui.PAUSE = 1.5
# go to web browser
pyautogui.hotkey('winleft', '1')
pyautogui.hotkey('ctrl', 't')
pyautogui.typewrite('drive/u/1')
time.sleep(1)
pyautogui.hotkey('down')
pyautogui.hotkey('enter')
time.sleep(5)
# open import menu
pyautogui.click(x=69, y=247)
pyautogui.hotkey('down')
pyautogui.hotkey('down')
pyautogui.hotkey('enter')
pyautogui.hotkey('winleft', '3')
input("Please press [enter] if the two archives are done with code zero: ")
pyautogui.hotkey('winleft', '1')
pyautogui.click(x=328, y=249)
pyautogui.click(x=761, y=443)
pyautogui.hotkey('enter')
time.sleep(1)
# open import menu
pyautogui.click(x=69, y=247)
pyautogui.hotkey('down')
pyautogui.hotkey('down')
pyautogui.hotkey('enter')
pyautogui.click(x=755, y=388)
pyautogui.hotkey('enter')
pyautogui.click(x=794, y=273)
pyautogui.hotkey('enter')
# go back
pyautogui.hotkey('winleft', '3')
| Fornost461/drafts-and-stuff | Python/pyautogui/upload.py | Python | cc0-1.0 | 1,095 |
# -*- coding: utf-8 -*-
"""
Классы для взаимодействия конфига и GUI
"""
from abc import ABCMeta, abstractmethod
import wx
class BaseElement(metaclass=ABCMeta):
    """Bind a config option to the wx control that edits it."""

    def __init__(self,
                 option,
                 control: wx.Control):
        """
        option -- an option from core.config
        """
        self.option = option
        self.control = control
        self._setGUIValue()

    def isValueChanged(self):
        """
        Return True if the value in the GUI control differs from the option.
        """
        return self._getGUIValue() != self.option.value

    def save(self):
        """Store the current GUI value back into the config option."""
        self.option.value = self._getGUIValue()

    @abstractmethod
    def _getGUIValue(self):
        """
        Read the current value from the GUI control.
        Overridden in derived classes.
        """
        pass

    @abstractmethod
    def _setGUIValue(self):
        """
        Update the GUI control from the option value.
        Overridden in derived classes.
        """
        pass
class StringElement (BaseElement):
    """String setting.  Control: any with GetValue/SetValue (e.g. wx.TextCtrl)."""

    def _getGUIValue(self):
        """
        Read the current value from the GUI control.
        """
        return self.control.GetValue()

    def _setGUIValue(self):
        """
        Update the GUI control from the option value.
        """
        self.control.SetValue(self.option.value)
class BooleanElement (BaseElement):
    """
    Boolean setting.
    Control: wx.CheckBox
    """
    def _getGUIValue(self):
        """
        Read the current value from the GUI control.
        """
        return self.control.IsChecked()

    def _setGUIValue(self):
        """
        Update the GUI control from the option value.
        """
        self.control.SetValue(self.option.value)
class ColourElement (BaseElement):
    """
    Colour setting.
    Control: wx.ColourPickerCtrl
    """
    def _getGUIValue(self):
        """
        Read the current value from the GUI control as an HTML colour string.
        """
        return self.control.GetColour().GetAsString(wx.C2S_HTML_SYNTAX)

    def _setGUIValue(self):
        """
        Update the GUI control from the option value.
        """
        self.control.SetColour(self.option.value)
class IntegerElement (BaseElement):
    """
    Integer setting.
    Control: wx.SpinCtrl
    """
    def __init__(self, option, control, minValue, maxValue):
        super().__init__(option, control)
        self.control.SetRange(minValue, maxValue)
        # The base __init__ already called _setGUIValue(); call it again
        # after SetRange so the value is applied with the range in effect.
        self._setGUIValue()

    def _getGUIValue(self):
        """
        Read the current value from the GUI control.
        """
        return self.control.GetValue()

    def _setGUIValue(self):
        """
        Update the GUI control from the option value.
        """
        self.control.SetValue(self.option.value)
class FontElement (object):
    """
    Font chooser setting.
    Control: wx.FontPickerCtrl
    """
    def __init__(self, option, control):
        self.option = option
        self.control = control
        self._setGUIValue()

    def isValueChanged(self):
        """
        Return True if the value in the GUI control has changed.
        """
        # Assume the value always changes.
        # An honest comparison could be added later if needed.
        return True

    def save(self):
        """Store the selected font's attributes back into the option."""
        newFont = self.control.GetSelectedFont()
        self.option.size.value = newFont.GetPointSize()
        self.option.faceName.value = newFont.GetFaceName()
        self.option.bold.value = newFont.GetWeight() == wx.FONTWEIGHT_BOLD
        self.option.italic.value = newFont.GetStyle() == wx.FONTSTYLE_ITALIC

    def _setGUIValue(self):
        """
        Update the GUI control from the option values.
        """
        fontSize = self.option.size.value
        fontFaceName = self.option.faceName.value
        fontIsBold = self.option.bold.value
        fontIsItalic = self.option.italic.value
        font = wx.Font(
            fontSize, wx.FONTFAMILY_DEFAULT,
            wx.FONTSTYLE_ITALIC if fontIsItalic else wx.FONTSTYLE_NORMAL,
            wx.FONTWEIGHT_BOLD if fontIsBold else wx.FONTWEIGHT_NORMAL,
            False,
            fontFaceName,
            wx.FONTENCODING_DEFAULT)
        self.control.SetSelectedFont(font)
| unreal666/outwiker | src/outwiker/gui/preferences/configelements.py | Python | gpl-3.0 | 5,919 |
from vsg.rule_group import indent
from vsg import parser
from vsg import violation
from vsg.rules import utils as rules_utils
class token_indent(indent.Rule):
    '''
    Checks the indentation of lines beginning with the configured tokens.

    Parameters
    ----------
    name : string
        The group the rule belongs to.
    identifier : string
        unique identifier.  Usually in the form of 00N.
    lTokens : list of token types
        token type to apply the indent rule
    '''
    def __init__(self, name, identifier, lTokens):
        indent.Rule.__init__(self, name=name, identifier=identifier)
        self.lTokens = lTokens

    def _get_tokens_of_interest(self, oFile):
        # Only lines that *start* with one of the configured tokens matter.
        return oFile.get_tokens_at_beginning_of_line_matching(self.lTokens)

    def _analyze(self, lToi):
        # Each oToi is either [whitespace, token] or just [token].
        for oToi in lToi:
            lTokens = oToi.get_tokens()
            if indent_should_be_zero_but_has_leading_whitespace(lTokens):
                create_zero_indent_violation(self, oToi)
            elif indent_exists_but_is_incorrect(self, lTokens):
                create_indent_violation(self, oToi, lTokens)
            elif no_indent_exists_but_should(self, lTokens):
                create_no_indent_violation(self, oToi, lTokens)

    def _fix_violation(self, oViolation):
        lTokens = oViolation.get_tokens()
        if oViolation.get_action() == 'remove_whitespace':
            # Drop the leading whitespace token entirely.
            oViolation.set_tokens([lTokens[1]])
        elif oViolation.get_action() == 'adjust_whitespace':
            # Resize the existing whitespace to indent_level * indentSize spaces.
            lTokens[0].set_value(lTokens[1].get_indent() * self.indentSize * ' ')
            oViolation.set_tokens(lTokens)
        elif oViolation.get_action() == 'add_whitespace':
            rules_utils.insert_whitespace(lTokens, 0, lTokens[0].get_indent() * self.indentSize)
            oViolation.set_tokens(lTokens)
def indent_should_be_zero_but_has_leading_whitespace(lTokens):
    """True when a [whitespace, token] pair's code token expects indent level 0."""
    return len(lTokens) == 2 and lTokens[1].get_indent() == 0
# Module-level helpers below take the rule instance as their first argument
# ('self') so they can read indentSize and record violations.

def create_zero_indent_violation(self, oToi):
    sSolution = "Indent level 0"
    create_violation(self, oToi, sSolution, 'remove_whitespace')

def indent_exists_but_is_incorrect(self, lTokens):
    # [whitespace, token]: compare actual whitespace width to the expected
    # indent (indent level * indentSize spaces).
    if len(lTokens) == 2:
        if lTokens[1].get_indent() is None:
            return False
        iWhitespace = len(lTokens[0].get_value())
        iIndent = self.indentSize * lTokens[1].get_indent()
        if iWhitespace != iIndent:
            return True
    return False

def create_indent_violation(self, oToi, lTokens):
    sSolution = 'Indent level ' + str(lTokens[1].get_indent())
    create_violation(self, oToi, sSolution, 'adjust_whitespace')

def no_indent_exists_but_should(self, lTokens):
    # Single token at column 0 that expects a non-zero indent level.
    if not len(lTokens) == 1:
        return False
    if lTokens[0].get_indent() is None:
        return False
    if self.indentSize == 0:
        return False
    if lTokens[0].get_indent() != 0:
        return True
    return False

def create_no_indent_violation(self, oToi, lTokens):
    sSolution = 'Indent level ' + str(lTokens[0].get_indent())
    create_violation(self, oToi, sSolution, 'add_whitespace')

def create_violation(self, oToi, sSolution, sAction):
    # Record the violation together with the fix action _fix_violation expects.
    oViolation = violation.New(oToi.get_line_number(), oToi, sSolution)
    oViolation.set_action(sAction)
    self.add_violation(oViolation)
| jeremiah-c-leary/vhdl-style-guide | vsg/rules/token_indent.py | Python | gpl-3.0 | 3,272 |
class BigchainDBError(Exception):
    """Base class for BigchainDB exceptions."""

class CriticalDoubleSpend(BigchainDBError):
    """Data integrity error that requires attention: the same output was spent twice."""

class CriticalDoubleInclusion(BigchainDBError):
    """Data integrity error that requires attention: duplicate inclusion detected."""

class CriticalDuplicateVote(BigchainDBError):
    """Data integrity error that requires attention: duplicate vote detected."""
| stanta/darfchain | darfchain_docker_vagrant/bigchaindb/exceptions.py | Python | gpl-3.0 | 391 |
# Copyright 2016 Joel Dunham
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains the :class:`MorphologiesController` and its auxiliary functions.
.. module:: morphologies
:synopsis: Contains the morphologies controller and its auxiliary functions.
"""
import logging
import simplejson as json
import os
import cPickle
from uuid import uuid4
import codecs
from paste.fileapp import FileApp
from pylons.controllers.util import forward
from pylons import request, response, session, config
from formencode.validators import Invalid
from onlinelinguisticdatabase.lib.base import BaseController
from onlinelinguisticdatabase.lib.schemata import MorphologySchema, MorphemeSequencesSchema
import onlinelinguisticdatabase.lib.helpers as h
from onlinelinguisticdatabase.lib.SQLAQueryBuilder import SQLAQueryBuilder, OLDSearchParseError
from onlinelinguisticdatabase.model.meta import Session
from onlinelinguisticdatabase.model import Morphology, MorphologyBackup
from onlinelinguisticdatabase.lib.foma_worker import foma_worker_q
log = logging.getLogger(__name__)
class MorphologiesController(BaseController):
"""Generate responses to requests on morphology resources.
A morphology, as here conceived, is an FST that is both a recognizer and a transducer, i.e.,
it recognizes only those sequences of morphemes that are form valid words and it maps sequences
of morphemes (in the general sense) to sequences of morpheme *forms*. By a morpheme in the general
sense, I mean to refer to ordered pairs of morpheme form and morpheme gloss. That is, an OLD
morphology is an FST that maps something like 'chien|dog-s|PL' to 'chien-s' (and vice versa) and
which does not recognize 's|PL-chien|dog'.
REST Controller styled on the Atom Publishing Protocol.
.. note::
The ``h.jsonify`` decorator converts the return value of the methods to
JSON.
TODO: consider generating values for ``lexicon_script`` and ``rules_script`` attributes
which, by default, are concatenated to produce a value for the ``script`` attribute but
where such default auto-generation can be overridden by the user so that, for example, the
auto-generated subscripts could be used to hand-write a more intelligent morphology FST script.
"""
query_builder = SQLAQueryBuilder('Morphology', config=config)
    @h.jsonify
    @h.restrict('SEARCH', 'POST')
    @h.authenticate
    def search(self):
        """Return the list of morphology resources matching the input JSON
        query.
        :URL: ``SEARCH /morphologies`` (or ``POST /morphologies/search``)
        :request body: A JSON object of the form::
            {"query": {"filter": [ ... ], "order_by": [ ... ]},
             "paginator": { ... }}
            where the ``order_by`` and ``paginator`` attributes are optional.
        """
        try:
            json_search_params = unicode(request.body, request.charset)
            python_search_params = json.loads(json_search_params)
            query = self.query_builder.get_SQLA_query(python_search_params.get('query'))
            return h.add_pagination(query, python_search_params.get('paginator'))
        except h.JSONDecodeError:
            response.status_int = 400
            return h.JSONDecodeErrorResponse
        except (OLDSearchParseError, Invalid), e:
            response.status_int = 400
            return {'errors': e.unpack_errors()}
        # NOTE(review): deliberate catch-all so malformed queries yield a 400
        # response instead of an unhandled 500 -- confirm this is intended.
        except:
            response.status_int = 400
            return {'error': u'The specified search parameters generated an invalid database query'}
    @h.jsonify
    @h.restrict('GET')
    @h.authenticate
    def new_search(self):
        """Return the data necessary to search the morphology resources.
        :URL: ``GET /morphologies/new_search``
        :returns: ``{"search_parameters": {"attributes": { ... }, "relations": { ... }}``
        """
        return {'search_parameters': h.get_search_parameters(self.query_builder)}

    @h.jsonify
    @h.restrict('GET')
    @h.authenticate
    def index(self):
        """Get all morphology resources.
        :URL: ``GET /morphologies`` with optional query string parameters for
            ordering and pagination.
        :returns: a list of all morphology resources.
        .. note::
            See :func:`utils.add_order_by` and :func:`utils.add_pagination` for the
            query string parameters that effect ordering and pagination.
        """
        try:
            # Eager-load related rows to avoid N+1 queries when serializing.
            query = h.eagerload_morphology(Session.query(Morphology))
            query = h.add_order_by(query, dict(request.GET), self.query_builder)
            return h.add_pagination(query, dict(request.GET))
        except Invalid, e:
            response.status_int = 400
            return {'errors': e.unpack_errors()}
    @h.jsonify
    @h.restrict('POST')
    @h.authenticate
    @h.authorize(['administrator', 'contributor'])
    def create(self):
        """Create a new morphology resource and return it.
        :URL: ``POST /morphologies``
        :request body: JSON object representing the morphology to create.
        :returns: the newly created morphology.
        """
        try:
            schema = MorphologySchema()
            values = json.loads(unicode(request.body, request.charset))
            data = schema.to_python(values)
            morphology = create_new_morphology(data)
            Session.add(morphology)
            Session.commit()
            # Create the on-disk directory only after the DB row exists,
            # so the morphology id is available for the path.
            morphology.make_directory_safely(morphology.directory)
            return morphology
        except h.JSONDecodeError:
            response.status_int = 400
            return h.JSONDecodeErrorResponse
        except Invalid, e:
            response.status_int = 400
            return {'errors': e.unpack_errors()}

    @h.jsonify
    @h.restrict('GET')
    @h.authenticate
    @h.authorize(['administrator', 'contributor'])
    def new(self):
        """Return the data necessary to create a new morphology.
        :URL: ``GET /morphologies/new``.
        :returns: a dictionary containing summarizing the corpora.
        """
        return get_data_for_new_edit(dict(request.GET))
    @h.jsonify
    @h.restrict('PUT')
    @h.authenticate
    @h.authorize(['administrator', 'contributor'])
    def update(self, id):
        """Update a morphology and return it.
        :URL: ``PUT /morphologies/id``
        :Request body: JSON object representing the morphology with updated attribute values.
        :param str id: the ``id`` value of the morphology to be updated.
        :returns: the updated morphology model.
        """
        morphology = h.eagerload_morphology(Session.query(Morphology)).get(int(id))
        if morphology:
            try:
                schema = MorphologySchema()
                values = json.loads(unicode(request.body, request.charset))
                state = h.get_state_object(values)
                state.id = id
                data = schema.to_python(values, state)
                # Snapshot the current state before mutating, so a backup can
                # be written if the update actually changes anything.
                morphology_dict = morphology.get_dict()
                morphology = update_morphology(morphology, data)
                # morphology will be False if there are no changes (cf. update_morphology).
                if morphology:
                    backup_morphology(morphology_dict)
                    Session.add(morphology)
                    Session.commit()
                    return morphology
                else:
                    response.status_int = 400
                    return {'error':
                        u'The update request failed because the submitted data were not new.'}
            except h.JSONDecodeError:
                response.status_int = 400
                return h.JSONDecodeErrorResponse
            except Invalid, e:
                response.status_int = 400
                return {'errors': e.unpack_errors()}
        else:
            response.status_int = 404
            return {'error': 'There is no morphology with id %s' % id}

    @h.jsonify
    @h.restrict('DELETE')
    @h.authenticate
    @h.authorize(['administrator', 'contributor'])
    def delete(self, id):
        """Delete an existing morphology and return it.
        :URL: ``DELETE /morphologies/id``
        :param str id: the ``id`` value of the morphology to be deleted.
        :returns: the deleted morphology model.
        """
        morphology = h.eagerload_morphology(Session.query(Morphology)).get(id)
        if morphology:
            # Back up before deleting so the record is recoverable via history().
            morphology_dict = morphology.get_dict()
            backup_morphology(morphology_dict)
            Session.delete(morphology)
            Session.commit()
            # Remove the on-disk artifacts only after the DB delete succeeds.
            morphology.remove_directory()
            return morphology
        else:
            response.status_int = 404
            return {'error': 'There is no morphology with id %s' % id}
    @h.jsonify
    @h.restrict('GET')
    @h.authenticate
    def show(self, id):
        """Return a morphology.
        :URL: ``GET /morphologies/id``
        :param str id: the ``id`` value of the morphology to be returned.
        :GET param str script: if set to '1', the script will be returned with the morphology
        :GET param str lexicon: if set to '1', the lexicon (dict) will be returned with the morphology
        :returns: a morphology model object.
        """
        morphology = h.eagerload_morphology(Session.query(Morphology)).get(id)
        if morphology:
            morphology_dict = morphology.get_dict()
            if request.GET.get('script') == u'1':
                morphology_script_path = morphology.get_file_path('script')
                if os.path.isfile(morphology_script_path):
                    # NOTE(review): file handle is not explicitly closed; relies
                    # on garbage collection -- consider a with-block.
                    morphology_dict['script'] = codecs.open(morphology_script_path, mode='r', encoding='utf8').read()
                else:
                    morphology_dict['script'] = u''
            if request.GET.get('lexicon') == u'1':
                morphology_lexicon_path = morphology.get_file_path('lexicon')
                if os.path.isfile(morphology_lexicon_path):
                    morphology_dict['lexicon'] = cPickle.load(open(morphology_lexicon_path, 'rb'))
                else:
                    morphology_dict['lexicon'] = {}
            return morphology_dict
        else:
            response.status_int = 404
            return {'error': 'There is no morphology with id %s' % id}

    @h.jsonify
    @h.restrict('GET')
    @h.authenticate
    @h.authorize(['administrator', 'contributor'])
    def edit(self, id):
        """Return a morphology and the data needed to update it.
        :URL: ``GET /morphologies/id/edit``
        :param str id: the ``id`` value of the morphology that will be updated.
        :returns: a dictionary of the form::
                {"morphology": {...}, "data": {...}}
            where the value of the ``morphology`` key is a dictionary
            representation of the morphology and the value of the ``data`` key
            is a list of corpora in the database.
        """
        morphology = h.eagerload_morphology(Session.query(Morphology)).get(id)
        if morphology:
            return {'data': get_data_for_new_edit(dict(request.GET)), 'morphology': morphology}
        else:
            response.status_int = 404
            return {'error': 'There is no morphology with id %s' % id}

    @h.jsonify
    @h.restrict('GET')
    @h.authenticate
    def history(self, id):
        """Return the morphology with ``morphology.id==id`` and its previous versions.
        :URL: ``GET /morphologies/history/id``
        :param str id: a string matching the ``id`` or ``UUID`` value of the
            morphology whose history is requested.
        :returns: A dictionary of the form::
                {"morphology": { ... }, "previous_versions": [ ... ]}
            where the value of the ``morphology`` key is the morphology whose
            history is requested and the value of the ``previous_versions`` key
            is a list of dictionaries representing previous versions of the
            morphology.
        """
        morphology, previous_versions = h.get_model_and_previous_versions('Morphology', id)
        if morphology or previous_versions:
            return {'morphology': morphology,
                    'previous_versions': previous_versions}
        else:
            response.status_int = 404
            return {'error': 'No morphologies or morphology backups match %s' % id}
@h.jsonify
@h.restrict('PUT')
@h.authenticate
@h.authorize(['administrator', 'contributor'])
def generate_and_compile(self, id):
"""Generate the morphology's script and compile it as a foma FST.
:URL: ``PUT /morphologies/compile/id``
:param str id: the ``id`` value of the morphology whose script will be compiled.
:returns: if the morphology exists and foma is installed, the morphology
model is returned; ``GET /morphologies/id`` must be polled to
determine when and how the compilation task has terminated.
.. note::
The script is compiled asynchronously in a worker thread. See
:mod:`onlinelinguisticdatabase.lib.foma_worker`.
"""
return generate_and_compile_morphology(id)
@h.jsonify
@h.restrict('PUT')
@h.authenticate
@h.authorize(['administrator', 'contributor'])
def generate(self, id):
"""Generate the morphology's script -- do not compile it.
:URL: ``PUT /morphologies/compile/id``
:param str id: the ``id`` value of the morphology whose script will be compiled.
:returns: if the morphology exists and foma is installed, the morphology
model is returned; ``GET /morphologies/id`` must be polled to
determine when the generation task has terminated.
"""
return generate_and_compile_morphology(id, compile_=False)
@h.restrict('GET')
@h.authenticate_with_JSON
def servecompiled(self, id):
    """Serve the compiled foma script of the morphology.

    :URL: ``GET /morphologies/servecompiled/id``
    :param str id: the ``id`` value of a morphology.
    :returns: a stream of bytes -- the compiled morphology script.

    """
    # This action is not @h.jsonify-decorated, so error bodies are serialized
    # manually with json.dumps below.
    morphology = Session.query(Morphology).get(id)
    if morphology:
        if h.foma_installed():
            foma_file_path = morphology.get_file_path('binary')
            if os.path.isfile(foma_file_path):
                # Stream the compiled binary via paste's FileApp.
                return forward(FileApp(foma_file_path))
            else:
                response.status_int = 400
                return json.dumps({'error': 'Morphology %d has not been compiled yet.' % morphology.id})
        else:
            response.status_int = 400
            return json.dumps({'error': 'Foma and flookup are not installed.'})
    else:
        response.status_int = 404
        return json.dumps({'error': 'There is no morphology with id %s' % id})
@h.jsonify
@h.restrict('PUT')
@h.authenticate
def applydown(self, id):
    """Call foma apply down on the input in the request body using a morphology.

    :URL: ``PUT /morphologies/applydown/id``
    :param str id: the ``id`` value of the morphology that will be used.
    :Request body: JSON object of the form ``{'transcriptions': [t1, t2, ...]}``.
    :returns: if the morphology exists and foma is installed, a JSON object
        of the form ``{t1: [p1t1, p2t1, ...], ...}`` where ``t1`` is a
        transcription from the request body and ``p1t1``, ``p2t1``, etc. are
        outputs of ``t1`` after apply down.

    """
    # Thin wrapper: the shared logic lives in ``apply``.
    return self.apply(id, 'down')
@h.jsonify
@h.restrict('PUT')
@h.authenticate
def applyup(self, id):
    """Call foma apply up on the input in the request body using a morphology.

    :URL: ``PUT /morphologies/applyup/id``
    :param str id: the ``id`` value of the morphology that will be used.
    :Request body: JSON object of the form ``{'transcriptions': [t1, t2, ...]}``.
    :returns: if the morphology exists and foma is installed, a JSON object
        of the form ``{t1: [p1t1, p2t1, ...], ...}`` where ``t1`` is a
        transcription from the request body and ``p1t1``, ``p2t1``, etc. are
        outputs of ``t1`` after apply up.

    """
    # Thin wrapper: the shared logic lives in ``apply``.
    return self.apply(id, 'up')
def apply(self, id, direction):
    """Call foma apply in the direction of ``direction`` on the input in the request body using a morphology.

    :param str id: the ``id`` value of the morphology that will be used.
    :param str direction: the direction of foma application ('up' or 'down').
    :Request body: JSON object of the form ``{'transcriptions': [t1, t2, ...]}``.
    :returns: if the morphology exists and foma is installed, a JSON object
        of the form ``{t1: [p1t1, p2t1, ...], ...}`` where ``t1`` is a
        transcription from the request body and ``p1t1``, ``p2t1``, etc. are
        outputs of ``t1`` after apply up/down.

    """
    morphology = Session.query(Morphology).get(id)
    if morphology:
        if h.foma_installed():
            morphology_binary_path = morphology.get_file_path('binary')
            if os.path.isfile(morphology_binary_path):
                try:
                    # Decode, schema-validate and normalize the request body
                    # before handing it to the model's foma wrapper.
                    inputs = json.loads(unicode(request.body, request.charset))
                    inputs = MorphemeSequencesSchema.to_python(inputs)
                    inputs = [h.normalize(i) for i in inputs['morpheme_sequences']]
                    return morphology.apply(direction, inputs)
                except h.JSONDecodeError:
                    response.status_int = 400
                    return h.JSONDecodeErrorResponse
                except Invalid, e:
                    # FormEncode validation failure on the request body.
                    response.status_int = 400
                    return {'errors': e.unpack_errors()}
            else:
                response.status_int = 400
                return {'error': 'Morphology %d has not been compiled yet.' % morphology.id}
        else:
            response.status_int = 400
            return {'error': 'Foma and flookup are not installed.'}
    else:
        response.status_int = 404
        return {'error': 'There is no morphology with id %s' % id}
def get_data_for_new_edit(GET_params):
    """Assemble the data required to create a new morphology or to edit one.

    Only corpora are offered as candidate values for morphology forms.
    """
    name_map = {'corpora': 'Corpus'}
    getters = {'corpora': h.get_mini_dicts_getter('Corpus')}
    return h.get_data_for_new_action(GET_params, getters, name_map)
################################################################################
# Backup morphology
################################################################################
def backup_morphology(morphology_dict):
    """Backup a morphology.

    :param dict morphology_dict: a representation of a morphology model.
    :returns: ``None``

    """
    # ``vivify`` copies the dict's values onto a fresh MorphologyBackup row;
    # the caller is responsible for committing the session.
    morphology_backup = MorphologyBackup()
    morphology_backup.vivify(morphology_dict)
    Session.add(morphology_backup)
################################################################################
# Morphology Create & Update Functions
################################################################################
def create_new_morphology(data):
    """Create a new morphology.

    :param dict data: the (already validated) data for the morphology to be created.
    :returns: an SQLAlchemy model object representing the morphology.

    """
    morphology = Morphology(
        # Server-derived values (not taken from user input):
        parent_directory = h.get_OLD_directory_path('morphologies', config=config),
        word_boundary_symbol = h.word_boundary_symbol,
        morpheme_delimiters = h.get_morpheme_delimiters(type_=u'unicode'),
        rare_delimiter = h.rare_delimiter,
        UUID = unicode(uuid4()),
        # User-supplied values, normalized where they are free text:
        name = h.normalize(data['name']),
        description = h.normalize(data['description']),
        enterer = session['user'],
        modifier = session['user'],
        datetime_modified = h.now(),
        datetime_entered = h.now(),
        lexicon_corpus = data['lexicon_corpus'],
        rules_corpus = data['rules_corpus'],
        script_type = data['script_type'],
        extract_morphemes_from_rules_corpus = data['extract_morphemes_from_rules_corpus'],
        rules = data['rules'],
        rich_upper = data['rich_upper'],
        rich_lower = data['rich_lower'],
        include_unknowns = data['include_unknowns']
    )
    return morphology
def update_morphology(morphology, data):
    """Update a morphology.

    :param morphology: the morphology model to be updated.
    :param dict data: representation of the updated morphology.
    :returns: the updated morphology model or, if ``changed`` has not been set
        to ``True``, ``False``.

    """
    changed = False
    # (attribute, new value) pairs; ``set_attr`` is presumed to flip the
    # ``changed`` flag whenever an assignment actually alters the model.
    updates = [
        ('name', h.normalize(data['name'])),
        ('description', h.normalize(data['description'])),
        ('lexicon_corpus', data['lexicon_corpus']),
        ('rules_corpus', data['rules_corpus']),
        ('script_type', data['script_type']),
        ('extract_morphemes_from_rules_corpus',
         data['extract_morphemes_from_rules_corpus']),
        ('rules', data['rules']),
        ('rich_upper', data['rich_upper']),
        ('rich_lower', data['rich_lower']),
        ('include_unknowns', data['include_unknowns']),
        ('rare_delimiter', h.rare_delimiter),
        ('word_boundary_symbol', h.word_boundary_symbol),
    ]
    for attr, value in updates:
        changed = morphology.set_attr(attr, value, changed)
    if not changed:
        return changed
    # Something changed: stamp the modifier and modification time.
    session['user'] = Session.merge(session['user'])
    morphology.modifier = session['user']
    morphology.datetime_modified = h.now()
    return morphology
def generate_and_compile_morphology(morphology_id, compile_=True):
    """Enqueue generation (and optionally compilation) of a morphology's script.

    :param morphology_id: the ``id`` value of the morphology.
    :param bool compile_: when ``True``, also compile the generated script as a
        foma FST (requires foma/flookup to be installed).
    :returns: the morphology model on success; otherwise an error dict with an
        appropriate HTTP status set on the response.

    The work itself is performed asynchronously by the foma worker thread;
    callers must poll ``GET /morphologies/id`` to learn the outcome.
    """
    morphology = Session.query(Morphology).get(morphology_id)
    if not morphology:
        response.status_int = 404
        # BUG FIX: the message previously interpolated the builtin ``id``
        # (a function object) instead of the ``morphology_id`` argument.
        return {'error': 'There is no morphology with id %s' % morphology_id}
    if compile_ and not h.foma_installed():
        response.status_int = 400
        return {'error': 'Foma and flookup are not installed.'}
    foma_worker_q.put({
        'id': h.generate_salt(),
        'func': 'generate_and_compile_morphology',
        'args': {
            'morphology_id': morphology.id,
            'compile': compile_,
            'user_id': session['user'].id,
            'timeout': h.morphology_compile_timeout
        }
    })
    return morphology
| jrwdunham/old | onlinelinguisticdatabase/controllers/morphologies.py | Python | apache-2.0 | 23,387 |
# This file is part of PyEMMA.
#
# Copyright (c) 2015, 2014 Computational Molecular Biology Group, Freie Universitaet Berlin (GER)
#
# PyEMMA is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import
import numbers
import numpy as np
from pyemma.coordinates.data._base.datasource import DataSourceIterator, DataSource
from pyemma.coordinates.data._base.random_accessible import RandomAccessStrategy
import functools
__author__ = 'noe, marscher'
class DataInMemory(DataSource):
    r"""
    multi-dimensional data fully stored in memory.

    Used to pass arbitrary coordinates to pipeline. Data is being flattened to
    two dimensions to ensure it is compatible.

    Parameters
    ----------
    data : ndarray (nframe, ndim) or list of ndarrays (nframe, ndim)
        Data has to be either one 2d array which stores amount of frames in first
        dimension and coordinates/features in second dimension or a list of this
        arrays.
    """

    # Placeholder "filename" reported for every in-memory trajectory.
    IN_MEMORY_FILENAME = '<in_memory_file>'

    def _create_iterator(self, skip=0, chunk=0, stride=1, return_trajindex=False, cols=None):
        # Factory hook used by the DataSource base class.
        return DataInMemoryIterator(self, skip, chunk, stride, return_trajindex, cols)

    def __init__(self, data, chunksize=5000, **kw):
        super(DataInMemory, self).__init__(chunksize=chunksize)
        self._is_reader = True
        self._is_random_accessible = True
        # Random-access strategies: dense cuboid, per-traj jagged, linear over
        # the concatenation, and linear within a trajectory selection.
        self._ra_cuboid = DataInMemoryCuboidRandomAccessStrategy(self, 3)
        self._ra_jagged = DataInMemoryJaggedRandomAccessStrategy(self, 3)
        self._ra_linear_strategy = DataInMemoryLinearRandomAccessStrategy(self, 2)
        self._ra_linear_itraj_strategy = DataInMemoryLinearItrajRandomAccessStrategy(self, 3)
        # Accept a single array or a sequence of arrays.
        if not isinstance(data, (list, tuple)):
            data = [data]
        # storage for arrays (used in _add_array_to_storage)
        self._data = []
        # everything is an array
        if all(isinstance(d, np.ndarray) for d in data):
            for d in data:
                self._add_array_to_storage(d)
        else:
            raise ValueError("Please supply numpy.ndarray, or list/tuple of ndarray."
                             " Your input was %s" % str(data))
        self._set_dimensions_and_lenghts()
        self._filenames = [DataInMemory.IN_MEMORY_FILENAME] * self._ntraj

    @property
    def data(self):
        """
        Property that returns the data that was hold in storage (data in memory mode).

        Returns
        -------
        list : The stored data.
        """
        return self._data

    def _add_array_to_storage(self, array):
        """
        checks shapes, eg convert them (2d), raise if not possible
        after checks passed, add array to self._data
        """
        if array.ndim == 1:
            # 1d input becomes a single-column 2d array.
            array = np.atleast_2d(array).T
        elif array.ndim == 2:
            pass
        else:
            shape = array.shape
            # hold first dimension, multiply the rest
            shape_2d = (shape[0], functools.reduce(lambda x, y: x * y, shape[1:]))
            array = np.reshape(array, shape_2d)
        self.data.append(array)

    def _set_dimensions_and_lenghts(self):
        # NOTE: "lenghts" is a historical typo; kept as-is since subclasses or
        # callers elsewhere may reference this name.
        # number of trajectories/data sets
        self._ntraj = len(self.data)
        if self.ntraj == 0:
            raise ValueError("no valid data")
        # this works since everything is flattened to 2d
        self._lengths = [np.shape(d)[0] for d in self.data]
        # ensure all trajs have same dim
        ndims = [np.shape(x)[1] for x in self.data]
        if not np.unique(ndims).size == 1:
            raise ValueError("input data has different dimensions!"
                             "Dimensions are = %s" % ndims)
        self._ndim = ndims[0]

    @classmethod
    def load_from_files(cls, files):
        """ construct this by loading all files into memory

        Parameters
        ----------
        files: str or list of str
            filenames to read from
        """
        # import here to avoid cyclic import
        from pyemma.coordinates.data.numpy_filereader import NumPyFileReader
        reader = NumPyFileReader(files)
        data = reader.get_output()
        return cls(data)

    def describe(self):
        """Return a one-line human-readable summary of the stored array shapes."""
        return "[DataInMemory array shapes: %s]" % [np.shape(x) for x in self.data]
class DataInMemoryCuboidRandomAccessStrategy(RandomAccessStrategy):
    """Random access returning a dense (trajectories x frames x dims) array."""

    def _handle_slice(self, idx):
        # Normalise the slice expression into (itrajs, frames, dims),
        # filling missing axes with full slices.
        idx = np.index_exp[idx]
        itrajs, frames, dims = None, None, None
        if isinstance(idx, (list, tuple)):
            if len(idx) == 1:
                itrajs, frames, dims = idx[0], slice(None, None, None), slice(None, None, None)
            if len(idx) == 2:
                itrajs, frames, dims = idx[0], idx[1], slice(None, None, None)
            if len(idx) == 3:
                itrajs, frames, dims = idx[0], idx[1], idx[2]
            if len(idx) > 3 or len(idx) == 0:
                raise IndexError("invalid slice by %s" % idx)
        return self._get_itraj_random_accessible(itrajs, frames, dims)

    def _get_itraj_random_accessible(self, itrajs, frames, dims):
        dims = [dims] if isinstance(dims, numbers.Integral) else dims
        itrajs = self._get_indices(itrajs, self._source.ntraj)
        # Frames are bounded by the shortest selected trajectory so the
        # result can be a rectangular (cuboid) array.
        frames = self._get_indices(frames, min(self._source.trajectory_lengths(1, 0)[itrajs]))
        if isinstance(dims, (list, tuple)):
            # Fancy dim selection: slice frames first, then pick the dims.
            return np.array(
                [self._source.data[itraj][frames] for itraj in itrajs],
                dtype=self._source.output_type()
            )[:, :, dims]
        return np.array([self._source.data[itraj][frames, dims] for itraj in itrajs], dtype=self._source.output_type())
class DataInMemoryJaggedRandomAccessStrategy(DataInMemoryCuboidRandomAccessStrategy):
    """Like the cuboid strategy, but returns one array per trajectory (a
    list, so trajectory lengths may differ -- no rectangular clipping)."""

    def _get_itraj_random_accessible(self, itrajs, frames, dims):
        itrajs = self._get_indices(itrajs, self._source.ntraj)
        return [self._source.data[itraj][frames, dims] for itraj in itrajs]
class DataInMemoryLinearRandomAccessStrategy(RandomAccessStrategy):
    """Random access addressing frames linearly across the concatenation of
    all trajectories (two-dimensional slicing: frames x dims)."""

    def _handle_slice(self, idx):
        idx = np.index_exp[idx]
        frames, dims = None, None
        if isinstance(idx, (tuple, list)):
            if len(idx) == 1:
                frames, dims = idx[0], slice(None, None, None)
            if len(idx) == 2:
                frames, dims = idx[0], idx[1]
            if len(idx) > 2:
                raise IndexError("Slice was more than two-dimensional, not supported.")
        # Cumulative lengths map a global frame index to (traj, local index).
        cumsum = np.cumsum(self._source.trajectory_lengths())
        if not isinstance(frames, (list, np.ndarray)):
            frames = self._get_indices(frames, cumsum[-1])
        dims = self._get_indices(dims, self._source.ndim)
        nframes = len(frames)
        ndims = len(dims)
        data = np.empty((nframes, ndims), dtype=self._source.output_type())
        # imported here, presumably to avoid a cyclic import -- verify
        from pyemma.coordinates.clustering import UniformTimeClustering
        for i, x in enumerate(frames):
            traj, idx = UniformTimeClustering._idx_to_traj_idx(x, cumsum)
            data[i, :] = self._source.data[traj][idx, dims]
        return data
class DataInMemoryLinearItrajRandomAccessStrategy(DataInMemoryCuboidRandomAccessStrategy):
    """Linear frame addressing restricted to a selection of trajectories."""

    def _get_itraj_random_accessible(self, itrajs, frames, dims):
        itrajs = self._get_indices(itrajs, self._source.ntraj)
        # Frame indices are relative to the concatenation of the *selected*
        # trajectories only.
        frames = self._get_indices(frames, sum(self._source.trajectory_lengths()[itrajs]))
        dims = self._get_indices(dims, self._source.ndim)
        nframes = len(frames)
        ndims = len(dims)
        # NOTE(review): dims are 0-based, yet the check uses ``>`` rather than
        # ``>=`` -- a dim equal to ndim would pass here. Presumably
        # ``_get_indices`` already bounds the values; verify.
        if max(dims) > self._source.ndim:
            raise IndexError("Data only has %s dimensions, wanted to slice by dimension %s."
                             % (self._source.ndim, max(dims)))
        cumsum = np.cumsum(self._source.trajectory_lengths()[itrajs])
        data = np.empty((nframes, ndims), dtype=self._source.output_type())
        from pyemma.coordinates.clustering import UniformTimeClustering
        for i, x in enumerate(frames):
            # Map the selection-relative index to (position within the
            # selection, local frame), then to the absolute trajectory index.
            traj, idx = self._map_to_absolute_traj_idx(UniformTimeClustering._idx_to_traj_idx(x, cumsum), itrajs)
            data[i, :] = self._source.data[traj][idx, dims]
        return data

    @staticmethod
    def _map_to_absolute_traj_idx(cumsum_idx, itrajs):
        # cumsum_idx = (index into the itrajs selection, local frame index)
        return itrajs[cumsum_idx[0]], cumsum_idx[1]
class DataInMemoryIterator(DataSourceIterator):
    """Iterator over a DataInMemory source, yielding one chunk at a time."""

    def close(self):
        # Nothing to release: the data lives in memory.
        pass

    def __init__(self, data_source, skip=0, chunk=0, stride=1, return_trajindex=False, cols=None):
        super(DataInMemoryIterator, self).__init__(data_source, skip, chunk,
                                                   stride, return_trajindex, cols)

    def _next_chunk(self):
        # State: self._itraj is the current trajectory, self._t the position
        # (frame offset or random-access offset) within it.
        if self._itraj >= self._data_source.ntraj:
            raise StopIteration()
        traj_len = self._data_source._lengths[self._itraj]
        traj = self._data_source.data[self._itraj]
        # only apply _skip at the beginning of each trajectory
        skip = self.skip if self._t == 0 else 0
        # complete trajectory mode
        if self.chunksize == 0:
            if not self.uniform_stride:
                # Random-access stride: return exactly the precomputed indices.
                chunk = self._data_source.data[self._itraj][self.ra_indices_for_traj(self._itraj)]
                self._itraj += 1
                # skip trajs which are not included in stride
                while self._itraj not in self.traj_keys and self._itraj < self.number_of_trajectories():
                    self._itraj += 1
                return chunk
            else:
                chunk = traj[skip::self.stride]
                self._itraj += 1
                return chunk
        # chunked mode
        else:
            if not self.uniform_stride:
                # Slice the next window out of this traj's random-access indices.
                random_access_chunk = self._data_source.data[self._itraj][
                    self.ra_indices_for_traj(self._itraj)[self._t:min(
                        self._t + self.chunksize, self.ra_trajectory_length(self._itraj)
                    )]
                ]
                self._t += self.chunksize
                if self._t >= self.ra_trajectory_length(self._itraj):
                    self._itraj += 1
                    self._t = 0
                # skip trajs which are not included in stride
                while (self._itraj not in self.traj_keys or self._t >= self.ra_trajectory_length(self._itraj)) \
                        and self._itraj < self.number_of_trajectories():
                    self._itraj += 1
                    self._t = 0
                return random_access_chunk
            else:
                # Uniformly strided chunk: frames [skip + t, upper) step stride.
                upper_bound = min(skip + self._t + self.chunksize * self.stride, traj_len)
                slice_x = slice(skip + self._t, upper_bound, self.stride)
                chunk = traj[slice_x]
                self._t = upper_bound
                if upper_bound >= traj_len:
                    # Trajectory exhausted; advance to the next one.
                    self._itraj += 1
                    self._t = 0
                return chunk
| gph82/PyEMMA | pyemma/coordinates/data/data_in_memory.py | Python | lgpl-3.0 | 11,501 |
"""
Primitive replacement for requests to avoid extra dependency.
Avoids use of urllib2 due to lack of SNI support.
"""
from __future__ import absolute_import, print_function
import json
try:
from urllib import urlencode
except ImportError:
# noinspection PyCompatibility, PyUnresolvedReferences
from urllib.parse import urlencode # pylint: disable=locally-disabled, import-error, no-name-in-module
try:
# noinspection PyCompatibility
from urlparse import urlparse
except ImportError:
# noinspection PyCompatibility, PyUnresolvedReferences
from urllib.parse import urlparse # pylint: disable=locally-disabled, ungrouped-imports
from lib.util import (
CommonConfig,
ApplicationError,
run_command,
)
class HttpClient(object):
    """Make HTTP requests by shelling out to the ``curl`` command."""

    def __init__(self, args, always=False):
        """
        :type args: CommonConfig
        :type always: bool
        """
        self.args = args
        self.always = always

    def get(self, url):
        """
        :type url: str
        :rtype: HttpResponse
        """
        return self.request('GET', url)

    def delete(self, url):
        """
        :type url: str
        :rtype: HttpResponse
        """
        return self.request('DELETE', url)

    def put(self, url, data=None, headers=None):
        """
        :type url: str
        :type data: str | None
        :type headers: dict[str, str] | None
        :rtype: HttpResponse
        """
        return self.request('PUT', url, data, headers)

    def request(self, method, url, data=None, headers=None):
        """Run curl and parse its ``-i`` output into an HttpResponse.

        :type method: str
        :type url: str
        :type data: str | None
        :type headers: dict[str, str] | None
        :rtype: HttpResponse
        """
        cmd = ['curl', '-s', '-S', '-i', '-X', method]

        # BUG FIX: work on a copy so the caller's headers dict is not mutated.
        headers = dict(headers) if headers else {}
        headers['Expect'] = ''  # don't send expect continue header

        for name, value in headers.items():
            cmd += ['-H', '%s: %s' % (name, value)]

        if data is not None:
            cmd += ['-d', data]

        cmd += [url]

        stdout, _ = run_command(self.args, cmd, capture=True, always=self.always, cmd_verbosity=2)

        if self.args.explain and not self.always:
            # Explain mode did not actually run curl; fake a success.
            return HttpResponse(200, '')

        # Split the header block from the body, then take the status code
        # from the first status line.
        header, body = stdout.split('\r\n\r\n', 1)

        response_headers = header.split('\r\n')
        first_line = response_headers[0]
        http_response = first_line.split(' ')
        status_code = int(http_response[1])

        return HttpResponse(status_code, body)
class HttpResponse(object):
    """Status code and body captured from a curl invocation."""

    def __init__(self, status_code, response):
        """
        :type status_code: int
        :type response: str
        """
        self.status_code = status_code
        self.response = response

    def json(self):
        """Parse the response body as JSON and return the resulting object.

        :rtype: any
        """
        try:
            parsed = json.loads(self.response)
        except ValueError:
            raise HttpError(self.status_code, 'Cannot parse response as JSON:\n%s' % self.response)
        return parsed
class HttpError(ApplicationError):
    """HTTP response as an error."""

    def __init__(self, status, message):
        """
        :type status: int
        :type message: str
        """
        # Message is prefixed with the status so logs show "404: ..." etc.
        super(HttpError, self).__init__('%s: %s' % (status, message))
        self.status = status
| andreaso/ansible | test/runner/lib/http.py | Python | gpl-3.0 | 3,424 |
# coding=utf-8
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
import pytest
import functools
from devtools_testutils import recorded_by_proxy, set_bodiless_matcher
from azure.core.credentials import AzureKeyCredential
from azure.ai.formrecognizer import DocumentAnalysisClient, DocumentModelAdministrationClient, AnalyzeResult
from azure.ai.formrecognizer._generated.v2022_01_30_preview.models import AnalyzeResultOperation
from testcase import FormRecognizerTest
from preparers import GlobalClientPreparer as _GlobalClientPreparer
from preparers import FormRecognizerPreparer
DocumentModelAdministrationClientPreparer = functools.partial(_GlobalClientPreparer, DocumentModelAdministrationClient)
class TestDACAnalyzeCustomModelFromUrl(FormRecognizerTest):
    """Tests for analyzing documents (by URL) with custom-built models."""

    def teardown(self):
        self.sleep(4)

    def _assert_analyze_result_consistent(self, responses):
        """Shared checks (previously duplicated in every test): the transformed
        AnalyzeResult must mirror the raw service response captured by the
        per-test callback.  ``responses`` is [raw operation, transformed model].
        """
        raw_analyze_result = responses[0].analyze_result
        returned_model = responses[1]

        # Check AnalyzeResult
        assert returned_model.model_id == raw_analyze_result.model_id
        assert returned_model.api_version == raw_analyze_result.api_version
        assert returned_model.content == raw_analyze_result.content

        self.assertDocumentPagesTransformCorrect(returned_model.pages, raw_analyze_result.pages)
        self.assertDocumentTransformCorrect(returned_model.documents, raw_analyze_result.documents)
        self.assertDocumentTablesTransformCorrect(returned_model.tables, raw_analyze_result.tables)
        self.assertDocumentKeyValuePairsTransformCorrect(returned_model.key_value_pairs, raw_analyze_result.key_value_pairs)
        self.assertDocumentEntitiesTransformCorrect(returned_model.entities, raw_analyze_result.entities)
        self.assertDocumentStylesTransformCorrect(returned_model.styles, raw_analyze_result.styles)

        # check page range
        assert len(raw_analyze_result.pages) == len(returned_model.pages)

    @FormRecognizerPreparer()
    def test_document_analysis_none_model(self, **kwargs):
        formrecognizer_test_endpoint = kwargs.pop("formrecognizer_test_endpoint")
        formrecognizer_test_api_key = kwargs.pop("formrecognizer_test_api_key")
        client = DocumentAnalysisClient(formrecognizer_test_endpoint, AzureKeyCredential(formrecognizer_test_api_key))
        with pytest.raises(ValueError):
            client.begin_analyze_document_from_url(model=None, document_url="https://badurl.jpg")

    @FormRecognizerPreparer()
    def test_document_analysis_empty_model_id(self, **kwargs):
        formrecognizer_test_endpoint = kwargs.pop("formrecognizer_test_endpoint")
        formrecognizer_test_api_key = kwargs.pop("formrecognizer_test_api_key")
        client = DocumentAnalysisClient(formrecognizer_test_endpoint, AzureKeyCredential(formrecognizer_test_api_key))
        with pytest.raises(ValueError):
            client.begin_analyze_document_from_url(model="", document_url="https://badurl.jpg")

    @FormRecognizerPreparer()
    @DocumentModelAdministrationClientPreparer()
    @recorded_by_proxy
    def test_custom_document_selection_mark(self, client, formrecognizer_selection_mark_storage_container_sas_url, **kwargs):
        set_bodiless_matcher()
        da_client = client.get_document_analysis_client()

        poller = client.begin_build_model(formrecognizer_selection_mark_storage_container_sas_url, "template")
        model = poller.result()

        responses = []

        def callback(raw_response, _, headers):
            analyze_result = da_client._deserialize(AnalyzeResultOperation, raw_response)
            document = AnalyzeResult._from_generated(analyze_result.analyze_result)
            responses.append(analyze_result)
            responses.append(document)

        poller = da_client.begin_analyze_document_from_url(
            model=model.model_id,
            document_url=self.selection_mark_url_pdf,
            cls=callback
        )
        document = poller.result()
        self._assert_analyze_result_consistent(responses)

    @FormRecognizerPreparer()
    @DocumentModelAdministrationClientPreparer()
    @recorded_by_proxy
    def test_label_tables_variable_rows(self, client, formrecognizer_table_variable_rows_container_sas_url, **kwargs):
        set_bodiless_matcher()
        da_client = client.get_document_analysis_client()

        build_poller = client.begin_build_model(formrecognizer_table_variable_rows_container_sas_url, "template")
        model = build_poller.result()

        responses = []

        def callback(raw_response, _, headers):
            analyze_result = da_client._deserialize(AnalyzeResultOperation, raw_response)
            document = AnalyzeResult._from_generated(analyze_result.analyze_result)
            responses.append(analyze_result)
            responses.append(document)

        poller = da_client.begin_analyze_document_from_url(
            model.model_id,
            self.label_table_variable_row_url_pdf,
            cls=callback
        )
        document = poller.result()
        self._assert_analyze_result_consistent(responses)

    @FormRecognizerPreparer()
    @DocumentModelAdministrationClientPreparer()
    @recorded_by_proxy
    def test_label_tables_fixed_rows(self, client, formrecognizer_table_fixed_rows_container_sas_url, **kwargs):
        set_bodiless_matcher()
        da_client = client.get_document_analysis_client()

        build_poller = client.begin_build_model(formrecognizer_table_fixed_rows_container_sas_url, "template")
        model = build_poller.result()

        responses = []

        def callback(raw_response, _, headers):
            analyze_result = da_client._deserialize(AnalyzeResultOperation, raw_response)
            document = AnalyzeResult._from_generated(analyze_result.analyze_result)
            responses.append(analyze_result)
            responses.append(document)

        poller = da_client.begin_analyze_document_from_url(
            model.model_id,
            self.label_table_fixed_row_url_pdf,
            cls=callback
        )
        document = poller.result()
        self._assert_analyze_result_consistent(responses)
| Azure/azure-sdk-for-python | sdk/formrecognizer/azure-ai-formrecognizer/tests/test_dac_analyze_custom_model_from_url.py | Python | mit | 8,155 |
from __future__ import absolute_import
import re
import string
from six import StringIO
try:
    from string import maketrans

    def bytes(chars):
        # Python 2 path: emulate the py3 ``bytes(iterable_of_ints)``
        # constructor by joining the corresponding chars.  Intentionally
        # shadows the builtin so the TRANSLATE construction below works on
        # both major versions.
        return ''.join(map(chr, chars))

except ImportError:
    # Python 3 path: ``string.maketrans`` is gone; the classmethod replaces it.
    maketrans = bytes.maketrans

from fulltext.util import BaseBackend

BUFFER_MAX = 1024 * 1024  # read input in 1 MiB chunks

# Translate printable chars to themselves and anything else to a space.
TRANSLATE = (
    bytes([i for i in range(256)]),
    bytes([i if chr(i) in string.printable else 32 for i in range(256)])
)
TRANSLATE = maketrans(*TRANSLATE)

# I wish Python re module had a punctuation char class!
# https://pythex.org/
STRIP_PUNCTUATION = re.compile(
    r'\b(\w*[!"#$%&\'()*+,-.\\/:;<=>?@[\]^_`{|}~]{2,}\w+)|((?<!\w)[!"#$%&\'()*'
    r'+,-.\\/:;<=>?@[\]^_`{|}~0-9]+)|((?<!\w)[!"#$%&\'()*+,-.\\/:;<=>?@[\]^_`{'
    r'|}~0-9])[^\w]*\b')
class Backend(BaseBackend):
    """Extract printable text from arbitrary binary streams."""

    def handle_fobj(self, f):
        """Read *f* in chunks and return the printable text found in it."""
        pieces = []
        chunk = f.read(BUFFER_MAX)
        while chunk:
            # Emulate the `strings` CLI tool: keep printable bytes, blank the rest.
            cleaned = chunk.translate(TRANSLATE).decode('ascii', 'ignore')
            # Remove any "words" that consist mainly of punctuation.
            pieces.append(STRIP_PUNCTUATION.sub(' ', cleaned))
            chunk = f.read(BUFFER_MAX)
        return ''.join(pieces)
| btimby/fulltext | fulltext/backends/__bin.py | Python | mit | 1,365 |
'''
This script splits the files into train/dev and held-out test set,
depending on the CSV file that has the 'fold' information for each
document.
'''
import shutil
import sys
import csv
import os
inDir = sys.argv[1]
csvPath = sys.argv[2]
outDirTrain = sys.argv[3]
outDirTest = sys.argv[4]
fileEnding = sys.argv[5]

# Map document name (without its ".txt" ending) -> fold ("train" or "test").
splitInfo = {}
with open(csvPath) as f:
    for row in csv.reader(f, delimiter="\t"):
        splitInfo[row[0][:-4]] = row[3]  # removing ".txt" ending

# Start from empty output directories.
if os.path.exists(outDirTrain):
    shutil.rmtree(outDirTrain)
if os.path.exists(outDirTest):
    shutil.rmtree(outDirTest)
os.makedirs(outDirTrain)
os.makedirs(outDirTest)

# Copy each input document into the directory of its fold.  Files without an
# entry in the CSV are skipped instead of raising KeyError (previous
# behaviour); the unused per-file id extraction was removed.
outDirs = {"train": outDirTrain, "test": outDirTest}
for filename in os.listdir(inDir):
    if not filename.endswith(fileEnding):
        continue
    basename = filename[:-1 - len(fileEnding)]  # strip ".<fileEnding>"
    fold = splitInfo.get(basename)
    if fold in outDirs:
        shutil.copyfile(os.path.join(inDir, filename),
                        os.path.join(outDirs[fold], filename))

# copy type system file (if using this for xmi)
if fileEnding == "xmi":
    shutil.copyfile(os.path.join(inDir, "typesystem.xml"), os.path.join(outDirTrain, "typesystem.xml"))
    shutil.copyfile(os.path.join(inDir, "typesystem.xml"), os.path.join(outDirTest, "typesystem.xml"))
| annefried/sitent | de.uni-saarland.coli.sitent/python-scripts/split.py | Python | apache-2.0 | 1,493 |
from hashlib import md5
import datetime, calendar
from flask import Flask, request, session, url_for, redirect, render_template, abort, g, flash
from werkzeug import check_password_hash, generate_password_hash
from google.appengine.ext import ndb
# create our little application :)
app = Flask(__name__)
# Load defaults from this module, then optionally override them from the file
# named by the FLASK_SETTINGS environment variable (silently ignored if unset).
app.config.from_object(__name__)
app.config.from_envvar('FLASK_SETTINGS', silent=True)
# Set up schemes
class User(ndb.Model):
    """Datastore model for a registered user."""
    username = ndb.StringProperty(required=True)
    email = ndb.StringProperty(required=True)
    pw_hash = ndb.StringProperty(required=True)  # werkzeug password hash
    # Datastore ids of the users this user follows (repeated property).
    following = ndb.IntegerProperty(repeated=True)
    start_date = ndb.DateTimeProperty(auto_now_add=True)
class Message(ndb.Model):
    """Datastore model for a posted message.

    Author's email and username are denormalized onto the message so the
    timeline templates can render without extra User lookups.
    """
    author = ndb.IntegerProperty(required=True)  # datastore id of the User
    text = ndb.TextProperty(required=True)
    pub_date = ndb.DateTimeProperty(auto_now_add=True)
    email = ndb.StringProperty(required=True)
    username = ndb.StringProperty(required=True)
def get_user_id(u):
    """Return the datastore id for username *u*, or ``None`` if unknown."""
    match = User.query(User.username == u).get()
    return match.key.id() if match else None
def format_datetime(d):
    """Format a timestamp for display."""
    # Round-trip through a POSIX timestamp (treating *d* as UTC) so the
    # displayed value is UTC-based; sub-minute precision is dropped by the
    # format string anyway.
    seconds = calendar.timegm(d.timetuple())
    as_utc = datetime.datetime.utcfromtimestamp(seconds)
    return as_utc.strftime('%Y-%m-%d @ %H:%M')
def gravatar_url(email, size=80):
    """Return the gravatar image for the given email address."""
    # Gravatar hashes the trimmed, lower-cased address.
    normalized = email.strip().lower().encode('utf-8')
    digest = md5(normalized).hexdigest()
    return 'http://www.gravatar.com/avatar/%s?d=identicon&s=%d' % (digest, size)
@app.before_request
def before_request():
    """Load the logged-in user (if any) onto ``flask.g`` before each request."""
    g.user = None
    if 'user_id' in session:
        g.user = User.get_by_id(session['user_id'])
@app.route('/')
def timeline():
    """Shows a users timeline or if no user is logged in it will
    redirect to the public timeline.  This timeline shows the user's
    messages as well as all the messages of followed users.
    """
    if not g.user:
        return redirect(url_for('public_timeline'))
    cid = session['user_id']
    f = User.get_by_id(cid).following
    # ``following`` is a repeated property; the isinstance guard is defensive
    # in case a scalar ever ends up stored there.
    ids = f if isinstance(f, list) else [f]
    # NOTE(review): when f is the entity's list, this append also mutates the
    # entity's in-memory ``following`` (not persisted here) -- confirm intended.
    ids.append(cid)
    # Latest 30 messages from the user plus everyone they follow.
    messages = Message.query(Message.author.IN(ids)).order(-Message.pub_date).fetch(30)
    return render_template('timeline.html', messages = messages)
@app.route('/public')
def public_timeline():
    """Displays the latest messages of all users."""
    # Latest 30 messages regardless of author.
    messages = Message.query().order(-Message.pub_date).fetch(30)
    return render_template('timeline.html', messages = messages)
@app.route('/<username>')
def user_timeline(username):
    """Display's a users tweets."""
    profile_user = User.query(User.username == username).get()
    if profile_user is None:
        abort(404)
    pid = profile_user.key.id()
    followed = False
    if g.user:
        cid = session['user_id']
        # follow_user treats a None `following` as possible, so guard the
        # membership test the same way (old code would raise TypeError).
        followed = pid in (User.get_by_id(cid).following or [])
    messages = Message.query(Message.author == pid).order(-Message.pub_date).fetch(30)
    return render_template('timeline.html', messages=messages,
                           followed=followed, profile_user=profile_user)
@app.route('/<username>/follow')
def follow_user(username):
    """Adds the current user as follower of the given user."""
    if not g.user:
        abort(401)  # user has not logged in yet
    cid = session['user_id']
    whom_id = get_user_id(username)
    if whom_id is None:
        abort(404)
    u = User.get_by_id(cid)
    if u.following is None:
        u.following = [whom_id]
        u.put()
    elif whom_id not in u.following:
        # Only append when not already following; the old code appended
        # blindly, accumulating duplicate entries on repeated requests.
        u.following.append(whom_id)
        u.put()
    flash('You are now following "%s"' % username)
    return redirect(url_for('user_timeline', username=username))
@app.route('/<username>/unfollow')
def unfollow_user(username):
    """Removes the current user as follower of the given user."""
    if not g.user:
        abort(401)
    cid = session['user_id']
    whom_id = get_user_id(username)
    if whom_id is None:
        abort(404)
    u = User.get_by_id(cid)
    # Guard the removal: list.remove() raises ValueError (an HTTP 500 in the
    # old code) when unfollowing someone who was never followed.
    if u.following and whom_id in u.following:
        u.following.remove(whom_id)
        u.put()
    flash('You are no longer following "%s"' % username)
    return redirect(url_for('user_timeline', username=username))
@app.route('/add_message', methods=['POST'])
def add_message():
    """Registers a new message for the user."""
    if 'user_id' not in session:
        abort(401)
    author_id = session['user_id']
    text = request.form['text']
    if text:
        author = User.get_by_id(author_id)
        # Denormalize the author's email/username onto the message.
        Message(author=author_id, text=text,
                email=author.email, username=author.username).put()
        flash('Your message was recorded')
    return redirect(url_for('timeline'))
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Logs the user in."""
    if g.user:
        return redirect(url_for('timeline'))
    error = None
    if request.method == 'POST':
        user = User.query(User.username == request.form['username']).get()
        if user is None:
            error = 'Invalid username'
        elif not check_password_hash(user.pw_hash, request.form['password']):
            error = 'Invalid password'
        else:
            flash('You were logged in')
            # Reuse the entity already fetched above instead of issuing a
            # second datastore query via get_user_id().
            session['user_id'] = user.key.id()
            return redirect(url_for('timeline'))
    return render_template('login.html', error=error)
@app.route('/register', methods=['GET', 'POST'])
def register():
    """Registers the user."""
    if g.user:
        return redirect(url_for('timeline'))
    error = None
    if request.method == 'POST':
        form = request.form
        # Validation cascade: first failing check wins.
        if not form['username']:
            error = 'You have to enter a username'
        elif not form['email'] or '@' not in form['email']:
            error = 'You have to enter a valid email address'
        elif not form['password']:
            error = 'You have to enter a password'
        elif form['password'] != form['password2']:
            error = 'The two passwords do not match'
        elif get_user_id(form['username']) is not None:
            error = 'The username is already taken'
        else:
            User(username=form['username'], email=form['email'],
                 pw_hash=generate_password_hash(form['password'])).put()
            flash('You were successfully registered and can login now')
            return redirect(url_for('login'))
    return render_template('register.html', error=error)
@app.route('/logout')
def logout():
    """Logs the user out."""
    session.pop('user_id', None)
    flash('You were logged out')
    return redirect(url_for('public_timeline'))
# add some filters to jinja
app.jinja_env.filters['datetimeformat'] = format_datetime
app.jinja_env.filters['gravatar'] = gravatar_url
# Run the Flask development server when executed directly.
if __name__ == '__main__':
    app.run()
| dapangmao/minitwit | main.py | Python | apache-2.0 | 6,806 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#Library of common functions
import json, urllib2, re, time, datetime, sys, cgi, os, string, random, math
import locale
import base64
import hashlib
import sqlite3
from tempfile import TemporaryFile
from email.mime.text import MIMEText
from werkzeug.security import generate_password_hash, \
check_password_hash
#to enable debugging
import cgitb
# cgitb.enable()
# ###############################################################################
# ###############################################################################
#
# SQL FUNCTIONS
#
# ###############################################################################
# ###############################################################################
# Open database connection
def sql_open_db(in_db=''):
    """Open a SQLite connection to *in_db* under the app's data directory.

    Returns the open connection object, or False when the database cannot
    be opened (callers test the return for truthiness).
    """
    # Hard-coded data directory on the AWS host; the file name is appended.
    db_host = "/home/ubuntu/..../"
    db_host_db = db_host + in_db
    try:
        # sqlite3 replaces the earlier hosted-MySQL backend.
        db = sqlite3.connect(db_host_db)
        cur = db.cursor()
        # Cheap sanity query to confirm the connection actually works.
        cur.execute("SELECT SQLITE_VERSION()")
        cur.fetchone()
        return db
    except sqlite3.Error:
        # Narrowed from the old bare except: only database errors map to False.
        return False
def sql_create_table():
    """(Re)create the FM_MODEL table, dropping any existing copy first.

    Returns True on success, False on any failure (module convention).
    """
    try:
        # Every model query in this module targets 'fm_model.db'; the old
        # code opened the default database here, creating the table in the
        # wrong file.
        db = sql_open_db('fm_model.db')
        cursor = db.cursor()
    except Exception:
        return False
    try:
        cursor.execute("DROP TABLE IF EXISTS FM_MODEL")
    except Exception:
        return False
    # NOTE: the old code dropped FM_MODEL but created FM_MODELS; all DML in
    # this module uses FM_MODEL, so that name is used for both statements.
    sql = """CREATE TABLE FM_MODEL (
             USERNAME  CHAR(50) NOT NULL,
             MODELNAME  CHAR(112),
             MODELDATA  TEXT,
             LASTSAVED TEXT,
             MODELTYPE CHAR(50)
             )"""
    try:
        cursor.execute(sql)
        # disconnect from server
        db.close()
        return True
    except Exception:
        return False
def sql_add_user(u_info):
    """Insert one user row.

    u_info: [username, password, email, regtype].
    Returns True on success, False otherwise.
    """
    try:
        db = sql_open_db()
        cursor = db.cursor()
    except Exception:
        return False
    # Parameterized statement: the old string concatenation was open to SQL
    # injection and broke on any value containing a quote character.
    sql = 'INSERT INTO FM_USERS(USERNAME, PASSWORD, EMAIL, REGTYPE) VALUES (?,?,?,?)'
    try:
        cursor.execute(sql, (u_info[0], u_info[1], u_info[2], u_info[3]))
        db.commit()
        db.close()
        return True
    except Exception:
        # Rollback in case there is any error
        db.rollback()
        return False
def sql_get_userinfo(u_name):
    """Return all FM_USERS rows for *u_name* (list of tuples), or False."""
    try:
        db = sql_open_db()
        cursor = db.cursor()
    except Exception:
        return False
    # Parameterized: the old concatenated string was injectable.
    sql = "SELECT * FROM FM_USERS WHERE USERNAME = ?"
    try:
        cursor.execute(sql, (u_name,))
        results = cursor.fetchall()
        db.close()
        return results
    except Exception:
        return False
def sql_update_password(u_info):
    """Update a user's password.

    u_info: [username, new_password].  Returns True/False.
    """
    try:
        db = sql_open_db()
        cursor = db.cursor()
    except Exception:
        return False
    # Parameterized: the old concatenated string was injectable.
    sql = "UPDATE FM_USERS SET PASSWORD = ? WHERE USERNAME = ?"
    try:
        cursor.execute(sql, (u_info[1], u_info[0]))
        db.commit()
        db.close()
        return True
    except Exception:
        return False
def sql_update_registration(u_info):
    """Update a user's registration type.

    u_info: [username, regtype].  Returns True/False.
    """
    try:
        db = sql_open_db()
        cursor = db.cursor()
    except Exception:
        return False
    # Parameterized: the old concatenated string was injectable.
    sql = "UPDATE FM_USERS SET REGTYPE = ? WHERE USERNAME = ?"
    try:
        cursor.execute(sql, (u_info[1], u_info[0]))
        db.commit()
        db.close()
        return True
    except Exception:
        return False
def sql_get_modelinfo_all(u_name):
    """Return every saved model row for *u_name*, or False on error."""
    try:
        db = sql_open_db('fm_model.db')
        cursor = db.cursor()
    except Exception:
        return False
    # Parameterized query; the old unused concatenated-string variant (which
    # also targeted the nonexistent table name FM_MODELS) has been removed.
    sql = "SELECT * FROM FM_MODEL WHERE USERNAME = ?"
    try:
        cursor.execute(sql, (u_name,))
        results = cursor.fetchall()
        db.close()
        return results
    except Exception:
        return False
def sql_get_modelinfo(u_name, m_type):
    """Return the saved model rows for *u_name* of type *m_type*, or False."""
    try:
        db = sql_open_db('fm_model.db')
        cursor = db.cursor()
    except Exception:
        return False
    # Parameterized query; dead commented-out injectable variant removed.
    sql = "SELECT * FROM FM_MODEL WHERE USERNAME = ? AND MODELTYPE = ?"
    try:
        cursor.execute(sql, (u_name, m_type))
        results = cursor.fetchall()
        db.close()
        return results
    except Exception:
        return False
def sql_process_model(m_info):
    """Add, update or delete a saved model.

    m_info: [action, username, modelname, modeldata, modeltype] where action
    is ADD / UPDATE / DELETE (case-insensitive; anything else is a no-op
    version query).  The save timestamp is generated here.  Returns True/False.
    """
    try:
        # The read helpers use 'fm_model.db'; the old code opened 'fm_model'
        # here, silently writing to a different database file.
        db = sql_open_db('fm_model.db')
        cursor = db.cursor()
    except Exception:
        return False
    model_data = m_info[3]
    action = m_info[0].upper()
    # The old code called now(), which is undefined in Python (NameError on
    # every ADD/UPDATE); use a datetime-module timestamp instead.
    timestamp = str(datetime.datetime.now())
    if action == "ADD":
        sql3 = 'INSERT INTO FM_MODEL (USERNAME, MODELNAME, MODELDATA, LASTSAVED, MODELTYPE) VALUES (?,?,?,?,?)'
        args = [m_info[1], m_info[2], model_data, timestamp, m_info[4]]
    elif action == "UPDATE":
        # The old UPDATE was missing the comma between the two SET columns.
        sql3 = "UPDATE FM_MODEL SET MODELDATA = ?, LASTSAVED = ? WHERE ( USERNAME = ? AND MODELNAME = ?)"
        args = [model_data, timestamp, m_info[1], m_info[2]]
    elif action == "DELETE":
        sql3 = "DELETE FROM FM_MODEL WHERE (USERNAME = ? AND MODELNAME = ?)"
        args = [m_info[1], m_info[2]]
    else:
        # Unknown action: harmless query so the success path still commits.
        sql3 = "SELECT SQLITE_VERSION()"
        args = []
    try:
        cursor.execute(sql3, args)
        db.commit()
        db.close()
        return True
    except Exception:
        # Rollback in case there is any error
        db.rollback()
        return False
def sql_get_maxmodels(utype=''):
    """Return the per-user saved-model limit.

    *utype* is accepted for future tiered limits but is not yet consulted.
    """
    return 10
| nikb999/Helper-Functions | sql_functions.py | Python | mit | 8,879 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-10-11 16:57
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated: adjusts Attendance.phone_status choice labels
    # (x = not called, n = called/not reached, e = called/reached).
    # The German strings are user-facing values and must stay as-is.
    dependencies = [
        ('staff', '0013_attendance_phone_status'),
    ]
    operations = [
        migrations.AlterField(
            model_name='attendance',
            name='phone_status',
            field=models.CharField(choices=[('x', 'nicht angerufen'), ('n', 'angerufen + nicht erreicht'), ('e', 'angerufen + erreicht')], default='x', max_length=1, verbose_name='Telefoniestatus'),
        ),
    ]
| d120/pyophase | staff/migrations/0014_auto_20161011_1857.py | Python | agpl-3.0 | 612 |
#!/usr/bin/env python
#
###############################################################################
# Copyright (C) 2016 Cortney T. Buffington, N0MJS <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
###############################################################################
#NOTE: This program uses a configuration file specified on the command line
# if none is specified, then dmrlink.cfg in the same directory as this
# file will be tried. Finally, if that does not exist, this process
# will terminate
from __future__ import print_function
# Full imports
import logging
import cPickle as pickle
# Function Imports
from hmac import new as hmac_new
from binascii import b2a_hex as ahex
from binascii import a2b_hex as bhex
from hashlib import sha1
from socket import inet_ntoa as IPAddr
from socket import inet_aton as IPHexStr
from time import time
# Twisted Imports
from twisted.internet.protocol import DatagramProtocol, Factory, Protocol
from twisted.protocols.basic import NetstringReceiver
from twisted.internet import reactor, task
# Imports files in the dmrlink subdirectory (these things shouldn't change often)
from ipsc.ipsc_const import *
from ipsc.ipsc_mask import *
from ipsc.reporting_const import *
# Imports from DMR Utilities package
from dmr_utils.utils import hex_str_2, hex_str_3, hex_str_4, int_id, try_download, mk_id_dict
__author__ = 'Cortney T. Buffington, N0MJS'
__copyright__ = 'Copyright (c) 2013 - 2016 Cortney T. Buffington, N0MJS and the K0USY Group'
__credits__ = 'Adam Fast, KC0YLK; Dave Kierzkowski, KD8EYF; Steve Zingman, N4IRS; Mike Zingman, N4IRR'
__license__ = 'GNU GPLv3'
__maintainer__ = 'Cort Buffington, N0MJS'
__email__ = '[email protected]'
# Global variables used whether we are a module or __main__
systems = {}
# Timed loop used for reporting IPSC status
#
# REPORT BASED ON THE TYPE SELECTED IN THE MAIN CONFIG FILE
def config_reports(_config, _logger, _factory):
    """Set up periodic IPSC status reporting per REPORTS['REPORT_NETWORKS'].

    'PRINT' dumps master/peer tables to stdout on a timer; 'NETWORK' starts
    a TCP reporting server built from *_factory*; any other value is a
    no-op.  Returns the report server instance, or False when none exists.
    """
    if _config['REPORTS']['REPORT_NETWORKS'] == 'PRINT':
        def reporting_loop(_logger):
            _logger.debug('Periodic Reporting Loop Started (PRINT)')
            for system in _config['SYSTEMS']:
                print_master(_config, system)
                print_peer_list(_config, system)
        reporting = task.LoopingCall(reporting_loop, _logger)
        reporting.start(_config['REPORTS']['REPORT_INTERVAL'])
        report_server = False
    elif _config['REPORTS']['REPORT_NETWORKS'] == 'NETWORK':
        def reporting_loop(_logger, _server):
            _logger.debug('Periodic Reporting Loop Started (NETWORK)')
            _server.send_config()
        _logger.info('DMRlink TCP reporting server starting')
        report_server = _factory(_config, _logger)
        report_server.clients = []
        reactor.listenTCP(_config['REPORTS']['REPORT_PORT'], report_server)
        reporting = task.LoopingCall(reporting_loop, _logger, report_server)
        reporting.start(_config['REPORTS']['REPORT_INTERVAL'])
    else:
        # NOTE(review): this branch defines the loop but never schedules it,
        # so no periodic reporting happens -- appears intentional (NULL mode).
        def reporting_loop(_logger):
            _logger.debug('Periodic Reporting Loop Started (NULL)')
        report_server = False
    return report_server
# ID ALIAS CREATION
# Download
def build_aliases(_config, _logger):
    """Optionally refresh, then build, the ID alias dictionaries.

    Returns a 4-tuple (peer_ids, subscriber_ids, talkgroup_ids, local_ids);
    each element is falsy when its source file could not be parsed.
    """
    # Truthiness test instead of the old '== True' comparison.
    if _config['ALIASES']['TRY_DOWNLOAD']:
        # Try updating peer aliases file
        result = try_download(_config['ALIASES']['PATH'], _config['ALIASES']['PEER_FILE'], _config['ALIASES']['PEER_URL'], _config['ALIASES']['STALE_TIME'])
        _logger.info(result)
        # Try updating subscriber aliases file
        result = try_download(_config['ALIASES']['PATH'], _config['ALIASES']['SUBSCRIBER_FILE'], _config['ALIASES']['SUBSCRIBER_URL'], _config['ALIASES']['STALE_TIME'])
        _logger.info(result)
    # Make Dictionaries
    peer_ids = mk_id_dict(_config['ALIASES']['PATH'], _config['ALIASES']['PEER_FILE'])
    if peer_ids:
        _logger.info('ID ALIAS MAPPER: peer_ids dictionary is available')
    subscriber_ids = mk_id_dict(_config['ALIASES']['PATH'], _config['ALIASES']['SUBSCRIBER_FILE'])
    if subscriber_ids:
        _logger.info('ID ALIAS MAPPER: subscriber_ids dictionary is available')
    talkgroup_ids = mk_id_dict(_config['ALIASES']['PATH'], _config['ALIASES']['TGID_FILE'])
    if talkgroup_ids:
        _logger.info('ID ALIAS MAPPER: talkgroup_ids dictionary is available')
    local_ids = mk_id_dict(_config['ALIASES']['PATH'], _config['ALIASES']['LOCAL_FILE'])
    if local_ids:
        _logger.info('ID ALIAS MAPPER: local_ids dictionary is available')
    return (peer_ids, subscriber_ids, talkgroup_ids, local_ids)
# Make the IPSC systems from the config and the class used to build them.
#
def mk_ipsc_systems(_config, _logger, _systems, _ipsc, _report_server):
    """Instantiate an *_ipsc* object for every enabled system and bind its UDP port.

    Returns the (mutated) *_systems* dict keyed by system name.
    """
    for system in _config['SYSTEMS']:
        if _config['SYSTEMS'][system]['LOCAL']['ENABLED']:
            _systems[system] = _ipsc(system, _config, _logger, _report_server)
            reactor.listenUDP(_config['SYSTEMS'][system]['LOCAL']['PORT'], _systems[system], interface=_config['SYSTEMS'][system]['LOCAL']['IP'])
    return _systems
# Process the MODE byte in registration/peer list packets for determining master and peer capabilities
#
def process_mode_byte(_hex_mode):
    """Decode the IPSC MODE byte into operational-status and linking flags."""
    mode = int(ahex(_hex_mode), 16)
    # Operational mode of the peer: both mode bits set is undefined; neither
    # set means the peer has no RF side.
    if mode & PEER_MODE_MSK == PEER_MODE_MSK:
        peer_mode = 'UNKNOWN'
    elif not mode & PEER_MODE_MSK:
        peer_mode = 'NO_RADIO'
    elif mode & PEER_MODE_ANALOG:
        peer_mode = 'ANALOG'
    elif mode & PEER_MODE_DIGITAL:
        peer_mode = 'DIGITAL'
    return {
        'PEER_OP': bool(mode & PEER_OP_MSK),      # peer operational?
        'PEER_MODE': peer_mode,
        'TS_1': bool(mode & IPSC_TS1_MSK),        # timeslot 1 linked?
        'TS_2': bool(mode & IPSC_TS2_MSK)         # timeslot 2 linked?
    }
# Process the FLAGS bytes in registration replies for determining what services are available
#
def process_flags_bytes(_hex_flags):
    """Decode the IPSC service-flag bytes (3 and 4) into capability booleans."""
    flag_byte3 = int(ahex(_hex_flags[2]), 16)
    flag_byte4 = int(ahex(_hex_flags[3]), 16)
    return {
        'CSBK': bool(flag_byte3 & CSBK_MSK),
        'RCM': bool(flag_byte3 & RPT_MON_MSK),
        'CON_APP': bool(flag_byte3 & CON_APP_MSK),
        'XNL_CON': bool(flag_byte4 & XNL_STAT_MSK),
        'XNL_MASTER': bool(flag_byte4 & XNL_MSTR_MSK),
        'XNL_SLAVE': bool(flag_byte4 & XNL_SLAVE_MSK),
        'AUTH': bool(flag_byte4 & PKT_AUTH_MSK),
        'DATA': bool(flag_byte4 & DATA_CALL_MSK),
        'VOICE': bool(flag_byte4 & VOICE_CALL_MSK),
        'MASTER': bool(flag_byte4 & MSTR_PEER_MSK)
    }
# Build a peer list - used when a peer registers, re-regiseters or times out
#
def build_peer_list(_peers):
    """Serialize the peer dict into an IPSC peer-list payload.

    Each entry is radio id + hex IP + hex port + mode byte; the whole list
    is prefixed with its two-byte length.
    """
    # Join once instead of the old quadratic string += accumulation.
    concatenated_peers = ''.join(
        peer + IPHexStr(_peers[peer]['IP']) + hex_str_2(_peers[peer]['PORT']) + _peers[peer]['MODE']
        for peer in _peers
    )
    return hex_str_2(len(concatenated_peers)) + concatenated_peers
# Gratuitous print-out of the peer list.. Pretty much debug stuff.
#
def print_peer_list(_config, _network):
    """Debug helper: dump the peer table for *_network* to stdout."""
    _peers = _config['SYSTEMS'][_network]['PEERS']
    _status = _config['SYSTEMS'][_network]['MASTER']['STATUS']['PEER_LIST']
    #print('Peer List Status for {}: {}' .format(_network, _status))
    # Peer list received but empty: we're the only peer on the network.
    if _status and not _config['SYSTEMS'][_network]['PEERS']:
        print('We are the only peer for: %s' % _network)
        print('')
        return
    print('Peer List for: %s' % _network)
    for peer in _peers.keys():
        _this_peer = _peers[peer]
        _this_peer_stat = _this_peer['STATUS']
        # Mark our own entry in the list.
        if peer == _config['SYSTEMS'][_network]['LOCAL']['RADIO_ID']:
            me = '(self)'
        else:
            me = ''
        print('\tRADIO ID: {} {}' .format(int_id(peer), me))
        print('\t\tIP Address: {}:{}' .format(_this_peer['IP'], _this_peer['PORT']))
        # Mode and flag details are optional, gated by the REPORTS config.
        if _this_peer['MODE_DECODE'] and _config['REPORTS']['PRINT_PEERS_INC_MODE']:
            print('\t\tMode Values:')
            for name, value in _this_peer['MODE_DECODE'].items():
                print('\t\t\t{}: {}' .format(name, value))
        if _this_peer['FLAGS_DECODE'] and _config['REPORTS']['PRINT_PEERS_INC_FLAGS']:
            print('\t\tService Flags:')
            for name, value in _this_peer['FLAGS_DECODE'].items():
                print('\t\t\t{}: {}' .format(name, value))
        print('\t\tStatus: {}, KeepAlives Sent: {}, KeepAlives Outstanding: {}, KeepAlives Missed: {}' .format(_this_peer_stat['CONNECTED'], _this_peer_stat['KEEP_ALIVES_SENT'], _this_peer_stat['KEEP_ALIVES_OUTSTANDING'], _this_peer_stat['KEEP_ALIVES_MISSED']))
        print('\t\t KeepAlives Received: {}, Last KeepAlive Received at: {}' .format(_this_peer_stat['KEEP_ALIVES_RECEIVED'], _this_peer_stat['KEEP_ALIVE_RX_TIME']))
    print('')
# Gratuitous print-out of Master info.. Pretty much debug stuff.
#
def print_master(_config, _network):
    """Debug helper: dump the master's status for *_network* to stdout."""
    if _config['SYSTEMS'][_network]['LOCAL']['MASTER_PEER']:
        print('DMRlink is the Master for %s' % _network)
    else:
        _master = _config['SYSTEMS'][_network]['MASTER']
        print('Master for %s' % _network)
        print('\tRADIO ID: {}' .format(int(ahex(_master['RADIO_ID']), 16)))
        # Mode and flag details are optional, gated by the REPORTS config.
        if _master['MODE_DECODE'] and _config['REPORTS']['PRINT_PEERS_INC_MODE']:
            print('\t\tMode Values:')
            for name, value in _master['MODE_DECODE'].items():
                print('\t\t\t{}: {}' .format(name, value))
        if _master['FLAGS_DECODE'] and _config['REPORTS']['PRINT_PEERS_INC_FLAGS']:
            print('\t\tService Flags:')
            for name, value in _master['FLAGS_DECODE'].items():
                print('\t\t\t{}: {}' .format(name, value))
        print('\t\tStatus: {}, KeepAlives Sent: {}, KeepAlives Outstanding: {}, KeepAlives Missed: {}' .format(_master['STATUS']['CONNECTED'], _master['STATUS']['KEEP_ALIVES_SENT'], _master['STATUS']['KEEP_ALIVES_OUTSTANDING'], _master['STATUS']['KEEP_ALIVES_MISSED']))
        print('\t\t KeepAlives Received: {}, Last KeepAlive Received at: {}' .format(_master['STATUS']['KEEP_ALIVES_RECEIVED'], _master['STATUS']['KEEP_ALIVE_RX_TIME']))
#************************************************
# IPSC CLASS
#************************************************
class IPSC(DatagramProtocol):
    def __init__(self, _name, _config, _logger, _report):
        """Bind one IPSC network instance to its config, logger and reporter.

        Shortcuts into the config dict are taken for frequently-used,
        lengthy paths, and the static control packets for this instance
        are pre-built.
        """
        # Housekeeping: create references to the configuration and status data for this IPSC instance.
        # Some configuration objects that are used frequently and have lengthy names are shortened
        # such as (self._master_sock) expands to (self._config['MASTER']['IP'], self._config['MASTER']['PORT']).
        # Note that many of them reference each other... this is the Pythonic way.
        #
        self._system = _name
        self._CONFIG = _config
        self._logger = _logger
        self._report = _report
        self._config = self._CONFIG['SYSTEMS'][self._system]
        # RCM forwarding is only active when both enabled and a reporter exists.
        self._rcm = self._CONFIG['REPORTS']['REPORT_RCM'] and self._report
        #
        self._local = self._config['LOCAL']
        self._local_id = self._local['RADIO_ID']
        #
        self._master = self._config['MASTER']
        self._master_stat = self._master['STATUS']
        self._master_sock = self._master['IP'], self._master['PORT']
        #
        self._peers = self._config['PEERS']
        #
        # This is a regular list to store peers for the IPSC. At times, parsing a simple list is much less
        # Spendy than iterating a list of dictionaries... Maybe I'll find a better way in the future. Also
        # We have to know when we have a new peer list, so a variable to indicate we do (or don't)
        #
        # NOTE(review): 'args' is assigned but never used -- apparent leftover.
        args = ()
        # Packet 'constructors' - builds the necessary control packets for this IPSC instance.
        # This isn't really necessary for anything other than readability (reduction of code golf)
        #
        # General Items
        self.TS_FLAGS = (self._local['MODE'] + self._local['FLAGS'])
        #
        # Peer Link Maintenance Packets
        self.MASTER_REG_REQ_PKT = (MASTER_REG_REQ + self._local_id + self.TS_FLAGS + IPSC_VER)
        self.MASTER_ALIVE_PKT = (MASTER_ALIVE_REQ + self._local_id + self.TS_FLAGS + IPSC_VER)
        self.PEER_LIST_REQ_PKT = (PEER_LIST_REQ + self._local_id)
        self.PEER_REG_REQ_PKT = (PEER_REG_REQ + self._local_id + IPSC_VER)
        self.PEER_REG_REPLY_PKT = (PEER_REG_REPLY + self._local_id + IPSC_VER)
        self.PEER_ALIVE_REQ_PKT = (PEER_ALIVE_REQ + self._local_id + self.TS_FLAGS)
        self.PEER_ALIVE_REPLY_PKT = (PEER_ALIVE_REPLY + self._local_id + self.TS_FLAGS)
        #
        # Master Link Maintenance Packets
        # self.MASTER_REG_REPLY_PKT is not static and must be generated when it is sent
        self.MASTER_ALIVE_REPLY_PKT = (MASTER_ALIVE_REPLY + self._local_id + self.TS_FLAGS + IPSC_VER)
        self.PEER_LIST_REPLY_PKT = (PEER_LIST_REPLY + self._local_id)
        #
        # General Link Maintenance Packets
        self.DE_REG_REQ_PKT = (DE_REG_REQ + self._local_id)
        self.DE_REG_REPLY_PKT = (DE_REG_REPLY + self._local_id)
        #
        self._logger.info('(%s) IPSC Instance Created: %s, %s:%s', self._system, int_id(self._local['RADIO_ID']), self._local['IP'], self._local['PORT'])
#******************************************************
# SUPPORT FUNCTIONS FOR HANDLING IPSC OPERATIONS
#******************************************************
# Determine if the provided peer ID is valid for the provided network
#
def valid_peer(self, _peerid):
if _peerid in self._peers:
return True
return False
# Determine if the provided master ID is valid for the provided network
#
def valid_master(self, _peerid):
if self._master['RADIO_ID'] == _peerid:
return True
else:
return False
# De-register a peer from an IPSC by removing it's information
#
def de_register_peer(self, _peerid):
# Iterate for the peer in our data
if _peerid in self._peers.keys():
del self._peers[_peerid]
self._logger.info('(%s) Peer De-Registration Requested for: %s', self._system, int_id(_peerid))
return
else:
self._logger.warning('(%s) Peer De-Registration Requested for: %s, but we don\'t have a listing for this peer', self._system, int_id(_peerid))
pass
# De-register ourselves from the IPSC
def de_register_self(self):
self._logger.info('(%s) De-Registering self from the IPSC system', self._system)
de_reg_req_pkt = self.hashed_packet(self._local['AUTH_KEY'], self.DE_REG_REQ_PKT)
self.send_to_ipsc(de_reg_req_pkt)
# Take a received peer list and the network it belongs to, process and populate the
# data structure in my_ipsc_config with the results, and return a simple list of peers.
#
def process_peer_list(self, _data):
# Create a temporary peer list to track who we should have in our list -- used to find old peers we should remove.
_temp_peers = []
# Determine the length of the peer list for the parsing iterator
_peer_list_length = int(ahex(_data[5:7]), 16)
# Record the number of peers in the data structure... we'll use it later (11 bytes per peer entry)
self._local['NUM_PEERS'] = _peer_list_length/11
self._logger.info('(%s) Peer List Received from Master: %s peers in this IPSC', self._system, self._local['NUM_PEERS'])
# Iterate each peer entry in the peer list. Skip the header, then pull the next peer, the next, etc.
for i in range(7, _peer_list_length +7, 11):
# Extract various elements from each entry...
_hex_radio_id = (_data[i:i+4])
_hex_address = (_data[i+4:i+8])
_ip_address = IPAddr(_hex_address)
_hex_port = (_data[i+8:i+10])
_port = int(ahex(_hex_port), 16)
_hex_mode = (_data[i+10:i+11])
# Add this peer to a temporary PeerID list - used to remove any old peers no longer with us
_temp_peers.append(_hex_radio_id)
# This is done elsewhere for the master too, so we use a separate function
_decoded_mode = process_mode_byte(_hex_mode)
# If this entry WAS already in our list, update everything except the stats
# in case this was a re-registration with a different mode, flags, etc.
if _hex_radio_id in self._peers.keys():
self._peers[_hex_radio_id]['IP'] = _ip_address
self._peers[_hex_radio_id]['PORT'] = _port
self._peers[_hex_radio_id]['MODE'] = _hex_mode
self._peers[_hex_radio_id]['MODE_DECODE'] = _decoded_mode
self._peers[_hex_radio_id]['FLAGS'] = ''
self._peers[_hex_radio_id]['FLAGS_DECODE'] = ''
self._logger.debug('(%s) Peer Updated: %s', self._system, self._peers[_hex_radio_id])
# If this entry was NOT already in our list, add it.
if _hex_radio_id not in self._peers.keys():
self._peers[_hex_radio_id] = {
'IP': _ip_address,
'PORT': _port,
'MODE': _hex_mode,
'MODE_DECODE': _decoded_mode,
'FLAGS': '',
'FLAGS_DECODE': '',
'STATUS': {
'CONNECTED': False,
'KEEP_ALIVES_SENT': 0,
'KEEP_ALIVES_MISSED': 0,
'KEEP_ALIVES_OUTSTANDING': 0,
'KEEP_ALIVES_RECEIVED': 0,
'KEEP_ALIVE_RX_TIME': 0
}
}
self._logger.debug('(%s) Peer Added: %s', self._system, self._peers[_hex_radio_id])
# Finally, check to see if there's a peer already in our list that was not in this peer list
# and if so, delete it.
for peer in self._peers.keys():
if peer not in _temp_peers:
self.de_register_peer(peer)
self._logger.warning('(%s) Peer Deleted (not in new peer list): %s', self._system, int_id(peer))
#************************************************
# CALLBACK FUNCTIONS FOR USER PACKET TYPES
#************************************************
# If RCM reporting and reporting is network-based in the global configuration,
# send the RCM packet to the monitoring server
def call_mon_status(self, _data):
self._logger.debug('(%s) Repeater Call Monitor Origin Packet Received: %s', self._system, ahex(_data))
if self._rcm:
self._report.send_rcm(self._system + ','+ _data)
def call_mon_rpt(self, _data):
self._logger.debug('(%s) Repeater Call Monitor Repeating Packet Received: %s', self._system, ahex(_data))
if self._rcm:
self._report.send_rcm(self._system + ',' + _data)
def call_mon_nack(self, _data):
self._logger.debug('(%s) Repeater Call Monitor NACK Packet Received: %s', self._system, ahex(_data))
if self._rcm:
self._report.send_rcm(self._system + ',' + _data)
    def xcmp_xnl(self, _data):
        """Stub handler: XCMP/XNL packets are only logged."""
        self._logger.debug('(%s) XCMP/XNL Packet Received: %s', self._system, ahex(_data))
    def repeater_wake_up(self, _data):
        """Stub handler: repeater wake-up packets are only logged."""
        self._logger.debug('(%s) Repeater Wake-Up Packet Received: %s', self._system, ahex(_data))
    def group_voice(self, _src_sub, _dst_sub, _ts, _end, _peerid, _data):
        """Stub handler: group voice packets are only logged (subclasses override)."""
        self._logger.debug('(%s) Group Voice Packet Received From: %s, IPSC Peer %s, Destination %s', self._system, int_id(_src_sub), int_id(_peerid), int_id(_dst_sub))
    def private_voice(self, _src_sub, _dst_sub, _ts, _end, _peerid, _data):
        """Stub handler: private voice packets are only logged (subclasses override)."""
        self._logger.debug('(%s) Private Voice Packet Received From: %s, IPSC Peer %s, Destination %s', self._system, int_id(_src_sub), int_id(_peerid), int_id(_dst_sub))
    def group_data(self, _src_sub, _dst_sub, _ts, _end, _peerid, _data):
        """Stub handler: group data packets are only logged (subclasses override)."""
        self._logger.debug('(%s) Group Data Packet Received From: %s, IPSC Peer %s, Destination %s', self._system, int_id(_src_sub), int_id(_peerid), int_id(_dst_sub))
    def private_data(self, _src_sub, _dst_sub, _ts, _end, _peerid, _data):
        """Stub handler: private data packets are only logged (subclasses override)."""
        self._logger.debug('(%s) Private Data Packet Received From: %s, IPSC Peer %s, Destination %s', self._system, int_id(_src_sub), int_id(_peerid), int_id(_dst_sub))
    def unknown_message(self, _packettype, _peerid, _data):
        """Log packets whose type byte matched no known handler."""
        self._logger.error('(%s) Unknown Message - Type: %s From: %s Packet: %s', self._system, ahex(_packettype), int_id(_peerid), ahex(_data))
#************************************************
# IPSC SPECIFIC MAINTENANCE FUNCTIONS
#************************************************
# Simple function to send packets - handy to have it all in one place for debugging
#
def send_packet(self, _packet, (_host, _port)):
if self._local['AUTH_ENABLED']:
_hash = bhex((hmac_new(self._local['AUTH_KEY'],_packet,sha1)).hexdigest()[:20])
_packet = _packet + _hash
self.transport.write(_packet, (_host, _port))
# USE THE FOLLOWING ONLY UNDER DIRE CIRCUMSTANCES -- PERFORMANCE IS ADVERSLY AFFECTED!
#self._logger.debug('(%s) TX Packet to %s on port %s: %s', self._system, _host, _port, ahex(_packet))
# Accept a complete packet, ready to be sent, and send it to all active peers + master in an IPSC
#
def send_to_ipsc(self, _packet):
if self._local['AUTH_ENABLED']:
_hash = bhex((hmac_new(self._local['AUTH_KEY'],_packet,sha1)).hexdigest()[:20])
_packet = _packet + _hash
# Send to the Master
if self._master['STATUS']['CONNECTED']:
self.transport.write(_packet, (self._master['IP'], self._master['PORT']))
# Send to each connected Peer
for peer in self._peers.keys():
if self._peers[peer]['STATUS']['CONNECTED']:
self.transport.write(_packet, (self._peers[peer]['IP'], self._peers[peer]['PORT']))
# FUNTIONS FOR IPSC MAINTENANCE ACTIVITIES WE RESPOND TO
# SOMEONE HAS SENT US A KEEP ALIVE - WE MUST ANSWER IT
def peer_alive_req(self, _data, _peerid, _host, _port):
    """Answer a peer's keep-alive request and refresh its mode/flags.

    Keep-alive packets carry the sender's current mode byte and flags, so we
    re-decode and store them on every request.
    """
    _mode = _data[5]
    _flags = _data[6:10]
    _peer = self._peers[_peerid]
    _peer['MODE'] = _mode
    _peer['MODE_DECODE'] = process_mode_byte(_mode)
    _peer['FLAGS'] = _flags
    _peer['FLAGS_DECODE'] = process_flags_bytes(_flags)
    self.send_packet(self.PEER_ALIVE_REPLY_PKT, (_host, _port))
    # Traffic from the peer proves it is alive, so clear our own counter too.
    self.reset_keep_alive(_peerid)
    self._logger.debug('(%s) Keep-Alive reply sent to Peer %s, %s:%s', self._system, int_id(_peerid), _host, _port)
# SOMEONE WANTS TO REGISTER WITH US - WE'RE COOL WITH THAT
def peer_reg_req(self, _peerid, _host, _port):
    # A peer asked to register with us: acknowledge unconditionally with our
    # pre-built registration reply packet.
    self.send_packet(self.PEER_REG_REPLY_PKT, (_host, _port))
    self._logger.info('(%s) Peer Registration Request From: %s, %s:%s', self._system, int_id(_peerid), _host, _port)
# SOMEONE HAS ANSWERED OUR KEEP-ALIVE REQUEST - KEEP TRACK OF IT
def peer_alive_reply(self, _peerid):
    """Record a keep-alive reply from a peer we polled: clear the outstanding
    counter and update receive statistics."""
    self.reset_keep_alive(_peerid)
    _status = self._peers[_peerid]['STATUS']
    _status['KEEP_ALIVES_RECEIVED'] += 1
    _status['KEEP_ALIVE_RX_TIME'] = int(time())
    self._logger.debug('(%s) Keep-Alive Reply (we sent the request) Received from Peer %s, %s:%s', self._system, int_id(_peerid), self._peers[_peerid]['IP'], self._peers[_peerid]['PORT'])
# SOMEONE HAS ANSWERED OUR REQEST TO REGISTER WITH THEM - KEEP TRACK OF IT
def peer_reg_reply(self, _peerid):
    """Mark a peer as connected once it acknowledges our registration request.
    Replies from unknown peers are silently ignored."""
    if _peerid not in self._peers.keys():
        return
    self._peers[_peerid]['STATUS']['CONNECTED'] = True
    self._logger.info('(%s) Registration Reply From: %s, %s:%s', self._system, int_id(_peerid), self._peers[_peerid]['IP'], self._peers[_peerid]['PORT'])
# OUR MASTER HAS ANSWERED OUR KEEP-ALIVE REQUEST - KEEP TRACK OF IT
def master_alive_reply(self, _peerid):
    """Record a keep-alive reply from the master we polled."""
    self.reset_keep_alive(_peerid)
    _stat = self._master['STATUS']
    _stat['KEEP_ALIVES_RECEIVED'] += 1
    _stat['KEEP_ALIVE_RX_TIME'] = int(time())
    self._logger.debug('(%s) Keep-Alive Reply (we sent the request) Received from the Master %s, %s:%s', self._system, int_id(_peerid), self._master['IP'], self._master['PORT'])
# OUR MASTER HAS SENT US A PEER LIST - PROCESS IT
def peer_list_reply(self, _data, _peerid):
    """Process a peer list sent by our master.

    The fixed header is 18 bytes; a longer packet therefore carries at least
    one peer entry worth processing.
    """
    self._master['STATUS']['PEER_LIST'] = True
    if len(_data) > 18:
        self.process_peer_list(_data)
    self._logger.debug('(%s) Peer List Reply Received From Master %s, %s:%s', self._system, int_id(_peerid), self._master['IP'], self._master['PORT'])
# OUR MASTER HAS ANSWERED OUR REQUEST TO REGISTER - LOTS OF INFORMATION TO TRACK
def master_reg_reply(self, _data, _peerid):
    """Record the master's acknowledgement of our registration request.

    Stores the master's identity, mode/flags and the IPSC peer count, then
    marks the master connection as established.
    """
    _mode = _data[5]
    _flags = _data[6:10]
    _num_peers = _data[10:12]
    # Peer count arrives as two raw bytes -> hex string -> int.
    self._local['NUM_PEERS'] = int(ahex(_num_peers), 16)
    _master = self._master
    _master['RADIO_ID'] = _peerid
    _master['MODE'] = _mode
    _master['MODE_DECODE'] = process_mode_byte(_mode)
    _master['FLAGS'] = _flags
    _master['FLAGS_DECODE'] = process_flags_bytes(_flags)
    self._master_stat['CONNECTED'] = True
    self._master_stat['KEEP_ALIVES_OUTSTANDING'] = 0
    self._logger.warning('(%s) Registration response (we requested reg) from the Master: %s, %s:%s (%s peers)', self._system, int_id(_peerid), _master['IP'], _master['PORT'], self._local['NUM_PEERS'])
# WE ARE MASTER AND SOMEONE HAS REQUESTED REGISTRATION FROM US - ANSWER IT
def master_reg_req(self, _data, _peerid, _host, _port):
    """We are the master: answer a peer's registration request and add it.

    Rebuilds the registration reply each time because it embeds the *current*
    peer count, sends it, and records the peer if it is not already known.
    (Removed the no-op '_port = _port' and the redundant '_ip_address' alias
    from the original.)
    """
    _hex_mode = _data[5]
    _hex_flags = _data[6:10]
    _decoded_mode = process_mode_byte(_hex_mode)
    _decoded_flags = process_flags_bytes(_hex_flags)
    # Reply carries the current peer count, so it cannot be pre-built once.
    self.MASTER_REG_REPLY_PKT = (MASTER_REG_REPLY + self._local_id + self.TS_FLAGS + hex_str_2(self._local['NUM_PEERS']) + IPSC_VER)
    self.send_packet(self.MASTER_REG_REPLY_PKT, (_host, _port))
    self._logger.info('(%s) Master Registration Packet Received from peer %s, %s:%s', self._system, int_id(_peerid), _host, _port)
    # If this entry was NOT already in our list, add it.
    if _peerid not in self._peers.keys():
        self._peers[_peerid] = {
            'IP': _host,
            'PORT': _port,
            'MODE': _hex_mode,
            'MODE_DECODE': _decoded_mode,
            'FLAGS': _hex_flags,
            'FLAGS_DECODE': _decoded_flags,
            'STATUS': {
                'CONNECTED': True,
                'KEEP_ALIVES_SENT': 0,
                'KEEP_ALIVES_MISSED': 0,
                'KEEP_ALIVES_OUTSTANDING': 0,
                'KEEP_ALIVES_RECEIVED': 0,
                'KEEP_ALIVE_RX_TIME': int(time())
            }
        }
        self._local['NUM_PEERS'] = len(self._peers)
        self._logger.debug('(%s) Peer Added To Peer List: %s, %s:%s (IPSC now has %s Peers)', self._system, self._peers[_peerid], _host, _port, self._local['NUM_PEERS'])
# WE ARE MASTER AND SOEMONE SENT US A KEEP-ALIVE - ANSWER IT, TRACK IT
def master_alive_req(self, _peerid, _host, _port):
    """We are the master: answer a registered peer's keep-alive and track it.
    Keep-alives from unregistered peers are logged and dropped."""
    if _peerid not in self._peers.keys():
        self._logger.warning('(%s) Master Keep-Alive Request Received from *UNREGISTERED* peer %s, %s:%s', self._system, int_id(_peerid), _host, _port)
        return
    _status = self._peers[_peerid]['STATUS']
    _status['KEEP_ALIVES_RECEIVED'] += 1
    _status['KEEP_ALIVE_RX_TIME'] = int(time())
    self.send_packet(self.MASTER_ALIVE_REPLY_PKT, (_host, _port))
    self._logger.debug('(%s) Master Keep-Alive Request Received from peer %s, %s:%s', self._system, int_id(_peerid), _host, _port)
# WE ARE MASTER AND A PEER HAS REQUESTED A PEER LIST - SEND THEM ONE
def peer_list_req(self, _peerid):
    """We are the master: send the current peer list to a registered peer.
    Requests from unregistered peers are logged and ignored."""
    if _peerid not in self._peers.keys():
        self._logger.warning('(%s) Peer List Request Received from *UNREGISTERED* peer %s', self._system, int_id(_peerid))
        return
    self._logger.debug('(%s) Peer List Request from peer %s', self._system, int_id(_peerid))
    self.send_to_ipsc(self.PEER_LIST_REPLY_PKT + build_peer_list(self._peers))
# Reset the outstanding keep-alive counter for _peerid...
# Used when receiving acks OR when we see traffic from a repeater, since they ignore keep-alives when transmitting
#
def reset_keep_alive(self, _peerid):
    """Zero the outstanding keep-alive counter for a peer (and/or the master).

    Called on keep-alive acks and on any traffic from a repeater, because
    repeaters ignore keep-alives while transmitting.
    """
    if _peerid in self._peers.keys():
        _status = self._peers[_peerid]['STATUS']
        _status['KEEP_ALIVES_OUTSTANDING'] = 0
        _status['KEEP_ALIVE_RX_TIME'] = int(time())
    # Traffic from the master counts for the master's counter as well.
    if _peerid == self._master['RADIO_ID']:
        self._master_stat['KEEP_ALIVES_OUTSTANDING'] = 0
# THE NEXT SECTION DEFINES FUNCTIONS THAT MUST BE DIFFERENT FOR HASHED AND UNHASHED PACKETS
# HASHED MEANS AUTHENTICATED IPSC
# UNHASHED MEANS UNAUTHENTICATED IPSC
# NEXT THREE FUNCITONS ARE FOR AUTHENTICATED PACKETS
# Take a packet to be SENT, calculate auth hash and return the whole thing
#
def hashed_packet(self, _key, _data):
    """Return _data with its 10-byte (20 hex chars) HMAC-SHA1 hash appended."""
    _digest = hmac_new(_key, _data, sha1).hexdigest()
    return _data + bhex(_digest[:20])
# Remove the hash from a packet and return the payload
#
def strip_hash(self, _data):
    """Return the packet payload with the trailing 10-byte auth hash removed."""
    return _data[0:len(_data) - 10]
# Take a RECEIVED packet, calculate the auth hash and verify authenticity
#
def validate_auth(self, _key, _data):
    """Verify the trailing HMAC-SHA1 hash on a received packet.

    Recomputes the hash over the payload (packet minus its last 10 bytes)
    and compares it to the 10 bytes the sender appended. Returns True on a
    match, False otherwise. (Collapsed the original if/True/else/False into
    a direct boolean return.)
    """
    _payload = self.strip_hash(_data)
    _hash = _data[-10:]
    _chk_hash = bhex(hmac_new(_key, _payload, sha1).hexdigest()[:20])
    # NOTE(review): '==' is not a constant-time comparison; consider
    # hmac.compare_digest to harden against timing attacks.
    return _chk_hash == _hash
#************************************************
# TIMED LOOP - CONNECTION MAINTENANCE
#************************************************
# Timed loop initialization (called by the twisted reactor)
#
def startProtocol(self):
    """Twisted entry point: start the periodic connection-maintenance loop.

    Masters and peers run different maintenance routines; both repeat every
    ALIVE_TIMER seconds. The MASTER_PEER flag selects which one runs.
    """
    if self._local['MASTER_PEER']:
        self._master_maintenance = task.LoopingCall(self.master_maintenance_loop)
        self._master_maintenance_loop = self._master_maintenance.start(self._local['ALIVE_TIMER'])
    else:
        self._peer_maintenance = task.LoopingCall(self.peer_maintenance_loop)
        self._peer_maintenance_loop = self._peer_maintenance.start(self._local['ALIVE_TIMER'])
# Timed loop used for IPSC connection Maintenance when we are the MASTER
#
def master_maintenance_loop(self):
    """Periodic task while we are the master: drop peers that went silent.

    A peer that has not sent a keep-alive for more than 120 seconds is
    de-registered, and the updated peer list is broadcast to the IPSC.
    """
    self._logger.debug('(%s) MASTER Connection Maintenance Loop Started', self._system)
    _now = int(time())
    # Copy the key list: de_register_peer mutates self._peers mid-iteration.
    for _peer in list(self._peers.keys()):
        _delta = _now - self._peers[_peer]['STATUS']['KEEP_ALIVE_RX_TIME']
        self._logger.debug('(%s) Time Since Last KeepAlive Request from Peer %s: %s seconds', self._system, int_id(_peer), _delta)
        if _delta > 120:
            self.de_register_peer(_peer)
            self.send_to_ipsc(self.PEER_LIST_REPLY_PKT + build_peer_list(self._peers))
            self._logger.warning('(%s) Timeout Exceeded for Peer %s, De-registering', self._system, int_id(_peer))
# Timed loop used for IPSC connection Maintenance when we are a PEER
#
def peer_maintenance_loop(self):
    """Periodic task while we are a peer: maintain master and peer links.

    Each tick:
      1. Register with the master if not connected; otherwise exchange
         keep-alives with it (de-register after MAX_MISSED misses).
      2. Once connected, request the peer list from the master (or mark it
         done if we are the only peer).
      3. Register with / keep-alive every peer on the list.

    BUGFIX: the 'unknown state' log call had three %s placeholders but only
    passed the socket tuple as one argument, so the message never rendered;
    host and port are now passed separately (and the 'UNKOWN' typo fixed).
    """
    self._logger.debug('(%s) PEER Connection Maintenance Loop Started', self._system)

    # If the master isn't connected, we have to do that before anything else!
    if not self._master_stat['CONNECTED']:
        self.send_packet(self.MASTER_REG_REQ_PKT, self._master_sock)
        self._logger.info('(%s) Registering with the Master: %s:%s', self._system, self._master['IP'], self._master['PORT'])

    # Once the master is connected, we have to send keep-alives.. and make sure we get them back
    elif self._master_stat['CONNECTED']:
        # Send keep-alive to the master
        self.send_packet(self.MASTER_ALIVE_PKT, self._master_sock)
        self._logger.debug('(%s) Keep Alive Sent to the Master: %s, %s:%s', self._system, int_id(self._master['RADIO_ID']), self._master['IP'], self._master['PORT'])
        # If we had a keep-alive outstanding by the time we send another, mark it missed.
        if self._master_stat['KEEP_ALIVES_OUTSTANDING'] > 0:
            self._master_stat['KEEP_ALIVES_MISSED'] += 1
            self._logger.info('(%s) Master Keep-Alive Missed: %s:%s', self._system, self._master['IP'], self._master['PORT'])
        # If we have missed too many keep-alives, de-register the master and start over.
        if self._master_stat['KEEP_ALIVES_OUTSTANDING'] >= self._local['MAX_MISSED']:
            self._master_stat['CONNECTED'] = False
            self._master_stat['KEEP_ALIVES_OUTSTANDING'] = 0
            self._logger.error('(%s) Maximum Master Keep-Alives Missed -- De-registering the Master: %s:%s', self._system, self._master['IP'], self._master['PORT'])
        # Update our stats before we move on...
        self._master_stat['KEEP_ALIVES_SENT'] += 1
        self._master_stat['KEEP_ALIVES_OUTSTANDING'] += 1

    else:
        # This is bad. If we get this message, we need to reset the state and try again.
        self._logger.error('->> (%s) Master in UNKNOWN STATE: %s:%s', self._system, self._master_sock[0], self._master_sock[1])
        self._master_stat['CONNECTED'] = False

    # If the master is connected and we don't have a peer-list yet....
    if (self._master_stat['CONNECTED'] == True) and (self._master_stat['PEER_LIST'] == False):
        # Ask the master for a peer-list
        if self._local['NUM_PEERS']:
            self.send_packet(self.PEER_LIST_REQ_PKT, self._master_sock)
            self._logger.info('(%s), No Peer List - Requesting One From the Master', self._system)
        else:
            self._master_stat['PEER_LIST'] = True
            self._logger.debug('(%s), Skip asking for a Peer List, we are the only Peer', self._system)

    # If we do have a peer-list, we need to register with the peers and send keep-alives...
    if self._master_stat['PEER_LIST']:
        # Iterate the list of peers... so we do this for each one.
        for peer in self._peers.keys():
            # We will show up in the peer list, but shouldn't try to talk to ourselves.
            if peer == self._local_id:
                continue
            # If we haven't registered to a peer, send a registration
            if not self._peers[peer]['STATUS']['CONNECTED']:
                self.send_packet(self.PEER_REG_REQ_PKT, (self._peers[peer]['IP'], self._peers[peer]['PORT']))
                self._logger.info('(%s) Registering with Peer %s, %s:%s', self._system, int_id(peer), self._peers[peer]['IP'], self._peers[peer]['PORT'])
            # If we have registered with the peer, then send a keep-alive
            elif self._peers[peer]['STATUS']['CONNECTED']:
                self.send_packet(self.PEER_ALIVE_REQ_PKT, (self._peers[peer]['IP'], self._peers[peer]['PORT']))
                self._logger.debug('(%s) Keep-Alive Sent to the Peer %s, %s:%s', self._system, int_id(peer), self._peers[peer]['IP'], self._peers[peer]['PORT'])
                # If we have a keep-alive outstanding by the time we send another, mark it missed.
                if self._peers[peer]['STATUS']['KEEP_ALIVES_OUTSTANDING'] > 0:
                    self._peers[peer]['STATUS']['KEEP_ALIVES_MISSED'] += 1
                    self._logger.info('(%s) Peer Keep-Alive Missed for %s, %s:%s', self._system, int_id(peer), self._peers[peer]['IP'], self._peers[peer]['PORT'])
                # If we have missed too many keep-alives, de-register the peer and start over.
                if self._peers[peer]['STATUS']['KEEP_ALIVES_OUTSTANDING'] >= self._local['MAX_MISSED']:
                    self._peers[peer]['STATUS']['CONNECTED'] = False
                    #del peer # Becuase once it's out of the dictionary, you can't use it for anything else.
                    self._logger.warning('(%s) Maximum Peer Keep-Alives Missed -- De-registering the Peer: %s, %s:%s', self._system, int_id(peer), self._peers[peer]['IP'], self._peers[peer]['PORT'])
                # Update our stats before moving on...
                self._peers[peer]['STATUS']['KEEP_ALIVES_SENT'] += 1
                self._peers[peer]['STATUS']['KEEP_ALIVES_OUTSTANDING'] += 1
#************************************************
# MESSAGE RECEIVED - TAKE ACTION
#************************************************
# Actions for received packets by type: For every packet received, there are some things that we need to do:
# Decode some of the info
# Check for auth and authenticate the packet
# Strip the hash from the end... we don't need it anymore
#
# Once they're done, we move on to the processing or callbacks for each packet type.
#
# Callbacks are iterated in the order of "more likely" to "less likely" to reduce processing time
#
def datagramReceived(self, data, (host, port)):
    # Central UDP dispatcher. For every packet: decode the header fields,
    # authenticate (when enabled) and strip the hash, then route by packet
    # type. Callback checks are ordered roughly "most likely" first to
    # minimize per-packet processing time.
    _packettype = data[0:1]
    _peerid = data[1:5]
    _ipsc_seq = data[5:6]   # sequence byte; currently decoded but unused
    # AUTHENTICATE THE PACKET
    if self._local['AUTH_ENABLED']:
        if not self.validate_auth(self._local['AUTH_KEY'], data):
            self._logger.warning('(%s) AuthError: IPSC packet failed authentication. Type %s: Peer: %s, %s:%s', self._system, ahex(_packettype), int_id(_peerid), host, port)
            return
        # REMOVE SHA-1 AUTHENTICATION HASH: WE NO LONGER NEED IT
        else:
            data = self.strip_hash(data)
    # PACKETS THAT WE RECEIVE FROM ANY VALID PEER OR VALID MASTER
    if _packettype in ANY_PEER_REQUIRED:
        # NOTE(review): this condition rejects the packet when BOTH
        # valid_master() and valid_peer() are truthy, which reads inverted
        # relative to the warning text -- verify against the intended
        # "sender must be a known master or peer" check.
        if not(self.valid_master(_peerid) == False or self.valid_peer(_peerid) == False):
            self._logger.warning('(%s) PeerError: Peer not in peer-list: %s, %s:%s', self._system, int_id(_peerid), host, port)
            return
        # ORIGINATED BY SUBSCRIBER UNITS - a.k.a someone transmitted
        if _packettype in USER_PACKETS:
            # Extract IPSC header not already extracted
            _src_sub = data[6:9]
            _dst_sub = data[9:12]
            _call_type = data[12:13]
            _unknown_1 = data[13:17]
            _call_info = int_id(data[17:18])
            # Timeslot and end-of-call flags are packed into one byte.
            _ts = bool(_call_info & TS_CALL_MSK) + 1
            _end = bool(_call_info & END_MSK)
            # Extract RTP Header Fields
            '''
            Coming soon kids!!!
            Looks like version, padding, extention, CSIC, payload type and SSID never change.
            The things we might care about are below.
            _rtp_byte_1 = int_id(data[18:19])
            _rtp_byte_2 = int_id(data[19:20])
            _rtp_seq = int_id(data[20:22])
            _rtp_tmstmp = int_id(data[22:26])
            _rtp_ssid = int_id(data[26:30])
            # Extract RTP Payload Data Fields
            _payload_type = int_id(data[30:31])
            '''
            # User Voice and Data Call Types: any user traffic also proves
            # the sender is alive, so reset its keep-alive counter first.
            if _packettype == GROUP_VOICE:
                self.reset_keep_alive(_peerid)
                self.group_voice(_src_sub, _dst_sub, _ts, _end, _peerid, data)
                return
            elif _packettype == PVT_VOICE:
                self.reset_keep_alive(_peerid)
                self.private_voice(_src_sub, _dst_sub, _ts, _end, _peerid, data)
                return
            elif _packettype == GROUP_DATA:
                self.reset_keep_alive(_peerid)
                self.group_data(_src_sub, _dst_sub, _ts, _end, _peerid, data)
                return
            elif _packettype == PVT_DATA:
                self.reset_keep_alive(_peerid)
                self.private_data(_src_sub, _dst_sub, _ts, _end, _peerid, data)
                return
            return
        # MOTOROLA XCMP/XNL CONTROL PROTOCOL: We don't process these (yet)
        elif _packettype == XCMP_XNL:
            self.xcmp_xnl(data)
            return
        # ORIGINATED BY PEERS, NOT IPSC MAINTENANCE: Call monitoring is all we've found here so far
        elif _packettype == CALL_MON_STATUS:
            self.call_mon_status(data)
            return
        elif _packettype == CALL_MON_RPT:
            self.call_mon_rpt(data)
            return
        elif _packettype == CALL_MON_NACK:
            self.call_mon_nack(data)
            return
        # IPSC CONNECTION MAINTENANCE MESSAGES
        elif _packettype == DE_REG_REQ:
            self.de_register_peer(_peerid)
            self._logger.warning('(%s) Peer De-Registration Request From: %s, %s:%s', self._system, int_id(_peerid), host, port)
            return
        elif _packettype == DE_REG_REPLY:
            self._logger.warning('(%s) Peer De-Registration Reply From: %s, %s:%s', self._system, int_id(_peerid), host, port)
            return
        elif _packettype == RPT_WAKE_UP:
            self.repeater_wake_up(data)
            self._logger.debug('(%s) Repeater Wake-Up Packet From: %s, %s:%s', self._system, int_id(_peerid), host, port)
            return
        return
    # THE FOLLOWING PACKETS ARE RECEIVED ONLY IF WE ARE OPERATING AS A PEER
    # ONLY ACCEPT FROM A PREVIOUSLY VALIDATED PEER
    if _packettype in PEER_REQUIRED:
        if not self.valid_peer(_peerid):
            self._logger.warning('(%s) PeerError: Peer not in peer-list: %s, %s:%s', self._system, int_id(_peerid), host, port)
            return
        # REQUESTS FROM PEERS: WE MUST REPLY IMMEDIATELY FOR IPSC MAINTENANCE
        if _packettype == PEER_ALIVE_REQ:
            self.peer_alive_req(data, _peerid, host, port)
            return
        elif _packettype == PEER_REG_REQ:
            self.peer_reg_req(_peerid, host, port)
            return
        # ANSWERS FROM REQUESTS WE SENT TO PEERS: WE DO NOT REPLY
        elif _packettype == PEER_ALIVE_REPLY:
            self.peer_alive_reply(_peerid)
            return
        elif _packettype == PEER_REG_REPLY:
            self.peer_reg_reply(_peerid)
            return
        return
    # PACKETS ONLY ACCEPTED FROM OUR MASTER
    # PACKETS WE ONLY ACCEPT IF WE HAVE FINISHED REGISTERING WITH OUR MASTER
    if _packettype in MASTER_REQUIRED:
        if not self.valid_master(_peerid):
            self._logger.warning('(%s) MasterError: %s, %s:%s is not the master peer', self._system, int_id(_peerid), host, port)
            return
        # ANSWERS FROM REQUESTS WE SENT TO THE MASTER: WE DO NOT REPLY
        if _packettype == MASTER_ALIVE_REPLY:
            self.master_alive_reply(_peerid)
            return
        elif _packettype == PEER_LIST_REPLY:
            self.peer_list_reply(data, _peerid)
            return
        return
    # THIS MEANS WE HAVE SUCCESSFULLY REGISTERED TO OUR MASTER - RECORD MASTER INFORMATION
    elif _packettype == MASTER_REG_REPLY:
        self.master_reg_reply(data, _peerid)
        return
    # THE FOLLOWING PACKETS ARE RECEIVED ONLLY IF WE ARE OPERATING AS A MASTER
    # REQUESTS FROM PEERS: WE MUST REPLY IMMEDIATELY FOR IPSC MAINTENANCE
    # REQUEST TO REGISTER TO THE IPSC
    elif _packettype == MASTER_REG_REQ:
        self.master_reg_req(data, _peerid, host, port)
        return
    # REQUEST FOR A KEEP-ALIVE REPLY (WE KNOW THE PEER IS STILL ALIVE TOO)
    elif _packettype == MASTER_ALIVE_REQ:
        self.master_alive_req(_peerid, host, port)
        return
    # REQUEST FOR A PEER LIST
    elif _packettype == PEER_LIST_REQ:
        self.peer_list_req(_peerid)
        return
    # PACKET IS OF AN UNKNOWN TYPE. LOG IT AND IDENTTIFY IT!
    else:
        self.unknown_message(_packettype, _peerid, data)
        return
#
# Socket-based reporting section
#
class report(NetstringReceiver):
    """Netstring protocol spoken to DMRlink reporting clients.

    Each connection registers itself on the factory's client list so the
    factory can broadcast to every connected client; it answers CONFIG_REQ
    with the serialized running configuration.
    """
    def __init__(self, factory):
        self._factory = factory

    def connectionMade(self):
        self._factory.clients.append(self)
        self._factory._logger.info('DMRlink reporting client connected: %s', self.transport.getPeer())

    def connectionLost(self, reason):
        self._factory._logger.info('DMRlink reporting client disconnected: %s', self.transport.getPeer())
        self._factory.clients.remove(self)

    def stringReceived(self, data):
        self.process_message(data)

    def process_message(self, _message):
        # First byte of every report message is the opcode.
        opcode = _message[:1]
        if opcode == REPORT_OPCODES['CONFIG_REQ']:
            self._factory._logger.info('DMRlink reporting client sent \'CONFIG_REQ\': %s', self.transport.getPeer())
            # NOTE(review): send_config() is defined on reportFactory, not on
            # this protocol class -- confirm this should be
            # self._factory.send_config().
            self.send_config()
        else:
            # BUGFIX: was a bare print(); route diagnostics through the
            # logger like the rest of the reporting code.
            self._factory._logger.error('DMRlink reporting client sent unknown opcode: %s', self.transport.getPeer())
class reportFactory(Factory):
    """Builds report protocol instances for clients permitted by the
    REPORTS configuration, and broadcasts data to all connected clients."""
    def __init__(self, config, logger):
        self._config = config
        self._logger = logger
        # BUGFIX: the client list is used by report.connectionMade() and
        # send_clients() but was never initialized on this class.
        self.clients = []

    def buildProtocol(self, addr):
        # Only accept connections from whitelisted hosts ('*' allows any).
        allowed = self._config['REPORTS']['REPORT_CLIENTS']
        if addr.host in allowed or '*' in allowed:
            self._logger.debug('Permitting report server connection attempt from: %s:%s', addr.host, addr.port)
            return report(self)
        self._logger.error('Invalid report server connection attempt from: %s:%s', addr.host, addr.port)
        return None

    def send_clients(self, _message):
        """Send one netstring message to every connected report client."""
        for client in self.clients:
            client.sendString(_message)

    def send_config(self):
        """Broadcast the serialized SYSTEMS configuration to all clients."""
        serialized = pickle.dumps(self._config['SYSTEMS'], protocol=pickle.HIGHEST_PROTOCOL)
        self.send_clients(REPORT_OPCODES['CONFIG_SND'] + serialized)

    def send_rcm(self, _data):
        """Broadcast repeater call monitor data to all clients."""
        self.send_clients(REPORT_OPCODES['RCM_SND'] + _data)
#************************************************
# MAIN PROGRAM LOOP STARTS HERE
#************************************************
if __name__ == '__main__':
    # Program entry point: parse CLI options, build config/logging, install
    # signal handlers, then start one IPSC instance per configured system
    # under the twisted reactor.
    import argparse
    import sys
    import os
    import signal
    from ipsc.dmrlink_config import build_config
    from ipsc.dmrlink_log import config_logging
    # Change the current directory to the location of the application
    os.chdir(os.path.dirname(os.path.realpath(sys.argv[0])))
    # CLI argument parser - handles picking up the config file from the command line, and sending a "help" message
    parser = argparse.ArgumentParser()
    parser.add_argument('-c', '--config', action='store', dest='CFG_FILE', help='/full/path/to/config.file (usually dmrlink.cfg)')
    parser.add_argument('-ll', '--log_level', action='store', dest='LOG_LEVEL', help='Override config file logging level.')
    parser.add_argument('-lh', '--log_handle', action='store', dest='LOG_HANDLERS', help='Override config file logging handler.')
    cli_args = parser.parse_args()
    # Default to dmrlink.cfg next to this script when -c is not given.
    if not cli_args.CFG_FILE:
        cli_args.CFG_FILE = os.path.dirname(os.path.abspath(__file__))+'/dmrlink.cfg'
    # Call the external routine to build the configuration dictionary
    CONFIG = build_config(cli_args.CFG_FILE)
    # Call the external routing to start the system logger
    # (CLI flags take precedence over the config file.)
    if cli_args.LOG_LEVEL:
        CONFIG['LOGGER']['LOG_LEVEL'] = cli_args.LOG_LEVEL
    if cli_args.LOG_HANDLERS:
        CONFIG['LOGGER']['LOG_HANDLERS'] = cli_args.LOG_HANDLERS
    logger = config_logging(CONFIG['LOGGER'])
    logger.info('DMRlink \'dmrlink.py\' (c) 2013 - 2017 N0MJS & the K0USY Group - SYSTEM STARTING...')
    # Set signal handers so that we can gracefully exit if need be
    def sig_handler(_signal, _frame):
        # De-register every IPSC system before stopping the reactor.
        logger.info('*** DMRLINK IS TERMINATING WITH SIGNAL %s ***', str(_signal))
        for system in systems:
            systems[system].de_register_self()
        reactor.stop()
    for sig in [signal.SIGTERM, signal.SIGINT, signal.SIGQUIT]:
        signal.signal(sig, sig_handler)
    # INITIALIZE THE REPORTING LOOP
    report_server = config_reports(CONFIG, logger, reportFactory)
    # Build ID Aliases
    peer_ids, subscriber_ids, talkgroup_ids, local_ids = build_aliases(CONFIG, logger)
    # INITIALIZE AN IPSC OBJECT (SELF SUSTAINING) FOR EACH CONFIGRUED IPSC
    # ('systems' is presumably a module-level dict defined earlier in this
    # file -- it is passed in before being assigned here; verify.)
    systems = mk_ipsc_systems(CONFIG, logger, systems, IPSC, report_server)
    # INITIALIZATION COMPLETE -- START THE REACTOR
    reactor.run()
| n0mjs710/DMRlink | dmrlink.py | Python | gpl-3.0 | 52,232 |
import datetime
from django.conf import settings
from django.template.loader import render_to_string
from django.utils.safestring import mark_safe
from corehq.apps.crud.models import BaseAdminHQTabularCRUDManager
from corehq.apps.indicators.utils import get_namespace_name
from dimagi.utils.data.crud import CRUDFormRequestManager
class IndicatorCRUDFormRequestManager(CRUDFormRequestManager):
    """
    Form request manager for Indicator CRUD forms.
    """
    def _get_form(self):
        # Bind the POST data only while a submission is still in flight;
        # after success (or on GET) return a fresh unbound form.
        form_kwargs = {'doc_id': self.doc_id, 'domain': self.request.domain}
        if self.request.method == 'POST' and not self.success:
            return self.form_class(self.request.POST, **form_kwargs)
        return self.form_class(**form_kwargs)
class IndicatorAdminCRUDManager(BaseAdminHQTabularCRUDManager):
    """
    Base CRUD manager for indicator definitions.
    """
    domain = None

    @property
    def properties_in_row(self):
        return ["slug", "namespace", "version", "last_modified"]

    def format_property(self, key, property):
        # Timestamps render as e.g. '01 January 2014'; namespaces render by
        # their human-readable name. Everything else defers to the parent.
        if isinstance(property, datetime.datetime):
            return property.strftime("%d %B %Y")
        if key == "namespace":
            return get_namespace_name(self.document_instance.domain, property)
        return super(IndicatorAdminCRUDManager, self).format_property(key, property)

    def create(self, **kwargs):
        # 'namespace' is positional for increment_or_create_unique, so pull
        # it out of the keyword arguments first.
        namespace = kwargs.pop('namespace')
        self.document_instance = self.document_class.increment_or_create_unique(namespace, self.domain, **kwargs)

    def update(self, **kwargs):
        # update and create behave the same here
        self.create(**kwargs)
class FormLabelIndicatorAdminCRUDManager(IndicatorAdminCRUDManager):
    """CRUD manager for form-label indicators; inserts an xmlns column."""
    @property
    def properties_in_row(self):
        base = super(FormLabelIndicatorAdminCRUDManager, self).properties_in_row
        return base[:2] + ["xmlns"] + base[-2:]
class BaseFormIndicatorAdminCRUDManager(FormLabelIndicatorAdminCRUDManager):
    """Renders the xmlns cell together with its known form label, if any."""
    def format_property(self, key, property):
        if key != "xmlns":
            return super(BaseFormIndicatorAdminCRUDManager, self).format_property(key, property)
        # Imported here (as in the original) to avoid a circular import.
        from corehq.apps.indicators.models import FormLabelIndicatorDefinition
        doc = self.document_instance
        label = FormLabelIndicatorDefinition.get_label_for_xmlns(doc.namespace,
            doc.domain, property)
        return mark_safe(render_to_string("indicators/partials/form_label.html", {
            "label": label,
            "xmlns": property,
        }))
class FormAliasIndicatorAdminCRUDManager(BaseFormIndicatorAdminCRUDManager):
    """Adds the question_id column for form-alias indicators."""
    @property
    def properties_in_row(self):
        base = super(FormAliasIndicatorAdminCRUDManager, self).properties_in_row
        return base[:3] + ["question_id"] + base[-2:]
class CaseDataInFormIndicatorAdminCRUDManager(BaseFormIndicatorAdminCRUDManager):
    """Adds the case_property column for case-data-in-form indicators."""
    @property
    def properties_in_row(self):
        base = super(CaseDataInFormIndicatorAdminCRUDManager, self).properties_in_row
        return base[:3] + ["case_property"] + base[-2:]
class FormDataInCaseAdminCRUDManager(BaseFormIndicatorAdminCRUDManager):
    """Adds case_type/xmlns/question_id columns for form-data-in-case indicators."""
    @property
    def properties_in_row(self):
        base = super(FormDataInCaseAdminCRUDManager, self).properties_in_row
        return base[:2] + ["case_type", "xmlns", "question_id"] + base[-2:]
class BaseDynamicIndicatorCRUDManager(IndicatorAdminCRUDManager):
    """Adds title/description columns shared by dynamic indicators."""
    @property
    def properties_in_row(self):
        base = super(BaseDynamicIndicatorCRUDManager, self).properties_in_row
        return base[:2] + ["title", "description"] + base[-2:]
class CouchIndicatorCRUDManager(BaseDynamicIndicatorCRUDManager):
    """Adds couch-view detail columns and a rendered date-shift summary."""
    @property
    def properties_in_row(self):
        base = super(CouchIndicatorCRUDManager, self).properties_in_row
        return base[:4] + ["couch_view", "indicator_key", "startdate_shift"] + base[-2:]

    def format_property(self, key, property):
        doc = self.document_instance
        if key == "startdate_shift":
            # All the date-shift knobs are summarized in a single cell.
            return mark_safe(render_to_string("indicators/partials/time_shift_summary.html", {
                "startdate_shift": doc.startdate_shift,
                "enddate_shift": doc.enddate_shift,
                "fixed_datespan_days": doc.fixed_datespan_days,
                "fixed_datespan_months": doc.fixed_datespan_months,
            }))
        if key == "indicator_key":
            # NOTE(review): unlike other cells this markup is not wrapped in
            # mark_safe -- confirm downstream escaping is intentional.
            return property or '<span class="label">None</span>'
        return super(CouchIndicatorCRUDManager, self).format_property(key, property)
class CombinedCouchIndicatorCRUDManager(BaseDynamicIndicatorCRUDManager):
    """Adds numerator/denominator slug columns for combined indicators."""
    @property
    def properties_in_row(self):
        base = super(CombinedCouchIndicatorCRUDManager, self).properties_in_row
        return base[:4] + ["numerator_slug", "denominator_slug"] + base[-2:]
| gmimano/commcaretest | corehq/apps/indicators/admin/crud.py | Python | bsd-3-clause | 5,171 |
class Unit(object):
    """Canonical unit strings used to label prediction quantities."""

    # Mass / concentration
    grams = "g"
    milligrams_per_deciliter = "mg/dL"

    # Insulin dosing
    units = "U"
    units_per_hour = "U/hour"

    # Dimensionless
    event = "event"
    percent_of_basal = "percent"
| channemann/openaps-predict | openapscontrib/predict/models.py | Python | mit | 176 |
# Shared Goal Light resources
import time
from gpiozero import LED
# Goal light output on BCM GPIO pin 17 (shared by all callers).
goalLED = LED(17)
def triggerGoalLight(onTime=15):
    """Turn the goal light on for onTime seconds (default 15), then off.

    The LED is switched off in a finally block so an interrupted sleep
    (e.g. KeyboardInterrupt) cannot leave the light stuck on.
    """
    goalLED.on()
    try:
        time.sleep(onTime)
    finally:
        goalLED.off()
| kopertop/pi-goal-light | light.py | Python | unlicense | 168 |
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
__all__ = ('autoclass', 'ensureclass')
from six import with_metaclass
from .jnius import (
JavaClass, MetaJavaClass, JavaMethod, JavaStaticMethod,
JavaField, JavaStaticField, JavaMultipleMethod, find_javaclass,
JavaException
)
class Class(with_metaclass(MetaJavaClass, JavaClass)):
    """Reflection proxy for java.lang.Class.

    Signatures follow the JNI type-descriptor grammar: reference types are
    'Lpkg/Name;' (trailing semicolon required, no commas between parameter
    descriptors). Several descriptors below were malformed and are fixed.
    """
    __javaclass__ = 'java/lang/Class'

    desiredAssertionStatus = JavaMethod('()Z')
    # Fixed: the 3-arg form was '(Ljava/lang/String,Z,Ljava/lang/ClassLoader;)Ljava/langClass;'
    # -- missing ';' terminators, illegal commas, and a nonexistent class name.
    forName = JavaMultipleMethod([
        ('(Ljava/lang/String;ZLjava/lang/ClassLoader;)Ljava/lang/Class;', True, False),
        ('(Ljava/lang/String;)Ljava/lang/Class;', True, False), ])
    getClassLoader = JavaMethod('()Ljava/lang/ClassLoader;')
    getClasses = JavaMethod('()[Ljava/lang/Class;')
    getComponentType = JavaMethod('()Ljava/lang/Class;')
    getConstructor = JavaMethod('([Ljava/lang/Class;)Ljava/lang/reflect/Constructor;')
    getConstructors = JavaMethod('()[Ljava/lang/reflect/Constructor;')
    getDeclaredClasses = JavaMethod('()[Ljava/lang/Class;')
    getDeclaredConstructor = JavaMethod('([Ljava/lang/Class;)Ljava/lang/reflect/Constructor;')
    getDeclaredConstructors = JavaMethod('()[Ljava/lang/reflect/Constructor;')
    getDeclaredField = JavaMethod('(Ljava/lang/String;)Ljava/lang/reflect/Field;')
    getDeclaredFields = JavaMethod('()[Ljava/lang/reflect/Field;')
    # Fixed: missing ';' after the String parameter descriptor.
    getDeclaredMethod = JavaMethod('(Ljava/lang/String;[Ljava/lang/Class;)Ljava/lang/reflect/Method;')
    getDeclaredMethods = JavaMethod('()[Ljava/lang/reflect/Method;')
    getDeclaringClass = JavaMethod('()Ljava/lang/Class;')
    getField = JavaMethod('(Ljava/lang/String;)Ljava/lang/reflect/Field;')
    getFields = JavaMethod('()[Ljava/lang/reflect/Field;')
    getInterfaces = JavaMethod('()[Ljava/lang/Class;')
    # Fixed: missing ';' after the String parameter descriptor.
    getMethod = JavaMethod('(Ljava/lang/String;[Ljava/lang/Class;)Ljava/lang/reflect/Method;')
    getMethods = JavaMethod('()[Ljava/lang/reflect/Method;')
    # Fixed: Class.getModifiers() returns int ('I'), not int[] ('[I').
    getModifiers = JavaMethod('()I')
    getName = JavaMethod('()Ljava/lang/String;')
    getPackage = JavaMethod('()Ljava/lang/Package;')
    getProtectionDomain = JavaMethod('()Ljava/security/ProtectionDomain;')
    getResource = JavaMethod('(Ljava/lang/String;)Ljava/net/URL;')
    getResourceAsStream = JavaMethod('(Ljava/lang/String;)Ljava/io/InputStream;')
    getSigners = JavaMethod('()[Ljava/lang/Object;')
    # Fixed: returns java.lang.Class -- java.lang.reflect.Class does not exist.
    getSuperclass = JavaMethod('()Ljava/lang/Class;')
    isArray = JavaMethod('()Z')
    # Fixed: parameter is java.lang.Class, not java.lang.reflect.Class.
    isAssignableFrom = JavaMethod('(Ljava/lang/Class;)Z')
    isInstance = JavaMethod('(Ljava/lang/Object;)Z')
    isInterface = JavaMethod('()Z')
    isPrimitive = JavaMethod('()Z')
    newInstance = JavaMethod('()Ljava/lang/Object;')
    toString = JavaMethod('()Ljava/lang/String;')
class Object(with_metaclass(MetaJavaClass, JavaClass)):
    """Reflection proxy for java.lang.Object (only the members autoclass needs)."""
    __javaclass__ = 'java/lang/Object'
    getClass = JavaMethod('()Ljava/lang/Class;')
    hashCode = JavaMethod('()I')
class Modifier(with_metaclass(MetaJavaClass, JavaClass)):
    """Reflection proxy for java.lang.reflect.Modifier.

    All members are static predicates taking the int returned by a
    getModifiers() call.
    """
    __javaclass__ = 'java/lang/reflect/Modifier'
    isAbstract = JavaStaticMethod('(I)Z')
    isFinal = JavaStaticMethod('(I)Z')
    isInterface = JavaStaticMethod('(I)Z')
    isNative = JavaStaticMethod('(I)Z')
    isPrivate = JavaStaticMethod('(I)Z')
    isProtected = JavaStaticMethod('(I)Z')
    isPublic = JavaStaticMethod('(I)Z')
    isStatic = JavaStaticMethod('(I)Z')
    isStrict = JavaStaticMethod('(I)Z')
    isSynchronized = JavaStaticMethod('(I)Z')
    isTransient = JavaStaticMethod('(I)Z')
    isVolatile = JavaStaticMethod('(I)Z')
class Method(with_metaclass(MetaJavaClass, JavaClass)):
    """Reflection proxy for java.lang.reflect.Method, used by autoclass to
    enumerate a class's methods and build their JNI signatures."""
    __javaclass__ = 'java/lang/reflect/Method'
    getName = JavaMethod('()Ljava/lang/String;')
    toString = JavaMethod('()Ljava/lang/String;')
    getParameterTypes = JavaMethod('()[Ljava/lang/Class;')
    getReturnType = JavaMethod('()Ljava/lang/Class;')
    getModifiers = JavaMethod('()I')
    isVarArgs = JavaMethod('()Z')
class Field(with_metaclass(MetaJavaClass, JavaClass)):
    """Reflection proxy for java.lang.reflect.Field."""
    __javaclass__ = 'java/lang/reflect/Field'
    getName = JavaMethod('()Ljava/lang/String;')
    toString = JavaMethod('()Ljava/lang/String;')
    getType = JavaMethod('()Ljava/lang/Class;')
    getModifiers = JavaMethod('()I')
class Constructor(with_metaclass(MetaJavaClass, JavaClass)):
    """Reflection proxy for java.lang.reflect.Constructor, used by autoclass
    to build constructor signatures."""
    __javaclass__ = 'java/lang/reflect/Constructor'
    toString = JavaMethod('()Ljava/lang/String;')
    getParameterTypes = JavaMethod('()[Ljava/lang/Class;')
    getModifiers = JavaMethod('()I')
    isVarArgs = JavaMethod('()Z')
def get_signature(cls_tp):
    """Return the JNI type descriptor for a java.lang.Class proxy.

    Arrays already carry their JNI form in getName() (e.g. '[I',
    '[Ljava.lang.String;'); primitives map to their single-letter codes;
    everything else becomes 'Lpkg/Name;'.
    """
    name = cls_tp.getName()
    if name.startswith('['):
        return name.replace('.', '/')
    primitives = {
        'void': 'V', 'boolean': 'Z', 'byte': 'B',
        'char': 'C', 'short': 'S', 'int': 'I',
        'long': 'J', 'float': 'F', 'double': 'D'}
    if name in primitives:
        return primitives[name]
    # don't do it in recursive way for the moment,
    # error on the JNI/android: JNI ERROR (app bug): local reference table
    # overflow (max=512)
    # ensureclass(name)
    return 'L{0};'.format(name.replace('.', '/'))
# Class names already handled by ensureclass(); prevents repeat reflection.
registers = []
def ensureclass(clsname):
    """Make sure a Python proxy class exists for Java class *clsname*."""
    if clsname in registers:
        return
    jniname = clsname.replace('.', '/')
    # Already wrapped by a previous autoclass() call?
    if MetaJavaClass.get_javaclass(jniname):
        return
    # Record the name *before* autoclassing, so a recursive reference to
    # the same class does not loop forever.
    registers.append(clsname)
    autoclass(clsname)
def lower_name(s):
    """Return *s* with only its first character lower-cased ('' stays '')."""
    if not s:
        return ''
    return s[0].lower() + s[1:]
def bean_getter(s):
    """Return True if *s* is a JavaBean-style getter name (getX / isX)."""
    for prefix in ('get', 'is'):
        n = len(prefix)
        if s.startswith(prefix) and len(s) > n and s[n].isupper():
            return True
    return False
def autoclass(clsname):
    """Create (or fetch from the metaclass cache) a Python proxy class for
    the Java class *clsname*.

    Reflects the Java class' constructors, methods and fields and builds a
    MetaJavaClass-based Python class mirroring them. No-argument bean-style
    getters additionally get a read-only Python property (getName -> .name,
    isEmpty -> .empty). java.util.List implementors also get __getitem__
    and __len__ so they behave like Python sequences.

    :raises Exception: if the Java class cannot be found.
    """
    jniname = clsname.replace('.', '/')
    cls = MetaJavaClass.get_javaclass(jniname)
    if cls:
        return cls
    classDict = {}
    # c = Class.forName(clsname)
    c = find_javaclass(clsname)
    if c is None:
        # Report the name that was requested; `c` itself is None here, so
        # formatting `c` into the message (as before) was useless. The old
        # unreachable `return None` after the raise is dropped.
        raise Exception('Java class {0} not found'.format(clsname))
    constructors = []
    for constructor in c.getConstructors():
        sig = '({0})V'.format(
            ''.join([get_signature(x) for x in constructor.getParameterTypes()]))
        constructors.append((sig, constructor.isVarArgs()))
    classDict['__javaconstructor__'] = constructors
    methods = c.getMethods()
    methods_name = [x.getName() for x in methods]
    for index, method in enumerate(methods):
        name = methods_name[index]
        if name in classDict:
            continue
        count = methods_name.count(name)
        # only one method available
        if count == 1:
            static = Modifier.isStatic(method.getModifiers())
            varargs = method.isVarArgs()
            sig = '({0}){1}'.format(
                ''.join([get_signature(x) for x in method.getParameterTypes()]),
                get_signature(method.getReturnType()))
            cls = JavaStaticMethod if static else JavaMethod
            classDict[name] = cls(sig, varargs=varargs)
            if name != 'getClass' and bean_getter(name) and len(method.getParameterTypes()) == 0:
                # Strip the getter prefix to build the property name: 'is'
                # getters are only 2 characters long, so stripping a fixed
                # 3 characters (the old behavior) mangled them
                # (isVarArgs -> 'arargs' instead of 'varArgs').
                lowername = lower_name(name[2 if name.startswith('is') else 3:])
                classDict[lowername] = (lambda n: property(lambda self: getattr(self, n)()))(name)
            continue
        # multiple signatures for the same name: collect them all and let
        # JavaMultipleMethod dispatch on the arguments at call time.
        signatures = []
        for index, subname in enumerate(methods_name):
            if subname != name:
                continue
            method = methods[index]
            sig = '({0}){1}'.format(
                ''.join([get_signature(x) for x in method.getParameterTypes()]),
                get_signature(method.getReturnType()))
            signatures.append((sig, Modifier.isStatic(method.getModifiers()), method.isVarArgs()))
        classDict[name] = JavaMultipleMethod(signatures)

    def _getitem(self, index):
        # Translate Java's IndexOutOfBoundsException into Python's
        # IndexError so for...in iteration over a java.util.List stops.
        try:
            return self.get(index)
        except JavaException as e:
            # initialize the subclass before getting the Class.forName
            # otherwise isInstance does not know of the subclass
            mock_exception_object = autoclass(e.classname)()
            if Class.forName("java.lang.IndexOutOfBoundsException").isInstance(mock_exception_object):
                # python for...in iteration checks for end of list by
                # waiting for IndexError
                raise IndexError()
            else:
                raise

    for iclass in c.getInterfaces():
        if iclass.getName() == 'java.util.List':
            classDict['__getitem__'] = _getitem
            classDict['__len__'] = lambda self: self.size()
            break
    for field in c.getFields():
        static = Modifier.isStatic(field.getModifiers())
        sig = get_signature(field.getType())
        cls = JavaStaticField if static else JavaField
        classDict[field.getName()] = cls(sig)
    classDict['__javaclass__'] = clsname.replace('.', '/')
    return MetaJavaClass.__new__(
        MetaJavaClass,
        clsname,  # .replace('.', '_'),
        (JavaClass, ),
        classDict)
| KyleAMoore/KanjiNani | Android/.buildozer/android/platform/build/dists/KanjiNani/crystax_python/crystax_python/site-packages/jnius/reflect.py | Python | gpl-3.0 | 9,703 |
#! rhf gradient code
"""
This script calculates nuclear gradients of RHF Wavefunction using
gradients of one and two electron integrals obtained from PSI4.
Reference: "Derivative studies in Hartree--Fock and Moller--Plesset theories",
J. A. Pople, R. Krishnan, H. B. Schlegel and J. S. Binkley
DOI: 10.1002/qua.560160825
"""
__authors__ = "Ashutosh Kumar"
__credits__ = ["Ashutosh Kumar"]
__copyright__ = "(c) 2014-2017, The Psi4NumPy Developers"
__license__ = "BSD-3-Clause"
__date__ = "2017-12-17"
# --- SCF setup: run a reference RHF/STO-3G calculation on water ----------
import time
import numpy as np
np.set_printoptions(precision=15, linewidth=200, suppress=True)
import psi4
psi4.set_output_file("output.dat", False)
# Water in a Z-matrix; C1 symmetry because the gradient code below indexes
# orbitals without any symmetry blocking.
mol = psi4.geometry("""
O
H 1 1.1
H 1 1.1 2 104
symmetry c1
""")
psi4.core.set_active_molecule(mol)
options = {'BASIS':'STO-3G', 'SCF_TYPE':'PK',
           'E_CONVERGENCE':1e-10,
           'D_CONVERGENCE':1e-10}
psi4.set_options(options)
rhf_e, wfn = psi4.energy('SCF', return_wfn=True)
# Assuming C1 symmetry
occ = wfn.doccpi()[0]  # number of doubly occupied MOs
nmo = wfn.nmo()        # total number of MOs
C = wfn.Ca_subset("AO", "ALL")
npC = np.asarray(C)
mints = psi4.core.MintsHelper(wfn.basisset())
# Core Hamiltonian = kinetic + nuclear-attraction integrals in the AO basis.
H_ao = np.asarray(mints.ao_kinetic()) + np.asarray(mints.ao_potential())
# Update H, transform to MO basis
H = np.einsum('uj,vi,uv', npC, npC, H_ao)
# Integral generation from Psi4's MintsHelper
MO = np.asarray(mints.mo_eri(C, C, C, C))
# Physicist notation
MO = MO.swapaxes(1,2)
# MO-basis Fock matrix: core Hamiltonian plus Coulomb minus exchange
# contractions over the occupied index m.
F = H + 2.0 * np.einsum('pmqm->pq', MO[:, :occ, :, :occ])
F -= np.einsum('pmmq->pq', MO[:, :occ, :occ, :])
# --- Gradient containers and one-electron-integral (OEI) derivatives -----
natoms = mol.natom()
cart = ['_X', '_Y', '_Z']
oei_dict = {"S" : "OVERLAP", "T" : "KINETIC", "V" : "POTENTIAL"}
deriv1_mat = {}  # Psi4 Matrix derivatives, keyed by integral type + atom
deriv1_np = {}   # same data as numpy arrays, keyed by type + atom + cartesian
Gradient = {}
Gradient["N"] = np.zeros((natoms, 3))
Gradient["S"] = np.zeros((natoms, 3))
Gradient["S'"] = np.zeros((natoms, 3))
Gradient["V"] = np.zeros((natoms, 3))
Gradient["T"] = np.zeros((natoms, 3))
Gradient["J"] = np.zeros((natoms, 3))
Gradient["K"] = np.zeros((natoms, 3))
Gradient["Total"] = np.zeros((natoms, 3))
# Nuclear-repulsion contribution comes directly from Psi4.
Gradient["N"] = np.asarray(mol.nuclear_repulsion_energy_deriv1([0,0,0]))
psi4.core.print_out("\n\n")
Mat = psi4.core.Matrix.from_array(Gradient["N"])
Mat.name = "NUCLEAR GRADIENT"
Mat.print_out()
# 1st Derivative of OEIs
for atom in range(natoms):
    for key in oei_dict:
        deriv1_mat[key + str(atom)] = mints.mo_oei_deriv1(oei_dict[key], atom, C, C)
        for p in range(3):
            map_key = key + str(atom) + cart[p]
            deriv1_np[map_key] = np.asarray(deriv1_mat[key + str(atom)][p])
            if key == "S":
                # Overlap derivatives enter weighted by the occupied-block
                # Fock matrix; the unweighted trace (S') is kept separately
                # for comparison with Psi4's overlap_grad below.
                Gradient[key][atom, p] = -2.0 * np.einsum("ii,ii->", F[:occ,:occ], deriv1_np[map_key][:occ,:occ])
                Gradient["S'"][atom, p] = 2.0 * np.einsum("ii->", deriv1_np[map_key][:occ,:occ]) # For comparison with PSI4's overlap_grad
            else:
                Gradient[key][atom, p] = 2.0 * np.einsum("ii->", deriv1_np[map_key][:occ,:occ])
psi4.core.print_out("\n\n OEI Gradients\n\n")
for key in Gradient:
    Mat = psi4.core.Matrix.from_array(Gradient[key])
    if key in oei_dict:
        Mat.name = oei_dict[key] + " GRADIENT"
        Mat.print_out()
        psi4.core.print_out("\n")
# --- Two-electron-integral (TEI) derivatives and final assembly ----------
Gradient["J"] = np.zeros((natoms, 3))
Gradient["K"] = np.zeros((natoms, 3))
# 1st Derivative of TEIs
for atom in range(natoms):
    string = "TEI" + str(atom)
    deriv1_mat[string] = mints.mo_tei_deriv1(atom, C, C, C, C)
    for p in range(3):
        map_key = string + cart[p]
        deriv1_np[map_key] = np.asarray(deriv1_mat[string][p])
        # Coulomb (iijj) and exchange (ijij) traces over occupied MOs.
        Gradient["J"][atom, p] = 2.0 * np.einsum("iijj->", deriv1_np[map_key][:occ,:occ,:occ,:occ])
        Gradient["K"][atom, p] = -1.0 * np.einsum("ijij->", deriv1_np[map_key][:occ,:occ,:occ,:occ])
psi4.core.print_out("\n\n TEI Gradients\n\n")
JMat = psi4.core.Matrix.from_array(Gradient["J"])
KMat = psi4.core.Matrix.from_array(Gradient["K"])
JMat.name = " COULOMB GRADIENT"
KMat.name = " EXCHANGE GRADIENT"
JMat.print_out()
KMat.print_out()
Gradient["OEI"] = Gradient["S"] + Gradient["V"] + Gradient["T"]
Gradient["TEI"] = Gradient["J"] + Gradient["K"]
Gradient["Total"] = Gradient["OEI"] + Gradient["TEI"] + Gradient["N"]
# PSI4's overlap_grad, kinetic_grad and potential_grad
PSI4_Grad = {}
D = wfn.Da()
D.add(wfn.Db())  # total (alpha + beta) density matrix
PSI4_Grad["S"] = mints.overlap_grad(D)
PSI4_Grad["T"] = mints.kinetic_grad(D)
PSI4_Grad["V"] = mints.potential_grad(D)
# Convert np array into PSI4 Matrix
G_python_S_mat = psi4.core.Matrix.from_array(Gradient["S'"])
G_python_T_mat = psi4.core.Matrix.from_array(Gradient["T"])
G_python_V_mat = psi4.core.Matrix.from_array(Gradient["V"])
# Test OEI gradients with that of PSI4
psi4.compare_matrices(PSI4_Grad["S"], G_python_S_mat, 10, "OVERLAP_GRADIENT_TEST") #TEST
psi4.compare_matrices(PSI4_Grad["T"], G_python_T_mat, 10, "KINETIC_GRADIENT_TEST") #TEST
psi4.compare_matrices(PSI4_Grad["V"], G_python_V_mat, 10, "POTENTIAL_GRADIENT_TEST") #TEST
# PSI4's Total Gradient (hard-coded reference values for this geometry/basis)
Total_G_psi4 = psi4.core.Matrix.from_list([
    [ 0.000000000000, 0.00000000000000, -0.09744143723018],
    [ 0.000000000000, -0.08630009812231, 0.04872071861516],
    [ 0.000000000000, 0.08630009812231, 0.04872071861516],
])
G_python_Total_mat = psi4.core.Matrix.from_array(Gradient["Total"])
psi4.compare_matrices(Total_G_psi4, G_python_Total_mat, 10, "RHF_TOTAL_GRADIENT_TEST") #TEST
| psi4/psi4 | tests/psi4numpy/rhf-gradient/input.py | Python | lgpl-3.0 | 5,395 |
from services.acservice.constants import *
class ACLicensingMixin(object):
    """
    Mixin implementing the Audio Commons licensing component.

    Concrete services override these methods to adapt them to their own APIs.
    """
    # Domains of AC resource ids (e.g. 'Jamendo' covers Jamendo:xxx) that
    # this service can provide license URLs for.
    LICENSING_ACID_DOMAINS = list()

    def conf_licensing(self, conf):
        """Register the licensing component for this service."""
        self.implemented_components.append(LICENSING_COMPONENT)

    def describe_licensing(self):
        """
        Describe this component's capabilities.

        The capabilities include the list of `acid_domains` the service can
        license resources for (the 'Jamendo' domain means every resource
        identified as Jamendo:xxx).

        :return: tuple of (component name, capabilities dict)
        """
        capabilities = {
            ACID_DOMAINS_DESCRIPTION_KEYWORD: self.LICENSING_ACID_DOMAINS,
        }
        return LICENSING_COMPONENT, capabilities

    def get_licensing_url(self, context, acid, *args, **kwargs):
        """
        Return a URL where the resource identified by *acid* can be licensed.

        Must be overridden by the service. Implementations should raise an AC
        exception when the resource cannot be licensed or the lookup fails,
        and may accept extra parameters suited to their own needs (e.g. an
        already-retrieved resource, to avoid an extra request).

        :param context: dict with request context (see api.views.get_request_context)
        :param acid: Audio Commons unique resource identifier
        :return: url to license the input resource (string)
        """
        raise NotImplementedError("Service must implement method ACLicensingMixin.get_licensing_url")

    def license(self, context, acid, *args, **kwargs):
        """
        Return a dict holding the license URL for *acid*.

        Delegates to get_licensing_url(), which is the method third-party
        services should override. Warnings relevant to the application can be
        attached via BaseACService.add_response_warning.

        :param context: dict with request context (see api.views.get_request_context)
        :param acid: Audio Commons unique resource identifier
        :return: dict with key 'license_url'
        """
        licensing_url = self.get_licensing_url(context, acid, *args, **kwargs)
        return {'license_url': licensing_url}
| AudioCommons/ac-mediator | services/acservice/licensing.py | Python | apache-2.0 | 2,657 |
"""
IceManager initializes Ice and implement all low level methods necessary to IceHMS
"""
import sys
import socket # to get ip address
import logging
import Ice
import IceGrid
import IceStorm
import icehms
class IceManager(object):
    """
    create connection to ice
    creates also usefull proxies and wrapper methods around Ice methods
    """

    def __init__(self, adapterId=None, defaultTimeout=500, endpoints=None, logLevel=logging.WARNING, publishedEndpoints=None):
        """
        No adapterId argument means no adapter is created
        it can currently only handle one adapter, but we may have
        to add support for several adapters....maybe
        """
        self.logger = logging.getLogger(self.__class__.__name__)
        self.logger.setLevel(logLevel)
        if len(logging.root.handlers) == 0: #dirty hack
            logging.basicConfig()
        self._defaultTimeout = defaultTimeout
        self._publishedEndpoints = publishedEndpoints
        self._endpoints = endpoints
        self.initialized = False
        self._adapterId = adapterId
        self._session = None
        self._admin = None
        self.adapter = None
        self.registry = None
        self.query = None
        self.ic = None
        self.topicMgr = None
        self.messageTopicMgr = None
        self.realtimeMgr = None
        #authentication is disable so whatever works
        self._adminUser = "foo"
        self._adminPasswd = "bar"

    def init(self, properties=None):
        """ Initiliaze Ice and keep proxy to many interesting ice objects
        properties is and IceProperties object which can be used to set Ice properties (see doc)
        properties = Ice.createProperties(sys.argv)
        for example:
        prop.setProperty("Ice.ThreadPool.Server.SizeMax", "100000")
        Note: some properties are required by icehms and are arbitrarily set in this method
        """
        if self.initialized:
            self.logger.warn("IceManager is allready initialized")
            return
        if not properties:
            properties = Ice.createProperties(sys.argv)
        #self.logger.critical("Using ice registry located at: %s ", icehms.IceRegistryServer )
        print("IceHMS::IceManager: Using ice registry located at: {0} ".format(icehms.IceRegistryServer))
        # those could be in cfg file but setting them programmatically gives much more flexibility
        if self._adapterId:
            properties.setProperty("hms.AdapterId", self._adapterId)
            myIP = self._getIP_to_IceGrid()
            properties.setProperty("hms.Endpoints", "tcp -h {}: udp -h {}".format( myIP, myIP))
        properties.setProperty("Ice.Default.Locator", "IceGrid/Locator:" + icehms.IceRegistryServer)
        properties.setProperty("Ice.ThreadPool.Server.Size", "1")
        properties.setProperty("Ice.ThreadPool.Server.SizeWarn", "180")
        properties.setProperty("Ice.ThreadPool.Server.SizeMax", "200")
        properties.setProperty("Ice.ThreadPool.Client.Size", "1")
        properties.setProperty("Ice.ThreadPool.Client.SizeWarn", "180")
        properties.setProperty("Ice.ThreadPool.Client.SizeMax", "200")
        #properties.setProperty("Ice.Trace.Network", "1") #debugging
        properties.setProperty("Ice.IPv6", "0")#disable ipv6 as it may hang on some systems
        if self._publishedEndpoints:
            self.logger.info( "setting published endpoints %s: ", self._publishedEndpoints)
            properties.setProperty("hms.PublishedEndpoints", self._publishedEndpoints)
        if self._endpoints:
            self.logger.info( "setting endpoints: %s", self._endpoints)
            properties.setProperty("hms.Endpoints", self._endpoints)
        #All properties set, now initialize Ice and get communicator object
        iceid = Ice.InitializationData()
        iceid.properties = properties
        self.ic = Ice.initialize(sys.argv, iceid)
        if self._adapterId:
            #create the adapter object
            #hms is the name used in the properties, so we cannot
            # change it without changing the ice properties
            self.adapter = self.ic.createObjectAdapter("hms")
            self.adapter.activate() # allow request
        #Those objects must be created after adapter has been activated
        self.query = IceGrid.QueryPrx.checkedCast(self.ic.stringToProxy("IceGrid/Query"))
        self.registry = IceGrid.RegistryPrx.uncheckedCast(self.ic.stringToProxy("IceGrid/Registry"))
        try:
            self.topicMgr = IceStorm.TopicManagerPrx.checkedCast(self.ic.stringToProxy("IceStorm/TopicManager"))
            self.messageTopicMgr = IceStorm.TopicManagerPrx.checkedCast(self.ic.stringToProxy("EventServer/TopicManager"))
            self.realtimeMgr = IceStorm.TopicManagerPrx.checkedCast(self.ic.stringToProxy("RealTimeServer/TopicManager"))
        except Ice.NotRegisteredException:
            print("Exception : if we fail here it is proably because icestorm is not registered in node !!")
            print("run register_services.sh in icehms")
            self.ic.destroy()
            raise
        # if we are here initilization should have worked
        self.initialized = True

    def _getIP_to_IceGrid(self):
        """
        return IP address on interface where we found the IceGrid server
        This is tricky
        return 127.0.0.1 if IceGrid server is not known
        """
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        serv = icehms.IceRegistryServer.split()
        ip = None
        for idx, val in enumerate(serv):
            if val == "-h":
                ip = serv[idx + 1]
        if not ip :
            return ""
        s.connect((ip, 0))#opening a dummy socket on the icegrid server machine
        ip = s.getsockname()[0]
        # Fix: the old message used "...".format(ip) with no placeholder,
        # so the deduced address was silently dropped from the log.
        self.logger.info("Deduced local IP address is: %s", ip)
        print( "Deduced local IP address is: ", ip)
        return ip

    def _initAdmin(self):
        """Open a new admin session and cache both session and admin proxy."""
        self._session = self.registry.createAdminSession(self._adminUser, self._adminPasswd)
        # Fix: cache the admin proxy; it was previously never stored, so
        # get_admin() opened a brand new admin session on every call.
        self._admin = self._session.getAdmin()
        return self._admin

    def get_admin(self):
        """
        this method is implemented to work around timeout
        of admin session
        """
        if not self._admin:
            return self._initAdmin()
        else:
            try:
                self._session.ice_ping()
                self._admin.ice_ping()
            except Ice.Exception:
                # cached session/admin died (e.g. timed out): re-create it
                return self._initAdmin()
        return self._admin

    def automated_cast(self, prx):
        """
        get ice type from ice, parse string and cast to specific type
        This is very usefull to avoid having to cast correctly every proxy we get from Ice
        This contains a lot of python magic and when something breaks in IceHMS it is usually here...
        """
        prx = prx.ice_timeout(300)
        try:
            prx.ice_ping()
        except Ice.Exception as why:
            self.logger.warn("Proxy could not be ping, proxy is dead or database need cleaning %s, %s", why, prx)
            return prx # prx is dead but maybe user wants to investigate it
        try:
            prxobj = self._get_prxobj(prx) #Try to cast
        except (ImportError, KeyError) as ex:
            self.logger.info( "Coult not cast to %s, casting to Holon. Error was: %s %s", prx.ice_id(), ex.__repr__(), ex)
            prxobj = icehms.hms.HolonPrx#If we fail here we have an installation problem or a bug
        prx = prxobj.checkedCast(prx)
        prx = prx.ice_timeout(self._defaultTimeout) #set timeout since we changed it for pinging
        return prx

    def _get_prxobj(self, prx):
        """
        Resolve the generated *Prx class for a proxy's most-derived ice type.

        e.g. '::hms::agv::Localizer' -> import module 'hms', walk to
        hms.agv, return hms.agv.LocalizerPrx.
        """
        tmp = None
        icetype = prx.ice_id()
        icetype = icetype.split("::")
        tmp = __import__(icetype[1]) #The first identifier is a slice module to import
        for t in icetype[2:-1]:
            tmp = tmp.__dict__[t]
        return tmp.__dict__[icetype[-1] + "Prx"]

    def register_to_IceGrid(self, agent):
        """ register Agent to iceregistry so that it can be found by type and ID
        """
        self.logger.debug( "Registring: %s with type %s", agent, agent.hmstype)
        try:
            self.get_admin().addObjectWithType(agent.proxy, agent.hmstype)
            agent.registeredToGrid = True
            return True
        except (IceGrid.ObjectExistsException):
            # already known: refresh the stored proxy instead
            self.get_admin().updateObject(agent.proxy)
            agent.registeredToGrid = True
            return True
        except Ice.Exception as why:
            self.logger.error( "Could not register holon to grid: %s", why)
            return False

    def deregister_to_IceGrid(self, iceid):
        """
        deregister ice object to ice grid, if you have registered an object,
        it is a good idea to deregister it using this method
        """
        try:
            self.get_admin().removeObject(iceid)
        except IceGrid.ObjectNotRegisteredException:
            self.logger.warn( "Holon was not registered in database" )
        except Ice.ObjectNotExistException as why:
            self.logger.warn( "Could not de-register holon, admin obejct is dead !!!! report !!, %s", why )
        else:
            self.logger.info( "Holon %s de-registered" % iceid.name )

    def get_proxy(self, name):
        # alias kept for backward compatibility
        return self.get_holon(name)

    def get_holon(self, name):
        """
        return a proxy object of an Ice Holon, knowing its name(==id)
        return None if not found
        """
        prx = None
        if self.query:
            prx = self.query.findObjectById(self.ic.stringToIdentity(name))
        if prx:
            prx = self.automated_cast(prx)
        if prx:
            self.logger.info( "got proxy for %s", prx)
        return prx

    def find_holons_quick(self, icetype):
        """ simple wrapper around findAllObjectsByType from ice
        but cast proxies to lowest level inherited object before returng list
        type is a string like "::hms::agv::Localizer"
        """
        holons = self.query.findAllObjectsByType( icetype )
        newlist = []
        for holon in holons:
            try:
                holon.proxy.ice_ping()
            except Ice.Exception:
                self.get_admin().removeObject(holon.proxy.ice_getIdentity())# it is dead
            else:
                prx = self.automated_cast(holon.proxy)
                newlist.append(prx)
        return newlist

    def find_holons(self, icetype="::hms::Holon", cast=True):
        """
        more expensive version of find_holons
        returns all object which inherit the given type
        """
        objs = self.get_admin().getAllObjectInfos("*")
        holons = []
        for obj in objs:
            try:
                if obj.proxy.ice_isA(icetype):
                    if not cast:
                        holons.append(obj.proxy)
                    else:
                        holons.append(self.automated_cast(obj.proxy))
            except Ice.Exception as e:
                self.logger.warn("%s seems dead: %s, deleting it from registry", obj.proxy, e)
                try:
                    self.get_admin().removeObject(obj.proxy.ice_getIdentity())# it is dead
                except Ice.Exception:
                    pass #This fails sometimes if the object has been deleted in-between and we do not care
        return holons

    # backward-compatible aliases
    findAllObjectsByType = find_holons
    find_holons_by_type = find_holons

    def get_topic(self, topicName, create=True, server=None):
        """
        return an ice topic object for name topicName
        if create is True(default) then create topic if it does not exist
        """
        if not server:
            server = self.topicMgr
        try:
            topic = server.retrieve(topicName)
        except Ice.Exception: #sometime we crash with strange error message so better catch everything
            if create:
                try:
                    topic = server.create(topicName)
                except IceStorm.TopicExists:
                    #maybe someone has created it in between so re-try without catching check
                    # if we get an exception here we cannot do much more
                    topic = server.retrieve(topicName)
            else:
                raise
        return topic

    def get_all_topics(self, server=None):
        """ return a dict of existing topics on given topic server
        if None the default topic supporting messages is used
        """
        if server is None:
            server = self.messageTopicMgr
        # Fix: previously always queried self.messageTopicMgr, silently
        # ignoring an explicitly passed server.
        return server.retrieveAll()

    def get_publisher(self, topicName, prxobj, server=None):
        """
        get a publisher object for a topic
        create it if it does not exist
        prxobj is the ice interface obj for the desired topic. This is necessary since topics have an interface
        if server is None, default server is used
        """
        self.logger.debug("getting publisher for %s with prx %s at server %s", topicName, prxobj, server)
        topic = self.get_topic(topicName, server=server)
        publisher = topic.getPublisher() # get twoways publisher for topic
        self.logger.info("Got publisher for %s", topicName)
        return prxobj.uncheckedCast(publisher)

    def subscribe_topic(self, topicName, prx, server=None):
        """
        subscribe prx to a topic
        The object pointed by the proxy needs to inherit the topic proxy and implemented the topic methods
        """
        topic = self.get_topic(topicName, server=server)
        qos = {}
        qos["reliability"] = "" #"" and "ordered" are the only possibilities see doc
        qos["retryCount"] = "-1" #-1 means to never remove a dead subscriber from list
        try:
            topic.subscribeAndGetPublisher(qos, prx)
        except IceStorm.AlreadySubscribed:
            self.logger.info( "Allready subscribed to topic" )
        self.logger.info( "subscribed %s to topic %s", prx, topicName )
        return topic

    def destroy(self):
        """Tear down the Ice communicator (safe to call when never init'ed)."""
        if self.ic:
            self.ic.destroy()

    shutdown = destroy

    def wait_for_shutdown(self):
        if self.ic: # we might crash here, waitForShutdown crashes if we are allready down
            self.ic.waitForShutdown()

    def is_shutdown(self):
        """Return True when no live communicator exists."""
        if self.ic:
            return self.ic.isShutdown()
        else:
            return True

    def get_cleaner(self):
        return icehms.Cleaner(self)
| oroulet/icehms | src/python/icehms/icemanager.py | Python | gpl-3.0 | 14,695 |
from django.test import TestCase
from pxl.models import PXLBoardModel
from django.contrib.auth.models import User
import factory
class UserFactory(factory.django.DjangoModelFactory):
    """Factory producing Django ``User`` instances with faked credentials."""
    class Meta:
        model = User

    username = factory.Faker('user_name')
    password = factory.Faker('password')
class HeadlineTests(TestCase):
    """Tests for PXLBoardModel persistence and its feature flags."""
    def setUp(self):
        """Create a user to own the test board."""
        self.user = UserFactory()

    def test_pxlboard_model(self):
        """A saved board round-trips its owner and boolean feature flags."""
        # Flags are passed as strings; the assertions below expect 'true'
        # to round-trip as True and '' as False after save/reload.
        params = {'mlb': 'true',
                  'nfl': '',
                  'nhl': '',
                  'headlines': '',
                  'weather': 'true'}
        newboard = PXLBoardModel(owner=self.user, **params)
        newboard.save()
        test_board = PXLBoardModel.objects.get(owner=self.user)
        self.assertEqual(test_board.owner, self.user)
        self.assertEqual(test_board.mlb, True)
        self.assertEqual(test_board.nfl, False)
        self.assertEqual(test_board.nhl, False)
        self.assertEqual(test_board.weather, True)
        self.assertEqual(test_board.headlines, False)
#
# history.py - Changelog dialog for TortoiseHg
#
# Copyright 2007 Brad Schick, brad at gmail . com
# Copyright (C) 2007 TK Soh <[email protected]>
#
import os
import pygtk
pygtk.require('2.0')
import gtk
import gobject
import pango
import StringIO
from mercurial.node import *
from mercurial import ui, hg, commands, extensions
from gdialog import *
from changeset import ChangeSet
from logfilter import FilterDialog
from update import UpdateDialog
from merge import MergeDialog
from vis import treemodel
from vis.treeview import TreeView
from hglib import toutf
import gtklib
def create_menu(label, callback):
    """Return a gtk.MenuItem with mnemonic *label*, wired to *callback*."""
    item = gtk.MenuItem(label, True)
    item.connect('activate', callback)
    item.set_border_width(1)
    return item
class GLog(GDialog):
"""GTK+ based dialog for displaying repository logs
"""
def get_title(self):
return os.path.basename(self.repo.root) + ' log'
    def get_icon(self):
        """Icon filename used for the log window."""
        return 'menulog.ico'
    def parse_opts(self):
        """Adjust ui options before the log is generated."""
        # Disable quiet to get full log info
        self.ui.quiet = False
    def get_tbbuttons(self):
        """Build the toolbar: refresh / filter / datamine buttons followed
        by the changeset pane's own buttons."""
        return [
            self.make_toolbutton(gtk.STOCK_REFRESH,
                'Re_fresh',
                self._refresh_clicked,
                tip='Reload revision history'),
            gtk.SeparatorToolItem(),
            self.make_toolbutton(gtk.STOCK_INDEX,
                '_Filter',
                self._filter_clicked,
                menu=self._filter_menu(),
                tip='Filter revisions for display'),
            gtk.SeparatorToolItem(),
            self.make_toolbutton(gtk.STOCK_FIND,
                '_DataMine',
                self._datamine_clicked,
                tip='Search Repository History'),
            gtk.SeparatorToolItem()
            ] + self.changeview.get_tbbuttons()
def toggle_view_column(self, button, property):
bool = button.get_active()
self.graphview.set_property(property, bool)
    def _more_clicked(self, button):
        """Button callback: load the next batch of revisions."""
        self.graphview.next_revision_batch()
    def _load_all_clicked(self, button):
        """Button callback: load every remaining revision, then disable the
        load-more/load-all buttons since there is nothing left to fetch."""
        self.graphview.load_all_revisions()
        self.nextbutton.set_sensitive(False)
        self.allbutton.set_sensitive(False)
    def revisions_loaded(self, graphview):
        '''Treeview reports log generator has exited'''
        if not self.graphview.graphdata:
            # Nothing matched: clear the changeset pane and file list.
            self.changeview._buffer.set_text('')
            self.changeview._filelist.clear()
            self._last_rev = None
        # All revisions are loaded; no more batches to fetch.
        self.nextbutton.set_sensitive(False)
        self.allbutton.set_sensitive(False)
    def _datamine_clicked(self, toolbutton, data=None):
        """Toolbar callback: open the DataMine (history search) dialog."""
        from datamine import DataMineDialog
        dialog = DataMineDialog(self.ui, self.repo, self.cwd, [], {}, False)
        dialog.display()
        dialog.add_search_page()
    def _filter_clicked(self, toolbutton, data=None):
        """Toolbar callback: show (or re-present) the modeless filter dialog."""
        if self._filter_dialog:
            self._filter_dialog.show()
            self._filter_dialog.present()
        else:
            self._show_filter_dialog()
    def _show_filter_dialog(self):
        '''Launch a modeless filter dialog'''
        def do_reload(opts):
            # A custom filter is now active; reflect that in the filter menu.
            self.custombutton.set_active(True)
            self.reload_log(opts)

        def close_filter_dialog(dialog, response_id):
            # Hide rather than destroy so _filter_clicked can re-present it.
            dialog.hide()

        # Seed the dialog with the selected and marked revisions, if any.
        revs = []
        if self.currow is not None:
            revs.append(self.currow[treemodel.REVID])
        if self.graphview.get_mark_rev() is not None:
            revs.append(self.graphview.get_mark_rev())
        dlg = FilterDialog(self.repo.root, revs, self.pats,
                filterfunc=do_reload)
        dlg.connect('response', close_filter_dialog)
        dlg.set_modal(False)
        dlg.show()
        self._filter_dialog = dlg
    def _filter_selected(self, widget, data=None):
        """Radio-menu callback: *data* names the newly selected filter
        ('all', 'tagged', 'parents', 'heads', 'only_merges', 'no_merges')."""
        if widget.get_active():
            self._filter = data
            self.reload_log()
def _view_menu(self):
menu = gtk.Menu()
button = gtk.CheckMenuItem("Show Ids")
button.connect("toggled", self.toggle_view_column,
'rev-column-visible')
button.set_active(True)
button.set_draw_as_radio(True)
menu.append(button)
button = gtk.CheckMenuItem("Show Tags")
button.connect("toggled", self.toggle_view_column,
'tags-column-visible')
button.set_active(True)
button.set_draw_as_radio(True)
menu.append(button)
button = gtk.CheckMenuItem("Show Date")
button.connect("toggled", self.toggle_view_column,
'date-column-visible')
button.set_active(True)
button.set_draw_as_radio(True)
menu.append(button)
menu.show_all()
return menu
    def _filter_menu(self):
        """Build the revision-filter radio menu for the Filter toolbutton.

        Each gtk.RadioMenuItem is chained to the previous one so they form
        a single radio group; the last ('Custom Filter') is only activated
        programmatically by the filter dialog, never by the user.
        """
        menu = gtk.Menu()
        button = gtk.RadioMenuItem(None, "Show All Revisions")
        button.set_active(True)
        button.connect("toggled", self._filter_selected, 'all')
        menu.append(button)
        button = gtk.RadioMenuItem(button, "Show Tagged Revisions")
        button.connect("toggled", self._filter_selected, 'tagged')
        menu.append(button)
        button = gtk.RadioMenuItem(button, "Show Parent Revisions")
        button.connect("toggled", self._filter_selected, 'parents')
        menu.append(button)
        button = gtk.RadioMenuItem(button, "Show Head Revisions")
        button.connect("toggled", self._filter_selected, 'heads')
        menu.append(button)
        button = gtk.RadioMenuItem(button, "Show Only Merge Revisions")
        button.connect("toggled", self._filter_selected, 'only_merges')
        menu.append(button)
        button = gtk.RadioMenuItem(button, "Show Non-Merge Revisions")
        button.connect("toggled", self._filter_selected, 'no_merges')
        menu.append(button)
        self.custombutton = gtk.RadioMenuItem(button, "Custom Filter")
        self.custombutton.set_sensitive(False)
        menu.append(self.custombutton)
        menu.show_all()
        return menu
    def open_with_file(self, file):
        '''Call this before display() to open with file history'''
        # prepare_display() consumes and removes this key.
        self.opts['filehist'] = file
    def prepare_display(self):
        '''Called at end of display() method'''
        self._last_rev = None
        self._filter = "all"
        self.currow = None
        self.curfile = None
        self.opts['rev'] = [] # This option is dangerous - used directly by hg
        self.opts['revs'] = None
        # paths relative to repo root do not work otherwise
        os.chdir(self.repo.root)
        if 'filehist' in self.opts:
            # Opened on a single file's history (see open_with_file).
            self.custombutton.set_active(True)
            self.graphview.refresh(True, None, self.opts)
            del self.opts['filehist']
        elif 'revrange' in self.opts:
            self.custombutton.set_active(True)
            self.graphview.refresh(True, None, self.opts)
        elif self.pats == [self.repo.root] or self.pats == ['']:
            # Whole-repo patterns are equivalent to no patterns at all.
            self.pats = []
            self.reload_log()
        elif self.pats:
            self.custombutton.set_active(True)
            self.graphview.refresh(False, self.pats, self.opts)
        else:
            self.reload_log()
    def save_settings(self):
        """Persist base-dialog settings plus the two pane split positions."""
        settings = GDialog.save_settings(self)
        settings['glog'] = (self._vpaned.get_position(),
                self._hpaned.get_position())
        return settings
    def load_settings(self, settings):
        '''Called at beginning of display() method'''
        # Batch size for loading revisions (tortoisehg.graphlimit config);
        # unparsable, zero or negative values mean "no limit".
        limit_opt = self.repo.ui.config('tortoisehg', 'graphlimit', '500')
        if limit_opt:
            try:
                limit = int(limit_opt)
            except ValueError:
                limit = 0
            if limit <= 0:
                limit = None
        else:
            limit = None
        # Allocate TreeView instance to use internally
        self.limit = limit
        self.stbar = gtklib.StatusBar()
        self.graphview = TreeView(self.repo, limit, self.stbar)
        # Allocate ChangeSet instance to use internally
        self.changeview = ChangeSet(self.ui, self.repo, self.cwd, [],
                self.opts, False, self.stbar)
        self.changeview.display(False)
        self.changeview.glog_parent = self
        GDialog.load_settings(self, settings)
        if settings:
            set = settings['glog']
            # Older settings stored only the vertical position (an int);
            # newer ones store a (vpos, hpos) tuple.
            if type(set) == int:
                self._setting_vpos = set
                self._setting_hpos = -1
            else:
                (self._setting_vpos, self._setting_hpos) = set
        else:
            self._setting_vpos = -1
            self._setting_hpos = -1
def reload_log(self, filteropts={}):
"""Send refresh event to treeview object"""
os.chdir(self.repo.root) # paths relative to repo root do not work otherwise
self.nextbutton.set_sensitive(True)
self.allbutton.set_sensitive(True)
self.opts['rev'] = []
self.opts['revs'] = None
self.opts['no_merges'] = False
self.opts['only_merges'] = False
self.opts['revrange'] = filteropts.get('revrange', None)
self.opts['date'] = filteropts.get('date', None)
self.opts['keyword'] = filteropts.get('keyword', [])
revs = []
if filteropts:
branch = filteropts.get('branch', None)
if 'revrange' in filteropts or 'branch' in filteropts:
self.graphview.refresh(True, branch, self.opts)
else:
filter = filteropts.get('pats', [])
self.graphview.refresh(False, filter, self.opts)
elif self._filter == "all":
self.graphview.refresh(True, None, self.opts)
elif self._filter == "only_merges":
self.opts['only_merges'] = True
self.graphview.refresh(False, [], self.opts)
elif self._filter == "no_merges":
self.opts['no_merges'] = True
self.graphview.refresh(False, [], self.opts)
elif self._filter == "tagged":
tagged = []
for t, r in self.repo.tagslist():
hr = hex(r)
if hr not in tagged:
tagged.insert(0, hr)
self.opts['revs'] = tagged
self.graphview.refresh(False, [], self.opts)
elif self._filter == "parents":
repo_parents = [x.rev() for x in self.repo.workingctx().parents()]
self.opts['revs'] = [str(x) for x in repo_parents]
self.graphview.refresh(False, [], self.opts)
elif self._filter == "heads":
heads = [self.repo.changelog.rev(x) for x in self.repo.heads()]
self.opts['revs'] = [str(x) for x in heads]
self.graphview.refresh(False, [], self.opts)
def tree_context_menu(self):
    """Build the right-click context menu for a single selected revision."""
    _menu = gtk.Menu()
    _menu.append(create_menu('di_splay', self._show_status))
    _menu.append(create_menu('_checkout', self._checkout))
    # kept as an attribute so its sensitivity can be toggled per-revision
    self._cmenu_merge = create_menu('_merge with', self._merge)
    _menu.append(self._cmenu_merge)
    _menu.append(create_menu('_export patch', self._export_patch))
    _menu.append(create_menu('e_mail patch', self._email_patch))
    _menu.append(create_menu('add/remove _tag', self._add_tag))
    _menu.append(create_menu('backout revision', self._backout_rev))
    # need mq extension for strip command
    extensions.loadall(self.ui)
    extensions.load(self.ui, 'mq', None)
    _menu.append(create_menu('strip revision', self._strip_rev))
    _menu.show_all()
    return _menu
def tree_diff_context_menu(self):
    """Build the context menu shown when two revisions are selected."""
    _menu = gtk.Menu()
    _menu.append(create_menu('_diff with selected', self._diff_revs))
    _menu.append(create_menu('visual diff with selected',
                             self._vdiff_selected))
    _menu.show_all()
    return _menu
def get_body(self):
    """Construct the dialog body: the revision graph on top of a vertical
    pane, the changeset detail view below, and a status bar at the bottom.
    Returns the outermost container widget."""
    self._filter_dialog = None
    self._menu = self.tree_context_menu()
    self._menu2 = self.tree_diff_context_menu()
    self.tree_frame = gtk.Frame()
    self.tree_frame.set_shadow_type(gtk.SHADOW_ETCHED_IN)
    # PyGtk 2.6 and below did not automatically register types
    if gobject.pygtk_version < (2, 8, 0):
        gobject.type_register(TreeView)
    self.tree = self.graphview.treeview
    self.graphview.connect('revision-selected', self.selection_changed)
    self.graphview.connect('revisions-loaded', self.revisions_loaded)
    #self.tree.connect('button-release-event', self._tree_button_release)
    self.tree.connect('button-press-event', self._tree_button_press)
    #self.tree.connect('popup-menu', self._tree_popup_menu)
    self.tree.connect('row-activated', self._tree_row_act)
    #self.tree.modify_font(pango.FontDescription(self.fontlist))
    hbox = gtk.HBox()
    hbox.pack_start(self.graphview, True, True, 0)
    vbox = gtk.VBox()
    self.colmenu = gtk.MenuToolButton('')
    self.colmenu.set_menu(self._view_menu())
    # A MenuToolButton has two parts; a Button and a ToggleButton
    # we want to see the togglebutton, but not the button
    b = self.colmenu.child.get_children()[0]
    b.unmap()
    b.set_sensitive(False)
    # buttons to load more / all remaining revisions into the graph
    self.nextbutton = gtk.ToolButton(gtk.STOCK_GO_DOWN)
    self.nextbutton.connect('clicked', self._more_clicked)
    self.allbutton = gtk.ToolButton(gtk.STOCK_GOTO_BOTTOM)
    self.allbutton.connect('clicked', self._load_all_clicked)
    vbox.pack_start(self.colmenu, False, False)
    vbox.pack_start(gtk.Label(''), True, True) # expanding blank label
    vbox.pack_start(self.nextbutton, False, False)
    vbox.pack_start(self.allbutton, False, False)
    self.nextbutton.set_tooltip(self.tooltips,
            'show next %d revisions' % self.limit)
    self.allbutton.set_tooltip(self.tooltips,
            'show all remaining revisions')
    hbox.pack_start(vbox, False, False, 0)
    self.tree_frame.add(hbox)
    self.tree_frame.show_all()
    # Add ChangeSet instance to bottom half of vpane
    self.changeview.graphview = self.graphview
    self._hpaned = self.changeview.get_body()
    self._vpaned = gtk.VPaned()
    self._vpaned.pack1(self.tree_frame, True, False)
    self._vpaned.pack2(self._hpaned)
    # restore pane positions persisted by load_settings
    self._vpaned.set_position(self._setting_vpos)
    self._hpaned.set_position(self._setting_hpos)
    vbox = gtk.VBox()
    vbox.pack_start(self._vpaned, True, True)
    # Append status bar
    vbox.pack_start(gtk.HSeparator(), False, False)
    vbox.pack_start(self.stbar, False, False)
    return vbox
def _strip_rev(self, menuitem):
    """Strip the selected revision and its descendants (mq extension),
    after user confirmation, then reload the graph."""
    rev = self.currow[treemodel.REVID]
    res = Confirm('Strip Revision(s)', [], self,
                  'Remove revision %d and all descendants?' % rev).run()
    if res != gtk.RESPONSE_YES:
        return
    from hgcmd import CmdDialog
    cmdline = ['hg', 'strip', str(rev)]
    dlg = CmdDialog(cmdline)
    dlg.show_all()
    dlg.run()
    dlg.hide()
    # the repository changed on disk; drop cached state before refreshing
    self.repo.invalidate()
    self.reload_log()
def _backout_rev(self, menuitem):
    """Open the backout dialog for the selected revision."""
    from backout import BackoutDialog
    rev = self.currow[treemodel.REVID]
    rev = short(self.repo.changelog.node(rev))
    # remember current parents so the notify callback can detect changes
    parents = [x.node() for x in self.repo.workingctx().parents()]
    dialog = BackoutDialog(self.repo.root, rev)
    dialog.set_transient_for(self)
    dialog.show_all()
    dialog.set_notify_func(self.checkout_completed, parents)
    dialog.present()
    dialog.set_transient_for(None)
def _diff_revs(self, menuitem):
    """Open a status dialog showing the diff between the two revisions
    stored in self._revs by the button-press handler."""
    from status import GStatus
    from gtools import cmdtable
    rev0, rev1 = self._revs
    statopts = self.merge_opts(cmdtable['gstatus|gst'][1],
                               ('include', 'exclude', 'git'))
    statopts['rev'] = ['%u:%u' % (rev0, rev1)]
    statopts['modified'] = True
    statopts['added'] = True
    statopts['removed'] = True
    dialog = GStatus(self.ui, self.repo, self.cwd, [], statopts, False)
    dialog.display()
    return True
def _vdiff_selected(self, menuitem):
    """Launch the visual diff tool for the two revisions in self._revs."""
    rev0, rev1 = self._revs
    self.opts['rev'] = ["%s:%s" % (rev0, rev1)]
    self._diff_file(None, '')
def _mark_rev(self, menuitem):
    """Mark the currently selected revision in the graph view."""
    rev = self.currow[treemodel.REVID]
    self.graphview.set_mark_rev(rev)
def _add_tag(self, menuitem):
    """Open the add/remove-tag dialog; reload the log if the tag list
    changed when the dialog closes."""
    from tagadd import TagAddDialog
    rev = self.currow[treemodel.REVID]
    # NOTE(review): `parents` is assigned but never used here
    parents = self.currow[treemodel.PARENTS]
    # save tag info for detecting new tags added
    oldtags = self.repo.tagslist()
    def refresh(*args):
        # destroy-callback: refresh only when the tags actually changed
        self.repo.invalidate()
        newtags = self.repo.tagslist()
        if newtags != oldtags:
            self.reload_log()
    dialog = TagAddDialog(self.repo.root, rev=str(rev))
    dialog.set_transient_for(self)
    dialog.connect('destroy', refresh)
    dialog.show_all()
    dialog.present()
    dialog.set_transient_for(None)
def _show_status(self, menuitem):
    """Display the changeset details for the selected revision."""
    rev = self.currow[treemodel.REVID]
    statopts = {'rev' : [str(rev)] }
    dialog = ChangeSet(self.ui, self.repo, self.cwd, [], statopts, False)
    dialog.display()
def _export_patch(self, menuitem):
    """Export the selected revision as a patch file chosen via a native
    save dialog."""
    rev = self.currow[treemodel.REVID]
    filename = "%s_rev%s.patch" % (os.path.basename(self.repo.root), rev)
    fd = NativeSaveFileDialogWrapper(Title = "Save patch to",
                                     InitialDir=self.repo.root,
                                     FileName=filename)
    result = fd.run()
    if result:
        # In case new export args are added in the future, merge the
        # hg defaults
        exportOpts= self.merge_opts(commands.table['^export'][1], ())
        exportOpts['output'] = result
        def dohgexport():
            commands.export(self.ui,self.repo,str(rev),**exportOpts)
        # _hg_call_wrapper captures output and reports errors to the user
        success, outtext = self._hg_call_wrapper("Export",dohgexport,False)
def _email_patch(self, menuitem):
    """Open the email dialog pre-loaded with the selected revision."""
    from hgemail import EmailDialog
    rev = self.currow[treemodel.REVID]
    dlg = EmailDialog(self.repo.root, ['--rev', str(rev)])
    dlg.set_transient_for(self)
    dlg.show_all()
    dlg.present()
    dlg.set_transient_for(None)
def _checkout(self, menuitem):
    """Open the update/checkout dialog for the selected revision."""
    rev = self.currow[treemodel.REVID]
    # remember current parents so checkout_completed can detect a change
    parents = [x.node() for x in self.repo.workingctx().parents()]
    dialog = UpdateDialog(self.cwd, rev)
    dialog.set_transient_for(self)
    dialog.show_all()
    dialog.set_notify_func(self.checkout_completed, parents)
    dialog.present()
    dialog.set_transient_for(None)
def checkout_completed(self, oldparents):
    """Refresh the log if the working directory parents changed after an
    update/checkout dialog finished."""
    newparents = [x.node() for x in self.repo.workingctx().parents()]
    # Fix: `if not a == b` replaced with the direct `!=` comparison
    if oldparents != newparents:
        self.reload_log()
def _merge(self, menuitem):
    """Open the merge dialog to merge the selected revision into the
    working directory."""
    rev = self.currow[treemodel.REVID]
    # remember current parents so merge_completed can detect a change
    parents = [x.node() for x in self.repo.workingctx().parents()]
    node = short(self.repo.changelog.node(rev))
    dialog = MergeDialog(self.repo.root, self.cwd, node)
    dialog.set_transient_for(self)
    dialog.show_all()
    dialog.set_notify_func(self.merge_completed, parents)
    dialog.present()
    dialog.set_transient_for(None)
def merge_completed(self, oldparents):
    """Refresh the log if the working directory parents changed after a
    merge dialog finished."""
    newparents = [x.node() for x in self.repo.workingctx().parents()]
    # Fix: `if not a == b` replaced with the direct `!=` comparison
    if oldparents != newparents:
        self.reload_log()
def selection_changed(self, treeview):
    """Track the newly selected revision and load its details into the
    changeset pane (only when the selection actually changed)."""
    self.currow = self.graphview.get_revision()
    rev = self.currow[treemodel.REVID]
    if rev != self._last_rev:
        self._last_rev = rev
        self.changeview.opts['rev'] = [str(rev)]
        self.changeview.load_details(rev)
    return False
def _refresh_clicked(self, toolbutton, data=None):
    """Toolbar refresh handler: reload the revision graph."""
    self.reload_log()
    return True
def _tree_button_release(self, widget, event) :
    """Pop up the context menu on an unmodified right-button release.
    (Currently not connected; see the commented connect in get_body.)"""
    if event.button == 3 and not (event.state & (gtk.gdk.SHIFT_MASK |
        gtk.gdk.CONTROL_MASK)):
        self._tree_popup_menu(widget, event.button, event.time)
    return False
def _tree_button_press(self, widget, event):
    """On an unmodified right-click: show the single-revision menu when
    the click lands on the selected row; otherwise remember the clicked
    and selected revisions in self._revs and show the two-revision
    (diff) menu."""
    if event.button == 3 and not (event.state & (gtk.gdk.SHIFT_MASK |
        gtk.gdk.CONTROL_MASK)):
        # NOTE(review): get_path_at_pos may return None when the click
        # falls outside any row; indexing [0] would then raise — confirm.
        crow = widget.get_path_at_pos(int(event.x), int(event.y))[0]
        (model, pathlist) = widget.get_selection().get_selected_rows()
        if pathlist == []:
            return False
        srow = pathlist[0]
        if srow == crow:
            self._tree_popup_menu(widget, event.button, event.time)
        else:
            self._revs = (int(model[srow][treemodel.REVID]),
                          int(model[crow][treemodel.REVID]))
            self._tree_popup_menu_diff(widget, event.button, event.time)
        return True
    return False
def _tree_popup_menu(self, treeview, button=0, time=0) :
    """Show the single-revision context menu, enabling 'merge with' only
    when a merge with the selected revision is possible."""
    selrev = self.currow[treemodel.REVID]
    # disable/enable menus as required
    parents = [self.repo.changelog.rev(x.node()) for x in
               self.repo.workingctx().parents()]
    # merge requires: target is not a current parent, more than one head
    # exists, and the working dir is not already in a merge (two parents)
    can_merge = selrev not in parents and \
                len(self.repo.heads()) > 1 and \
                len(parents) < 2
    self._cmenu_merge.set_sensitive(can_merge)
    # display the context menu
    self._menu.popup(None, None, None, button, time)
    return True
def _tree_popup_menu_diff(self, treeview, button=0, time=0):
    """Show the two-revision (diff) context menu."""
    # display the context menu
    self._menu2.popup(None, None, None, button, time)
    return True
def _tree_row_act(self, tree, path, column) :
    """Default action is the first entry in the context menu
    """
    # double-click / Enter activates the first menu entry ('display')
    self._menu.get_children()[0].activate()
    return True
def run(root='', cwd='', files=None, **opts):
    """Launch the standalone changelog (glog) dialog.

    root: repository root path; cwd: working directory used to resolve
    relative paths; files: optional list of files to restrict the log to.
    """
    # Fix: mutable default argument ([]) replaced with a None sentinel.
    if files is None:
        files = []
    u = ui.ui()
    u.updateopts(debug=False, traceback=False)
    repo = hg.repository(u, path=root)
    files = [util.canonpath(root, cwd, f) for f in files]
    cmdoptions = {
        'follow':False, 'follow-first':False, 'copies':False, 'keyword':[],
        'limit':0, 'rev':[], 'removed':False, 'no_merges':False, 'date':None,
        'only_merges':None, 'prune':[], 'git':False, 'verbose':False,
        'include':[], 'exclude':[]
    }
    dialog = GLog(u, repo, cwd, files, cmdoptions, True)
    gtk.gdk.threads_init()
    gtk.gdk.threads_enter()
    dialog.display()
    gtk.main()
    gtk.gdk.threads_leave()
if __name__ == "__main__":
    import sys
    opts = {}
    # Fix: `cond and a or b` yields b whenever a is falsy (e.g. an empty
    # argv string); a conditional expression states the intent directly.
    opts['root'] = sys.argv[1] if len(sys.argv) > 1 else os.getcwd()
    opts['files'] = [opts['root']]
    run(**opts)
| tdjordan/tortoisegit | gitgtk/history.py | Python | gpl-2.0 | 23,107 |
import six
from .. import errors
from ..utils import format_environment, split_command
class TaskTemplate(dict):
    """
    Describe the task specification to be used when creating or updating a
    service.

    Args:
        container_spec (ContainerSpec): Container settings for containers
          started as part of this task.
        log_driver (DriverConfig): Log configuration for containers created as
          part of the service.
        resources (Resources): Resource requirements which apply to each
          individual container created as part of the service.
        restart_policy (RestartPolicy): Specification for the restart policy
          which applies to containers created as part of this service.
        placement (list): A list of constraints.
    """
    def __init__(self, container_spec, resources=None, restart_policy=None,
                 placement=None, log_driver=None):
        self['ContainerSpec'] = container_spec
        # Optional sections are only written when supplied, keeping the
        # serialized template minimal.
        for key, value in (('Resources', resources),
                           ('RestartPolicy', restart_policy)):
            if value:
                self[key] = value
        if placement:
            # A bare list is shorthand for the 'Constraints' field.
            if isinstance(placement, list):
                placement = {'Constraints': placement}
            self['Placement'] = placement
        if log_driver:
            self['LogDriver'] = log_driver

    @property
    def container_spec(self):
        return self.get('ContainerSpec')

    @property
    def resources(self):
        return self.get('Resources')

    @property
    def restart_policy(self):
        return self.get('RestartPolicy')

    @property
    def placement(self):
        return self.get('Placement')
class ContainerSpec(dict):
    """
    Describes the behavior of containers that are part of a task, and is used
    when declaring a :py:class:`~docker.types.TaskTemplate`.

    Args:
        image (string): The image name to use for the container.
        command (string or list): The command to be run in the image.
        args (list): Arguments to the command.
        env (dict or list): Environment variables, as a dict or a list of
          ``KEY=VALUE`` strings.
        workdir (string): The working directory for commands to run in.
        user (string): The user inside the container.
        labels (dict): A map of labels to associate with the service.
        mounts (list): A list of specifications for mounts to be added to
          containers created as part of the service. Items may be
          :py:class:`~docker.types.Mount` objects or mount-string
          shorthands accepted by ``Mount.parse_mount_string``.
        stop_grace_period (int): Amount of time to wait for the container to
          terminate before forcefully killing it.
    """
    def __init__(self, image, command=None, args=None, env=None, workdir=None,
                 user=None, labels=None, mounts=None, stop_grace_period=None):
        self['Image'] = image
        if isinstance(command, six.string_types):
            command = split_command(command)
        self['Command'] = command
        self['Args'] = args

        if env is not None:
            # dicts are normalized to the API's KEY=VALUE list form
            if isinstance(env, dict):
                self['Env'] = format_environment(env)
            else:
                self['Env'] = env
        if workdir is not None:
            self['Dir'] = workdir
        if user is not None:
            self['User'] = user
        if labels is not None:
            self['Labels'] = labels
        if mounts is not None:
            # Fix: the original appended to and removed from `mounts` while
            # iterating over it, which can skip elements; build a new list
            # instead (also avoids mutating the caller's list).
            self['Mounts'] = [
                Mount.parse_mount_string(m)
                if isinstance(m, six.string_types) else m
                for m in mounts
            ]
        if stop_grace_period is not None:
            self['StopGracePeriod'] = stop_grace_period
class Mount(dict):
    """
    Describes a mounted folder's configuration inside a container. A list of
    ``Mount``s would be used as part of a
    :py:class:`~docker.types.ContainerSpec`.

    Args:
        target (string): Container path.
        source (string): Mount source (e.g. a volume name or a host path).
        type (string): The mount type (``bind`` or ``volume``).
          Default: ``volume``.
        read_only (bool): Whether the mount should be read-only.
        propagation (string): A propagation mode with the value ``[r]private``,
          ``[r]shared``, or ``[r]slave``. Only valid for the ``bind`` type.
        no_copy (bool): False if the volume should be populated with the data
          from the target. Default: ``False``. Only valid for the ``volume``
          type.
        labels (dict): User-defined name and labels for the volume. Only valid
          for the ``volume`` type.
        driver_config (DriverConfig): Volume driver configuration. Only valid
          for the ``volume`` type.

    Raises:
        errors.DockerError: On an invalid mount type or incompatible options.
    """
    def __init__(self, target, source, type='volume', read_only=False,
                 propagation=None, no_copy=False, labels=None,
                 driver_config=None):
        self['Target'] = target
        self['Source'] = source
        if type not in ('bind', 'volume'):
            raise errors.DockerError(
                'Only acceptable mount types are `bind` and `volume`.'
            )
        self['Type'] = type
        # Fix: read_only was previously accepted but never stored, so the
        # argument had no effect on the resulting mount spec.
        self['ReadOnly'] = read_only

        if type == 'bind':
            if propagation is not None:
                self['BindOptions'] = {
                    'Propagation': propagation
                }
            if any([labels, driver_config, no_copy]):
                raise errors.DockerError(
                    'Mount type is binding but volume options have been '
                    'provided.'
                )
        else:
            volume_opts = {}
            if no_copy:
                volume_opts['NoCopy'] = True
            if labels:
                volume_opts['Labels'] = labels
            if driver_config:
                volume_opts['DriverConfig'] = driver_config
            if volume_opts:
                self['VolumeOptions'] = volume_opts
            if propagation:
                raise errors.DockerError(
                    'Mount type is volume but `propagation` argument has been '
                    'provided.'
                )

    @classmethod
    def parse_mount_string(cls, string):
        """Build a Mount from a ``source:target[:ro|rw]`` string.

        A single-component string is treated as a target-only (anonymous
        volume) mount.

        Raises:
            errors.DockerError: If the string has more than three parts.
        """
        parts = string.split(':')
        if len(parts) > 3:
            raise errors.DockerError(
                'Invalid mount format "{0}"'.format(string)
            )
        if len(parts) == 1:
            # Fix: `source` has no default, so it must be passed explicitly;
            # the original call raised TypeError for one-part strings.
            return cls(target=parts[0], source=None)
        else:
            target = parts[1]
            source = parts[0]
            # Fix: the original `not (len(parts) == 3 or parts[2] == 'ro')`
            # raised IndexError for two-part strings and always produced
            # read_only=False for three-part strings.
            read_only = len(parts) == 3 and parts[2] == 'ro'
            return cls(target, source, read_only=read_only)
class Resources(dict):
    """
    Configures resource allocation for containers when made part of a
    :py:class:`~docker.types.ContainerSpec`.

    Args:
        cpu_limit (int): CPU limit in units of 10^9 CPU shares.
        mem_limit (int): Memory limit in Bytes.
        cpu_reservation (int): CPU reservation in units of 10^9 CPU shares.
        mem_reservation (int): Memory reservation in Bytes.
    """
    def __init__(self, cpu_limit=None, mem_limit=None, cpu_reservation=None,
                 mem_reservation=None):
        # Build each section from only the values that were provided.
        limits = {
            key: value
            for key, value in (('NanoCPUs', cpu_limit),
                               ('MemoryBytes', mem_limit))
            if value is not None
        }
        reservation = {
            key: value
            for key, value in (('NanoCPUs', cpu_reservation),
                               ('MemoryBytes', mem_reservation))
            if value is not None
        }
        # Empty sections are omitted entirely.
        if limits:
            self['Limits'] = limits
        if reservation:
            self['Reservations'] = reservation
class UpdateConfig(dict):
    """
    Used to specify the way container updates should be performed by a service.

    Args:
        parallelism (int): Maximum number of tasks to be updated in one
          iteration (0 means unlimited parallelism). Default: 0.
        delay (int): Amount of time between updates.
        failure_action (string): Action to take if an updated task fails to
          run, or stops running during the update. Acceptable values are
          ``continue`` and ``pause``. Default: ``continue``
    """
    def __init__(self, parallelism=0, delay=None, failure_action='continue'):
        self['Parallelism'] = parallelism
        if delay is not None:
            self['Delay'] = delay
        # Only the two documented actions are accepted.
        if failure_action in ('pause', 'continue'):
            self['FailureAction'] = failure_action
        else:
            raise errors.DockerError(
                'failure_action must be either `pause` or `continue`.'
            )
class RestartConditionTypesEnum(object):
    """Enumerates the restart conditions accepted by RestartPolicy."""
    _values = ('none', 'on-failure', 'any')
    NONE, ON_FAILURE, ANY = _values
class RestartPolicy(dict):
    """
    Used when creating a :py:class:`~docker.types.ContainerSpec`,
    dictates whether a container should restart after stopping or failing.

    Args:
        condition (string): Condition for restart (``none``, ``on-failure``,
          or ``any``). Default: `none`.
        delay (int): Delay between restart attempts. Default: 0
        attempts (int): Maximum attempts to restart a given container before
          giving up. Default value is 0, which is ignored.
        window (int): Time window used to evaluate the restart policy. Default
          value is 0, which is unbounded.

    Raises:
        TypeError: If ``condition`` is not one of the enumerated values.
    """
    condition_types = RestartConditionTypesEnum

    def __init__(self, condition=RestartConditionTypesEnum.NONE, delay=0,
                 max_attempts=0, window=0):
        # Reject anything outside the known condition values up front.
        if condition not in self.condition_types._values:
            raise TypeError(
                'Invalid RestartPolicy condition {0}'.format(condition)
            )
        self.update(Condition=condition, Delay=delay,
                    MaxAttempts=max_attempts, Window=window)
class DriverConfig(dict):
    """
    Indicates which driver to use, as well as its configuration. Can be used
    as ``log_driver`` in a :py:class:`~docker.types.ContainerSpec`,
    and for the `driver_config` in a volume
    :py:class:`~docker.types.Mount`.

    Args:
        name (string): Name of the driver to use.
        options (dict): Driver-specific options. Default: ``None``.
    """
    def __init__(self, name, options=None):
        self['Name'] = name
        # 'Options' is omitted entirely when no options were supplied.
        if not options:
            return
        self['Options'] = options
class EndpointSpec(dict):
    """
    Describes properties to access and load-balance a service.

    Args:
        mode (string): The mode of resolution to use for internal load
          balancing between tasks (``'vip'`` or ``'dnsrr'``). Defaults to
          ``'vip'`` if not provided.
        ports (dict): Exposed ports that this service is accessible on from
          the outside, in the form of ``{ published_port: target_port }`` or
          ``{ published_port: (target_port, protocol) }``. Ports can only be
          provided if the ``vip`` resolution mode is used.
    """
    # Fix (docs): the previous docstring described the mapping as
    # { target_port: published_port }, but convert_service_ports uses the
    # dict KEY as 'PublishedPort' and the value as 'TargetPort'.
    def __init__(self, mode=None, ports=None):
        if ports:
            self['Ports'] = convert_service_ports(ports)
        if mode:
            self['Mode'] = mode
def convert_service_ports(ports):
    """
    Normalize a port mapping into the API's list-of-dicts form.

    Args:
        ports: Either a list of port-spec dicts (returned unchanged) or a
            dict mapping ``published_port`` to ``target_port`` or to a
            ``(target_port, protocol)`` tuple. Protocol defaults to ``tcp``.

    Returns:
        list: Port specifications as expected by the Docker API.

    Raises:
        TypeError: If ``ports`` is neither a dict nor a list.
    """
    if isinstance(ports, list):
        return ports
    if not isinstance(ports, dict):
        raise TypeError(
            'Invalid type for ports, expected dict or list'
        )
    result = []
    # Fix: plain .items() works on both Python 2 and 3, removing the
    # needless dependency on six.iteritems in this function.
    for published, target in ports.items():
        port_spec = {
            'Protocol': 'tcp',
            'PublishedPort': published
        }
        if isinstance(target, tuple):
            port_spec['TargetPort'] = target[0]
            if len(target) == 2:
                port_spec['Protocol'] = target[1]
        else:
            port_spec['TargetPort'] = target
        result.append(port_spec)
    return result
| jarv/cmdchallenge-site | lambda_src/runcmd/docker/types/services.py | Python | mit | 11,780 |
import sys
class Solution:
    def lengthOfLastWord(self, s: str) -> int:
        """Return the length of the final space-delimited word in ``s``.

        Leading/trailing whitespace is ignored; an empty or all-whitespace
        string yields 0.
        """
        trimmed = s.strip()
        if not trimmed:
            return 0
        # rsplit with maxsplit=1 isolates just the last word.
        return len(trimmed.rsplit(' ', 1)[-1])
if __name__ == "__main__":
    sol = Solution()
    # Fix: renamed from `input`, which shadowed the builtin of the same name.
    text = sys.argv[1]
    print(sol.lengthOfLastWord(text))
| shenfei/oj_codes | leetcode/python/n58_Length_of_Last_Word.py | Python | mit | 308 |
from django.db import models
from django.contrib.auth.models import User
class DocumentsExamined(models.Model):
    """Relevance judgement recorded when a user examines a document
    during a search task."""
    # NOTE(review): ForeignKey without on_delete — valid on Django < 2.0,
    # a required argument from 2.0 onward; confirm target Django version.
    user = models.ForeignKey(User)
    title = models.CharField(max_length=200)
    docid = models.CharField(max_length=30)
    doc_num = models.CharField(max_length=30)
    # integer judgement code; value semantics defined by the experiment logic
    judgement = models.IntegerField()
    judgement_date = models.DateTimeField('Date Examined')
    url = models.CharField(max_length=200)
    task = models.IntegerField(default=0)
    topic_num = models.IntegerField(default=0)

    def __unicode__(self):
        # Python 2 string representation: identify rows by document id
        return self.docid
class TaskDescription(models.Model):
    """A search task/topic description shown to study participants."""
    topic_num = models.IntegerField(default=0)
    title = models.CharField(max_length=100)
    description = models.CharField(max_length=1500)
    diversify = models.CharField(max_length=1500, default="")

    def __unicode__(self):
        return self.title
class TopicQuerySuggestion(models.Model):
    """A suggested query (title + link) associated with a topic."""
    topic_num = models.IntegerField(default=0)
    title = models.CharField(max_length=40)
    link = models.CharField(max_length=150)

    def __unicode__(self):
        return self.title
class UserProfile(models.Model):
    """Per-user experiment state, linked one-to-one to the auth User."""
    # This field is required.
    user = models.OneToOneField(User, related_name='profile')
    # Other fields here
    data = models.CharField(max_length=200, null=True, blank=True)
    experiment = models.IntegerField(default=0)
    condition = models.IntegerField(default=0)
    rotation = models.IntegerField(default=0)
    # progress counters updated as the participant works through tasks
    tasks_completed = models.IntegerField(default=0)
    steps_completed = models.IntegerField(default=0)

    def __unicode__(self):
        return self.user.username
# def create_user_profile(sender, instance, created, **kwargs):
# if created:
# UserProfile.objects.create(user=instance)
#post_save.connect(create_user_profile, sender=User)
| leifos/treconomics | treconomics_project/treconomics/models.py | Python | mit | 1,842 |
# -*- coding: utf-8 -*-
"""
javascript.timeago_filter
~~~~~~~~~~~~~~~~~~~~~~~~~
A Filter for the jQuery Timeago Plugin
http://flask.pocoo.org/snippets/49/
"""
import os
import sys
sys.path.insert(0, os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from flask import request, Response
from app import app
"""
Timeago is a jQuery plugin that makes it easy to support automatically
updating fuzzy timestamps (e.g. “4 minutes ago” or “about 1 day ago”).
It automatically keeps them updated and only needs a very basic span
tag or something similar with a certain class and title attribute.
For instance
<span class=timeago title="2008-07-17T09:24:17Z">...</span>
turns into something like this:
<span class=timeago title="July 17, 2008">2 years ago</span>
"""
@app.template_filter()
def datetimeformat(datetime, timeago=True):
    """Render a datetime as ``YYYY-MM-DD @ HH:MM``; when ``timeago`` is
    true, wrap it in a span carrying the ISO-8601 timestamp that the
    jQuery timeago plugin reads from the title attribute."""
    readable = datetime.strftime('%Y-%m-%d @ %H:%M')
    if timeago:
        iso_stamp = datetime.strftime('%Y-%m-%dT%H:%M:%SZ')
        return '<span class=timeago title="%s">%s</span>' % (
            iso_stamp,
            readable
        )
    return readable
"""
Usage:
<p class=date>Date: {{ the_date|datetimeformat }}
$(function() {
$('span.timeago').timeago();
});
"""
@app.route('/')
def index():
    """Trivial index view used to exercise the app."""
    return 'index'
if __name__ == "__main__":
    # run the Flask development server when executed directly
    app.run()
| fengsp/flask-snippets | javascript/filter_for_timeago.py | Python | bsd-3-clause | 1,367 |
# -*- coding: utf-8 -*-
#
# Sphinx build configuration for the ``regulations`` documentation,
# originally created by sphinx-quickstart.
#
# This file is execfile()d with the current directory set to its
# containing dir.  Only values that differ from the Sphinx defaults are
# set below; see the Sphinx configuration reference for everything else.

import os
import sys

# Make the project importable and point Django at the dev settings so
# sphinx.ext.autodoc can import the documented modules.
sys.path.insert(0, os.path.abspath('..'))
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'regulations.settings.dev')

# -- General configuration -------------------------------------------------

extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.viewcode',
]

templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'

# General information about the project.
project = u'regulations'
copyright = u'2014, Author'

# Short X.Y version and full release string (both intentionally empty).
version = ''
release = ''

exclude_patterns = ['_build']
pygments_style = 'sphinx'

# -- Options for HTML output -----------------------------------------------

html_theme = 'default'
html_static_path = ['_static']

# Output file base name for HTML help builder.
htmlhelp_basename = 'regulationsdoc'

# -- Options for LaTeX output ----------------------------------------------

latex_elements = {}

# (source start file, target name, title, author,
#  documentclass [howto, manual, or own class]).
latex_documents = [
    ('index', 'regulations.tex', u'regulations Documentation',
     u'Author', 'manual'),
]

# -- Options for manual page output ----------------------------------------

# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'regulations', u'regulations Documentation',
     [u'Author'], 1)
]

# -- Options for Texinfo output --------------------------------------------

# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    ('index', 'regulations', u'regulations Documentation',
     u'Author', 'regulations', 'One line description of project.',
     'Miscellaneous'),
]

# -- Options for Epub output -----------------------------------------------

# Bibliographic Dublin Core info.
epub_title = u'regulations'
epub_author = u'Author'
epub_publisher = u'Author'
epub_copyright = u'2014, Author'

epub_exclude_files = ['search.html']
| ascott1/regulations-site | docs/conf.py | Python | cc0-1.0 | 10,364 |
__author__ = 'Tom'
import math
from Tkinter import *
from assembly_line_scheduling import *
class AssemblyLineCanvas:
    """Tkinter canvas rendering of an assembly-line scheduling problem.

    Draws the two assembly lines with their stations, entry/exit nodes
    and transfer edges, then shades the fastest way reported by the
    ``als`` scheduler object (expected to expose ``num_stations``,
    ``assembly_lines``, ``fastest_way()`` and ``fastest_line``).
    """

    def __init__(self, root, als):
        self.root = root
        self.als = als
        self.setup_root()
        self.draw_factory()

    def setup_root(self):
        """Configure the toplevel window and create the drawing canvas."""
        self.root.title("Assembly-line scheduling")
        self.root.resizable(0, 0)
        # make the width of the canvas dependent upon the assembly line length
        self.canvas = Canvas(self.root, width=250 + 150 * (self.als.num_stations - 1), height=500)
        self.canvas.pack(fill='both', expand='yes')
        self.canvas.configure(bg='white')

    def draw_factory(self):
        """Draw both assembly lines: edges, arrows, stations and transfers."""
        for i, assembly_line in enumerate(self.als.assembly_lines):
            # draw lines from entries to first stations
            self.canvas.create_line(50, 150 + 200 * i, 125, 75 + 350 * i)
            # draw arrow
            self.draw_arrow(125, 75 + 350 * i, math.pi / 4 if not i else -math.pi / 4)
            # draw lines from last station to exit
            # make exit station distance from assembly line symmetric with
            # the enter station's distance.
            self.canvas.create_line(
                125 + 150 * (self.als.num_stations - 1),
                75 + 350 * i,
                200 + 150 * (self.als.num_stations - 1),
                150 + 200 * i)
            # draw arrow at the end of the exit line.
            # BUG FIX: the x coordinate was hard-coded to 950, which is only
            # correct when there are exactly six stations; derive it from the
            # station count so it always lands on the exit line drawn above.
            self.draw_arrow(
                200 + 150 * (self.als.num_stations - 1),
                150 + 200 * i,
                -math.pi / 4 if not i else math.pi / 4)
            # draw entry points
            self.draw_circle(25, 125 + 200 * i, text=str(assembly_line.entry_time))
            for j, station in enumerate(assembly_line.stations):
                if station.transfer_time is not None:
                    # draw transfer lines (lowered so nodes are drawn on top)
                    self.canvas.tag_lower(
                        self.canvas.create_line(125 + 150 * j, 75 + 350 * i, 275 + 150 * j, 75 + 350 * ((i + 1) % 2)))
                    # draw arrows on stations
                    self.draw_arrow(
                        275 + 150 * j,
                        75 + 350 * i,
                        math.atan(float(7) / 3) if not i else -math.atan(float(7) / 3))
                    # draw arrows on transfers
                    self.draw_arrow(
                        179 + 150 * j,
                        300 - 100 * i,
                        math.atan(float(7) / 3) if not i else -math.atan(float(7) / 3))
                    # create lines amongst stations
                    self.canvas.create_line(150 + 150 * j, 75 + 350 * i, 250 + 150 * j, 75 + 350 * i)
                    # create arrow to next station on line
                    self.draw_arrow(275 + 150 * j, 75 + 350 * i, 0)
                    # draw transfer node
                    self.draw_circle(154 + 150 * j, 175 + 100 * i, text=str(station.transfer_time))
                # draw station
                self.draw_circle(100 + 150 * j, 50 + 350 * i, text=str(station))
            # draw exit points
            self.draw_circle(175 + 150 * (self.als.num_stations - 1), 125 + 200 * i, text=str(assembly_line.exit_time))
        self.draw_fastest_way()
        # draw assembly-line bounding rectangles
        # make width of the rectangles proportional to number of stations
        # on the line
        for i in range(2):
            self.canvas.tag_lower(self.canvas.create_rectangle(
                95,
                45 + 350 * i,
                155 + 150 * (self.als.num_stations - 1),
                105 + 350 * i, fill="gray"))

    def draw_circle(self, x, y, text):
        """Draw a 50x50 labelled circle with top-left corner at (x, y)."""
        self.canvas.create_oval(x, y, x + 50, y + 50, fill="light grey")
        self.canvas.create_text(x + 25, y + 25, text=text)

    def draw_arrow(self, x, y, r):
        """Draw an arrowhead near (x, y) for an edge with slope angle ``r``.

        ``r`` is in radians; the head is offset 25px back along the edge.
        """
        self.canvas.create_polygon(
            # base point
            x - 25 * math.cos(r),
            y + 25 * math.sin(r),
            # top-right point
            x - 25 * math.cos(r) + 10 * math.cos(3 * math.pi / 4 - r),
            y + 25 * math.sin(r) + 10 * math.sin(3 * math.pi / 4 - r),
            # top-left point
            x - 25 * math.cos(r) - 10 * math.cos(r - math.pi / 4),
            y + 25 * math.sin(r) + 10 * math.sin(r - math.pi / 4),
            fill="black"
        )

    def draw_fastest_way(self):
        """Highlight the fastest path, walking backwards from the exit."""
        self.als.fastest_way()
        i = self.als.fastest_line
        # exit edge of the fastest line
        self.draw_highlighted_line(
            200 + 150 * (self.als.num_stations - 1),
            150 + 200 * i,
            125 + 150 * (self.als.num_stations - 1),
            75 + 350 * i,
            math.pi / 4 if not i else -math.pi / 4)
        for j in range(self.als.num_stations - 2, -1, -1):
            prev_line = self.als.assembly_lines[i].fastest_lines[j]
            if i != prev_line:
                # the path switches lines: highlight the diagonal transfer
                self.draw_highlighted_line(
                    125 + 150 * (j + 1),
                    75 + 350 * i,
                    125 + 150 * j,
                    75 + 350 * prev_line,
                    math.atan(float(7) / 3) if not prev_line else -math.atan(float(7) / 3))
            else:
                # the path stays on the same line: highlight the straight edge
                self.draw_highlighted_line(
                    125 + 150 * (j + 1),
                    75 + 350 * i,
                    125 + 150 * j,
                    75 + 350 * prev_line,
                    0)
            if not j:
                # first station reached: highlight the entry edge
                self.draw_highlighted_line(
                    125,
                    75 + 350 * prev_line,
                    50,
                    150 + 200 * prev_line,
                    -math.pi / 4 if not prev_line else math.pi / 4)
            i = prev_line

    def draw_highlighted_line(self, x1, y1, x2, y2, r):
        """Draw a thick shaded band from (x1, y1) to (x2, y2).

        ``r`` is the segment's slope angle; the band is a polygon offset
        5*sqrt(2)px perpendicular to the segment, lowered under the nodes.
        """
        # TODO: Calculate the slope, r, in this method
        self.canvas.tag_lower(
            self.canvas.create_polygon(
                x1 + 5 * math.sqrt(2) * math.cos(math.pi / 2 - r),
                y1 - 5 * math.sqrt(2) * math.sin(math.pi / 2 - r),
                x1 - 5 * math.sqrt(2) * math.cos(math.pi / 2 - r),
                y1 + 5 * math.sqrt(2) * math.sin(math.pi / 2 - r),
                x2 - 5 * math.sqrt(2) * math.cos(math.pi / 2 - r),
                y2 + 5 * math.sqrt(2) * math.sin(math.pi / 2 - r),
                x2 + 5 * math.sqrt(2) * math.cos(math.pi / 2 - r),
                y2 - 5 * math.sqrt(2) * math.sin(math.pi / 2 - r),
                fill="gray20")
        )
if __name__ == "__main__":
    import argparse

    def _main():
        """Parse the input file argument and launch the scheduling GUI."""
        arg_parser = argparse.ArgumentParser(
            description='Finds the fastest way through a factory')
        arg_parser.add_argument('infile', type=argparse.FileType('r'))
        parsed = arg_parser.parse_args()
        lines = read_assembly_lines_from_file(parsed.infile)
        # Guard clause: nothing to display if the file yielded no lines.
        if lines is None:
            return
        scheduler = AssemblyLineScheduler(*lines)
        window = Tk()
        AssemblyLineCanvas(window, scheduler)
        window.mainloop()

    _main()
| tjtrebat/algorithms | algorithms/dynamic_programming/assembly_line_scheduling/assembly_line_canvas.py | Python | gpl-2.0 | 6,751 |
# Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for compute resource tracking."""
import copy
import datetime
import six
import uuid
import mock
from oslo_config import cfg
from oslo_serialization import jsonutils
from oslo_utils import timeutils
from nova.compute.monitors import base as monitor_base
from nova.compute import resource_tracker
from nova.compute import resources
from nova.compute import task_states
from nova.compute import vm_states
from nova import context
from nova import db
from nova import exception
from nova import objects
from nova.objects import base as obj_base
from nova.objects import fields
from nova.objects import pci_device_pool
from nova import rpc
from nova import test
from nova.tests.unit.pci import fakes as pci_fakes
from nova.virt import driver
# Resources advertised by FakeVirtDriver (memory in MB, disk in GB).
FAKE_VIRT_MEMORY_MB = 5
FAKE_VIRT_MEMORY_OVERHEAD = 1
FAKE_VIRT_MEMORY_WITH_OVERHEAD = (
    FAKE_VIRT_MEMORY_MB + FAKE_VIRT_MEMORY_OVERHEAD)
# Two-cell host NUMA topology with no usage recorded yet.
FAKE_VIRT_NUMA_TOPOLOGY = objects.NUMATopology(
    cells=[objects.NUMACell(id=0, cpuset=set([1, 2]), memory=3072,
                            cpu_usage=0, memory_usage=0, mempages=[],
                            siblings=[], pinned_cpus=set([])),
           objects.NUMACell(id=1, cpuset=set([3, 4]), memory=3072,
                            cpu_usage=0, memory_usage=0, mempages=[],
                            siblings=[], pinned_cpus=set([]))])
# Oversubscription limits used when claiming against the topology above.
FAKE_VIRT_NUMA_TOPOLOGY_OVERHEAD = objects.NUMATopologyLimits(
    cpu_allocation_ratio=2, ram_allocation_ratio=2)
ROOT_GB = 5
EPHEMERAL_GB = 1
FAKE_VIRT_LOCAL_GB = ROOT_GB + EPHEMERAL_GB
FAKE_VIRT_VCPUS = 1
# Driver-reported stats; the COERCED form mirrors the same stats with values
# as strings (presumably how they end up stored on the compute node — see
# their use in the test cases).
FAKE_VIRT_STATS = {'virt_stat': 10}
FAKE_VIRT_STATS_COERCED = {'virt_stat': '10'}
FAKE_VIRT_STATS_JSON = jsonutils.dumps(FAKE_VIRT_STATS)
RESOURCE_NAMES = ['vcpu']
CONF = cfg.CONF
class UnsupportedVirtDriver(driver.ComputeDriver):
    """Fake virt driver that reports no resource usage information.

    Used to exercise the resource tracker's "disabled" code path.
    """

    def __init__(self):
        super(UnsupportedVirtDriver, self).__init__(None)

    def get_host_ip_addr(self):
        # Fixed loopback address; the tests never contact a real host.
        return '127.0.0.1'

    def get_available_resource(self, nodename):
        # An empty dict signals that resource reporting is unsupported.
        return {}
class FakeVirtDriver(driver.ComputeDriver):
    """Fake virt driver advertising a fixed, known set of resources.

    Optionally reports PCI devices/stats and arbitrary driver stats so the
    resource tracker's PCI and stats handling can be exercised.
    """

    def __init__(self, pci_support=False, stats=None,
                 numa_topology=FAKE_VIRT_NUMA_TOPOLOGY):
        super(FakeVirtDriver, self).__init__(None)
        self.memory_mb = FAKE_VIRT_MEMORY_MB
        self.local_gb = FAKE_VIRT_LOCAL_GB
        self.vcpus = FAKE_VIRT_VCPUS
        self.numa_topology = numa_topology
        self.memory_mb_used = 0
        self.local_gb_used = 0
        self.pci_support = pci_support
        # Fixture PCI devices, only advertised when pci_support is True.
        # Mix of SR-IOV VFs/PF and a plain PCI device across NUMA nodes.
        self.pci_devices = [
            {
                'label': 'label_8086_0443',
                'dev_type': fields.PciDeviceType.SRIOV_VF,
                'compute_node_id': 1,
                'address': '0000:00:01.1',
                'product_id': '0443',
                'vendor_id': '8086',
                'status': 'available',
                'extra_k1': 'v1',
                'numa_node': 1
            },
            {
                'label': 'label_8086_0443',
                'dev_type': fields.PciDeviceType.SRIOV_VF,
                'compute_node_id': 1,
                'address': '0000:00:01.2',
                'product_id': '0443',
                'vendor_id': '8086',
                'status': 'available',
                'extra_k1': 'v1',
                'numa_node': 1
            },
            {
                'label': 'label_8086_0443',
                'dev_type': fields.PciDeviceType.SRIOV_PF,
                'compute_node_id': 1,
                'address': '0000:00:01.0',
                'product_id': '0443',
                'vendor_id': '8086',
                'status': 'available',
                'extra_k1': 'v1',
                'numa_node': 1
            },
            {
                'label': 'label_8086_0123',
                'dev_type': 'type-PCI',
                'compute_node_id': 1,
                'address': '0000:00:01.0',
                'product_id': '0123',
                'vendor_id': '8086',
                'status': 'available',
                'extra_k1': 'v1',
                'numa_node': 1
            },
            {
                'label': 'label_8086_7891',
                'dev_type': fields.PciDeviceType.SRIOV_VF,
                'compute_node_id': 1,
                'address': '0000:00:01.0',
                'product_id': '7891',
                'vendor_id': '8086',
                'status': 'available',
                'extra_k1': 'v1',
                'numa_node': None
            },
        ] if self.pci_support else []
        # Matching aggregate pools (two 0443 VFs pass the whitelist; the PF
        # and the plain PCI device do not contribute a pool here).
        self.pci_stats = [
            {
                'count': 2,
                'vendor_id': '8086',
                'product_id': '0443',
                'numa_node': 1
            },
            {
                'count': 1,
                'vendor_id': '8086',
                'product_id': '7891',
                'numa_node': None
            },
        ] if self.pci_support else []
        # Only set the attribute when given, so tests can distinguish a
        # driver that reports stats from one that does not (hasattr check
        # in get_available_resource below).
        if stats is not None:
            self.stats = stats

    def get_host_ip_addr(self):
        return '127.0.0.1'

    def get_available_resource(self, nodename):
        """Return the fixed resource dict the tracker will consume."""
        d = {
            'vcpus': self.vcpus,
            'memory_mb': self.memory_mb,
            'local_gb': self.local_gb,
            'vcpus_used': 0,
            'memory_mb_used': self.memory_mb_used,
            'local_gb_used': self.local_gb_used,
            'hypervisor_type': 'fake',
            'hypervisor_version': 0,
            'hypervisor_hostname': 'fakehost',
            'cpu_info': '',
            'numa_topology': (
                self.numa_topology._to_json() if self.numa_topology else None),
        }
        if self.pci_support:
            d['pci_passthrough_devices'] = jsonutils.dumps(self.pci_devices)
        if hasattr(self, 'stats'):
            d['stats'] = self.stats
        return d

    def estimate_instance_overhead(self, instance_info):
        """Report a constant memory overhead per instance."""
        instance_info['memory_mb']  # make sure memory value is present
        overhead = {
            'memory_mb': FAKE_VIRT_MEMORY_OVERHEAD
        }
        return overhead  # just return a constant value for testing
class BaseTestCase(test.TestCase):
    """Shared fixtures: fake instances, flavors and a db compute node."""

    @mock.patch('stevedore.enabled.EnabledExtensionManager')
    def setUp(self, _mock_ext_mgr):
        super(BaseTestCase, self).setUp()
        self.flags(reserved_host_disk_mb=0,
                   reserved_host_memory_mb=0)
        self.context = context.get_admin_context()
        # Whitelist matches the FakeVirtDriver PCI fixtures.
        self.flags(pci_passthrough_whitelist=[
            '{"vendor_id": "8086", "product_id": "0443"}',
            '{"vendor_id": "8086", "product_id": "7891"}'])
        self.flags(use_local=True, group='conductor')
        self.conductor = self.start_service('conductor',
                                            manager=CONF.conductor.manager)
        # In-memory stores the _fake_* stubs below read from.
        self._instances = {}
        self._instance_types = {}
        self.stubs.Set(objects.InstanceList, 'get_by_host_and_node',
                       self._fake_instance_get_by_host_and_node)
        self.stubs.Set(self.conductor.db,
                       'flavor_get', self._fake_flavor_get)
        self.host = 'fakehost'
        self.compute = self._create_compute_node()
        # Flags flipped by the fake db update/delete stubs.
        self.updated = False
        self.deleted = False
        self.update_call_count = 0

    def _create_compute_node(self, values=None):
        # This creates a db representation of a compute_node.
        compute = {
            "id": 1,
            "service_id": 1,
            "host": "fakehost",
            "vcpus": 1,
            "memory_mb": 1,
            "local_gb": 1,
            "vcpus_used": 1,
            "memory_mb_used": 1,
            "local_gb_used": 1,
            "free_ram_mb": 1,
            "free_disk_gb": 1,
            "current_workload": 1,
            "running_vms": 0,
            "cpu_info": None,
            "numa_topology": None,
            "stats": '{"num_instances": "1"}',
            "hypervisor_hostname": "fakenode",
            'hypervisor_version': 1,
            'hypervisor_type': 'fake-hyp',
            'disk_available_least': None,
            'host_ip': None,
            'metrics': None,
            'created_at': None,
            'updated_at': None,
            'deleted_at': None,
            'deleted': False,
            'cpu_allocation_ratio': None,
            'ram_allocation_ratio': None,
        }
        if values:
            compute.update(values)
        return compute

    def _create_compute_node_obj(self, context):
        # Use the db representation of a compute node returned
        # by _create_compute_node() to create an equivalent compute
        # node object.
        compute = self._create_compute_node()
        compute_obj = objects.ComputeNode()
        compute_obj = objects.ComputeNode._from_db_object(
            context, compute_obj, compute)
        return compute_obj

    def _create_service(self, host="fakehost", compute=None):
        """Build a fake db service row, optionally joined to a compute."""
        if compute:
            compute = [compute]
        service = {
            "id": 1,
            "host": host,
            "binary": "nova-compute",
            "topic": "compute",
            "compute_node": compute,
            "report_count": 0,
            'disabled': False,
            'disabled_reason': None,
            'created_at': None,
            'updated_at': None,
            'deleted_at': None,
            'deleted': False,
            'last_seen_up': None,
            'forced_down': False,
            'version': 0,
        }
        return service

    def _fake_instance_obj(self, stash=True, flavor=None, **kwargs):
        """Build and register a fake Instance object.

        Defaults to an instance ready to resize to or from the same
        instance_type; extra ``kwargs`` override instance fields.
        """
        flavor = flavor or self._fake_flavor_create()
        if not isinstance(flavor, objects.Flavor):
            flavor = objects.Flavor(**flavor)
        instance_uuid = str(uuid.uuid1())
        instance = objects.Instance(context=self.context, uuid=instance_uuid,
                                    flavor=flavor)
        instance.update({
            'vm_state': vm_states.RESIZED,
            'task_state': None,
            'ephemeral_key_uuid': None,
            'os_type': 'Linux',
            'project_id': '123456',
            'host': None,
            'node': None,
            'instance_type_id': flavor['id'],
            'memory_mb': flavor['memory_mb'],
            'vcpus': flavor['vcpus'],
            'root_gb': flavor['root_gb'],
            'ephemeral_gb': flavor['ephemeral_gb'],
            'launched_on': None,
            'system_metadata': {},
            'availability_zone': None,
            'vm_mode': None,
            'reservation_id': None,
            'display_name': None,
            'default_swap_device': None,
            'power_state': None,
            'access_ip_v6': None,
            'access_ip_v4': None,
            'key_name': None,
            'updated_at': None,
            'cell_name': None,
            'locked': None,
            'locked_by': None,
            'launch_index': None,
            'architecture': None,
            'auto_disk_config': None,
            'terminated_at': None,
            'ramdisk_id': None,
            'user_data': None,
            'cleaned': None,
            'deleted_at': None,
            'id': 333,
            'disable_terminate': None,
            'hostname': None,
            'display_description': None,
            'key_data': None,
            'deleted': None,
            'default_ephemeral_device': None,
            'progress': None,
            'launched_at': None,
            'config_drive': None,
            'kernel_id': None,
            'user_id': None,
            'shutdown_terminate': None,
            'created_at': None,
            'image_ref': None,
            'root_device_name': None,
        })
        # Stash old/new flavors as a resize in progress would.
        if stash:
            instance.old_flavor = flavor
            instance.new_flavor = flavor
        instance.numa_topology = kwargs.pop('numa_topology', None)
        instance.update(kwargs)
        self._instances[instance_uuid] = instance
        return instance

    def _fake_flavor_create(self, **kwargs):
        """Build and register a fake Flavor; ``kwargs`` override fields."""
        instance_type = {
            'id': 1,
            'created_at': None,
            'updated_at': None,
            'deleted_at': None,
            'deleted': False,
            'disabled': False,
            'is_public': True,
            'name': 'fakeitype',
            'memory_mb': FAKE_VIRT_MEMORY_MB,
            'vcpus': FAKE_VIRT_VCPUS,
            'root_gb': ROOT_GB,
            'ephemeral_gb': EPHEMERAL_GB,
            'swap': 0,
            'rxtx_factor': 1.0,
            'vcpu_weight': 1,
            'flavorid': 'fakeflavor',
            'extra_specs': {},
        }
        instance_type.update(**kwargs)
        instance_type = objects.Flavor(**instance_type)
        id_ = instance_type['id']
        self._instance_types[id_] = instance_type
        return instance_type

    def _fake_instance_get_by_host_and_node(self, context, host, nodename,
                                            expected_attrs=None):
        # Return every registered fake instance on the given host.
        return objects.InstanceList(
            objects=[i for i in self._instances.values() if i['host'] == host])

    def _fake_flavor_get(self, ctxt, id_):
        return self._instance_types[id_]

    def _fake_compute_node_update(self, ctx, compute_node_id, values,
                                  prune_stats=False):
        # Record the update and mirror it into the fake db row.
        self.update_call_count += 1
        self.updated = True
        self.compute.update(values)
        return self.compute

    def _driver(self):
        # Overridden by subclasses to swap in a different fake driver.
        return FakeVirtDriver()

    def _tracker(self, host=None):
        """Build a ResourceTracker wired to the fake driver and node."""
        if host is None:
            host = self.host
        node = "fakenode"
        driver = self._driver()
        tracker = resource_tracker.ResourceTracker(host, driver, node)
        tracker.compute_node = self._create_compute_node_obj(self.context)
        tracker.ext_resources_handler = \
            resources.ResourceHandler(RESOURCE_NAMES, True)
        return tracker
class UnsupportedDriverTestCase(BaseTestCase):
    """Resource tracking should be disabled when the virt driver doesn't
    support it.
    """

    def setUp(self):
        super(UnsupportedDriverTestCase, self).setUp()
        self.tracker = self._tracker()
        # seed tracker with data:
        self.tracker.update_available_resource(self.context)

    def _driver(self):
        # Driver that reports no resources, disabling the tracker.
        return UnsupportedVirtDriver()

    def test_disabled(self):
        # disabled = no compute node stats
        self.assertTrue(self.tracker.disabled)
        self.assertIsNone(self.tracker.compute_node)

    def test_disabled_claim(self):
        # basic claim: a disabled tracker hands out a zero (NOP) claim
        instance = self._fake_instance_obj()
        with mock.patch.object(instance, 'save'):
            claim = self.tracker.instance_claim(self.context, instance)
        self.assertEqual(0, claim.memory_mb)

    def test_disabled_instance_claim(self):
        # instance variation:
        instance = self._fake_instance_obj()
        with mock.patch.object(instance, 'save'):
            claim = self.tracker.instance_claim(self.context, instance)
        self.assertEqual(0, claim.memory_mb)

    @mock.patch('nova.objects.Instance.save')
    def test_disabled_instance_context_claim(self, mock_save):
        # instance context manager variation:
        instance = self._fake_instance_obj()
        self.tracker.instance_claim(self.context, instance)
        with self.tracker.instance_claim(self.context, instance) as claim:
            self.assertEqual(0, claim.memory_mb)

    def test_disabled_updated_usage(self):
        # update_usage on a disabled tracker must be a silent NOP
        instance = self._fake_instance_obj(host='fakehost', memory_mb=5,
                                           root_gb=10)
        self.tracker.update_usage(self.context, instance)

    def test_disabled_resize_claim(self):
        # resize claims are also zero but still carry migration metadata
        instance = self._fake_instance_obj()
        instance_type = self._fake_flavor_create()
        claim = self.tracker.resize_claim(self.context, instance,
                                          instance_type)
        self.assertEqual(0, claim.memory_mb)
        self.assertEqual(instance['uuid'], claim.migration['instance_uuid'])
        self.assertEqual(instance_type['id'],
                         claim.migration['new_instance_type_id'])

    def test_disabled_resize_context_claim(self):
        instance = self._fake_instance_obj()
        instance_type = self._fake_flavor_create()
        with self.tracker.resize_claim(self.context, instance, instance_type) \
                as claim:
            self.assertEqual(0, claim.memory_mb)
class MissingComputeNodeTestCase(BaseTestCase):
    """Tracker behaviour when no compute node row exists in the db yet."""

    def setUp(self):
        super(MissingComputeNodeTestCase, self).setUp()
        self.tracker = self._tracker()
        self.stubs.Set(db, 'service_get_by_compute_host',
                       self._fake_service_get_by_compute_host)
        self.stubs.Set(db, 'compute_node_get_by_host_and_nodename',
                       self._fake_compute_node_get_by_host_and_nodename)
        self.stubs.Set(db, 'compute_node_create',
                       self._fake_create_compute_node)
        self.tracker.scheduler_client.update_resource_stats = mock.Mock()

    def _fake_create_compute_node(self, context, values):
        # Record that the tracker asked for a new compute node row.
        self.created = True
        return self._create_compute_node(values)

    def _fake_service_get_by_compute_host(self, ctx, host):
        # return a service with no joined compute
        service = self._create_service()
        return service

    def _fake_compute_node_get_by_host_and_nodename(self, ctx, host, nodename):
        # return no compute node
        raise exception.ComputeHostNotFound(host=host)

    def test_create_compute_node(self):
        # With no existing row the tracker must create one.
        self.tracker.compute_node = None
        self.tracker.update_available_resource(self.context)
        self.assertTrue(self.created)

    def test_enabled(self):
        self.tracker.update_available_resource(self.context)
        self.assertFalse(self.tracker.disabled)
class BaseTrackerTestCase(BaseTestCase):

    def setUp(self):
        # setup plumbing for a working resource tracker with required
        # database models and a compatible compute driver:
        super(BaseTrackerTestCase, self).setUp()
        self.tracker = self._tracker()
        self._migrations = {}
        self.stubs.Set(db, 'service_get_by_compute_host',
                       self._fake_service_get_by_compute_host)
        self.stubs.Set(db, 'compute_node_get_by_host_and_nodename',
                       self._fake_compute_node_get_by_host_and_nodename)
        self.stubs.Set(db, 'compute_node_update',
                       self._fake_compute_node_update)
        self.stubs.Set(db, 'compute_node_delete',
                       self._fake_compute_node_delete)
        self.stubs.Set(db, 'migration_update',
                       self._fake_migration_update)
        self.stubs.Set(db, 'migration_get_in_progress_by_host_and_node',
                       self._fake_migration_get_in_progress_by_host_and_node)
        # Note that this must be called before the call to _init_tracker()
        patcher = pci_fakes.fake_pci_whitelist()
        self.addCleanup(patcher.stop)
        self._init_tracker()
        self.limits = self._limits()

    def _fake_service_get_by_compute_host(self, ctx, host):
        self.service = self._create_service(host, compute=self.compute)
        return self.service

    def _fake_compute_node_get_by_host_and_nodename(self, ctx, host, nodename):
        self.compute = self._create_compute_node()
        return self.compute

    def _fake_compute_node_update(self, ctx, compute_node_id, values,
                                  prune_stats=False):
        # Track update calls and mirror the values into the fake db row.
        self.update_call_count += 1
        self.updated = True
        self.compute.update(values)
        return self.compute

    def _fake_compute_node_delete(self, ctx, compute_node_id):
        self.deleted = True
        self.compute.update({'deleted': 1})
        return self.compute

    def _fake_migration_get_in_progress_by_host_and_node(self, ctxt, host,
                                                         node):
        # Return all registered migrations not in a terminal status,
        # with the matching fake instance joined in.
        status = ['confirmed', 'reverted', 'error']
        migrations = []
        for migration in self._migrations.values():
            migration = obj_base.obj_to_primitive(migration)
            if migration['status'] in status:
                continue
            uuid = migration['instance_uuid']
            migration['instance'] = self._instances[uuid]
            migrations.append(migration)
        return migrations

    def _fake_migration_update(self, ctxt, migration_id, values):
        # cheat and assume there's only 1 migration present
        # NOTE(review): values()[0] is Python 2 only — dict views are not
        # indexable on Python 3.
        migration = self._migrations.values()[0]
        migration.update(values)
        return migration

    def _init_tracker(self):
        self.tracker.update_available_resource(self.context)

    def _limits(self, memory_mb=FAKE_VIRT_MEMORY_WITH_OVERHEAD,
                disk_gb=FAKE_VIRT_LOCAL_GB,
                vcpus=FAKE_VIRT_VCPUS,
                numa_topology=FAKE_VIRT_NUMA_TOPOLOGY_OVERHEAD):
        """Create limits dictionary used for oversubscribing resources."""
        return {
            'memory_mb': memory_mb,
            'disk_gb': disk_gb,
            'vcpu': vcpus,
            'numa_topology': numa_topology,
        }

    def assertEqualNUMAHostTopology(self, expected, got):
        """Compare two host NUMA topologies cell by cell on key attrs."""
        attrs = ('cpuset', 'memory', 'id', 'cpu_usage', 'memory_usage')
        if None in (expected, got):
            if expected != got:
                raise AssertionError("Topologies don't match. Expected: "
                                     "%(expected)s, but got: %(got)s" %
                                     {'expected': expected, 'got': got})
            else:
                return
        if len(expected) != len(got):
            raise AssertionError("Topologies don't match due to different "
                                 "number of cells. Expected: "
                                 "%(expected)s, but got: %(got)s" %
                                 {'expected': expected, 'got': got})
        for exp_cell, got_cell in zip(expected.cells, got.cells):
            for attr in attrs:
                if getattr(exp_cell, attr) != getattr(got_cell, attr):
                    raise AssertionError("Topologies don't match. Expected: "
                                         "%(expected)s, but got: %(got)s" %
                                         {'expected': expected, 'got': got})

    def assertEqualPciDevicePool(self, expected, observed):
        self.assertEqual(expected.product_id, observed.product_id)
        self.assertEqual(expected.vendor_id, observed.vendor_id)
        self.assertEqual(expected.tags, observed.tags)
        self.assertEqual(expected.count, observed.count)

    def assertEqualPciDevicePoolList(self, expected, observed):
        ex_objs = expected.objects
        ob_objs = observed.objects
        self.assertEqual(len(ex_objs), len(ob_objs))
        for i in range(len(ex_objs)):
            self.assertEqualPciDevicePool(ex_objs[i], ob_objs[i])

    def _assert(self, value, field, tracker=None):
        """Assert a single compute-node field equals ``value``."""
        if tracker is None:
            tracker = self.tracker
        if field not in tracker.compute_node:
            raise test.TestingException(
                "'%(field)s' not in compute node." % {'field': field})
        x = tracker.compute_node[field]
        # NUMA topology is stored serialized, so deserialize before comparing.
        if field == 'numa_topology':
            self.assertEqualNUMAHostTopology(
                value, objects.NUMATopology.obj_from_db_obj(x))
        else:
            self.assertEqual(value, x)
class TrackerTestCase(BaseTrackerTestCase):
    """Basic tracker behaviour with the default FakeVirtDriver."""

    def test_free_ram_resource_value(self):
        driver = FakeVirtDriver()
        mem_free = driver.memory_mb - driver.memory_mb_used
        self.assertEqual(mem_free, self.tracker.compute_node.free_ram_mb)

    def test_free_disk_resource_value(self):
        driver = FakeVirtDriver()
        mem_free = driver.local_gb - driver.local_gb_used
        self.assertEqual(mem_free, self.tracker.compute_node.free_disk_gb)

    def test_update_compute_node(self):
        self.assertFalse(self.tracker.disabled)
        self.assertTrue(self.updated)

    def test_init(self):
        # After _init_tracker() the compute node mirrors the driver's
        # advertised resources with zero usage.
        driver = self._driver()
        self._assert(FAKE_VIRT_MEMORY_MB, 'memory_mb')
        self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb')
        self._assert(FAKE_VIRT_VCPUS, 'vcpus')
        self._assert(FAKE_VIRT_NUMA_TOPOLOGY, 'numa_topology')
        self._assert(0, 'memory_mb_used')
        self._assert(0, 'local_gb_used')
        self._assert(0, 'vcpus_used')
        self._assert(0, 'running_vms')
        self._assert(FAKE_VIRT_MEMORY_MB, 'free_ram_mb')
        self._assert(FAKE_VIRT_LOCAL_GB, 'free_disk_gb')
        self.assertFalse(self.tracker.disabled)
        self.assertEqual(0, self.tracker.compute_node.current_workload)
        expected = pci_device_pool.from_pci_stats(driver.pci_stats)
        self.assertEqual(expected,
                         self.tracker.compute_node.pci_device_pools)

    def test_set_instance_host_and_node(self):
        inst = objects.Instance()
        with mock.patch.object(inst, 'save') as mock_save:
            self.tracker._set_instance_host_and_node(self.context, inst)
            mock_save.assert_called_once_with()
        self.assertEqual(self.tracker.host, inst.host)
        self.assertEqual(self.tracker.nodename, inst.node)
        self.assertEqual(self.tracker.host, inst.launched_on)
class SchedulerClientTrackerTestCase(BaseTrackerTestCase):
    """The tracker only pushes stats to the scheduler when they change."""

    def setUp(self):
        super(SchedulerClientTrackerTestCase, self).setUp()
        self.tracker.scheduler_client.update_resource_stats = mock.Mock()

    def test_update_resource(self):
        # NOTE(pmurray): we are not doing a full pass through the resource
        # trackers update path, so safest to do two updates and look for
        # differences than to rely on the initial state being the same
        # as an update
        urs_mock = self.tracker.scheduler_client.update_resource_stats
        self.tracker._update(self.context)
        urs_mock.reset_mock()
        # change a compute node value to simulate a change
        self.tracker.compute_node.local_gb_used += 1
        self.tracker._update(self.context)
        urs_mock.assert_called_once_with(self.tracker.compute_node)

    def test_no_update_resource(self):
        # NOTE(pmurray): we are not doing a full pass through the resource
        # trackers update path, so safest to do two updates and look for
        # differences than to rely on the initial state being the same
        # as an update
        self.tracker._update(self.context)
        update = self.tracker.scheduler_client.update_resource_stats
        update.reset_mock()
        self.tracker._update(self.context)
        self.assertFalse(update.called, "update_resource_stats should not be "
                         "called when there is no change")
class TrackerPciStatsTestCase(BaseTrackerTestCase):
    """Same init checks as TrackerTestCase, but with PCI devices enabled."""

    def test_update_compute_node(self):
        self.assertFalse(self.tracker.disabled)
        self.assertTrue(self.updated)

    def test_init(self):
        driver = self._driver()
        self._assert(FAKE_VIRT_MEMORY_MB, 'memory_mb')
        self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb')
        self._assert(FAKE_VIRT_VCPUS, 'vcpus')
        self._assert(FAKE_VIRT_NUMA_TOPOLOGY, 'numa_topology')
        self._assert(0, 'memory_mb_used')
        self._assert(0, 'local_gb_used')
        self._assert(0, 'vcpus_used')
        self._assert(0, 'running_vms')
        self._assert(FAKE_VIRT_MEMORY_MB, 'free_ram_mb')
        self._assert(FAKE_VIRT_LOCAL_GB, 'free_disk_gb')
        self.assertFalse(self.tracker.disabled)
        self.assertEqual(0, self.tracker.compute_node.current_workload)
        # The driver's pci_stats fixtures should round-trip into pools.
        expected_pools = pci_device_pool.from_pci_stats(driver.pci_stats)
        observed_pools = self.tracker.compute_node.pci_device_pools
        self.assertEqualPciDevicePoolList(expected_pools, observed_pools)

    def _driver(self):
        # Enable the PCI fixtures on the fake driver.
        return FakeVirtDriver(pci_support=True)
class TrackerExtraResourcesTestCase(BaseTrackerTestCase):
    """Extension-resource handlers writing into the compute node stats."""

    def test_set_empty_ext_resources(self):
        # With no stats attribute present, _write_ext_resources seeds {}.
        resources = self._create_compute_node_obj(self.context)
        del resources.stats
        self.tracker._write_ext_resources(resources)
        self.assertEqual({}, resources.stats)

    def test_set_extra_resources(self):
        def fake_write_resources(resources):
            resources['stats']['resA'] = '123'
            resources['stats']['resB'] = 12

        self.stubs.Set(self.tracker.ext_resources_handler,
                       'write_resources',
                       fake_write_resources)
        resources = self._create_compute_node_obj(self.context)
        del resources.stats
        self.tracker._write_ext_resources(resources)
        expected = {"resA": "123", "resB": "12"}
        # NOTE(review): sorted() on a dict sorts only its keys, so this
        # assertion compares key sets, not values — the "12"-string vs
        # 12-int difference above is never checked.  Presumably stats
        # values are coerced to strings by _write_ext_resources; confirm
        # before tightening this to a full dict comparison.
        self.assertEqual(sorted(expected),
                         sorted(resources.stats))
class InstanceClaimTestCase(BaseTrackerTestCase):
def _instance_topology(self, mem):
mem = mem * 1024
return objects.InstanceNUMATopology(
cells=[objects.InstanceNUMACell(
id=0, cpuset=set([1]), memory=mem),
objects.InstanceNUMACell(
id=1, cpuset=set([3]), memory=mem)])
def _claim_topology(self, mem, cpus=1):
if self.tracker.driver.numa_topology is None:
return None
mem = mem * 1024
return objects.NUMATopology(
cells=[objects.NUMACell(
id=0, cpuset=set([1, 2]), memory=3072, cpu_usage=cpus,
memory_usage=mem, mempages=[], siblings=[],
pinned_cpus=set([])),
objects.NUMACell(
id=1, cpuset=set([3, 4]), memory=3072, cpu_usage=cpus,
memory_usage=mem, mempages=[], siblings=[],
pinned_cpus=set([]))])
    @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
                return_value=objects.InstancePCIRequests(requests=[]))
    def test_instance_claim_with_oversubscription(self, mock_get):
        """A claim above driver capacity succeeds when limits allow it."""
        memory_mb = FAKE_VIRT_MEMORY_MB * 2
        root_gb = ephemeral_gb = FAKE_VIRT_LOCAL_GB
        vcpus = FAKE_VIRT_VCPUS * 2
        claim_topology = self._claim_topology(3)
        instance_topology = self._instance_topology(3)
        # Limits sized exactly to admit the doubled-up instance.
        limits = {'memory_mb': memory_mb + FAKE_VIRT_MEMORY_OVERHEAD,
                  'disk_gb': root_gb * 2,
                  'vcpu': vcpus,
                  'numa_topology': FAKE_VIRT_NUMA_TOPOLOGY_OVERHEAD}
        instance = self._fake_instance_obj(memory_mb=memory_mb,
                                           root_gb=root_gb,
                                           ephemeral_gb=ephemeral_gb,
                                           numa_topology=instance_topology)
        with mock.patch.object(instance, 'save'):
            self.tracker.instance_claim(self.context, instance, limits)
        self.assertEqual(memory_mb + FAKE_VIRT_MEMORY_OVERHEAD,
                         self.tracker.compute_node.memory_mb_used)
        self.assertEqualNUMAHostTopology(
            claim_topology,
            objects.NUMATopology.obj_from_db_obj(
                self.compute['numa_topology']))
        self.assertEqual(root_gb * 2,
                         self.tracker.compute_node.local_gb_used)
    @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
                return_value=objects.InstancePCIRequests(requests=[]))
    @mock.patch('nova.objects.Instance.save')
    def test_additive_claims(self, mock_save, mock_get):
        """Two successive claims accumulate usage on the compute node."""
        self.limits['vcpu'] = 2
        claim_topology = self._claim_topology(2, cpus=2)
        flavor = self._fake_flavor_create(
            memory_mb=1, root_gb=1, ephemeral_gb=0)
        instance_topology = self._instance_topology(1)
        instance = self._fake_instance_obj(
            flavor=flavor, numa_topology=instance_topology)
        with self.tracker.instance_claim(self.context, instance, self.limits):
            pass
        instance = self._fake_instance_obj(
            flavor=flavor, numa_topology=instance_topology)
        with self.tracker.instance_claim(self.context, instance, self.limits):
            pass
        # Usage should be exactly double a single claim (incl. overhead).
        self.assertEqual(2 * (flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD),
                         self.tracker.compute_node.memory_mb_used)
        self.assertEqual(2 * (flavor['root_gb'] + flavor['ephemeral_gb']),
                         self.tracker.compute_node.local_gb_used)
        self.assertEqual(2 * flavor['vcpus'],
                         self.tracker.compute_node.vcpus_used)
        self.assertEqualNUMAHostTopology(
            claim_topology,
            objects.NUMATopology.obj_from_db_obj(
                self.compute['numa_topology']))
    @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
                return_value=objects.InstancePCIRequests(requests=[]))
    @mock.patch('nova.objects.Instance.save')
    def test_context_claim_with_exception(self, mock_save, mock_get):
        """An exception inside the claim context rolls all usage back."""
        instance = self._fake_instance_obj(memory_mb=1, root_gb=1,
                                           ephemeral_gb=1)
        try:
            with self.tracker.instance_claim(self.context, instance):
                # <insert exciting things that utilize resources>
                raise test.TestingException()
        except test.TestingException:
            pass
        # Both the tracker object and the fake db row must be back to zero.
        self.assertEqual(0, self.tracker.compute_node.memory_mb_used)
        self.assertEqual(0, self.tracker.compute_node.local_gb_used)
        self.assertEqual(0, self.compute['memory_mb_used'])
        self.assertEqual(0, self.compute['local_gb_used'])
        self.assertEqualNUMAHostTopology(
            FAKE_VIRT_NUMA_TOPOLOGY,
            objects.NUMATopology.obj_from_db_obj(
                self.compute['numa_topology']))
    @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
                return_value=objects.InstancePCIRequests(requests=[]))
    @mock.patch('nova.objects.Instance.save')
    @mock.patch('nova.objects.InstanceList.get_by_host_and_node')
    def test_instance_context_claim(self, mock_get_all, mock_save, mock_get):
        # Usage must be visible while the claim context is open, and the
        # same totals must survive a subsequent resource audit.
        flavor = self._fake_flavor_create(
            memory_mb=1, root_gb=2, ephemeral_gb=3)
        claim_topology = self._claim_topology(1)
        instance_topology = self._instance_topology(1)
        instance = self._fake_instance_obj(
            flavor=flavor, numa_topology=instance_topology)
        with self.tracker.instance_claim(self.context, instance):
            # <insert exciting things that utilize resources>
            self.assertEqual(flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD,
                             self.tracker.compute_node.memory_mb_used)
            self.assertEqual(flavor['root_gb'] + flavor['ephemeral_gb'],
                             self.tracker.compute_node.local_gb_used)
            self.assertEqual(flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD,
                             self.compute['memory_mb_used'])
            self.assertEqualNUMAHostTopology(
                claim_topology,
                objects.NUMATopology.obj_from_db_obj(
                    self.compute['numa_topology']))
            self.assertEqual(flavor['root_gb'] + flavor['ephemeral_gb'],
                             self.compute['local_gb_used'])
        # after exiting claim context, build is marked as finished. usage
        # totals should be same:
        mock_get_all.return_value = [instance]
        self.tracker.update_available_resource(self.context)
        self.assertEqual(flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD,
                         self.tracker.compute_node.memory_mb_used)
        self.assertEqual(flavor['root_gb'] + flavor['ephemeral_gb'],
                         self.tracker.compute_node.local_gb_used)
        self.assertEqual(flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD,
                         self.compute['memory_mb_used'])
        self.assertEqualNUMAHostTopology(
            claim_topology,
            objects.NUMATopology.obj_from_db_obj(
                self.compute['numa_topology']))
        self.assertEqual(flavor['root_gb'] + flavor['ephemeral_gb'],
                         self.compute['local_gb_used'])
    @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
                return_value=objects.InstancePCIRequests(requests=[]))
    def test_update_load_stats_for_instance(self, mock_get):
        # An instance in SCHEDULING counts toward current_workload; once it
        # settles into ACTIVE with no task state, the workload drops to 0.
        instance = self._fake_instance_obj(task_state=task_states.SCHEDULING)
        with mock.patch.object(instance, 'save'):
            with self.tracker.instance_claim(self.context, instance):
                pass
        self.assertEqual(1, self.tracker.compute_node.current_workload)
        instance['vm_state'] = vm_states.ACTIVE
        instance['task_state'] = None
        instance['host'] = 'fakehost'
        self.tracker.update_usage(self.context, instance)
        self.assertEqual(0, self.tracker.compute_node.current_workload)
    @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
                return_value=objects.InstancePCIRequests(requests=[]))
    @mock.patch('nova.objects.Instance.save')
    def test_cpu_stats(self, mock_save, mock_get):
        # vcpus_used only changes through claims and through update_usage
        # when the instance is DELETED; state changes alone have no effect.
        limits = {'disk_gb': 100, 'memory_mb': 100}
        self.assertEqual(0, self.tracker.compute_node.vcpus_used)
        vcpus = 1
        instance = self._fake_instance_obj(vcpus=vcpus)
        # should not do anything until a claim is made:
        self.tracker.update_usage(self.context, instance)
        self.assertEqual(0, self.tracker.compute_node.vcpus_used)
        with self.tracker.instance_claim(self.context, instance, limits):
            pass
        self.assertEqual(vcpus, self.tracker.compute_node.vcpus_used)
        # instance state can change without modifying vcpus in use:
        instance['task_state'] = task_states.SCHEDULING
        self.tracker.update_usage(self.context, instance)
        self.assertEqual(vcpus, self.tracker.compute_node.vcpus_used)
        add_vcpus = 10
        vcpus += add_vcpus
        instance = self._fake_instance_obj(vcpus=add_vcpus)
        with self.tracker.instance_claim(self.context, instance, limits):
            pass
        self.assertEqual(vcpus, self.tracker.compute_node.vcpus_used)
        # deleting the instance releases its vcpus on the next update:
        instance['vm_state'] = vm_states.DELETED
        self.tracker.update_usage(self.context, instance)
        vcpus -= add_vcpus
        self.assertEqual(vcpus, self.tracker.compute_node.vcpus_used)
    def test_skip_deleted_instances(self):
        # ensure that the audit process skips instances that have vm_state
        # DELETED, but the DB record is not yet deleted.
        self._fake_instance_obj(vm_state=vm_states.DELETED, host=self.host)
        self.tracker.update_available_resource(self.context)
        # Nothing should be counted toward usage for the deleted instance.
        self.assertEqual(0, self.tracker.compute_node.memory_mb_used)
        self.assertEqual(0, self.tracker.compute_node.local_gb_used)
    @mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node')
    def test_deleted_instances_with_migrations(self, mock_migration_list):
        # A resize migration pointing at a nonexistent instance must not
        # add any usage during the audit.
        migration = objects.Migration(context=self.context,
                                      migration_type='resize',
                                      instance_uuid='invalid')
        mock_migration_list.return_value = [migration]
        self.tracker.update_available_resource(self.context)
        self.assertEqual(0, self.tracker.compute_node.memory_mb_used)
        self.assertEqual(0, self.tracker.compute_node.local_gb_used)
        mock_migration_list.assert_called_once_with(self.context,
                                                    "fakehost",
                                                    "fakenode")
    @mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node')
    def test_instances_with_live_migrations(self, mock_migration_list):
        # In-progress live migrations should not be counted as usage by the
        # audit on this host.
        instance = self._fake_instance_obj()
        migration = objects.Migration(context=self.context,
                                      migration_type='live-migration',
                                      instance_uuid=instance.uuid)
        mock_migration_list.return_value = [migration]
        self.tracker.update_available_resource(self.context)
        self.assertEqual(0, self.tracker.compute_node['memory_mb_used'])
        self.assertEqual(0, self.tracker.compute_node['local_gb_used'])
        mock_migration_list.assert_called_once_with(self.context,
                                                    "fakehost",
                                                    "fakenode")
    @mock.patch('nova.compute.claims.Claim')
    @mock.patch('nova.objects.Instance.save')
    def test_claim_saves_numa_topology(self, mock_save, mock_claim):
        # instance_claim must persist exactly the fields it sets: the
        # claimed NUMA topology plus host/node/launched_on placement.
        def fake_save():
            self.assertEqual(set(['numa_topology', 'host', 'node',
                                  'launched_on']),
                             inst.obj_what_changed())
        mock_save.side_effect = fake_save
        inst = objects.Instance(host=None, node=None, memory_mb=1024)
        inst.obj_reset_changes()
        numa = objects.InstanceNUMATopology()
        claim = mock.MagicMock()
        claim.claimed_numa_topology = numa
        mock_claim.return_value = claim
        with mock.patch.object(self.tracker, '_update_usage_from_instance'):
            self.tracker.instance_claim(self.context, inst)
        mock_save.assert_called_once_with()
    @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
                return_value=objects.InstancePCIRequests(requests=[]))
    def test_claim_sets_instance_host_and_node(self, mock_get):
        # Claiming an unscheduled instance must stamp it with this
        # tracker's host, launched_on and node.
        instance = self._fake_instance_obj()
        self.assertIsNone(instance['host'])
        self.assertIsNone(instance['launched_on'])
        self.assertIsNone(instance['node'])
        with mock.patch.object(instance, 'save'):
            claim = self.tracker.instance_claim(self.context, instance)
        self.assertNotEqual(0, claim.memory_mb)
        self.assertEqual('fakehost', instance['host'])
        self.assertEqual('fakehost', instance['launched_on'])
        self.assertEqual('fakenode', instance['node'])
class _MoveClaimTestCase(BaseTrackerTestCase):
    """Shared tests for move-type claims (resize/evacuate/live-migration).

    Subclasses may point ``self.claim_method`` at a different claim API
    (see ResizeClaimTestCase) to reuse these scenarios.
    """
    def setUp(self):
        super(_MoveClaimTestCase, self).setUp()
        self.instance = self._fake_instance_obj()
        self.instance_type = self._fake_flavor_create()
        # Default to the generic move-claim entry point.
        self.claim_method = self.tracker._move_claim
    @mock.patch('nova.objects.Instance.save')
    @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
                return_value=objects.InstancePCIRequests(requests=[]))
    def test_abort(self, mock_get, mock_save):
        # An exception inside the claim context aborts the claim and rolls
        # back all usage and migration tracking.
        try:
            with self.claim_method(self.context, self.instance,
                    self.instance_type, limits=self.limits):
                raise test.TestingException("abort")
        except test.TestingException:
            pass
        self._assert(0, 'memory_mb_used')
        self._assert(0, 'local_gb_used')
        self._assert(0, 'vcpus_used')
        self.assertEqual(0, len(self.tracker.tracked_migrations))
        mock_save.assert_called_once_with()
    @mock.patch('nova.objects.Instance.save')
    @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
                return_value=objects.InstancePCIRequests(requests=[]))
    def test_additive_claims(self, mock_get, mock_save):
        # Two move claims for identical flavors should double the usage.
        limits = self._limits(
            2 * FAKE_VIRT_MEMORY_WITH_OVERHEAD,
            2 * FAKE_VIRT_LOCAL_GB,
            2 * FAKE_VIRT_VCPUS)
        self.claim_method(
            self.context, self.instance, self.instance_type, limits=limits)
        mock_save.assert_called_once_with()
        mock_save.reset_mock()
        instance2 = self._fake_instance_obj()
        self.claim_method(
            self.context, instance2, self.instance_type, limits=limits)
        mock_save.assert_called_once_with()
        self._assert(2 * FAKE_VIRT_MEMORY_WITH_OVERHEAD, 'memory_mb_used')
        self._assert(2 * FAKE_VIRT_LOCAL_GB, 'local_gb_used')
        self._assert(2 * FAKE_VIRT_VCPUS, 'vcpus_used')
    @mock.patch('nova.objects.Instance.save')
    @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
                return_value=objects.InstancePCIRequests(requests=[]))
    def test_revert(self, mock_get, mock_save):
        # Dropping a move claim must release everything it claimed.
        self.claim_method(
            self.context, self.instance, self.instance_type,
            image_meta={}, limits=self.limits)
        mock_save.assert_called_once_with()
        self.tracker.drop_move_claim(self.context, self.instance)
        self.assertEqual(0, len(self.tracker.tracked_instances))
        self.assertEqual(0, len(self.tracker.tracked_migrations))
        self._assert(0, 'memory_mb_used')
        self._assert(0, 'local_gb_used')
        self._assert(0, 'vcpus_used')
    @mock.patch('nova.objects.Instance.save')
    @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
                return_value=objects.InstancePCIRequests(requests=[]))
    def test_move_type_not_tracked(self, mock_get, mock_save):
        # live-migration type moves are not tracked as migrations and add
        # no usage on the destination tracker.
        self.claim_method(self.context, self.instance, self.instance_type,
                          limits=self.limits, move_type="live-migration")
        mock_save.assert_called_once_with()
        self._assert(0, 'memory_mb_used')
        self._assert(0, 'local_gb_used')
        self._assert(0, 'vcpus_used')
        self.assertEqual(0, len(self.tracker.tracked_migrations))
    @mock.patch('nova.objects.Instance.save')
    @mock.patch.object(objects.Migration, 'save')
    def test_existing_migration(self, save_mock, save_inst_mock):
        # A pre-created migration record (e.g. evacuation) gets its
        # destination fields filled in and moves to pre-migrating.
        migration = objects.Migration(self.context, id=42,
                                      instance_uuid=self.instance.uuid,
                                      source_compute='fake-other-compute',
                                      source_node='fake-other-node',
                                      status='accepted',
                                      migration_type='evacuation')
        self.claim_method(self.context, self.instance, self.instance_type,
                          migration=migration)
        self.assertEqual(self.tracker.host, migration.dest_compute)
        self.assertEqual(self.tracker.nodename, migration.dest_node)
        self.assertEqual("pre-migrating", migration.status)
        self.assertEqual(1, len(self.tracker.tracked_migrations))
        save_mock.assert_called_once_with()
        save_inst_mock.assert_called_once_with()
class ResizeClaimTestCase(_MoveClaimTestCase):
    """Run the shared move-claim scenarios through the resize_claim API.

    Tests that only make sense for the generic ``_move_claim`` entry
    point are skipped here.
    """

    def setUp(self):
        super(ResizeClaimTestCase, self).setUp()
        # Replace the generic move claim with the public resize API.
        self.claim_method = self.tracker.resize_claim

    def test_move_type_not_tracked(self):
        # resize_claim hard-codes its move_type, so the base-class test
        # exercising an explicit move_type does not apply.
        self.skipTest("resize_claim already sets the move_type.")

    def test_existing_migration(self):
        self.skipTest("resize_claim does not support having an existing "
                      "migration record.")
class OrphanTestCase(BaseTrackerTestCase):
    """Verify accounting of orphaned instances.

    The fake driver reports two instances that the database does not know
    about; their usage must still be tracked and they must be discoverable.
    """
    def _driver(self):
        class OrphanVirtDriver(FakeVirtDriver):
            def get_per_instance_usage(self):
                # Two hypervisor-side instances with no DB records.
                return {
                    '1-2-3-4-5': {'memory_mb': FAKE_VIRT_MEMORY_MB,
                                  'uuid': '1-2-3-4-5'},
                    '2-3-4-5-6': {'memory_mb': FAKE_VIRT_MEMORY_MB,
                                  'uuid': '2-3-4-5-6'},
                }
        return OrphanVirtDriver()
    def test_usage(self):
        # Both orphans contribute their memory (plus overhead) to usage.
        self.assertEqual(2 * FAKE_VIRT_MEMORY_WITH_OVERHEAD,
                         self.tracker.compute_node.memory_mb_used)
    def test_find(self):
        # create one legit instance and verify the 2 orphans remain
        self._fake_instance_obj()
        orphans = self.tracker._find_orphaned_instances()
        self.assertEqual(2, len(orphans))
class ComputeMonitorTestCase(BaseTestCase):
    """Tests for collecting host metrics from compute monitors."""
    def setUp(self):
        super(ComputeMonitorTestCase, self).setUp()
        self.tracker = self._tracker()
        self.node_name = 'nodename'
        self.user_id = 'fake'
        self.project_id = 'fake'
        self.info = {}
        self.context = context.RequestContext(self.user_id,
                                              self.project_id)
    def test_get_host_metrics_none(self):
        # With no monitors configured there are no metrics.
        self.tracker.monitors = []
        metrics = self.tracker._get_host_metrics(self.context,
                                                 self.node_name)
        self.assertEqual(len(metrics), 0)
    @mock.patch.object(resource_tracker.LOG, 'warning')
    def test_get_host_metrics_exception(self, mock_LOG_warning):
        # A monitor failure is logged as a warning and produces no metrics
        # rather than propagating the exception.
        monitor = mock.MagicMock()
        monitor.add_metrics_to_list.side_effect = Exception
        self.tracker.monitors = [monitor]
        metrics = self.tracker._get_host_metrics(self.context,
                                                 self.node_name)
        mock_LOG_warning.assert_called_once_with(
            u'Cannot get the metrics from %s.', mock.ANY)
        self.assertEqual(0, len(metrics))
    def test_get_host_metrics(self):
        # A working monitor's metrics are returned and also emitted as a
        # compute.metrics.update notification.
        class FakeCPUMonitor(monitor_base.MonitorBase):
            NOW_TS = timeutils.utcnow()
            def __init__(self, *args):
                super(FakeCPUMonitor, self).__init__(*args)
                self.source = 'FakeCPUMonitor'
            def get_metric_names(self):
                return set(["cpu.frequency"])
            def get_metric(self, name):
                return 100, self.NOW_TS
        self.tracker.monitors = [FakeCPUMonitor(None)]
        mock_notifier = mock.Mock()
        with mock.patch.object(rpc, 'get_notifier',
                               return_value=mock_notifier) as mock_get:
            metrics = self.tracker._get_host_metrics(self.context,
                                                     self.node_name)
            mock_get.assert_called_once_with(service='compute',
                                             host=self.node_name)
        expected_metrics = [
            {
                'timestamp': timeutils.strtime(
                    FakeCPUMonitor.NOW_TS),
                'name': 'cpu.frequency',
                'value': 100,
                'source': 'FakeCPUMonitor'
            },
        ]
        payload = {
            'metrics': expected_metrics,
            'host': self.tracker.host,
            'host_ip': CONF.my_ip,
            'nodename': self.node_name
        }
        mock_notifier.info.assert_called_once_with(
            self.context, 'compute.metrics.update', payload)
        self.assertEqual(metrics, expected_metrics)
class TrackerPeriodicTestCase(BaseTrackerTestCase):
    """Tests for the periodic resource audit behavior."""
    def test_periodic_status_update(self):
        # verify update called on instantiation
        self.assertEqual(1, self.update_call_count)
        # verify update not called if no change to resources
        self.tracker.update_available_resource(self.context)
        self.assertEqual(1, self.update_call_count)
        # verify update is called when resources change
        driver = self.tracker.driver
        driver.memory_mb += 1
        self.tracker.update_available_resource(self.context)
        self.assertEqual(2, self.update_call_count)
    def test_update_available_resource_calls_locked_inner(self):
        # The public audit entry point must pass the driver's resource view
        # to the locked inner update exactly once.
        @mock.patch.object(self.tracker, 'driver')
        @mock.patch.object(self.tracker,
                           '_update_available_resource')
        @mock.patch.object(self.tracker, '_verify_resources')
        @mock.patch.object(self.tracker, '_report_hypervisor_resource_view')
        def _test(mock_rhrv, mock_vr, mock_uar, mock_driver):
            resources = {'there is someone in my head': 'but it\'s not me'}
            mock_driver.get_available_resource.return_value = resources
            self.tracker.update_available_resource(self.context)
            mock_uar.assert_called_once_with(self.context, resources)
        _test()
class StatsDictTestCase(BaseTrackerTestCase):
    """Test stats handling for a virt driver that provides
    stats as a dictionary.
    """
    def _driver(self):
        return FakeVirtDriver(stats=FAKE_VIRT_STATS)

    def test_virt_stats(self):
        # start with virt driver stats
        stats = self.tracker.compute_node.stats
        self.assertEqual(FAKE_VIRT_STATS_COERCED, stats)
        # adding an instance should keep virt driver stats
        self._fake_instance_obj(vm_state=vm_states.ACTIVE, host=self.host)
        self.tracker.update_available_resource(self.context)
        stats = self.tracker.compute_node.stats
        # compute node stats are coerced to strings
        expected_stats = copy.deepcopy(FAKE_VIRT_STATS_COERCED)
        # six.iteritems works on both py2 and py3 dicts; dict.iteritems()
        # does not exist on Python 3.
        for k, v in six.iteritems(self.tracker.stats):
            expected_stats[k] = six.text_type(v)
        self.assertEqual(expected_stats, stats)
        # removing the instances should keep only virt driver stats
        self._instances = {}
        self.tracker.update_available_resource(self.context)
        stats = self.tracker.compute_node.stats
        self.assertEqual(FAKE_VIRT_STATS_COERCED, stats)
class StatsJsonTestCase(BaseTrackerTestCase):
    """Test stats handling for a virt driver that provides
    stats as a json string.
    """
    def _driver(self):
        return FakeVirtDriver(stats=FAKE_VIRT_STATS_JSON)

    def test_virt_stats(self):
        # start with virt driver stats
        stats = self.tracker.compute_node.stats
        self.assertEqual(FAKE_VIRT_STATS_COERCED, stats)
        # adding an instance should keep virt driver stats
        # and add rt stats
        self._fake_instance_obj(vm_state=vm_states.ACTIVE, host=self.host)
        self.tracker.update_available_resource(self.context)
        stats = self.tracker.compute_node.stats
        # compute node stats are coerced to strings
        expected_stats = copy.deepcopy(FAKE_VIRT_STATS_COERCED)
        # six.iteritems works on both py2 and py3 dicts; dict.iteritems()
        # does not exist on Python 3.
        for k, v in six.iteritems(self.tracker.stats):
            expected_stats[k] = six.text_type(v)
        self.assertEqual(expected_stats, stats)
        # removing the instances should keep only virt driver stats
        self._instances = {}
        self.tracker.update_available_resource(self.context)
        stats = self.tracker.compute_node.stats
        self.assertEqual(FAKE_VIRT_STATS_COERCED, stats)
class StatsInvalidJsonTestCase(BaseTrackerTestCase):
    """Test stats handling for a virt driver that provides
    an invalid type for stats.
    """
    def _driver(self):
        # Not valid JSON, so parsing during the audit must fail.
        return FakeVirtDriver(stats='this is not json')
    def _init_tracker(self):
        # do not do initial update in setup
        pass
    def test_virt_stats(self):
        # should throw exception for string that does not parse as json
        self.assertRaises(ValueError,
                          self.tracker.update_available_resource,
                          context=self.context)
class StatsInvalidTypeTestCase(BaseTrackerTestCase):
    """Test stats handling for a virt driver that provides
    an invalid type for stats.
    """
    def _driver(self):
        # stats must be a dict or JSON string; an int is rejected.
        return FakeVirtDriver(stats=10)
    def _init_tracker(self):
        # do not do initial update in setup
        pass
    def test_virt_stats(self):
        # should throw exception for incorrect stats value type
        self.assertRaises(ValueError,
                          self.tracker.update_available_resource,
                          context=self.context)
class UpdateUsageFromMigrationsTestCase(BaseTrackerTestCase):
    """Tests for _update_usage_from_migrations filtering and dispatch."""
    @mock.patch.object(resource_tracker.ResourceTracker,
                       '_update_usage_from_migration')
    def test_no_migrations(self, mock_update_usage):
        # An empty migration list results in no per-migration updates.
        migrations = []
        self.tracker._update_usage_from_migrations(self.context, migrations)
        self.assertFalse(mock_update_usage.called)
    @mock.patch.object(resource_tracker.ResourceTracker,
                       '_update_usage_from_migration')
    @mock.patch('nova.objects.instance.Instance.get_by_uuid')
    def test_instance_not_found(self, mock_get_instance, mock_update_usage):
        # Migrations whose instance no longer exists are skipped silently.
        mock_get_instance.side_effect = exception.InstanceNotFound(
            instance_id='some_id',
        )
        migration = objects.Migration(
            context=self.context,
            instance_uuid='some_uuid',
        )
        self.tracker._update_usage_from_migrations(self.context, [migration])
        mock_get_instance.assert_called_once_with(self.context, 'some_uuid')
        self.assertFalse(mock_update_usage.called)
    @mock.patch.object(resource_tracker.ResourceTracker,
                       '_update_usage_from_migration')
    @mock.patch('nova.objects.instance.Instance.get_by_uuid')
    def test_update_usage_called(self, mock_get_instance, mock_update_usage):
        # The happy path dispatches one update per migration.
        instance = self._fake_instance_obj()
        mock_get_instance.return_value = instance
        migration = objects.Migration(
            context=self.context,
            instance_uuid=instance.uuid,
        )
        self.tracker._update_usage_from_migrations(self.context, [migration])
        mock_get_instance.assert_called_once_with(self.context, instance.uuid)
        mock_update_usage.assert_called_once_with(
            self.context, instance, None, migration)
    @mock.patch.object(resource_tracker.ResourceTracker,
                       '_update_usage_from_migration')
    @mock.patch('nova.objects.instance.Instance.get_by_uuid')
    def test_flavor_not_found(self, mock_get_instance, mock_update_usage):
        # A missing flavor during the per-migration update must not abort
        # processing of the migration list.
        mock_update_usage.side_effect = exception.FlavorNotFound(flavor_id='')
        instance = self._fake_instance_obj()
        mock_get_instance.return_value = instance
        migration = objects.Migration(
            context=self.context,
            instance_uuid=instance.uuid,
        )
        self.tracker._update_usage_from_migrations(self.context, [migration])
        mock_get_instance.assert_called_once_with(self.context, instance.uuid)
        mock_update_usage.assert_called_once_with(
            self.context, instance, None, migration)
    @mock.patch.object(resource_tracker.ResourceTracker,
                       '_update_usage_from_migration')
    @mock.patch('nova.objects.instance.Instance.get_by_uuid')
    def test_not_resizing_state(self, mock_get_instance, mock_update_usage):
        # Instances not in a resize-related state are ignored.
        instance = self._fake_instance_obj()
        instance.vm_state = vm_states.ACTIVE
        instance.task_state = task_states.SUSPENDING
        mock_get_instance.return_value = instance
        migration = objects.Migration(
            context=self.context,
            instance_uuid=instance.uuid,
        )
        self.tracker._update_usage_from_migrations(self.context, [migration])
        mock_get_instance.assert_called_once_with(self.context, instance.uuid)
        self.assertFalse(mock_update_usage.called)
    @mock.patch.object(resource_tracker.ResourceTracker,
                       '_update_usage_from_migration')
    @mock.patch('nova.objects.instance.Instance.get_by_uuid')
    def test_use_most_recent(self, mock_get_instance, mock_update_usage):
        # When several migrations exist for one instance, only the most
        # recently updated one is applied.
        instance = self._fake_instance_obj()
        mock_get_instance.return_value = instance
        migration_2002 = objects.Migration(
            id=2002,
            context=self.context,
            instance_uuid=instance.uuid,
            updated_at=datetime.datetime(2002, 1, 1, 0, 0, 0),
        )
        migration_2003 = objects.Migration(
            id=2003,
            context=self.context,
            instance_uuid=instance.uuid,
            updated_at=datetime.datetime(2003, 1, 1, 0, 0, 0),
        )
        migration_2001 = objects.Migration(
            id=2001,
            context=self.context,
            instance_uuid=instance.uuid,
            updated_at=datetime.datetime(2001, 1, 1, 0, 0, 0),
        )
        self.tracker._update_usage_from_migrations(
            self.context, [migration_2002, migration_2003, migration_2001])
        mock_get_instance.assert_called_once_with(self.context, instance.uuid)
        mock_update_usage.assert_called_once_with(
            self.context, instance, None, migration_2003)
| Francis-Liu/animated-broccoli | nova/tests/unit/compute/test_resource_tracker.py | Python | apache-2.0 | 60,776 |
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
class PyWebsockets(PythonPackage):
    """websockets is a library for building WebSocket servers and
    clients in Python with a focus on correctness and simplicity."""
    homepage = "https://github.com/aaugustin/websockets"
    url = "https://github.com/aaugustin/websockets/archive/8.1.tar.gz"
    version('8.1', sha256='c19ce96ad5f7606127d3915364144df93fb865a215784b06048fae3d39364f14')
    # setuptools is only needed to build the package.
    depends_on('py-setuptools', type='build')
    # Python 3.6.1+ is required both to build and to run websockets 8.x.
    depends_on('[email protected]:', type=('build', 'run'))
| iulian787/spack | var/spack/repos/builtin/packages/py-websockets/package.py | Python | lgpl-2.1 | 700 |
#!/usr/bin/env python
# encoding: utf-8
from .user import User
| luke0922/celery_learning | application/models/__init__.py | Python | apache-2.0 | 63 |
import types
import functools
import unittest
from .agent import Config, Agent
# XXX bring into compliance with python 2.7 unittest api
class AssertRaisesContextManager(object):
    '''Context manager asserting that the managed block raises *expected*.

    Mirrors the Python 2.7 ``assertRaises`` context-manager form: the
    caught exception is stored on ``self.exception`` for inspection.
    '''

    def __init__(self, expected):
        # expected is an exception class; the block must raise it (or a
        # subclass of it, matching unittest semantics).
        self.expected = expected

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        if type is None:
            raise AssertionError('%s expected but not raised' % str(self.expected))
        # Accept subclasses of the expected exception, like
        # unittest.TestCase.assertRaises does; a plain inequality check
        # would wrongly reject them.
        if not issubclass(type, self.expected):
            # Report the *expected* class, not its metaclass
            # (self.expected.__class__ is always `type`).
            raise AssertionError('%s expected, not `%s`' % (str(self.expected), str(value)))
        self.exception = value
        # silence exception
        return True
class WebTestCase(unittest.TestCase):
    '''unittest.TestCase subclass exposing a web agent for HTTP testing.

    Most methods are thin delegations to the underlying ``Agent``
    instance created per test (or per request when session management is
    disabled via the ``no_session`` decorator).
    '''
    def __init__(self, *args, **kwargs):
        super(WebTestCase, self).__init__(*args, **kwargs)
        # XXX does not inherit
        # Use class-level _config (set by the `config` decorator) if present.
        self.config = getattr(self.__class__, '_config', None) or Config()
    def setUp(self):
        super(WebTestCase, self).setUp()
        self._agent = self._create_agent()
    def _create_agent(self):
        # agent_class from config allows tests to substitute a custom Agent.
        kwargs = {}
        kwargs['config'] = self.config
        agent_class = self.config.agent_class or Agent
        return agent_class(**kwargs)
    def agent(self):
        # Returns a fresh agent, independent of the test's default one.
        agent = self._create_agent()
        return agent
    @property
    def response(self):
        return self._agent.response
    def request(self, method, url, *args, **kwargs):
        # When sessions are disabled, every request gets a brand-new agent
        # so no cookies/state carry over between requests.
        if hasattr(self, '_no_session') and self._no_session:
            self._agent = self._create_agent()
        return self._agent.request(method, url, *args, **kwargs)
    def get(self, url, *args, **kwargs):
        return self.request('get', url, *args, **kwargs)
    def post(self, url, *args, **kwargs):
        return self.request('post', url, *args, **kwargs)
    def follow_redirect(self):
        return self._agent.follow_redirect()
    def submit_form(self, form, elements=None):
        return self._agent.submit_form(form, elements)
    # XXX move to utu
    # XXX accept kwargs
    def assert_raises(self, expected, *args):
        # With callable args behaves like unittest's assertRaises;
        # without, returns a context manager.
        if args:
            return self.assertRaises(expected, *args)
        else:
            return AssertRaisesContextManager(expected)
    def assert_status(self, code):
        self._agent.assert_status(code)
    def assert_redirected_to_uri(self, target):
        self._agent.assert_redirected_to_uri(target)
    def assert_redirected_to_url(self, target):
        self._agent.assert_redirected_to_url(target)
    def assert_response_cookie(self, name, **kwargs):
        self._agent.assert_response_cookie(name, **kwargs)
    def assert_not_response_cookie(self, name):
        self._agent.assert_not_response_cookie(name)
    def assert_cookie_jar_cookie(self, name, **kwargs):
        self._agent.assert_cookie_jar_cookie(name, **kwargs)
    def assert_not_cookie_jar_cookie(self, name):
        self._agent.assert_not_cookie_jar_cookie(name)
    @property
    def cookies(self):
        return self._agent.response.cookies
    @property
    def raw_headers(self):
        return self._agent.raw_headers
    @property
    def headers(self):
        return self._agent.headers
    @property
    def current_url(self):
        '''Contains the full URL for the last request made.
        None if no requests have been made.
        '''
        return self._agent.current_url
def no_session(cls):
    '''Class decorator requesting that session management should not be
    performed.

    Marks the test-case class so that ``WebTestCase.request`` creates a
    fresh agent for every request instead of reusing one.
    '''
    setattr(cls, '_no_session', True)
    return cls
def config(**kwargs):
    '''Function and class decorator for setting configuration on test cases.

    On a function: temporarily applies the given configuration values to
    ``self.config`` around the call, restoring the previous values even
    when the wrapped test raises. On a class: records the values on a
    class-level ``_config`` object.
    '''
    def decorator(cls_or_fn):
        if isinstance(cls_or_fn, types.FunctionType):
            fn = cls_or_fn

            @functools.wraps(fn)
            def decorated(self):
                # Snapshot current values so they can be restored.
                previous = dict(
                    (key, getattr(self.config, key)) for key in kwargs)
                for key, value in kwargs.items():
                    setattr(self.config, key, value)
                try:
                    fn(self)
                finally:
                    for key, value in previous.items():
                        setattr(self.config, key, value)
            return decorated
        # Class form: merge into (or create) the class-level config.
        cls = cls_or_fn
        cfg = getattr(cls, '_config', None) or Config()
        for name, value in kwargs.items():
            setattr(cfg, name, value)
        cls._config = cfg
        return cls
    return decorator
| p/webracer | webracer/testcase.py | Python | bsd-2-clause | 4,545 |
from kapteyn import maputils
from matplotlib import pyplot as plt
# Load the FITS image and wrap it in kapteyn's annotated-image helper.
f = maputils.FITSimage("m101.fits")
mplim = f.Annotatedimage()
# Add the pixel data as an image layer, render, and display the figure.
im = mplim.Image()
mplim.plot()
plt.show()
| kapteyn-astro/kapteyn | doc/source/EXAMPLES/mu_simple.py | Python | bsd-3-clause | 175 |
# based on
# PetersonNoiseModel.m
from __future__ import print_function
#from math import pi,log10
#import numpy as np
from numpy import pi,log10,array
def PetersonNoiseModel(Period, s=''):
    """Evaluate Peterson's (1993) seismic background noise models.

    Parameters
    ----------
    Period : float
        Period in seconds at which to evaluate the model. Periods outside
        the tabulated range are clamped to the model limits
        (first knot / 1e5 s), matching the original MATLAB behavior.
    s : str
        Model selector: 'NLNM' (New Low Noise Model) or 'NHNM'
        (New High Noise Model).

    Returns
    -------
    (accel, veloc, displ) : tuple of floats
        Noise power levels in dB for acceleration, velocity and
        displacement, respectively.

    Raises
    ------
    ValueError
        If ``s`` is not one of 'NLNM' or 'NHNM'.
    """
    if s == 'NLNM':
        # Peterson's New Low Noise Model
        P = array([0.1,0.17,0.4,0.8,1.24,2.4,4.3,5.0,6.0,10.0,12.0,15.6,21.9,31.6,45.0,70.0,101.0,154.0,328.0,600.0,10000.0])
        A = array([-162.36,-166.7,-170.0,-166.4,-168.6,-159.98,-141.1,-71.36,-97.26,-132.18,-205.27,-37.65,-114.37,-160.58,-187.5,-216.47,-185.0,-168.34,-217.43,-258.28,-346.88])
        B = array([5.64,0.0,-8.3,28.9,52.48,29.81,0.0,-99.77,-66.49,-31.57,36.16,-104.33,-47.1,-16.28,0.0,15.7,0.0,-7.61,11.9,26.6,48.75])
    elif s == 'NHNM':
        # Peterson's New High Noise Model
        P = array([0.1,0.22,0.32,0.8,3.8,4.6,6.3,7.9,15.4,20.0,354.8])
        A = array([-108.73,-150.34,-122.31,-116.85,-108.48,-74.66,0.66,-93.37,73.54,-151.52,-206.66])
        B = array([-17.23,-80.5,-23.87,32.51,18.08,-32.95,-127.18,-22.42,-162.98,10.01,31.63])
    else:
        raise ValueError("unknown noise model '%s' (expected 'NLNM' or 'NHNM')" % s)
    N = len(P)
    # Clamp to the model's defined period range: below the first tabulated
    # period the first knot is used, above 1e5 s the value at 1e5 s is used.
    if Period < P[0]:
        Period = P[0]
    elif Period > 100000.0:
        Period = 100000.0
    # Find the tabulated segment [P[l], P[l+1]) containing Period; the last
    # segment extends from P[N-1] up to the 1e5 s model limit. Starting at
    # l = 0 fixes the off-by-one from the MATLAB port (l = 1) that made
    # periods in [P[0], P[1]) fall through and return zeros.
    l = 0
    while l < N - 1 and Period >= P[l + 1]:
        l += 1
    accel = A[l] + B[l] * log10(Period)
    veloc = accel + 20.0 * log10(Period / 2.0 / pi)
    displ = accel + 20.0 * log10(Period**2 / 4.0 / pi**2)
    return accel, veloc, displ
| geodynamics/specfem3d | EXAMPLES/noise_tomography/PetersonNoiseModel.py | Python | gpl-3.0 | 2,363 |
"""
Taiga integration for Zulip.
Tips for notification output:
*Emojis*: most of the events have specific emojis e.g.
- :notebook: - change of subject/name/description
- :chart_with_upwards_trend: - change of status
etc. If there's no meaningful emoji for a certain event, the defaults are used:
- :thought_balloon: - event connected to commenting
- :busts_in_silhouette: - event connected to a certain user
- :package: - all other events connected to user story
- :calendar: - all other events connected to milestones
- :clipboard: - all other events connected to tasks
- :bulb: - all other events connected to issues
*Text formatting*: if there has been a change of a property, the new value should always be in bold; otherwise the
subject of US/task should be in bold.
"""
from __future__ import absolute_import
from django.utils.translation import ugettext as _
from zerver.lib.actions import check_send_message
from zerver.lib.response import json_success, json_error
from zerver.decorator import REQ, has_request_variables, api_key_only_webhook_view
import ujson
from six.moves import range
@api_key_only_webhook_view('Taiga')
@has_request_variables
def api_taiga_webhook(request, user_profile, client, message=REQ(argument_type='body'),
                      stream=REQ(default='taiga'), topic=REQ(default='General')):
    # A single Taiga payload may describe several events; render one
    # newline-terminated line per event and send them as a single message.
    events = parse_message(message)
    content = ''.join(generate_content(event) + '\n' for event in events)
    check_send_message(user_profile, client, 'stream', [stream], topic, content)
    return json_success()
# Notification templates, keyed by object kind then by event name.
# Each value is an old-style %-format string filled from the 'values'
# dict built by the parse_* functions below (keys: user, subject,
# and optionally old/new).
templates = {
    # Events on user stories.
    'userstory': {
        'create': u':package: %(user)s created user story **%(subject)s**.',
        'set_assigned_to': u':busts_in_silhouette: %(user)s assigned user story **%(subject)s** to %(new)s.',
        'changed_assigned_to': u':busts_in_silhouette: %(user)s reassigned user story **%(subject)s** from %(old)s to %(new)s.',
        'points': u':game_die: %(user)s changed estimation of user story **%(subject)s**.',
        'blocked': u':lock: %(user)s blocked user story **%(subject)s**.',
        'unblocked': u':unlock: %(user)s unblocked user story **%(subject)s**.',
        'set_milestone': u':calendar: %(user)s added user story **%(subject)s** to sprint %(new)s.',
        'changed_milestone': u':calendar: %(user)s changed sprint of user story **%(subject)s** from %(old)s to %(new)s.',
        'changed_status': u':chart_with_upwards_trend: %(user)s changed status of user story **%(subject)s** from %(old)s to %(new)s.',
        'closed': u':checkered_flag: %(user)s closed user story **%(subject)s**.',
        'reopened': u':package: %(user)s reopened user story **%(subject)s**.',
        'renamed': u':notebook: %(user)s renamed user story from %(old)s to **%(new)s**.',
        'description': u':notebook: %(user)s updated description of user story **%(subject)s**.',
        'commented': u':thought_balloon: %(user)s commented on user story **%(subject)s**.',
        'delete': u':x: %(user)s deleted user story **%(subject)s**.'
    },
    # Events on sprints/milestones.
    'milestone': {
        'create': u':calendar: %(user)s created sprint **%(subject)s**.',
        'renamed': u':notebook: %(user)s renamed sprint from %(old)s to **%(new)s**.',
        'estimated_start': u':calendar: %(user)s changed estimated start of sprint **%(subject)s** from %(old)s to %(new)s.',
        'estimated_finish': u':calendar: %(user)s changed estimated finish of sprint **%(subject)s** from %(old)s to %(new)s.',
        'delete': u':x: %(user)s deleted sprint **%(subject)s**.'
    },
    # Events on tasks.
    'task': {
        'create': u':clipboard: %(user)s created task **%(subject)s**.',
        'set_assigned_to': u':busts_in_silhouette: %(user)s assigned task **%(subject)s** to %(new)s.',
        'changed_assigned_to': u':busts_in_silhouette: %(user)s reassigned task **%(subject)s** from %(old)s to %(new)s.',
        'blocked': u':lock: %(user)s blocked task **%(subject)s**.',
        'unblocked': u':unlock: %(user)s unblocked task **%(subject)s**.',
        'set_milestone': u':calendar: %(user)s added task **%(subject)s** to sprint %(new)s.',
        'changed_milestone': u':calendar: %(user)s changed sprint of task **%(subject)s** from %(old)s to %(new)s.',
        'changed_status': u':chart_with_upwards_trend: %(user)s changed status of task **%(subject)s** from %(old)s to %(new)s.',
        'renamed': u':notebook: %(user)s renamed task %(old)s to **%(new)s**.',
        'description': u':notebook: %(user)s updated description of task **%(subject)s**.',
        'commented': u':thought_balloon: %(user)s commented on task **%(subject)s**.',
        'delete': u':x: %(user)s deleted task **%(subject)s**.',
        'changed_us': u':clipboard: %(user)s moved task **%(subject)s** from user story %(old)s to %(new)s.'
    },
    # Events on issues.
    'issue': {
        'create': u':bulb: %(user)s created issue **%(subject)s**.',
        'set_assigned_to': u':busts_in_silhouette: %(user)s assigned issue **%(subject)s** to %(new)s.', #
        'changed_assigned_to': u':busts_in_silhouette: %(user)s reassigned issue **%(subject)s** from %(old)s to %(new)s.',
        'changed_priority': u':rocket: %(user)s changed priority of issue **%(subject)s** from %(old)s to %(new)s.',
        'changed_severity': u':warning: %(user)s changed severity of issue **%(subject)s** from %(old)s to %(new)s.',
        'changed_status': u':chart_with_upwards_trend: %(user)s changed status of issue **%(subject)s** from %(old)s to %(new)s.',
        'changed_type': u':bulb: %(user)s changed type of issue **%(subject)s** from %(old)s to %(new)s.',
        'renamed': u':notebook: %(user)s renamed issue %(old)s to **%(new)s**.',
        'description': u':notebook: %(user)s updated description of issue **%(subject)s**.',
        'commented': u':thought_balloon: %(user)s commented on issue **%(subject)s**.',
        'delete': u':x: %(user)s deleted issue **%(subject)s**.'
    },
}
def get_old_and_new_values(change_type, message):
    """ Return the (old, new) values of *change_type* from the payload.

    Literal fields (subject, name, estimated dates) come straight from
    the diff; reference fields (users, status, ...) are resolved to their
    display value through the payload's "values" section.  A side that
    cannot be resolved yields None.
    """
    values_map = {
        'assigned_to': 'users',
        'status': 'status',
        'severity': 'severity',
        'priority': 'priority',
        'milestone': 'milestone',
        'type': 'type',
        'user_story': 'user_story'
    }

    if change_type in ('subject', 'name', 'estimated_finish',
                       'estimated_start'):
        # Literal values: no indirection through the values table.
        diff_entry = message["change"]["diff"][change_type]
        return diff_entry["from"], diff_entry["to"]

    def resolve(endpoint):
        # Resolve the raw id in the diff to its display value; any
        # missing key along the way means "no value on this side".
        try:
            raw_id = message["change"]["diff"][change_type][endpoint]
            return message["change"]["values"][
                values_map[change_type]][str(raw_id)]
        except KeyError:
            return None

    return resolve("from"), resolve("to")
def parse_comment(message):
    """ Build the event dict for a comment on an issue, task or US. """
    data = message["data"]
    # User stories/tasks/issues carry a 'subject'; milestones a 'name'.
    subject = data["subject"] if "subject" in list(data.keys()) else data["name"]
    return {
        'event': 'commented',
        'type': message["type"],
        'values': {
            'user': message["change"]["user"]["name"],
            'subject': subject
        }
    }
def parse_create_or_delete(message):
    """ Build the event dict for a 'create' or 'delete' action. """
    data = message["data"]
    if "subject" in list(data.keys()):
        subject = data["subject"]
    else:
        subject = data["name"]
    return {
        'type': message["type"],
        'event': message["action"],
        'values': {
            'user': data["owner"]["name"],
            'subject': subject
        }
    }
def parse_change_event(change_type, message):
    """ Map a single diff entry to an event dict.

    Returns None for unsupported change types and for date changes
    where the value did not actually change.
    """
    data = message["data"]
    values = {
        'user': message["change"]["user"]["name"],
        'subject': data["subject"] if "subject" in list(data.keys()) else data["name"]
    }

    if change_type in ("description", "points"):
        event_type = change_type
    elif change_type in ("milestone", "assigned_to"):
        old, new = get_old_and_new_values(change_type, message)
        if old:
            event_type = "changed_" + change_type
            values.update({'old': old, 'new': new})
        else:
            # No previous value: first assignment / first sprint.
            event_type = "set_" + change_type
            values["new"] = new
    elif change_type == "is_blocked":
        event_type = ("blocked"
                      if message["change"]["diff"]["is_blocked"]["to"]
                      else "unblocked")
    elif change_type == "is_closed":
        event_type = ("closed"
                      if message["change"]["diff"]["is_closed"]["to"]
                      else "reopened")
    elif change_type == "user_story":
        old, new = get_old_and_new_values(change_type, message)
        event_type = "changed_us"
        values.update({'old': old, 'new': new})
    elif change_type in ("subject", "name"):
        event_type = "renamed"
        old, new = get_old_and_new_values(change_type, message)
        values.update({'old': old, 'new': new})
    elif change_type in ("estimated_finish", "estimated_start"):
        old, new = get_old_and_new_values(change_type, message)
        if old == new:
            # The date has not actually changed: nothing to report.
            return None
        event_type = change_type
        values.update({'old': old, 'new': new})
    elif change_type in ("priority", "severity", "type", "status"):
        event_type = "changed_" + change_type
        old, new = get_old_and_new_values(change_type, message)
        values.update({'old': old, 'new': new})
    else:
        # We do not support this type of event.
        return None

    return {"type": message["type"], "event": event_type, "values": values}
def parse_message(message):
    """ Dispatch the payload to the specialized parsers, collecting events. """
    events = []
    action = message["action"]
    if action in ('create', 'delete'):
        events.append(parse_create_or_delete(message))
    elif action == 'change':
        change = message["change"]
        # A single change payload may carry several modified fields.
        if change["diff"]:
            for changed_field in change["diff"]:
                event = parse_change_event(changed_field, message)
                if event:
                    events.append(event)
        if change["comment"]:
            events.append(parse_comment(message))
    return events
def generate_content(data):
    """ Render a parsed event through its notification template. """
    try:
        template = templates[data['type']][data['event']]
        return template % data['values']
    except KeyError:
        # Unknown object kind, event name, or missing template value.
        return json_error(_("Unknown message"))
| Vallher/zulip | zerver/views/webhooks/taiga.py | Python | apache-2.0 | 10,726 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: Moritz
"""
import io
from collections import deque
import numpy as np
import cv2
import scipy
from image_processor import ProcessOutput
from goal_detector import GoalDetector
def main():
    """ Replay ./match2.h264 frame by frame: JPEG-encode each frame into
    the output processor and display its HSV conversion. """
    processor = ProcessOutput()
    detector = GoalDetector()  # instantiated as before; stepping is disabled

    frame_idx = 0
    video_path = './match2.h264'
    capture = cv2.VideoCapture(video_path)
    print(np.shape(capture))

    grabbed, frame = capture.read()
    while grabbed:
        print("iteration", frame_idx)
        encoded_ok, jpeg_buf = cv2.imencode(
            ".jpg", frame, params=[cv2.IMWRITE_JPEG_QUALITY, 99])
        byte_stream = io.BytesIO(jpeg_buf)
        hsv_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        processor.write(byte_stream.getvalue())
        cv2.imshow('image', hsv_frame)
        cv2.waitKey(1)
        grabbed, frame = capture.read()
        frame_idx += 1

    cv2.destroyAllWindows()
# Run the replay loop only when executed as a script, not on import.
if __name__ == "__main__":
    main()
| moritzschaefer/kickercam | src/kickercam/readstream.py | Python | gpl-3.0 | 975 |
# -*- coding: utf-8 -*-
"""
Copyright 2013-2014 Olivier Cortès <[email protected]>
This file is part of the 1flow project.
1flow is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
1flow is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public
License along with 1flow. If not, see http://www.gnu.org/licenses/
"""
# import json
import logging
from statsd import statsd
from celery import chain as tasks_chain
from constance import config
from django.conf import settings
from django.db import models, transaction, IntegrityError
from django.db.models.signals import post_save, pre_save, pre_delete
from django.utils.translation import ugettext_lazy as _
from django.utils.text import slugify
from simple_history.models import HistoricalRecords
from oneflow.base.utils import register_task_method
# from oneflow.base.utils.http import clean_url
# from oneflow.base.utils.dateutils import now, datetime
from oneflow.core.constants import MAIL_MATCH_ACTIONS
from ..common import ORIGINS, CONTENT_TYPES
from ..author import Author
from base import (
BaseItemQuerySet,
BaseItemManager,
BaseItem,
baseitem_process_task,
baseitem_create_reads_task,
)
from abstract import ContentItem
# Module-level logger for this models file.
LOGGER = logging.getLogger(__name__)
# Public API of this module; register_task_method() appends the
# generated Celery task names at the bottom of the file.
__all__ = [
    'Email',
    # Tasks will be added below by register_task_method().
]
# —————————————————————————————————————————————————————————— Manager / QuerySet
def BaseItemQuerySet_email_method(self):
    """ Patch BaseItemQuerySet to know how to return emails.

    Makes ``queryset.email()`` filter down to Email instances only.
    """
    return self.instance_of(Email)
# Monkey-patch the shared queryset class with the helper above.
BaseItemQuerySet.email = BaseItemQuerySet_email_method
# ——————————————————————————————————————————————————————————————————————— Model
# BIG FAT WARNING: inheritance order matters. BaseItem must come first,
# else `create_post_task()` is not found by register_task_method().
class Email(BaseItem, ContentItem):
    """ An e-mail.
    Cf. https://docs.python.org/2/library/email.message.html (but not only).
    """
    class Meta:
        app_label = 'core'
        verbose_name = _(u'E-mail')
        verbose_name_plural = _(u'E-mails')
    objects = BaseItemManager()
    # Django simple history.
    history = HistoricalRecords()
    # RFC 5322 Message-ID header, stored without the surrounding <>.
    message_id = models.CharField(
        # http://tools.ietf.org/html/rfc5322#section-3.6.4 → no length specified
        # http://www.imc.org/ietf-usefor/2000/Jun/0020.html → 250 bytes
        max_length=256,
        verbose_name=_(u'Unique message-ID'),
        blank=True, unique=True, db_index=True
    )
    is_hidden = models.BooleanField(
        verbose_name=_(u'hidden from the feed'),
        # Should have a partial index.
        default=False, blank=True,
        help_text=_(u'is_hidden is True when the user has choosen not '
                    u'to store emails in the feed. 1flow always stores '
                    u'them, but does not display the hidden ones.')
    )
    # is_deleted = models.BooleanField(
    #     verbose_name=_(u'deleted from inbox'),
    #     # Should have a partial index.
    #     default=False, blank=True
    # )
    # Whether fetch_attachments() already ran successfully for this email.
    attachments_fetched = models.BooleanField(
        verbose_name=_(u'Attachments fetched?'),
        # Should have a partial index.
        default=False, blank=True
    )
    attachments = models.ManyToManyField(
        BaseItem, blank=True, null=True,
        verbose_name=_(u'Attachments'),
        related_name='emails'
    )
    # TODO: is_multipart ?
    #
    # subject → name
    # date → date_published
    # lang → BaseItem.language
    # From → authors (but how ?)
    # To → ? (user is already reachable via mailfeed)
    #
    # feeds → the mailfeed.
    # —————————————————————————————————————————————————————————————— Django
    def __unicode__(self):
        # Truncate the subject to 40 chars, appending an ellipsis if cut.
        return _(u'{0} ({1})').format(
            self.name[:40] + (self.name[40:] and u'…'), self.id)
    # —————————————————————————————————————————————————————————————— Properties
    @property
    def is_good(self):
        """ Return True if the e-mail is not hidden. """
        return not self.is_hidden
    @property
    def is_crawlable(self):
        """ Return True if any of the email feeds says so. """
        return self.feeds.exclude(
            MailFeed___match_action=MAIL_MATCH_ACTIONS.STORE).exists()
    # ————————————————————————————————————————————————————————————————— Methods
    @classmethod
    def create_email(cls, email_data, feeds, **kwargs):
        """ Returns ``True`` if email created, ``False`` if a pure duplicate
            (already exists in the same feed), ``None`` if exists but not in
            the same feed. If more than one feed given, only returns ``True``
            or ``False`` (mutualized state is not checked).

            :param email_data: dict with at least an 'email' message object
                (anything supporting ``walk()``), and optionally 'date',
                'raw_email' and 'meta' entries.
            :param feeds: iterable of feeds to attach the email to.
            :returns: tuple ``(email, created)`` where ``created`` is
                True/False/None as described above.
        """
        email = email_data.get('email')
        # Accumulate the decoded text of every body part, per subtype.
        message_body = {
            'plain': u'',
            'html': u'',
        }
        name = None
        message_id = None
        for part in email.walk():
            if settings.DEBUG:
                # Print a log of debugging ouput about the email structure.
                part_content_type = part.get_content_type()
                LOGGER.debug(u' |> part %s (%s keys)',
                             part_content_type, len(part.keys()))
                for key, value in part.items():
                    LOGGER.debug(u' |> %s (len: %s): %s',
                                 key, len(value),
                                 unicode(value)[:40])
            if 'subject' in part and name is None:
                # Some subjects are long text wrapped at ~80 chars.
                name = part.get('subject').replace(
                    u'\r', u'').replace(u'\n', u'')
            if 'message-id' in part and message_id is None:
                message_id = part.get('message-id').strip()
            if part.is_multipart():
                # Multipart parts are just glue,
                # skip to the interesting parts.
                continue
            if not part.get_content_type().startswith('text'):
                LOGGER.error(u'Skipped e-mail %s part (not implemented yet).',
                             part.get_content_type())
                continue
            part_payload = part.get_payload(decode=True)
            charset = part.get_charset()
            if not bool(charset):
                content_type = part.get('Content-Type',
                                        # Using this as default allows
                                        # to have only one mechanic to
                                        # extract the charset.
                                        'text/plain; charset="utf-8"')
                content_type_parts = [
                    x.strip().lower() for x in content_type.split(';')
                ]
                if len(content_type_parts) != 2:
                    LOGGER.error(u'Could not get email part charset, thus '
                                 u'skipped. E-mail body will probably be '
                                 u'incomplete!')
                    continue
                charset = content_type_parts[1].split(
                    u'=')[1].replace('"', '').replace("'", "")
            # LOGGER.info(u'payload: %s of %s, charset=%s',
            #             len(part_payload), type(part_payload), charset)
            # NOTE(review): content_type_parts is only assigned when the
            # part lacked a charset; a part that carries one may hit a
            # NameError here (or reuse the previous part's value) — confirm.
            message_body_key = (
                'plain' if content_type_parts[0] == 'text/plain' else 'html'
            )
            # Concatenate every body part to get a full body.
            if isinstance(part_payload, str):
                try:
                    message_body[message_body_key] += part_payload.decode(
                        charset)
                except LookupError:
                    # Unknown codec name: fall back to lenient UTF-8.
                    message_body[message_body_key] += part_payload.decode(
                        'utf-8', errors='replace')
            else:
                message_body[message_body_key] += part_payload
        # HTML content has precedence, because data will be richer.
        # In case of newsletters / mailing-lists, HTML content will
        # allow us to follow links, while text-only content will
        # [perhaps] not, or less easily. BTW, text/plain content will
        # not contain markdown links, and we will loose the ability
        # to render them in the GUI, while HTML will be converted to
        # Markdown as usual and the user will see richer content.
        if message_body['html']:
            content_type = CONTENT_TYPES.HTML
            content = message_body['html']
        else:
            content_type = CONTENT_TYPES.MARKDOWN
            content = message_body['plain']
        defaults = {
            'name': name,
            'origin': kwargs.pop('origin', ORIGINS.EMAIL),
            'date_published': email_data.get('date'),
            'content': content,
            'content_type': content_type,
        }
        defaults.update(kwargs)
        # NOTE(review): message_id stays None when the message has no
        # Message-ID header, which would raise AttributeError here — confirm
        # upstream always provides one.
        if message_id.startswith(u'<'):
            # Remove the <> around the ID.
            message_id = message_id[1:-1]
        email, created = cls.objects.get_or_create(message_id=message_id,
                                                   defaults=defaults)
        if created:
            LOGGER.info(u'Created email #%s in feed(s) %s.', message_id,
                        u', '.join(unicode(f) for f in feeds))
            if feeds:
                try:
                    with transaction.atomic():
                        email.feeds.add(*feeds)
                except IntegrityError:
                    LOGGER.exception(u'Integrity error on created email #%s',
                                     message_id)
                    pass
            # Keep the raw message around for later re-processing.
            od = email.add_original_data(
                'email',
                value=email_data.get('raw_email'),
                # We will commit at next call.
                commit=False,
                # We do not launch the post-processing
                # task, it's not implemented yet anyway.
                launch_task=False
            )
            email.add_original_data(
                'matching_rule',
                value=email_data.get('meta'),
                # Use the OD returned before to commit on.
                original_data=od,
                # No post-processing for matching rules.
                launch_task=False
            )
            return email, True
        # —————————————————————————————————————————————————————— existing email
        # Get a change to catch a duplicate if workers were fast.
        if email.duplicate_of_id:
            LOGGER.info(u'Swaping duplicate email #%s with master #%s on '
                        u'the fly.', email.id, email.duplicate_of_id)
            email = email.duplicate_of
        created_retval = False
        previous_feeds_count = email.feeds.count()
        try:
            with transaction.atomic():
                email.feeds.add(*feeds)
        except IntegrityError:
            # Race condition when backfill_if_needed() is run after
            # reception of first item in a stream, and they both create
            # the same email.
            LOGGER.exception(u'Integrity error when adding feeds %s to '
                             u'email #%s', feeds, message_id)
        else:
            if email.feeds.count() > previous_feeds_count:
                # This email is already there, but has not yet been
                # fetched for this feed. It's mutualized, and as such
                # it is considered at partly new. At least, it's not
                # as bad as being a true duplicate.
                created_retval = None
                LOGGER.info(u'Mutualized email #%s #%s in feed(s) %s.',
                            message_id, email.id,
                            u', '.join(unicode(f) for f in feeds))
                email.create_reads(feeds=feeds)
            else:
                # No statsd, because we didn't create any record in database.
                LOGGER.info(u'Duplicate email “%s” #%s #%s in feed(s) %s.',
                            name, message_id, email.id,
                            u', '.join(unicode(f) for f in feeds))
        return email, created_retval
    def fetch_attachments(self, attachments=None, commit=True):
        """ Fetch Email attachments.

        Not implemented yet: the method raises immediately; everything
        below the raise is dead code kept as a draft.
        """
        raise NotImplementedError('refresh for emails (mentions ←→ To:/Cc:)')
        # NOTE: unreachable draft code below (kept on purpose).
        if self.attachments_fetched:
            LOGGER.info(u'%s: attachments already fetched.', self)
            # return
        if attachments is None:
            attachments = self.original_data.twitter_hydrated['attachments']
        all_went_ok = True
        for attachments_name, fetch_attachments_method in (
            ('urls', self.fetch_attachments_urls, ),
            ('media', self.fetch_attachments_media, ),
            ('user_mentions', self.connect_mentions, ),
        ):
            attachments_values = attachments.get(attachments_name, None)
            if attachments_values:
                if not fetch_attachments_method(attachments_values):
                    all_went_ok = False
        if all_went_ok:
            self.attachments_fetched = True
            if commit:
                self.save()
    def connect_mentions(self, user_mentions):
        """ Connect mentions to the current email.

        Not implemented yet: the method raises immediately; everything
        below the raise is dead code kept as a draft.
        """
        raise NotImplementedError('refresh for emails (mentions ←→ To:/Cc:)')
        # NOTE: unreachable draft code below (kept on purpose).
        all_went_ok = True
        for user_mention in user_mentions:
            try:
                author = Author.get_author_from_twitter_user(user_mention)
                self.mentions.add(author)
            except:
                all_went_ok = False
                LOGGER.exception(u'Could not connect user mention '
                                 u'%s in email %s', user_mention, self)
        return all_went_ok
    def post_create_task(self, apply_now=False):
        """ Method meant to be run from a celery task.

        :param apply_now: run the reads-creation and processing tasks
            synchronously instead of dispatching them to the queue.
        """
        if apply_now:
            baseitem_create_reads_task.apply((self.id, ))
            baseitem_process_task.apply((self.id, ))
        else:
            # Chain processing after reads creation; the create-reads task
            # can abort the chain by returning False.
            post_create_reads_chain = tasks_chain(
                baseitem_process_task.si(self.id),
            )
            baseitem_create_reads_task.apply_async(
                args=(self.id, ),
                kwargs={'stop_chain_on_false': True},
                link=post_create_reads_chain
            )
# ———————————————————————————————————————————————————————————————— Celery Tasks
# Injects `email_post_create_task` into this module's globals(), wrapping
# Email.post_create_task for asynchronous execution on the 'create' queue.
register_task_method(Email, Email.post_create_task,
                     globals(), queue=u'create')
# ————————————————————————————————————————————————————————————————————— Signals
def email_pre_save(instance, **kwargs):
    """ pre_save hook: derive a slug from the name when none is set. """
    if not instance.slug:
        instance.slug = slugify(instance.name)
def email_post_save(instance, **kwargs):
    """ post_save hook: account the new e-mail and schedule its tasks. """
    email = instance
    if kwargs.get('created', False):
        with statsd.pipeline() as spipe:
            spipe.gauge('emails.counts.total', 1, delta=True)
        # The task is injected into globals() by register_task_method()
        # above; the countdown lets the surrounding transaction settle.
        globals()['email_post_create_task'].apply_async(
            args=(email.id, ), countdown=config.POST_CREATE_TASKS_DELAY)
def email_pre_delete(instance, **kwargs):
    """ pre_delete hook: decrement the statsd e-mail counter. """
    with statsd.pipeline() as spipe:
        spipe.gauge('emails.counts.total', -1, delta=True)
# Wire the lifecycle hooks above to the Email model.
pre_delete.connect(email_pre_delete, sender=Email)
pre_save.connect(email_pre_save, sender=Email)
post_save.connect(email_post_save, sender=Email)
| 1flow/1flow | oneflow/core/models/reldb/item/email.py | Python | agpl-3.0 | 16,956 |
"A collection of individuals with fixed relationships"
import numpy as np
from pydigree.paths import fraternity
from pydigree.common import table
from pydigree.population import Population
class Pedigree(Population):
    "A collection of individuals with fixed relationships"
    def __init__(self, label=None):
        """
        Create a pedigree.
        :param label: pedigree label
        """
        Population.__init__(self)
        self.label = label
        # Memoization caches for the recursive kinship/fraternity
        # coefficients, keyed by frozenset pairs of individual labels.
        self.kinmat = {}
        self.fratmat = {}
    def __prepare_nonfounder_contraint(self, con):
        # NOTE(review): despite the "nonfounder" name, the predicate keeps
        # founders; presumably it should test `not x.is_founder()` —
        # confirm against callers before changing.
        if not con:
            return lambda x: x.is_founder()
        else:
            return lambda x: x.is_founder() and con(x)
    def bit_size(self):
        """
        Returns the bit size of the pedigree. The bitsize is defined as 2*n-f
        where n is the number of nonfounders and f is the number of founders.
        This represents the number of bits it takes to represent the
        inheritance vector in the Lander-Green algorithm.
        :returns: bit size
        :rtype: int
        """
        t = table([x.is_founder() for x in self.individuals])
        # t[False] counts nonfounders, t[True] counts founders.
        return 2 * t[False] - t[True]
    # Relationships
    #
    def kinship(self, id1, id2):
        """
        Get the Malecot coefficient of coancestry for two individuals in
        the pedigree. These are calculated recursively.
        For pedigree objects, results are stored to reduce the calculation
        time for kinship matrices.
        :param id1: the label of a individual to be evaluated
        :param id2: the label of a individual to be evaluated
        :returns: Malecot's coefficient of coancestry
        :rtype: float
        Reference:
        Lange. Mathematical and Statistical Methods for Genetic Analysis.
        1997. Springer.
        """
        pair = frozenset([id1, id2])
        if pair in self.kinmat:
            return self.kinmat[pair]
        if id1 is None or id2 is None:
            # A missing parent contributes nothing to coancestry.
            return 0
        # Since with pedigree objects we're typically working with IDs,
        # I define these functions to get parents for IDs by looking them
        # up in the pedigree
        def fa(lab):
            return (self[lab].father.label
                    if self[lab].father is not None else None)
        def mo(lab):
            return (self[lab].mother.label
                    if self[lab].mother is not None else None)
        # Use tuples here to take advantage of the implicit tuple ordering
        # With depth as the first item, it assures that descendants aren't
        # listed before their ancestors.
        t1 = self[id1].depth, id1
        t2 = self[id2].depth, id2
        if id1 == id2:
            k = (1 + self.kinship(fa(id1), mo(id1))) / 2.0
        elif t1 < t2:
            # Recurse through the deeper individual's parents.
            k = (self.kinship(id1, fa(id2)) + self.kinship(id1, mo(id2))) / 2.0
        else:
            k = (self.kinship(id2, fa(id1)) + self.kinship(id2, mo(id1))) / 2.0
        self.kinmat[pair] = k
        return k
    def fraternity(self, id1, id2):
        """
        Like Pedigree.kinship, this is a convenience function for getting
        fraternity coefficients for two pedigree memebers by their ID label.
        This is a wrapper for paths.fraternity
        :param id1: the label of a individual to be evaluated
        :param id2: the label of a individual to be evaluated
        :returns: coefficient of fraternity
        :rtype: float
        """
        pair = frozenset([id1, id2])
        if pair not in self.fratmat:
            # Fixed: was a duplicated `f = f = fraternity(...)` assignment.
            f = fraternity(self[id1], self[id2])
            self.fratmat[pair] = f
            return f
        else:
            return self.fratmat[pair]
    def inbreeding(self, indlab):
        """
        Like Pedigree.kinship, this is a convenience function for getting
        inbreeding coefficients for individuals in pedigrees by their id
        label. As inbreeding coefficients are the kinship coefficient of
        the parents, this function calls Pedigree.kinship to check for
        stored values.
        :param indlab: the label of the individual to be evaluated
        :returns: inbreeding coefficient
        :rtype: float
        """
        ind = self[indlab]
        if ind.is_founder():
            return 0.0
        if ind.father.is_founder() or ind.mother.is_founder():
            # A founder parent cannot share ancestry with the other parent.
            return 0.0
        return self.kinship(ind.father.label, ind.mother.label)
    def additive_relationship_matrix(self, ids=None):
        """
        Calculates an additive relationship matrix (the A matrix) for
        quantiatitive genetics.
        A_ij = 2 * kinship(i,j) if i != j.
        (See the notes on function 'kinship')
        A_ij = 1 + inbreeding(i) if i == j
        (inbreeding(i) is equivalent to kinship(i.father,i.mother))
        :param ids: IDs of pedigree members to include in the matrix
        Important: if not given, the rows/columns are all individuals in the
        pedigree, sorted by id. If you're not sure about this, try
        sorted(x.label for x in ped) to see the ordering.
        :returns: additive relationship matrix
        :rtype: matrix
        """
        if not ids:
            ids = sorted(x.label for x in self.individuals)
        else:
            # Keep only (pedigree, label) pairs belonging to this pedigree.
            ids = [label for ped, label in ids if ped == self.label and
                   label in self.population.keys()]
        mat = []
        for a in ids:
            row = []
            for b in ids:
                if a == b:
                    row.append(1 + self.inbreeding(a))
                else:
                    row.append(2 * self.kinship(a, b))
            mat.append(row)
        return np.matrix(mat)
    def dominance_relationship_matrix(self, ids=None):
        """
        Calculates the dominance genetic relationship matrix (the D matrix)
        for quantitative genetics.
        D_ij = fraternity(i,j) if i != j
        D_ij = 1 if i == j
        :param ids: IDs of pedigree members to include in the matrix
        Important: if not given, the rows/columns are all individuals in the
        pedigree, sorted by id. If you're not sure about this, try
        sorted(x.label for x in ped) to see the ordering.
        :returns: dominance relationship matrix
        :rtype: matrix
        """
        if not ids:
            ids = sorted(x.label for x in self.individuals)
        else:
            ids = [label for ped, label in ids if ped == self.label and
                   label in self.population.keys()]
        mat = []
        for a in ids:
            row = []
            for b in ids:
                if a == b:
                    row.append(1)
                else:
                    row.append(self.fraternity(a, b))
            mat.append(row)
        return np.matrix(mat)
    def mitochondrial_relationship_matrix(self, ids=None):
        """
        Calculates the mitochondrial relationship matrix.
        M_ij = 1 if matriline(i) == matriline(j)
        :param ids: IDs of pedigree members to include in the matrix
        Important: if not given, the rows/columns are all individuals in the
        pedigree, sorted by id. If you're not sure about this, try
        sorted(x.label for x in ped) to see the ordering.
        Returns: A numpy matrix
        Reference:
        Liu et al. "Association Testing of the Mitochondrial Genome Using
        Pedigree Data". Genetic Epidemiology. (2013). 37,3:239-247
        """
        if not ids:
            inds = sorted((x for x in self.individuals), key=lambda x: x.label)
        else:
            inds = [self[id] for id in ids]
        mat = []
        for a in inds:
            # Fixed: compare against the resolved Individual objects
            # (`inds`), not the raw `ids` argument, which is None in the
            # default case and contains bare labels otherwise.
            row = [1 if a.matriline() == b.matriline() else 0
                   for b in inds]
            mat.append(row)
        return np.matrix(mat)
    # Gene dropping
    #
    def simulate_ibd_states(self, inds=None):
        """
        Simulate IBD patterns by gene dropping: Everyone's genotypes reflect
        the founder chromosome that they received the genotype from. You can
        then use the ibs function to determine IBD state. This effectively an
        infinite-alleles simulation.
        :param inds: restrict genotype generation to these individuals
        Returns: Nothing
        """
        self.clear_genotypes()
        for x in self.founders():
            x.label_genotypes()
        if inds:
            for x in inds:
                x.get_genotypes()
        else:
            for x in self.nonfounders():
                x.get_genotypes()
| jameshicks/pydigree | pydigree/pedigree.py | Python | apache-2.0 | 8,471 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-09-23 17:26
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """ Auto-generated migration: make Category.slug a unique SlugField. """
    dependencies = [
        ('rango', '0003_category_slug'),
    ]
    operations = [
        migrations.AlterField(
            model_name='category',
            name='slug',
            field=models.SlugField(unique=True),
        ),
    ]
| raghuraju/tango-with-django-110 | rango/migrations/0004_auto_20160923_1726.py | Python | mit | 442 |
from pixie.vm.object import Object, Type, safe_invoke
from pixie.vm.primitives import true
import rpython.rlib.rthread as rthread
from pixie.vm.primitives import nil
import rpython.rlib.rgil as rgil
from pixie.vm.code import as_var
import pixie.vm.rt as rt
class Bootstrapper(object):
    """ Hands a startup function from the spawning thread to a new thread.

    A single lock serializes thread creation: the spawner stores the
    function while holding the lock (aquire), and the newly started
    thread reads it and releases the lock (fn/release).
    """
    def __init__(self):
        self._is_inited = False
        #_self.init()
    def init(self):
        # Lazily allocate the lock on first use; the GIL is (re)allocated
        # on every call.
        if not self._is_inited:
            self._lock = rthread.allocate_lock()
            self._is_inited = True
        rgil.allocate()
    def aquire(self, fn):
        # NOTE(review): spelling "aquire" kept — callers use this name.
        self.init()
        self._lock.acquire(True)
        self._fn = fn
    def fn(self):
        # Returns the function stored by the spawning thread.
        return self._fn
    def release(self):
        self._fn = None
        self._lock.release()
    def _cleanup_(self):
        # RPython translation hook: reset mutable state.
        self._lock = None
        self._is_inited = False
def bootstrap():
    """ Entry point of every spawned thread: register with the GC, take
    the startup function from the bootstrapper (releasing its lock),
    run it, then deregister. """
    rthread.gc_thread_start()
    fn = bootstrapper.fn()
    bootstrapper.release()
    safe_invoke(fn, [])
    rthread.gc_thread_die()
bootstrapper = Bootstrapper()
@as_var("-thread")
def new_thread(fn):
    """ Spawn an OS thread that will invoke *fn* once bootstrapped.

    The closure is handed over through the bootstrapper's lock; the new
    thread picks it up in bootstrap() and releases the lock.
    """
    bootstrapper.aquire(fn)
    # The returned thread identifier was previously assigned to an unused
    # local; it is not needed here.
    rthread.start_new_thread(bootstrap, ())
    return nil
@as_var("-yield-thread")
def yield_thread():
    """ Cooperatively release the GIL so other threads may run. """
    rgil.yield_thread()
    return nil
# Locks
class Lock(Object):
_type = Type(u"pixie.stdlib.Lock")
def __init__(self, ll_lock):
self._ll_lock = ll_lock
@as_var("-create-lock")
def _create_lock():
    """ Allocate a fresh lock and wrap it as a pixie Lock object. """
    return Lock(rthread.allocate_lock())
@as_var("-acquire-lock")
def _acquire_lock(self, no_wait):
    """ Acquire the lock; returns whether acquisition succeeded.

    NOTE(review): the flag is passed straight to ll acquire() as the
    wait flag, so `no_wait == true` actually means *blocking* acquire —
    the parameter name looks inverted; confirm against pixie callers.
    """
    assert isinstance(self, Lock)
    return rt.wrap(self._ll_lock.acquire(no_wait == true))
@as_var("-acquire-lock-timed")
def _acquire_lock_timed(self, ms):
    """ Acquire the lock with a timeout argument.

    Renamed from `_acquire_lock`: the original definition reused that
    name and silently shadowed the untimed variant at module level (both
    remain registered in pixie via their distinct as_var names, which is
    the external interface).
    NOTE(review): the value is forwarded to ll acquire() unchanged —
    confirm the underlying lock actually interprets it as milliseconds.
    """
    assert isinstance(self, Lock)
    return rt.wrap(self._ll_lock.acquire(ms.int_val()))
@as_var("-release-lock")
def _release_lock(self):
    """ Release the lock; wraps the (None) result for pixie. """
    assert isinstance(self, Lock)
    return rt.wrap(self._ll_lock.release())
# The *_external_call() functions are themselves called only from the rffi
# module from a helper function that also has this hint.
| pixie-lang/pixie | pixie/vm/threads.py | Python | gpl-3.0 | 2,030 |
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""This is where all the public API calls are accessible
This is the only file containing public calls and everything that is
present here can be considered private by the invenio modules.
"""
import os
from urllib import urlretrieve
from tempfile import mkstemp
from invenio.refextract_engine import parse_references, \
get_plaintext_document_body, \
parse_reference_line, \
get_kbs
from invenio.refextract_text import extract_references_from_fulltext
from invenio.search_engine_utils import get_fieldvalues
from invenio.bibindex_tokenizers.BibIndexJournalTokenizer import \
CFG_JOURNAL_PUBINFO_STANDARD_FORM, \
CFG_JOURNAL_TAG
from invenio.bibdocfile import BibRecDocs, InvenioBibDocFileError
from invenio.search_engine import get_record
from invenio.bibtask import task_low_level_submission
from invenio.bibrecord import record_delete_fields, record_xml_output, \
create_record, record_get_field_instances, record_add_fields, \
record_has_field
from invenio.refextract_find import get_reference_section_beginning, \
find_numeration_in_body
from invenio.refextract_text import rebuild_reference_lines
from invenio.refextract_config import CFG_REFEXTRACT_FILENAME
from invenio.config import CFG_TMPSHAREDDIR
class FullTextNotAvailable(Exception):
    """Signals that the full text of a document could not be accessed."""
    pass
class RecordHasReferences(Exception):
    """Signals that a record's references cannot be updated.

    Raised when all of the following hold:
    * a reference update was requested for a record
    * overwriting existing references was explicitly disabled
      (via the appropriate function argument)
    * the record already has references, so they cannot be updated
    """
def extract_references_from_url_xml(url):
"""Extract references from the pdf specified in the url
The single parameter is the path to the pdf.
It raises FullTextNotAvailable if the url gives a 404
The result is given in marcxml.
"""
filename, dummy = urlretrieve(url)
try:
try:
marcxml = extract_references_from_file_xml(filename)
except IOError, err:
if err.code == 404:
raise FullTextNotAvailable()
else:
raise
finally:
os.remove(filename)
return marcxml
def extract_references_from_file_xml(path, recid=None):
    """Extract references from a local pdf file

    The single parameter is the path to the file
    It raises FullTextNotAvailable if the file does not exist
    The result is given in marcxml.
    """
    references = extract_references_from_file(path=path, recid=recid)
    return references.to_xml()
def extract_references_from_file(path, recid=None):
    """Extract references from a local pdf file

    The single parameter is the path to the file
    It raises FullTextNotAvailable if the file does not exist
    The result is given as a bibrecord class.
    """
    if not os.path.isfile(path):
        raise FullTextNotAvailable()

    docbody, dummy = get_plaintext_document_body(path)
    reflines, dummy, dummy = extract_references_from_fulltext(docbody)
    if not reflines:
        # Nothing found in the plain conversion: retry while preserving
        # the original page layout, which sometimes keeps reference
        # markers intact
        docbody, dummy = get_plaintext_document_body(path, keep_layout=True)
        reflines, dummy, dummy = extract_references_from_fulltext(docbody)

    return parse_references(reflines, recid=recid)
def extract_references_from_string_xml(source,
                                       is_only_references=True,
                                       recid=None):
    """Extract references from a string

    The single parameter is the document
    The result is given in marcxml.
    """
    return extract_references_from_string(
        source=source,
        is_only_references=is_only_references,
        recid=recid).to_xml()
def extract_references_from_string(source,
                                   is_only_references=True,
                                   recid=None):
    """Extract references from a string

    Parameters:
    * source: the document text
    * is_only_references: when True (default) the whole string is assumed
      to already be the reference section; otherwise the reference
      section is located inside the full text first
    * recid: optional record id attached to the parsed references

    The result is given as a bibrecord class.
    """
    docbody = source.split('\n')
    if not is_only_references:
        reflines, dummy, dummy = extract_references_from_fulltext(docbody)
    else:
        refs_info = get_reference_section_beginning(docbody)
        if not refs_info:
            # No recognizable section header: fall back to numeration
            # detection and treat the whole body as references
            refs_info, dummy = find_numeration_in_body(docbody)
            refs_info['start_line'] = 0
            # BUGFIX: a trailing comma previously made end_line a
            # one-element tuple instead of an int
            refs_info['end_line'] = len(docbody) - 1

        reflines = rebuild_reference_lines(docbody, refs_info['marker_pattern'])
    return parse_references(reflines, recid=recid)
def extract_references_from_record(recid):
    """Extract references from a record id

    The single parameter is the document
    The result is given as a bibrecord class.
    """
    path = look_for_fulltext(recid)
    if path:
        return extract_references_from_file(path, recid=recid)
    raise FullTextNotAvailable()
def extract_references_from_record_xml(recid):
    """Extract references from a record id

    The single parameter is the document
    The result is given in marcxml.
    """
    references = extract_references_from_record(recid)
    return references.to_xml()
def replace_references(recid):
    """Replace references for a record

    The record itself is not updated, the marc xml of the document with
    updated references is returned

    Parameters:
    * recid: the id of the record
    """
    # Extract and parse fresh references
    refs_xml = extract_references_from_record_xml(recid)
    refs_record = create_record(refs_xml)
    # Fetch the current record (kept before the emptiness check to
    # preserve the original call order)
    record = get_record(recid)

    if not refs_record[0]:
        return None

    new_fields = record_get_field_instances(refs_record[0],
                                            tag='999',
                                            ind1='%',
                                            ind2='%')
    # Swap the old 999 fields for the freshly extracted ones
    record_delete_fields(record, '999')
    record_add_fields(record, '999', new_fields)
    return record_xml_output(record)
def update_references(recid, overwrite=True):
    """Update references for a record

    First, we extract references from a record.
    Then, we are not updating the record directly but adding a bibupload
    task in -c mode which takes care of updating the record.

    Parameters:
    * recid: the id of the record
    * overwrite: when False, refuse to touch records that already carry
      references or have been curated (raises RecordHasReferences)
    """
    if not overwrite:
        # Check for references in record
        record = get_record(recid)
        if record and record_has_field(record, '999'):
            raise RecordHasReferences('Record has references and overwrite '
                                      'mode is disabled: %s' % recid)
        # NOTE(review): the curated check is assumed to sit inside the
        # overwrite guard — indentation was lost in extraction; confirm
        # against the upstream file
        if get_fieldvalues(recid, '999C59'):
            raise RecordHasReferences('Record has been curated: %s' % recid)

    # Parse references
    references_xml = extract_references_from_record_xml(recid)

    # Save new record to file
    (temp_fd, temp_path) = mkstemp(prefix=CFG_REFEXTRACT_FILENAME,
                                   dir=CFG_TMPSHAREDDIR)
    temp_file = os.fdopen(temp_fd, 'w')
    temp_file.write(references_xml)
    temp_file.close()

    # Update record: queue a low-priority (-P 4) correction-mode upload
    task_low_level_submission('bibupload', 'refextract', '-P', '4',
                              '-c', temp_path)
def list_pdfs(recid):
    """Yield every PDF-like file attached to the given record."""
    for doc in BibRecDocs(recid).list_bibdocs():
        for ext in ('pdf', 'pdfa', 'PDF'):
            try:
                yield doc.get_file(ext)
            except InvenioBibDocFileError:
                # No file with this extension on this document; skip
                pass
def get_pdf_doc(recid):
    """Return the first PDF attached to the record, or None if there is none."""
    # next() with a default (py2.6+) replaces the explicit
    # StopIteration handling of the original
    return next(list_pdfs(recid), None)
def look_for_fulltext(recid):
    """Return the filesystem path of the record's PDF, or None if absent."""
    doc = get_pdf_doc(recid)
    if doc:
        return doc.get_full_path()
    return None
def record_has_fulltext(recid):
    """Checks if we can access the fulltext for the given recid"""
    return look_for_fulltext(recid) is not None
def search_from_reference(text):
    """Convert a raw reference to a search query

    Called by the search engine to convert a raw reference:
    find rawref John, JINST 4 (1994) 45
    is converted to
    journal:"JINST,4,45"
    """
    field = ''
    pattern = ''
    kbs = get_kbs()
    references, dummy_m, dummy_c, dummy_co = parse_reference_line(text, kbs)

    for elements in references:
        for el in elements:
            if el['type'] == 'JOURNAL':
                field = 'journal'
                # Fill the standard publication-info form by substituting
                # the title/volume/page/year placeholders in turn
                pattern = CFG_JOURNAL_PUBINFO_STANDARD_FORM
                for marker, value in (('p', el['title']),
                                      ('v', el['volume']),
                                      ('c', el['page']),
                                      ('y', el['year'])):
                    placeholder = CFG_JOURNAL_TAG.replace('%', marker)
                    pattern = pattern.replace(placeholder, value)
                break
            elif el['type'] == 'REPORTNUMBER':
                field = 'report'
                pattern = el['report_num']
                break
    # NOTE(review): only the inner loop breaks, so a match from a later
    # reference line overwrites an earlier one (last-match-wins);
    # behavior preserved as-is — confirm whether first-match was intended
    return field, pattern.encode('utf-8')
def check_record_for_refextract(recid):
    """Decide whether references extracted from a new pdf may be submitted.

    Returns True when it is safe to (re)extract references for the record.
    """
    if get_fieldvalues(recid, '999C6v'):
        # References were produced by refextract: safe to resubmit only
        # when they have not been curated (999C59 marks curation; curated
        # records will go to the Holding Pen / ticketing in the future)
        return not get_fieldvalues(recid, '999C59')
    # No refextract stamp: safe only when the record carries no
    # references at all. Otherwise the references are old, possibly
    # curated by SLAC, and we cannot distinguish — so do nothing.
    return not get_fieldvalues(recid, '999C5_')
| GRArmstrong/invenio-inspire-ops | modules/docextract/lib/refextract_api.py | Python | gpl-2.0 | 10,884 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.