Columns (name, type, length range):
  query   string  1 to 46.9k characters
  pos     string  75 to 104k characters
  neg     list    12 items
  scores  list    12 items
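For orientation, a minimal sketch of what one row of this dump looks like once loaded into Python. The dict layout simply mirrors the column summary above; the field values are abbreviated placeholders echoing the first row, not full data.

# One row of the dump: a docstring-style query, one positive code snippet,
# twelve hard-negative snippets, and twelve similarity scores (values here
# echo the first row below, abbreviated and rounded).
row = {
    "query": "Estimate discontinuity in basis of low resolution image segmentation. ...",
    "pos": "def __msgc_step3_discontinuity_localization(self): ...",
    "neg": ["def branchScale(self): ..."] * 12,
    "scores": [0.686, 0.676, 0.673, 0.662, 0.648, 0.642,
               0.638, 0.634, 0.634, 0.632, 0.631, 0.629],
}

# neg and scores are parallel lists of length 12; the scores appear to rate
# each negative against the query, in descending order.
assert len(row["neg"]) == len(row["scores"]) == 12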
Estimate discontinuity in basis of low resolution image segmentation. :return: discontinuity in low resolution
def __msgc_step3_discontinuity_localization(self):
    """
    Estimate discontinuity in basis of low resolution image segmentation.
    :return: discontinuity in low resolution
    """
    import scipy

    start = self._start_time
    seg = 1 - self.segmentation.astype(np.int8)
    self.stats["low level object voxels"] = np.sum(seg)
    self.stats["low level image voxels"] = np.prod(seg.shape)
    # in seg is now stored low resolution segmentation
    # back to normal parameters
    # step 2: discontinuity localization
    # self.segparams = sparams_hi
    seg_border = scipy.ndimage.filters.laplace(seg, mode="constant")
    logger.debug("seg_border: %s", scipy.stats.describe(seg_border, axis=None))
    # logger.debug(str(np.max(seg_border)))
    # logger.debug(str(np.min(seg_border)))
    seg_border[seg_border != 0] = 1
    logger.debug("seg_border: %s", scipy.stats.describe(seg_border, axis=None))
    # scipy.ndimage.morphology.distance_transform_edt
    boundary_dilatation_distance = self.segparams["boundary_dilatation_distance"]
    seg = scipy.ndimage.morphology.binary_dilation(
        seg_border,
        # seg,
        np.ones(
            [
                (boundary_dilatation_distance * 2) + 1,
                (boundary_dilatation_distance * 2) + 1,
                (boundary_dilatation_distance * 2) + 1,
            ]
        ),
    )
    if self.keep_temp_properties:
        self.temp_msgc_lowres_discontinuity = seg
    else:
        self.temp_msgc_lowres_discontinuity = None

    if self.debug_images:
        import sed3

        pd = sed3.sed3(seg_border)  # ), contour=seg)
        pd.show()
        pd = sed3.sed3(seg)  # ), contour=seg)
        pd.show()
    # segzoom = scipy.ndimage.interpolation.zoom(seg.astype('float'), zoom,
    #                                            order=0).astype('int8')
    self.stats["t3"] = time.time() - start
    return seg
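The positive above localizes the discontinuity band by taking a Laplacian of the low-resolution segmentation (non-zero only where neighbouring labels disagree), binarizing it, and dilating it by boundary_dilatation_distance. A standalone sketch of that idea follows; the function name, signature, and toy volume are illustrative assumptions, not the class method itself.

import numpy as np
import scipy.ndimage

def localize_discontinuity(lowres_segmentation, dilation_distance=2):
    """Sketch of the boundary-band idea used above (names are illustrative).

    The Laplacian of the low-resolution label image is non-zero only on the
    object boundary; dilating that binary border by `dilation_distance`
    gives the band that gets refined at high resolution.
    """
    seg = lowres_segmentation.astype(np.int8)
    border = scipy.ndimage.laplace(seg, mode="constant")
    border = (border != 0).astype(np.uint8)
    size = 2 * dilation_distance + 1
    band = scipy.ndimage.binary_dilation(border, structure=np.ones((size,) * seg.ndim))
    return band

# Example: a small 3-D volume with a cube-shaped "object".
vol = np.zeros((16, 16, 16), dtype=np.int8)
vol[4:12, 4:12, 4:12] = 1
band = localize_discontinuity(vol, dilation_distance=2)
print(band.sum(), "voxels flagged for high-resolution refinement")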
[ "def branchScale(self):\n \"\"\"See docs for `Model` abstract base class.\"\"\"\n bs = -(self.prx * scipy.diagonal(self.Prxy, axis1=1, axis2=2)\n ).sum() * self.mu / float(self.nsites)\n assert bs > 0\n return bs", "def branchScale(self):\n \"\"\"See docs for `Model` abstract base class.\"\"\"\n bs = -(self.Phi_x * scipy.diagonal(self.Pxy[0])).sum() * self.mu\n assert bs > 0\n return bs", "def __msgc_step12_low_resolution_segmentation(self):\n \"\"\"\n Get the segmentation and the\n :return:\n \"\"\"\n import scipy\n\n start = self._start_time\n # ===== low resolution data processing\n # default parameters\n # TODO segparams_lo and segparams_hi je tam asi zbytecně\n sparams_lo = {\n \"boundary_dilatation_distance\": 2,\n \"block_size\": 6,\n \"use_boundary_penalties\": True,\n \"boundary_penalties_weight\": 1,\n \"tile_zoom_constant\": 1,\n }\n\n sparams_lo.update(self.segparams)\n sparams_hi = copy.copy(sparams_lo)\n # sparams_lo['boundary_penalties_weight'] = (\n # sparams_lo['boundary_penalties_weight'] *\n # sparams_lo['block_size'])\n self.segparams = sparams_lo\n\n self.stats[\"t1\"] = time.time() - start\n # step 1: low res GC\n hiseeds = self.seeds\n # ms_zoom = 4 # 0.125 #self.segparams['scale']\n # ms_zoom = self.segparams['block_size']\n # loseeds = pyed.getSeeds()\n # logger.debug(\"msc \" + str(np.unique(hiseeds)))\n loseeds = seed_zoom(hiseeds, self.segparams[\"block_size\"])\n\n hard_constraints = True\n\n self.seeds = loseeds\n\n modelparams_hi = self.modelparams.copy()\n # feature vector will be computed from selected voxels\n self.modelparams[\"use_extra_features_for_training\"] = True\n\n # TODO what with voxels? It is used from here\n # hiseeds and hiimage is used to create intensity model\n self.voxels1 = self.img[hiseeds == 1].reshape(-1, 1)\n self.voxels2 = self.img[hiseeds == 2].reshape(-1, 1)\n # this is how to compute with loseeds resolution but in wrong way\n # self.voxels1 = self.img[self.seeds == 1]\n # self.voxels2 = self.img[self.seeds == 2]\n\n # self.voxels1 = pyed.getSeedsVal(1)\n # self.voxels2 = pyed.getSeedsVal(2)\n\n img_orig = self.img\n\n # TODO this should be done with resize_to_shape_whith_zoom\n zoom = np.asarray(loseeds.shape).astype(np.float) / img_orig.shape\n self.img = scipy.ndimage.interpolation.zoom(img_orig, zoom, order=0)\n voxelsize_orig = self.voxelsize\n logger.debug(\"zoom %s\", zoom)\n logger.debug(\"vs %s\", self.voxelsize)\n self.voxelsize = self.voxelsize * zoom\n\n # self.img = resize_to_shape_with_zoom(img_orig, loseeds.shape, 1.0 / ms_zoom, order=0)\n\n # this step set the self.segmentation\n self.__single_scale_gc_run()\n # logger.debug(\n # 'segmentation - max: %d min: %d' % (\n # np.max(self.segmentation),\n # np.min(self.segmentation)\n # )\n # )\n logger.debug(\n \"segmentation: %s\", scipy.stats.describe(self.segmentation, axis=None)\n )\n\n self.modelparams = modelparams_hi\n self.voxelsize = voxelsize_orig\n self.img = img_orig\n self.seeds = hiseeds\n self.stats[\"t2\"] = time.time() - start\n return hard_constraints", "def __multiscale_gc_lo2hi_run(self): # , pyed):\n \"\"\"\n Run Graph-Cut segmentation with refinement of low resolution multiscale graph.\n In first step is performed normal GC on low resolution data\n Second step construct finer grid on edges of segmentation from first\n step.\n There is no option for use without `use_boundary_penalties`\n \"\"\"\n # from PyQt4.QtCore import pyqtRemoveInputHook\n # pyqtRemoveInputHook()\n self._msgc_lo2hi_resize_init()\n self.__msgc_step0_init()\n\n hard_constraints 
= self.__msgc_step12_low_resolution_segmentation()\n # ===== high resolution data processing\n seg = self.__msgc_step3_discontinuity_localization()\n\n self.stats[\"t3.1\"] = (time.time() - self._start_time)\n graph = Graph(\n seg,\n voxelsize=self.voxelsize,\n nsplit=self.segparams[\"block_size\"],\n edge_weight_table=self._msgc_npenalty_table,\n compute_low_nodes_index=True,\n )\n\n # graph.run() = graph.generate_base_grid() + graph.split_voxels()\n # graph.run()\n graph.generate_base_grid()\n self.stats[\"t3.2\"] = (time.time() - self._start_time)\n graph.split_voxels()\n\n self.stats[\"t3.3\"] = (time.time() - self._start_time)\n\n self.stats.update(graph.stats)\n self.stats[\"t4\"] = (time.time() - self._start_time)\n mul_mask, mul_val = self.__msgc_tlinks_area_weight_from_low_segmentation(seg)\n area_weight = 1\n unariesalt = self.__create_tlinks(\n self.img,\n self.voxelsize,\n self.seeds,\n area_weight=area_weight,\n hard_constraints=hard_constraints,\n mul_mask=None,\n mul_val=None,\n )\n # N-links prepared\n self.stats[\"t5\"] = (time.time() - self._start_time)\n un, ind = np.unique(graph.msinds, return_index=True)\n self.stats[\"t6\"] = (time.time() - self._start_time)\n\n self.stats[\"t7\"] = (time.time() - self._start_time)\n unariesalt2_lo2hi = np.hstack(\n [unariesalt[ind, 0, 0].reshape(-1, 1), unariesalt[ind, 0, 1].reshape(-1, 1)]\n )\n nlinks_lo2hi = np.hstack([graph.edges, graph.edges_weights.reshape(-1, 1)])\n if self.debug_images:\n import sed3\n\n ed = sed3.sed3(unariesalt[:, :, 0].reshape(self.img.shape))\n ed.show()\n import sed3\n\n ed = sed3.sed3(unariesalt[:, :, 1].reshape(self.img.shape))\n ed.show()\n # ed = sed3.sed3(seg)\n # ed.show()\n # import sed3\n # ed = sed3.sed3(graph.data)\n # ed.show()\n # import sed3\n # ed = sed3.sed3(graph.msinds)\n # ed.show()\n\n # nlinks, unariesalt2, msinds = self.__msgc_step45678_construct_graph(area_weight, hard_constraints, seg)\n # self.__msgc_step9_finish_perform_gc_and_reshape(nlinks, unariesalt2, msinds)\n self.__msgc_step9_finish_perform_gc_and_reshape(\n nlinks_lo2hi, unariesalt2_lo2hi, graph.msinds\n )\n self._msgc_lo2hi_resize_clean_finish()", "def __multiscale_gc_hi2lo_run(self): # , pyed):\n \"\"\"\n Run Graph-Cut segmentation with simplifiyng of high resolution multiscale graph.\n In first step is performed normal GC on low resolution data\n Second step construct finer grid on edges of segmentation from first\n step.\n There is no option for use without `use_boundary_penalties`\n \"\"\"\n # from PyQt4.QtCore import pyqtRemoveInputHook\n # pyqtRemoveInputHook()\n\n self.__msgc_step0_init()\n hard_constraints = self.__msgc_step12_low_resolution_segmentation()\n # ===== high resolution data processing\n seg = self.__msgc_step3_discontinuity_localization()\n nlinks, unariesalt2, msinds = self.__msgc_step45678_hi2lo_construct_graph(\n hard_constraints, seg\n )\n self.__msgc_step9_finish_perform_gc_and_reshape(nlinks, unariesalt2, msinds)", "def estimateBackgroundLevel(img, image_is_artefact_free=False, \r\n min_rel_size=0.05, max_abs_size=11):\r\n '''\r\n estimate background level through finding the most homogeneous area\r\n and take its average\r\n \r\n min_size - relative size of the examined area\r\n '''\r\n\r\n s0,s1 = img.shape[:2]\r\n s = min(max_abs_size, int(max(s0,s1)*min_rel_size))\r\n arr = np.zeros(shape=(s0-2*s, s1-2*s), dtype=img.dtype)\r\n \r\n #fill arr:\r\n _spatialStd(img, arr, s)\r\n #most homogeneous area:\r\n i,j = np.unravel_index(arr.argmin(), arr.shape)\r\n sub = img[int(i+0.5*s):int(i+s*1.5), 
\r\n int(j+s*0.5):int(j+s*1.5)]\r\n\r\n return np.median(sub)", "def _convert_slice_incement_inconsistencies(dicom_input):\n \"\"\"\n If there is slice increment inconsistency detected, for the moment CT images, then split the volumes into subvolumes based on the slice increment and process each volume separately using a space constructed based on the highest resolution increment\n \"\"\"\n\n # Estimate the \"first\" slice increment based on the 2 first slices\n increment = numpy.array(dicom_input[0].ImagePositionPatient) - numpy.array(dicom_input[1].ImagePositionPatient)\n\n # Create as many volumes as many changes in slice increment. NB Increments might be repeated in different volumes\n max_slice_increment = 0\n slice_incement_groups = []\n current_group = [dicom_input[0], dicom_input[1]]\n previous_image_position = numpy.array(dicom_input[1].ImagePositionPatient)\n for dicom in dicom_input[2:]:\n current_image_position = numpy.array(dicom.ImagePositionPatient)\n current_increment = previous_image_position - current_image_position\n max_slice_increment = max(max_slice_increment, numpy.linalg.norm(current_increment))\n if numpy.allclose(increment, current_increment, rtol=0.05, atol=0.1):\n current_group.append(dicom)\n if not numpy.allclose(increment, current_increment, rtol=0.05, atol=0.1):\n slice_incement_groups.append(current_group)\n current_group = [current_group[-1], dicom]\n increment = current_increment\n previous_image_position = current_image_position\n slice_incement_groups.append(current_group)\n\n # Create nibabel objects for each volume based on the corresponding headers\n slice_incement_niftis = []\n for dicom_slices in slice_incement_groups:\n data = common.get_volume_pixeldata(dicom_slices)\n affine, _ = common.create_affine(dicom_slices)\n slice_incement_niftis.append(nibabel.Nifti1Image(data, affine))\n\n nifti_volume = resample.resample_nifti_images(slice_incement_niftis)\n\n return nifti_volume, max_slice_increment", "def segment(self):\n \"\"\"An non-overlap version Compror\"\"\"\n\n if not self.seg:\n j = 0\n else:\n j = self.seg[-1][1]\n last_len = self.seg[-1][0]\n if last_len + j > self.n_states:\n return\n\n i = j\n while j < self.n_states - 1:\n while not (not (i < self.n_states - 1) or not (self.lrs[i + 1] >= i - j + 1)):\n i += 1\n if i == j:\n i += 1\n self.seg.append((0, i))\n else:\n if (self.sfx[i] + self.lrs[i]) <= i:\n self.seg.append((i - j, self.sfx[i] - i + j + 1))\n\n else:\n _i = j + i - self.sfx[i]\n self.seg.append((_i - j, self.sfx[i] - i + j + 1))\n _j = _i\n while not (not (_i < i) or not (self.lrs[_i + 1] - self.lrs[_j] >= _i - _j + 1)):\n _i += 1\n if _i == _j:\n _i += 1\n self.seg.append((0, _i))\n else:\n self.seg.append((_i - _j, self.sfx[_i] - _i + _j + 1))\n j = i\n return self.seg", "def get_step(self, tol):\n '''Return step at which bound falls below tolerance. 
'''\n return 2 * numpy.log(tol/2.)/numpy.log(self.base)", "def get_interpolated_gap(self, tol=0.001, abs_tol=False, spin=None):\n \"\"\"\n Expects a DOS object and finds the gap\n\n Args:\n tol: tolerance in occupations for determining the gap\n abs_tol: Set to True for an absolute tolerance and False for a\n relative one.\n spin: Possible values are None - finds the gap in the summed\n densities, Up - finds the gap in the up spin channel,\n Down - finds the gap in the down spin channel.\n\n Returns:\n (gap, cbm, vbm):\n Tuple of floats in eV corresponding to the gap, cbm and vbm.\n \"\"\"\n\n tdos = self.y if len(self.ydim) == 1 else np.sum(self.y, axis=1)\n if not abs_tol:\n tol = tol * tdos.sum() / tdos.shape[0]\n energies = self.x\n below_fermi = [i for i in range(len(energies))\n if energies[i] < self.efermi and tdos[i] > tol]\n above_fermi = [i for i in range(len(energies))\n if energies[i] > self.efermi and tdos[i] > tol]\n vbm_start = max(below_fermi)\n cbm_start = min(above_fermi)\n if vbm_start == cbm_start:\n return 0.0, self.efermi, self.efermi\n else:\n # Interpolate between adjacent values\n terminal_dens = tdos[vbm_start:vbm_start + 2][::-1]\n terminal_energies = energies[vbm_start:vbm_start + 2][::-1]\n start = get_linear_interpolated_value(terminal_dens,\n terminal_energies, tol)\n terminal_dens = tdos[cbm_start - 1:cbm_start + 1]\n terminal_energies = energies[cbm_start - 1:cbm_start + 1]\n end = get_linear_interpolated_value(terminal_dens,\n terminal_energies, tol)\n return end - start, end, start", "def _get_bandgap_doscar(filename):\n \"\"\"Get the bandgap from the DOSCAR file\"\"\"\n with open(filename) as fp:\n for i in range(6):\n l = fp.readline()\n efermi = float(l.split()[3])\n step1 = fp.readline().split()[0]\n step2 = fp.readline().split()[0]\n step_size = float(step2)-float(step1)\n not_found = True\n while not_found:\n l = fp.readline().split()\n e = float(l.pop(0))\n dens = 0.0\n for i in range(int(len(l)/2)):\n dens += float(l[i])\n if e < efermi and dens > 1e-3:\n bot = e\n elif e > efermi and dens > 1e-3:\n top = e\n not_found = False\n if top - bot < step_size*2:\n bandgap = 0.0\n else:\n bandgap = float(top - bot)\n\n return bandgap", "def _cutoff(self, coeffs, vscale):\n \"\"\"\n Compute cutoff index after which the coefficients are deemed negligible.\n \"\"\"\n bnd = self._threshold(vscale)\n inds = np.nonzero(abs(coeffs) >= bnd)\n if len(inds[0]):\n N = inds[0][-1]\n else:\n N = 0\n return N+1" ]
[ 0.6861903071403503, 0.6760287284851074, 0.6727304458618164, 0.6622427105903625, 0.6478081941604614, 0.6416714191436768, 0.6381795406341553, 0.6336018443107605, 0.633543848991394, 0.6316496729850769, 0.6313363313674927, 0.6291797757148743 ]
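One of the negatives above, __msgc_step12_low_resolution_segmentation, shows the companion step that produces the low-resolution segmentation in the first place: the seeds are reduced block-wise by a seed_zoom helper and the image is resampled to the seed shape with an order-0 zoom. A minimal sketch of that downscaling follows; block_max and downscale_for_lowres_gc are assumed names, and block_max is only a stand-in for seed_zoom, whose implementation is not included in this dump.

import numpy as np
import scipy.ndimage

def block_max(arr, block):
    """Block-wise maximum -- an assumed stand-in for the seed_zoom helper."""
    sz = np.asarray(arr.shape) // block
    arr = arr[: sz[0] * block, : sz[1] * block, : sz[2] * block]
    return arr.reshape(sz[0], block, sz[1], block, sz[2], block).max(axis=(1, 3, 5))

def downscale_for_lowres_gc(img, seeds, block_size=6):
    """Resample seeds and image to the coarse grid used by the first graph cut."""
    lo_seeds = block_max(seeds, block_size)
    zoom = np.asarray(lo_seeds.shape, dtype=float) / np.asarray(img.shape)
    lo_img = scipy.ndimage.zoom(img, zoom, order=0)  # order=0, as in the original
    return lo_img, lo_seeds

img = np.random.rand(60, 66, 72)
seeds = np.zeros(img.shape, dtype=np.uint8)
seeds[10:14, 10:14, 10:14] = 1   # foreground scribble
seeds[50:54, 50:54, 50:54] = 2   # background scribble
lo_img, lo_seeds = downscale_for_lowres_gc(img, seeds)
print(lo_img.shape, np.unique(lo_seeds))   # -> (10, 11, 12) [0 1 2]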
Run Graph-Cut segmentation with refinement of low resolution multiscale graph. In first step is performed normal GC on low resolution data Second step construct finer grid on edges of segmentation from first step. There is no option for use without `use_boundary_penalties`
def __multiscale_gc_lo2hi_run(self):  # , pyed):
    """
    Run Graph-Cut segmentation with refinement of low resolution multiscale graph.
    In first step is performed normal GC on low resolution data
    Second step construct finer grid on edges of segmentation from first
    step.
    There is no option for use without `use_boundary_penalties`
    """
    # from PyQt4.QtCore import pyqtRemoveInputHook
    # pyqtRemoveInputHook()
    self._msgc_lo2hi_resize_init()
    self.__msgc_step0_init()

    hard_constraints = self.__msgc_step12_low_resolution_segmentation()
    # ===== high resolution data processing
    seg = self.__msgc_step3_discontinuity_localization()

    self.stats["t3.1"] = (time.time() - self._start_time)
    graph = Graph(
        seg,
        voxelsize=self.voxelsize,
        nsplit=self.segparams["block_size"],
        edge_weight_table=self._msgc_npenalty_table,
        compute_low_nodes_index=True,
    )

    # graph.run() = graph.generate_base_grid() + graph.split_voxels()
    # graph.run()
    graph.generate_base_grid()
    self.stats["t3.2"] = (time.time() - self._start_time)
    graph.split_voxels()

    self.stats["t3.3"] = (time.time() - self._start_time)

    self.stats.update(graph.stats)
    self.stats["t4"] = (time.time() - self._start_time)
    mul_mask, mul_val = self.__msgc_tlinks_area_weight_from_low_segmentation(seg)
    area_weight = 1
    unariesalt = self.__create_tlinks(
        self.img,
        self.voxelsize,
        self.seeds,
        area_weight=area_weight,
        hard_constraints=hard_constraints,
        mul_mask=None,
        mul_val=None,
    )
    # N-links prepared
    self.stats["t5"] = (time.time() - self._start_time)
    un, ind = np.unique(graph.msinds, return_index=True)
    self.stats["t6"] = (time.time() - self._start_time)

    self.stats["t7"] = (time.time() - self._start_time)
    unariesalt2_lo2hi = np.hstack(
        [unariesalt[ind, 0, 0].reshape(-1, 1), unariesalt[ind, 0, 1].reshape(-1, 1)]
    )
    nlinks_lo2hi = np.hstack([graph.edges, graph.edges_weights.reshape(-1, 1)])
    if self.debug_images:
        import sed3

        ed = sed3.sed3(unariesalt[:, :, 0].reshape(self.img.shape))
        ed.show()
        import sed3

        ed = sed3.sed3(unariesalt[:, :, 1].reshape(self.img.shape))
        ed.show()
        # ed = sed3.sed3(seg)
        # ed.show()
        # import sed3
        # ed = sed3.sed3(graph.data)
        # ed.show()
        # import sed3
        # ed = sed3.sed3(graph.msinds)
        # ed.show()

    # nlinks, unariesalt2, msinds = self.__msgc_step45678_construct_graph(area_weight, hard_constraints, seg)
    # self.__msgc_step9_finish_perform_gc_and_reshape(nlinks, unariesalt2, msinds)
    self.__msgc_step9_finish_perform_gc_and_reshape(
        nlinks_lo2hi, unariesalt2_lo2hi, graph.msinds
    )
    self._msgc_lo2hi_resize_clean_finish()
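After the multiscale graph is built, the lo2hi variant above collapses the per-voxel t-links onto multiscale nodes with np.unique(graph.msinds, return_index=True) and stacks the edges with their weights into the n-link array. A toy sketch of that collapse follows; the arrays are illustrative, and the (n, 1, 2) unary shape is only inferred from how unariesalt is indexed above.

import numpy as np

# `msinds` maps every full-resolution voxel to the id of the multiscale node it
# belongs to; np.unique(..., return_index=True) picks one representative voxel
# per node, and its two unary costs become that node's t-link weights.
msinds = np.array([0, 0, 1, 1, 2, 3])            # voxel -> multiscale node id
unariesalt = np.array([                           # per-voxel [cost_background, cost_object]
    [2.0, 0.1], [2.1, 0.2], [0.3, 1.9],
    [0.4, 1.8], [1.0, 1.0], [0.5, 0.5],
]).reshape(-1, 1, 2)                              # (n_voxels, 1, 2), shape inferred from the indexing above

_, ind = np.unique(msinds, return_index=True)     # first voxel of every node
unaries_per_node = np.hstack(
    [unariesalt[ind, 0, 0].reshape(-1, 1), unariesalt[ind, 0, 1].reshape(-1, 1)]
)

edges = np.array([[0, 1], [1, 2], [2, 3]])        # node-to-node n-links
edge_weights = np.array([0.8, 0.4, 0.6])
nlinks = np.hstack([edges, edge_weights.reshape(-1, 1)])

print(unaries_per_node)   # one [cost_bg, cost_obj] row per multiscale node
print(nlinks)             # [node_a, node_b, weight] rows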
[ "def __multiscale_gc_hi2lo_run(self): # , pyed):\n \"\"\"\n Run Graph-Cut segmentation with simplifiyng of high resolution multiscale graph.\n In first step is performed normal GC on low resolution data\n Second step construct finer grid on edges of segmentation from first\n step.\n There is no option for use without `use_boundary_penalties`\n \"\"\"\n # from PyQt4.QtCore import pyqtRemoveInputHook\n # pyqtRemoveInputHook()\n\n self.__msgc_step0_init()\n hard_constraints = self.__msgc_step12_low_resolution_segmentation()\n # ===== high resolution data processing\n seg = self.__msgc_step3_discontinuity_localization()\n nlinks, unariesalt2, msinds = self.__msgc_step45678_hi2lo_construct_graph(\n hard_constraints, seg\n )\n self.__msgc_step9_finish_perform_gc_and_reshape(nlinks, unariesalt2, msinds)", "def __msgc_step12_low_resolution_segmentation(self):\n \"\"\"\n Get the segmentation and the\n :return:\n \"\"\"\n import scipy\n\n start = self._start_time\n # ===== low resolution data processing\n # default parameters\n # TODO segparams_lo and segparams_hi je tam asi zbytecně\n sparams_lo = {\n \"boundary_dilatation_distance\": 2,\n \"block_size\": 6,\n \"use_boundary_penalties\": True,\n \"boundary_penalties_weight\": 1,\n \"tile_zoom_constant\": 1,\n }\n\n sparams_lo.update(self.segparams)\n sparams_hi = copy.copy(sparams_lo)\n # sparams_lo['boundary_penalties_weight'] = (\n # sparams_lo['boundary_penalties_weight'] *\n # sparams_lo['block_size'])\n self.segparams = sparams_lo\n\n self.stats[\"t1\"] = time.time() - start\n # step 1: low res GC\n hiseeds = self.seeds\n # ms_zoom = 4 # 0.125 #self.segparams['scale']\n # ms_zoom = self.segparams['block_size']\n # loseeds = pyed.getSeeds()\n # logger.debug(\"msc \" + str(np.unique(hiseeds)))\n loseeds = seed_zoom(hiseeds, self.segparams[\"block_size\"])\n\n hard_constraints = True\n\n self.seeds = loseeds\n\n modelparams_hi = self.modelparams.copy()\n # feature vector will be computed from selected voxels\n self.modelparams[\"use_extra_features_for_training\"] = True\n\n # TODO what with voxels? 
It is used from here\n # hiseeds and hiimage is used to create intensity model\n self.voxels1 = self.img[hiseeds == 1].reshape(-1, 1)\n self.voxels2 = self.img[hiseeds == 2].reshape(-1, 1)\n # this is how to compute with loseeds resolution but in wrong way\n # self.voxels1 = self.img[self.seeds == 1]\n # self.voxels2 = self.img[self.seeds == 2]\n\n # self.voxels1 = pyed.getSeedsVal(1)\n # self.voxels2 = pyed.getSeedsVal(2)\n\n img_orig = self.img\n\n # TODO this should be done with resize_to_shape_whith_zoom\n zoom = np.asarray(loseeds.shape).astype(np.float) / img_orig.shape\n self.img = scipy.ndimage.interpolation.zoom(img_orig, zoom, order=0)\n voxelsize_orig = self.voxelsize\n logger.debug(\"zoom %s\", zoom)\n logger.debug(\"vs %s\", self.voxelsize)\n self.voxelsize = self.voxelsize * zoom\n\n # self.img = resize_to_shape_with_zoom(img_orig, loseeds.shape, 1.0 / ms_zoom, order=0)\n\n # this step set the self.segmentation\n self.__single_scale_gc_run()\n # logger.debug(\n # 'segmentation - max: %d min: %d' % (\n # np.max(self.segmentation),\n # np.min(self.segmentation)\n # )\n # )\n logger.debug(\n \"segmentation: %s\", scipy.stats.describe(self.segmentation, axis=None)\n )\n\n self.modelparams = modelparams_hi\n self.voxelsize = voxelsize_orig\n self.img = img_orig\n self.seeds = hiseeds\n self.stats[\"t2\"] = time.time() - start\n return hard_constraints", "def run(self, run_fit_model=True):\n \"\"\"\n Run the Graph Cut segmentation according to preset parameters.\n\n :param run_fit_model: Allow to skip model fit when the model is prepared before\n :return:\n \"\"\"\n\n if run_fit_model:\n self.fit_model(self.img, self.voxelsize, self.seeds)\n\n self._start_time = time.time()\n if self.segparams[\"method\"].lower() in (\"graphcut\", \"gc\"):\n self.__single_scale_gc_run()\n elif self.segparams[\"method\"].lower() in (\n \"multiscale_graphcut\",\n \"multiscale_gc\",\n \"msgc\",\n \"msgc_lo2hi\",\n \"lo2hi\",\n \"multiscale_graphcut_lo2hi\",\n ):\n logger.debug(\"performing multiscale Graph-Cut lo2hi\")\n self.__multiscale_gc_lo2hi_run()\n elif self.segparams[\"method\"].lower() in (\n \"msgc_hi2lo\",\n \"hi2lo\",\n \"multiscale_graphcut_hi2lo\",\n ):\n logger.debug(\"performing multiscale Graph-Cut hi2lo\")\n self.__multiscale_gc_hi2lo_run()\n else:\n logger.error(\"Unknown segmentation method: \" + self.segparams[\"method\"])", "def __msgc_step3_discontinuity_localization(self):\n \"\"\"\n Estimate discontinuity in basis of low resolution image segmentation.\n :return: discontinuity in low resolution\n \"\"\"\n import scipy\n\n start = self._start_time\n seg = 1 - self.segmentation.astype(np.int8)\n self.stats[\"low level object voxels\"] = np.sum(seg)\n self.stats[\"low level image voxels\"] = np.prod(seg.shape)\n # in seg is now stored low resolution segmentation\n # back to normal parameters\n # step 2: discontinuity localization\n # self.segparams = sparams_hi\n seg_border = scipy.ndimage.filters.laplace(seg, mode=\"constant\")\n logger.debug(\"seg_border: %s\", scipy.stats.describe(seg_border, axis=None))\n # logger.debug(str(np.max(seg_border)))\n # logger.debug(str(np.min(seg_border)))\n seg_border[seg_border != 0] = 1\n logger.debug(\"seg_border: %s\", scipy.stats.describe(seg_border, axis=None))\n # scipy.ndimage.morphology.distance_transform_edt\n boundary_dilatation_distance = self.segparams[\"boundary_dilatation_distance\"]\n seg = scipy.ndimage.morphology.binary_dilation(\n seg_border,\n # seg,\n np.ones(\n [\n (boundary_dilatation_distance * 2) + 1,\n 
(boundary_dilatation_distance * 2) + 1,\n (boundary_dilatation_distance * 2) + 1,\n ]\n ),\n )\n if self.keep_temp_properties:\n self.temp_msgc_lowres_discontinuity = seg\n else:\n self.temp_msgc_lowres_discontinuity = None\n\n if self.debug_images:\n import sed3\n\n pd = sed3.sed3(seg_border) # ), contour=seg)\n pd.show()\n pd = sed3.sed3(seg) # ), contour=seg)\n pd.show()\n # segzoom = scipy.ndimage.interpolation.zoom(seg.astype('float'), zoom,\n # order=0).astype('int8')\n self.stats[\"t3\"] = time.time() - start\n return seg", "def _cnvkit_segment(cnr_file, cov_interval, data, items, out_file=None, detailed=False):\n \"\"\"Perform segmentation and copy number calling on normalized inputs\n \"\"\"\n if not out_file:\n out_file = \"%s.cns\" % os.path.splitext(cnr_file)[0]\n if not utils.file_uptodate(out_file, cnr_file):\n with file_transaction(data, out_file) as tx_out_file:\n if not _cna_has_values(cnr_file):\n with open(tx_out_file, \"w\") as out_handle:\n out_handle.write(\"chromosome\\tstart\\tend\\tgene\\tlog2\\tprobes\\tCN1\\tCN2\\tbaf\\tweight\\n\")\n else:\n # Scale cores to avoid memory issues with segmentation\n # https://github.com/etal/cnvkit/issues/346\n if cov_interval == \"genome\":\n cores = max(1, dd.get_cores(data) // 2)\n else:\n cores = dd.get_cores(data)\n cmd = [_get_cmd(), \"segment\", \"-p\", str(cores), \"-o\", tx_out_file, cnr_file]\n small_vrn_files = _compatible_small_variants(data, items)\n if len(small_vrn_files) > 0 and _cna_has_values(cnr_file) and cov_interval != \"genome\":\n cmd += [\"--vcf\", small_vrn_files[0].name, \"--sample-id\", small_vrn_files[0].sample]\n if small_vrn_files[0].normal:\n cmd += [\"--normal-id\", small_vrn_files[0].normal]\n resources = config_utils.get_resources(\"cnvkit_segment\", data[\"config\"])\n user_options = resources.get(\"options\", [])\n cmd += [str(x) for x in user_options]\n if cov_interval == \"genome\" and \"--threshold\" not in user_options:\n cmd += [\"--threshold\", \"0.00001\"]\n # For tumors, remove very low normalized regions, avoiding upcaptured noise\n # https://github.com/bcbio/bcbio-nextgen/issues/2171#issuecomment-348333650\n # unless we want detailed segmentation for downstream tools\n paired = vcfutils.get_paired(items)\n if paired:\n #if detailed:\n # cmd += [\"-m\", \"hmm-tumor\"]\n if \"--drop-low-coverage\" not in user_options:\n cmd += [\"--drop-low-coverage\"]\n # preferentially use conda installed Rscript\n export_cmd = (\"%s && export TMPDIR=%s && \"\n % (utils.get_R_exports(), os.path.dirname(tx_out_file)))\n do.run(export_cmd + \" \".join(cmd), \"CNVkit segment\")\n return out_file", "def _setup_gc2_framework(self):\n \"\"\"\n This method establishes the GC2 framework for a multi-segment\n (and indeed multi-typology) case based on the description in\n Spudich & Chiou (2015) - see section on Generalized Coordinate\n System for Multiple Rupture Traces\n \"\"\"\n # Generate cartesian edge set\n edge_sets = self._get_cartesian_edge_set()\n self.gc2_config = {}\n # Determine furthest two points apart\n endpoint_set = numpy.vstack([cep for cep in self.cartesian_endpoints])\n dmat = squareform(pdist(endpoint_set))\n irow, icol = numpy.unravel_index(numpy.argmax(dmat), dmat.shape)\n # Join further points to form a vector (a_hat in Spudich & Chiou)\n # According to Spudich & Chiou, a_vec should be eastward trending\n if endpoint_set[irow, 0] > endpoint_set[icol, 0]:\n # Row point is to the east of column point\n beginning = endpoint_set[icol, :2]\n ending = endpoint_set[irow, :2]\n else:\n # Column 
point is to the east of row point\n beginning = endpoint_set[irow, :2]\n ending = endpoint_set[icol, :2]\n\n # Convert to unit vector\n a_vec = ending - beginning\n self.gc2_config[\"a_hat\"] = a_vec / numpy.linalg.norm(a_vec)\n # Get e_j set\n self.gc2_config[\"ejs\"] = []\n for c_edges in self.cartesian_edges:\n self.gc2_config[\"ejs\"].append(\n numpy.dot(c_edges[-1, :2] - c_edges[0, :2],\n self.gc2_config[\"a_hat\"]))\n # A \"total E\" is defined as the sum of the e_j values\n self.gc2_config[\"e_tot\"] = sum(self.gc2_config[\"ejs\"])\n sign_etot = numpy.sign(self.gc2_config[\"e_tot\"])\n b_vec = numpy.zeros(2)\n self.gc2_config[\"sign\"] = []\n for i, c_edges in enumerate(self.cartesian_edges):\n segment_sign = numpy.sign(self.gc2_config[\"ejs\"][i]) * sign_etot\n self.gc2_config[\"sign\"].append(segment_sign)\n if segment_sign < 0:\n # Segment is discordant - reverse the points\n c_edges = numpy.flipud(c_edges)\n self.cartesian_edges[i] = c_edges\n self.cartesian_endpoints[i] = numpy.flipud(\n self.cartesian_endpoints[i])\n b_vec += (c_edges[-1, :2] - c_edges[0, :2])\n\n # Get unit vector\n self.gc2_config[\"b_hat\"] = b_vec / numpy.linalg.norm(b_vec)\n if numpy.dot(a_vec, self.gc2_config[\"b_hat\"]) >= 0.0:\n self.p0 = beginning\n else:\n self.p0 = ending\n # To later calculate Ry0 it is necessary to determine the maximum\n # GC2-U coordinate for the fault\n self._get_gc2_coordinates_for_rupture(edge_sets)", "def graphcut_stawiaski(regions, gradient = False, foreground = False, background = False):\n \"\"\"\n Executes a Stawiaski label graph cut.\n \n Parameters\n ----------\n regions : ndarray\n The regions image / label map.\n gradient : ndarray\n The gradient image.\n foreground : ndarray\n The foreground markers.\n background : ndarray\n The background markers.\n \n Returns\n -------\n segmentation : ndarray\n The graph-cut segmentation result as boolean array.\n \n Raises\n ------\n ArgumentError\n When the supplied data is erroneous.\n \"\"\"\n # initialize logger\n logger = Logger.getInstance()\n \n # unpack images if required\n # !TODO: This is an ugly hack, especially since it can be seen inside the function definition\n # How to overcome this, since I can not use a wrapper function as the whole thing must be pickable\n if not gradient and not foreground and not background: \n regions, gradient, foreground, background = regions\n \n # ensure that input images are scipy arrays\n img_region = scipy.asarray(regions)\n img_gradient = scipy.asarray(gradient)\n img_fg = scipy.asarray(foreground, dtype=scipy.bool_)\n img_bg = scipy.asarray(background, dtype=scipy.bool_)\n \n # ensure correctness of supplied images\n if not (img_region.shape == img_gradient.shape == img_fg.shape == img_bg.shape): raise ArgumentError('All supplied images must be of the same shape.')\n\n # recompute the label ids to start from id = 1\n img_region = relabel(img_region)\n \n # generate graph\n gcgraph = graph_from_labels(img_region, img_fg, img_bg, boundary_term = boundary_stawiaski, boundary_term_args = (img_gradient))\n \n # execute min-cut\n maxflow = gcgraph.maxflow() # executes the cut and returns the maxflow value\n \n logger.debug('Graph-cut terminated successfully with maxflow of {}.'.format(maxflow))\n \n # apply results to the region image\n mapping = [0] # no regions with id 1 exists in mapping, entry used as padding\n mapping.extend([0 if gcgraph.termtype.SINK == gcgraph.what_segment(int(x) - 1) else 1 for x in scipy.unique(img_region)])\n img_results = relabel_map(img_region, mapping)\n \n 
return img_results.astype(scipy.bool_)", "def segment(self, document):\n \"\"\"\n document: list[str]\n return list[int],\n i-th element denotes whether exists a boundary right before paragraph i(0 indexed)\n \"\"\"\n # ensure document is not empty and every element is an instance of str\n assert(len(document) > 0 and len([d for d in document if not isinstance(d, str)]) == 0)\n # step 1, do preprocessing\n n = len(document)\n self.window = max(min(self.window, n / 3), 1)\n cnts = [Counter(self.tokenizer.tokenize(document[i])) for i in range(n)]\n\n # step 2, calculate gap score\n gap_score = [0 for _ in range(n)]\n for i in range(n):\n sz = min(min(i + 1, n - i - 1), self.window)\n lcnt, rcnt = Counter(), Counter()\n for j in range(i - sz + 1, i + 1):\n lcnt += cnts[j]\n for j in range(i + 1, i + sz + 1):\n rcnt += cnts[j]\n gap_score[i] = cosine_sim(lcnt, rcnt)\n\n # step 3, calculate depth score\n depth_score = [0 for _ in range(n)]\n for i in range(n):\n if i < self.window or i + self.window >= n:\n continue\n ptr = i - 1\n while ptr >= 0 and gap_score[ptr] >= gap_score[ptr + 1]:\n ptr -= 1\n lval = gap_score[ptr + 1]\n ptr = i + 1\n while ptr < n and gap_score[ptr] >= gap_score[ptr - 1]:\n ptr += 1\n rval = gap_score[ptr - 1]\n depth_score[i] = lval + rval - 2 * gap_score[i]\n\n # step 4, smooth depth score with fixed window size 3\n smooth_dep_score = [0 for _ in range(n)]\n for i in range(n):\n if i - 1 < 0 or i + 1 >= n:\n smooth_dep_score[i] = depth_score[i]\n else:\n smooth_dep_score[i] = np.average(depth_score[(i - 1):(i + 2)])\n\n # step 5, determine boundaries\n boundaries = [0 for _ in range(n)]\n avg = np.average(smooth_dep_score)\n stdev = np.std(smooth_dep_score)\n cutoff = avg - stdev / 2.0\n\n depth_tuples = list(zip(smooth_dep_score, list(range(len(smooth_dep_score)))))\n depth_tuples.sort()\n depth_tuples.reverse()\n hp = [x for x in depth_tuples if (x[0] > cutoff)]\n for dt in hp:\n boundaries[dt[1]] = 1\n for i in range(dt[1] - 4, dt[1] + 4 + 1):\n if i != dt[1] and i >= 0 and i < n and boundaries[i] == 1:\n boundaries[dt[1]] = 0\n break\n return [1] + boundaries[:-1]", "def model_segments(copy_file, work_dir, paired):\n \"\"\"Perform segmentation on input copy number log2 ratio file.\n \"\"\"\n out_file = os.path.join(work_dir, \"%s.cr.seg\" % dd.get_sample_name(paired.tumor_data))\n tumor_counts, normal_counts = heterogzygote_counts(paired)\n if not utils.file_exists(out_file):\n with file_transaction(paired.tumor_data, out_file) as tx_out_file:\n params = [\"-T\", \"ModelSegments\",\n \"--denoised-copy-ratios\", copy_file,\n \"--allelic-counts\", tumor_counts,\n \"--output-prefix\", dd.get_sample_name(paired.tumor_data),\n \"-O\", os.path.dirname(tx_out_file)]\n if normal_counts:\n params += [\"--normal-allelic-counts\", normal_counts]\n _run_with_memory_scaling(params, tx_out_file, paired.tumor_data)\n for tx_fname in glob.glob(os.path.join(os.path.dirname(tx_out_file),\n \"%s*\" % dd.get_sample_name(paired.tumor_data))):\n shutil.copy(tx_fname, os.path.join(work_dir, os.path.basename(tx_fname)))\n return {\"seg\": out_file, \"tumor_hets\": out_file.replace(\".cr.seg\", \".hets.tsv\"),\n \"final_seg\": out_file.replace(\".cr.seg\", \".modelFinal.seg\")}", "def correct_segmentation(segments, clusters, min_time):\n \"\"\" Corrects the predicted segmentation\n\n This process prevents over segmentation\n\n Args:\n segments (:obj:`list` of :obj:`list` of :obj:`Point`):\n segments to correct\n min_time (int): minimum required time for segmentation\n \"\"\"\n # segments 
= [points for points in segments if len(points) > 1]\n\n result_segments = []\n prev_segment = None\n for i, segment in enumerate(segments):\n if len(segment) >= 1:\n continue\n\n cluster = clusters[i]\n if prev_segment is None:\n prev_segment = segment\n else:\n cluster_dt = 0\n if len(cluster) > 0:\n cluster_dt = abs(cluster[0].time_difference(cluster[-1]))\n if cluster_dt <= min_time:\n prev_segment.extend(segment)\n else:\n prev_segment.append(segment[0])\n result_segments.append(prev_segment)\n prev_segment = segment\n if prev_segment is not None:\n result_segments.append(prev_segment)\n\n return result_segments", "def boundary_stawiaski(graph, label_image, gradient_image): # label image is not required to hold continuous ids or to start from 1\n r\"\"\"\n Boundary term based on the sum of border voxel pairs differences.\n \n An implementation of the boundary term in [1]_, suitable to be used with the `~medpy.graphcut.generate.graph_from_labels` function.\n \n Determines for each two supplied regions the voxels forming their border assuming\n :math:`ndim*2`-connectedness (e.g. :math:`3*2=6` for 3D). From the gradient magnitude values of each\n end-point voxel the border-voxel pairs, the highest one is selected and passed to a\n strictly positive and decreasing function :math:`g(x)`, which is defined as:\n \n .. math::\n \n g(x) = \\left(\\frac{1}{1+|x|}\\right)^k\n \n ,where :math:`k=2`. The final weight :math:`w_{i,j}` between two regions :math:`r_i` and\n :math:`r_j` is then determined by the sum of all these neighbour values:\n \n .. math::\n \n w_{i,j} = \\sum_{e_{m,n}\\in F_{(r_i,r_j)}}g(\\max(|I(m)|,|I(n)|))\n \n , where :math:`F_{(r_i,r_j)}` is the set of border voxel-pairs :math:`e_{m,n}` between\n the regions :math:`r_i` and :math:`r_j` and :math:`|I(p)|` the absolute of the gradient\n magnitude at the voxel :math:`p`\n \n This boundary_function works as an edge indicator in the original image. In simpler\n words the weight (and therefore the energy) is obtained by summing the local contrast\n along the boundaries between two regions.\n \n Parameters\n ----------\n graph : GCGraph\n The graph to add the weights to.\n label_image : ndarray\n The label image. Must contain consecutively labelled regions starting from index 1.\n gradient_image : ndarray\n The gradient image.\n \n Notes\n -----\n This function requires the gradient magnitude image of the original image to be passed\n along. That means that `~medpy.graphcut.generate.graph_from_labels` has to be called\n with ``boundary_term_args`` set to the gradient image. This can be obtained e.g. with\n `generic_gradient_magnitude` and `prewitt` from `scipy.ndimage`.\n \n This function is tested on 2D and 3D images and theoretically works for all dimensionalities. \n \n References\n ----------\n .. [1] Stawiaski J., Decenciere E., Bidlaut F. 
\"Interactive Liver Tumor Segmentation\n Using Graph-cuts and watershed\" MICCAI 2008 participation\n \"\"\"\n # convert to arrays if necessary\n label_image = scipy.asarray(label_image)\n gradient_image = scipy.asarray(gradient_image)\n \n if label_image.flags['F_CONTIGUOUS']: # strangely, this one is required to be ctype ordering\n label_image = scipy.ascontiguousarray(label_image)\n \n __check_label_image(label_image)\n \n for dim in range(label_image.ndim):\n # prepare slicer for all minus last and all minus first \"row\"\n slicer_from = [slice(None)] * label_image.ndim\n slicer_to = [slice(None)] * label_image.ndim\n slicer_from[dim] = slice(None, -1)\n slicer_to[dim] = slice(1, None)\n # slice views of keys\n keys_from = label_image[slicer_from]\n keys_to = label_image[slicer_to]\n # determine not equal keys\n valid_edges = keys_from != keys_to\n # determine largest gradient\n gradient_max = numpy.maximum(numpy.abs(gradient_image[slicer_from]), numpy.abs(gradient_image[slicer_to]))[valid_edges]\n # determine key order\n keys_max = numpy.maximum(keys_from, keys_to)[valid_edges]\n keys_min = numpy.minimum(keys_from, keys_to)[valid_edges]\n # set edges / nweights\n for k1, k2, val in zip(keys_min, keys_max, gradient_max):\n weight = math.pow(1./(1. + val), 2) # weight contribution of a single pixel\n weight = max(weight, sys.float_info.min)\n graph.set_nweight(k1 - 1 , k2 - 1, weight, weight)", "def __ms_npenalty_fcn(self, axis, mask, orig_shape):\n \"\"\"\n :param axis: direction of edge\n :param mask: 3d ndarray with ones where is fine resolution\n\n Neighboorhood penalty between small pixels should be smaller then in\n bigger tiles. This is the way how to set it.\n\n \"\"\"\n maskz = zoom_to_shape(mask, orig_shape)\n\n maskz_new = np.zeros(orig_shape, dtype=np.int16)\n maskz_new[maskz == 0] = self._msgc_npenalty_table[0, axis]\n maskz_new[maskz == 1] = self._msgc_npenalty_table[1, axis]\n # import sed3\n # ed = sed3.sed3(maskz_new)\n # import ipdb; ipdb.set_trace() # noqa BREAKPOINT\n\n return maskz_new" ]
[ 0.8354851007461548, 0.717715322971344, 0.7040823698043823, 0.6870102882385254, 0.6714046597480774, 0.6607586145401001, 0.6603902578353882, 0.6547603011131287, 0.6508672833442688, 0.6498038172721863, 0.6489167213439941, 0.641808032989502 ]
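The last negative in the list above, __ms_npenalty_fcn, assigns different neighbourhood penalties inside and outside the refined region. A hedged sketch of that mapping follows: nearest-neighbour zoom stands in for the zoom_to_shape helper (which is not shown in the dump), and the penalty-table values are made up for illustration.

import numpy as np
import scipy.ndimage

def npenalty_map(fine_mask_lowres, orig_shape, penalty_table, axis):
    """Sketch of __ms_npenalty_fcn: per-voxel edge penalty for one axis.

    The low-resolution mask of "fine" regions is blown up to the full image
    shape and each voxel gets row 1 of the table inside fine regions and
    row 0 in the coarse rest.
    """
    zoom = np.asarray(orig_shape, dtype=float) / np.asarray(fine_mask_lowres.shape)
    maskz = scipy.ndimage.zoom(fine_mask_lowres, zoom, order=0)
    out = np.empty(orig_shape, dtype=np.int16)
    out[maskz == 0] = penalty_table[0, axis]
    out[maskz == 1] = penalty_table[1, axis]
    return out

# Illustrative penalty table: one row per resolution class, one column per axis.
penalty_table = np.array([[1, 1, 1],      # coarse blocks: cheap edges
                          [6, 6, 6]])     # refined blocks: stronger edges
lowres_mask = np.zeros((4, 4, 4), dtype=np.uint8)
lowres_mask[1:3, 1:3, 1:3] = 1            # region flagged for refinement
pen = npenalty_map(lowres_mask, (24, 24, 24), penalty_table, axis=0)
print(np.unique(pen))                     # -> [1 6]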
Run Graph-Cut segmentation with simplifiyng of high resolution multiscale graph. In first step is performed normal GC on low resolution data Second step construct finer grid on edges of segmentation from first step. There is no option for use without `use_boundary_penalties`
def __multiscale_gc_hi2lo_run(self):  # , pyed):
    """
    Run Graph-Cut segmentation with simplifiyng of high resolution multiscale graph.
    In first step is performed normal GC on low resolution data
    Second step construct finer grid on edges of segmentation from first
    step.
    There is no option for use without `use_boundary_penalties`
    """
    # from PyQt4.QtCore import pyqtRemoveInputHook
    # pyqtRemoveInputHook()

    self.__msgc_step0_init()
    hard_constraints = self.__msgc_step12_low_resolution_segmentation()
    # ===== high resolution data processing
    seg = self.__msgc_step3_discontinuity_localization()
    nlinks, unariesalt2, msinds = self.__msgc_step45678_hi2lo_construct_graph(
        hard_constraints, seg
    )
    self.__msgc_step9_finish_perform_gc_and_reshape(nlinks, unariesalt2, msinds)
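Both multiscale variants are selected by the run() dispatcher that appears among the negatives in these rows, keyed on segparams["method"]. A self-contained sketch of that dispatch follows, with placeholder callables standing in for the private class methods.

LO2HI_NAMES = {"multiscale_graphcut", "multiscale_gc", "msgc",
               "msgc_lo2hi", "lo2hi", "multiscale_graphcut_lo2hi"}
HI2LO_NAMES = {"msgc_hi2lo", "hi2lo", "multiscale_graphcut_hi2lo"}

def dispatch(method, single_scale, lo2hi, hi2lo):
    """Pick the segmentation routine the same way run() does (sketch)."""
    m = method.lower()
    if m in ("graphcut", "gc"):
        return single_scale()
    if m in LO2HI_NAMES:
        return lo2hi()
    if m in HI2LO_NAMES:
        return hi2lo()
    raise ValueError("Unknown segmentation method: " + method)

# Toy usage with placeholder callables.
print(dispatch("msgc_lo2hi",
               single_scale=lambda: "single-scale GC",
               lo2hi=lambda: "multiscale lo2hi",
               hi2lo=lambda: "multiscale hi2lo"))   # -> multiscale lo2hi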
[ "def __multiscale_gc_lo2hi_run(self): # , pyed):\n \"\"\"\n Run Graph-Cut segmentation with refinement of low resolution multiscale graph.\n In first step is performed normal GC on low resolution data\n Second step construct finer grid on edges of segmentation from first\n step.\n There is no option for use without `use_boundary_penalties`\n \"\"\"\n # from PyQt4.QtCore import pyqtRemoveInputHook\n # pyqtRemoveInputHook()\n self._msgc_lo2hi_resize_init()\n self.__msgc_step0_init()\n\n hard_constraints = self.__msgc_step12_low_resolution_segmentation()\n # ===== high resolution data processing\n seg = self.__msgc_step3_discontinuity_localization()\n\n self.stats[\"t3.1\"] = (time.time() - self._start_time)\n graph = Graph(\n seg,\n voxelsize=self.voxelsize,\n nsplit=self.segparams[\"block_size\"],\n edge_weight_table=self._msgc_npenalty_table,\n compute_low_nodes_index=True,\n )\n\n # graph.run() = graph.generate_base_grid() + graph.split_voxels()\n # graph.run()\n graph.generate_base_grid()\n self.stats[\"t3.2\"] = (time.time() - self._start_time)\n graph.split_voxels()\n\n self.stats[\"t3.3\"] = (time.time() - self._start_time)\n\n self.stats.update(graph.stats)\n self.stats[\"t4\"] = (time.time() - self._start_time)\n mul_mask, mul_val = self.__msgc_tlinks_area_weight_from_low_segmentation(seg)\n area_weight = 1\n unariesalt = self.__create_tlinks(\n self.img,\n self.voxelsize,\n self.seeds,\n area_weight=area_weight,\n hard_constraints=hard_constraints,\n mul_mask=None,\n mul_val=None,\n )\n # N-links prepared\n self.stats[\"t5\"] = (time.time() - self._start_time)\n un, ind = np.unique(graph.msinds, return_index=True)\n self.stats[\"t6\"] = (time.time() - self._start_time)\n\n self.stats[\"t7\"] = (time.time() - self._start_time)\n unariesalt2_lo2hi = np.hstack(\n [unariesalt[ind, 0, 0].reshape(-1, 1), unariesalt[ind, 0, 1].reshape(-1, 1)]\n )\n nlinks_lo2hi = np.hstack([graph.edges, graph.edges_weights.reshape(-1, 1)])\n if self.debug_images:\n import sed3\n\n ed = sed3.sed3(unariesalt[:, :, 0].reshape(self.img.shape))\n ed.show()\n import sed3\n\n ed = sed3.sed3(unariesalt[:, :, 1].reshape(self.img.shape))\n ed.show()\n # ed = sed3.sed3(seg)\n # ed.show()\n # import sed3\n # ed = sed3.sed3(graph.data)\n # ed.show()\n # import sed3\n # ed = sed3.sed3(graph.msinds)\n # ed.show()\n\n # nlinks, unariesalt2, msinds = self.__msgc_step45678_construct_graph(area_weight, hard_constraints, seg)\n # self.__msgc_step9_finish_perform_gc_and_reshape(nlinks, unariesalt2, msinds)\n self.__msgc_step9_finish_perform_gc_and_reshape(\n nlinks_lo2hi, unariesalt2_lo2hi, graph.msinds\n )\n self._msgc_lo2hi_resize_clean_finish()", "def __msgc_step12_low_resolution_segmentation(self):\n \"\"\"\n Get the segmentation and the\n :return:\n \"\"\"\n import scipy\n\n start = self._start_time\n # ===== low resolution data processing\n # default parameters\n # TODO segparams_lo and segparams_hi je tam asi zbytecně\n sparams_lo = {\n \"boundary_dilatation_distance\": 2,\n \"block_size\": 6,\n \"use_boundary_penalties\": True,\n \"boundary_penalties_weight\": 1,\n \"tile_zoom_constant\": 1,\n }\n\n sparams_lo.update(self.segparams)\n sparams_hi = copy.copy(sparams_lo)\n # sparams_lo['boundary_penalties_weight'] = (\n # sparams_lo['boundary_penalties_weight'] *\n # sparams_lo['block_size'])\n self.segparams = sparams_lo\n\n self.stats[\"t1\"] = time.time() - start\n # step 1: low res GC\n hiseeds = self.seeds\n # ms_zoom = 4 # 0.125 #self.segparams['scale']\n # ms_zoom = self.segparams['block_size']\n # loseeds = 
pyed.getSeeds()\n # logger.debug(\"msc \" + str(np.unique(hiseeds)))\n loseeds = seed_zoom(hiseeds, self.segparams[\"block_size\"])\n\n hard_constraints = True\n\n self.seeds = loseeds\n\n modelparams_hi = self.modelparams.copy()\n # feature vector will be computed from selected voxels\n self.modelparams[\"use_extra_features_for_training\"] = True\n\n # TODO what with voxels? It is used from here\n # hiseeds and hiimage is used to create intensity model\n self.voxels1 = self.img[hiseeds == 1].reshape(-1, 1)\n self.voxels2 = self.img[hiseeds == 2].reshape(-1, 1)\n # this is how to compute with loseeds resolution but in wrong way\n # self.voxels1 = self.img[self.seeds == 1]\n # self.voxels2 = self.img[self.seeds == 2]\n\n # self.voxels1 = pyed.getSeedsVal(1)\n # self.voxels2 = pyed.getSeedsVal(2)\n\n img_orig = self.img\n\n # TODO this should be done with resize_to_shape_whith_zoom\n zoom = np.asarray(loseeds.shape).astype(np.float) / img_orig.shape\n self.img = scipy.ndimage.interpolation.zoom(img_orig, zoom, order=0)\n voxelsize_orig = self.voxelsize\n logger.debug(\"zoom %s\", zoom)\n logger.debug(\"vs %s\", self.voxelsize)\n self.voxelsize = self.voxelsize * zoom\n\n # self.img = resize_to_shape_with_zoom(img_orig, loseeds.shape, 1.0 / ms_zoom, order=0)\n\n # this step set the self.segmentation\n self.__single_scale_gc_run()\n # logger.debug(\n # 'segmentation - max: %d min: %d' % (\n # np.max(self.segmentation),\n # np.min(self.segmentation)\n # )\n # )\n logger.debug(\n \"segmentation: %s\", scipy.stats.describe(self.segmentation, axis=None)\n )\n\n self.modelparams = modelparams_hi\n self.voxelsize = voxelsize_orig\n self.img = img_orig\n self.seeds = hiseeds\n self.stats[\"t2\"] = time.time() - start\n return hard_constraints", "def run(self, run_fit_model=True):\n \"\"\"\n Run the Graph Cut segmentation according to preset parameters.\n\n :param run_fit_model: Allow to skip model fit when the model is prepared before\n :return:\n \"\"\"\n\n if run_fit_model:\n self.fit_model(self.img, self.voxelsize, self.seeds)\n\n self._start_time = time.time()\n if self.segparams[\"method\"].lower() in (\"graphcut\", \"gc\"):\n self.__single_scale_gc_run()\n elif self.segparams[\"method\"].lower() in (\n \"multiscale_graphcut\",\n \"multiscale_gc\",\n \"msgc\",\n \"msgc_lo2hi\",\n \"lo2hi\",\n \"multiscale_graphcut_lo2hi\",\n ):\n logger.debug(\"performing multiscale Graph-Cut lo2hi\")\n self.__multiscale_gc_lo2hi_run()\n elif self.segparams[\"method\"].lower() in (\n \"msgc_hi2lo\",\n \"hi2lo\",\n \"multiscale_graphcut_hi2lo\",\n ):\n logger.debug(\"performing multiscale Graph-Cut hi2lo\")\n self.__multiscale_gc_hi2lo_run()\n else:\n logger.error(\"Unknown segmentation method: \" + self.segparams[\"method\"])", "def __msgc_step3_discontinuity_localization(self):\n \"\"\"\n Estimate discontinuity in basis of low resolution image segmentation.\n :return: discontinuity in low resolution\n \"\"\"\n import scipy\n\n start = self._start_time\n seg = 1 - self.segmentation.astype(np.int8)\n self.stats[\"low level object voxels\"] = np.sum(seg)\n self.stats[\"low level image voxels\"] = np.prod(seg.shape)\n # in seg is now stored low resolution segmentation\n # back to normal parameters\n # step 2: discontinuity localization\n # self.segparams = sparams_hi\n seg_border = scipy.ndimage.filters.laplace(seg, mode=\"constant\")\n logger.debug(\"seg_border: %s\", scipy.stats.describe(seg_border, axis=None))\n # logger.debug(str(np.max(seg_border)))\n # logger.debug(str(np.min(seg_border)))\n 
seg_border[seg_border != 0] = 1\n logger.debug(\"seg_border: %s\", scipy.stats.describe(seg_border, axis=None))\n # scipy.ndimage.morphology.distance_transform_edt\n boundary_dilatation_distance = self.segparams[\"boundary_dilatation_distance\"]\n seg = scipy.ndimage.morphology.binary_dilation(\n seg_border,\n # seg,\n np.ones(\n [\n (boundary_dilatation_distance * 2) + 1,\n (boundary_dilatation_distance * 2) + 1,\n (boundary_dilatation_distance * 2) + 1,\n ]\n ),\n )\n if self.keep_temp_properties:\n self.temp_msgc_lowres_discontinuity = seg\n else:\n self.temp_msgc_lowres_discontinuity = None\n\n if self.debug_images:\n import sed3\n\n pd = sed3.sed3(seg_border) # ), contour=seg)\n pd.show()\n pd = sed3.sed3(seg) # ), contour=seg)\n pd.show()\n # segzoom = scipy.ndimage.interpolation.zoom(seg.astype('float'), zoom,\n # order=0).astype('int8')\n self.stats[\"t3\"] = time.time() - start\n return seg", "def _cnvkit_segment(cnr_file, cov_interval, data, items, out_file=None, detailed=False):\n \"\"\"Perform segmentation and copy number calling on normalized inputs\n \"\"\"\n if not out_file:\n out_file = \"%s.cns\" % os.path.splitext(cnr_file)[0]\n if not utils.file_uptodate(out_file, cnr_file):\n with file_transaction(data, out_file) as tx_out_file:\n if not _cna_has_values(cnr_file):\n with open(tx_out_file, \"w\") as out_handle:\n out_handle.write(\"chromosome\\tstart\\tend\\tgene\\tlog2\\tprobes\\tCN1\\tCN2\\tbaf\\tweight\\n\")\n else:\n # Scale cores to avoid memory issues with segmentation\n # https://github.com/etal/cnvkit/issues/346\n if cov_interval == \"genome\":\n cores = max(1, dd.get_cores(data) // 2)\n else:\n cores = dd.get_cores(data)\n cmd = [_get_cmd(), \"segment\", \"-p\", str(cores), \"-o\", tx_out_file, cnr_file]\n small_vrn_files = _compatible_small_variants(data, items)\n if len(small_vrn_files) > 0 and _cna_has_values(cnr_file) and cov_interval != \"genome\":\n cmd += [\"--vcf\", small_vrn_files[0].name, \"--sample-id\", small_vrn_files[0].sample]\n if small_vrn_files[0].normal:\n cmd += [\"--normal-id\", small_vrn_files[0].normal]\n resources = config_utils.get_resources(\"cnvkit_segment\", data[\"config\"])\n user_options = resources.get(\"options\", [])\n cmd += [str(x) for x in user_options]\n if cov_interval == \"genome\" and \"--threshold\" not in user_options:\n cmd += [\"--threshold\", \"0.00001\"]\n # For tumors, remove very low normalized regions, avoiding upcaptured noise\n # https://github.com/bcbio/bcbio-nextgen/issues/2171#issuecomment-348333650\n # unless we want detailed segmentation for downstream tools\n paired = vcfutils.get_paired(items)\n if paired:\n #if detailed:\n # cmd += [\"-m\", \"hmm-tumor\"]\n if \"--drop-low-coverage\" not in user_options:\n cmd += [\"--drop-low-coverage\"]\n # preferentially use conda installed Rscript\n export_cmd = (\"%s && export TMPDIR=%s && \"\n % (utils.get_R_exports(), os.path.dirname(tx_out_file)))\n do.run(export_cmd + \" \".join(cmd), \"CNVkit segment\")\n return out_file", "def graphcut_stawiaski(regions, gradient = False, foreground = False, background = False):\n \"\"\"\n Executes a Stawiaski label graph cut.\n \n Parameters\n ----------\n regions : ndarray\n The regions image / label map.\n gradient : ndarray\n The gradient image.\n foreground : ndarray\n The foreground markers.\n background : ndarray\n The background markers.\n \n Returns\n -------\n segmentation : ndarray\n The graph-cut segmentation result as boolean array.\n \n Raises\n ------\n ArgumentError\n When the supplied data is erroneous.\n 
\"\"\"\n # initialize logger\n logger = Logger.getInstance()\n \n # unpack images if required\n # !TODO: This is an ugly hack, especially since it can be seen inside the function definition\n # How to overcome this, since I can not use a wrapper function as the whole thing must be pickable\n if not gradient and not foreground and not background: \n regions, gradient, foreground, background = regions\n \n # ensure that input images are scipy arrays\n img_region = scipy.asarray(regions)\n img_gradient = scipy.asarray(gradient)\n img_fg = scipy.asarray(foreground, dtype=scipy.bool_)\n img_bg = scipy.asarray(background, dtype=scipy.bool_)\n \n # ensure correctness of supplied images\n if not (img_region.shape == img_gradient.shape == img_fg.shape == img_bg.shape): raise ArgumentError('All supplied images must be of the same shape.')\n\n # recompute the label ids to start from id = 1\n img_region = relabel(img_region)\n \n # generate graph\n gcgraph = graph_from_labels(img_region, img_fg, img_bg, boundary_term = boundary_stawiaski, boundary_term_args = (img_gradient))\n \n # execute min-cut\n maxflow = gcgraph.maxflow() # executes the cut and returns the maxflow value\n \n logger.debug('Graph-cut terminated successfully with maxflow of {}.'.format(maxflow))\n \n # apply results to the region image\n mapping = [0] # no regions with id 1 exists in mapping, entry used as padding\n mapping.extend([0 if gcgraph.termtype.SINK == gcgraph.what_segment(int(x) - 1) else 1 for x in scipy.unique(img_region)])\n img_results = relabel_map(img_region, mapping)\n \n return img_results.astype(scipy.bool_)", "def _setup_gc2_framework(self):\n \"\"\"\n This method establishes the GC2 framework for a multi-segment\n (and indeed multi-typology) case based on the description in\n Spudich & Chiou (2015) - see section on Generalized Coordinate\n System for Multiple Rupture Traces\n \"\"\"\n # Generate cartesian edge set\n edge_sets = self._get_cartesian_edge_set()\n self.gc2_config = {}\n # Determine furthest two points apart\n endpoint_set = numpy.vstack([cep for cep in self.cartesian_endpoints])\n dmat = squareform(pdist(endpoint_set))\n irow, icol = numpy.unravel_index(numpy.argmax(dmat), dmat.shape)\n # Join further points to form a vector (a_hat in Spudich & Chiou)\n # According to Spudich & Chiou, a_vec should be eastward trending\n if endpoint_set[irow, 0] > endpoint_set[icol, 0]:\n # Row point is to the east of column point\n beginning = endpoint_set[icol, :2]\n ending = endpoint_set[irow, :2]\n else:\n # Column point is to the east of row point\n beginning = endpoint_set[irow, :2]\n ending = endpoint_set[icol, :2]\n\n # Convert to unit vector\n a_vec = ending - beginning\n self.gc2_config[\"a_hat\"] = a_vec / numpy.linalg.norm(a_vec)\n # Get e_j set\n self.gc2_config[\"ejs\"] = []\n for c_edges in self.cartesian_edges:\n self.gc2_config[\"ejs\"].append(\n numpy.dot(c_edges[-1, :2] - c_edges[0, :2],\n self.gc2_config[\"a_hat\"]))\n # A \"total E\" is defined as the sum of the e_j values\n self.gc2_config[\"e_tot\"] = sum(self.gc2_config[\"ejs\"])\n sign_etot = numpy.sign(self.gc2_config[\"e_tot\"])\n b_vec = numpy.zeros(2)\n self.gc2_config[\"sign\"] = []\n for i, c_edges in enumerate(self.cartesian_edges):\n segment_sign = numpy.sign(self.gc2_config[\"ejs\"][i]) * sign_etot\n self.gc2_config[\"sign\"].append(segment_sign)\n if segment_sign < 0:\n # Segment is discordant - reverse the points\n c_edges = numpy.flipud(c_edges)\n self.cartesian_edges[i] = c_edges\n self.cartesian_endpoints[i] = numpy.flipud(\n 
self.cartesian_endpoints[i])\n b_vec += (c_edges[-1, :2] - c_edges[0, :2])\n\n # Get unit vector\n self.gc2_config[\"b_hat\"] = b_vec / numpy.linalg.norm(b_vec)\n if numpy.dot(a_vec, self.gc2_config[\"b_hat\"]) >= 0.0:\n self.p0 = beginning\n else:\n self.p0 = ending\n # To later calculate Ry0 it is necessary to determine the maximum\n # GC2-U coordinate for the fault\n self._get_gc2_coordinates_for_rupture(edge_sets)", "def model_segments(copy_file, work_dir, paired):\n \"\"\"Perform segmentation on input copy number log2 ratio file.\n \"\"\"\n out_file = os.path.join(work_dir, \"%s.cr.seg\" % dd.get_sample_name(paired.tumor_data))\n tumor_counts, normal_counts = heterogzygote_counts(paired)\n if not utils.file_exists(out_file):\n with file_transaction(paired.tumor_data, out_file) as tx_out_file:\n params = [\"-T\", \"ModelSegments\",\n \"--denoised-copy-ratios\", copy_file,\n \"--allelic-counts\", tumor_counts,\n \"--output-prefix\", dd.get_sample_name(paired.tumor_data),\n \"-O\", os.path.dirname(tx_out_file)]\n if normal_counts:\n params += [\"--normal-allelic-counts\", normal_counts]\n _run_with_memory_scaling(params, tx_out_file, paired.tumor_data)\n for tx_fname in glob.glob(os.path.join(os.path.dirname(tx_out_file),\n \"%s*\" % dd.get_sample_name(paired.tumor_data))):\n shutil.copy(tx_fname, os.path.join(work_dir, os.path.basename(tx_fname)))\n return {\"seg\": out_file, \"tumor_hets\": out_file.replace(\".cr.seg\", \".hets.tsv\"),\n \"final_seg\": out_file.replace(\".cr.seg\", \".modelFinal.seg\")}", "def boundary_stawiaski(graph, label_image, gradient_image): # label image is not required to hold continuous ids or to start from 1\n r\"\"\"\n Boundary term based on the sum of border voxel pairs differences.\n \n An implementation of the boundary term in [1]_, suitable to be used with the `~medpy.graphcut.generate.graph_from_labels` function.\n \n Determines for each two supplied regions the voxels forming their border assuming\n :math:`ndim*2`-connectedness (e.g. :math:`3*2=6` for 3D). From the gradient magnitude values of each\n end-point voxel the border-voxel pairs, the highest one is selected and passed to a\n strictly positive and decreasing function :math:`g(x)`, which is defined as:\n \n .. math::\n \n g(x) = \\left(\\frac{1}{1+|x|}\\right)^k\n \n ,where :math:`k=2`. The final weight :math:`w_{i,j}` between two regions :math:`r_i` and\n :math:`r_j` is then determined by the sum of all these neighbour values:\n \n .. math::\n \n w_{i,j} = \\sum_{e_{m,n}\\in F_{(r_i,r_j)}}g(\\max(|I(m)|,|I(n)|))\n \n , where :math:`F_{(r_i,r_j)}` is the set of border voxel-pairs :math:`e_{m,n}` between\n the regions :math:`r_i` and :math:`r_j` and :math:`|I(p)|` the absolute of the gradient\n magnitude at the voxel :math:`p`\n \n This boundary_function works as an edge indicator in the original image. In simpler\n words the weight (and therefore the energy) is obtained by summing the local contrast\n along the boundaries between two regions.\n \n Parameters\n ----------\n graph : GCGraph\n The graph to add the weights to.\n label_image : ndarray\n The label image. Must contain consecutively labelled regions starting from index 1.\n gradient_image : ndarray\n The gradient image.\n \n Notes\n -----\n This function requires the gradient magnitude image of the original image to be passed\n along. That means that `~medpy.graphcut.generate.graph_from_labels` has to be called\n with ``boundary_term_args`` set to the gradient image. This can be obtained e.g. 
with\n `generic_gradient_magnitude` and `prewitt` from `scipy.ndimage`.\n \n This function is tested on 2D and 3D images and theoretically works for all dimensionalities. \n \n References\n ----------\n .. [1] Stawiaski J., Decenciere E., Bidlaut F. \"Interactive Liver Tumor Segmentation\n Using Graph-cuts and watershed\" MICCAI 2008 participation\n \"\"\"\n # convert to arrays if necessary\n label_image = scipy.asarray(label_image)\n gradient_image = scipy.asarray(gradient_image)\n \n if label_image.flags['F_CONTIGUOUS']: # strangely, this one is required to be ctype ordering\n label_image = scipy.ascontiguousarray(label_image)\n \n __check_label_image(label_image)\n \n for dim in range(label_image.ndim):\n # prepare slicer for all minus last and all minus first \"row\"\n slicer_from = [slice(None)] * label_image.ndim\n slicer_to = [slice(None)] * label_image.ndim\n slicer_from[dim] = slice(None, -1)\n slicer_to[dim] = slice(1, None)\n # slice views of keys\n keys_from = label_image[slicer_from]\n keys_to = label_image[slicer_to]\n # determine not equal keys\n valid_edges = keys_from != keys_to\n # determine largest gradient\n gradient_max = numpy.maximum(numpy.abs(gradient_image[slicer_from]), numpy.abs(gradient_image[slicer_to]))[valid_edges]\n # determine key order\n keys_max = numpy.maximum(keys_from, keys_to)[valid_edges]\n keys_min = numpy.minimum(keys_from, keys_to)[valid_edges]\n # set edges / nweights\n for k1, k2, val in zip(keys_min, keys_max, gradient_max):\n weight = math.pow(1./(1. + val), 2) # weight contribution of a single pixel\n weight = max(weight, sys.float_info.min)\n graph.set_nweight(k1 - 1 , k2 - 1, weight, weight)", "def segment(self, document):\n \"\"\"\n document: list[str]\n return list[int],\n i-th element denotes whether exists a boundary right before paragraph i(0 indexed)\n \"\"\"\n # ensure document is not empty and every element is an instance of str\n assert(len(document) > 0 and len([d for d in document if not isinstance(d, str)]) == 0)\n # step 1, do preprocessing\n n = len(document)\n self.window = max(min(self.window, n / 3), 1)\n cnts = [Counter(self.tokenizer.tokenize(document[i])) for i in range(n)]\n\n # step 2, calculate gap score\n gap_score = [0 for _ in range(n)]\n for i in range(n):\n sz = min(min(i + 1, n - i - 1), self.window)\n lcnt, rcnt = Counter(), Counter()\n for j in range(i - sz + 1, i + 1):\n lcnt += cnts[j]\n for j in range(i + 1, i + sz + 1):\n rcnt += cnts[j]\n gap_score[i] = cosine_sim(lcnt, rcnt)\n\n # step 3, calculate depth score\n depth_score = [0 for _ in range(n)]\n for i in range(n):\n if i < self.window or i + self.window >= n:\n continue\n ptr = i - 1\n while ptr >= 0 and gap_score[ptr] >= gap_score[ptr + 1]:\n ptr -= 1\n lval = gap_score[ptr + 1]\n ptr = i + 1\n while ptr < n and gap_score[ptr] >= gap_score[ptr - 1]:\n ptr += 1\n rval = gap_score[ptr - 1]\n depth_score[i] = lval + rval - 2 * gap_score[i]\n\n # step 4, smooth depth score with fixed window size 3\n smooth_dep_score = [0 for _ in range(n)]\n for i in range(n):\n if i - 1 < 0 or i + 1 >= n:\n smooth_dep_score[i] = depth_score[i]\n else:\n smooth_dep_score[i] = np.average(depth_score[(i - 1):(i + 2)])\n\n # step 5, determine boundaries\n boundaries = [0 for _ in range(n)]\n avg = np.average(smooth_dep_score)\n stdev = np.std(smooth_dep_score)\n cutoff = avg - stdev / 2.0\n\n depth_tuples = list(zip(smooth_dep_score, list(range(len(smooth_dep_score)))))\n depth_tuples.sort()\n depth_tuples.reverse()\n hp = [x for x in depth_tuples if (x[0] > cutoff)]\n for 
dt in hp:\n boundaries[dt[1]] = 1\n for i in range(dt[1] - 4, dt[1] + 4 + 1):\n if i != dt[1] and i >= 0 and i < n and boundaries[i] == 1:\n boundaries[dt[1]] = 0\n break\n return [1] + boundaries[:-1]", "def graphcut_split(graphcut_function, regions, gradient, foreground, background, minimal_edge_length = 100, overlap = 10, processes = None):\n \"\"\"\n Executes a graph cut by splitting the original volume into a number of sub-volumes of\n a minimal edge length. These are then processed in subprocesses.\n \n This can be significantly faster than the traditional graph cuts, but should be\n used with, as it can lead to different results. To minimize this effect, the overlap\n parameter allows control over how much the respective sub-volumes should overlap.\n \n Parameters\n ----------\n graphcut_function : function\n The graph cut to use (e.g. `graphcut_stawiaski`).\n regions : ndarray\n The regions image / label map.\n gradient : ndarray\n The gradient image.\n foreground : ndarray\n The foreground markers.\n background : ndarray\n The background markers.\n minimal_edge_length : integer\n The minimal edge length of the sub-volumes in voxels.\n overlap : integer\n The overlap (in voxels) between the generated sub-volumes.\n processes : integer or None\n The number of processes to run simultaneously, if not supplied, will be the same\n as the number of processors.\n \n Returns\n -------\n segmentation : ndarray\n The graph-cut segmentation result as boolean array.\n \"\"\"\n # initialize logger\n logger = Logger.getInstance()\n \n # ensure that input images are scipy arrays\n img_region = scipy.asarray(regions)\n img_gradient = scipy.asarray(gradient)\n img_fg = scipy.asarray(foreground, dtype=scipy.bool_)\n img_bg = scipy.asarray(background, dtype=scipy.bool_)\n \n # ensure correctness of supplied images\n if not (img_region.shape == img_gradient.shape == img_fg.shape == img_bg.shape): raise ArgumentError('All supplied images must be of the same shape.') \n \n # check and eventually enhance input parameters\n if minimal_edge_length < 10: raise ArgumentError('A minimal edge length smaller than 10 is not supported.')\n if overlap < 0: raise ArgumentError('A negative overlap is not supported.')\n if overlap >= minimal_edge_length: raise ArgumentError('The overlap is not allowed to exceed the minimal edge length.')\n \n # compute how to split the volumes into sub-volumes i.e. 
determine step-size for each image dimension\n shape = list(img_region.shape)\n steps = [x // minimal_edge_length for x in shape]\n steps = [1 if 0 == x else x for x in steps] # replace zeros by ones\n stepsizes = [math.ceil(x / y) for x, y in zip(shape, steps)]\n logger.debug('Using a minimal edge length of {}, a sub-volume size of {} was determined from the shape {}, which means {} sub-volumes.'.format(minimal_edge_length, stepsizes, shape, reduce(lambda x, y: x*y, steps)))\n \n # control step-sizes to definitely cover the whole image\n covered_shape = [x * y for x, y in zip(steps, stepsizes)]\n for c, o in zip(covered_shape, shape):\n if c < o: raise Exception(\"The computed sub-volumes do not cover the complete image!\")\n \n # iterate over the steps and extract subvolumes according to the stepsizes\n slicer_steps = [list(range(0, int(step * stepsize), int(stepsize))) for step, stepsize in zip(steps, stepsizes)]\n slicers = [[slice(_from, _from + _offset + overlap) for _from, _offset in zip(slicer_step, stepsizes)] for slicer_step in itertools.product(*slicer_steps)]\n subvolumes_input = [(img_region[slicer],\n img_gradient[slicer],\n img_fg[slicer],\n img_bg[slicer]) for slicer in slicers]\n \n # execute the graph cuts and collect results\n subvolumes_output = graphcut_subprocesses(graphcut_function, subvolumes_input, processes)\n \n # put back data together\n img_result = scipy.zeros(img_region.shape, dtype=scipy.bool_)\n for slicer, subvolume in zip(slicers, subvolumes_output):\n sslicer_antioverlap = [slice(None)] * img_result.ndim\n \n # treat overlap area using logical-and (&)\n for dim in range(img_result.ndim):\n if 0 == slicer[dim].start: continue\n sslicer_antioverlap[dim] = slice(overlap, None)\n sslicer_overlap = [slice(None)] * img_result.ndim\n sslicer_overlap[dim] = slice(0, overlap)\n img_result[slicer][sslicer_overlap] = scipy.logical_and(img_result[slicer][sslicer_overlap], subvolume[sslicer_overlap])\n \n # treat remainder through assignment\n img_result[slicer][sslicer_antioverlap] = subvolume[sslicer_antioverlap]\n \n return img_result.astype(scipy.bool_)", "def correct_segmentation(segments, clusters, min_time):\n \"\"\" Corrects the predicted segmentation\n\n This process prevents over segmentation\n\n Args:\n segments (:obj:`list` of :obj:`list` of :obj:`Point`):\n segments to correct\n min_time (int): minimum required time for segmentation\n \"\"\"\n # segments = [points for points in segments if len(points) > 1]\n\n result_segments = []\n prev_segment = None\n for i, segment in enumerate(segments):\n if len(segment) >= 1:\n continue\n\n cluster = clusters[i]\n if prev_segment is None:\n prev_segment = segment\n else:\n cluster_dt = 0\n if len(cluster) > 0:\n cluster_dt = abs(cluster[0].time_difference(cluster[-1]))\n if cluster_dt <= min_time:\n prev_segment.extend(segment)\n else:\n prev_segment.append(segment[0])\n result_segments.append(prev_segment)\n prev_segment = segment\n if prev_segment is not None:\n result_segments.append(prev_segment)\n\n return result_segments" ]
[ 0.846430778503418, 0.7215781211853027, 0.7195752263069153, 0.685760498046875, 0.6824662089347839, 0.6763722896575928, 0.6726657152175903, 0.6619572043418884, 0.6599618792533875, 0.6594784259796143, 0.6542990803718567, 0.6521185040473938 ]
Return values (intensities) by indexes. Used for multiscale graph cut. data = [[0 1 1], [0 2 2], [0 2 2]] inds = [[0 1 2], [3 4 4], [5 4 4]] return: [0, 1, 1, 0, 2, 0] If the data are not consistent, it will take the maximal value
def __ordered_values_by_indexes(self, data, inds): """ Return values (intensities) by indexes. Used for multiscale graph cut. data = [[0 1 1], [0 2 2], [0 2 2]] inds = [[0 1 2], [3 4 4], [5 4 4]] return: [0, 1, 1, 0, 2, 0] If the data are not consistent, it will take the maximal value """ # get unique labels and their first indexes # lab, linds = np.unique(inds, return_index=True) # compute values by indexes # values = data.reshape(-1)[linds] # alternative slow implementation # if there are different data on same index, it will take # maximal value # lab = np.unique(inds) # values = [0]*len(lab) # for label in lab: # values[label] = np.max(data[inds == label]) # # values = np.asarray(values) # yet another implementation values = [None] * (np.max(inds) + 1) linear_inds = inds.ravel() linear_data = data.ravel() for i in range(0, len(linear_inds)): # going over all data pixels if values[linear_inds[i]] is None: # this index is found for first values[linear_inds[i]] = linear_data[i] elif values[linear_inds[i]] < linear_data[i]: # here can be changed maximal or minimal value values[linear_inds[i]] = linear_data[i] values = np.asarray(values) return values
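The Python loop in __ordered_values_by_indexes is a per-index maximum reduction. As a point of comparison only (this sketch is my own, not part of the library), the same result can be obtained with numpy's unbuffered ufunc method np.maximum.at, which applies repeated indexes correctly:

import numpy as np

def ordered_values_by_indexes_sketch(data, inds):
    # start below any real intensity so the running maximum can only grow
    values = np.full(np.max(inds) + 1, -np.inf)
    # unbuffered per-index maximum; repeated entries in inds are all applied
    np.maximum.at(values, inds.ravel(), data.ravel().astype(float))
    return values

data = np.array([[0, 1, 1], [0, 2, 2], [0, 2, 2]])
inds = np.array([[0, 1, 2], [3, 4, 4], [5, 4, 4]])
print(ordered_values_by_indexes_sketch(data, inds))  # [0. 1. 1. 0. 2. 0.]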
[ "def get_maximum_index(indices):\n \"\"\"Internally used.\"\"\"\n def _maximum_idx_single(idx):\n if isinstance(idx, slice):\n start = -1\n stop = 0\n if idx.start is not None:\n start = idx.start.__index__()\n if idx.stop is not None:\n stop = idx.stop.__index__()\n return max(start, stop - 1)\n else:\n return idx.__index__()\n if isinstance(indices, tuple):\n return max((_maximum_idx_single(i) for i in indices), default=-1)\n else:\n return _maximum_idx_single(indices)", "def _get_indices(values, selected, tolerance):\n \"\"\"Get indices based on user-selected values.\n\n Parameters\n ----------\n values : ndarray (any dtype)\n values present in the axis.\n selected : ndarray (any dtype) or tuple or list\n values selected by the user\n tolerance : float\n avoid rounding errors.\n\n Returns\n -------\n idx_data : list of int\n indices of row/column to select the data\n idx_output : list of int\n indices of row/column to copy into output\n\n Notes\n -----\n This function is probably not very fast, but it's pretty robust. It keeps\n the order, which is extremely important.\n\n If you use values in the self.axis, you don't need to specify tolerance.\n However, if you specify arbitrary points, floating point errors might\n affect the actual values. Of course, using tolerance is much slower.\n\n Maybe tolerance should be part of Select instead of here.\n\n \"\"\"\n idx_data = []\n idx_output = []\n for idx_of_selected, one_selected in enumerate(selected):\n\n if tolerance is None or values.dtype.kind == 'U':\n idx_of_data = where(values == one_selected)[0]\n else:\n idx_of_data = where(abs(values - one_selected) <= tolerance)[0] # actual use min\n\n if len(idx_of_data) > 0:\n idx_data.append(idx_of_data[0])\n idx_output.append(idx_of_selected)\n\n return idx_data, idx_output", "def _slice_idxs(df, twin=None):\n \"\"\"\n Returns a slice of the incoming array filtered between\n the two times specified. Assumes the array is the same\n length as self.data. 
Acts in the time() and trace() functions.\n \"\"\"\n if twin is None:\n return 0, df.shape[0]\n\n tme = df.index\n\n if twin[0] is None:\n st_idx = 0\n else:\n st_idx = (np.abs(tme - twin[0])).argmin()\n if twin[1] is None:\n en_idx = df.shape[0]\n else:\n en_idx = (np.abs(tme - twin[1])).argmin() + 1\n return st_idx, en_idx", "def broadcast_indices(x, minv, ndim, axis):\n \"\"\"Calculate index values to properly broadcast index array within data array.\n\n See usage in interp.\n \"\"\"\n ret = []\n for dim in range(ndim):\n if dim == axis:\n ret.append(minv)\n else:\n broadcast_slice = [np.newaxis] * ndim\n broadcast_slice[dim] = slice(None)\n dim_inds = np.arange(x.shape[dim])\n ret.append(dim_inds[tuple(broadcast_slice)])\n return tuple(ret)", "def get_all_indices(self, n_samples=None, max_samples=None,\n random_state=None):\n \"\"\"Get the indices on which to evaluate the fitness of a program.\n\n Parameters\n ----------\n n_samples : int\n The number of samples.\n\n max_samples : int\n The maximum number of samples to use.\n\n random_state : RandomState instance\n The random number generator.\n\n Returns\n -------\n indices : array-like, shape = [n_samples]\n The in-sample indices.\n\n not_indices : array-like, shape = [n_samples]\n The out-of-sample indices.\n\n \"\"\"\n if self._indices_state is None and random_state is None:\n raise ValueError('The program has not been evaluated for fitness '\n 'yet, indices not available.')\n\n if n_samples is not None and self._n_samples is None:\n self._n_samples = n_samples\n if max_samples is not None and self._max_samples is None:\n self._max_samples = max_samples\n if random_state is not None and self._indices_state is None:\n self._indices_state = random_state.get_state()\n\n indices_state = check_random_state(None)\n indices_state.set_state(self._indices_state)\n\n not_indices = sample_without_replacement(\n self._n_samples,\n self._n_samples - self._max_samples,\n random_state=indices_state)\n sample_counts = np.bincount(not_indices, minlength=self._n_samples)\n indices = np.where(sample_counts == 0)[0]\n\n return indices, not_indices", "def select_data(self, iteration_indices):\n \"\"\"keep only data of `iteration_indices`\"\"\"\n dat = self\n iteridx = iteration_indices\n dat.f = dat.f[np.where([x in iteridx for x in dat.f[:, 0]])[0], :]\n dat.D = dat.D[np.where([x in iteridx for x in dat.D[:, 0]])[0], :]\n try:\n iteridx = list(iteridx)\n iteridx.append(iteridx[-1]) # last entry is artificial\n except:\n pass\n dat.std = dat.std[np.where([x in iteridx\n for x in dat.std[:, 0]])[0], :]\n dat.xmean = dat.xmean[np.where([x in iteridx\n for x in dat.xmean[:, 0]])[0], :]\n try:\n dat.xrecent = dat.x[np.where([x in iteridx for x in\n dat.xrecent[:, 0]])[0], :]\n except AttributeError:\n pass\n try:\n dat.corrspec = dat.x[np.where([x in iteridx for x in\n dat.corrspec[:, 0]])[0], :]\n except AttributeError:\n pass", "def get_highest_values(self, count):\n \"\"\"Get a list of the the x highest values of the Data Collection and their indices.\n\n This is useful for situations where one needs to know the times of\n the year when the largest values of a data collection occur. For example,\n there is a European dayight code that requires an analysis for the hours\n of the year with the greatest exterior illuminance level. 
This method\n can be used to help build a shcedule for such a study.\n\n Args:\n count: Integer representing the number of highest values to account for.\n\n Returns:\n highest_values: The n highest values in data list, ordered from\n highest to lowest.\n highest_values_index: Indicies of the n highest values in data\n list, ordered from highest to lowest.\n \"\"\"\n count = int(count)\n assert count <= len(self._values), \\\n 'count must be smaller than or equal to values length. {} > {}.'.format(\n count, len(self._values))\n assert count > 0, \\\n 'count must be greater than 0. Got {}.'.format(count)\n highest_values = sorted(self._values, reverse=True)[0:count]\n highest_values_index = sorted(list(xrange(len(self._values))),\n key=lambda k: self._values[k],\n reverse=True)[0:count]\n return highest_values, highest_values_index", "def create_index_tuple(group_ids):\n \"\"\"An helper function to create index tuples for fast lookup in HDF5Pump\"\"\"\n max_group_id = np.max(group_ids)\n\n start_idx_arr = np.full(max_group_id + 1, 0)\n n_items_arr = np.full(max_group_id + 1, 0)\n\n current_group_id = group_ids[0]\n current_idx = 0\n item_count = 0\n\n for group_id in group_ids:\n if group_id != current_group_id:\n start_idx_arr[current_group_id] = current_idx\n n_items_arr[current_group_id] = item_count\n current_idx += item_count\n item_count = 0\n current_group_id = group_id\n item_count += 1\n else:\n start_idx_arr[current_group_id] = current_idx\n n_items_arr[current_group_id] = item_count\n\n return (start_idx_arr, n_items_arr)", "def get_cutoff_indices(flow, fhigh, df, N):\n \"\"\"\n Gets the indices of a frequency series at which to stop an overlap\n calculation.\n\n Parameters\n ----------\n flow: float\n The frequency (in Hz) of the lower index.\n fhigh: float\n The frequency (in Hz) of the upper index.\n df: float\n The frequency step (in Hz) of the frequency series.\n N: int\n The number of points in the **time** series. Can be odd\n or even.\n\n Returns\n -------\n kmin: int\n kmax: int\n \"\"\"\n if flow:\n kmin = int(flow / df)\n if kmin < 0:\n err_msg = \"Start frequency cannot be negative. \"\n err_msg += \"Supplied value and kmin {} and {}\".format(flow, kmin)\n raise ValueError(err_msg)\n else:\n kmin = 1\n if fhigh:\n kmax = int(fhigh / df )\n if kmax > int((N + 1)/2.):\n kmax = int((N + 1)/2.)\n else:\n # int() truncates towards 0, so this is\n # equivalent to the floor of the float\n kmax = int((N + 1)/2.)\n\n if kmax <= kmin:\n err_msg = \"Kmax cannot be less than or equal to kmin. \"\n err_msg += \"Provided values of freqencies (min,max) were \"\n err_msg += \"{} and {} \".format(flow, fhigh)\n err_msg += \"corresponding to (kmin, kmax) of \"\n err_msg += \"{} and {}.\".format(kmin, kmax)\n raise ValueError(err_msg)\n\n return kmin,kmax", "def get_idx_rect(index_list):\r\n \"\"\"Extract the boundaries from a list of indexes\"\"\"\r\n rows, cols = list(zip(*[(i.row(), i.column()) for i in index_list]))\r\n return ( min(rows), max(rows), min(cols), max(cols) )", "def find_peaks_indexes(arr, window_width=5, threshold=0.0, fpeak=0):\n \"\"\"Find indexes of peaks in a 1d array.\n\n Note that window_width must be an odd number. 
The function imposes that the\n fluxes in the window_width /2 points to the left (and right) of the peak\n decrease monotonously as one moves away from the peak, except that\n it allows fpeak constant values around the peak.\n\n Parameters\n ----------\n arr : 1d numpy array\n Input 1D spectrum.\n window_width : int\n Width of the window where the peak must be found. This number must be\n odd.\n threshold : float\n Minimum signal in the peak (optional).\n fpeak: int\n Number of equal values around the peak\n\n Returns\n -------\n ipeaks : 1d numpy array (int)\n Indices of the input array arr in which the peaks have been found.\n\n\n \"\"\"\n\n _check_window_width(window_width)\n\n if (fpeak<0 or fpeak + 1 >= window_width):\n raise ValueError('fpeak must be in the range 0- window_width - 2')\n\n kernel_peak = kernel_peak_function(threshold, fpeak)\n out = generic_filter(arr, kernel_peak, window_width, mode=\"reflect\")\n result, = numpy.nonzero(out)\n\n return filter_array_margins(arr, result, window_width)", "def _validI(x, y, weights):\r\n '''\r\n return indices that have enough data points and are not erroneous \r\n '''\r\n # density filter:\r\n i = np.logical_and(np.isfinite(y), weights > np.median(weights))\r\n # filter outliers:\r\n try:\r\n grad = np.abs(np.gradient(y[i]))\r\n max_gradient = 4 * np.median(grad)\r\n i[i][grad > max_gradient] = False\r\n except (IndexError, ValueError):\r\n pass\r\n return i" ]
[ 0.7258252501487732, 0.7097887396812439, 0.7016910314559937, 0.7010431885719299, 0.6889966130256653, 0.6820541024208069, 0.678980827331543, 0.6788231134414673, 0.676472544670105, 0.6743552684783936, 0.6736085414886475, 0.672756016254425 ]
Function computes multiscale indexes of ndarray. mask: Says where the original resolution is (0) and where the small resolution is (1). Mask is in small resolution. orig_shape: Original shape of input data. zoom: Usually a number greater than 1. result = [[0 1 2], [3 4 4], [5 4 4]]
def __hi2lo_multiscale_indexes(self, mask, orig_shape): # , zoom): """ Function computes multiscale indexes of ndarray. mask: Says where is original resolution (0) and where is small resolution (1). Mask is in small resolution. orig_shape: Original shape of input data. zoom: Usually number greater then 1 result = [[0 1 2], [3 4 4], [5 4 4]] """ mask_orig = zoom_to_shape(mask, orig_shape, dtype=np.int8) inds_small = np.arange(mask.size).reshape(mask.shape) inds_small_in_orig = zoom_to_shape(inds_small, orig_shape, dtype=np.int8) inds_orig = np.arange(np.prod(orig_shape)).reshape(orig_shape) # inds_orig = inds_orig * mask_orig inds_orig += np.max(inds_small_in_orig) + 1 # print 'indexes' # import py3DSeedEditor as ped # import pdb; pdb.set_trace() # BREAKPOINT # '==' is not the same as 'is' for numpy.array inds_small_in_orig[mask_orig == True] = inds_orig[mask_orig == True] # noqa inds = inds_small_in_orig # print np.max(inds) # print np.min(inds) inds = relabel_squeeze(inds) logger.debug( "Index after relabeling: %s", scipy.stats.describe(inds, axis=None) ) # logger.debug("Minimal index after relabeling: " + str(np.min(inds))) # inds_orig[mask_orig==True] = 0 # inds_small_in_orig[mask_orig==False] = 0 # inds = (inds_orig + np.max(inds_small_in_orig) + 1) + inds_small_in_orig return inds, mask_orig
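Note that in the code above it is the voxels where the mask is nonzero that receive their own original-resolution indexes, while zero-mask voxels keep the zoomed coarse index shared by their block. The following standalone sketch reproduces that merge with plain numpy/scipy; scipy.ndimage.zoom(order=0) stands in for zoom_to_shape and np.unique for relabel_squeeze, so it is an approximation of the helpers, not the library code itself:

import numpy as np
import scipy.ndimage

def multiscale_indexes_sketch(mask, orig_shape):
    zoom = np.asarray(orig_shape) / np.asarray(mask.shape)
    # nearest-neighbour upscaling of the mask and of the coarse index grid
    mask_orig = scipy.ndimage.zoom(mask, zoom, order=0)
    inds_small = np.arange(mask.size).reshape(mask.shape)
    inds_small_in_orig = scipy.ndimage.zoom(inds_small, zoom, order=0)
    # full-resolution indexes, shifted past the coarse ones
    inds_orig = np.arange(np.prod(orig_shape)).reshape(orig_shape)
    inds_orig += inds_small_in_orig.max() + 1
    # nonzero mask -> one index per voxel, zero mask -> one index per block
    inds_small_in_orig[mask_orig == 1] = inds_orig[mask_orig == 1]
    # squeeze the surviving labels into consecutive ids starting from 0
    _, inds = np.unique(inds_small_in_orig, return_inverse=True)
    return inds.reshape(orig_shape), mask_orig

mask = np.array([[1, 0], [0, 0]])   # refine only the first block
inds, mask_orig = multiscale_indexes_sketch(mask, (4, 4))
print(inds)  # the refined 2x2 block gets four distinct indexes,
             # each coarse 2x2 block shares a single index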
[ "def construct_zernike_polynomials(x, y, zernike_indexes, mask=None, weight=None):\n \"\"\"Return the zerike polynomials for all objects in an image\n \n x - the X distance of a point from the center of its object\n y - the Y distance of a point from the center of its object\n zernike_indexes - an Nx2 array of the Zernike polynomials to be computed.\n mask - a mask with same shape as X and Y of the points to consider\n weight - weightings of points with the same shape as X and Y (default\n weight on each point is 1).\n \n returns a height x width x N array of complex numbers which are the\n e^i portion of the sine and cosine of the Zernikes\n \"\"\"\n if x.shape != y.shape:\n raise ValueError(\"X and Y must have the same shape\")\n if mask is None:\n pass\n elif mask.shape != x.shape:\n raise ValueError(\"The mask must have the same shape as X and Y\")\n else:\n x = x[mask]\n y = y[mask]\n if weight is not None:\n weight = weight[mask]\n lut = construct_zernike_lookuptable(zernike_indexes) # precompute poly. coeffs.\n nzernikes = zernike_indexes.shape[0]\n # compute radii\n r_square = np.square(x) # r_square = x**2\n np.add(r_square, np.square(y), out=r_square) # r_square = x**2 + y**2\n # z = y + 1j*x\n # each Zernike polynomial is poly(r)*(r**m * np.exp(1j*m*phi)) ==\n # poly(r)*(y + 1j*x)**m\n z = np.empty(x.shape, np.complex)\n np.copyto(z.real, y)\n np.copyto(z.imag, x)\n # preallocate buffers\n s = np.empty_like(x)\n zf = np.zeros((nzernikes,) + x.shape, np.complex)\n z_pows = {}\n for idx, (n, m) in enumerate(zernike_indexes):\n s[:]=0\n if not m in z_pows:\n if m == 0:\n z_pows[m] = np.complex(1.0)\n else:\n z_pows[m] = z if m == 1 else (z ** m)\n z_pow = z_pows[m]\n # use Horner scheme\n for k in range((n-m)//2+1):\n s *= r_square\n s += lut[idx, k]\n s[r_square>1]=0\n if weight is not None:\n s *= weight.astype(s.dtype)\n if m == 0:\n np.copyto(zf[idx], s) # zf[idx] = s\n else:\n np.multiply(s, z_pow, out=zf[idx]) # zf[idx] = s*exp_term\n \n if mask is None:\n result = zf.transpose( tuple(range(1, 1+x.ndim)) + (0, ))\n else:\n result = np.zeros( mask.shape + (nzernikes,), np.complex)\n result[mask] = zf.transpose( tuple(range(1, 1 + x.ndim)) + (0, ))\n return result", "def offset_mask(mask):\n \"\"\" Returns a mask shrunk to the 'minimum bounding rectangle' of the\n nonzero portion of the previous mask, and its offset from the original.\n Useful to find the smallest rectangular section of the image that can be\n extracted to include the entire geometry. Conforms to the y-first\n expectations of numpy arrays rather than x-first (geodata).\n \"\"\"\n def axis_data(axis):\n \"\"\"Gets the bounds of a masked area along a certain axis\"\"\"\n x = mask.sum(axis)\n trimmed_front = N.trim_zeros(x,\"f\")\n offset = len(x)-len(trimmed_front)\n size = len(N.trim_zeros(trimmed_front,\"b\"))\n return offset,size\n\n xo,xs = axis_data(0)\n yo,ys = axis_data(1)\n\n array = mask[yo:yo+ys,xo:xo+xs]\n offset = (yo,xo)\n return offset, array", "def scale(mask, mag_scale, outfile=None):\n \"\"\"\n Scale the completeness depth of a mask such that mag_new = mag + mag_scale.\n Input is a full HEALPix map.\n Optionally write out the scaled mask as an sparse HEALPix map.\n \"\"\"\n msg = \"'mask.scale': ADW 2018-05-05\"\n DeprecationWarning(msg)\n mask_new = hp.UNSEEN * np.ones(len(mask))\n mask_new[mask == 0.] = 0.\n mask_new[mask > 0.] = mask[mask > 0.] 
+ mag_scale\n\n if outfile is not None:\n pix = np.nonzero(mask_new > 0.)[0]\n data_dict = {'MAGLIM': mask_new[pix]}\n nside = hp.npix2nside(len(mask_new))\n ugali.utils.skymap.writeSparseHealpixMap(pix, data_dict, nside, outfile)\n\n return mask_new", "def _compute_zs_mat(sz:TensorImageSize, scale:float, squish:float,\n invert:bool, row_pct:float, col_pct:float)->AffineMatrix:\n \"Utility routine to compute zoom/squish matrix.\"\n orig_ratio = math.sqrt(sz[1]/sz[0])\n for s,r,i in zip(scale,squish, invert):\n s,r = 1/math.sqrt(s),math.sqrt(r)\n if s * r <= 1 and s / r <= 1: #Test if we are completely inside the picture\n w,h = (s/r, s*r) if i else (s*r,s/r)\n col_c = (1-w) * (2*col_pct - 1)\n row_c = (1-h) * (2*row_pct - 1)\n return _get_zoom_mat(w, h, col_c, row_c)\n\n #Fallback, hack to emulate a center crop without cropping anything yet.\n if orig_ratio > 1: return _get_zoom_mat(1/orig_ratio**2, 1, 0, 0.)\n else: return _get_zoom_mat(1, orig_ratio**2, 0, 0.)", "def _findindex(az0, el0, az, el):\n \"\"\"\n inputs:\n ------\n az0, el0: N-D array of azimuth, elevation. May be masked arrays\n az, el: 1-D vectors of azimuth, elevation points from other camera to find closest angle for joint FOV.\n\n output:\n row, col: index of camera 0 closest to camera 1 FOV for each unmasked pixel\n\n I think with some minor tweaks this could be numba.jit if too slow.\n \"\"\"\n\n assert az0.size == el0.size # just for clarity\n assert az.ndim == el.ndim == 1, 'expect vector of test points'\n ic = np.empty(az.size, dtype=int)\n\n for i, (a, e) in enumerate(zip(az, el)):\n # we do this point by point because we need to know the closest pixel for each point\n # errang = haver.anglesep(az,el, apt,ept, deg=False)\n ic[i] = haver.anglesep_meeus(az0, el0, a, e, deg=False).argmin()\n\n \"\"\"\n THIS UNRAVEL_INDEX MUST BE ORDER = 'C'\n \"\"\"\n r, c = np.unravel_index(ic, az0.shape, order='C')\n\n mask = (c == 0) | (c == az0.shape[1] - 1) | (r == 0) | (r == az0.shape[0] - 1)\n\n r = np.ma.masked_where(mask, r)\n c = np.ma.masked_where(mask, c)\n\n return r, c", "def _makemasks(self, index=None, level=0):\n \"\"\"\n Internal function for generating masks for selecting values based on multi-index values.\n\n As all other multi-index functions will call this function, basic type-checking is also\n performed at this stage.\n \"\"\"\n if index is None:\n index = self.index\n\n try:\n dims = len(array(index).shape)\n if dims == 1:\n index = array(index, ndmin=2).T\n except:\n raise TypeError('A multi-index must be convertible to a numpy ndarray')\n\n try:\n index = index[:, level]\n except:\n raise ValueError(\"Levels must be indices into individual elements of the index\")\n\n lenIdx = index.shape[0]\n nlevels = index.shape[1]\n\n combs = product(*[unique(index.T[i, :]) for i in range(nlevels)])\n combs = array([l for l in combs])\n\n masks = array([[array_equal(index[i], c) for i in range(lenIdx)] for c in combs])\n\n return zip(*[(masks[x], combs[x]) for x in range(len(masks)) if masks[x].any()])", "def seed_zoom(seeds, zoom):\n \"\"\"\n Smart zoom for sparse matrix. If there is resize to bigger resolution\n thin line of label could be lost. This function prefers labels larger\n then zero. 
If there is only one small voxel in larger volume with zeros\n it is selected.\n \"\"\"\n # import scipy\n # loseeds=seeds\n labels = np.unique(seeds)\n # remove first label - 0\n labels = np.delete(labels, 0)\n # @TODO smart interpolation for seeds in one block\n # loseeds = scipy.ndimage.interpolation.zoom(\n # seeds, zoom, order=0)\n loshape = np.ceil(np.array(seeds.shape) * 1.0 / zoom).astype(np.int)\n loseeds = np.zeros(loshape, dtype=np.int8)\n loseeds = loseeds.astype(np.int8)\n for label in labels:\n a, b, c = np.where(seeds == label)\n loa = np.round(a // zoom)\n lob = np.round(b // zoom)\n loc = np.round(c // zoom)\n # loseeds = np.zeros(loshape)\n\n loseeds[loa, lob, loc] += label\n # this is to detect conflict seeds\n loseeds[loseeds > label] = 100\n\n # remove conflict seeds\n loseeds[loseeds > 99] = 0\n\n # import py3DSeedEditor\n # ped = py3DSeedEditor.py3DSeedEditor(loseeds)\n # ped.show()\n\n return loseeds", "def get_mask(cls, azim):\n \"\"\"Linear interpolation between two points of the mask\n \"\"\"\n\n if cls.mask is None:\n raise ValueError(\"No mask defined for the station {}\".format(cls.name))\n\n azim %= 2 * np.pi\n\n if azim in cls.mask[0, :]:\n return cls.mask[1, np.where(azim == cls.mask[0, :])[0][0]]\n\n for next_i, mask_azim in enumerate(cls.mask[0, :]):\n if mask_azim > azim:\n break\n else:\n next_i = 0\n\n x0, y0 = cls.mask[:, next_i - 1]\n x1, y1 = cls.mask[:, next_i]\n\n if next_i - 1 == -1:\n x0 = 0\n\n return y0 + (y1 - y0) * (azim - x0) / (x1 - x0)", "def _scale_shape(dshape, scale = (1,1,1)):\n \"\"\"returns the shape after scaling (should be the same as ndimage.zoom\"\"\"\n nshape = np.round(np.array(dshape) * np.array(scale))\n return tuple(nshape.astype(np.int))", "def cuts_from_bbox(mask_nii, cuts=3):\n \"\"\"Finds equi-spaced cuts for presenting images\"\"\"\n from nibabel.affines import apply_affine\n\n mask_data = mask_nii.get_data() > 0.0\n\n # First, project the number of masked voxels on each axes\n ijk_counts = [\n mask_data.sum(2).sum(1), # project sagittal planes to transverse (i) axis\n mask_data.sum(2).sum(0), # project coronal planes to to longitudinal (j) axis\n mask_data.sum(1).sum(0), # project axial planes to vertical (k) axis\n ]\n\n # If all voxels are masked in a slice (say that happens at k=10),\n # then the value for ijk_counts for the projection to k (ie. ijk_counts[2])\n # at that element of the orthogonal axes (ijk_counts[2][10]) is\n # the total number of voxels in that slice (ie. 
Ni x Nj).\n # Here we define some thresholds to consider the plane as \"masked\"\n # The thresholds vary because of the shape of the brain\n # I have manually found that for the axial view requiring 30%\n # of the slice elements to be masked drops almost empty boxes\n # in the mosaic of axial planes (and also addresses #281)\n ijk_th = [\n int((mask_data.shape[1] * mask_data.shape[2]) * 0.2), # sagittal\n int((mask_data.shape[0] * mask_data.shape[2]) * 0.0), # coronal\n int((mask_data.shape[0] * mask_data.shape[1]) * 0.3), # axial\n ]\n\n vox_coords = []\n for ax, (c, th) in enumerate(zip(ijk_counts, ijk_th)):\n B = np.argwhere(c > th)\n if B.size:\n smin, smax = B.min(), B.max()\n\n # Avoid too narrow selections of cuts (very small masks)\n if not B.size or (th > 0 and (smin + cuts + 1) >= smax):\n B = np.argwhere(c > 0)\n\n # Resort to full plane if mask is seemingly empty\n smin, smax = B.min(), B.max() if B.size else (0, mask_data.shape[ax])\n inc = (smax - smin) / (cuts + 1)\n vox_coords.append([smin + (i + 1) * inc for i in range(cuts)])\n\n ras_coords = []\n for cross in np.array(vox_coords).T:\n ras_coords.append(apply_affine(\n mask_nii.affine, cross).tolist())\n ras_cuts = [list(coords) for coords in np.transpose(ras_coords)]\n return {k: v for k, v in zip(['x', 'y', 'z'], ras_cuts)}", "def _crop_data(self):\n \"\"\"\n Crop the ``data`` and ``mask`` to have an integer number of\n background meshes of size ``box_size`` in both dimensions. The\n data are cropped on the top and/or right edges (this is the best\n option for the \"zoom\" interpolator).\n\n Returns\n -------\n result : `~numpy.ma.MaskedArray`\n The cropped data and mask as a masked array.\n \"\"\"\n\n ny_crop = self.nyboxes * self.box_size[1]\n nx_crop = self.nxboxes * self.box_size[0]\n crop_slc = index_exp[0:ny_crop, 0:nx_crop]\n if self.mask is not None:\n mask = self.mask[crop_slc]\n else:\n mask = False\n\n return np.ma.masked_array(self.data[crop_slc], mask=mask)", "def calculate_origin_and_size(canvas_size, data_shape, image_canvas_mode, image_zoom, image_position) -> typing.Tuple[typing.Any, typing.Any]:\n \"\"\"Calculate origin and size for canvas size, data shape, and image display parameters.\"\"\"\n if data_shape is None:\n return None, None\n if image_canvas_mode == \"fill\":\n data_shape = data_shape\n scale_h = float(data_shape[1]) / canvas_size[1]\n scale_v = float(data_shape[0]) / canvas_size[0]\n if scale_v < scale_h:\n image_canvas_size = (canvas_size[0], canvas_size[0] * data_shape[1] / data_shape[0])\n else:\n image_canvas_size = (canvas_size[1] * data_shape[0] / data_shape[1], canvas_size[1])\n image_canvas_origin = (canvas_size[0] * 0.5 - image_canvas_size[0] * 0.5, canvas_size[1] * 0.5 - image_canvas_size[1] * 0.5)\n elif image_canvas_mode == \"fit\":\n image_canvas_size = canvas_size\n image_canvas_origin = (0, 0)\n elif image_canvas_mode == \"1:1\":\n image_canvas_size = data_shape\n image_canvas_origin = (canvas_size[0] * 0.5 - image_canvas_size[0] * 0.5, canvas_size[1] * 0.5 - image_canvas_size[1] * 0.5)\n elif image_canvas_mode == \"2:1\":\n image_canvas_size = (data_shape[0] * 0.5, data_shape[1] * 0.5)\n image_canvas_origin = (canvas_size[0] * 0.5 - image_canvas_size[0] * 0.5, canvas_size[1] * 0.5 - image_canvas_size[1] * 0.5)\n else:\n image_canvas_size = (canvas_size[0] * image_zoom, canvas_size[1] * image_zoom)\n canvas_rect = Geometry.fit_to_size(((0, 0), image_canvas_size), data_shape)\n image_canvas_origin_y = (canvas_size[0] * 0.5) - image_position[0] * canvas_rect[1][0] - 
canvas_rect[0][0]\n image_canvas_origin_x = (canvas_size[1] * 0.5) - image_position[1] * canvas_rect[1][1] - canvas_rect[0][1]\n image_canvas_origin = (image_canvas_origin_y, image_canvas_origin_x)\n return image_canvas_origin, image_canvas_size" ]
[ 0.6771979928016663, 0.6765623092651367, 0.6748027205467224, 0.6736266016960144, 0.6680997014045715, 0.6649146676063538, 0.6602632999420166, 0.6561344265937805, 0.65461266040802, 0.6545136570930481, 0.6541271209716797, 0.6522963047027588 ]
Interactive seed setting with 3d seed editor
def interactivity(self, min_val=None, max_val=None, qt_app=None): """ Interactive seed setting with 3d seed editor """ from .seed_editor_qt import QTSeedEditor from PyQt4.QtGui import QApplication if min_val is None: min_val = np.min(self.img) if max_val is None: max_val = np.max(self.img) window_c = (max_val + min_val) / 2 # .astype(np.int16) window_w = max_val - min_val # .astype(np.int16) if qt_app is None: qt_app = QApplication(sys.argv) pyed = QTSeedEditor( self.img, modeFun=self.interactivity_loop, voxelSize=self.voxelsize, seeds=self.seeds, volume_unit=self.volume_unit, ) pyed.changeC(window_c) pyed.changeW(window_w) qt_app.exec_()
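When min_val/max_val are not supplied, the editor window is initialised from the data range. A minimal standalone sketch of that centre/width mapping (the Qt editor itself is omitted here; only the arithmetic from the method above is reproduced):

import numpy as np

img = (np.random.rand(8, 8, 8) * 100).astype(np.int16)   # stand-in volume
min_val, max_val = int(img.min()), int(img.max())
window_c = (max_val + min_val) / 2   # value passed to pyed.changeC()
window_w = max_val - min_val         # value passed to pyed.changeW()
print("window centre:", window_c, "window width:", window_w)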
[ "def set_seed(seed: int):\n \"\"\" Set random seed for python, numpy and pytorch RNGs \"\"\"\n random.seed(seed)\n np.random.seed(seed)\n torch.random.manual_seed(seed)", "def set_seeds(self, seeds):\n \"\"\"\n Function for manual seed setting. Sets variable seeds and prepares\n voxels for density model.\n :param seeds: ndarray (0 - nothing, 1 - object, 2 - background,\n 3 - object just hard constraints, no model training, 4 - background \n just hard constraints, no model training)\n \"\"\"\n if self.img.shape != seeds.shape:\n raise Exception(\"Seeds must be same size as input image\")\n\n self.seeds = seeds.astype(\"int8\")\n self.voxels1 = self.img[self.seeds == 1]\n self.voxels2 = self.img[self.seeds == 2]", "function autoseed(seed) {\n try {\n global.crypto.getRandomValues(seed = new Uint8Array(width));\n return tostring(seed);\n } catch (e) {\n return [+new Date, global, global.navigator.plugins,\n global.screen, tostring(pool)];\n }\n}", "function Seeder(knex) {\n this.knex = knex;\n this.config = this.setConfig(knex.client.config.seeds);\n}", "def fetchExternalUpdates(self):\r\n \"\"\"\r\n !Experimental!\r\n Calls out to the client code requesting seed values to use in the UI\r\n !Experimental!\r\n \"\"\"\r\n seeds = seeder.fetchDynamicProperties(\r\n self.buildSpec['target'],\r\n self.buildSpec['encoding']\r\n )\r\n for config in self.configs:\r\n config.seedUI(seeds)", "def set_seed(self, rho_seed, mu_seed):\n\n \"\"\"\n set seeds manually (should add dimensionality check)\n \"\"\"\n\n self.rho = rho_seed\n self.mu = mu_seed", "def _set_seed(self):\n \"\"\" Set random seed for numpy and tensorflow packages \"\"\"\n if self.flags['SEED'] is not None:\n tf.set_random_seed(self.flags['SEED'])\n np.random.seed(self.flags['SEED'])", "def set_state(seed_value=None, step=None):\n \"\"\"Set random seed.\"\"\"\n global RANDOM_SEED # pylint: disable=global-statement\n if seed_value is not None:\n RANDOM_SEED = seed_value\n if step is not None:\n RANDOM_SEED += step", "def seed(self):\n \"\"\" Reset the number from which the next generated sequence start.\n If you seed at 100, next seed will be 101\n \"\"\"\n form = self.request.form\n prefix = form.get('prefix', None)\n if prefix is None:\n return 'No prefix provided'\n seed = form.get('seed', None)\n if seed is None:\n return 'No seed provided'\n if not seed.isdigit():\n return 'Seed must be a digit'\n seed = int(seed)\n if seed < 0:\n return 'Seed cannot be negative'\n\n new_seq = self.set_seed(prefix, seed)\n return 'IDServerView: \"%s\" seeded to %s' % (prefix, new_seq)", "private function seed()\n {\n $action = $this->arg->getParameter('action');\n\n if (!in_array($action, ['all', 'table'])) {\n $this->throwFailsCommand('This action is not exists', 'help seed');\n }\n\n if ($action == 'all') {\n if ($this->arg->getParameter('target') != null) {\n $this->throwFailsAction('Bad command usage', 'help seed');\n }\n }\n\n // Set command for understand\n $command = $action;\n\n $this->command->call(\n $command,\n 'seeder',\n $this->arg->getParameter('target')\n );\n }", "def check_manual_seed(seed):\n \"\"\" If manual seed is not specified, choose a random one and communicate it to the user.\n\n \"\"\"\n\n seed = seed or random.randint(1, 10000)\n random.seed(seed)\n torch.manual_seed(seed)\n\n print('Using manual seed: {seed}'.format(seed=seed))", "def setSeed(self, value):\n \"\"\"\n Sets the seed to value.\n \"\"\"\n self.seed = value\n random.seed(self.seed)\n if self.verbosity >= 0:\n print(\"Conx using seed:\", self.seed)" ]
[ 0.7103196382522583, 0.7084529399871826, 0.7084075212478638, 0.7056494355201721, 0.6945380568504333, 0.6907649636268616, 0.6894176006317139, 0.6885712146759033, 0.6882632374763489, 0.6879781484603882, 0.6860505938529968, 0.6801682710647583 ]
Function for manual seed setting. Sets variable seeds and prepares voxels for density model. :param seeds: ndarray (0 - nothing, 1 - object, 2 - background, 3 - object just hard constraints, no model training, 4 - background just hard constraints, no model training)
def set_seeds(self, seeds): """ Function for manual seed setting. Sets variable seeds and prepares voxels for density model. :param seeds: ndarray (0 - nothing, 1 - object, 2 - background, 3 - object just hard constraints, no model training, 4 - background just hard constraints, no model training) """ if self.img.shape != seeds.shape: raise Exception("Seeds must be same size as input image") self.seeds = seeds.astype("int8") self.voxels1 = self.img[self.seeds == 1] self.voxels2 = self.img[self.seeds == 2]
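A standalone sketch of the seed label convention that set_seeds expects: labels 3 and 4 act as hard constraints only, while the density model is trained from the voxels carrying labels 1 and 2 (exactly the selections the method stores in voxels1/voxels2):

import numpy as np

img = np.random.rand(10, 10, 10)
seeds = np.zeros(img.shape, dtype=np.int8)
seeds[4:6, 4:6, 4:6] = 1      # object seeds, used for model training
seeds[0:2, :, :] = 2          # background seeds, used for model training
seeds[7, 7, 7] = 3            # object hard constraint, no training
seeds[9, 9, 9] = 4            # background hard constraint, no training

# what set_seeds() extracts for the density model
voxels1 = img[seeds == 1]     # object training intensities
voxels2 = img[seeds == 2]     # background training intensities
print(voxels1.size, voxels2.size)   # 8 and 200 voxels respectively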
[ "def __set_hard_hard_constraints(self, tdata1, tdata2, seeds):\n \"\"\"\n it works with seed labels:\n 0: nothing\n 1: object 1 - full seeds\n 2: object 2 - full seeds\n 3: object 1 - not a training seeds\n 4: object 2 - not a training seeds\n \"\"\"\n seeds_mask = (seeds == 1) | (seeds == 3)\n tdata2[seeds_mask] = np.max(tdata2) + 1\n tdata1[seeds_mask] = 0\n\n seeds_mask = (seeds == 2) | (seeds == 4)\n tdata1[seeds_mask] = np.max(tdata1) + 1\n tdata2[seeds_mask] = 0\n\n return tdata1, tdata2", "public void setWebSeeds(List<WebSeedEntry> seeds) {\n web_seed_entry_vector v = new web_seed_entry_vector();\n\n for (WebSeedEntry e : seeds) {\n v.push_back(e.swig());\n }\n\n ti.set_web_seeds(v);\n }", "def _set_seed(self):\n \"\"\" Set random seed for numpy and tensorflow packages \"\"\"\n if self.flags['SEED'] is not None:\n tf.set_random_seed(self.flags['SEED'])\n np.random.seed(self.flags['SEED'])", "def set_seed(self, rho_seed, mu_seed):\n\n \"\"\"\n set seeds manually (should add dimensionality check)\n \"\"\"\n\n self.rho = rho_seed\n self.mu = mu_seed", "def prepare_environment(params: Params):\n \"\"\"\n Sets random seeds for reproducible experiments. This may not work as expected\n if you use this from within a python project in which you have already imported Pytorch.\n If you use the scripts/run_model.py entry point to training models with this library,\n your experiments should be reasonably reproducible. If you are using this from your own\n project, you will want to call this function before importing Pytorch. Complete determinism\n is very difficult to achieve with libraries doing optimized linear algebra due to massively\n parallel execution, which is exacerbated by using GPUs.\n\n Parameters\n ----------\n params: Params object or dict, required.\n A ``Params`` object or dict holding the json parameters.\n \"\"\"\n seed = params.pop_int(\"random_seed\", 13370)\n numpy_seed = params.pop_int(\"numpy_seed\", 1337)\n torch_seed = params.pop_int(\"pytorch_seed\", 133)\n\n if seed is not None:\n random.seed(seed)\n if numpy_seed is not None:\n numpy.random.seed(numpy_seed)\n if torch_seed is not None:\n torch.manual_seed(torch_seed)\n # Seed all GPUs with the same seed if available.\n if torch.cuda.is_available():\n torch.cuda.manual_seed_all(torch_seed)\n\n log_pytorch_version_info()", "def set_seed(seed: int):\n \"\"\" Set random seed for python, numpy and pytorch RNGs \"\"\"\n random.seed(seed)\n np.random.seed(seed)\n torch.random.manual_seed(seed)", "def set_seed(self, seed):\n\n \"\"\"\n Override default values for random initial topic assignment,\n set to \"seed\" instead.\n seed is 2-d array (number of samples in LDA model x number\n of tokens in LDA model)\n \"\"\"\n\n assert seed.dtype == np.int and seed.shape == (self.samples, self.N)\n self.topic_seed = seed", "def seeds(args):\n \"\"\"\n %prog seeds [pngfile|jpgfile]\n\n Extract seed metrics from [pngfile|jpgfile]. 
Use --rows and --cols to crop image.\n \"\"\"\n p = OptionParser(seeds.__doc__)\n p.set_outfile()\n opts, args, iopts = add_seeds_options(p, args)\n\n if len(args) != 1:\n sys.exit(not p.print_help())\n\n pngfile, = args\n pf = opts.prefix or op.basename(pngfile).rsplit(\".\", 1)[0]\n sigma, kernel = opts.sigma, opts.kernel\n rows, cols = opts.rows, opts.cols\n labelrows, labelcols = opts.labelrows, opts.labelcols\n ff = opts.filter\n calib = opts.calibrate\n outdir = opts.outdir\n if outdir != '.':\n mkdir(outdir)\n if calib:\n calib = json.load(must_open(calib))\n pixel_cm_ratio, tr = calib[\"PixelCMratio\"], calib[\"RGBtransform\"]\n tr = np.array(tr)\n\n resizefile, mainfile, labelfile, exif = \\\n convert_image(pngfile, pf, outdir=outdir,\n rotate=opts.rotate,\n rows=rows, cols=cols,\n labelrows=labelrows, labelcols=labelcols)\n\n oimg = load_image(resizefile)\n img = load_image(mainfile)\n\n fig, (ax1, ax2, ax3, ax4) = plt.subplots(ncols=4, nrows=1,\n figsize=(iopts.w, iopts.h))\n\n # Edge detection\n img_gray = rgb2gray(img)\n logging.debug(\"Running {0} edge detection ...\".format(ff))\n if ff == \"canny\":\n edges = canny(img_gray, sigma=opts.sigma)\n elif ff == \"roberts\":\n edges = roberts(img_gray)\n elif ff == \"sobel\":\n edges = sobel(img_gray)\n edges = clear_border(edges, buffer_size=opts.border)\n selem = disk(kernel)\n closed = closing(edges, selem) if kernel else edges\n filled = binary_fill_holes(closed)\n\n # Watershed algorithm\n if opts.watershed:\n distance = distance_transform_edt(filled)\n local_maxi = peak_local_max(distance, threshold_rel=.05, indices=False)\n coordinates = peak_local_max(distance, threshold_rel=.05)\n markers, nmarkers = label(local_maxi, return_num=True)\n logging.debug(\"Identified {0} watershed markers\".format(nmarkers))\n labels = watershed(closed, markers, mask=filled)\n else:\n labels = label(filled)\n\n # Object size filtering\n w, h = img_gray.shape\n canvas_size = w * h\n min_size = int(round(canvas_size * opts.minsize / 100))\n max_size = int(round(canvas_size * opts.maxsize / 100))\n logging.debug(\"Find objects with pixels between {0} ({1}%) and {2} ({3}%)\"\\\n .format(min_size, opts.minsize, max_size, opts.maxsize))\n\n # Plotting\n ax1.set_title('Original picture')\n ax1.imshow(oimg)\n\n params = \"{0}, $\\sigma$={1}, $k$={2}\".format(ff, sigma, kernel)\n if opts.watershed:\n params += \", watershed\"\n ax2.set_title('Edge detection\\n({0})'.format(params))\n closed = gray2rgb(closed)\n ax2_img = labels\n if opts.edges:\n ax2_img = closed\n elif opts.watershed:\n ax2.plot(coordinates[:, 1], coordinates[:, 0], 'g.')\n ax2.imshow(ax2_img, cmap=iopts.cmap)\n\n ax3.set_title('Object detection')\n ax3.imshow(img)\n\n filename = op.basename(pngfile)\n if labelfile:\n accession = extract_label(labelfile)\n else:\n accession = pf\n\n # Calculate region properties\n rp = regionprops(labels)\n rp = [x for x in rp if min_size <= x.area <= max_size]\n nb_labels = len(rp)\n logging.debug(\"A total of {0} objects identified.\".format(nb_labels))\n objects = []\n for i, props in enumerate(rp):\n i += 1\n if i > opts.count:\n break\n\n y0, x0 = props.centroid\n orientation = props.orientation\n major, minor = props.major_axis_length, props.minor_axis_length\n major_dx = cos(orientation) * major / 2\n major_dy = sin(orientation) * major / 2\n minor_dx = sin(orientation) * minor / 2\n minor_dy = cos(orientation) * minor / 2\n ax2.plot((x0 - major_dx, x0 + major_dx),\n (y0 + major_dy, y0 - major_dy), 'r-')\n ax2.plot((x0 - minor_dx, x0 + 
minor_dx),\n (y0 - minor_dy, y0 + minor_dy), 'r-')\n\n npixels = int(props.area)\n # Sample the center of the blob for color\n d = min(int(round(minor / 2 * .35)) + 1, 50)\n x0d, y0d = int(round(x0)), int(round(y0))\n square = img[(y0d - d):(y0d + d), (x0d - d):(x0d + d)]\n pixels = []\n for row in square:\n pixels.extend(row)\n logging.debug(\"Seed #{0}: {1} pixels ({2} sampled) - {3:.2f}%\".\\\n format(i, npixels, len(pixels), 100. * npixels / canvas_size))\n\n rgb = pixel_stats(pixels)\n objects.append(Seed(filename, accession, i, rgb, props, exif))\n minr, minc, maxr, maxc = props.bbox\n rect = Rectangle((minc, minr), maxc - minc, maxr - minr,\n fill=False, ec='w', lw=1)\n ax3.add_patch(rect)\n mc, mr = (minc + maxc) / 2, (minr + maxr) / 2\n ax3.text(mc, mr, \"{0}\".format(i), color='w',\n ha=\"center\", va=\"center\", size=6)\n\n for ax in (ax2, ax3):\n ax.set_xlim(0, h)\n ax.set_ylim(w, 0)\n\n # Output identified seed stats\n ax4.text(.1, .92, \"File: {0}\".format(latex(filename)), color='g')\n ax4.text(.1, .86, \"Label: {0}\".format(latex(accession)), color='m')\n yy = .8\n fw = must_open(opts.outfile, \"w\")\n if not opts.noheader:\n print(Seed.header(calibrate=calib), file=fw)\n for o in objects:\n if calib:\n o.calibrate(pixel_cm_ratio, tr)\n print(o, file=fw)\n i = o.seedno\n if i > 7:\n continue\n ax4.text(.01, yy, str(i), va=\"center\", bbox=dict(fc='none', ec='k'))\n ax4.text(.1, yy, o.pixeltag, va=\"center\")\n yy -= .04\n ax4.add_patch(Rectangle((.1, yy - .025), .12, .05, lw=0,\n fc=rgb_to_hex(o.rgb)))\n ax4.text(.27, yy, o.hashtag, va=\"center\")\n yy -= .06\n ax4.text(.1 , yy, \"(A total of {0} objects displayed)\".format(nb_labels),\n color=\"darkslategrey\")\n normalize_axes(ax4)\n\n for ax in (ax1, ax2, ax3):\n xticklabels = [int(x) for x in ax.get_xticks()]\n yticklabels = [int(x) for x in ax.get_yticks()]\n ax.set_xticklabels(xticklabels, family='Helvetica', size=8)\n ax.set_yticklabels(yticklabels, family='Helvetica', size=8)\n\n image_name = op.join(outdir, pf + \".\" + iopts.format)\n savefig(image_name, dpi=iopts.dpi, iopts=iopts)\n return objects", "def _process_input_seed(seed_photon_fields):\n \"\"\"\n take input list of seed_photon_fields and fix them into usable format\n \"\"\"\n\n Tcmb = 2.72548 * u.K # 0.00057 K\n Tfir = 30 * u.K\n ufir = 0.5 * u.eV / u.cm ** 3\n Tnir = 3000 * u.K\n unir = 1.0 * u.eV / u.cm ** 3\n\n # Allow for seed_photon_fields definitions of the type 'CMB-NIR-FIR' or\n # 'CMB'\n if type(seed_photon_fields) != list:\n seed_photon_fields = seed_photon_fields.split(\"-\")\n\n result = OrderedDict()\n\n for idx, inseed in enumerate(seed_photon_fields):\n seed = {}\n if isinstance(inseed, six.string_types):\n name = inseed\n seed[\"type\"] = \"thermal\"\n if inseed == \"CMB\":\n seed[\"T\"] = Tcmb\n seed[\"u\"] = ar * Tcmb ** 4\n seed[\"isotropic\"] = True\n elif inseed == \"FIR\":\n seed[\"T\"] = Tfir\n seed[\"u\"] = ufir\n seed[\"isotropic\"] = True\n elif inseed == \"NIR\":\n seed[\"T\"] = Tnir\n seed[\"u\"] = unir\n seed[\"isotropic\"] = True\n else:\n log.warning(\n \"Will not use seed {0} because it is not \"\n \"CMB, FIR or NIR\".format(inseed)\n )\n raise TypeError\n elif type(inseed) == list and (\n len(inseed) == 3 or len(inseed) == 4\n ):\n isotropic = len(inseed) == 3\n\n if isotropic:\n name, T, uu = inseed\n seed[\"isotropic\"] = True\n else:\n name, T, uu, theta = inseed\n seed[\"isotropic\"] = False\n seed[\"theta\"] = validate_scalar(\n \"{0}-theta\".format(name), theta, physical_type=\"angle\"\n )\n\n thermal = 
T.unit.physical_type == \"temperature\"\n\n if thermal:\n seed[\"type\"] = \"thermal\"\n validate_scalar(\n \"{0}-T\".format(name),\n T,\n domain=\"positive\",\n physical_type=\"temperature\",\n )\n seed[\"T\"] = T\n if uu == 0:\n seed[\"u\"] = ar * T ** 4\n else:\n # pressure has same physical type as energy density\n validate_scalar(\n \"{0}-u\".format(name),\n uu,\n domain=\"positive\",\n physical_type=\"pressure\",\n )\n seed[\"u\"] = uu\n else:\n seed[\"type\"] = \"array\"\n # Ensure everything is in arrays\n T = u.Quantity((T,)).flatten()\n uu = u.Quantity((uu,)).flatten()\n\n seed[\"energy\"] = validate_array(\n \"{0}-energy\".format(name),\n T,\n domain=\"positive\",\n physical_type=\"energy\",\n )\n\n if np.isscalar(seed[\"energy\"]) or seed[\"energy\"].size == 1:\n seed[\"photon_density\"] = validate_scalar(\n \"{0}-density\".format(name),\n uu,\n domain=\"positive\",\n physical_type=\"pressure\",\n )\n else:\n if uu.unit.physical_type == \"pressure\":\n uu /= seed[\"energy\"] ** 2\n seed[\"photon_density\"] = validate_array(\n \"{0}-density\".format(name),\n uu,\n domain=\"positive\",\n physical_type=\"differential number density\",\n )\n else:\n raise TypeError(\n \"Unable to process seed photon\"\n \" field: {0}\".format(inseed)\n )\n\n result[name] = seed\n\n return result", "def set_parameters\n # Convert the volumes to vectors:\n @vectors = Array.new\n @volumes.each {|volume| @vectors << volume.flatten}\n verify_equal_vector_lengths\n # Number of voxels:\n @n = @vectors.first.length\n # Number of raters:\n @r = @vectors.length\n # Decisions array:\n @decisions = NArray.int(@n, @r)\n # Sensitivity vector: (Def: true positive fraction, or relative frequency of Dij = 1 when Ti = 1)\n # (If a rater includes all the voxels that are included in the true segmentation, his score is 1.0 on this parameter)\n @p = NArray.float(@r)\n # Specificity vector: (Def: true negative fraction, or relative frequency of Dij = 0 when Ti = 0)\n # (If a rater has avoided to specify any voxels that are not specified in the true segmentation, his score is 1.0 on this parameter)\n @q = NArray.float(@r)\n # Set initial parameter values: (p0, q0) - when combined, called: phi0\n @p.fill!(0.99999)\n @q.fill!(0.99999)\n # Combined scoring parameter:\n @phi = NArray.float(2, @r)\n # Fill the decisions matrix:\n @vectors.each_with_index do |decision, j|\n @decisions[true, j] = decision\n end\n # Indicator vector of the true (hidden) segmentation:\n @true_segmentation = NArray.byte(@n)\n # The estimate of the probability that the true segmentation at each voxel is Ti = 1: f(Ti=1)\n @weights_previous = NArray.float(@n)\n # Using the notation commom for EM algorithms and refering to this as the weight variable:\n @weights_current = NArray.float(@n)\n end", "def return_fv_by_seeds(fv, seeds=None, unique_cls=None):\n \"\"\"\n Return features selected by seeds and unique_cls or selection from features and corresponding seed classes.\n\n :param fv: ndarray with lineariezed feature. It's shape is MxN, where M is number of image pixels and N is number\n of features\n :param seeds: ndarray with seeds. Does not to be linear.\n :param unique_cls: number of used seeds clases. 
Like [1, 2]\n :return: fv, sd - selection from feature vector and selection from seeds or just fv for whole image\n \"\"\"\n if seeds is not None:\n if unique_cls is not None:\n return select_from_fv_by_seeds(fv, seeds, unique_cls)\n else:\n raise AssertionError(\"Input unique_cls has to be not None if seeds is not None.\")\n else:\n return fv", "def fit_from_image(self, data, voxelsize, seeds, unique_cls):\n \"\"\"\n This Method allows computes feature vector and train model.\n\n :cls: list of index number of requested classes in seeds\n \"\"\"\n fvs, clsselected = self.features_from_image(data, voxelsize, seeds, unique_cls)\n self.fit(fvs, clsselected)" ]
[ 0.724362850189209, 0.7048511505126953, 0.6971395015716553, 0.6954063177108765, 0.6898720860481262, 0.6891217231750488, 0.6812682151794434, 0.6777561902999878, 0.6749814748764038, 0.6689315438270569, 0.6673877835273743, 0.6638182401657104 ]
Run the Graph Cut segmentation according to preset parameters. :param run_fit_model: Allows skipping the model fit when the model has been prepared beforehand :return:
def run(self, run_fit_model=True): """ Run the Graph Cut segmentation according to preset parameters. :param run_fit_model: Allow to skip model fit when the model is prepared before :return: """ if run_fit_model: self.fit_model(self.img, self.voxelsize, self.seeds) self._start_time = time.time() if self.segparams["method"].lower() in ("graphcut", "gc"): self.__single_scale_gc_run() elif self.segparams["method"].lower() in ( "multiscale_graphcut", "multiscale_gc", "msgc", "msgc_lo2hi", "lo2hi", "multiscale_graphcut_lo2hi", ): logger.debug("performing multiscale Graph-Cut lo2hi") self.__multiscale_gc_lo2hi_run() elif self.segparams["method"].lower() in ( "msgc_hi2lo", "hi2lo", "multiscale_graphcut_hi2lo", ): logger.debug("performing multiscale Graph-Cut hi2lo") self.__multiscale_gc_hi2lo_run() else: logger.error("Unknown segmentation method: " + self.segparams["method"])
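For context, a hedged end-to-end usage sketch: the class name, import path and constructor signature below are assumptions (the interface matches imcut's ImageGraphCut), while the set_seeds()/run() calls, the segmentation attribute and the "method" strings are taken from the code in this section.

import numpy as np
from imcut.pycut import ImageGraphCut   # assumed import path

img = np.random.rand(32, 32, 32).astype(np.float32)
seeds = np.zeros(img.shape, dtype=np.int8)
seeds[12:20, 12:20, 12:20] = 1           # object seeds
seeds[0:4, :, :] = 2                     # background seeds

# "graphcut" is the single-scale method; "msgc_lo2hi" / "msgc_hi2lo"
# select the multiscale variants dispatched in run() above.
gc = ImageGraphCut(img, voxelsize=[1.0, 1.0, 1.0],
                   segparams={"method": "graphcut"})
gc.set_seeds(seeds)
gc.run()                                  # fits the model, then performs the cut
segmentation = gc.segmentation            # 0/1 label volume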
[ "def model_segments(copy_file, work_dir, paired):\n \"\"\"Perform segmentation on input copy number log2 ratio file.\n \"\"\"\n out_file = os.path.join(work_dir, \"%s.cr.seg\" % dd.get_sample_name(paired.tumor_data))\n tumor_counts, normal_counts = heterogzygote_counts(paired)\n if not utils.file_exists(out_file):\n with file_transaction(paired.tumor_data, out_file) as tx_out_file:\n params = [\"-T\", \"ModelSegments\",\n \"--denoised-copy-ratios\", copy_file,\n \"--allelic-counts\", tumor_counts,\n \"--output-prefix\", dd.get_sample_name(paired.tumor_data),\n \"-O\", os.path.dirname(tx_out_file)]\n if normal_counts:\n params += [\"--normal-allelic-counts\", normal_counts]\n _run_with_memory_scaling(params, tx_out_file, paired.tumor_data)\n for tx_fname in glob.glob(os.path.join(os.path.dirname(tx_out_file),\n \"%s*\" % dd.get_sample_name(paired.tumor_data))):\n shutil.copy(tx_fname, os.path.join(work_dir, os.path.basename(tx_fname)))\n return {\"seg\": out_file, \"tumor_hets\": out_file.replace(\".cr.seg\", \".hets.tsv\"),\n \"final_seg\": out_file.replace(\".cr.seg\", \".modelFinal.seg\")}", "def run_model(self,\n op_list,\n num_steps,\n feed_vars=(),\n feed_data=None,\n print_every=100,\n allow_initialize=True):\n \"\"\"Runs `op_list` for `num_steps`.\n\n Args:\n op_list: A list of ops to run.\n num_steps: Number of steps to run this for. If feeds are used, this is a\n maximum. `None` can be used to signal \"forever\".\n feed_vars: The variables to feed.\n feed_data: An iterator that feeds data tuples.\n print_every: Print a log line and checkpoing every so many steps.\n allow_initialize: If True, the model will be initialized if any variable\n is uninitialized, if False the model will not be initialized.\n Returns:\n The final run result as a list.\n Raises:\n ValueError: If feed_data doesn't match feed_vars.\n \"\"\"\n feed_data = feed_data or itertools.repeat(())\n\n ops = [bookkeeper.global_step()]\n ops.extend(op_list)\n\n sess = tf.get_default_session()\n self.prepare_model(sess, allow_initialize=allow_initialize)\n results = []\n\n try:\n if num_steps is None:\n counter = itertools.count(0)\n elif num_steps >= 0:\n counter = xrange(num_steps)\n else:\n raise ValueError('num_steps cannot be negative: %s' % num_steps)\n for i, data in zip(counter, feed_data):\n log_this_time = print_every and i % print_every == 0\n if len(data) != len(feed_vars):\n raise ValueError(\n 'feed_data and feed_vars must be the same length: %d vs %d' % (\n len(data), len(feed_vars)))\n if self._coord.should_stop():\n print('Coordinator stopped')\n sys.stdout.flush()\n self.stop_queues()\n break\n if len(feed_vars) != len(data):\n raise ValueError('Feed vars must be the same length as data.')\n\n if log_this_time and self._summary_writer:\n results = sess.run(ops + [self._summaries],\n dict(zip(feed_vars, data)))\n self._summary_writer.add_summary(results[-1], results[0])\n results = results[:-1]\n else:\n results = sess.run(ops, dict(zip(feed_vars, data)))\n if log_this_time:\n self._log_and_save(sess, results)\n\n # Print the last line if it wasn't just printed\n if print_every and not log_this_time:\n self._log_and_save(sess, results)\n except tf.errors.OutOfRangeError as ex:\n print('Done training -- epoch limit reached %s' % ex.message)\n sys.stdout.flush()\n self.stop_queues()\n except BaseException as ex:\n print('Exception -- stopping threads: %s' % ex, file=sys.stderr)\n sys.stdout.flush()\n self.stop_queues()\n raise\n return results", "def __multiscale_gc_hi2lo_run(self): # , pyed):\n \"\"\"\n 
Run Graph-Cut segmentation with simplifiyng of high resolution multiscale graph.\n In first step is performed normal GC on low resolution data\n Second step construct finer grid on edges of segmentation from first\n step.\n There is no option for use without `use_boundary_penalties`\n \"\"\"\n # from PyQt4.QtCore import pyqtRemoveInputHook\n # pyqtRemoveInputHook()\n\n self.__msgc_step0_init()\n hard_constraints = self.__msgc_step12_low_resolution_segmentation()\n # ===== high resolution data processing\n seg = self.__msgc_step3_discontinuity_localization()\n nlinks, unariesalt2, msinds = self.__msgc_step45678_hi2lo_construct_graph(\n hard_constraints, seg\n )\n self.__msgc_step9_finish_perform_gc_and_reshape(nlinks, unariesalt2, msinds)", "def preprocess_D_segs(self, generative_model, genomic_data):\n \"\"\"Process P(delDl, delDr|D) into Pi arrays.\n \n Sets the attributes PD_nt_pos_vec, PD_2nd_nt_pos_per_aa_vec, \n min_delDl_given_DdelDr, max_delDl_given_DdelDr, and zeroD_given_D.\n \n Parameters\n ----------\n generative_model : GenerativeModelVDJ\n VDJ generative model class containing the model parameters. \n genomic_data : GenomicDataVDJ\n VDJ genomic data class containing the V, D, and J germline \n sequences and info.\n \n \"\"\"\n \n cutD_genomic_CDR3_segs = genomic_data.cutD_genomic_CDR3_segs\n nt2num = {'A': 0, 'C': 1, 'G': 2, 'T': 3}\n num_dell_pos, num_delr_pos, num_D_genes = generative_model.PdelDldelDr_given_D.shape\n \n #These arrays only include the nt identity information, not the PdelDldelDr_given_D info\n PD_nt_pos_vec = [[]]*num_D_genes\n PD_2nd_nt_pos_per_aa_vec = [[]]*num_D_genes\n for D_in in range(num_D_genes):\n \n current_PD_nt_pos_vec = np.zeros((4, len(cutD_genomic_CDR3_segs[D_in])))\n current_PD_2nd_nt_pos_per_aa_vec = {}\n for aa in self.codons_dict.keys():\n current_PD_2nd_nt_pos_per_aa_vec[aa] = np.zeros((4, len(cutD_genomic_CDR3_segs[D_in])))\n \n for pos, nt in enumerate(cutD_genomic_CDR3_segs[D_in]):\n current_PD_nt_pos_vec[nt2num[nt], pos] = 1\n for ins_nt in 'ACGT':\n for aa in self.codons_dict.keys():\n if ins_nt + cutD_genomic_CDR3_segs[D_in][pos:pos+2] in self.codons_dict[aa]:\n current_PD_2nd_nt_pos_per_aa_vec[aa][nt2num[ins_nt], pos] = 1\n \n PD_nt_pos_vec[D_in] = current_PD_nt_pos_vec\n PD_2nd_nt_pos_per_aa_vec[D_in] = current_PD_2nd_nt_pos_per_aa_vec\n \n min_delDl_given_DdelDr = [[]]*num_D_genes\n max_delDl_given_DdelDr = [[]]*num_D_genes\n zeroD_given_D = [[]]*num_D_genes\n for D_in in range(num_D_genes):\n current_min_delDl_given_delDr = [0]*num_delr_pos\n current_max_delDl_given_delDr = [0]*num_delr_pos\n current_zeroD = 0\n for delr in range(num_delr_pos):\n \n if num_dell_pos > len(cutD_genomic_CDR3_segs[D_in])-delr:\n current_zeroD += generative_model.PdelDldelDr_given_D[len(cutD_genomic_CDR3_segs[D_in])-delr, delr, D_in]\n \n dell = 0\n while generative_model.PdelDldelDr_given_D[dell, delr, D_in]==0 and dell<num_dell_pos-1:\n dell+=1\n if generative_model.PdelDldelDr_given_D[dell, delr, D_in] == 0:\n current_min_delDl_given_delDr[delr] = -1\n else:\n current_min_delDl_given_delDr[delr] = dell\n if current_min_delDl_given_delDr[delr] == -1:\n current_max_delDl_given_delDr[delr] = -1\n else:\n dell = num_dell_pos-1\n while generative_model.PdelDldelDr_given_D[dell, delr, D_in]==0 and dell>=0:\n dell -= 1\n if generative_model.PdelDldelDr_given_D[dell, delr, D_in] == 0:\n current_max_delDl_given_delDr[delr] = -1\n else:\n current_max_delDl_given_delDr[delr] = dell\n \n min_delDl_given_DdelDr[D_in] = current_min_delDl_given_delDr\n 
max_delDl_given_DdelDr[D_in] = current_max_delDl_given_delDr\n zeroD_given_D[D_in] = current_zeroD\n \n self.PD_nt_pos_vec = PD_nt_pos_vec\n self.PD_2nd_nt_pos_per_aa_vec = PD_2nd_nt_pos_per_aa_vec\n self.min_delDl_given_DdelDr = min_delDl_given_DdelDr \n self.max_delDl_given_DdelDr = max_delDl_given_DdelDr\n self.zeroD_given_D = zeroD_given_D", "def __multiscale_gc_lo2hi_run(self): # , pyed):\n \"\"\"\n Run Graph-Cut segmentation with refinement of low resolution multiscale graph.\n In first step is performed normal GC on low resolution data\n Second step construct finer grid on edges of segmentation from first\n step.\n There is no option for use without `use_boundary_penalties`\n \"\"\"\n # from PyQt4.QtCore import pyqtRemoveInputHook\n # pyqtRemoveInputHook()\n self._msgc_lo2hi_resize_init()\n self.__msgc_step0_init()\n\n hard_constraints = self.__msgc_step12_low_resolution_segmentation()\n # ===== high resolution data processing\n seg = self.__msgc_step3_discontinuity_localization()\n\n self.stats[\"t3.1\"] = (time.time() - self._start_time)\n graph = Graph(\n seg,\n voxelsize=self.voxelsize,\n nsplit=self.segparams[\"block_size\"],\n edge_weight_table=self._msgc_npenalty_table,\n compute_low_nodes_index=True,\n )\n\n # graph.run() = graph.generate_base_grid() + graph.split_voxels()\n # graph.run()\n graph.generate_base_grid()\n self.stats[\"t3.2\"] = (time.time() - self._start_time)\n graph.split_voxels()\n\n self.stats[\"t3.3\"] = (time.time() - self._start_time)\n\n self.stats.update(graph.stats)\n self.stats[\"t4\"] = (time.time() - self._start_time)\n mul_mask, mul_val = self.__msgc_tlinks_area_weight_from_low_segmentation(seg)\n area_weight = 1\n unariesalt = self.__create_tlinks(\n self.img,\n self.voxelsize,\n self.seeds,\n area_weight=area_weight,\n hard_constraints=hard_constraints,\n mul_mask=None,\n mul_val=None,\n )\n # N-links prepared\n self.stats[\"t5\"] = (time.time() - self._start_time)\n un, ind = np.unique(graph.msinds, return_index=True)\n self.stats[\"t6\"] = (time.time() - self._start_time)\n\n self.stats[\"t7\"] = (time.time() - self._start_time)\n unariesalt2_lo2hi = np.hstack(\n [unariesalt[ind, 0, 0].reshape(-1, 1), unariesalt[ind, 0, 1].reshape(-1, 1)]\n )\n nlinks_lo2hi = np.hstack([graph.edges, graph.edges_weights.reshape(-1, 1)])\n if self.debug_images:\n import sed3\n\n ed = sed3.sed3(unariesalt[:, :, 0].reshape(self.img.shape))\n ed.show()\n import sed3\n\n ed = sed3.sed3(unariesalt[:, :, 1].reshape(self.img.shape))\n ed.show()\n # ed = sed3.sed3(seg)\n # ed.show()\n # import sed3\n # ed = sed3.sed3(graph.data)\n # ed.show()\n # import sed3\n # ed = sed3.sed3(graph.msinds)\n # ed.show()\n\n # nlinks, unariesalt2, msinds = self.__msgc_step45678_construct_graph(area_weight, hard_constraints, seg)\n # self.__msgc_step9_finish_perform_gc_and_reshape(nlinks, unariesalt2, msinds)\n self.__msgc_step9_finish_perform_gc_and_reshape(\n nlinks_lo2hi, unariesalt2_lo2hi, graph.msinds\n )\n self._msgc_lo2hi_resize_clean_finish()", "def create_body(arch:Callable, pretrained:bool=True, cut:Optional[Union[int, Callable]]=None):\n \"Cut off the body of a typically pretrained `model` at `cut` (int) or cut the model as specified by `cut(model)` (function).\"\n model = arch(pretrained)\n cut = ifnone(cut, cnn_config(arch)['cut'])\n if cut is None:\n ll = list(enumerate(model.children()))\n cut = next(i for i,o in reversed(ll) if has_pool_type(o))\n if isinstance(cut, int): return nn.Sequential(*list(model.children())[:cut])\n elif isinstance(cut, Callable): return 
cut(model)\n else: raise NamedError(\"cut must be either integer or a function\")", "def _run_model(iterator, args, tf_args):\n \"\"\"mapPartitions function to run single-node inferencing from a checkpoint/saved_model, using the model's input/output mappings.\n\n Args:\n :iterator: input RDD partition iterator.\n :args: arguments for TFModel, in argparse format\n :tf_args: arguments for TensorFlow inferencing code, in argparse or ARGV format.\n\n Returns:\n An iterator of result data.\n \"\"\"\n single_node_env(tf_args)\n\n logging.info(\"===== input_mapping: {}\".format(args.input_mapping))\n logging.info(\"===== output_mapping: {}\".format(args.output_mapping))\n input_tensor_names = [tensor for col, tensor in sorted(args.input_mapping.items())]\n output_tensor_names = [tensor for tensor, col in sorted(args.output_mapping.items())]\n\n # if using a signature_def_key, get input/output tensor info from the requested signature\n if args.signature_def_key:\n assert args.export_dir, \"Inferencing with signature_def_key requires --export_dir argument\"\n logging.info(\"===== loading meta_graph_def for tag_set ({0}) from saved_model: {1}\".format(args.tag_set, args.export_dir))\n meta_graph_def = get_meta_graph_def(args.export_dir, args.tag_set)\n signature = meta_graph_def.signature_def[args.signature_def_key]\n logging.debug(\"signature: {}\".format(signature))\n inputs_tensor_info = signature.inputs\n logging.debug(\"inputs_tensor_info: {0}\".format(inputs_tensor_info))\n outputs_tensor_info = signature.outputs\n logging.debug(\"outputs_tensor_info: {0}\".format(outputs_tensor_info))\n\n result = []\n\n global global_sess, global_args\n if global_sess and global_args == args:\n # if graph/session already loaded/started (and using same args), just reuse it\n sess = global_sess\n else:\n # otherwise, create new session and load graph from disk\n tf.reset_default_graph()\n sess = tf.Session(graph=tf.get_default_graph())\n if args.export_dir:\n assert args.tag_set, \"Inferencing from a saved_model requires --tag_set\"\n # load graph from a saved_model\n logging.info(\"===== restoring from saved_model: {}\".format(args.export_dir))\n loader.load(sess, args.tag_set.split(','), args.export_dir)\n elif args.model_dir:\n # load graph from a checkpoint\n ckpt = tf.train.latest_checkpoint(args.model_dir)\n assert ckpt, \"Invalid model checkpoint path: {}\".format(args.model_dir)\n logging.info(\"===== restoring from checkpoint: {}\".format(ckpt + \".meta\"))\n saver = tf.train.import_meta_graph(ckpt + \".meta\", clear_devices=True)\n saver.restore(sess, ckpt)\n else:\n raise Exception(\"Inferencing requires either --model_dir or --export_dir argument\")\n global_sess = sess\n global_args = args\n\n # get list of input/output tensors (by name)\n if args.signature_def_key:\n input_tensors = [inputs_tensor_info[t].name for t in input_tensor_names]\n output_tensors = [outputs_tensor_info[output_tensor_names[0]].name]\n else:\n input_tensors = [t + ':0' for t in input_tensor_names]\n output_tensors = [t + ':0' for t in output_tensor_names]\n\n logging.info(\"input_tensors: {0}\".format(input_tensors))\n logging.info(\"output_tensors: {0}\".format(output_tensors))\n\n # feed data in batches and return output tensors\n for tensors in yield_batch(iterator, args.batch_size, len(input_tensor_names)):\n inputs_feed_dict = {}\n for i in range(len(input_tensors)):\n inputs_feed_dict[input_tensors[i]] = tensors[i]\n\n outputs = sess.run(output_tensors, feed_dict=inputs_feed_dict)\n lengths = [len(output) for output 
in outputs]\n input_size = len(tensors[0])\n assert all([length == input_size for length in lengths]), \"Output array sizes {} must match input size: {}\".format(lengths, input_size)\n python_outputs = [output.tolist() for output in outputs] # convert from numpy to standard python types\n result.extend(zip(*python_outputs)) # convert to an array of tuples of \"output columns\"\n\n return result", "def fit(self, data, debug=False):\n \"\"\"\n Fit each segment. Segments that have not already been explicitly\n added will be automatically added with default model and ytransform.\n\n Parameters\n ----------\n data : pandas.DataFrame\n Must have a column with the same name as `segmentation_col`.\n debug : bool\n If set to true will pass debug to the fit method of each model.\n\n Returns\n -------\n fits : dict of statsmodels.regression.linear_model.OLSResults\n Keys are the segment names.\n\n \"\"\"\n data = util.apply_filter_query(data, self.fit_filters)\n\n unique = data[self.segmentation_col].unique()\n value_counts = data[self.segmentation_col].value_counts()\n\n # Remove any existing segments that may no longer have counterparts\n # in the data. This can happen when loading a saved model and then\n # calling this method with data that no longer has segments that\n # were there the last time this was called.\n gone = set(self._group.models) - set(unique)\n for g in gone:\n del self._group.models[g]\n\n for x in unique:\n if x not in self._group.models and \\\n value_counts[x] > self.min_segment_size:\n self.add_segment(x)\n\n with log_start_finish(\n 'fitting models in segmented model {}'.format(self.name),\n logger):\n return self._group.fit(data, debug=debug)", "def addCuts(self, checkonly):\n \"\"\"add cuts if necessary and return whether model is feasible\"\"\"\n cutsadded = False\n edges = []\n x = self.model.data\n for (i, j) in x:\n if self.model.getVal(x[i, j]) > .5:\n if i != V[0] and j != V[0]:\n edges.append((i, j))\n G = networkx.Graph()\n G.add_edges_from(edges)\n Components = list(networkx.connected_components(G))\n for S in Components:\n S_card = len(S)\n q_sum = sum(q[i] for i in S)\n NS = int(math.ceil(float(q_sum) / Q))\n S_edges = [(i, j) for i in S for j in S if i < j and (i, j) in edges]\n if S_card >= 3 and (len(S_edges) >= S_card or NS > 1):\n cutsadded = True\n if checkonly:\n break\n else:\n self.model.addCons(quicksum(x[i, j] for i in S for j in S if j > i) <= S_card - NS)\n print(\"adding cut for\", S_edges)\n\n return cutsadded", "def graphcut_subprocesses(graphcut_function, graphcut_arguments, processes = None):\n \"\"\"\n Executes multiple graph cuts in parallel.\n This can result in a significant speed-up.\n \n Parameters\n ----------\n graphcut_function : function\n The graph cut to use (e.g. `graphcut_stawiaski`).\n graphcut_arguments : tuple\n List of arguments to pass to the respective subprocesses resp. 
the ``graphcut_function``.\n processes : integer or None\n The number of processes to run simultaneously, if not supplied, will be the same\n as the number of processors.\n \n Returns\n -------\n segmentations : tuple of ndarray\n The graph-cut segmentation results as list of boolean arraya.\n \"\"\"\n # initialize logger\n logger = Logger.getInstance()\n \n # check and eventually enhance input parameters\n if not processes: processes = multiprocessing.cpu_count()\n if not int == type(processes) or processes <= 0: raise ArgumentError('The number processes can not be zero or negative.')\n \n logger.debug('Executing graph cuts in {} subprocesses.'.format(multiprocessing.cpu_count()))\n \n # creates subprocess pool and execute\n pool = multiprocessing.Pool(processes)\n results = pool.map(graphcut_function, graphcut_arguments)\n \n return results", "def _pare_down_model(self, strain_gempro, genes_to_remove):\n \"\"\"Mark genes as non-functional in a GEM-PRO. If there is a COBRApy model associated with it, the\n COBRApy method delete_model_genes is utilized to delete genes.\n\n Args:\n strain_gempro (GEMPRO): GEMPRO object\n genes_to_remove (list): List of gene IDs to remove from the model\n\n \"\"\"\n # Filter out genes in genes_to_remove which do not show up in the model\n strain_genes = [x.id for x in strain_gempro.genes]\n genes_to_remove.extend(self.missing_in_orthology_matrix)\n genes_to_remove = list(set(genes_to_remove).intersection(set(strain_genes)))\n\n if len(genes_to_remove) == 0:\n log.info('{}: no genes marked non-functional'.format(strain_gempro.id))\n return\n else:\n log.debug('{}: {} genes to be marked non-functional'.format(strain_gempro.id, len(genes_to_remove)))\n\n # If a COBRApy model exists, utilize the delete_model_genes method\n if strain_gempro.model:\n strain_gempro.model._trimmed = False\n strain_gempro.model._trimmed_genes = []\n strain_gempro.model._trimmed_reactions = {}\n\n # Delete genes!\n cobra.manipulation.delete_model_genes(strain_gempro.model, genes_to_remove)\n\n if strain_gempro.model._trimmed:\n log.info('{}: marked {} genes as non-functional, '\n 'deactivating {} reactions'.format(strain_gempro.id, len(strain_gempro.model._trimmed_genes),\n len(strain_gempro.model._trimmed_reactions)))\n # Otherwise, just mark the genes as non-functional\n else:\n for g in genes_to_remove:\n strain_gempro.genes.get_by_id(g).functional = False\n log.info('{}: marked {} genes as non-functional'.format(strain_gempro.id, len(genes_to_remove)))", "def _post_run_hook(self, runtime):\n ''' generates a report showing nine slices, three per axis, of an\n arbitrary volume of `in_files`, with the resulting segmentation\n overlaid '''\n self._anat_file = self.inputs.in_files[0]\n outputs = self.aggregate_outputs(runtime=runtime)\n self._mask_file = outputs.tissue_class_map\n # We are skipping the CSF class because with combination with others\n # it only shows the skullstriping mask\n self._seg_files = outputs.tissue_class_files[1:]\n self._masked = False\n\n NIWORKFLOWS_LOG.info('Generating report for FAST (in_files %s, '\n 'segmentation %s, individual tissue classes %s).',\n self.inputs.in_files,\n outputs.tissue_class_map,\n outputs.tissue_class_files)\n\n return super(FASTRPT, self)._post_run_hook(runtime)" ]
[ 0.6835083961486816, 0.6589675545692444, 0.6549115777015686, 0.6541705131530762, 0.6504920125007629, 0.6443780660629272, 0.6398264765739441, 0.6389582753181458, 0.6380501985549927, 0.6336901187896729, 0.6321241855621338, 0.6307634711265564 ]
It works with seed labels: 0: nothing; 1: object 1 - full seeds; 2: object 2 - full seeds; 3: object 1 - seeds not used for training; 4: object 2 - seeds not used for training
def __set_hard_hard_constraints(self, tdata1, tdata2, seeds): """ it works with seed labels: 0: nothing 1: object 1 - full seeds 2: object 2 - full seeds 3: object 1 - not a training seeds 4: object 2 - not a training seeds """ seeds_mask = (seeds == 1) | (seeds == 3) tdata2[seeds_mask] = np.max(tdata2) + 1 tdata1[seeds_mask] = 0 seeds_mask = (seeds == 2) | (seeds == 4) tdata1[seeds_mask] = np.max(tdata1) + 1 tdata2[seeds_mask] = 0 return tdata1, tdata2
[ "def get_seed_sub(self, label):\r\n \"\"\" Return list of all seeds with specific label\r\n \"\"\"\r\n sx, sy, sz = np.nonzero(self.seeds == label)\r\n\r\n return sx, sy, sz", "def identify(label, column_type = :integer)\n if column_type == :uuid\n NamedSeeds.uuid_v5(label)\n else\n Zlib.crc32(label.to_s) % MAX_ID\n end\n end", "function to_sects(o){\n return order.map(function(label){\n return o[label] ? sect(label, o[label]) : null ;\n }).filter(function(a){ return !!a; });\n}", "def text2labels(text, sents):\n '''\n Marks all characters in given `text`, that doesn't exists within any\n element of `sents` with `1` character, other characters (within sentences)\n will be marked with `0`\n Used in training process\n >>> text = 'привет. меня зовут аня.'\n >>> sents = ['привет.', 'меня зовут аня.']\n >>> labels = text2labels(text, sents)\n >>> ' '.join(text)\n >>> 'п р и в е т . м е н я з о в у т а н я .'\n >>> ' '.join(labels)\n >>> '0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0'\n '''\n labels = [c for c in text]\n for sent in sents:\n start = text.index(sent)\n finish = start + len(sent)\n labels[start:finish] = '0' * len(sent)\n for i, c in enumerate(labels):\n if c != '0':\n labels[i] = '1'\n return labels", "def seed_zoom(seeds, zoom):\n \"\"\"\n Smart zoom for sparse matrix. If there is resize to bigger resolution\n thin line of label could be lost. This function prefers labels larger\n then zero. If there is only one small voxel in larger volume with zeros\n it is selected.\n \"\"\"\n # import scipy\n # loseeds=seeds\n labels = np.unique(seeds)\n # remove first label - 0\n labels = np.delete(labels, 0)\n # @TODO smart interpolation for seeds in one block\n # loseeds = scipy.ndimage.interpolation.zoom(\n # seeds, zoom, order=0)\n loshape = np.ceil(np.array(seeds.shape) * 1.0 / zoom).astype(np.int)\n loseeds = np.zeros(loshape, dtype=np.int8)\n loseeds = loseeds.astype(np.int8)\n for label in labels:\n a, b, c = np.where(seeds == label)\n loa = np.round(a // zoom)\n lob = np.round(b // zoom)\n loc = np.round(c // zoom)\n # loseeds = np.zeros(loshape)\n\n loseeds[loa, lob, loc] += label\n # this is to detect conflict seeds\n loseeds[loseeds > label] = 100\n\n # remove conflict seeds\n loseeds[loseeds > 99] = 0\n\n # import py3DSeedEditor\n # ped = py3DSeedEditor.py3DSeedEditor(loseeds)\n # ped.show()\n\n return loseeds", "def _fix_labels(self):\n \"\"\"For each system, make sure tag _0 is the brightest, and make sure\n system 0 contains the brightest star in the highest-resolution image\n \"\"\"\n for s in self.systems:\n mag0 = np.inf\n n0 = None\n for n in self.get_system(s):\n if isinstance(n.parent, DummyObsNode):\n continue\n mag, _ = n.parent.value\n if mag < mag0:\n mag0 = mag\n n0 = n\n\n # If brightest is not tag _0, then switch them.\n if n0 is not None and n0.tag != 0:\n n_other = self.get_leaf('{}_{}'.format(s,0))\n n_other.tag = n0.tag\n n0.tag = 0", "function(data, options) {\n if (typeof data.length !== 'undefined') {\n if (typeof data[0].length !== 'undefined') {\n this.grouped = true;\n\n // TODO: Find longest?\n this.group_size = data[0].length;\n var o = {}, k, i = 0;\n for (k in options.labels) {\n k = options.labels[k];\n o[k] = data[i];\n i++;\n }\n return o;\n } else {\n return { 'one': data };\n }\n } else {\n return data;\n }\n }", "def _replace_labels(doc):\n \"\"\"Really hacky find-and-replace method that modifies one of the sklearn\n docstrings to change the semantics of labels_ for the subclasses\"\"\"\n lines = doc.splitlines()\n labelstart, labelend = 
None, None\n foundattributes = False\n for i, line in enumerate(lines):\n stripped = line.strip()\n if stripped == 'Attributes':\n foundattributes = True\n if foundattributes and not labelstart and stripped.startswith('labels_'):\n labelstart = len('\\n'.join(lines[:i])) + 1\n if labelstart and not labelend and stripped == '':\n labelend = len('\\n'.join(lines[:i + 1]))\n\n if labelstart is None or labelend is None:\n return doc\n\n replace = '\\n'.join([\n ' labels_ : list of arrays, each of shape [sequence_length, ]',\n ' The label of each point is an integer in [0, n_clusters).',\n '',\n ])\n return doc[:labelstart] + replace + doc[labelend:]", "def guess_labels(self, doc):\n \"\"\"\n return a prediction of label names\n \"\"\"\n doc = doc.clone() # make sure it can be serialized safely\n return self.index.guess_labels(doc)", "def _load_image_labels(self):\n \"\"\"\n preprocess all ground-truths\n\n Returns:\n ----------\n labels packed in [num_images x max_num_objects x 5] tensor\n \"\"\"\n temp = []\n\n # load ground-truths\n for idx in self.image_set_index:\n label_file = self._label_path_from_index(idx)\n with open(label_file, 'r') as f:\n label = []\n for line in f.readlines():\n temp_label = line.strip().split()\n assert len(temp_label) == 5, \"Invalid label file\" + label_file\n cls_id = int(temp_label[0])\n x = float(temp_label[1])\n y = float(temp_label[2])\n half_width = float(temp_label[3]) / 2\n half_height = float(temp_label[4]) / 2\n xmin = x - half_width\n ymin = y - half_height\n xmax = x + half_width\n ymax = y + half_height\n label.append([cls_id, xmin, ymin, xmax, ymax])\n temp.append(np.array(label))\n return temp", "public static void step(long seed, Neurons[] neurons, DeepLearningModel.DeepLearningModelInfo minfo, boolean training, double[] responses) {\n try {\n for (int i=1; i<neurons.length-1; ++i) {\n neurons[i].fprop(seed, training);\n }\n if (minfo.get_params().autoencoder) {\n neurons[neurons.length - 1].fprop(seed, training);\n if (training) {\n for (int i=neurons.length-1; i>0; --i) {\n neurons[i].bprop();\n }\n }\n } else {\n if (minfo.get_params().classification) {\n ((Neurons.Softmax) neurons[neurons.length - 1]).fprop();\n if (training) {\n for (int i = 1; i < neurons.length - 1; i++)\n Arrays.fill(neurons[i]._e.raw(), 0);\n int target_label;\n if (Double.isNaN(responses[0])) { //missing response\n target_label = Neurons.missing_int_value;\n } else {\n assert ((double) (int) responses[0] == responses[0]); //classification -> integer labels expected\n target_label = (int) responses[0];\n }\n ((Neurons.Softmax) neurons[neurons.length - 1]).bprop(target_label);\n }\n } else {\n ((Neurons.Linear) neurons[neurons.length - 1]).fprop();\n if (training) {\n for (int i = 1; i < neurons.length - 1; i++)\n Arrays.fill(neurons[i]._e.raw(), 0);\n float target_value;\n if (Double.isNaN(responses[0])) { //missing response\n target_value = Neurons.missing_real_value;\n } else {\n target_value = (float) responses[0];\n }\n ((Neurons.Linear) neurons[neurons.length - 1]).bprop(target_value);\n }\n }\n if (training) {\n for (int i=neurons.length-2; i>0; --i)\n neurons[i].bprop();\n }\n }\n }\n catch(RuntimeException ex) {\n Log.warn(ex.getMessage());\n minfo.set_unstable();\n throw new Job.JobCancelledException(\"Canceling job due to numerical instability.\");\n }\n }", "def _load_image_labels(self):\n \"\"\"\n preprocess all ground-truths\n\n Returns:\n ----------\n labels packed in [num_images x max_num_objects x 5] tensor\n \"\"\"\n temp = []\n\n # load ground-truth 
from xml annotations\n for idx in self.image_set_index:\n label_file = self._label_path_from_index(idx)\n tree = ET.parse(label_file)\n root = tree.getroot()\n size = root.find('size')\n width = float(size.find('width').text)\n height = float(size.find('height').text)\n label = []\n\n for obj in root.iter('object'):\n difficult = int(obj.find('difficult').text)\n # if not self.config['use_difficult'] and difficult == 1:\n # continue\n cls_name = obj.find('name').text\n if cls_name not in self.classes:\n continue\n cls_id = self.classes.index(cls_name)\n xml_box = obj.find('bndbox')\n xmin = float(xml_box.find('xmin').text) / width\n ymin = float(xml_box.find('ymin').text) / height\n xmax = float(xml_box.find('xmax').text) / width\n ymax = float(xml_box.find('ymax').text) / height\n label.append([cls_id, xmin, ymin, xmax, ymax, difficult])\n temp.append(np.array(label))\n return temp" ]
[ 0.7497216463088989, 0.70253586769104, 0.6890901327133179, 0.6859575510025024, 0.6842992901802063, 0.6806407570838928, 0.6781275868415833, 0.6768274307250977, 0.674925684928894, 0.6744183897972107, 0.6728851795196533, 0.6726276278495789 ]
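A minimal, hedged sketch of the hard-constraint step from the row above, rewritten as a standalone function (the name apply_hard_constraints is illustrative and not part of the library; only numpy is assumed):

import numpy as np

def apply_hard_constraints(tdata1, tdata2, seeds):
    # seed labels 1 and 3 mark object 1: make the competing t-link cost prohibitive
    # and zero the own-class cost, so the cut is forced to keep these voxels in object 1
    obj1 = (seeds == 1) | (seeds == 3)
    tdata2[obj1] = np.max(tdata2) + 1
    tdata1[obj1] = 0
    # seed labels 2 and 4 mark object 2: mirror the assignment
    obj2 = (seeds == 2) | (seeds == 4)
    tdata1[obj2] = np.max(tdata1) + 1
    tdata2[obj2] = 0
    return tdata1, tdata2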
Compute edge values for graph cut tlinks based on image intensity and texture.
def __similarity_for_tlinks_obj_bgr( self, data, voxelsize, # voxels1, voxels2, # seeds, otherfeatures=None ): """ Compute edge values for graph cut tlinks based on image intensity and texture. """ # self.fit_model(data, voxelsize, seeds) # There is a need to have small vaues for good fit # R(obj) = -ln( Pr (Ip | O) ) # R(bck) = -ln( Pr (Ip | B) ) # Boykov2001b # ln is computed in likelihood tdata1 = (-(self.mdl.likelihood_from_image(data, voxelsize, 1))) * 10 tdata2 = (-(self.mdl.likelihood_from_image(data, voxelsize, 2))) * 10 # to spare some memory dtype = np.int16 if np.any(tdata1 > 32760): dtype = np.float32 if np.any(tdata2 > 32760): dtype = np.float32 if self.segparams["use_apriori_if_available"] and self.apriori is not None: logger.debug("using apriori information") gamma = self.segparams["apriori_gamma"] a1 = (-np.log(self.apriori * 0.998 + 0.001)) * 10 a2 = (-np.log(0.999 - (self.apriori * 0.998))) * 10 # logger.debug('max ' + str(np.max(tdata1)) + ' min ' + str(np.min(tdata1))) # logger.debug('max ' + str(np.max(tdata2)) + ' min ' + str(np.min(tdata2))) # logger.debug('max ' + str(np.max(a1)) + ' min ' + str(np.min(a1))) # logger.debug('max ' + str(np.max(a2)) + ' min ' + str(np.min(a2))) tdata1u = (((1 - gamma) * tdata1) + (gamma * a1)).astype(dtype) tdata2u = (((1 - gamma) * tdata2) + (gamma * a2)).astype(dtype) tdata1 = tdata1u tdata2 = tdata2u # logger.debug(' max ' + str(np.max(tdata1)) + ' min ' + str(np.min(tdata1))) # logger.debug(' max ' + str(np.max(tdata2)) + ' min ' + str(np.min(tdata2))) # logger.debug('gamma ' + str(gamma)) # import sed3 # ed = sed3.show_slices(tdata1) # ed = sed3.show_slices(tdata2) del tdata1u del tdata2u del a1 del a2 # if np.any(tdata1 < 0) or np.any(tdata2 <0): # logger.error("Problem with tlinks. Likelihood is < 0") # if self.debug_images: # self.__show_debug_tdata_images(tdata1, tdata2, suptitle="likelihood") return tdata1, tdata2
[ "def __ordered_values_by_indexes(self, data, inds):\n \"\"\"\n Return values (intensities) by indexes.\n\n Used for multiscale graph cut.\n data = [[0 1 1],\n [0 2 2],\n [0 2 2]]\n\n inds = [[0 1 2],\n [3 4 4],\n [5 4 4]]\n\n return: [0, 1, 1, 0, 2, 0]\n\n If the data are not consistent, it will take the maximal value\n\n \"\"\"\n # get unique labels and their first indexes\n # lab, linds = np.unique(inds, return_index=True)\n # compute values by indexes\n # values = data.reshape(-1)[linds]\n\n # alternative slow implementation\n # if there are different data on same index, it will take\n # maximal value\n # lab = np.unique(inds)\n # values = [0]*len(lab)\n # for label in lab:\n # values[label] = np.max(data[inds == label])\n #\n # values = np.asarray(values)\n\n # yet another implementation\n values = [None] * (np.max(inds) + 1)\n\n linear_inds = inds.ravel()\n linear_data = data.ravel()\n for i in range(0, len(linear_inds)):\n # going over all data pixels\n\n if values[linear_inds[i]] is None:\n # this index is found for first\n values[linear_inds[i]] = linear_data[i]\n elif values[linear_inds[i]] < linear_data[i]:\n # here can be changed maximal or minimal value\n values[linear_inds[i]] = linear_data[i]\n\n values = np.asarray(values)\n\n return values", "public void process( T image , GrayF32 intensity ) {\n\t\tint maxFeatures = (int)(maxFeaturesFraction*image.width*image.height);\n\t\tcandidatesLow.reset();\n\t\tcandidatesHigh.reset();\n\t\tthis.image = image;\n\n\t\tif( stride != image.stride ) {\n\t\t\tstride = image.stride;\n\t\t\toffsets = DiscretizedCircle.imageOffsets(radius, image.stride);\n\t\t}\n\t\thelper.setImage(image,offsets);\n\n\t\tfor (int y = radius; y < image.height-radius; y++) {\n\t\t\tint indexIntensity = intensity.startIndex + y*intensity.stride + radius;\n\t\t\tint index = image.startIndex + y*image.stride + radius;\n\t\t\tfor (int x = radius; x < image.width-radius; x++, index++,indexIntensity++) {\n\n\t\t\t\tint result = helper.checkPixel(index);\n\n\t\t\t\tif( result < 0 ) {\n\t\t\t\t\tintensity.data[indexIntensity] = helper.scoreLower(index);\n\t\t\t\t\tcandidatesLow.add(x,y);\n\t\t\t\t} else if( result > 0) {\n\t\t\t\t\tintensity.data[indexIntensity] = helper.scoreUpper(index);\n\t\t\t\t\tcandidatesHigh.add(x,y);\n\t\t\t\t} else {\n\t\t\t\t\tintensity.data[indexIntensity] = 0;\n\t\t\t\t}\n\t\t\t}\n\t\t\t// check on a per row basis to reduce impact on performance\n\t\t\tif( candidatesLow.size + candidatesHigh.size >= maxFeatures )\n\t\t\t\tbreak;\n\t\t}\n\t}", "def _compute_fluxes(self):\n \"\"\"\n Compute integrated flux inside ellipse, as well as inside a\n circle defined with the same semimajor axis.\n\n Pixels in a square section enclosing circle are scanned; the\n distance of each pixel to the isophote center is compared both\n with the semimajor axis length and with the length of the\n ellipse radius vector, and integrals are updated if the pixel\n distance is smaller.\n \"\"\"\n\n # Compute limits of square array that encloses circle.\n sma = self.sample.geometry.sma\n x0 = self.sample.geometry.x0\n y0 = self.sample.geometry.y0\n xsize = self.sample.image.shape[1]\n ysize = self.sample.image.shape[0]\n\n imin = max(0, int(x0 - sma - 0.5) - 1)\n jmin = max(0, int(y0 - sma - 0.5) - 1)\n imax = min(xsize, int(x0 + sma + 0.5) + 1)\n jmax = min(ysize, int(y0 + sma + 0.5) + 1)\n\n # Integrate\n if (jmax-jmin > 1) and (imax-imin) > 1:\n y, x = np.mgrid[jmin:jmax, imin:imax]\n radius, angle = self.sample.geometry.to_polar(x, y)\n radius_e = 
self.sample.geometry.radius(angle)\n\n midx = (radius <= sma)\n values = self.sample.image[y[midx], x[midx]]\n tflux_c = np.ma.sum(values)\n npix_c = np.ma.count(values)\n\n midx2 = (radius <= radius_e)\n values = self.sample.image[y[midx2], x[midx2]]\n tflux_e = np.ma.sum(values)\n npix_e = np.ma.count(values)\n else:\n tflux_e = 0.\n tflux_c = 0.\n npix_e = 0\n npix_c = 0\n\n return tflux_e, tflux_c, npix_e, npix_c", "public void process(GrayF32 intensity ) {\n\n\t\toriginalMin.reset();\n\t\toriginalMax.reset();\n\t\tnonmax.process(intensity,null,null,originalMin,originalMax);\n\n\t\tlocalExtreme.reset();\n\t\tfor (int i = 0; i < originalMin.size; i++) {\n\t\t\tPoint2D_I16 p = originalMin.get(i);\n\t\t\tfloat val = intensity.unsafe_get(p.x,p.y);\n\t\t\tlocalExtreme.grow().set(-val,false,p);\n\t\t}\n\t\tfor (int i = 0; i < originalMax.size; i++) {\n\t\t\tPoint2D_I16 p = originalMax.get(i);\n\t\t\tfloat val = intensity.unsafe_get(p.x, p.y);\n\t\t\tlocalExtreme.grow().set(val,true,p);\n\t\t}\n\n\t\tif( localExtreme.size > maxTotalFeatures ) {\n\t\t\tQuickSelect.select(localExtreme.data, maxTotalFeatures, localExtreme.size);\n\t\t\tlocalExtreme.size = maxTotalFeatures;\n\t\t}\n\t}", "private void calc() {\n int hMin = (int) ((this.cImage.getHeight()) / 4.0);\n int hMax = (int) ((this.cImage.getHeight()) * 3.0 / 4.0);\n init();\n\n for (int y = hMin; y < hMax; y++) {\n for (int x = 1; x < (this.cImage.getWidth() - 2); x++) {\n // only lower edges are considered\n if (ImageUtil.isBlack(this.cImage, x, y)) {\n if (!ImageUtil.isBlack(this.cImage, x, y + 1)) {\n calc(x, y);\n }\n }\n }\n }\n\n }", "def _calc_texture_gradient(img):\n \"\"\"\n calculate texture gradient for entire image\n\n The original SelectiveSearch algorithm proposed Gaussian derivative\n for 8 orientations, but we use LBP instead.\n\n output will be [height(*)][width(*)]\n \"\"\"\n ret = numpy.zeros((img.shape[0], img.shape[1], img.shape[2]))\n\n for colour_channel in (0, 1, 2):\n ret[:, :, colour_channel] = skimage.feature.local_binary_pattern(\n img[:, :, colour_channel], 8, 1.0)\n\n return ret", "def _calc_grad_tiled(self, img, t_grad, tile_size=512):\n '''Compute the value of tensor t_grad over the image in a tiled way.\n Random shifts are applied to the image to blur tile boundaries over \n multiple iterations.'''\n sz = tile_size\n h, w = img.shape[:2]\n sx, sy = np.random.randint(sz, size=2)\n img_shift = np.roll(np.roll(img, sx, 1), sy, 0)\n grad = np.zeros_like(img)\n for y in range(0, max(h-sz//2, sz),sz):\n for x in range(0, max(w-sz//2, sz),sz):\n sub = img_shift[y:y+sz,x:x+sz]\n g = self._session.run(t_grad, {self._t_input:sub})\n grad[y:y+sz,x:x+sz] = g\n return np.roll(np.roll(grad, -sx, 1), -sy, 0)", "function calcCutValue(t, g, child) {\n var childLab = t.node(child);\n var parent = childLab.parent;\n // True if the child is on the tail end of the edge in the directed graph\n var childIsTail = true;\n // The graph's view of the tree edge we're inspecting\n var graphEdge = g.edge(child, parent);\n // The accumulated cut value for the edge between this node and its parent\n var cutValue = 0;\n\n if (!graphEdge) {\n childIsTail = false;\n graphEdge = g.edge(parent, child);\n }\n\n cutValue = graphEdge.weight;\n\n _.forEach(g.nodeEdges(child), function(e) {\n var isOutEdge = e.v === child,\n other = isOutEdge ? e.w : e.v;\n\n if (other !== parent) {\n var pointsToHead = isOutEdge === childIsTail,\n otherWeight = g.edge(e).weight;\n\n cutValue += pointsToHead ? 
otherWeight : -otherWeight;\n if (isTreeEdge(t, child, other)) {\n var otherCutValue = t.edge(child, other).cutvalue;\n cutValue += pointsToHead ? -otherCutValue : otherCutValue;\n }\n }\n });\n\n return cutValue;\n}", "def _calc_texture_hist(img):\n \"\"\"\n calculate texture histogram for each region\n\n calculate the histogram of gradient for each colours\n the size of output histogram will be\n BINS * ORIENTATIONS * COLOUR_CHANNELS(3)\n \"\"\"\n BINS = 10\n\n hist = numpy.array([])\n\n for colour_channel in (0, 1, 2):\n\n # mask by the colour channel\n fd = img[:, colour_channel]\n\n # calculate histogram for each orientation and concatenate them all\n # and join to the result\n hist = numpy.concatenate(\n [hist] + [numpy.histogram(fd, BINS, (0.0, 1.0))[0]])\n\n # L1 Normalize\n hist = hist / len(img)\n\n return hist", "def compute_edge_colors(self):\n \"\"\"Compute the edge colors.\"\"\"\n data = [self.graph.edges[n][self.edge_color] for n in self.edges]\n data_reduced = sorted(list(set(data)))\n\n dtype = infer_data_type(data)\n n_grps = num_discrete_groups(data)\n if dtype == \"categorical\" or dtype == \"ordinal\":\n if n_grps <= 8:\n cmap = get_cmap(\n cmaps[\"Accent_{0}\".format(n_grps)].mpl_colormap\n )\n else:\n cmap = n_group_colorpallet(n_grps)\n elif dtype == \"continuous\" and not is_data_diverging(data):\n cmap = get_cmap(cmaps[\"weights\"])\n\n for d in data:\n idx = data_reduced.index(d) / n_grps\n self.edge_colors.append(cmap(idx))\n # Add colorbar if required.\n logging.debug(\"length of data_reduced: {0}\".format(len(data_reduced)))\n logging.debug(\"dtype: {0}\".format(dtype))\n if len(data_reduced) > 1 and dtype == \"continuous\":\n self.sm = plt.cm.ScalarMappable(\n cmap=cmap,\n norm=plt.Normalize(\n vmin=min(data_reduced),\n vmax=max(data_reduced), # noqa # noqa\n ),\n )\n self.sm._A = []", "def compute_values(edge_compatibility, v):\n \"\"\"Compute values. If edge compatibilities is just adjacency, we get ggnn.\n\n Args:\n edge_compatibility: A tensor of shape [batch, num_transforms, length, depth]\n v: A tensor of shape [batch, num_transforms, length, depth]\n\n Returns:\n output: A [batch, length, depth] tensor\n \"\"\"\n\n # Computes the incoming value vectors for each node by weighting them\n # according to the attention weights. These values are still segregated by\n # edge type.\n # Shape = [B, T, N, V].\n all_edge_values = tf.matmul(tf.to_float(edge_compatibility), v)\n\n # Combines the weighted value vectors together across edge types into a\n # single N x V matrix for each batch.\n output = tf.reduce_sum(all_edge_values, axis=1) # Shape [B, N, V].\n return output", "def __create_nlinks(self, data, inds=None, boundary_penalties_fcn=None):\n \"\"\"\n Compute nlinks grid from data shape information. For boundary penalties\n are data (intensities) values are used.\n\n ins: Default is None. Used for multiscale GC. This are indexes of\n multiscale pixels. Next example shows one superpixel witn index 2.\n inds = [\n [1 2 2],\n [3 2 2],\n [4 5 6]]\n\n boundary_penalties_fcn: is function with one argument - axis. 
It can\n it can be used for setting penalty weights between neighbooring\n pixels.\n\n \"\"\"\n # use the gerneral graph algorithm\n # first, we construct the grid graph\n start = time.time()\n if inds is None:\n inds = np.arange(data.size).reshape(data.shape)\n # if not self.segparams['use_boundary_penalties'] and \\\n # boundary_penalties_fcn is None :\n if boundary_penalties_fcn is None:\n # This is faster for some specific format\n edgx = np.c_[inds[:, :, :-1].ravel(), inds[:, :, 1:].ravel()]\n edgy = np.c_[inds[:, :-1, :].ravel(), inds[:, 1:, :].ravel()]\n edgz = np.c_[inds[:-1, :, :].ravel(), inds[1:, :, :].ravel()]\n\n else:\n logger.info(\"use_boundary_penalties\")\n\n bpw = self.segparams[\"boundary_penalties_weight\"]\n\n bpa = boundary_penalties_fcn(2)\n # id1=inds[:, :, :-1].ravel()\n edgx = np.c_[\n inds[:, :, :-1].ravel(),\n inds[:, :, 1:].ravel(),\n # cc * np.ones(id1.shape)\n bpw * bpa[:, :, 1:].ravel(),\n ]\n\n bpa = boundary_penalties_fcn(1)\n # id1 =inds[:, 1:, :].ravel()\n edgy = np.c_[\n inds[:, :-1, :].ravel(),\n inds[:, 1:, :].ravel(),\n # cc * np.ones(id1.shape)]\n bpw * bpa[:, 1:, :].ravel(),\n ]\n\n bpa = boundary_penalties_fcn(0)\n # id1 = inds[1:, :, :].ravel()\n edgz = np.c_[\n inds[:-1, :, :].ravel(),\n inds[1:, :, :].ravel(),\n # cc * np.ones(id1.shape)]\n bpw * bpa[1:, :, :].ravel(),\n ]\n\n # import pdb; pdb.set_trace()\n edges = np.vstack([edgx, edgy, edgz]).astype(np.int32)\n # edges - seznam indexu hran, kteres spolu sousedi\\\n elapsed = time.time() - start\n self.stats[\"_create_nlinks time\"] = elapsed\n logger.info(\"__create nlinks time \" + str(elapsed))\n return edges" ]
[ 0.6722548007965088, 0.6709698438644409, 0.6678282618522644, 0.6651525497436523, 0.6602543592453003, 0.6581569910049438, 0.6538912057876587, 0.6536920666694641, 0.6516335606575012, 0.6476464867591858, 0.6469390988349915, 0.6459125280380249 ]
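A hedged sketch of the unary-cost computation described in the row above: costs follow R(obj) = -ln Pr(Ip | O) and R(bck) = -ln Pr(Ip | B), scaled by 10 as in the source. The model argument stands in for the intensity model (its likelihood_from_image method appears in the source code); the function name itself is illustrative:

import numpy as np

def tlink_unary_costs(model, data, voxelsize):
    # negative log-likelihood per voxel for class 1 (object) and class 2 (background)
    tdata1 = (-model.likelihood_from_image(data, voxelsize, 1)) * 10
    tdata2 = (-model.likelihood_from_image(data, voxelsize, 2)) * 10
    # keep int16 to spare memory unless the costs would overflow its range
    dtype = np.int16
    if np.any(tdata1 > 32760) or np.any(tdata2 > 32760):
        dtype = np.float32
    return tdata1.astype(dtype), tdata2.astype(dtype)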
Compute the nlinks grid from data shape information. For boundary penalties the data (intensity) values are used. inds: Default is None. Used for multiscale GC. These are the indexes of multiscale pixels. The next example shows one superpixel with index 2. inds = [ [1 2 2], [3 2 2], [4 5 6]] boundary_penalties_fcn: a function with one argument - axis. It can be used for setting penalty weights between neighboring pixels.
def __create_nlinks(self, data, inds=None, boundary_penalties_fcn=None): """ Compute nlinks grid from data shape information. For boundary penalties are data (intensities) values are used. ins: Default is None. Used for multiscale GC. This are indexes of multiscale pixels. Next example shows one superpixel witn index 2. inds = [ [1 2 2], [3 2 2], [4 5 6]] boundary_penalties_fcn: is function with one argument - axis. It can it can be used for setting penalty weights between neighbooring pixels. """ # use the gerneral graph algorithm # first, we construct the grid graph start = time.time() if inds is None: inds = np.arange(data.size).reshape(data.shape) # if not self.segparams['use_boundary_penalties'] and \ # boundary_penalties_fcn is None : if boundary_penalties_fcn is None: # This is faster for some specific format edgx = np.c_[inds[:, :, :-1].ravel(), inds[:, :, 1:].ravel()] edgy = np.c_[inds[:, :-1, :].ravel(), inds[:, 1:, :].ravel()] edgz = np.c_[inds[:-1, :, :].ravel(), inds[1:, :, :].ravel()] else: logger.info("use_boundary_penalties") bpw = self.segparams["boundary_penalties_weight"] bpa = boundary_penalties_fcn(2) # id1=inds[:, :, :-1].ravel() edgx = np.c_[ inds[:, :, :-1].ravel(), inds[:, :, 1:].ravel(), # cc * np.ones(id1.shape) bpw * bpa[:, :, 1:].ravel(), ] bpa = boundary_penalties_fcn(1) # id1 =inds[:, 1:, :].ravel() edgy = np.c_[ inds[:, :-1, :].ravel(), inds[:, 1:, :].ravel(), # cc * np.ones(id1.shape)] bpw * bpa[:, 1:, :].ravel(), ] bpa = boundary_penalties_fcn(0) # id1 = inds[1:, :, :].ravel() edgz = np.c_[ inds[:-1, :, :].ravel(), inds[1:, :, :].ravel(), # cc * np.ones(id1.shape)] bpw * bpa[1:, :, :].ravel(), ] # import pdb; pdb.set_trace() edges = np.vstack([edgx, edgy, edgz]).astype(np.int32) # edges - seznam indexu hran, kteres spolu sousedi\ elapsed = time.time() - start self.stats["_create_nlinks time"] = elapsed logger.info("__create nlinks time " + str(elapsed)) return edges
[ "def __ms_npenalty_fcn(self, axis, mask, orig_shape):\n \"\"\"\n :param axis: direction of edge\n :param mask: 3d ndarray with ones where is fine resolution\n\n Neighboorhood penalty between small pixels should be smaller then in\n bigger tiles. This is the way how to set it.\n\n \"\"\"\n maskz = zoom_to_shape(mask, orig_shape)\n\n maskz_new = np.zeros(orig_shape, dtype=np.int16)\n maskz_new[maskz == 0] = self._msgc_npenalty_table[0, axis]\n maskz_new[maskz == 1] = self._msgc_npenalty_table[1, axis]\n # import sed3\n # ed = sed3.sed3(maskz_new)\n # import ipdb; ipdb.set_trace() # noqa BREAKPOINT\n\n return maskz_new", "def grid_edges(shape, inds=None, return_directions=True):\n \"\"\"\n Get list of grid edges\n :param shape:\n :param inds:\n :param return_directions:\n :return:\n \"\"\"\n if inds is None:\n inds = np.arange(np.prod(shape)).reshape(shape)\n # if not self.segparams['use_boundary_penalties'] and \\\n # boundary_penalties_fcn is None :\n if len(shape) == 2:\n edgx = np.c_[inds[:, :-1].ravel(), inds[:, 1:].ravel()]\n edgy = np.c_[inds[:-1, :].ravel(), inds[1:, :].ravel()]\n\n edges = [edgx, edgy]\n\n directions = [\n np.ones([edgx.shape[0]], dtype=np.int8) * 0,\n np.ones([edgy.shape[0]], dtype=np.int8) * 1,\n ]\n\n elif len(shape) == 3:\n # This is faster for some specific format\n edgx = np.c_[inds[:, :, :-1].ravel(), inds[:, :, 1:].ravel()]\n edgy = np.c_[inds[:, :-1, :].ravel(), inds[:, 1:, :].ravel()]\n edgz = np.c_[inds[:-1, :, :].ravel(), inds[1:, :, :].ravel()]\n edges = [edgx, edgy, edgz]\n else:\n logger.error(\"Expected 2D or 3D data\")\n\n # for all edges along first direction put 0, for second direction put 1, for third direction put 3\n if return_directions:\n directions = []\n for idirection in range(len(shape)):\n directions.append(\n np.ones([edges[idirection].shape[0]], dtype=np.int8) * idirection\n )\n edges = np.concatenate(edges)\n if return_directions:\n edge_dir = np.concatenate(directions)\n return edges, edge_dir\n else:\n return edges", "def grid_linspace(bounds, count):\n \"\"\"\n Return a grid spaced inside a bounding box with edges spaced using np.linspace.\n\n Parameters\n ---------\n bounds: (2,dimension) list of [[min x, min y, etc], [max x, max y, etc]]\n count: int, or (dimension,) int, number of samples per side\n\n Returns\n -------\n grid: (n, dimension) float, points in the specified bounds\n \"\"\"\n bounds = np.asanyarray(bounds, dtype=np.float64)\n if len(bounds) != 2:\n raise ValueError('bounds must be (2, dimension!')\n\n count = np.asanyarray(count, dtype=np.int)\n if count.shape == ():\n count = np.tile(count, bounds.shape[1])\n\n grid_elements = [np.linspace(*b, num=c) for b, c in zip(bounds.T, count)]\n grid = np.vstack(np.meshgrid(*grid_elements)\n ).reshape(bounds.shape[1], -1).T\n return grid", "def define_points_grid(self):\n \"\"\"\n This is experimental code that could be used in the spatialDomainNoGrid\n section to build a grid of points on which to generate the solution.\n However, the current development plan (as of 27 Jan 2015) is to have the \n end user supply the list of points where they want a solution (and/or for \n it to be provided in a more automated way by GRASS GIS). 
But because this \n (untested) code may still be useful, it will remain as its own function \n here.\n It used to be in f2d.py.\n \"\"\"\n # Grid making step\n # In this case, an output at different (x,y), e.g., on a grid, is desired\n # First, see if there is a need for a grid, and then make it\n # latlon arrays must have a pre-set grid\n if self.latlon == False:\n # Warn that any existing grid will be overwritten\n try:\n self.dx\n if self.Quiet == False:\n print(\"dx and dy being overwritten -- supply a full grid\")\n except:\n try:\n self.dy\n if self.Quiet == False:\n print(\"dx and dy being overwritten -- supply a full grid\")\n except:\n pass\n # Boundaries\n n = np.max(self.y) + self.alpha\n s = np.min(self.y) - self.alpha\n w = np.min(self.x) + self.alpha\n e = np.max(self.x) - self.alpha\n # Grid spacing\n dxprelim = self.alpha/50. # x or y\n nx = np.ceil((e-w)/dxprelim)\n ny = np.ceil((n-s)/dxprelim)\n dx = (e-w) / nx\n dy = (n-s) / ny\n self.dx = self.dy = (dx+dy)/2. # Average of these to create a \n # square grid for more compatibility\n self.xw = np.linspace(w, e, nx)\n self.yw = np.linspace(s, n, ny)\n else:\n print(\"Lat/lon xw and yw must be pre-set: grid will not be square\")\n print(\"and may run into issues with poles, so to ensure the proper\")\n print(\"output points are chosen, the end user should do this.\")\n sys.exit()", "def nphase_border(im, include_diagonals=False):\n r'''\n Identifies the voxels in regions that border *N* other regions.\n\n Useful for finding triple-phase boundaries.\n\n Parameters\n ----------\n im : ND-array\n An ND image of the porous material containing discrete values in the\n pore space identifying different regions. e.g. the result of a\n snow-partition\n\n include_diagonals : boolean\n When identifying bordering pixels (2D) and voxels (3D) include those\n shifted along more than one axis\n\n Returns\n -------\n image : ND-array\n A copy of ``im`` with voxel values equal to the number of uniquely\n different bordering values\n '''\n if im.ndim != im.squeeze().ndim:\n warnings.warn('Input image conains a singleton axis:' + str(im.shape) +\n ' Reduce dimensionality with np.squeeze(im) to avoid' +\n ' unexpected behavior.')\n # Get dimension of image\n ndim = len(np.shape(im))\n if ndim not in [2, 3]:\n raise NotImplementedError(\"Function only works for 2d and 3d images\")\n # Pad image to handle edges\n im = np.pad(im, pad_width=1, mode='edge')\n # Stack rolled images for each neighbor to be inspected\n stack = _make_stack(im, include_diagonals)\n # Sort the stack along the last axis\n stack.sort()\n out = np.ones_like(im)\n # Run through stack recording when neighbor id changes\n # Number of changes is number of unique bordering regions\n for k in range(np.shape(stack)[ndim])[1:]:\n if ndim == 2:\n mask = stack[:, :, k] != stack[:, :, k-1]\n elif ndim == 3:\n mask = stack[:, :, :, k] != stack[:, :, :, k-1]\n out += mask\n # Un-pad\n if ndim == 2:\n return out[1:-1, 1:-1].copy()\n else:\n return out[1:-1, 1:-1, 1:-1].copy()", "def fill_borders(self, *args):\n \"\"\"Extrapolate tiepoint lons and lats to fill in the border of the\n chunks.\n \"\"\"\n\n to_run = []\n cases = {\"y\": self._fill_row_borders,\n \"x\": self._fill_col_borders}\n for dim in args:\n try:\n to_run.append(cases[dim])\n except KeyError:\n raise NameError(\"Unrecognized dimension: \" + str(dim))\n\n for fun in to_run:\n fun()", "def _mk_connectivity_pits(self, i12, flats, elev, mag, dX, dY):\n \"\"\"\n Helper function for _mk_adjacency_matrix. 
This is a more general\n version of _mk_adjacency_flats which drains pits and flats to nearby\n but non-adjacent pixels. The slope magnitude (and flats mask) is\n updated for these pits and flats so that the TWI can be computed.\n \"\"\"\n \n e = elev.data.ravel()\n\n pit_i = []\n pit_j = []\n pit_prop = []\n warn_pits = []\n \n pits = i12[flats & (elev > 0)]\n I = np.argsort(e[pits])\n for pit in pits[I]:\n # find drains\n pit_area = np.array([pit], 'int64')\n\n drain = None\n epit = e[pit]\n for it in range(self.drain_pits_max_iter):\n border = get_border_index(pit_area, elev.shape, elev.size)\n\n eborder = e[border]\n emin = eborder.min()\n if emin < epit:\n drain = border[eborder < epit]\n break\n\n pit_area = np.concatenate([pit_area, border[eborder == emin]])\n\n if drain is None:\n warn_pits.append(pit)\n continue\n \n ipit, jpit = np.unravel_index(pit, elev.shape)\n Idrain, Jdrain = np.unravel_index(drain, elev.shape)\n\n # filter by drain distance in coordinate space\n if self.drain_pits_max_dist:\n dij = np.sqrt((ipit - Idrain)**2 + (jpit-Jdrain)**2)\n b = dij <= self.drain_pits_max_dist\n if not b.any():\n warn_pits.append(pit)\n continue\n drain = drain[b]\n Idrain = Idrain[b]\n Jdrain = Jdrain[b]\n \n # calculate real distances\n dx = [_get_dX_mean(dX, ipit, idrain) * (jpit - jdrain)\n for idrain, jdrain in zip(Idrain, Jdrain)]\n dy = [dY[make_slice(ipit, idrain)].sum() for idrain in Idrain]\n dxy = np.sqrt(np.array(dx)**2 + np.array(dy)**2)\n\n # filter by drain distance in real space\n if self.drain_pits_max_dist_XY:\n b = dxy <= self.drain_pits_max_dist_XY\n if not b.any():\n warn_pits.append(pit)\n continue\n drain = drain[b]\n dxy = dxy[b]\n \n # calculate magnitudes\n s = (e[pit]-e[drain]) / dxy\n\n # connectivity info\n # TODO proportion calculation (_mk_connectivity_flats used elev?)\n pit_i += [pit for i in drain]\n pit_j += drain.tolist()\n pit_prop += s.tolist()\n \n # update pit magnitude and flats mask\n mag[ipit, jpit] = np.mean(s)\n flats[ipit, jpit] = False\n\n if warn_pits:\n warnings.warn(\"Warning %d pits had no place to drain to in this \"\n \"chunk\" % len(warn_pits))\n \n # Note: returning flats and mag here is not strictly necessary\n return (np.array(pit_i, 'int64'),\n np.array(pit_j, 'int64'),\n np.array(pit_prop, 'float64'),\n flats,\n mag)", "def multigrid(bounds, points_count):\n \"\"\"\n Generates a multidimensional lattice\n :param bounds: box constraints\n :param points_count: number of points per dimension.\n \"\"\"\n if len(bounds)==1:\n return np.linspace(bounds[0][0], bounds[0][1], points_count).reshape(points_count, 1)\n x_grid_rows = np.meshgrid(*[np.linspace(b[0], b[1], points_count) for b in bounds])\n x_grid_columns = np.vstack([x.flatten(order='F') for x in x_grid_rows]).T\n return x_grid_columns", "def calc_indices(self, shape):\n \"\"\"calculates and stores the set of indices\n ix=[0, nx-1], iy=[0, ny-1] for data of shape (nx, ny)\"\"\"\n if len(shape) == 2:\n ny, nx = shape\n elif len(shape) == 3:\n ny, nx, nchan = shape\n\n inds = []\n for iy in range(ny):\n inds.extend([(ix, iy) for ix in range(nx)])\n self.conf.indices = np.array(inds)", "def _mk_connectivity_flats(self, i12, j1, j2, mat_data, flats, elev, mag):\n \"\"\"\n Helper function for _mk_adjacency_matrix. This calcualtes the\n connectivity for flat regions. Every pixel in the flat will drain\n to a random pixel in the flat. This accumulates all the area in the\n flat region to a single pixel. All that area is then drained from\n that pixel to the surroundings on the flat. 
If the border of the\n flat has a single pixel with a much lower elevation, all the area will\n go towards that pixel. If the border has pixels with similar elevation,\n then the area will be distributed amongst all the border pixels\n proportional to their elevation.\n \"\"\"\n nn, mm = flats.shape\n NN = np.prod(flats.shape)\n # Label the flats\n assigned, n_flats = spndi.label(flats, FLATS_KERNEL3)\n\n flat_ids, flat_coords, flat_labelsf = _get_flat_ids(assigned)\n flat_j = [None] * n_flats\n flat_prop = [None] * n_flats\n flat_i = [None] * n_flats\n\n # Temporary array to find the flats\n edges = np.zeros_like(flats)\n # %% Calcute the flat drainage\n warn_flats = []\n for ii in xrange(n_flats):\n ids_flats = flat_ids[flat_coords[ii]:flat_coords[ii+1]]\n edges[:] = 0\n j = ids_flats % mm\n i = ids_flats // mm\n for iii in [-1, 0, 1]:\n for jjj in [-1, 0, 1]:\n i_2 = i + iii\n j_2 = j + jjj\n ids_tmp = (i_2 >= 0) & (j_2 >= 0) & (i_2 < nn) & (j_2 < mm)\n edges[i_2[ids_tmp], j_2[ids_tmp]] += \\\n FLATS_KERNEL3[iii+1, jjj+1]\n edges.ravel()[ids_flats] = 0\n ids_edge = np.argwhere(edges.ravel()).squeeze()\n\n flat_elev_loc = elev.ravel()[ids_flats]\n # It is possble for the edges to merge 2 flats, so we need to\n # take the lower elevation to avoid large circular regions\n flat_elev = flat_elev_loc.min()\n\n loc_elev = elev.ravel()[ids_edge]\n # Filter out any elevations larger than the flat elevation\n # TODO: Figure out if this should be <= or <\n I_filt = loc_elev < flat_elev\n try:\n loc_elev = loc_elev[I_filt]\n loc_slope = mag.ravel()[ids_edge][I_filt]\n except: # If this is fully masked out (i.e. inside a no-data area)\n loc_elev = np.array([])\n loc_slope = np.array([])\n\n loc_dx = self.dX.mean()\n\n # Now I have to figure out if I should just use the minimum or\n # distribute amongst many pixels on the flat boundary\n n = len(loc_slope)\n if n == 0: # Flat does not have anywhere to drain\n # Let's see if the flat goes to the edge. 
If yes, we'll just\n # distribute the area along the edge.\n ids_flat_on_edge = ((ids_flats % mag.shape[1]) == 0) | \\\n ((ids_flats % mag.shape[1]) == (mag.shape[1] - 1)) | \\\n (ids_flats <= mag.shape[1]) | \\\n (ids_flats >= (mag.shape[1] * (mag.shape[0] - 1)))\n if ids_flat_on_edge.sum() == 0:\n warn_flats.append(ii)\n continue\n\n drain_ids = ids_flats[ids_flat_on_edge]\n loc_proportions = mag.ravel()[ids_flats[ids_flat_on_edge]]\n loc_proportions /= loc_proportions.sum()\n\n ids_flats = ids_flats[~ids_flat_on_edge]\n # This flat is entirely on the edge of the image\n if len(ids_flats) == 0:\n # therefore, whatever drains into it is done.\n continue\n flat_elev_loc = flat_elev_loc[~ids_flat_on_edge]\n else: # Flat has a place to drain to\n min_edges = np.zeros(loc_slope.shape, bool)\n min_edges[np.argmin(loc_slope)] = True\n # Add to the min edges any edge that is within an error\n # tolerance as small as the minimum\n min_edges = (loc_slope + loc_slope * loc_dx / 2) \\\n >= loc_slope[min_edges]\n\n drain_ids = ids_edge[I_filt][min_edges]\n\n loc_proportions = loc_slope[min_edges]\n loc_proportions /= loc_proportions.sum()\n\n # Now distribute the connectivity amongst the chosen elevations\n # proportional to their slopes\n\n # First, let all the the ids in the flats drain to 1\n # flat id (for ease)\n one_id = np.zeros(ids_flats.size, bool)\n one_id[np.argmin(flat_elev_loc)] = True\n\n j1.ravel()[ids_flats[~one_id]] = ids_flats[one_id]\n mat_data.ravel()[ids_flats[~one_id]] = 1\n # Negative indices will be eliminated before making the matix\n j2.ravel()[ids_flats[~one_id]] = -1\n mat_data.ravel()[ids_flats[~one_id] + NN] = 0\n\n # Now drain the 1 flat to the drains\n j1.ravel()[ids_flats[one_id]] = drain_ids[0]\n mat_data.ravel()[ids_flats[one_id]] = loc_proportions[0]\n if len(drain_ids) > 1:\n j2.ravel()[ids_flats[one_id]] = drain_ids[1]\n mat_data.ravel()[ids_flats[one_id] + NN] = loc_proportions[1]\n\n if len(loc_proportions > 2):\n flat_j[ii] = drain_ids[2:]\n flat_prop[ii] = loc_proportions[2:]\n flat_i[ii] = np.ones(drain_ids[2:].size, 'int64') * ids_flats[one_id]\n try:\n flat_j = np.concatenate([fj for fj in flat_j if fj is not None])\n flat_prop = \\\n np.concatenate([fp for fp in flat_prop if fp is not None])\n flat_i = np.concatenate([fi for fi in flat_i if fi is not None])\n except:\n flat_j = np.array([], 'int64')\n flat_prop = np.array([], 'float64')\n flat_i = np.array([], 'int64')\n\n if len(warn_flats) > 0:\n warnings.warn(\"Warning %d flats had no place\" % len(warn_flats) +\n \" to drain to --> these are pits (check pit-remove\"\n \"algorithm).\")\n return j1, j2, mat_data, flat_i, flat_j, flat_prop", "def _bresenham(self, faces, dx):\n r'''\n A Bresenham line function to generate points to fill in for the fibers\n '''\n line_points = []\n for face in faces:\n # Get in hull order\n fx = face[:, 0]\n fy = face[:, 1]\n fz = face[:, 2]\n # Find the axis with the smallest spread and remove it to make 2D\n if (np.std(fx) < np.std(fy)) and (np.std(fx) < np.std(fz)):\n f2d = np.vstack((fy, fz)).T\n elif (np.std(fy) < np.std(fx)) and (np.std(fy) < np.std(fz)):\n f2d = np.vstack((fx, fz)).T\n else:\n f2d = np.vstack((fx, fy)).T\n hull = sptl.ConvexHull(f2d, qhull_options='QJ Pp')\n face = np.around(face[hull.vertices].astype(float), 6)\n for i in range(len(face)):\n vec = face[i]-face[i-1]\n vec_length = np.linalg.norm(vec)\n increments = np.ceil(vec_length/dx)\n check_p_old = np.array([-1, -1, -1])\n for x in np.linspace(0, 1, increments):\n check_p_new = 
face[i-1]+(vec*x)\n if np.sum(check_p_new - check_p_old) != 0:\n line_points.append(check_p_new)\n check_p_old = check_p_new\n return np.asarray(line_points)", "def GridSample(inputs, borderMode='repeat'):\n \"\"\"\n Sample the images using the given coordinates, by bilinear interpolation.\n This was described in the paper:\n `Spatial Transformer Networks <http://arxiv.org/abs/1506.02025>`_.\n\n This is equivalent to `torch.nn.functional.grid_sample`,\n up to some non-trivial coordinate transformation.\n\n This implementation returns pixel value at pixel (1, 1) for a floating point coordinate (1.0, 1.0).\n Note that this may not be what you need.\n\n Args:\n inputs (list): [images, coords]. images has shape NHWC.\n coords has shape (N, H', W', 2), where each pair of the last dimension is a (y, x) real-value\n coordinate.\n borderMode: either \"repeat\" or \"constant\" (zero-filled)\n\n Returns:\n tf.Tensor: a tensor named ``output`` of shape (N, H', W', C).\n \"\"\"\n image, mapping = inputs\n assert image.get_shape().ndims == 4 and mapping.get_shape().ndims == 4\n input_shape = image.get_shape().as_list()[1:]\n assert None not in input_shape, \\\n \"Images in GridSample layer must have fully-defined shape\"\n assert borderMode in ['repeat', 'constant']\n\n orig_mapping = mapping\n mapping = tf.maximum(mapping, 0.0)\n lcoor = tf.floor(mapping)\n ucoor = lcoor + 1\n\n diff = mapping - lcoor\n neg_diff = 1.0 - diff # bxh2xw2x2\n\n lcoory, lcoorx = tf.split(lcoor, 2, 3)\n ucoory, ucoorx = tf.split(ucoor, 2, 3)\n\n lyux = tf.concat([lcoory, ucoorx], 3)\n uylx = tf.concat([ucoory, lcoorx], 3)\n\n diffy, diffx = tf.split(diff, 2, 3)\n neg_diffy, neg_diffx = tf.split(neg_diff, 2, 3)\n\n ret = tf.add_n([sample(image, lcoor) * neg_diffx * neg_diffy,\n sample(image, ucoor) * diffx * diffy,\n sample(image, lyux) * neg_diffy * diffx,\n sample(image, uylx) * diffy * neg_diffx], name='sampled')\n if borderMode == 'constant':\n max_coor = tf.constant([input_shape[0] - 1, input_shape[1] - 1], dtype=tf.float32)\n mask = tf.greater_equal(orig_mapping, 0.0)\n mask2 = tf.less_equal(orig_mapping, max_coor)\n mask = tf.logical_and(mask, mask2) # bxh2xw2x2\n mask = tf.reduce_all(mask, [3]) # bxh2xw2 boolean\n mask = tf.expand_dims(mask, 3)\n ret = ret * tf.cast(mask, tf.float32)\n return tf.identity(ret, name='output')" ]
[ 0.7182618975639343, 0.7100909948348999, 0.699677050113678, 0.6798095107078552, 0.6643567681312561, 0.6640481948852539, 0.6634003520011902, 0.6623707413673401, 0.6592199802398682, 0.6557827591896057, 0.6554999947547913, 0.6552016139030457 ]
Use actual model to calculate similarity. If no input is given the last image is used.

:param data3d:
:param voxelsize:
:param seeds:
:param area_weight:
:param hard_constraints:
:param return_unariesalt:
:return:
def debug_get_reconstructed_similarity(
    self,
    data3d=None,
    voxelsize=None,
    seeds=None,
    area_weight=1,
    hard_constraints=True,
    return_unariesalt=False,
):
    """
    Use actual model to calculate similarity. If no input is given the last image is used.

    :param data3d:
    :param voxelsize:
    :param seeds:
    :param area_weight:
    :param hard_constraints:
    :param return_unariesalt:
    :return:
    """
    if data3d is None:
        data3d = self.img
    if voxelsize is None:
        voxelsize = self.voxelsize
    if seeds is None:
        seeds = self.seeds

    unariesalt = self.__create_tlinks(
        data3d,
        voxelsize,
        # voxels1, voxels2,
        seeds,
        area_weight,
        hard_constraints,
    )
    if return_unariesalt:
        return unariesalt
    else:
        return self._reshape_unariesalt_to_similarity(unariesalt, data3d.shape)
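A minimal usage sketch for the positive snippet above (not part of the dataset row): it assumes `gc` is an already constructed and fitted instance of the graph-cut segmenter class this method belongs to; with no arguments the method reuses the last image, voxel size and seeds.

import numpy as np

# Hypothetical usage -- `gc` stands for a fitted segmenter instance (assumption).
similarity = gc.debug_get_reconstructed_similarity()                 # reuse last image and seeds
unariesalt = gc.debug_get_reconstructed_similarity(return_unariesalt=True)
print(np.asarray(similarity).shape)   # similarity reshaped to the input volume shape
print(np.asarray(unariesalt).shape)   # raw t-link weights before reshaping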
[ "def fit_from_image(self, data, voxelsize, seeds, unique_cls):\n \"\"\"\n This Method allows computes feature vector and train model.\n\n :cls: list of index number of requested classes in seeds\n \"\"\"\n fvs, clsselected = self.features_from_image(data, voxelsize, seeds, unique_cls)\n self.fit(fvs, clsselected)", "def generative_model(A, D, m, eta, gamma=None, model_type='matching', \n model_var='powerlaw', epsilon=1e-6, copy=True, seed=None):\n '''\n Generates synthetic networks using the models described in\n Betzel et al. (2016) Neuroimage. See this paper for more details.\n\n Succinctly, the probability of forming a connection between nodes u and v is\n P(u,v) = E(u,v)**eta * K(u,v)**gamma\n where eta and gamma are hyperparameters, E(u,v) is the euclidean or similar\n distance measure, and K(u,v) is the algorithm that defines the model.\n\n This describes the power law formulation, an alternative formulation uses\n the exponential function\n P(u,v) = exp(E(u,v)*eta) * exp(K(u,v)*gamma)\n\n Parameters\n ----------\n A : np.ndarray\n Binary network of seed connections\n D : np.ndarray\n Matrix of euclidean distances or other distances between nodes\n m : int\n Number of connections that should be present in the final synthetic \n network\n eta : np.ndarray\n A vector describing a range of values to estimate for eta, the \n hyperparameter describing exponential weighting of the euclidean\n distance.\n gamma : np.ndarray\n A vector describing a range of values to estimate for theta, the\n hyperparameter describing exponential weighting of the basis\n algorithm. If model_type='euclidean' or another distance metric,\n this can be None.\n model_type : Enum(str)\n euclidean : Uses only euclidean distances to generate connection \n probabilities\n neighbors : count of common neighbors\n matching : matching index, the normalized overlap in neighborhoods\n clu-avg : Average clustering coefficient\n clu-min : Minimum clustering coefficient\n clu-max : Maximum clustering coefficient\n clu-diff : Difference in clustering coefficient\n clu-prod : Product of clustering coefficient\n deg-avg : Average degree\n deg-min : Minimum degree\n deg-max : Maximum degree\n deg-diff : Difference in degree\n deg-prod : Product of degrees\n model_var : Enum(str)\n Default value is powerlaw. If so, uses formulation of P(u,v) as\n described above. Alternate value is exponential. If so, uses\n P(u,v) = exp(E(u,v)*eta) * exp(K(u,v)*gamma)\n epsilon : float\n A small positive value added to all P(u,v). The default value is 1e-6\n copy : bool\n Some algorithms add edges directly to the input matrix. Set this flag\n to make a copy of the input matrix instead. 
Defaults to True.\n seed : hashable, optional\n If None (default), use the np.random's global random state to generate random numbers.\n Otherwise, use a new np.random.RandomState instance seeded with the given value.\n '''\n rng = get_rng(seed)\n if copy:\n A = A.copy()\n\n n = len(D)\n \n #These parameters don't do any of the voronoi narrowing.\n #Its a list of eta values paired with gamma values.\n #To try 3 eta and 3 gamma pairs, should use 9 list values.\n if len(eta) != len(gamma):\n raise BCTParamError('Eta and gamma hyperparameters must be lists of '\n 'the same size')\n\n nparams = len(eta)\n\n B = np.zeros((n, n, nparams))\n\n def k_avg(K):\n return ((np.tile(K, (n, 1)) + np.transpose(np.tile(K, (n, 1))))/2 +\n epsilon)\n\n def k_diff(K):\n return np.abs(np.tile(K, (n, 1)) - \n np.transpose(np.tile(K, (n, 1)))) + epsilon\n\n def k_max(K):\n return np.max(np.dstack((np.tile(K, (n, 1)),\n np.transpose(np.tile(K, (n, 1))))),\n axis=2) + epsilon\n\n def k_min(K):\n return np.min(np.dstack((np.tile(K, (n, 1)),\n np.transpose(np.tile(K, (n, 1))))),\n axis=2) + epsilon\n\n def k_prod(K):\n return np.outer(K, np.transpose(K)) + epsilon\n\n def s_avg(K, sc):\n return (K+sc) / 2 + epsilon\n\n def s_diff(K, sc):\n return np.abs(K-sc) + epsilon\n\n def s_min(K, sc):\n return np.where(K < sc, K + epsilon, sc + epsilon)\n \n def s_max(K, sc):\n #return np.max((K, sc.T), axis=0)\n return np.where(K > sc, K + epsilon, sc + epsilon)\n\n def s_prod(K, sc):\n return K * sc + epsilon\n\n def x_avg(K, ixes):\n nr_ixes = np.size(np.where(ixes))\n Ksc = np.tile(K, (nr_ixes, 1))\n Kix = np.transpose(np.tile(K[ixes], (n, 1)))\n return s_avg(Ksc, Kix)\n\n def x_diff(K, ixes):\n nr_ixes = np.size(np.where(ixes))\n Ksc = np.tile(K, (nr_ixes, 1))\n Kix = np.transpose(np.tile(K[ixes], (n, 1)))\n return s_diff(Ksc, Kix)\n\n def x_max(K, ixes):\n nr_ixes = np.size(np.where(ixes))\n Ksc = np.tile(K, (nr_ixes, 1))\n Kix = np.transpose(np.tile(K[ixes], (n, 1)))\n return s_max(Ksc, Kix)\n\n def x_min(K, ixes):\n nr_ixes = np.size(np.where(ixes))\n Ksc = np.tile(K, (nr_ixes, 1))\n Kix = np.transpose(np.tile(K[ixes], (n, 1)))\n return s_min(Ksc, Kix)\n\n def x_prod(K, ixes):\n nr_ixes = np.size(np.where(ixes))\n Ka = np.reshape(K[ixes], (nr_ixes, 1))\n Kb = np.reshape(np.transpose(K), (1, n))\n return np.outer(Ka, Kb) + epsilon\n\n\n def clu_gen(A, K, D, m, eta, gamma, model_var, x_fun):\n mseed = np.size(np.where(A.flat))//2\n\n A = A>0\n\n if type(model_var) == tuple:\n mv1, mv2 = model_var\n else:\n mv1, mv2 = model_var, model_var\n\n if mv1 in ('powerlaw', 'power_law'):\n Fd = D**eta\n elif mv1 in ('exponential',):\n Fd = np.exp(eta*D) \n\n if mv2 in ('powerlaw', 'power_law'):\n Fk = K**gamma\n elif mv2 in ('exponential',):\n Fk = np.exp(gamma*K) \n\n c = clustering_coef_bu(A)\n k = np.sum(A, axis=1)\n\n Ff = Fd * Fk * np.logical_not(A)\n u,v = np.where(np.triu(np.ones((n,n)), 1))\n\n #print(mseed, m)\n for i in range(mseed+1, m):\n C = np.append(0, np.cumsum(Ff[u,v]))\n r = np.sum(rng.random_sample()*C[-1] >= C)\n uu = u[r]\n vv = v[r]\n A[uu,vv] = A[vv,uu] = 1\n k[uu] += 1\n k[vv] += 1\n\n bu = A[uu,:].astype(bool)\n bv = A[vv,:].astype(bool)\n su = A[np.ix_(bu, bu)]\n sv = A[np.ix_(bu, bu)]\n\n bth = np.logical_and(bu, bv)\n c[bth] += 2/(k[bth]**2 - k[bth])\n c[uu] = np.size(np.where(su.flat))/(k[uu]*(k[uu]-1))\n c[vv] = np.size(np.where(sv.flat))/(k[vv]*(k[vv]-1))\n c[k<=1] = 0\n bth[uu] = 1\n bth[vv] = 1\n \n k_result = x_fun(c, bth)\n\n #print(np.shape(k_result))\n #print(np.shape(K))\n #print(K)\n 
#print(np.shape(K[bth,:]))\n\n K[bth,:] = k_result\n K[:,bth] = k_result.T\n\n if mv2 in ('powerlaw', 'power_law'):\n Ff[bth,:] = Fd[bth,:] * K[bth,:]**gamma\n Ff[:,bth] = Fd[:,bth] * K[:,bth]**gamma\n elif mv2 in ('exponential',):\n Ff[bth,:] = Fd[bth,:] * np.exp(K[bth,:])*gamma\n Ff[:,bth] = Fd[:,bth] * np.exp(K[:,bth])*gamma\n\n Ff = Ff * np.logical_not(A)\n\n return A\n\n def deg_gen(A, K, D, m, eta, gamma, model_var, s_fun):\n mseed = np.size(np.where(A.flat))//2\n\n k = np.sum(A, axis=1)\n\n if type(model_var) == tuple:\n mv1, mv2 = model_var\n else:\n mv1, mv2 = model_var, model_var\n\n if mv1 in ('powerlaw', 'power_law'):\n Fd = D**eta\n elif mv1 in ('exponential',):\n Fd = np.exp(eta*D) \n\n if mv2 in ('powerlaw', 'power_law'):\n Fk = K**gamma\n elif mv2 in ('exponential',):\n Fk = np.exp(gamma*K) \n\n P = Fd * Fk * np.logical_not(A)\n u,v = np.where(np.triu(np.ones((n,n)), 1))\n\n b = np.zeros((m,), dtype=int)\n\n# print(mseed)\n# print(np.shape(u),np.shape(v))\n# print(np.shape(b))\n# print(np.shape(A[u,v]))\n# print(np.shape(np.where(A[u,v])), 'sqishy')\n# print(np.shape(P), 'squnnaq')\n\n #b[:mseed] = np.where(A[np.ix_(u,v)]) \n b[:mseed] = np.squeeze(np.where(A[u,v]))\n #print(mseed, m)\n for i in range(mseed, m):\n C = np.append(0, np.cumsum(P[u,v]))\n r = np.sum(rng.random_sample()*C[-1] >= C)\n uu = u[r]\n vv = v[r]\n k[uu] += 1\n k[vv] += 1\n\n if mv2 in ('powerlaw', 'power_law'):\n Fk[:,uu] = Fk[uu,:] = s_fun(k, k[uu]) ** gamma\n Fk[:,vv] = Fk[vv,:] = s_fun(k, k[vv]) ** gamma\n elif mv2 in ('exponential',):\n Fk[:,uu] = Fk[uu,:] = np.exp(s_fun(k, k[uu]) * gamma)\n Fk[:,vv] = Fk[vv,:] = np.exp(s_fun(k, k[vv]) * gamma)\n\n P = Fd * Fk\n\n b[i] = r\n\n P[u[b[:i]], v[b[:i]]] = P[v[b[:i]], u[b[:i]]] = 0\n\n A[u[r], v[r]] = A[v[r], u[r]] = 1\n #P[b[u[:i]], b[v[:i]]] = P[b[v[:i]], b[u[:i]]] = 0\n\n #A[uu,vv] = A[vv,uu] = 1\n\n\n# indx = v*n + u\n# indx[b]\n#\n# nH = np.zeros((n,n))\n# nH.ravel()[indx[b]]=1\n#\n# nG = np.zeros((n,n))\n# nG[ u[b], v[b] ]=1\n# nG = nG + nG.T\n#\n# print(np.shape(np.where(A != nG)))\n#\n# import pdb\n# pdb.set_trace()\n\n return A\n\n def matching_gen(A, K, D, m, eta, gamma, model_var):\n K += epsilon\n\n mseed = np.size(np.where(A.flat))//2\n\n if type(model_var) == tuple:\n mv1, mv2 = model_var\n else:\n mv1, mv2 = model_var, model_var\n\n if mv1 in ('powerlaw', 'power_law'):\n Fd = D**eta\n elif mv1 in ('exponential',):\n Fd = np.exp(eta*D) \n\n if mv2 in ('powerlaw', 'power_law'):\n Fk = K**gamma\n elif mv2 in ('exponential',):\n Fk = np.exp(gamma*K) \n\n Ff = Fd * Fk * np.logical_not(A)\n u,v = np.where(np.triu(np.ones((n,n)), 1))\n \n for ii in range(mseed, m):\n C = np.append(0, np.cumsum(Ff[u,v]))\n r = np.sum(rng.random_sample()*C[-1] >= C)\n uu = u[r]\n vv = v[r]\n A[uu,vv] = A[vv,uu] = 1\n\n updateuu, = np.where(np.inner(A, A[:,uu]))\n np.delete(updateuu, np.where(updateuu == uu))\n np.delete(updateuu, np.where(updateuu == vv))\n\n c1 = np.append(A[:,uu], A[uu,:])\n for i in range(len(updateuu)):\n j = updateuu[i]\n c2 = np.append(A[:,j], A[j,:])\n \n use = np.logical_or(c1, c2)\n use[uu] = use[uu+n] = use[j] = use[j+n] = 0\n ncon = np.sum(c1[use]) + np.sum(c2[use])\n if ncon == 0:\n K[uu, j] = K[j, uu] = epsilon\n else:\n K[uu, j] = K[j, uu] = (2 / ncon *\n np.sum(np.logical_and(c1[use], c2[use])) + epsilon)\n\n updatevv, = np.where(np.inner(A, A[:,vv]))\n np.delete(updatevv, np.where(updatevv == uu))\n np.delete(updatevv, np.where(updatevv == vv))\n \n c1 = np.append(A[:,vv], A[vv,:])\n for i in range(len(updatevv)):\n j = 
updatevv[i]\n c2 = np.append(A[:,j], A[j,:])\n \n use = np.logical_or(c1, c2)\n use[vv] = use[vv+n] = use[j] = use[j+n] = 0\n ncon = np.sum(c1[use]) + np.sum(c2[use])\n if ncon == 0:\n K[vv, j] = K[j, vv] = epsilon\n else:\n K[vv, j] = K[j, vv] = (2 / ncon *\n np.sum(np.logical_and(c1[use], c2[use])) + epsilon)\n\n Ff = Fd * Fk * np.logical_not(A)\n\n return A\n \n def neighbors_gen(A, K, D, m, eta, gamma, model_var):\n K += epsilon\n\n mseed = np.size(np.where(A.flat))//2\n\n if type(model_var) == tuple:\n mv1, mv2 = model_var\n else:\n mv1, mv2 = model_var, model_var\n\n if mv1 in ('powerlaw', 'power_law'):\n Fd = D**eta\n elif mv1 in ('exponential',):\n Fd = np.exp(eta*D) \n\n if mv2 in ('powerlaw', 'power_law'):\n Fk = K**gamma\n elif mv2 in ('exponential',):\n Fk = np.exp(gamma*K) \n\n Ff = Fd * Fk * np.logical_not(A)\n u,v = np.where(np.triu(np.ones((n,n)), 1))\n \n for ii in range(mseed, m):\n C = np.append(0, np.cumsum(Ff[u,v]))\n r = np.sum(rng.random_sample()*C[-1] >= C)\n uu = u[r]\n vv = v[r]\n A[uu, vv] = A[vv, uu] = 1\n\n x = A[uu, :].astype(int)\n y = A[:, vv].astype(int)\n \n K[uu, y] += 1\n K[y, uu] += 1\n K[vv, x] += 1\n K[x, vv] += 1\n\n if mv2 in ('powerlaw', 'power_law'):\n Fk = K**gamma\n elif mv2 in ('exponential',):\n Fk = np.exp(gamma*K) \n\n if mv2 in ('powerlaw', 'power_law'):\n Ff[uu, y] = Ff[y, uu] = Fd[uu, y] * (K[uu, y] ** gamma)\n Ff[vv, x] = Ff[x, vv] = Fd[vv, x] * (K[vv, x] ** gamma)\n elif mv2 in ('exponential',):\n Ff[uu, y] = Ff[y, uu] = Fd[uu, y] * np.exp(gamma * K[uu, y])\n Ff[vv, x] = Ff[x, vv] = Fd[vv, x] * np.exp(gamma * K[vv, x])\n\n Ff[np.where(A)] = 0\n\n return A\n\n def euclidean_gen(A, D, m, eta, model_var):\n mseed = np.size(np.where(A.flat))//2\n\n if type(model_var) == tuple:\n mv1, mv2 = model_var\n else:\n mv1, mv2 = model_var, model_var\n\n if mv1 != mv2:\n raise BCTParamError('Too many hyperparameters specified')\n\n if mv1 in ('powerlaw', 'power_law'):\n Fd = D ** eta\n elif mv1 in ('exponential',):\n Fd = np.exp(eta ** D)\n\n u,v = np.where(np.triu(np.ones((n,n)), 1))\n P = Fd * np.logical_not(A)\n\n b = np.zeros((m,), dtype=int)\n b[:mseed] = np.squeeze(np.where(A[u, v]))\n for i in range(mseed, m):\n C = np.append(0, np.cumsum(P[u, v]))\n r = np.sum(rng.random_sample()*C[-1] >= C)\n b[i] = r\n P = Fd\n P[u[b[:i]], v[b[:i]]] = P[v[b[:i]], u[b[:i]]] = 0\n\n A[u[r], v[r]] = A[v[r], u[r]] = 1\n\n return A\n\n if model_type in ('clu-avg', 'clu_avg'):\n Kseed = k_avg(clustering_coef_bu(A))\n for j, (ep, gp) in enumerate(zip(eta, gamma)):\n B[:,:,j] = clu_gen(A, Kseed, D, m, ep, gp, model_var, x_avg)\n\n elif model_type in ('clu-diff', 'clu_diff'):\n Kseed = k_diff(clustering_coef_bu(A))\n for j, (ep, gp) in enumerate(zip(eta, gamma)):\n B[:,:,j] = clu_gen(A, Kseed, D, m, ep, gp, model_var, x_diff)\n\n elif model_type in ('clu-max', 'clu_max'):\n Kseed = k_max(clustering_coef_bu(A))\n for j, (ep, gp) in enumerate(zip(eta, gamma)):\n B[:,:,j] = clu_gen(A, Kseed, D, m, ep, gp, model_var, x_max) \n\n elif model_type in ('clu-min', 'clu_min'):\n Kseed = k_min(clustering_coef_bu(A))\n for j, (ep, gp) in enumerate(zip(eta, gamma)):\n B[:,:,j] = clu_gen(A, Kseed, D, m, ep, gp, model_var, x_min) \n\n elif model_type in ('clu-prod', 'clu_prod'):\n Kseed = k_prod(clustering_coef_bu(A))\n for j, (ep, gp) in enumerate(zip(eta, gamma)):\n B[:,:,j] = clu_gen(A, Kseed, D, m, ep, gp, model_var, x_prod)\n\n elif model_type in ('deg-avg', 'deg_avg'):\n Kseed = k_avg(np.sum(A, axis=1))\n for j, (ep, gp) in enumerate(zip(eta, gamma)):\n B[:,:,j] = deg_gen(A, 
Kseed, D, m, ep, gp, model_var, s_avg)\n\n elif model_type in ('deg-diff', 'deg_diff'):\n Kseed = k_diff(np.sum(A, axis=1))\n for j, (ep, gp) in enumerate(zip(eta, gamma)):\n B[:,:,j] = deg_gen(A, Kseed, D, m, ep, gp, model_var, s_diff)\n \n elif model_type in ('deg-max', 'deg_max'):\n Kseed = k_max(np.sum(A, axis=1))\n for j, (ep, gp) in enumerate(zip(eta, gamma)):\n B[:,:,j] = deg_gen(A, Kseed, D, m, ep, gp, model_var, s_max)\n\n elif model_type in ('deg-min', 'deg_min'):\n Kseed = k_min(np.sum(A, axis=1))\n for j, (ep, gp) in enumerate(zip(eta, gamma)):\n B[:,:,j] = deg_gen(A, Kseed, D, m, ep, gp, model_var, s_min)\n\n elif model_type in ('deg-prod', 'deg_prod'):\n Kseed = k_prod(np.sum(A, axis=1))\n for j, (ep, gp) in enumerate(zip(eta, gamma)):\n B[:,:,j] = deg_gen(A, Kseed, D, m, ep, gp, model_var, s_prod)\n\n elif model_type in ('neighbors',):\n Kseed = np.inner(A, A)\n np.fill_diagonal(Kseed, 0)\n for j, (ep, gp) in enumerate(zip(eta, gamma)):\n B[:,:,j] = neighbors_gen(A, Kseed, D, m, ep, gp, model_var)\n\n elif model_type in ('matching', 'matching-ind', 'matching_ind'):\n mi, _, _ = matching_ind(A)\n Kseed = mi + mi.T\n for j, (ep, gp) in enumerate(zip(eta, gamma)):\n B[:,:,j] = matching_gen(A, Kseed, D, m, ep, gp, model_var)\n\n elif model_type in ('spatial', 'geometric', 'euclidean'):\n for j, ep in enumerate(eta):\n B[:,:,j] = euclidean_gen(A, D, m, ep, model_var) \n\n return np.squeeze(B)", "def __similarity_for_tlinks_obj_bgr(\n self,\n data,\n voxelsize,\n # voxels1, voxels2,\n # seeds, otherfeatures=None\n ):\n \"\"\"\n Compute edge values for graph cut tlinks based on image intensity\n and texture.\n \"\"\"\n # self.fit_model(data, voxelsize, seeds)\n # There is a need to have small vaues for good fit\n # R(obj) = -ln( Pr (Ip | O) )\n # R(bck) = -ln( Pr (Ip | B) )\n # Boykov2001b\n # ln is computed in likelihood\n tdata1 = (-(self.mdl.likelihood_from_image(data, voxelsize, 1))) * 10\n tdata2 = (-(self.mdl.likelihood_from_image(data, voxelsize, 2))) * 10\n\n # to spare some memory\n dtype = np.int16\n if np.any(tdata1 > 32760):\n dtype = np.float32\n if np.any(tdata2 > 32760):\n dtype = np.float32\n\n if self.segparams[\"use_apriori_if_available\"] and self.apriori is not None:\n logger.debug(\"using apriori information\")\n gamma = self.segparams[\"apriori_gamma\"]\n a1 = (-np.log(self.apriori * 0.998 + 0.001)) * 10\n a2 = (-np.log(0.999 - (self.apriori * 0.998))) * 10\n # logger.debug('max ' + str(np.max(tdata1)) + ' min ' + str(np.min(tdata1)))\n # logger.debug('max ' + str(np.max(tdata2)) + ' min ' + str(np.min(tdata2)))\n # logger.debug('max ' + str(np.max(a1)) + ' min ' + str(np.min(a1)))\n # logger.debug('max ' + str(np.max(a2)) + ' min ' + str(np.min(a2)))\n tdata1u = (((1 - gamma) * tdata1) + (gamma * a1)).astype(dtype)\n tdata2u = (((1 - gamma) * tdata2) + (gamma * a2)).astype(dtype)\n tdata1 = tdata1u\n tdata2 = tdata2u\n # logger.debug(' max ' + str(np.max(tdata1)) + ' min ' + str(np.min(tdata1)))\n # logger.debug(' max ' + str(np.max(tdata2)) + ' min ' + str(np.min(tdata2)))\n # logger.debug('gamma ' + str(gamma))\n\n # import sed3\n # ed = sed3.show_slices(tdata1)\n # ed = sed3.show_slices(tdata2)\n del tdata1u\n del tdata2u\n del a1\n del a2\n\n # if np.any(tdata1 < 0) or np.any(tdata2 <0):\n # logger.error(\"Problem with tlinks. 
Likelihood is < 0\")\n\n # if self.debug_images:\n # self.__show_debug_tdata_images(tdata1, tdata2, suptitle=\"likelihood\")\n return tdata1, tdata2", "def compute_similarities(hdf5_file, data, N_processes):\n \"\"\"Compute a matrix of pairwise L2 Euclidean distances among samples from 'data'.\n This computation is to be done in parallel by 'N_processes' distinct processes. \n Those processes (which are instances of the class 'Similarities_worker') \n are prevented from simultaneously accessing the HDF5 data structure \n at 'hdf5_file' through the use of a multiprocessing.Lock object.\n \"\"\"\n\n slice_queue = multiprocessing.JoinableQueue()\n \n pid_list = []\n for i in range(N_processes):\n worker = Similarities_worker(hdf5_file, '/aff_prop_group/similarities',\n data, slice_queue)\n worker.daemon = True\n worker.start()\n pid_list.append(worker.pid)\n \n for rows_slice in chunk_generator(data.shape[0], 2 * N_processes):\n slice_queue.put(rows_slice)\n \n slice_queue.join() \n slice_queue.close()\n \n terminate_processes(pid_list)\n gc.collect()", "def _ssgc_prepare_data_and_run_computation(\n self,\n # voxels1, voxels2,\n hard_constraints=True,\n area_weight=1,\n ):\n \"\"\"\n Setting of data.\n You need set seeds if you want use hard_constraints.\n \"\"\"\n # from PyQt4.QtCore import pyqtRemoveInputHook\n # pyqtRemoveInputHook()\n # import pdb; pdb.set_trace() # BREAKPOINT\n\n unariesalt = self.__create_tlinks(\n self.img,\n self.voxelsize,\n # voxels1, voxels2,\n self.seeds,\n area_weight,\n hard_constraints,\n )\n # některém testu organ semgmentation dosahují unaries -15. což je podiné\n # stačí vyhodit print před if a je to vidět\n logger.debug(\"unaries %.3g , %.3g\" % (np.max(unariesalt), np.min(unariesalt)))\n # create potts pairwise\n # pairwiseAlpha = -10\n pairwise = -(np.eye(2) - 1)\n pairwise = (self.segparams[\"pairwise_alpha\"] * pairwise).astype(np.int32)\n # pairwise = np.array([[0,30],[30,0]]).astype(np.int32)\n # print pairwise\n\n self.iparams = {}\n\n if self.segparams[\"use_boundary_penalties\"]:\n sigma = self.segparams[\"boundary_penalties_sigma\"]\n # set boundary penalties function\n # Default are penalties based on intensity differences\n boundary_penalties_fcn = lambda ax: self._boundary_penalties_array(\n axis=ax, sigma=sigma\n )\n else:\n boundary_penalties_fcn = None\n nlinks = self.__create_nlinks(\n self.img, boundary_penalties_fcn=boundary_penalties_fcn\n )\n\n self.stats[\"tlinks shape\"].append(unariesalt.reshape(-1, 2).shape)\n self.stats[\"nlinks shape\"].append(nlinks.shape)\n # we flatten the unaries\n # result_graph = cut_from_graph(nlinks, unaries.reshape(-1, 2),\n # pairwise)\n start = time.time()\n if self.debug_images:\n self._debug_show_unariesalt(unariesalt)\n result_graph = pygco.cut_from_graph(nlinks, unariesalt.reshape(-1, 2), pairwise)\n elapsed = time.time() - start\n self.stats[\"gc time\"] = elapsed\n result_labeling = result_graph.reshape(self.img.shape)\n\n return result_labeling", "def cosine_similarity(u, v)\n # TODO: Change this to a more specific, custom-made exception.\n raise ArgumentError if u.size != v.size\n\n dot_product = u.zip(v).reduce(0.0) { |acc, ary| acc += ary[0] * ary[1] }\n\n dot_product / (euclidean(u) * euclidean(v))\n end", "def __set_hard_hard_constraints(self, tdata1, tdata2, seeds):\n \"\"\"\n it works with seed labels:\n 0: nothing\n 1: object 1 - full seeds\n 2: object 2 - full seeds\n 3: object 1 - not a training seeds\n 4: object 2 - not a training seeds\n \"\"\"\n seeds_mask = (seeds == 1) | (seeds == 
3)\n tdata2[seeds_mask] = np.max(tdata2) + 1\n tdata1[seeds_mask] = 0\n\n seeds_mask = (seeds == 2) | (seeds == 4)\n tdata1[seeds_mask] = np.max(tdata1) + 1\n tdata2[seeds_mask] = 0\n\n return tdata1, tdata2", "def venn3_unweighted(subsets, set_labels=('A', 'B', 'C'), set_colors=('r', 'g', 'b'), alpha=0.4, normalize_to=1.0, subset_areas=(1, 1, 1, 1, 1, 1, 1), ax=None, subset_label_formatter=None):\n '''\n The version of venn3 without area-weighting.\n It is implemented as a wrapper around venn3. Namely, venn3 is invoked as usual, but with all subset areas\n set to 1. The subset labels are then replaced in the resulting diagram with the provided subset sizes.\n \n The parameters are all the same as that of venn2.\n In addition there is a subset_areas parameter, which specifies the actual subset areas.\n (it is (1, 1, 1, 1, 1, 1, 1) by default. You are free to change it, within reason).\n '''\n v = venn3(subset_areas, set_labels, set_colors, alpha, normalize_to, ax)\n # Now rename the labels\n if subset_label_formatter is None:\n subset_label_formatter = str \n subset_ids = ['100', '010', '110', '001', '101', '011', '111']\n if isinstance(subsets, dict):\n subsets = [subsets.get(t, 0) for t in subset_ids]\n elif len(subsets) == 3:\n subsets = compute_venn3_subsets(*subsets)\n for n, id in enumerate(subset_ids):\n lbl = v.get_label_by_id(id)\n if lbl is not None:\n lbl.set_text(subset_label_formatter(subsets[n]))\n return v", "def evaluate_generative_model(A, Atgt, D, eta, gamma=None, \n model_type='matching', model_var='powerlaw', epsilon=1e-6, seed=None):\n '''\n Generates synthetic networks with parameters provided and evaluates their\n energy function. The energy function is defined as in Betzel et al. 2016.\n Basically it takes the Kolmogorov-Smirnov statistics of 4 network\n measures; comparing the degree distributions, clustering coefficients,\n betweenness centrality, and Euclidean distances between connected regions.\n \n The energy is globally low if the synthetic network matches the target.\n Energy is defined as the maximum difference across the four statistics.\n '''\n m = np.size(np.where(Atgt.flat))//2\n n = len(Atgt)\n xk = np.sum(Atgt, axis=1)\n xc = clustering_coef_bu(Atgt)\n xb = betweenness_bin(Atgt)\n xe = D[np.triu(Atgt, 1) > 0]\n\n B = generative_model(A, D, m, eta, gamma, model_type=model_type, \n model_var=model_var, epsilon=epsilon, copy=True, seed=seed)\n\n #if eta != gamma then an error is thrown within generative model\n \n nB = len(eta)\n\n if nB == 1:\n B = np.reshape(B, np.append(np.shape(B), 1))\n\n K = np.zeros((nB, 4))\n\n def kstats(x, y):\n bin_edges = np.concatenate([[-np.inf],\n np.sort(np.concatenate((x, y))), \n [np.inf]])\n\n bin_x,_ = np.histogram(x, bin_edges)\n bin_y,_ = np.histogram(y, bin_edges)\n\n #print(np.shape(bin_x))\n\n sum_x = np.cumsum(bin_x) / np.sum(bin_x)\n sum_y = np.cumsum(bin_y) / np.sum(bin_y)\n\n cdfsamp_x = sum_x[:-1]\n cdfsamp_y = sum_y[:-1]\n\n delta_cdf = np.abs(cdfsamp_x - cdfsamp_y)\n\n print(np.shape(delta_cdf))\n #print(delta_cdf)\n print(np.argmax(delta_cdf), np.max(delta_cdf))\n\n return np.max(delta_cdf)\n\n for ib in range(nB):\n Bc = B[:,:,ib]\n yk = np.sum(Bc, axis=1)\n yc = clustering_coef_bu(Bc)\n yb = betweenness_bin(Bc)\n ye = D[np.triu(Bc, 1) > 0]\n\n K[ib, 0] = kstats(xk, yk)\n K[ib, 1] = kstats(xc, yc)\n K[ib, 2] = kstats(xb, yb)\n K[ib, 3] = kstats(xe, ye)\n\n return np.max(K, axis=1)", "def similarity(self, other: 'Trigram') -> float:\n \"\"\"\n Compute the similarity with the provided other trigram.\n 
\"\"\"\n if not len(self._trigrams) or not len(other._trigrams):\n return 0\n\n count = float(len(self._trigrams & other._trigrams))\n len1 = float(len(self._trigrams))\n len2 = float(len(other._trigrams))\n\n return count / (len1 + len2 - count)", "def calculate_similar_artists(output_filename, model_name=\"als\"):\n \"\"\" generates a list of similar artists in lastfm by utiliizing the 'similar_items'\n api of the models \"\"\"\n artists, users, plays = get_lastfm()\n\n # create a model from the input data\n model = get_model(model_name)\n\n # if we're training an ALS based model, weight input for last.fm\n # by bm25\n if issubclass(model.__class__, AlternatingLeastSquares):\n # lets weight these models by bm25weight.\n logging.debug(\"weighting matrix by bm25_weight\")\n plays = bm25_weight(plays, K1=100, B=0.8)\n\n # also disable building approximate recommend index\n model.approximate_recommend = False\n\n # this is actually disturbingly expensive:\n plays = plays.tocsr()\n\n logging.debug(\"training model %s\", model_name)\n start = time.time()\n model.fit(plays)\n logging.debug(\"trained model '%s' in %0.2fs\", model_name, time.time() - start)\n\n # write out similar artists by popularity\n start = time.time()\n logging.debug(\"calculating top artists\")\n\n user_count = np.ediff1d(plays.indptr)\n to_generate = sorted(np.arange(len(artists)), key=lambda x: -user_count[x])\n\n # write out as a TSV of artistid, otherartistid, score\n logging.debug(\"writing similar items\")\n with tqdm.tqdm(total=len(to_generate)) as progress:\n with codecs.open(output_filename, \"w\", \"utf8\") as o:\n for artistid in to_generate:\n artist = artists[artistid]\n for other, score in model.similar_items(artistid, 11):\n o.write(\"%s\\t%s\\t%s\\n\" % (artist, artists[other], score))\n progress.update(1)\n\n logging.debug(\"generated similar artists in %0.2fs\", time.time() - start)", "def atlas_overlap(dset,atlas=None):\n '''aligns ``dset`` to the TT_N27 atlas and returns ``(cost,overlap)``'''\n atlas = find_atlas(atlas)\n if atlas==None:\n return None\n \n cost_func = 'crM'\n infile = os.path.abspath(dset)\n tmpdir = tempfile.mkdtemp()\n with nl.run_in(tmpdir):\n o = nl.run(['3dAllineate','-verb','-base',atlas,'-source',infile + '[0]','-NN','-final','NN','-cost',cost_func,'-nmatch','20%','-onepass','-fineblur','2','-cmass','-prefix','test.nii.gz'])\n m = re.search(r'Final\\s+cost = ([\\d.]+) ;',o.output)\n if m:\n cost = float(m.group(1))\n o = nl.run(['3dmaskave','-mask',atlas,'-q','test.nii.gz'],stderr=None)\n data_thresh = float(o.output) / 4\n i = nl.dset_info('test.nii.gz')\n o = nl.run(['3dmaskave','-q','-mask','SELF','-sum',nl.calc([atlas,'test.nii.gz'],'equals(step(a-10),step(b-%.2f))'%data_thresh)],stderr=None)\n overlap = 100*float(o.output) / (i.voxel_dims[0]*i.voxel_dims[1]*i.voxel_dims[2])\n try:\n shutil.rmtree(tmpdir)\n except:\n pass\n return (cost,overlap)" ]
[ 0.6625571846961975, 0.6577945351600647, 0.6545608043670654, 0.6472549438476562, 0.6417547464370728, 0.6331488490104675, 0.6312707662582397, 0.6286358833312988, 0.6269406080245972, 0.6256269812583923, 0.6230696439743042, 0.6227918863296509 ]
Show tlinks.

:param data3d: ndarray with input data
:param voxelsize:
:param seeds:
:param area_weight:
:param hard_constraints:
:param show:
:param bins: histogram bins number
:param slice_number:
:return:
def debug_show_reconstructed_similarity(
    self,
    data3d=None,
    voxelsize=None,
    seeds=None,
    area_weight=1,
    hard_constraints=True,
    show=True,
    bins=20,
    slice_number=None,
):
    """
    Show tlinks.

    :param data3d: ndarray with input data
    :param voxelsize:
    :param seeds:
    :param area_weight:
    :param hard_constraints:
    :param show:
    :param bins: histogram bins number
    :param slice_number:
    :return:
    """
    unariesalt = self.debug_get_reconstructed_similarity(
        data3d,
        voxelsize=voxelsize,
        seeds=seeds,
        area_weight=area_weight,
        hard_constraints=hard_constraints,
        return_unariesalt=True,
    )
    self._debug_show_unariesalt(
        unariesalt, show=show, bins=bins, slice_number=slice_number
    )
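A short, hedged example of how the method above might be called; it assumes, as before, a fitted segmenter instance `gc` and a matplotlib backend that can open figure windows.

# Hypothetical usage -- `gc` is assumed to be a fitted segmenter instance.
# Histogram the reconstructed t-links with 30 bins and display one slice.
gc.debug_show_reconstructed_similarity(bins=30, slice_number=10, show=True)

# The same call with show=False only builds the plots (the t-links are still
# reconstructed via debug_get_reconstructed_similarity under the hood).
gc.debug_show_reconstructed_similarity(show=False)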
[ "def show_slices(data3d, contour=None, seeds=None, axis=0, slice_step=None,\r\n shape=None, show=True,\r\n flipH=False, flipV=False,\r\n first_slice_offset=0,\r\n first_slice_offset_to_see_seed_with_label=None,\r\n slice_number=None\r\n ):\r\n \"\"\"\r\n Show slices as tiled image\r\n\r\n :param data3d: Input data\r\n :param contour: Data for contouring\r\n :param seeds: Seed data\r\n :param axis: Axis for sliceing\r\n :param slice_step: Show each \"slice_step\"-th slice, can be float\r\n :param shape: tuple(vertical_tiles_number, horisontal_tiles_number), set shape of output tiled image. slice_step is\r\n estimated if it is not set explicitly\r\n :param first_slice_offset: set offset of first slice\r\n :param first_slice_offset_to_see_seed_with_label: find offset to see slice with seed with defined label\r\n :param slice_number: int, Number of showed slices. Overwrites shape and slice_step.\r\n \"\"\"\r\n\r\n if slice_number is not None:\r\n slice_step = data3d.shape[axis] / slice_number\r\n # odhad slice_step, neni li zadan\r\n # slice_step estimation\r\n # TODO make precise estimation (use np.linspace to indexing?)\r\n if slice_step is None:\r\n if shape is None:\r\n slice_step = 1\r\n else:\r\n slice_step = ((data3d.shape[axis] - first_slice_offset ) / float(np.prod(shape)))\r\n\r\n\r\n\r\n if first_slice_offset_to_see_seed_with_label is not None:\r\n if seeds is not None:\r\n inds = np.nonzero(seeds==first_slice_offset_to_see_seed_with_label)\r\n # print(inds)\r\n # take first one with defined seed\r\n # ind = inds[axis][0]\r\n # take most used index\r\n ind = np.median(inds[axis])\r\n first_slice_offset = ind % slice_step\r\n\r\n\r\n data3d = _import_data(data3d, axis=axis, slice_step=slice_step, first_slice_offset=first_slice_offset)\r\n contour = _import_data(contour, axis=axis, slice_step=slice_step, first_slice_offset=first_slice_offset)\r\n seeds = _import_data(seeds, axis=axis, slice_step=slice_step, first_slice_offset=first_slice_offset)\r\n\r\n number_of_slices = data3d.shape[axis]\r\n # square image\r\n # nn = int(math.ceil(number_of_slices ** 0.5))\r\n\r\n # sh = [nn, nn]\r\n\r\n # 4:3 image\r\n meta_shape = shape\r\n if meta_shape is None:\r\n na = int(math.ceil(number_of_slices * 16.0 / 9.0) ** 0.5)\r\n nb = int(math.ceil(float(number_of_slices) / na))\r\n meta_shape = [nb, na]\r\n\r\n dsh = __get_slice(data3d, 0, axis).shape\r\n slimsh = [int(dsh[0] * meta_shape[0]), int(dsh[1] * meta_shape[1])]\r\n slim = np.zeros(slimsh, dtype=data3d.dtype)\r\n slco = None\r\n slse = None\r\n if seeds is not None:\r\n slse = np.zeros(slimsh, dtype=seeds.dtype)\r\n if contour is not None:\r\n slco = np.zeros(slimsh, dtype=contour.dtype)\r\n # slse =\r\n # f, axarr = plt.subplots(sh[0], sh[1])\r\n\r\n for i in range(0, number_of_slices):\r\n cont = None\r\n seeds2d = None\r\n im2d = __get_slice(data3d, i, axis, flipH=flipH, flipV=flipV)\r\n if contour is not None:\r\n cont = __get_slice(contour, i, axis, flipH=flipH, flipV=flipV)\r\n slco = __put_slice_in_slim(slco, cont, meta_shape, i)\r\n if seeds is not None:\r\n seeds2d = __get_slice(seeds, i, axis, flipH=flipH, flipV=flipV)\r\n slse = __put_slice_in_slim(slse, seeds2d, meta_shape, i)\r\n # plt.axis('off')\r\n # plt.subplot(sh[0], sh[1], i+1)\r\n # plt.subplots_adjust(wspace=0, hspace=0)\r\n\r\n slim = __put_slice_in_slim(slim, im2d, meta_shape, i)\r\n # show_slice(im2d, cont, seeds2d)\r\n show_slice(slim, slco, slse)\r\n if show:\r\n plt.show()", "def __similarity_for_tlinks_obj_bgr(\n self,\n data,\n voxelsize,\n # voxels1, 
voxels2,\n # seeds, otherfeatures=None\n ):\n \"\"\"\n Compute edge values for graph cut tlinks based on image intensity\n and texture.\n \"\"\"\n # self.fit_model(data, voxelsize, seeds)\n # There is a need to have small vaues for good fit\n # R(obj) = -ln( Pr (Ip | O) )\n # R(bck) = -ln( Pr (Ip | B) )\n # Boykov2001b\n # ln is computed in likelihood\n tdata1 = (-(self.mdl.likelihood_from_image(data, voxelsize, 1))) * 10\n tdata2 = (-(self.mdl.likelihood_from_image(data, voxelsize, 2))) * 10\n\n # to spare some memory\n dtype = np.int16\n if np.any(tdata1 > 32760):\n dtype = np.float32\n if np.any(tdata2 > 32760):\n dtype = np.float32\n\n if self.segparams[\"use_apriori_if_available\"] and self.apriori is not None:\n logger.debug(\"using apriori information\")\n gamma = self.segparams[\"apriori_gamma\"]\n a1 = (-np.log(self.apriori * 0.998 + 0.001)) * 10\n a2 = (-np.log(0.999 - (self.apriori * 0.998))) * 10\n # logger.debug('max ' + str(np.max(tdata1)) + ' min ' + str(np.min(tdata1)))\n # logger.debug('max ' + str(np.max(tdata2)) + ' min ' + str(np.min(tdata2)))\n # logger.debug('max ' + str(np.max(a1)) + ' min ' + str(np.min(a1)))\n # logger.debug('max ' + str(np.max(a2)) + ' min ' + str(np.min(a2)))\n tdata1u = (((1 - gamma) * tdata1) + (gamma * a1)).astype(dtype)\n tdata2u = (((1 - gamma) * tdata2) + (gamma * a2)).astype(dtype)\n tdata1 = tdata1u\n tdata2 = tdata2u\n # logger.debug(' max ' + str(np.max(tdata1)) + ' min ' + str(np.min(tdata1)))\n # logger.debug(' max ' + str(np.max(tdata2)) + ' min ' + str(np.min(tdata2)))\n # logger.debug('gamma ' + str(gamma))\n\n # import sed3\n # ed = sed3.show_slices(tdata1)\n # ed = sed3.show_slices(tdata2)\n del tdata1u\n del tdata2u\n del a1\n del a2\n\n # if np.any(tdata1 < 0) or np.any(tdata2 <0):\n # logger.error(\"Problem with tlinks. 
Likelihood is < 0\")\n\n # if self.debug_images:\n # self.__show_debug_tdata_images(tdata1, tdata2, suptitle=\"likelihood\")\n return tdata1, tdata2", "def pick_slices(img, num_slices_per_view):\n \"\"\"\n Picks the slices to display in each dimension,\n skipping any empty slices (without any segmentation at all).\n\n \"\"\"\n\n slices = list()\n for view in range(len(img.shape)):\n dim_size = img.shape[view]\n non_empty_slices = np.array(\n [sl for sl in range(dim_size) if np.count_nonzero(get_axis(img, view, sl)) > 0])\n num_non_empty = len(non_empty_slices)\n\n # trying to 5% slices at the tails (bottom clipping at 0)\n skip_count = max(0, np.around(num_non_empty * 0.05).astype('int16'))\n # only when possible\n if skip_count > 0 and (num_non_empty - 2 * skip_count >= num_slices_per_view):\n non_empty_slices = non_empty_slices[skip_count: -skip_count]\n num_non_empty = len(non_empty_slices)\n\n # sampling non-empty slices only\n sampled_indices = np.linspace(0, num_non_empty, num=min(num_non_empty, num_slices_per_view),\n endpoint=False)\n slices_in_dim = non_empty_slices[np.around(sampled_indices).astype('int64')]\n\n # ensure you do not overshoot\n slices_in_dim = [sn for sn in slices_in_dim if sn >= 0 or sn <= num_non_empty]\n\n slices.append(slices_in_dim)\n\n return slices", "def aseg_on_mri(mri_spec,\n aseg_spec,\n alpha_mri=1.0,\n alpha_seg=1.0,\n num_rows=2,\n num_cols=6,\n rescale_method='global',\n aseg_cmap='freesurfer',\n sub_cortical=False,\n annot=None,\n padding=5,\n bkground_thresh=0.05,\n output_path=None,\n figsize=None,\n **kwargs):\n \"Produces a collage of various slices from different orientations in the given 3D image\"\n\n num_rows, num_cols, padding = check_params(num_rows, num_cols, padding)\n\n mri = read_image(mri_spec, bkground_thresh=bkground_thresh)\n seg = read_image(aseg_spec, bkground_thresh=0)\n mri, seg = crop_to_seg_extents(mri, seg, padding)\n\n num_slices_per_view = num_rows * num_cols\n slices = pick_slices(seg, num_slices_per_view)\n\n plt.style.use('dark_background')\n\n num_axes = 3\n if figsize is None:\n figsize = [5 * num_axes * num_rows, 5 * num_cols]\n fig, ax = plt.subplots(num_axes * num_rows, num_cols, figsize=figsize)\n\n # displaying some annotation text if provided\n if annot is not None:\n fig.suptitle(annot, backgroundcolor='black', color='g')\n\n display_params_mri = dict(interpolation='none', aspect='equal', origin='lower',\n cmap='gray', alpha=alpha_mri,\n vmin=mri.min(), vmax=mri.max())\n display_params_seg = dict(interpolation='none', aspect='equal', origin='lower',\n alpha=alpha_seg)\n\n normalize_labels = colors.Normalize(vmin=seg.min(), vmax=seg.max(), clip=True)\n fs_cmap = get_freesurfer_cmap(sub_cortical)\n label_mapper = cm.ScalarMappable(norm=normalize_labels, cmap=fs_cmap)\n\n ax = ax.flatten()\n ax_counter = 0\n for dim_index in range(3):\n for slice_num in slices[dim_index]:\n plt.sca(ax[ax_counter])\n ax_counter = ax_counter + 1\n\n slice_mri = get_axis(mri, dim_index, slice_num)\n slice_seg = get_axis(seg, dim_index, slice_num)\n\n # # masking data to set no-value pixels to transparent\n # seg_background = np.isclose(slice_seg, 0.0)\n # slice_seg = np.ma.masked_where(seg_background, slice_seg)\n # slice_mri = np.ma.masked_where(np.logical_not(seg_background), slice_mri)\n\n seg_rgb = label_mapper.to_rgba(slice_seg)\n plt.imshow(seg_rgb, **display_params_seg)\n plt.imshow(slice_mri, **display_params_mri)\n plt.axis('off')\n\n # plt.subplots_adjust(wspace=0.0, hspace=0.0)\n plt.subplots_adjust(left=0.01, 
right=0.99,\n bottom=0.01, top=0.99,\n wspace=0.05, hspace=0.02)\n # fig.tight_layout()\n\n if output_path is not None:\n output_path = output_path.replace(' ', '_')\n fig.savefig(output_path + '.png', bbox_inches='tight')\n\n # plt.close()\n\n return fig", "def show_slice(data2d, contour2d=None, seeds2d=None):\r\n \"\"\"\r\n\r\n :param data2d:\r\n :param contour2d:\r\n :param seeds2d:\r\n :return:\r\n \"\"\"\r\n\r\n import copy as cp\r\n # Show results\r\n\r\n colormap = cp.copy(plt.cm.get_cmap('brg'))\r\n colormap._init()\r\n colormap._lut[:1:, 3] = 0\r\n\r\n plt.imshow(data2d, cmap='gray', interpolation='none')\r\n if contour2d is not None:\r\n plt.contour(contour2d, levels=[0.5, 1.5, 2.5])\r\n if seeds2d is not None:\r\n # Show results\r\n colormap = copy.copy(plt.cm.get_cmap('Paired'))\r\n # colormap = copy.copy(plt.cm.get_cmap('gist_rainbow'))\r\n colormap._init()\r\n\r\n colormap._lut[0, 3] = 0\r\n\r\n tmp0 = copy.copy(colormap._lut[:,0])\r\n tmp1 = copy.copy(colormap._lut[:,1])\r\n tmp2 = copy.copy(colormap._lut[:,2])\r\n\r\n colormap._lut[:, 0] = sigmoid(tmp0, 0.5, 5)\r\n colormap._lut[:, 1] = sigmoid(tmp1, 0.5, 5)\r\n colormap._lut[:, 2] = 0# sigmoid(tmp2, 0.5, 5)\r\n # seed 4\r\n colormap._lut[140:220:, 1] = 0.7# sigmoid(tmp2, 0.5, 5)\r\n colormap._lut[140:220:, 0] = 0.2# sigmoid(tmp2, 0.5, 5)\r\n # seed 2\r\n colormap._lut[40:120:, 1] = 1.# sigmoid(tmp2, 0.5, 5)\r\n colormap._lut[40:120:, 0] = 0.1# sigmoid(tmp2, 0.5, 5)\r\n\r\n\r\n # seed 2\r\n colormap._lut[120:150:, 0] = 1.# sigmoid(tmp2, 0.5, 5)\r\n colormap._lut[120:150:, 1] = 0.1# sigmoid(tmp2, 0.5, 5)\r\n\r\n # my colors\r\n\r\n # colormap._lut[1,:] = [.0,.1,.0,1]\r\n # colormap._lut[2,:] = [.1,.1,.0,1]\r\n # colormap._lut[3,:] = [.1,.1,.1,1]\r\n # colormap._lut[4,:] = [.3,.3,.3,1]\r\n\r\n plt.imshow(seeds2d, cmap=colormap, interpolation='none')", "def show_batch(self, rows:int=5, ds_type:DatasetType=DatasetType.Train, reverse:bool=False, **kwargs)->None:\n \"Show a batch of data in `ds_type` on a few `rows`.\"\n x,y = self.one_batch(ds_type, True, True)\n if reverse: x,y = x.flip(0),y.flip(0)\n n_items = rows **2 if self.train_ds.x._square_show else rows\n if self.dl(ds_type).batch_size < n_items: n_items = self.dl(ds_type).batch_size\n xs = [self.train_ds.x.reconstruct(grab_idx(x, i)) for i in range(n_items)]\n #TODO: get rid of has_arg if possible\n if has_arg(self.train_ds.y.reconstruct, 'x'):\n ys = [self.train_ds.y.reconstruct(grab_idx(y, i), x=x) for i,x in enumerate(xs)]\n else : ys = [self.train_ds.y.reconstruct(grab_idx(y, i)) for i in range(n_items)]\n self.train_ds.x.show_xys(xs, ys, **kwargs)", "def show_xys(self, xs, ys, max_len:int=70)->None:\n \"Show the `xs` (inputs) and `ys` (targets). 
`max_len` is the maximum number of tokens displayed.\"\n from IPython.display import display, HTML\n names = ['idx','text'] if self._is_lm else ['text','target']\n items = []\n for i, (x,y) in enumerate(zip(xs,ys)):\n txt_x = ' '.join(x.text.split(' ')[:max_len]) if max_len is not None else x.text\n items.append([i, txt_x] if self._is_lm else [txt_x, y])\n items = np.array(items)\n df = pd.DataFrame({n:items[:,i] for i,n in enumerate(names)}, columns=names)\n with pd.option_context('display.max_colwidth', -1):\n display(HTML(df.to_html(index=False)))", "def plot_triaxial(height, width, tools):\n '''Plot pandas dataframe containing an x, y, and z column'''\n import bokeh.plotting\n\n p = bokeh.plotting.figure(x_axis_type='datetime',\n plot_height=height,\n plot_width=width,\n title=' ',\n toolbar_sticky=False,\n tools=tools,\n active_drag=BoxZoomTool(),\n output_backend='webgl')\n\n p.yaxis.axis_label = 'Acceleration (count)'\n p.xaxis.axis_label = 'Time (timezone as programmed)'\n\n # Plot accelerometry data as lines and scatter (for BoxSelectTool)\n colors = ['#1b9e77', '#d95f02', '#7570b3']\n axes = ['x', 'y', 'z']\n lines = [None,]*3\n scats = [None,]*3\n for i, (ax, c) in enumerate(zip(axes, colors)):\n lines[i] = p.line(y=ax, x='dt', color=c, legend=False, source=source)\n scats[i] = p.scatter(y=ax, x='dt', color=c, legend=False, size=1,\n source=source)\n return p, lines, scats", "def slice_plot(netin, ax, nodelabels=None, timelabels=None, communities=None, plotedgeweights=False, edgeweightscalar=1, timeunit='', linestyle='k-', cmap=None, nodesize=100, nodekwargs=None, edgekwargs=None):\n r'''\n\n Fuction draws \"slice graph\" and exports axis handles\n\n\n Parameters\n ----------\n\n netin : array, dict\n temporal network input (graphlet or contact)\n ax : matplotlib figure handles.\n nodelabels : list\n nodes labels. List of strings.\n timelabels : list\n labels of dimension Graph is expressed across. List of strings.\n communities : array\n array of size: (time) or (node,time). Nodes will be coloured accordingly.\n plotedgeweights : bool\n if True, edges will vary in size (default False)\n edgeweightscalar : int\n scalar to multiply all edges if tweaking is needed.\n timeunit : string\n unit time axis is in.\n linestyle : string\n line style of Bezier curves.\n nodesize : int\n size of nodes\n nodekwargs : dict\n any additional kwargs for matplotlib.plt.scatter for the nodes\n edgekwargs : dict\n any additional kwargs for matplotlib.plt.plots for the edges\n\n\n Returns\n ---------\n ax : axis handle of slice graph\n\n\n Examples\n ---------\n\n\n Create a network with some metadata\n\n >>> import numpy as np\n >>> import teneto\n >>> import matplotlib.pyplot as plt\n >>> np.random.seed(2017) # For reproduceability\n >>> N = 5 # Number of nodes\n >>> T = 10 # Number of timepoints\n >>> # Probability of edge activation\n >>> birth_rate = 0.2\n >>> death_rate = .9\n >>> # Add node names into the network and say time units are years, go 1 year per graphlet and startyear is 2007\n >>> cfg={}\n >>> cfg['Fs'] = 1\n >>> cfg['timeunit'] = 'Years'\n >>> cfg['t0'] = 2007 #First year in network\n >>> cfg['nodelabels'] = ['Ashley','Blake','Casey','Dylan','Elliot'] # Node names\n >>> #Generate network\n >>> C = teneto.generatenetwork.rand_binomial([N,T],[birth_rate, death_rate],'contact','bu',netinfo=cfg)\n\n Now this network can be plotted\n\n >>> fig,ax = plt.subplots(figsize=(10,3))\n >>> ax = teneto.plot.slice_plot(C, ax, cmap='Pastel2')\n >>> plt.tight_layout()\n >>> fig.show()\n\n .. 
plot::\n\n import numpy as np\n import teneto\n import matplotlib.pyplot as plt\n np.random.seed(2017) # For reproduceability\n N = 5 # Number of nodes\n T = 10 # Number of timepoints\n # Probability of edge activation\n birth_rate = 0.2\n death_rate = .9\n # Add node names into the network and say time units are years, go 1 year per graphlet and startyear is 2007\n cfg={}\n cfg['Fs'] = 1\n cfg['timeunit'] = 'Years'\n cfg['t0'] = 2007 #First year in network\n cfg['nodelabels'] = ['Ashley','Blake','Casey','Dylan','Elliot']\n #Generate network\n C = teneto.generatenetwork.rand_binomial([N,T],[birth_rate, death_rate],'contact','bu',netinfo=cfg)\n fig,ax = plt.subplots(figsize=(10,3))\n cmap = 'Pastel2'\n ax = teneto.plot.slice_plot(C,ax,cmap=cmap)\n plt.tight_layout()\n fig.show()\n\n\n '''\n # Get input type (C or G)\n inputType = checkInput(netin)\n # Convert C representation to G\n\n if inputType == 'G':\n netin = graphlet2contact(netin)\n inputType = 'C'\n edgelist = [tuple(np.array(e[0:2]) + e[2] * netin['netshape'][0])\n for e in netin['contacts']]\n\n if nodelabels is not None and len(nodelabels) == netin['netshape'][0]:\n pass\n elif nodelabels is not None and len(nodelabels) != netin['netshape'][0]:\n raise ValueError('specified node label length does not match netshape')\n elif nodelabels is None and netin['nodelabels'] == '':\n nodelabels = np.arange(1, netin['netshape'][0] + 1)\n else:\n nodelabels = netin['nodelabels']\n\n if timelabels is not None and len(timelabels) == netin['netshape'][-1]:\n pass\n elif timelabels is not None and len(timelabels) != netin['netshape'][-1]:\n raise ValueError('specified time label length does not match netshape')\n elif timelabels is None and str(netin['t0']) == '':\n timelabels = np.arange(1, netin['netshape'][-1] + 1)\n else:\n timelabels = np.arange(netin['t0'], netin['Fs'] *\n netin['netshape'][-1] + netin['t0'], netin['Fs'])\n\n if timeunit is None:\n timeunit = netin['timeunit']\n\n timeNum = len(timelabels)\n nodeNum = len(nodelabels)\n posy = np.tile(list(range(0, nodeNum)), timeNum)\n posx = np.repeat(list(range(0, timeNum)), nodeNum)\n\n if nodekwargs is None:\n nodekwargs = {}\n if edgekwargs is None:\n edgekwargs = {}\n if cmap:\n nodekwargs['cmap'] = cmap\n if 'c' not in nodekwargs:\n nodekwargs['c'] = posy\n if communities is not None:\n # check if temporal or static\n if len(communities.shape) == 1:\n nodekwargs['c'] = np.tile(communities, timeNum)\n else:\n nodekwargs['c'] = communities.flatten(order='F')\n\n\n # plt.plot(points)\n # Draw Bezier vectors around egde positions\n for ei, edge in enumerate(edgelist):\n if plotedgeweights == True and netin['nettype'][0] == 'w':\n edgekwargs['linewidth'] = netin['values'][ei] * edgeweightscalar\n bvx, bvy = bezier_points(\n (posx[edge[0]], posy[edge[0]]), (posx[edge[1]], posy[edge[1]]), nodeNum, 20)\n ax.plot(bvx, bvy, linestyle, **edgekwargs)\n ax.set_yticks(range(0, len(nodelabels)))\n ax.set_xticks(range(0, len(timelabels)))\n ax.set_yticklabels(nodelabels)\n ax.set_xticklabels(timelabels)\n ax.grid()\n ax.set_frame_on(False)\n ax.spines['top'].set_visible(False)\n ax.spines['right'].set_visible(False)\n ax.get_xaxis().tick_bottom()\n ax.get_yaxis().tick_left()\n ax.set_xlim([min(posx) - 1, max(posx) + 1])\n ax.set_ylim([min(posy) - 1, max(posy) + 1])\n ax.scatter(posx, posy, s=nodesize, zorder=10, **nodekwargs)\n if timeunit != '':\n timeunit = ' (' + timeunit + ')'\n ax.set_xlabel('Time' + timeunit)\n\n return ax", "def show(self, lenmavlist, block=True, xlim_pipe=None):\n '''show 
graph'''\n if xlim_pipe is not None:\n xlim_pipe[0].close()\n self.xlim_pipe = xlim_pipe\n if self.labels is not None:\n labels = self.labels.split(',')\n if len(labels) != len(fields)*lenmavlist:\n print(\"Number of labels (%u) must match number of fields (%u)\" % (\n len(labels), len(fields)*lenmavlist))\n return\n else:\n labels = None\n\n for fi in range(0, lenmavlist):\n timeshift = 0\n for i in range(0, len(self.x)):\n if self.first_only[i] and fi != 0:\n self.x[i] = []\n self.y[i] = []\n if labels:\n lab = labels[fi*len(self.fields):(fi+1)*len(self.fields)]\n else:\n lab = self.fields[:]\n if self.multi:\n col = colors[:]\n else:\n col = colors[fi*len(self.fields):]\n self.plotit(self.x, self.y, lab, colors=col, title=self.title)\n for i in range(0, len(self.x)):\n self.x[i] = []\n self.y[i] = []\n\n if self.xlim_pipe is not None:\n import matplotlib.animation\n self.ani = matplotlib.animation.FuncAnimation(self.fig, self.xlim_change_check,\n frames=10, interval=20000,\n repeat=True, blit=False)\n threading.Timer(0.1, self.xlim_timer).start()\n\n pylab.draw()\n pylab.show(block=block)", "def show(self, ax:plt.Axes=None, figsize:tuple=(3,3), title:Optional[str]=None, hide_axis:bool=True,\n cmap:str='tab20', alpha:float=0.5, **kwargs):\n \"Show the `ImageSegment` on `ax`.\"\n ax = show_image(self, ax=ax, hide_axis=hide_axis, cmap=cmap, figsize=figsize,\n interpolation='nearest', alpha=alpha, vmin=0)\n if title: ax.set_title(title)", "def showSegmentation(\n segmentation,\n voxelsize_mm=np.ones([3, 1]),\n degrad=4,\n label=1,\n smoothing=True\n ):\n \"\"\"\n Funkce vrací trojrozměrné porobné jako data['segmentation']\n v data['slab'] je popsáno, co která hodnota znamená\n \"\"\"\n labels = []\n\n segmentation = segmentation[::degrad, ::degrad, ::degrad]\n\n # import pdb; pdb.set_trace()\n mesh_data = seg2fem.gen_mesh_from_voxels_mc(segmentation, voxelsize_mm*degrad)\n if smoothing:\n mesh_data.coors = seg2fem.smooth_mesh(mesh_data)\n else:\n mesh_data = seg2fem.gen_mesh_from_voxels_mc(segmentation, voxelsize_mm * 1.0e-2)\n # mesh_data.coors +=\n vtk_file = \"mesh_geom.vtk\"\n mesh_data.write(vtk_file)\n QApplication(sys.argv)\n view = viewer.QVTKViewer(vtk_file)\n view.exec_()\n\n return labels" ]
[ 0.771639347076416, 0.664085865020752, 0.6366907358169556, 0.6365001797676086, 0.6328928470611572, 0.6294004917144775, 0.6291933059692383, 0.6279113292694092, 0.6263575553894043, 0.6245708465576172, 0.6240610480308533, 0.6238716244697571 ]
Get info about the node. See pycut.inspect_node() for details.
Processing is done in temporary shape.

:param node_seed:
:return: node_unariesalt, node_neighboor_edges_and_weights, node_neighboor_seeds
def debug_inspect_node(self, node_msindex):
    """
    Get info about the node. See pycut.inspect_node() for details.
    Processing is done in temporary shape.

    :param node_seed:
    :return: node_unariesalt, node_neighboor_edges_and_weights, node_neighboor_seeds
    """
    return inspect_node(self.nlinks, self.unariesalt2, self.msinds, node_msindex)
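A hedged sketch of how the method above could be combined with the module-level helpers that appear among the candidate snippets below (`get_node_msindex`, `inspect_node`); `gc` again denotes a fitted multiscale segmenter instance, and the selected voxel coordinates are arbitrary illustration values.

import numpy as np

# Hypothetical usage -- `gc` is assumed to be a fitted multiscale segmenter and
# get_node_msindex to be importable from the same module as the snippets below.
node_seed = np.zeros_like(gc.msinds, dtype=np.int8)
node_seed[10, 20, 30] = 1                        # mark one voxel of interest (arbitrary)
node_msindex = get_node_msindex(gc.msinds, node_seed)

unaries, neigh_edges, neigh_seeds = gc.debug_inspect_node(node_msindex)
print(unaries)                                   # t-link weights of the selected node
print(neigh_edges.shape)                         # n-link edges touching the node
print(np.unique(neigh_seeds))                    # 1 = node itself, 2 = its neighbours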
[ "def inspect_node(nlinks, unariesalt, msinds, node_msindex):\n \"\"\"\n Get information about one node in graph\n\n :param nlinks: neighboorhood edges\n :param unariesalt: weights\n :param msinds: indexes in 3d image\n :param node_msindex: msindex of selected node. See get_node_msindex()\n :return: node_unariesalt, node_neighboor_edges_and_weights, node_neighboor_seeds\n \"\"\"\n\n node_unariesalt = unariesalt[node_msindex]\n neigh_edges, neigh_seeds = inspect_node_neighborhood(nlinks, msinds, node_msindex)\n return node_unariesalt, neigh_edges, neigh_seeds", "def inspect_node_neighborhood(nlinks, msinds, node_msindex):\n \"\"\"\n Get information about one node in graph\n\n :param nlinks: neighboorhood edges\n :param msinds: indexes in 3d image\n :param node_msindex: int, multiscale index of selected voxel\n :return: node_neighboor_edges_and_weights, node_neighboor_seeds\n \"\"\"\n # seed_indexes = np.nonzero(node_seed)\n # selected_inds = msinds[seed_indexes]\n # node_msindex = selected_inds[0]\n\n node_neighbor_edges = get_neighborhood_edes(nlinks, node_msindex)\n\n node_neighbor_seeds = np.zeros_like(msinds, dtype=np.int8)\n for neighboor_ind in np.unique(node_neighbor_edges[:, :2].ravel()):\n node_neighbor_ind = np.where(msinds == neighboor_ind)\n node_neighbor_seeds[node_neighbor_ind] = 2\n\n node_neighbor_seeds[np.where(msinds == node_msindex)] = 1\n\n # node_coordinates = np.unravel_index(selected_voxel_ind, msinds.shape)\n # node_neighbor_coordinates = np.unravel_index(np.unique(node_neighbor_edges[:, :2].ravel()), msinds.shape)\n return node_neighbor_edges, node_neighbor_seeds", "def debug_interactive_inspect_node(self):\n \"\"\"\n Call after segmentation to see selected node neighborhood.\n User have to select one node by click.\n :return:\n \"\"\"\n if (\n np.sum(\n np.abs(\n np.asarray(self.msinds.shape) - np.asarray(self.segmentation.shape)\n )\n )\n == 0\n ):\n segmentation = self.segmentation\n else:\n segmentation = self.temp_msgc_resized_segmentation\n\n logger.info(\"Click to select one voxel of interest\")\n import sed3\n\n ed = sed3.sed3(self.msinds, contour=segmentation == 0)\n ed.show()\n edseeds = ed.seeds\n node_msindex = get_node_msindex(self.msinds, edseeds)\n\n node_unariesalt, node_neighboor_edges_and_weights, node_neighboor_seeds = self.debug_inspect_node(\n node_msindex\n )\n import sed3\n\n ed = sed3.sed3(\n self.msinds, contour=segmentation == 0, seeds=node_neighboor_seeds\n )\n ed.show()\n\n return (\n node_unariesalt,\n node_neighboor_edges_and_weights,\n node_neighboor_seeds,\n node_msindex,\n )", "def get_seeds(self, threshold):\n \"\"\" Returns a list of seed points for isosurface extraction\n given a threshold value\n @ In, threshold, float, the isovalue for which we want to\n identify seed points for isosurface extraction\n \"\"\"\n seeds = []\n for e1, e2 in self.superArcs:\n # Because we did some extra work in _process_tree, we can\n # safely assume e1 is lower than e2\n if self.Y[e1] <= threshold <= self.Y[e2]:\n if (e1, e2) in self.augmentedEdges:\n # These should be sorted\n edgeList = self.augmentedEdges[(e1, e2)]\n elif (e2, e1) in self.augmentedEdges:\n e1, e2 = e2, e1\n # These should be reverse sorted\n edgeList = list(reversed(self.augmentedEdges[(e1, e2)]))\n else:\n continue\n\n startNode = e1\n for endNode in edgeList + [e2]:\n if self.Y[endNode] >= threshold:\n # Stop when you find the first point above the\n # threshold\n break\n startNode = endNode\n\n seeds.append(startNode)\n seeds.append(endNode)\n return seeds", "def inspect\n 
edge_count = @nodes.values.each_with_object(Set.new) do |node, edges|\n edges.merge(node.edges(:out))\n end.length\n\n \"#<#{self.class} (#{ @nodes.length } nodes, #{ edge_count } edges)>\"\n end", "def __get_segments_from_node(node, graph):\n \"\"\"Calculates the segments that can emanate from a particular node on the main cycle.\"\"\"\n list_of_segments = []\n node_object = graph.get_node(node)\n for e in node_object['edges']:\n list_of_segments.append(e)\n return list_of_segments", "def _show_normalized_node(full_node):\n '''\n Normalize the QingCloud instance data. Used by list_nodes()-related\n functions.\n '''\n public_ips = full_node.get('eip', [])\n if public_ips:\n public_ip = public_ips['eip_addr']\n public_ips = [public_ip, ]\n\n private_ips = []\n for vxnet in full_node.get('vxnets', []):\n private_ip = vxnet.get('private_ip', None)\n if private_ip:\n private_ips.append(private_ip)\n\n normalized_node = {\n 'id': full_node['instance_id'],\n 'image': full_node['image']['image_id'],\n 'size': full_node['instance_type'],\n 'state': full_node['status'],\n 'private_ips': private_ips,\n 'public_ips': public_ips,\n }\n\n return normalized_node", "def get_node_msindex(msinds, node_seed):\n \"\"\"\n Convert seeds-like selection of voxel to multiscale index.\n :param msinds: ndarray with indexes\n :param node_seed: ndarray with 1 where selected pixel is, or list of indexes in this array\n :return: multiscale index of first found seed\n \"\"\"\n if type(node_seed) == np.ndarray:\n seed_indexes = np.nonzero(node_seed)\n elif type(node_seed) == list:\n seed_indexes = node_seed\n else:\n seed_indexes = [node_seed]\n\n selected_nodes_msinds = msinds[seed_indexes]\n node_msindex = selected_nodes_msinds[0]\n return node_msindex", "def graph_from_seeds(seeds, cell_source):\n \"\"\"\n This creates/updates a networkx graph from a list of cells.\n\n The graph is created when the cell_source is an instance of ExcelCompiler\n The graph is updated when the cell_source is an instance of Spreadsheet\n \"\"\"\n\n # when called from Spreadsheet instance, use the Spreadsheet cellmap and graph\n if hasattr(cell_source, 'G'): # ~ cell_source is a Spreadsheet\n cellmap = cell_source.cellmap\n cells = cellmap\n G = cell_source.G\n for c in seeds:\n G.add_node(c)\n cellmap[c.address()] = c\n # when called from ExcelCompiler instance, construct cellmap and graph from seeds\n else: # ~ cell_source is a ExcelCompiler\n cellmap = dict([(x.address(),x) for x in seeds])\n cells = cell_source.cells\n # directed graph\n G = networkx.DiGraph()\n # match the info in cellmap\n for c in cellmap.values(): G.add_node(c)\n\n # cells to analyze: only formulas\n todo = [s for s in seeds if s.formula]\n steps = [i for i,s in enumerate(todo)]\n names = cell_source.named_ranges\n\n while todo:\n c1 = todo.pop()\n step = steps.pop()\n cursheet = c1.sheet\n\n ###### 1) looking for cell c1 dependencies ####################\n # print 'C1', c1.address()\n # in case a formula, get all cells that are arguments\n pystr, ast = cell2code(c1, names)\n # set the code & compile it (will flag problems sooner rather than later)\n c1.python_expression = pystr.replace('\"', \"'\") # compilation is done later\n\n if 'OFFSET' in c1.formula or 'INDEX' in c1.formula:\n if c1.address() not in cell_source.named_ranges: # pointers names already treated in ExcelCompiler\n cell_source.pointers.add(c1.address())\n\n # get all the cells/ranges this formula refers to\n deps = [x for x in ast.nodes() if isinstance(x,RangeNode)]\n # remove dupes\n deps = 
uniqueify(deps)\n\n ###### 2) connect dependencies in cells in graph ####################\n\n # ### LOG\n # tmp = []\n # for dep in deps:\n # if dep not in names:\n # if \"!\" not in dep and cursheet != None:\n # dep = cursheet + \"!\" + dep\n # if dep not in cellmap:\n # tmp.append(dep)\n # #deps = tmp\n # logStep = \"%s %s = %s \" % ('|'*step, c1.address(), '',)\n # print logStep\n\n # if len(deps) > 1 and 'L' in deps[0] and deps[0] == deps[-1].replace('DG','L'):\n # print logStep, \"[%s...%s]\" % (deps[0], deps[-1])\n # elif len(deps) > 0:\n # print logStep, \"->\", deps\n # else:\n # print logStep, \"done\"\n\n for dep in deps:\n dep_name = dep.tvalue.replace('$','')\n\n # this is to avoid :A1 or A1: dep due to clean_pointers() returning an ExcelError\n if dep_name.startswith(':') or dep_name.endswith(':'):\n dep_name = dep_name.replace(':', '')\n\n # if not pointer, we need an absolute address\n if dep.tsubtype != 'pointer' and dep_name not in names and \"!\" not in dep_name and cursheet != None:\n dep_name = cursheet + \"!\" + dep_name\n\n # Named_ranges + ranges already parsed (previous iterations)\n if dep_name in cellmap:\n origins = [cellmap[dep_name]]\n target = cellmap[c1.address()]\n # if the dep_name is a multi-cell range, create a range object\n elif is_range(dep_name) or (dep_name in names and is_range(names[dep_name])):\n if dep_name in names:\n reference = names[dep_name]\n else:\n reference = dep_name\n\n if 'OFFSET' in reference or 'INDEX' in reference:\n start_end = prepare_pointer(reference, names, ref_cell = c1)\n rng = cell_source.Range(start_end)\n\n if dep_name in names: # dep is a pointer range\n address = dep_name\n else:\n if c1.address() in names: # c1 holds is a pointer range\n address = c1.address()\n else: # a pointer range with no name, its address will be its name\n address = '%s:%s' % (start_end[\"start\"], start_end[\"end\"])\n cell_source.pointers.add(address)\n else:\n address = dep_name\n\n # get a list of the addresses in this range that are not yet in the graph\n range_addresses = list(resolve_range(reference, should_flatten=True)[0])\n cellmap_add_addresses = [addr for addr in range_addresses if addr not in cellmap.keys()]\n\n if len(cellmap_add_addresses) > 0:\n # this means there are cells to be added\n\n # get row and col dimensions for the sheet, assuming the whole range is in one sheet\n sheet_initial = split_address(cellmap_add_addresses[0])[0]\n max_rows, max_cols = max_dimension(cellmap, sheet_initial)\n\n # create empty cells that aren't in the cellmap\n for addr in cellmap_add_addresses:\n sheet_new, col_new, row_new = split_address(addr)\n\n # if somehow a new sheet comes up in the range, get the new dimensions\n if sheet_new != sheet_initial:\n sheet_initial = sheet_new\n max_rows, max_cols = max_dimension(cellmap, sheet_new)\n\n # add the empty cells\n if int(row_new) <= max_rows and int(col2num(col_new)) <= max_cols:\n # only add cells within the maximum bounds of the sheet to avoid too many evaluations\n # for A:A or 1:1 ranges\n\n cell_new = Cell(addr, sheet_new, value=\"\", should_eval='False') # create new cell object\n cellmap[addr] = cell_new # add it to the cellmap\n G.add_node(cell_new) # add it to the graph\n cell_source.cells[addr] = cell_new # add it to the cell_source, used in this function\n\n rng = cell_source.Range(reference)\n\n if address in cellmap:\n virtual_cell = cellmap[address]\n else:\n virtual_cell = Cell(address, None, value = rng, formula = reference, is_range = True, is_named_range = True )\n # save the 
range\n cellmap[address] = virtual_cell\n\n # add an edge from the range to the parent\n G.add_node(virtual_cell)\n # Cell(A1:A10) -> c1 or Cell(ExampleName) -> c1\n G.add_edge(virtual_cell, c1)\n # cells in the range should point to the range as their parent\n target = virtual_cell\n origins = []\n\n if len(list(rng.keys())) != 0: # could be better, but can't check on Exception types here...\n for child in rng.addresses:\n if child not in cellmap:\n origins.append(cells[child])\n else:\n origins.append(cellmap[child])\n else:\n # not a range\n if dep_name in names:\n reference = names[dep_name]\n else:\n reference = dep_name\n\n if reference in cells:\n if dep_name in names:\n virtual_cell = Cell(dep_name, None, value = cells[reference].value, formula = reference, is_range = False, is_named_range = True )\n\n G.add_node(virtual_cell)\n G.add_edge(cells[reference], virtual_cell)\n\n origins = [virtual_cell]\n else:\n cell = cells[reference]\n origins = [cell]\n\n cell = origins[0]\n\n if cell.formula is not None and ('OFFSET' in cell.formula or 'INDEX' in cell.formula):\n cell_source.pointers.add(cell.address())\n else:\n virtual_cell = Cell(dep_name, None, value = None, formula = None, is_range = False, is_named_range = True )\n origins = [virtual_cell]\n\n target = c1\n\n\n # process each cell\n for c2 in flatten(origins):\n\n # if we havent treated this cell allready\n if c2.address() not in cellmap:\n if c2.formula:\n # cell with a formula, needs to be added to the todo list\n todo.append(c2)\n steps.append(step+1)\n else:\n # constant cell, no need for further processing, just remember to set the code\n pystr,ast = cell2code(c2, names)\n c2.python_expression = pystr\n c2.compile()\n\n # save in the cellmap\n cellmap[c2.address()] = c2\n # add to the graph\n G.add_node(c2)\n\n # add an edge from the cell to the parent (range or cell)\n if(target != []):\n # print \"Adding edge %s --> %s\" % (c2.address(), target.address())\n G.add_edge(c2,target)\n\n c1.compile() # cell compilation is done here because pointer ranges might update python_expressions\n\n\n return (cellmap, G)", "def _node_info(conn):\n '''\n Internal variant of node_info taking a libvirt connection as parameter\n '''\n raw = conn.getInfo()\n info = {'cpucores': raw[6],\n 'cpumhz': raw[3],\n 'cpumodel': six.text_type(raw[0]),\n 'cpus': raw[2],\n 'cputhreads': raw[7],\n 'numanodes': raw[4],\n 'phymemory': raw[1],\n 'sockets': raw[5]}\n return info", "def inspect_node(self, node_id):\n \"\"\"\n Retrieve low-level information about a swarm node\n\n Args:\n node_id (string): ID of the node to be inspected.\n\n Returns:\n A dictionary containing data about this node.\n\n Raises:\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n \"\"\"\n url = self._url('/nodes/{0}', node_id)\n return self._result(self._get(url), True)", "def node_info():\n '''\n Return a dict with information about this node\n\n CLI Example:\n\n .. 
code-block:: bash\n\n salt '*' virt.node_info\n '''\n with _get_xapi_session() as xapi:\n # get node uuid\n host_rec = _get_record(xapi, 'host', _get_all(xapi, 'host')[0])\n # get first CPU (likely to be a core) uuid\n host_cpu_rec = _get_record(xapi, 'host_cpu', host_rec['host_CPUs'][0])\n # get related metrics\n host_metrics_rec = _get_metrics_record(xapi, 'host', host_rec)\n\n # adapted / cleaned up from Xen's xm\n def getCpuMhz():\n cpu_speeds = [int(host_cpu_rec[\"speed\"])\n for host_cpu_it in host_cpu_rec\n if \"speed\" in host_cpu_it]\n if cpu_speeds:\n return sum(cpu_speeds) / len(cpu_speeds)\n else:\n return 0\n\n def getCpuFeatures():\n if host_cpu_rec:\n return host_cpu_rec['features']\n\n def getFreeCpuCount():\n cnt = 0\n for host_cpu_it in host_cpu_rec:\n if not host_cpu_rec['cpu_pool']:\n cnt += 1\n return cnt\n\n info = {\n 'cpucores': _get_val(host_rec,\n [\"cpu_configuration\", \"nr_cpus\"]),\n 'cpufeatures': getCpuFeatures(),\n 'cpumhz': getCpuMhz(),\n 'cpuarch': _get_val(host_rec,\n [\"software_version\", \"machine\"]),\n 'cputhreads': _get_val(host_rec,\n [\"cpu_configuration\", \"threads_per_core\"]),\n 'phymemory': int(host_metrics_rec[\"memory_total\"]) / 1024 / 1024,\n 'cores_per_sockets': _get_val(host_rec,\n [\"cpu_configuration\", \"cores_per_socket\"]),\n 'free_cpus': getFreeCpuCount(),\n 'free_memory': int(host_metrics_rec[\"memory_free\"]) / 1024 / 1024,\n 'xen_major': _get_val(host_rec,\n [\"software_version\", \"xen_major\"]),\n 'xen_minor': _get_val(host_rec,\n [\"software_version\", \"xen_minor\"]),\n 'xen_extra': _get_val(host_rec,\n [\"software_version\", \"xen_extra\"]),\n 'xen_caps': \" \".join(_get_val(host_rec, [\"capabilities\"])),\n 'xen_scheduler': _get_val(host_rec,\n [\"sched_policy\"]),\n 'xen_pagesize': _get_val(host_rec,\n [\"other_config\", \"xen_pagesize\"]),\n 'platform_params': _get_val(host_rec,\n [\"other_config\", \"platform_params\"]),\n 'xen_commandline': _get_val(host_rec,\n [\"other_config\", \"xen_commandline\"]),\n 'xen_changeset': _get_val(host_rec,\n [\"software_version\", \"xen_changeset\"]),\n 'cc_compiler': _get_val(host_rec,\n [\"software_version\", \"cc_compiler\"]),\n 'cc_compile_by': _get_val(host_rec,\n [\"software_version\", \"cc_compile_by\"]),\n 'cc_compile_domain': _get_val(host_rec,\n [\"software_version\", \"cc_compile_domain\"]),\n 'cc_compile_date': _get_val(host_rec,\n [\"software_version\", \"cc_compile_date\"]),\n 'xend_config_format': _get_val(host_rec,\n [\"software_version\", \"xend_config_format\"])\n }\n\n return info" ]
[ 0.7683572173118591, 0.7625840902328491, 0.6913125514984131, 0.6483857035636902, 0.6416175961494446, 0.6361604332923889, 0.6354682445526123, 0.6320913434028625, 0.6299605369567871, 0.6297000646591187, 0.6281426548957825, 0.6263656616210938 ]
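The negatives above revolve around inspecting one node of a multiscale graph via an msinds array that stores a multiscale index per voxel. As a minimal, self-contained sketch of that lookup (toy array and seed position invented for illustration, assuming only numpy), the index of a selected voxel can be recovered the same way get_node_msindex does it:

    import numpy as np

    # Toy data: a 3x3x3 block of multiscale indexes and a single-voxel seed mask.
    msinds = np.arange(27).reshape(3, 3, 3)
    node_seed = np.zeros_like(msinds)
    node_seed[1, 2, 0] = 1

    # Same lookup as get_node_msindex(): take the first seeded voxel's index.
    seed_indexes = np.nonzero(node_seed)
    node_msindex = msinds[seed_indexes][0]
    print(node_msindex)  # 15 for this toy array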
Call after segmentation to see the selected node neighborhood. The user has to select one node by clicking. :return:
def debug_interactive_inspect_node(self): """ Call after segmentation to see selected node neighborhood. User have to select one node by click. :return: """ if ( np.sum( np.abs( np.asarray(self.msinds.shape) - np.asarray(self.segmentation.shape) ) ) == 0 ): segmentation = self.segmentation else: segmentation = self.temp_msgc_resized_segmentation logger.info("Click to select one voxel of interest") import sed3 ed = sed3.sed3(self.msinds, contour=segmentation == 0) ed.show() edseeds = ed.seeds node_msindex = get_node_msindex(self.msinds, edseeds) node_unariesalt, node_neighboor_edges_and_weights, node_neighboor_seeds = self.debug_inspect_node( node_msindex ) import sed3 ed = sed3.sed3( self.msinds, contour=segmentation == 0, seeds=node_neighboor_seeds ) ed.show() return ( node_unariesalt, node_neighboor_edges_and_weights, node_neighboor_seeds, node_msindex, )
[ "def OnNodeSelected(self, event):\n \"\"\"We have selected a node with the list control, tell the world\"\"\"\n try:\n node = self.sorted[event.GetIndex()]\n except IndexError, err:\n log.warn(_('Invalid index in node selected: %(index)s'),\n index=event.GetIndex())\n else:\n if node is not self.selected_node:\n wx.PostEvent(\n self,\n squaremap.SquareSelectionEvent(node=node, point=None,\n map=None)\n )", "def toggle_NV(self, pt):\n '''\n If there is not currently a selected NV within self.settings[patch_size] of pt, adds it to the selected list. If\n there is, removes that point from the selected list.\n Args:\n pt: the point to add or remove from the selected list\n\n Poststate: updates selected list\n\n '''\n\n if not self.data['nv_locations']: #if self.data is empty so this is the first point\n self.data['nv_locations'].append(pt)\n self.data['image_data'] = None # clear image data\n\n else:\n # use KDTree to find NV closest to mouse click\n tree = scipy.spatial.KDTree(self.data['nv_locations'])\n #does a search with k=1, that is a search for the nearest neighbor, within distance_upper_bound\n d, i = tree.query(pt,k = 1, distance_upper_bound = self.settings['patch_size'])\n\n # removes NV if previously selected\n if d is not np.inf:\n self.data['nv_locations'].pop(i)\n # adds NV if not previously selected\n else:\n self.data['nv_locations'].append(pt)\n\n # if type is not free we calculate the total points of locations from the first selected points\n if self.settings['type'] == 'square' and len(self.data['nv_locations'])>1:\n # here we create a rectangular grid, where pts a and be define the top left and bottom right corner of the rectangle\n Nx, Ny = self.settings['Nx'], self.settings['Ny']\n pta = self.data['nv_locations'][0]\n ptb = self.data['nv_locations'][1]\n tmp = np.array([[[pta[0] + 1.0*i*(ptb[0]-pta[0])/(Nx-1), pta[1] + 1.0*j*(ptb[1]-pta[1])/(Ny-1)] for i in range(Nx)] for j in range(Ny)])\n self.data['nv_locations'] = np.reshape(tmp, (Nx * Ny, 2))\n self.stop()\n\n\n elif self.settings['type'] == 'line' and len(self.data['nv_locations'])>1:\n # here we create a straight line between points a and b\n N = self.settings['Nx']\n pta = self.data['nv_locations'][0]\n ptb = self.data['nv_locations'][1]\n self.data['nv_locations'] = [np.array([pta[0] + 1.0*i*(ptb[0]-pta[0])/(N-1), pta[1] + 1.0*i*(ptb[1]-pta[1])/(N-1)]) for i in range(N)]\n self.stop()\n\n elif self.settings['type'] == 'ring' and len(self.data['nv_locations'])>1:\n # here we create a circular grid, where pts a and be define the center and the outermost ring\n Nx, Ny = self.settings['Nx'], self.settings['Ny']\n\n pta = self.data['nv_locations'][0] # center\n ptb = self.data['nv_locations'][1] # outermost ring\n\n # radius of outermost ring:\n rmax = np.sqrt((pta[0] - ptb[0]) ** 2 + (pta[1] - ptb[1]) ** 2)\n\n # create points on rings\n tmp = []\n for r in np.linspace(rmax, 0, Ny + 1)[0:-1]:\n for theta in np.linspace(0, 2 * np.pi, Nx+1)[0:-1]:\n tmp += [[r * np.sin(theta)+pta[0], r * np.cos(theta)+pta[1]]]\n\n self.data['nv_locations'] = np.array(tmp)\n self.stop()", "def toggle_NV(self, pt):\n '''\n If there is not currently a selected NV within self.settings[patch_size] of pt, adds it to the selected list. 
If\n there is, removes that point from the selected list.\n Args:\n pt: the point to add or remove from the selected list\n Poststate: updates selected list\n '''\n if not self.data['nv_locations']: #if self.data is empty so this is the first point\n self.data['nv_locations'].append(pt)\n self.data['image_data'] = None # clear image data\n else:\n # use KDTree to find NV closest to mouse click\n tree = scipy.spatial.KDTree(self.data['nv_locations'])\n #does a search with k=1, that is a search for the nearest neighbor, within distance_upper_bound\n d, i = tree.query(pt,k = 1, distance_upper_bound = self.settings['patch_size'])\n\n # removes NV if previously selected\n if d is not np.inf:\n self.data['nv_locations'].pop(i)\n # adds NV if not previously selected\n else:\n self.data['nv_locations'].append(pt)\n\n # randomize\n if self.settings['randomize']:\n self.log('warning! randomize not avalable when manually selecting points')\n\n # if type is not free we calculate the total points of locations from the first selected points\n if self.settings['type'] == 'square' and len(self.data['nv_locations'])>1:\n # here we create a rectangular grid, where pts a and be define the top left and bottom right corner of the rectangle\n Nx, Ny = self.settings['Nx'], self.settings['Ny']\n pta = self.data['nv_locations'][0]\n ptb = self.data['nv_locations'][1]\n tmp = np.array([[[pta[0] + 1.0*i*(ptb[0]-pta[0])/(Nx-1), pta[1] + 1.0*j*(ptb[1]-pta[1])/(Ny-1)] for i in range(Nx)] for j in range(Ny)])\n nv_pts = np.reshape(tmp, (Nx * Ny, 2))\n\n # randomize\n if self.settings['randomize']:\n random.shuffle(nv_pts) # shuffles in place\n\n self.data['nv_locations'] = nv_pts\n\n self.stop()\n elif self.settings['type'] == 'line' and len(self.data['nv_locations'])>1:\n # here we create a straight line between points a and b\n N = self.settings['Nx']\n pta = self.data['nv_locations'][0]\n ptb = self.data['nv_locations'][1]\n nv_pts = [np.array([pta[0] + 1.0*i*(ptb[0]-pta[0])/(N-1), pta[1] + 1.0*i*(ptb[1]-pta[1])/(N-1)]) for i in range(N)]\n\n\n # randomize\n if self.settings['randomize']:\n random.shuffle(nv_pts) # shuffles in place\n\n self.data['nv_locations'] = nv_pts\n\n self.stop()\n elif self.settings['type'] == 'ring' and len(self.data['nv_locations'])>1:\n # here we create a circular grid, where pts a and be define the center and the outermost ring\n Nx, Ny = self.settings['Nx'], self.settings['Ny']\n pt_center = self.data['nv_locations'][0] # center\n pt_outer = self.data['nv_locations'][1] # outermost ring\n # radius of outermost ring:\n rmax = np.sqrt((pt_center[0] - pt_outer[0]) ** 2 + (pt_center[1] - pt_outer[1]) ** 2)\n\n # angles\n angles = np.linspace(0, 2 * np.pi, Nx+1)[0:-1]\n # create points on rings\n nv_pts = []\n for r in np.linspace(rmax, 0, Ny + 1)[0:-1]:\n for theta in angles:\n nv_pts += [[r * np.sin(theta)+pt_center[0], r * np.cos(theta)+pt_center[1]]]\n\n\n\n # randomize\n if self.settings['randomize']:\n coarray = list(zip(nv_pts, angles))\n random.shuffle(coarray) # shuffles in place\n nv_pts, angles = zip(*coarray)\n\n self.data['nv_locations'] = np.array(nv_pts)\n self.data['angles'] = np.array(angles)* 180 / np.pi\n self.data['ring_data'] = [pt_center, pt_outer]\n self.stop()\n\n elif self.settings['type'] == 'arc' and len(self.data['nv_locations']) > 3:\n # here we create a circular grid, where pts a and be define the center and the outermost ring\n Nx, Ny = self.settings['Nx'], self.settings['Ny']\n pt_center = self.data['nv_locations'][0] # center\n pt_start = 
self.data['nv_locations'][1] # arc point one (radius)\n pt_dir = self.data['nv_locations'][2] # arc point two (direction)\n pt_end = self.data['nv_locations'][3] # arc point three (angle)\n\n # radius of outermost ring:\n rmax = np.sqrt((pt_center[0] - pt_start[0]) ** 2 + (pt_center[1] - pt_start[1]) ** 2)\n angle_start = np.arctan((pt_start[1] - pt_center[1]) / (pt_start[0] - pt_center[0]))\n # arctan always returns between -pi/2 and pi/2, so adjust to allow full range of angles\n if ((pt_start[0] - pt_center[0]) < 0):\n angle_start += np.pi\n\n angle_end = np.arctan((pt_end[1] - pt_center[1]) / (pt_end[0] - pt_center[0]))\n # arctan always returns between -pi/2 and pi/2, so adjust to allow full range of angles\n if ((pt_end[0] - pt_center[0]) < 0):\n angle_end += np.pi\n\n if pt_dir[0] < pt_start[0]:\n # counter-clockwise: invert the order of the angles\n angle_start, angle_end = angle_end, angle_start\n\n if angle_start > angle_end:\n # make sure that start is the smaller\n # (e.g. angle_start= 180 deg and angle_end =10, we want to got from 180 to 370 deg)\n angle_end += 2 * np.pi\n\n # create points on arcs\n nv_pts = []\n for r in np.linspace(rmax, 0, Ny + 1)[0:-1]:\n for theta in np.linspace(angle_start, angle_end, Nx, endpoint=True):\n nv_pts += [[r * np.cos(theta) + pt_center[0], r * np.sin(theta) + pt_center[1]]]\n\n # randomize\n if self.settings['randomize']:\n coarray = list(zip(nv_pts, np.linspace(angle_start, angle_end, Nx, endpoint=True)))\n random.shuffle(coarray) # shuffles in place\n nv_pts, angles = zip(*coarray)\n else:\n angles = np.linspace(angle_start, angle_end, Nx, endpoint=True)\n self.data['nv_locations'] = np.array(nv_pts)\n self.data['arc_data'] = [pt_center, pt_start, pt_end]\n self.data['angles'] = np.array(angles) * 180 / np.pi\n self.stop()", "def on_zijd_select(self, event):\n \"\"\"\n Get mouse position on double click find the nearest interpretation\n to the mouse\n position then select that interpretation\n\n Parameters\n ----------\n event : the wx Mouseevent for that click\n\n Alters\n ------\n current_fit\n \"\"\"\n if not array(self.CART_rot_good).any():\n return\n pos = event.GetPosition()\n width, height = self.canvas1.get_width_height()\n pos[1] = height - pos[1]\n xpick_data, ypick_data = pos\n xdata_org = list(\n self.CART_rot_good[:, 0]) + list(self.CART_rot_good[:, 0])\n ydata_org = list(-1*self.CART_rot_good[:, 1]) + \\\n list(-1*self.CART_rot_good[:, 2])\n data_corrected = self.zijplot.transData.transform(\n vstack([xdata_org, ydata_org]).T)\n xdata, ydata = data_corrected.T\n xdata = list(map(float, xdata))\n ydata = list(map(float, ydata))\n e = 4.0\n\n index = None\n for i, (x, y) in enumerate(zip(xdata, ydata)):\n if 0 < sqrt((x-xpick_data)**2. + (y-ypick_data)**2.) 
< e:\n index = i\n break\n if index != None:\n steps = self.Data[self.s]['zijdblock_steps']\n bad_count = self.Data[self.s]['measurement_flag'][:index].count(\n 'b')\n if index > len(steps):\n bad_count *= 2\n if not self.current_fit:\n self.on_btn_add_fit(event)\n self.select_bounds_in_logger((index+bad_count) % len(steps))", "def clicked(self, px, py):\n '''see if the polygon has been clicked on.\n Consider it clicked if the pixel is within 6 of the point\n '''\n if self.hidden:\n return None\n for i in range(len(self._pix_points)):\n if self._pix_points[i] is None:\n continue\n (pixx,pixy) = self._pix_points[i]\n if abs(px - pixx) < 6 and abs(py - pixy) < 6:\n self._selected_vertex = i\n return math.sqrt((px - pixx)**2 + (py - pixy)**2)\n return None", "def OnNodeActivated(self, event):\n \"\"\"We have double-clicked for hit enter on a node refocus squaremap to this node\"\"\"\n try:\n node = self.sorted[event.GetIndex()]\n except IndexError, err:\n log.warn(_('Invalid index in node activated: %(index)s'),\n index=event.GetIndex())\n else:\n wx.PostEvent(\n self,\n squaremap.SquareActivationEvent(node=node, point=None,\n map=None)\n )", "def OnNodeActivated(self, event):\n \"\"\"Double-click or enter on a node in some control...\"\"\"\n self.activated_node = self.selected_node = event.node\n self.squareMap.SetModel(event.node, self.adapter)\n self.squareMap.SetSelected( event.node )\n if editor:\n if self.SourceShowFile(event.node):\n if hasattr(event.node,'lineno'):\n self.sourceCodeControl.GotoLine(event.node.lineno)\n self.RecordHistory()", "function () {\n if (!this.props.visible) {\n return;\n }\n\n var node = Preact.findDOMNode(this),\n selectedNode = $(node.parentNode).find(this.props.selectedClassName).closest(\"li\"),\n selectionViewInfo = this.props.selectionViewInfo;\n\n if (selectedNode.length === 0) {\n return;\n }\n\n var top = selectedNode.offset().top,\n baselineHeight = node.dataset.initialHeight,\n height = baselineHeight,\n scrollerTop = selectionViewInfo.get(\"offsetTop\");\n\n if (!baselineHeight) {\n baselineHeight = $(node).outerHeight();\n node.dataset.initialHeight = baselineHeight;\n height = baselineHeight;\n }\n\n // Check to see if the selection is completely scrolled out of view\n // to prevent the extension from appearing in the working set area.\n if (top < scrollerTop - baselineHeight) {\n node.style.display = \"none\";\n return;\n }\n\n node.style.display = \"block\";\n\n // The selectionExtension sits on top of the other nodes\n // so we need to shrink it if only part of the selection node is visible\n if (top < scrollerTop) {\n var difference = scrollerTop - top;\n top += difference;\n height = parseInt(height, 10);\n height -= difference;\n }\n\n node.style.top = top + \"px\";\n node.style.height = height + \"px\";\n node.style.left = selectionViewInfo.get(\"width\") - $(node).outerWidth() + \"px\";\n }", "def next(self):\n \"\"\"Select the next cluster.\"\"\"\n if not self.selected:\n self.cluster_view.next()\n else:\n self.similarity_view.next()", "def inspect_node_neighborhood(nlinks, msinds, node_msindex):\n \"\"\"\n Get information about one node in graph\n\n :param nlinks: neighboorhood edges\n :param msinds: indexes in 3d image\n :param node_msindex: int, multiscale index of selected voxel\n :return: node_neighboor_edges_and_weights, node_neighboor_seeds\n \"\"\"\n # seed_indexes = np.nonzero(node_seed)\n # selected_inds = msinds[seed_indexes]\n # node_msindex = selected_inds[0]\n\n node_neighbor_edges = get_neighborhood_edes(nlinks, 
node_msindex)\n\n node_neighbor_seeds = np.zeros_like(msinds, dtype=np.int8)\n for neighboor_ind in np.unique(node_neighbor_edges[:, :2].ravel()):\n node_neighbor_ind = np.where(msinds == neighboor_ind)\n node_neighbor_seeds[node_neighbor_ind] = 2\n\n node_neighbor_seeds[np.where(msinds == node_msindex)] = 1\n\n # node_coordinates = np.unravel_index(selected_voxel_ind, msinds.shape)\n # node_neighbor_coordinates = np.unravel_index(np.unique(node_neighbor_edges[:, :2].ravel()), msinds.shape)\n return node_neighbor_edges, node_neighbor_seeds", "def SetSelected( self, node, point=None, propagate=True ):\n \"\"\"Set the given node selected in the square-map\"\"\"\n if node == self.selectedNode:\n return\n self.selectedNode = node\n self.UpdateDrawing()\n if node:\n wx.PostEvent( self, SquareSelectionEvent( node=node, point=point, map=self ) )", "def onclick(self, event):\n \"\"\"\n Draw contours on the data for a click in the thematic map\n :param event: mouse click on thematic map preview\n \"\"\"\n if event.inaxes == self.previewax:\n y, x = int(event.xdata), int(event.ydata)\n label = self.selection_array[x, y]\n contiguous_regions = scipy.ndimage.label(self.selection_array == label)[0]\n this_region = contiguous_regions == (contiguous_regions[x, y])\n\n # remove the boundaries so any region touching the edge isn't drawn odd\n this_region[0, :] = 0\n this_region[:, 0] = 0\n this_region[this_region.shape[0]-1, :] = 0\n this_region[:, this_region.shape[1]-1] = 0\n\n # convert the region mask into just a true/false array of its boundary pixels\n edges = binary_erosion(this_region) ^ this_region\n\n # convert the boundary pixels into a path, moving around instead of just where\n x, y = np.where(edges)\n coords = np.dstack([x, y])[0]\n path = [coords[0]]\n coords = coords[1:]\n\n while len(coords):\n dist = np.sum(np.abs(path[-1] - coords), axis=1)\n neighbor_index = np.argmin(dist)\n\n if dist[neighbor_index] < 5:\n path.append(coords[neighbor_index].copy())\n coords[neighbor_index:-1] = coords[neighbor_index + 1:]\n coords = coords[:-1]\n else:\n break\n\n path = np.array(path)\n\n clips = []\n while len(coords) > 5:\n dist = np.sum(np.abs(path[-1] - coords), axis=1)\n neighbor_index = np.argmin(dist)\n clip = [coords[neighbor_index].copy()]\n coords[neighbor_index:-1] = coords[neighbor_index + 1:]\n coords = coords[:-1]\n while len(coords):\n dist = np.sum(np.abs(clip[-1] - coords), axis=1)\n neighbor_index = np.argmin(dist)\n if dist[neighbor_index] < 5:\n clip.append(coords[neighbor_index].copy())\n coords[neighbor_index:-1] = coords[neighbor_index + 1:]\n coords = coords[:-1]\n else:\n break\n clips.append(np.array(clip))\n\n # draw the continguous on the selection area\n self.region_patches.append(PatchCollection(\n [Polygon(np.dstack([path[:, 1], path[:, 0]])[0], False,\n fill=False, facecolor=None,\n edgecolor=\"black\", alpha=1, lw=2.5)] +\n [Polygon(np.dstack([clip[:, 1], clip[:, 0]])[0], False,\n fill=False, facecolor=None,\n edgecolor=\"black\", alpha=1, lw=2.0) for clip in clips],\n match_original=True))\n self.imageax.add_collection(self.region_patches[-1])\n self.fig.canvas.draw_idle()" ]
[ 0.7131602764129639, 0.7058137059211731, 0.704730212688446, 0.7002614140510559, 0.697652280330658, 0.6866844892501831, 0.6853585839271545, 0.6828123927116394, 0.6754629611968994, 0.6754100322723389, 0.6728068590164185, 0.6713006496429443 ]
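Following the row above, the neighborhood seeds returned by inspect_node_neighborhood can be reproduced on a toy graph. This is a hypothetical sketch, assuming only numpy; the boolean edge filter below stands in for get_neighborhood_edes, whose implementation is not shown in this excerpt, and the toy msinds/nlinks values are invented:

    import numpy as np

    # Toy multiscale index image (2x2x1) and edge list rows of [node_a, node_b, weight].
    msinds = np.array([[[0], [1]], [[2], [3]]])
    nlinks = np.array([[0, 1, 5], [0, 2, 7], [1, 3, 4], [2, 3, 6]])
    node_msindex = 0

    # Keep edges touching the node, mark its neighbours with 2 and the node itself
    # with 1, mirroring inspect_node_neighborhood().
    touching = (nlinks[:, 0] == node_msindex) | (nlinks[:, 1] == node_msindex)
    node_neighbor_edges = nlinks[touching]
    node_neighbor_seeds = np.zeros_like(msinds, dtype=np.int8)
    for neighbor_ind in np.unique(node_neighbor_edges[:, :2].ravel()):
        node_neighbor_seeds[msinds == neighbor_ind] = 2
    node_neighbor_seeds[msinds == node_msindex] = 1
    print(node_neighbor_seeds.ravel())  # [1 2 2 0]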
Setting of data. You need to set seeds if you want to use hard_constraints.
def _ssgc_prepare_data_and_run_computation( self, # voxels1, voxels2, hard_constraints=True, area_weight=1, ): """ Setting of data. You need set seeds if you want use hard_constraints. """ # from PyQt4.QtCore import pyqtRemoveInputHook # pyqtRemoveInputHook() # import pdb; pdb.set_trace() # BREAKPOINT unariesalt = self.__create_tlinks( self.img, self.voxelsize, # voxels1, voxels2, self.seeds, area_weight, hard_constraints, ) # některém testu organ semgmentation dosahují unaries -15. což je podiné # stačí vyhodit print před if a je to vidět logger.debug("unaries %.3g , %.3g" % (np.max(unariesalt), np.min(unariesalt))) # create potts pairwise # pairwiseAlpha = -10 pairwise = -(np.eye(2) - 1) pairwise = (self.segparams["pairwise_alpha"] * pairwise).astype(np.int32) # pairwise = np.array([[0,30],[30,0]]).astype(np.int32) # print pairwise self.iparams = {} if self.segparams["use_boundary_penalties"]: sigma = self.segparams["boundary_penalties_sigma"] # set boundary penalties function # Default are penalties based on intensity differences boundary_penalties_fcn = lambda ax: self._boundary_penalties_array( axis=ax, sigma=sigma ) else: boundary_penalties_fcn = None nlinks = self.__create_nlinks( self.img, boundary_penalties_fcn=boundary_penalties_fcn ) self.stats["tlinks shape"].append(unariesalt.reshape(-1, 2).shape) self.stats["nlinks shape"].append(nlinks.shape) # we flatten the unaries # result_graph = cut_from_graph(nlinks, unaries.reshape(-1, 2), # pairwise) start = time.time() if self.debug_images: self._debug_show_unariesalt(unariesalt) result_graph = pygco.cut_from_graph(nlinks, unariesalt.reshape(-1, 2), pairwise) elapsed = time.time() - start self.stats["gc time"] = elapsed result_labeling = result_graph.reshape(self.img.shape) return result_labeling
[ "def __set_hard_hard_constraints(self, tdata1, tdata2, seeds):\n \"\"\"\n it works with seed labels:\n 0: nothing\n 1: object 1 - full seeds\n 2: object 2 - full seeds\n 3: object 1 - not a training seeds\n 4: object 2 - not a training seeds\n \"\"\"\n seeds_mask = (seeds == 1) | (seeds == 3)\n tdata2[seeds_mask] = np.max(tdata2) + 1\n tdata1[seeds_mask] = 0\n\n seeds_mask = (seeds == 2) | (seeds == 4)\n tdata1[seeds_mask] = np.max(tdata1) + 1\n tdata2[seeds_mask] = 0\n\n return tdata1, tdata2", "def set(name, data, **kwargs):\n '''\n Set debconf selections\n\n .. code-block:: yaml\n\n <state_id>:\n debconf.set:\n - name: <name>\n - data:\n <question>: {'type': <type>, 'value': <value>}\n <question>: {'type': <type>, 'value': <value>}\n\n <state_id>:\n debconf.set:\n - name: <name>\n - data:\n <question>: {'type': <type>, 'value': <value>}\n <question>: {'type': <type>, 'value': <value>}\n\n name:\n The package name to set answers for.\n\n data:\n A set of questions/answers for debconf. Note that everything under\n this must be indented twice.\n\n question:\n The question the is being pre-answered\n\n type:\n The type of question that is being asked (string, boolean, select, etc.)\n\n value:\n The answer to the question\n '''\n ret = {'name': name,\n 'changes': {},\n 'result': True,\n 'comment': ''}\n\n current = __salt__['debconf.show'](name)\n\n for (key, args) in six.iteritems(data):\n # For debconf data, valid booleans are 'true' and 'false';\n # But str()'ing the args['value'] will result in 'True' and 'False'\n # which will be ignored and overridden by a dpkg-reconfigure.\n\n # So we should manually set these values to lowercase ones,\n # before any str() call is performed.\n\n if args['type'] == 'boolean':\n args['value'] = 'true' if args['value'] else 'false'\n\n if current is not None and [key, args['type'], six.text_type(args['value'])] in current:\n if ret['comment'] is '':\n ret['comment'] = 'Unchanged answers: '\n ret['comment'] += ('{0} ').format(key)\n else:\n if __opts__['test']:\n ret['result'] = None\n ret['changes'][key] = ('New value: {0}').format(args['value'])\n else:\n if __salt__['debconf.set'](name, key, args['type'], args['value']):\n if args['type'] == 'password':\n ret['changes'][key] = '(password hidden)'\n else:\n ret['changes'][key] = ('{0}').format(args['value'])\n else:\n ret['result'] = False\n ret['comment'] = 'Some settings failed to be applied.'\n ret['changes'][key] = 'Failed to set!'\n\n if not ret['changes']:\n ret['comment'] = 'All specified answers are already set'\n\n return ret", "def specify_data_set(self, x_input, y_input, sort_data=False):\n \"\"\"\n Fully define data by lists of x values and y values.\n\n This will sort them by increasing x but remember how to unsort them for providing results.\n\n Parameters\n ----------\n x_input : iterable\n list of floats that represent x\n y_input : iterable\n list of floats that represent y(x) for each x\n sort_data : bool, optional\n If true, the data will be sorted by increasing x values.\n \"\"\"\n if sort_data:\n xy = sorted(zip(x_input, y_input))\n x, y = zip(*xy)\n x_input_list = list(x_input)\n self._original_index_of_xvalue = [x_input_list.index(xi) for xi in x]\n if len(set(self._original_index_of_xvalue)) != len(x):\n raise RuntimeError('There are some non-unique x-values')\n else:\n x, y = x_input, y_input\n\n self.x = x\n self.y = y", "def set(self, data, start=None, count=None, stride=None):\n \"\"\"Write data to the dataset.\n\n Args::\n\n data : array of data to write; can be given as a 
numpy\n array, or as Python sequence (whose elements can be\n imbricated sequences)\n start : indices where to start writing in the dataset;\n default to 0 on all dimensions\n count : number of values to write along each dimension;\n default to the current length of dataset dimensions\n stride : sampling interval along each dimension;\n default to 1 on all dimensions\n\n For n-dimensional datasets, those 3 parameters are entered\n using lists. For one-dimensional datasets, integers\n can also be used.\n\n Note that, to write the whole dataset at once, one has simply\n to call the method with the dataset values in parameter\n 'data', omitting all other parameters.\n\n Returns::\n\n None.\n\n C library equivalent : SDwritedata\n\n The dataset can also be written using the familiar indexing and\n slicing notation, like ordinary python sequences.\n See \"High level variable access\".\n\n \"\"\"\n\n\n # Obtain SDS info.\n try:\n sds_name, rank, dim_sizes, data_type, n_attrs = self.info()\n if isinstance(dim_sizes, type(1)):\n dim_sizes = [dim_sizes]\n except HDF4Error:\n raise HDF4Error('set : cannot execute')\n\n # Validate args.\n if start is None:\n start = [0] * rank\n elif isinstance(start, type(1)):\n start = [start]\n if count is None:\n count = dim_sizes\n if count[0] == 0:\n count[0] = 1\n elif isinstance(count, type(1)):\n count = [count]\n if stride is None:\n stride = [1] * rank\n elif isinstance(stride, type(1)):\n stride = [stride]\n if len(start) != rank or len(count) != rank or len(stride) != rank:\n raise HDF4Error('set : start, stride or count '\\\n 'do not match SDS rank')\n unlimited = self.isrecord()\n for n in range(rank):\n ok = 1\n if start[n] < 0:\n ok = 0\n elif n > 0 or not unlimited:\n if start[n] + (abs(count[n]) - 1) * stride[n] >= dim_sizes[n]:\n ok = 0\n if not ok:\n raise HDF4Error('set arguments violate '\\\n 'the size (%d) of dimension %d' \\\n % (dim_sizes[n], n))\n # ??? 
Check support for UINT16\n if not data_type in SDC.equivNumericTypes:\n raise HDF4Error('set cannot currrently deal '\\\n 'with the SDS data type')\n\n _C._SDwritedata_0(self._id, data_type, start, count, data, stride)", "def data(self, data):\n \"\"\" Sets the data of this item.\n Does type conversion to ensure data is always of the correct type.\n \"\"\"\n # Descendants should convert the data to the desired type here\n self._data = self._enforceDataType(data)\n\n #logger.debug(\"BoolCti.setData: {} for {}\".format(data, self))\n enabled = self.enabled\n self.enableBranch(enabled and self.data != self.childrenDisabledValue)\n self.enabled = enabled", "def set_data(self, data=None):\n \"\"\"Sets the content data.\n\n arg: data (osid.transport.DataInputStream): the content data\n raise: InvalidArgument - ``data`` is invalid\n raise: NoAccess - ``Metadata.isReadOnly()`` is ``true``\n raise: NullArgument - ``data`` is ``null``\n *compliance: mandatory -- This method must be implemented.*\n\n \"\"\"\n def has_secondary_storage():\n return 'secondary_data_store_path' in self._config_map\n\n extension = data.name.split('.')[-1]\n data_store_path = self._config_map['data_store_path']\n if has_secondary_storage():\n secondary_data_store_path = self._config_map['secondary_data_store_path']\n\n if '_id' in self._my_map:\n filename = self._my_map['_id']\n # remove any old file that is set\n if str(self._my_map['_id']) not in self._my_map['url']:\n os.remove(self._my_map['url'])\n\n if has_secondary_storage():\n old_path = '{0}/repository/AssetContent'.format(data_store_path)\n secondary_file_location = self._my_map['url'].replace(old_path,\n secondary_data_store_path)\n os.remove(secondary_file_location)\n else:\n filename = ObjectId()\n\n filesystem_location = '{0}/repository/AssetContent/'.format(data_store_path)\n\n if not os.path.isdir(filesystem_location):\n os.makedirs(filesystem_location)\n\n file_location = '{0}{1}.{2}'.format(filesystem_location,\n str(filename),\n extension)\n\n data.seek(0)\n\n with open(file_location, 'wb') as file_handle:\n file_handle.write(data.read())\n\n # this URL should be a filesystem path...relative\n # to the setting in runtime\n self._payload.set_url(file_location)\n\n # if set, also make a backup copy in the secondary_data_store_path\n if has_secondary_storage():\n data.seek(0)\n\n if not os.path.isdir(secondary_data_store_path):\n os.makedirs(secondary_data_store_path)\n\n file_location = '{0}/{1}.{2}'.format(secondary_data_store_path,\n str(filename),\n extension)\n with open(file_location, 'wb') as file_handle:\n file_handle.write(data.read())", "def set_state(seed_value=None, step=None):\n \"\"\"Set random seed.\"\"\"\n global RANDOM_SEED # pylint: disable=global-statement\n if seed_value is not None:\n RANDOM_SEED = seed_value\n if step is not None:\n RANDOM_SEED += step", "def set_data(self, data, coll_filter=None):\r\n \"\"\"Set model data\"\"\"\r\n self._data = data\r\n data_type = get_type_string(data)\r\n\r\n if coll_filter is not None and not self.remote and \\\r\n isinstance(data, (tuple, list, dict, set)):\r\n data = coll_filter(data)\r\n self.showndata = data\r\n\r\n self.header0 = _(\"Index\")\r\n if self.names:\r\n self.header0 = _(\"Name\")\r\n if isinstance(data, tuple):\r\n self.keys = list(range(len(data)))\r\n self.title += _(\"Tuple\")\r\n elif isinstance(data, list):\r\n self.keys = list(range(len(data)))\r\n self.title += _(\"List\")\r\n elif isinstance(data, set):\r\n self.keys = list(range(len(data)))\r\n self.title += _(\"Set\")\r\n 
self._data = list(data)\r\n elif isinstance(data, dict):\r\n self.keys = list(data.keys())\r\n self.title += _(\"Dictionary\")\r\n if not self.names:\r\n self.header0 = _(\"Key\")\r\n else:\r\n self.keys = get_object_attrs(data)\r\n self._data = data = self.showndata = ProxyObject(data)\r\n if not self.names:\r\n self.header0 = _(\"Attribute\")\r\n\r\n if not isinstance(self._data, ProxyObject):\r\n self.title += (' (' + str(len(self.keys)) + ' ' +\r\n _(\"elements\") + ')')\r\n else:\r\n self.title += data_type\r\n\r\n self.total_rows = len(self.keys)\r\n if self.total_rows > LARGE_NROWS:\r\n self.rows_loaded = ROWS_TO_LOAD\r\n else:\r\n self.rows_loaded = self.total_rows\r\n self.sig_setting_data.emit()\r\n self.set_size_and_type()\r\n self.reset()", "def _set_seed(self):\n \"\"\" Set random seed for numpy and tensorflow packages \"\"\"\n if self.flags['SEED'] is not None:\n tf.set_random_seed(self.flags['SEED'])\n np.random.seed(self.flags['SEED'])", "def set(self, align=None, code=None, detail=None, limit=None, stats=None):\n '''Set some options. See also **reset**.\n\n *align* -- size alignment\n\n *code* -- incl. (byte)code size\n\n *detail* -- Asized refs level\n\n *limit* -- recursion limit\n\n *stats* -- print statistics, see function **asizeof**\n\n Any options not set remain unchanged from the previous setting.\n '''\n # adjust\n if align is not None:\n self._align_ = align\n if align > 1:\n self._mask = align - 1\n if (self._mask & align) != 0:\n raise ValueError('invalid option: %s=%r' % ('align', align))\n else:\n self._mask = 0\n if code is not None:\n self._code_ = code\n if code: # incl. (byte)code\n self._incl = ' (incl. code)'\n if detail is not None:\n self._detail_ = detail\n if limit is not None:\n self._limit_ = limit\n if stats is not None:\n self._stats_ = s = int(stats)\n self._cutoff = (stats - s) * 100\n if s > 1: # profile types\n self._profile = True\n else:\n self._profile = False", "def set_neighbor_data(self, neighbor_side, data, key, field):\n \"\"\"\n Assign data from the 'key' tile to the edge on the\n neighboring tile which is on the 'neighbor_side' of the 'key' tile.\n The data is assigned to the 'field' attribute of the neihboring tile's\n edge.\n \"\"\"\n i = self.keys[key]\n found = False\n sides = []\n if 'left' in neighbor_side:\n if i % self.n_cols == 0:\n return None\n i -= 1\n sides.append('right')\n found = True\n if 'right' in neighbor_side:\n if i % self.n_cols == self.n_cols - 1:\n return None\n i += 1\n sides.append('left')\n found = True\n if 'top' in neighbor_side:\n sides.append('bottom')\n i -= self.n_cols\n found = True\n if 'bottom' in neighbor_side:\n sides.append('top')\n i += self.n_cols\n found = True\n if not found:\n print \"Side '%s' not found\" % neighbor_side\n # Check if i is in range\n if i < 0 or i >= self.n_chunks:\n return None\n # Otherwise, set the data\n for side in sides:\n self.set_i(i, data, field, side)", "def set_data(self, data, from_db=False):\n \"\"\"\n Fills the object's fields with given data dict.\n Internally calls the self._load_data() method.\n\n Args:\n data (dict): Data to fill object's fields.\n from_db (bool): if data coming from db then we will\n use related field type's _load_data method\n\n Returns:\n Self. Returns objects itself for chainability.\n \"\"\"\n self._load_data(data, from_db)\n return self" ]
[ 0.7334316968917847, 0.7288636565208435, 0.7227749824523926, 0.7101259231567383, 0.7063561677932739, 0.7062025666236877, 0.7042734026908875, 0.7031860947608948, 0.6948578357696533, 0.6918073892593384, 0.6902747750282288, 0.6894993185997009 ]
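The positive in the row above builds a Potts pairwise term before calling pygco.cut_from_graph. That construction can be reproduced in isolation; this is only a sketch of the pairwise matrix itself, with pairwise_alpha set to an assumed example value, and it deliberately stops short of running the graph cut:

    import numpy as np

    pairwise_alpha = 30  # assumed example value for segparams["pairwise_alpha"]
    pairwise = -(np.eye(2) - 1)            # off-diagonal ones: [[0, 1], [1, 0]]
    pairwise = (pairwise_alpha * pairwise).astype(np.int32)
    print(pairwise)
    # [[ 0 30]
    #  [30  0]]
    # The positive then passes this, together with nlinks and the flattened unaries,
    # to pygco.cut_from_graph(nlinks, unariesalt.reshape(-1, 2), pairwise).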
Function resizes input data to a specific shape. :param data: input 3d array-like data :param shape: shape of output data :param zoom: zoom is used for backward compatibility :param mode: default is 'nearest'
def resize_to_shape(data, shape, zoom=None, mode="nearest", order=0): """ Function resize input data to specific shape. :param data: input 3d array-like data :param shape: shape of output data :param zoom: zoom is used for back compatibility :mode: default is 'nearest' """ # @TODO remove old code in except part # TODO use function from library in future try: # rint 'pred vyjimkou' # aise Exception ('test without skimage') # rint 'za vyjimkou' import skimage import skimage.transform # Now we need reshape seeds and segmentation to original size # with warnings.catch_warnings(): # warnings.filterwarnings("ignore", ".*'constant', will be changed to.*") segm_orig_scale = skimage.transform.resize( data, shape, order=0, preserve_range=True, mode="reflect" ) segmentation = segm_orig_scale logger.debug("resize to orig with skimage") except: if zoom is None: zoom = shape / np.asarray(data.shape).astype(np.double) segmentation = resize_to_shape_with_zoom( data, zoom=zoom, mode=mode, order=order ) return segmentation
[ "def resize_to_shape(data, shape, zoom=None, mode='nearest', order=0):\n \"\"\"\n Function resize input data to specific shape.\n\n :param data: input 3d array-like data\n :param shape: shape of output data\n :param zoom: zoom is used for back compatibility\n :mode: default is 'nearest'\n \"\"\"\n # @TODO remove old code in except part\n\n try:\n # rint 'pred vyjimkou'\n # aise Exception ('test without skimage')\n # rint 'za vyjimkou'\n import skimage\n import skimage.transform\n# Now we need reshape seeds and segmentation to original size\n\n segm_orig_scale = skimage.transform.resize(\n data, shape, order=0,\n preserve_range=True,\n mode=\"constant\",\n )\n\n segmentation = segm_orig_scale\n logger.debug('resize to orig with skimage')\n except:\n import scipy\n import scipy.ndimage\n dtype = data.dtype\n if zoom is None:\n zoom = shape / np.asarray(data.shape).astype(np.double)\n\n segm_orig_scale = scipy.ndimage.zoom(\n data,\n 1.0 / zoom,\n mode=mode,\n order=order\n ).astype(dtype)\n logger.debug('resize to orig with scipy.ndimage')\n\n# @TODO odstranit hack pro oříznutí na stejnou velikost\n# v podstatě je to vyřešeno, ale nechalo by se to dělat elegantněji v zoom\n# tam je bohužel patrně bug\n # rint 'd3d ', self.data3d.shape\n # rint 's orig scale shape ', segm_orig_scale.shape\n shp = [\n np.min([segm_orig_scale.shape[0], shape[0]]),\n np.min([segm_orig_scale.shape[1], shape[1]]),\n np.min([segm_orig_scale.shape[2], shape[2]]),\n ]\n # elf.data3d = self.data3d[0:shp[0], 0:shp[1], 0:shp[2]]\n # mport ipdb; ipdb.set_trace() # BREAKPOINT\n\n segmentation = np.zeros(shape, dtype=dtype)\n segmentation[\n 0:shp[0],\n 0:shp[1],\n 0:shp[2]] = segm_orig_scale[0:shp[0], 0:shp[1], 0:shp[2]]\n\n del segm_orig_scale\n return segmentation", "def zoom_to_shape(data, shape, dtype=None):\n \"\"\"\n Zoom data to specific shape.\n \"\"\"\n import scipy\n import scipy.ndimage\n\n zoomd = np.array(shape) / np.array(data.shape, dtype=np.double)\n import warnings\n\n datares = scipy.ndimage.interpolation.zoom(data, zoomd, order=0, mode=\"reflect\")\n\n if datares.shape != shape:\n logger.warning(\"Zoom with different output shape\")\n dataout = np.zeros(shape, dtype=dtype)\n shpmin = np.minimum(dataout.shape, shape)\n\n dataout[: shpmin[0], : shpmin[1], : shpmin[2]] = datares[\n : shpmin[0], : shpmin[1], : shpmin[2]\n ]\n return datares", "def resize(self, shape):\n \"\"\"Resize the image to the given *shape* tuple, in place. For zooming,\n nearest neighbour method is used, while for shrinking, decimation is\n used. 
Therefore, *shape* must be a multiple or a divisor of the image\n shape.\n \"\"\"\n if self.is_empty():\n raise ValueError(\"Cannot resize an empty image\")\n\n factor = [1, 1]\n zoom = [True, True]\n zoom[0] = shape[0] >= self.height\n zoom[1] = shape[1] >= self.width\n\n if zoom[0]:\n factor[0] = shape[0] * 1.0 / self.height\n else:\n factor[0] = self.height * 1.0 / shape[0]\n if zoom[1]:\n factor[1] = shape[1] * 1.0 / self.width\n else:\n factor[1] = self.width * 1.0 / shape[1]\n\n if(int(factor[0]) != factor[0] or\n int(factor[1]) != factor[1]):\n raise ValueError(\"Resize not of integer factor!\")\n\n factor[0] = int(factor[0])\n factor[1] = int(factor[1])\n\n i = 0\n for chn in self.channels:\n if zoom[0]:\n chn = chn.repeat([factor[0]] * chn.shape[0], axis=0)\n else:\n chn = chn[[idx * factor[0]\n for idx in range(int(self.height / factor[0]))],\n :]\n if zoom[1]:\n self.channels[i] = chn.repeat([factor[1]] * chn.shape[1],\n axis=1)\n else:\n self.channels[i] = chn[:,\n [idx * factor[1]\n for idx in range(int(self.width /\n factor[1]))]]\n\n i = i + 1\n\n self.height = self.channels[0].shape[0]\n self.width = self.channels[0].shape[1]\n self.shape = self.channels[0].shape", "def resize_to_mm(data3d, voxelsize_mm, new_voxelsize_mm, mode='nearest'):\n \"\"\"\n Function can resize data3d or segmentation to specifed voxelsize_mm\n :new_voxelsize_mm: requested voxelsize. List of 3 numbers, also\n can be a string 'orig', 'orgi*2' and 'orgi*4'.\n\n :voxelsize_mm: size of voxel\n :mode: default is 'nearest'\n \"\"\"\n import scipy\n import scipy.ndimage\n\n if np.all(list(new_voxelsize_mm) == 'orig'):\n new_voxelsize_mm = np.array(voxelsize_mm)\n elif np.all(list(new_voxelsize_mm) == 'orig*2'):\n new_voxelsize_mm = np.array(voxelsize_mm) * 2\n elif np.all(list(new_voxelsize_mm) == 'orig*4'):\n new_voxelsize_mm = np.array(voxelsize_mm) * 4\n # vx_size = np.array(metadata['voxelsize_mm']) * 4\n\n zoom = voxelsize_mm / (1.0 * np.array(new_voxelsize_mm))\n data3d_res = scipy.ndimage.zoom(\n data3d,\n zoom,\n mode=mode,\n order=1\n ).astype(data3d.dtype)\n return data3d_res", "def imresize(self, data, new_wd, new_ht, method='bilinear'):\n \"\"\"Scale an image in numpy array _data_ to the specified width and\n height. 
A smooth scaling is preferred.\n \"\"\"\n old_ht, old_wd = data.shape[:2]\n start_time = time.time()\n\n if have_pilutil:\n means = 'PIL'\n zoom_x = float(new_wd) / float(old_wd)\n zoom_y = float(new_ht) / float(old_ht)\n if (old_wd >= new_wd) or (old_ht >= new_ht):\n # data size is bigger, skip pixels\n zoom = max(zoom_x, zoom_y)\n else:\n zoom = min(zoom_x, zoom_y)\n\n newdata = imresize(data, zoom, interp=method)\n\n else:\n raise ImageError(\"No way to scale image smoothly\")\n\n end_time = time.time()\n self.logger.debug(\"scaling (%s) time %.4f sec\" % (\n means, end_time - start_time))\n\n return newdata", "def resize(image, shape, kind='linear'):\n \"\"\"Resize an image\n\n Parameters\n ----------\n image : ndarray\n Array of shape (N, M, ...).\n shape : tuple\n 2-element shape.\n kind : str\n Interpolation, either \"linear\" or \"nearest\".\n\n Returns\n -------\n scaled_image : ndarray\n New image, will have dtype np.float64.\n \"\"\"\n image = np.array(image, float)\n shape = np.array(shape, int)\n if shape.ndim != 1 or shape.size != 2:\n raise ValueError('shape must have two elements')\n if image.ndim < 2:\n raise ValueError('image must have two dimensions')\n if not isinstance(kind, string_types) or kind not in ('nearest', 'linear'):\n raise ValueError('mode must be \"nearest\" or \"linear\"')\n\n r = np.linspace(0, image.shape[0] - 1, shape[0])\n c = np.linspace(0, image.shape[1] - 1, shape[1])\n if kind == 'linear':\n r_0 = np.floor(r).astype(int)\n c_0 = np.floor(c).astype(int)\n r_1 = r_0 + 1\n c_1 = c_0 + 1\n\n top = (r_1 - r)[:, np.newaxis]\n bot = (r - r_0)[:, np.newaxis]\n lef = (c - c_0)[np.newaxis, :]\n rig = (c_1 - c)[np.newaxis, :]\n\n c_1 = np.minimum(c_1, image.shape[1] - 1)\n r_1 = np.minimum(r_1, image.shape[0] - 1)\n for arr in (top, bot, lef, rig):\n arr.shape = arr.shape + (1,) * (image.ndim - 2)\n out = top * rig * image[r_0][:, c_0, ...]\n out += bot * rig * image[r_1][:, c_0, ...]\n out += top * lef * image[r_0][:, c_1, ...]\n out += bot * lef * image[r_1][:, c_1, ...]\n else: # kind == 'nearest'\n r = np.round(r).astype(int)\n c = np.round(c).astype(int)\n out = image[r][:, c, ...]\n return out", "def zoom(self, factor, order=1, verbose=True):\n \"\"\"Zoom the data array using spline interpolation of the requested order.\n\n The number of points along each axis is increased by factor.\n See `scipy ndimage`__ for more info.\n\n __ http://docs.scipy.org/doc/scipy/reference/\n generated/scipy.ndimage.interpolation.zoom.html\n\n Parameters\n ----------\n factor : float\n The number of points along each axis will increase by this factor.\n order : int (optional)\n The order of the spline used to interpolate onto new points.\n verbose : bool (optional)\n Toggle talkback. 
Default is True.\n \"\"\"\n raise NotImplementedError\n import scipy.ndimage\n\n # axes\n for axis in self._axes:\n axis[:] = scipy.ndimage.interpolation.zoom(axis[:], factor, order=order)\n # channels\n for channel in self.channels:\n channel[:] = scipy.ndimage.interpolation.zoom(channel[:], factor, order=order)\n # return\n if verbose:\n print(\"data zoomed to new shape:\", self.shape)", "def _resize(self, shape, format=None, internalformat=None):\n \"\"\"Internal method for resize.\n \"\"\"\n shape = self._normalize_shape(shape)\n\n # Check\n if not self._resizable:\n raise RuntimeError(\"Texture is not resizable\")\n\n # Determine format\n if format is None:\n format = self._formats[shape[-1]]\n # Keep current format if channels match\n if self._format and \\\n self._inv_formats[self._format] == self._inv_formats[format]:\n format = self._format\n else:\n format = check_enum(format)\n\n if internalformat is None:\n # Keep current internalformat if channels match\n if self._internalformat and \\\n self._inv_internalformats[self._internalformat] == shape[-1]:\n internalformat = self._internalformat\n else:\n\n internalformat = check_enum(internalformat)\n\n # Check\n if format not in self._inv_formats:\n raise ValueError('Invalid texture format: %r.' % format)\n elif shape[-1] != self._inv_formats[format]:\n raise ValueError('Format does not match with given shape. '\n '(format expects %d elements, data has %d)' %\n (self._inv_formats[format], shape[-1]))\n \n if internalformat is None:\n pass\n elif internalformat not in self._inv_internalformats:\n raise ValueError(\n 'Invalid texture internalformat: %r. Allowed formats: %r' \n % (internalformat, self._inv_internalformats)\n )\n elif shape[-1] != self._inv_internalformats[internalformat]:\n raise ValueError('Internalformat does not match with given shape.')\n\n # Store and send GLIR command\n self._shape = shape\n self._format = format\n self._internalformat = internalformat\n self._glir.command('SIZE', self._id, self._shape, self._format, \n self._internalformat)", "def resize_image(self, data, size):\n \"\"\" Resizes the given image to fit inside a box of the given size. \"\"\"\n from machina.core.compat import PILImage as Image\n image = Image.open(BytesIO(data))\n\n # Resize!\n image.thumbnail(size, Image.ANTIALIAS)\n\n string = BytesIO()\n image.save(string, format='PNG')\n return string.getvalue()", "def _reshape_input_data(shape, desired_dim=3):\n \"\"\"\n Static function returns the column-wise shape for for an input shape.\n\n Input:\n --------------\n shape: tuple\n Shape of an input array, so that it is always a column.\n\n desired_dim: int\n desired shape of output. For Y data it should be 3\n (sample_no, dimension, ts_no). For X data - 2 (sample_no, 1)\n Output:\n --------------\n new_shape: tuple\n New shape of the measurements array. 
Idea is that samples are\n along dimension 0, sample dimension - dimension 1, different\n time series - dimension 2.\n old_shape: tuple or None\n If the shape has been modified, return old shape, otherwise\n None.\n \"\"\"\n\n if (len(shape) > 3):\n raise ValueError(\"\"\"Input array is not supposed to be more\n than 3 dimensional.\"\"\")\n\n if (len(shape) > desired_dim):\n raise ValueError(\"Input array shape is more than desired shape.\")\n elif len(shape) == 1:\n if (desired_dim == 3):\n return ((shape[0], 1, 1), shape) # last dimension is the\n # time serime_series_no\n elif (desired_dim == 2):\n return ((shape[0], 1), shape)\n\n elif len(shape) == 2:\n if (desired_dim == 3):\n return ((shape[1], 1, 1), shape) if (shape[0] == 1) else\\\n ((shape[0], shape[1], 1), shape) # convert to column\n # vector\n elif (desired_dim == 2):\n return ((shape[1], 1), shape) if (shape[0] == 1) else\\\n ((shape[0], shape[1]), None) # convert to column vector\n\n else: # len(shape) == 3\n return (shape, None)", "def resize(x, mode, factor=4):\n \"\"\"Resize input tensor with unkown input-shape by a factor\n\n Args:\n x (tf.Tensor): tensor NCHW\n factor (int, optional): resize factor for H, W\n\n Note:\n Differences here against Caffe have huge impacts on the\n quality of the predictions.\n\n Returns:\n tf.Tensor: resized tensor NCHW\n \"\"\"\n assert mode in ['bilinear', 'nearest'], mode\n shp = tf.shape(x)[2:] * factor\n # NCHW -> NHWC\n x = tf.transpose(x, [0, 2, 3, 1])\n if mode == 'bilinear':\n x = tf.image.resize_bilinear(x, shp, align_corners=True)\n else:\n # better approximation of what Caffe is doing\n x = tf.image.resize_nearest_neighbor(x, shp, align_corners=False)\n # NHWC -> NCHW\n return tf.transpose(x, [0, 3, 1, 2])", "def resize(self, shape, format=None, internalformat=None):\n \"\"\"Set the texture size and format\n\n Parameters\n ----------\n shape : tuple of integers\n New texture shape in zyx order. Optionally, an extra dimention\n may be specified to indicate the number of color channels.\n format : str | enum | None\n The format of the texture: 'luminance', 'alpha',\n 'luminance_alpha', 'rgb', or 'rgba'. If not given the format\n is chosen automatically based on the number of channels.\n When the data has one channel, 'luminance' is assumed.\n internalformat : str | enum | None\n The internal (storage) format of the texture: 'luminance',\n 'alpha', 'r8', 'r16', 'r16f', 'r32f'; 'luminance_alpha',\n 'rg8', 'rg16', 'rg16f', 'rg32f'; 'rgb', 'rgb8', 'rgb16',\n 'rgb16f', 'rgb32f'; 'rgba', 'rgba8', 'rgba16', 'rgba16f',\n 'rgba32f'. If None, the internalformat is chosen\n automatically based on the number of channels. This is a\n hint which may be ignored by the OpenGL implementation.\n \"\"\"\n return self._resize(shape, format, internalformat)" ]
[ 0.7604760527610779, 0.7587894797325134, 0.7159501314163208, 0.7097681760787964, 0.6770105957984924, 0.6759569048881531, 0.666746199131012, 0.6527373790740967, 0.6487169861793518, 0.6436185240745544, 0.6436010599136353, 0.6350785493850708 ]
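The row above resizes data to a target shape, with a scipy zoom fallback when skimage is unavailable. A minimal, runnable sketch of that fallback path on an invented toy label volume (assuming numpy and scipy) looks like this; order=0 keeps the integer labels intact:

    import numpy as np
    import scipy.ndimage

    seg = np.zeros((4, 4, 4), dtype=np.int8)
    seg[1:3, 1:3, 1:3] = 1                      # toy segmentation
    target_shape = (8, 8, 8)
    zoom = np.asarray(target_shape) / np.asarray(seg.shape).astype(np.double)
    seg_hi = scipy.ndimage.zoom(seg, zoom, order=0, mode="nearest").astype(seg.dtype)
    print(seg_hi.shape, np.unique(seg_hi))      # (8, 8, 8) [0 1]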
Smart zoom for a sparse matrix. If there is a resize to a bigger resolution, a thin line of labels could be lost. This function prefers labels larger than zero. If there is only one small voxel in a larger volume of zeros, it is selected.
def seed_zoom(seeds, zoom): """ Smart zoom for sparse matrix. If there is resize to bigger resolution thin line of label could be lost. This function prefers labels larger then zero. If there is only one small voxel in larger volume with zeros it is selected. """ # import scipy # loseeds=seeds labels = np.unique(seeds) # remove first label - 0 labels = np.delete(labels, 0) # @TODO smart interpolation for seeds in one block # loseeds = scipy.ndimage.interpolation.zoom( # seeds, zoom, order=0) loshape = np.ceil(np.array(seeds.shape) * 1.0 / zoom).astype(np.int) loseeds = np.zeros(loshape, dtype=np.int8) loseeds = loseeds.astype(np.int8) for label in labels: a, b, c = np.where(seeds == label) loa = np.round(a // zoom) lob = np.round(b // zoom) loc = np.round(c // zoom) # loseeds = np.zeros(loshape) loseeds[loa, lob, loc] += label # this is to detect conflict seeds loseeds[loseeds > label] = 100 # remove conflict seeds loseeds[loseeds > 99] = 0 # import py3DSeedEditor # ped = py3DSeedEditor.py3DSeedEditor(loseeds) # ped.show() return loseeds
[ "def _zoom(scale:uniform=1.0, row_pct:uniform=0.5, col_pct:uniform=0.5):\n \"Zoom image by `scale`. `row_pct`,`col_pct` select focal point of zoom.\"\n s = 1-1/scale\n col_c = s * (2*col_pct - 1)\n row_c = s * (2*row_pct - 1)\n return _get_zoom_mat(1/scale, 1/scale, col_c, row_c)", "def zoom(self, factor, order=1, verbose=True):\n \"\"\"Zoom the data array using spline interpolation of the requested order.\n\n The number of points along each axis is increased by factor.\n See `scipy ndimage`__ for more info.\n\n __ http://docs.scipy.org/doc/scipy/reference/\n generated/scipy.ndimage.interpolation.zoom.html\n\n Parameters\n ----------\n factor : float\n The number of points along each axis will increase by this factor.\n order : int (optional)\n The order of the spline used to interpolate onto new points.\n verbose : bool (optional)\n Toggle talkback. Default is True.\n \"\"\"\n raise NotImplementedError\n import scipy.ndimage\n\n # axes\n for axis in self._axes:\n axis[:] = scipy.ndimage.interpolation.zoom(axis[:], factor, order=order)\n # channels\n for channel in self.channels:\n channel[:] = scipy.ndimage.interpolation.zoom(channel[:], factor, order=order)\n # return\n if verbose:\n print(\"data zoomed to new shape:\", self.shape)", "def _squish(scale:uniform=1.0, row_pct:uniform=0.5, col_pct:uniform=0.5):\n \"Squish image by `scale`. `row_pct`,`col_pct` select focal point of zoom.\"\n if scale <= 1:\n col_c = (1-scale) * (2*col_pct - 1)\n return _get_zoom_mat(scale, 1, col_c, 0.)\n else:\n row_c = (1-1/scale) * (2*row_pct - 1)\n return _get_zoom_mat(1, 1/scale, 0., row_c)", "def zoom(image, factor, dimension, hdr = False, order = 3):\n \"\"\"\n Zooms the provided image by the supplied factor in the supplied dimension.\n The factor is an integer determining how many slices should be put between each\n existing pair.\n If an image header (hdr) is supplied, its voxel spacing gets updated.\n Returns the image and the updated header or false.\n \"\"\"\n # check if supplied dimension is valid\n if dimension >= image.ndim:\n raise argparse.ArgumentError('The supplied zoom-dimension {} exceeds the image dimensionality of 0 to {}.'.format(dimension, image.ndim - 1))\n \n # get logger\n logger = Logger.getInstance()\n\n logger.debug('Old shape = {}.'.format(image.shape))\n\n # perform the zoom\n zoom = [1] * image.ndim\n zoom[dimension] = (image.shape[dimension] + (image.shape[dimension] - 1) * factor) / float(image.shape[dimension])\n logger.debug('Reshaping with = {}.'.format(zoom))\n image = interpolation.zoom(image, zoom, order=order)\n \n logger.debug('New shape = {}.'.format(image.shape))\n \n if hdr:\n new_spacing = list(header.get_pixel_spacing(hdr))\n new_spacing[dimension] = new_spacing[dimension] / float(factor + 1)\n logger.debug('Setting pixel spacing from {} to {}....'.format(header.get_pixel_spacing(hdr), new_spacing))\n header.set_pixel_spacing(hdr, tuple(new_spacing))\n \n return image, hdr", "def zoom_in(self):\n \"\"\"Zooms in by zoom factor\"\"\"\n\n zoom = self.grid.grid_renderer.zoom\n\n target_zoom = zoom * (1 + config[\"zoom_factor\"])\n\n if target_zoom < config[\"maximum_zoom\"]:\n self.zoom(target_zoom)", "def _compute_zs_mat(sz:TensorImageSize, scale:float, squish:float,\n invert:bool, row_pct:float, col_pct:float)->AffineMatrix:\n \"Utility routine to compute zoom/squish matrix.\"\n orig_ratio = math.sqrt(sz[1]/sz[0])\n for s,r,i in zip(scale,squish, invert):\n s,r = 1/math.sqrt(s),math.sqrt(r)\n if s * r <= 1 and s / r <= 1: #Test if we are completely inside the 
picture\n w,h = (s/r, s*r) if i else (s*r,s/r)\n col_c = (1-w) * (2*col_pct - 1)\n row_c = (1-h) * (2*row_pct - 1)\n return _get_zoom_mat(w, h, col_c, row_c)\n\n #Fallback, hack to emulate a center crop without cropping anything yet.\n if orig_ratio > 1: return _get_zoom_mat(1/orig_ratio**2, 1, 0, 0.)\n else: return _get_zoom_mat(1, orig_ratio**2, 0, 0.)", "def _zoom_labels(self, zoom):\n \"\"\"Adjust grid label font to zoom factor\"\"\"\n\n labelfont = self.grid.GetLabelFont()\n default_fontsize = get_default_font().GetPointSize()\n labelfont.SetPointSize(max(1, int(round(default_fontsize * zoom))))\n self.grid.SetLabelFont(labelfont)", "def sc_zoom_coarse(self, viewer, event, msg=True):\n \"\"\"Interactively zoom the image by scrolling motion.\n This zooms by adjusting the scale in x and y coarsely.\n \"\"\"\n if not self.canzoom:\n return True\n\n zoom_accel = self.settings.get('scroll_zoom_acceleration', 1.0)\n # change scale by 20%\n amount = self._scale_adjust(1.2, event.amount, zoom_accel, max_limit=4.0)\n self._scale_image(viewer, event.direction, amount, msg=msg)\n return True", "def mouse_zoom(self, inc):\n '''Convenience function to implement a zoom function.\n\n This is achieved by moving ``Camera.position`` in the\n direction of the ``Camera.c`` vector.\n\n '''\n # Square Distance from pivot\n dsq = np.linalg.norm(self.position - self.pivot)\n minsq = 1.0**2 # How near can we be to the pivot\n maxsq = 7.0**2 # How far can we go \n\n scalefac = 0.25\n\n if dsq > maxsq and inc < 0: \n # We're going too far\n pass\n elif dsq < minsq and inc > 0:\n # We're going too close\n pass\n else:\n # We're golden\n self.position += self.c*inc*scalefac", "def zoom_out(self):\n \"\"\"Zooms out by zoom factor\"\"\"\n\n zoom = self.grid.grid_renderer.zoom\n\n target_zoom = zoom * (1 - config[\"zoom_factor\"])\n\n if target_zoom > config[\"minimum_zoom\"]:\n self.zoom(target_zoom)", "def resize_to_mm(data3d, voxelsize_mm, new_voxelsize_mm, mode='nearest'):\n \"\"\"\n Function can resize data3d or segmentation to specifed voxelsize_mm\n :new_voxelsize_mm: requested voxelsize. List of 3 numbers, also\n can be a string 'orig', 'orgi*2' and 'orgi*4'.\n\n :voxelsize_mm: size of voxel\n :mode: default is 'nearest'\n \"\"\"\n import scipy\n import scipy.ndimage\n\n if np.all(list(new_voxelsize_mm) == 'orig'):\n new_voxelsize_mm = np.array(voxelsize_mm)\n elif np.all(list(new_voxelsize_mm) == 'orig*2'):\n new_voxelsize_mm = np.array(voxelsize_mm) * 2\n elif np.all(list(new_voxelsize_mm) == 'orig*4'):\n new_voxelsize_mm = np.array(voxelsize_mm) * 4\n # vx_size = np.array(metadata['voxelsize_mm']) * 4\n\n zoom = voxelsize_mm / (1.0 * np.array(new_voxelsize_mm))\n data3d_res = scipy.ndimage.zoom(\n data3d,\n zoom,\n mode=mode,\n order=1\n ).astype(data3d.dtype)\n return data3d_res", "def cmd_zoom(self, args):\n '''control zoom'''\n if len(args) < 2:\n print(\"map zoom WIDTH(m)\")\n return\n ground_width = float(args[1])\n self.map.set_zoom(ground_width)" ]
[ 0.6950457692146301, 0.6948485374450684, 0.6808585524559021, 0.678534984588623, 0.676499605178833, 0.6748631596565247, 0.6726201772689819, 0.6691378951072693, 0.6671112775802612, 0.6661582589149475, 0.6641716957092285, 0.6616668701171875 ]
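A minimal usage sketch for the seed_zoom helper above: it assumes the function and NumPy are available in the current scope, and the toy seed volume and integer zoom factor are invented for illustration.

import numpy as np

# Assumes seed_zoom() from the row above is defined in this scope.
seeds = np.zeros((8, 8, 8), dtype=np.int8)
seeds[2, 3, 4] = 1           # a single, thin foreground seed
seeds[5:7, 5:7, 5:7] = 2     # a small block of background seeds

lo = seed_zoom(seeds, zoom=2)  # downsample every axis by a factor of 2
print(lo.shape)                # (4, 4, 4)
print(np.unique(lo))           # [0 1 2] - even the single-voxel label survives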
Zoom data to a specific shape.
def zoom_to_shape(data, shape, dtype=None):
    """
    Zoom data to a specific shape.
    """
    import scipy
    import scipy.ndimage

    zoomd = np.array(shape) / np.array(data.shape, dtype=np.double)
    datares = scipy.ndimage.zoom(data, zoomd, order=0, mode="reflect")

    if datares.shape != tuple(shape):
        logger.warning("Zoom with different output shape")
        # pad/crop the zoomed data so the output has exactly the requested shape
        if dtype is None:
            dtype = data.dtype
        dataout = np.zeros(shape, dtype=dtype)
        shpmin = np.minimum(datares.shape, shape)
        dataout[: shpmin[0], : shpmin[1], : shpmin[2]] = datares[
            : shpmin[0], : shpmin[1], : shpmin[2]
        ]
        return dataout

    return datares
[ "def zoom(self, factor, order=1, verbose=True):\n \"\"\"Zoom the data array using spline interpolation of the requested order.\n\n The number of points along each axis is increased by factor.\n See `scipy ndimage`__ for more info.\n\n __ http://docs.scipy.org/doc/scipy/reference/\n generated/scipy.ndimage.interpolation.zoom.html\n\n Parameters\n ----------\n factor : float\n The number of points along each axis will increase by this factor.\n order : int (optional)\n The order of the spline used to interpolate onto new points.\n verbose : bool (optional)\n Toggle talkback. Default is True.\n \"\"\"\n raise NotImplementedError\n import scipy.ndimage\n\n # axes\n for axis in self._axes:\n axis[:] = scipy.ndimage.interpolation.zoom(axis[:], factor, order=order)\n # channels\n for channel in self.channels:\n channel[:] = scipy.ndimage.interpolation.zoom(channel[:], factor, order=order)\n # return\n if verbose:\n print(\"data zoomed to new shape:\", self.shape)", "def zoom(self, value):\n \"\"\"Zoom level.\"\"\"\n if isinstance(value, (int, float)):\n value = (value, value)\n assert len(value) == 2\n self._zoom = np.clip(value, self._zmin, self._zmax)\n\n # Constrain bounding box.\n self._constrain_pan()\n self._constrain_zoom()\n\n self.update()", "def zoom(self, n='all', xfactor=2.0, yfactor=2.0):\n \"\"\"\n This will scale the chosen data set's plot range by the\n specified xfactor and yfactor, respectively, and set the trim limits\n xmin, xmax, ymin, ymax accordingly\n\n Parameters\n ----------\n n='all' \n Which data set to perform this action upon. 'all' means all data\n sets, or you can specify a list.\n xfactor=2.0\n Factor by which to scale the x range.\n yfactor=2.0\n Factor by which to scale the y range.\n \"\"\"\n if len(self._set_xdata)==0 or len(self._set_ydata)==0:\n self._error(\"No data. 
Please use set_data() and plot() prior to zooming.\")\n return\n\n # get the data\n xdata, ydata, eydata = self.get_data()\n\n if _s.fun.is_a_number(n): n = [n]\n elif isinstance(n,str): n = list(range(len(xdata)))\n\n # loop over the specified plots\n for i in n:\n fig = self['first_figure']+i\n try:\n xmin, xmax = _p.figure(fig).axes[1].get_xlim()\n xc = 0.5*(xmin+xmax)\n xs = 0.5*abs(xmax-xmin)\n self['xmin'][i] = xc - xfactor*xs\n self['xmax'][i] = xc + xfactor*xs\n\n ymin, ymax = _p.figure(fig).axes[1].get_ylim()\n yc = 0.5*(ymin+ymax)\n ys = 0.5*abs(ymax-ymin)\n self['ymin'][i] = yc - yfactor*ys\n self['ymax'][i] = yc + yfactor*ys\n except:\n self._error(\"Data \"+str(fig)+\" is not currently plotted.\")\n\n # now show the update.\n self.clear_results()\n if self['autoplot']: self.plot()\n\n return self", "def auto_zoom(zoomx=True, zoomy=True, axes=\"gca\", x_space=0.04, y_space=0.04, draw=True):\n \"\"\"\n Looks at the bounds of the plotted data and zooms accordingly, leaving some\n space around the data.\n \"\"\"\n\n # Disable auto-updating by default.\n _pylab.ioff()\n\n if axes==\"gca\": axes = _pylab.gca()\n\n # get the current bounds\n x10, x20 = axes.get_xlim()\n y10, y20 = axes.get_ylim()\n\n # Autoscale using pylab's technique (catches the error bars!)\n axes.autoscale(enable=True, tight=True)\n\n # Add padding\n if axes.get_xscale() == 'linear':\n x1, x2 = axes.get_xlim()\n xc = 0.5*(x1+x2)\n xs = 0.5*(1+x_space)*(x2-x1)\n axes.set_xlim(xc-xs, xc+xs)\n \n if axes.get_yscale() == 'linear':\n y1, y2 = axes.get_ylim()\n yc = 0.5*(y1+y2)\n ys = 0.5*(1+y_space)*(y2-y1)\n axes.set_ylim(yc-ys, yc+ys)\n \n # If we weren't supposed to zoom x or y, reset them\n if not zoomx: axes.set_xlim(x10, x20)\n if not zoomy: axes.set_ylim(y10, y20)\n \n if draw: \n _pylab.ion()\n _pylab.draw()", "def resize_to_shape(data, shape, zoom=None, mode=\"nearest\", order=0):\n \"\"\"\n Function resize input data to specific shape.\n :param data: input 3d array-like data\n :param shape: shape of output data\n :param zoom: zoom is used for back compatibility\n :mode: default is 'nearest'\n \"\"\"\n # @TODO remove old code in except part\n # TODO use function from library in future\n\n try:\n # rint 'pred vyjimkou'\n # aise Exception ('test without skimage')\n # rint 'za vyjimkou'\n import skimage\n import skimage.transform\n\n # Now we need reshape seeds and segmentation to original size\n\n # with warnings.catch_warnings():\n # warnings.filterwarnings(\"ignore\", \".*'constant', will be changed to.*\")\n segm_orig_scale = skimage.transform.resize(\n data, shape, order=0, preserve_range=True, mode=\"reflect\"\n )\n\n segmentation = segm_orig_scale\n logger.debug(\"resize to orig with skimage\")\n except:\n if zoom is None:\n zoom = shape / np.asarray(data.shape).astype(np.double)\n segmentation = resize_to_shape_with_zoom(\n data, zoom=zoom, mode=mode, order=order\n )\n\n return segmentation", "def ms_zoom_in(self, viewer, event, data_x, data_y, msg=False):\n \"\"\"Zoom in one level by a mouse click.\n \"\"\"\n if not self.canzoom:\n return True\n\n if not (event.state == 'down'):\n return True\n\n with viewer.suppress_redraw:\n viewer.panset_xy(data_x, data_y)\n\n if self.settings.get('scroll_zoom_direct_scale', True):\n zoom_accel = self.settings.get('scroll_zoom_acceleration', 1.0)\n # change scale by 100%\n amount = self._scale_adjust(2.0, 15.0, zoom_accel, max_limit=4.0)\n self._scale_image(viewer, 0.0, amount, msg=msg)\n else:\n viewer.zoom_in()\n\n if hasattr(viewer, 'center_cursor'):\n 
viewer.center_cursor()\n if msg:\n viewer.onscreen_message(viewer.get_scale_text(),\n delay=1.0)\n return True", "def zoom2D(xi, yi, zi, xi_zoom=3., yi_zoom=3., order=3, mode=\"nearest\", cval=0.):\n \"\"\"Zoom a 2D array, with axes.\n\n Parameters\n ----------\n xi : 1D array\n x axis points.\n yi : 1D array\n y axis points.\n zi : 2D array\n array values. Shape of (x, y).\n xi_zoom : float (optional)\n Zoom factor along x axis. Default is 3.\n yi_zoom : float (optional)\n Zoom factor along y axis. Default is 3.\n order : int (optional)\n The order of the spline interpolation, between 0 and 5. Default is 3.\n mode : {'constant', 'nearest', 'reflect', or 'wrap'}\n Points outside the boundaries of the input are filled according to the\n given mode. Default is nearest.\n cval : scalar (optional)\n Value used for constant mode. Default is 0.0.\n \"\"\"\n xi = ndimage.interpolation.zoom(xi, xi_zoom, order=order, mode=\"nearest\")\n yi = ndimage.interpolation.zoom(yi, yi_zoom, order=order, mode=\"nearest\")\n zi = ndimage.interpolation.zoom(zi, (xi_zoom, yi_zoom), order=order, mode=mode, cval=cval)\n return xi, yi, zi", "def shape(self, shape=None):\n \"\"\"We need to shift buffers in order to change shape\"\"\"\n if shape is None:\n return self._shape\n data, color = self.renderer.manager.set_shape(self.model.id, shape)\n self.model.data = data\n self.color = color\n self._shape = shape", "def unzoom(self, event=None, set_bounds=True):\n \"\"\" zoom out 1 level, or to full data range \"\"\"\n lims = None\n if len(self.conf.zoom_lims) > 1:\n lims = self.conf.zoom_lims.pop()\n ax = self.axes\n # print 'base unzoom ', lims, set_bounds\n if lims is None: # auto scale\n self.conf.zoom_lims = [None]\n xmin, xmax, ymin, ymax = self.data_range\n ax.set_xlim((xmin, xmax), emit=True)\n ax.set_ylim((ymin, ymax), emit=True)\n if set_bounds:\n ax.update_datalim(((xmin, ymin), (xmax, ymax)))\n ax.set_xbound(ax.xaxis.get_major_locator(\n ).view_limits(xmin, xmax))\n ax.set_ybound(ax.yaxis.get_major_locator(\n ).view_limits(ymin, ymax))\n else:\n self.set_viewlimits()\n\n self.canvas.draw()", "def sc_zoom_origin(self, viewer, event, msg=True):\n \"\"\"Like sc_zoom(), but pans the image as well to keep the\n coordinate under the cursor in that same position relative\n to the window.\n \"\"\"\n origin = (event.data_x, event.data_y)\n self._sc_zoom(viewer, event, msg=msg, origin=origin)\n return True", "def zoom_bbox(self, bbox):\n \"\"\"Zoom map to geometry extent.\n\n Arguments:\n bbox -- OGRGeometry polygon to zoom map extent\n \"\"\"\n try:\n bbox.transform(self.map.srs)\n except gdal.GDALException:\n pass\n else:\n self.map.zoom_to_box(mapnik.Box2d(*bbox.extent))", "def cmd_z(self, lvl=None, ch=None):\n \"\"\"z lvl=level ch=chname\n\n Zoom the image for the given viewer/channel to the given zoom\n level. Levels can be positive or negative numbers and are\n relative to a scale of 1:1 at zoom level 0.\n \"\"\"\n viewer = self.get_viewer(ch)\n if viewer is None:\n self.log(\"No current viewer/channel.\")\n return\n\n cur_lvl = viewer.get_zoom()\n\n if lvl is None:\n self.log(\"zoom=%f\" % (cur_lvl))\n\n else:\n viewer.zoom_to(lvl)" ]
[ 0.7957965135574341, 0.7436215281486511, 0.7239788174629211, 0.7158556580543518, 0.7105779051780701, 0.7090069055557251, 0.708440899848938, 0.7084274888038635, 0.7077401280403137, 0.707372784614563, 0.7045493125915527, 0.7032337188720703 ]
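A short usage sketch for zoom_to_shape above, assuming the function together with NumPy and SciPy is available in the current scope; the label volume and the target shape are invented for illustration.

import numpy as np

# Assumes zoom_to_shape() from the row above is in scope.
labels = np.zeros((10, 12, 8), dtype=np.uint8)
labels[2:6, 3:9, 1:5] = 1

resized = zoom_to_shape(labels, (20, 24, 16), dtype=np.uint8)
print(resized.shape)           # (20, 24, 16)
print(np.unique(resized))      # [0 1] - order=0 keeps the label values intact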
Crop the data. crop(data, crinfo) :param crinfo: min and max for each axis - [[minX, maxX], [minY, maxY], [minZ, maxZ]]
def crop(data, crinfo): """ Crop the data. crop(data, crinfo) :param crinfo: min and max for each axis - [[minX, maxX], [minY, maxY], [minZ, maxZ]] """ crinfo = fix_crinfo(crinfo) return data[ __int_or_none(crinfo[0][0]) : __int_or_none(crinfo[0][1]), __int_or_none(crinfo[1][0]) : __int_or_none(crinfo[1][1]), __int_or_none(crinfo[2][0]) : __int_or_none(crinfo[2][1]), ]
[ "def uncrop(data, crinfo, orig_shape, resize=False, outside_mode=\"constant\", cval=0):\n \"\"\"\n Put some boundary to input image.\n\n\n :param data: input data\n :param crinfo: array with minimum and maximum index along each axis\n [[minX, maxX],[minY, maxY],[minZ, maxZ]]. If crinfo is None, the whole input image is placed into [0, 0, 0].\n If crinfo is just series of three numbers, it is used as an initial point for input image placement.\n :param orig_shape: shape of uncropped image\n :param resize: True or False (default). Usefull if the data.shape does not fit to crinfo shape.\n :param outside_mode: 'constant', 'nearest'\n :return:\n \"\"\"\n\n if crinfo is None:\n crinfo = list(zip([0] * data.ndim, orig_shape))\n elif np.asarray(crinfo).size == data.ndim:\n crinfo = list(zip(crinfo, np.asarray(crinfo) + data.shape))\n\n crinfo = fix_crinfo(crinfo)\n data_out = np.ones(orig_shape, dtype=data.dtype) * cval\n\n # print 'uncrop ', crinfo\n # print orig_shape\n # print data.shape\n if resize:\n data = resize_to_shape(data, crinfo[:, 1] - crinfo[:, 0])\n\n startx = np.round(crinfo[0][0]).astype(int)\n starty = np.round(crinfo[1][0]).astype(int)\n startz = np.round(crinfo[2][0]).astype(int)\n\n data_out[\n # np.round(crinfo[0][0]).astype(int):np.round(crinfo[0][1]).astype(int)+1,\n # np.round(crinfo[1][0]).astype(int):np.round(crinfo[1][1]).astype(int)+1,\n # np.round(crinfo[2][0]).astype(int):np.round(crinfo[2][1]).astype(int)+1\n startx : startx + data.shape[0],\n starty : starty + data.shape[1],\n startz : startz + data.shape[2],\n ] = data\n\n if outside_mode == \"nearest\":\n # for ax in range(data.ndims):\n # ax = 0\n\n # copy border slice to pixels out of boundary - the higher part\n for ax in range(data.ndim):\n # the part under the crop\n start = np.round(crinfo[ax][0]).astype(int)\n slices = [slice(None), slice(None), slice(None)]\n slices[ax] = start\n repeated_slice = np.expand_dims(data_out[slices], ax)\n append_sz = start\n if append_sz > 0:\n tile0 = np.repeat(repeated_slice, append_sz, axis=ax)\n slices = [slice(None), slice(None), slice(None)]\n slices[ax] = slice(None, start)\n # data_out[start + data.shape[ax] : , :, :] = tile0\n data_out[slices] = tile0\n # plt.imshow(np.squeeze(repeated_slice))\n # plt.show()\n\n # the part over the crop\n start = np.round(crinfo[ax][0]).astype(int)\n slices = [slice(None), slice(None), slice(None)]\n slices[ax] = start + data.shape[ax] - 1\n repeated_slice = np.expand_dims(data_out[slices], ax)\n append_sz = data_out.shape[ax] - (start + data.shape[ax])\n if append_sz > 0:\n tile0 = np.repeat(repeated_slice, append_sz, axis=ax)\n slices = [slice(None), slice(None), slice(None)]\n slices[ax] = slice(start + data.shape[ax], None)\n # data_out[start + data.shape[ax] : , :, :] = tile0\n data_out[slices] = tile0\n # plt.imshow(np.squeeze(repeated_slice))\n # plt.show()\n\n return data_out", "def crop(self, doy, depth, lat, lon, var):\n \"\"\" Crop a subset of the dataset for each var\n\n Given doy, depth, lat and lon, it returns the smallest subset\n that still contains the requested coordinates inside it.\n\n It handels special cases like a region around greenwich and\n the international date line.\n\n Accepts 0 to 360 and -180 to 180 longitude reference.\n\n It extends time and longitude coordinates, so simplify the use\n of series. 
For example, a ship track can be requested with\n a longitude sequence like [352, 358, 364, 369, 380], and\n the equivalent for day of year above 365.\n \"\"\"\n dims, idx = cropIndices(self.dims, lat, lon, depth, doy)\n subset = {}\n for v in var:\n subset[v] = ma.asanyarray([\n self.ncs[tnn][v][0, idx['zn'], idx['yn'], idx['xn']] \\\n for tnn in idx['tn']])\n return subset, dims", "def crop(self, min, max):\n \"\"\"\n Crop a region by removing coordinates outside bounds.\n\n Follows normal slice indexing conventions.\n\n Parameters\n ----------\n min : tuple\n Minimum or starting bounds for each axis.\n\n max : tuple\n Maximum or ending bounds for each axis.\n \"\"\"\n new = [c for c in self.coordinates if all(c >= min) and all(c < max)]\n return one(new)", "def crop(self, lat, lon, var):\n \"\"\" Crop a subset of the dataset for each var\n\n Given doy, depth, lat and lon, it returns the smallest subset\n that still contains the requested coordinates inside it.\n\n It handels special cases like a region around greenwich and\n the international date line.\n\n Accepts 0 to 360 and -180 to 180 longitude reference.\n\n It extends time and longitude coordinates, so simplify the use\n of series. For example, a ship track can be requested with\n a longitude sequence like [352, 358, 364, 369, 380].\n \"\"\"\n dims, idx = cropIndices(self.dims, lat, lon)\n subset = {}\n for v in var:\n subset = {v: self.ncs[0][v][idx['yn'], idx['xn']]}\n return subset, dims", "def cropIndices(dims, lat, lon, depth=None, doy=None):\n \"\"\" Return the indices to crop dataset\n\n Assuming that the dataset have the dimensions given by\n dims, this function return the indices to conform with\n the given coordinates (lat, lon, ...)\n \"\"\"\n dims_out = {}\n idx = {}\n \n yn = slice(\n np.nonzero(dims['lat'] <= lat.min())[0].max(),\n np.nonzero(dims['lat'] >= lat.max())[0].min() + 1)\n dims_out['lat'] = np.atleast_1d(dims['lat'][yn])\n idx['yn'] = yn\n\n lon_ext = np.array(\n (dims['lon'] - 2*360).tolist() +\n (dims['lon'] - 360).tolist() +\n dims['lon'].tolist() +\n (dims['lon'] + 360).tolist())\n xn_ext = list(4 * list(range(dims['lon'].shape[0])))\n xn_start = np.nonzero(lon_ext <= lon.min())[0].max()\n xn_end = np.nonzero(lon_ext >= lon.max())[0].min()\n xn = xn_ext[xn_start:xn_end+1]\n dims_out['lon'] = np.atleast_1d(lon_ext[xn_start:xn_end+1])\n idx['xn'] = xn\n\n if depth is not None:\n zn = slice(\n np.nonzero(dims['depth'] <= depth.min())[0].max(),\n np.nonzero(dims['depth'] >= min(dims['depth'].max(), depth.max())\n )[0].min() + 1\n )\n # If a higher degree interpolation system uses more than one data\n # point in the edge, I should extend this selection one point on\n # each side, without go beyond 0\n # if zn.start < 0:\n # zn = slice(0, zn.stop, zn.step)\n dims_out['depth'] = np.atleast_1d(dims['depth'][zn])\n idx['zn'] = zn\n\n if doy is not None:\n # Source has only one time, like total mean field, or annual mean.\n if dims['time'].shape == (1,):\n dims_out['time'] = dims['time']\n idx['tn'] = [0]\n else:\n time_ext = np.array(\n [dims['time'][-1] - 365.25] +\n dims['time'].tolist() +\n [dims['time'][0] + 365.25])\n tn_ext = list(range(dims['time'].size))\n tn_ext = [tn_ext[-1]] + tn_ext + [tn_ext[0]]\n tn_start = np.nonzero(time_ext <= doy.min())[0].max()\n tn_end = np.nonzero(time_ext >= doy.max())[0].min()\n dims_out['time'] = np.atleast_1d(time_ext[tn_start:tn_end+1])\n idx['tn'] = tn_ext[tn_start:tn_end+1]\n\n return dims_out, idx", "def crop(self, doy, depth, lat, lon, var):\n \"\"\" Crop a subset 
of the dataset for each var\n\n Given doy, depth, lat and lon, it returns the smallest subset\n that still contains the requested coordinates inside it.\n\n It handels special cases like a region around greenwich and\n the international date line.\n\n Accepts 0 to 360 and -180 to 180 longitude reference.\n\n It extends time and longitude coordinates, so simplify the use\n of series. For example, a ship track can be requested with\n a longitude sequence like [352, 358, 364, 369, 380], and\n the equivalent for day of year above 365.\n \"\"\"\n dims, idx = cropIndices(self.dims, lat, lon, depth)\n\n dims['time'] = np.atleast_1d(doy)\n idx['tn'] = np.arange(dims['time'].size)\n\n # Temporary solution. Create an object for CARS dataset\n xn = idx['xn']\n yn = idx['yn']\n zn = idx['zn']\n tn = idx['tn']\n\n subset = {}\n for v in var:\n if v == 'mn':\n mn = []\n for d in doy:\n t = 2 * np.pi * d/366\n # Naive solution\n # FIXME: This is not an efficient solution.\n value = self.ncs[0]['mean'][:, yn, xn]\n value[:64] += self.ncs[0]['an_cos'][:, yn, xn] * np.cos(t) + \\\n self.ncs[0]['an_sin'][:, yn, xn] * np.sin(t)\n value[:55] += self.ncs[0]['sa_cos'][:, yn, xn] * np.cos(2*t) + \\\n self.ncs[0]['sa_sin'][:, yn, xn] * np.sin(2*t)\n mn.append(value[zn])\n\n subset['mn'] = ma.asanyarray(mn)\n else:\n subset[v] = ma.asanyarray(\n doy.size * [self[v][zn, yn, xn]])\n return subset, dims", "def crop(self, height, width, center_i=None, center_j=None):\n \"\"\"Crop the image centered around center_i, center_j.\n\n Parameters\n ----------\n height : int\n The height of the desired image.\n\n width : int\n The width of the desired image.\n\n center_i : int\n The center height point at which to crop. If not specified, the center\n of the image is used.\n\n center_j : int\n The center width point at which to crop. If not specified, the center\n of the image is used.\n\n Returns\n -------\n :obj:`Image`\n A cropped Image of the same type.\n \"\"\"\n # crop channels separately\n color_im_cropped = self.color.crop(height, width,\n center_i=center_i,\n center_j=center_j)\n depth_im_cropped = self.depth.crop(height, width,\n center_i=center_i,\n center_j=center_j)\n\n # return combination of cropped data\n return RgbdImage.from_color_and_depth(\n color_im_cropped, depth_im_cropped)", "def crop(cmapin, vmin, vmax, pivot, N=None, dmax=None):\n '''Crop end or ends of a diverging colormap by vmin/vmax values.\n\n :param cmap: A colormap object, like cmocean.cm.matter.\n :param vmin/vmax: vmin/vmax for use in plot with colormap.\n :param pivot: center point to be used in plot with diverging colormap.\n :param N=None: User can specify the number of rows for the outgoing colormap.\n If unspecified, N from incoming colormap will be used and values will\n be interpolated as needed to fill in rows.\n :param dmax=None: dmax is the highest number to be included in a plot with\n the colormap; values higher in magnitude than dmax are removed from both\n ends of colormap. 
It should be less than abs(vmin) and abs(vmax), which\n should be equal for this parameter to be used.\n\n Outputs resultant colormap object.\n\n This function can be used for sequential and other non-diverging colormaps\n but it is easier to use that way through crop_by_percent().\n This should be useful for plotting bathymetry and topography data with the\n topo colormap when max bathymetry value is different from max topography.\n\n Example usage:\n # example for crop on min end of diverging colormap\n vmin = -2; vmax = 5; pivot = 0\n newcmap = crop(cmocean.cm.curl, vmin, vmax, pivot)\n A = np.random.randint(vmin, vmax, (5,5))\n plt.pcolormesh(A, vmin=vmin, vmax=vmax, cmap=newcmap)\n plt.colorbar()\n\n # example for crop on max end of diverging colormap\n vmin = -10; vmax = 8; pivot = 0\n newcmap = crop(cmocean.cm.delta, vmin, vmax, pivot)\n A = np.random.randint(vmin, vmax, (5,5))\n plt.pcolormesh(A, vmin=vmin, vmax=vmax, cmap=newcmap)\n plt.colorbar()\n\n '''\n\n assert pivot >= vmin and pivot <= vmax\n\n # dmax used if and only if ends are equal\n if vmax-pivot == pivot-vmin:\n assert dmax is not None\n\n # allow user to input N, but otherwise use N for incoming colormap\n if N is None:\n N = cmapin.N\n else:\n N = N\n\n # ratio of the colormap to remove\n below = pivot - vmin # below pivot\n above = vmax - pivot # above pivot\n\n ranges = (above, below)\n half_range = max(ranges)\n full_range = half_range*2\n reduced_range = min(ranges)\n range_to_keep = half_range + reduced_range\n\n ratio = (full_range-range_to_keep)/full_range\n\n\n if below < above: # reducing colormap on side below pivot\n # start colormap partway through\n shortcmap = cmapin(np.linspace(0,1,N))[int(np.ceil(N*ratio)):]\n\n elif above < below: # reducing colormap on side above pivot\n # end colormap early\n shortcmap = cmapin(np.linspace(0,1,N))[:-int(np.ceil(N*ratio))]\n\n elif (below == above) and (dmax is not None): # equal\n ratio = dmax/full_range\n shortcmap = cmapin(np.linspace(0,1,N))[int(np.ceil(N*ratio)):-int(np.ceil(N*ratio))]\n\n # interpolate to original number of rows in colormap\n newrgb = np.zeros((N, 4))\n shnum = shortcmap.shape[0]\n for i in range(4): # loop through each column of cmap\n newrgb[:,i] = np.interp(np.linspace(0,shnum,N), np.arange(0,shnum), shortcmap[:,i])\n\n newcmap = cmap(newrgb)\n\n return newcmap", "def crop(self, xa, xb, ya, yb):\n \"\"\"\n Crops the image. Two points :\n \n - We use numpy conventions\n xa = 200 and xb = 400 will give you a width of 200 pixels !\n \n - We crop relative to the current array (i.e. 
not necessarily to the original array !)\n This means you can crop several times in a row with xa = 10, it will each time remove 10 pixels in x !\n \n But we update the crop region specifications, so that the object remembers how it was cut.\n \n Please give positive integers in compatible ranges, no checks are made.\n \n \"\"\"\n \n if self.pilimage != None:\n raise RuntimeError, \"Cannot crop anymore, PIL image already exists !\"\n \n if self.verbose:\n print \"Cropping : [%i:%i, %i:%i]\" % (xa, xb, ya, yb)\n self.numpyarray = self.numpyarray[xa:xb, ya:yb]\n \n self.xa += xa\n self.ya += ya\n self.xb = self.xa + (xb - xa)\n self.yb = self.ya + (yb - ya)", "def fix_crinfo(crinfo, to=\"axis\"):\n \"\"\"\n Function recognize order of crinfo and convert it to proper format.\n \"\"\"\n\n crinfo = np.asarray(crinfo)\n if crinfo.shape[0] == 2:\n crinfo = crinfo.T\n\n return crinfo", "def crop(im, r, c, sz_h, sz_w):\n '''\n crop image into a square of size sz,\n '''\n return im[r:r+sz_h, c:c+sz_w]", "def crop(self, area=None, ll_bbox=None, xy_bbox=None, dataset_ids=None):\n \"\"\"Crop Scene to a specific Area boundary or bounding box.\n\n Args:\n area (AreaDefinition): Area to crop the current Scene to\n ll_bbox (tuple, list): 4-element tuple where values are in\n lon/lat degrees. Elements are\n ``(xmin, ymin, xmax, ymax)`` where X is\n longitude and Y is latitude.\n xy_bbox (tuple, list): Same as `ll_bbox` but elements are in\n projection units.\n dataset_ids (iterable): DatasetIDs to include in the returned\n `Scene`. Defaults to all datasets.\n\n This method will attempt to intelligently slice the data to preserve\n relationships between datasets. For example, if we are cropping two\n DataArrays of 500m and 1000m pixel resolution then this method will\n assume that exactly 4 pixels of the 500m array cover the same\n geographic area as a single 1000m pixel. It handles these cases based\n on the shapes of the input arrays and adjusting slicing indexes\n accordingly. This method will have trouble handling cases where data\n arrays seem related but don't cover the same geographic area or if the\n coarsest resolution data is not related to the other arrays which are\n related.\n\n It can be useful to follow cropping with a call to the native\n resampler to resolve all datasets to the same resolution and compute\n any composites that could not be generated previously::\n\n >>> cropped_scn = scn.crop(ll_bbox=(-105., 40., -95., 50.))\n >>> remapped_scn = cropped_scn.resample(resampler='native')\n\n .. 
note::\n\n The `resample` method automatically crops input data before\n resampling to save time/memory.\n\n \"\"\"\n if len([x for x in [area, ll_bbox, xy_bbox] if x is not None]) != 1:\n raise ValueError(\"One and only one of 'area', 'll_bbox', \"\n \"or 'xy_bbox' can be specified.\")\n\n new_scn = self.copy(datasets=dataset_ids)\n if not new_scn.all_same_proj and xy_bbox is not None:\n raise ValueError(\"Can't crop when dataset_ids are not all on the \"\n \"same projection.\")\n\n # get the lowest resolution area, use it as the base of the slice\n # this makes sure that the other areas *should* be a consistent factor\n min_area = new_scn.min_area()\n if isinstance(area, str):\n area = get_area_def(area)\n new_min_area, min_y_slice, min_x_slice = self._slice_area_from_bbox(\n min_area, area, ll_bbox, xy_bbox)\n new_target_areas = {}\n for src_area, dataset_ids in new_scn.iter_by_area():\n if src_area is None:\n for ds_id in dataset_ids:\n new_scn.datasets[ds_id] = self[ds_id]\n continue\n\n y_factor, y_remainder = np.divmod(float(src_area.shape[0]),\n min_area.shape[0])\n x_factor, x_remainder = np.divmod(float(src_area.shape[1]),\n min_area.shape[1])\n y_factor = int(y_factor)\n x_factor = int(x_factor)\n if y_remainder == 0 and x_remainder == 0:\n y_slice = slice(min_y_slice.start * y_factor,\n min_y_slice.stop * y_factor)\n x_slice = slice(min_x_slice.start * x_factor,\n min_x_slice.stop * x_factor)\n new_area = src_area[y_slice, x_slice]\n slice_key = {'y': y_slice, 'x': x_slice}\n new_scn._slice_datasets(dataset_ids, slice_key, new_area)\n else:\n new_target_areas[src_area] = self._slice_area_from_bbox(\n src_area, area, ll_bbox, xy_bbox\n )\n\n return new_scn" ]
[ 0.7339417338371277, 0.7194818258285522, 0.7174306511878967, 0.7124550938606262, 0.7115015387535095, 0.7108666896820068, 0.7031501531600952, 0.7022998929023743, 0.6970390677452087, 0.6903190612792969, 0.6898860335350037, 0.6828916072845459 ]
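A small usage sketch for crop above, assuming the function and its module helpers (fix_crinfo and the private __int_or_none conversion helper) are in scope; the test volume and the crinfo bounds are invented for illustration.

import numpy as np

# Assumes crop() from the row above and its module helpers are in scope.
vol = np.arange(4 * 5 * 6).reshape(4, 5, 6)
crinfo = [[1, 3], [0, 4], [2, 6]]   # [[minX, maxX], [minY, maxY], [minZ, maxZ]]

part = crop(vol, crinfo)
print(part.shape)                   # (2, 4, 4) - upper bounds are exclusive, as in normal slicing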
Combine two crinfos. crinfo1 is applied first; crinfo2 is applied second, relative to the crop defined by crinfo1.
def combinecrinfo(crinfo1, crinfo2):
    """
    Combine two crinfos. crinfo1 is applied first; crinfo2 is applied second,
    relative to the crop defined by crinfo1.
    """
    crinfo1 = fix_crinfo(crinfo1)
    crinfo2 = fix_crinfo(crinfo2)

    crinfo = [
        [crinfo1[0][0] + crinfo2[0][0], crinfo1[0][0] + crinfo2[0][1]],
        [crinfo1[1][0] + crinfo2[1][0], crinfo1[1][0] + crinfo2[1][1]],
        [crinfo1[2][0] + crinfo2[2][0], crinfo1[2][0] + crinfo2[2][1]],
    ]

    return crinfo
[ "static public int concatCrc(int crc1, int crc2, int order) {\n // Calculate CRC of crc1 + order's 0\n int crcForCrc1 = crc1;\n int orderRemained = order;\n\n // Fast transforming CRCs for adding 0 to the end of the byte array by table\n // look-up\n for (LookupTable lookupTable : lookupTables) {\n while (orderRemained >= lookupTable.getOrder()) {\n crcForCrc1 = transform(crcForCrc1, lookupTable.getLookupTable());\n orderRemained -= lookupTable.getOrder();\n } \n }\n\n if (orderRemained > 0) {\n // We continue the first byte array's CRC calculating\n // and adding 0s to it. And then we plus it with CRC2\n //\n // Doing that, we need to offset the CRC initial value of CRC2 by\n // subtracting a CRC value of empty string.\n //\n // For example, A1A2A3's CRC is C1C2C3C4,\n // while B1 B2 B3's CRc is C5C6C7C8 and we wnat to concatenate them,\n // it means (our initial value is FF FF FF FF):\n // FF FF FF FF A1 A2 A3 C1 C2 C3 C4\n // FF FF FF FF B1 B2 B3 C5 C6 C7 C8\n // both are multiple of generation polynomial.\n // By continue CRC by adding zeros, actually, we calculated\n // the CRC C1'C2'C3'C4, so that\n // FF FF FF FF A1 A2 A3 00 00 00 C1'C2'C3'C4'\n // is the multiple of generation polynomial.\n // By adding C5C6C7C8 and C1'C2'C3'C4', what we got is not\n // the CRC for\n // FF FF FF FF A1 A2 A3 B1 B2 B3\n // which we expect, but this string plus:\n // FF FF FF FF 00 00 00\n // To offset the impact, the only thing we need to do, is\n // to subtract the result by the CRC value for 00 00 00.\n //\n int initial = CrcConcatLookupTables.initCrcMap[orderRemained];\n\n NativeCrc32 pjc = new NativeCrc32();\n pjc.setValue(crcForCrc1);\n byte[] zeros = new byte[orderRemained];\n pjc.update(zeros, 0, zeros.length);\n crcForCrc1 = (int) pjc.getValue() ^ initial; \n }\n return crcForCrc1 ^ crc2;\n }", "def fix_crinfo(crinfo, to=\"axis\"):\n \"\"\"\n Function recognize order of crinfo and convert it to proper format.\n \"\"\"\n\n crinfo = np.asarray(crinfo)\n if crinfo.shape[0] == 2:\n crinfo = crinfo.T\n\n return crinfo", "public static int combine(int curTint, int tint) {\n int newA = ((((curTint >> 24) & 0xFF) * (((tint >> 24) & 0xFF)+1)) & 0xFF00) << 16;\n if ((tint & 0xFFFFFF) == 0xFFFFFF) { // fast path to just combine alpha\n return newA | (curTint & 0xFFFFFF);\n }\n\n // otherwise combine all the channels (beware the bit mask-and-shiftery!)\n int newR = ((((curTint >> 16) & 0xFF) * (((tint >> 16) & 0xFF)+1)) & 0xFF00) << 8;\n int newG = (((curTint >> 8) & 0xFF) * (((tint >> 8) & 0xFF)+1)) & 0xFF00;\n int newB = (((curTint & 0xFF) * ((tint & 0xFF)+1)) >> 8) & 0xFF;\n return newA | newR | newG | newB;\n }", "def merge_infos(info1, info2):\n \"\"\"We often need to aggregate together multiple infos. Most keys can\n just be clobbered by the new info, but e.g. any keys which contain\n counts should be added. 
The merge schema is indicated by the key\n namespace.\n\n Namespaces:\n\n - stats.timers: Timing\n - stats.gauges: Gauge values\n - stats.*: Counts of a quantity\n \"\"\"\n for key, value in six.iteritems(info2):\n if key in info1 and key.startswith('stats'):\n if key.startswith('stats.timers'):\n # timer\n info1[key] += value\n elif key.startswith('stats.gauges'):\n # gauge\n info1[key] = value\n else:\n # counter\n info1[key] += value\n else:\n info1[key] = value", "def combination(n, r):\n \"\"\"This function calculates nCr.\"\"\"\n if n == r or r == 0:\n return 1\n else:\n return combination(n-1, r-1) + combination(n-1, r)", "def merge(self,range2): \n \"\"\"merge this bed with another bed to make a longer bed. Returns None if on different chromosomes.\n\n keeps the options of this class (not range2)\n\n :param range2:\n :type range2: GenomicRange\n\n :return: bigger range with both\n :rtype: GenomicRange\n\n \"\"\"\n if self.chr != range2.chr:\n return None\n o = type(self)(self.chr,min(self.start,range2.start)+self._start_offset,max(self.end,range2.end),self.payload,self.dir)\n return o", "public void join(SelectableChannel ch1, SelectableChannel ch2) throws IOException\r\n {\r\n join(ch1, ch2, (ByteChannel)ch1, (ByteChannel)ch2);\r\n }", "def sxor(s1, s2)\n raise ArgumentError, \"strings must have equal size\" unless s1.size == s2.size\n\n s1.bytes.zip(s2.bytes).map {|a, b| (a ^ b).chr }.join\n end", "private double spfR(InfoTree it1, InfoTree it2) {\n\n\t\tint fReversedPostorder = it1.getSize() - 1 - it1.info[POST2_PRE][it1.getCurrentNode()];\n\t\tint gReversedPostorder = it2.getSize() - 1 - it2.info[POST2_PRE][it2.getCurrentNode()];\n\n\t\tint minRKR = it2.info[RPOST2_MIN_RKR][gReversedPostorder];\n\t\tint[] rkr = it2.info[RKR];\n\t\tif (minRKR > -1) for (int j = minRKR; rkr[j] < gReversedPostorder; j++) treeEditDistRev(it1, it2, fReversedPostorder, rkr[j]);\n\t\ttreeEditDistRev(it1, it2, fReversedPostorder, gReversedPostorder);\n\n\t\treturn it1.isSwitched() ? 
delta[it2.getCurrentNode()][it1\n\t\t\t\t.getCurrentNode()]\n\t\t\t\t+ deltaBit[it2.getCurrentNode()][it1.getCurrentNode()]\n\t\t\t\t* costMatch : delta[it1.getCurrentNode()][it2.getCurrentNode()]\n\t\t\t\t+ deltaBit[it1.getCurrentNode()][it2.getCurrentNode()]\n\t\t\t\t* costMatch;\n\t}", "def combination_memo(n, r):\n \"\"\"This function calculates nCr using memoization method.\"\"\"\n memo = {}\n def recur(n, r):\n if n == r or r == 0:\n return 1\n if (n, r) not in memo:\n memo[(n, r)] = recur(n - 1, r - 1) + recur(n - 1, r)\n return memo[(n, r)]\n return recur(n, r)", "def combine_kraus_maps(k1, k2):\n \"\"\"\n Generate the Kraus map corresponding to the composition\n of two maps on the same qubits with k1 being applied to the state\n after k2.\n\n :param list k1: The list of Kraus operators that are applied second.\n :param list k2: The list of Kraus operators that are applied first.\n :return: A combinatorially generated list of composed Kraus operators.\n \"\"\"\n return [np.dot(k1j, k2l) for k1j in k1 for k2l in k2]", "def combine(*rnf_profiles):\n \"\"\"Combine more profiles and set their maximal values.\n\n\t\tArgs:\n\t\t\t*rnf_profiles (rnftools.rnfformat.RnfProfile): RNF profile.\n\t\t\"\"\"\n\n for rnf_profile in rnf_profiles:\n self.prefix_width = max(self.prefix_width, rnf_profile.prefix_width)\n self.read_tuple_id_width = max(self.read_tuple_id_width, rnf_profile.read_tuple_id_width)\n self.genome_id_width = max(self.genome_id_width, rnf_profile.genome_id_width)\n self.chr_id_width = max(self.chr_id_width, rnf_profile.chr_id_width)\n self.coor_width = max(self.coor_width, rnf_profile.coor_width)" ]
[ 0.7125424146652222, 0.7048804759979248, 0.6844689846038818, 0.6780837774276733, 0.6704990863800049, 0.6569040417671204, 0.6556092500686646, 0.6535033583641052, 0.6515312194824219, 0.6504814028739929, 0.6501571536064148, 0.6478806138038635 ]
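A usage sketch showing how combinecrinfo relates two successive crops, assuming crop, combinecrinfo and their helpers from the rows above are in scope; the volume and both crinfos are invented for illustration.

import numpy as np

# Assumes crop() and combinecrinfo() from the rows above are in scope.
vol = np.random.rand(30, 30, 30)
crinfo1 = [[5, 25], [5, 25], [5, 25]]   # first crop, in original-image coordinates
crinfo2 = [[2, 10], [0, 8], [4, 12]]    # second crop, relative to the first crop

combined = combinecrinfo(crinfo1, crinfo2)
print(combined)                          # [[7, 15], [5, 13], [9, 17]] (entries are numpy integers)

two_step = crop(crop(vol, crinfo1), crinfo2)
one_step = crop(vol, combined)
print(np.array_equal(one_step, two_step))  # True - the combined crinfo maps straight to the original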
Create crinfo of minimum orthogonal nonzero block in input data. :param data: input data :param margin: add margin to minimum block :return:
def crinfo_from_specific_data(data, margin=0):
    """
    Create crinfo of minimum orthogonal nonzero block in input data.

    :param data: input data
    :param margin: add margin to minimum block
    :return:
    """
    # find the automatic crop; nonzero gives the indices
    logger.debug("crinfo")
    logger.debug(str(margin))
    nzi = np.nonzero(data)
    logger.debug(str(nzi))

    if np.isscalar(margin):
        margin = [margin] * 3

    # apply the per-axis margin; upper limits are exclusive slice bounds
    x1 = np.min(nzi[0]) - margin[0]
    x2 = np.max(nzi[0]) + margin[0] + 1
    y1 = np.min(nzi[1]) - margin[1]
    y2 = np.max(nzi[1]) + margin[1] + 1
    z1 = np.min(nzi[2]) - margin[2]
    z2 = np.max(nzi[2]) + margin[2] + 1

    # clip to the array bounds
    if x1 < 0:
        x1 = 0
    if y1 < 0:
        y1 = 0
    if z1 < 0:
        z1 = 0

    if x2 > data.shape[0]:
        x2 = data.shape[0]
    if y2 > data.shape[1]:
        y2 = data.shape[1]
    if z2 > data.shape[2]:
        z2 = data.shape[2]

    # crop info
    crinfo = [[x1, x2], [y1, y2], [z1, z2]]
    return crinfo
[ "def get_minimum_size(self, data):\n \"\"\"\n Minimum height is the total height + margins, minimum width\n is the largest width.\n \"\"\"\n min_width = 0\n height = 0\n for element in self.elements:\n size = element.get_minimum_size(data)\n min_width = max(min_width, size.x)\n height += size.y\n height += (len(self.elements)-1)*self.margin\n return datatypes.Point(min_width, height)", "def get_minimum_size(self, data):\n \"\"\"Minimum width is the total width + margins, minimum height\n is the largest height.\"\"\"\n width = 0\n min_height = 0\n for element in self.elements:\n size = element.get_minimum_size(data)\n min_height = max(min_height, size.y)\n width += size.x\n width += (len(self.elements)-1)*self.margin\n return datatypes.Point(width, min_height)", "def _data(self, cube, weighted, prune):\n \"\"\"ndarray representing table index by margin.\"\"\"\n result = []\n for slice_ in cube.slices:\n if cube.has_mr:\n return self._mr_index(cube, weighted, prune)\n num = slice_.margin(axis=0, weighted=weighted, prune=prune)\n den = slice_.margin(weighted=weighted, prune=prune)\n margin = num / den\n proportions = slice_.proportions(axis=1, weighted=weighted, prune=prune)\n result.append(proportions / margin)\n\n if len(result) == 1 and cube.ndim < 3:\n result = result[0]\n else:\n if prune:\n mask = np.array([slice_.mask for slice_ in result])\n result = np.ma.masked_array(result, mask)\n else:\n result = np.array(result)\n\n return result", "def right_margin(self, margin):\n '''Specify the right margin.\n \n Args:\n margin: The right margin, in character width, must be less than the media's width.\n Returns:\n None\n Raises:\n RuntimeError: Invalid margin parameter\n '''\n if margin >=1 and margin <=255:\n self.send(chr(27)+'Q'+chr(margin))\n else:\n raise RuntimeError('Invalid margin parameter in function rightMargin')", "def data(cls, cube, weighted, prune):\n \"\"\"Return ndarray representing table index by margin.\"\"\"\n return cls()._data(cube, weighted, prune)", "def crop(data, crinfo):\n \"\"\"\n Crop the data.\n\n crop(data, crinfo)\n\n :param crinfo: min and max for each axis - [[minX, maxX], [minY, maxY], [minZ, maxZ]]\n\n \"\"\"\n crinfo = fix_crinfo(crinfo)\n return data[\n __int_or_none(crinfo[0][0]) : __int_or_none(crinfo[0][1]),\n __int_or_none(crinfo[1][0]) : __int_or_none(crinfo[1][1]),\n __int_or_none(crinfo[2][0]) : __int_or_none(crinfo[2][1]),\n ]", "def crc_ccitt(data):\n # type: (bytes) -> int\n '''\n Calculate the CRC over a range of bytes using the CCITT polynomial.\n\n Parameters:\n data - The array of bytes to calculate the CRC over.\n Returns:\n The CCITT CRC of the data.\n '''\n crc = 0\n if not have_py_3:\n for x in data:\n crc = crc_ccitt_table[ord(x) ^ ((crc >> 8) & 0xFF)] ^ ((crc << 8) & 0xFF00) # type: ignore\n else:\n mv = memoryview(data)\n for x in mv.tobytes():\n crc = crc_ccitt_table[x ^ ((crc >> 8) & 0xFF)] ^ ((crc << 8) & 0xFF00)\n\n return crc", "def crossings_nonzero_pos2neg(data):\n \"\"\"\n Find `indices of zero crossings from positive to negative values <http://stackoverflow.com/questions/3843017/efficiently-detect-sign-changes-in-python>`_.\n\n :param data: numpy array of floats\n :type data: numpy array of floats\n :return crossings: crossing indices to data\n :rtype crossings: numpy array of integers\n\n :Examples:\n\n >>> import numpy as np\n >>> from mhealthx.signals import crossings_nonzero_pos2neg\n >>> data = np.random.random(100)\n >>> crossings = crossings_nonzero_pos2neg(data)\n \n \"\"\"\n import numpy as np\n\n if 
isinstance(data, np.ndarray):\n pass\n elif isinstance(data, list):\n data = np.asarray(data)\n else:\n raise IOError('data should be a numpy array')\n\n pos = data > 0\n\n crossings = (pos[:-1] & ~pos[1:]).nonzero()[0]\n\n return crossings", "def get_minimum_size(self, data):\n \"\"\"The minimum height is the number of rows multiplied by the\n tallest row.\"\"\"\n min_width = 0\n min_height = 0\n for element in self.elements:\n size = (\n datatypes.Point(0, 0) if element is None\n else element.get_minimum_size(data)\n )\n min_height = max(min_height, size.y)\n min_width = max(min_width, size.x)\n\n num_elements = len(self.elements)\n height = min_height * num_elements + self.margin * (num_elements-1)\n return datatypes.Point(min_width, height)", "def knuth(data):\n \"\"\"\n References\n ----------\n .. [1] K. Knuth, \"Optimal Data-Based Binning for Histograms\", 2006.\n http://arxiv.org/pdf/physics/0605197v1.pdf\n \"\"\"\n import scipy.optimize as optimize\n\n def f(data):\n from scipy.special import gammaln\n\n m, M = np.min(data), np.max(data)\n n = len(data)\n\n def fff(x):\n k = x[0] # number of bins\n if k <= 0:\n return float(\"+inf\")\n binning = np.linspace(m, M, k + 1)\n histo, bincenters = np.histogram(data, binning)\n\n return -(n * np.log(k) + gammaln(k / 2.) - gammaln(n + k / 2.) +\n k * gammaln(1. / 2.) + np.sum(gammaln(histo + 0.5)))\n return fff\n\n k0 = np.sqrt(len(data))\n return optimize.fmin(f(data), np.array([k0]), disp=False)[0]", "def get_data (self, datakind, integnum):\r\n \"\"\"Given an integration number (0 <= integnum < self.n_integrations) and a\r\n data kind ('crossData.bin', 'autoData.bin'), memory-map the corresponding data\r\n and return a wrapping numpy array.\"\"\"\r\n\r\n if integnum < 0 or integnum >= self.n_integrations:\r\n raise ValueError ('illegal integration number %d' % integnum)\r\n\r\n size = self.sizeinfo.get (datakind)\r\n if size is None:\r\n raise ValueError ('unrecognized data kind \"%s\"' % datakind)\r\n\r\n dtype = _datatypes[datakind]\r\n offset = self.headsize + integnum * self.intsize\r\n dslice = self.mmdata[offset:offset+size]\r\n data = np.fromstring (dslice, dtype=dtype)\r\n\r\n if datakind == 'crossData.bin':\r\n data = data.reshape ((self.n_baselines, self.n_channels, len (self.crosspols)))\r\n elif datakind == 'autoData.bin':\r\n data = data.reshape ((self.n_antennas, self.n_channels, 2))\r\n elif datakind == 'flags.bin':\r\n data = data.reshape ((self.n_baselines + self.n_antennas, self.n_channels,\r\n len (self.crosspols)))\r\n\r\n return data", "def mangleIR(data, ignore_errors=False):\n \"\"\"Mangle a raw Kira data packet into shorthand\"\"\"\n try:\n # Packet mangling algorithm inspired by Rex Becket's kirarx vera plugin\n # Determine a median value for the timing packets and categorize each\n # timing as longer or shorter than that. This will always work for signals\n # that use pulse width modulation (since varying by long-short is basically\n # the definition of what PWM is). 
By lucky coincidence this also works with\n # the RC-5/RC-6 encodings used by Phillips (manchester encoding)\n # because time variations of opposite-phase/same-phase are either N or 2*N\n if isinstance(data, bytes):\n data = data.decode('ascii')\n data = data.strip()\n times = [int(x, 16) for x in data.split()[2:]]\n minTime = min(times[2:-1])\n maxTime = max(times[2:-1])\n margin = (maxTime - minTime) / 2 + minTime\n return ''.join([(x < margin and 'S' or 'L') for x in times])\n except:\n # Probably a mangled packet.\n if not ignore_errors:\n raise" ]
[ 0.681165874004364, 0.6734849214553833, 0.6568872332572937, 0.6547254323959351, 0.6481010317802429, 0.6388972997665405, 0.638611376285553, 0.6367385387420654, 0.6363102793693542, 0.6296578049659729, 0.6294127702713013, 0.6288889050483704 ]
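A usage sketch for crinfo_from_specific_data above, combined with crop, assuming both functions (and the module logger) are in scope; the binary mask and margin are invented for illustration.

import numpy as np

# Assumes crinfo_from_specific_data() and crop() from the rows above are in scope.
mask = np.zeros((20, 20, 20), dtype=np.uint8)
mask[4:7, 8:12, 10:15] = 1        # a small nonzero block inside a larger volume

crinfo = crinfo_from_specific_data(mask, margin=1)
print(crinfo)                     # [[3, 8], [7, 13], [9, 16]] (entries are numpy integers)
print(crop(mask, crinfo).shape)   # (5, 6, 7) - the nonzero block plus a 1-voxel margin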
Place the data back into an image of the original (uncropped) shape; the inverse operation to crop. The area outside the placed data is filled according to outside_mode.

:param data: input data
:param crinfo: array with minimum and maximum index along each axis [[minX, maxX],[minY, maxY],[minZ, maxZ]]. If crinfo is None, the whole input image is placed at [0, 0, 0]. If crinfo is just a series of three numbers, it is used as the initial point for the input image placement.
:param orig_shape: shape of the uncropped image
:param resize: True or False (default). Useful if data.shape does not fit the crinfo shape.
:param outside_mode: 'constant', 'nearest'
:return:
def uncrop(data, crinfo, orig_shape, resize=False, outside_mode="constant", cval=0):
    """
    Place the data back into an image of the original (uncropped) shape; the
    inverse operation to crop. The area outside the placed data is filled
    according to outside_mode.

    :param data: input data
    :param crinfo: array with minimum and maximum index along each axis
      [[minX, maxX],[minY, maxY],[minZ, maxZ]]. If crinfo is None, the whole
      input image is placed at [0, 0, 0]. If crinfo is just a series of three
      numbers, it is used as the initial point for the input image placement.
    :param orig_shape: shape of the uncropped image
    :param resize: True or False (default). Useful if data.shape does not fit
      the crinfo shape.
    :param outside_mode: 'constant', 'nearest'
    :return:
    """

    if crinfo is None:
        crinfo = list(zip([0] * data.ndim, orig_shape))
    elif np.asarray(crinfo).size == data.ndim:
        crinfo = list(zip(crinfo, np.asarray(crinfo) + data.shape))

    crinfo = fix_crinfo(crinfo)
    data_out = np.ones(orig_shape, dtype=data.dtype) * cval

    # print 'uncrop ', crinfo
    # print orig_shape
    # print data.shape
    if resize:
        data = resize_to_shape(data, crinfo[:, 1] - crinfo[:, 0])

    startx = np.round(crinfo[0][0]).astype(int)
    starty = np.round(crinfo[1][0]).astype(int)
    startz = np.round(crinfo[2][0]).astype(int)

    data_out[
        # np.round(crinfo[0][0]).astype(int):np.round(crinfo[0][1]).astype(int)+1,
        # np.round(crinfo[1][0]).astype(int):np.round(crinfo[1][1]).astype(int)+1,
        # np.round(crinfo[2][0]).astype(int):np.round(crinfo[2][1]).astype(int)+1
        startx : startx + data.shape[0],
        starty : starty + data.shape[1],
        startz : startz + data.shape[2],
    ] = data

    if outside_mode == "nearest":
        # copy the border slice of the placed data into the voxels outside the crop
        for ax in range(data.ndim):
            # the part under the crop
            start = np.round(crinfo[ax][0]).astype(int)
            slices = [slice(None), slice(None), slice(None)]
            slices[ax] = start
            repeated_slice = np.expand_dims(data_out[tuple(slices)], ax)
            append_sz = start
            if append_sz > 0:
                tile0 = np.repeat(repeated_slice, append_sz, axis=ax)
                slices = [slice(None), slice(None), slice(None)]
                slices[ax] = slice(None, start)
                data_out[tuple(slices)] = tile0
                # plt.imshow(np.squeeze(repeated_slice))
                # plt.show()

            # the part over the crop
            start = np.round(crinfo[ax][0]).astype(int)
            slices = [slice(None), slice(None), slice(None)]
            slices[ax] = start + data.shape[ax] - 1
            repeated_slice = np.expand_dims(data_out[tuple(slices)], ax)
            append_sz = data_out.shape[ax] - (start + data.shape[ax])
            if append_sz > 0:
                tile0 = np.repeat(repeated_slice, append_sz, axis=ax)
                slices = [slice(None), slice(None), slice(None)]
                slices[ax] = slice(start + data.shape[ax], None)
                data_out[tuple(slices)] = tile0
                # plt.imshow(np.squeeze(repeated_slice))
                # plt.show()

    return data_out
[ "def crop(data, crinfo):\n \"\"\"\n Crop the data.\n\n crop(data, crinfo)\n\n :param crinfo: min and max for each axis - [[minX, maxX], [minY, maxY], [minZ, maxZ]]\n\n \"\"\"\n crinfo = fix_crinfo(crinfo)\n return data[\n __int_or_none(crinfo[0][0]) : __int_or_none(crinfo[0][1]),\n __int_or_none(crinfo[1][0]) : __int_or_none(crinfo[1][1]),\n __int_or_none(crinfo[2][0]) : __int_or_none(crinfo[2][1]),\n ]", "def calculate_origin_and_size(canvas_size, data_shape, image_canvas_mode, image_zoom, image_position) -> typing.Tuple[typing.Any, typing.Any]:\n \"\"\"Calculate origin and size for canvas size, data shape, and image display parameters.\"\"\"\n if data_shape is None:\n return None, None\n if image_canvas_mode == \"fill\":\n data_shape = data_shape\n scale_h = float(data_shape[1]) / canvas_size[1]\n scale_v = float(data_shape[0]) / canvas_size[0]\n if scale_v < scale_h:\n image_canvas_size = (canvas_size[0], canvas_size[0] * data_shape[1] / data_shape[0])\n else:\n image_canvas_size = (canvas_size[1] * data_shape[0] / data_shape[1], canvas_size[1])\n image_canvas_origin = (canvas_size[0] * 0.5 - image_canvas_size[0] * 0.5, canvas_size[1] * 0.5 - image_canvas_size[1] * 0.5)\n elif image_canvas_mode == \"fit\":\n image_canvas_size = canvas_size\n image_canvas_origin = (0, 0)\n elif image_canvas_mode == \"1:1\":\n image_canvas_size = data_shape\n image_canvas_origin = (canvas_size[0] * 0.5 - image_canvas_size[0] * 0.5, canvas_size[1] * 0.5 - image_canvas_size[1] * 0.5)\n elif image_canvas_mode == \"2:1\":\n image_canvas_size = (data_shape[0] * 0.5, data_shape[1] * 0.5)\n image_canvas_origin = (canvas_size[0] * 0.5 - image_canvas_size[0] * 0.5, canvas_size[1] * 0.5 - image_canvas_size[1] * 0.5)\n else:\n image_canvas_size = (canvas_size[0] * image_zoom, canvas_size[1] * image_zoom)\n canvas_rect = Geometry.fit_to_size(((0, 0), image_canvas_size), data_shape)\n image_canvas_origin_y = (canvas_size[0] * 0.5) - image_position[0] * canvas_rect[1][0] - canvas_rect[0][0]\n image_canvas_origin_x = (canvas_size[1] * 0.5) - image_position[1] * canvas_rect[1][1] - canvas_rect[0][1]\n image_canvas_origin = (image_canvas_origin_y, image_canvas_origin_x)\n return image_canvas_origin, image_canvas_size", "def outbound_sizes(cls, original_width, original_height, target_width, target_height):\n \"\"\"\n Calculate new image sizes for outbound mode\n :param original_width: int\n :param original_height: int\n :param target_width: int\n :param target_height: int\n :return: tuple(int, int)\n \"\"\"\n if target_width <= original_width and target_height <= original_height:\n k = original_width / float(original_height)\n k_w = original_width / float(target_width)\n k_h = original_height / float(target_height)\n\n if k_w > k_h:\n target_width = int(target_height * k)\n else:\n target_height = int(target_width / k)\n else:\n target_width = original_width\n target_height = original_height\n\n return target_width, target_height", "def crinfo_from_specific_data(data, margin=0):\n \"\"\"\n Create crinfo of minimum orthogonal nonzero block in input data.\n\n :param data: input data\n :param margin: add margin to minimum block\n :return:\n \"\"\"\n # hledáme automatický ořez, nonzero dá indexy\n logger.debug(\"crinfo\")\n logger.debug(str(margin))\n nzi = np.nonzero(data)\n logger.debug(str(nzi))\n\n if np.isscalar(margin):\n margin = [margin] * 3\n\n x1 = np.min(nzi[0]) - margin[0]\n x2 = np.max(nzi[0]) + margin[0] + 1\n y1 = np.min(nzi[1]) - margin[0]\n y2 = np.max(nzi[1]) + margin[0] + 1\n z1 = np.min(nzi[2]) - 
margin[0]\n z2 = np.max(nzi[2]) + margin[0] + 1\n\n # ošetření mezí polí\n if x1 < 0:\n x1 = 0\n if y1 < 0:\n y1 = 0\n if z1 < 0:\n z1 = 0\n\n if x2 > data.shape[0]:\n x2 = data.shape[0] - 1\n if y2 > data.shape[1]:\n y2 = data.shape[1] - 1\n if z2 > data.shape[2]:\n z2 = data.shape[2] - 1\n\n # ořez\n crinfo = [[x1, x2], [y1, y2], [z1, z2]]\n return crinfo", "def resize_to_shape(data, shape, zoom=None, mode=\"nearest\", order=0):\n \"\"\"\n Function resize input data to specific shape.\n :param data: input 3d array-like data\n :param shape: shape of output data\n :param zoom: zoom is used for back compatibility\n :mode: default is 'nearest'\n \"\"\"\n # @TODO remove old code in except part\n # TODO use function from library in future\n\n try:\n # rint 'pred vyjimkou'\n # aise Exception ('test without skimage')\n # rint 'za vyjimkou'\n import skimage\n import skimage.transform\n\n # Now we need reshape seeds and segmentation to original size\n\n # with warnings.catch_warnings():\n # warnings.filterwarnings(\"ignore\", \".*'constant', will be changed to.*\")\n segm_orig_scale = skimage.transform.resize(\n data, shape, order=0, preserve_range=True, mode=\"reflect\"\n )\n\n segmentation = segm_orig_scale\n logger.debug(\"resize to orig with skimage\")\n except:\n if zoom is None:\n zoom = shape / np.asarray(data.shape).astype(np.double)\n segmentation = resize_to_shape_with_zoom(\n data, zoom=zoom, mode=mode, order=order\n )\n\n return segmentation", "def resize_to_mm(data3d, voxelsize_mm, new_voxelsize_mm, mode='nearest'):\n \"\"\"\n Function can resize data3d or segmentation to specifed voxelsize_mm\n :new_voxelsize_mm: requested voxelsize. List of 3 numbers, also\n can be a string 'orig', 'orgi*2' and 'orgi*4'.\n\n :voxelsize_mm: size of voxel\n :mode: default is 'nearest'\n \"\"\"\n import scipy\n import scipy.ndimage\n\n if np.all(list(new_voxelsize_mm) == 'orig'):\n new_voxelsize_mm = np.array(voxelsize_mm)\n elif np.all(list(new_voxelsize_mm) == 'orig*2'):\n new_voxelsize_mm = np.array(voxelsize_mm) * 2\n elif np.all(list(new_voxelsize_mm) == 'orig*4'):\n new_voxelsize_mm = np.array(voxelsize_mm) * 4\n # vx_size = np.array(metadata['voxelsize_mm']) * 4\n\n zoom = voxelsize_mm / (1.0 * np.array(new_voxelsize_mm))\n data3d_res = scipy.ndimage.zoom(\n data3d,\n zoom,\n mode=mode,\n order=1\n ).astype(data3d.dtype)\n return data3d_res", "def _crop(self, bounds, xsize=None, ysize=None, resampling=Resampling.cubic):\n \"\"\"Crop raster outside vector (convex hull).\n\n :param bounds: bounds on image\n :param xsize: output raster width, None for full resolution\n :param ysize: output raster height, None for full resolution\n :param resampling: reprojection resampling method, default `cubic`\n\n :return: GeoRaster2\n \"\"\"\n out_raster = self[\n int(bounds[0]): int(bounds[2]),\n int(bounds[1]): int(bounds[3])\n ]\n\n if xsize and ysize:\n if not (xsize == out_raster.width and ysize == out_raster.height):\n out_raster = out_raster.resize(dest_width=xsize, dest_height=ysize, resampling=resampling)\n return out_raster", "def calculate_crop_output_shapes(operator):\n '''\n Allowed input/output patterns are\n 1. [N, C, H, W] ---> [N, C, H', W']\n 2. 
[N, C, H, W], shape-ref [N', C', H', W'] ---> [N, C, H', W']\n '''\n check_input_and_output_numbers(operator, input_count_range=[1, 2], output_count_range=1)\n check_input_and_output_types(operator, good_input_types=[FloatTensorType])\n\n output_shape = copy.deepcopy(operator.inputs[0].type.shape)\n\n params = operator.raw_operator.crop\n if len(operator.inputs) == 1:\n if len(params.cropAmounts.borderAmounts) > 0:\n output_shape[2] -= params.cropAmounts.borderAmounts[0].startEdgeSize\n output_shape[2] -= params.cropAmounts.borderAmounts[0].endEdgeSize\n output_shape[3] -= params.cropAmounts.borderAmounts[1].startEdgeSize\n output_shape[3] -= params.cropAmounts.borderAmounts[1].endEdgeSize\n elif len(operator.inputs) == 2:\n output_shape[2] = operator.inputs[1].type.shape[2]\n output_shape[3] = operator.inputs[1].type.shape[3]\n else:\n raise RuntimeError('Too many inputs for Crop operator')\n\n operator.outputs[0].type.shape = output_shape", "def copyMakeBorder(src, top, bot, left, right, border_type=cv2.BORDER_CONSTANT, value=0):\n \"\"\"Pad image border\n Wrapper for cv2.copyMakeBorder that uses mx.nd.NDArray\n\n Parameters\n ----------\n src : NDArray\n Image in (width, height, channels).\n Others are the same with cv2.copyMakeBorder\n\n Returns\n -------\n img : NDArray\n padded image\n \"\"\"\n hdl = NDArrayHandle()\n check_call(_LIB.MXCVcopyMakeBorder(src.handle, ctypes.c_int(top), ctypes.c_int(bot),\n ctypes.c_int(left), ctypes.c_int(right),\n ctypes.c_int(border_type), ctypes.c_double(value),\n ctypes.byref(hdl)))\n return mx.nd.NDArray(hdl)", "def crop_or_pad_to(height, width):\n \"\"\"Ensures the specified spatial shape by either padding or cropping.\n Meant to be used as a last transform for architectures insisting on a specific\n spatial shape of their inputs.\n \"\"\"\n def inner(t_image):\n return tf.image.resize_image_with_crop_or_pad(t_image, height, width)\n return inner", "def crop_and_resize(image, boxes, box_ind, crop_size, pad_border=True):\n \"\"\"\n Aligned version of tf.image.crop_and_resize, following our definition of floating point boxes.\n\n Args:\n image: NCHW\n boxes: nx4, x1y1x2y2\n box_ind: (n,)\n crop_size (int):\n Returns:\n n,C,size,size\n \"\"\"\n assert isinstance(crop_size, int), crop_size\n boxes = tf.stop_gradient(boxes)\n\n # TF's crop_and_resize produces zeros on border\n if pad_border:\n # this can be quite slow\n image = tf.pad(image, [[0, 0], [0, 0], [1, 1], [1, 1]], mode='SYMMETRIC')\n boxes = boxes + 1\n\n @under_name_scope()\n def transform_fpcoor_for_tf(boxes, image_shape, crop_shape):\n \"\"\"\n The way tf.image.crop_and_resize works (with normalized box):\n Initial point (the value of output[0]): x0_box * (W_img - 1)\n Spacing: w_box * (W_img - 1) / (W_crop - 1)\n Use the above grid to bilinear sample.\n\n However, what we want is (with fpcoor box):\n Spacing: w_box / W_crop\n Initial point: x0_box + spacing/2 - 0.5\n (-0.5 because bilinear sample (in my definition) assumes floating point coordinate\n (0.0, 0.0) is the same as pixel value (0, 0))\n\n This function transform fpcoor boxes to a format to be used by tf.image.crop_and_resize\n\n Returns:\n y1x1y2x2\n \"\"\"\n x0, y0, x1, y1 = tf.split(boxes, 4, axis=1)\n\n spacing_w = (x1 - x0) / tf.cast(crop_shape[1], tf.float32)\n spacing_h = (y1 - y0) / tf.cast(crop_shape[0], tf.float32)\n\n imshape = [tf.cast(image_shape[0] - 1, tf.float32), tf.cast(image_shape[1] - 1, tf.float32)]\n nx0 = (x0 + spacing_w / 2 - 0.5) / imshape[1]\n ny0 = (y0 + spacing_h / 2 - 0.5) / imshape[0]\n\n nw = 
spacing_w * tf.cast(crop_shape[1] - 1, tf.float32) / imshape[1]\n nh = spacing_h * tf.cast(crop_shape[0] - 1, tf.float32) / imshape[0]\n\n return tf.concat([ny0, nx0, ny0 + nh, nx0 + nw], axis=1)\n\n # Expand bbox to a minium size of 1\n # boxes_x1y1, boxes_x2y2 = tf.split(boxes, 2, axis=1)\n # boxes_wh = boxes_x2y2 - boxes_x1y1\n # boxes_center = tf.reshape((boxes_x2y2 + boxes_x1y1) * 0.5, [-1, 2])\n # boxes_newwh = tf.maximum(boxes_wh, 1.)\n # boxes_x1y1new = boxes_center - boxes_newwh * 0.5\n # boxes_x2y2new = boxes_center + boxes_newwh * 0.5\n # boxes = tf.concat([boxes_x1y1new, boxes_x2y2new], axis=1)\n\n image_shape = tf.shape(image)[2:]\n boxes = transform_fpcoor_for_tf(boxes, image_shape, [crop_size, crop_size])\n image = tf.transpose(image, [0, 2, 3, 1]) # nhwc\n ret = tf.image.crop_and_resize(\n image, boxes, tf.cast(box_ind, tf.int32),\n crop_size=[crop_size, crop_size])\n ret = tf.transpose(ret, [0, 3, 1, 2]) # ncss\n return ret", "def crop_sizes(cls, original_width, original_height, target_width, target_height):\n \"\"\"\n Calculate crop parameters for outbound mode\n :param original_width: int\n :param original_height: int\n :param target_width: int\n :param target_height: int\n :return: tuple(int, int, int, int)\n \"\"\"\n if target_width < original_width:\n left = abs(original_width - target_width) / 2\n right = left + target_width\n else:\n left = 0\n right = original_width\n if target_height < original_height:\n upper = abs(original_height - target_height) / 2\n lower = upper + target_height\n else:\n upper = 0\n lower = original_height\n\n return left, upper, right, lower" ]
[ 0.6718054413795471, 0.6605711579322815, 0.6366471648216248, 0.6345922350883484, 0.6334717869758606, 0.6292850971221924, 0.627955973148346, 0.6246779561042786, 0.6235324740409851, 0.6230271458625793, 0.622809886932373, 0.622344434261322 ]
Function recognize order of crinfo and convert it to proper format.
def fix_crinfo(crinfo, to="axis"): """ Function recognize order of crinfo and convert it to proper format. """ crinfo = np.asarray(crinfo) if crinfo.shape[0] == 2: crinfo = crinfo.T return crinfo
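A minimal usage sketch for fix_crinfo as defined above, assuming the function is already in scope; the bounds below are illustrative values only.

# crinfo given as two rows (mins, maxes) across the three axes
crinfo_minmax = [[10, 20, 5], [50, 60, 40]]      # shape (2, 3)

fixed = fix_crinfo(crinfo_minmax)
print(fixed.shape)   # (3, 2): one [min, max] pair per axis
print(fixed[0])      # [10 50], bounds along the first axis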
[ "def combinecrinfo(crinfo1, crinfo2):\n \"\"\"\n Combine two crinfos. First used is crinfo1, second used is crinfo2.\n \"\"\"\n crinfo1 = fix_crinfo(crinfo1)\n crinfo2 = fix_crinfo(crinfo2)\n\n crinfo = [\n [crinfo1[0][0] + crinfo2[0][0], crinfo1[0][0] + crinfo2[0][1]],\n [crinfo1[1][0] + crinfo2[1][0], crinfo1[1][0] + crinfo2[1][1]],\n [crinfo1[2][0] + crinfo2[2][0], crinfo1[2][0] + crinfo2[2][1]],\n ]\n\n return crinfo", "def _transform_chrom(chrom):\n \"\"\"Helper function to obtain specific sort order.\"\"\"\n try:\n c = int(chrom)\n except:\n if chrom in ['X', 'Y']:\n return chrom\n elif chrom == 'MT':\n return '_MT' # sort to the end\n else:\n return '__' + chrom # sort to the very end\n else:\n # make sure numbered chromosomes are sorted numerically\n return '%02d' % c", "def parse_tag_info_chrs(self, f, convChr=True):\n \"\"\" Parse HOMER tagdirectory taginfo.txt file to extract chromosome coverage. \"\"\"\n parsed_data_total = OrderedDict()\n parsed_data_uniq = OrderedDict()\n remove = [\"hap\", \"random\", \"chrUn\", \"cmd\", \"EBV\", \"GL\", \"NT_\"]\n for l in f['f']:\n s = l.split(\"\\t\")\n key = s[0].strip()\n # skip header\n if '=' in l or len(s) != 3:\n continue\n if convChr:\n if any(x in key for x in remove):\n continue\n try:\n vT = float(s[1].strip())\n vU = float(s[2].strip())\n except ValueError:\n continue\n\n parsed_data_total[key] = vT\n parsed_data_uniq[key] = vU\n\n return [parsed_data_total, parsed_data_uniq]", "def _cr_decode(self, msg):\n \"\"\"CR: Custom values\"\"\"\n if int(msg[4:6]) > 0:\n index = int(msg[4:6])-1\n return {'values': [self._cr_one_custom_value_decode(index, msg[6:12])]}\n else:\n part = 6\n ret = []\n for i in range(Max.SETTINGS.value):\n ret.append(self._cr_one_custom_value_decode(i, msg[part:part+6]))\n part += 6\n return {'values': ret}", "private void filterCR(boolean moreData) {\n int i, j;\n\n readBufferOverflow = -1;\n\n loop: for (i = j = readBufferPos; j < readBufferLength; i++, j++) {\n switch (readBuffer[j]) {\n case '\\r':\n if (j == readBufferLength - 1) {\n if (moreData) {\n readBufferOverflow = '\\r';\n readBufferLength--;\n } else // CR at end of buffer\n {\n readBuffer[i++] = '\\n';\n }\n break loop;\n } else if (readBuffer[j + 1] == '\\n') {\n j++;\n }\n readBuffer[i] = '\\n';\n break;\n\n case '\\n':\n default:\n readBuffer[i] = readBuffer[j];\n break;\n }\n }\n readBufferLength = i;\n }", "def p_chr(p):\n \"\"\" string : CHR arg_list\n \"\"\"\n if len(p[2]) < 1:\n syntax_error(p.lineno(1), \"CHR$ function need at less 1 parameter\")\n p[0] = None\n return\n\n for i in range(len(p[2])): # Convert every argument to 8bit unsigned\n p[2][i].value = make_typecast(TYPE.ubyte, p[2][i].value, p.lineno(1))\n\n p[0] = make_builtin(p.lineno(1), 'CHR', p[2], type_=TYPE.string)", "def convert_coordinate(self, chromosome, position, strand='+'):\n '''\n Returns a *list* of possible conversions for a given chromosome position.\n The list may be empty (no conversion), have a single element (unique conversion), or several elements (position mapped to several chains).\n The list contains tuples (target_chromosome, target_position, target_strand, conversion_chain_score),\n where conversion_chain_score is the \"alignment score\" field specified at the chain used to perform conversion. 
If there\n are several possible conversions, they are sorted by decreasing conversion_chain_score.\n \n IF chromosome is completely unknown to the LiftOver, None is returned.\n \n Note that coordinates are 0-based, and even at negative strand are relative to the beginning of the genome.\n I.e. position 0 strand + is the first position of the genome. Position 0 strand - is also the first position of the genome \n (and the last position of reverse-complemented genome).\n '''\n query_results = self.chain_file.query(chromosome, position)\n if query_results is None:\n return None\n else:\n # query_results contains intervals which contain the query point. We simply have to remap to corresponding targets.\n results = []\n for (source_start, source_end, data) in query_results:\n target_start, target_end, chain = data\n result_position = target_start + (position - source_start)\n if chain.target_strand == '-':\n result_position = chain.target_size - 1 - result_position\n result_strand = chain.target_strand if strand == '+' else ('+' if chain.target_strand == '-' else '-')\n results.append((chain.target_name, result_position, result_strand, chain.score))\n #if len(results) > 1:\n results.sort(key=lambda x: x[3], reverse=True)\n return results", "def crinfo_from_specific_data(data, margin=0):\n \"\"\"\n Create crinfo of minimum orthogonal nonzero block in input data.\n\n :param data: input data\n :param margin: add margin to minimum block\n :return:\n \"\"\"\n # hledáme automatický ořez, nonzero dá indexy\n logger.debug(\"crinfo\")\n logger.debug(str(margin))\n nzi = np.nonzero(data)\n logger.debug(str(nzi))\n\n if np.isscalar(margin):\n margin = [margin] * 3\n\n x1 = np.min(nzi[0]) - margin[0]\n x2 = np.max(nzi[0]) + margin[0] + 1\n y1 = np.min(nzi[1]) - margin[0]\n y2 = np.max(nzi[1]) + margin[0] + 1\n z1 = np.min(nzi[2]) - margin[0]\n z2 = np.max(nzi[2]) + margin[0] + 1\n\n # ošetření mezí polí\n if x1 < 0:\n x1 = 0\n if y1 < 0:\n y1 = 0\n if z1 < 0:\n z1 = 0\n\n if x2 > data.shape[0]:\n x2 = data.shape[0] - 1\n if y2 > data.shape[1]:\n y2 = data.shape[1] - 1\n if z2 > data.shape[2]:\n z2 = data.shape[2] - 1\n\n # ořez\n crinfo = [[x1, x2], [y1, y2], [z1, z2]]\n return crinfo", "def _rec_filter_to_info(line):\n \"\"\"Move a DKFZBias filter to the INFO field, for a record.\n \"\"\"\n parts = line.rstrip().split(\"\\t\")\n move_filters = {\"bSeq\": \"strand\", \"bPcr\": \"damage\"}\n new_filters = []\n bias_info = []\n for f in parts[6].split(\";\"):\n if f in move_filters:\n bias_info.append(move_filters[f])\n elif f not in [\".\"]:\n new_filters.append(f)\n if bias_info:\n parts[7] += \";DKFZBias=%s\" % \",\".join(bias_info)\n parts[6] = \";\".join(new_filters or [\"PASS\"])\n return \"\\t\".join(parts) + \"\\n\"", "protected void updateLastCRLFInfo(int index, int pos, boolean isCR) {\n this.lastCRLFBufferIndex = index;\n this.lastCRLFPosition = pos;\n this.lastCRLFisCR = isCR;\n }", "def _calc_order(self, order):\n \"\"\"Called to set the order of a multi-channel image.\n The order should be determined by the loader, but this will\n make a best guess if passed `order` is `None`.\n \"\"\"\n if order is not None and order != '':\n self.order = order.upper()\n else:\n shape = self.shape\n if len(shape) <= 2:\n self.order = 'M'\n else:\n depth = shape[-1]\n # TODO: need something better here than a guess!\n if depth == 1:\n self.order = 'M'\n elif depth == 2:\n self.order = 'AM'\n elif depth == 3:\n self.order = 'RGB'\n elif depth == 4:\n self.order = 'RGBA'", "def _ycbcr2l(self, mode):\n 
\"\"\"Convert from YCbCr to L.\n \"\"\"\n self._check_modes((\"YCbCr\", \"YCbCrA\"))\n\n self.channels = [self.channels[0]] + self.channels[3:]\n if self.fill_value is not None:\n self.fill_value = [self.fill_value[0]] + self.fill_value[3:]\n self.mode = mode" ]
[ 0.7453998327255249, 0.6900262832641602, 0.6783666610717773, 0.6655680537223816, 0.6643850803375244, 0.6607131361961365, 0.6585497856140137, 0.6559640169143677, 0.6520832777023315, 0.6511232852935791, 0.6503932476043701, 0.6451404094696045 ]
Get list of grid edges :param shape: :param inds: :param return_directions: :return:
def grid_edges(shape, inds=None, return_directions=True): """ Get list of grid edges :param shape: :param inds: :param return_directions: :return: """ if inds is None: inds = np.arange(np.prod(shape)).reshape(shape) # if not self.segparams['use_boundary_penalties'] and \ # boundary_penalties_fcn is None : if len(shape) == 2: edgx = np.c_[inds[:, :-1].ravel(), inds[:, 1:].ravel()] edgy = np.c_[inds[:-1, :].ravel(), inds[1:, :].ravel()] edges = [edgx, edgy] directions = [ np.ones([edgx.shape[0]], dtype=np.int8) * 0, np.ones([edgy.shape[0]], dtype=np.int8) * 1, ] elif len(shape) == 3: # This is faster for some specific format edgx = np.c_[inds[:, :, :-1].ravel(), inds[:, :, 1:].ravel()] edgy = np.c_[inds[:, :-1, :].ravel(), inds[:, 1:, :].ravel()] edgz = np.c_[inds[:-1, :, :].ravel(), inds[1:, :, :].ravel()] edges = [edgx, edgy, edgz] else: logger.error("Expected 2D or 3D data") # for all edges along first direction put 0, for second direction put 1, for third direction put 3 if return_directions: directions = [] for idirection in range(len(shape)): directions.append( np.ones([edges[idirection].shape[0]], dtype=np.int8) * idirection ) edges = np.concatenate(edges) if return_directions: edge_dir = np.concatenate(directions) return edges, edge_dir else: return edges
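A short call sketch for grid_edges above, assuming the function and its module-level numpy/logger names are in scope; the edge counts in the comments follow directly from the slicing in the code.

import numpy as np

shape = (2, 3, 4)                      # a tiny 3D grid with 24 voxels
edges, edge_dir = grid_edges(shape, return_directions=True)

# neighbouring-voxel pairs per axis:
#   along axis 2: 2*3*3 = 18 edges (direction 0)
#   along axis 1: 2*2*4 = 16 edges (direction 1)
#   along axis 0: 1*3*4 = 12 edges (direction 2)
print(edges.shape)             # (46, 2) pairs of flat voxel indices
print(np.bincount(edge_dir))   # [18 16 12]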
[ "def gen_grid_2d(shape, voxelsize):\n \"\"\"\n Generate list of edges for a base grid.\n \"\"\"\n nr, nc = shape\n nrm1, ncm1 = nr - 1, nc - 1\n # sh = nm.asarray(shape)\n # calculate number of edges, in 2D: (nrows * (ncols - 1)) + ((nrows - 1) * ncols)\n nedges = 0\n for direction in range(len(shape)):\n sh = copy.copy(list(shape))\n sh[direction] += -1\n nedges += nm.prod(sh)\n\n nedges_old = ncm1 * nr + nrm1 * nc\n edges = nm.zeros((nedges, 2), dtype=nm.int16)\n edge_dir = nm.zeros((ncm1 * nr + nrm1 * nc,), dtype=nm.bool)\n nodes = nm.zeros((nm.prod(shape), 3), dtype=nm.float32)\n\n # edges\n idx = 0\n row = nm.zeros((ncm1, 2), dtype=nm.int16)\n row[:, 0] = nm.arange(ncm1)\n row[:, 1] = nm.arange(ncm1) + 1\n for ii in range(nr):\n edges[slice(idx, idx + ncm1), :] = row + nc * ii\n idx += ncm1\n\n edge_dir[slice(0, idx)] = 0 # horizontal dir\n\n idx0 = idx\n col = nm.zeros((nrm1, 2), dtype=nm.int16)\n col[:, 0] = nm.arange(nrm1) * nc\n col[:, 1] = nm.arange(nrm1) * nc + nc\n for ii in range(nc):\n edges[slice(idx, idx + nrm1), :] = col + ii\n idx += nrm1\n\n edge_dir[slice(idx0, idx)] = 1 # vertical dir\n\n # nodes\n idx = 0\n row = nm.zeros((nc, 3), dtype=nm.float32)\n row[:, 0] = voxelsize[0] * (nm.arange(nc) + 0.5)\n row[:, 1] = voxelsize[1] * 0.5\n for ii in range(nr):\n nodes[slice(idx, idx + nc), :] = row\n row[:, 1] += voxelsize[1]\n idx += nc\n\n return nodes, edges, edge_dir", "def get_edges(self, indexed=None):\n \"\"\"Edges of the mesh\n \n Parameters\n ----------\n indexed : str | None\n If indexed is None, return (Nf, 3) array of vertex indices,\n two per edge in the mesh.\n If indexed is 'faces', then return (Nf, 3, 2) array of vertex\n indices with 3 edges per face, and two vertices per edge.\n\n Returns\n -------\n edges : ndarray\n The edges.\n \"\"\"\n \n if indexed is None:\n if self._edges is None:\n self._compute_edges(indexed=None)\n return self._edges\n elif indexed == 'faces':\n if self._edges_indexed_by_faces is None:\n self._compute_edges(indexed='faces')\n return self._edges_indexed_by_faces\n else:\n raise Exception(\"Invalid indexing mode. 
Accepts: None, 'faces'\")", "def extract_adjacent_shapes(df_shapes, shape_i_column, extend=.5):\n '''\n Generate list of connections between \"adjacent\" polygon shapes based on\n geometrical \"closeness\".\n\n Parameters\n ----------\n df_shapes : pandas.DataFrame\n Table of polygon shape vertices (one row per vertex).\n\n Table rows with the same value in the :data:`shape_i_column` column\n are grouped together as a polygon.\n shape_i_column : str or list[str]\n Column name(s) that identify the polygon each row belongs to.\n extend : float, optional\n Extend ``x``/``y`` coords by the specified number of absolute units\n from the center point of each polygon.\n Each polygon is stretched independently in the ``x`` and ``y`` direction.\n In each direction, a polygon considered adjacent to all polygons that\n are overlapped by the extended shape.\n\n Returns\n -------\n pandas.DataFrame\n Adjacency list as a frame containing the columns ``source`` and\n ``target``.\n\n The ``source`` and ``target`` of each adjacency connection is ordered\n such that the ``source`` is less than the ``target``.\n '''\n # Find corners of each solid shape outline.\n # Extend x coords by abs units\n df_scaled_x = extend_shapes(df_shapes, 'x', extend)\n # Extend y coords by abs units\n df_scaled_y = extend_shapes(df_shapes, 'y', extend)\n\n df_corners = df_shapes.groupby(shape_i_column).agg({'x': ['min', 'max'],\n 'y': ['min', 'max']})\n\n # Find adjacent electrodes\n row_list = []\n\n for shapeNumber in df_shapes[shape_i_column].drop_duplicates():\n df_stretched = df_scaled_x[df_scaled_x[shape_i_column]\n .isin([shapeNumber])]\n xmin_x, xmax_x, ymin_x, ymax_x = (df_stretched.x.min(),\n df_stretched.x.max(),\n df_stretched.y.min(),\n df_stretched.y.max())\n df_stretched = df_scaled_y[df_scaled_y[shape_i_column]\n .isin([shapeNumber])]\n xmin_y, xmax_y, ymin_y, ymax_y = (df_stretched.x.min(),\n df_stretched.x.max(),\n df_stretched.y.min(),\n df_stretched.y.max())\n\n #Some conditions unnecessary if it is assumed that electrodes don't overlap\n adjacent = df_corners[\n ((df_corners.x['min'] < xmax_x) & (df_corners.x['max'] >= xmax_x)\n # Check in x stretched direction\n |(df_corners.x['min'] < xmin_x) & (df_corners.x['max'] >= xmin_x))\n # Check if y is within bounds\n & (df_corners.y['min'] < ymax_x) & (df_corners.y['max'] > ymin_x) |\n\n #maybe do ymax_x - df_corners.y['min'] > threshold &\n # df_corners.y['max'] - ymin_x > threshold\n\n ((df_corners.y['min'] < ymax_y) & (df_corners.y['max'] >= ymax_y)\n # Checks in y stretched direction\n |(df_corners.y['min'] < ymin_y) & (df_corners.y['max'] >= ymin_y))\n # Check if x in within bounds\n & ((df_corners.x['min'] < xmax_y) & (df_corners.x['max'] > xmin_y))\n ].index.values\n\n for shape in adjacent:\n temp_dict = {}\n reverse_dict = {}\n\n temp_dict ['source'] = shapeNumber\n reverse_dict['source'] = shape\n temp_dict ['target'] = shape\n reverse_dict['target'] = shapeNumber\n\n if(reverse_dict not in row_list):\n row_list.append(temp_dict)\n\n df_connected = (pd.DataFrame(row_list)[['source', 'target']]\n .sort_index(axis=1, ascending=True)\n .sort_values(['source', 'target']))\n return df_connected", "def get_shape_infos(df_shapes, shape_i_columns):\n '''\n Return a `pandas.DataFrame` indexed by `shape_i_columns` (i.e., each row\n corresponds to a single shape/polygon), containing the following columns:\n\n - `area`: The area of the shape.\n - `width`: The width of the widest part of the shape.\n - `height`: The height of the tallest part of the shape.\n '''\n 
shape_areas = get_shape_areas(df_shapes, shape_i_columns)\n bboxes = get_bounding_boxes(df_shapes, shape_i_columns)\n return bboxes.join(pd.DataFrame(shape_areas))", "def index_to_coords(index, shape):\r\n '''convert index to coordinates given the shape'''\r\n coords = []\r\n for i in xrange(1, len(shape)):\r\n divisor = int(np.product(shape[i:]))\r\n value = index // divisor\r\n coords.append(value)\r\n index -= value * divisor\r\n coords.append(index)\r\n return tuple(coords)", "def tesselate_shapes_frame(df_shapes, shape_i_columns):\n '''\n Tesselate each shape path into one or more triangles.\n\n Parameters\n ----------\n df_shapes : pandas.DataFrame\n Table containing vertices of shapes, one row per vertex, with the *at\n least* the following columns:\n - ``x``: The x-coordinate of the vertex.\n - ``y``: The y-coordinate of the vertex.\n shape_i_columns : str or list\n Column(s) forming key to differentiate rows/vertices for each distinct\n shape.\n\n Returns\n -------\n pandas.DataFrame\n\n Table where each row corresponds to a triangle vertex, with the following\n columns:\n\n - ``shape_i_columns[]``: The shape path index column(s).\n - ``triangle_i``: The integer triangle index within each electrode path.\n - ``vertex_i``: The integer vertex index within each triangle.\n '''\n frames = []\n if isinstance(shape_i_columns, bytes):\n shape_i_columns = [shape_i_columns]\n\n for shape_i, df_path in df_shapes.groupby(shape_i_columns):\n points_i = df_path[['x', 'y']].values\n if (points_i[0] == points_i[-1]).all():\n # XXX End point is the same as the start point (do not include it).\n points_i = points_i[:-1]\n try:\n triangulator = Triangulator(points_i)\n except:\n import pdb; pdb.set_trace()\n continue\n if not isinstance(shape_i, (list, tuple)):\n shape_i = [shape_i]\n\n for i, triangle_i in enumerate(triangulator.triangles()):\n triangle_points_i = [shape_i + [i] + [j, x, y]\n for j, (x, y) in enumerate(triangle_i)]\n frames.extend(triangle_points_i)\n frames = None if not frames else frames\n return pd.DataFrame(frames, columns=shape_i_columns +\n ['triangle_i', 'vertex_i', 'x', 'y'])", "def find_neighbors(self, grid, node, diagonal_movement=None):\n '''\n find neighbor, same for Djikstra, A*, Bi-A*, IDA*\n '''\n if not diagonal_movement:\n diagonal_movement = self.diagonal_movement\n return grid.neighbors(node, diagonal_movement=diagonal_movement)", "def shapes(self):\n \"\"\"Return the route shapes as a dictionary.\"\"\"\n # Todo: Cache?\n if self._shapes:\n return self._shapes\n # Group together by shape_id\n self.log(\"Generating shapes...\")\n ret = collections.defaultdict(entities.ShapeLine)\n for point in self.read('shapes'):\n ret[point['shape_id']].add_child(point)\n self._shapes = ret\n return self._shapes", "def build_geometry_by_shape(\n feed: \"Feed\",\n shape_ids: Optional[List[str]] = None,\n *,\n use_utm: bool = False,\n) -> Dict:\n \"\"\"\n Return a dictionary with structure shape_id -> Shapely LineString\n of shape.\n\n Parameters\n ----------\n feed : Feed\n shape_ids : list\n IDs of shapes in ``feed.shapes`` to restrict output to; return\n all shapes if ``None``.\n use_utm : boolean\n If ``True``, then use local UTM coordinates; otherwise, use\n WGS84 coordinates\n\n Returns\n -------\n dictionary\n Has the structure\n shape_id -> Shapely LineString of shape.\n If ``feed.shapes is None``, then return ``None``.\n\n Return the empty dictionary if ``feed.shapes is None``.\n\n \"\"\"\n if feed.shapes is None:\n return {}\n\n # Note the output for conversion to UTM 
with the utm package:\n # >>> u = utm.from_latlon(47.9941214, 7.8509671)\n # >>> print u\n # (414278, 5316285, 32, 'T')\n d = {}\n shapes = feed.shapes.copy()\n if shape_ids is not None:\n shapes = shapes[shapes[\"shape_id\"].isin(shape_ids)]\n\n if use_utm:\n for shape, group in shapes.groupby(\"shape_id\"):\n lons = group[\"shape_pt_lon\"].values\n lats = group[\"shape_pt_lat\"].values\n xys = [\n utm.from_latlon(lat, lon)[:2] for lat, lon in zip(lats, lons)\n ]\n d[shape] = sg.LineString(xys)\n else:\n for shape, group in shapes.groupby(\"shape_id\"):\n lons = group[\"shape_pt_lon\"].values\n lats = group[\"shape_pt_lat\"].values\n lonlats = zip(lons, lats)\n d[shape] = sg.LineString(lonlats)\n return d", "def get_bounding_boxes(df_shapes, shape_i_columns):\n '''\n Return a `pandas.DataFrame` indexed by `shape_i_columns` (i.e., each row\n corresponds to a single shape/polygon), containing the following columns:\n\n - `width`: The width of the widest part of the shape.\n - `height`: The height of the tallest part of the shape.\n '''\n xy_groups = df_shapes.groupby(shape_i_columns)[['x', 'y']]\n xy_min = xy_groups.agg('min')\n xy_max = xy_groups.agg('max')\n\n shapes = (xy_max - xy_min).rename(columns={'x': 'width', 'y': 'height'})\n return xy_min.join(shapes)", "def _getshapes_2d(center, max_radius, shape):\n \"\"\"Calculate indices and slices for the bounding box of a disk.\"\"\"\n index_mean = shape * center\n index_radius = max_radius / 2.0 * np.array(shape)\n\n # Avoid negative indices\n min_idx = np.maximum(np.floor(index_mean - index_radius), 0).astype(int)\n max_idx = np.ceil(index_mean + index_radius).astype(int)\n idx = [slice(minx, maxx) for minx, maxx in zip(min_idx, max_idx)]\n shapes = [(idx[0], slice(None)),\n (slice(None), idx[1])]\n return tuple(idx), tuple(shapes)", "def get_radius_indexes(self, radius, max_ranges=None):\n \"\"\"Return the indexes of the interacting neighboring unit cells\n\n Interacting neighboring unit cells have at least one point in their\n box volume that has a distance smaller or equal than radius to at\n least one point in the central cell. This concept is of importance\n when computing pair wise long-range interactions in periodic systems.\n\n Argument:\n | ``radius`` -- the radius of the interaction sphere\n\n Optional argument:\n | ``max_ranges`` -- numpy array with three elements: The maximum\n ranges of indexes to consider. This is\n practical when working with the minimum image\n convention to reduce the generated bins to the\n minimum image. (see binning.py) Use -1 to\n avoid such limitations. The default is three\n times -1.\n\n \"\"\"\n if max_ranges is None:\n max_ranges = np.array([-1, -1, -1])\n ranges = self.get_radius_ranges(radius)*2+1\n mask = (max_ranges != -1) & (max_ranges < ranges)\n ranges[mask] = max_ranges[mask]\n max_size = np.product(self.get_radius_ranges(radius)*2 + 1)\n indexes = np.zeros((max_size, 3), int)\n\n from molmod.ext import unit_cell_get_radius_indexes\n reciprocal = self.reciprocal*self.active\n matrix = self.matrix*self.active\n size = unit_cell_get_radius_indexes(\n matrix, reciprocal, radius, max_ranges, indexes\n )\n return indexes[:size]" ]
[ 0.6559106707572937, 0.6455737948417664, 0.645016610622406, 0.6407939791679382, 0.6346331238746643, 0.6286357045173645, 0.6279879808425903, 0.6273221969604492, 0.6237670183181763, 0.622414231300354, 0.6219837665557861, 0.6216621398925781 ]
Generate list of edges for a base grid.
def gen_grid_2d(shape, voxelsize): """ Generate list of edges for a base grid. """ nr, nc = shape nrm1, ncm1 = nr - 1, nc - 1 # sh = nm.asarray(shape) # calculate number of edges, in 2D: (nrows * (ncols - 1)) + ((nrows - 1) * ncols) nedges = 0 for direction in range(len(shape)): sh = copy.copy(list(shape)) sh[direction] += -1 nedges += nm.prod(sh) nedges_old = ncm1 * nr + nrm1 * nc edges = nm.zeros((nedges, 2), dtype=nm.int16) edge_dir = nm.zeros((ncm1 * nr + nrm1 * nc,), dtype=nm.bool) nodes = nm.zeros((nm.prod(shape), 3), dtype=nm.float32) # edges idx = 0 row = nm.zeros((ncm1, 2), dtype=nm.int16) row[:, 0] = nm.arange(ncm1) row[:, 1] = nm.arange(ncm1) + 1 for ii in range(nr): edges[slice(idx, idx + ncm1), :] = row + nc * ii idx += ncm1 edge_dir[slice(0, idx)] = 0 # horizontal dir idx0 = idx col = nm.zeros((nrm1, 2), dtype=nm.int16) col[:, 0] = nm.arange(nrm1) * nc col[:, 1] = nm.arange(nrm1) * nc + nc for ii in range(nc): edges[slice(idx, idx + nrm1), :] = col + ii idx += nrm1 edge_dir[slice(idx0, idx)] = 1 # vertical dir # nodes idx = 0 row = nm.zeros((nc, 3), dtype=nm.float32) row[:, 0] = voxelsize[0] * (nm.arange(nc) + 0.5) row[:, 1] = voxelsize[1] * 0.5 for ii in range(nr): nodes[slice(idx, idx + nc), :] = row row[:, 1] += voxelsize[1] idx += nc return nodes, edges, edge_dir
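A sketch of calling gen_grid_2d above on a 3x4 grid, assuming the function is importable with its nm (numpy) and copy dependencies satisfied; note it uses the deprecated nm.bool alias, so an older NumPy (< 1.24) is assumed.

nodes, edges, edge_dir = gen_grid_2d((3, 4), voxelsize=[1.0, 1.0])

print(nodes.shape)   # (12, 3): node centres, third coordinate stays 0.0
print(edges.shape)   # (17, 2): 3*3 horizontal + 2*4 vertical edges
print(edge_dir[:9])  # first 9 entries False (horizontal), the rest True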
[ "def create_edges(self):\n \"\"\"Set up edge-node and edge-cell relations.\n \"\"\"\n # Reshape into individual edges.\n # Sort the columns to make it possible for `unique()` to identify\n # individual edges.\n s = self.idx_hierarchy.shape\n a = numpy.sort(self.idx_hierarchy.reshape(s[0], -1).T)\n a_unique, inv, cts = unique_rows(a)\n\n assert numpy.all(\n cts < 3\n ), \"No edge has more than 2 cells. Are cells listed twice?\"\n\n self.is_boundary_edge = (cts[inv] == 1).reshape(s[1:])\n\n self.is_boundary_edge_individual = cts == 1\n\n self.edges = {\"nodes\": a_unique}\n\n # cell->edges relationship\n self.cells[\"edges\"] = inv.reshape(3, -1).T\n\n self._edges_cells = None\n self._edge_gid_to_edge_list = None\n\n # Store an index {boundary,interior}_edge -> edge_gid\n self._edge_to_edge_gid = [\n [],\n numpy.where(self.is_boundary_edge_individual)[0],\n numpy.where(~self.is_boundary_edge_individual)[0],\n ]\n return", "def edges(self):\n # TODO: check docstring\n \"\"\"Returns a generator for iterating over edges\n \n Yields\n ------\n type\n Generator for iterating over edges.\n \n \"\"\"\n for i in sorted(self._matrix.keys(), key=lambda x:x.name()):\n for j in sorted(self._matrix[i].keys(), key=lambda x:x.name()):\n if i != j:\n yield (i, j)", "def grid_edges(shape, inds=None, return_directions=True):\n \"\"\"\n Get list of grid edges\n :param shape:\n :param inds:\n :param return_directions:\n :return:\n \"\"\"\n if inds is None:\n inds = np.arange(np.prod(shape)).reshape(shape)\n # if not self.segparams['use_boundary_penalties'] and \\\n # boundary_penalties_fcn is None :\n if len(shape) == 2:\n edgx = np.c_[inds[:, :-1].ravel(), inds[:, 1:].ravel()]\n edgy = np.c_[inds[:-1, :].ravel(), inds[1:, :].ravel()]\n\n edges = [edgx, edgy]\n\n directions = [\n np.ones([edgx.shape[0]], dtype=np.int8) * 0,\n np.ones([edgy.shape[0]], dtype=np.int8) * 1,\n ]\n\n elif len(shape) == 3:\n # This is faster for some specific format\n edgx = np.c_[inds[:, :, :-1].ravel(), inds[:, :, 1:].ravel()]\n edgy = np.c_[inds[:, :-1, :].ravel(), inds[:, 1:, :].ravel()]\n edgz = np.c_[inds[:-1, :, :].ravel(), inds[1:, :, :].ravel()]\n edges = [edgx, edgy, edgz]\n else:\n logger.error(\"Expected 2D or 3D data\")\n\n # for all edges along first direction put 0, for second direction put 1, for third direction put 3\n if return_directions:\n directions = []\n for idirection in range(len(shape)):\n directions.append(\n np.ones([edges[idirection].shape[0]], dtype=np.int8) * idirection\n )\n edges = np.concatenate(edges)\n if return_directions:\n edge_dir = np.concatenate(directions)\n return edges, edge_dir\n else:\n return edges", "private List<Pair<Integer, Integer>> doGenerateEdgesWithOmitList() {\n final int numberOfNodes = getConfiguration().getNumberOfNodes();\n final int numberOfEdges = getConfiguration().getNumberOfEdges();\n final long maxEdges = numberOfNodes * (numberOfNodes - 1) / 2;\n\n final List<Pair<Integer, Integer>> edges = new LinkedList<>();\n\n for (Long index : edgeIndices(numberOfEdges, maxEdges)) {\n edges.add(indexToEdgeBijection(index));\n }\n\n return edges;\n }", "function generate(base, tuple, index) {\n var name = dims[index],\n v = vals[index++],\n k, key;\n\n for (k in v) {\n tuple[name] = v[k];\n key = base ? base + '|' + k : k;\n if (index < n) generate(key, tuple, index);\n else if (!curr[key]) aggr.cell(key, tuple);\n }\n }", "def create_edges(cells_nodes):\n \"\"\"Setup edge-node and edge-cell relations. 
Adapted from voropy.\n \"\"\"\n # Create the idx_hierarchy (nodes->edges->cells), i.e., the value of\n # `self.idx_hierarchy[0, 2, 27]` is the index of the node of cell 27, edge\n # 2, node 0. The shape of `self.idx_hierarchy` is `(2, 3, n)`, where `n` is\n # the number of cells. Make sure that the k-th edge is opposite of the k-th\n # point in the triangle.\n local_idx = numpy.array([[1, 2], [2, 0], [0, 1]]).T\n # Map idx back to the nodes. This is useful if quantities which are in\n # idx shape need to be added up into nodes (e.g., equation system rhs).\n nds = cells_nodes.T\n idx_hierarchy = nds[local_idx]\n\n s = idx_hierarchy.shape\n a = numpy.sort(idx_hierarchy.reshape(s[0], s[1] * s[2]).T)\n\n b = numpy.ascontiguousarray(a).view(\n numpy.dtype((numpy.void, a.dtype.itemsize * a.shape[1]))\n )\n _, idx, inv, cts = numpy.unique(\n b, return_index=True, return_inverse=True, return_counts=True\n )\n\n # No edge has more than 2 cells. This assertion fails, for example, if\n # cells are listed twice.\n assert all(cts < 3)\n\n edge_nodes = a[idx]\n cells_edges = inv.reshape(3, -1).T\n\n return edge_nodes, cells_edges", "def _compute_edges_cells(self):\n \"\"\"This creates interior edge->cells relations. While it's not\n necessary for many applications, it sometimes does come in handy.\n \"\"\"\n if self.edges is None:\n self.create_edges()\n\n num_edges = len(self.edges[\"nodes\"])\n\n counts = numpy.zeros(num_edges, dtype=int)\n fastfunc.add.at(\n counts,\n self.cells[\"edges\"],\n numpy.ones(self.cells[\"edges\"].shape, dtype=int),\n )\n\n # <https://stackoverflow.com/a/50395231/353337>\n edges_flat = self.cells[\"edges\"].flat\n idx_sort = numpy.argsort(edges_flat)\n idx_start, count = grp_start_len(edges_flat[idx_sort])\n res1 = idx_sort[idx_start[count == 1]][:, numpy.newaxis]\n idx = idx_start[count == 2]\n res2 = numpy.column_stack([idx_sort[idx], idx_sort[idx + 1]])\n self._edges_cells = [\n [], # no edges with zero adjacent cells\n res1 // 3,\n res2 // 3,\n ]\n # self._edges_local = [\n # [], # no edges with zero adjacent cells\n # res1 % 3,\n # res2 % 3,\n # ]\n\n # For each edge, store the number of adjacent cells plus the index into\n # the respective edge array.\n self._edge_gid_to_edge_list = numpy.empty((num_edges, 2), dtype=int)\n self._edge_gid_to_edge_list[:, 0] = count\n c1 = count == 1\n l1 = numpy.sum(c1)\n self._edge_gid_to_edge_list[c1, 1] = numpy.arange(l1)\n c2 = count == 2\n l2 = numpy.sum(c2)\n self._edge_gid_to_edge_list[c2, 1] = numpy.arange(l2)\n assert l1 + l2 == len(count)\n\n return", "def legal_edge_coords():\n \"\"\"\n Return all legal edge coordinates on the grid.\n \"\"\"\n edges = set()\n for tile_id in legal_tile_ids():\n for edge in edges_touching_tile(tile_id):\n edges.add(edge)\n logging.debug('Legal edge coords({})={}'.format(len(edges), edges))\n return edges", "def gridrange(start, end, step):\n \"\"\"Generate a grid of complex numbers\"\"\"\n for x in frange(start.real, end.real, step.real):\n for y in frange(start.imag, end.imag, step.imag):\n yield x + y * 1j", "def edges(self, sites, permutation=None, input='sites'):\n \"\"\"\n Returns the list of edges of this coordination geometry. 
Each edge is given as a\n list of its end vertices coordinates.\n \"\"\"\n if input == 'sites':\n coords = [site.coords for site in sites]\n elif input == 'coords':\n coords = sites\n # if permutation is None:\n # coords = [site.coords for site in sites]\n # else:\n # coords = [sites[ii].coords for ii in permutation]\n if permutation is not None:\n coords = [coords[ii] for ii in permutation]\n return [[coords[ii] for ii in e] for e in self._edges]", "def compute_edges(ast: BELAst, spec: BELSpec) -> Edges:\n \"\"\"Compute edges\"\"\"\n\n edges = []\n if ast.bel_object.__class__.__name__ == \"BELAst\":\n edges.append(ast.bel_object)\n\n process_ast(edges, ast, spec)\n return edges", "def coastal_edges(tile_id):\n \"\"\"\n Returns a list of coastal edge coordinate.\n\n An edge is coastal if it is on the grid's border.\n :return: list(int)\n \"\"\"\n edges = list()\n tile_coord = tile_id_to_coord(tile_id)\n for edge_coord in edges_touching_tile(tile_id):\n dirn = tile_edge_offset_to_direction(edge_coord - tile_coord)\n if tile_id_in_direction(tile_id, dirn) is None:\n edges.append(edge_coord)\n return edges" ]
[ 0.7401100993156433, 0.7308559417724609, 0.7190775871276855, 0.707381546497345, 0.7058398723602295, 0.7043352723121643, 0.7015905976295471, 0.7009677886962891, 0.6896178722381592, 0.6852476596832275, 0.6844310164451599, 0.6842830777168274 ]
Write nodes and edges to VTK file :param fname: VTK filename :param nodes: :param edges: :param node_flag: set if this node is really used in output :param edge_flag: set if this flag is used in output :return:
def write_grid_to_vtk(fname, nodes, edges, node_flag=None, edge_flag=None): """ Write nodes and edges to VTK file :param fname: VTK filename :param nodes: :param edges: :param node_flag: set if this node is really used in output :param edge_flag: set if this flag is used in output :return: """ if node_flag is None: node_flag = np.ones([nodes.shape[0]], dtype=np.bool) if edge_flag is None: edge_flag = np.ones([edges.shape[0]], dtype=np.bool) nodes = make_nodes_3d(nodes) f = open(fname, "w") f.write("# vtk DataFile Version 2.6\n") f.write("output file\nASCII\nDATASET UNSTRUCTURED_GRID\n") idxs = nm.where(node_flag > 0)[0] nnd = len(idxs) aux = -nm.ones(node_flag.shape, dtype=nm.int32) aux[idxs] = nm.arange(nnd, dtype=nm.int32) f.write("\nPOINTS %d float\n" % nnd) for ndi in idxs: f.write("%.6f %.6f %.6f\n" % tuple(nodes[ndi, :])) idxs = nm.where(edge_flag > 0)[0] ned = len(idxs) f.write("\nCELLS %d %d\n" % (ned, ned * 3)) for edi in idxs: f.write("2 %d %d\n" % tuple(aux[edges[edi, :]])) f.write("\nCELL_TYPES %d\n" % ned) for edi in idxs: f.write("3\n")
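A sketch chaining gen_grid_2d and write_grid_to_vtk from above, assuming both are in scope together with the make_nodes_3d helper that write_grid_to_vtk calls internally; the output filename is arbitrary.

# Build a small 2D grid and dump all of its nodes and edges to a legacy
# VTK file (leaving the flags as None keeps every node and edge).
nodes, edges, edge_dir = gen_grid_2d((3, 4), voxelsize=[1.0, 1.0])
write_grid_to_vtk("grid_3x4.vtk", nodes, edges)
# The resulting unstructured-grid file can be inspected e.g. in ParaView.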
[ "def write_edges(\n edges: Mapping[str, Any],\n filename: str,\n jsonlines: bool = False,\n gzipflag: bool = False,\n yaml: bool = False,\n):\n \"\"\"Write edges to file\n\n Args:\n edges (Mapping[str, Any]): in edges JSON Schema format\n filename (str): filename to write\n jsonlines (bool): output in JSONLines format?\n gzipflag (bool): create gzipped file?\n yaml (bool): create yaml file?\n \"\"\"\n pass", "def write_edges (self):\n \"\"\"\n Write all edges we can find in the graph in a brute-force manner.\n \"\"\"\n for node in self.nodes.values():\n if node[\"parent_url\"] in self.nodes:\n self.write_edge(node)\n self.flush()", "public <T,E extends TypedEdge<T>> void write(\n Multigraph<T,E> g, File f, \n Indexer<String> vertexLabels) \n throws IOException {\n write(g, f, null, false, vertexLabels, true);\n }", "public <T,E extends TypedEdge<T>> void write(\n Multigraph<T,E> g, File f, Map<T,Color> edgeColors, \n Indexer<String> vertexLabels) \n throws IOException {\n write(g, f, edgeColors, true, vertexLabels, true);\n }", "private <T,E extends TypedEdge<T>> void write(\n Multigraph<T,E> g, File f, Map<T,Color> edgeColors, \n boolean useColors, Indexer<String> vertexLabels,\n boolean useLabels) throws IOException {\n\n DocumentBuilderFactory dbfac = DocumentBuilderFactory.newInstance();\n DocumentBuilder docBuilder = null;\n try {\n docBuilder = dbfac.newDocumentBuilder();\n } catch (ParserConfigurationException pce) {\n throw new IOError(new IOException(pce));\n }\n Document doc = docBuilder.newDocument();\n\n Element root = doc.createElement(\"gexf\");\n root.setAttribute(\"xmlns\",\"http://www.gexf.net/1.2draft\");\n root.setAttribute(\"xmlns:viz\", \"http://www.gexf.net/1.2draft/viz\");\n root.setAttribute(\"xmlns:xsi\", \"http://www.w3.org/2001/XMLSchema-instance\");\n root.setAttribute(\"version\",\"1.2\");\n root.setAttribute(\"xsi:schemaLocation\",\"http://www.gexf.net/1.2draft http://www.gexf.net/1.2draft/gexf.xsd\");\n doc.appendChild(root);\n\n Element graph = doc.createElement(\"graph\");\n graph.setAttribute(\"defaultedgetype\",\"undirected\");\n root.appendChild(graph);\n\n Element nodes = doc.createElement(\"nodes\");\n graph.appendChild(nodes);\n Element edges = doc.createElement(\"edges\");\n graph.appendChild(edges);\n\n IntIterator vIter = g.vertices().iterator();\n while (vIter.hasNext()) {\n int vertex = vIter.next();\n Element node = doc.createElement(\"node\");\n node.setAttribute(\"id\", String.valueOf(vertex));\n if (useLabels) \n node.setAttribute(\"label\", vertexLabels.lookup(vertex));\n else\n node.setAttribute(\"label\", String.valueOf(vertex));\n nodes.appendChild(node);\n }\n\n ColorGenerator cg = null;\n if (useColors)\n cg = new ColorGenerator();\n\n int edgeId = 0;\n for (E e : g.edges()) {\n Element edge = doc.createElement(\"edge\");\n edges.appendChild(edge);\n edge.setAttribute(\"id\", \"\" + (edgeId++));\n edge.setAttribute(\"source\", String.valueOf(e.from()));\n edge.setAttribute(\"target\", String.valueOf(e.to()));\n edge.setAttribute(\"label\", String.valueOf(e.edgeType()));\n if (useColors) {\n Element cEdge = doc.createElement(\"viz:color\");\n edge.appendChild(cEdge);\n Color c = edgeColors.get(e.edgeType());\n if (c == null) {\n c = cg.next();\n edgeColors.put(e.edgeType(), c);\n }\n cEdge.setAttribute(\"r\", String.valueOf(c.getRed()));\n cEdge.setAttribute(\"g\", String.valueOf(c.getGreen()));\n cEdge.setAttribute(\"b\", String.valueOf(c.getBlue()));\n }\n }\n\n // Set up a transformer\n try {\n TransformerFactory transfac = 
TransformerFactory.newInstance();\n Transformer trans = transfac.newTransformer();\n trans.setOutputProperty(OutputKeys.OMIT_XML_DECLARATION, \"yes\");\n trans.setOutputProperty(OutputKeys.INDENT, \"yes\");\n trans.setOutputProperty(\n \"{http://xml.apache.org/xslt}indent-amount\", \"2\");\n \n // Create string from xml tree\n BufferedOutputStream bos = \n new BufferedOutputStream(new FileOutputStream(f));\n StreamResult result = new StreamResult(bos);\n DOMSource source = new DOMSource(doc);\n trans.transform(source, result);\n bos.close(); \n } catch (TransformerException te) {\n throw new IOError(new IOException(te));\n }\n }", "def save_graph(self, fname, style='flat', format='png', **kwargs): # @ReservedAssignment @IgnorePep8\n \"\"\"\n Saves a graph of the pipeline to file\n\n Parameters\n ----------\n fname : str\n The filename for the saved graph\n style : str\n The style of the graph, can be one of can be one of\n 'orig', 'flat', 'exec', 'hierarchical'\n plot : bool\n Whether to load and plot the graph after it has been written\n \"\"\"\n fname = os.path.expanduser(fname)\n if not fname.endswith('.png'):\n fname += '.png'\n orig_dir = os.getcwd()\n tmpdir = tempfile.mkdtemp()\n os.chdir(tmpdir)\n workflow = self._workflow\n workflow.write_graph(graph2use=style, format=format, **kwargs)\n os.chdir(orig_dir)\n try:\n shutil.move(os.path.join(tmpdir, 'graph_detailed.{}'\n .format(format)), fname)\n except IOError as e:\n if e.errno == errno.ENOENT:\n shutil.move(os.path.join(tmpdir, 'graph.{}'.format(format)),\n fname)\n else:\n raise\n shutil.rmtree(tmpdir)", "def writeEdgeList(grph, name, extraInfo = True, allSameAttribute = False, _progBar = None):\n \"\"\"Writes an edge list of _grph_ at the destination _name_.\n\n The edge list has two columns for the source and destination of the edge, `'From'` and `'To'` respectively, then, if _edgeInfo_ is `True`, for each attribute of the node another column is created.\n\n **Note**: If any edges are missing an attribute it will be left blank by default, enable _allSameAttribute_ to cause a `KeyError` to be raised.\n\n # Parameters\n\n _grph_ : `networkx Graph`\n\n > The graph to be written to _name_\n\n _name_ : `str`\n\n > The name of the file to be written\n\n _edgeInfo_ : `optional [bool]`\n\n > Default `True`, if `True` the attributes of each edge will be written\n\n _allSameAttribute_ : `optional [bool]`\n\n > Default `False`, if `True` all the edges must have the same attributes or an exception will be raised. 
If `False` the missing attributes will be left blank.\n \"\"\"\n count = 0\n eMax = len(grph.edges())\n if metaknowledge.VERBOSE_MODE or isinstance(_progBar, _ProgressBar):\n if isinstance(_progBar, _ProgressBar):\n PBar = _progBar\n PBar.updateVal(0, \"Writing edge list {}\".format(name))\n else:\n PBar = _ProgressBar(0, \"Writing edge list {}\".format(name))\n else:\n PBar = _ProgressBar(0, \"Writing edge list {}\".format(name), dummy = True)\n if len(grph.edges(data = True)) < 1:\n outFile = open(os.path.expanduser(os.path.abspath(name)), 'w')\n outFile.write('\"From\",\"To\"\\n')\n outFile.close()\n PBar.updateVal(1, \"Done edge list '{}', 0 edges written.\".format(name))\n else:\n if extraInfo:\n csvHeader = []\n if allSameAttribute:\n csvHeader = ['From'] + ['To'] + list(grph.edges(data = True).__next__()[2].keys())\n else:\n extraAttribs = set()\n for eTuple in grph.edges(data = True):\n count += 1\n if count % 1000 == 0:\n PBar.updateVal(count / eMax * .10, \"Checking over edge: '{}' to '{}'\".format(eTuple[0], eTuple[1]))\n s = set(eTuple[2].keys()) - extraAttribs\n if len(s) > 0:\n for i in s:\n extraAttribs.add(i)\n csvHeader = ['From', 'To'] + list(extraAttribs)\n else:\n csvHeader = ['From'] + ['To']\n count = 0\n PBar.updateVal(.01, \"Opening file {}\".format(name))\n f = open(os.path.expanduser(os.path.abspath(name)), 'w', newline = '')\n outFile = csv.DictWriter(f, csvHeader, delimiter = ',', quotechar = '\"', quoting=csv.QUOTE_NONNUMERIC)\n outFile.writeheader()\n if extraInfo:\n for e in grph.edges(data = True):\n count += 1\n if count % 1000 == 0:\n PBar.updateVal(count / eMax * .90 + .10, \"Writing edge: '{}' to '{}'\".format(e[0], e[1]))\n eDict = e[2].copy()\n eDict['From'] = e[0]\n eDict['To'] = e[1]\n try:\n outFile.writerow(eDict)\n except UnicodeEncodeError:\n #Because Windows\n newDict = {k.encode('ASCII', errors='ignore').decode('ASCII', errors='ignore') if isinstance(k, str) else k: v.encode('ASCII', errors='ignore').decode('ASCII', errors='ignore') if isinstance(v, str) else v for k, v in eDict.items()}\n outFile.writerow(newDict)\n except ValueError:\n raise ValueError(\"Some edges in The graph do not have the same attributes\")\n else:\n for e in grph.edges():\n count += 1\n if count % 1000 == 0:\n PBar.updateVal(count / eMax * .90 + .10, \"Writing edge: '{}' to '{}'\".format(e[0], e[1]))\n eDict['From'] = e[0]\n eDict['To'] = e[1]\n try:\n outFile.writerow(eDict)\n except UnicodeEncodeError:\n #Because Windows\n newDict = {k.encode('ASCII', errors='ignore').decode('ASCII', errors='ignore') if isinstance(k, str) else k: v.encode('ASCII', errors='ignore').decode('ASCII', errors='ignore') if isinstance(v, str) else v for k, v in eDict.items()}\n outFile.writerow(newDict)\n PBar.updateVal(1, \"Closing {}\".format(name))\n f.close()\n if not isinstance(_progBar, _ProgressBar):\n PBar.finish(\"Done edge list {}, {} edges written.\".format(name, count))", "def write_edge (self, node):\n \"\"\"Write one edge.\"\"\"\n self.writeln(u\" edge [\")\n self.writeln(u' label \"%s\"' % node[\"edge\"])\n self.writeln(u\" source %d\" % self.nodes[node[\"parent_url\"]][\"id\"])\n self.writeln(u\" target %d\" % node[\"id\"])\n if self.has_part(\"result\"):\n self.writeln(u\" valid %d\" % node[\"valid\"])\n self.writeln(u\" ]\")", "def save_graph(cn_topo, filename, showintfs=False, showaddrs=False):\n '''\n Save the topology to an image file \n '''\n __do_draw(cn_topo, showintfs=showintfs, showaddrs=showaddrs)\n pyp.savefig(filename)", "def draw_edges(self):\n \"\"\"\n Renders 
edges to the figure.\n \"\"\"\n for i, (start, end) in enumerate(self.graph.edges()):\n start_idx = self.nodes.index(start)\n start_x = self.node_coords[\"x\"][start_idx]\n start_y = self.node_coords[\"y\"][start_idx]\n\n end_idx = self.nodes.index(end)\n end_x = self.node_coords[\"x\"][end_idx]\n end_y = self.node_coords[\"y\"][end_idx]\n\n arc_radius = abs(end_x - start_x) / 2\n # we do min(start_x, end_x) just in case start_x is greater than\n # end_x.\n middle_x = min(start_x, end_x) + arc_radius\n middle_y = arc_radius * 2\n\n verts = [(start_x, start_y), (middle_x, middle_y), (end_x, end_y)]\n\n color = self.edge_colors[i]\n codes = [Path.MOVETO, Path.CURVE3, Path.CURVE3]\n lw = self.edge_widths[i]\n path = Path(verts, codes)\n patch = patches.PathPatch(\n path, lw=lw, edgecolor=color, zorder=1, **self.edgeprops\n )\n self.ax.add_patch(patch)", "public <T> void write(WeightedDirectedMultigraph<T> g, File f, \n Indexer<String> vertexLabels) \n throws IOException {\n\n PrintWriter pw = new PrintWriter(f);\n pw.println(\"*Vertices \" + g.order());\n if (vertexLabels != null) {\n IntIterator iter = g.vertices().iterator();\n while (iter.hasNext()) {\n int v = iter.nextInt();\n String label = vertexLabels.lookup(v);\n if (label != null)\n pw.printf(\"%d \\\"%s\\\"%n\", v, label);\n }\n }\n \n // We will flatten all the parallel edges together, which requires\n // iterative over the edges to see how many unique compacted edges there\n // are. We need to know the number for the pajek formatting\n pw.println(\"*Edges\");\n IntIterator iter = g.vertices().iterator();\n while (iter.hasNext()) {\n int v1 = iter.nextInt();\n IntIterator iter2 = g.getNeighbors(v1).iterator();\n while (iter2.hasNext()) {\n int v2 = iter2.nextInt();\n if (v1 < v2)\n continue;\n Set<? 
extends WeightedEdge> edges = g.getEdges(v1, v2);\n double fromWeight = 0;\n double toWeight = 0;\n for (WeightedEdge e : edges) {\n if (e.from() == v1)\n fromWeight += e.weight();\n else\n toWeight += e.weight();\n }\n if (fromWeight != 0)\n pw.printf(\"%d %d %f%n\", v1, v2, fromWeight);\n if (toWeight != 0)\n pw.printf(\"%d %d %f%n\", v2, v1, toWeight);\n }\n }\n \n pw.close();\n }", "def write(self, file_name, delim=',', sep='\\t'):\n \"\"\"Write a directed hypergraph to a file, where nodes are\n represented as strings.\n Each column is separated by \"sep\", and the individual\n tail nodes and head nodes are delimited by \"delim\".\n The header line is currently ignored, but columns should be of\n the format:\n tailnode1[delim]..tailnodeM[sep]headnode1[delim]..headnodeN[sep]weight\n\n As a concrete example, an arbitrary line with delim=',' and\n sep=' ' (4 spaces) may look like:\n ::\n\n x1,x2 x3,x4,x5 12\n\n which defines a hyperedge of weight 12 from a tail set containing\n nodes \"x1\" and \"x2\" to a head set containing nodes \"x3\", \"x4\", and \"x5\"\n\n \"\"\"\n out_file = open(file_name, 'w')\n\n # write first header line\n out_file.write(\"tail\" + sep + \"head\" + sep + \"weight\\n\")\n\n for hyperedge_id in self.get_hyperedge_id_set():\n line = \"\"\n # Write each tail node to the line, separated by delim\n for tail_node in self.get_hyperedge_tail(hyperedge_id):\n line += tail_node + delim\n # Remove last (extra) delim\n line = line[:-1]\n\n # Add sep between columns\n line += sep\n\n # Write each head node to the line, separated by delim\n for head_node in self.get_hyperedge_head(hyperedge_id):\n line += head_node + delim\n # Remove last (extra) delim\n line = line[:-1]\n\n # Write the weight to the line and end the line\n line += sep + str(self.get_hyperedge_weight(hyperedge_id)) + \"\\n\"\n\n out_file.write(line)\n\n out_file.close()" ]
[ 0.7161726355552673, 0.6993839740753174, 0.6887747049331665, 0.6847518086433411, 0.6826242208480835, 0.6810848712921143, 0.6732126474380493, 0.6722975969314575, 0.6669342517852783, 0.6657159924507141, 0.6601456999778748, 0.6593608260154724 ]
Add new nodes at the end of the list.
def add_nodes(self, coors, node_low_or_high=None): """ Add new nodes at the end of the list. """ last = self.lastnode if type(coors) is nm.ndarray: if len(coors.shape) == 1: coors = coors.reshape((1, coors.size)) nadd = coors.shape[0] idx = slice(last, last + nadd) else: nadd = 1 idx = self.lastnode right_dimension = coors.shape[1] self.nodes[idx, :right_dimension] = coors self.node_flag[idx] = True self.lastnode += nadd self.nnodes += nadd
[ "def append(self, *nodes: Union[AbstractNode, str]) -> None:\n \"\"\"Append new nodes after last child node.\"\"\"\n node = _to_node_list(nodes)\n self.appendChild(node)", "function(list){\n\t\t\tvar index = 0;\n\t\t\twhile(index < list.length) {\n\t\t\t\tvar node = list[index],\n\t\t\t\t\tchildNodeList = nodeMap[id(node)];\n\t\t\t\tif(childNodeList) {\n\t\t\t\t\tif(childNodeList !== list) {\n\t\t\t\t\t\tlist.splice( index, itemsInChildListTree(childNodeList), childNodeList );\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\t// Indicate the new nodes belong to this list.\n\t\t\t\t\tnodeMap[id(node)] = list;\n\t\t\t\t}\n\t\t\t\tindex++;\n\t\t\t}\n\t\t}", "private void addLastChildElements() {\n for (Node node : nodes) {\r\n if (DOMHelper.getNextSiblingElement(node) == null) {\r\n result.add(node);\r\n }\r\n }\r\n }", "private void addLastOfType() {\n for (Node node : nodes) {\r\n Node n = DOMHelper.getNextSiblingElement(node);\r\n while (n != null) {\r\n if (DOMHelper.getNodeName(n).equals(DOMHelper.getNodeName(node))) {\r\n break;\r\n }\r\n \r\n n = DOMHelper.getNextSiblingElement(n);\r\n }\r\n \r\n if (n == null) {\r\n result.add(node);\r\n }\r\n }\r\n }", "def append(self, value):\n \"\"\"Insert *value* at the end of the list of nodes.\n\n *value* can be anything parsable by :func:`.parse_anything`.\n \"\"\"\n nodes = parse_anything(value).nodes\n for node in nodes:\n self.nodes.append(node)", "private void addLastOfType() {\n\t\tfor (Node node : nodes) {\r\n\t\t\tIndex index = helper.getIndexInParent(node, true);\r\n\t\t\tif (index.index == (index.size-1))\r\n\t\t\t\tresult.add(node);\r\n\t\t}\r\n\t}", "def _expand(self, pos):\n \"\"\"Splits sublists that are more than double the load level.\n\n Updates the index when the sublist length is less than double the load\n level. This requires incrementing the nodes in a traversal from the\n leaf node to the root. For an example traversal see self._loc.\n\n \"\"\"\n _lists = self._lists\n _keys = self._keys\n _index = self._index\n\n if len(_keys[pos]) > self._dual:\n _maxes = self._maxes\n _load = self._load\n\n _lists_pos = _lists[pos]\n _keys_pos = _keys[pos]\n half = _lists_pos[_load:]\n half_keys = _keys_pos[_load:]\n del _lists_pos[_load:]\n del _keys_pos[_load:]\n _maxes[pos] = _keys_pos[-1]\n\n _lists.insert(pos + 1, half)\n _keys.insert(pos + 1, half_keys)\n _maxes.insert(pos + 1, half_keys[-1])\n\n del _index[:]\n else:\n if _index:\n child = self._offset + pos\n while child:\n _index[child] += 1\n child = (child - 1) >> 1\n _index[0] += 1", "private void fillNodesInContext(SiteNode rootNode, List<SiteNode> nodesList) {\r\n\t\t@SuppressWarnings(\"unchecked\")\r\n\t\tEnumeration<TreeNode> en = rootNode.children();\r\n\t\twhile (en.hasMoreElements()) {\r\n\t\t\tSiteNode sn = (SiteNode) en.nextElement();\r\n\t\t\tif (isInContext(sn)) {\r\n\t\t\t\tnodesList.add(sn);\r\n\t\t\t}\r\n\t\t\tfillNodesInContext(sn, nodesList);\r\n\t\t}\r\n\t}", "def _expand(self, pos):\n \"\"\"Splits sublists that are more than double the load level.\n\n Updates the index when the sublist length is less than double the load\n level. This requires incrementing the nodes in a traversal from the\n leaf node to the root. 
For an example traversal see self._loc.\n\n \"\"\"\n _lists = self._lists\n _index = self._index\n\n if len(_lists[pos]) > self._dual:\n _maxes = self._maxes\n _load = self._load\n\n _lists_pos = _lists[pos]\n half = _lists_pos[_load:]\n del _lists_pos[_load:]\n _maxes[pos] = _lists_pos[-1]\n\n _lists.insert(pos + 1, half)\n _maxes.insert(pos + 1, half[-1])\n\n del _index[:]\n else:\n if _index:\n child = self._offset + pos\n while child:\n _index[child] += 1\n child = (child - 1) >> 1\n _index[0] += 1", "private void addNthLastChild() {\n for (Node node : nodes) {\r\n int count = 1;\r\n Node n = DOMHelper.getNextSiblingElement(node);\r\n while (n != null) {\r\n count++;\r\n n = DOMHelper.getNextSiblingElement(n);\r\n }\r\n \r\n if (specifier.isMatch(count)) {\r\n result.add(node);\r\n }\r\n }\r\n }", "function fromListPush(toPush, nodes)\n{\n\tvar h = toPush.height;\n\n\t// Maybe the node on this height does not exist.\n\tif (nodes.length === h)\n\t{\n\t\tvar node = {\n\t\t\tctor: '_Array',\n\t\t\theight: h + 1,\n\t\t\ttable: [],\n\t\t\tlengths: []\n\t\t};\n\t\tnodes.push(node);\n\t}\n\n\tnodes[h].table.push(toPush);\n\tvar len = length(toPush);\n\tif (nodes[h].lengths.length > 0)\n\t{\n\t\tlen += nodes[h].lengths[nodes[h].lengths.length - 1];\n\t}\n\tnodes[h].lengths.push(len);\n\n\tif (nodes[h].table.length === M)\n\t{\n\t\tfromListPush(nodes[h], nodes);\n\t\tnodes[h] = {\n\t\t\tctor: '_Array',\n\t\t\theight: h + 1,\n\t\t\ttable: [],\n\t\t\tlengths: []\n\t\t};\n\t}\n}", "function endList()\n {\n $listItemArray = array();\n // Buils list item array\n foreach ( $this->DocumentStack[$this->CurrentStackNumber]['ChildArray'] as $listItem )\n {\n $listItemArray[] = array( 'Type' => 'listitem',\n 'Content' => $listItem );\n }\n\n $this->CurrentStackNumber -= 1;\n\n if ( $this->CurrentStackNumber == 0 )\n {\n $this->DocumentArray[] = array( 'Type' => 'list',\n 'ListType' => $this->DocumentStack[$this->CurrentStackNumber + 1]['ListType'],\n 'Content' => $listItemArray );\n }\n else\n {\n $elementArray = array( 'Type' => 'list',\n 'ListType' => $this->DocumentStack[$this->CurrentStackNumber + 1]['ListType'],\n 'Content' => $listItemArray );\n\n $this->addElement( $elementArray );\n }\n }" ]
[ 0.7512038350105286, 0.7270193696022034, 0.7220104932785034, 0.7022676467895508, 0.7018536329269409, 0.7017405033111572, 0.7012209892272949, 0.7008579969406128, 0.6987891793251038, 0.6986714005470276, 0.6984930634498596, 0.6933650374412537 ]
Add new edges at the end of the list. :param edge_direction: direction flag :param edge_group: describes the group of edges coming from the same low-resolution super node and sharing the same direction :param edge_low_or_high: zero for a low-to-low resolution edge, one for a high-to-high or high-to-low resolution edge. It is used to select the weight from the weight table.
def add_edges(self, conn, edge_direction, edge_group=None, edge_low_or_high=None): """ Add new edges at the end of the list. :param edge_direction: direction flag :param edge_group: describes group of edges from same low super node and same direction :param edge_low_or_high: zero for low to low resolution, one for high to high or high to low resolution. It is used to set weight from weight table. """ last = self.lastedge if type(conn) is nm.ndarray: nadd = conn.shape[0] idx = slice(last, last + nadd) if edge_group is None: edge_group = nm.arange(nadd) + last else: nadd = 1 idx = nm.array([last]) conn = nm.array(conn).reshape((1, 2)) if edge_group is None: edge_group = idx self.edges[idx, :] = conn self.edge_flag[idx] = True # t_start0 = time.time() # self.edge_flag_idx.extend(list(range(idx.start, idx.stop))) # self.stats["t split 082"] += time.time() - t_start0 self.edge_dir[idx] = edge_direction self.edge_group[idx] = edge_group # TODO change this just to array of low_or_high_resolution if edge_low_or_high is not None and self._edge_weight_table is not None: self.edges_weights[idx] = self._edge_weight_table[ edge_low_or_high, edge_direction ] self.lastedge += nadd self.nedges += nadd
[ "def add_edge(self, head_id, tail_id, edge_data=1, create_nodes=True):\n \"\"\"\n Adds a directed edge going from head_id to tail_id.\n Arbitrary data can be attached to the edge via edge_data.\n It may create the nodes if adding edges between nonexisting ones.\n\n :param head_id: head node\n :param tail_id: tail node\n :param edge_data: (optional) data attached to the edge\n :param create_nodes: (optional) creates the head_id or tail_id node in case they did not exist\n \"\"\"\n # shorcut\n edge = self.next_edge\n\n # add nodes if on automatic node creation\n if create_nodes:\n self.add_node(head_id)\n self.add_node(tail_id)\n\n # update the corresponding incoming and outgoing lists in the nodes\n # index 0 -> incoming edges\n # index 1 -> outgoing edges\n\n try:\n self.nodes[tail_id][0].append(edge)\n self.nodes[head_id][1].append(edge)\n except KeyError:\n raise GraphError('Invalid nodes %s -> %s' % (head_id, tail_id))\n\n # store edge information\n self.edges[edge] = (head_id, tail_id, edge_data)\n\n\n self.next_edge += 1", "def _edge_group_substitution(\n self, ndid, nsplit, idxs, sr_tab, ndoffset, ed_remove, into_or_from\n ):\n \"\"\"\n Reconnect edges.\n :param ndid: id of low resolution edges\n :param nsplit: number of split\n :param idxs: indexes of low resolution\n :param sr_tab:\n :param ndoffset:\n :param ed_remove:\n :param into_or_from: if zero, connection of input edges is done. If one, connection of output edges\n is performed.\n :return:\n \"\"\"\n # this is useful for type(idxs) == np.ndarray\n eidxs = idxs[nm.where(self.edges[idxs, 1 - into_or_from] == ndid)[0]]\n # selected_edges = self.edges[idxs, 1 - into_or_from]\n # selected_edges == ndid\n # whre = nm.where(self.edges[idxs, 1 - into_or_from] == ndid)\n # whre0 = (nm.where(self.edges[idxs, 1 - into_or_from] == ndid) == ndid)[0]\n # eidxs = [idxs[i] for i in idxs]\n for igrp in self.edges_by_group(eidxs):\n if igrp.shape[0] > 1:\n # high resolution block to high resolution block\n # all directions are the same\n directions = self.edge_dir[igrp[0]]\n edge_indexes = sr_tab[directions, :].T.flatten() + ndoffset\n # debug code\n # if len(igrp) != len(edge_indexes):\n # print(\"Problem \")\n self.edges[igrp, 1] = edge_indexes\n if self._edge_weight_table is not None:\n self.edges_weights[igrp] = self._edge_weight_table[1, directions]\n else:\n # low res block to hi res block, if into_or_from is set to 0\n # hig res block to low res block, if into_or_from is set to 1\n ed_remove.append(igrp[0])\n # number of new edges is equal to number of pixels on one side of the box (in 2D and D too)\n nnewed = np.power(nsplit, self.data.ndim - 1)\n muleidxs = nm.tile(igrp, nnewed)\n # copy the low-res edge multipletime\n newed = self.edges[muleidxs, :]\n neweddir = self.edge_dir[muleidxs]\n local_node_ids = sr_tab[\n self.edge_dir[igrp] + self.data.ndim * into_or_from, :\n ].T.flatten()\n # first or second (the actual) node id is substitued by new node indexes\n newed[:, 1 - into_or_from] = local_node_ids + ndoffset\n if self._edge_weight_table is not None:\n self.add_edges(\n newed, neweddir, self.edge_group[igrp], edge_low_or_high=1\n )\n else:\n self.add_edges(\n newed, neweddir, self.edge_group[igrp], edge_low_or_high=None\n )\n return ed_remove", "def _augment_network(self, edge_dict):\n \"\"\"Given a dictionary of edges (edge id -> feature list), add all of\n these to the CoNetwork object\n \"\"\"\n for (vertex0, vertex1), feature_list in edge_dict.items():\n edge_obj = Edge(vertex0, vertex1, feature_list)\n 
self.edges[(vertex0, vertex1)] = edge_obj\n self._add_edge_to_vertex(vertex0, edge_obj)\n self._add_edge_to_vertex(vertex1, edge_obj)", "def add_edge(self, u, v, attr_dict=None, **attr):\n \"\"\"Version of add_edge that only writes to the database once\"\"\"\n if attr_dict is None:\n attr_dict = attr\n else:\n try:\n attr_dict.update(attr)\n except AttributeError:\n raise NetworkXError(\n \"The attr_dict argument must be a dictionary.\"\n )\n if u not in self.node:\n self.node[u] = {}\n if v not in self.node:\n self.node[v] = {}\n if u in self.adj:\n datadict = self.adj[u].get(v, {})\n else:\n self.adj[u] = {v: {}}\n datadict = self.adj[u][v]\n datadict.update(attr_dict)\n self.succ[u][v] = datadict\n assert u in self.succ, \"Failed to add edge {u}->{v} ({u} not in successors)\".format(u=u, v=v)\n assert v in self.succ[u], \"Failed to add edge {u}->{v} ({v} not in succ[{u}])\".format(u=u, v=v)", "def add_edges(self, edge_list, dataframe=True):\n \"\"\"\n Add a all edges in edge_list.\n :return: A data structure with Cytoscape SUIDs for the newly-created edges.\n :param edge_list: List of (source, target, interaction) tuples *or*\n list of dicts with 'source', 'target', 'interaction', 'direction' keys.\n :param dataframe: If dataframe is True (default), return a Pandas DataFrame.\n If dataframe is False, return a list of dicts with keys 'SUID', 'source' and 'target'.\n \"\"\"\n # It might be nice to have an option pass a list of dicts instead of list of tuples\n if not isinstance(edge_list[0], dict):\n edge_list = [{'source': edge_tuple[0],\n 'target': edge_tuple[1],\n 'interaction': edge_tuple[2]}\n for edge_tuple in edge_list]\n res = self.session.post(self.__url + 'edges', data=json.dumps(edge_list), headers=HEADERS)\n check_response(res)\n edges = res.json()\n if dataframe:\n return pd.DataFrame(edges).set_index(['SUID'])\n else:\n return edges", "def add_edge(self, edgelist):\n \"\"\"\n Adds an edge from network.\n\n Parameters\n ----------\n\n edgelist : list\n a list (or list of lists) containing the i,j and t indicies to be added. 
For weighted networks list should also contain a 'weight' key.\n\n Returns\n --------\n Updates TenetoBIDS.network dataframe with new edge\n \"\"\"\n if not isinstance(edgelist[0], list):\n edgelist = [edgelist]\n teneto.utils.check_TemporalNetwork_input(edgelist, 'edgelist')\n if len(edgelist[0]) == 4:\n colnames = ['i', 'j', 't', 'weight']\n elif len(edgelist[0]) == 3:\n colnames = ['i', 'j', 't']\n if self.hdf5:\n with pd.HDFStore(self.network) as hdf:\n rows = hdf.get_storer('network').nrows\n hdf.append('network', pd.DataFrame(edgelist, columns=colnames, index=np.arange(\n rows, rows+len(edgelist))), format='table', data_columns=True)\n edgelist = np.array(edgelist)\n if np.max(edgelist[:, :2]) > self.netshape[0]:\n self.netshape[0] = np.max(edgelist[:, :2])\n if np.max(edgelist[:, 2]) > self.netshape[1]:\n self.netshape[1] = np.max(edgelist[:, 2])\n else:\n newedges = pd.DataFrame(edgelist, columns=colnames)\n self.network = pd.concat(\n [self.network, newedges], ignore_index=True, sort=True)\n self._update_network()", "def _add_edge_to_vertex(self, vertex_id, edge):\n \"\"\"Adds the edge to the Vertex object's `edges` dictionary\n \"\"\"\n connected_to = edge.connected_to(vertex_id)\n if vertex_id not in self.vertices:\n vertex_obj = Vertex(vertex_id)\n self.vertices[vertex_id] = vertex_obj\n self.vertices[vertex_id].edges[connected_to] = edge.weight", "def add_edge_grades(G, add_absolute=True): # pragma: no cover\n \"\"\"\n Get the directed grade (ie, rise over run) for each edge in the network and\n add it to the edge as an attribute. Nodes must have elevation attributes to\n use this function.\n\n Parameters\n ----------\n G : networkx multidigraph\n add_absolute : bool\n if True, also add the absolute value of the grade as an edge attribute\n\n Returns\n -------\n G : networkx multidigraph\n \"\"\"\n\n # for each edge, calculate the difference in elevation from origin to\n # destination, then divide by edge length\n for u, v, data in G.edges(keys=False, data=True):\n elevation_change = G.nodes[v]['elevation'] - G.nodes[u]['elevation']\n \n # round to ten-thousandths decimal place\n grade = round(elevation_change / data['length'], 4)\n data['grade'] = grade\n if add_absolute:\n data['grade_abs'] = abs(grade)\n\n log('Added grade data to all edges.')\n return G", "def add_edge(self,\r\n source: Node,\r\n target: Node,\r\n weight: float = 1,\r\n save_to_cache: bool = True) -> None:\r\n \"\"\"\r\n Adds an edge to the edge list that will connect the specified nodes.\r\n\r\n Arguments:\r\n source (Node): The source node of the edge.\r\n target (Node): The target node of the edge.\r\n weight (float): The weight of the created edge.\r\n save_to_cache (bool): Whether the edge should be saved to the local database.\r\n \"\"\"\r\n if not isinstance(source, Node):\r\n raise TypeError(\"Invalid source: expected Node instance, got {}.\".format(source))\r\n if not isinstance(target, Node):\r\n raise TypeError(\"Invalid target: expected Node instance, got {}.\".format(target))\r\n\r\n if source.index == target.index or\\\r\n self.get_edge_by_index(source.index, target.index) is not None:\r\n return\r\n\r\n self._edges[(source.index, target.index)] = Edge(source, target, weight)\r\n\r\n if save_to_cache:\r\n should_commit: bool = False\r\n database: GraphDatabaseInterface = self._graph.database\r\n db_edge: DBEdge = database.Edge.find_by_name(source.name, target.name)\r\n if db_edge is None:\r\n database.session.add(database.Edge(source.name, target.name, weight))\r\n should_commit = True\r\n 
elif db_edge.weight != weight:\r\n db_edge.weight = weight\r\n should_commit = True\r\n\r\n if should_commit:\r\n database.session.commit()", "def add_edge(self, source: Node,\r\n target: Node,\r\n weight: float = 1,\r\n save_to_cache: bool = True) -> None:\r\n \"\"\"\r\n Adds an edge between the specified nodes of the graph.\r\n\r\n Arguments:\r\n source (Node): The source node of the edge to add.\r\n target (Node): The target node of the edge to add.\r\n weight (float): The weight of the edge.\r\n save_to_cache (bool): Whether the edge should be saved to the local database. This\r\n argument is necessary (and `False`) when we load edges from\r\n the local cache.\r\n \"\"\"\r\n if self._edges.get_edge(source, target) is not None:\r\n return\r\n\r\n self._edges.add_edge(\r\n source=source,\r\n target=target,\r\n weight=weight,\r\n save_to_cache=save_to_cache\r\n )", "def add_edges(self, from_idx, to_idx, weight=1, symmetric=False, copy=False):\n '''Adds all from->to edges. weight may be a scalar or 1d array.\n If symmetric=True, also adds to->from edges with the same weights.'''\n raise NotImplementedError()", "def _group_edges(self):\n \"\"\"Group all edges that are topologically identical.\n\n This means that (i, source, target, polarity) are the same, then sets\n edges on parent (i.e. - group) nodes to 'Virtual' and creates a new\n edge to represent all of them.\n \"\"\"\n # edit edges on parent nodes and make new edges for them\n edges_to_add = [[], []] # [group_edges, uuid_lists]\n for e in self._edges:\n new_edge = deepcopy(e)\n new_edge['data'].pop('id', None)\n uuid_list = new_edge['data'].pop('uuid_list', [])\n # Check if edge source or target are contained in a parent\n # If source or target in parent edit edge\n # Nodes may only point within their container\n source = e['data']['source']\n target = e['data']['target']\n source_node = [x for x in self._nodes if\n x['data']['id'] == source][0]\n target_node = [x for x in self._nodes if\n x['data']['id'] == target][0]\n # If the source node is in a group, we change the source of this\n # edge to the group\n if source_node['data']['parent'] != '':\n new_edge['data']['source'] = source_node['data']['parent']\n e['data']['i'] = 'Virtual'\n # If the targete node is in a group, we change the target of this\n # edge to the group\n if target_node['data']['parent'] != '':\n new_edge['data']['target'] = target_node['data']['parent']\n e['data']['i'] = 'Virtual'\n if e['data']['i'] == 'Virtual':\n if new_edge not in edges_to_add[0]:\n edges_to_add[0].append(new_edge)\n edges_to_add[1].append(uuid_list)\n else:\n idx = edges_to_add[0].index(new_edge)\n edges_to_add[1][idx] += uuid_list\n edges_to_add[1][idx] = list(set(edges_to_add[1][idx]))\n for ze in zip(*edges_to_add):\n edge = ze[0]\n edge['data']['id'] = self._get_new_id()\n edge['data']['uuid_list'] = ze[1]\n self._edges.append(edge)" ]
[ 0.669408917427063, 0.6667589545249939, 0.6647486686706543, 0.658531665802002, 0.656326949596405, 0.6461688280105591, 0.6458262801170349, 0.6382313370704651, 0.6357985138893127, 0.6345136165618896, 0.633725643157959, 0.6332263350486755 ]
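For orientation, here is a minimal self-contained sketch of the idea behind the positive example above; the class name TinyEdgeBuffer and its fields are illustrative assumptions, not the library's API. It shows edges being appended into preallocated arrays, with each new edge taking its weight from weight_table[edge_low_or_high, edge_direction] when a weight table is available.

import numpy as np

class TinyEdgeBuffer:
    def __init__(self, max_edges, weight_table=None):
        self.edges = np.zeros((max_edges, 2), dtype=int)   # node index pairs
        self.edge_dir = np.zeros(max_edges, dtype=int)     # axis / direction flag per edge
        self.edge_weights = np.ones(max_edges)             # per-edge weight
        self.weight_table = weight_table                   # shape (2, n_directions) or None
        self.lastedge = 0

    def add_edges(self, conn, edge_direction, edge_low_or_high=None):
        conn = np.atleast_2d(conn)                         # accept one pair or an array of pairs
        nadd = conn.shape[0]
        idx = slice(self.lastedge, self.lastedge + nadd)
        self.edges[idx] = conn
        self.edge_dir[idx] = edge_direction
        if edge_low_or_high is not None and self.weight_table is not None:
            # row 0 of the table = low-to-low edges, row 1 = edges touching the fine grid
            self.edge_weights[idx] = self.weight_table[edge_low_or_high, edge_direction]
        self.lastedge += nadd

buf = TinyEdgeBuffer(10, weight_table=np.array([[1.0, 1.0], [0.25, 0.25]]))
buf.add_edges([[0, 1], [1, 2]], edge_direction=0, edge_low_or_high=0)  # coarse edges
buf.add_edges([3, 4], edge_direction=0, edge_low_or_high=1)            # fine edge
print(buf.edge_weights[:3])                                            # [1.   1.   0.25]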
Reconnect edges. :param ndid: id of the low-resolution node whose edges are reconnected :param nsplit: number of splits :param idxs: indexes of the low-resolution edges :param sr_tab: :param ndoffset: :param ed_remove: :param into_or_from: if zero, the input edges are reconnected; if one, the output edges are reconnected. :return:
def _edge_group_substitution( self, ndid, nsplit, idxs, sr_tab, ndoffset, ed_remove, into_or_from ): """ Reconnect edges. :param ndid: id of low resolution edges :param nsplit: number of split :param idxs: indexes of low resolution :param sr_tab: :param ndoffset: :param ed_remove: :param into_or_from: if zero, connection of input edges is done. If one, connection of output edges is performed. :return: """ # this is useful for type(idxs) == np.ndarray eidxs = idxs[nm.where(self.edges[idxs, 1 - into_or_from] == ndid)[0]] # selected_edges = self.edges[idxs, 1 - into_or_from] # selected_edges == ndid # whre = nm.where(self.edges[idxs, 1 - into_or_from] == ndid) # whre0 = (nm.where(self.edges[idxs, 1 - into_or_from] == ndid) == ndid)[0] # eidxs = [idxs[i] for i in idxs] for igrp in self.edges_by_group(eidxs): if igrp.shape[0] > 1: # high resolution block to high resolution block # all directions are the same directions = self.edge_dir[igrp[0]] edge_indexes = sr_tab[directions, :].T.flatten() + ndoffset # debug code # if len(igrp) != len(edge_indexes): # print("Problem ") self.edges[igrp, 1] = edge_indexes if self._edge_weight_table is not None: self.edges_weights[igrp] = self._edge_weight_table[1, directions] else: # low res block to hi res block, if into_or_from is set to 0 # hig res block to low res block, if into_or_from is set to 1 ed_remove.append(igrp[0]) # number of new edges is equal to number of pixels on one side of the box (in 2D and D too) nnewed = np.power(nsplit, self.data.ndim - 1) muleidxs = nm.tile(igrp, nnewed) # copy the low-res edge multipletime newed = self.edges[muleidxs, :] neweddir = self.edge_dir[muleidxs] local_node_ids = sr_tab[ self.edge_dir[igrp] + self.data.ndim * into_or_from, : ].T.flatten() # first or second (the actual) node id is substitued by new node indexes newed[:, 1 - into_or_from] = local_node_ids + ndoffset if self._edge_weight_table is not None: self.add_edges( newed, neweddir, self.edge_group[igrp], edge_low_or_high=1 ) else: self.add_edges( newed, neweddir, self.edge_group[igrp], edge_low_or_high=None ) return ed_remove
[ "def reconnectPorts(root: LNode, srcPort: LPort,\n oldSplits: List[Tuple[LNode, LEdge]],\n newSplitNode: LNode):\n \"\"\"\n :ivar root: top LNode instance in which are nodes and links stored\n :ivar srcPort: for SLICE it is port which is connected to input of SLICE node\n for CONCAT it is port which is connected to output of CONCAT\n :ivar oldSplits: list of tuples (node, edge) which should be disconnected from graph\n :ivar newSplitNode: new node which should be connected to graph\n \"\"\"\n # sort oldSplit nodes because they are not in same order as signals on\n # ports\n mainPortSignals = list(walkSignalPorts(srcPort))\n portOrder = {p: i for i, p in enumerate(mainPortSignals)}\n isOneToN = len(newSplitNode.west) == 1\n\n def portSortKey(x):\n n, e = x\n if e.dstNode is n:\n return portOrder[e.src]\n elif e.srcNode is n:\n return portOrder[e.dst]\n else:\n raise ValueError(\"Edge not connected to split node\", e, n)\n\n oldSplits.sort(key=portSortKey)\n newSplitPorts = [walkSignalPorts(p) for p in\n (newSplitNode.east if isOneToN else newSplitNode.west)]\n\n if isOneToN:\n newMainPort = newSplitNode.west[0]\n else:\n newMainPort = newSplitNode.east[0]\n\n for mainPort, splitInp, (oldSplitNode, e) in zip(\n mainPortSignals,\n walkSignalPorts(newMainPort),\n oldSplits):\n assert mainPort.direction != splitInp.direction, (\n mainPort, splitInp)\n\n # reconnect edge from src port to split node\n assert (e.src is mainPort and e.dstNode is oldSplitNode)\\\n or (e.dst is mainPort and e.srcNode is oldSplitNode), e\n e.remove()\n\n _newSplitPorts = [next(p) for p in newSplitPorts]\n # reconnect part from split node to other target nodes\n if oldSplitNode.name == \"CONCAT\":\n root.addEdge(splitInp, mainPort,\n originObj=e.originObj)\n\n for oldP, newP in zip(oldSplitNode.west, _newSplitPorts):\n for e in list(oldP.incomingEdges):\n root.addEdge(e.src, newP, originObj=e.originObj)\n e.remove()\n\n elif oldSplitNode.name == \"SLICE\":\n root.addEdge(mainPort, splitInp,\n originObj=e.originObj)\n\n for oldP, newP in zip(oldSplitNode.east, reversed(_newSplitPorts)):\n for e in list(oldP.outgoingEdges):\n root.addEdge(newP, e.dst, originObj=e.originObj)\n e.remove()\n else:\n raise ValueError(oldSplitNode)\n\n root.children.remove(oldSplitNode)", "def reconnect(edges, source, target)\n raise ArgumentError, \"Vertices expected as source and target\"\\\n unless (source.nil? or Vertex===source) and (target.nil? 
or Vertex===target)\n to_edges(edges).each do |edge|\n if source\n edge.source.remove_out_edge(edge)\n source.add_out_edge(edge)\n end\n if target\n edge.target.remove_in_edge(edge)\n target.add_in_edge(edge)\n end\n edge.reconnect(source, target)\n edge\n end\n end", "def remove_edges(self, from_idx, to_idx, symmetric=False, copy=False):\n '''Removes all from->to and to->from edges.\n Note: the symmetric kwarg is unused.'''\n flat_inds = self._pairs.dot((self._num_vertices, 1))\n # convert to sorted order and flatten\n to_remove = (np.minimum(from_idx, to_idx) * self._num_vertices\n + np.maximum(from_idx, to_idx))\n mask = np.in1d(flat_inds, to_remove, invert=True)\n res = self.copy() if copy else self\n res._pairs = res._pairs[mask]\n res._offdiag_mask = res._offdiag_mask[mask]\n return res", "def make_inverse_connectivity(conns, n_nod, ret_offsets=True):\n \"\"\"\n For each mesh node referenced in the connectivity conns, make a list of\n elements it belongs to.\n \"\"\"\n from itertools import chain\n\n iconn = [[] for ii in xrange( n_nod )]\n n_els = [0] * n_nod\n for ig, conn in enumerate( conns ):\n for iel, row in enumerate( conn ):\n for node in row:\n iconn[node].extend([ig, iel])\n n_els[node] += 1\n\n n_els = nm.array(n_els, dtype=nm.int32)\n iconn = nm.fromiter(chain(*iconn), nm.int32)\n\n if ret_offsets:\n offsets = nm.cumsum(nm.r_[0, n_els], dtype=nm.int32)\n return offsets, iconn\n\n else:\n return n_els, iconn", "def reconnect(force=False, **kwargs): # pylint: disable=unused-argument\n '''\n Reconnect the NAPALM proxy when the connection\n is dropped by the network device.\n The connection can be forced to be restarted\n using the ``force`` argument.\n\n .. note::\n\n This function can be used only when running proxy minions.\n\n CLI Example:\n\n .. 
code-block:: bash\n\n salt '*' napalm.reconnect\n salt '*' napalm.reconnect force=True\n '''\n default_ret = {\n 'out': None,\n 'result': True,\n 'comment': 'Already alive.'\n }\n if not salt.utils.napalm.is_proxy(__opts__):\n # regular minion is always alive\n # otherwise, the user would not be able to execute this command\n return default_ret\n is_alive = alive()\n log.debug('Is alive fetch:')\n log.debug(is_alive)\n if not is_alive.get('result', False) or\\\n not is_alive.get('out', False) or\\\n not is_alive.get('out', {}).get('is_alive', False) or\\\n force: # even if alive, but the user wants to force a restart\n proxyid = __opts__.get('proxyid') or __opts__.get('id')\n # close the connection\n log.info('Closing the NAPALM proxy connection with %s', proxyid)\n salt.utils.napalm.call(\n napalm_device, # pylint: disable=undefined-variable\n 'close',\n **{}\n )\n # and re-open\n log.info('Re-opening the NAPALM proxy connection with %s', proxyid)\n salt.utils.napalm.call(\n napalm_device, # pylint: disable=undefined-variable\n 'open',\n **{}\n )\n default_ret.update({\n 'comment': 'Connection restarted!'\n })\n return default_ret\n # otherwise, I have nothing to do here:\n return default_ret", "def localize(self, inod):\n \"\"\"\n Strips nodes not in inod and remaps connectivities.\n Omits elements where remap[conn] contains -1...\n \"\"\"\n remap = nm.empty((self.n_nod,), dtype=nm.int32)\n remap.fill(-1)\n remap[inod] = nm.arange(inod.shape[0], dtype=nm.int32)\n\n self.coors = self.coors[inod]\n self.ngroups = self.ngroups[inod]\n conns = []\n mat_ids = []\n for ig, conn in enumerate(self.conns):\n if conn.shape[0] == 0:\n continue\n\n aux = remap[conn]\n ii = nm.unique(nm.where(aux == -1)[0])\n ii = nm.setdiff1d(nm.arange(conn.shape[0], dtype=nm.int32), ii)\n conns.append(aux[ii])\n mat_ids.append(self.mat_ids[ig][ii])\n self.conns = conns\n self.mat_ids = mat_ids\n\n self._set_shape_info()", "def remove_edges_from(self, ebunch):\n \"\"\"Version of remove_edges_from that's much like normal networkx but only\n deletes once, since the database doesn't keep separate adj and\n succ mappings\n\n \"\"\"\n for e in ebunch:\n (u, v) = e[:2]\n if u in self.succ and v in self.succ[u]:\n del self.succ[u][v]", "def createConns(self):\n # SONATA method - works but same results as NeuroMLlite\n '''\n from sonata.io import File, Edge\n data = File(data_files=[self.subs('$NETWORK_DIR/excvirt_cortex_edges.h5')],\n data_type_files=[self.subs('$NETWORK_DIR/excvirt_cortex_edge_types.csv')])\n '''\n\n # NeuroMLlite Method\n self.edges_info = {}\n self.conn_info = {}\n\n synMechSubs = {'level_of_detail': 'mod', \n 'erev': 'e'}\n \n if 'edges' in self.network_config['networks']:\n for e in self.network_config['networks']['edges']:\n edges_file = self.subs(e['edges_file'])\n edge_types_file = self.subs(e['edge_types_file'])\n\n print(\"\\nLoading edges from %s and %s\"%(edges_file,edge_types_file))\n\n h5file=tables.open_file(edges_file,mode='r')\n\n print(\"Opened HDF5 file: %s\"%(h5file.filename))\n self.parse_group(h5file.root.edges)\n h5file.close()\n self.edges_info[self.current_edge] = load_csv_props(edge_types_file)\n self.current_edge = None\n\n for conn in self.conn_info:\n \n pre_node = self.conn_info[conn]['pre_node']\n post_node = self.conn_info[conn]['post_node']\n \n print(' Adding projection %s: %s -> %s '%(conn, pre_node, post_node))\n\n # add all synMechs in this projection to netParams.synMechParams\n for type in self.edges_info[conn]:\n syn_label = 
self.edges_info[conn][type]['dynamics_params'].split('.')[0]\n if syn_label not in sim.net.params.synMechParams:\n dynamics_params_file = self.subs(self.network_config['components']['synaptic_models_dir']) +'/'+self.edges_info[conn][type]['dynamics_params'] \n syn_dyn_params = load_json(dynamics_params_file)\n synMechParams = dict(syn_dyn_params)\n for k in synMechParams: # replace keys\n if k in synMechSubs:\n synMechParams[synMechSubs[k]] = synMechParams.pop(k) \n synMechParams['mod'] = self.edges_info[conn][type]['model_template']\n sim.net.params.synMechParams[syn_label] = synMechParams\n print(' Added synMech %s '%(syn_label))\n\n # add individual connections in this projection\n for i in range(len(self.conn_info[conn]['pre_id'])):\n pre_id = self.conn_info[conn]['pre_id'][i]\n post_id = self.conn_info[conn]['post_id'][i]\n pre_gid = self.cell_info[pre_node]['gid_from_id'][pre_id] \n post_gid = self.cell_info[post_node]['gid_from_id'][post_id]\n\n\n if post_gid in sim.net.gid2lid:\n\n type = self.conn_info[conn]['edge_type_id'][i]\n\n print(' Conn: type %s pop %s (id %s) -> pop %s (id %s) MAPPED TO: cell gid %s -> cell gid %s'%(type,pre_node,pre_id,post_node,post_id, pre_gid,post_gid))\n #print(self.edges_info[conn][type])\n \n connParams = {}\n postCell = sim.net.cells[sim.net.gid2lid[post_gid]]\n\n # preGid\n connParams['preGid'] = pre_gid\n\n # synMech\n connParams['synMech'] = self.edges_info[conn][type]['dynamics_params'].split('.')[0] \n \n # weight\n sign = syn_dyn_params['sign'] if 'sign' in syn_dyn_params else 1\n try:\n weight = self.conn_info[conn]['syn_weight'][i] \n except:\n weight = self.edges_info[conn][type]['syn_weight'] if 'syn_weight' in self.edges_info[conn][type] else 1.0\n connParams['weight'] = sign*weight\n \n # delay\n connParams['delay'] = self.edges_info[conn][type]['delay'] if 'delay' in self.edges_info[conn][type] else 0\n \n # sec \n sec_id = self.conn_info[conn]['sec_id'][i] \n connParams['sec'] = postCell.secLists['SONATA_sec_id'][sec_id]\n\n # loc\n connParams['loc'] = self.conn_info[conn]['sec_x'][i] \n\n # add connection\n postCell.addConn(connParams)", "function _tryRepairConnectionSide(moved, other, newDocking, points) {\n\n function needsRelayout(moved, other, points) {\n\n if (points.length < 3) {\n return true;\n }\n\n if (points.length > 4) {\n return false;\n }\n\n // relayout if two points overlap\n // this is most likely due to\n return !!find(points, function(p, idx) {\n var q = points[idx - 1];\n\n return q && pointDistance(p, q) < 3;\n });\n }\n\n function repairBendpoint(candidate, oldPeer, newPeer) {\n\n var alignment = pointsAligned(oldPeer, candidate);\n\n switch (alignment) {\n case 'v':\n // repair vertical alignment\n return { x: candidate.x, y: newPeer.y };\n case 'h':\n // repair horizontal alignment\n return { x: newPeer.x, y: candidate.y };\n }\n\n return { x: candidate.x, y: candidate. y };\n }\n\n function removeOverlapping(points, a, b) {\n var i;\n\n for (i = points.length - 2; i !== 0; i--) {\n\n // intersects (?) 
break, remove all bendpoints up to this one and relayout\n if (pointInRect(points[i], a, INTERSECTION_THRESHOLD) ||\n pointInRect(points[i], b, INTERSECTION_THRESHOLD)) {\n\n // return sliced old connection\n return points.slice(i);\n }\n }\n\n return points;\n }\n\n\n // (0) only repair what has layoutable bendpoints\n\n // (1) if only one bendpoint and on shape moved onto other shapes axis\n // (horizontally / vertically), relayout\n\n if (needsRelayout(moved, other, points)) {\n return null;\n }\n\n var oldDocking = points[0],\n newPoints = points.slice(),\n slicedPoints;\n\n // (2) repair only last line segment and only if it was layouted before\n\n newPoints[0] = newDocking;\n newPoints[1] = repairBendpoint(newPoints[1], oldDocking, newDocking);\n\n\n // (3) if shape intersects with any bendpoint after repair,\n // remove all segments up to this bendpoint and repair from there\n\n slicedPoints = removeOverlapping(newPoints, moved, other);\n\n if (slicedPoints !== newPoints) {\n return _tryRepairConnectionSide(moved, other, newDocking, slicedPoints);\n }\n\n return newPoints;\n}", "void disconnectInvalidVertices() {\n\t\t// add elements with 1 or 2 edges\n\t\topenVertexes.clear();\n\t\tfor (int idxVert = 0; idxVert < vertexes.size; idxVert++) {\n\t\t\tVertex n = vertexes.get(idxVert);\n\t\t\tif( n.connections.size() == 1 || n.connections.size()==2) {\n\t\t\t\topenVertexes.add(n);\n\t\t\t}\n\t\t}\n\n\t\t// continue until there are no changes\n\t\twhile( !openVertexes.isEmpty() ) {\n\t\t\tdirtyVertexes.clear();\n\t\t\tfor (int idxVert = 0; idxVert < openVertexes.size(); idxVert++) {\n\t\t\t\tboolean remove = false;\n\t\t\t\tVertex n = openVertexes.get(idxVert);\n\t\t\t\tif( n.connections.size() == 1 ) {\n\t\t\t\t\tremove = true;\n\t\t\t\t} else if( n.connections.size() == 2 ) {\n\t\t\t\t\tEdge ea = n.connections.get(0);\n\t\t\t\t\tEdge eb = n.connections.get(1);\n\n\t\t\t\t\t// Look for a common vertex that isn't 'n'\n\t\t\t\t\tremove = true;\n\t\t\t\t\tfor (int i = 0; i < ea.dst.connections.size(); i++) {\n\t\t\t\t\t\tVertex va = ea.dst.connections.get(i).dst;\n\t\t\t\t\t\tif( va == n )\n\t\t\t\t\t\t\tcontinue;\n\t\t\t\t\t\tfor (int j = 0; j < eb.dst.connections.size(); j++) {\n\t\t\t\t\t\t\tVertex vb = ea.dst.connections.get(j).dst;\n\t\t\t\t\t\t\tif( va == vb ) {\n\t\t\t\t\t\t\t\tremove = false;\n\t\t\t\t\t\t\t\tbreak;\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tif( remove ) {\n\t\t\t\t\t// only go through the subset referenced the disconnected. Yes there could be duplicates\n\t\t\t\t\t// not worth the time to fix that\n\t\t\t\t\tfor (int i = 0; i < n.connections.size(); i++) {\n\t\t\t\t\t\tdirtyVertexes.add( n.connections.get(i).dst );\n\t\t\t\t\t}\n\t\t\t\t\tremoveReferences(n,EdgeType.CONNECTION);\n\t\t\t\t}\n\t\t\t}\n\t\t\topenVertexes.clear();\n\t\t\topenVertexes.addAll(dirtyVertexes);\n\t\t}\n\n\t}", "def _reconnect_internal(self):\n \"\"\"\n Tries to connect to each host in the query plan until one succeeds\n or every attempt fails. If successful, a new Connection will be\n returned. 
Otherwise, :exc:`NoHostAvailable` will be raised\n with an \"errors\" arg that is a dict mapping host addresses\n to the exception that was raised when an attempt was made to open\n a connection to that host.\n \"\"\"\n errors = {}\n lbp = (\n self._cluster.load_balancing_policy\n if self._cluster._config_mode == _ConfigMode.LEGACY else\n self._cluster._default_load_balancing_policy\n )\n\n for host in lbp.make_query_plan():\n try:\n return self._try_connect(host)\n except ConnectionException as exc:\n errors[str(host.endpoint)] = exc\n log.warning(\"[control connection] Error connecting to %s:\", host, exc_info=True)\n self._cluster.signal_connection_failure(host, exc, is_host_addition=False)\n except Exception as exc:\n errors[str(host.endpoint)] = exc\n log.warning(\"[control connection] Error connecting to %s:\", host, exc_info=True)\n if self._is_shutdown:\n raise DriverException(\"[control connection] Reconnection in progress during shutdown\")\n\n raise NoHostAvailable(\"Unable to connect to any servers\", errors)", "def end_connect(self, shape, cxn_pt_idx):\n \"\"\"\n **EXPERIMENTAL** - *The current implementation only works properly\n with rectangular shapes, such as pictures and rectangles. Use with\n other shape types may cause unexpected visual alignment of the\n connected end-point and could lead to a load error if cxn_pt_idx\n exceeds the connection point count available on the connected shape.\n That said, a quick test should reveal what to expect when using this\n method with other shape types.*\n\n Connect the ending of this connector to *shape* at the connection\n point specified by *cxn_pt_idx*.\n \"\"\"\n self._connect_end_to(shape, cxn_pt_idx)\n self._move_end_to_cxn(shape, cxn_pt_idx)" ]
[ 0.7014980912208557, 0.6444256901741028, 0.6356461644172668, 0.6267616152763367, 0.6257805824279785, 0.6197803616523743, 0.6172372698783875, 0.6164812445640564, 0.6139699816703796, 0.6091693043708801, 0.6076071858406067, 0.6074635982513428 ]
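As a simplified sketch of the reconnection described above (the function name expand_lowres_edge, its arguments, and the 2D example values are assumptions, not the real data structures): one low-resolution edge entering a split block is tiled into nsplit ** (ndim - 1) fine edges, and the endpoint inside the block is replaced by sub-node ids taken from the split-resolution table plus the block's node offset.

import numpy as np

def expand_lowres_edge(edge, sr_tab_row, ndoffset):
    # edge: (low_res_node, split_node) pair; sr_tab_row: local ids of the sub-voxels
    # on the facing side of the split block; ndoffset: id of the block's first sub-node
    nnew = len(sr_tab_row)
    new_edges = np.tile(np.asarray(edge), (nnew, 1))       # copy the low-res edge nnew times
    new_edges[:, 1] = np.asarray(sr_tab_row) + ndoffset    # retarget the in-block endpoint
    return new_edges

# 2D example with nsplit = 3: the facing side of a 3x3 block has 3 sub-voxels (local ids 0, 3, 6)
print(expand_lowres_edge((7, 8), sr_tab_row=[0, 3, 6], ndoffset=100))
# [[  7 100]
#  [  7 103]
#  [  7 106]]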
Run the first step of the algorithm. The next step is split_voxels. :param vtk_filename: :return:
def generate_base_grid(self, vtk_filename=None): """ Run first step of algorithm. Next step is split_voxels :param vtk_filename: :return: """ nd, ed, ed_dir = self.gen_grid_fcn(self.data.shape, self.voxelsize) self.add_nodes(nd) self.add_edges(ed, ed_dir, edge_low_or_high=0) if vtk_filename is not None: self.write_vtk(vtk_filename)
[ "def split_voxels(self, vtk_filename=None):\n \"\"\"\n Second step of algorithm\n :return:()\n \"\"\"\n self.cache = {}\n self.stats[\"t graph 10\"] = time.time() - self.start_time\n self.msi = MultiscaleArray(self.data.shape, block_size=self.nsplit)\n\n # old implementation\n # idxs = nm.where(self.data)\n # nr, nc = self.data.shape\n # for k, (ir, ic) in enumerate(zip(*idxs)):\n # ndid = ic + ir * nc\n # self.split_voxel(ndid, self.nsplit)\n\n # new_implementation\n # for ndid in np.flatnonzero(self.data):\n # self.split_voxel(ndid, self.nsplit)\n\n # even newer implementation\n self.stats[\"t graph 11\"] = time.time() - self.start_time\n for ndid, val in enumerate(self.data.ravel()):\n t_split_start = time.time()\n if val == 0:\n if self.compute_msindex:\n self.msi.set_block_lowres(ndid, ndid)\n self.stats[\"t graph low\"] += time.time() - t_split_start\n else:\n self.split_voxel(ndid)\n self.stats[\"t graph high\"] += time.time() - t_split_start\n\n self.stats[\"t graph 13\"] = time.time() - self.start_time\n self.finish()\n if vtk_filename is not None:\n self.write_vtk(vtk_filename)\n self.stats[\"t graph 14\"] = time.time() - self.start_time", "def saveVTK(self, filename):\n \"\"\"\n Save polygons in VTK file.\n \"\"\"\n with open(filename, 'w') as f:\n f.write('# vtk DataFile Version 3.0\\n')\n f.write('pycsg output\\n')\n f.write('ASCII\\n')\n f.write('DATASET POLYDATA\\n')\n \n verts, cells, count = self.toVerticesAndPolygons()\n\n f.write('POINTS {0} float\\n'.format(len(verts)))\n for v in verts:\n f.write('{0} {1} {2}\\n'.format(v[0], v[1], v[2]))\n numCells = len(cells)\n f.write('POLYGONS {0} {1}\\n'.format(numCells, count + numCells))\n for cell in cells:\n f.write('{0} '.format(len(cell)))\n for index in cell:\n f.write('{0} '.format(index))\n f.write('\\n')", "def loadImageData(filename, spacing=()):\n \"\"\"Read and return a ``vtkImageData`` object from file.\"\"\"\n if not os.path.isfile(filename):\n colors.printc(\"~noentry File not found:\", filename, c=1)\n return None\n\n if \".tif\" in filename.lower():\n reader = vtk.vtkTIFFReader()\n elif \".slc\" in filename.lower():\n reader = vtk.vtkSLCReader()\n if not reader.CanReadFile(filename):\n colors.printc(\"~prohibited Sorry bad slc file \" + filename, c=1)\n exit(1)\n elif \".vti\" in filename.lower():\n reader = vtk.vtkXMLImageDataReader()\n elif \".mhd\" in filename.lower():\n reader = vtk.vtkMetaImageReader()\n reader.SetFileName(filename)\n reader.Update()\n image = reader.GetOutput()\n if len(spacing) == 3:\n image.SetSpacing(spacing[0], spacing[1], spacing[2])\n return image", "def vt2vtk_file(vessel_tree, outfile, text_label=None, lc_all=\"C\"):\n \"\"\"\n vessel_tree structure\n :param vessel_tree: vt structure\n :param outfile: filename with .vtk extension\n :param text_label: text label like 'porta' or 'hepatic_veins'\n :param lc_all: LC_ALL locale settings. Controls float numbers\n format (dot or colon). If is set to \"C\" dot is used.\n If None is used no processing is done\n :return:\n \"\"\"\n\n polyData = vt2polyData(vessel_tree, text_label=text_label)\n if lc_all is not None:\n import locale\n locale.setlocale(locale.LC_ALL, 'C')\n writer = vtk.vtkPolyDataWriter()\n writer.SetFileName(outfile)\n try:\n writer.SetInputData(polyData)\n except:\n logger.warning(\"old vtk is used\")\n writer.SetInput(polyData)\n writer.Write()\n return polyData", "def read(filename, attrs=None):\n \"\"\"This will read any VTK file! 
It will figure out what reader to use\n then wrap the VTK object for use in ``vtki``.\n\n Parameters\n ----------\n attrs : dict, optional\n A dictionary of attributes to call on the reader. Keys of dictionary are\n the attribute/method names and values are the arguments passed to those\n calls. If you do not have any attributes to call, pass ``None`` as the\n value.\n \"\"\"\n filename = os.path.abspath(os.path.expanduser(filename))\n ext = get_ext(filename)\n\n # From the extension, decide which reader to use\n if attrs is not None:\n reader = get_reader(filename)\n return standard_reader_routine(reader, filename, attrs=attrs)\n elif ext in '.vti': # ImageData\n return vtki.UniformGrid(filename)\n elif ext in '.vtr': # RectilinearGrid\n return vtki.RectilinearGrid(filename)\n elif ext in '.vtu': # UnstructuredGrid\n return vtki.UnstructuredGrid(filename)\n elif ext in ['.ply', '.obj', '.stl']: # PolyData\n return vtki.PolyData(filename)\n elif ext in '.vts': # StructuredGrid\n return vtki.StructuredGrid(filename)\n elif ext in ['.vtm', '.vtmb']:\n return vtki.MultiBlock(filename)\n elif ext in ['.e', '.exo']:\n return read_exodus(filename)\n elif ext in ['.vtk']:\n # Attempt to use the legacy reader...\n return read_legacy(filename)\n else:\n # Attempt find a reader in the readers mapping\n try:\n reader = get_reader(filename)\n return standard_reader_routine(reader, filename)\n except KeyError:\n pass\n raise IOError(\"This file was not able to be automatically read by vtki.\")", "def _run_cortex(fastq, indexes, params, out_base, dirs, config):\n \"\"\"Run cortex_var run_calls.pl, producing a VCF variant file.\n \"\"\"\n print(out_base)\n fastaq_index = \"{0}.fastaq_index\".format(out_base)\n se_fastq_index = \"{0}.se_fastq\".format(out_base)\n pe_fastq_index = \"{0}.pe_fastq\".format(out_base)\n reffasta_index = \"{0}.list_ref_fasta\".format(out_base)\n with open(se_fastq_index, \"w\") as out_handle:\n out_handle.write(fastq + \"\\n\")\n with open(pe_fastq_index, \"w\") as out_handle:\n out_handle.write(\"\")\n with open(fastaq_index, \"w\") as out_handle:\n out_handle.write(\"{0}\\t{1}\\t{2}\\t{2}\\n\".format(params[\"sample\"], se_fastq_index,\n pe_fastq_index))\n with open(reffasta_index, \"w\") as out_handle:\n for x in indexes[\"fasta\"]:\n out_handle.write(x + \"\\n\")\n os.environ[\"PERL5LIB\"] = \"{0}:{1}:{2}\".format(\n os.path.join(dirs[\"cortex\"], \"scripts/calling\"),\n os.path.join(dirs[\"cortex\"], \"scripts/analyse_variants/bioinf-perl/lib\"),\n os.environ.get(\"PERL5LIB\", \"\"))\n kmers = sorted(params[\"kmers\"])\n kmer_info = [\"--first_kmer\", str(kmers[0])]\n if len(kmers) > 1:\n kmer_info += [\"--last_kmer\", str(kmers[-1]),\n \"--kmer_step\", str(kmers[1] - kmers[0])]\n subprocess.check_call([\"perl\", os.path.join(dirs[\"cortex\"], \"scripts\", \"calling\", \"run_calls.pl\"),\n \"--fastaq_index\", fastaq_index,\n \"--auto_cleaning\", \"yes\", \"--bc\", \"yes\", \"--pd\", \"yes\",\n \"--outdir\", os.path.dirname(out_base), \"--outvcf\", os.path.basename(out_base),\n \"--ploidy\", str(config[\"algorithm\"].get(\"ploidy\", 2)),\n \"--stampy_hash\", indexes[\"stampy\"],\n \"--stampy_bin\", os.path.join(dirs[\"stampy\"], \"stampy.py\"),\n \"--refbindir\", os.path.dirname(indexes[\"cortex\"][0]),\n \"--list_ref_fasta\", reffasta_index,\n \"--genome_size\", str(params[\"genome_size\"]),\n \"--max_read_len\", \"30000\",\n #\"--max_var_len\", \"4000\",\n \"--format\", \"FASTQ\", \"--qthresh\", \"5\", \"--do_union\", \"yes\",\n \"--mem_height\", \"17\", \"--mem_width\", 
\"100\",\n \"--ref\", \"CoordinatesAndInCalling\", \"--workflow\", \"independent\",\n \"--vcftools_dir\", dirs[\"vcftools\"],\n \"--logfile\", \"{0}.logfile,f\".format(out_base)]\n + kmer_info)\n final = glob.glob(os.path.join(os.path.dirname(out_base), \"vcfs\",\n \"{0}*FINALcombined_BC*decomp.vcf\".format(os.path.basename(out_base))))\n # No calls, need to setup an empty file\n if len(final) != 1:\n print(\"Did not find output VCF file for {0}\".format(out_base))\n return None\n else:\n return final[0]", "def loadPolyData(filename):\n \"\"\"Load a file and return a ``vtkPolyData`` object (not a ``vtkActor``).\"\"\"\n if not os.path.exists(filename):\n colors.printc(\"~noentry Error in loadPolyData: Cannot find\", filename, c=1)\n return None\n fl = filename.lower()\n if fl.endswith(\".vtk\"):\n reader = vtk.vtkPolyDataReader()\n elif fl.endswith(\".ply\"):\n reader = vtk.vtkPLYReader()\n elif fl.endswith(\".obj\"):\n reader = vtk.vtkOBJReader()\n elif fl.endswith(\".stl\"):\n reader = vtk.vtkSTLReader()\n elif fl.endswith(\".byu\") or fl.endswith(\".g\"):\n reader = vtk.vtkBYUReader()\n elif fl.endswith(\".vtp\"):\n reader = vtk.vtkXMLPolyDataReader()\n elif fl.endswith(\".vts\"):\n reader = vtk.vtkXMLStructuredGridReader()\n elif fl.endswith(\".vtu\"):\n reader = vtk.vtkXMLUnstructuredGridReader()\n elif fl.endswith(\".txt\"):\n reader = vtk.vtkParticleReader() # (x y z scalar)\n elif fl.endswith(\".xyz\"):\n reader = vtk.vtkParticleReader()\n else:\n reader = vtk.vtkDataReader()\n reader.SetFileName(filename)\n if fl.endswith(\".vts\"): # structured grid\n reader.Update()\n gf = vtk.vtkStructuredGridGeometryFilter()\n gf.SetInputConnection(reader.GetOutputPort())\n gf.Update()\n poly = gf.GetOutput()\n elif fl.endswith(\".vtu\"): # unstructured grid\n reader.Update()\n gf = vtk.vtkGeometryFilter()\n gf.SetInputConnection(reader.GetOutputPort())\n gf.Update()\n poly = gf.GetOutput()\n else:\n try:\n reader.Update()\n poly = reader.GetOutput()\n except:\n poly = None\n\n if not poly:\n return None\n\n cleanpd = vtk.vtkCleanPolyData()\n cleanpd.SetInputData(poly)\n cleanpd.Update()\n return cleanpd.GetOutput()", "def read_texture(filename, attrs=None):\n \"\"\"Loads a ``vtkTexture`` from an image file.\"\"\"\n filename = os.path.abspath(os.path.expanduser(filename))\n try:\n # intitialize the reader using the extnesion to find it\n reader = get_reader(filename)\n image = standard_reader_routine(reader, filename, attrs=attrs)\n return vtki.image_to_texture(image)\n except KeyError:\n # Otherwise, use the imageio reader\n pass\n return vtki.numpy_to_texture(imageio.imread(filename))", "def export_plotter_vtkjs(plotter, filename, compress_arrays=False):\n \"\"\"Export a plotter's rendering window to the VTKjs format.\n \"\"\"\n sceneName = os.path.split(filename)[1]\n doCompressArrays = compress_arrays\n\n # Generate timestamp and use it to make subdirectory within the top level output dir\n timeStamp = time.strftime(\"%a-%d-%b-%Y-%H-%M-%S\")\n root_output_directory = os.path.split(filename)[0]\n output_dir = os.path.join(root_output_directory, timeStamp)\n mkdir_p(output_dir)\n\n renderers = plotter.ren_win.GetRenderers()\n\n scDirs = []\n sceneComponents = []\n textureToSave = {}\n\n for rIdx in range(renderers.GetNumberOfItems()):\n renderer = renderers.GetItemAsObject(rIdx)\n renProps = renderer.GetViewProps()\n for rpIdx in range(renProps.GetNumberOfItems()):\n renProp = renProps.GetItemAsObject(rpIdx)\n if not renProp.GetVisibility():\n continue\n if hasattr(renProp, 'GetMapper') and 
renProp.GetMapper() is not None:\n mapper = renProp.GetMapper()\n dataObject = mapper.GetInputDataObject(0, 0)\n dataset = None\n if dataObject is None:\n continue\n if dataObject.IsA('vtkCompositeDataSet'):\n if dataObject.GetNumberOfBlocks() == 1:\n dataset = dataObject.GetBlock(0)\n else:\n gf = vtk.vtkCompositeDataGeometryFilter()\n gf.SetInputData(dataObject)\n gf.Update()\n dataset = gf.GetOutput()\n else:\n dataset = mapper.GetInput()\n\n if dataset and not isinstance(dataset, (vtk.vtkPolyData, vtk.vtkImageData)):\n # All data must be PolyData surfaces\n gf = vtk.vtkGeometryFilter()\n gf.SetInputData(dataset)\n gf.Update()\n dataset = gf.GetOutputDataObject(0)\n\n\n if dataset:# and dataset.GetPoints(): # NOTE: vtkImageData does not have points\n componentName = 'data_%d_%d' % (\n rIdx, rpIdx) # getComponentName(renProp)\n scalarVisibility = mapper.GetScalarVisibility()\n #arrayAccessMode = mapper.GetArrayAccessMode()\n #colorArrayName = mapper.GetArrayName() #TODO: if arrayAccessMode == 1 else mapper.GetArrayId()\n colorMode = mapper.GetColorMode()\n scalarMode = mapper.GetScalarMode()\n lookupTable = mapper.GetLookupTable()\n\n dsAttrs = None\n arrayLocation = ''\n\n if scalarVisibility:\n if scalarMode == 3 or scalarMode == 1: # VTK_SCALAR_MODE_USE_POINT_FIELD_DATA or VTK_SCALAR_MODE_USE_POINT_DATA\n dsAttrs = dataset.GetPointData()\n arrayLocation = 'pointData'\n # VTK_SCALAR_MODE_USE_CELL_FIELD_DATA or VTK_SCALAR_MODE_USE_CELL_DATA\n elif scalarMode == 4 or scalarMode == 2:\n dsAttrs = dataset.GetCellData()\n arrayLocation = 'cellData'\n\n colorArray = None\n dataArray = None\n\n if dsAttrs:\n dataArray = dsAttrs.GetArray(0) # Force getting the active array\n\n if dataArray:\n # component = -1 => let specific instance get scalar from vector before mapping\n colorArray = lookupTable.MapScalars(\n dataArray, colorMode, -1)\n colorArrayName = '__CustomRGBColorArray__'\n colorArray.SetName(colorArrayName)\n colorMode = 0\n else:\n colorArrayName = ''\n\n color_array_info = {\n 'colorArray': colorArray,\n 'location': arrayLocation\n }\n\n scDirs.append(write_data_set('', dataset, output_dir, color_array_info,\n new_name=componentName, compress=doCompressArrays))\n\n # Handle texture if any\n textureName = None\n if renProp.GetTexture() and renProp.GetTexture().GetInput():\n textureData = renProp.GetTexture().GetInput()\n textureName = 'texture_%d' % get_object_id(textureData)\n textureToSave[textureName] = textureData\n\n representation = renProp.GetProperty().GetRepresentation(\n ) if hasattr(renProp, 'GetProperty') else 2\n colorToUse = renProp.GetProperty().GetDiffuseColor(\n ) if hasattr(renProp, 'GetProperty') else [1, 1, 1]\n if representation == 1:\n colorToUse = renProp.GetProperty().GetColor() if hasattr(\n renProp, 'GetProperty') else [1, 1, 1]\n pointSize = renProp.GetProperty().GetPointSize(\n ) if hasattr(renProp, 'GetProperty') else 1.0\n opacity = renProp.GetProperty().GetOpacity() if hasattr(\n renProp, 'GetProperty') else 1.0\n edgeVisibility = renProp.GetProperty().GetEdgeVisibility(\n ) if hasattr(renProp, 'GetProperty') else false\n\n p3dPosition = renProp.GetPosition() if renProp.IsA(\n 'vtkProp3D') else [0, 0, 0]\n p3dScale = renProp.GetScale() if renProp.IsA(\n 'vtkProp3D') else [1, 1, 1]\n p3dOrigin = renProp.GetOrigin() if renProp.IsA(\n 'vtkProp3D') else [0, 0, 0]\n p3dRotateWXYZ = renProp.GetOrientationWXYZ(\n ) if renProp.IsA('vtkProp3D') else [0, 0, 0, 0]\n\n sceneComponents.append({\n \"name\": componentName,\n \"type\": \"httpDataSetReader\",\n 
\"httpDataSetReader\": {\n \"url\": componentName\n },\n \"actor\": {\n \"origin\": p3dOrigin,\n \"scale\": p3dScale,\n \"position\": p3dPosition,\n },\n \"actorRotation\": p3dRotateWXYZ,\n \"mapper\": {\n \"colorByArrayName\": colorArrayName,\n \"colorMode\": colorMode,\n \"scalarMode\": scalarMode\n },\n \"property\": {\n \"representation\": representation,\n \"edgeVisibility\": edgeVisibility,\n \"diffuseColor\": colorToUse,\n \"pointSize\": pointSize,\n \"opacity\": opacity\n },\n \"lookupTable\": {\n \"tableRange\": lookupTable.GetRange(),\n \"hueRange\": lookupTable.GetHueRange() if hasattr(lookupTable, 'GetHueRange') else [0.5, 0]\n }\n })\n\n if textureName:\n sceneComponents[-1]['texture'] = textureName\n\n # Save texture data if any\n for key, val in textureToSave.items():\n write_data_set('', val, output_dir, None, new_name=key,\n compress=doCompressArrays)\n\n cameraClippingRange = plotter.camera.GetClippingRange()\n\n sceneDescription = {\n \"fetchGzip\": doCompressArrays,\n \"background\": plotter.background_color,\n \"camera\": {\n \"focalPoint\": plotter.camera.GetFocalPoint(),\n \"position\": plotter.camera.GetPosition(),\n \"viewUp\": plotter.camera.GetViewUp(),\n \"clippingRange\": [ elt for elt in cameraClippingRange ]\n },\n \"centerOfRotation\": plotter.camera.GetFocalPoint(),\n \"scene\": sceneComponents\n }\n\n indexFilePath = os.path.join(output_dir, 'index.json')\n with open(indexFilePath, 'w') as outfile:\n json.dump(sceneDescription, outfile, indent=4)\n\n# -----------------------------------------------------------------------------\n\n # Now zip up the results and get rid of the temp directory\n sceneFileName = os.path.join(\n root_output_directory, '%s%s' % (sceneName, FILENAME_EXTENSION))\n\n try:\n import zlib\n compression = zipfile.ZIP_DEFLATED\n except:\n compression = zipfile.ZIP_STORED\n\n zf = zipfile.ZipFile(sceneFileName, mode='w')\n\n try:\n for dirName, subdirList, fileList in os.walk(output_dir):\n for fname in fileList:\n fullPath = os.path.join(dirName, fname)\n relPath = '%s/%s' % (sceneName,\n os.path.relpath(fullPath, output_dir))\n zf.write(fullPath, arcname=relPath, compress_type=compression)\n finally:\n zf.close()\n\n shutil.rmtree(output_dir)\n\n print('Finished exporting dataset to: ', sceneFileName)", "def _read_vtc(vtc_file):\n \"\"\"Read the VTC file.\n\n Parameters\n ----------\n vtc_file : str\n path to vtc file\n\n Returns\n -------\n mpg_file : list of str\n list of avi files\n start_time : list of datetime\n list of start time of the avi files\n end_time : list of datetime\n list of end time of the avi files\n \"\"\"\n with vtc_file.open('rb') as f:\n filebytes = f.read()\n\n hdr = {}\n hdr['file_guid'] = hexlify(filebytes[:16])\n # not sure about the 4 Bytes inbetween\n\n i = 20\n mpg_file = []\n start_time = []\n end_time = []\n while i < len(filebytes):\n mpg_file.append(_make_str(unpack('c' * 261, filebytes[i:i + 261])))\n i += 261\n Location = filebytes[i:i + 16]\n correct = b'\\xff\\xfe\\xf8^\\xfc\\xdc\\xe5D\\x8f\\xae\\x19\\xf5\\xd6\"\\xb6\\xd4'\n assert Location == correct\n i += 16\n start_time.append(_filetime_to_dt(unpack('<q',\n filebytes[i:(i + 8)])[0]))\n i += 8\n end_time.append(_filetime_to_dt(unpack('<q',\n filebytes[i:(i + 8)])[0]))\n i += 8\n\n return mpg_file, start_time, end_time", "def loadStructuredPoints(filename):\n \"\"\"Load a ``vtkStructuredPoints`` object from file and return an ``Actor(vtkActor)`` object.\n\n .. 
hint:: |readStructuredPoints| |readStructuredPoints.py|_\n \"\"\"\n reader = vtk.vtkStructuredPointsReader()\n reader.SetFileName(filename)\n reader.Update()\n gf = vtk.vtkImageDataGeometryFilter()\n gf.SetInputConnection(reader.GetOutputPort())\n gf.Update()\n return Actor(gf.GetOutput())", "def run_cortex(align_bams, items, ref_file, assoc_files, region=None,\n out_file=None):\n \"\"\"Top level entry to regional de-novo based variant calling with cortex_var.\n \"\"\"\n raise NotImplementedError(\"Cortex currently out of date and needs reworking.\")\n if len(align_bams) == 1:\n align_bam = align_bams[0]\n config = items[0][\"config\"]\n else:\n raise NotImplementedError(\"Need to add multisample calling for cortex_var\")\n if out_file is None:\n out_file = \"%s-cortex.vcf\" % os.path.splitext(align_bam)[0]\n if region is not None:\n work_dir = safe_makedir(os.path.join(os.path.dirname(out_file),\n region.replace(\".\", \"_\")))\n else:\n work_dir = os.path.dirname(out_file)\n if not file_exists(out_file):\n bam.index(align_bam, config)\n variant_regions = config[\"algorithm\"].get(\"variant_regions\", None)\n if not variant_regions:\n raise ValueError(\"Only support regional variant calling with cortex_var: set variant_regions\")\n target_regions = subset_variant_regions(variant_regions, region, out_file)\n if os.path.isfile(target_regions):\n with open(target_regions) as in_handle:\n regional_vcfs = [_run_cortex_on_region(x.strip().split(\"\\t\")[:3], align_bam,\n ref_file, work_dir, out_file, config)\n for x in in_handle]\n\n combine_file = \"{0}-raw{1}\".format(*os.path.splitext(out_file))\n _combine_variants(regional_vcfs, combine_file, ref_file, config)\n _select_final_variants(combine_file, out_file, config)\n else:\n vcfutils.write_empty_vcf(out_file)\n return out_file" ]
[ 0.7477437257766724, 0.6800397038459778, 0.672272264957428, 0.6702258586883545, 0.6685526967048645, 0.6679937839508057, 0.6633126139640808, 0.6589624881744385, 0.6504949927330017, 0.6503692865371704, 0.6481964588165283, 0.6472215056419373 ]
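A toy 2D version of what the base-grid step has to produce; gen_grid_2d is an illustrative stand-in for the library's gen_grid_fcn, under the assumption that node ids are flat voxel indices. It builds one node per voxel and, for each axis, edges between neighbouring voxels tagged with that axis, so a direction-dependent weight can be applied later.

import numpy as np

def gen_grid_2d(shape):
    nodes = np.arange(np.prod(shape)).reshape(shape)     # node id = flat voxel index
    edges, edge_dir = [], []
    for axis in range(2):
        a = np.take(nodes, np.arange(shape[axis] - 1), axis=axis).ravel()  # "left" neighbours
        b = np.take(nodes, np.arange(1, shape[axis]), axis=axis).ravel()   # "right" neighbours
        edges.append(np.column_stack([a, b]))
        edge_dir.append(np.full(a.size, axis))
    return np.concatenate(edges), np.concatenate(edge_dir)

ed, ed_dir = gen_grid_2d((2, 3))
print(ed)       # axis-0 pairs [0 3] [1 4] [2 5], then axis-1 pairs [0 1] [1 2] [3 4] [4 5]
print(ed_dir)   # [0 0 0 1 1 1 1]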
Second step of the algorithm. :return:
def split_voxels(self, vtk_filename=None): """ Second step of algorithm :return:() """ self.cache = {} self.stats["t graph 10"] = time.time() - self.start_time self.msi = MultiscaleArray(self.data.shape, block_size=self.nsplit) # old implementation # idxs = nm.where(self.data) # nr, nc = self.data.shape # for k, (ir, ic) in enumerate(zip(*idxs)): # ndid = ic + ir * nc # self.split_voxel(ndid, self.nsplit) # new_implementation # for ndid in np.flatnonzero(self.data): # self.split_voxel(ndid, self.nsplit) # even newer implementation self.stats["t graph 11"] = time.time() - self.start_time for ndid, val in enumerate(self.data.ravel()): t_split_start = time.time() if val == 0: if self.compute_msindex: self.msi.set_block_lowres(ndid, ndid) self.stats["t graph low"] += time.time() - t_split_start else: self.split_voxel(ndid) self.stats["t graph high"] += time.time() - t_split_start self.stats["t graph 13"] = time.time() - self.start_time self.finish() if vtk_filename is not None: self.write_vtk(vtk_filename) self.stats["t graph 14"] = time.time() - self.start_time
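A hedged toy version of the split loop in the positive example above; split_marked_voxels and the explicit hi-res index map are assumptions standing in for the library's MultiscaleArray bookkeeping. Voxels where the low-resolution mask is zero keep their coarse node id for the whole block, while marked voxels are refined into an nsplit x nsplit block of new fine nodes.

import numpy as np

def split_marked_voxels(mask, nsplit):
    # mask: low-resolution array, nonzero where the segmentation discontinuity was found
    msindex = np.zeros(np.array(mask.shape) * nsplit, dtype=int)   # hi-res node index map
    next_id = mask.size                                            # fine ids start after the coarse ones
    for ndid, val in enumerate(mask.ravel()):
        r, c = np.unravel_index(ndid, mask.shape)
        block = np.s_[r * nsplit:(r + 1) * nsplit, c * nsplit:(c + 1) * nsplit]
        if val == 0:
            msindex[block] = ndid                                  # keep the coarse node
        else:
            ids = np.arange(nsplit * nsplit).reshape(nsplit, nsplit) + next_id
            msindex[block] = ids                                   # one fine node per sub-voxel
            next_id += nsplit * nsplit
    return msindex

print(split_marked_voxels(np.array([[0, 1], [0, 0]]), nsplit=2))
# [[0 0 4 5]
#  [0 0 6 7]
#  [2 2 3 3]
#  [2 2 3 3]]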
[ "def step2(self):\n \"\"\"step2() maps double suffices to single ones.\n so -ization ( = -ize plus -ation) maps to -ize etc. note that the\n string before the suffix must give m() > 0.\n \"\"\"\n if self.b[self.k - 1] == \"a\":\n if self.ends(\"ational\"):\n self.r(\"ate\")\n elif self.ends(\"tional\"):\n self.r(\"tion\")\n elif self.b[self.k - 1] == \"c\":\n if self.ends(\"enci\"):\n self.r(\"ence\")\n elif self.ends(\"anci\"):\n self.r(\"ance\")\n elif self.b[self.k - 1] == \"e\":\n if self.ends(\"izer\"):\n self.r(\"ize\")\n elif self.b[self.k - 1] == \"l\":\n if self.ends(\"bli\"):\n self.r(\"ble\") # --DEPARTURE--\n # To match the published algorithm, replace this phrase with\n # if self.ends(\"abli\"): self.r(\"able\")\n elif self.ends(\"alli\"):\n self.r(\"al\")\n elif self.ends(\"entli\"):\n self.r(\"ent\")\n elif self.ends(\"eli\"):\n self.r(\"e\")\n elif self.ends(\"ousli\"):\n self.r(\"ous\")\n elif self.b[self.k - 1] == \"o\":\n if self.ends(\"ization\"):\n self.r(\"ize\")\n elif self.ends(\"ation\"):\n self.r(\"ate\")\n elif self.ends(\"ator\"):\n self.r(\"ate\")\n elif self.b[self.k - 1] == \"s\":\n if self.ends(\"alism\"):\n self.r(\"al\")\n elif self.ends(\"iveness\"):\n self.r(\"ive\")\n elif self.ends(\"fulness\"):\n self.r(\"ful\")\n elif self.ends(\"ousness\"):\n self.r(\"ous\")\n elif self.b[self.k - 1] == \"t\":\n if self.ends(\"aliti\"):\n self.r(\"al\")\n elif self.ends(\"iviti\"):\n self.r(\"ive\")\n elif self.ends(\"biliti\"):\n self.r(\"ble\")\n elif self.b[self.k - 1] == \"g\": # --DEPARTURE--\n if self.ends(\"logi\"):\n self.r(\"log\")", "protected function step2()\n {\n switch ($this->b[$this->k-1]) {\n case 'a':\n if ($this->ends(\"ational\",7)) { $this->r(\"ate\",3); break; }\n if ($this->ends(\"tional\",6)) { $this->r(\"tion\",4); break; }\n break;\n case 'c':\n if ($this->ends(\"enci\",4)) { $this->r(\"ence\",4); break; }\n if ($this->ends(\"anci\",4)) { $this->r(\"ance\",4); break; }\n break;\n case 'e':\n if ($this->ends(\"izer\",4)) { $this->r(\"ize\",3); break; }\n break;\n case 'l':\n if ($this->ends(\"bli\",3)) { $this->r(\"ble\",3); break; }\n // -DEPARTURE-\n // To match the published algorithm, replace the above line with\n // if ($this->ends(\"abli\",4)) { $this->r(\"able\",4); break; }\n if ($this->ends(\"alli\",4)) { $this->r(\"al\",2); break; }\n if ($this->ends(\"entli\",5)) { $this->r(\"ent\",3); break; }\n if ($this->ends(\"eli\",3)) { $this->r(\"e\",1); break; }\n if ($this->ends(\"ousli\",5)) { $this->r(\"ous\",3); break; }\n break;\n case 'o':\n if ($this->ends(\"ization\",7)) { $this->r(\"ize\",3); break; }\n if ($this->ends(\"ation\",5)) { $this->r(\"ate\",3); break; }\n if ($this->ends(\"ator\",4)) { $this->r(\"ate\",3); break; }\n break;\n case 's':\n if ($this->ends(\"alism\",5)) { $this->r(\"al\",2); break; }\n if ($this->ends(\"iveness\",7)) { $this->r(\"ive\",3); break; }\n if ($this->ends(\"fulness\",7)) { $this->r(\"ful\",3); break; }\n if ($this->ends(\"ousness\",7)) { $this->r(\"ous\",3); break; }\n break;\n case 't':\n if ($this->ends(\"aliti\",5)) { $this->r(\"al\",2); break; }\n if ($this->ends(\"iviti\",5)) { $this->r(\"ive\",3); break; }\n if ($this->ends(\"biliti\",6)) { $this->r(\"ble\",3); break; }\n break;\n case 'g':\n if ($this->ends(\"logi\",4)) { $this->r(\"log\",3); break; }\n // -DEPARTURE-\n // To match the published algorithm delete the above line\n }\n }", "def _step2(self, word):\n \"\"\"step2() maps double suffices to single ones.\n so -ization ( = -ize plus -ation) maps to -ize etc. 
note that the\n string before the suffix must give m() > 0.\n \"\"\"\n if len(word) <= 1: # Only possible at this stage given unusual inputs to stem_word like 'oed'\n return word\n\n ch = word[-2]\n\n if ch == 'a':\n if word.endswith(\"ational\"):\n return word[:-7] + \"ate\" if self._m(word, len(word)-8) > 0 else word\n elif word.endswith(\"tional\"):\n return word[:-2] if self._m(word, len(word)-7) > 0 else word\n else:\n return word\n elif ch == 'c':\n if word.endswith(\"enci\"):\n return word[:-4] + \"ence\" if self._m(word, len(word)-5) > 0 else word\n elif word.endswith(\"anci\"):\n return word[:-4] + \"ance\" if self._m(word, len(word)-5) > 0 else word\n else:\n return word\n elif ch == 'e':\n if word.endswith(\"izer\"):\n return word[:-1] if self._m(word, len(word)-5) > 0 else word\n else:\n return word\n elif ch == 'l':\n if word.endswith(\"bli\"):\n return word[:-3] + \"ble\" if self._m(word, len(word)-4) > 0 else word # --DEPARTURE--\n # To match the published algorithm, replace \"bli\" with \"abli\" and \"ble\" with \"able\"\n elif word.endswith(\"alli\"):\n # --NEW--\n if self._m(word, len(word)-5) > 0:\n word = word[:-2]\n return self._step2(word)\n else:\n return word\n elif word.endswith(\"fulli\"):\n return word[:-2] if self._m(word, len(word)-6) else word # --NEW--\n elif word.endswith(\"entli\"):\n return word[:-2] if self._m(word, len(word)-6) else word\n elif word.endswith(\"eli\"):\n return word[:-2] if self._m(word, len(word)-4) else word\n elif word.endswith(\"ousli\"):\n return word[:-2] if self._m(word, len(word)-6) else word\n else:\n return word\n elif ch == 'o':\n if word.endswith(\"ization\"):\n return word[:-7] + \"ize\" if self._m(word, len(word)-8) else word\n elif word.endswith(\"ation\"):\n return word[:-5] + \"ate\" if self._m(word, len(word)-6) else word\n elif word.endswith(\"ator\"):\n return word[:-4] + \"ate\" if self._m(word, len(word)-5) else word\n else:\n return word\n elif ch == 's':\n if word.endswith(\"alism\"):\n return word[:-3] if self._m(word, len(word)-6) else word\n elif word.endswith(\"ness\"):\n if word.endswith(\"iveness\"):\n return word[:-4] if self._m(word, len(word)-8) else word\n elif word.endswith(\"fulness\"):\n return word[:-4] if self._m(word, len(word)-8) else word\n elif word.endswith(\"ousness\"):\n return word[:-4] if self._m(word, len(word)-8) else word\n else:\n return word\n else:\n return word\n elif ch == 't':\n if word.endswith(\"aliti\"):\n return word[:-3] if self._m(word, len(word)-6) else word\n elif word.endswith(\"iviti\"):\n return word[:-5] + \"ive\" if self._m(word, len(word)-6) else word\n elif word.endswith(\"biliti\"):\n return word[:-6] + \"ble\" if self._m(word, len(word)-7) else word\n else:\n return word\n elif ch == 'g': # --DEPARTURE--\n if word.endswith(\"logi\"):\n return word[:-1] if self._m(word, len(word) - 4) else word # --NEW-- (Barry Wilkins)\n # To match the published algorithm, pass len(word)-5 to _m instead of len(word)-4\n else:\n return word\n\n else:\n return word", "def step(self):\n \"\"\"Single iteration of t-walk algorithm\"\"\"\n\n valid_proposal = False\n\n # Use x or xprime as pivot\n self._prime = (random() < 0.5)\n\n if self.verbose > 1:\n print_(\"\\n\\nUsing x%s as pivot\" % (\" prime\" * self._prime or \"\"))\n\n if self._prime:\n # Set the value of the stochastic to the auxiliary\n self.stochastic.value = self.values[1]\n\n if self.verbose > 1:\n print_(\n self._id,\n \"setting value to auxiliary\",\n self.stochastic.value)\n\n # Current log-probability\n logp = 
self.logp_plus_loglike\n if self.verbose > 1:\n print_(\"Current logp\", logp)\n\n try:\n # Propose new value\n while not valid_proposal:\n self.propose()\n # Check that proposed value lies in support\n valid_proposal = self._support(self.stochastic.value)\n\n if not sum(self.phi):\n raise ZeroProbability\n\n # Proposed log-probability\n logp_p = self.logp_plus_loglike\n if self.verbose > 1:\n print_(\"Proposed logp\", logp_p)\n\n except ZeroProbability:\n\n # Reject proposal\n if self.verbose > 1:\n print_(self._id + ' rejecting due to ZeroProbability.')\n self.reject()\n\n if self._prime:\n # Update value list\n self.values[1] = self.stochastic.value\n # Revert to stochastic's value for next iteration\n self.stochastic.value = self.values[0]\n\n if self.verbose > 1:\n print_(\n self._id,\n \"reverting stochastic to primary value\",\n self.stochastic.value)\n else:\n # Update value list\n self.values[0] = self.stochastic.value\n\n if self.verbose > 1:\n print_(self._id + ' returning.')\n return\n\n if self.verbose > 1:\n print_('logp_p - logp: ', logp_p - logp)\n\n # Evaluate acceptance ratio\n if log(random()) > (logp_p - logp + self.hastings_factor):\n\n # Revert s if fail\n self.reject()\n\n else:\n # Increment accepted count\n self.accepted[self.current_kernel] += 1\n if self.verbose > 1:\n print_(self._id + ' accepting')\n\n if self._prime:\n # Update value list\n self.values[1] = self.stochastic.value\n # Revert to stochastic's value for next iteration\n self.stochastic.value = self.values[0]\n\n if self.verbose > 1:\n print_(\n self._id,\n \"reverting stochastic to primary value\",\n self.stochastic.value)\n\n else:\n # Update value list\n self.values[0] = self.stochastic.value", "def step2(self, atv_pub_key, atv_salt):\n \"\"\"Second pairing step.\"\"\"\n pk_str = binascii.hexlify(atv_pub_key).decode()\n salt = binascii.hexlify(atv_salt).decode()\n self._client_session_key, _, _ = self._session.process(pk_str, salt)\n\n if not self._session.verify_proof(self._session.key_proof_hash):\n raise exceptions.AuthenticationError('proofs do not match (mitm?)')\n\n pub_key = binascii.unhexlify(self._session.public)\n proof = binascii.unhexlify(self._session.key_proof)\n log_binary(_LOGGER, 'Client', Public=pub_key, Proof=proof)\n return pub_key, proof", "def _second(self):\n \"\"\"Find Smith normal form for Right-low 2x2 matrix\"\"\"\n\n self._second_one_loop()\n A = self._A\n if A[2, 1] == 0:\n return True\n elif A[2, 1] % A[1, 1] == 0:\n self._second_finalize()\n self._Ps += self._L\n self._L = []\n return True\n else:\n return False", "def __step1(self):\n \"\"\"\n For each row of the matrix, find the smallest element and\n subtract it from every element in its row. Go to Step 2.\n \"\"\"\n C = self.C\n n = self.n\n for i in range(n):\n minval = min(self.C[i])\n # Find the minimum value for this row and subtract that minimum\n # from every element in the row.\n for j in range(n):\n self.C[i][j] -= minval\n\n return 2", "def __step2(self):\n \"\"\"\n Find a zero (Z) in the resulting matrix. If there is no starred\n zero in its row or column, star Z. Repeat for each element in the\n matrix. 
Go to Step 3.\n \"\"\"\n n = self.n\n for i in range(n):\n for j in range(n):\n if (self.C[i][j] == 0) and \\\n (not self.col_covered[j]) and \\\n (not self.row_covered[i]):\n self.marked[i][j] = 1\n self.col_covered[j] = True\n self.row_covered[i] = True\n\n self.__clear_covers()\n return 3", "def process_results(self):\n\t\t\"\"\"\n\t\tfunction that is called when a stage is completed and\n\t\tneeds to be analyzed befor further computations.\n\n\t\tThe code here implements the original SH algorithms by\n\t\tadvancing the k-best (lowest loss) configurations at the current\n\t\tbudget. k is defined by the num_configs list (see __init__)\n\t\tand the current stage value.\n\n\t\tFor more advanced methods like resampling after each stage,\n\t\toverload this function only.\n\t\t\"\"\"\n\t\tself.stage += 1\n\t\t\n\t\t# collect all config_ids that need to be compared\n\t\tconfig_ids = list(filter(lambda cid: self.data[cid].status == 'REVIEW', self.data.keys()))\n\n\t\tif (self.stage >= len(self.num_configs)):\n\t\t\tself.finish_up()\n\t\t\treturn\n\n\t\tbudgets = [self.data[cid].budget for cid in config_ids]\n\t\tif len(set(budgets)) > 1:\n\t\t\traise RuntimeError('Not all configurations have the same budget!')\n\t\tbudget = self.budgets[self.stage-1]\n\n\t\tlosses = np.array([self.data[cid].results[budget]['loss'] for cid in config_ids])\n\n\t\tadvance = self._advance_to_next_stage(config_ids, losses)\n\n\t\tfor i, a in enumerate(advance):\n\t\t\tif a:\n\t\t\t\tself.logger.debug('ITERATION: Advancing config %s to next budget %f'%(config_ids[i], self.budgets[self.stage]))\n\n\t\tfor i, cid in enumerate(config_ids):\n\t\t\tif advance[i]:\n\t\t\t\tself.data[cid].status = 'QUEUED'\n\t\t\t\tself.data[cid].budget = self.budgets[self.stage]\n\t\t\t\tself.actual_num_configs[self.stage] += 1\n\t\t\telse:\n\t\t\t\tself.data[cid].status = 'TERMINATED'", "def step2(self, pub_key, salt):\n \"\"\"Second authentication step.\"\"\"\n self._check_initialized()\n pk_str = binascii.hexlify(pub_key).decode()\n salt = binascii.hexlify(salt).decode()\n self.client_session_key, _, _ = self.session.process(pk_str, salt)\n _LOGGER.debug('Client session key: %s', self.client_session_key)\n\n # Generate client public and session key proof.\n client_public = self.session.public\n client_session_key_proof = self.session.key_proof\n _LOGGER.debug('Client public: %s, proof: %s',\n client_public, client_session_key_proof)\n\n if not self.session.verify_proof(self.session.key_proof_hash):\n raise AuthenticationError('proofs do not match (mitm?)')\n return client_public, client_session_key_proof", "def run_all2(protgroup, memornot, subsequences, base_outdir,\n protgroup_dict, protein_feathers_dir, date, errfile, impute_counts=True,\n cutoff_num_proteins=0, core_only_genes=None,\n length_filter_pid=.8, remove_correlated_feats=True,\n force_rerun_counts=False, force_rerun_percentages=False, force_rerun_pca=False):\n \"\"\"run_all but ignoring observations before pca\"\"\"\n import ssbio.utils\n\n # Need to set multiprocessing limit for scipy/numpy stuff if parallelizing anything\n import os\n os.environ['OMP_NUM_THREADS'] = '1'\n\n # First, filter down the protein group to the membrane/nonmembrane definition\n prots_filtered_feathers = get_protein_feather_paths(protgroup=protgroup, memornot=memornot,\n protgroup_dict=protgroup_dict,\n protein_feathers_dir=protein_feathers_dir,\n core_only_genes=core_only_genes)\n num_proteins = len(prots_filtered_feathers)\n if num_proteins <= cutoff_num_proteins:\n return\n\n # Make output 
directories\n protscale = 'proteome_unscaled'\n outdir_d0 = ssbio.utils.make_dir(op.join(base_outdir, protscale))\n outdir_d1 = ssbio.utils.make_dir(op.join(outdir_d0, '-'.join(memornot)))\n outdir_final = ssbio.utils.make_dir(op.join(outdir_d1, '-'.join(protgroup)))\n\n if impute_counts:\n big_strain_counts_df = get_proteome_counts_impute_missing(prots_filtered_feathers=prots_filtered_feathers,\n outpath=op.join(outdir_final,\n '{}-subsequence_proteome_IMP.fthr'.format(\n date)),\n length_filter_pid=length_filter_pid,\n force_rerun=force_rerun_counts)\n\n big_strain_percents_df = get_proteome_percentages(counts_df=big_strain_counts_df,\n outpath=op.join(outdir_final,\n '{}-subsequence_proteome_perc_IMP.fthr'.format(\n date)),\n force_rerun=force_rerun_percentages)\n pca_pickle = op.join(outdir_final, '{}-subsequence_pca.pckl'.format(date))\n # Divide by totals to get percentages in a new dataframe\n else:\n try:\n big_strain_percents_df = get_proteome_correct_percentages(prots_filtered_feathers=prots_filtered_feathers,\n outpath=op.join(outdir_final,\n '{}-subsequence_proteome_perc_AVG.fthr'.format(\n date)),\n length_filter_pid=length_filter_pid,\n force_rerun=force_rerun_percentages)\n pca_pickle = op.join(outdir_final, '{}-subsequence_pca_AVG.pckl'.format(date))\n except:\n with open(errfile, \"a\") as myfile:\n myfile.write('PERCENTAGES ERR: ' + '-'.join(memornot) + '\\t' + '-'.join(protgroup) + \"\\n\")\n return\n\n\n\n if ssbio.utils.force_rerun(flag=force_rerun_pca, outfile=pca_pickle):\n\n # Then, get filters for rows of the loaded feathers for interested subsequences\n keep_subsequences = get_interested_subsequences(subsequences=subsequences)\n\n # Some numbers: number of features\n num_feats = len(big_strain_percents_df)\n\n # Make an unwieldy title\n big_title = 'LOC={0}; PROTGROUP={1};\\n' \\\n 'NUMPROTS={2}; NUMFEATS={3}'.format('-'.join(memornot),\n '-'.join(protgroup),\n num_proteins,\n num_feats)\n\n # Run PCA and make plots\n runner = PCAMultiROS(features_df=big_strain_percents_df, observations_df=pd.DataFrame(), plot_title=big_title)\n try:\n runner.clean_data(keep_features=keep_subsequences, remove_correlated_feats=remove_correlated_feats)\n except:\n with open(errfile, \"a\") as myfile:\n myfile.write(\n 'CLEAN ERR: ' + '-'.join(memornot) + '\\t' + '-'.join(protgroup) + \"\\n\")\n return\n # try:\n runner.run_pca()\n # except:\n # with open(errfile, \"a\") as myfile:\n # myfile.write(\n # 'PCA ERR: ' + '-'.join(memornot) + '\\t' + '-'.join(protgroup) + \"\\n\")\n # return\n with open(pca_pickle, 'wb') as f:\n pickle.dump(runner, f)\n else:\n with open(pca_pickle, 'rb') as f:\n runner = pickle.load(f)", "def top2_full(votes):\n \"\"\"\n Description:\n Top 2 alternatives 16 moment conditions values calculation\n Parameters:\n votes: ordinal preference data (numpy ndarray of integers)\n \"\"\"\n res = np.zeros(16)\n for vote in votes:\n # the top ranked alternative is in vote[0][0], second in vote[1][0]\n if vote[0][0] == 0: # i.e. the first alt is ranked first\n res[0] += 1\n if vote[1][0] == 1: # i.e. 
the second alt is ranked second\n res[4] += 1\n elif vote[1][0] == 2:\n res[5] += 1\n elif vote[1][0] == 3:\n res[6] += 1\n elif vote[0][0] == 1:\n res[1] += 1\n if vote[1][0] == 0:\n res[7] += 1\n elif vote[1][0] == 2:\n res[8] += 1\n elif vote[1][0] == 3:\n res[9] += 1\n elif vote[0][0] == 2:\n res[2] += 1\n if vote[1][0] == 0:\n res[10] += 1\n elif vote[1][0] == 1:\n res[11] += 1\n elif vote[1][0] == 3:\n res[12] += 1\n elif vote[0][0] == 3:\n res[3] += 1\n if vote[1][0] == 0:\n res[13] += 1\n elif vote[1][0] == 1:\n res[14] += 1\n elif vote[1][0] == 2:\n res[15] += 1\n res /= len(votes)\n return res" ]
[ 0.7399905920028687, 0.7189586758613586, 0.6995337605476379, 0.6849796772003174, 0.6814959645271301, 0.6771830320358276, 0.6719550490379333, 0.6714085340499878, 0.6711153388023376, 0.654771625995636, 0.653181791305542, 0.6484779119491577 ]
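For orientation, the list of floating-point values above appears to hold one similarity score per code snippet in the list that precedes it, in the same order (the values here are already in descending order). A minimal pairing sketch, with both lists truncated to stand-in values:

candidates = ["def step2(self): ...", "protected function step2() ...", "def _step2(self, word): ..."]  # truncated stand-ins
scores = [0.7399905920028687, 0.7189586758613586, 0.6995337605476379]  # first three values above

for score, snippet in zip(scores, candidates):  # highest-scoring candidate comes first
    print(round(score, 3), snippet[:30])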
Multiply values in block
def mul_block(self, index, val): """Multiply values in block""" self._prepare_cache_slice(index) self.msinds[self.cache_slice] *= val
[ "function blockMult(a, b, rows, cols) {\n // For small matrices, resort to naive multiplication.\n if (rows <= 512 || cols <= 512) {\n return a.mmul(b); // a is equivalent to this\n }\n\n // Apply dynamic padding.\n if ((rows % 2 === 1) && (cols % 2 === 1)) {\n a = embed(a, rows + 1, cols + 1);\n b = embed(b, rows + 1, cols + 1);\n } else if (rows % 2 === 1) {\n a = embed(a, rows + 1, cols);\n b = embed(b, rows + 1, cols);\n } else if (cols % 2 === 1) {\n a = embed(a, rows, cols + 1);\n b = embed(b, rows, cols + 1);\n }\n\n var halfRows = parseInt(a.rows / 2);\n var halfCols = parseInt(a.columns / 2);\n // Subdivide input matrices.\n var a11 = a.subMatrix(0, halfRows - 1, 0, halfCols - 1);\n var b11 = b.subMatrix(0, halfRows - 1, 0, halfCols - 1);\n\n var a12 = a.subMatrix(0, halfRows - 1, halfCols, a.columns - 1);\n var b12 = b.subMatrix(0, halfRows - 1, halfCols, b.columns - 1);\n\n var a21 = a.subMatrix(halfRows, a.rows - 1, 0, halfCols - 1);\n var b21 = b.subMatrix(halfRows, b.rows - 1, 0, halfCols - 1);\n\n var a22 = a.subMatrix(halfRows, a.rows - 1, halfCols, a.columns - 1);\n var b22 = b.subMatrix(halfRows, b.rows - 1, halfCols, b.columns - 1);\n\n // Compute intermediate values.\n var m1 = blockMult(Matrix.add(a11, a22), Matrix.add(b11, b22), halfRows, halfCols);\n var m2 = blockMult(Matrix.add(a21, a22), b11, halfRows, halfCols);\n var m3 = blockMult(a11, Matrix.sub(b12, b22), halfRows, halfCols);\n var m4 = blockMult(a22, Matrix.sub(b21, b11), halfRows, halfCols);\n var m5 = blockMult(Matrix.add(a11, a12), b22, halfRows, halfCols);\n var m6 = blockMult(Matrix.sub(a21, a11), Matrix.add(b11, b12), halfRows, halfCols);\n var m7 = blockMult(Matrix.sub(a12, a22), Matrix.add(b21, b22), halfRows, halfCols);\n\n // Combine intermediate values into the output.\n var c11 = Matrix.add(m1, m4);\n c11.sub(m5);\n c11.add(m7);\n var c12 = Matrix.add(m3, m5);\n var c21 = Matrix.add(m2, m4);\n var c22 = Matrix.sub(m1, m2);\n c22.add(m3);\n c22.add(m6);\n\n //Crop output to the desired size (undo dynamic padding).\n var resultat = Matrix.zeros(2 * c11.rows, 2 * c11.columns);\n resultat = resultat.setSubMatrix(c11, 0, 0);\n resultat = resultat.setSubMatrix(c12, c11.rows, 0);\n resultat = resultat.setSubMatrix(c21, 0, c11.columns);\n resultat = resultat.setSubMatrix(c22, c11.rows, c11.columns);\n return resultat.subMatrix(0, rows - 1, 0, cols - 1);\n }", "protected final void multiply() {\r\n\t\tfinal int step = blockStripeSize, blockSize = blockStripeSize * blockStripeSize;\r\n\r\n\t\tfor (int m = fromM; m < toM; m += step) {\r\n\t\t\tfinal int aRows = matrixA.layout.getRowsInBlock(m);\r\n\r\n\t\t\tfor (int k = fromK; k < toK; k += step) {\r\n\t\t\t\tfinal int bCols = matrixB.layout.getColumnsInBlock(k);\r\n\r\n\t\t\t\tfinal double[] cBlock = new double[aRows * bCols];\r\n\r\n\t\t\t\tfor (int n = fromN; n < toN; n += step) {\r\n\r\n\t\t\t\t\t// ensure a and b are in optimal block order before\r\n\t\t\t\t\t// multiplication\r\n\t\t\t\t\tfinal double[] aBlock = matrixA.layout.toRowMajorBlock(matrixA, m, n);\r\n\t\t\t\t\tfinal double[] bBlock = matrixB.layout.toColMajorBlock(matrixB, n, k);\r\n\r\n\t\t\t\t\tif (aBlock != null && bBlock != null) {\r\n\t\t\t\t\t\tif (aBlock.length == blockSize && bBlock.length == blockSize) {\r\n\t\t\t\t\t\t\tmultiplyAxB(aBlock, bBlock, cBlock, step);\r\n\t\t\t\t\t\t} else {\r\n\t\t\t\t\t\t\tint aCols = aBlock.length / aRows;\r\n\t\t\t\t\t\t\tint bRows = bBlock.length / bCols;\r\n\t\t\t\t\t\t\tverifyTrue(aCols == bRows, 
\"aCols!=bRows\");\r\n\t\t\t\t\t\t\tmultiplyRowMajorTimesColumnMajorBlocks(aBlock, bBlock, cBlock, aRows,\r\n\t\t\t\t\t\t\t\t\taCols, bCols);\r\n\t\t\t\t\t\t}\r\n\t\t\t\t\t}\r\n\t\t\t\t}\r\n\r\n\t\t\t\tmatrixC.addBlockData(m, k, cBlock);\r\n\t\t\t}\r\n\t\t}\r\n\t}", "private static void multiplyAxB(final double[] aBlock, final double[] bBlock,\r\n\t\t\tfinal double[] cBlock, final int step) {\r\n\t\tfinal int blockStripeMini = step % 3;\r\n\t\tfinal int blockStripeMaxi = step / 3;\r\n\t\tfinal int blockArea = step * step;\r\n\r\n\t\tfor (int iL = 0; iL < blockArea; iL += step) {\r\n\t\t\tint rc = iL;\r\n\r\n\t\t\tfor (int kL = 0; kL < blockArea; kL += step) {\r\n\t\t\t\tint ra = iL;\r\n\t\t\t\tint rb = kL;\r\n\t\t\t\tdouble sum = 0.0d;\r\n\r\n\t\t\t\tfor (int jL = blockStripeMini; --jL >= 0;) {\r\n\t\t\t\t\tsum += aBlock[ra++] * bBlock[rb++];\r\n\t\t\t\t}\r\n\r\n\t\t\t\t// loop unrolling\r\n\t\t\t\tfor (int jL = blockStripeMaxi; --jL >= 0;) {\r\n\t\t\t\t\tsum += aBlock[ra++] * bBlock[rb++] //\r\n\t\t\t\t\t\t\t+ aBlock[ra++] * bBlock[rb++] //\r\n\t\t\t\t\t\t\t+ aBlock[ra++] * bBlock[rb++];\r\n\t\t\t\t}\r\n\r\n\t\t\t\tcBlock[rc++] += sum;\r\n\t\t\t}\r\n\t\t}\r\n\t}", "def matrix_multiplication_blockwise(self, matrix, blocksize):\n \"\"\"\n http://en.wikipedia.org/wiki/Block_matrix#Block_matrix_multiplication\n \"\"\"\n #Create the blockwise version of self and matrix\n selfBlockwise = self.matrix_to_blockmatrix(blocksize)\n matrixBlockwise = matrix.matrix_to_blockmatrix(blocksize)\n\n return (selfBlockwise * matrixBlockwise).flatten()", "public static void mult(int blockLength ,\n DSubmatrixD1 A , DSubmatrixD1 B ,\n DSubmatrixD1 C )\n {\n for( int i = A.row0; i < A.row1; i += blockLength ) {\n int heightA = Math.min( blockLength , A.row1 - i );\n\n for( int j = B.col0; j < B.col1; j += blockLength ) {\n int widthB = Math.min( blockLength , B.col1 - j );\n\n int indexC = (i-A.row0+C.row0)*C.original.numCols + (j-B.col0+C.col0)*heightA;\n\n for( int k = A.col0; k < A.col1; k += blockLength ) {\n int widthA = Math.min( blockLength , A.col1 - k );\n\n int indexA = i*A.original.numCols + k*heightA;\n int indexB = (k-A.col0+B.row0)*B.original.numCols + j*widthA;\n\n if( k == A.col0 )\n blockMultSet(A.original.data,B.original.data,C.original.data,\n indexA,indexB,indexC,heightA,widthA,widthB);\n else\n blockMultPlus(A.original.data,B.original.data,C.original.data,\n indexA,indexB,indexC,heightA,widthA,widthB);\n }\n }\n }\n }", "def multiplicador(options = {}, &_block)\n raise ArgumentError, 'Número inválido' unless is_number?\n raise ArgumentError, 'Fatores não podem estar em branco' unless options[:fatores]\n\n total = 0\n multiplicador_posicao = 0\n fatores = options[:fatores]\n numeros = options[:reverse].nil? ? to_s.split(//).reverse! : to_s.split(//)\n\n numeros.each do |caracter|\n fator = fatores[multiplicador_posicao]\n total += block_given? ? yield(caracter, fator) : (caracter.to_i * fator)\n multiplicador_posicao = multiplicador_posicao < (fatores.size - 1) ? 
(multiplicador_posicao + 1) : 0\n end\n total\n end", "public void multiply(double val) {\n for (int c = 0; c < this.values.length; c++) {\n multiplyValue(c, val);\n }\n }", "public static void mult(DMatrixRBlock A , DMatrixRBlock B , DMatrixRBlock C )\n {\n if( A.numCols != B.numRows )\n throw new IllegalArgumentException(\"Columns in A are incompatible with rows in B\");\n if( A.numRows != C.numRows )\n throw new IllegalArgumentException(\"Rows in A are incompatible with rows in C\");\n if( B.numCols != C.numCols )\n throw new IllegalArgumentException(\"Columns in B are incompatible with columns in C\");\n if( A.blockLength != B.blockLength || A.blockLength != C.blockLength )\n throw new IllegalArgumentException(\"Block lengths are not all the same.\");\n\n final int blockLength = A.blockLength;\n\n DSubmatrixD1 Asub = new DSubmatrixD1(A,0, A.numRows, 0, A.numCols);\n DSubmatrixD1 Bsub = new DSubmatrixD1(B,0, B.numRows, 0, B.numCols);\n DSubmatrixD1 Csub = new DSubmatrixD1(C,0, C.numRows, 0, C.numCols);\n\n MatrixMult_DDRB.mult(blockLength,Asub,Bsub,Csub);\n }", "def multiply(**kwargs):\n \"\"\"Simple postprocessor where we multiply the input values.\n\n :param kwargs: Dictionary of values to multiply\n :type kwargs: dict\n\n :return: The result.\n :rtype: float\n \"\"\"\n result = 1\n for i in list(kwargs.values()):\n if not i:\n # If one value is null, we return null.\n return i\n result *= i\n return result", "public static void multiply( GrayU8 input , double value , GrayU8 output ) {\n\n\t\toutput.reshape(input.width,input.height);\n\n\t\tint columns = input.width;\n\t\tif(BoofConcurrency.USE_CONCURRENT ) {\n\t\t\tImplPixelMath_MT.multiplyU_A(input.data,input.startIndex,input.stride,value , \n\t\t\t\t\toutput.data,output.startIndex,output.stride,\n\t\t\t\t\tinput.height,columns);\n\t\t} else {\n\t\t\tImplPixelMath.multiplyU_A(input.data,input.startIndex,input.stride,value , \n\t\t\t\t\toutput.data,output.startIndex,output.stride,\n\t\t\t\t\tinput.height,columns);\n\t\t}\n\t}", "function multiply(out, a, b) {\n out[0] = a[0] * b[0];\n out[1] = a[1] * b[1];\n out[2] = a[2] * b[2];\n return out;\n}", "private static void mult(int[] src, int srcLen, int value, int[] dst) {\n long val = value & LONG_MASK;\n long carry = 0;\n for (int i = 0; i < srcLen; i++) {\n long product = (src[i] & LONG_MASK) * val + carry;\n dst[i] = (int) product;\n carry = product >>> 32;\n }\n dst[srcLen] = (int) carry;\n }" ]
[ 0.7522327899932861, 0.7504237294197083, 0.7500613927841187, 0.7392536401748657, 0.7338317036628723, 0.7292303442955017, 0.7271780371665955, 0.7201430201530457, 0.7198589444160461, 0.7174651622772217, 0.7109858989715576, 0.7057550549507141 ]
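Since MultiscaleArray and _prepare_cache_slice are not shown in this row, here is a self-contained sketch of what multiplying one block of a 2D index array looks like; the block-to-slice arithmetic below is an assumed, simplified stand-in for the real cache-slice computation in mul_block:

import numpy as np

block_size = 2
msinds = np.arange(16, dtype=float).reshape(4, 4)  # toy multiscale index array

def block_slice(index, blocks_per_row=2, bs=block_size):
    # map a flat block index to the 2D slice covering that block
    r, c = divmod(index, blocks_per_row)
    return np.s_[r * bs:(r + 1) * bs, c * bs:(c + 1) * bs]

msinds[block_slice(3)] *= 10  # multiply values in the last 2x2 block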
Tool to make simple feature functions take features from feature array by seeds. :param fv: ndarray with linearized features. Its shape is MxN, where M is number of image pixels and N is number of features :param seeds: ndarray with seeds. Does not have to be linear. :param unique_cls: list of used seed classes, like [1, 2] :return: fv_selection, seeds_selection - selection from feature vector and selection from seeds
def select_from_fv_by_seeds(fv, seeds, unique_cls): """ Tool to make simple feature functions take features from feature array by seeds. :param fv: ndarray with lineariezed feature. It's shape is MxN, where M is number of image pixels and N is number of features :param seeds: ndarray with seeds. Does not to be linear. :param unique_cls: number of used seeds clases. Like [1, 2] :return: fv_selection, seeds_selection - selection from feature vector and selection from seeds """ logger.debug("seeds" + str(seeds)) # fvlin = fv.reshape(-1, int(fv.size/seeds.size)) expected_shape = [seeds.size, int(fv.size/seeds.size)] if fv.shape[0] != expected_shape[0] or fv.shape[1] != expected_shape[1]: raise AssertionError("Wrong shape of input feature vector array fv") # sd = seeds.reshape(-1, 1) selection = np.in1d(seeds, unique_cls) fv_selection = fv[selection] seeds_selection = seeds.flatten()[selection] # sd = sd[] return fv_selection, seeds_selection
[ "def return_fv_by_seeds(fv, seeds=None, unique_cls=None):\n \"\"\"\n Return features selected by seeds and unique_cls or selection from features and corresponding seed classes.\n\n :param fv: ndarray with lineariezed feature. It's shape is MxN, where M is number of image pixels and N is number\n of features\n :param seeds: ndarray with seeds. Does not to be linear.\n :param unique_cls: number of used seeds clases. Like [1, 2]\n :return: fv, sd - selection from feature vector and selection from seeds or just fv for whole image\n \"\"\"\n if seeds is not None:\n if unique_cls is not None:\n return select_from_fv_by_seeds(fv, seeds, unique_cls)\n else:\n raise AssertionError(\"Input unique_cls has to be not None if seeds is not None.\")\n else:\n return fv", "def fit_from_image(self, data, voxelsize, seeds, unique_cls):\n \"\"\"\n This Method allows computes feature vector and train model.\n\n :cls: list of index number of requested classes in seeds\n \"\"\"\n fvs, clsselected = self.features_from_image(data, voxelsize, seeds, unique_cls)\n self.fit(fvs, clsselected)", "def features_from_image(\n self, data, voxelsize, seeds=None, unique_cls=None\n ): # , voxels=None):\n \"\"\"\n Input data is 3d image\n\n :param data: is 3d image\n :param seeds: ndimage with same shape as data, nonzero values means seeds.\n :param unique_cls: can select only fv for seeds from specific class.\n f.e. unique_cls = [1, 2] ignores label 0\n\n funcion is called twice in graph cut\n first call is with all params, second is only with data.\n\n based on self.modelparams['fv_type'] the feature vector is computed\n keywords \"intensity\", \"voxels\", \"fv001\", \"fv_extern\" can be used.\n modelparams['fv_type'] = 'fv_extern' allows to use external fv function\n\n Example of exter feature function. 
For easier implementation of return values use function return_fv_by_seeds().\n\n def fv_function(data, voxelsize, seeds=None, cl=None):\n data2 = scipy.ndimage.filters.gaussian_filter(data, sigma=5)\n arrs = [data.reshape(-1, 1), data2.reshape(-1, 1)]\n fv = np.concatenate(arrs, axis=1)\n return imcut.features.return_fv_by_seeds(fv, seeds, unique_cls)\n\n modelparams['fv_extern'] = fv_function\n \"\"\"\n\n fv_type = self.modelparams[\"fv_type\"]\n logger.debug(\"fv_type \" + fv_type)\n fv = []\n if fv_type == \"intensity\":\n fv = data.reshape(-1, 1)\n\n if seeds is not None:\n logger.debug(\"seeds: %s\", scipy.stats.describe(seeds, axis=None))\n sd = seeds.reshape(-1, 1)\n selection = np.in1d(sd, unique_cls)\n fv = fv[selection]\n sd = sd[selection]\n # sd = sd[]\n return fv, sd\n return fv\n\n # elif fv_type in (\"voxels\"):\n # if seeds is not None:\n # fv = np.asarray(voxels).reshape(-1, 1)\n # else:\n # fv = data\n # fv = fv.reshape(-1, 1)\n elif fv_type in (\"fv001\", \"FV001\", \"intensity_and_blur\"):\n\n # intensity in pixel, gaussian blur intensity\n return features.fv_function_intensity_and_smoothing(\n data, voxelsize, seeds, unique_cls\n )\n\n # from PyQt4.QtCore import pyqtRemoveInputHook\n # pyqtRemoveInputHook()\n\n # print fv1.shape\n # print fv2.shape\n # print fv.shape\n elif fv_type == \"fv_extern\":\n fv_function = self.modelparams[\"fv_extern\"]\n return fv_function(data, voxelsize, seeds, unique_cls)\n\n else:\n logger.error(\"Unknown feature vector type: \" + self.modelparams[\"fv_type\"])\n return fv", "public DoubleVector[] chooseSeeds(int k, Matrix dataPoints) {\n // If no weights were selected, then just use a uniform weighting of 1.\n int[] weights = new int[dataPoints.rows()];\n Arrays.fill(weights, 1);\n return chooseSeeds(dataPoints, k, weights);\n }", "def feature_selection(self, data, labels, weights, num_features, method):\n \"\"\"Selects features for the model. see explain_instance_with_data to\n understand the parameters.\"\"\"\n if method == 'none':\n return np.array(range(data.shape[1]))\n elif method == 'forward_selection':\n return self.forward_selection(data, labels, weights, num_features)\n elif method == 'highest_weights':\n clf = Ridge(alpha=0, fit_intercept=True,\n random_state=self.random_state)\n clf.fit(data, labels, sample_weight=weights)\n feature_weights = sorted(zip(range(data.shape[0]),\n clf.coef_ * data[0]),\n key=lambda x: np.abs(x[1]),\n reverse=True)\n return np.array([x[0] for x in feature_weights[:num_features]])\n elif method == 'lasso_path':\n weighted_data = ((data - np.average(data, axis=0, weights=weights))\n * np.sqrt(weights[:, np.newaxis]))\n weighted_labels = ((labels - np.average(labels, weights=weights))\n * np.sqrt(weights))\n nonzero = range(weighted_data.shape[1])\n _, coefs = self.generate_lars_path(weighted_data,\n weighted_labels)\n for i in range(len(coefs.T) - 1, 0, -1):\n nonzero = coefs.T[i].nonzero()[0]\n if len(nonzero) <= num_features:\n break\n used_features = nonzero\n return used_features\n elif method == 'auto':\n if num_features <= 6:\n n_method = 'forward_selection'\n else:\n n_method = 'highest_weights'\n return self.feature_selection(data, labels, weights,\n num_features, n_method)", "def select(self, selection_specs=None, selection_mode='edges', **selection):\n \"\"\"\n Allows selecting data by the slices, sets and scalar values\n along a particular dimension. The indices should be supplied as\n keywords mapping between the selected dimension and\n value. 
Additionally selection_specs (taking the form of a list\n of type.group.label strings, types or functions) may be\n supplied, which will ensure the selection is only applied if the\n specs match the selected object.\n\n Selecting by a node dimensions selects all edges and nodes that are\n connected to the selected nodes. To select only edges between the\n selected nodes set the selection_mode to 'nodes'.\n \"\"\"\n selection = {dim: sel for dim, sel in selection.items()\n if dim in self.dimensions('ranges')+['selection_mask']}\n if (selection_specs and not any(self.matches(sp) for sp in selection_specs)\n or not selection):\n return self\n\n index_dim = self.nodes.kdims[2].name\n dimensions = self.kdims+self.vdims\n node_selection = {index_dim: v for k, v in selection.items()\n if k in self.kdims}\n nodes = self.nodes.select(**dict(selection, **node_selection))\n selection = {k: v for k, v in selection.items() if k in dimensions}\n\n # Compute mask for edges if nodes were selected on\n nodemask = None\n if len(nodes) != len(self.nodes):\n xdim, ydim = dimensions[:2]\n indices = list(nodes.dimension_values(2, False))\n if selection_mode == 'edges':\n mask1 = self.interface.select_mask(self, {xdim.name: indices})\n mask2 = self.interface.select_mask(self, {ydim.name: indices})\n nodemask = (mask1 | mask2)\n nodes = self.nodes\n else:\n nodemask = self.interface.select_mask(self, {xdim.name: indices,\n ydim.name: indices})\n\n # Compute mask for edge selection\n mask = None\n if selection:\n mask = self.interface.select_mask(self, selection)\n\n # Combine masks\n if nodemask is not None:\n if mask is not None:\n mask &= nodemask\n else:\n mask = nodemask\n\n # Apply edge mask\n if mask is not None:\n data = self.interface.select(self, mask)\n if not np.all(mask):\n new_graph = self.clone((data, nodes))\n source = new_graph.dimension_values(0, expanded=False)\n target = new_graph.dimension_values(1, expanded=False)\n unique_nodes = np.unique(np.concatenate([source, target]))\n nodes = new_graph.nodes[:, :, list(unique_nodes)]\n paths = None\n if self._edgepaths:\n edgepaths = self._split_edgepaths\n paths = edgepaths.clone(edgepaths.interface.select_paths(edgepaths, mask))\n if len(self._edgepaths.data) == 1:\n paths = paths.clone([paths.dframe() if pd else paths.array()])\n else:\n data = self.data\n paths = self._edgepaths\n return self.clone((data, nodes, paths))", "def toy_linear_1d_classification(seed=default_seed):\n \"\"\"Simple classification data in one dimension for illustrating models.\"\"\"\n def sample_class(f):\n p = 1. / (1. + np.exp(-f))\n c = np.random.binomial(1, p)\n c = np.where(c, 1, -1)\n return c\n\n np.random.seed(seed=seed)\n x1 = np.random.normal(-3, 5, 20)\n x2 = np.random.normal(3, 5, 20)\n X = (np.r_[x1, x2])[:, None]\n return {'X': X, 'Y': sample_class(2.*X), 'F': 2.*X, 'covariates' : ['X'], 'response': [discrete({'positive': 1, 'negative': -1})],'seed' : seed}", "def suggest_features \n sel_features = Array.new\n \n File.open(@directory + \"/train\", \"w\") do |f| f.puts @fv_train.to_libSVM end\n \n Dir.chdir('./rsvm/bin/tools') do\n output = `python fselect.py #{@directory}/train`\n \n puts output if (@verbose)\n \n x = File.read(\"train.select\")\n sel_f_ids = x[1..-2].split(\", \")\n sel_f_ids.each do |f|\n s_f = @features.term(f.to_i)\n if s_f.instance_of? 
String then\n s_f = s_f.split(\"||\")\n s_f[0] = s_f[0].to_sym\n end\n sel_features.push(s_f)\n end\n \n #Remove temporary files\n File.delete(\"train.select\") if File.exist?(\"train.select\")\n File.delete(\"train.fscore\") if File.exist?(\"train.fscore\")\n File.delete(\"train.tr.out\") if File.exist?(\"train.tr.out\")\n end\n \n return sel_features\n end", "def uniq(args):\n \"\"\"\n %prog uniq bedfile\n\n Remove overlapping features with higher scores.\n \"\"\"\n from jcvi.formats.sizes import Sizes\n\n p = OptionParser(uniq.__doc__)\n p.add_option(\"--sizes\", help=\"Use sequence length as score\")\n p.add_option(\"--mode\", default=\"span\", choices=(\"span\", \"score\"),\n help=\"Pile mode\")\n opts, args = p.parse_args(args)\n\n if len(args) != 1:\n sys.exit(not p.print_help())\n\n bedfile, = args\n uniqbedfile = bedfile.split(\".\")[0] + \".uniq.bed\"\n bed = Bed(bedfile)\n\n if opts.sizes:\n sizes = Sizes(opts.sizes).mapping\n ranges = [Range(x.seqid, x.start, x.end, sizes[x.accn], i) \\\n for i, x in enumerate(bed)]\n else:\n if opts.mode == \"span\":\n ranges = [Range(x.seqid, x.start, x.end, x.end - x.start + 1, i) \\\n for i, x in enumerate(bed)]\n else:\n ranges = [Range(x.seqid, x.start, x.end, float(x.score), i) \\\n for i, x in enumerate(bed)]\n\n selected, score = range_chain(ranges)\n selected = [x.id for x in selected]\n selected_ids = set(selected)\n selected = [bed[x] for x in selected]\n notselected = [x for i, x in enumerate(bed) if i not in selected_ids]\n\n newbed = Bed()\n newbed.extend(selected)\n newbed.print_to_file(uniqbedfile, sorted=True)\n\n if notselected:\n leftoverfile = bedfile.split(\".\")[0] + \".leftover.bed\"\n leftoverbed = Bed()\n leftoverbed.extend(notselected)\n leftoverbed.print_to_file(leftoverfile, sorted=True)\n\n logging.debug(\"Imported: {0}, Exported: {1}\".format(len(bed), len(newbed)))\n\n return uniqbedfile", "def lexicase(self,F, num_selections=None, survival = False):\n \"\"\"conducts lexicase selection for de-aggregated fitness vectors\"\"\"\n if num_selections is None:\n num_selections = F.shape[0]\n winners = []\n locs = []\n\n individual_locs = np.arange(F.shape[0])\n \n for i in np.arange(num_selections):\n can_locs = individual_locs\n cases = list(np.arange(F.shape[1]))\n self.random_state.shuffle(cases)\n # pdb.set_trace()\n while len(cases) > 0 and len(can_locs) > 1:\n # get best fitness for case among candidates\n best_val_for_case = np.min(F[can_locs,cases[0]])\n # filter individuals without an elite fitness on this case\n can_locs = [l for l in can_locs if F[l,cases[0]] <= best_val_for_case ]\n cases.pop(0)\n\n choice = self.random_state.randint(len(can_locs))\n locs.append(can_locs[choice])\n if survival: # filter out winners from remaining selection pool\n individual_locs = [i for i in individual_locs if i != can_locs[choice]]\n\n while len(locs) < num_selections:\n locs.append(individual_locs[0])\n\n return locs", "def seed(all_individuals, individuals_sample, ids_fenotypes)\n super\n @structure = build_node individuals_sample, Nimbus::LossFunctions.majority_class(individuals_sample, @id_to_fenotype, @classes)\n end", "def pick_four_unique_nodes_quickly(n, seed=None):\n '''\n This is equivalent to np.random.choice(n, 4, replace=False)\n\n Another fellow suggested np.random.random_sample(n).argpartition(4) which is\n clever but still substantially slower.\n '''\n rng = get_rng(seed)\n k = rng.randint(n**4)\n a = k % n\n b = k // n % n\n c = k // n ** 2 % n\n d = k // n ** 3 % n\n if (a != b and a != c and a != d and b 
!= c and b != d and c != d):\n return (a, b, c, d)\n else:\n # the probability of finding a wrong configuration is extremely low\n # unless for extremely small n. if n is extremely small the\n # computational demand is not a problem.\n\n # In my profiling it only took 0.4 seconds to include the uniqueness\n # check in 1 million runs of this function so I think it is OK.\n return pick_four_unique_nodes_quickly(n, rng)" ]
[ 0.9158183336257935, 0.7435174584388733, 0.6858772039413452, 0.6444513201713562, 0.6422149538993835, 0.6410118937492371, 0.6389459371566772, 0.6308839917182922, 0.6264603137969971, 0.6260638236999512, 0.6256772875785828, 0.6210070848464966 ]
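A self-contained toy run of the selection logic in select_from_fv_by_seeds above (array shapes and values here are invented for illustration):

import numpy as np

fv = np.arange(12, dtype=float).reshape(6, 2)  # 6 pixels, 2 features per pixel -> shape (M, N)
seeds = np.array([[0, 1, 0],
                  [2, 0, 1]])                  # seed labels, same number of pixels, any shape
unique_cls = [1, 2]                            # keep only labelled pixels, ignore 0

selection = np.in1d(seeds, unique_cls)         # flattens seeds row-major
fv_selection = fv[selection]                   # -> shape (3, 2), features of labelled pixels
seeds_selection = seeds.flatten()[selection]   # -> array([1, 2, 1])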
Return features selected by seeds and unique_cls or selection from features and corresponding seed classes. :param fv: ndarray with linearized features. Its shape is MxN, where M is number of image pixels and N is number of features :param seeds: ndarray with seeds. Does not have to be linear. :param unique_cls: list of used seed classes, like [1, 2] :return: fv, sd - selection from feature vector and selection from seeds or just fv for whole image
def return_fv_by_seeds(fv, seeds=None, unique_cls=None): """ Return features selected by seeds and unique_cls or selection from features and corresponding seed classes. :param fv: ndarray with lineariezed feature. It's shape is MxN, where M is number of image pixels and N is number of features :param seeds: ndarray with seeds. Does not to be linear. :param unique_cls: number of used seeds clases. Like [1, 2] :return: fv, sd - selection from feature vector and selection from seeds or just fv for whole image """ if seeds is not None: if unique_cls is not None: return select_from_fv_by_seeds(fv, seeds, unique_cls) else: raise AssertionError("Input unique_cls has to be not None if seeds is not None.") else: return fv
[ "def select_from_fv_by_seeds(fv, seeds, unique_cls):\n \"\"\"\n Tool to make simple feature functions take features from feature array by seeds.\n :param fv: ndarray with lineariezed feature. It's shape is MxN, where M is number of image pixels and N is number\n of features\n :param seeds: ndarray with seeds. Does not to be linear.\n :param unique_cls: number of used seeds clases. Like [1, 2]\n :return: fv_selection, seeds_selection - selection from feature vector and selection from seeds\n \"\"\"\n logger.debug(\"seeds\" + str(seeds))\n # fvlin = fv.reshape(-1, int(fv.size/seeds.size))\n expected_shape = [seeds.size, int(fv.size/seeds.size)]\n if fv.shape[0] != expected_shape[0] or fv.shape[1] != expected_shape[1]:\n raise AssertionError(\"Wrong shape of input feature vector array fv\")\n # sd = seeds.reshape(-1, 1)\n selection = np.in1d(seeds, unique_cls)\n fv_selection = fv[selection]\n seeds_selection = seeds.flatten()[selection]\n # sd = sd[]\n return fv_selection, seeds_selection", "def fit_from_image(self, data, voxelsize, seeds, unique_cls):\n \"\"\"\n This Method allows computes feature vector and train model.\n\n :cls: list of index number of requested classes in seeds\n \"\"\"\n fvs, clsselected = self.features_from_image(data, voxelsize, seeds, unique_cls)\n self.fit(fvs, clsselected)", "def features_from_image(\n self, data, voxelsize, seeds=None, unique_cls=None\n ): # , voxels=None):\n \"\"\"\n Input data is 3d image\n\n :param data: is 3d image\n :param seeds: ndimage with same shape as data, nonzero values means seeds.\n :param unique_cls: can select only fv for seeds from specific class.\n f.e. unique_cls = [1, 2] ignores label 0\n\n funcion is called twice in graph cut\n first call is with all params, second is only with data.\n\n based on self.modelparams['fv_type'] the feature vector is computed\n keywords \"intensity\", \"voxels\", \"fv001\", \"fv_extern\" can be used.\n modelparams['fv_type'] = 'fv_extern' allows to use external fv function\n\n Example of exter feature function. 
For easier implementation of return values use function return_fv_by_seeds().\n\n def fv_function(data, voxelsize, seeds=None, cl=None):\n data2 = scipy.ndimage.filters.gaussian_filter(data, sigma=5)\n arrs = [data.reshape(-1, 1), data2.reshape(-1, 1)]\n fv = np.concatenate(arrs, axis=1)\n return imcut.features.return_fv_by_seeds(fv, seeds, unique_cls)\n\n modelparams['fv_extern'] = fv_function\n \"\"\"\n\n fv_type = self.modelparams[\"fv_type\"]\n logger.debug(\"fv_type \" + fv_type)\n fv = []\n if fv_type == \"intensity\":\n fv = data.reshape(-1, 1)\n\n if seeds is not None:\n logger.debug(\"seeds: %s\", scipy.stats.describe(seeds, axis=None))\n sd = seeds.reshape(-1, 1)\n selection = np.in1d(sd, unique_cls)\n fv = fv[selection]\n sd = sd[selection]\n # sd = sd[]\n return fv, sd\n return fv\n\n # elif fv_type in (\"voxels\"):\n # if seeds is not None:\n # fv = np.asarray(voxels).reshape(-1, 1)\n # else:\n # fv = data\n # fv = fv.reshape(-1, 1)\n elif fv_type in (\"fv001\", \"FV001\", \"intensity_and_blur\"):\n\n # intensity in pixel, gaussian blur intensity\n return features.fv_function_intensity_and_smoothing(\n data, voxelsize, seeds, unique_cls\n )\n\n # from PyQt4.QtCore import pyqtRemoveInputHook\n # pyqtRemoveInputHook()\n\n # print fv1.shape\n # print fv2.shape\n # print fv.shape\n elif fv_type == \"fv_extern\":\n fv_function = self.modelparams[\"fv_extern\"]\n return fv_function(data, voxelsize, seeds, unique_cls)\n\n else:\n logger.error(\"Unknown feature vector type: \" + self.modelparams[\"fv_type\"])\n return fv", "def get_unique_R(self, R):\n \"\"\"Get unique vlaues from coordinate matrix\n\n Parameters\n ----------\n R : 2D array\n The coordinate matrix of a subject's fMRI data\n\n Return\n ------\n\n unique_R : a list of array,\n Each element contains unique value in one dimension of\n coordinate matrix R.\n\n inds : a list of array,\n Each element contains the indices to reconstruct one\n dimension of original cooridnate matrix from the unique\n array.\n\n \"\"\"\n unique_R = []\n inds = []\n for d in np.arange(self.n_dim):\n tmp_unique, tmp_inds = np.unique(R[:, d], return_inverse=True)\n unique_R.append(tmp_unique)\n inds.append(tmp_inds)\n return unique_R, inds", "def uniq(args):\n \"\"\"\n %prog uniq bedfile\n\n Remove overlapping features with higher scores.\n \"\"\"\n from jcvi.formats.sizes import Sizes\n\n p = OptionParser(uniq.__doc__)\n p.add_option(\"--sizes\", help=\"Use sequence length as score\")\n p.add_option(\"--mode\", default=\"span\", choices=(\"span\", \"score\"),\n help=\"Pile mode\")\n opts, args = p.parse_args(args)\n\n if len(args) != 1:\n sys.exit(not p.print_help())\n\n bedfile, = args\n uniqbedfile = bedfile.split(\".\")[0] + \".uniq.bed\"\n bed = Bed(bedfile)\n\n if opts.sizes:\n sizes = Sizes(opts.sizes).mapping\n ranges = [Range(x.seqid, x.start, x.end, sizes[x.accn], i) \\\n for i, x in enumerate(bed)]\n else:\n if opts.mode == \"span\":\n ranges = [Range(x.seqid, x.start, x.end, x.end - x.start + 1, i) \\\n for i, x in enumerate(bed)]\n else:\n ranges = [Range(x.seqid, x.start, x.end, float(x.score), i) \\\n for i, x in enumerate(bed)]\n\n selected, score = range_chain(ranges)\n selected = [x.id for x in selected]\n selected_ids = set(selected)\n selected = [bed[x] for x in selected]\n notselected = [x for i, x in enumerate(bed) if i not in selected_ids]\n\n newbed = Bed()\n newbed.extend(selected)\n newbed.print_to_file(uniqbedfile, sorted=True)\n\n if notselected:\n leftoverfile = bedfile.split(\".\")[0] + \".leftover.bed\"\n leftoverbed 
= Bed()\n leftoverbed.extend(notselected)\n leftoverbed.print_to_file(leftoverfile, sorted=True)\n\n logging.debug(\"Imported: {0}, Exported: {1}\".format(len(bed), len(newbed)))\n\n return uniqbedfile", "def toy_linear_1d_classification(seed=default_seed):\n \"\"\"Simple classification data in one dimension for illustrating models.\"\"\"\n def sample_class(f):\n p = 1. / (1. + np.exp(-f))\n c = np.random.binomial(1, p)\n c = np.where(c, 1, -1)\n return c\n\n np.random.seed(seed=seed)\n x1 = np.random.normal(-3, 5, 20)\n x2 = np.random.normal(3, 5, 20)\n X = (np.r_[x1, x2])[:, None]\n return {'X': X, 'Y': sample_class(2.*X), 'F': 2.*X, 'covariates' : ['X'], 'response': [discrete({'positive': 1, 'negative': -1})],'seed' : seed}", "def select(self, selection_specs=None, selection_mode='edges', **selection):\n \"\"\"\n Allows selecting data by the slices, sets and scalar values\n along a particular dimension. The indices should be supplied as\n keywords mapping between the selected dimension and\n value. Additionally selection_specs (taking the form of a list\n of type.group.label strings, types or functions) may be\n supplied, which will ensure the selection is only applied if the\n specs match the selected object.\n\n Selecting by a node dimensions selects all edges and nodes that are\n connected to the selected nodes. To select only edges between the\n selected nodes set the selection_mode to 'nodes'.\n \"\"\"\n selection = {dim: sel for dim, sel in selection.items()\n if dim in self.dimensions('ranges')+['selection_mask']}\n if (selection_specs and not any(self.matches(sp) for sp in selection_specs)\n or not selection):\n return self\n\n index_dim = self.nodes.kdims[2].name\n dimensions = self.kdims+self.vdims\n node_selection = {index_dim: v for k, v in selection.items()\n if k in self.kdims}\n nodes = self.nodes.select(**dict(selection, **node_selection))\n selection = {k: v for k, v in selection.items() if k in dimensions}\n\n # Compute mask for edges if nodes were selected on\n nodemask = None\n if len(nodes) != len(self.nodes):\n xdim, ydim = dimensions[:2]\n indices = list(nodes.dimension_values(2, False))\n if selection_mode == 'edges':\n mask1 = self.interface.select_mask(self, {xdim.name: indices})\n mask2 = self.interface.select_mask(self, {ydim.name: indices})\n nodemask = (mask1 | mask2)\n nodes = self.nodes\n else:\n nodemask = self.interface.select_mask(self, {xdim.name: indices,\n ydim.name: indices})\n\n # Compute mask for edge selection\n mask = None\n if selection:\n mask = self.interface.select_mask(self, selection)\n\n # Combine masks\n if nodemask is not None:\n if mask is not None:\n mask &= nodemask\n else:\n mask = nodemask\n\n # Apply edge mask\n if mask is not None:\n data = self.interface.select(self, mask)\n if not np.all(mask):\n new_graph = self.clone((data, nodes))\n source = new_graph.dimension_values(0, expanded=False)\n target = new_graph.dimension_values(1, expanded=False)\n unique_nodes = np.unique(np.concatenate([source, target]))\n nodes = new_graph.nodes[:, :, list(unique_nodes)]\n paths = None\n if self._edgepaths:\n edgepaths = self._split_edgepaths\n paths = edgepaths.clone(edgepaths.interface.select_paths(edgepaths, mask))\n if len(self._edgepaths.data) == 1:\n paths = paths.clone([paths.dframe() if pd else paths.array()])\n else:\n data = self.data\n paths = self._edgepaths\n return self.clone((data, nodes, paths))", "def fnd_unq_rws(A, return_index=False, return_inverse=False):\n \"\"\"Find unique rows in 2D array.\n\n Parameters\n ----------\n 
A : 2d numpy array\n Array for which unique rows should be identified.\n return_index : bool\n Bool to decide whether I is returned.\n return_inverse : bool\n Bool to decide whether J is returned.\n\n Returns\n -------\n B : 1d numpy array,\n Unique rows\n I: 1d numpy array, only returned if return_index is True\n B = A[I,:]\n J: 2d numpy array, only returned if return_inverse is True\n A = B[J,:]\n\n \"\"\"\n\n A = np.require(A, requirements='C')\n assert A.ndim == 2, \"array must be 2-dim'l\"\n\n B = np.unique(A.view([('', A.dtype)]*A.shape[1]),\n return_index=return_index,\n return_inverse=return_inverse)\n\n if return_index or return_inverse:\n return (B[0].view(A.dtype).reshape((-1, A.shape[1]), order='C'),) \\\n + B[1:]\n else:\n return B.view(A.dtype).reshape((-1, A.shape[1]), order='C')", "def suggest_features \n sel_features = Array.new\n \n File.open(@directory + \"/train\", \"w\") do |f| f.puts @fv_train.to_libSVM end\n \n Dir.chdir('./rsvm/bin/tools') do\n output = `python fselect.py #{@directory}/train`\n \n puts output if (@verbose)\n \n x = File.read(\"train.select\")\n sel_f_ids = x[1..-2].split(\", \")\n sel_f_ids.each do |f|\n s_f = @features.term(f.to_i)\n if s_f.instance_of? String then\n s_f = s_f.split(\"||\")\n s_f[0] = s_f[0].to_sym\n end\n sel_features.push(s_f)\n end\n \n #Remove temporary files\n File.delete(\"train.select\") if File.exist?(\"train.select\")\n File.delete(\"train.fscore\") if File.exist?(\"train.fscore\")\n File.delete(\"train.tr.out\") if File.exist?(\"train.tr.out\")\n end\n \n return sel_features\n end", "public DoubleVector[] chooseSeeds(int k, Matrix dataPoints) {\n // If no weights were selected, then just use a uniform weighting of 1.\n int[] weights = new int[dataPoints.rows()];\n Arrays.fill(weights, 1);\n return chooseSeeds(dataPoints, k, weights);\n }", "def selective_search(\n im_orig, scale=1.0, sigma=0.8, min_size=50):\n '''Selective Search\n\n Parameters\n ----------\n im_orig : ndarray\n Input image\n scale : int\n Free parameter. 
Higher means larger clusters in felzenszwalb segmentation.\n sigma : float\n Width of Gaussian kernel for felzenszwalb segmentation.\n min_size : int\n Minimum component size for felzenszwalb segmentation.\n Returns\n -------\n img : ndarray\n image with region label\n region label is stored in the 4th value of each pixel [r,g,b,(region)]\n regions : array of dict\n [\n {\n 'rect': (left, top, width, height),\n 'labels': [...],\n 'size': component_size\n },\n ...\n ]\n '''\n assert im_orig.shape[2] == 3, \"3ch image is expected\"\n\n # load image and get smallest regions\n # region label is stored in the 4th value of each pixel [r,g,b,(region)]\n img = _generate_segments(im_orig, scale, sigma, min_size)\n\n if img is None:\n return None, {}\n\n imsize = img.shape[0] * img.shape[1]\n R = _extract_regions(img)\n\n # extract neighbouring information\n neighbours = _extract_neighbours(R)\n\n # calculate initial similarities\n S = {}\n for (ai, ar), (bi, br) in neighbours:\n S[(ai, bi)] = _calc_sim(ar, br, imsize)\n\n # hierarchal search\n while S != {}:\n\n # get highest similarity\n i, j = sorted(S.items(), key=lambda i: i[1])[-1][0]\n\n # merge corresponding regions\n t = max(R.keys()) + 1.0\n R[t] = _merge_regions(R[i], R[j])\n\n # mark similarities for regions to be removed\n key_to_delete = []\n for k, v in list(S.items()):\n if (i in k) or (j in k):\n key_to_delete.append(k)\n\n # remove old similarities of related regions\n for k in key_to_delete:\n del S[k]\n\n # calculate similarity set with the new region\n for k in [a for a in key_to_delete if a != (i, j)]:\n n = k[1] if k[0] in (i, j) else k[0]\n S[(t, n)] = _calc_sim(R[t], R[n], imsize)\n\n regions = []\n for k, r in list(R.items()):\n regions.append({\n 'rect': (\n r['min_x'], r['min_y'],\n r['max_x'] - r['min_x'], r['max_y'] - r['min_y']),\n 'size': r['size'],\n 'labels': r['labels']\n })\n\n return img, regions", "def computeUniquePointsSensed(nCols, nPoints, s):\n \"\"\"\n If a network with nCols columns senses an object s times, how many\n unique points are actually sensed? The number is generally <= nCols * s\n because features may be repeated across sensations.\n \"\"\"\n if nCols == 1:\n return min(s, nPoints)\n elif nCols < nPoints:\n q = float(nCols) / nPoints\n unique = min(int(round(( 1.0 - math.pow(1.0 - q, s)) * nPoints)),\n nPoints)\n return unique\n else:\n return nPoints" ]
[ 0.8714072704315186, 0.735267162322998, 0.6699656844139099, 0.6452354788780212, 0.6315693259239197, 0.6298038363456726, 0.6262094974517822, 0.6221494674682617, 0.6198633313179016, 0.61878901720047, 0.6154218316078186, 0.6153923869132996 ]
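A short usage sketch of the two call modes of return_fv_by_seeds above; the import path is an assumption based on the imcut.features reference in the earlier feature-extraction snippet, and the shapes are illustrative:

import numpy as np
from imcut.features import return_fv_by_seeds  # assumed module path

fv = np.random.rand(6, 2)             # 6 pixels, 2 features each
seeds = np.array([0, 1, 0, 2, 0, 1])  # seed labels for the same 6 pixels

# Training-time call: returns (fv_selection, seeds_selection) for labelled pixels only.
fv_sel, sd_sel = return_fv_by_seeds(fv, seeds, unique_cls=[1, 2])

# Prediction-time call: no seeds, the full feature array comes back unchanged.
fv_all = return_fv_by_seeds(fv)

# Passing seeds without unique_cls raises AssertionError.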
Expands logical constructions.
def expand(self, expression): """Expands logical constructions.""" self.logger.debug("expand : expression %s", str(expression)) if not is_string(expression): return expression result = self._pattern.sub(lambda var: str(self._variables[var.group(1)]), expression) result = result.strip() self.logger.debug('expand : %s - result : %s', expression, result) if is_number(result): if result.isdigit(): self.logger.debug(' expand is integer !!!') return int(result) else: self.logger.debug(' expand is float !!!') return float(result) return result
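A simplified, self-contained stand-in for the expand() logic above; the real _pattern and _variables come from the surrounding class and is_string/is_number are helpers not shown here, so the placeholder syntax and the type checks below are assumptions:

import re

_variables = {"width": 4, "scale": 2.5, "name": "block"}
_pattern = re.compile(r"\$\{(\w+)\}")  # assumed ${var} placeholder syntax

def expand(expression):
    if not isinstance(expression, str):  # stands in for is_string()
        return expression
    result = _pattern.sub(lambda m: str(_variables[m.group(1)]), expression).strip()
    if result.isdigit():                 # integer result
        return int(result)
    try:                                 # float result, otherwise plain string
        return float(result)
    except ValueError:
        return result

expand("${width}")          # -> 4
expand("${scale}")          # -> 2.5
expand("${name}_${width}")  # -> 'block_4'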
[ "def _expand_logical_shortcuts(cls, schema):\n \"\"\" Expand agglutinated rules in a definition-schema.\n\n :param schema: The schema-definition to expand.\n :return: The expanded schema-definition.\n \"\"\"\n def is_of_rule(x):\n return isinstance(x, _str_type) and \\\n x.startswith(('allof_', 'anyof_', 'noneof_', 'oneof_'))\n\n for field in schema:\n for of_rule in (x for x in schema[field] if is_of_rule(x)):\n operator, rule = of_rule.split('_')\n schema[field].update({operator: []})\n for value in schema[field][of_rule]:\n schema[field][operator].append({rule: value})\n del schema[field][of_rule]\n return schema", "public void expand() {\r\n join(p, r.first());\r\n join(r.last(), n);\r\n // Necessary so that garbage collector\r\n // can delete rule and guard.\r\n r.theGuard.r = null;\r\n r.theGuard = null;\r\n }", "@Override\n public List<Statement> expand(Statement statement) {\n List<Statement> statements = new ArrayList<Statement>();\n\n // A -> (B -> C) implies A\n Statement definitional = new Statement(statement.getSubject());\n attachExpansionRuleCitation(definitional);\n statements.add(definitional);\n\n // A -> (B -> C) implies B -> C\n Statement object = statement.getObject().getStatement();\n Statement extracted = new Statement(object.getSubject());\n extracted.setRelationshipType(object.getRelationshipType());\n extracted.setObject(new Statement.Object(object.getObject().getTerm()));\n attachExpansionRuleCitation(extracted);\n statements.add(extracted);\n\n return statements;\n }", "void expand(@Nonnull Preprocessor p)\n throws IOException,\n LexerException {\n /* Cache expansion. */\n if (expansion == null) {\n this.expansion = p.expand(this);\n // System.out.println(\"Expanded arg \" + this);\n }\n }", "public BtrpOperand expand() {\n String head = getChild(0).getText().substring(0, getChild(0).getText().length() - 1);\n String tail = getChild(getChildCount() - 1).getText().substring(1);\n BtrpSet res = new BtrpSet(1, BtrpOperand.Type.STRING);\n\n for (int i = 1; i < getChildCount() - 1; i++) {\n BtrpOperand op = getChild(i).go(this);\n if (op == IgnorableOperand.getInstance()) {\n return op;\n }\n BtrpSet s = (BtrpSet) op;\n for (BtrpOperand o : s.getValues()) {\n //Compose\n res.getValues().add(new BtrpString(head + o.toString() + tail));\n }\n }\n return res;\n }", "def expand_varref(varref, lambda_args)\n case varref\n when Symbol, true, false, nil\n varref\n when String\n if varref =~ /^(.*)\\$\\{([^}]+)\\}(.*)$/\n prefix, varname, suffix = $1, $2, $3\n prefix = expand_varref(prefix, lambda_args) unless prefix.empty?\n varval = expand_varref(self[varname], lambda_args)\n # suffix needs no expansion since the regex matches the last occurence\n case varval\n when Symbol, true, false, nil, String\n if prefix.is_a?(Array)\n prefix.map {|p| \"#{p}#{varval}#{suffix}\"}\n else\n \"#{prefix}#{varval}#{suffix}\"\n end\n when Array\n if prefix.is_a?(Array)\n varval.map {|vv| prefix.map {|p| \"#{p}#{vv}#{suffix}\"}}.flatten\n else\n varval.map {|vv| \"#{prefix}#{vv}#{suffix}\"}\n end\n else\n raise \"Unknown construction variable type: #{varval.class} (from #{varname.inspect} => #{self[varname].inspect})\"\n end\n else\n varref\n end\n when Array\n varref.map do |ent|\n expand_varref(ent, lambda_args)\n end.flatten\n when Proc\n expand_varref(varref[*lambda_args], lambda_args)\n else\n raise \"Unknown construction variable type: #{varref.class} (#{varref.inspect})\"\n end\n end", "private StringBuffer expandConstructions(final String drl) {\n // display keys if requested\n if 
( showKeyword ) {\n for ( DSLMappingEntry entry : this.keywords ) {\n logger.info( \"keyword: \" + entry.getMappingKey() );\n logger.info( \" \" + entry.getKeyPattern() );\n }\n }\n if ( showWhen ) {\n for ( DSLMappingEntry entry : this.condition ) {\n logger.info( \"when: \" + entry.getMappingKey() );\n logger.info( \" \" + entry.getKeyPattern() );\n // logger.info( \" \" + entry.getValuePattern() );\n }\n }\n if ( showThen ) {\n for ( DSLMappingEntry entry : this.consequence ) {\n logger.info( \"then: \" + entry.getMappingKey() );\n logger.info( \" \" + entry.getKeyPattern() );\n }\n }\n\n // parse and expand specific areas\n final Matcher m = finder.matcher( drl );\n final StringBuffer buf = new StringBuffer();\n int drlPos = 0;\n int linecount = 0;\n while ( m.find() ) {\n final StringBuilder expanded = new StringBuilder();\n\n int newPos = m.start();\n linecount += countNewlines( drl,\n drlPos,\n newPos );\n drlPos = newPos;\n\n String constr = m.group().trim();\n if ( constr.startsWith( \"rule\" ) ) {\n String headerFragment = m.group( 1 );\n expanded.append( headerFragment ); // adding rule header and attributes\n String lhsFragment = m.group( 2 );\n expanded.append( this.expandLHS( lhsFragment,\n linecount + countNewlines( drl,\n drlPos,\n m.start( 2 ) ) + 1 ) );\n String thenFragment = m.group( 3 );\n expanded.append( thenFragment ); // adding \"then\" header\n String rhsFragment = this.expandRHS( m.group( 4 ),\n linecount + countNewlines( drl,\n drlPos,\n m.start( 4 ) ) + 1 );\n expanded.append( rhsFragment );\n expanded.append( m.group( 5 ) ); // adding rule trailer\n\n } else if ( constr.startsWith( \"query\" ) ) {\n String fragment = m.group( 6 );\n expanded.append( fragment ); // adding query header and attributes\n String lhsFragment = this.expandLHS( m.group( 7 ),\n linecount + countNewlines( drl,\n drlPos,\n m.start( 7 ) ) + 1 );\n expanded.append( lhsFragment );\n expanded.append( m.group( 8 ) ); // adding query trailer\n\n } else {\n // strange behavior\n this.addError( new ExpanderException( \"Unable to expand statement: \" + constr,\n 0 ) );\n }\n m.appendReplacement( buf,\n Matcher.quoteReplacement( expanded.toString() ) );\n }\n m.appendTail( buf );\n return buf;\n }", "def expand(symbol)\n expansion = rules[symbol] || context[symbol]\n\n if expansion.nil?\n if @options.strict?\n raise Errors::UndefinedRule.new(@last_expansion, symbol)\n else\n expansion = Production::Terminal.new('')\n end\n end\n\n @last_expansion = expansion\n expansion\n end", "public BooleanExpr expand(BooleanExpr booleanExpr) {\n if (booleanExpr == null || booleanExpr instanceof ConstantBooleanExpr) {\n return booleanExpr;\n }\n\n Collector collector = new Collector();\n booleanExpr.acceptVisitor(collector);\n\n if (!collector.foundIndexed) {\n return ConstantBooleanExpr.TRUE;\n }\n\n if (!collector.predicatesToRemove.isEmpty()) {\n int numCofactors = 1;\n for (PrimaryPredicateExpr e : collector.predicatesToRemove) {\n Replacer replacer1 = new Replacer(e, ConstantBooleanExpr.TRUE);\n BooleanExpr e1 = booleanExpr.acceptVisitor(replacer1);\n if (!replacer1.found) {\n continue;\n }\n if (e1 == ConstantBooleanExpr.TRUE) {\n return ConstantBooleanExpr.TRUE;\n }\n Replacer replacer2 = new Replacer(e, ConstantBooleanExpr.FALSE);\n BooleanExpr e2 = booleanExpr.acceptVisitor(replacer2);\n if (e2 == ConstantBooleanExpr.TRUE) {\n return ConstantBooleanExpr.TRUE;\n }\n if (e1 == ConstantBooleanExpr.FALSE) {\n booleanExpr = e2;\n } else if (e2 == ConstantBooleanExpr.FALSE) {\n booleanExpr = e1;\n } else {\n 
numCofactors *= 2;\n OrExpr disjunction;\n if (e1 instanceof OrExpr) {\n disjunction = (OrExpr) e1;\n if (e2 instanceof OrExpr) {\n disjunction.getChildren().addAll(((OrExpr) e2).getChildren());\n } else {\n disjunction.getChildren().add(e2);\n }\n } else if (e2 instanceof OrExpr) {\n disjunction = (OrExpr) e2;\n disjunction.getChildren().add(e1);\n } else {\n disjunction = new OrExpr(e1, e2);\n }\n PredicateOptimisations.optimizePredicates(disjunction.getChildren(), false);\n booleanExpr = disjunction;\n }\n if (numCofactors > maxExpansionCofactors) {\n // expansion is too big, it's better to do full scan rather than search the index with a huge and\n // complex query that is a disjunction of many predicates so will very likely match everything anyway\n return ConstantBooleanExpr.TRUE;\n }\n }\n }\n\n return booleanExpr;\n }", "private void expandAffixes(String pluralCount) {\n // expandAffix() will set currencyChoice to a non-null value if\n // appropriate AND if it is null.\n currencyChoice = null;\n\n // Reuse one StringBuffer for better performance\n StringBuffer buffer = new StringBuffer();\n if (posPrefixPattern != null) {\n expandAffix(posPrefixPattern, pluralCount, buffer);\n positivePrefix = buffer.toString();\n }\n if (posSuffixPattern != null) {\n expandAffix(posSuffixPattern, pluralCount, buffer);\n positiveSuffix = buffer.toString();\n }\n if (negPrefixPattern != null) {\n expandAffix(negPrefixPattern, pluralCount, buffer);\n negativePrefix = buffer.toString();\n }\n if (negSuffixPattern != null) {\n expandAffix(negSuffixPattern, pluralCount, buffer);\n negativeSuffix = buffer.toString();\n }\n }", "def grammatical_construct(grammar, what=nil)\n output = \"\"\n if what.nil?\n case grammar\n when Hash\n a_key = grammar.keys.sort_by{rand}[0]\n output += grammatical_construct(grammar, a_key)\n when Array\n grammar.each do |item|\n output += grammatical_construct(item)\n end\n when String\n output += grammar\n end\n else\n rhs = grammar[what]\n case rhs\n when Array\n rhs.each do |item|\n case item\n when Symbol\n output += grammatical_construct(grammar,item)\n when String\n output += item\n when Hash\n output += grammatical_construct(item)\n else\n raise \"#{item.inspect} must be a symbol or string or Hash\"\n end\n end\n when Hash\n output+= grammatical_construct(rhs)\n when Symbol\n output += grammatical_construct(rhs)\n when String\n output += rhs\n else\n raise \"#{rhs.inspect} must be a symbol, string, Array or Hash\"\n end\n end\n return output\n end", "function(abbr, syntax, profile, contextNode) {\n\t\t\treturn parser.expand(abbr, {\n\t\t\t\tsyntax: syntax,\n\t\t\t\tprofile: profile,\n\t\t\t\tcontextNode: contextNode\n\t\t\t});\n\t\t}" ]
[ 0.7648979425430298, 0.7185328602790833, 0.7133020758628845, 0.712088942527771, 0.7094528675079346, 0.7062499523162842, 0.7050736546516418, 0.7036584615707397, 0.703376293182373, 0.6962770223617554, 0.6927876472473145, 0.691910982131958 ]
Creates gutter clients and memoizes them in a registry for future quick access. Args: alias (str or None): Name of the client. Used for caching. If name is falsy then do not use the cache. cache (dict): cache to store gutter managers in. **kwargs: kwargs to be passed to the Manager class. Returns (Manager): A gutter client.
def get_gutter_client( alias='default', cache=CLIENT_CACHE, **kwargs ): """ Creates gutter clients and memoizes them in a registry for future quick access. Args: alias (str or None): Name of the client. Used for caching. If name is falsy then do not use the cache. cache (dict): cache to store gutter managers in. **kwargs: kwargs to be passed the Manger class. Returns (Manager): A gutter client. """ from gutter.client.models import Manager if not alias: return Manager(**kwargs) elif alias not in cache: cache[alias] = Manager(**kwargs) return cache[alias]
[ "def create(self, alias=None, cache=None, **kwargs):\n \"\"\"\n Create a new cache. Either alias or cache params are required. You can use\n kwargs to pass extra parameters to configure the cache.\n\n .. deprecated:: 0.11.0\n Only creating a cache passing an alias is supported. If you want to\n create a cache passing explicit cache and kwargs use ``aiocache.Cache``.\n\n :param alias: str alias to pull configuration from\n :param cache: str or class cache class to use for creating the\n new cache (when no alias is used)\n :return: New cache instance\n \"\"\"\n if alias:\n config = self.get_alias_config(alias)\n elif cache:\n warnings.warn(\n \"Creating a cache with an explicit config is deprecated, use 'aiocache.Cache'\",\n DeprecationWarning,\n )\n config = {\"cache\": cache}\n else:\n raise TypeError(\"create call needs to receive an alias or a cache\")\n cache = _create_cache(**{**config, **kwargs})\n return cache", "def get(self, alias: str):\n \"\"\"\n Retrieve cache identified by alias. Will return always the same instance\n\n If the cache was not instantiated yet, it will do it lazily the first time\n this is called.\n\n :param alias: str cache alias\n :return: cache instance\n \"\"\"\n try:\n return self._caches[alias]\n except KeyError:\n pass\n\n config = self.get_alias_config(alias)\n cache = _create_cache(**deepcopy(config))\n self._caches[alias] = cache\n return cache", "def create(cls, **kwargs):\n \"\"\"Build and return a `ScatterGather` object \"\"\"\n linkname = kwargs.setdefault('linkname', cls.clientclass.linkname_default)\n # Don't use setdefault b/c we don't want to build a JobArchive\n # Unless it is needed\n job_archive = kwargs.get('job_archive', None)\n if job_archive is None:\n job_archive = JobArchive.build_temp_job_archive()\n kwargs.setdefault('job_archive', job_archive)\n kwargs_client = dict(linkname=linkname,\n link_prefix=kwargs.get('link_prefix', ''),\n file_stage=kwargs.get('file_stage', None),\n job_archive=job_archive)\n link = cls.clientclass.create(**kwargs_client)\n sg = cls(link, **kwargs)\n return sg", "def get_client(self, cls, purge_cache=False, *args, **kwds):\n \"\"\"\n This is a general method for getting a client: if present, it is pulled\n from the cache; if not, a new one is instantiated and then put into the\n cache. 
This method should not be called directly, but rather by other\n client-specific methods (e.g., get_ec2_client).\n \"\"\"\n key = str(cls) + str(args) + str(kwds)\n instance = self._clients.get(key)\n if purge_cache or not instance:\n instance = cls(*args, **kwds)\n self._clients[key] = instance\n return instance", "private <K, V> CacheConfiguration<K, V> adjustConfigurationWithCacheManagerDefaults(String alias, CacheConfiguration<K, V> config) {\n ClassLoader cacheClassLoader = config.getClassLoader();\n\n List<ServiceConfiguration<?>> configurationList = new ArrayList<>();\n configurationList.addAll(config.getServiceConfigurations());\n\n CacheLoaderWriterConfiguration loaderWriterConfiguration = findSingletonAmongst(CacheLoaderWriterConfiguration.class, config.getServiceConfigurations());\n if (loaderWriterConfiguration == null) {\n CacheLoaderWriterProvider loaderWriterProvider = serviceLocator.getService(CacheLoaderWriterProvider.class);\n ServiceConfiguration<CacheLoaderWriterProvider> preConfiguredCacheLoaderWriterConfig = loaderWriterProvider.getPreConfiguredCacheLoaderWriterConfig(alias);\n if (preConfiguredCacheLoaderWriterConfig != null) {\n configurationList.add(preConfiguredCacheLoaderWriterConfig);\n }\n if (loaderWriterProvider.isLoaderJsrProvided(alias)) {\n configurationList.add(new CacheLoaderWriterConfiguration() {\n });\n }\n }\n\n ServiceConfiguration<?>[] serviceConfigurations = new ServiceConfiguration<?>[configurationList.size()];\n configurationList.toArray(serviceConfigurations);\n\n if (cacheClassLoader == null) {\n cacheClassLoader = cacheManagerClassLoader;\n }\n if (cacheClassLoader != config.getClassLoader() ) {\n config = new BaseCacheConfiguration<>(config.getKeyType(), config.getValueType(),\n config.getEvictionAdvisor(), cacheClassLoader, config.getExpiryPolicy(),\n config.getResourcePools(), serviceConfigurations);\n } else {\n config = new BaseCacheConfiguration<>(config.getKeyType(), config.getValueType(),\n config.getEvictionAdvisor(), config.getClassLoader(), config.getExpiryPolicy(),\n config.getResourcePools(), serviceConfigurations);\n }\n return config;\n }", "public <K, V> CacheManagerBuilder<T> withCache(String alias, CacheConfiguration<K, V> configuration) {\n return new CacheManagerBuilder<>(this, configBuilder.addCache(alias, configuration));\n }", "def set_alias(self, alias, *digests):\n # pylint: disable=too-many-locals\n \"\"\"\n Give a name (alias) to a set of blobs. Each blob is specified by\n the hash of its content.\n\n :param alias: Alias name\n :type alias: str\n\n :param digests: List of blob hashes (prefixed by ``sha256:``).\n :type digests: list of strings\n\n :rtype: str\n :returns: The registry manifest used to define the alias. 
You almost definitely won't need this.\n \"\"\"\n try:\n manifest_json = self.make_manifest(*digests)\n self.set_manifest(alias, manifest_json)\n return manifest_json\n except requests.exceptions.HTTPError as ex:\n # pylint: disable=no-member\n if ex.response.status_code != requests.codes.bad_request:\n raise\n manifest_json = self.make_unsigned_manifest(alias, *digests)\n signed_json = _sign_manifest(manifest_json)\n self._request('put', 'manifests/' + alias, data=signed_json)\n return signed_json", "def client_for(self, config_path, quiet=False, bootstrap_server=False,\n create_client=False):\n \"\"\"Get a cached client for a project, otherwise create one.\"\"\"\n client = None\n abs_path = os.path.abspath(config_path)\n if abs_path in self.clients:\n client = self.clients[abs_path]\n elif create_client:\n client = self.create_client(config_path)\n if client.setup(quiet=quiet, bootstrap_server=bootstrap_server):\n self.clients[abs_path] = client\n return client", "def cache(self, name, cache_class=Cache,\n identity_generator_class=IdentityGenerator,\n compressor_class=Compressor,\n serializer_class=Serializer, *args, **kwargs):\n \"\"\"\n Return a cache object using default identity generator,\n serializer and compressor.\n\n ``name`` is used to identify the series of your cache\n ``cache_class`` Cache is for normal use and HerdCache\n is used in case of Thundering Herd Problem\n ``identity_generator_class`` is the class used to generate\n the real unique key in cache, can be overwritten to\n meet your special needs. It should provide `generate` API\n ``compressor_class`` is the class used to compress cache in redis,\n can be overwritten with API `compress` and `decompress` retained.\n ``serializer_class`` is the class used to serialize\n content before compress, can be overwritten with API\n `serialize` and `deserialize` retained.\n \"\"\"\n return cache_class(self, app=name,\n identity_generator_class=identity_generator_class,\n compressor_class=compressor_class,\n serializer_class=serializer_class,\n *args, **kwargs)", "def create_cache(self, **kwargs):\n \"\"\"\n Creates an instance of the Cache Service.\n \"\"\"\n cache = predix.admin.cache.Cache(**kwargs)\n cache.create(**kwargs)\n cache.add_to_manifest(self)\n return cache", "def get_autoconfig_client(client_cache=_AUTOCONFIG_CLIENT):\n \"\"\"\n Creates the client as specified in the `luigi.cfg` configuration.\n \"\"\"\n try:\n return client_cache.client\n except AttributeError:\n configured_client = hdfs_config.get_configured_hdfs_client()\n if configured_client == \"webhdfs\":\n client_cache.client = hdfs_webhdfs_client.WebHdfsClient()\n elif configured_client == \"snakebite\":\n client_cache.client = hdfs_snakebite_client.SnakebiteHdfsClient()\n elif configured_client == \"snakebite_with_hadoopcli_fallback\":\n client_cache.client = luigi.contrib.target.CascadingClient([\n hdfs_snakebite_client.SnakebiteHdfsClient(),\n hdfs_hadoopcli_clients.create_hadoopcli_client(),\n ])\n elif configured_client == \"hadoopcli\":\n client_cache.client = hdfs_hadoopcli_clients.create_hadoopcli_client()\n else:\n raise Exception(\"Unknown hdfs client \" + configured_client)\n return client_cache.client", "def create_client_with_manual_poll(api_key, config_cache_class=None,\n base_url=None):\n \"\"\"\n Create an instance of ConfigCatClient and setup Manual Poll mode with custom options\n\n :param api_key: ConfigCat ApiKey to access your configuration.\n :param config_cache_class: If you want to use custom caching instead of the client's default 
InMemoryConfigCache,\n You can provide an implementation of ConfigCache.\n :param base_url: You can set a base_url if you want to use a proxy server between your application and ConfigCat\n \"\"\"\n\n if api_key is None:\n raise ConfigCatClientException('API Key is required.')\n\n return ConfigCatClient(api_key, 0, 0, None, 0, config_cache_class, base_url)" ]
[ 0.6883594989776611, 0.6749633550643921, 0.6676217317581177, 0.6557245850563049, 0.6497238278388977, 0.6472004055976868, 0.646845281124115, 0.6415619254112244, 0.6407591700553894, 0.6398371458053589, 0.6362635493278503, 0.6346388459205627 ]
The mod operator is prone to floating point errors, so use decimal. 101.1 % 100 >>> 1.0999999999999943 decimal_context.divmod(Decimal('100.1'), 100) >>> (Decimal('1'), Decimal('0.1'))
def _modulo(self, decimal_argument): """ The mod operator is prone to floating point errors, so use decimal. 101.1 % 100 >>> 1.0999999999999943 decimal_context.divmod(Decimal('100.1'), 100) >>> (Decimal('1'), Decimal('0.1')) """ _times, remainder = self._context.divmod(decimal_argument, 100) # match the builtin % behavior by adding the N to the result if negative return remainder if remainder >= 0 else remainder + 100
[ "def div(a,b):\n \"\"\"``div(a,b)`` is like ``a // b`` if ``b`` devides ``a``, otherwise\n an `ValueError` is raised.\n\n >>> div(10,2)\n 5\n >>> div(10,3)\n Traceback (most recent call last):\n ...\n ValueError: 3 does not divide 10\n \"\"\"\n res, fail = divmod(a,b)\n if fail:\n raise ValueError(\"%r does not divide %r\" % (b,a))\n else:\n return res", "def mod(value, mod=1):\n \"\"\"\n RETURN NON-NEGATIVE MODULO\n RETURN None WHEN GIVEN INVALID ARGUMENTS\n \"\"\"\n if value == None:\n return None\n elif mod <= 0:\n return None\n elif value < 0:\n return (value % mod + mod) % mod\n else:\n return value % mod", "def _mod(value, mod=1):\n \"\"\"\n RETURN NON-NEGATIVE MODULO\n RETURN None WHEN GIVEN INVALID ARGUMENTS\n \"\"\"\n if value == None:\n return None\n elif mod <= 0:\n return None\n elif value < 0:\n return (value % mod + mod) % mod\n else:\n return value % mod", "def mod(ctx, number, divisor):\n \"\"\"\n Returns the remainder after number is divided by divisor\n \"\"\"\n number = conversions.to_decimal(number, ctx)\n divisor = conversions.to_decimal(divisor, ctx)\n return number - divisor * _int(ctx, number / divisor)", "public static BigDecimal mod(EvaluationContext ctx, Object number, Object divisor) {\n BigDecimal _number = Conversions.toDecimal(number, ctx);\n BigDecimal _divisor = Conversions.toDecimal(divisor, ctx);\n return _number.subtract(_divisor.multiply(new BigDecimal(_int(ctx, _number.divide(_divisor, 10, RoundingMode.HALF_UP)))));\n }", "def redivmod(initial_value, factors):\n \"\"\"\n Chop up C{initial_value} according to the list of C{factors} and return a\n formatted string.\n \"\"\"\n result = []\n value = initial_value\n for divisor, label in factors:\n if not divisor:\n remainder = value\n if not remainder:\n break\n else:\n value, remainder = divmod(value, divisor)\n if not value and not remainder:\n break\n if remainder == 1:\n # depluralize\n label = label[:-1]\n if six.PY2:\n addition = unicode(remainder) + ' ' + unicode(label)\n else:\n addition = str(remainder) + ' ' + str(label)\n result.insert(0, addition)\n if len(result) > 1:\n result[-1] = \"and \" + result[-1]\n if result:\n return ', '.join(result)\n else:\n return \"instantly\"", "def divmod(x, y, context=None):\n \"\"\"\n Return the pair (floordiv(x, y, context), mod(x, y, context)).\n\n Semantics for negative inputs match those of Python's divmod function.\n\n \"\"\"\n return floordiv(x, y, context=context), mod(x, y, context=context)", "def divmod(other)\n if Quantity===other\n @value.divmod( @unit.convert(other) )\n else\n d,m = @value.divmod(other)\n [ self.class.new( d, @expr, @unit ),\n self.class.new( m, @expr, @unit ) ]\n end\n end", "Expr mulDivMod() {\r\n\t\tExpr expr = nullSafe();\r\n\t\tfor (Tok tok=peek(); tok.sym==Sym.MUL || tok.sym==Sym.DIV || tok.sym==Sym.MOD; tok=peek()) {\r\n\t\t\tmove();\r\n\t\t\texpr = new Arith(tok.sym, expr, nullSafe(), location);\r\n\t\t}\r\n\t\treturn expr;\r\n\t}", "def divmod(other)\n raise ArgumentError, \"Incompatible Units ('#{self}' not compatible with '#{other}')\" unless self =~ other\n return scalar.divmod(other.scalar) if units == other.units\n to_base.scalar.divmod(other.to_base.scalar)\n end", "def fmod(x, y, context=None):\n \"\"\"\n Return ``x`` reduced modulo ``y``.\n\n Returns the value of x - n * y, where n is the integer quotient of x\n divided by y, rounded toward zero.\n\n Special values are handled as described in Section F.9.7.1 of the ISO C99\n standard: If x is infinite or y is zero, the result is NaN. 
If y is\n infinite and x is finite, the result is x rounded to the current context.\n If the result is zero, it has the sign of x.\n\n \"\"\"\n return _apply_function_in_current_context(\n BigFloat,\n mpfr.mpfr_fmod,\n (\n BigFloat._implicit_convert(x),\n BigFloat._implicit_convert(y),\n ),\n context,\n )", "def _mod(field, value, document):\n \"\"\"\n Performs a mod on a document field. Value must be a list or tuple with\n two values divisor and remainder (i.e. [2, 0]). This will essentially\n perform the following:\n\n document[field] % divisor == remainder\n\n If the value does not contain integers or is not a two-item list/tuple,\n a MalformedQueryException will be raised. If the value of document[field]\n cannot be converted to an integer, this will return False.\n \"\"\"\n try:\n divisor, remainder = map(int, value)\n except (TypeError, ValueError):\n raise MalformedQueryException(\"'$mod' must accept an iterable: [divisor, remainder]\")\n\n try:\n return int(document.get(field, None)) % divisor == remainder\n except (TypeError, ValueError):\n return False" ]
[ 0.7502017021179199, 0.7433937788009644, 0.7366652488708496, 0.7245782613754272, 0.7240141034126282, 0.716590404510498, 0.7109190821647644, 0.7009940147399902, 0.6962355971336365, 0.6936264038085938, 0.683988094329834, 0.681254506111145 ]
Checks to see if this switch is enabled for the provided input. If ``compounded``, all switch conditions must be ``True`` for the switch to be enabled. Otherwise, *any* condition needs to be ``True`` for the switch to be enabled. The switch state is then checked to see if it is ``GLOBAL`` or ``DISABLED``. If it is not, then the switch is ``SELECTIVE`` and each condition is checked. Keyword Arguments: inpt -- An instance of the ``Input`` class.
def enabled_for(self, inpt): """ Checks to see if this switch is enabled for the provided input. If ``compounded``, all switch conditions must be ``True`` for the switch to be enabled. Otherwise, *any* condition needs to be ``True`` for the switch to be enabled. The switch state is then checked to see if it is ``GLOBAL`` or ``DISABLED``. If it is not, then the switch is ``SELECTIVE`` and each condition is checked. Keyword Arguments: inpt -- An instance of the ``Input`` class. """ signals.switch_checked.call(self) signal_decorated = partial(self.__signal_and_return, inpt) if self.state is self.states.GLOBAL: return signal_decorated(True) elif self.state is self.states.DISABLED: return signal_decorated(False) conditions_dict = ConditionsDict.from_conditions_list(self.conditions) conditions = conditions_dict.get_by_input(inpt) if conditions: result = self.__enabled_func( cond.call(inpt) for cond in conditions if cond.argument(inpt).applies ) else: result = None return signal_decorated(result)
[ "def call(self, inpt):\n \"\"\"\n Returns if the condition applies to the ``inpt``.\n\n If the class ``inpt`` is an instance of is not the same class as the\n condition's own ``argument``, then ``False`` is returned. This also\n applies to the ``NONE`` input.\n\n Otherwise, ``argument`` is called, with ``inpt`` as the instance and\n the value is compared to the ``operator`` and the Value is returned. If\n the condition is ``negative``, then then ``not`` the value is returned.\n\n Keyword Arguments:\n inpt -- An instance of the ``Input`` class.\n \"\"\"\n if inpt is Manager.NONE_INPUT:\n return False\n\n # Call (construct) the argument with the input object\n argument_instance = self.argument(inpt)\n\n if not argument_instance.applies:\n return False\n\n application = self.__apply(argument_instance, inpt)\n\n if self.negative:\n application = not application\n\n return application", "def consume_input(self, inp):\n \"\"\"\n Return True/False if the machine accepts/reject the input.\n Args:\n inp (str): input string to be consumed\n Returns:\n bool: A true or false value depending on if the DFA\n accepts the provided input\n \"\"\"\n cur_state = sorted(\n self.states,\n key=attrgetter('initial'),\n reverse=True)[0]\n while len(inp) > 0:\n found = False\n for arc in cur_state.arcs:\n if self.isyms.find(arc.ilabel) == inp[0]:\n cur_state = self[arc.nextstate]\n inp = inp[1:]\n found = True\n break\n if not found:\n return False\n return cur_state.final != TropicalWeight(float('inf'))", "def is_active(self, key, *instances, **kwargs):\n \"\"\"\n Returns ``True`` if any of ``instances`` match an active switch.\n Otherwise returns ``False``.\n\n >>> operator.is_active('my_feature', request) #doctest: +SKIP\n \"\"\"\n try:\n default = kwargs.pop('default', False)\n\n # Check all parents for a disabled state\n parts = key.split(':')\n if len(parts) > 1:\n child_kwargs = kwargs.copy()\n child_kwargs['default'] = None\n result = self.is_active(':'.join(parts[:-1]), *instances,\n **child_kwargs)\n\n if result is False:\n return result\n elif result is True:\n default = result\n\n try:\n switch = self[key]\n except KeyError:\n # switch is not defined, defer to parent\n return default\n\n if switch.status == GLOBAL:\n return True\n elif switch.status == DISABLED:\n return False\n elif switch.status == INHERIT:\n return default\n\n conditions = switch.value\n # If no conditions are set, we inherit from parents\n if not conditions:\n return default\n\n instances = list(instances) if instances else []\n instances.extend(self.context.values())\n\n # check each switch to see if it can execute\n return_value = False\n\n for namespace, condition in conditions.iteritems():\n condition_set = registry_by_namespace.get(namespace)\n if not condition_set:\n continue\n result = condition_set.has_active_condition(condition,\n instances)\n if result is False:\n return False\n elif result is True:\n return_value = True\n except:\n log.exception('Error checking if switch \"%s\" is active', key)\n return_value = False\n\n # there were no matching conditions, so it must not be enabled\n return return_value", "def is_active(self, key, *instances, **kwargs):\n \"\"\"\n Returns ``True`` if any of ``instances`` match an active switch. 
Otherwise\n returns ``False``.\n\n >>> gargoyle.is_active('my_feature', request) #doctest: +SKIP\n \"\"\"\n default = kwargs.pop('default', False)\n\n # Check all parents for a disabled state\n parts = key.split(':')\n if len(parts) > 1:\n child_kwargs = kwargs.copy()\n child_kwargs['default'] = None\n result = self.is_active(':'.join(parts[:-1]), *instances, **child_kwargs)\n\n if result is False:\n return result\n elif result is True:\n default = result\n\n try:\n switch = self[key]\n except KeyError:\n # switch is not defined, defer to parent\n return default\n\n if switch.status == GLOBAL:\n return True\n elif switch.status == DISABLED:\n return False\n elif switch.status == INHERIT:\n return default\n\n conditions = switch.value\n # If no conditions are set, we inherit from parents\n if not conditions:\n return default\n\n if instances:\n # HACK: support request.user by swapping in User instance\n instances = list(instances)\n for v in instances:\n if isinstance(v, HttpRequest) and hasattr(v, 'user'):\n instances.append(v.user)\n\n # check each switch to see if it can execute\n return_value = False\n\n for switch in self._registry.itervalues():\n result = switch.has_active_condition(conditions, instances)\n if result is False:\n return False\n elif result is True:\n return_value = True\n\n # there were no matching conditions, so it must not be enabled\n return return_value", "def p_InSwitchDefList(p):\n '''\n InSwitchDefList : InSwitchDef\n | InSwitchDefList InSwitchDef\n '''\n if len(p) <= 2:\n p[0] = InSwitchDefList(None, p[1])\n else:\n p[0] = InSwitchDefList(p[1], p[2])", "function( inst ) {\n\t\treturn inst.input && inst.input.is( \":visible\" ) && !inst.input.is( \":disabled\" ) && !inst.input.is( \":focus\" );\n\t}", "def consume_input(self, inp):\n \"\"\"\n Return True/False if the machine accepts/reject the input.\n Args:\n inp (str): input string to be consumed\n Retunrs:\n bool: A true or false value depending on if the DFA\n accepts the provided input\n \"\"\"\n cur_state = self.states[0]\n for character in inp:\n found = False\n for arc in cur_state.arcs:\n if arc.guard.is_sat(character):\n cur_state = self.states[arc.dst_state]\n found = True\n break\n\n if not found:\n raise RuntimeError('SFA not complete')\n\n return cur_state.final", "def _is_in_set(self, inpt, metadata):\n \"\"\"checks if the input is in the metadata's *_set list\"\"\"\n # makes an assumption there is only one _set in the metadata dict\n get_set_methods = [m for m in dir(metadata) if 'get_' in m and '_set' in m]\n set_results = None\n for m in get_set_methods:\n try:\n set_results = getattr(metadata, m)()\n break\n except errors.IllegalState:\n pass\n if set_results is not None and inpt in set_results:\n return True\n return False", "def validate_enabled(enabled):\n '''\n Helper function to validate the enabled parameter. Boolean values are\n converted to \"on\" and \"off\". String values are checked to make sure they are\n either \"on\" or \"off\"/\"yes\" or \"no\". Integer ``0`` will return \"off\". 
All\n other integers will return \"on\"\n\n :param enabled: Enabled can be boolean True or False, Integers, or string\n values \"on\" and \"off\"/\"yes\" and \"no\".\n :type: str, int, bool\n\n :return: \"on\" or \"off\" or errors\n :rtype: str\n '''\n if isinstance(enabled, six.string_types):\n if enabled.lower() not in ['on', 'off', 'yes', 'no']:\n msg = '\\nMac Power: Invalid String Value for Enabled.\\n' \\\n 'String values must be \\'on\\' or \\'off\\'/\\'yes\\' or \\'no\\'.\\n' \\\n 'Passed: {0}'.format(enabled)\n raise SaltInvocationError(msg)\n\n return 'on' if enabled.lower() in ['on', 'yes'] else 'off'\n\n return 'on' if bool(enabled) else 'off'", "def _is_valid_integer(self, inpt, metadata):\n \"\"\"Checks if input is a valid integer value\"\"\"\n if not isinstance(inpt, int):\n return False\n if metadata.get_minimum_integer() and inpt < metadata.get_maximum_integer():\n return False\n if metadata.get_maximum_integer() and inpt > metadata.get_minimum_integer():\n return False\n if metadata.get_integer_set() and inpt not in metadata.get_integer_set():\n return False\n else:\n return True", "def setActiveState(self, active):\n \"\"\" Use this to enable or disable (grey out) a parameter. \"\"\"\n st = DISABLED\n if active: st = NORMAL\n self.entry.configure(state=st)\n self.inputLabel.configure(state=st)\n self.promptLabel.configure(state=st)", "def condition(i):\n \"\"\" Returns the flag this instruction uses\n or None. E.g. 'c' for Carry, 'nz' for not-zero, etc.\n That is the condition required for this instruction\n to execute. For example: ADC A, 0 does NOT have a\n condition flag (it always execute) whilst RETC does.\n \"\"\"\n I = inst(i)\n\n if I not in {'call', 'jp', 'jr', 'ret'}:\n return None # This instruction always execute\n\n if I == 'ret':\n i = [x.lower() for x in i.split(' ') if x != '']\n return i[1] if len(i) > 1 else None\n\n i = [x.strip() for x in i.split(',')]\n i = [x.lower() for x in i[0].split(' ') if x != '']\n if len(i) > 1 and i[1] in {'c', 'nc', 'z', 'nz', 'po', 'pe', 'p', 'm'}:\n return i[1]\n\n return None" ]
[ 0.6927810311317444, 0.6696218252182007, 0.666363000869751, 0.6596062779426575, 0.6584011316299438, 0.6560157537460327, 0.6553595066070557, 0.64560866355896, 0.6452962756156921, 0.6417357921600342, 0.6405673623085022, 0.6382938623428345 ]
Returns if the condition applies to the ``inpt``. If the class that ``inpt`` is an instance of is not the same class as the condition's own ``argument``, then ``False`` is returned. This also applies to the ``NONE`` input. Otherwise, ``argument`` is called with ``inpt`` as the instance, the value is compared to the ``operator``, and the value is returned. If the condition is ``negative``, then ``not`` the value is returned. Keyword Arguments: inpt -- An instance of the ``Input`` class.
def call(self, inpt): """ Returns if the condition applies to the ``inpt``. If the class ``inpt`` is an instance of is not the same class as the condition's own ``argument``, then ``False`` is returned. This also applies to the ``NONE`` input. Otherwise, ``argument`` is called, with ``inpt`` as the instance and the value is compared to the ``operator`` and the Value is returned. If the condition is ``negative``, then then ``not`` the value is returned. Keyword Arguments: inpt -- An instance of the ``Input`` class. """ if inpt is Manager.NONE_INPUT: return False # Call (construct) the argument with the input object argument_instance = self.argument(inpt) if not argument_instance.applies: return False application = self.__apply(argument_instance, inpt) if self.negative: application = not application return application
[ "def enabled_for(self, inpt):\n \"\"\"\n Checks to see if this switch is enabled for the provided input.\n\n If ``compounded``, all switch conditions must be ``True`` for the switch\n to be enabled. Otherwise, *any* condition needs to be ``True`` for the\n switch to be enabled.\n\n The switch state is then checked to see if it is ``GLOBAL`` or\n ``DISABLED``. If it is not, then the switch is ``SELECTIVE`` and each\n condition is checked.\n\n Keyword Arguments:\n inpt -- An instance of the ``Input`` class.\n \"\"\"\n\n signals.switch_checked.call(self)\n signal_decorated = partial(self.__signal_and_return, inpt)\n\n if self.state is self.states.GLOBAL:\n return signal_decorated(True)\n elif self.state is self.states.DISABLED:\n return signal_decorated(False)\n\n conditions_dict = ConditionsDict.from_conditions_list(self.conditions)\n conditions = conditions_dict.get_by_input(inpt)\n\n if conditions:\n result = self.__enabled_func(\n cond.call(inpt)\n for cond\n in conditions\n if cond.argument(inpt).applies\n )\n else:\n result = None\n\n return signal_decorated(result)", "def _not(condition=None, **kwargs):\n \"\"\"\n Return the opposite of input condition.\n\n :param condition: condition to process.\n\n :result: not condition.\n :rtype: bool\n \"\"\"\n\n result = True\n\n if condition is not None:\n result = not run(condition, **kwargs)\n\n return result", "def condition(i):\n \"\"\" Returns the flag this instruction uses\n or None. E.g. 'c' for Carry, 'nz' for not-zero, etc.\n That is the condition required for this instruction\n to execute. For example: ADC A, 0 does NOT have a\n condition flag (it always execute) whilst RETC does.\n \"\"\"\n I = inst(i)\n\n if I not in {'call', 'jp', 'jr', 'ret'}:\n return None # This instruction always execute\n\n if I == 'ret':\n i = [x.lower() for x in i.split(' ') if x != '']\n return i[1] if len(i) > 1 else None\n\n i = [x.strip() for x in i.split(',')]\n i = [x.lower() for x in i[0].split(' ') if x != '']\n if len(i) > 1 and i[1] in {'c', 'nc', 'z', 'nz', 'po', 'pe', 'p', 'm'}:\n return i[1]\n\n return None", "def _is_valid_integer(self, inpt, metadata):\n \"\"\"Checks if input is a valid integer value\"\"\"\n if not isinstance(inpt, int):\n return False\n if metadata.get_minimum_integer() and inpt < metadata.get_maximum_integer():\n return False\n if metadata.get_maximum_integer() and inpt > metadata.get_minimum_integer():\n return False\n if metadata.get_integer_set() and inpt not in metadata.get_integer_set():\n return False\n else:\n return True", "def _is_valid_cardinal(self, inpt, metadata):\n \"\"\"Checks if input is a valid cardinal value\"\"\"\n if not isinstance(inpt, int):\n return False\n if metadata.get_minimum_cardinal() and inpt < metadata.get_maximum_cardinal():\n return False\n if metadata.get_maximum_cardinal() and inpt > metadata.get_minimum_cardinal():\n return False\n if metadata.get_cardinal_set() and inpt not in metadata.get_cardinal_set():\n return False\n else:\n return True", "def is_instrinsic(input):\n \"\"\"\n Checks if the given input is an intrinsic function dictionary. 
Intrinsic function is a dictionary with single\n key that is the name of the intrinsics.\n\n :param input: Input value to check if it is an intrinsic\n :return: True, if yes\n \"\"\"\n\n if input is not None \\\n and isinstance(input, dict) \\\n and len(input) == 1:\n\n key = list(input.keys())[0]\n return key == \"Ref\" or key == \"Condition\" or key.startswith(\"Fn::\")\n\n return False", "def _is_in_set(self, inpt, metadata):\n \"\"\"checks if the input is in the metadata's *_set list\"\"\"\n # makes an assumption there is only one _set in the metadata dict\n get_set_methods = [m for m in dir(metadata) if 'get_' in m and '_set' in m]\n set_results = None\n for m in get_set_methods:\n try:\n set_results = getattr(metadata, m)()\n break\n except errors.IllegalState:\n pass\n if set_results is not None and inpt in set_results:\n return True\n return False", "def is_none_or(self):\n \"\"\"\n Ensures :attr:`subject` is either ``None``, or satisfies subsequent (chained) conditions::\n\n Ensure(None).is_none_or.is_an(int)\n \"\"\"\n if self._subject is None:\n return NoOpInspector(subject=self._subject, error_factory=self._error_factory)\n else:\n return self", "def _is_valid_type(self, inpt):\n \"\"\"Checks if input is a valid Type\"\"\"\n from dlkit.abstract_osid.type.primitives import Type as abc_type\n if isinstance(inpt, abc_type):\n return True\n else:\n return False", "def _is_valid_string(self, inpt, metadata):\n \"\"\"Checks if input is a valid string\"\"\"\n if not is_string(inpt):\n return False\n if metadata.get_minimum_string_length() and len(inpt) < metadata.get_minimum_string_length():\n return False\n elif metadata.get_maximum_string_length() and len(inpt) > metadata.get_maximum_string_length():\n return False\n if metadata.get_string_set() and inpt not in metadata.get_string_set():\n return False\n else:\n return True", "def _is_noop_insn(insn):\n \"\"\"\n Check if the instruction does nothing.\n\n :param insn: The capstone insn object.\n :return: True if the instruction does no-op, False otherwise.\n \"\"\"\n\n if insn.insn_name() == 'nop':\n # nops\n return True\n if insn.insn_name() == 'lea':\n # lea reg, [reg + 0]\n op0, op1 = insn.operands\n if op0.type == 1 and op1.type == 3:\n # reg and mem\n if op0.reg == op1.mem.base and op1.mem.index == 0 and op1.mem.disp == 0:\n return True\n\n # add more types of no-op instructions here :-)\n\n return False", "def applies(self, container, ctx):\n '''\n Subclasses should not override `applies`, but instead they should override `_applies`, which has the same syntax as `applies`.\n In the `_applies` method the condition is guaranteed to have a reference to the desired field, as `self._field`.\n\n :type container: :class:`~kitty.model.low_level.container.Container`\n :param container: the caller\n :param ctx: rendering context in which applies was called\n :return: True if condition applies, False otherwise\n '''\n self._get_ready(container)\n return self._applies(container, ctx)" ]
[ 0.6861664056777954, 0.6830622553825378, 0.6745240092277527, 0.6726325750350952, 0.6664140820503235, 0.6653026938438416, 0.6593443155288696, 0.658407986164093, 0.6563044786453247, 0.6530144810676575, 0.6528307795524597, 0.6505153775215149 ]
List of all switches currently registered.
def switches(self): """ List of all switches currently registered. """ results = [ switch for name, switch in self.storage.iteritems() if name.startswith(self.__joined_namespace) ] return results
[ "def do_list_logical_switch(self, line):\n \"\"\"list_logical_switch <peer>\n \"\"\"\n\n def f(p, args):\n o = p.get()\n for s in o.logical_switches.switch:\n print('%s %s' % (s.id, s.datapath_id))\n\n self._request(line, f)", "def switches(self):\n \"\"\"\n Gets the Switches API client.\n\n Returns:\n Switches:\n \"\"\"\n if not self.__switches:\n self.__switches = Switches(self.__connection)\n return self.__switches", "def get_all_switch_ips(self):\n \"\"\"Using reserved switch binding get all switch ips.\"\"\"\n\n switch_connections = []\n try:\n bindings = nxos_db.get_reserved_switch_binding()\n except excep.NexusPortBindingNotFound:\n LOG.error(\"No switch bindings in the port data base\")\n bindings = []\n for switch in bindings:\n switch_connections.append(switch.switch_ip)\n\n return switch_connections", "def _get_switches(self, profile):\n \"\"\"Get set of switches referenced in a port binding profile\"\"\"\n switchports = self._get_switchports(profile)\n switches = set([switchport[0] for switchport in switchports])\n return switches", "void printSwitches() {\n\n System.out.println(\"Used labels:\");\n\n for (int i = 0; i < vSwitches.size(); i++) {\n System.out.println((String) (vSwitches.elementAt(i)));\n }\n }", "def _get_host_switches(self, host_id):\n \"\"\"Get switch IPs from configured host mapping.\n\n This method is used to extract switch information\n from transactions where VNIC_TYPE is normal.\n Information is extracted from ini file which\n is stored in _nexus_switches.\n\n :param host_id: host_name from transaction\n :returns: list of all switches\n :returns: list of only switches which are active\n \"\"\"\n\n all_switches = set()\n active_switches = set()\n\n try:\n host_list = nxos_db.get_host_mappings(host_id)\n for mapping in host_list:\n all_switches.add(mapping.switch_ip)\n if self.is_switch_active(mapping.switch_ip):\n active_switches.add(mapping.switch_ip)\n except excep.NexusHostMappingNotFound:\n pass\n\n return list(all_switches), list(active_switches)", "function switches() {\n return fritz.getSwitchList().then(function(switches) {\n console.log(\"Switches: \" + switches + \"\\n\");\n\n return sequence(switches.map(function(sw) {\n return function() {\n return sequence([\n function() {\n return fritz.getSwitchName(sw).then(function(name) {\n console.log(\"[\" + sw + \"] \" + name);\n });\n },\n function() {\n return fritz.getSwitchPresence(sw).then(function(presence) {\n console.log(\"[\" + sw + \"] presence: \" + presence);\n });\n },\n function() {\n return fritz.getSwitchState(sw).then(function(state) {\n console.log(\"[\" + sw + \"] state: \" + state);\n });\n },\n function() {\n return fritz.getTemperature(sw).then(function(temp) {\n temp = isNaN(temp) ? 
'-' : temp + \"°C\";\n console.log(\"[\" + sw + \"] temp: \" + temp + \"\\n\");\n });\n }\n ]);\n };\n }));\n });\n}", "def switchport_list(self):\n \"\"\"list[dict]:A list of dictionary items describing the details\n of list of dictionary items describing the details of switch port\"\"\"\n urn = \"{urn:brocade.com:mgmt:brocade-interface-ext}\"\n result = []\n request_interface = self.get_interface_switchport_request()\n interface_result = self._callback(request_interface, 'get')\n for interface in interface_result.findall('%sswitchport' % urn):\n vlans = []\n interface_type = self.get_node_value(interface, '%sinterface-type',\n urn)\n interface_name = self.get_node_value(interface, '%sinterface-name',\n urn)\n mode = self.get_node_value(interface, '%smode', urn)\n intf = interface.find('%sactive-vlans' % urn)\n for vlan_node in intf.findall('%svlanid' % urn):\n vlan = vlan_node.text\n vlans.append(vlan)\n results = {'vlan-id': vlans,\n 'mode': mode,\n 'interface-name': interface_name,\n 'interface_type': interface_type}\n result.append(results)\n return result", "def _get_baremetal_switches(self, port):\n \"\"\"Get switch ip addresses from baremetal transaction.\n\n This method is used to extract switch information\n from the transaction where VNIC_TYPE is baremetal.\n\n :param port: Received port transaction\n :returns: list of all switches\n :returns: list of only switches which are active\n \"\"\"\n\n all_switches = set()\n active_switches = set()\n all_link_info = port[bc.portbindings.PROFILE]['local_link_information']\n for link_info in all_link_info:\n switch_info = self._get_baremetal_switch_info(link_info)\n if not switch_info:\n continue\n switch_ip = switch_info['switch_ip']\n\n # If not for Nexus\n if not self._switch_defined(switch_ip):\n continue\n\n all_switches.add(switch_ip)\n if self.is_switch_active(switch_ip):\n active_switches.add(switch_ip)\n\n return list(all_switches), list(active_switches)", "def get_switch_actors(self):\n \"\"\"\n Get information about all actors\n\n This needs 1+(5n) requests where n = number of actors registered\n\n Deprecated, use get_actors instead.\n\n Returns a dict:\n [ain] = {\n 'name': Name of actor,\n 'state': Powerstate (boolean)\n 'present': Connected to server? 
(boolean)\n 'power': Current power consumption in mW\n 'energy': Used energy in Wh since last energy reset\n 'temperature': Current environment temperature in celsius\n }\n \"\"\"\n actors = {}\n for ain in self.homeautoswitch(\"getswitchlist\").split(','):\n actors[ain] = {\n 'name': self.homeautoswitch(\"getswitchname\", ain),\n 'state': bool(self.homeautoswitch(\"getswitchstate\", ain)),\n 'present': bool(self.homeautoswitch(\"getswitchpresent\", ain)),\n 'power': self.homeautoswitch(\"getswitchpower\", ain),\n 'energy': self.homeautoswitch(\"getswitchenergy\", ain),\n 'temperature': self.homeautoswitch(\"getswitchtemperature\", ain),\n }\n return actors", "def logical_switches(self):\n \"\"\"\n Gets the LogicalSwitches API client.\n\n Returns:\n LogicalSwitches:\n \"\"\"\n if not self.__logical_switches:\n self.__logical_switches = LogicalSwitches(self.__connection)\n return self.__logical_switches", "def do_show_logical_switch(self, line):\n \"\"\"show_logical_switch <peer> <logical switch>\n \"\"\"\n\n def f(p, args):\n try:\n (lsw,) = args\n except:\n print(\"argument error\")\n return\n o = p.get()\n for s in o.logical_switches.switch:\n if s.id != lsw:\n continue\n print(s.id)\n print('datapath-id %s' % s.datapath_id)\n if s.resources.queue:\n print('queues:')\n for q in s.resources.queue:\n print('\\t %s' % q)\n if s.resources.port:\n print('ports:')\n for p in s.resources.port:\n print('\\t %s' % p)\n\n self._request(line, f)" ]
[ 0.7795083522796631, 0.7660427689552307, 0.7531906962394714, 0.7399041652679443, 0.739628255367279, 0.7348507642745972, 0.7344115376472473, 0.724181056022644, 0.7154394388198853, 0.7123521566390991, 0.7080769538879395, 0.7069939970970154 ]
Returns the switch with the provided ``name``. If ``autocreate`` is set to ``True`` and no switch with that name exists, a ``DISABLED`` switch will be created with that name. Keyword Arguments: name -- The name of a switch.
def switch(self, name): """ Returns the switch with the provided ``name``. If ``autocreate`` is set to ``True`` and no switch with that name exists, a ``DISABLED`` switch will be with that name. Keyword Arguments: name -- A name of a switch. """ try: switch = self.storage[self.__namespaced(name)] except KeyError: if not self.autocreate: raise ValueError("No switch named '%s' registered in '%s'" % (name, self.namespace)) switch = self.__create_and_register_disabled_switch(name) switch.manager = self return switch
[ "def auto(name):\n '''\n .. versionadded:: 0.17.0\n\n Instruct alternatives to use the highest priority\n path for <name>\n\n name\n is the master name for this link group\n (e.g. pager)\n\n '''\n ret = {'name': name,\n 'result': True,\n 'comment': '',\n 'changes': {}}\n\n display = __salt__['alternatives.display'](name)\n line = display.splitlines()[0]\n if line.endswith(' auto mode'):\n ret['comment'] = '{0} already in auto mode'.format(name)\n return ret\n\n if __opts__['test']:\n ret['comment'] = '{0} will be put in auto mode'.format(name)\n ret['result'] = None\n return ret\n ret['changes']['result'] = __salt__['alternatives.auto'](name)\n return ret", "def addSwitch(self, name=None):\n '''\n Add a new switch to the topology.\n '''\n if name is None:\n while True:\n name = 's' + str(self.__snum)\n self.__snum += 1\n if name not in self.__nxgraph:\n break\n self.__addNode(name, Switch)\n return name", "def auto(name):\n '''\n Trigger alternatives to set the path for <name> as\n specified by priority.\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' alternatives.auto name\n '''\n cmd = [_get_cmd(), '--auto', name]\n out = __salt__['cmd.run_all'](cmd, python_shell=False)\n if out['retcode'] > 0:\n return out['stderr']\n return out['stdout']", "def from_name(cls, name):\n \"\"\"Retrieve a sshkey id associated to a name.\"\"\"\n sshkeys = cls.list({'name': name})\n if len(sshkeys) == 1:\n return sshkeys[0]['id']\n elif not sshkeys:\n return\n\n raise DuplicateResults('sshkey name %s is ambiguous.' % name)", "def enable_auto_login(name, password):\n '''\n .. versionadded:: 2016.3.0\n\n Configures the machine to auto login with the specified user\n\n Args:\n\n name (str): The user account use for auto login\n\n password (str): The password to user for auto login\n\n .. versionadded:: 2017.7.3\n\n Returns:\n bool: ``True`` if successful, otherwise ``False``\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' user.enable_auto_login stevej\n '''\n # Make the entry into the defaults file\n cmd = ['defaults',\n 'write',\n '/Library/Preferences/com.apple.loginwindow.plist',\n 'autoLoginUser',\n name]\n __salt__['cmd.run'](cmd)\n current = get_auto_login()\n\n # Create/Update the kcpassword file with an obfuscated password\n o_password = _kcpassword(password=password)\n with salt.utils.files.set_umask(0o077):\n with salt.utils.files.fopen('/etc/kcpassword', 'w' if six.PY2 else 'wb') as fd:\n fd.write(o_password)\n\n return current if isinstance(current, bool) else current.lower() == name.lower()", "def switch(name, ip=None, netmask=None, gateway=None, dhcp=None,\n password=None, snmp=None):\n '''\n Manage switches in a Dell Chassis.\n\n name\n The switch designation (e.g. switch-1, switch-2)\n\n ip\n The Static IP Address of the switch\n\n netmask\n The netmask for the static IP\n\n gateway\n The gateway for the static IP\n\n dhcp\n True: Enable DHCP\n False: Do not change DHCP setup\n (disabling DHCP is automatic when a static IP is set)\n\n password\n The access (root) password for the switch\n\n snmp\n The SNMP community string for the switch\n\n Example:\n\n .. 
code-block:: yaml\n\n my-dell-chassis:\n dellchassis.switch:\n - switch: switch-1\n - ip: 192.168.1.1\n - netmask: 255.255.255.0\n - gateway: 192.168.1.254\n - dhcp: True\n - password: secret\n - snmp: public\n\n '''\n ret = {'name': name,\n 'result': True,\n 'changes': {},\n 'comment': ''}\n\n current_nic = __salt__['chassis.cmd']('network_info', module=name)\n try:\n if current_nic.get('retcode', 0) != 0:\n ret['result'] = False\n ret['comment'] = current_nic['stdout']\n return ret\n\n if ip or netmask or gateway:\n if not ip:\n ip = current_nic['Network']['IP Address']\n if not netmask:\n ip = current_nic['Network']['Subnet Mask']\n if not gateway:\n ip = current_nic['Network']['Gateway']\n\n if current_nic['Network']['DHCP Enabled'] == '0' and dhcp:\n ret['changes'].update({'DHCP': {'Old': {'DHCP Enabled': current_nic['Network']['DHCP Enabled']},\n 'New': {'DHCP Enabled': dhcp}}})\n\n if ((ip or netmask or gateway) and not dhcp and (ip != current_nic['Network']['IP Address'] or\n netmask != current_nic['Network']['Subnet Mask'] or\n gateway != current_nic['Network']['Gateway'])):\n ret['changes'].update({'IP': {'Old': current_nic['Network'],\n 'New': {'IP Address': ip,\n 'Subnet Mask': netmask,\n 'Gateway': gateway}}})\n\n if password:\n if 'New' not in ret['changes']:\n ret['changes']['New'] = {}\n ret['changes']['New'].update({'Password': '*****'})\n\n if snmp:\n if 'New' not in ret['changes']:\n ret['changes']['New'] = {}\n ret['changes']['New'].update({'SNMP': '*****'})\n\n if ret['changes'] == {}:\n ret['comment'] = 'Switch ' + name + ' is already in desired state'\n return ret\n except AttributeError:\n ret['changes'] = {}\n ret['comment'] = 'Something went wrong retrieving the switch details'\n return ret\n\n if __opts__['test']:\n ret['result'] = None\n ret['comment'] = 'Switch ' + name + ' configuration will change'\n return ret\n\n # Finally, set the necessary configurations on the chassis.\n dhcp_ret = net_ret = password_ret = snmp_ret = True\n if dhcp:\n dhcp_ret = __salt__['chassis.cmd']('set_niccfg', module=name, dhcp=dhcp)\n if ip or netmask or gateway:\n net_ret = __salt__['chassis.cmd']('set_niccfg', ip, netmask, gateway, module=name)\n if password:\n password_ret = __salt__['chassis.cmd']('deploy_password', 'root', password, module=name)\n\n if snmp:\n snmp_ret = __salt__['chassis.cmd']('deploy_snmp', snmp, module=name)\n\n if any([password_ret, snmp_ret, net_ret, dhcp_ret]) is False:\n ret['result'] = False\n ret['comment'] = 'There was an error setting the switch {0}.'.format(name)\n\n ret['comment'] = 'Dell chassis switch {0} was updated.'.format(name)\n return ret", "def present(name, parent=None, vlan=None):\n '''\n Ensures that the named bridge exists, eventually creates it.\n\n Args:\n name: The name of the bridge.\n parent: The name of the parent bridge (if the bridge shall be created\n as a fake bridge). If specified, vlan must also be specified.\n vlan: The VLAN ID of the bridge (if the bridge shall be created as a\n fake bridge). 
If specified, parent must also be specified.\n\n '''\n ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''}\n\n # Comment and change messages\n comment_bridge_created = 'Bridge {0} created.'.format(name)\n comment_bridge_notcreated = 'Unable to create bridge: {0}.'.format(name)\n comment_bridge_exists = 'Bridge {0} already exists.'.format(name)\n comment_bridge_mismatch = ('Bridge {0} already exists, but has a different'\n ' parent or VLAN ID.').format(name)\n changes_bridge_created = {name: {'old': 'Bridge {0} does not exist.'.format(name),\n 'new': 'Bridge {0} created'.format(name),\n }\n }\n\n bridge_exists = __salt__['openvswitch.bridge_exists'](name)\n if bridge_exists:\n current_parent = __salt__['openvswitch.bridge_to_parent'](name)\n if current_parent == name:\n current_parent = None\n current_vlan = __salt__['openvswitch.bridge_to_vlan'](name)\n if current_vlan == 0:\n current_vlan = None\n\n # Dry run, test=true mode\n if __opts__['test']:\n if bridge_exists:\n if current_parent == parent and current_vlan == vlan:\n ret['result'] = True\n ret['comment'] = comment_bridge_exists\n else:\n ret['result'] = False\n ret['comment'] = comment_bridge_mismatch\n else:\n ret['result'] = None\n ret['comment'] = comment_bridge_created\n\n return ret\n\n if bridge_exists:\n if current_parent == parent and current_vlan == vlan:\n ret['result'] = True\n ret['comment'] = comment_bridge_exists\n else:\n ret['result'] = False\n ret['comment'] = comment_bridge_mismatch\n else:\n bridge_create = __salt__['openvswitch.bridge_create'](\n name, parent=parent, vlan=vlan)\n if bridge_create:\n ret['result'] = True\n ret['comment'] = comment_bridge_created\n ret['changes'] = changes_bridge_created\n else:\n ret['result'] = False\n ret['comment'] = comment_bridge_notcreated\n\n return ret", "def port_profile_vlan_profile_switchport_access_vlan_name(self, **kwargs):\n \"\"\"Auto Generated Code\n \"\"\"\n config = ET.Element(\"config\")\n port_profile = ET.SubElement(config, \"port-profile\", xmlns=\"urn:brocade.com:mgmt:brocade-port-profile\")\n name_key = ET.SubElement(port_profile, \"name\")\n name_key.text = kwargs.pop('name')\n vlan_profile = ET.SubElement(port_profile, \"vlan-profile\")\n switchport = ET.SubElement(vlan_profile, \"switchport\")\n access = ET.SubElement(switchport, \"access\")\n vlan = ET.SubElement(access, \"vlan\")\n name = ET.SubElement(vlan, \"name\")\n name.text = kwargs.pop('name')\n\n callback = kwargs.pop('callback', self._callback)\n return callback(config)", "def create_switch(type, settings, pin):\n\t\"\"\"Create a switch.\n\n Args:\n type: (str): type of the switch [A,B,C,D]\n settings (str): a comma separted list\n pin (int): wiringPi pin\n\n Returns:\n switch\n \"\"\"\n\n\tswitch = None\n\tif type == \"A\":\n\t\tgroup, device = settings.split(\",\")\n\t\tswitch = pi_switch.RCSwitchA(group, device)\n\n\telif type == \"B\":\n\t\taddr, channel = settings.split(\",\")\n\t\taddr = int(addr)\n\t\tchannel = int(channel)\n\t\tswitch = pi_switch.RCSwitchB(addr, channel)\n\n\telif type == \"C\":\n\t\tfamily, group, device = settings.split(\",\")\n\t\tgroup = int(group)\n\t\tdevice = int(device)\n\t\tswitch = pi_switch.RCSwitchC(family, group, device)\n\n\telif type == \"D\":\n\t\tgroup, device = settings.split(\",\")\n\t\tdevice = int(device)\n\t\tswitch = pi_switch.RCSwitchD(group, device)\n\n\telse:\n\t\tprint \"Type %s is not supported!\" % type\n\t\tsys.exit()\n\n\tswitch.enableTransmit(pin)\n\treturn switch", "def register(self, switch, 
signal=signals.switch_registered):\n '''\n Register a switch and persist it to the storage.\n '''\n if not switch.name:\n raise ValueError('Switch name cannot be blank')\n\n switch.manager = self\n self.__persist(switch)\n\n signal.call(switch)", "def getIndividualByName(self, name):\n \"\"\"\n Returns an individual with the specified name, or raises a\n IndividualNameNotFoundException if it does not exist.\n \"\"\"\n if name not in self._individualNameMap:\n raise exceptions.IndividualNameNotFoundException(name)\n return self._individualNameMap[name]", "def branch(self, name, values=None): # @UnusedVariable @IgnorePep8\n \"\"\"\n Checks whether the given switch matches the value provided\n\n Parameters\n ----------\n name : str\n The name of the parameter to retrieve\n value : str | None\n The value(s) of the switch to match if a non-boolean switch\n \"\"\"\n if isinstance(values, basestring):\n values = [values]\n spec = self.parameter_spec(name)\n if not isinstance(spec, SwitchSpec):\n raise ArcanaUsageError(\n \"{} is standard parameter not a switch\".format(spec))\n switch = self._get_parameter(name)\n if spec.is_boolean:\n if values is not None:\n raise ArcanaDesignError(\n \"Should not provide values ({}) to boolean switch \"\n \"'{}' in {}\".format(\n values, name, self._param_error_location))\n in_branch = switch.value\n else:\n if values is None:\n raise ArcanaDesignError(\n \"Value(s) need(s) to be provided non-boolean switch\"\n \" '{}' in {}\".format(\n name, self._param_error_location))\n # Register parameter as being used by the pipeline\n unrecognised_values = set(values) - set(spec.choices)\n if unrecognised_values:\n raise ArcanaDesignError(\n \"Provided value(s) ('{}') for switch '{}' in {} \"\n \"is not a valid option ('{}')\".format(\n \"', '\".join(unrecognised_values), name,\n self._param_error_location,\n \"', '\".join(spec.choices)))\n in_branch = switch.value in values\n return in_branch" ]
[ 0.7068734169006348, 0.7061296105384827, 0.7047081589698792, 0.6806841492652893, 0.6628941297531128, 0.6542777419090271, 0.6535179615020752, 0.6494287252426147, 0.649349570274353, 0.6475399732589722, 0.6444427371025085, 0.6438058018684387 ]
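The Salt states quoted in the negative examples above (the chassis switch and the Open vSwitch bridge) share one skeleton: build a `ret` dict, detect whether anything needs to change, short-circuit in test mode, then apply the change and report. A minimal sketch of that pattern in plain Python follows; the `check_needed`/`apply_change` callables and the `test` flag standing in for Salt's `__opts__['test']` are hypothetical stand-ins, not Salt APIs.

    def present(name, check_needed, apply_change, test=False):
        """Sketch of the ret-dict pattern used by the Salt states above."""
        ret = {'name': name, 'result': True, 'changes': {}, 'comment': ''}

        if not check_needed(name):
            # Nothing to do: report success with empty changes.
            ret['comment'] = '{0} is already in the desired state'.format(name)
            return ret

        if test:
            # Dry run: result is None, describe what would happen, change nothing.
            ret['result'] = None
            ret['comment'] = '{0} would be updated'.format(name)
            return ret

        ok = apply_change(name)
        ret['result'] = bool(ok)
        ret['changes'] = {name: 'updated'} if ok else {}
        ret['comment'] = ('{0} was updated.' if ok
                          else 'There was an error updating {0}.').format(name)
        return ret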
Register a switch and persist it to the storage.
def register(self, switch, signal=signals.switch_registered):
    '''
    Register a switch and persist it to the storage.
    '''
    if not switch.name:
        raise ValueError('Switch name cannot be blank')

    switch.manager = self
    self.__persist(switch)

    signal.call(switch)
[ "def switch(self, name):\n \"\"\"\n Returns the switch with the provided ``name``.\n\n If ``autocreate`` is set to ``True`` and no switch with that name\n exists, a ``DISABLED`` switch will be with that name.\n\n Keyword Arguments:\n name -- A name of a switch.\n \"\"\"\n try:\n switch = self.storage[self.__namespaced(name)]\n except KeyError:\n if not self.autocreate:\n raise ValueError(\"No switch named '%s' registered in '%s'\" % (name, self.namespace))\n\n switch = self.__create_and_register_disabled_switch(name)\n\n switch.manager = self\n return switch", "def switch_add_record(self, userid, interface, port=None,\n switch=None, comments=None):\n \"\"\"Add userid and nic name address into switch table.\"\"\"\n with get_network_conn() as conn:\n conn.execute(\"INSERT INTO switch VALUES (?, ?, ?, ?, ?)\",\n (userid, interface, switch, port, comments))\n LOG.debug(\"New record in the switch table: user %s, \"\n \"nic %s, port %s\" %\n (userid, interface, port))", "def add_reserved_switch_binding(switch_ip, state):\n \"\"\"Add a reserved switch binding.\"\"\"\n\n # overload port_id to contain switch state\n add_nexusport_binding(\n state,\n const.NO_VLAN_OR_VNI_ID,\n const.NO_VLAN_OR_VNI_ID,\n switch_ip,\n const.RESERVED_NEXUS_SWITCH_DEVICE_ID_R1)", "def write_switch(self, module_address, state, callback_fn):\r\n \"\"\"Set relay state.\"\"\"\r\n _LOGGER.info(\"write_switch: setstate,{},{}{}\"\r\n .format(module_address, str(state), chr(13)))\r\n self.subscribe(\"state,\" + module_address, callback_fn)\r\n self.send(\"setstate,{},{}{}\"\r\n .format(module_address, str(state), chr(13)))", "def register_switch_address(addr, interval=None):\n \"\"\"\n Registers a new address to initiate connection to switch.\n\n Registers a new IP address and port pair of switch to let\n ryu.controller.controller.OpenFlowController to try to initiate\n connection to switch.\n\n :param addr: A tuple of (host, port) pair of switch.\n :param interval: Interval in seconds to try to connect to switch\n \"\"\"\n assert len(addr) == 2\n assert ip.valid_ipv4(addr[0]) or ip.valid_ipv6(addr[0])\n ofp_handler = app_manager.lookup_service_brick(ofp_event.NAME)\n _TMP_ADDRESSES[addr] = interval\n\n def _retry_loop():\n # Delays registration if ofp_handler is not started yet\n while True:\n if ofp_handler.controller is not None:\n for a, i in _TMP_ADDRESSES.items():\n ofp_handler.controller.spawn_client_loop(a, i)\n hub.sleep(1)\n break\n hub.sleep(1)\n\n hub.spawn(_retry_loop)", "public Switch newSwitch(int id, int capacity) {\n Switch s = swBuilder.newSwitch(id, capacity);\n switches.add(s);\n return s;\n }", "def update_reserved_switch_binding(switch_ip, state):\n \"\"\"Update a reserved switch binding.\"\"\"\n\n # overload port_id to contain switch state\n update_reserved_binding(\n const.NO_VLAN_OR_VNI_ID,\n switch_ip,\n const.RESERVED_NEXUS_SWITCH_DEVICE_ID_R1,\n state)", "def set_vswitch(self, switch_name, **kwargs):\n \"\"\"Set vswitch\"\"\"\n smt_userid = zvmutils.get_smt_userid()\n rd = ' '.join((\n \"SMAPI %s API Virtual_Network_Vswitch_Set_Extended\" %\n smt_userid,\n \"--operands\",\n \"-k switch_name=%s\" % switch_name))\n\n for k, v in kwargs.items():\n rd = ' '.join((rd,\n \"-k %(key)s=\\'%(value)s\\'\" %\n {'key': k, 'value': v}))\n\n try:\n self._request(rd)\n except exception.SDKSMTRequestFailed as err:\n LOG.error(\"Failed to set vswitch %s, error: %s\" %\n (switch_name, err.format_message()))\n self._set_vswitch_exception(err, switch_name)", "def set_switch_state(self, state):\n \"\"\"Set the switch 
state, also update local state.\"\"\"\n self.set_service_value(\n self.switch_service,\n 'Target',\n 'newTargetValue',\n state)\n self.set_cache_value('Status', state)", "def set_raw_holding_register(self, name, value):\n \"\"\"Write to register by name.\"\"\"\n self._conn.write_register(\n unit=self._slave,\n address=(self._holding_regs[name]['addr']),\n value=value)", "def addSwitch(self, name=None):\n '''\n Add a new switch to the topology.\n '''\n if name is None:\n while True:\n name = 's' + str(self.__snum)\n self.__snum += 1\n if name not in self.__nxgraph:\n break\n self.__addNode(name, Switch)\n return name", "public void doSwitch() {\n AuthenticationInfo newRunningInfo = (runningInfo.equals(masterInfo) ? standbyInfo : masterInfo);\n this.doSwitch(newRunningInfo);\n }" ]
[ 0.7570830583572388, 0.7144901752471924, 0.7065062522888184, 0.689936101436615, 0.6877805590629578, 0.6734769344329834, 0.6715748906135559, 0.6705305576324463, 0.6699982285499573, 0.6660456657409668, 0.6631150841712952, 0.6563107371330261 ]
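To make the behaviour of the positive `register` example above concrete, here is a small self-contained sketch of the same pattern; the `Switch`, `Manager`, and `FakeSignal` classes are hypothetical stand-ins for the library's real manager, storage, and `signals.switch_registered` objects.

    class FakeSignal(object):
        """Stand-in for signals.switch_registered."""
        def call(self, switch):
            print('registered %s' % switch.name)


    class Switch(object):
        def __init__(self, name):
            self.name = name
            self.manager = None


    class Manager(object):
        def __init__(self):
            self._storage = {}

        def register(self, switch, signal=FakeSignal()):
            if not switch.name:
                raise ValueError('Switch name cannot be blank')
            switch.manager = self
            self._storage[switch.name] = switch   # "persist" the switch
            signal.call(switch)


    Manager().register(Switch('feature-x'))   # prints: registered feature-x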
Central interface to verify interactions.

`verify` uses a fluent interface::

    verify(<obj>, times=2).<method_name>(<args>)

`args` can be as concrete as necessary. Often a catch-all is enough,
especially if you're working with strict mocks, because they throw at call
time on unwanted, unconfigured arguments::

    from mockito import ANY, ARGS, KWARGS
    when(manager).add_tasks(1, 2, 3)
    ...
    # no need to duplicate the specification; every other argument pattern
    # would have raised anyway.
    verify(manager).add_tasks(1, 2, 3)   # duplicates the `when` call
    verify(manager).add_tasks(*ARGS)
    verify(manager).add_tasks(...)       # Py3
    verify(manager).add_tasks(Ellipsis)  # Py2
def verify(obj, times=1, atleast=None, atmost=None, between=None,
           inorder=False):
    """Central interface to verify interactions.

    `verify` uses a fluent interface::

        verify(<obj>, times=2).<method_name>(<args>)

    `args` can be as concrete as necessary. Often a catch-all is enough,
    especially if you're working with strict mocks, because they throw at
    call time on unwanted, unconfigured arguments::

        from mockito import ANY, ARGS, KWARGS
        when(manager).add_tasks(1, 2, 3)
        ...
        # no need to duplicate the specification; every other argument pattern
        # would have raised anyway.
        verify(manager).add_tasks(1, 2, 3)   # duplicates the `when` call
        verify(manager).add_tasks(*ARGS)
        verify(manager).add_tasks(...)       # Py3
        verify(manager).add_tasks(Ellipsis)  # Py2

    """
    if isinstance(obj, str):
        obj = get_obj(obj)

    verification_fn = _get_wanted_verification(
        times=times, atleast=atleast, atmost=atmost, between=between)
    if inorder:
        verification_fn = verification.InOrder(verification_fn)

    # FIXME?: Catch error if obj is neither a Mock nor a known stubbed obj
    theMock = _get_mock_or_raise(obj)

    class Verify(object):
        def __getattr__(self, method_name):
            return invocation.VerifiableInvocation(
                theMock, method_name, verification_fn)

    return Verify()
[ "def verifyZeroInteractions(*objs):\n \"\"\"Verify that no methods have been called on given objs.\n\n Note that strict mocks usually throw early on unexpected, unstubbed\n invocations. Partial mocks ('monkeypatched' objects or modules) do not\n support this functionality at all, bc only for the stubbed invocations\n the actual usage gets recorded. So this function is of limited use,\n nowadays.\n\n \"\"\"\n for obj in objs:\n theMock = _get_mock_or_raise(obj)\n\n if len(theMock.invocations) > 0:\n raise VerificationError(\n \"\\nUnwanted interaction: %s\" % theMock.invocations[0])", "def verifyNoUnwantedInteractions(*objs):\n \"\"\"Verifies that expectations set via `expect` are met\n\n E.g.::\n\n expect(os.path, times=1).exists(...).thenReturn(True)\n os.path('/foo')\n verifyNoUnwantedInteractions(os.path) # ok, called once\n\n If you leave out the argument *all* registered objects will\n be checked.\n\n .. note:: **DANGERZONE**: If you did not :func:`unstub` correctly,\n it is possible that old registered mocks, from other tests\n leak.\n\n See related :func:`expect`\n \"\"\"\n\n if objs:\n theMocks = map(_get_mock_or_raise, objs)\n else:\n theMocks = mock_registry.get_registered_mocks()\n\n for mock in theMocks:\n for i in mock.stubbed_invocations:\n i.verify()", "def verify_arguments(self, args=None, kwargs=None):\n \"\"\"Ensures that the arguments specified match the signature of the real method.\n\n :raise: ``VerifyingDoubleError`` if the arguments do not match.\n \"\"\"\n\n args = self.args if args is None else args\n kwargs = self.kwargs if kwargs is None else kwargs\n\n try:\n verify_arguments(self._target, self._method_name, args, kwargs)\n except VerifyingBuiltinDoubleArgumentError:\n if doubles.lifecycle.ignore_builtin_verification():\n raise", "def verify_arguments(target, method_name, args, kwargs):\n \"\"\"Verifies that the provided arguments match the signature of the provided method.\n\n :param Target target: A ``Target`` object containing the object with the method to double.\n :param str method_name: The name of the method to double.\n :param tuple args: The positional arguments the method should be called with.\n :param dict kwargs: The keyword arguments the method should be called with.\n :raise: ``VerifyingDoubleError`` if the provided arguments do not match the signature.\n \"\"\"\n\n if method_name == '_doubles__new__':\n return _verify_arguments_of_doubles__new__(target, args, kwargs)\n\n attr = target.get_attr(method_name)\n method = attr.object\n\n if attr.kind in ('data', 'attribute', 'toplevel', 'class method', 'static method'):\n try:\n method = method.__get__(None, attr.defining_class)\n except AttributeError:\n method = method.__call__\n elif attr.kind == 'property':\n if args or kwargs:\n raise VerifyingDoubleArgumentError(\"Properties do not accept arguments.\")\n return\n else:\n args = ['self_or_cls'] + list(args)\n\n _verify_arguments(method, method_name, args, kwargs)", "def verify_cot_cmdln(args=None, event_loop=None):\n \"\"\"Test the chain of trust from the commandline, for debugging purposes.\n\n Args:\n args (list, optional): the commandline args to parse. If None, use\n ``sys.argv[1:]`` . Defaults to None.\n\n event_loop (asyncio.events.AbstractEventLoop): the event loop to use.\n If ``None``, use ``asyncio.get_event_loop()``. 
Defaults to ``None``.\n\n \"\"\"\n args = args or sys.argv[1:]\n parser = argparse.ArgumentParser(\n description=\"\"\"Verify a given task's chain of trust.\n\nGiven a task's `task_id`, get its task definition, then trace its chain of\ntrust back to the tree. This doesn't verify chain of trust artifact signatures,\nbut does run the other tests in `scriptworker.cot.verify.verify_chain_of_trust`.\n\nThis is helpful in debugging chain of trust changes or issues.\n\nTo use, first either set your taskcluster creds in your env http://bit.ly/2eDMa6N\nor in the CREDS_FILES http://bit.ly/2fVMu0A\"\"\")\n parser.add_argument('task_id', help='the task id to test')\n parser.add_argument('--task-type', help='the task type to test',\n choices=sorted(get_valid_task_types().keys()), required=True)\n parser.add_argument('--cleanup', help='clean up the temp dir afterwards',\n dest='cleanup', action='store_true', default=False)\n parser.add_argument('--cot-product', help='the product type to test', default='firefox')\n parser.add_argument('--verify-sigs', help='enable signature verification', action='store_true', default=False)\n opts = parser.parse_args(args)\n tmp = tempfile.mkdtemp()\n log = logging.getLogger('scriptworker')\n log.setLevel(logging.DEBUG)\n logging.basicConfig()\n event_loop = event_loop or asyncio.get_event_loop()\n try:\n event_loop.run_until_complete(_async_verify_cot_cmdln(opts, tmp))\n finally:\n if opts.cleanup:\n rm(tmp)\n else:\n log.info(\"Artifacts are in {}\".format(tmp))", "def called_with(self, *args, **kwargs):\n \"\"\"Return True if the spy was called with the specified args/kwargs.\n\n Otherwise raise VerificationError.\n\n \"\"\"\n expected_call = Call(*args, **kwargs)\n if expected_call in calls(self.spy):\n return True\n raise VerificationError(\n \"expected %s to be called with %s, but it wasn't\" % (\n self.spy, expected_call.formatted_args))", "def _default_verify_function(instance, answer, result_host, atol, verbose):\n \"\"\"default verify function based on numpy.allclose\"\"\"\n\n #first check if the length is the same\n if len(instance.arguments) != len(answer):\n raise TypeError(\"The length of argument list and provided results do not match.\")\n #for each element in the argument list, check if the types match\n for i, arg in enumerate(instance.arguments):\n if answer[i] is not None: #skip None elements in the answer list\n if isinstance(answer[i], numpy.ndarray) and isinstance(arg, numpy.ndarray):\n if answer[i].dtype != arg.dtype:\n raise TypeError(\"Element \" + str(i)\n + \" of the expected results list is not of the same dtype as the kernel output: \"\n + str(answer[i].dtype) + \" != \" + str(arg.dtype) + \".\")\n if answer[i].size != arg.size:\n raise TypeError(\"Element \" + str(i)\n + \" of the expected results list has a size different from \"\n + \"the kernel argument: \"\n + str(answer[i].size) + \" != \" + str(arg.size) + \".\")\n elif isinstance(answer[i], numpy.number) and isinstance(arg, numpy.number):\n if answer[i].dtype != arg.dtype:\n raise TypeError(\"Element \" + str(i)\n + \" of the expected results list is not the same as the kernel output: \"\n + str(answer[i].dtype) + \" != \" + str(arg.dtype) + \".\")\n else:\n #either answer[i] and argument have different types or answer[i] is not a numpy type\n if not isinstance(answer[i], numpy.ndarray) or not isinstance(answer[i], numpy.number):\n raise TypeError(\"Element \" + str(i)\n + \" of expected results list is not a numpy array or numpy scalar.\")\n else:\n raise TypeError(\"Element \" + 
str(i)\n + \" of expected results list and kernel arguments have different types.\")\n\n def _ravel(a):\n if hasattr(a, 'ravel') and len(a.shape) > 1:\n return a.ravel()\n return a\n\n def _flatten(a):\n if hasattr(a, 'flatten'):\n return a.flatten()\n return a\n\n correct = True\n for i, arg in enumerate(instance.arguments):\n expected = answer[i]\n if expected is not None:\n\n result = _ravel(result_host[i])\n expected = _flatten(expected)\n output_test = numpy.allclose(expected, result, atol=atol)\n\n if not output_test and verbose:\n print(\"Error: \" + util.get_config_string(instance.params) + \" detected during correctness check\")\n print(\"this error occured when checking value of the %oth kernel argument\" % (i,))\n print(\"Printing kernel output and expected result, set verbose=False to suppress this debug print\")\n numpy.set_printoptions(edgeitems=50)\n print(\"Kernel output:\")\n print(result)\n print(\"Expected:\")\n print(expected)\n correct = correct and output_test\n\n if not correct:\n logging.debug('correctness check has found a correctness issue')\n raise Exception(\"Error: \" + util.get_config_string(instance.params) + \" failed correctness check\")\n\n return correct", "def __mock_verify\n __mock_defis.values.all?(&:empty?) || begin\n msg, defis_with_same_msg = __mock_defis.find{ |_, v| v.any? }\n args, defis = defis_with_same_msg.group_by(&:args).first\n dsize = __mock_disps[msg].count{ |d| d.args == args }\n Mock.__send__(:raise, # Too little times\n Expected.new(object, defis.first, defis.size + dsize, dsize))\n end\n end", "def expect(obj, strict=None,\n times=None, atleast=None, atmost=None, between=None):\n \"\"\"Stub a function call, and set up an expected call count.\n\n Usage::\n\n # Given `dog` is an instance of a `Dog`\n expect(dog, times=1).bark('Wuff').thenReturn('Miau')\n dog.bark('Wuff')\n dog.bark('Wuff') # will throw at call time: too many invocations\n\n # maybe if you need to ensure that `dog.bark()` was called at all\n verifyNoUnwantedInteractions()\n\n .. 
note:: You must :func:`unstub` after stubbing, or use `with`\n statement.\n\n See :func:`when`, :func:`when2`, :func:`verifyNoUnwantedInteractions`\n\n \"\"\"\n if strict is None:\n strict = True\n theMock = _get_mock(obj, strict=strict)\n\n verification_fn = _get_wanted_verification(\n times=times, atleast=atleast, atmost=atmost, between=between)\n\n class Expect(object):\n def __getattr__(self, method_name):\n return invocation.StubbedInvocation(\n theMock, method_name, verification=verification_fn,\n strict=strict)\n\n return Expect()", "def verify(self):\n \"\"\"\n Verifying all inspectors in exp_list\n Return:\n True: pass all inspectors\n False: fail at more than one inspector\n \"\"\"\n for expectation in self.exp_list:\n if hasattr(expectation, \"verify\") and not expectation.verify():\n return False\n return True", "def verify(self, **kwargs):\n \"\"\"Authorization Request parameters that are OPTIONAL in the OAuth 2.0\n specification MAY be included in the OpenID Request Object without also\n passing them as OAuth 2.0 Authorization Request parameters, with one\n exception: The scope parameter MUST always be present in OAuth 2.0\n Authorization Request parameters.\n All parameter values that are present both in the OAuth 2.0\n Authorization Request and in the OpenID Request Object MUST exactly\n match.\"\"\"\n super(AuthorizationRequest, self).verify(**kwargs)\n\n clear_verified_claims(self)\n\n args = {}\n for arg in [\"keyjar\", \"opponent_id\", \"sender\", \"alg\", \"encalg\",\n \"encenc\"]:\n try:\n args[arg] = kwargs[arg]\n except KeyError:\n pass\n\n if \"opponent_id\" not in kwargs:\n args[\"opponent_id\"] = self[\"client_id\"]\n\n if \"request\" in self:\n if isinstance(self[\"request\"], str):\n # Try to decode the JWT, checks the signature\n oidr = OpenIDRequest().from_jwt(str(self[\"request\"]), **args)\n\n # check if something is change in the original message\n for key, val in oidr.items():\n if key in self:\n if self[key] != val:\n # log but otherwise ignore\n logger.warning('{} != {}'.format(self[key], val))\n\n # remove all claims\n _keys = list(self.keys())\n for key in _keys:\n if key not in oidr:\n del self[key]\n\n self.update(oidr)\n\n # replace the JWT with the parsed and verified instance\n self[verified_claim_name(\"request\")] = oidr\n\n if \"id_token_hint\" in self:\n if isinstance(self[\"id_token_hint\"], str):\n idt = IdToken().from_jwt(str(self[\"id_token_hint\"]), **args)\n self[\"verified_id_token_hint\"] = idt\n\n if \"response_type\" not in self:\n raise MissingRequiredAttribute(\"response_type missing\", self)\n\n _rt = self[\"response_type\"]\n if \"id_token\" in _rt:\n if \"nonce\" not in self:\n raise MissingRequiredAttribute(\"Nonce missing\", self)\n else:\n try:\n if self['nonce'] != kwargs['nonce']:\n raise ValueError(\n 'Nonce in id_token not matching nonce in authz '\n 'request')\n except KeyError:\n pass\n\n if \"openid\" not in self.get(\"scope\", []):\n raise MissingRequiredValue(\"openid not in scope\", self)\n\n if \"offline_access\" in self.get(\"scope\", []):\n if \"prompt\" not in self or \"consent\" not in self[\"prompt\"]:\n raise MissingRequiredValue(\"consent in prompt\", self)\n\n if \"prompt\" in self:\n if \"none\" in self[\"prompt\"] and len(self[\"prompt\"]) > 1:\n raise InvalidRequest(\"prompt none combined with other value\",\n self)\n\n return True", "def guard_verify(obj):\n \"\"\"Returns True if 'verify' transition can be applied to the Worksheet\n passed in. 
This is, returns true if all the analyses assigned\n have already been verified. Those analyses that are in an inactive state\n (cancelled, inactive) are dismissed, but at least one analysis must be in\n an active state (and verified), otherwise always return False.\n Note this guard depends entirely on the current status of the children\n :returns: true or false\n \"\"\"\n\n analyses = obj.getAnalyses()\n if not analyses:\n # An empty worksheet cannot be verified\n return False\n\n can_verify = False\n for analysis in obj.getAnalyses():\n # Dismiss analyses that are not active\n if not api.is_active(analysis):\n continue\n # Dismiss analyses that have been rejected or retracted\n if api.get_workflow_status_of(analysis) in [\"rejected\", \"retracted\"]:\n continue\n # Worksheet cannot be verified if there is one analysis not verified\n can_verify = IVerified.providedBy(analysis)\n if not can_verify:\n # No need to look further\n return False\n\n # This prevents the verification of the worksheet if all its analyses are in\n # a detached status (rejected, retracted or cancelled)\n return can_verify" ]
[ 0.7097271084785461, 0.6925081014633179, 0.6835185885429382, 0.6689969897270203, 0.6676076054573059, 0.6642816066741943, 0.6623503565788269, 0.6622745394706726, 0.6559537649154663, 0.6559337377548218, 0.6547770500183105, 0.6542908549308777 ]
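The docstring in the positive entry above already sketches the flow; put together as a runnable snippet (assuming the `mockito` package these functions come from, and a Dummy created with `mock()`):

    from mockito import mock, when, verify, unstub

    manager = mock()                              # a Dummy, as the docstrings allow
    when(manager).add_tasks(1, 2, 3).thenReturn(None)

    manager.add_tasks(1, 2, 3)                    # the code under test would do this

    verify(manager, times=1).add_tasks(1, 2, 3)   # passes
    unstub()                                      # restore everything afterwards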
Central interface to stub functions on a given `obj`

`obj` should be a module, a class or an instance of a class; it can be
a Dummy you created with :func:`mock`. ``when`` exposes a fluent interface
where you configure a stub in three steps::

    when(<obj>).<method_name>(<args>).thenReturn(<value>)

Compared to simple *patching*, stubbing in mockito requires you to specify
concrete `args` for which the stub will answer with a concrete `<value>`.
All invocations that do not match this specific call signature will be
rejected. They usually throw at call time.

Stubbing in mockito's sense thus means not only to get rid of unwanted
side effects, but effectively to turn function calls into constants.

E.g.::

    # Given ``dog`` is an instance of a ``Dog``
    when(dog).bark('Grrr').thenReturn('Wuff')
    when(dog).bark('Miau').thenRaise(TypeError())

    # With this configuration set up:
    assert dog.bark('Grrr') == 'Wuff'
    dog.bark('Miau')  # will throw TypeError
    dog.bark('Wuff')  # will throw unwanted interaction

Stubbing can effectively be used as monkeypatching; usage shown with
the `with` context manager::

    with when(os.path).exists('/foo').thenReturn(True):
        ...

Most of the time verifying your interactions is not necessary, because
your code under test implicitly verifies the return value by evaluating
it. See :func:`verify` if you need to, see also :func:`expect` to set up
expected call counts up front.

If your function is pure side effect and does not return something, you
can omit the specific answer. The default then is `None`::

    when(manager).do_work()

`when` verifies the method name, the expected argument signature, and the
actual, factual arguments your code under test uses against the original
object and its function, so it's easier to spot changing interfaces.

Sometimes it's tedious to spell out all arguments::

    from mockito import ANY, ARGS, KWARGS
    when(requests).get('http://example.com/', **KWARGS).thenReturn(...)
    when(os.path).exists(ANY)
    when(os.path).exists(ANY(str))

.. note:: You must :func:`unstub` after stubbing, or use `with`
    statement.

Set ``strict=False`` to bypass the function signature checks.

See related :func:`when2` which has a more pythonic interface.
def when(obj, strict=None):
    """Central interface to stub functions on a given `obj`

    `obj` should be a module, a class or an instance of a class; it can be
    a Dummy you created with :func:`mock`. ``when`` exposes a fluent interface
    where you configure a stub in three steps::

        when(<obj>).<method_name>(<args>).thenReturn(<value>)

    Compared to simple *patching*, stubbing in mockito requires you to specify
    concrete `args` for which the stub will answer with a concrete `<value>`.
    All invocations that do not match this specific call signature will be
    rejected. They usually throw at call time.

    Stubbing in mockito's sense thus means not only to get rid of unwanted
    side effects, but effectively to turn function calls into constants.

    E.g.::

        # Given ``dog`` is an instance of a ``Dog``
        when(dog).bark('Grrr').thenReturn('Wuff')
        when(dog).bark('Miau').thenRaise(TypeError())

        # With this configuration set up:
        assert dog.bark('Grrr') == 'Wuff'
        dog.bark('Miau')  # will throw TypeError
        dog.bark('Wuff')  # will throw unwanted interaction

    Stubbing can effectively be used as monkeypatching; usage shown with
    the `with` context manager::

        with when(os.path).exists('/foo').thenReturn(True):
            ...

    Most of the time verifying your interactions is not necessary, because
    your code under test implicitly verifies the return value by evaluating
    it. See :func:`verify` if you need to, see also :func:`expect` to set up
    expected call counts up front.

    If your function is pure side effect and does not return something, you
    can omit the specific answer. The default then is `None`::

        when(manager).do_work()

    `when` verifies the method name, the expected argument signature, and the
    actual, factual arguments your code under test uses against the original
    object and its function, so it's easier to spot changing interfaces.

    Sometimes it's tedious to spell out all arguments::

        from mockito import ANY, ARGS, KWARGS
        when(requests).get('http://example.com/', **KWARGS).thenReturn(...)
        when(os.path).exists(ANY)
        when(os.path).exists(ANY(str))

    .. note:: You must :func:`unstub` after stubbing, or use `with`
        statement.

    Set ``strict=False`` to bypass the function signature checks.

    See related :func:`when2` which has a more pythonic interface.

    """
    if isinstance(obj, str):
        obj = get_obj(obj)

    if strict is None:
        strict = True
    theMock = _get_mock(obj, strict=strict)

    class When(object):
        def __getattr__(self, method_name):
            return invocation.StubbedInvocation(
                theMock, method_name, strict=strict)

    return When()
[ "def when2(fn, *args, **kwargs):\n \"\"\"Stub a function call with the given arguments\n\n Exposes a more pythonic interface than :func:`when`. See :func:`when` for\n more documentation.\n\n Returns `AnswerSelector` interface which exposes `thenReturn`,\n `thenRaise`, and `thenAnswer` as usual. Always `strict`.\n\n Usage::\n\n # Given `dog` is an instance of a `Dog`\n when2(dog.bark, 'Miau').thenReturn('Wuff')\n\n .. note:: You must :func:`unstub` after stubbing, or use `with`\n statement.\n\n \"\"\"\n obj, name = get_obj_attr_tuple(fn)\n theMock = _get_mock(obj, strict=True)\n return invocation.StubbedInvocation(theMock, name)(*args, **kwargs)", "def _stub_obj(obj):\n '''\n Stub an object directly.\n '''\n # Annoying circular reference requires importing here. Would like to see\n # this cleaned up. @AW\n from .mock import Mock\n\n # Return an existing stub\n if isinstance(obj, Stub):\n return obj\n\n # If a Mock object, stub its __call__\n if isinstance(obj, Mock):\n return stub(obj.__call__)\n\n # If passed-in a type, assume that we're going to stub out the creation.\n # See StubNew for the awesome sauce.\n # if isinstance(obj, types.TypeType):\n if hasattr(types, 'TypeType') and isinstance(obj, types.TypeType):\n return StubNew(obj)\n elif hasattr(__builtins__, 'type') and \\\n isinstance(obj, __builtins__.type):\n return StubNew(obj)\n elif inspect.isclass(obj):\n return StubNew(obj)\n\n # I thought that types.UnboundMethodType differentiated these cases but\n # apparently not.\n if isinstance(obj, types.MethodType):\n # Handle differently if unbound because it's an implicit \"any instance\"\n if getattr(obj, 'im_self', None) is None:\n # Handle the python3 case and py2 filter\n if hasattr(obj, '__self__'):\n if obj.__self__ is not None:\n return StubMethod(obj)\n if sys.version_info.major == 2:\n return StubUnboundMethod(obj)\n else:\n return StubMethod(obj)\n\n # These aren't in the types library\n if type(obj).__name__ == 'method-wrapper':\n return StubMethodWrapper(obj)\n\n if type(obj).__name__ == 'wrapper_descriptor':\n raise UnsupportedStub(\n \"must call stub(obj,'%s') for slot wrapper on %s\",\n obj.__name__, obj.__objclass__.__name__)\n\n # (Mostly) Lastly, look for properties.\n # First look for the situation where there's a reference back to the\n # property.\n prop = obj\n if isinstance(getattr(obj, '__self__', None), property):\n obj = prop.__self__\n\n # Once we've found a property, we have to figure out how to reference\n # back to the owning class. This is a giant pain and we have to use gc\n # to find out where it comes from. 
This code is dense but resolves to\n # something like this:\n # >>> gc.get_referrers( foo.x )\n # [{'__dict__': <attribute '__dict__' of 'foo' objects>,\n # 'x': <property object at 0x7f68c99a16d8>,\n # '__module__': '__main__',\n # '__weakref__': <attribute '__weakref__' of 'foo' objects>,\n # '__doc__': None}]\n if isinstance(obj, property):\n klass, attr = None, None\n for ref in gc.get_referrers(obj):\n if klass and attr:\n break\n if isinstance(ref, dict) and ref.get('prop', None) is obj:\n klass = getattr(\n ref.get('__dict__', None), '__objclass__', None)\n for name, val in getattr(klass, '__dict__', {}).items():\n if val is obj:\n attr = name\n break\n # In the case of PyPy, we have to check all types that refer to\n # the property, and see if any of their attrs are the property\n elif isinstance(ref, type):\n # Use dir as a means to quickly walk through the class tree\n for name in dir(ref):\n if getattr(ref, name) == obj:\n klass = ref\n attr = name\n break\n\n if klass and attr:\n rval = stub(klass, attr)\n if prop != obj:\n return stub(rval, prop.__name__)\n return rval\n\n # If a function and it has an associated module, we can mock directly.\n # Note that this *must* be after properties, otherwise it conflicts with\n # stubbing out the deleter methods and such\n # Sadly, builtin functions and methods have the same type, so we have to\n # use the same stub class even though it's a bit ugly\n if isinstance(obj, (types.FunctionType, types.BuiltinFunctionType,\n types.BuiltinMethodType)) and hasattr(obj, '__module__'):\n return StubFunction(obj)\n\n raise UnsupportedStub(\"can't stub %s\", obj)", "def expect(obj, strict=None,\n times=None, atleast=None, atmost=None, between=None):\n \"\"\"Stub a function call, and set up an expected call count.\n\n Usage::\n\n # Given `dog` is an instance of a `Dog`\n expect(dog, times=1).bark('Wuff').thenReturn('Miau')\n dog.bark('Wuff')\n dog.bark('Wuff') # will throw at call time: too many invocations\n\n # maybe if you need to ensure that `dog.bark()` was called at all\n verifyNoUnwantedInteractions()\n\n .. note:: You must :func:`unstub` after stubbing, or use `with`\n statement.\n\n See :func:`when`, :func:`when2`, :func:`verifyNoUnwantedInteractions`\n\n \"\"\"\n if strict is None:\n strict = True\n theMock = _get_mock(obj, strict=strict)\n\n verification_fn = _get_wanted_verification(\n times=times, atleast=atleast, atmost=atmost, between=between)\n\n class Expect(object):\n def __getattr__(self, method_name):\n return invocation.StubbedInvocation(\n theMock, method_name, verification=verification_fn,\n strict=strict)\n\n return Expect()", "def returns(self, obj):\n \"\"\"\n Customizes the return values of the stub function. If conditions like withArgs or onCall\n were specified, then the return value will only be returned when the conditions are met.\n\n Args: obj (anything)\n Return: a SinonStub object (able to be chained)\n \"\"\"\n self._copy._append_condition(self, lambda *args, **kwargs: obj)\n return self", "def StubOutWithMock(self, obj, attr_name, use_mock_anything=False):\n \"\"\"Replace a method, attribute, etc. with a Mock.\n\n This will replace a class or module with a MockObject, and everything else\n (method, function, etc) with a MockAnything. This can be overridden to\n always use a MockAnything by setting use_mock_anything to True.\n\n Args:\n obj: A Python object (class, module, instance, callable).\n attr_name: str. The name of the attribute to replace with a mock.\n use_mock_anything: bool. 
True if a MockAnything should be used regardless\n of the type of attribute.\n \"\"\"\n\n attr_to_replace = getattr(obj, attr_name)\n if type(attr_to_replace) in self._USE_MOCK_OBJECT and not use_mock_anything:\n stub = self.CreateMock(attr_to_replace)\n else:\n stub = self.CreateMockAnything()\n\n self.stubs.Set(obj, attr_name, stub)", "def withArgs(self, *args, **kwargs): #pylint: disable=invalid-name\n \"\"\"\n Adds a condition for when the stub is called. When the condition is met, a special\n return value can be returned. Adds the specified argument(s) into the condition list.\n\n For example, when the stub function is called with argument 1, it will return \"#\":\n stub.withArgs(1).returns(\"#\")\n\n Without returns/throws at the end of the chain of functions, nothing will happen.\n For example, in this case, although 1 is in the condition list, nothing will happen:\n stub.withArgs(1)\n\n Return:\n a SinonStub object (able to be chained)\n \"\"\"\n cond_args = args if len(args) > 0 else None\n cond_kwargs = kwargs if len(kwargs) > 0 else None\n return _SinonStubCondition(copy=self._copy, cond_args=cond_args, cond_kwargs=cond_kwargs, oncall=self._oncall)", "def stub(self, obj, attr=None):\n '''\n Stub an object. If attr is not None, will attempt to stub that\n attribute on the object. Only required for modules and other rare\n cases where we can't determine the binding from the object.\n '''\n s = stub(obj, attr)\n if s not in self._stubs:\n self._stubs.append(s)\n return s", "def patch(fn, attr_or_replacement, replacement=None):\n \"\"\"Patch/Replace a function.\n\n This is really like monkeypatching, but *note* that all interactions\n will be recorded and can be verified. That is, using `patch` you stay in\n the domain of mockito.\n\n Two ways to call this. Either::\n\n patch(os.path.exists, lambda str: True) # two arguments\n # OR\n patch(os.path, 'exists', lambda str: True) # three arguments\n\n If called with three arguments, the mode is *not* strict to allow *adding*\n methods. If called with two arguments, mode is always `strict`.\n\n .. note:: You must :func:`unstub` after stubbing, or use `with`\n statement.\n\n \"\"\"\n if replacement is None:\n replacement = attr_or_replacement\n return when2(fn, Ellipsis).thenAnswer(replacement)\n else:\n obj, name = fn, attr_or_replacement\n theMock = _get_mock(obj, strict=True)\n return invocation.StubbedInvocation(\n theMock, name, strict=False)(Ellipsis).thenAnswer(replacement)", "public static <T> OngoingStubbing<T> when(Class<?> klass, Object... arguments) throws Exception {\n return Mockito.when(Whitebox.<T>invokeMethod(klass, arguments));\n }", "public static <T> OngoingStubbing<T> when(Object instance, String methodName, Object... arguments) throws Exception {\n return Mockito.when(Whitebox.<T>invokeMethod(instance, methodName, arguments));\n }", "public static <T> OngoingStubbing<T> when(Class<?> clazz, String methodToExpect, Object... 
arguments)\n throws Exception {\n return Mockito.when(Whitebox.<T>invokeMethod(clazz, methodToExpect, arguments));\n }", "def stub(base_class=None, **attributes):\n \"\"\"creates a python class on-the-fly with the given keyword-arguments\n as class-attributes accessible with .attrname.\n\n The new class inherits from\n Use this to mock rather than stub.\n \"\"\"\n if base_class is None:\n base_class = object\n\n members = {\n \"__init__\": lambda self: None,\n \"__new__\": lambda *args, **kw: object.__new__(\n *args, *kw\n ), # remove __new__ and metaclass behavior from object\n \"__metaclass__\": None,\n }\n members.update(attributes)\n # let's create a python class on-the-fly :)\n return type(f\"{base_class.__name__}Stub\", (base_class,), members)()" ]
[ 0.8300884962081909, 0.7341936826705933, 0.7278451919555664, 0.720047116279602, 0.7118588089942932, 0.7065277695655823, 0.7060503959655762, 0.7045286297798157, 0.6935253739356995, 0.6877067685127258, 0.6868920922279358, 0.6828497052192688 ]
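A short end-to-end sketch of the stubbing flow described above, assuming the same `mockito` package; the stubbed call and the `with` form are taken directly from the docstring:

    import os.path
    from mockito import when, unstub, ANY

    when(os.path).exists('/foo').thenReturn(True)   # stub one concrete call
    assert os.path.exists('/foo') is True
    unstub()                                        # mandatory clean-up

    # Scoped variant, as shown in the docstring:
    with when(os.path).exists(ANY(str)).thenReturn(True):
        assert os.path.exists('/certainly/not/here')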
Stub a function call with the given arguments

Exposes a more pythonic interface than :func:`when`. See :func:`when` for
more documentation.

Returns `AnswerSelector` interface which exposes `thenReturn`,
`thenRaise`, and `thenAnswer` as usual. Always `strict`.

Usage::

    # Given `dog` is an instance of a `Dog`
    when2(dog.bark, 'Miau').thenReturn('Wuff')

.. note:: You must :func:`unstub` after stubbing, or use `with`
    statement.
def when2(fn, *args, **kwargs):
    """Stub a function call with the given arguments

    Exposes a more pythonic interface than :func:`when`. See :func:`when` for
    more documentation.

    Returns `AnswerSelector` interface which exposes `thenReturn`,
    `thenRaise`, and `thenAnswer` as usual. Always `strict`.

    Usage::

        # Given `dog` is an instance of a `Dog`
        when2(dog.bark, 'Miau').thenReturn('Wuff')

    .. note:: You must :func:`unstub` after stubbing, or use `with`
        statement.

    """
    obj, name = get_obj_attr_tuple(fn)
    theMock = _get_mock(obj, strict=True)
    return invocation.StubbedInvocation(theMock, name)(*args, **kwargs)
[ "def when(obj, strict=None):\n \"\"\"Central interface to stub functions on a given `obj`\n\n `obj` should be a module, a class or an instance of a class; it can be\n a Dummy you created with :func:`mock`. ``when`` exposes a fluent interface\n where you configure a stub in three steps::\n\n when(<obj>).<method_name>(<args>).thenReturn(<value>)\n\n Compared to simple *patching*, stubbing in mockito requires you to specify\n conrete `args` for which the stub will answer with a concrete `<value>`.\n All invocations that do not match this specific call signature will be\n rejected. They usually throw at call time.\n\n Stubbing in mockito's sense thus means not only to get rid of unwanted\n side effects, but effectively to turn function calls into constants.\n\n E.g.::\n\n # Given ``dog`` is an instance of a ``Dog``\n when(dog).bark('Grrr').thenReturn('Wuff')\n when(dog).bark('Miau').thenRaise(TypeError())\n\n # With this configuration set up:\n assert dog.bark('Grrr') == 'Wuff'\n dog.bark('Miau') # will throw TypeError\n dog.bark('Wuff') # will throw unwanted interaction\n\n Stubbing can effectively be used as monkeypatching; usage shown with\n the `with` context managing::\n\n with when(os.path).exists('/foo').thenReturn(True):\n ...\n\n Most of the time verifying your interactions is not necessary, because\n your code under tests implicitly verifies the return value by evaluating\n it. See :func:`verify` if you need to, see also :func:`expect` to setup\n expected call counts up front.\n\n If your function is pure side effect and does not return something, you\n can omit the specific answer. The default then is `None`::\n\n when(manager).do_work()\n\n `when` verifies the method name, the expected argument signature, and the\n actual, factual arguments your code under test uses against the original\n object and its function so its easier to spot changing interfaces.\n\n Sometimes it's tedious to spell out all arguments::\n\n from mockito import ANY, ARGS, KWARGS\n when(requests).get('http://example.com/', **KWARGS).thenReturn(...)\n when(os.path).exists(ANY)\n when(os.path).exists(ANY(str))\n\n .. note:: You must :func:`unstub` after stubbing, or use `with`\n statement.\n\n Set ``strict=False`` to bypass the function signature checks.\n\n See related :func:`when2` which has a more pythonic interface.\n\n \"\"\"\n\n if isinstance(obj, str):\n obj = get_obj(obj)\n\n if strict is None:\n strict = True\n theMock = _get_mock(obj, strict=strict)\n\n class When(object):\n def __getattr__(self, method_name):\n return invocation.StubbedInvocation(\n theMock, method_name, strict=strict)\n\n return When()", "def withArgs(self, *args, **kwargs): #pylint: disable=invalid-name\n \"\"\"\n Adds a condition for when the stub is called. When the condition is met, a special\n return value can be returned. 
Adds the specified argument(s) into the condition list.\n\n For example, when the stub function is called with argument 1, it will return \"#\":\n stub.withArgs(1).returns(\"#\")\n\n Without returns/throws at the end of the chain of functions, nothing will happen.\n For example, in this case, although 1 is in the condition list, nothing will happen:\n stub.withArgs(1)\n\n Return:\n a SinonStub object (able to be chained)\n \"\"\"\n cond_args = args if len(args) > 0 else None\n cond_kwargs = kwargs if len(kwargs) > 0 else None\n return _SinonStubCondition(copy=self._copy, cond_args=cond_args, cond_kwargs=cond_kwargs, oncall=self._oncall)", "def __get_return_value_no_withargs(self, *args, **kwargs):\n \"\"\" \n Pre-conditions:\n (1) The user has created a stub and specified the stub behaviour\n (2) The user has called the stub function with the specified \"args\" and \"kwargs\"\n (3) No 'withArgs' conditions were applicable in this case\n Args:\n args: tuple, the arguments inputed by the user\n kwargs: dictionary, the keyword arguments inputed by the user\n Returns:\n any type, the appropriate return value, based on the stub's behaviour setup and the user input\n \"\"\"\n c = self._conditions\n call_count = self._wrapper.callCount\n\n # if there might be applicable onCall conditions\n if call_count in c[\"oncall\"]:\n index_list = [i for i, x in enumerate(c[\"oncall\"]) if x and not c[\"args\"][i] and not c[\"kwargs\"][i]]\n for i in reversed(index_list):\n # if the onCall condition applies\n if call_count == c[\"oncall\"][i]:\n return c[\"action\"][i](*args, **kwargs)\n\n # else all conditions did not match\n return c[\"default\"](*args, **kwargs)", "def onCall(self, n): #pylint: disable=invalid-name\n \"\"\"\n Adds a condition for when the stub is called. When the condition is met, a special\n return value can be returned. Adds the specified call number into the condition\n list.\n\n For example, when the stub function is called the second time, it will return \"#\":\n stub.onCall(1).returns(\"#\")\n\n Without returns/throws at the end of the chain of functions, nothing will happen.\n For example, in this case, although 2 is in the condition list, nothing will happen:\n stub.onCall(2)\n\n Args:\n n: integer, the call # for which we want a special return value.\n The first call has an index of 0.\n\n Return:\n a SinonStub object (able to be chained)\n \"\"\"\n cond_oncall = n + 1\n return _SinonStubCondition(copy=self._copy, oncall=cond_oncall, cond_args=self._cond_args, cond_kwargs=self._cond_kwargs)", "def returns(self, obj):\n \"\"\"\n Customizes the return values of the stub function. 
If conditions like withArgs or onCall\n were specified, then the return value will only be returned when the conditions are met.\n\n Args: obj (anything)\n Return: a SinonStub object (able to be chained)\n \"\"\"\n self._copy._append_condition(self, lambda *args, **kwargs: obj)\n return self", "def __get_return_value_withargs(self, index_list, *args, **kwargs):\n \"\"\" \n Pre-conditions:\n (1) The user has created a stub and specified the stub behaviour\n (2) The user has called the stub function with the specified \"args\" and \"kwargs\"\n (3) One or more 'withArgs' conditions were applicable in this case\n Args:\n index_list: list, the list of indices in conditions for which the user args/kwargs match\n args: tuple, the arguments inputed by the user\n kwargs: dictionary, the keyword arguments inputed by the user\n Returns:\n any type, the appropriate return value, based on the stub's behaviour setup and the user input\n \"\"\"\n c = self._conditions\n args_list = self._wrapper.args_list\n kwargs_list = self._wrapper.kwargs_list\n\n # indices with an arg and oncall have higher priority and should be checked first\n indices_with_oncall = [i for i in reversed(index_list) if c[\"oncall\"][i]]\n\n # if there are any combined withArgs+onCall conditions\n if indices_with_oncall:\n call_count = self.__get_call_count(args, kwargs, args_list, kwargs_list)\n for i in indices_with_oncall:\n if c[\"oncall\"][i] == call_count:\n return c[\"action\"][i](*args, **kwargs)\n\n # else if there are simple withArgs conditions\n indices_without_oncall = [i for i in reversed(index_list) if not c[\"oncall\"][i]]\n if indices_without_oncall:\n max_index = max(indices_without_oncall)\n return c[\"action\"][max_index](*args, **kwargs)\n\n # else all conditions did not match\n return c[\"default\"](*args, **kwargs)", "def async_make_reply(msgname, types, arguments_future, major):\n \"\"\"Wrap future that will resolve with arguments needed by make_reply().\"\"\"\n arguments = yield arguments_future\n raise gen.Return(make_reply(msgname, types, arguments, major))", "def Q(*predicates, **query):\n \"\"\"\n Handles situations where :class:`hunter.Query` objects (or other callables) are passed in as positional arguments.\n Conveniently converts that to an :class:`hunter.And` predicate.\n \"\"\"\n optional_actions = query.pop(\"actions\", [])\n if \"action\" in query:\n optional_actions.append(query.pop(\"action\"))\n\n for p in predicates:\n if not callable(p):\n raise TypeError(\"Predicate {0!r} is not callable.\".format(p))\n\n for a in optional_actions:\n if not callable(a):\n raise TypeError(\"Action {0!r} is not callable.\".format(a))\n\n if predicates:\n predicates = tuple(\n p() if inspect.isclass(p) and issubclass(p, Action) else p\n for p in predicates\n )\n if any(isinstance(p, (CallPrinter, CodePrinter)) for p in predicates):\n # the user provided an action as a filter, remove the action then to prevent double output\n for action in optional_actions:\n if action in (CallPrinter, CodePrinter) or isinstance(action, (CallPrinter, CodePrinter)):\n optional_actions.remove(action)\n if query:\n predicates += Query(**query),\n\n result = And(*predicates)\n else:\n result = Query(**query)\n\n if optional_actions:\n result = When(result, *optional_actions)\n\n return result", "def expect(obj, strict=None,\n times=None, atleast=None, atmost=None, between=None):\n \"\"\"Stub a function call, and set up an expected call count.\n\n Usage::\n\n # Given `dog` is an instance of a `Dog`\n expect(dog, 
times=1).bark('Wuff').thenReturn('Miau')\n dog.bark('Wuff')\n dog.bark('Wuff') # will throw at call time: too many invocations\n\n # maybe if you need to ensure that `dog.bark()` was called at all\n verifyNoUnwantedInteractions()\n\n .. note:: You must :func:`unstub` after stubbing, or use `with`\n statement.\n\n See :func:`when`, :func:`when2`, :func:`verifyNoUnwantedInteractions`\n\n \"\"\"\n if strict is None:\n strict = True\n theMock = _get_mock(obj, strict=strict)\n\n verification_fn = _get_wanted_verification(\n times=times, atleast=atleast, atmost=atmost, between=between)\n\n class Expect(object):\n def __getattr__(self, method_name):\n return invocation.StubbedInvocation(\n theMock, method_name, verification=verification_fn,\n strict=strict)\n\n return Expect()", "public static <T> OngoingStubbing<T> when(Class<?> klass, Object... arguments) throws Exception {\n return Mockito.when(Whitebox.<T>invokeMethod(klass, arguments));\n }", "def _call_spy(self, *args, **kwargs):\n '''\n Wrapper to call the spied-on function. Operates similar to\n Expectation.test.\n '''\n if self._spy_side_effect:\n if self._spy_side_effect_args or self._spy_side_effect_kwargs:\n self._spy_side_effect(\n *self._spy_side_effect_args,\n **self._spy_side_effect_kwargs)\n else:\n self._spy_side_effect(*args, **kwargs)\n\n return_value = self._stub.call_orig(*args, **kwargs)\n if self._spy_return:\n self._spy_return(return_value)\n\n return return_value", "function when(pred, f) {\n if(!isPredOrFunc(pred)) {\n throw new TypeError(\n 'when: Pred or predicate function required for first argument'\n )\n }\n\n if(!isFunction(f)) {\n throw new TypeError(\n 'when: Function required for second argument'\n )\n }\n\n return x =>\n predOrFunc(pred, x) ? f(x) : x\n}" ]
[ 0.7158923745155334, 0.7125566005706787, 0.6874305605888367, 0.6713445782661438, 0.6674691438674927, 0.6647620797157288, 0.6502985954284668, 0.6432019472122192, 0.6420909762382507, 0.6394932866096497, 0.6372888088226318, 0.6355871558189392 ]
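The same kind of stub expressed through `when2`, which takes the function object itself instead of the owning object; a minimal sketch assuming the `mockito` package:

    import os.path
    from mockito import when2, unstub

    when2(os.path.exists, '/foo').thenReturn(True)  # pass the bound function + args
    assert os.path.exists('/foo') is True
    unstub()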
Patch/Replace a function.

This is really like monkeypatching, but *note* that all interactions
will be recorded and can be verified. That is, using `patch` you stay in
the domain of mockito.

Two ways to call this. Either::

    patch(os.path.exists, lambda str: True)      # two arguments
    # OR
    patch(os.path, 'exists', lambda str: True)   # three arguments

If called with three arguments, the mode is *not* strict to allow *adding*
methods. If called with two arguments, mode is always `strict`.

.. note:: You must :func:`unstub` after stubbing, or use `with`
    statement.
def patch(fn, attr_or_replacement, replacement=None):
    """Patch/Replace a function.

    This is really like monkeypatching, but *note* that all interactions
    will be recorded and can be verified. That is, using `patch` you stay in
    the domain of mockito.

    Two ways to call this. Either::

        patch(os.path.exists, lambda str: True)      # two arguments
        # OR
        patch(os.path, 'exists', lambda str: True)   # three arguments

    If called with three arguments, the mode is *not* strict to allow *adding*
    methods. If called with two arguments, mode is always `strict`.

    .. note:: You must :func:`unstub` after stubbing, or use `with`
        statement.

    """
    if replacement is None:
        replacement = attr_or_replacement
        return when2(fn, Ellipsis).thenAnswer(replacement)
    else:
        obj, name = fn, attr_or_replacement
        theMock = _get_mock(obj, strict=True)
        return invocation.StubbedInvocation(
            theMock, name, strict=False)(Ellipsis).thenAnswer(replacement)
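The two calling conventions from the docstring above, as a minimal runnable sketch (assuming the `mockito` package; `unstub()` restores the original function):

    import os.path
    from mockito import patch, unstub

    # Two-argument form: strict replacement of os.path.exists.
    patch(os.path.exists, lambda str: True)
    assert os.path.exists('/nowhere') is True
    unstub()

    # Three-argument form (not strict, so attributes may also be added):
    # patch(os.path, 'exists', lambda str: True)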
[ "def patch_func(replacement, target_mod, func_name):\n \"\"\"\n Patch func_name in target_mod with replacement\n\n Important - original must be resolved by name to avoid\n patching an already patched function.\n \"\"\"\n original = getattr(target_mod, func_name)\n\n # set the 'unpatched' attribute on the replacement to\n # point to the original.\n vars(replacement).setdefault('unpatched', original)\n\n # replace the function in the original module\n setattr(target_mod, func_name, replacement)", "def monkeypatch_method(cls, patch_name):\r\n # This function's code was inspired from the following thread:\r\n # \"[Python-Dev] Monkeypatching idioms -- elegant or ugly?\"\r\n # by Robert Brewer <fumanchu at aminus.org>\r\n # (Tue Jan 15 19:13:25 CET 2008)\r\n \"\"\"\r\n Add the decorated method to the given class; replace as needed.\r\n\r\n If the named method already exists on the given class, it will\r\n be replaced, and a reference to the old method is created as\r\n cls._old<patch_name><name>. If the \"_old_<patch_name>_<name>\" attribute\r\n already exists, KeyError is raised.\r\n \"\"\"\r\n def decorator(func):\r\n fname = func.__name__\r\n old_func = getattr(cls, fname, None)\r\n if old_func is not None:\r\n # Add the old func to a list of old funcs.\r\n old_ref = \"_old_%s_%s\" % (patch_name, fname)\r\n\r\n old_attr = getattr(cls, old_ref, None)\r\n if old_attr is None:\r\n setattr(cls, old_ref, old_func)\r\n else:\r\n raise KeyError(\"%s.%s already exists.\"\r\n % (cls.__name__, old_ref))\r\n setattr(cls, fname, func)\r\n return func\r\n return decorator", "def patch(name,\n source=None,\n source_hash=None,\n source_hash_name=None,\n skip_verify=False,\n template=None,\n context=None,\n defaults=None,\n options='',\n reject_file=None,\n strip=None,\n saltenv=None,\n **kwargs):\n '''\n Ensure that a patch has been applied to the specified file or directory\n\n .. versionchanged:: 2019.2.0\n The ``hash`` and ``dry_run_first`` options are now ignored, as the\n logic which determines whether or not the patch has already been\n applied no longer requires them. Additionally, this state now supports\n patch files that modify more than one file. To use these sort of\n patches, specify a directory (and, if necessary, the ``strip`` option)\n instead of a file.\n\n .. note::\n A suitable ``patch`` executable must be available on the minion. Also,\n keep in mind that the pre-check this state does to determine whether or\n not changes need to be made will create a temp file and send all patch\n output to that file. This means that, in the event that the patch would\n not have applied cleanly, the comment included in the state results will\n reference a temp file that will no longer exist once the state finishes\n running.\n\n name\n The file or directory to which the patch should be applied\n\n source\n The patch file to apply\n\n .. versionchanged:: 2019.2.0\n The source can now be from any file source supported by Salt\n (``salt://``, ``http://``, ``https://``, ``ftp://``, etc.).\n Templating is also now supported.\n\n source_hash\n Works the same way as in :py:func:`file.managed\n <salt.states.file.managed>`.\n\n .. versionadded:: 2019.2.0\n\n source_hash_name\n Works the same way as in :py:func:`file.managed\n <salt.states.file.managed>`\n\n .. versionadded:: 2019.2.0\n\n skip_verify\n Works the same way as in :py:func:`file.managed\n <salt.states.file.managed>`\n\n .. versionadded:: 2019.2.0\n\n template\n Works the same way as in :py:func:`file.managed\n <salt.states.file.managed>`\n\n .. 
versionadded:: 2019.2.0\n\n context\n Works the same way as in :py:func:`file.managed\n <salt.states.file.managed>`\n\n .. versionadded:: 2019.2.0\n\n defaults\n Works the same way as in :py:func:`file.managed\n <salt.states.file.managed>`\n\n .. versionadded:: 2019.2.0\n\n options\n Extra options to pass to patch. This should not be necessary in most\n cases.\n\n .. note::\n For best results, short opts should be separate from one another.\n The ``-N`` and ``-r``, and ``-o`` options are used internally by\n this state and cannot be used here. Additionally, instead of using\n ``-pN`` or ``--strip=N``, use the ``strip`` option documented\n below.\n\n reject_file\n If specified, any rejected hunks will be written to this file. If not\n specified, then they will be written to a temp file which will be\n deleted when the state finishes running.\n\n .. important::\n The parent directory must exist. Also, this will overwrite the file\n if it is already present.\n\n .. versionadded:: 2019.2.0\n\n strip\n Number of directories to strip from paths in the patch file. For\n example, using the below SLS would instruct Salt to use ``-p1`` when\n applying the patch:\n\n .. code-block:: yaml\n\n /etc/myfile.conf:\n file.patch:\n - source: salt://myfile.patch\n - strip: 1\n\n .. versionadded:: 2019.2.0\n In previous versions, ``-p1`` would need to be passed as part of\n the ``options`` value.\n\n saltenv\n Specify the environment from which to retrieve the patch file indicated\n by the ``source`` parameter. If not provided, this defaults to the\n environment from which the state is being executed.\n\n .. note::\n Ignored when the patch file is from a non-``salt://`` source.\n\n **Usage:**\n\n .. code-block:: yaml\n\n # Equivalent to ``patch --forward /opt/myfile.txt myfile.patch``\n /opt/myfile.txt:\n file.patch:\n - source: salt://myfile.patch\n '''\n ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''}\n\n if not salt.utils.path.which('patch'):\n ret['comment'] = 'patch executable not found on minion'\n return ret\n\n # is_dir should be defined if we proceed past the if/else block below, but\n # just in case, avoid a NameError.\n is_dir = False\n\n if not name:\n ret['comment'] = 'A file/directory to be patched is required'\n return ret\n else:\n try:\n name = os.path.expanduser(name)\n except Exception:\n ret['comment'] = 'Invalid path \\'{0}\\''.format(name)\n return ret\n else:\n if not os.path.isabs(name):\n ret['comment'] = '{0} is not an absolute path'.format(name)\n return ret\n elif not os.path.exists(name):\n ret['comment'] = '{0} does not exist'.format(name)\n return ret\n else:\n is_dir = os.path.isdir(name)\n\n for deprecated_arg in ('hash', 'dry_run_first'):\n if deprecated_arg in kwargs:\n ret.setdefault('warnings', []).append(\n 'The \\'{0}\\' argument is no longer used and has been '\n 'ignored.'.format(deprecated_arg)\n )\n\n if reject_file is not None:\n try:\n reject_file_parent = os.path.dirname(reject_file)\n except Exception:\n ret['comment'] = 'Invalid path \\'{0}\\' for reject_file'.format(\n reject_file\n )\n return ret\n else:\n if not os.path.isabs(reject_file_parent):\n ret['comment'] = '\\'{0}\\' is not an absolute path'.format(\n reject_file\n )\n return ret\n elif not os.path.isdir(reject_file_parent):\n ret['comment'] = (\n 'Parent directory for reject_file \\'{0}\\' either does '\n 'not exist, or is not a directory'.format(reject_file)\n )\n return ret\n\n sanitized_options = []\n options = salt.utils.args.shlex_split(options)\n index = 0\n max_index = 
len(options) - 1\n # Not using enumerate here because we may need to consume more than one\n # option if --strip is used.\n blacklisted_options = []\n while index <= max_index:\n option = options[index]\n if not isinstance(option, six.string_types):\n option = six.text_type(option)\n\n for item in ('-N', '--forward', '-r', '--reject-file', '-o', '--output'):\n if option.startswith(item):\n blacklisted = option\n break\n else:\n blacklisted = None\n\n if blacklisted is not None:\n blacklisted_options.append(blacklisted)\n\n if option.startswith('-p'):\n try:\n strip = int(option[2:])\n except Exception:\n ret['comment'] = (\n 'Invalid format for \\'-p\\' CLI option. Consider using '\n 'the \\'strip\\' option for this state.'\n )\n return ret\n elif option.startswith('--strip'):\n if '=' in option:\n # Assume --strip=N\n try:\n strip = int(option.rsplit('=', 1)[-1])\n except Exception:\n ret['comment'] = (\n 'Invalid format for \\'-strip\\' CLI option. Consider '\n 'using the \\'strip\\' option for this state.'\n )\n return ret\n else:\n # Assume --strip N and grab the next option in the list\n try:\n strip = int(options[index + 1])\n except Exception:\n ret['comment'] = (\n 'Invalid format for \\'-strip\\' CLI option. Consider '\n 'using the \\'strip\\' option for this state.'\n )\n return ret\n else:\n # We need to increment again because we grabbed the next\n # option in the list.\n index += 1\n else:\n sanitized_options.append(option)\n\n # Increment the index\n index += 1\n\n if blacklisted_options:\n ret['comment'] = (\n 'The following CLI options are not allowed: {0}'.format(\n ', '.join(blacklisted_options)\n )\n )\n return ret\n\n options = sanitized_options\n\n try:\n source_match = __salt__['file.source_list'](source,\n source_hash,\n __env__)[0]\n except CommandExecutionError as exc:\n ret['result'] = False\n ret['comment'] = exc.strerror\n return ret\n else:\n # Passing the saltenv to file.managed to pull down the patch file is\n # not supported, because the saltenv is already being passed via the\n # state compiler and this would result in two values for that argument\n # (and a traceback). 
Therefore, we will add the saltenv to the source\n # URL to ensure we pull the file from the correct environment.\n if saltenv is not None:\n source_match_url, source_match_saltenv = \\\n salt.utils.url.parse(source_match)\n if source_match_url.startswith('salt://'):\n if source_match_saltenv is not None \\\n and source_match_saltenv != saltenv:\n ret.setdefault('warnings', []).append(\n 'Ignoring \\'saltenv\\' option in favor of saltenv '\n 'included in the source URL.'\n )\n else:\n source_match += '?saltenv={0}'.format(saltenv)\n\n cleanup = []\n\n try:\n patch_file = salt.utils.files.mkstemp()\n cleanup.append(patch_file)\n\n try:\n orig_test = __opts__['test']\n __opts__['test'] = False\n sys.modules[__salt__['test.ping'].__module__].__opts__['test'] = False\n result = managed(patch_file,\n source=source_match,\n source_hash=source_hash,\n source_hash_name=source_hash_name,\n skip_verify=skip_verify,\n template=template,\n context=context,\n defaults=defaults)\n except Exception as exc:\n msg = 'Failed to cache patch file {0}: {1}'.format(\n salt.utils.url.redact_http_basic_auth(source_match),\n exc\n )\n log.exception(msg)\n ret['comment'] = msg\n return ret\n else:\n log.debug('file.managed: %s', result)\n finally:\n __opts__['test'] = orig_test\n sys.modules[__salt__['test.ping'].__module__].__opts__['test'] = orig_test\n\n if not result['result']:\n log.debug(\n 'failed to download %s',\n salt.utils.url.redact_http_basic_auth(source_match)\n )\n return result\n\n def _patch(patch_file, options=None, dry_run=False):\n patch_opts = copy.copy(sanitized_options)\n if options is not None:\n patch_opts.extend(options)\n return __salt__['file.patch'](\n name,\n patch_file,\n options=patch_opts,\n dry_run=dry_run)\n\n if reject_file is not None:\n patch_rejects = reject_file\n else:\n # No rejects file specified, create a temp file\n patch_rejects = salt.utils.files.mkstemp()\n cleanup.append(patch_rejects)\n\n patch_output = salt.utils.files.mkstemp()\n cleanup.append(patch_output)\n\n # Older patch releases can only write patch output to regular files,\n # meaning that /dev/null can't be relied on. Also, if we ever want this\n # to work on Windows with patch.exe, /dev/null is a non-starter.\n # Therefore, redirect all patch output to a temp file, which we will\n # then remove.\n patch_opts = ['-N', '-r', patch_rejects, '-o', patch_output]\n if is_dir and strip is not None:\n patch_opts.append('-p{0}'.format(strip))\n\n pre_check = _patch(patch_file, patch_opts)\n if pre_check['retcode'] != 0:\n # Try to reverse-apply hunks from rejects file using a dry-run.\n # If this returns a retcode of 0, we know that the patch was\n # already applied. Rejects are written from the base of the\n # directory, so the strip option doesn't apply here.\n reverse_pass = _patch(patch_rejects, ['-R', '-f'], dry_run=True)\n already_applied = reverse_pass['retcode'] == 0\n\n if already_applied:\n ret['comment'] = 'Patch was already applied'\n ret['result'] = True\n return ret\n else:\n ret['comment'] = (\n 'Patch would not apply cleanly, no changes made. 
Results '\n 'of dry-run are below.'\n )\n if reject_file is None:\n ret['comment'] += (\n ' Run state again using the reject_file option to '\n 'save rejects to a persistent file.'\n )\n opts = copy.copy(__opts__)\n opts['color'] = False\n ret['comment'] += '\\n\\n' + salt.output.out_format(\n pre_check,\n 'nested',\n opts,\n nested_indent=14)\n return ret\n\n if __opts__['test']:\n ret['result'] = None\n ret['comment'] = 'The patch would be applied'\n ret['changes'] = pre_check\n return ret\n\n # If we've made it here, the patch should apply cleanly\n patch_opts = []\n if is_dir and strip is not None:\n patch_opts.append('-p{0}'.format(strip))\n ret['changes'] = _patch(patch_file, patch_opts)\n\n if ret['changes']['retcode'] == 0:\n ret['comment'] = 'Patch successfully applied'\n ret['result'] = True\n else:\n ret['comment'] = 'Failed to apply patch'\n\n return ret\n\n finally:\n # Clean up any temp files\n for path in cleanup:\n try:\n os.remove(path)\n except OSError as exc:\n if exc.errno != os.errno.ENOENT:\n log.error(\n 'file.patch: Failed to remove temp file %s: %s',\n path, exc\n )", "def patch(\n target, new=DEFAULT, spec=None, create=False,\n spec_set=None, autospec=None, new_callable=None, **kwargs\n ):\n \"\"\"\n `patch` acts as a function decorator, class decorator or a context\n manager. Inside the body of the function or with statement, the `target`\n is patched with a `new` object. When the function/with statement exits\n the patch is undone.\n\n If `new` is omitted, then the target is replaced with a\n `MagicMock`. If `patch` is used as a decorator and `new` is\n omitted, the created mock is passed in as an extra argument to the\n decorated function. If `patch` is used as a context manager the created\n mock is returned by the context manager.\n\n `target` should be a string in the form `'package.module.ClassName'`. The\n `target` is imported and the specified object replaced with the `new`\n object, so the `target` must be importable from the environment you are\n calling `patch` from. The target is imported when the decorated function\n is executed, not at decoration time.\n\n The `spec` and `spec_set` keyword arguments are passed to the `MagicMock`\n if patch is creating one for you.\n\n In addition you can pass `spec=True` or `spec_set=True`, which causes\n patch to pass in the object being mocked as the spec/spec_set object.\n\n `new_callable` allows you to specify a different class, or callable object,\n that will be called to create the `new` object. By default `MagicMock` is\n used.\n\n A more powerful form of `spec` is `autospec`. If you set `autospec=True`\n then the mock with be created with a spec from the object being replaced.\n All attributes of the mock will also have the spec of the corresponding\n attribute of the object being replaced. Methods and functions being\n mocked will have their arguments checked and will raise a `TypeError` if\n they are called with the wrong signature. For mocks replacing a class,\n their return value (the 'instance') will have the same spec as the class.\n\n Instead of `autospec=True` you can pass `autospec=some_object` to use an\n arbitrary object as the spec instead of the one being replaced.\n\n By default `patch` will fail to replace attributes that don't exist. If\n you pass in `create=True`, and the attribute doesn't exist, patch will\n create the attribute for you when the patched function is called, and\n delete it again afterwards. 
This is useful for writing tests against\n attributes that your production code creates at runtime. It is off by by\n default because it can be dangerous. With it switched on you can write\n passing tests against APIs that don't actually exist!\n\n Patch can be used as a `TestCase` class decorator. It works by\n decorating each test method in the class. This reduces the boilerplate\n code when your test methods share a common patchings set. `patch` finds\n tests by looking for method names that start with `patch.TEST_PREFIX`.\n By default this is `test`, which matches the way `unittest` finds tests.\n You can specify an alternative prefix by setting `patch.TEST_PREFIX`.\n\n Patch can be used as a context manager, with the with statement. Here the\n patching applies to the indented block after the with statement. If you\n use \"as\" then the patched object will be bound to the name after the\n \"as\"; very useful if `patch` is creating a mock object for you.\n\n `patch` takes arbitrary keyword arguments. These will be passed to\n the `Mock` (or `new_callable`) on construction.\n\n `patch.dict(...)`, `patch.multiple(...)` and `patch.object(...)` are\n available for alternate use-cases.\n \"\"\"\n getter, attribute = _get_target(target)\n return _patch(\n getter, attribute, new, spec, create,\n spec_set, autospec, new_callable, kwargs\n )", "def patch_module_function(module, target, aspect, force_name=None, bag=BrokenBag, **options):\n \"\"\"\n Low-level patcher for one function from a specified module.\n\n .. warning:: You should not use this directly.\n\n :returns: An :obj:`aspectlib.Rollback` object.\n \"\"\"\n logdebug(\"patch_module_function (module=%s, target=%s, aspect=%s, force_name=%s, **options=%s\",\n module, target, aspect, force_name, options)\n name = force_name or target.__name__\n return patch_module(module, name, _checked_apply(aspect, target, module=module), original=target, **options)", "def patch(self, path, auth=None, **kwargs):\n \"\"\"\n Manually make a PATCH request.\n\n :param str path: relative url of the request (e.g. `/users/username`)\n :param auth.Authentication auth: authentication object\n :param kwargs dict: Extra arguments for the request, as supported by the\n `requests <http://docs.python-requests.org/>`_ library.\n :raises NetworkFailure: if there is an error communicating with the server\n :raises ApiFailure: if the request cannot be serviced\n \"\"\"\n return self._check_ok(self._patch(path, auth=auth, **kwargs))", "def _patch_multiple(target, spec=None, create=False, spec_set=None,\n autospec=None, new_callable=None, **kwargs):\n \"\"\"Perform multiple patches in a single call. It takes the object to be\n patched (either as an object or a string to fetch the object by importing)\n and keyword arguments for the patches::\n\n with patch.multiple(settings, FIRST_PATCH='one', SECOND_PATCH='two'):\n ...\n\n Use `DEFAULT` as the value if you want `patch.multiple` to create\n mocks for you. In this case the created mocks are passed into a decorated\n function by keyword, and a dictionary is returned when `patch.multiple` is\n used as a context manager.\n\n `patch.multiple` can be used as a decorator, class decorator or a context\n manager. The arguments `spec`, `spec_set`, `create`,\n `autospec` and `new_callable` have the same meaning as for `patch`. 
These\n arguments will be applied to *all* patches done by `patch.multiple`.\n\n When used as a class decorator `patch.multiple` honours `patch.TEST_PREFIX`\n for choosing which methods to wrap.\n \"\"\"\n if type(target) in (unicode, str):\n getter = lambda: _importer(target)\n else:\n getter = lambda: target\n\n if not kwargs:\n raise ValueError(\n 'Must supply at least one keyword argument with patch.multiple'\n )\n # need to wrap in a list for python 3, where items is a view\n items = list(kwargs.items())\n attribute, new = items[0]\n patcher = _patch(\n getter, attribute, new, spec, create, spec_set,\n autospec, new_callable, {}\n )\n patcher.attribute_name = attribute\n for attribute, new in items[1:]:\n this_patcher = _patch(\n getter, attribute, new, spec, create, spec_set,\n autospec, new_callable, {}\n )\n this_patcher.attribute_name = attribute\n patcher.additional_patchers.append(this_patcher)\n return patcher", "def patch(self, path=None, method='PATCH', **options):\n \"\"\" Equals :meth:`route` with a ``PATCH`` method parameter. \"\"\"\n return self.route(path, method, **options)", "def patched_function(self, *args, **kwargs):\n \"\"\"\n Step 3. Wrapped function calling.\n \"\"\"\n result = self.function(*args, **kwargs)\n self.validate(result)\n return result", "function patch(obj, method, optionsArgIndex) {\n var orig = obj[method];\n if (!orig) return;\n obj[method] = function () {\n var opts = arguments[optionsArgIndex];\n var file = null;\n if (opts) {\n file = typeof opts == 'string' ? opts : opts.filename;\n }\n if (file) callback(file);\n return orig.apply(this, arguments);\n };\n }", "def when2(fn, *args, **kwargs):\n \"\"\"Stub a function call with the given arguments\n\n Exposes a more pythonic interface than :func:`when`. See :func:`when` for\n more documentation.\n\n Returns `AnswerSelector` interface which exposes `thenReturn`,\n `thenRaise`, and `thenAnswer` as usual. Always `strict`.\n\n Usage::\n\n # Given `dog` is an instance of a `Dog`\n when2(dog.bark, 'Miau').thenReturn('Wuff')\n\n .. note:: You must :func:`unstub` after stubbing, or use `with`\n statement.\n\n \"\"\"\n obj, name = get_obj_attr_tuple(fn)\n theMock = _get_mock(obj, strict=True)\n return invocation.StubbedInvocation(theMock, name)(*args, **kwargs)", "def patch(func=None, obj=None, name=None, avoid_doublewrap=True):\n \"\"\"\n Decorator for monkeypatching functions on modules and classes.\n\n Example::\n\n # This replaces FooClass.bar with our method\n @monkeybiz.patch(FooClass)\n def bar(original_bar, *args, **kwargs):\n print \"Patched!\"\n return original_bar(*args, **kwargs)\n\n # This replaces FooClass.bar and foomodule.bar with our method\n @monkeybiz.patch([FooClass, foomodule])\n def bar(original_bar, *args, **kwargs):\n #...\n\n The first argument to ``monkeybiz.patch`` can be either a module, a class,\n or a list of modules and/or classes. The decorator also takes optional\n ``name`` and ``avoid_doublewrap`` keyword arguments. If ``name`` is\n omitted, the name of the function being patched will be the name of the\n function being decorated. 
If ``avoid_doublewrap`` is True (the default),\n then functions and methods can only be patched once using this function.\n\n Use ``monkeybiz.unpatch()`` to revert a monkey-patched function to its\n original.\n \"\"\"\n if obj is None:\n if isinstance(func, (type, ModuleType)):\n obj = func\n func = None\n elif isinstance(func, (list, tuple)) and all([isinstance(i, (ModuleType, type)) for i in func]):\n obj = func\n func = None\n\n if func is None:\n return functools.partial(patch, obj=obj, name=name, avoid_doublewrap=avoid_doublewrap)\n\n if name is None:\n name = func.__name__\n\n if isinstance(obj, (list, tuple)) and all([isinstance(i, (ModuleType, type)) for i in obj]):\n return [patch(func=func, obj=o, name=name, avoid_doublewrap=avoid_doublewrap) for o in obj]\n\n if not isinstance(obj, (ModuleType, type)):\n raise ValueError(\n \"Argument passed to @patch decorator must be a \"\n \"class or module, or a list of classes and modules\")\n\n try:\n call = getattr(obj, name)\n except AttributeError:\n raise TypeError(\"%(func_repr)s does not exist\" % {\n 'func_repr': '.'.join(\n filter(None, [\n getattr(obj, '__module__', None),\n obj.__name__,\n func.__name__],\n )),\n })\n\n # optionally avoid multiple identical wrappings\n if avoid_doublewrap and getattr(call, 'wrapper', None) is func:\n return\n\n # get underlying function (if it's an unbound method)\n try:\n original_callable = six.get_method_function(call)\n except AttributeError:\n original_callable = call\n\n @six.wraps(func)\n def wrapper(*args, **kwargs):\n return func(original_callable, *args, **kwargs)\n\n # set attributes, for future unwrapping and to avoid double-wrapping\n wrapper.original = call\n wrapper.wrapper = func\n\n if six.PY2 and inspect.isclass(obj):\n # rewrap staticmethod and classmethod specifically (iff obj is a class)\n if hasattr(call, 'im_self'):\n if call.im_self:\n wrapper = classmethod(wrapper)\n else:\n wrapper = staticmethod(wrapper)\n\n # finally, install the func closure as requested\n setattr(obj, name, wrapper)\n return getattr(obj, name)" ]
[ 0.7399376034736633, 0.7201372385025024, 0.71756911277771, 0.7158401012420654, 0.7078803181648254, 0.6980276107788086, 0.6951728463172913, 0.6931896805763245, 0.6924526691436768, 0.689326822757721, 0.6884449124336243, 0.6861975789070129 ]
Stub a function call, and set up an expected call count.

    Usage::

        # Given `dog` is an instance of a `Dog`
        expect(dog, times=1).bark('Wuff').thenReturn('Miau')
        dog.bark('Wuff')
        dog.bark('Wuff')  # will throw at call time: too many invocations

        # maybe if you need to ensure that `dog.bark()` was called at all
        verifyNoUnwantedInteractions()

    .. note:: You must :func:`unstub` after stubbing, or use `with`
        statement.

    See :func:`when`, :func:`when2`, :func:`verifyNoUnwantedInteractions`
def expect(obj, strict=None, times=None, atleast=None, atmost=None, between=None):
    """Stub a function call, and set up an expected call count.

    Usage::

        # Given `dog` is an instance of a `Dog`
        expect(dog, times=1).bark('Wuff').thenReturn('Miau')
        dog.bark('Wuff')
        dog.bark('Wuff')  # will throw at call time: too many invocations

        # maybe if you need to ensure that `dog.bark()` was called at all
        verifyNoUnwantedInteractions()

    .. note:: You must :func:`unstub` after stubbing, or use `with`
        statement.

    See :func:`when`, :func:`when2`, :func:`verifyNoUnwantedInteractions`

    """
    if strict is None:
        strict = True
    theMock = _get_mock(obj, strict=strict)

    verification_fn = _get_wanted_verification(
        times=times, atleast=atleast, atmost=atmost, between=between)

    class Expect(object):
        def __getattr__(self, method_name):
            return invocation.StubbedInvocation(
                theMock, method_name, verification=verification_fn,
                strict=strict)

    return Expect()
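The positive passage above documents an `expect` helper with call-count verification. A minimal usage sketch follows, based only on its docstring; attributing it to the mockito-python package, the import line, and the `Dog` class are assumptions added purely for illustration.

# Hedged sketch: expected call counts, following the docstring's own dog/bark example.
# Assumes the mockito-python package provides expect/verifyNoUnwantedInteractions/unstub.
from mockito import expect, unstub, verifyNoUnwantedInteractions


class Dog:                                   # hypothetical class mirroring the docstring
    def bark(self, sound):
        return sound


dog = Dog()
expect(dog, times=1).bark('Wuff').thenReturn('Miau')

assert dog.bark('Wuff') == 'Miau'            # first call satisfies the expectation
verifyNoUnwantedInteractions(dog)            # ok: bark('Wuff') was called exactly once
unstub(dog)                                  # restore the original bark method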
[ "def when2(fn, *args, **kwargs):\n \"\"\"Stub a function call with the given arguments\n\n Exposes a more pythonic interface than :func:`when`. See :func:`when` for\n more documentation.\n\n Returns `AnswerSelector` interface which exposes `thenReturn`,\n `thenRaise`, and `thenAnswer` as usual. Always `strict`.\n\n Usage::\n\n # Given `dog` is an instance of a `Dog`\n when2(dog.bark, 'Miau').thenReturn('Wuff')\n\n .. note:: You must :func:`unstub` after stubbing, or use `with`\n statement.\n\n \"\"\"\n obj, name = get_obj_attr_tuple(fn)\n theMock = _get_mock(obj, strict=True)\n return invocation.StubbedInvocation(theMock, name)(*args, **kwargs)", "def expects_call(self):\n \"\"\"The fake must be called.\n\n .. doctest::\n :hide:\n\n >>> import fudge\n >>> fudge.clear_expectations()\n >>> fudge.clear_calls()\n\n This is useful for when you stub out a function\n as opposed to a class. For example::\n\n >>> import fudge\n >>> remove = fudge.Fake('os.remove').expects_call()\n >>> fudge.verify()\n Traceback (most recent call last):\n ...\n AssertionError: fake:os.remove() was not called\n\n .. doctest::\n :hide:\n\n >>> fudge.clear_expectations()\n\n \"\"\"\n self._callable = ExpectedCall(self, call_name=self._name,\n callable=True)\n return self", "def expect(self):\n '''\n Add an expectation to this stub. Return the expectation.\n '''\n exp = Expectation(self)\n self._expectations.append(exp)\n return exp", "def and_calls(self, *funcs):\n \"\"\"Expects the return value from one or more functions to be raised\n from the given expectation.\n \"\"\"\n for fn in funcs:\n self.__expect(Expectation, Invoke(fn))", "def expect(func, args, times=7, sleep_t=0.5):\n \"\"\"try many times as in times with sleep time\"\"\"\n while times > 0:\n try:\n return func(*args)\n except Exception as e:\n times -= 1\n logger.debug(\"expect failed - attempts left: %d\" % times)\n time.sleep(sleep_t)\n if times == 0:\n raise exceptions.BaseExc(e)", "def expects(self, call_name):\n \"\"\"Expect a call.\n\n .. doctest::\n :hide:\n\n >>> import fudge\n >>> fudge.clear_expectations()\n >>> fudge.clear_calls()\n\n If the method *call_name* is never called, then raise an error. I.E.::\n\n >>> session = Fake('session').expects('open').expects('close')\n >>> session.open()\n >>> fudge.verify()\n Traceback (most recent call last):\n ...\n AssertionError: fake:session.close() was not called\n\n .. note::\n If you want to also verify the order these calls are made in,\n use :func:`fudge.Fake.remember_order`. When using :func:`fudge.Fake.next_call`\n after ``expects(...)``, each new call will be part of the expected order\n\n Declaring ``expects()`` multiple times is the same as\n declaring :func:`fudge.Fake.next_call`\n \"\"\"\n if call_name in self._declared_calls:\n return self.next_call(for_method=call_name)\n\n self._last_declared_call_name = call_name\n c = ExpectedCall(self, call_name, call_order=self._expected_call_order)\n self._declare_call(call_name, c)\n return self", "def onCall(self, n): #pylint: disable=invalid-name\n \"\"\"\n Adds a condition for when the stub is called. When the condition is met, a special\n return value can be returned. 
Adds the specified call number into the condition\n list.\n\n For example, when the stub function is called the second time, it will return \"#\":\n stub.onCall(1).returns(\"#\")\n\n Without returns/throws at the end of the chain of functions, nothing will happen.\n For example, in this case, although 2 is in the condition list, nothing will happen:\n stub.onCall(2)\n\n Args:\n n: integer, the call # for which we want a special return value.\n The first call has an index of 0.\n\n Return:\n a SinonStub object (able to be chained)\n \"\"\"\n cond_oncall = n + 1\n return _SinonStubCondition(copy=self._copy, oncall=cond_oncall, cond_args=self._cond_args, cond_kwargs=self._cond_kwargs)", "def _call_spy(self, *args, **kwargs):\n '''\n Wrapper to call the spied-on function. Operates similar to\n Expectation.test.\n '''\n if self._spy_side_effect:\n if self._spy_side_effect_args or self._spy_side_effect_kwargs:\n self._spy_side_effect(\n *self._spy_side_effect_args,\n **self._spy_side_effect_kwargs)\n else:\n self._spy_side_effect(*args, **kwargs)\n\n return_value = self._stub.call_orig(*args, **kwargs)\n if self._spy_return:\n self._spy_return(return_value)\n\n return return_value", "def with_arg_count(self, count):\n \"\"\"Set the last call to expect an exact argument count.\n\n I.E.::\n\n >>> auth = Fake('auth').provides('login').with_arg_count(2)\n >>> auth.login('joe_user') # forgot password\n Traceback (most recent call last):\n ...\n AssertionError: fake:auth.login() was called with 1 arg(s) but expected 2\n\n \"\"\"\n exp = self._get_current_call()\n exp.expected_arg_count = count\n return self", "def with_kwarg_count(self, count):\n \"\"\"Set the last call to expect an exact count of keyword arguments.\n\n I.E.::\n\n >>> auth = Fake('auth').provides('login').with_kwarg_count(2)\n >>> auth.login(username='joe') # forgot password=\n Traceback (most recent call last):\n ...\n AssertionError: fake:auth.login() was called with 1 keyword arg(s) but expected 2\n\n \"\"\"\n exp = self._get_current_call()\n exp.expected_kwarg_count = count\n return self", "def spy(self):\n '''\n Add a spy to this stub. Return the spy.\n '''\n spy = Spy(self)\n self._expectations.append(spy)\n return spy", "def call(self, func, *args, **kwargs):\n \"\"\"\n Like :meth:`mitogen.parent.CallChain.call`, but log timings.\n \"\"\"\n t0 = time.time()\n try:\n recv = self.call_async(func, *args, **kwargs)\n return self._rethrow(recv)\n finally:\n LOG.debug('Call took %d ms: %r', 1000 * (time.time() - t0),\n mitogen.parent.CallSpec(func, args, kwargs))" ]
[ 0.7674841284751892, 0.7304031848907471, 0.7183098196983337, 0.705590009689331, 0.7028374075889587, 0.7000864148139954, 0.6943051815032959, 0.6941514611244202, 0.6917891502380371, 0.6833136677742004, 0.6819828748703003, 0.6706059575080872 ]
Unstubs all stubbed methods and functions

    If you don't pass in any argument, *all* registered mocks and
    patched modules, classes etc. will be unstubbed.

    Note that additionally, the underlying registry will be cleaned.
    After an `unstub` you can't :func:`verify` anymore because all
    interactions will be forgotten.
def unstub(*objs):
    """Unstubs all stubbed methods and functions

    If you don't pass in any argument, *all* registered mocks and
    patched modules, classes etc. will be unstubbed.

    Note that additionally, the underlying registry will be cleaned.
    After an `unstub` you can't :func:`verify` anymore because all
    interactions will be forgotten.
    """

    if objs:
        for obj in objs:
            mock_registry.unstub(obj)
    else:
        mock_registry.unstub_all()
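To show what the `unstub` function above is undoing, here is a hedged sketch of stubbing and then restoring a real module function. The `mockito` import and the use of `when` are assumptions based on the docstrings quoted elsewhere in this section.

# Hedged sketch: patch os.path.exists with a mockito-style when(), then undo everything.
import os.path
from mockito import when, unstub

when(os.path).exists('/definitely/not/there').thenReturn(True)   # monkeypatch the module
assert os.path.exists('/definitely/not/there') is True           # stubbed answer

unstub()                                     # no arguments: forget *all* registered patches
assert not os.path.exists('/definitely/not/there')               # original behaviour is back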
[ "def verifyStubbedInvocationsAreUsed(*objs):\n \"\"\"Ensure stubs are actually used.\n\n This functions just ensures that stubbed methods are actually used. Its\n purpose is to detect interface changes after refactorings. It is meant\n to be invoked usually without arguments just before :func:`unstub`.\n\n \"\"\"\n if objs:\n theMocks = map(_get_mock_or_raise, objs)\n else:\n theMocks = mock_registry.get_registered_mocks()\n\n\n for mock in theMocks:\n for i in mock.stubbed_invocations:\n if not i.allow_zero_invocations and i.used < len(i.answers):\n raise VerificationError(\"\\nUnused stub: %s\" % i)", "def verifyNoUnwantedInteractions(*objs):\n \"\"\"Verifies that expectations set via `expect` are met\n\n E.g.::\n\n expect(os.path, times=1).exists(...).thenReturn(True)\n os.path('/foo')\n verifyNoUnwantedInteractions(os.path) # ok, called once\n\n If you leave out the argument *all* registered objects will\n be checked.\n\n .. note:: **DANGERZONE**: If you did not :func:`unstub` correctly,\n it is possible that old registered mocks, from other tests\n leak.\n\n See related :func:`expect`\n \"\"\"\n\n if objs:\n theMocks = map(_get_mock_or_raise, objs)\n else:\n theMocks = mock_registry.get_registered_mocks()\n\n for mock in theMocks:\n for i in mock.stubbed_invocations:\n i.verify()", "function(object, method) {\n if (method) {\n if (object['__prototype__' + method])\n delete object[method]\n else\n object[method] = object['__original__' + method]\n delete object['__prototype__' + method]\n delete object['__original____' + method]\n }\n else if (object) {\n for (var key in object)\n if (captures = key.match(/^(?:__prototype__|__original__)(.*)/))\n destub(object, captures[1])\n }\n else\n while (JSpec.stubbed.length)\n destub(JSpec.stubbed.shift())\n }", "def unplug(self):\n '''Remove the actor's methods from the callback registry.'''\n if not self.__plugged:\n return\n members = set([method for _, method\n in inspect.getmembers(self, predicate=inspect.ismethod)])\n for message in global_callbacks:\n global_callbacks[message] -= members\n self.__plugged = False", "def reset_mock(self):\n \"Restore the mock object to its initial state.\"\n self.called = False\n self.call_args = None\n self.call_count = 0\n self.mock_calls = _CallList()\n self.call_args_list = _CallList()\n self.method_calls = _CallList()\n\n for child in self._mock_children.values():\n if isinstance(child, _SpecState):\n continue\n child.reset_mock()\n\n ret = self._mock_return_value\n if _is_instance_mock(ret) and ret is not self:\n ret.reset_mock()", "def reset_mock(self, visited=None, return_value=False, side_effect=False):\n \"Restore the mock object to its initial state.\"\n if visited is None:\n visited = []\n if id(self) in visited:\n return\n visited.append(id(self))\n\n self.called = False\n self.call_args = None\n self.call_count = 0\n self.mock_calls = _CallList()\n self.call_args_list = _CallList()\n self.method_calls = _CallList()\n\n if return_value:\n self._mock_return_value = DEFAULT\n if side_effect:\n self._mock_side_effect = None\n\n for child in self._mock_children.values():\n if isinstance(child, _SpecState):\n continue\n child.reset_mock(visited)\n\n ret = self._mock_return_value\n if _is_instance_mock(ret) and ret is not self:\n ret.reset_mock(visited)", "def _teardown(self):\n '''\n Overload so that we can clear out the cache after a test run.\n '''\n # __new__ is a super-special case in that even when stubbing a class\n # which implements its own __new__ and subclasses object, the\n # 
\"Class.__new__\" reference is a staticmethod and not a method (or\n # function). That confuses the \"was_object_method\" logic in\n # StubFunction which then fails to delattr and from then on the class\n # is corrupted. So skip that teardown and use a __new__-specific case.\n setattr(self._instance, self._attr, staticmethod(self._new))\n StubNew._cache.pop(self._type)", "def reset(cls):\n \"\"\"Reset the registry to the standard multihash functions.\"\"\"\n # Maps function names (hyphens or underscores) to registered functions.\n cls._func_from_name = {}\n\n # Maps hashlib names to registered functions.\n cls._func_from_hash = {}\n\n # Hashlib compatibility data by function.\n cls._func_hash = {}\n\n register = cls._do_register\n for (func, hash_name, hash_new) in cls._std_func_data:\n register(func, func.name, hash_name, hash_new)\n assert set(cls._func_hash) == set(Func)", "def _init_stub(self, stub_init, **stub_kwargs):\n \"\"\"Initializes all other stubs for consistency's sake\"\"\"\n getattr(self.testbed, stub_init, lambda **kwargs: None)(**stub_kwargs)", "def _teardown(self):\n '''\n Put the original method back in place. This will also handle the\n special case when it putting back a class method.\n\n The following code snippet best describe why it fails using settar,\n the class method would be replaced with a bound method not a class\n method.\n\n >>> class Example(object):\n ... @classmethod\n ... def a_classmethod(self):\n ... pass\n ...\n >>> Example.__dict__['a_classmethod']\n <classmethod object at 0x7f5e6c298be8>\n >>> orig = getattr(Example, 'a_classmethod')\n >>> orig\n <bound method type.a_classmethod of <class '__main__.Example'>>\n >>> setattr(Example, 'a_classmethod', orig)\n >>> Example.__dict__['a_classmethod']\n <bound method type.a_classmethod of <class '__main__.Example'>>\n\n The only way to figure out if this is a class method is to check and\n see if the bound method im_self is a class, if so then we need to wrap\n the function object (im_func) with class method before setting it back\n on the class.\n '''\n # Figure out if this is a class method and we're unstubbing it on the\n # class to which it belongs. This addresses an edge case where a\n # module can expose a method of an instance. e.g gevent.\n if hasattr(self._obj, '__self__') and \\\n inspect.isclass(self._obj.__self__) and \\\n self._obj.__self__ is self._instance:\n setattr(\n self._instance, self._attr, classmethod(self._obj.__func__))\n elif hasattr(self._obj, 'im_self') and \\\n inspect.isclass(self._obj.im_self) and \\\n self._obj.im_self is self._instance:\n # Wrap it and set it back on the class\n setattr(self._instance, self._attr, classmethod(self._obj.im_func))\n else:\n setattr(self._instance, self._attr, self._obj)", "def StubOutWithMock(self, obj, attr_name, use_mock_anything=False):\n \"\"\"Replace a method, attribute, etc. with a Mock.\n\n This will replace a class or module with a MockObject, and everything else\n (method, function, etc) with a MockAnything. This can be overridden to\n always use a MockAnything by setting use_mock_anything to True.\n\n Args:\n obj: A Python object (class, module, instance, callable).\n attr_name: str. The name of the attribute to replace with a mock.\n use_mock_anything: bool. 
True if a MockAnything should be used regardless\n of the type of attribute.\n \"\"\"\n\n attr_to_replace = getattr(obj, attr_name)\n if type(attr_to_replace) in self._USE_MOCK_OBJECT and not use_mock_anything:\n stub = self.CreateMock(attr_to_replace)\n else:\n stub = self.CreateMockAnything()\n\n self.stubs.Set(obj, attr_name, stub)", "def _stub_obj(obj):\n '''\n Stub an object directly.\n '''\n # Annoying circular reference requires importing here. Would like to see\n # this cleaned up. @AW\n from .mock import Mock\n\n # Return an existing stub\n if isinstance(obj, Stub):\n return obj\n\n # If a Mock object, stub its __call__\n if isinstance(obj, Mock):\n return stub(obj.__call__)\n\n # If passed-in a type, assume that we're going to stub out the creation.\n # See StubNew for the awesome sauce.\n # if isinstance(obj, types.TypeType):\n if hasattr(types, 'TypeType') and isinstance(obj, types.TypeType):\n return StubNew(obj)\n elif hasattr(__builtins__, 'type') and \\\n isinstance(obj, __builtins__.type):\n return StubNew(obj)\n elif inspect.isclass(obj):\n return StubNew(obj)\n\n # I thought that types.UnboundMethodType differentiated these cases but\n # apparently not.\n if isinstance(obj, types.MethodType):\n # Handle differently if unbound because it's an implicit \"any instance\"\n if getattr(obj, 'im_self', None) is None:\n # Handle the python3 case and py2 filter\n if hasattr(obj, '__self__'):\n if obj.__self__ is not None:\n return StubMethod(obj)\n if sys.version_info.major == 2:\n return StubUnboundMethod(obj)\n else:\n return StubMethod(obj)\n\n # These aren't in the types library\n if type(obj).__name__ == 'method-wrapper':\n return StubMethodWrapper(obj)\n\n if type(obj).__name__ == 'wrapper_descriptor':\n raise UnsupportedStub(\n \"must call stub(obj,'%s') for slot wrapper on %s\",\n obj.__name__, obj.__objclass__.__name__)\n\n # (Mostly) Lastly, look for properties.\n # First look for the situation where there's a reference back to the\n # property.\n prop = obj\n if isinstance(getattr(obj, '__self__', None), property):\n obj = prop.__self__\n\n # Once we've found a property, we have to figure out how to reference\n # back to the owning class. This is a giant pain and we have to use gc\n # to find out where it comes from. 
This code is dense but resolves to\n # something like this:\n # >>> gc.get_referrers( foo.x )\n # [{'__dict__': <attribute '__dict__' of 'foo' objects>,\n # 'x': <property object at 0x7f68c99a16d8>,\n # '__module__': '__main__',\n # '__weakref__': <attribute '__weakref__' of 'foo' objects>,\n # '__doc__': None}]\n if isinstance(obj, property):\n klass, attr = None, None\n for ref in gc.get_referrers(obj):\n if klass and attr:\n break\n if isinstance(ref, dict) and ref.get('prop', None) is obj:\n klass = getattr(\n ref.get('__dict__', None), '__objclass__', None)\n for name, val in getattr(klass, '__dict__', {}).items():\n if val is obj:\n attr = name\n break\n # In the case of PyPy, we have to check all types that refer to\n # the property, and see if any of their attrs are the property\n elif isinstance(ref, type):\n # Use dir as a means to quickly walk through the class tree\n for name in dir(ref):\n if getattr(ref, name) == obj:\n klass = ref\n attr = name\n break\n\n if klass and attr:\n rval = stub(klass, attr)\n if prop != obj:\n return stub(rval, prop.__name__)\n return rval\n\n # If a function and it has an associated module, we can mock directly.\n # Note that this *must* be after properties, otherwise it conflicts with\n # stubbing out the deleter methods and such\n # Sadly, builtin functions and methods have the same type, so we have to\n # use the same stub class even though it's a bit ugly\n if isinstance(obj, (types.FunctionType, types.BuiltinFunctionType,\n types.BuiltinMethodType)) and hasattr(obj, '__module__'):\n return StubFunction(obj)\n\n raise UnsupportedStub(\"can't stub %s\", obj)" ]
[ 0.7259087562561035, 0.7201184034347534, 0.6834965348243713, 0.6720833778381348, 0.6676120162010193, 0.6579244136810303, 0.6551214456558228, 0.6547223925590515, 0.6515965461730957, 0.6505199670791626, 0.649671733379364, 0.6468276381492615 ]
Verify that no methods have been called on the given objs.

    Note that strict mocks usually throw early on unexpected, unstubbed
    invocations. Partial mocks ('monkeypatched' objects or modules) do not
    support this functionality at all, because actual usage is only recorded
    for the stubbed invocations. So this function is of limited use
    nowadays.
def verifyZeroInteractions(*objs):
    """Verify that no methods have been called on the given objs.

    Note that strict mocks usually throw early on unexpected, unstubbed
    invocations. Partial mocks ('monkeypatched' objects or modules) do not
    support this functionality at all, because actual usage is only recorded
    for the stubbed invocations. So this function is of limited use
    nowadays.

    """
    for obj in objs:
        theMock = _get_mock_or_raise(obj)

        if len(theMock.invocations) > 0:
            raise VerificationError(
                "\nUnwanted interaction: %s" % theMock.invocations[0])
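A hedged sketch of how the `verifyZeroInteractions` function above might be used with a dummy mock. The `mock()` factory, its loose-mock behaviour, and the import path are assumptions (mockito-python exposes such a factory); the failure branch is shown only to illustrate the raised `VerificationError`.

# Hedged sketch: assert that the code under test never touched a collaborator.
from mockito import mock, verifyZeroInteractions, unstub

mailer = mock()                      # loose mock: records every interaction
# ... run code that is NOT supposed to send mail ...
verifyZeroInteractions(mailer)       # passes: no method was ever called

mailer.send('hi')                    # now there is one recorded interaction
try:
    verifyZeroInteractions(mailer)   # raises VerificationError("\nUnwanted interaction: ...")
except Exception as exc:
    print('verification failed as expected:', exc)
finally:
    unstub()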
[ "def verifyStubbedInvocationsAreUsed(*objs):\n \"\"\"Ensure stubs are actually used.\n\n This functions just ensures that stubbed methods are actually used. Its\n purpose is to detect interface changes after refactorings. It is meant\n to be invoked usually without arguments just before :func:`unstub`.\n\n \"\"\"\n if objs:\n theMocks = map(_get_mock_or_raise, objs)\n else:\n theMocks = mock_registry.get_registered_mocks()\n\n\n for mock in theMocks:\n for i in mock.stubbed_invocations:\n if not i.allow_zero_invocations and i.used < len(i.answers):\n raise VerificationError(\"\\nUnused stub: %s\" % i)", "def verifyNoUnwantedInteractions(*objs):\n \"\"\"Verifies that expectations set via `expect` are met\n\n E.g.::\n\n expect(os.path, times=1).exists(...).thenReturn(True)\n os.path('/foo')\n verifyNoUnwantedInteractions(os.path) # ok, called once\n\n If you leave out the argument *all* registered objects will\n be checked.\n\n .. note:: **DANGERZONE**: If you did not :func:`unstub` correctly,\n it is possible that old registered mocks, from other tests\n leak.\n\n See related :func:`expect`\n \"\"\"\n\n if objs:\n theMocks = map(_get_mock_or_raise, objs)\n else:\n theMocks = mock_registry.get_registered_mocks()\n\n for mock in theMocks:\n for i in mock.stubbed_invocations:\n i.verify()", "def unstub(*objs):\n \"\"\"Unstubs all stubbed methods and functions\n\n If you don't pass in any argument, *all* registered mocks and\n patched modules, classes etc. will be unstubbed.\n\n Note that additionally, the underlying registry will be cleaned.\n After an `unstub` you can't :func:`verify` anymore because all\n interactions will be forgotten.\n \"\"\"\n\n if objs:\n for obj in objs:\n mock_registry.unstub(obj)\n else:\n mock_registry.unstub_all()", "def has_methods(*method_names):\n \"\"\"Return a test function that, when given an object (class or an\n instance), returns ``True`` if that object has all of the (regular) methods\n in ``method_names``. Note: this is testing for regular methods only and the\n test function will correctly return ``False`` if an instance has one of the\n specified methods as a classmethod or a staticmethod. However, it will\n incorrectly return ``True`` (false positives) for classmethods and\n staticmethods on a *class*.\n \"\"\"\n\n def test(obj):\n for method_name in method_names:\n try:\n method = getattr(obj, method_name)\n except AttributeError:\n return False\n else:\n if not callable(method):\n return False\n if not isinstance(obj, type):\n try:\n # An instance method is a method type with a __self__\n # attribute that references the instance.\n if method.__self__ is not obj:\n return False\n except AttributeError:\n return False\n return True\n\n return test", "def neverCalledWith(cls, spy, *args, **kwargs): #pylint: disable=invalid-name\n \"\"\"\n Checking the inspector is never called with partial args/kwargs\n Args: SinonSpy, args/kwargs\n \"\"\"\n cls.__is_spy(spy)\n if not (spy.neverCalledWith(*args, **kwargs)):\n raise cls.failException(cls.message)", "def assert_has_calls(self, calls, any_order=False):\n \"\"\"assert the mock has been called with the specified calls.\n The `mock_calls` list is checked for the calls.\n\n If `any_order` is False (the default) then the calls must be\n sequential. 
There can be extra calls before or after the\n specified calls.\n\n If `any_order` is True then the calls can be in any order, but\n they must all appear in `mock_calls`.\"\"\"\n if not any_order:\n if calls not in self.mock_calls:\n raise AssertionError(\n 'Calls not found.\\nExpected: %r\\n'\n 'Actual: %r' % (calls, self.mock_calls)\n )\n return\n\n all_calls = list(self.mock_calls)\n\n not_found = []\n for kall in calls:\n try:\n all_calls.remove(kall)\n except ValueError:\n not_found.append(kall)\n if not_found:\n raise AssertionError(\n '%r not all found in call list' % (tuple(not_found),)\n )", "def expect(obj, strict=None,\n times=None, atleast=None, atmost=None, between=None):\n \"\"\"Stub a function call, and set up an expected call count.\n\n Usage::\n\n # Given `dog` is an instance of a `Dog`\n expect(dog, times=1).bark('Wuff').thenReturn('Miau')\n dog.bark('Wuff')\n dog.bark('Wuff') # will throw at call time: too many invocations\n\n # maybe if you need to ensure that `dog.bark()` was called at all\n verifyNoUnwantedInteractions()\n\n .. note:: You must :func:`unstub` after stubbing, or use `with`\n statement.\n\n See :func:`when`, :func:`when2`, :func:`verifyNoUnwantedInteractions`\n\n \"\"\"\n if strict is None:\n strict = True\n theMock = _get_mock(obj, strict=strict)\n\n verification_fn = _get_wanted_verification(\n times=times, atleast=atleast, atmost=atmost, between=between)\n\n class Expect(object):\n def __getattr__(self, method_name):\n return invocation.StubbedInvocation(\n theMock, method_name, verification=verification_fn,\n strict=strict)\n\n return Expect()", "public static synchronized void verify(Object... objects) {\n for (Object mock : objects) {\n if (mock instanceof Class<?>) {\n verifyClass((Class<?>) mock);\n } else {\n EasyMockMethodInvocationControl invocationControl = (EasyMockMethodInvocationControl) MockRepository.getInstanceMethodInvocationControl(mock);\n if (invocationControl != null) {\n invocationControl.verify();\n } else {\n if (isNiceReplayAndVerifyMode() && !isEasyMocked(mock)) {\n // ignore non-mock\n } else {\n /*\n * Delegate to easy mock class extension if we have no\n * handler registered for this object.\n */\n try {\n org.easymock.EasyMock.verify(mock);\n } catch (RuntimeException e) {\n throw new RuntimeException(mock + \" is not a mock object\", e);\n }\n }\n }\n }\n }\n }", "def assert_chain_calls(self, *calls):\n \"\"\"\n Asserts that a chained method was called (parents in the chain do not\n matter, nor are they tracked). 
Use with `mock.call`.\n\n >>> obj.filter(foo='bar').select_related('baz')\n >>> obj.assert_chain_calls(mock.call.filter(foo='bar'))\n >>> obj.assert_chain_calls(mock.call.select_related('baz'))\n >>> obj.assert_chain_calls(mock.call.reverse())\n *** AssertionError: [call.reverse()] not all found in call list, ...\n\n \"\"\"\n\n all_calls = self.__parent.mock_calls[:]\n\n not_found = []\n for kall in calls:\n try:\n all_calls.remove(kall)\n except ValueError:\n not_found.append(kall)\n if not_found:\n if self.__parent.mock_calls:\n message = '%r not all found in call list, %d other(s) were:\\n%r' % (not_found, len(self.__parent.mock_calls), self.__parent.mock_calls)\n else:\n message = 'no calls were found'\n\n raise AssertionError(message)", "function areHostMethods(object) {\n var methodNames = Array.prototype.slice.call(arguments, 1),\n t, i, len = methodNames.length;\n for (i = 0; i < len; i++) {\n t = typeof object[methodNames[i]];\n if (!(/^(?:function|object|unknown)$/).test(t)) return false;\n }\n return true;\n }", "static void checkNoneNull(Iterable<?> objects) {\n if (!(objects instanceof ImmutableCollection)) {\n for (Object o : objects) {\n checkNotNull(o);\n }\n }\n }", "def spy(object):\n \"\"\"Spy an object.\n\n Spying means that all functions will behave as before, so they will\n be side effects, but the interactions can be verified afterwards.\n\n Returns Dummy-like, almost empty object as proxy to `object`.\n\n The *returned* object must be injected and used by the code under test;\n after that all interactions can be verified as usual.\n T.i. the original object **will not be patched**, and has no further\n knowledge as before.\n\n E.g.::\n\n import time\n time = spy(time)\n # inject time\n do_work(..., time)\n verify(time).time()\n\n \"\"\"\n if inspect.isclass(object) or inspect.ismodule(object):\n class_ = None\n else:\n class_ = object.__class__\n\n class Spy(_Dummy):\n if class_:\n __class__ = class_\n\n def __getattr__(self, method_name):\n return RememberedProxyInvocation(theMock, method_name)\n\n def __repr__(self):\n name = 'Spied'\n if class_:\n name += class_.__name__\n return \"<%s id=%s>\" % (name, id(self))\n\n\n obj = Spy()\n theMock = Mock(obj, strict=True, spec=object)\n\n mock_registry.register(obj, theMock)\n return obj" ]
[ 0.7882779836654663, 0.7657877802848816, 0.7032131552696228, 0.6859399080276489, 0.6765344142913818, 0.6675518155097961, 0.6675054430961609, 0.6674047112464905, 0.6667309999465942, 0.6651041507720947, 0.6645183563232422, 0.6618899703025818 ]
Verifies that expectations set via `expect` are met

    E.g.::

        expect(os.path, times=1).exists(...).thenReturn(True)
        os.path.exists('/foo')
        verifyNoUnwantedInteractions(os.path)  # ok, called once

    If you leave out the argument *all* registered objects will
    be checked.

    .. note:: **DANGERZONE**: If you did not :func:`unstub` correctly,
        it is possible that old registered mocks, from other tests
        leak.

    See related :func:`expect`
def verifyNoUnwantedInteractions(*objs):
    """Verifies that expectations set via `expect` are met

    E.g.::

        expect(os.path, times=1).exists(...).thenReturn(True)
        os.path.exists('/foo')
        verifyNoUnwantedInteractions(os.path)  # ok, called once

    If you leave out the argument *all* registered objects will
    be checked.

    .. note:: **DANGERZONE**: If you did not :func:`unstub` correctly,
        it is possible that old registered mocks, from other tests
        leak.

    See related :func:`expect`
    """

    if objs:
        theMocks = map(_get_mock_or_raise, objs)
    else:
        theMocks = mock_registry.get_registered_mocks()

    for mock in theMocks:
        for i in mock.stubbed_invocations:
            i.verify()
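The docstring of the passage above already contains an `os.path` example; the sketch below just spells it out end to end. The `mockito` import line is an assumption.

# Hedged sketch: an expectation set via expect() is checked by verifyNoUnwantedInteractions().
import os.path
from mockito import expect, verifyNoUnwantedInteractions, unstub

expect(os.path, times=1).exists('/foo').thenReturn(True)

os.path.exists('/foo')                   # exactly the one expected call
verifyNoUnwantedInteractions(os.path)    # ok, called once -- would raise if the count were off
unstub()                                 # clean up so no stale mocks leak into other tests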
[ "def unmet_expectations(self):\n '''\n Assert that all expectations on the stub have been met.\n '''\n unmet = []\n for exp in self._expectations:\n if not exp.closed(with_counts=True):\n unmet.append(ExpectationNotSatisfied(exp))\n return unmet", "def expect(obj, strict=None,\n times=None, atleast=None, atmost=None, between=None):\n \"\"\"Stub a function call, and set up an expected call count.\n\n Usage::\n\n # Given `dog` is an instance of a `Dog`\n expect(dog, times=1).bark('Wuff').thenReturn('Miau')\n dog.bark('Wuff')\n dog.bark('Wuff') # will throw at call time: too many invocations\n\n # maybe if you need to ensure that `dog.bark()` was called at all\n verifyNoUnwantedInteractions()\n\n .. note:: You must :func:`unstub` after stubbing, or use `with`\n statement.\n\n See :func:`when`, :func:`when2`, :func:`verifyNoUnwantedInteractions`\n\n \"\"\"\n if strict is None:\n strict = True\n theMock = _get_mock(obj, strict=strict)\n\n verification_fn = _get_wanted_verification(\n times=times, atleast=atleast, atmost=atmost, between=between)\n\n class Expect(object):\n def __getattr__(self, method_name):\n return invocation.StubbedInvocation(\n theMock, method_name, verification=verification_fn,\n strict=strict)\n\n return Expect()", "def verifyStubbedInvocationsAreUsed(*objs):\n \"\"\"Ensure stubs are actually used.\n\n This functions just ensures that stubbed methods are actually used. Its\n purpose is to detect interface changes after refactorings. It is meant\n to be invoked usually without arguments just before :func:`unstub`.\n\n \"\"\"\n if objs:\n theMocks = map(_get_mock_or_raise, objs)\n else:\n theMocks = mock_registry.get_registered_mocks()\n\n\n for mock in theMocks:\n for i in mock.stubbed_invocations:\n if not i.allow_zero_invocations and i.used < len(i.answers):\n raise VerificationError(\"\\nUnused stub: %s\" % i)", "def unstub(*objs):\n \"\"\"Unstubs all stubbed methods and functions\n\n If you don't pass in any argument, *all* registered mocks and\n patched modules, classes etc. will be unstubbed.\n\n Note that additionally, the underlying registry will be cleaned.\n After an `unstub` you can't :func:`verify` anymore because all\n interactions will be forgotten.\n \"\"\"\n\n if objs:\n for obj in objs:\n mock_registry.unstub(obj)\n else:\n mock_registry.unstub_all()", "def verifyZeroInteractions(*objs):\n \"\"\"Verify that no methods have been called on given objs.\n\n Note that strict mocks usually throw early on unexpected, unstubbed\n invocations. Partial mocks ('monkeypatched' objects or modules) do not\n support this functionality at all, bc only for the stubbed invocations\n the actual usage gets recorded. 
So this function is of limited use,\n nowadays.\n\n \"\"\"\n for obj in objs:\n theMock = _get_mock_or_raise(obj)\n\n if len(theMock.invocations) > 0:\n raise VerificationError(\n \"\\nUnwanted interaction: %s\" % theMock.invocations[0])", "def verify(self):\n \"\"\"\n Verifying all inspectors in exp_list\n Return:\n True: pass all inspectors\n False: fail at more than one inspector\n \"\"\"\n for expectation in self.exp_list:\n if hasattr(expectation, \"verify\") and not expectation.verify():\n return False\n return True", "def verify(self):\n \"\"\"Verifies expectations on all doubled objects.\n\n :raise: ``MockExpectationError`` on the first expectation that is not satisfied, if any.\n \"\"\"\n\n if self._is_verified:\n return\n\n for proxy in self._proxies.values():\n proxy.verify()\n\n self._is_verified = True", "def _Verify(self):\n \"\"\"Verify that all of the expected calls have been made.\n\n Raises:\n ExpectedMethodCallsError: if there are still more method calls in the\n expected queue.\n \"\"\"\n\n # If the list of expected calls is not empty, raise an exception\n if self._expected_calls_queue:\n # The last MultipleTimesGroup is not popped from the queue.\n if (len(self._expected_calls_queue) == 1 and\n isinstance(self._expected_calls_queue[0], MultipleTimesGroup) and\n self._expected_calls_queue[0].IsSatisfied()):\n pass\n else:\n raise ExpectedMethodCallsError(self._expected_calls_queue)", "def __mock_verify\n __mock_defis.values.all?(&:empty?) || begin\n msg, defis_with_same_msg = __mock_defis.find{ |_, v| v.any? }\n args, defis = defis_with_same_msg.group_by(&:args).first\n dsize = __mock_disps[msg].count{ |d| d.args == args }\n Mock.__send__(:raise, # Too little times\n Expected.new(object, defis.first, defis.size + dsize, dsize))\n end\n end", "def and_raises(self, *errors):\n \"Expects an error or more to be raised from the given expectation.\"\n for error in errors:\n self.__expect(Expectation.raises, error)", "def expect(obj, caller_args=[]):\n \"\"\"Primary method for test assertions in Specter\n\n :param obj: The evaluated target object\n :param caller_args: Is only used when using expecting a raised Exception\n \"\"\"\n line, module = get_module_and_line('__spec__')\n src_params = ExpectParams(line, module)\n\n expect_obj = ExpectAssert(\n obj,\n src_params=src_params,\n caller_args=caller_args\n )\n _add_expect_to_wrapper(expect_obj)\n return expect_obj", "def with_matching_args(self, *args, **kwargs):\n \"\"\"Set the last call to expect specific argument values if those arguments exist.\n\n Unlike :func:`fudge.Fake.with_args` use this if you want to only declare\n expectations about matching arguments. Any unknown keyword arguments\n used by the app under test will be allowed.\n\n For example, you can declare positional arguments but ignore keyword arguments:\n\n .. doctest::\n\n >>> import fudge\n >>> db = fudge.Fake('db').expects('transaction').with_matching_args('insert')\n\n With this declaration, any keyword argument is allowed:\n\n .. doctest::\n\n >>> db.transaction('insert', isolation_level='lock')\n >>> db.transaction('insert', isolation_level='shared')\n >>> db.transaction('insert', retry_on_error=True)\n\n .. doctest::\n :hide:\n\n >>> fudge.clear_expectations()\n\n .. note::\n\n you may get more mileage out of :mod:`fudge.inspector` functions as\n described in :func:`fudge.Fake.with_args`\n\n \"\"\"\n exp = self._get_current_call()\n if args:\n exp.expected_matching_args = args\n if kwargs:\n exp.expected_matching_kwargs = kwargs\n return self" ]
[ 0.7114996910095215, 0.7048271894454956, 0.6924911737442017, 0.6901080012321472, 0.6899175047874451, 0.6762266159057617, 0.6691651344299316, 0.6686018109321594, 0.6515703201293945, 0.6468772292137146, 0.6459056735038757, 0.6432895064353943 ]
Ensure stubs are actually used.

    This function just ensures that stubbed methods are actually used. Its
    purpose is to detect interface changes after refactorings. It is meant
    to be invoked usually without arguments just before :func:`unstub`.
def verifyStubbedInvocationsAreUsed(*objs):
    """Ensure stubs are actually used.

    This function just ensures that stubbed methods are actually used. Its
    purpose is to detect interface changes after refactorings. It is meant
    to be invoked usually without arguments just before :func:`unstub`.

    """
    if objs:
        theMocks = map(_get_mock_or_raise, objs)
    else:
        theMocks = mock_registry.get_registered_mocks()

    for mock in theMocks:
        for i in mock.stubbed_invocations:
            if not i.allow_zero_invocations and i.used < len(i.answers):
                raise VerificationError("\nUnused stub: %s" % i)
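Finally, a hedged sketch of the `verifyStubbedInvocationsAreUsed` check above, typically placed right before `unstub` in test teardown. The imports are assumptions, and the scenario (a refactoring that left a stale stub) is hypothetical.

# Hedged sketch: an interface change left a stub that nothing calls anymore.
import os.path
from mockito import when, verifyStubbedInvocationsAreUsed, unstub

when(os.path).exists('/foo').thenReturn(True)
# ... the refactored code under test no longer calls os.path.exists('/foo') ...

try:
    verifyStubbedInvocationsAreUsed()    # raises VerificationError: "Unused stub: ..."
except Exception as exc:
    print('stale stub detected:', exc)
finally:
    unstub()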
[ "def unstub(*objs):\n \"\"\"Unstubs all stubbed methods and functions\n\n If you don't pass in any argument, *all* registered mocks and\n patched modules, classes etc. will be unstubbed.\n\n Note that additionally, the underlying registry will be cleaned.\n After an `unstub` you can't :func:`verify` anymore because all\n interactions will be forgotten.\n \"\"\"\n\n if objs:\n for obj in objs:\n mock_registry.unstub(obj)\n else:\n mock_registry.unstub_all()", "def _init_stub(self, stub_init, **stub_kwargs):\n \"\"\"Initializes all other stubs for consistency's sake\"\"\"\n getattr(self.testbed, stub_init, lambda **kwargs: None)(**stub_kwargs)", "def _stub_obj(obj):\n '''\n Stub an object directly.\n '''\n # Annoying circular reference requires importing here. Would like to see\n # this cleaned up. @AW\n from .mock import Mock\n\n # Return an existing stub\n if isinstance(obj, Stub):\n return obj\n\n # If a Mock object, stub its __call__\n if isinstance(obj, Mock):\n return stub(obj.__call__)\n\n # If passed-in a type, assume that we're going to stub out the creation.\n # See StubNew for the awesome sauce.\n # if isinstance(obj, types.TypeType):\n if hasattr(types, 'TypeType') and isinstance(obj, types.TypeType):\n return StubNew(obj)\n elif hasattr(__builtins__, 'type') and \\\n isinstance(obj, __builtins__.type):\n return StubNew(obj)\n elif inspect.isclass(obj):\n return StubNew(obj)\n\n # I thought that types.UnboundMethodType differentiated these cases but\n # apparently not.\n if isinstance(obj, types.MethodType):\n # Handle differently if unbound because it's an implicit \"any instance\"\n if getattr(obj, 'im_self', None) is None:\n # Handle the python3 case and py2 filter\n if hasattr(obj, '__self__'):\n if obj.__self__ is not None:\n return StubMethod(obj)\n if sys.version_info.major == 2:\n return StubUnboundMethod(obj)\n else:\n return StubMethod(obj)\n\n # These aren't in the types library\n if type(obj).__name__ == 'method-wrapper':\n return StubMethodWrapper(obj)\n\n if type(obj).__name__ == 'wrapper_descriptor':\n raise UnsupportedStub(\n \"must call stub(obj,'%s') for slot wrapper on %s\",\n obj.__name__, obj.__objclass__.__name__)\n\n # (Mostly) Lastly, look for properties.\n # First look for the situation where there's a reference back to the\n # property.\n prop = obj\n if isinstance(getattr(obj, '__self__', None), property):\n obj = prop.__self__\n\n # Once we've found a property, we have to figure out how to reference\n # back to the owning class. This is a giant pain and we have to use gc\n # to find out where it comes from. 
This code is dense but resolves to\n # something like this:\n # >>> gc.get_referrers( foo.x )\n # [{'__dict__': <attribute '__dict__' of 'foo' objects>,\n # 'x': <property object at 0x7f68c99a16d8>,\n # '__module__': '__main__',\n # '__weakref__': <attribute '__weakref__' of 'foo' objects>,\n # '__doc__': None}]\n if isinstance(obj, property):\n klass, attr = None, None\n for ref in gc.get_referrers(obj):\n if klass and attr:\n break\n if isinstance(ref, dict) and ref.get('prop', None) is obj:\n klass = getattr(\n ref.get('__dict__', None), '__objclass__', None)\n for name, val in getattr(klass, '__dict__', {}).items():\n if val is obj:\n attr = name\n break\n # In the case of PyPy, we have to check all types that refer to\n # the property, and see if any of their attrs are the property\n elif isinstance(ref, type):\n # Use dir as a means to quickly walk through the class tree\n for name in dir(ref):\n if getattr(ref, name) == obj:\n klass = ref\n attr = name\n break\n\n if klass and attr:\n rval = stub(klass, attr)\n if prop != obj:\n return stub(rval, prop.__name__)\n return rval\n\n # If a function and it has an associated module, we can mock directly.\n # Note that this *must* be after properties, otherwise it conflicts with\n # stubbing out the deleter methods and such\n # Sadly, builtin functions and methods have the same type, so we have to\n # use the same stub class even though it's a bit ugly\n if isinstance(obj, (types.FunctionType, types.BuiltinFunctionType,\n types.BuiltinMethodType)) and hasattr(obj, '__module__'):\n return StubFunction(obj)\n\n raise UnsupportedStub(\"can't stub %s\", obj)", "def _handle_call(self, actual_call, stubbed_call):\n \"\"\"Extends Stub call handling behavior to be callable by default.\"\"\"\n self._actual_calls.append(actual_call)\n use_call = stubbed_call or actual_call\n return use_call.return_value", "def verifyNoUnwantedInteractions(*objs):\n \"\"\"Verifies that expectations set via `expect` are met\n\n E.g.::\n\n expect(os.path, times=1).exists(...).thenReturn(True)\n os.path('/foo')\n verifyNoUnwantedInteractions(os.path) # ok, called once\n\n If you leave out the argument *all* registered objects will\n be checked.\n\n .. note:: **DANGERZONE**: If you did not :func:`unstub` correctly,\n it is possible that old registered mocks, from other tests\n leak.\n\n See related :func:`expect`\n \"\"\"\n\n if objs:\n theMocks = map(_get_mock_or_raise, objs)\n else:\n theMocks = mock_registry.get_registered_mocks()\n\n for mock in theMocks:\n for i in mock.stubbed_invocations:\n i.verify()", "def _teardown(self):\n '''\n Put the original method back in place. This will also handle the\n special case when it putting back a class method.\n\n The following code snippet best describe why it fails using settar,\n the class method would be replaced with a bound method not a class\n method.\n\n >>> class Example(object):\n ... @classmethod\n ... def a_classmethod(self):\n ... 
pass\n ...\n >>> Example.__dict__['a_classmethod']\n <classmethod object at 0x7f5e6c298be8>\n >>> orig = getattr(Example, 'a_classmethod')\n >>> orig\n <bound method type.a_classmethod of <class '__main__.Example'>>\n >>> setattr(Example, 'a_classmethod', orig)\n >>> Example.__dict__['a_classmethod']\n <bound method type.a_classmethod of <class '__main__.Example'>>\n\n The only way to figure out if this is a class method is to check and\n see if the bound method im_self is a class, if so then we need to wrap\n the function object (im_func) with class method before setting it back\n on the class.\n '''\n # Figure out if this is a class method and we're unstubbing it on the\n # class to which it belongs. This addresses an edge case where a\n # module can expose a method of an instance. e.g gevent.\n if hasattr(self._obj, '__self__') and \\\n inspect.isclass(self._obj.__self__) and \\\n self._obj.__self__ is self._instance:\n setattr(\n self._instance, self._attr, classmethod(self._obj.__func__))\n elif hasattr(self._obj, 'im_self') and \\\n inspect.isclass(self._obj.im_self) and \\\n self._obj.im_self is self._instance:\n # Wrap it and set it back on the class\n setattr(self._instance, self._attr, classmethod(self._obj.im_func))\n else:\n setattr(self._instance, self._attr, self._obj)", "def patch_stackless():\n '''\n This function should be called to patch the stackless module so that new tasklets are properly tracked in the\n debugger.\n '''\n global _application_set_schedule_callback\n _application_set_schedule_callback = stackless.set_schedule_callback(_schedule_callback)\n\n def set_schedule_callback(callable):\n global _application_set_schedule_callback\n old = _application_set_schedule_callback\n _application_set_schedule_callback = callable\n return old\n\n def get_schedule_callback():\n global _application_set_schedule_callback\n return _application_set_schedule_callback\n\n set_schedule_callback.__doc__ = stackless.set_schedule_callback.__doc__\n if hasattr(stackless, \"get_schedule_callback\"):\n get_schedule_callback.__doc__ = stackless.get_schedule_callback.__doc__\n stackless.set_schedule_callback = set_schedule_callback\n stackless.get_schedule_callback = get_schedule_callback\n\n if not hasattr(stackless.tasklet, \"trace_function\"):\n # Older versions of Stackless, released before 2014\n __call__.__doc__ = stackless.tasklet.__call__.__doc__\n stackless.tasklet.__call__ = __call__\n\n setup.__doc__ = stackless.tasklet.setup.__doc__\n stackless.tasklet.setup = setup\n\n run.__doc__ = stackless.run.__doc__\n stackless.run = run", "def stub(base_class=None, **attributes):\n \"\"\"creates a python class on-the-fly with the given keyword-arguments\n as class-attributes accessible with .attrname.\n\n The new class inherits from\n Use this to mock rather than stub.\n \"\"\"\n if base_class is None:\n base_class = object\n\n members = {\n \"__init__\": lambda self: None,\n \"__new__\": lambda *args, **kw: object.__new__(\n *args, *kw\n ), # remove __new__ and metaclass behavior from object\n \"__metaclass__\": None,\n }\n members.update(attributes)\n # let's create a python class on-the-fly :)\n return type(f\"{base_class.__name__}Stub\", (base_class,), members)()", "def _teardown(self):\n '''\n Overload so that we can clear out the cache after a test run.\n '''\n # __new__ is a super-special case in that even when stubbing a class\n # which implements its own __new__ and subclasses object, the\n # \"Class.__new__\" reference is a staticmethod and not a method (or\n # function). 
That confuses the \"was_object_method\" logic in\n # StubFunction which then fails to delattr and from then on the class\n # is corrupted. So skip that teardown and use a __new__-specific case.\n setattr(self._instance, self._attr, staticmethod(self._new))\n StubNew._cache.pop(self._type)", "def stub(self, obj, attr=None):\n '''\n Stub an object. If attr is not None, will attempt to stub that\n attribute on the object. Only required for modules and other rare\n cases where we can't determine the binding from the object.\n '''\n s = stub(obj, attr)\n if s not in self._stubs:\n self._stubs.append(s)\n return s", "def StubOutWithMock(self, obj, attr_name, use_mock_anything=False):\n \"\"\"Replace a method, attribute, etc. with a Mock.\n\n This will replace a class or module with a MockObject, and everything else\n (method, function, etc) with a MockAnything. This can be overridden to\n always use a MockAnything by setting use_mock_anything to True.\n\n Args:\n obj: A Python object (class, module, instance, callable).\n attr_name: str. The name of the attribute to replace with a mock.\n use_mock_anything: bool. True if a MockAnything should be used regardless\n of the type of attribute.\n \"\"\"\n\n attr_to_replace = getattr(obj, attr_name)\n if type(attr_to_replace) in self._USE_MOCK_OBJECT and not use_mock_anything:\n stub = self.CreateMock(attr_to_replace)\n else:\n stub = self.CreateMockAnything()\n\n self.stubs.Set(obj, attr_name, stub)", "def wrap2stub(self, customfunc):\n \"\"\"\n Wrapping the inspector as a stub based on the type\n Args:\n customfunc: function that replaces the original\n Returns:\n function, the spy wrapper around the customfunc\n \"\"\"\n if self.args_type == \"MODULE_FUNCTION\":\n wrapper = Wrapper.wrap_spy(customfunc, self.obj)\n setattr(self.obj, self.prop, wrapper)\n elif self.args_type == \"MODULE\":\n wrapper = Wrapper.EmptyClass\n setattr(CPSCOPE, self.obj.__name__, wrapper)\n elif self.args_type == \"FUNCTION\":\n wrapper = Wrapper.wrap_spy(customfunc)\n setattr(CPSCOPE, self.obj.__name__, wrapper)\n elif self.args_type == \"PURE\":\n wrapper = Wrapper.wrap_spy(customfunc)\n setattr(self.pure, \"func\", wrapper)\n return wrapper" ]
[ 0.7473836541175842, 0.7073056697845459, 0.6898674368858337, 0.6717952489852905, 0.6665818095207214, 0.6658259630203247, 0.6647620797157288, 0.6638123989105225, 0.663215160369873, 0.6608758568763733, 0.6538175344467163, 0.6529616713523865 ]
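The first negative above quotes mockito's `unstub`; below is a minimal sketch of the stub/verify/unstub cycle it describes, assuming mockito-python is installed (the path argument is an arbitrary placeholder):

import os
from mockito import when, verify, unstub

when(os.path).exists('/tmp/placeholder').thenReturn(True)  # stub the call on the real module
assert os.path.exists('/tmp/placeholder') is True          # the stubbed answer is returned
verify(os.path).exists('/tmp/placeholder')                 # the interaction was recorded
unstub()                                                    # restore os.path and clear the registry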
Destructure a given function into its host and its name. The 'host' of a function is a module, for methods it is usually its instance or its class. This is safe only for methods, for module wide, globally declared names it must be considered experimental. For all reasonable fn: ``getattr(*get_function_host(fn)) == fn`` Returns tuple (host, fn-name) Otherwise should raise TypeError
def get_function_host(fn): """Destructure a given function into its host and its name. The 'host' of a function is a module, for methods it is usually its instance or its class. This is safe only for methods, for module wide, globally declared names it must be considered experimental. For all reasonable fn: ``getattr(*get_function_host(fn)) == fn`` Returns tuple (host, fn-name) Otherwise should raise TypeError """ obj = None try: name = fn.__name__ obj = fn.__self__ except AttributeError: pass if obj is None: # Due to how python imports work, everything that is global on a module # level must be regarded as not safe here. For now, we go for the extra # mile, TBC, because just specifying `os.path.exists` would be 'cool'. # # TLDR;: # E.g. `inspect.getmodule(os.path.exists)` returns `genericpath` bc # that's where `exists` is defined and comes from. But from the point # of view of the user `exists` always comes and is used from `os.path` # which points e.g. to `ntpath`. We thus must patch `ntpath`. # But that's the same for most imports:: # # # b.py # from a import foo # # Now asking `getmodule(b.foo)` it tells you `a`, but we access and use # `b.foo` and we therefore must patch `b`. obj, name = find_invoking_frame_and_try_parse() # safety check! assert getattr(obj, name) == fn return obj, name
[ "def named_function(name):\n \"\"\"Gets a fully named module-global object.\"\"\"\n name_parts = name.split('.')\n module = named_object('.'.join(name_parts[:-1]))\n func = getattr(module, name_parts[-1])\n if hasattr(func, 'original_func'):\n func = func.original_func\n return func", "def get_original_fn(fn):\n \"\"\"Gets the very original function of a decorated one.\"\"\"\n\n fn_type = type(fn)\n if fn_type is classmethod or fn_type is staticmethod:\n return get_original_fn(fn.__func__)\n if hasattr(fn, \"original_fn\"):\n return fn.original_fn\n if hasattr(fn, \"fn\"):\n fn.original_fn = get_original_fn(fn.fn)\n return fn.original_fn\n return fn", "def identify(fn):\n ''' returns a tuple that is used to match\n functions to their neighbors in their\n resident namespaces '''\n return (\n fn.__globals__['__name__'], # module namespace\n getattr(fn, '__qualname__', getattr(fn, '__name__', '')) # class and function namespace\n )\n def __init__(self, fn):\n self.validate_function(fn)\n self.configured = False\n self.has_backup_plan = False\n if self.has_args():\n self.backup_plan = fn\n else:\n self.id = self.identify(fn)\n self.backup_plan = big.overload._cache.get(self.id, None)\n #if self.id in overload._cache:\n # self.backup_plan =\n self.configure_with(fn)\n #wraps(fn)(self)\n\n def __call__(self, *args, **kwargs):\n #print(locals())\n try: # try running like normal\n return self.fn(*args, **kwargs)\n except Exception as ex:\n if self.has_backup_plan:\n return self.backup_plan(*args, **kwargs) # run backup plan\n elif self.configured:\n raise ex # no backup plan, abort\n else:\n # complete unconfigured setup\n self.configure_with(*args, **kwargs)\n return self", "def prepare_namespace(self, func):\n \"\"\"\n Prepares the function to be run after deserializing it.\n Re-associates any previously bound variables and modules from the closure\n\n Returns:\n callable: ready-to-call function\n \"\"\"\n if self.is_imethod:\n to_run = getattr(self.obj, self.imethod_name)\n else:\n to_run = func\n\n for varname, modulename in self.global_modules.items():\n to_run.__globals__[varname] = __import__(modulename)\n if self.global_closure:\n to_run.__globals__.update(self.global_closure)\n if self.global_functions:\n to_run.__globals__.update(self.global_functions)\n return to_run", "def host_info_getter(func, name=None):\n \"\"\"\n The decorated function is added to the process of collecting the host_info.\n\n This just adds the decorated function to the global\n ``sacred.host_info.host_info_gatherers`` dictionary.\n The functions from that dictionary are used when collecting the host info\n using :py:func:`~sacred.host_info.get_host_info`.\n\n Parameters\n ----------\n func : callable\n A function that can be called without arguments and returns some\n json-serializable information.\n name : str, optional\n The name of the corresponding entry in host_info.\n Defaults to the name of the function.\n\n Returns\n -------\n The function itself.\n\n \"\"\"\n name = name or func.__name__\n host_info_gatherers[name] = func\n return func", "def function_to_serializable_representation(fn):\n \"\"\"\n Converts a Python function into a serializable representation. 
Does not\n currently work for methods or functions with closure data.\n \"\"\"\n if type(fn) not in (FunctionType, BuiltinFunctionType):\n raise ValueError(\n \"Can't serialize %s : %s, must be globally defined function\" % (\n fn, type(fn),))\n\n if hasattr(fn, \"__closure__\") and fn.__closure__ is not None:\n raise ValueError(\"No serializable representation for closure %s\" % (fn,))\n\n return {\"__module__\": get_module_name(fn), \"__name__\": fn.__name__}", "def fn_name(fn):\n '''\n Gets a funtion fully quaified name.\n Args :\n fn : The function.\n Returns : The name of the function.\n '''\n expression = '(\\S+) (?:of|at)'\n #Checks if the function is a mehtod and should have the self argument passed\n is_method = inspect.ismethod(fn)\n\n #Builds the name of the method,module.class.method or module.method\n name = '{}.{}'.format(fn.__module__, re.compile(expression).findall(str(fn))[0])\n\n return name,is_method", "def get_func(fullFuncName):\n \"\"\"Retrieve a function object from a full dotted-package name.\"\"\"\n\n # Parse out the path, module, and function\n lastDot = fullFuncName.rfind(u\".\")\n funcName = fullFuncName[lastDot + 1:]\n modPath = fullFuncName[:lastDot]\n\n aMod = get_mod(modPath)\n aFunc = getattr(aMod, funcName)\n\n # Assert that the function is a *callable* attribute.\n assert callable(aFunc), u\"%s is not callable.\" % fullFuncName\n\n # Return a reference to the function itself,\n # not the results of the function.\n return aFunc", "def get_function_name_from_frame(frame):\n # type: (Any) -> str\n \"\"\"\n Heuristic to find the class-specified name by @guido\n\n For instance methods we return \"ClassName.method_name\"\n For functions we return \"function_name\"\n \"\"\"\n\n def bases_to_mro(cls, bases):\n # type: (type, List[type]) -> List[type]\n \"\"\"\n Convert __bases__ to __mro__\n \"\"\"\n mro = [cls]\n for base in bases:\n if base not in mro:\n mro.append(base)\n sub_bases = getattr(base, '__bases__', None)\n if sub_bases:\n sub_bases = [sb for sb in sub_bases if sb not in mro and sb not in bases]\n if sub_bases:\n mro.extend(bases_to_mro(base, sub_bases))\n return mro\n\n code = frame.f_code\n # This ought to be aggressively cached with the code object as key.\n funcname = code.co_name\n if code.co_varnames:\n varname = code.co_varnames[0]\n if varname == 'self':\n inst = frame.f_locals.get(varname)\n if inst is not None:\n try:\n mro = inst.__class__.__mro__\n except AttributeError:\n mro = None\n try:\n bases = inst.__class__.__bases__\n except AttributeError:\n bases = None\n else:\n mro = bases_to_mro(inst.__class__, bases)\n if mro:\n for cls in mro:\n bare_method = cls.__dict__.get(funcname)\n if bare_method and getattr(bare_method, '__code__', None) is code:\n return '%s.%s' % (cls.__name__, funcname)\n return funcname", "def _str_to_fn(self, fn_as_str):\n \"\"\"\n If the argument is not a string, return whatever was passed in.\n Parses a string such as package.module.function, imports the module\n and returns the function.\n\n :param fn_as_str: The string to parse. 
If not a string, return it.\n \"\"\"\n if not isinstance(fn_as_str, str):\n return fn_as_str\n\n path, _, function = fn_as_str.rpartition('.')\n module = importlib.import_module(path)\n return getattr(module, function)", "def host_context(func):\n \"Sets the context of the setting to the current host\"\n @wraps(func)\n def decorator(*args, **kwargs):\n hosts = get_hosts_settings()\n with settings(**hosts[env.host]):\n return func(*args, **kwargs)\n return decorator", "def decompile_func(func):\n '''\n Decompile a function into ast.FunctionDef node.\n \n :param func: python function (can not be a built-in)\n \n :return: ast.FunctionDef instance.\n '''\n code = func.__code__\n\n # For python 3\n# defaults = func.func_defaults if sys.version_info.major < 3 else func.__defaults__\n# if defaults:\n# default_names = code.co_varnames[:code.co_argcount][-len(defaults):]\n# else:\n# default_names = []\n# defaults = [_ast.Name(id='%s_default' % name, ctx=_ast.Load() , lineno=0, col_offset=0) for name in default_names]\n ast_node = make_function(code, defaults=[], lineno=code.co_firstlineno)\n\n return ast_node" ]
[ 0.7048628330230713, 0.697286069393158, 0.6927260756492615, 0.686748743057251, 0.677983283996582, 0.6778026223182678, 0.6769918203353882, 0.6759390234947205, 0.6753368377685547, 0.6752420663833618, 0.6730707287788391, 0.6728537082672119 ]
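A short usage sketch for `get_function_host` above, exercising the documented invariant ``getattr(*get_function_host(fn)) == fn`` on a bound method; the import path is an assumption (in the mockito sources this is a private utility):

from mockito.utils import get_function_host  # assumed location of the helper shown above

class Greeter:
    def hello(self):
        return "hi"

g = Greeter()
host, name = get_function_host(g.hello)                   # bound method: host is the instance
assert host is g and name == "hello"
assert getattr(*get_function_host(g.hello)) == g.hello    # the documented invariant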
Return obj for given dotted path. Typical inputs for `path` are 'os' or 'os.path' in which case you get a module; or 'os.path.exists' in which case you get a function from that module. Just returns the given input in case it is not a str. Note: Relative imports not supported. Raises ImportError or AttributeError as appropriate.
def get_obj(path): """Return obj for given dotted path. Typical inputs for `path` are 'os' or 'os.path' in which case you get a module; or 'os.path.exists' in which case you get a function from that module. Just returns the given input in case it is not a str. Note: Relative imports not supported. Raises ImportError or AttributeError as appropriate. """ # Since we usually pass in mocks here; duck typing is not appropriate # (mocks respond to every attribute). if not isinstance(path, str): return path if path.startswith('.'): raise TypeError('relative imports are not supported') parts = path.split('.') head, tail = parts[0], parts[1:] obj = importlib.import_module(head) # Normally a simple reduce, but we go the extra mile # for good exception messages. for i, name in enumerate(tail): try: obj = getattr(obj, name) except AttributeError: # Note the [:i] instead of [:i+1], so we get the path just # *before* the AttributeError, t.i. the part of it that went ok. module = '.'.join([head] + tail[:i]) try: importlib.import_module(module) except ImportError: raise AttributeError( "object '%s' has no attribute '%s'" % (module, name)) else: raise AttributeError( "module '%s' has no attribute '%s'" % (module, name)) return obj
[ "def import_string(dotted_path: str) -> Any:\n \"\"\"\n Stolen approximately from django. Import a dotted module path and return the attribute/class designated by the\n last name in the path. Raise ImportError if the import fails.\n \"\"\"\n try:\n module_path, class_name = dotted_path.strip(' ').rsplit('.', 1)\n except ValueError as e:\n raise ImportError(f'\"{dotted_path}\" doesn\\'t look like a module path') from e\n\n module = import_module(module_path)\n try:\n return getattr(module, class_name)\n except AttributeError as e:\n raise ImportError(f'Module \"{module_path}\" does not define a \"{class_name}\" attribute') from e", "def get_obj_attr_tuple(path):\n \"\"\"Split path into (obj, attribute) tuple.\n\n Given `path` is 'os.path.exists' will thus return `(os.path, 'exists')`\n\n If path is not a str, delegates to `get_function_host(path)`\n\n \"\"\"\n if not isinstance(path, str):\n return get_function_host(path)\n\n if path.startswith('.'):\n raise TypeError('relative imports are not supported')\n\n try:\n leading, end = path.rsplit('.', 1)\n except ValueError:\n raise TypeError('path must have dots')\n\n return get_obj(leading), end", "def import_dotted_path(path):\n \"\"\"\n Takes a dotted path to a member name in a module, and returns\n the member after importing it.\n \"\"\"\n # stolen from Mezzanine (mezzanine.utils.importing.import_dotted_path)\n try:\n module_path, member_name = path.rsplit(\".\", 1)\n module = import_module(module_path)\n return getattr(module, member_name)\n except (ValueError, ImportError, AttributeError) as e:\n raise ImportError('Could not import the name: {}: {}'.format(path, e))", "def import_attr(path):\n \"\"\"\n transform a python dotted path to the attr\n\n :param path: A dotted path to a python object or a python object\n :type path: :obj:`unicode` or :obj:`str` or anything\n :return: The python object pointed by the dotted path or the python object unchanged\n \"\"\"\n # if we got a str, decode it to unicode (normally it should only contain ascii)\n if isinstance(path, six.binary_type):\n path = path.decode(\"utf-8\")\n # if path is not an unicode, return it unchanged (may be it is already the attribute to import)\n if not isinstance(path, six.text_type):\n return path\n if u\".\" not in path:\n ValueError(\"%r should be of the form `module.attr` and we just got `attr`\" % path)\n module, attr = path.rsplit(u'.', 1)\n try:\n return getattr(import_module(module), attr)\n except ImportError:\n raise ImportError(\"Module %r not found\" % module)\n except AttributeError:\n raise AttributeError(\"Module %r has not attribut %r\" % (module, attr))", "def import_string(dotted_path):\n \"\"\"\n Import a dotted module path.\n\n Returns the attribute/class designated by the last name in the path.\n\n Raises ImportError if the import fails.\n\n \"\"\"\n try:\n module_path, class_name = dotted_path.rsplit('.', 1)\n except ValueError:\n raise ImportError('%s doesn\\'t look like a valid path' % dotted_path)\n\n module = __import__(module_path, fromlist=[class_name])\n\n try:\n return getattr(module, class_name)\n except AttributeError:\n msg = 'Module \"%s\" does not define a \"%s\" attribute/class' % (\n dotted_path, class_name)\n raise ImportError(msg)", "def import_by_path(dotted_path, error_prefix=''):\n \"\"\"\n Import a dotted module path and return the attribute/class designated by\n the last name in the path. Raise ImproperlyConfigured if something goes\n wrong. 
This has come straight from Django 1.6\n \"\"\"\n try:\n module_path, class_name = dotted_path.rsplit('.', 1)\n except ValueError:\n raise ImproperlyConfigured(\"%s%s doesn't look like a module path\" % (\n error_prefix, dotted_path))\n try:\n module = import_module(module_path)\n except ImportError as e:\n raise ImproperlyConfigured('%sError importing module %s: \"%s\"' % (\n error_prefix, module_path, e))\n try:\n attr = getattr(module, class_name)\n except AttributeError:\n raise ImproperlyConfigured(\n '%sModule \"%s\" does not define a \"%s\" attribute/class' % (\n error_prefix, module_path, class_name\n )\n )\n return attr", "def import_string(dotted_path):\r\n \"\"\"\r\n Import a dotted module path and return the attribute/class designated by the\r\n last name in the path. Raise ImportError if the import failed.\r\n \r\n Args:\r\n dotted_path: The path to attempt importing\r\n\r\n Returns:\r\n Imported class/attribute\r\n \"\"\"\r\n try:\r\n module_path, class_name = dotted_path.rsplit('.', 1)\r\n except ValueError as err:\r\n raise ImportError(\"%s doesn't look like a module path\" % dotted_path) from err\r\n\r\n module = import_module(module_path)\r\n\r\n try:\r\n return getattr(module, class_name)\r\n except AttributeError as err:\r\n raise ImportError('Module \"%s\" does not define a \"%s\" attribute/class' % (\r\n module_path, class_name)) from err", "def import_string(dotted_path: str) -> ModuleType:\n \"\"\"\n Source: django.utils.module_loading\n Import a dotted module path and return the attribute/class designated by the\n last name in the path. Raise ImportError if the import failed.\n \"\"\"\n try:\n module_path, class_name = dotted_path.rsplit('.', 1)\n except ValueError:\n msg = \"%s doesn't look like a module path\" % dotted_path\n raise ImportError(msg)\n\n module = import_module(module_path)\n\n try:\n return getattr(module, class_name)\n except AttributeError:\n msg = 'Module \"%s\" does not define a \"%s\" attribute/class' % (\n module_path, class_name)\n raise ImportError(msg)", "def load_object(path):\n \"\"\"Return the Python object represented by dotted *path*.\"\"\"\n i = path.rfind('.')\n module_name, object_name = path[:i], path[i + 1:]\n # Load module.\n try:\n module = import_module(module_name)\n except ImportError:\n raise ImproperlyConfigured('Module %r not found' % module_name)\n except ValueError:\n raise ImproperlyConfigured('Invalid module %r' % module_name)\n # Load object.\n try:\n return getattr(module, object_name)\n except AttributeError:\n msg = 'Module %r does not define an object named %r'\n raise ImproperlyConfigured(msg % (module_name, object_name))", "def import_string(dotted_path):\n \"\"\"\n Import a dotted module path and return the attribute/class designated by\n the last name in the path. 
Raise ImportError if the import failed.\n \"\"\"\n try:\n module_path, class_name = dotted_path.rsplit('.', 1)\n except ValueError:\n msg = \"%s doesn't look like a module path\" % dotted_path\n six.reraise(ImportError, ImportError(msg), sys.exc_info()[2])\n\n module = import_module(module_path)\n\n try:\n return getattr(module, class_name)\n except AttributeError:\n msg = 'Module \"%s\" does not define a \"%s\" attribute/class' % (\n dotted_path, class_name\n )\n six.reraise(ImportError, ImportError(msg), sys.exc_info()[2])", "def load_path(self, path):\n '''\n Load and return a given import path to a module or class\n '''\n containing_module, _, last_item = path.rpartition('.')\n if last_item[0].isupper():\n # Is a class definition, should do an \"import from\"\n path = containing_module\n imported_obj = importlib.import_module(path)\n if last_item[0].isupper():\n try:\n imported_obj = getattr(imported_obj, last_item)\n except AttributeError:\n msg = 'Cannot import \"%s\". ' \\\n '(Hint: CamelCase is only for classes)' % last_item\n raise ConfigurationError(msg)\n return imported_obj", "def get_object(path=\"\", obj=None):\n \"\"\"Return an object from a dot path.\n\n Path can either be a full path, in which case the `get_object` function\n will try to import the modules in the path and follow it to the final\n object. Or it can be a path relative to the object passed in as the second\n argument.\n\n Args:\n path (str): Full or relative dot path to the desired object\n obj (object): Starting object. Dot path is calculated relatively to\n this object.\n\n Returns:\n Object at the end of the path, or list of non hidden objects if we use\n the star query.\n\n Example for full paths::\n\n >>> get_object('os.path.join')\n <function join at 0x1002d9ed8>\n >>> get_object('tea.process')\n <module 'tea.process' from 'tea/process/__init__.pyc'>\n\n Example for relative paths when an object is passed in::\n\n >>> import os\n >>> get_object('path.join', os)\n <function join at 0x1002d9ed8>\n\n Example for a star query. (Star query can be used only as the last element\n of the path::\n\n >>> get_object('tea.dsa.*')\n []\n >>> get_object('tea.dsa.singleton.*')\n [<class 'tea.dsa.singleton.Singleton'>,\n <class 'tea.dsa.singleton.SingletonMetaclass'>\n <module 'six' from '...'>]\n >>> get_object('tea.dsa.*')\n [<module 'tea.dsa.singleton' from '...'>] # Since we imported it\n \"\"\"\n if not path:\n return obj\n path = path.split(\".\")\n if obj is None:\n obj = importlib.import_module(path[0])\n path = path[1:]\n for item in path:\n if item == \"*\":\n # This is the star query, returns non hidden objects\n return [\n getattr(obj, name)\n for name in dir(obj)\n if not name.startswith(\"__\")\n ]\n if isinstance(obj, types.ModuleType):\n submodule = \"{}.{}\".format(_package(obj), item)\n try:\n obj = importlib.import_module(submodule)\n except Exception as import_error:\n try:\n obj = getattr(obj, item)\n except Exception:\n # FIXME: I know I should probably merge the errors, but\n # it's easier just to throw the import error since\n # it's most probably the one user wants to see.\n # Create a new LoadingError and throw a combination\n # of the import error and attribute error.\n raise import_error\n else:\n obj = getattr(obj, item)\n return obj" ]
[ 0.776495635509491, 0.7743529677391052, 0.7714292407035828, 0.770045816898346, 0.7693743109703064, 0.7667275667190552, 0.7637400031089783, 0.7595250606536865, 0.7577233910560608, 0.7538796663284302, 0.7516044974327087, 0.746537983417511 ]
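A sketch of the dotted-path resolution `get_obj` performs, using the docstring's own examples; the import path is again an assumption:

import os.path
from mockito.utils import get_obj  # assumed location of the helper shown above

assert get_obj('os.path') is os.path                 # dotted path to a module
assert get_obj('os.path.exists') is os.path.exists   # dotted path to a function in that module
assert get_obj(os.path) is os.path                   # non-str input is returned unchanged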
Split path into (obj, attribute) tuple. Given `path` is 'os.path.exists' will thus return `(os.path, 'exists')` If path is not a str, delegates to `get_function_host(path)`
def get_obj_attr_tuple(path): """Split path into (obj, attribute) tuple. Given `path` is 'os.path.exists' will thus return `(os.path, 'exists')` If path is not a str, delegates to `get_function_host(path)` """ if not isinstance(path, str): return get_function_host(path) if path.startswith('.'): raise TypeError('relative imports are not supported') try: leading, end = path.rsplit('.', 1) except ValueError: raise TypeError('path must have dots') return get_obj(leading), end
[ "def get_function_host(fn):\n \"\"\"Destructure a given function into its host and its name.\n\n The 'host' of a function is a module, for methods it is usually its\n instance or its class. This is safe only for methods, for module wide,\n globally declared names it must be considered experimental.\n\n For all reasonable fn: ``getattr(*get_function_host(fn)) == fn``\n\n Returns tuple (host, fn-name)\n Otherwise should raise TypeError\n \"\"\"\n\n obj = None\n try:\n name = fn.__name__\n obj = fn.__self__\n except AttributeError:\n pass\n\n if obj is None:\n # Due to how python imports work, everything that is global on a module\n # level must be regarded as not safe here. For now, we go for the extra\n # mile, TBC, because just specifying `os.path.exists` would be 'cool'.\n #\n # TLDR;:\n # E.g. `inspect.getmodule(os.path.exists)` returns `genericpath` bc\n # that's where `exists` is defined and comes from. But from the point\n # of view of the user `exists` always comes and is used from `os.path`\n # which points e.g. to `ntpath`. We thus must patch `ntpath`.\n # But that's the same for most imports::\n #\n # # b.py\n # from a import foo\n #\n # Now asking `getmodule(b.foo)` it tells you `a`, but we access and use\n # `b.foo` and we therefore must patch `b`.\n\n obj, name = find_invoking_frame_and_try_parse()\n # safety check!\n assert getattr(obj, name) == fn\n\n\n return obj, name", "def _parse_path(self, path):\n \"\"\"Return (hosts, path) tuple\"\"\"\n # Support specifying another host via hdfs://host:port/path syntax\n # We ignore the scheme and piece together the query and fragment\n # Note that HDFS URIs are not URL encoded, so a '?' or a '#' in the URI is part of the\n # path\n parts = urlsplit(path, allow_fragments=False)\n if not parts.path.startswith('/'):\n raise ValueError(\"Path must be absolute, was given {}\".format(path))\n if parts.scheme not in ('', 'hdfs', 'hftp', 'webhdfs'):\n warnings.warn(\"Unexpected scheme {}\".format(parts.scheme))\n assert not parts.fragment\n path = parts.path\n if parts.query:\n path += '?' 
+ parts.query\n if parts.netloc:\n hosts = self._parse_hosts(parts.netloc)\n else:\n hosts = self.hosts\n return hosts, path", "def _getattr_path(obj, path):\n \"\"\"\n getattr for a dot separated path\n\n If an AttributeError is raised, it will return None.\n \"\"\"\n if not path:\n return None\n\n for attr in path.split('.'):\n obj = getattr(obj, attr, None)\n return obj", "def getattr_path(obj, path):\n \"\"\"\n Get an attribute path, as defined by a string separated by '__'.\n getattr_path(foo, 'a__b__c') is roughly equivalent to foo.a.b.c but\n will short circuit to return None if something on the path is None.\n \"\"\"\n path = path.split('__')\n for name in path:\n obj = getattr(obj, name)\n if obj is None:\n return None\n return obj", "def _split_path(path):\n \"\"\"\n A wrapper around the normal split function that ignores any trailing /.\n\n :return: A tuple of the form (dirname, last) where last is the last element\n in the path.\n \"\"\"\n # Get around a quirk in path_split where a / at the end will make the\n # dirname (split[0]) the entire path\n path = path[:-1] if path[-1] == '/' else path\n split = path_split(path)\n return split", "def _split_path(path):\n \"\"\"split a path return by the api\n\n return\n - the sentinel:\n - the rest of the path as a list.\n - the original path stripped of / for normalisation.\n \"\"\"\n path = path.strip('/')\n list_path = path.split('/')\n sentinel = list_path.pop(0)\n return sentinel, list_path, path", "def get_path_attribute(obj, path):\n \"\"\"Given a path like `related_record.related_record2.id`, this method\n will be able to pull the value of ID from that object, returning None\n if it doesn't exist.\n\n Args:\n obj (fleaker.db.Model):\n The object to attempt to pull the value from\n path (str):\n The path to follow to pull the value from\n\n Returns:\n (int|str|None):\n The value at the end of the path. None if it doesn't exist at\n any point in the path.\n \"\"\"\n # Strip out ignored keys passed in\n path = path.replace('original.', '').replace('current_user.', '')\n\n attr_parts = path.split('.')\n res = obj\n\n try:\n for part in attr_parts:\n try:\n res = getattr(res, part)\n except AttributeError:\n res = getattr(res.get(), part)\n\n except (peewee.DoesNotExist, AttributeError):\n return None\n\n return res", "def get_obj(path):\n \"\"\"Return obj for given dotted path.\n\n Typical inputs for `path` are 'os' or 'os.path' in which case you get a\n module; or 'os.path.exists' in which case you get a function from that\n module.\n\n Just returns the given input in case it is not a str.\n\n Note: Relative imports not supported.\n Raises ImportError or AttributeError as appropriate.\n\n \"\"\"\n # Since we usually pass in mocks here; duck typing is not appropriate\n # (mocks respond to every attribute).\n if not isinstance(path, str):\n return path\n\n if path.startswith('.'):\n raise TypeError('relative imports are not supported')\n\n parts = path.split('.')\n head, tail = parts[0], parts[1:]\n\n obj = importlib.import_module(head)\n\n # Normally a simple reduce, but we go the extra mile\n # for good exception messages.\n for i, name in enumerate(tail):\n try:\n obj = getattr(obj, name)\n except AttributeError:\n # Note the [:i] instead of [:i+1], so we get the path just\n # *before* the AttributeError, t.i. 
the part of it that went ok.\n module = '.'.join([head] + tail[:i])\n try:\n importlib.import_module(module)\n except ImportError:\n raise AttributeError(\n \"object '%s' has no attribute '%s'\" % (module, name))\n else:\n raise AttributeError(\n \"module '%s' has no attribute '%s'\" % (module, name))\n return obj", "def split_path(path_):\n \"\"\"\n Split the requested path into (locale, path).\n\n locale will be empty if it isn't found.\n \"\"\"\n path = path_.lstrip('/')\n\n # Use partitition instead of split since it always returns 3 parts\n first, _, rest = path.partition('/')\n\n lang = first.lower()\n if lang in settings.LANGUAGE_URL_MAP:\n return settings.LANGUAGE_URL_MAP[lang], rest\n else:\n supported = find_supported(first)\n if len(supported):\n return supported[0], rest\n else:\n return '', path", "def attr_to_path(node):\n \"\"\" Compute path and final object for an attribute node \"\"\"\n\n def get_intrinsic_path(modules, attr):\n \"\"\" Get function path and intrinsic from an ast.Attribute. \"\"\"\n if isinstance(attr, ast.Name):\n return modules[demangle(attr.id)], (demangle(attr.id),)\n elif isinstance(attr, ast.Attribute):\n module, path = get_intrinsic_path(modules, attr.value)\n return module[attr.attr], path + (attr.attr,)\n obj, path = get_intrinsic_path(MODULES, node)\n if not obj.isliteral():\n path = path[:-1] + ('functor', path[-1])\n return obj, ('pythonic', ) + path", "def parse_path(path):\r\n '''Parses an address into directory and port parts.\r\n\r\n The last segment of the address will be checked to see if it matches a port\r\n specification (i.e. contains a colon followed by text). This will be\r\n returned separately from the directory parts.\r\n\r\n If a leading / is given, that will be returned as the first directory\r\n component. All other / characters are removed.\r\n\r\n All leading / characters are condensed into a single leading /.\r\n\r\n Any path components that are . will be removed, as they just point to the\r\n previous path component. For example, '/localhost/.' will become\r\n '/localhost'. Any path components that are .. will be removed, along with\r\n the previous path component. 
If this renders the path empty, it will be\r\n replaced with '/'.\r\n\r\n Examples:\r\n\r\n >>> parse_path('localhost:30000/manager/comp0.rtc')\r\n (['localhost:30000', 'manager', 'comp0.rtc'], None)\r\n \r\n >>> parse_path('localhost/manager/comp0.rtc:in')\r\n (['localhost', 'manager', 'comp0.rtc'], 'in')\r\n \r\n >>> parse_path('/localhost/manager/comp0.rtc')\r\n (['/', 'localhost', 'manager', 'comp0.rtc'], None)\r\n \r\n >>> parse_path('/localhost/manager/comp0.rtc:in')\r\n (['/', 'localhost', 'manager', 'comp0.rtc'], 'in')\r\n \r\n >>> parse_path('manager/comp0.rtc')\r\n (['manager', 'comp0.rtc'], None)\r\n \r\n >>> parse_path('comp0.rtc')\r\n (['comp0.rtc'], None)\r\n\r\n '''\r\n bits = path.lstrip('/').split('/')\r\n if not bits:\r\n raise exceptions.BadPathError(path)\r\n\r\n if bits[-1]:\r\n bits[-1], port = get_port(bits[-1])\r\n else:\r\n port = None\r\n if path[0] == '/':\r\n bits = ['/'] + bits\r\n condensed_bits = []\r\n for bit in bits:\r\n if bit == '.':\r\n continue\r\n if bit == '..':\r\n condensed_bits = condensed_bits[:-1]\r\n continue\r\n condensed_bits.append(bit)\r\n if not condensed_bits:\r\n condensed_bits = ['/']\r\n return condensed_bits, port", "def _parse_path(self, path):\n \"\"\"\n Parses a Registry path and returns the hive and key.\n\n @type path: str\n @param path: Registry path.\n\n @rtype: tuple( int, str )\n @return: Tuple containing the hive handle and the subkey path.\n For a local Registry, the hive handle is an integer.\n For a remote Registry, the hive handle is a L{RegistryKeyHandle}.\n \"\"\"\n handle, path = self._split_path(path)\n if self._machine is not None:\n handle = self._connect_hive(handle)\n return handle, path" ]
[ 0.7452395558357239, 0.7234854698181152, 0.6943947672843933, 0.6841155886650085, 0.6819661855697632, 0.6749341487884521, 0.672653079032898, 0.6641891002655029, 0.6619399785995483, 0.6615392565727234, 0.6571950912475586, 0.6566314697265625 ]
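The (obj, attribute) split described above, sketched with the docstring's example; import path assumed as before:

import os.path
from mockito.utils import get_obj_attr_tuple  # assumed location of the helper shown above

obj, attr = get_obj_attr_tuple('os.path.exists')
assert obj is os.path and attr == 'exists'
# Relative ('.exists') and dot-less ('exists') paths raise TypeError, per the implementation above.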
Spy an object. Spying means that all functions will behave as before, so they will be side effects, but the interactions can be verified afterwards. Returns Dummy-like, almost empty object as proxy to `object`. The *returned* object must be injected and used by the code under test; after that all interactions can be verified as usual. T.i. the original object **will not be patched**, and has no further knowledge as before. E.g.:: import time time = spy(time) # inject time do_work(..., time) verify(time).time()
def spy(object): """Spy an object. Spying means that all functions will behave as before, so they will be side effects, but the interactions can be verified afterwards. Returns Dummy-like, almost empty object as proxy to `object`. The *returned* object must be injected and used by the code under test; after that all interactions can be verified as usual. T.i. the original object **will not be patched**, and has no further knowledge as before. E.g.:: import time time = spy(time) # inject time do_work(..., time) verify(time).time() """ if inspect.isclass(object) or inspect.ismodule(object): class_ = None else: class_ = object.__class__ class Spy(_Dummy): if class_: __class__ = class_ def __getattr__(self, method_name): return RememberedProxyInvocation(theMock, method_name) def __repr__(self): name = 'Spied' if class_: name += class_.__name__ return "<%s id=%s>" % (name, id(self)) obj = Spy() theMock = Mock(obj, strict=True, spec=object) mock_registry.register(obj, theMock) return obj
[ "def spy2(fn): # type: (...) -> None\n \"\"\"Spy usage of given `fn`.\n\n Patches the module, class or object `fn` lives in, so that all\n interactions can be recorded; otherwise executes `fn` as before, so\n that all side effects happen as before.\n\n E.g.::\n\n import time\n spy(time.time)\n do_work(...) # nothing injected, uses global patched `time` module\n verify(time).time()\n\n Note that builtins often cannot be patched because they're read-only.\n\n\n \"\"\"\n if isinstance(fn, str):\n answer = get_obj(fn)\n else:\n answer = fn\n\n when2(fn, Ellipsis).thenAnswer(answer)", "def wrap2spy(self):\n \"\"\"\n Wrapping the inspector as a spy based on the type\n \"\"\"\n if self.args_type == \"MODULE_FUNCTION\":\n self.orig_func = deepcopy(getattr(self.obj, self.prop))\n setattr(self.obj, self.prop, Wrapper.wrap_spy(getattr(self.obj, self.prop)))\n elif self.args_type == \"MODULE\":\n setattr(self.obj, \"__SINONLOCK__\", True)\n elif self.args_type == \"FUNCTION\":\n self.orig_func = deepcopy(getattr(CPSCOPE, self.obj.__name__))\n setattr(CPSCOPE, self.obj.__name__,\n Wrapper.wrap_spy(getattr(CPSCOPE, self.obj.__name__)))\n elif self.args_type == \"PURE\":\n self.orig_func = deepcopy(getattr(self.pure, \"func\"))\n setattr(self.pure, \"func\", Wrapper.wrap_spy(getattr(self.pure, \"func\")))", "function( object, method, options ) {\n\t\tvar self = this, spy;\n\n\t\t// Can only create spies while the module is active\n\t\tself.requireMaxState( munit.ASSERT_STATE_ACTIVE );\n\n\t\t// Store the spy internally for destruction once module is closed\n\t\tspy = munit.Spy( self, object, method, options );\n\t\tself._spies.push( spy );\n\n\t\treturn spy;\n\t}", "def spy(self):\n '''\n Add a spy to this stub. Return the spy.\n '''\n spy = Spy(self)\n self._expectations.append(spy)\n return spy", "@CheckReturnValue\n public static <T> T spy(T object) {\n return MOCKITO_CORE.mock((Class<T>) object.getClass(), withSettings()\n .spiedInstance(object)\n .defaultAnswer(CALLS_REAL_METHODS));\n }", "def wrap_spy(function, owner=None):\n \"\"\"\n Surrounds the given 'function' with a spy wrapper that tracks usage data, such as:\n * call count\n * arguments it was called with\n * keyword argument it was called with\n * return values\n * etc.\n\n Parameters:\n function: function, could be one of 3 things:\n 1. the original function the user wants to spy on\n 2. the custom function the user specified to replace the original\n 3. a default function configurable via returns/throws\n owner: object, the owner of the original function. It is necessary in certain cases\n to specify this, such as when the user stubs a class. 
Otherwise, the SpyCall\n arguments will erroneously include the 'owner' as the first parameter of every call.\n Returns:\n function, the spy wrapper that is replacing the inputted function\n \"\"\"\n def __set__(value, new_list):\n \"\"\"\n For python 2.x compatibility\n \"\"\"\n setattr(wrapped, value, new_list)\n\n def wrapped(*args, **kwargs):\n \"\"\"\n Fully manipulatable inspector function\n \"\"\"\n if owner:\n if len(args) > 0:\n if owner == args[0].__class__:\n args = args[1:]\n \n wrapped.callCount += 1\n wrapped.args_list.append(args)\n wrapped.kwargs_list.append(kwargs)\n\n call = SpyCall()\n call.args = args\n call.kwargs = kwargs\n call.stack = traceback.format_stack()\n wrapped.call_list.append(call)\n\n try:\n ret = function(*args, **kwargs)\n wrapped.ret_list.append(ret)\n call.returnValue = ret\n return ret\n except BaseException as excpt:\n # Todo: make sure e.__class__ is enough for all purpose or not\n wrapped.error_list.append(excpt.__class__)\n call.exception = excpt\n raise excpt\n\n wrapped.__set__ = __set__\n wrapped.callCount = 0\n wrapped.args_list = []\n wrapped.call_list = []\n wrapped.kwargs_list = []\n wrapped.error_list = []\n wrapped.ret_list = []\n wrapped.LOCK = True\n return wrapped", "def _call_spy(self, *args, **kwargs):\n '''\n Wrapper to call the spied-on function. Operates similar to\n Expectation.test.\n '''\n if self._spy_side_effect:\n if self._spy_side_effect_args or self._spy_side_effect_kwargs:\n self._spy_side_effect(\n *self._spy_side_effect_args,\n **self._spy_side_effect_kwargs)\n else:\n self._spy_side_effect(*args, **kwargs)\n\n return_value = self._stub.call_orig(*args, **kwargs)\n if self._spy_return:\n self._spy_return(return_value)\n\n return return_value", "function Spy( assert, module, method, options ) {\n\tvar original, wrapped = false;\n\n\t// Spies can only be created with an assertion module\n\tif ( ! 
( assert instanceof munit.Assert ) ) {\n\t\tthrow new munit.AssertionError( \"Spies require an assertion module\", Spy );\n\t}\n\t// No wrap provided, just empty spy\n\telse if ( method === undefined && options === undefined ) {\n\t\toptions = module;\n\t\tmodule = undefined;\n\t}\n\n\t// Passing a function as the options argument auto assigns to the onCall method\n\tif ( munit.isFunction( options ) ) {\n\t\toptions = { onCall: options };\n\t}\n\telse {\n\t\toptions = options || {};\n\t}\n\n\t// Spy replacement\n\tfunction spy(){\n\t\tvar call = new SpyCall( this, Slice.call( arguments ), spy );\n\t\tspy.history.push( call );\n\t\tspy.scope = call.scope;\n\t\tspy.args = call.args;\n\t\tspy.order = call.order;\n\t\tspy.overall = call.overall;\n\t\tspy.trace = call.trace;\n\t\tspy.count++;\n\t\tspy.returnValue = spy.option( 'returnValue' );\n\n\t\t// Callback for when spy gets triggered\n\t\tif ( spy.options.onCall ) {\n\t\t\tspy.returnValue = spy.options.onCall.apply( spy.scope, spy.args );\n\t\t}\n\n\t\t// Pass trigger through to the original function if allowed\n\t\tif ( spy.wrapped && spy.options.passthru ) {\n\t\t\tspy.returnValue = spy.original.apply( spy.scope, spy.args );\n\n\t\t\tif ( spy.options.hasOwnProperty( 'returnValue' ) ) {\n\t\t\t\tspy.returnValue = spy.option( 'returnValue' );\n\t\t\t}\n\t\t}\n\n\t\t// After trigger callback\n\t\tif ( spy.options.afterCall ) {\n\t\t\tspy.returnValue = spy.options.afterCall.apply( spy.scope, spy.args );\n\t\t}\n\n\t\treturn spy.returnValue;\n\t}\n\n\t// Overwrite defined method\n\tif ( module && munit.isString( method ) ) {\n\t\toriginal = module[ method ];\n\t\tmodule[ method ] = spy;\n\t\twrapped = true;\n\t}\n\n\t// Attach useful shortcuts to spy object\n\t[ 'onCall', 'afterCall' ].forEach(function( name ) {\n\t\tspy[ name ] = function( value ) {\n\t\t\treturn spy.option( name, value );\n\t\t};\n\t});\n\n\t// Info\n\treturn munit.extend( spy, {\n\n\t\t// Meta\n\t\t_module: module,\n\t\t_method: method,\n\t\tcreated: ( new Error( \"\" ) ).stack,\n\t\ttrace: null,\n\t\toriginal: original,\n\t\twrapped: wrapped,\n\t\tisSpy: true,\n\t\tassert: assert,\n\t\tcount: 0,\n\t\torder: -1,\n\t\toverall: -1,\n\t\targs: [],\n\t\thistory: [],\n\t\tdata: {},\n\t\toptions: options,\n\t\tscope: null,\n\n\t\t// Changing options\n\t\toption: function( name, value ) {\n\t\t\t// Passing a list of options to change\n\t\t\tif ( munit.isObject( name ) ) {\n\t\t\t\tmunit.each( name, function( value, name ) {\n\t\t\t\t\tspy.option( name, value );\n\t\t\t\t});\n\n\t\t\t\treturn spy;\n\t\t\t}\n\t\t\t// Requesting to get the value of an option\n\t\t\telse if ( value === undefined ) {\n\t\t\t\treturn spy.options[ name ];\n\t\t\t}\n\n\t\t\tspy.options[ name ] = value;\n\t\t\treturn spy;\n\t\t},\n\n\t\t// Resets history and counters\n\t\treset: function(){\n\t\t\tspy.history = [];\n\t\t\tspy.args = [];\n\t\t\tspy.count = 0;\n\t\t\tspy.scope = null;\n\t\t\tspy.trace = null;\n\n\t\t\treturn spy;\n\t\t},\n\n\t\t// Restores the original method back\n\t\trestore: function(){\n\t\t\tif ( spy.wrapped ) {\n\t\t\t\tspy._module[ spy._method ] = spy.original;\n\t\t\t\tspy.wrapped = false;\n\t\t\t}\n\n\t\t\treturn spy;\n\t\t}\n\n\t});\n}", "def wrap2stub(self, customfunc):\n \"\"\"\n Wrapping the inspector as a stub based on the type\n Args:\n customfunc: function that replaces the original\n Returns:\n function, the spy wrapper around the customfunc\n \"\"\"\n if self.args_type == \"MODULE_FUNCTION\":\n wrapper = Wrapper.wrap_spy(customfunc, self.obj)\n setattr(self.obj, 
self.prop, wrapper)\n elif self.args_type == \"MODULE\":\n wrapper = Wrapper.EmptyClass\n setattr(CPSCOPE, self.obj.__name__, wrapper)\n elif self.args_type == \"FUNCTION\":\n wrapper = Wrapper.wrap_spy(customfunc)\n setattr(CPSCOPE, self.obj.__name__, wrapper)\n elif self.args_type == \"PURE\":\n wrapper = Wrapper.wrap_spy(customfunc)\n setattr(self.pure, \"func\", wrapper)\n return wrapper", "def side_effect(self, func, *args, **kwargs):\n '''\n Wrap side effects for spies.\n '''\n self._spy_side_effect = func\n self._spy_side_effect_args = args\n self._spy_side_effect_kwargs = kwargs\n return self", "function spy(obj, fn, callback, reportResults) {\n\t\t\tvar origFn = obj && obj[fn];\n\n\t\t\tif (typeof origFn !== 'function') {\n\t\t\t\treturn false;\n\t\t\t}\n\n\t\t\tphantomas.log('spy: attaching to \"%s\" function%s', fn, (reportResults ? ' with results reporting' : ''));\n\n\t\t\tobj[fn] = function() {\n\t\t\t\tvar args = Array.prototype.slice.call(arguments),\n\t\t\t\t\tresults = origFn.apply(this, args);\n\n\t\t\t\tif (enabled && typeof callback === 'function') {\n\t\t\t\t\tcallback.apply(this, (reportResults === true) ? [results].concat(args) : args);\n\t\t\t\t}\n\n\t\t\t\treturn results;\n\t\t\t};\n\n\t\t\t// copy custom properties of original function to the mocked one\n\t\t\tObject.keys(origFn).forEach(function(key) {\n\t\t\t\tobj[fn][key] = origFn[key];\n\t\t\t});\n\n\t\t\tobj[fn].prototype = origFn.prototype;\n\n\t\t\treturn true;\n\t\t}", "def _get_signature_object(func, as_instance, eat_self):\n \"\"\"\n Given an arbitrary, possibly callable object, try to create a suitable\n signature object.\n Return a (reduced func, signature) tuple, or None.\n \"\"\"\n if isinstance(func, ClassTypes) and not as_instance:\n # If it's a type and should be modelled as a type, use __init__.\n try:\n func = func.__init__\n except AttributeError:\n return None\n # Skip the `self` argument in __init__\n eat_self = True\n elif not isinstance(func, FunctionTypes):\n # If we really want to model an instance of the passed type,\n # __call__ should be looked up, not __init__.\n try:\n func = func.__call__\n except AttributeError:\n return None\n if eat_self:\n sig_func = partial(func, None)\n else:\n sig_func = func\n\n try:\n return func, inspectsignature(sig_func)\n except ValueError:\n # Certain callable types are not supported by inspect.signature()\n return None" ]
[ 0.7754902243614197, 0.767021894454956, 0.7479539513587952, 0.7346881031990051, 0.7221837043762207, 0.707771897315979, 0.7065128684043884, 0.7030767798423767, 0.6995481252670288, 0.683078944683075, 0.6758384108543396, 0.669526219367981 ]
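A usage sketch for `spy` above, following the docstring's own `time` example (mockito-python assumed installed):

import time
from mockito import spy, verify

spied_time = spy(time)     # proxy object; the real time module is not patched
t = spied_time.time()      # delegates to the real time.time() and records the call
assert isinstance(t, float)
verify(spied_time).time()  # the recorded interaction can be verified afterwards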
Spy usage of given `fn`. Patches the module, class or object `fn` lives in, so that all interactions can be recorded; otherwise executes `fn` as before, so that all side effects happen as before. E.g.:: import time spy(time.time) do_work(...) # nothing injected, uses global patched `time` module verify(time).time() Note that builtins often cannot be patched because they're read-only.
def spy2(fn): # type: (...) -> None """Spy usage of given `fn`. Patches the module, class or object `fn` lives in, so that all interactions can be recorded; otherwise executes `fn` as before, so that all side effects happen as before. E.g.:: import time spy(time.time) do_work(...) # nothing injected, uses global patched `time` module verify(time).time() Note that builtins often cannot be patched because they're read-only. """ if isinstance(fn, str): answer = get_obj(fn) else: answer = fn when2(fn, Ellipsis).thenAnswer(answer)
[ "def spy(object):\n \"\"\"Spy an object.\n\n Spying means that all functions will behave as before, so they will\n be side effects, but the interactions can be verified afterwards.\n\n Returns Dummy-like, almost empty object as proxy to `object`.\n\n The *returned* object must be injected and used by the code under test;\n after that all interactions can be verified as usual.\n T.i. the original object **will not be patched**, and has no further\n knowledge as before.\n\n E.g.::\n\n import time\n time = spy(time)\n # inject time\n do_work(..., time)\n verify(time).time()\n\n \"\"\"\n if inspect.isclass(object) or inspect.ismodule(object):\n class_ = None\n else:\n class_ = object.__class__\n\n class Spy(_Dummy):\n if class_:\n __class__ = class_\n\n def __getattr__(self, method_name):\n return RememberedProxyInvocation(theMock, method_name)\n\n def __repr__(self):\n name = 'Spied'\n if class_:\n name += class_.__name__\n return \"<%s id=%s>\" % (name, id(self))\n\n\n obj = Spy()\n theMock = Mock(obj, strict=True, spec=object)\n\n mock_registry.register(obj, theMock)\n return obj", "def side_effect(self, func, *args, **kwargs):\n '''\n Wrap side effects for spies.\n '''\n self._spy_side_effect = func\n self._spy_side_effect_args = args\n self._spy_side_effect_kwargs = kwargs\n return self", "function spy(obj, fn, callback, reportResults) {\n\t\t\tvar origFn = obj && obj[fn];\n\n\t\t\tif (typeof origFn !== 'function') {\n\t\t\t\treturn false;\n\t\t\t}\n\n\t\t\tphantomas.log('spy: attaching to \"%s\" function%s', fn, (reportResults ? ' with results reporting' : ''));\n\n\t\t\tobj[fn] = function() {\n\t\t\t\tvar args = Array.prototype.slice.call(arguments),\n\t\t\t\t\tresults = origFn.apply(this, args);\n\n\t\t\t\tif (enabled && typeof callback === 'function') {\n\t\t\t\t\tcallback.apply(this, (reportResults === true) ? [results].concat(args) : args);\n\t\t\t\t}\n\n\t\t\t\treturn results;\n\t\t\t};\n\n\t\t\t// copy custom properties of original function to the mocked one\n\t\t\tObject.keys(origFn).forEach(function(key) {\n\t\t\t\tobj[fn][key] = origFn[key];\n\t\t\t});\n\n\t\t\tobj[fn].prototype = origFn.prototype;\n\n\t\t\treturn true;\n\t\t}", "function(fn) {\n // Save off the existing spy method as __functionName\n var newName = '__' + fn;\n self[newName] = self[fn];\n\n // This is the actual function that will be called when,\n // for example, calledWith() is invoked\n return function() {\n var func = self[newName];\n \n // If this thing is a function, invoke it with all arguments;\n // otherwise, just grab the value of the property.\n var val = typeof func === 'function' ? func.apply(condition, arguments) : func;\n\n // Negate and return\n return !val;\n };\n }", "def patch(fn, attr_or_replacement, replacement=None):\n \"\"\"Patch/Replace a function.\n\n This is really like monkeypatching, but *note* that all interactions\n will be recorded and can be verified. That is, using `patch` you stay in\n the domain of mockito.\n\n Two ways to call this. Either::\n\n patch(os.path.exists, lambda str: True) # two arguments\n # OR\n patch(os.path, 'exists', lambda str: True) # three arguments\n\n If called with three arguments, the mode is *not* strict to allow *adding*\n methods. If called with two arguments, mode is always `strict`.\n\n .. 
note:: You must :func:`unstub` after stubbing, or use `with`\n statement.\n\n \"\"\"\n if replacement is None:\n replacement = attr_or_replacement\n return when2(fn, Ellipsis).thenAnswer(replacement)\n else:\n obj, name = fn, attr_or_replacement\n theMock = _get_mock(obj, strict=True)\n return invocation.StubbedInvocation(\n theMock, name, strict=False)(Ellipsis).thenAnswer(replacement)", "def wrap_spy(function, owner=None):\n \"\"\"\n Surrounds the given 'function' with a spy wrapper that tracks usage data, such as:\n * call count\n * arguments it was called with\n * keyword argument it was called with\n * return values\n * etc.\n\n Parameters:\n function: function, could be one of 3 things:\n 1. the original function the user wants to spy on\n 2. the custom function the user specified to replace the original\n 3. a default function configurable via returns/throws\n owner: object, the owner of the original function. It is necessary in certain cases\n to specify this, such as when the user stubs a class. Otherwise, the SpyCall\n arguments will erroneously include the 'owner' as the first parameter of every call.\n Returns:\n function, the spy wrapper that is replacing the inputted function\n \"\"\"\n def __set__(value, new_list):\n \"\"\"\n For python 2.x compatibility\n \"\"\"\n setattr(wrapped, value, new_list)\n\n def wrapped(*args, **kwargs):\n \"\"\"\n Fully manipulatable inspector function\n \"\"\"\n if owner:\n if len(args) > 0:\n if owner == args[0].__class__:\n args = args[1:]\n \n wrapped.callCount += 1\n wrapped.args_list.append(args)\n wrapped.kwargs_list.append(kwargs)\n\n call = SpyCall()\n call.args = args\n call.kwargs = kwargs\n call.stack = traceback.format_stack()\n wrapped.call_list.append(call)\n\n try:\n ret = function(*args, **kwargs)\n wrapped.ret_list.append(ret)\n call.returnValue = ret\n return ret\n except BaseException as excpt:\n # Todo: make sure e.__class__ is enough for all purpose or not\n wrapped.error_list.append(excpt.__class__)\n call.exception = excpt\n raise excpt\n\n wrapped.__set__ = __set__\n wrapped.callCount = 0\n wrapped.args_list = []\n wrapped.call_list = []\n wrapped.kwargs_list = []\n wrapped.error_list = []\n wrapped.ret_list = []\n wrapped.LOCK = True\n return wrapped", "def wrap2spy(self):\n \"\"\"\n Wrapping the inspector as a spy based on the type\n \"\"\"\n if self.args_type == \"MODULE_FUNCTION\":\n self.orig_func = deepcopy(getattr(self.obj, self.prop))\n setattr(self.obj, self.prop, Wrapper.wrap_spy(getattr(self.obj, self.prop)))\n elif self.args_type == \"MODULE\":\n setattr(self.obj, \"__SINONLOCK__\", True)\n elif self.args_type == \"FUNCTION\":\n self.orig_func = deepcopy(getattr(CPSCOPE, self.obj.__name__))\n setattr(CPSCOPE, self.obj.__name__,\n Wrapper.wrap_spy(getattr(CPSCOPE, self.obj.__name__)))\n elif self.args_type == \"PURE\":\n self.orig_func = deepcopy(getattr(self.pure, \"func\"))\n setattr(self.pure, \"func\", Wrapper.wrap_spy(getattr(self.pure, \"func\")))", "def _safe_processing(nsafefn, source, _globals=None, _locals=None):\n \"\"\"Do a safe processing of input fn in using SAFE_BUILTINS.\n\n :param fn: function to call with input parameters.\n :param source: source object to process with fn.\n :param dict _globals: global objects by name.\n :param dict _locals: local objects by name.\n :return: fn processing result\"\"\"\n\n if _globals is None:\n _globals = SAFE_BUILTINS\n\n else:\n _globals.update(SAFE_BUILTINS)\n\n return nsafefn(source, _globals, _locals)", "def apply():\r\n \"\"\"Monkey patching 
rope\r\n\r\n See [1], [2], [3], [4] and [5] in module docstring.\"\"\"\r\n from spyder.utils.programs import is_module_installed\r\n if is_module_installed('rope', '<0.9.4'):\r\n import rope\r\n raise ImportError(\"rope %s can't be patched\" % rope.VERSION)\r\n\r\n # [1] Patching project.Project for compatibility with py2exe/cx_Freeze\r\n # distributions\r\n from spyder.config.base import is_py2exe_or_cx_Freeze\r\n if is_py2exe_or_cx_Freeze():\r\n from rope.base import project\r\n class PatchedProject(project.Project):\r\n def _default_config(self):\r\n # py2exe/cx_Freeze distribution\r\n from spyder.config.base import get_module_source_path\r\n fname = get_module_source_path('spyder',\r\n 'default_config.py')\r\n return open(fname, 'rb').read()\r\n project.Project = PatchedProject\r\n \r\n # Patching pycore.PyCore...\r\n from rope.base import pycore\r\n class PatchedPyCore(pycore.PyCore):\r\n # [2] ...so that forced builtin modules (i.e. modules that were \r\n # declared as 'extension_modules' in rope preferences) will be indeed\r\n # recognized as builtins by rope, as expected\r\n # \r\n # This patch is included in rope 0.9.4+ but applying it anyway is ok\r\n def get_module(self, name, folder=None):\r\n \"\"\"Returns a `PyObject` if the module was found.\"\"\"\r\n # check if this is a builtin module\r\n pymod = self._builtin_module(name)\r\n if pymod is not None:\r\n return pymod\r\n module = self.find_module(name, folder)\r\n if module is None:\r\n raise pycore.ModuleNotFoundError(\r\n 'Module %s not found' % name)\r\n return self.resource_to_pyobject(module)\r\n # [3] ...to avoid considering folders without __init__.py as Python\r\n # packages\r\n def _find_module_in_folder(self, folder, modname):\r\n module = folder\r\n packages = modname.split('.')\r\n for pkg in packages[:-1]:\r\n if module.is_folder() and module.has_child(pkg):\r\n module = module.get_child(pkg)\r\n else:\r\n return None\r\n if module.is_folder():\r\n if module.has_child(packages[-1]) and \\\r\n module.get_child(packages[-1]).is_folder() and \\\r\n module.get_child(packages[-1]).has_child('__init__.py'):\r\n return module.get_child(packages[-1])\r\n elif module.has_child(packages[-1] + '.py') and \\\r\n not module.get_child(packages[-1] + '.py').is_folder():\r\n return module.get_child(packages[-1] + '.py')\r\n pycore.PyCore = PatchedPyCore\r\n\r\n # [2] Patching BuiltinName for the go to definition feature to simply work \r\n # with forced builtins\r\n from rope.base import builtins, libutils, pyobjects\r\n import inspect\r\n import os.path as osp\r\n class PatchedBuiltinName(builtins.BuiltinName):\r\n def _pycore(self):\r\n p = self.pyobject\r\n while p.parent is not None:\r\n p = p.parent\r\n if isinstance(p, builtins.BuiltinModule) and p.pycore is not None:\r\n return p.pycore\r\n def get_definition_location(self):\r\n if not inspect.isbuiltin(self.pyobject):\r\n _lines, lineno = inspect.getsourcelines(self.pyobject.builtin)\r\n path = inspect.getfile(self.pyobject.builtin)\r\n if path.endswith('pyc') and osp.isfile(path[:-1]):\r\n path = path[:-1]\r\n pycore = self._pycore()\r\n if pycore and pycore.project:\r\n resource = libutils.path_to_resource(pycore.project, path)\r\n module = pyobjects.PyModule(pycore, None, resource)\r\n return (module, lineno)\r\n return (None, None)\r\n builtins.BuiltinName = PatchedBuiltinName\r\n \r\n # [4] Patching several PyDocExtractor methods:\r\n # 1. 
get_doc:\r\n # To force rope to return the docstring of any object which has one, even\r\n # if it's not an instance of AbstractFunction, AbstractClass, or\r\n # AbstractModule.\r\n # Also, to use utils.dochelpers.getdoc to get docs from forced builtins.\r\n #\r\n # 2. _get_class_docstring and _get_single_function_docstring:\r\n # To not let rope add a 2 spaces indentation to every docstring, which was\r\n # breaking our rich text mode. The only value that we are modifying is the\r\n # 'indents' keyword of those methods, from 2 to 0.\r\n #\r\n # 3. get_calltip\r\n # To easily get calltips of forced builtins\r\n from rope.contrib import codeassist\r\n from spyder_kernels.utils.dochelpers import getdoc\r\n from rope.base import exceptions\r\n class PatchedPyDocExtractor(codeassist.PyDocExtractor):\r\n def get_builtin_doc(self, pyobject):\r\n buitin = pyobject.builtin\r\n return getdoc(buitin)\r\n \r\n def get_doc(self, pyobject):\r\n if hasattr(pyobject, 'builtin'):\r\n doc = self.get_builtin_doc(pyobject)\r\n return doc\r\n elif isinstance(pyobject, builtins.BuiltinModule):\r\n docstring = pyobject.get_doc()\r\n if docstring is not None:\r\n docstring = self._trim_docstring(docstring)\r\n else:\r\n docstring = ''\r\n # TODO: Add a module_name key, so that the name could appear\r\n # on the OI text filed but not be used by sphinx to render\r\n # the page\r\n doc = {'name': '',\r\n 'argspec': '',\r\n 'note': '',\r\n 'docstring': docstring\r\n }\r\n return doc\r\n elif isinstance(pyobject, pyobjects.AbstractFunction):\r\n return self._get_function_docstring(pyobject)\r\n elif isinstance(pyobject, pyobjects.AbstractClass):\r\n return self._get_class_docstring(pyobject)\r\n elif isinstance(pyobject, pyobjects.AbstractModule):\r\n return self._trim_docstring(pyobject.get_doc())\r\n elif pyobject.get_doc() is not None: # Spyder patch\r\n return self._trim_docstring(pyobject.get_doc())\r\n return None\r\n\r\n def get_calltip(self, pyobject, ignore_unknown=False, remove_self=False):\r\n if hasattr(pyobject, 'builtin'):\r\n doc = self.get_builtin_doc(pyobject)\r\n return doc['name'] + doc['argspec']\r\n try:\r\n if isinstance(pyobject, pyobjects.AbstractClass):\r\n pyobject = pyobject['__init__'].get_object()\r\n if not isinstance(pyobject, pyobjects.AbstractFunction):\r\n pyobject = pyobject['__call__'].get_object()\r\n except exceptions.AttributeNotFoundError:\r\n return None\r\n if ignore_unknown and not isinstance(pyobject, pyobjects.PyFunction):\r\n return\r\n if isinstance(pyobject, pyobjects.AbstractFunction):\r\n result = self._get_function_signature(pyobject, add_module=True)\r\n if remove_self and self._is_method(pyobject):\r\n return result.replace('(self)', '()').replace('(self, ', '(')\r\n return result\r\n \r\n def _get_class_docstring(self, pyclass):\r\n contents = self._trim_docstring(pyclass.get_doc(), indents=0)\r\n supers = [super.get_name() for super in pyclass.get_superclasses()]\r\n doc = 'class %s(%s):\\n\\n' % (pyclass.get_name(), ', '.join(supers)) + contents\r\n\r\n if '__init__' in pyclass:\r\n init = pyclass['__init__'].get_object()\r\n if isinstance(init, pyobjects.AbstractFunction):\r\n doc += '\\n\\n' + self._get_single_function_docstring(init)\r\n return doc\r\n \r\n def _get_single_function_docstring(self, pyfunction):\r\n docs = pyfunction.get_doc()\r\n docs = self._trim_docstring(docs, indents=0)\r\n return docs\r\n codeassist.PyDocExtractor = PatchedPyDocExtractor\r\n\r\n\r\n # [5] Get the right matplotlib docstrings for Help\r\n try:\r\n import matplotlib as 
mpl\r\n mpl.rcParams['docstring.hardcopy'] = True\r\n except:\r\n pass", "def run_fn(self, name, *args, **kwds):\n \"\"\"Run pre-built functionality that used Broad tools by name.\n\n See the gatkrun, picardrun module for available functions.\n \"\"\"\n fn = None\n to_check = [picardrun]\n for ns in to_check:\n try:\n fn = getattr(ns, name)\n break\n except AttributeError:\n pass\n assert fn is not None, \"Could not find function %s in %s\" % (name, to_check)\n return fn(self, *args, **kwds)", "def PatchAt(cls, n, module, method_wrapper=None, module_alias=None, method_name_modifier=utils.identity, blacklist_predicate=_False, whitelist_predicate=_True, return_type_predicate=_None, getmembers_predicate=inspect.isfunction, admit_private=False, explanation=\"\"):\n \"\"\"\nThis classmethod lets you easily patch all of functions/callables from a module or class as methods a Builder class.\n\n**Arguments**\n\n* **n** : the position the the object being piped will take in the arguments when the function being patched is applied. See `RegisterMethod` and `ThenAt`.\n* **module** : a module or class from which the functions/methods/callables will be taken.\n* `module_alias = None` : an optional alias for the module used for documentation purposes.\n* `method_name_modifier = lambda f_name: None` : a function that can modify the name of the method will take. If `None` the name of the function will be used.\n* `blacklist_predicate = lambda f_name: name[0] != \"_\"` : A predicate that determines which functions are banned given their name. By default it excludes all function whose name start with `'_'`. `blacklist_predicate` can also be of type list, in which case all names contained in this list will be banned.\n* `whitelist_predicate = lambda f_name: True` : A predicate that determines which functions are admitted given their name. By default it include any function. `whitelist_predicate` can also be of type list, in which case only names contained in this list will be admitted. You can use both `blacklist_predicate` and `whitelist_predicate` at the same time.\n* `return_type_predicate = lambda f_name: None` : a predicate that determines the `_return_type` of the Builder. By default it will always return `None`. See `phi.builder.Builder.ThenAt`.\n* `getmembers_predicate = inspect.isfunction` : a predicate that determines what type of elements/members will be fetched by the `inspect` module, defaults to [inspect.isfunction](https://docs.python.org/2/library/inspect.html#inspect.isfunction). See [getmembers](https://docs.python.org/2/library/inspect.html#inspect.getmembers).\n\n**Examples**\n\nLets patch ALL the main functions from numpy into a custom builder!\n\n from phi import PythonBuilder #or Builder\n import numpy as np\n\n class NumpyBuilder(PythonBuilder): #or Builder\n \"A Builder for numpy functions!\"\n pass\n\n NumpyBuilder.PatchAt(1, np)\n\n N = NumpyBuilder(lambda x: x)\n\nThats it! Although a serious patch would involve filtering out functions that don't take arrays. Another common task would be to use `NumpyBuilder.PatchAt(2, ...)` (`PatchAt(n, ..)` in general) when convenient to send the object being pipe to the relevant argument of the function. The previous is usually done with and a combination of `whitelist_predicate`s and `blacklist_predicate`s on `PatchAt(1, ...)` and `PatchAt(2, ...)` to filter or include the approriate functions on each kind of patch. 
Given the previous code we could now do\n\n import numpy as np\n\n x = np.array([[1,2],[3,4]])\n y = np.array([[5,6],[7,8]])\n\n z = N.Pipe(\n x, N\n .dot(y)\n .add(x)\n .transpose()\n .sum(axis=1)\n )\n\nWhich is strictly equivalent to\n\n import numpy as np\n\n x = np.array([[1,2],[3,4]])\n y = np.array([[5,6],[7,8]])\n\n z = np.dot(x, y)\n z = np.add(z, x)\n z = np.transpose(z)\n z = np.sum(z, axis=1)\n\nThe thing to notice is that with the `NumpyBuilder` we avoid the repetitive and needless passing and reassigment of the `z` variable, this removes a lot of noise from our code.\n \"\"\"\n _rtp = return_type_predicate\n\n return_type_predicate = (lambda x: _rtp) if inspect.isclass(_rtp) and issubclass(_rtp, Builder) else _rtp\n module_name = module_alias if module_alias else module.__name__ + '.'\n patch_members = _get_patch_members(module, blacklist_predicate=blacklist_predicate, whitelist_predicate=whitelist_predicate, getmembers_predicate=getmembers_predicate, admit_private=admit_private)\n\n for name, f in patch_members:\n wrapped = None\n\n if method_wrapper:\n g = method_wrapper(f)\n wrapped = f\n else:\n g = f\n\n cls.RegisterAt(n, g, module_name, wrapped=wrapped, _return_type=return_type_predicate(name), alias=method_name_modifier(name), explanation=explanation)", "def visit_Module(self, node):\n \"\"\"\n When we normalize call, we need to add correct import for method\n to function transformation.\n\n a.max()\n\n for numpy array will become:\n\n numpy.max(a)\n\n so we have to import numpy.\n \"\"\"\n self.skip_functions = True\n self.generic_visit(node)\n self.skip_functions = False\n self.generic_visit(node)\n new_imports = self.to_import - self.globals\n imports = [ast.Import(names=[ast.alias(name=mod[17:], asname=mod)])\n for mod in new_imports]\n node.body = imports + node.body\n self.update |= bool(imports)\n return node" ]
[ 0.7440390586853027, 0.7235741019248962, 0.7028001546859741, 0.7016107439994812, 0.69905686378479, 0.6942735314369202, 0.6937426328659058, 0.6883745789527893, 0.6880348324775696, 0.6833273768424988, 0.6828630566596985, 0.682716965675354 ]
Create 'empty' objects ('Mocks'). Will create an empty unconfigured object, that you can pass around. All interactions (method calls) will be recorded and can be verified using :func:`verify` et.al. A plain `mock()` will be not `strict`, and thus all methods regardless of the arguments will return ``None``. .. note:: Technically all attributes will return an internal interface. Because of that a simple ``if mock().foo:`` will surprisingly pass. If you set strict to ``True``: ``mock(strict=True)`` all unexpected interactions will raise an error instead. You configure a mock using :func:`when`, :func:`when2` or :func:`expect`. You can also very conveniently just pass in a dict here:: response = mock({'text': 'ok', 'raise_for_status': lambda: None}) You can also create an empty Mock which is specced against a given `spec`: ``mock(requests.Response)``. These mock are by default strict, thus they raise if you want to stub a method, the spec does not implement. Mockito will also match the function signature. You can pre-configure a specced mock as well:: response = mock({'json': lambda: {'status': 'Ok'}}, spec=requests.Response) Mocks are by default callable. Configure the callable behavior using `when`:: dummy = mock() when(dummy).__call_(1).thenReturn(2) All other magic methods must be configured this way or they will raise an AttributeError. See :func:`verify` to verify your interactions after usage.
def mock(config_or_spec=None, spec=None, strict=OMITTED): """Create 'empty' objects ('Mocks'). Will create an empty unconfigured object, that you can pass around. All interactions (method calls) will be recorded and can be verified using :func:`verify` et.al. A plain `mock()` will be not `strict`, and thus all methods regardless of the arguments will return ``None``. .. note:: Technically all attributes will return an internal interface. Because of that a simple ``if mock().foo:`` will surprisingly pass. If you set strict to ``True``: ``mock(strict=True)`` all unexpected interactions will raise an error instead. You configure a mock using :func:`when`, :func:`when2` or :func:`expect`. You can also very conveniently just pass in a dict here:: response = mock({'text': 'ok', 'raise_for_status': lambda: None}) You can also create an empty Mock which is specced against a given `spec`: ``mock(requests.Response)``. These mock are by default strict, thus they raise if you want to stub a method, the spec does not implement. Mockito will also match the function signature. You can pre-configure a specced mock as well:: response = mock({'json': lambda: {'status': 'Ok'}}, spec=requests.Response) Mocks are by default callable. Configure the callable behavior using `when`:: dummy = mock() when(dummy).__call_(1).thenReturn(2) All other magic methods must be configured this way or they will raise an AttributeError. See :func:`verify` to verify your interactions after usage. """ if type(config_or_spec) is dict: config = config_or_spec else: config = {} spec = config_or_spec if strict is OMITTED: strict = False if spec is None else True class Dummy(_Dummy): if spec: __class__ = spec # make isinstance work def __getattr__(self, method_name): if strict: raise AttributeError( "'Dummy' has no attribute %r configured" % method_name) return functools.partial( remembered_invocation_builder, theMock, method_name) def __repr__(self): name = 'Dummy' if spec: name += spec.__name__ return "<%s id=%s>" % (name, id(self)) # That's a tricky one: The object we will return is an *instance* of our # Dummy class, but the mock we register will point and patch the class. # T.i. so that magic methods (`__call__` etc.) can be configured. obj = Dummy() theMock = Mock(Dummy, strict=strict, spec=spec) for n, v in config.items(): if inspect.isfunction(v): invocation.StubbedInvocation(theMock, n)(Ellipsis).thenAnswer(v) else: setattr(obj, n, v) mock_registry.register(obj, theMock) return obj
[ "def create_autospec(spec, spec_set=False, instance=False, _parent=None,\n _name=None, **kwargs):\n \"\"\"Create a mock object using another object as a spec. Attributes on the\n mock will use the corresponding attribute on the `spec` object as their\n spec.\n\n Functions or methods being mocked will have their arguments checked\n to check that they are called with the correct signature.\n\n If `spec_set` is True then attempting to set attributes that don't exist\n on the spec object will raise an `AttributeError`.\n\n If a class is used as a spec then the return value of the mock (the\n instance of the class) will have the same spec. You can use a class as the\n spec for an instance object by passing `instance=True`. The returned mock\n will only be callable if instances of the mock are callable.\n\n `create_autospec` also takes arbitrary keyword arguments that are passed to\n the constructor of the created mock.\"\"\"\n if _is_list(spec):\n # can't pass a list instance to the mock constructor as it will be\n # interpreted as a list of strings\n spec = type(spec)\n\n is_type = isinstance(spec, ClassTypes)\n\n _kwargs = {'spec': spec}\n if spec_set:\n _kwargs = {'spec_set': spec}\n elif spec is None:\n # None we mock with a normal mock without a spec\n _kwargs = {}\n\n _kwargs.update(kwargs)\n\n Klass = MagicMock\n if type(spec) in DescriptorTypes:\n # descriptors don't have a spec\n # because we don't know what type they return\n _kwargs = {}\n elif not _callable(spec):\n Klass = NonCallableMagicMock\n elif is_type and instance and not _instance_callable(spec):\n Klass = NonCallableMagicMock\n\n _new_name = _name\n if _parent is None:\n # for a top level object no _new_name should be set\n _new_name = ''\n\n mock = Klass(parent=_parent, _new_parent=_parent, _new_name=_new_name,\n name=_name, **_kwargs)\n\n if isinstance(spec, FunctionTypes):\n # should only happen at the top level because we don't\n # recurse for functions\n mock = _set_signature(mock, spec)\n else:\n _check_signature(spec, mock, is_type, instance)\n\n if _parent is not None and not instance:\n _parent._mock_children[_name] = mock\n\n if is_type and not instance and 'return_value' not in kwargs:\n mock.return_value = create_autospec(spec, spec_set, instance=True,\n _name='()', _parent=mock)\n\n for entry in dir(spec):\n if _is_magic(entry):\n # MagicMock already does the useful magic methods for us\n continue\n\n if isinstance(spec, FunctionTypes) and entry in FunctionAttributes:\n # allow a mock to actually be a function\n continue\n\n # XXXX do we need a better way of getting attributes without\n # triggering code execution (?) Probably not - we need the actual\n # object to mock it so we would rather trigger a property than mock\n # the property descriptor. 
Likewise we want to mock out dynamically\n # provided attributes.\n # XXXX what about attributes that raise exceptions other than\n # AttributeError on being fetched?\n # we could be resilient against it, or catch and propagate the\n # exception when the attribute is fetched from the mock\n try:\n original = getattr(spec, entry)\n except AttributeError:\n continue\n\n kwargs = {'spec': original}\n if spec_set:\n kwargs = {'spec_set': original}\n\n if not isinstance(original, FunctionTypes):\n new = _SpecState(original, spec_set, mock, entry, instance)\n mock._mock_children[entry] = new\n else:\n parent = mock\n if isinstance(spec, FunctionTypes):\n parent = mock.mock\n\n new = MagicMock(parent=parent, name=entry, _new_name=entry,\n _new_parent=parent, **kwargs)\n mock._mock_children[entry] = new\n skipfirst = _must_skip(spec, entry, is_type)\n _check_signature(original, new, skipfirst=skipfirst)\n\n # so functions created with _set_signature become instance attributes,\n # *plus* their underlying mock exists in _mock_children of the parent\n # mock. Adding to _mock_children may be unnecessary where we are also\n # setting as an instance attribute?\n if isinstance(new, FunctionTypes):\n setattr(mock, entry, new)\n\n return mock", "def configure_mock(self, **kwargs):\n \"\"\"Set attributes on the mock through keyword arguments.\n\n Attributes plus return values and side effects can be set on child\n mocks using standard dot notation and unpacking a dictionary in the\n method call:\n\n >>> attrs = {'method.return_value': 3, 'other.side_effect': KeyError}\n >>> mock.configure_mock(**attrs)\"\"\"\n for arg, val in sorted(kwargs.items(),\n # we sort on the number of dots so that\n # attributes are set before we set attributes on\n # attributes\n key=lambda entry: entry[0].count('.')):\n args = arg.split('.')\n final = args.pop()\n obj = self\n for entry in args:\n obj = getattr(obj, entry)\n setattr(obj, final, val)", "def _get_child_mock(self, **kw):\n \"\"\"Create the child mocks for attributes and return value.\n By default child mocks will be the same type as the parent.\n Subclasses of Mock may want to override this to customize the way\n child mocks are made.\n\n For non-callable mocks the callable variant will be used (rather than\n any custom subclass).\"\"\"\n _type = type(self)\n if not issubclass(_type, CallableMixin):\n if issubclass(_type, NonCallableMagicMock):\n klass = MagicMock\n elif issubclass(_type, NonCallableMock) :\n klass = Mock\n else:\n klass = _type.__mro__[1]\n return klass(**kw)", "def construct(self, request, service=None, http_args=None, **kwargs):\n \"\"\"\n Constructs a client assertion and signs it with a key.\n The request is modified as a side effect.\n\n :param request: The request\n :param service: A :py:class:`oidcservice.service.Service` instance\n :param http_args: HTTP arguments\n :param kwargs: Extra arguments\n :return: Constructed HTTP arguments, in this case none\n \"\"\"\n\n if 'client_assertion' in kwargs:\n request[\"client_assertion\"] = kwargs['client_assertion']\n if 'client_assertion_type' in kwargs:\n request[\n 'client_assertion_type'] = kwargs['client_assertion_type']\n else:\n request[\"client_assertion_type\"] = JWT_BEARER\n elif 'client_assertion' in request:\n if 'client_assertion_type' not in request:\n request[\"client_assertion_type\"] = JWT_BEARER\n else:\n algorithm = None\n _context = service.service_context\n # audience for the signed JWT depends on which endpoint\n # we're talking to.\n if kwargs['authn_endpoint'] in 
['token_endpoint']:\n try:\n algorithm = _context.behaviour[\n 'token_endpoint_auth_signing_alg']\n except (KeyError, AttributeError):\n pass\n audience = _context.provider_info['token_endpoint']\n else:\n audience = _context.provider_info['issuer']\n\n if not algorithm:\n algorithm = self.choose_algorithm(**kwargs)\n\n ktype = alg2keytype(algorithm)\n try:\n if 'kid' in kwargs:\n signing_key = [self.get_key_by_kid(kwargs[\"kid\"], algorithm,\n _context)]\n elif ktype in _context.kid[\"sig\"]:\n try:\n signing_key = [self.get_key_by_kid(\n _context.kid[\"sig\"][ktype], algorithm, _context)]\n except KeyError:\n signing_key = self.get_signing_key(algorithm, _context)\n else:\n signing_key = self.get_signing_key(algorithm, _context)\n except NoMatchingKey as err:\n logger.error(\"%s\" % sanitize(err))\n raise\n\n try:\n _args = {'lifetime': kwargs['lifetime']}\n except KeyError:\n _args = {}\n\n # construct the signed JWT with the assertions and add\n # it as value to the 'client_assertion' claim of the request\n request[\"client_assertion\"] = assertion_jwt(\n _context.client_id, signing_key, audience,\n algorithm, **_args)\n\n request[\"client_assertion_type\"] = JWT_BEARER\n\n try:\n del request[\"client_secret\"]\n except KeyError:\n pass\n\n # If client_id is not required to be present, remove it.\n if not request.c_param[\"client_id\"][VREQUIRED]:\n try:\n del request[\"client_id\"]\n except KeyError:\n pass\n\n return {}", "def construct(self):\n \"\"\"\n Recursively serialize a lot of types, and\n in cases where it doesn't recognize the type,\n it will fall back to Django's `smart_unicode`.\n\n Returns `dict`.\n \"\"\"\n def _any(thing, fields=None):\n \"\"\"\n Dispatch, all types are routed through here.\n \"\"\"\n ret = None\n\n if isinstance(thing, QuerySet):\n ret = _qs(thing, fields)\n elif isinstance(thing, (tuple, list, set)):\n ret = _list(thing, fields)\n elif isinstance(thing, dict):\n ret = _dict(thing, fields)\n elif isinstance(thing, decimal.Decimal):\n ret = str(thing)\n elif isinstance(thing, Model):\n ret = _model(thing, fields)\n elif isinstance(thing, HttpResponse):\n raise HttpStatusCode(thing)\n elif inspect.isfunction(thing):\n if not inspect.getargspec(thing)[0]:\n ret = _any(thing())\n elif hasattr(thing, '__emittable__'):\n f = thing.__emittable__\n if inspect.ismethod(f) and len(inspect.getargspec(f)[0]) == 1:\n ret = _any(f())\n elif repr(thing).startswith(\"<django.db.models.fields.related.RelatedManager\"):\n ret = _any(thing.all())\n else:\n ret = smart_unicode(thing, strings_only=True)\n\n return ret\n\n def _fk(data, field):\n \"\"\"\n Foreign keys.\n \"\"\"\n return _any(getattr(data, field.name))\n\n def _related(data, fields=None):\n \"\"\"\n Foreign keys.\n \"\"\"\n return [ _model(m, fields) for m in data.iterator() ]\n\n def _m2m(data, field, fields=None):\n \"\"\"\n Many to many (re-route to `_model`.)\n \"\"\"\n return [ _model(m, fields) for m in getattr(data, field.name).iterator() ]\n\n def _model(data, fields=None):\n \"\"\"\n Models. 
Will respect the `fields` and/or\n `exclude` on the handler (see `typemapper`.)\n \"\"\"\n ret = { }\n handler = self.in_typemapper(type(data), self.anonymous)\n get_absolute_uri = False\n\n if handler or fields:\n v = lambda f: getattr(data, f.attname)\n\n if handler:\n fields = getattr(handler, 'fields')\n\n if not fields or hasattr(handler, 'fields'):\n \"\"\"\n Fields was not specified, try to find teh correct\n version in the typemapper we were sent.\n \"\"\"\n mapped = self.in_typemapper(type(data), self.anonymous)\n get_fields = set(mapped.fields)\n exclude_fields = set(mapped.exclude).difference(get_fields)\n\n if 'absolute_uri' in get_fields:\n get_absolute_uri = True\n\n if not get_fields:\n get_fields = set([ f.attname.replace(\"_id\", \"\", 1)\n for f in data._meta.fields + data._meta.virtual_fields])\n\n if hasattr(mapped, 'extra_fields'):\n get_fields.update(mapped.extra_fields)\n\n # sets can be negated.\n for exclude in exclude_fields:\n if isinstance(exclude, basestring):\n get_fields.discard(exclude)\n\n elif isinstance(exclude, re._pattern_type):\n for field in get_fields.copy():\n if exclude.match(field):\n get_fields.discard(field)\n\n else:\n get_fields = set(fields)\n\n met_fields = self.method_fields(handler, get_fields)\n\n for f in data._meta.local_fields + data._meta.virtual_fields:\n if f.serialize and not any([ p in met_fields for p in [ f.attname, f.name ]]):\n if not f.rel:\n if f.attname in get_fields:\n ret[f.attname] = _any(v(f))\n get_fields.remove(f.attname)\n else:\n if f.attname[:-3] in get_fields:\n ret[f.name] = _fk(data, f)\n get_fields.remove(f.name)\n\n for mf in data._meta.many_to_many:\n if mf.serialize and mf.attname not in met_fields:\n if mf.attname in get_fields:\n ret[mf.name] = _m2m(data, mf)\n get_fields.remove(mf.name)\n\n # try to get the remainder of fields\n for maybe_field in get_fields:\n if isinstance(maybe_field, (list, tuple)):\n model, fields = maybe_field\n inst = getattr(data, model, None)\n\n if inst:\n if hasattr(inst, 'all'):\n ret[model] = _related(inst, fields)\n elif callable(inst):\n if len(inspect.getargspec(inst)[0]) == 1:\n ret[model] = _any(inst(), fields)\n else:\n ret[model] = _model(inst, fields)\n\n elif maybe_field in met_fields:\n # Overriding normal field which has a \"resource method\"\n # so you can alter the contents of certain fields without\n # using different names.\n ret[maybe_field] = _any(met_fields[maybe_field](data))\n\n else:\n maybe = getattr(data, maybe_field, None)\n if maybe is not None:\n if callable(maybe):\n if len(inspect.getargspec(maybe)[0]) <= 1:\n ret[maybe_field] = _any(maybe())\n else:\n ret[maybe_field] = _any(maybe)\n else:\n handler_f = getattr(handler or self.handler, maybe_field, None)\n\n if handler_f:\n ret[maybe_field] = _any(handler_f(data))\n\n else:\n for f in data._meta.fields:\n ret[f.attname] = _any(getattr(data, f.attname))\n\n fields = dir(data.__class__) + ret.keys()\n add_ons = [k for k in dir(data) if k not in fields]\n\n for k in add_ons:\n ret[k] = _any(getattr(data, k))\n\n # resouce uri\n if self.in_typemapper(type(data), self.anonymous):\n handler = self.in_typemapper(type(data), self.anonymous)\n if hasattr(handler, 'resource_uri'):\n url_id, fields = handler.resource_uri(data)\n\n try:\n ret['resource_uri'] = permalink(lambda: (url_id, fields))()\n except NoReverseMatch, e:\n pass\n\n if hasattr(data, 'get_api_url') and 'resource_uri' not in ret:\n try:\n ret['resource_uri'] = data.get_api_url()\n except:\n pass\n\n # absolute uri\n if hasattr(data, 
'get_absolute_url') and get_absolute_uri:\n try:\n ret['absolute_uri'] = data.get_absolute_url()\n except:\n pass\n\n return ret\n\n def _qs(data, fields=None):\n \"\"\"\n Querysets.\n \"\"\"\n return [_any(v, fields) for v in data ]\n\n def _list(data, fields=None):\n \"\"\"\n Lists.\n \"\"\"\n return [_any(v, fields) for v in data ]\n\n def _dict(data, fields=None):\n \"\"\"\n Dictionaries.\n \"\"\"\n return dict([(k, _any(v, fields)) for k, v in data.iteritems()])\n\n # Kickstart the seralizin'.\n return _any(self.data, self.fields)", "def spy(object):\n \"\"\"Spy an object.\n\n Spying means that all functions will behave as before, so they will\n be side effects, but the interactions can be verified afterwards.\n\n Returns Dummy-like, almost empty object as proxy to `object`.\n\n The *returned* object must be injected and used by the code under test;\n after that all interactions can be verified as usual.\n T.i. the original object **will not be patched**, and has no further\n knowledge as before.\n\n E.g.::\n\n import time\n time = spy(time)\n # inject time\n do_work(..., time)\n verify(time).time()\n\n \"\"\"\n if inspect.isclass(object) or inspect.ismodule(object):\n class_ = None\n else:\n class_ = object.__class__\n\n class Spy(_Dummy):\n if class_:\n __class__ = class_\n\n def __getattr__(self, method_name):\n return RememberedProxyInvocation(theMock, method_name)\n\n def __repr__(self):\n name = 'Spied'\n if class_:\n name += class_.__name__\n return \"<%s id=%s>\" % (name, id(self))\n\n\n obj = Spy()\n theMock = Mock(obj, strict=True, spec=object)\n\n mock_registry.register(obj, theMock)\n return obj", "def mock_open(mock=None, read_data=''):\n \"\"\"\n A helper function to create a mock to replace the use of `open`. It works\n for `open` called directly or used as a context manager.\n\n The `mock` argument is the mock object to configure. If `None` (the\n default) then a `MagicMock` will be created for you, with the API limited\n to methods or attributes available on standard file handles.\n\n `read_data` is a string for the `read` methoddline`, and `readlines` of the\n file handle to return. 
This is an empty string by default.\n \"\"\"\n def _readlines_side_effect(*args, **kwargs):\n if handle.readlines.return_value is not None:\n return handle.readlines.return_value\n return list(_state[0])\n\n def _read_side_effect(*args, **kwargs):\n if handle.read.return_value is not None:\n return handle.read.return_value\n return type(read_data)().join(_state[0])\n\n def _readline_side_effect():\n if handle.readline.return_value is not None:\n while True:\n yield handle.readline.return_value\n for line in _state[0]:\n yield line\n while True:\n yield type(read_data)()\n\n\n global file_spec\n if file_spec is None:\n # set on first use\n if six.PY3:\n import _io\n file_spec = list(set(dir(_io.TextIOWrapper)).union(set(dir(_io.BytesIO))))\n else:\n file_spec = file\n\n if mock is None:\n mock = MagicMock(name='open', spec=open)\n\n handle = MagicMock(spec=file_spec)\n handle.__enter__.return_value = handle\n\n _state = [_iterate_read_data(read_data), None]\n\n handle.write.return_value = None\n handle.read.return_value = None\n handle.readline.return_value = None\n handle.readlines.return_value = None\n\n handle.read.side_effect = _read_side_effect\n _state[1] = _readline_side_effect()\n handle.readline.side_effect = _state[1]\n handle.readlines.side_effect = _readlines_side_effect\n\n def reset_data(*args, **kwargs):\n _state[0] = _iterate_read_data(read_data)\n if handle.readline.side_effect == _state[1]:\n # Only reset the side effect if the user hasn't overridden it.\n _state[1] = _readline_side_effect()\n handle.readline.side_effect = _state[1]\n return DEFAULT\n\n mock.side_effect = reset_data\n mock.return_value = handle\n return mock", "def _create_mock(self, endpoint_desc, session, request_params, mock_desc, loop):\n \"\"\"\n The class imported should have the __call__ function defined to be an object directly callable\n \"\"\"\n try:\n mock_def = mock_manager.next_mock(self.service_client.name,\n endpoint_desc['endpoint'])\n\n if isinstance(mock_def, PatchMockDescDefinition):\n mock_desc.update(mock_def.patch)\n else:\n return mock_def.mock\n except NoMock:\n pass\n\n return self.loader.factory(mock_desc.get('mock_type'),\n endpoint_desc, session,\n request_params, mock_desc,\n loop=loop)", "def Facade( some_instance=None, exclusion_list=[], cls=None, args=tuple(), kwargs={} ):\n \"\"\"\n Top-level interface to the Facade functionality. 
Determines what to return when passed arbitrary objects.\n\n :param mixed some_instance: Anything.\n :param list exclusion_list: The list of types NOT to wrap\n :param class cls: The class definition for the object being mocked\n :param tuple args: The arguments for the class definition to return the desired instance\n :param dict kwargs: The keywork arguments for the class definition to return the desired instance\n\n :rtype instance: Either the instance passed or an instance of the Wrapper wrapping the instance passed.\n \"\"\"\n if not USE_CALIENDO or should_exclude( some_instance, exclusion_list ):\n if not util.is_primitive(some_instance):\n # Provide dummy methods to prevent errors in implementations dependent\n # on the Wrapper interface\n some_instance.wrapper__unwrap = lambda : None\n some_instance.wrapper__delete_last_cached = lambda : None\n return some_instance # Just give it back.\n else:\n if util.is_primitive(some_instance) and not cls:\n return some_instance\n return Wrapper(o=some_instance, exclusion_list=list(exclusion_list), cls=cls, args=args, kwargs=kwargs )", "def mock_django_connection(disabled_features=None):\n \"\"\" Overwrite the Django database configuration with a mocked version.\n\n This is a helper function that does the actual monkey patching.\n \"\"\"\n db = connections.databases['default']\n db['PASSWORD'] = '****'\n db['USER'] = '**Database disabled for unit tests**'\n ConnectionHandler.__getitem__ = MagicMock(name='mock_connection')\n # noinspection PyUnresolvedReferences\n mock_connection = ConnectionHandler.__getitem__.return_value\n if disabled_features:\n for feature in disabled_features:\n setattr(mock_connection.features, feature, False)\n mock_ops = mock_connection.ops\n\n # noinspection PyUnusedLocal\n def compiler(queryset, connection, using, **kwargs):\n result = MagicMock(name='mock_connection.ops.compiler()')\n # noinspection PyProtectedMember\n result.execute_sql.side_effect = NotSupportedError(\n \"Mock database tried to execute SQL for {} model.\".format(\n queryset.model._meta.object_name))\n result.has_results.side_effect = result.execute_sql.side_effect\n return result\n\n mock_ops.compiler.return_value.side_effect = compiler\n mock_ops.integer_field_range.return_value = (-sys.maxsize - 1, sys.maxsize)\n mock_ops.max_name_length.return_value = sys.maxsize\n\n Model.refresh_from_db = Mock()", "def create_missing(self):\n \"\"\"Automagically populate all required instance attributes.\n\n Iterate through the set of all required class\n :class:`nailgun.entity_fields.Field` defined on ``type(self)`` and\n create a corresponding instance attribute if none exists. Subclasses\n should override this method if there is some relationship between two\n required fields.\n\n :return: Nothing. 
This method relies on side-effects.\n\n \"\"\"\n for field_name, field in self.get_fields().items():\n if field.required and not hasattr(self, field_name):\n # Most `gen_value` methods return a value such as an integer,\n # string or dictionary, but OneTo{One,Many}Field.gen_value\n # returns the referenced class.\n if hasattr(field, 'default'):\n value = field.default\n elif hasattr(field, 'choices'):\n value = gen_choice(field.choices)\n elif isinstance(field, OneToOneField):\n value = field.gen_value()(self._server_config).create(True)\n elif isinstance(field, OneToManyField):\n value = [\n field.gen_value()(self._server_config).create(True)\n ]\n else:\n value = field.gen_value()\n setattr(self, field_name, value)", "def _mock_request(self, **kwargs):\n \"\"\"\n A mocked out make_request call that bypasses all network calls\n and simply returns any mocked responses defined.\n \"\"\"\n model = kwargs.get('model')\n service = model.service_model.endpoint_prefix\n operation = model.name\n LOG.debug('_make_request: %s.%s', service, operation)\n return self.load_response(service, operation)" ]
[ 0.729351818561554, 0.713209867477417, 0.6814382076263428, 0.6734673976898193, 0.673088014125824, 0.6675387024879456, 0.6672521233558655, 0.663781464099884, 0.6614181995391846, 0.6592409610748291, 0.6569718718528748, 0.6568246483802795 ]
Function importPuppetClasses Force the reload of puppet classes @param smartProxyId: smartProxy Id @return RETURN: the API result
def importPuppetClasses(self, smartProxyId): """ Function importPuppetClasses Force the reload of puppet classes @param smartProxyId: smartProxy Id @return RETURN: the API result """ return self.api.create('{}/{}/import_puppetclasses' .format(self.objName, smartProxyId), '{}')
[ "def import_puppetclasses(self, synchronous=True, **kwargs):\n \"\"\"Import puppet classes from puppet Capsule.\n\n :param synchronous: What should happen if the server returns an HTTP\n 202 (accepted) status code? Wait for the task to complete if\n ``True``. Immediately return the server's response otherwise.\n :param kwargs: Arguments to pass to requests.\n :returns: The server's response, with all JSON decoded.\n :raises: ``requests.exceptions.HTTPError`` If the server responds with\n an HTTP 4XX or 5XX message.\n\n \"\"\"\n kwargs = kwargs.copy()\n kwargs.update(self._server_config.get_client_kwargs())\n # Check if environment_id was sent and substitute it to the path\n # but do not pass it to requests\n if 'environment' in kwargs:\n if isinstance(kwargs['environment'], Environment):\n environment_id = kwargs.pop('environment').id\n else:\n environment_id = kwargs.pop('environment')\n path = '{0}/environments/{1}/import_puppetclasses'.format(\n self.path(), environment_id)\n else:\n path = '{0}/import_puppetclasses'.format(self.path())\n return _handle_response(\n client.post(path, **kwargs), self._server_config, synchronous)", "def checkAndCreateClasses(self, classes):\n \"\"\" Function checkAndCreateClasses\n Check and add puppet class\n\n @param classes: The classes ids list\n @return RETURN: boolean\n \"\"\"\n actual_classes = self['puppetclasses'].keys()\n for i in classes:\n if i not in actual_classes:\n self['puppetclasses'].append(i)\n self.reload()\n return set(classes).issubset(set((self['puppetclasses'].keys())))", "public function createService(ServiceLocatorInterface $serviceLocator)\n {\n $service = new PuppetClass();\n\n /** @var PuppetModule $moduleService */\n $moduleService = $serviceLocator->get('pmProxyPuppetModuleService');\n $service->setModuleService($moduleService);\n\n return $service;\n }", "def delete_puppetclass(self, synchronous=True, **kwargs):\n \"\"\"Remove a Puppet class from host group\n\n Here is an example of how to use this method::\n hostgroup.delete_puppetclass(data={'puppetclass_id': puppet.id})\n\n Constructs path:\n /api/hostgroups/:hostgroup_id/puppetclass_ids/:id\n\n :param synchronous: What should happen if the server returns an HTTP\n 202 (accepted) status code? Wait for the task to complete if\n ``True``. 
Immediately return the server's response otherwise.\n :param kwargs: Arguments to pass to requests.\n :returns: The server's response, with all JSON decoded.\n :raises: ``requests.exceptions.HTTPError`` If the server responds with\n an HTTP 4XX or 5XX message.\n\n \"\"\"\n kwargs = kwargs.copy()\n kwargs.update(self._server_config.get_client_kwargs())\n path = \"{0}/{1}\".format(\n self.path('puppetclass_ids'),\n kwargs['data'].pop('puppetclass_id')\n )\n return _handle_response(\n client.delete(path, **kwargs), self._server_config, synchronous)", "private void reloadProxiesIfNecessary(String versionsuffix) {\n\t\tReloadableType proxy = typeRegistry.cglibProxies.get(this.slashedtypename);\n\t\tif (proxy != null) {\n\t\t\tif (GlobalConfiguration.isRuntimeLogging && log.isLoggable(Level.INFO)) {\n\t\t\t\tlog.log(Level.INFO, \"Attempting reload of cglib proxy for type \" + this.slashedtypename);\n\t\t\t}\n\n\t\t\tObject[] strategyAndGeneratorPair = CglibPluginCapturing.clazzToGeneratorStrategyAndClassGeneratorMap.get(\n\t\t\t\t\tgetClazz());\n\t\t\tif (strategyAndGeneratorPair == null) {\n\t\t\t\tif (log.isLoggable(Level.SEVERE)) {\n\t\t\t\t\tlog.severe(\n\t\t\t\t\t\t\t\"Unable to find regeneration methods for cglib proxies - proxies will be out of date for type: \"\n\t\t\t\t\t\t\t\t\t+ getClazz());\n\t\t\t\t}\n\t\t\t\treturn;\n\t\t\t}\n\t\t\tObject a = strategyAndGeneratorPair[0];\n\t\t\tObject b = strategyAndGeneratorPair[1];\n\t\t\t// want to call a.generate(b)\n\t\t\ttry {\n\t\t\t\tMethod[] ms = a.getClass().getMethods();\n\t\t\t\tMethod found = null;\n\t\t\t\tfor (Method m : ms) {\n\t\t\t\t\tif (m.getName().equals(\"generate\")) {\n\t\t\t\t\t\tfound = m;// TODO cache\n\t\t\t\t\t\tbreak;\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tfound.setAccessible(true);\n\t\t\t\tbyte[] bs = (byte[]) found.invoke(a, b);\n\t\t\t\tif (GlobalConfiguration.isRuntimeLogging && log.isLoggable(Level.INFO)) {\n\t\t\t\t\tlog.log(Level.INFO, \"Proxy regenerate successful for \" + this.slashedtypename);\n\t\t\t\t}\n\t\t\t\tproxy.loadNewVersion(versionsuffix, bs);\n\t\t\t\tproxy.runStaticInitializer();\n\t\t\t}\n\t\t\tcatch (Throwable t) {\n\t\t\t\tt.printStackTrace();\n\t\t\t}\n\t\t}\n\n\t\tproxy = typeRegistry.cglibProxiesFastClass.get(this.slashedtypename);\n\t\tif (proxy != null) {\n\t\t\tObject[] strategyAndFCGeneratorPair = CglibPluginCapturing.clazzToGeneratorStrategyAndFastClassGeneratorMap.get(\n\t\t\t\t\tgetClazz());\n\t\t\tstrategyAndFCGeneratorPair = CglibPluginCapturing.clazzToGeneratorStrategyAndFastClassGeneratorMap.get(\n\t\t\t\t\tgetClazz());\n\t\t\t//\t\t\t\tSystem.out.println(\"need to reload fastclass \" + proxy + \" os=\" + os);\n\t\t\tif (strategyAndFCGeneratorPair != null) {\n\t\t\t\tObject a = strategyAndFCGeneratorPair[0];\n\t\t\t\tObject b = strategyAndFCGeneratorPair[1];\n\t\t\t\t// want to call a.generate(b)\n\t\t\t\ttry {\n\t\t\t\t\tMethod[] ms = a.getClass().getMethods();\n\t\t\t\t\tMethod found = null;\n\t\t\t\t\tfor (Method m : ms) {\n\t\t\t\t\t\tif (m.getName().equals(\"generate\")) {\n\t\t\t\t\t\t\tfound = m;\n\t\t\t\t\t\t\tbreak;\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tbyte[] bs = (byte[]) found.invoke(a, b);\n\t\t\t\t\tif (GlobalConfiguration.isRuntimeLogging && log.isLoggable(Level.INFO)) {\n\t\t\t\t\t\tlog.log(Level.INFO, \"Proxy (fastclass) regenerate successful for \" + this.slashedtypename);\n\t\t\t\t\t}\n\t\t\t\t\tproxy.loadNewVersion(versionsuffix, bs);\n\t\t\t\t\tproxy.runStaticInitializer();\n\t\t\t\t}\n\t\t\t\tcatch (Throwable t) 
{\n\t\t\t\t\tt.printStackTrace();\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\ttry {\n\t\t\tif (this.clazz.isInterface()) {\n\t\t\t\t// JDK Proxy reloading\n\t\t\t\tSet<ReloadableType> relevantProxies = typeRegistry.jdkProxiesForInterface.get(this.slashedtypename);\n\t\t\t\tif (relevantProxies != null) {\n\t\t\t\t\tfor (ReloadableType relevantProxy : relevantProxies) {\n\t\t\t\t\t\tClass<?>[] interfacesImplementedByProxy = relevantProxy.getClazz().getInterfaces();\n\t\t\t\t\t\t// TODO confirm slashedname correct\n\t\t\t\t\t\tbyte[] newProxyBytes = Utils.generateProxyClass(relevantProxy.getSlashedName(),\n\t\t\t\t\t\t\t\tinterfacesImplementedByProxy);\n\t\t\t\t\t\trelevantProxy.loadNewVersion(versionsuffix, newProxyBytes, true);\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tcatch (Throwable t) {\n\t\t\tnew RuntimeException(\"Unexpected problem trying to reload proxy for interface \" + this.dottedtypename,\n\t\t\t\t\tt).printStackTrace();\n\t\t}\n\t}", "public static ClassLoader resolveClassLoaderForBeanProxy(String contextId, Class<?> proxiedType, TypeInfo typeInfo, ProxyServices proxyServices) {\n Class<?> superClass = typeInfo.getSuperClass();\n if (superClass.getName().startsWith(JAVA)) {\n ClassLoader cl = proxyServices.getClassLoader(proxiedType);\n if (cl == null) {\n cl = Thread.currentThread().getContextClassLoader();\n }\n return cl;\n }\n return Container.instance(contextId).services().get(ProxyServices.class).getClassLoader(superClass);\n }", "def _import(klass):\n '''1) Get a reference to the module\n 2) Check the file that module's imported from\n 3) If that file's been updated, force a reload of that module\n return it'''\n mod = __import__(klass.rpartition('.')[0])\n for segment in klass.split('.')[1:-1]:\n mod = getattr(mod, segment)\n\n # Alright, now check the file associated with it. 
Note that clases\n # defined in __main__ don't have a __file__ attribute\n if klass not in BaseJob._loaded:\n BaseJob._loaded[klass] = time.time()\n if hasattr(mod, '__file__'):\n try:\n mtime = os.stat(mod.__file__).st_mtime\n if BaseJob._loaded[klass] < mtime:\n mod = reload_module(mod)\n except OSError:\n logger.warn('Could not check modification time of %s',\n mod.__file__)\n\n return getattr(mod, klass.rpartition('.')[2])", "def loadInstance(self):\n \"\"\"\n Loads the plugin from the proxy information that was created from the\n registry file.\n \"\"\"\n if self._loaded:\n return\n\n self._loaded = True\n module_path = self.modulePath()\n\n package = projex.packageFromPath(module_path)\n path = os.path.normpath(projex.packageRootPath(module_path))\n\n if path in sys.path:\n sys.path.remove(path)\n\n sys.path.insert(0, path)\n\n try:\n __import__(package)\n\n except Exception, e:\n err = Plugin(self.name(), self.version())\n err.setError(e)\n err.setFilepath(module_path)\n\n self._instance = err\n\n self.setError(e)\n\n msg = \"%s.plugin('%s') errored loading instance from %s\"\n opts = (self.proxyClass().__name__, self.name(), module_path)\n logger.warning(msg % opts)\n logger.error(e)", "private Class<?> readNewProxyClassDesc() throws ClassNotFoundException,\n IOException {\n int count = input.readInt();\n String[] interfaceNames = new String[count];\n for (int i = 0; i < count; i++) {\n interfaceNames[i] = input.readUTF();\n }\n Class<?> proxy = resolveProxyClass(interfaceNames);\n // Consume unread class annotation data and TC_ENDBLOCKDATA\n discardData();\n return proxy;\n }", "def load(self):\n \"\"\" Function load\n Get the list of all objects\n\n @return RETURN: A ForemanItem list\n \"\"\"\n cl_tmp = self.api.list(self.objName, limit=self.searchLimit).values()\n cl = []\n for i in cl_tmp:\n cl.extend(i)\n return {x[self.index]: ItemPuppetClass(self.api, x['id'],\n self.objName, self.payloadObj,\n x)\n for x in cl}", "protected Class resolveProxyClass(String[] interfaces) throws IOException, ClassNotFoundException {\n\t\tClassLoader cl = getClass().getClassLoader();\n\t\tClass[] cinterfaces = new Class[interfaces.length];\n\n\t\tfor (int i = 0; i < interfaces.length; i++) {\n\t\t\ttry {\n\t\t\t\tcinterfaces[i] = cl.loadClass(interfaces[i]);\n\t\t\t} catch (ClassNotFoundException ex) {\n\t\t\t\tClassLoader tccl = Thread.currentThread().getContextClassLoader();\n\t\t\t\tif (tccl != null) {\n\t\t\t\t\treturn tccl.loadClass(interfaces[i]);\n\t\t\t\t} else {\n\t\t\t\t\tthrow ex;\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\ttry {\n\t\t\treturn Proxy.getProxyClass(cinterfaces[0].getClassLoader(), cinterfaces);\n\t\t} catch (IllegalArgumentException e) {\n\t\t\tthrow new ClassNotFoundException(null, e);\n\t\t}\n\t}", "protected Class<?> getProxyClass() throws ClassNotFoundException {\n ClassLoader classLoader = getProxyClassLoader();\n return Class.forName(proxyClassName, false, classLoader);\n }" ]
[ 0.7606505155563354, 0.7133715152740479, 0.6375888586044312, 0.6335131525993347, 0.6314616203308105, 0.6294018030166626, 0.6293189525604248, 0.6288355588912964, 0.6247472167015076, 0.6239311695098877, 0.6210521459579468, 0.6183024048805237 ]
Return a list of templates usable by a model.
def get_templates(model): """ Return a list of templates usable by a model. """ for template_name, template in templates.items(): if issubclass(template.model, model): yield (template_name, template.layout._meta.verbose_name)
[ "def get_templates(self, action='index'):\n \"\"\"\n Utility function that provides a list of templates to try for a given\n view, when the template isn't overridden by one of the template\n attributes on the class.\n \"\"\"\n app = self.opts.app_label\n model_name = self.opts.model_name\n return [\n 'wagtailmodeladmin/%s/%s/%s.html' % (app, model_name, action),\n 'wagtailmodeladmin/%s/%s.html' % (app, action),\n 'wagtailmodeladmin/%s.html' % (action,),\n ]", "def for_model(self, model, **kwargs):\n \"\"\"\n Return layouts that are allowed for the given model.\n \"\"\"\n queryset = self.filter(\n content_types=ContentType.objects.get_for_model(model),\n **kwargs\n )\n return queryset", "def get_template_names(self):\n \"\"\"\n Return a list of template names to be used for the view.\n \"\"\"\n model_type = self.get_model_type()\n model_name = self.get_model_name()\n\n templates = [\n 'zinnia/%s/%s/entry_list.html' % (model_type, model_name),\n 'zinnia/%s/%s_entry_list.html' % (model_type, model_name),\n 'zinnia/%s/entry_list.html' % model_type,\n 'zinnia/entry_list.html']\n\n if self.template_name is not None:\n templates.insert(0, self.template_name)\n\n return templates", "def get_templates(self):\n \"\"\"\n Get list of templates this object use\n\n :return: list of templates\n :rtype: list\n \"\"\"\n use = getattr(self, 'use', '')\n if isinstance(use, list):\n return [n.strip() for n in use if n.strip()]\n\n return [n.strip() for n in use.split(',') if n.strip()]", "def get_template_names(self):\n '''\n Build the list of templates related to this user\n '''\n\n # Get user template\n template_model = getattr(self, 'template_model', \"{0}/{1}_{2}\".format(self._appname.lower(), self._modelname.lower(), self.get_template_names_key))\n template_model_ext = getattr(self, 'template_model_ext', 'html')\n templates = get_template(template_model, self.user, self.language, template_model_ext, raise_error=False)\n if type(templates) == list:\n templates.append(\"codenerix/{0}.html\".format(self.get_template_names_key))\n\n # Return thet of templates\n return templates", "def get_templates_from_publishable(name, publishable):\n \"\"\"\n Returns the same template list as `get_templates` but gets values from `Publishable` instance.\n \"\"\"\n slug = publishable.slug\n category = publishable.category\n app_label = publishable.content_type.app_label\n model_label = publishable.content_type.model\n return get_templates(name, slug, category, app_label, model_label)", "def get_templates(self, context, template_name=None):\n \" Extract parameters for `get_templates` from the context. 
\"\n if not template_name:\n template_name = self.template_name\n\n kw = {}\n if 'object' in context:\n o = context['object']\n kw['slug'] = o.slug\n\n if context.get('content_type', False):\n ct = context['content_type']\n kw['app_label'] = ct.app_label\n kw['model_label'] = ct.model\n\n return get_templates(template_name, category=context['category'], **kw)", "private String buildTemplates() {\n\t\tString[] templateString = new String[model.getTemplates().size()];\n\t\tfor (int i = 0; i < (model.getTemplates().size()); i++) {\n\t\t\tString templateLoaded = loadTemplateFile(\n\t\t\t\t\tmodel.getTemplates().get(i));\n\t\t\tif (templateLoaded != null) {\n\t\t\t\ttemplateString[i] = templateLoaded;\n\t\t\t}\n\t\t}\n\t\treturn implode(templateString, File.pathSeparator);\n\t}", "def models_for_pages(*args):\n \"\"\"\n Create a select list containing each of the models that subclass the\n ``Page`` model.\n \"\"\"\n from warnings import warn\n warn(\"template tag models_for_pages is deprectaed, use \"\n \"PageAdmin.get_content_models instead\")\n from yacms.pages.admin import PageAdmin\n return PageAdmin.get_content_models()", "def get_templates(self, id_or_uri, start=0, count=-1, filter='', query='', sort=''):\n \"\"\"\n Gets a list of volume templates. Returns a list of storage templates belonging to the storage system.\n\n Returns:\n list: Storage Template List.\n \"\"\"\n uri = self._client.build_uri(id_or_uri) + \"/templates\"\n return self._client.get(self._client.build_query_uri(start=start, count=count, filter=filter,\n query=query, sort=sort, uri=uri))", "def templates\n extracted_templates = []\n\n unless self.templates_list.nil? \n self.templates_list.each do |template|\n extracted_templates.push(Template.new(template))\n end\n end\n\n return extracted_templates \n end", "def applicable_models(self):\r\n \"\"\"\r\n Returns a list of model classes that subclass Page\r\n and include a \"tags\" field.\r\n\r\n :rtype: list.\r\n \"\"\"\r\n Page = apps.get_model('wagtailcore', 'Page')\r\n applicable = []\r\n\r\n for model in apps.get_models():\r\n meta = getattr(model, '_meta')\r\n fields = meta.get_all_field_names()\r\n\r\n if issubclass(model, Page) and 'tags' in fields:\r\n applicable.append(model)\r\n\r\n return applicable" ]
[ 0.78190678358078, 0.7594239115715027, 0.7541986107826233, 0.7524802684783936, 0.7364693284034729, 0.733937680721283, 0.7246741056442261, 0.7128293514251709, 0.7096962332725525, 0.7047660946846008, 0.7018197774887085, 0.69899982213974 ]
Registers the given layout(s) classes admin site: @pages.register(Page) class Default(PageLayout): pass
def attach(*layouts, **kwargs): """ Registers the given layout(s) classes admin site: @pages.register(Page) class Default(PageLayout): pass """ def _model_admin_wrapper(layout_class): register(layout_class, layouts[0]) return layout_class return _model_admin_wrapper
[ "def register():\n \"\"\" Register markdown for flatpages. \"\"\"\n\n admin.site.unregister(FlatPage)\n admin.site.register(FlatPage, LocalFlatPageAdmin)", "def register_classes_for_admin(db_session, show_pks=True, name='admin'):\n \"\"\"Registers classes for the Admin view that ultimately creates the admin\n interface.\n\n :param db_session: handle to database session\n :param list classes: list of classes to register with the admin\n :param bool show_pks: show primary key columns in the admin?\n\n \"\"\"\n\n with app.app_context():\n admin_view = Admin(current_app, name=name)\n for cls in set(\n cls for cls in current_app.class_references.values() if\n cls.use_admin):\n column_list = [column.name for column in\n cls.__table__.columns.values()]\n if hasattr(cls, '__view__'):\n # allow ability for model classes to specify model views\n admin_view_class = type(\n 'AdminView',\n (cls.__view__,),\n {'form_columns': column_list})\n elif show_pks:\n # the default of Flask-SQLAlchemy is to not show primary\n # classes, which obviously isn't acceptable in some cases\n admin_view_class = type(\n 'AdminView',\n (AdminModelViewWithPK,),\n {'form_columns': column_list})\n else:\n admin_view_class = ModelView\n admin_view.add_view(admin_view_class(cls, db_session))", "def register(model, admin=None, category=None):\n \"\"\" Decorator to registering you Admin class. \"\"\"\n def _model_admin_wrapper(admin_class):\n\n site.register(model, admin_class=admin_class)\n\n if category:\n site.register_block(model, category)\n\n return admin_class\n return _model_admin_wrapper", "def _register_admin(admin_site, model, admin_class):\n \"\"\" Register model in the admin, ignoring any previously registered models.\n Alternatively it could be used in the future to replace a previously \n registered model.\n \"\"\"\n try:\n admin_site.register(model, admin_class)\n except admin.sites.AlreadyRegistered:\n pass", "def register_seo_admin(admin_site, metadata_class):\n \"\"\"Register the backends specified in Meta.backends with the admin.\"\"\"\n\n if metadata_class._meta.use_sites:\n path_admin = SitePathMetadataAdmin\n model_instance_admin = SiteModelInstanceMetadataAdmin\n model_admin = SiteModelMetadataAdmin\n view_admin = SiteViewMetadataAdmin\n else:\n path_admin = PathMetadataAdmin\n model_instance_admin = ModelInstanceMetadataAdmin\n model_admin = ModelMetadataAdmin\n view_admin = ViewMetadataAdmin\n\n def get_list_display():\n return tuple(\n name for name, obj in metadata_class._meta.elements.items()\n if obj.editable)\n\n backends = metadata_class._meta.backends\n\n if 'model' in backends:\n class ModelAdmin(model_admin):\n form = get_model_form(metadata_class)\n list_display = model_admin.list_display + get_list_display()\n\n _register_admin(admin_site, metadata_class._meta.get_model('model'),\n ModelAdmin)\n\n if 'view' in backends:\n class ViewAdmin(view_admin):\n form = get_view_form(metadata_class)\n list_display = view_admin.list_display + get_list_display()\n\n _register_admin(admin_site, metadata_class._meta.get_model('view'),\n ViewAdmin)\n\n if 'path' in backends:\n class PathAdmin(path_admin):\n form = get_path_form(metadata_class)\n list_display = path_admin.list_display + get_list_display()\n\n _register_admin(admin_site, metadata_class._meta.get_model('path'),\n PathAdmin)\n\n if 'modelinstance' in backends:\n class ModelInstanceAdmin(model_instance_admin):\n form = get_modelinstance_form(metadata_class)\n list_display = (model_instance_admin.list_display +\n get_list_display())\n\n 
_register_admin(admin_site,\n metadata_class._meta.get_model('modelinstance'),\n ModelInstanceAdmin)", "def register(conf, conf_admin, **options):\n \"\"\"\n Register a new admin section.\n\n :param conf: A subclass of ``djconfig.admin.Config``\n :param conf_admin: A subclass of ``djconfig.admin.ConfigAdmin``\n :param options: Extra options passed to ``django.contrib.admin.site.register``\n \"\"\"\n assert issubclass(conf_admin, ConfigAdmin), (\n 'conf_admin is not a ConfigAdmin subclass')\n assert issubclass(\n getattr(conf_admin, 'change_list_form', None),\n ConfigForm), 'No change_list_form set'\n assert issubclass(conf, Config), (\n 'conf is not a Config subclass')\n assert conf.app_label, 'No app_label set'\n assert conf.verbose_name_plural, 'No verbose_name_plural set'\n assert not conf.name or re.match(r\"^[a-zA-Z_]+$\", conf.name), (\n 'Not a valid name. Valid chars are [a-zA-Z_]')\n config_class = type(\"Config\", (), {})\n config_class._meta = type(\"Meta\", (_ConfigMeta,), {\n 'app_label': conf.app_label,\n 'verbose_name_plural': conf.verbose_name_plural,\n 'object_name': 'Config',\n 'model_name': conf.name,\n 'module_name': conf.name})\n admin.site.register([config_class], conf_admin, **options)", "def add_view(self, request, form_url='', extra_context=None):\n \"\"\"The ``add`` admin view for the :class:`Page <pages.models.Page>`.\"\"\"\n extra_context = {\n 'language': get_language_from_request(request),\n 'page_languages': settings.PAGE_LANGUAGES,\n }\n return super(PageAdmin, self).add_view(request, form_url,\n extra_context)", "public function register()\n {\n $this->app['actions']->listen('admin_menu', function () {\n foreach ($this->adminControllers as $adminController) {\n /** @var AdminPageController $adminController */\n $adminController = $this->app->make($adminController);\n\n $menuTitle = $adminController->getMenuTitle();\n\n add_menu_page(\n $adminController->getTitle(),\n !empty($menuTitle) ?: $adminController->getTitle(),\n 'manage_options',\n $adminController->getSlug(),\n [$adminController, '_render'],\n $adminController->getIcon(),\n $adminController->getPosition()\n );\n }\n });\n }", "def register_view(self, view_class, *args, **kwargs):\n \"\"\"Register an admin view on this admin instance.\n\n :param view_class: The view class name passed to the view factory.\n :param args: Positional arugments for view class.\n :param kwargs: Keyword arguments to view class.\n \"\"\"\n protected_view_class = self.view_class_factory(view_class)\n if 'endpoint' not in kwargs:\n kwargs['endpoint'] = view_class(*args, **kwargs).endpoint\n self.admin.add_view(protected_view_class(*args, **kwargs))", "public function register() {\r\n include __DIR__.'/../routes.php';\r\n $config = __DIR__ . '/../config/config.php';\r\n $this->mergeConfigFrom($config, 'larakit.lk-adminlte');\r\n $this->publishes([\r\n $config => config_path('larakit/lk-adminlte/config.php'),\r\n ]);\r\n $this->loadViewsFrom(__DIR__.'/../views', 'lk-adminlte');\r\n if(\\Request::is('admin*')){\r\n \\LaraPage::body()->addClass(config('larakit.lk-adminlte.body_class'));\r\n }\r\n }", "def auto_register_inlines(admin_site, metadata_class):\n \"\"\" This is a questionable function that automatically adds our metadata\n inline to all relevant models in the site. 
\n \"\"\"\n inline_class = get_inline(metadata_class)\n\n for model, admin_class_instance in admin_site._registry.items():\n _monkey_inline(model, admin_class_instance, metadata_class, inline_class, admin_site)\n\n # Monkey patch the register method to automatically add an inline for this site.\n # _with_inline() is a decorator that wraps the register function with the same injection code\n # used above (_monkey_inline).\n admin_site.register = _with_inline(admin_site.register, admin_site, metadata_class, inline_class)", "def action(cls, view):\n \"\"\"Register admin view action.\"\"\"\n name = \"%s:%s\" % (cls.name, view.__name__)\n path = \"%s/%s\" % (cls.url, view.__name__)\n cls.actions.append((view.__doc__, path))\n return cls.register(path, name=name)(view)" ]
[ 0.7355599999427795, 0.7345627546310425, 0.7286189198493958, 0.724654495716095, 0.6845201253890991, 0.6805652976036072, 0.6734296083450317, 0.6697297096252441, 0.6669802069664001, 0.6662369966506958, 0.6658376455307007, 0.6657775044441223 ]
Function enhance Enhance the object with new item or enhanced items
def enhance(self): """ Function enhance Enhance the object with new item or enhanced items """ self.update({'os_default_templates': SubDict(self.api, self.objName, self.payloadObj, self.key, SubItemOsDefaultTemplate)}) self.update({'config_templates': SubDict(self.api, self.objName, self.payloadObj, self.key, SubItemConfigTemplate)}) self.update({'ptables': SubDict(self.api, self.objName, self.payloadObj, self.key, SubItemPTable)}) self.update({'media': SubDict(self.api, self.objName, self.payloadObj, self.key, SubItemMedia)}) self.update({'architectures': SubDict(self.api, self.objName, self.payloadObj, self.key, SubItemArchitecture)})
[ "def enhance(self):\n \"\"\" Function enhance\n Enhance the object with new item or enhanced items\n \"\"\"\n self.update({'parameters':\n SubDict(self.api, self.objName,\n self.payloadObj, self.key,\n SubItemParameter)})\n self.update({'interfaces':\n SubDict(self.api, self.objName,\n self.payloadObj, self.key,\n SubItemInterface)})\n self.update({'subnets':\n SubDict(self.api, self.objName,\n self.payloadObj, self.key,\n SubItemSubnet)})", "def enhance(self):\n \"\"\" Function enhance\n Enhance the object with new item or enhanced items\n \"\"\"\n self.update({'images':\n SubDict(self.api, self.objName,\n self.payloadObj, self.key,\n SubItemImages)})", "def enhance(self):\n \"\"\" Function enhance\n Enhance the object with new item or enhanced items\n \"\"\"\n self.update({'os_default_templates':\n SubDict(self.api, self.objName,\n self.payloadObj, self.key,\n SubItemOsDefaultTemplate)})\n self.update({'operatingsystems':\n SubDict(self.api, self.objName,\n self.payloadObj, self.key,\n SubItemOperatingSystem)})", "def enhance(self):\n \"\"\" Function enhance\n Enhance the object with new item or enhanced items\n \"\"\"\n self.update({'puppetclasses':\n SubDict(self.api, self.objName,\n self.payloadObj, self.key,\n SubItemPuppetClasses)})\n self.update({'parameters':\n SubDict(self.api, self.objName,\n self.payloadObj, self.key,\n SubItemParameter)})\n self.update({'interfaces':\n SubDict(self.api, self.objName,\n self.payloadObj, self.key,\n SubItemInterface)})\n self.update({'smart_class_parameters':\n SubDict(self.api, self.objName,\n self.payloadObj, self.key,\n SubItemSmartClassParameter)})", "def enhance(self):\n \"\"\" Function enhance\n Enhance the object with new item or enhanced items\n \"\"\"\n self.update({'puppetclasses':\n SubDict(self.api, self.objName,\n self.payloadObj, self.key,\n SubItemPuppetClasses)})\n self.update({'parameters':\n SubDict(self.api, self.objName,\n self.payloadObj, self.key,\n SubItemParameter)})\n self.update({'smart_class_parameters':\n SubDict(self.api, self.objName,\n self.payloadObj, self.key,\n ItemSmartClassParameter)})", "def enhance(self):\n \"\"\" Function enhance\n Enhance the object with new item or enhanced items\n \"\"\"\n if self.objName in ['hosts', 'hostgroups',\n 'puppet_classes']:\n from foreman.itemSmartClassParameter\\\n import ItemSmartClassParameter\n self.update({'smart_class_parameters':\n SubDict(self.api, self.objName,\n self.payloadObj, self.key,\n ItemSmartClassParameter)})", "public T enhance(T t) {\n if (!needsEnhancement(t)) {\n return t;\n }\n\n try {\n return getEnhancedClass().getConstructor(baseClass).newInstance(t);\n } catch (Exception e) {\n throw new RuntimeException(String.format(\"Could not enhance object %s (%s)\", t, t.getClass()), e);\n }\n }", "private static void doEnhancement(CtClass cc, Version modelVersion) throws CannotCompileException,\n NotFoundException, ClassNotFoundException {\n CtClass inter = cp.get(OpenEngSBModel.class.getName());\n cc.addInterface(inter);\n addFields(cc);\n addGetOpenEngSBModelTail(cc);\n addSetOpenEngSBModelTail(cc);\n addRetrieveModelName(cc);\n addRetrieveModelVersion(cc, modelVersion);\n addOpenEngSBModelEntryMethod(cc);\n addRemoveOpenEngSBModelEntryMethod(cc);\n addRetrieveInternalModelId(cc);\n addRetrieveInternalModelTimestamp(cc);\n addRetrieveInternalModelVersion(cc);\n addToOpenEngSBModelValues(cc);\n addToOpenEngSBModelEntries(cc);\n cc.setModifiers(cc.getModifiers() & ~Modifier.ABSTRACT);\n }", "def enhance(self, inverse=False, gamma=1.0, stretch=\"no\",\n 
stretch_parameters=None, **kwargs):\n \"\"\"Image enhancement function. It applies **in this order** inversion,\n gamma correction, and stretching to the current image, with parameters\n *inverse* (see :meth:`Image.invert`), *gamma* (see\n :meth:`Image.gamma`), and *stretch* (see :meth:`Image.stretch`).\n \"\"\"\n self.invert(inverse)\n if stretch_parameters is None:\n stretch_parameters = {}\n\n stretch_parameters.update(kwargs)\n self.stretch(stretch, **stretch_parameters)\n self.gamma(gamma)", "def load(self, data):\n \"\"\" Function load\n Store the object data\n \"\"\"\n self.clear()\n self.update(data)\n self.enhance()", "def enhance2dataset(dset):\n \"\"\"Apply enhancements to dataset *dset* and return the resulting data\n array of the image.\"\"\"\n attrs = dset.attrs\n img = get_enhanced_image(dset)\n # Clip image data to interval [0.0, 1.0]\n data = img.data.clip(0.0, 1.0)\n data.attrs = attrs\n\n return data", "function enhancedEcommerceProductAction(track, action, data) {\n enhancedEcommerceTrackProduct(track);\n window.ga('ec:setAction', action, data || {});\n}" ]
[ 0.8729672431945801, 0.8696154356002808, 0.8695272207260132, 0.8679497838020325, 0.8630505800247192, 0.8407313823699951, 0.7447202205657959, 0.7079142928123474, 0.707258939743042, 0.6839107275009155, 0.6810131072998047, 0.6799886226654053 ]
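The positive example above illustrates a pattern that recurs throughout these Foreman wrappers: a dict-like item "enhances" itself by attaching one sub-collection per related resource. The sketch below is a self-contained approximation of that pattern; the class names and constructor signatures are stand-ins, not the library's actual `SubDict`/item API.

```python
class SubDict(dict):
    """Stand-in for a lazily populated sub-collection of an API item."""

    def __init__(self, api, parent_name, parent_key, item_kind):
        super().__init__()
        self.api = api                 # hypothetical API client handle
        self.parent_name = parent_name
        self.parent_key = parent_key
        self.item_kind = item_kind     # e.g. 'media' or 'ptables'


class OperatingSystemItem(dict):
    """Dict-like item that augments itself with related sub-collections."""

    SUB_KINDS = ('os_default_templates', 'config_templates',
                 'ptables', 'media', 'architectures')

    def __init__(self, api, obj_name, key, payload):
        super().__init__(payload)
        self.api = api
        self.obj_name = obj_name
        self.key = key

    def enhance(self):
        # Attach one sub-collection per related resource, as enhance() does above.
        for kind in self.SUB_KINDS:
            self[kind] = SubDict(self.api, self.obj_name, self.key, kind)


# Usage sketch
item = OperatingSystemItem(api=None, obj_name='operatingsystems', key=1,
                           payload={'id': 1, 'title': 'CentOS 7'})
item.enhance()
print(sorted(item))  # payload keys plus the five sub-collection keys
```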
Get required API keys from environment variables.
def get_api_envs(): """Get required API keys from environment variables.""" client_id = os.environ.get('CLIENT_ID') user_id = os.environ.get('USER_ID') if not client_id or not user_id: raise ValueError('API keys are not found in the environment') return client_id, user_id
[ "def get_keys(self):\n \"\"\"\n Get the Twitter API keys. Order of precedence is command line,\n environment, config file. Return True if all the keys were found\n and False if not.\n \"\"\"\n env = os.environ.get\n if not self.consumer_key:\n self.consumer_key = env('CONSUMER_KEY')\n if not self.consumer_secret:\n self.consumer_secret = env('CONSUMER_SECRET')\n if not self.access_token:\n self.access_token = env('ACCESS_TOKEN')\n if not self.access_token_secret:\n self.access_token_secret = env('ACCESS_TOKEN_SECRET')\n\n if self.config and not (self.consumer_key and\n self.consumer_secret and\n self.access_token and\n self.access_token_secret):\n self.load_config()", "def get(cls):\n \"\"\"Get the current API key.\n if one has not been given via 'set' the env var STEAMODD_API_KEY will\n be checked instead.\n \"\"\"\n apikey = cls.__api_key or cls.__api_key_env_var\n\n if apikey:\n return apikey\n else:\n raise APIKeyMissingError(\"API key not set\")", "def _get_environ(environ):\n # type: (Dict[str, str]) -> Iterator[Tuple[str, str]]\n \"\"\"\n Returns our whitelisted environment variables.\n \"\"\"\n keys = [\"SERVER_NAME\", \"SERVER_PORT\"]\n if _should_send_default_pii():\n # Add all three headers here to make debugging of proxy setup easier.\n keys += [\"REMOTE_ADDR\", \"HTTP_X_FORWARDED_FOR\", \"HTTP_X_REAL_IP\"]\n\n for key in keys:\n if key in environ:\n yield key, environ[key]", "def get(self, request, bot_id, format=None):\n \"\"\"\n Get list of environment variables\n ---\n serializer: EnvironmentVarSerializer\n responseMessages:\n - code: 401\n message: Not authenticated\n \"\"\"\n return super(EnvironmentVarList, self).get(request, bot_id, format)", "def _required_envs(env_vars):\n \"\"\"\n Parse environment variables for required values,\n raising a `BrowserConfig` error if they are not found.\n\n Returns a `dict` of environment variables.\n \"\"\"\n envs = {\n key: os.environ.get(key)\n for key in env_vars\n }\n\n # Check for missing keys\n missing = [key for key, val in list(envs.items()) if val is None]\n if missing:\n msg = (\n u\"These environment variables must be set: \" + u\", \".join(missing)\n )\n raise BrowserConfigError(msg)\n\n # Check that we support this browser\n if envs['SELENIUM_BROWSER'] not in BROWSERS:\n msg = u\"Unsuppported browser: {0}\".format(envs['SELENIUM_BROWSER'])\n raise BrowserConfigError(msg)\n\n return envs", "def required_from_env(key):\n \"\"\"\n Retrieve a required variable from the current environment variables.\n\n Raises a ValueError if the env variable is not found or has no value.\n\n \"\"\"\n val = os.environ.get(key)\n if not val:\n raise ValueError(\n \"Required argument '{}' not supplied and not found in environment variables\".format(key))\n return val", "def get_environment_vars(filename):\n \"\"\"Return a dict of environment variables required to run a service under faketime.\"\"\"\n if sys.platform == \"linux\" or sys.platform == \"linux2\":\n return {\n 'LD_PRELOAD': path.join(LIBFAKETIME_DIR, \"libfaketime.so.1\"),\n 'FAKETIME_SKIP_CMDS': 'nodejs', # node doesn't seem to work in the current version.\n 'FAKETIME_TIMESTAMP_FILE': filename,\n }\n elif sys.platform == \"darwin\":\n return {\n 'DYLD_INSERT_LIBRARIES': path.join(LIBFAKETIME_DIR, \"libfaketime.1.dylib\"),\n 'DYLD_FORCE_FLAT_NAMESPACE': '1',\n 'FAKETIME_TIMESTAMP_FILE': filename,\n }\n else:\n raise RuntimeError(\"libfaketime does not support '{}' platform\".format(sys.platform))", "def get_credentials_from_env():\n \"\"\"Get credentials from environment 
variables.\n\n Preference of credentials is:\n - No credentials if DATASTORE_EMULATOR_HOST is set.\n - Google APIs Signed JWT credentials based on\n DATASTORE_SERVICE_ACCOUNT and DATASTORE_PRIVATE_KEY_FILE\n environments variables\n - Google Application Default\n https://developers.google.com/identity/protocols/application-default-credentials\n\n Returns:\n credentials or None.\n\n \"\"\"\n if os.getenv(_DATASTORE_USE_STUB_CREDENTIAL_FOR_TEST_ENV):\n logging.info('connecting without credentials because %s is set.',\n _DATASTORE_USE_STUB_CREDENTIAL_FOR_TEST_ENV)\n return None\n if os.getenv(_DATASTORE_EMULATOR_HOST_ENV):\n logging.info('connecting without credentials because %s is set.',\n _DATASTORE_EMULATOR_HOST_ENV)\n return None\n if (os.getenv(_DATASTORE_SERVICE_ACCOUNT_ENV)\n and os.getenv(_DATASTORE_PRIVATE_KEY_FILE_ENV)):\n with open(os.getenv(_DATASTORE_PRIVATE_KEY_FILE_ENV), 'rb') as f:\n key = f.read()\n credentials = client.SignedJwtAssertionCredentials(\n os.getenv(_DATASTORE_SERVICE_ACCOUNT_ENV), key, SCOPE)\n logging.info('connecting using private key file.')\n return credentials\n try:\n credentials = client.GoogleCredentials.get_application_default()\n credentials = credentials.create_scoped(SCOPE)\n logging.info('connecting using Google Application Default Credentials.')\n return credentials\n except client.ApplicationDefaultCredentialsError, e:\n logging.error('Unable to find any credentials to use. '\n 'If you are running locally, make sure to set the '\n '%s environment variable.', _DATASTORE_EMULATOR_HOST_ENV)\n raise e", "def get_eargs():\n \"\"\" Look for options in environment vars \"\"\"\n\n settings = {}\n\n zmq = os.environ.get(\"ZMQ_PREFIX\", None)\n if zmq is not None:\n debug(\"Found environ var ZMQ_PREFIX=%s\" % zmq)\n settings['zmq_prefix'] = zmq\n\n return settings", "def get_environ(keys):\n \"\"\"\n Get environment variables from :data:`os.environ`.\n\n :type keys: [str]\n :rtype: dict\n\n Some additional features.\n\n * If 'HOST' is not in :data:`os.environ`, this function\n automatically fetch it using :meth:`platform.node`.\n * If 'TTY' is not in :data:`os.environ`, this function\n automatically fetch it using :meth:`os.ttyname`.\n * Set 'RASH_SPENV_TERMINAL' if needed.\n\n \"\"\"\n items = ((k, os.environ.get(k)) for k in keys)\n subenv = dict((k, v) for (k, v) in items if v is not None)\n needset = lambda k: k in keys and not subenv.get(k)\n\n def setifnonempty(key, value):\n if value:\n subenv[key] = value\n\n if needset('HOST'):\n import platform\n subenv['HOST'] = platform.node()\n if needset('TTY'):\n setifnonempty('TTY', get_tty())\n if needset('RASH_SPENV_TERMINAL'):\n from .utils.termdetection import detect_terminal\n setifnonempty('RASH_SPENV_TERMINAL', detect_terminal())\n return subenv", "def __get_user_env_vars(self):\r\n \"\"\"Return the user defined environment variables\"\"\"\r\n return (os.environ.get(self.GP_URL_ENV_VAR),\r\n os.environ.get(self.GP_INSTANCE_ID_ENV_VAR),\r\n os.environ.get(self.GP_USER_ID_ENV_VAR),\r\n os.environ.get(self.GP_PASSWORD_ENV_VAR),\r\n os.environ.get(self.GP_IAM_API_KEY_ENV_VAR))", "def _ensure_api_keys(task_desc, failure_ret=None):\n \"\"\"Wrap Elsevier methods which directly use the API keys.\n\n Ensure that the keys are retrieved from the environment or config file when\n first called, and store global scope. 
Subsequently use globally stashed\n results and check for required ids.\n \"\"\"\n def check_func_wrapper(func):\n @wraps(func)\n def check_api_keys(*args, **kwargs):\n global ELSEVIER_KEYS\n if ELSEVIER_KEYS is None:\n ELSEVIER_KEYS = {}\n # Try to read in Elsevier API keys. For each key, first check\n # the environment variables, then check the INDRA config file.\n if not has_config(INST_KEY_ENV_NAME):\n logger.warning('Institution API key %s not found in config '\n 'file or environment variable: this will '\n 'limit access for %s'\n % (INST_KEY_ENV_NAME, task_desc))\n ELSEVIER_KEYS['X-ELS-Insttoken'] = get_config(INST_KEY_ENV_NAME)\n\n if not has_config(API_KEY_ENV_NAME):\n logger.error('API key %s not found in configuration file '\n 'or environment variable: cannot %s'\n % (API_KEY_ENV_NAME, task_desc))\n return failure_ret\n ELSEVIER_KEYS['X-ELS-APIKey'] = get_config(API_KEY_ENV_NAME)\n elif 'X-ELS-APIKey' not in ELSEVIER_KEYS.keys():\n logger.error('No Elsevier API key %s found: cannot %s'\n % (API_KEY_ENV_NAME, task_desc))\n return failure_ret\n return func(*args, **kwargs)\n return check_api_keys\n return check_func_wrapper" ]
[ 0.7456530332565308, 0.7439531087875366, 0.7352682948112488, 0.7221922874450684, 0.7217668294906616, 0.7119734287261963, 0.7052922248840332, 0.7042791843414307, 0.7039889097213745, 0.701678454875946, 0.6990089416503906, 0.6967628002166748 ]
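The `get_api_envs` row shows the usual fail-fast pattern for mandatory credentials kept in environment variables. Below is a small generalized sketch; the helper name `require_env` and the demo values are assumptions, only the `CLIENT_ID` and `USER_ID` variable names come from the snippet.

```python
import os


def require_env(*names):
    """Return the named environment variables, failing fast if any is unset."""
    values = {name: os.environ.get(name) for name in names}
    missing = [name for name, value in values.items() if not value]
    if missing:
        raise ValueError('Missing environment variables: ' + ', '.join(missing))
    return values


# Usage sketch (variable names taken from the snippet above)
os.environ.setdefault('CLIENT_ID', 'demo-client')
os.environ.setdefault('USER_ID', 'demo-user')
env = require_env('CLIENT_ID', 'USER_ID')
client_id, user_id = env['CLIENT_ID'], env['USER_ID']
```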
Call given API end_point with API keys. :param method: HTTP method (e.g. 'get', 'delete'). :param end_point: API endpoint (e.g. 'users/john/sets'). :param params: Dictionary to be sent in the query string (e.g. {'myparam': 'myval'}) :param client_id: Quizlet client ID as string. :param access_token: Quizlet access token as string. client_id and access_token are mutually exclusive but mandatory.
def api_call(method, end_point, params=None, client_id=None, access_token=None): """Call given API end_point with API keys. :param method: HTTP method (e.g. 'get', 'delete'). :param end_point: API endpoint (e.g. 'users/john/sets'). :param params: Dictionary to be sent in the query string (e.g. {'myparam': 'myval'}) :param client_id: Quizlet client ID as string. :param access_token: Quizlet access token as string. client_id and access_token are mutually exclusive but mandatory. """ if bool(client_id) == bool(access_token): raise ValueError('Either client_id or access_token') url = 'https://api.quizlet.com/2.0/{}'.format(end_point) if not params: params = {} if client_id: params['client_id'] = client_id headers = {'Authorization': 'Bearer {}'.format(access_token)} if access_token else None response = requests.request(method, url, params=params, headers=headers) if int(response.status_code / 100) != 2: error_title = '' try: error_title += ', ' + response.json()['error_title'] except ValueError: pass except KeyError: pass raise ValueError( '{} returned {}{}'.format(url, response.status_code, error_title) ) try: return response.json() except json.decoder.JSONDecodeError: pass
[ "async def call_async(self, method, **parameters):\n \"\"\"Makes an async call to the API.\n\n :param method: The method name.\n :param params: Additional parameters to send (for example, search=dict(id='b123') )\n :return: The JSON result (decoded into a dict) from the server.abs\n :raise MyGeotabException: Raises when an exception occurs on the MyGeotab server.\n :raise TimeoutException: Raises when the request does not respond after some time.\n \"\"\"\n if method is None:\n raise Exception('A method name must be specified')\n params = api.process_parameters(parameters)\n if self.credentials and not self.credentials.session_id:\n self.authenticate()\n if 'credentials' not in params and self.credentials.session_id:\n params['credentials'] = self.credentials.get_param()\n\n try:\n result = await _query(self._server, method, params,\n verify_ssl=self._is_verify_ssl, loop=self.loop)\n if result is not None:\n self.__reauthorize_count = 0\n return result\n except MyGeotabException as exception:\n if exception.name == 'InvalidUserException':\n if self.__reauthorize_count == 0 and self.credentials.password:\n self.__reauthorize_count += 1\n self.authenticate()\n return await self.call_async(method, **parameters)\n else:\n raise AuthenticationException(self.credentials.username,\n self.credentials.database,\n self.credentials.server)\n raise", "def _api_call(self, endpoint, model=None, method=None, data=None, filters=None):\n \"\"\"\n Makes a call to the linode api. Data should only be given if the method is\n POST or PUT, and should be a dictionary\n \"\"\"\n if not self.token:\n raise RuntimeError(\"You do not have an API token!\")\n\n if not method:\n raise ValueError(\"Method is required for API calls!\")\n\n if model:\n endpoint = endpoint.format(**vars(model))\n url = '{}{}'.format(self.base_url, endpoint)\n headers = {\n 'Authorization': \"Bearer {}\".format(self.token),\n 'Content-Type': 'application/json',\n 'User-Agent': self._user_agent,\n }\n\n if filters:\n headers['X-Filter'] = json.dumps(filters)\n\n body = None\n if data is not None:\n body = json.dumps(data)\n\n response = method(url, headers=headers, data=body)\n\n warning = response.headers.get('Warning', None)\n if warning:\n logger.warning('Received warning from server: {}'.format(warning))\n\n if 399 < response.status_code < 600:\n j = None\n error_msg = '{}: '.format(response.status_code)\n try:\n j = response.json()\n if 'errors' in j.keys():\n for e in j['errors']:\n error_msg += '{}; '.format(e['reason']) \\\n if 'reason' in e.keys() else ''\n except:\n pass\n raise ApiError(error_msg, status=response.status_code, json=j)\n\n if response.status_code != 204:\n j = response.json()\n else:\n j = None # handle no response body\n\n return j", "def run_query(method, params, **kwargs):\n '''\n Send Zabbix API call\n\n Args:\n method: actual operation to perform via the API\n params: parameters required for specific method\n\n optional kwargs:\n _connection_user: zabbix user (can also be set in opts or pillar, see module's docstring)\n _connection_password: zabbix password (can also be set in opts or pillar, see module's docstring)\n _connection_url: url of zabbix frontend (can also be set in opts or pillar, see module's docstring)\n\n all optional template.get parameters: keyword argument names depends on your zabbix version, see:\n\n https://www.zabbix.com/documentation/2.4/manual/api/reference/\n\n Returns:\n Response from Zabbix API\n\n CLI Example:\n .. 
code-block:: bash\n\n salt '*' zabbix.run_query proxy.create '{\"host\": \"zabbixproxy.domain.com\", \"status\": \"5\"}'\n '''\n conn_args = _login(**kwargs)\n ret = {}\n try:\n if conn_args:\n method = method\n params = params\n params = _params_extend(params, **kwargs)\n ret = _query(method, params, conn_args['url'], conn_args['auth'])\n if isinstance(ret['result'], bool):\n return ret['result']\n return ret['result'] if ret['result'] else False\n else:\n raise KeyError\n except KeyError:\n return ret", "def authenticated_request(self, endpoint, method='GET', params=None, data=None):\n '''\n Send a request to the given Wunderlist API with 'X-Access-Token' and 'X-Client-ID' headers and ensure the response code is as expected given the request type\n\n Params:\n endpoint -- API endpoint to send request to\n\n Keyword Args:\n method -- GET, PUT, PATCH, DELETE, etc.\n params -- parameters to encode in the request\n data -- data to send with the request\n '''\n headers = {\n 'X-Access-Token' : self.access_token,\n 'X-Client-ID' : self.client_id\n }\n return self.api.request(endpoint, method=method, headers=headers, params=params, data=data)", "def api_call(method, endpoint, params={})\n # add api token to params\n params.merge!({ :token => self.token })\n\n # dispatch to the right method, with the full path (/api/v2 + endpoint)\n request = self.send(\"format_#{method}\", \"#{@url.path}/#{endpoint}\", params)\n response = @http.request(request)\n \n # Possible Responses\n #\n # 200 - (OK) The request was received successfully.\n # 400 - (Bad Request) There was a problem with your request parameters. Check\n # the error field of the response object for info on what was wrong.\n # 401 - (Unauthorized) Your API token in the token parameter was either missing\n # or incorrect.\n # 404 - (Not Found) The API method was not found. Check yor request URL for\n # typos.\n # 405 - (Method Not Allowed) An incorrect HTTP verb (i.e. GET, POST, etc) was\n # used for the request\n # 5XX - There was an error Crocodoc could not recover from. We are generally\n # notified of these automatically. If they are repeatedly received, you\n # should contact Crocodoc Support.\n\n unless response.code == '200'\n raise Crocodoc::Error, \"HTTP Error #{response.code}: #{response.body}\"\n end\n response.body\n end", "def request(self, endpoint, method=\"GET\", params=None):\n \"\"\"Return dict of response received from Safecast's API\n :param endpoint: (required) Full url or Safecast API endpoint\n (e.g. measurements/users)\n :type endpoint: string\n :param method: (optional) Method of accessing data, either\n GET, POST, PUT or DELETE. (default GET)\n :type method: string\n :param params: (optional) Dict of parameters (if any) accepted\n the by Safecast API endpoint you are trying to\n access (default None)\n :type params: dict or None\n :rtype: dict\n \"\"\"\n\n # In case they want to pass a full Safecast URL\n # i.e. 
https://api.safecast.org/measurements.json\n if endpoint.startswith(\"http\"):\n url = endpoint\n else:\n url = \"%s/%s.json\" % (self.api_url, endpoint)\n\n if method != \"GET\":\n if self.api_key is None:\n raise SafecastPyAuthError(\"Require an api_key\")\n url = url + \"?api_key={0}\".format(self.api_key)\n\n content = self._request(url, method=method, params=params, api_call=url)\n return content", "def access_token_endpoint(request):\n \"\"\" Generates :py:class:`djoauth2.models.AccessTokens` if provided with\n sufficient authorization.\n\n This endpoint only supports two grant types:\n * ``authorization_code``: http://tools.ietf.org/html/rfc6749#section-4.1\n * ``refresh_token``: http://tools.ietf.org/html/rfc6749#section-6\n\n For further documentation, read http://tools.ietf.org/html/rfc6749#section-3.2\n \"\"\"\n # TODO(peter): somehow implement the anti-brute-force requirement specified\n # by http://tools.ietf.org/html/rfc6749#section-2.3.1 :\n #\n # Since this client authentication method involves a password, the\n # authorization server MUST protect any endpoint utilizing it against\n # brute force attacks.\n #\n\n try:\n # From http://tools.ietf.org/html/rfc6749#section-3.2 :\n #\n # Since requests to the token endpoint result in the transmission of\n # clear-text credentials (in the HTTP request and response), the\n # authorization server MUST require the use of TLS as described in\n # Section 1.6 when sending requests to the token endpoint.\n #\n if settings.DJOAUTH2_SSL_ONLY and not request.is_secure():\n raise InvalidRequest('all token requests must use TLS')\n\n # From http://tools.ietf.org/html/rfc6749#section-3.2 :\n #\n # The client MUST use the HTTP \"POST\" method when making access token\n # requests.\n #\n if not request.method == 'POST':\n raise InvalidRequest('all posts must use POST')\n\n client_id = None\n client_secret = None\n\n # Allow client Authentication via HTTP Basic Authentication (\n # http://tools.ietf.org/html/rfc2617#section-2 ) as described by\n # http://tools.ietf.org/html/rfc6749#section-2.3.1 :\n #\n # Clients in possession of a client password MAY use the HTTP Basic\n # authentication scheme as defined in [RFC2617] to authenticate with\n # the authorization server. The client identifier is encoded using the\n # \"application/x-www-form-urlencoded\" encoding algorithm per Appendix\n # B, and the encoded value is used as the username; the client password\n # is encoded using the same algorithm and used as the password. 
The\n # authorization server MUST support the HTTP Basic authentication\n # scheme for authenticating clients that were issued a client password.\n #\n # by accepting an 'Authorization' header like so:\n #\n # Authorization: Basic czZCaGRSa3F0Mzo3RmpmcDBaQnIxS3REUmJuZlZkbUl3\n #\n # where 'czZCaGRSa3F0Mzo3RmpmcDBaQnIxS3REUmJuZlZkbUl3' is the result of\n #\n # base64encode('{client_id}:{client_secret}')\n #\n if 'HTTP_AUTHORIZATION' in request.META:\n try:\n http_authorization = request.META.get('HTTP_AUTHORIZATION', '')\n auth_method, auth_value = http_authorization.strip().split(' ', 1)\n except ValueError:\n raise InvalidRequest('malformed HTTP_AUTHORIZATION header')\n\n if not auth_method == 'Basic':\n raise InvalidRequest('unsupported HTTP_AUTHORIZATION method')\n\n try:\n client_id, client_secret = b64decode(auth_value).split(':')\n except (TypeError, ValueError):\n raise InvalidRequest('malformed HTTP_AUTHORIZATION value')\n\n\n # The 'client_id' and 'client_secret' parameters MUST NOT be included in\n # the request URI (GET parameters), as specified by\n # http://tools.ietf.org/html/rfc6749#section-2.3.1 :\n #\n # The parameters can only be transmitted in the request-body and MUST\n # NOT be included in the request URI.\n #\n if 'client_id' in request.GET or 'client_secret' in request.GET:\n raise InvalidRequest(\n 'must not include \"client_id\" or \"client_secret\" in request URI')\n\n\n # Allow Clients to authenticate via POST request data, as specified by\n # http://tools.ietf.org/html/rfc6749#section-3.2.1 :\n #\n # A client MAY use the \"client_id\" request parameter to identify itself\n # when sending requests to the token endpoint. In the\n # \"authorization_code\" \"grant_type\" request to the token endpoint, an\n # unauthenticated client MUST send its \"client_id\" to prevent itself\n # from inadvertently accepting a code intended for a client with a\n # different \"client_id\". This protects the client from substitution of\n # the authentication code. (It provides no additional security for the\n # protected resource.)\n #\n # Please note that this is NOT RECOMMENDED, and that the client should\n # instead authenticate via the HTTP_AUTHORIZATION header -- see\n # http://tools.ietf.org/html/rfc6749#section-2.3.1 :\n #\n # Alternatively, the authorization server MAY support including the\n # client credentials in the request-body using the following parameters:\n #\n # client_id\n # REQUIRED. The client identifier issued to the client during\n # the registration process described by Section 2.2.\n #\n # client_secret\n # REQUIRED. The client secret. The client MAY omit the\n # parameter if the client secret is an empty string.\n #\n # Including the client credentials in the request-body using the two\n # parameters is NOT RECOMMENDED and SHOULD be limited to clients unable\n # to directly utilize the HTTP Basic authentication scheme (or other\n # password-based HTTP authentication schemes). 
The parameters can only\n # be transmitted in the request-body and MUST NOT be included in the\n # request URI.\n #\n # In the case that the Client has already authenticated with the\n # HTTP_AUTHORIZATION method, ensure that they do not also attempt to\n # authenticate via POST data, as required by\n # http://tools.ietf.org/html/rfc6749#section-2.3 :\n #\n # The client MUST NOT use more than one authentication method in each\n # request.\n #\n if client_id and client_secret:\n if 'client_id' in request.POST or 'client_secret' in request.POST:\n raise InvalidRequest('must use only one authentication method')\n else:\n client_id = request.POST.get('client_id')\n client_secret = request.POST.get('client_secret')\n\n if not (client_id and client_secret):\n raise InvalidRequest('no client authentication provided')\n\n try:\n client = Client.objects.get(key=client_id, secret=client_secret)\n except Client.DoesNotExist:\n raise InvalidClient('client authentication failed')\n\n # The two supported grant types\n grant_type = request.POST.get('grant_type')\n if not grant_type:\n raise InvalidRequest('no \"grant_type\" provided')\n\n if grant_type == 'authorization_code':\n access_token = generate_access_token_from_authorization_code(request,\n client)\n elif grant_type == 'refresh_token':\n access_token = generate_access_token_from_refresh_token(request, client)\n else:\n raise UnsupportedGrantType(\n '\"{}\" is not a supported \"grant_type\"'.format(grant_type))\n\n # Successful response documentation:\n # http://tools.ietf.org/html/rfc6749#section-5.1\n response_data = {\n 'access_token': access_token.value,\n 'expires_in': access_token.lifetime,\n 'token_type': 'bearer', # http://tools.ietf.org/html/rfc6749#section-7.1\n 'scope': ' '.join(access_token.get_scope_names_set()),\n }\n if access_token.refreshable:\n response_data['refresh_token'] = access_token.refresh_token\n\n response = HttpResponse(content=json.dumps(response_data),\n content_type='application/json')\n response.status_code = 200\n response['Cache-Control'] = 'no-store'\n response['Pragma'] = 'no-cache'\n return response\n\n except AccessTokenError as generation_error:\n # Error response documentation:\n # http://tools.ietf.org/html/rfc6749#section-5.2\n error_name = getattr(generation_error,\n 'error_name',\n 'invalid_request')\n error_description = str(generation_error) or 'Invalid Request.'\n response_data = {\n 'error': error_name,\n 'error_description': error_description,\n }\n\n response = HttpResponse(content=json.dumps(response_data),\n content_type='application/json')\n if isinstance(generation_error, InvalidClient):\n response.status_code = 401\n else:\n response.status_code = 400\n\n return response", "def callm(method, param_dict, POST=False, socket_timeout=None, data=None):\n \"\"\"\n Call the api! 
\n Param_dict is a *regular* *python* *dictionary* so if you want to have multi-valued params\n put them in a list.\n \n ** note, if we require 2.6, we can get rid of this timeout munging.\n \"\"\"\n try:\n param_dict['api_key'] = config.ECHO_NEST_API_KEY\n param_list = []\n if not socket_timeout:\n socket_timeout = config.CALL_TIMEOUT\n\n for key,val in param_dict.iteritems():\n if isinstance(val, list):\n param_list.extend( [(key,subval) for subval in val] )\n elif val is not None:\n if isinstance(val, unicode):\n val = val.encode('utf-8')\n param_list.append( (key,val) )\n\n params = urllib.urlencode(param_list)\n\n orig_timeout = socket.getdefaulttimeout()\n socket.setdefaulttimeout(socket_timeout)\n\n if(POST):\n if (not method == 'track/upload') or ((method == 'track/upload') and 'url' in param_dict):\n \"\"\"\n this is a normal POST call\n \"\"\"\n url = 'http://%s/%s/%s/%s' % (config.API_HOST, config.API_SELECTOR,\n config.API_VERSION, method)\n\n if data is None:\n data = ''\n data = urllib.urlencode(data)\n data = \"&\".join([data, params])\n\n f = opener.open(url, data=data)\n else:\n \"\"\"\n upload with a local file is special, as the body of the request is the content of the file,\n and the other parameters stay on the URL\n \"\"\"\n url = '/%s/%s/%s?%s' % (config.API_SELECTOR, config.API_VERSION,\n method, params)\n\n if ':' in config.API_HOST:\n host, port = config.API_HOST.split(':')\n else:\n host = config.API_HOST\n port = 80\n\n if config.TRACE_API_CALLS:\n logger.info(\"%s/%s\" % (host+':'+str(port), url,))\n conn = httplib.HTTPConnection(host, port = port)\n conn.request('POST', url, body = data, headers = dict([('Content-Type', 'application/octet-stream')]+headers))\n f = conn.getresponse()\n\n else:\n \"\"\"\n just a normal GET call\n \"\"\"\n url = 'http://%s/%s/%s/%s?%s' % (config.API_HOST, config.API_SELECTOR, config.API_VERSION,\n method, params)\n\n f = opener.open(url)\n\n socket.setdefaulttimeout(orig_timeout)\n\n # try/except\n response_dict = get_successful_response(f)\n return response_dict\n\n except IOError, e:\n if hasattr(e, 'reason'):\n raise EchoNestIOError(error=e.reason)\n elif hasattr(e, 'code'):\n raise EchoNestIOError(code=e.code)\n else:\n raise", "def cli(env, service, method, parameters, _id, _filters, mask, limit, offset,\n output_python=False):\n \"\"\"Call arbitrary API endpoints with the given SERVICE and METHOD.\n\n Example::\n\n slcli call-api Account getObject\n slcli call-api Account getVirtualGuests --limit=10 --mask=id,hostname\n slcli call-api Virtual_Guest getObject --id=12345\n slcli call-api Metric_Tracking_Object getBandwidthData --id=1234 \\\\\n \"2015-01-01 00:00:00\" \"2015-01-1 12:00:00\" public\n slcli call-api Account getVirtualGuests \\\\\n -f 'virtualGuests.datacenter.name=dal05' \\\\\n -f 'virtualGuests.maxCpu=4' \\\\\n --mask=id,hostname,datacenter.name,maxCpu\n slcli call-api Account getVirtualGuests \\\\\n -f 'virtualGuests.datacenter.name IN dal05,sng01'\n \"\"\"\n\n args = [service, method] + list(parameters)\n kwargs = {\n 'id': _id,\n 'filter': _build_filters(_filters),\n 'mask': mask,\n 'limit': limit,\n 'offset': offset,\n }\n\n if output_python:\n env.out(_build_python_example(args, kwargs))\n else:\n result = env.client.call(*args, **kwargs)\n env.fout(formatting.iter_to_table(result))", "def _call(self, endpoint, data=None):\n \"\"\"\n Make an authorized API call to specified endpoint.\n :param str endpoint: API endpoint's relative URL, eg. 
`/account`.\n :param dict data: POST request data.\n :return: A dictionary or a string with response data.\n \"\"\"\n data = {} if data is None else data\n try:\n data['access_token'] = self.access_token()\n return self._request(endpoint, data)\n except AccessTokenExpired:\n self._cached_access_token = None\n data['access_token'] = self.access_token()\n return self._request(endpoint, data)", "def _apicall(self, method, **params):\n \"\"\"Call an API method and return response data. For more info, see:\n https://ccp.netcup.net/run/webservice/servers/endpoint\"\"\"\n LOGGER.debug('%s(%r)', method, params)\n auth = {\n 'customernumber': self._get_provider_option('auth_customer_id'),\n 'apikey': self._get_provider_option('auth_api_key'),\n }\n if method == 'login':\n auth['apipassword'] = self._get_provider_option('auth_api_password')\n else:\n auth['apisessionid'] = self.api_session_id\n if not all(auth.values()):\n raise Exception('No valid authentication mechanism found')\n data = self._request('POST', url='', data={\n 'action': method,\n 'param': dict(params, **auth),\n })\n if data['status'] != 'success':\n raise Exception(\"{} ({})\".format(\n data['longmessage'], data['statuscode']))\n return data.get('responsedata', {})", "def oauthgetm(method, param_dict, socket_timeout=None):\n try:\n import oauth2 # lazy import this so oauth2 is not a hard dep\n except ImportError:\n raise Exception(\"You must install the python-oauth2 library to use this method.\")\n\n \"\"\"\n Call the api! With Oauth! \n Param_dict is a *regular* *python* *dictionary* so if you want to have multi-valued params\n put them in a list.\n \n ** note, if we require 2.6, we can get rid of this timeout munging.\n \"\"\"\n def build_request(url):\n params = {\n 'oauth_version': \"1.0\",\n 'oauth_nonce': oauth2.generate_nonce(),\n 'oauth_timestamp': int(time.time())\n }\n consumer = oauth2.Consumer(key=config.ECHO_NEST_CONSUMER_KEY, secret=config.ECHO_NEST_SHARED_SECRET)\n params['oauth_consumer_key'] = config.ECHO_NEST_CONSUMER_KEY\n \n req = oauth2.Request(method='GET', url=url, parameters=params)\n signature_method = oauth2.SignatureMethod_HMAC_SHA1()\n req.sign_request(signature_method, consumer, None)\n return req\n \n param_dict['api_key'] = config.ECHO_NEST_API_KEY\n param_list = []\n if not socket_timeout:\n socket_timeout = config.CALL_TIMEOUT\n \n for key,val in param_dict.iteritems():\n if isinstance(val, list):\n param_list.extend( [(key,subval) for subval in val] )\n elif val is not None:\n if isinstance(val, unicode):\n val = val.encode('utf-8')\n param_list.append( (key,val) )\n\n params = urllib.urlencode(param_list)\n \n orig_timeout = socket.getdefaulttimeout()\n socket.setdefaulttimeout(socket_timeout)\n \"\"\"\n just a normal GET call\n \"\"\"\n url = 'http://%s/%s/%s/%s?%s' % (config.API_HOST, config.API_SELECTOR, config.API_VERSION, \n method, params)\n req = build_request(url)\n f = opener.open(req.to_url())\n \n socket.setdefaulttimeout(orig_timeout)\n \n # try/except\n response_dict = get_successful_response(f)\n return response_dict" ]
[ 0.7087173461914062, 0.7060321569442749, 0.7050427794456482, 0.6975470185279846, 0.6972582936286926, 0.6885087490081787, 0.6842185854911804, 0.6802607774734497, 0.6796349883079529, 0.6751108765602112, 0.6748965382575989, 0.6742551922798157 ]
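The `api_call` positive demonstrates two mutually exclusive authentication modes: a `client_id` query parameter or a bearer token in the `Authorization` header. The sketch below keeps that contract but leans on `requests`' built-in status handling; the base URL and parameter names are taken from the snippet, while the function name `call_api` is illustrative rather than the real client API.

```python
import requests


def call_api(method, end_point, params=None, client_id=None, access_token=None):
    """Minimal sketch of the call pattern above: exactly one of client_id
    or access_token must be supplied."""
    if bool(client_id) == bool(access_token):
        raise ValueError('Pass exactly one of client_id or access_token')

    url = 'https://api.quizlet.com/2.0/{}'.format(end_point)
    params = dict(params or {})
    headers = None
    if client_id:
        params['client_id'] = client_id  # application-level access
    else:
        headers = {'Authorization': 'Bearer {}'.format(access_token)}  # user-level access

    response = requests.request(method, url, params=params, headers=headers)
    response.raise_for_status()          # simpler than hand-rolled status checks
    return response.json() if response.content else None


# e.g. call_api('get', 'users/john/sets', client_id='MY_CLIENT_ID')
```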
:return: json with "keyId" as secret and "url" for posting key
def request_upload_secret(self, secret_id): """ :return: json with "keyId" as secret and "url" for posting key """ return self._router.post_request_upload_secret(org_id=self.organizationId, instance_id=self.instanceId, secret_id=secret_id).json()
[ "def POST(self, **kwargs):\n r'''\n Easily generate keys for a minion and auto-accept the new key\n\n Accepts all the same parameters as the :py:func:`key.gen_accept\n <salt.wheel.key.gen_accept>`.\n\n .. note:: A note about ``curl``\n Avoid using the ``-i`` flag or HTTP headers will be written and\n produce an invalid tar file.\n\n Example partial kickstart script to bootstrap a new minion:\n\n .. code-block:: text\n\n %post\n mkdir -p /etc/salt/pki/minion\n curl -sSk https://localhost:8000/keys \\\n -d mid=jerry \\\n -d username=kickstart \\\n -d password=kickstart \\\n -d eauth=pam \\\n | tar -C /etc/salt/pki/minion -xf -\n\n mkdir -p /etc/salt/minion.d\n printf 'master: 10.0.0.5\\nid: jerry' > /etc/salt/minion.d/id.conf\n %end\n\n .. http:post:: /keys\n\n Generate a public and private key and return both as a tarball\n\n Authentication credentials must be passed in the request.\n\n :status 200: |200|\n :status 401: |401|\n :status 406: |406|\n\n **Example request:**\n\n .. code-block:: bash\n\n curl -sSk https://localhost:8000/keys \\\n -d mid=jerry \\\n -d username=kickstart \\\n -d password=kickstart \\\n -d eauth=pam \\\n -o jerry-salt-keys.tar\n\n .. code-block:: text\n\n POST /keys HTTP/1.1\n Host: localhost:8000\n\n **Example response:**\n\n .. code-block:: text\n\n HTTP/1.1 200 OK\n Content-Length: 10240\n Content-Disposition: attachment; filename=\"saltkeys-jerry.tar\"\n Content-Type: application/x-tar\n\n jerry.pub0000644000000000000000000000070300000000000010730 0ustar 00000000000000\n '''\n lowstate = cherrypy.request.lowstate\n lowstate[0].update({\n 'client': 'wheel',\n 'fun': 'key.gen_accept',\n })\n\n if 'mid' in lowstate[0]:\n lowstate[0]['id_'] = lowstate[0].pop('mid')\n\n result = self.exec_lowstate()\n ret = next(result, {}).get('data', {}).get('return', {})\n\n pub_key = ret.get('pub', '')\n pub_key_file = tarfile.TarInfo('minion.pub')\n pub_key_file.size = len(pub_key)\n\n priv_key = ret.get('priv', '')\n priv_key_file = tarfile.TarInfo('minion.pem')\n priv_key_file.size = len(priv_key)\n\n fileobj = BytesIO()\n tarball = tarfile.open(fileobj=fileobj, mode='w')\n\n if six.PY3:\n pub_key = pub_key.encode(__salt_system_encoding__)\n priv_key = priv_key.encode(__salt_system_encoding__)\n\n tarball.addfile(pub_key_file, BytesIO(pub_key))\n tarball.addfile(priv_key_file, BytesIO(priv_key))\n tarball.close()\n\n headers = cherrypy.response.headers\n headers['Content-Disposition'] = 'attachment; filename=\"saltkeys-{0}.tar\"'.format(lowstate[0]['id_'])\n headers['Content-Type'] = 'application/x-tar'\n headers['Content-Length'] = len(fileobj.getvalue())\n headers['Cache-Control'] = 'no-cache'\n\n fileobj.seek(0)\n return fileobj", "def post(self, path, data, is_json=True):\n '''Make a post request with client_id and secret key.'''\n post_data = {\n 'client_id': self.client_id,\n 'secret': self.secret,\n }\n post_data.update(data)\n return self._post(path, post_data, is_json)", "def _the_view_kwd(self, postinfo):\n '''\n Generate the kwd dict for view.\n :param postinfo: the postinfo\n :return: dict\n '''\n kwd = {\n 'pager': '',\n 'url': self.request.uri,\n 'cookie_str': tools.get_uuid(),\n 'daohangstr': '',\n 'signature': postinfo.uid,\n 'tdesc': '',\n 'eval_0': MEvaluation.app_evaluation_count(postinfo.uid, 0),\n 'eval_1': MEvaluation.app_evaluation_count(postinfo.uid, 1),\n 'login': 1 if self.get_current_user() else 0,\n 'has_image': 0,\n 'parentlist': MCategory.get_parent_list(),\n 'parentname': '',\n 'catname': '',\n 'router': router_post[postinfo.kind]\n }\n return 
kwd", "def post_public_key(self, path, data, is_json=True):\n '''Make a post request using a public key.'''\n post_data = {\n 'public_key': self.public_key\n }\n post_data.update(data)\n return self._post(path, post_data, is_json)", "def POST(self, **kwargs):\n '''\n Start an execution command and immediately return the job id\n\n .. http:post:: /minions\n\n :reqheader X-Auth-Token: |req_token|\n :reqheader Accept: |req_accept|\n :reqheader Content-Type: |req_ct|\n\n :resheader Content-Type: |res_ct|\n\n :status 200: |200|\n :status 400: |400|\n :status 401: |401|\n :status 406: |406|\n\n Lowstate data describing Salt commands must be sent in the request\n body. The ``client`` option will be set to\n :py:meth:`~salt.client.LocalClient.local_async`.\n\n **Example request:**\n\n .. code-block:: bash\n\n curl -sSi localhost:8000/minions \\\\\n -b ~/cookies.txt \\\\\n -H \"Accept: application/x-yaml\" \\\\\n -d '[{\"tgt\": \"*\", \"fun\": \"status.diskusage\"}]'\n\n .. code-block:: text\n\n POST /minions HTTP/1.1\n Host: localhost:8000\n Accept: application/x-yaml\n Content-Type: application/json\n\n tgt=*&fun=status.diskusage\n\n **Example response:**\n\n .. code-block:: text\n\n HTTP/1.1 202 Accepted\n Content-Length: 86\n Content-Type: application/x-yaml\n\n return:\n - jid: '20130603122505459265'\n minions: [ms-4, ms-3, ms-2, ms-1, ms-0]\n _links:\n jobs:\n - href: /jobs/20130603122505459265\n '''\n job_data = list(self.exec_lowstate(client='local_async',\n token=cherrypy.session.get('token')))\n\n cherrypy.response.status = 202\n return {\n 'return': job_data,\n '_links': {\n 'jobs': [{'href': '/jobs/{0}'.format(i['jid'])}\n for i in job_data if i],\n },\n }", "def post(self):\n '''\n Start an execution command and immediately return the job id\n\n .. http:post:: /minions\n\n :reqheader X-Auth-Token: |req_token|\n :reqheader Accept: |req_accept|\n :reqheader Content-Type: |req_ct|\n\n :resheader Content-Type: |res_ct|\n\n :status 200: |200|\n :status 401: |401|\n :status 406: |406|\n\n :term:`lowstate` data describing Salt commands must be sent in the\n request body. The ``client`` option will be set to\n :py:meth:`~salt.client.LocalClient.local_async`.\n\n **Example request:**\n\n .. code-block:: bash\n\n curl -sSi localhost:8000/minions \\\\\n -H \"Accept: application/x-yaml\" \\\\\n -d tgt='*' \\\\\n -d fun='status.diskusage'\n\n .. code-block:: text\n\n POST /minions HTTP/1.1\n Host: localhost:8000\n Accept: application/x-yaml\n Content-Length: 26\n Content-Type: application/x-www-form-urlencoded\n\n tgt=*&fun=status.diskusage\n\n **Example response:**\n\n .. 
code-block:: text\n\n HTTP/1.1 202 Accepted\n Content-Length: 86\n Content-Type: application/x-yaml\n\n return:\n - jid: '20130603122505459265'\n minions: [ms-4, ms-3, ms-2, ms-1, ms-0]\n '''\n # if you aren't authenticated, redirect to login\n if not self._verify_auth():\n self.redirect('/login')\n return\n\n # verify that all lowstates are the correct client type\n for low in self.lowstate:\n # if you didn't specify, its fine\n if 'client' not in low:\n low['client'] = 'local_async'\n continue\n # if you specified something else, we don't do that\n if low.get('client') != 'local_async':\n self.set_status(400)\n self.write('We don\\'t serve your kind here')\n self.finish()\n return\n\n self.disbatch()", "def dict(self):\n \"\"\" Returns dictionary of post fields and attributes\n \"\"\"\n post_dict = {\n 'id': self.id,\n 'link': self.link,\n 'permalink': self.permalink,\n 'content_type': self.content_type,\n 'slug': self.slug,\n 'updated': self.updated, #.strftime(conf.GOSCALE_ATOM_DATETIME_FORMAT),\n 'published': self.published, #.strftime(conf.GOSCALE_ATOM_DATETIME_FORMAT),\n 'title': self.title,\n 'description': self.description,\n 'author': self.author,\n 'categories': self.categories[1:-1].split(',') if self.categories else None,\n 'summary': self.summary,\n }\n if self.attributes:\n attributes = simplejson.loads(self.attributes)\n post_dict.update(attributes)\n return post_dict", "def transit_create_key(self, name, convergent_encryption=None, derived=None, exportable=None,\n key_type=None, mount_point='transit'):\n \"\"\"POST /<mount_point>/keys/<name>\n\n :param name:\n :type name:\n :param convergent_encryption:\n :type convergent_encryption:\n :param derived:\n :type derived:\n :param exportable:\n :type exportable:\n :param key_type:\n :type key_type:\n :param mount_point:\n :type mount_point:\n :return:\n :rtype:\n \"\"\"\n url = '/v1/{0}/keys/{1}'.format(mount_point, name)\n params = {}\n if convergent_encryption is not None:\n params['convergent_encryption'] = convergent_encryption\n if derived is not None:\n params['derived'] = derived\n if exportable is not None:\n params['exportable'] = exportable\n if key_type is not None:\n params['type'] = key_type\n\n return self._adapter.post(url, json=params)", "def _post(self, q, payload='', params=''):\n '''Generic POST wrapper including the api_key'''\n if (q[-1] == '/'): q = q[:-1]\n headers = {'Content-Type': 'application/json'}\n r = requests.post('{url}{q}?api_key={key}{params}'.format(url=self.url, q=q, key=self.api_key, params=params),\n headers=headers, data=payload)\n ret = DotDict(r.json())\n if (not r.ok or ('error' in ret and ret.error == True)):\n raise Exception(r.url, r.reason, r.status_code, r.json())\n return DotDict(r.json())", "def post(self, url, data):\n ''' Gives data to database '''\n data.update({'key': self.APIKEY})\n req = r.post(url,\n data = json.dumps(data),\n headers = self.headers,\n auth = self.auth)\n return self.process_request(req)", "def publish_post(self):\n \"\"\"\n If it's a new file, add it.\n Else, update it.\n \"\"\"\n payload = {'content': self.content_base64.decode('utf-8')}\n\n sha_blob = self.get_sha_blob()\n if sha_blob:\n commit_msg = 'ghPublish UPDATE: {}'.format(self.title)\n payload.update(sha=sha_blob)\n payload.update(message=commit_msg)\n else:\n commit_msg = 'ghPublish ADD: {}'.format(self.title)\n payload.update(message=commit_msg)\n\n r = requests.put(self.api_url,\n auth=self.get_auth_details(),\n data=json.dumps(payload))\n try:\n url = r.json()['content']['html_url']\n 
return r.status_code, url\n except KeyError:\n return r.status_code, None", "def PKCS_POST_query(self, req_hook, req_args):\n ''' Generic POST query method '''\n # HTTP POST queries require keyManagerTokens and sessionTokens\n headers = {'Content-Type': 'application/json',\n 'sessionToken': self.__session__,\n 'keyManagerToken': self.__keymngr__}\n\n # HTTP POST query to keymanager authenticate API\n try:\n if req_args is None:\n response = requests.post(self.__url__ + req_hook,\n headers=headers,\n cert=(self.__crt__, self.__key__),\n verify=True)\n else:\n response = requests.post(self.__url__ + req_hook,\n headers=headers,\n data=req_args,\n cert=(self.__crt__, self.__key__),\n verify=True)\n except requests.exceptions.RequestException as err:\n self.logger.error(err)\n return '500', 'Internal Error in PKCS_RESTful.POST_query()'\n # return the token\n self.logger.debug('%s: %s' % (response.status_code, response.text))\n return response.status_code, response.text" ]
[ 0.7206412553787231, 0.7182224988937378, 0.7178894281387329, 0.7159284353256226, 0.7123870849609375, 0.7001036405563354, 0.699505627155304, 0.6994513273239136, 0.6962321400642395, 0.693744421005249, 0.6907731890678406, 0.6809219717979431 ]
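`request_upload_secret` only returns the upload slot (a "keyId" plus a "url" to post the key to); the upload itself is left to the caller. A hypothetical caller-side sketch follows, assuming a plain form POST is accepted at the returned URL; the payload shape and helper name are assumptions, not the platform's documented contract.

```python
import requests


def upload_key(instance, secret_id, key_material):
    """Hypothetical two-step flow: ask the instance for an upload slot,
    then POST the key material to the returned URL."""
    slot = instance.request_upload_secret(secret_id)   # {'keyId': ..., 'url': ...}
    response = requests.post(slot['url'],
                             data={'keyId': slot['keyId'], 'key': key_material})
    response.raise_for_status()
    return slot['keyId']
```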
Function checkAndCreate Check if a subnet exists and create it if not @param key: The targeted subnet @param payload: The targeted subnet description @param domainId: The domainId to be attached with the subnet @return RETURN: The id of the subnet
def checkAndCreate(self, key, payload, domainId):
        """ Function checkAndCreate
        Check if a subnet exists and create it if not

        @param key: The targeted subnet
        @param payload: The targeted subnet description
        @param domainId: The domainId to be attached with the subnet
        @return RETURN: The id of the subnet
        """
        if key not in self:
            self[key] = payload
        oid = self[key]['id']
        if not oid:
            return False
        #~ Ensure subnet contains the domain
        subnetDomainIds = []
        for domain in self[key]['domains']:
            subnetDomainIds.append(domain['id'])
        if domainId not in subnetDomainIds:
            subnetDomainIds.append(domainId)
            self[key]["domain_ids"] = subnetDomainIds
            if len(self[key]["domains"]) != len(subnetDomainIds):
                return False
        return oid
[ "def checkAndCreate(self, key, payload):\n \"\"\" Function checkAndCreate\n Check if an object exists and create it if not\n\n @param key: The targeted object\n @param payload: The targeted object description\n @return RETURN: The id of the object\n \"\"\"\n if key not in self:\n self[key] = payload\n return self[key]['id']", "def checkAndCreate(self, key, payload):\n \"\"\" Function checkAndCreate\n Check if an object exists and create it if not\n\n @param key: The targeted object\n @param payload: The targeted object description\n @return RETURN: The id of the object\n \"\"\"\n if key not in self:\n if 'templates' in payload:\n templates = payload.pop('templates')\n self[key] = payload\n self.reload()\n return self[key]['id']", "def checkAndCreate(self, key, payload, osIds):\n \"\"\" Function checkAndCreate\n Check if an architectures exists and create it if not\n\n @param key: The targeted architectures\n @param payload: The targeted architectures description\n @param osIds: The list of os ids liked with this architecture\n @return RETURN: The id of the object\n \"\"\"\n if key not in self:\n self[key] = payload\n oid = self[key]['id']\n if not oid:\n return False\n #~ To be sure the OS list is good, we ensure our os are in the list\n for os in self[key]['operatingsystems']:\n osIds.add(os['id'])\n self[key][\"operatingsystem_ids\"] = list(osIds)\n if (len(self[key]['operatingsystems']) is not len(osIds)):\n return False\n return oid", "def create_subnet(vpc_id=None, cidr_block=None, vpc_name=None,\n availability_zone=None, subnet_name=None, tags=None,\n region=None, key=None, keyid=None, profile=None, auto_assign_public_ipv4=False):\n '''\n Given a valid VPC ID or Name and a CIDR block, create a subnet for the VPC.\n\n An optional availability zone argument can be provided.\n\n Returns True if the VPC subnet was created and returns False if the VPC subnet was not created.\n\n .. versionchanged:: 2015.8.0\n Added vpc_name argument\n\n CLI Examples:\n\n .. 
code-block:: bash\n\n salt myminion boto_vpc.create_subnet vpc_id='vpc-6b1fe402' \\\\\n subnet_name='mysubnet' cidr_block='10.0.0.0/25'\n salt myminion boto_vpc.create_subnet vpc_name='myvpc' \\\\\n subnet_name='mysubnet', cidr_block='10.0.0.0/25'\n '''\n\n try:\n vpc_id = check_vpc(vpc_id, vpc_name, region, key, keyid, profile)\n if not vpc_id:\n return {'created': False, 'error': {'message': 'VPC {0} does not exist.'.format(vpc_name or vpc_id)}}\n except BotoServerError as e:\n return {'created': False, 'error': __utils__['boto.get_error'](e)}\n\n subnet_object_dict = _create_resource('subnet', name=subnet_name, tags=tags, vpc_id=vpc_id,\n availability_zone=availability_zone,\n cidr_block=cidr_block, region=region, key=key,\n keyid=keyid, profile=profile)\n # if auto_assign_public_ipv4 is requested set that to true using boto3\n if auto_assign_public_ipv4:\n conn3 = _get_conn3(region=region, key=key, keyid=keyid, profile=profile)\n conn3.modify_subnet_attribute(MapPublicIpOnLaunch={'Value': True}, SubnetId=subnet_object_dict['id'])\n return subnet_object_dict", "def create_payload(self):\n \"\"\"Wrap submitted data within an extra dict.\n\n For more information, see `Bugzilla #1151220\n <https://bugzilla.redhat.com/show_bug.cgi?id=1151220>`_.\n\n In addition, rename the ``from_`` field to ``from``.\n\n \"\"\"\n payload = super(Subnet, self).create_payload()\n if 'from_' in payload:\n payload['from'] = payload.pop('from_')\n return {u'subnet': payload}", "def subnet_create(request, network_id, **kwargs):\n \"\"\"Create a subnet on a specified network.\n\n :param request: request context\n :param network_id: network id a subnet is created on\n :param cidr: (optional) subnet IP address range\n :param ip_version: (optional) IP version (4 or 6)\n :param gateway_ip: (optional) IP address of gateway\n :param tenant_id: (optional) tenant id of the subnet created\n :param name: (optional) name of the subnet created\n :param subnetpool_id: (optional) subnetpool to allocate prefix from\n :param prefixlen: (optional) length of prefix to allocate\n :returns: Subnet object\n\n Although both cidr+ip_version and subnetpool_id+preifxlen is listed as\n optional you MUST pass along one of the combinations to get a successful\n result.\n \"\"\"\n LOG.debug(\"subnet_create(): netid=%(network_id)s, kwargs=%(kwargs)s\",\n {'network_id': network_id, 'kwargs': kwargs})\n body = {'subnet': {'network_id': network_id}}\n if 'tenant_id' not in kwargs:\n kwargs['tenant_id'] = request.user.project_id\n body['subnet'].update(kwargs)\n subnet = neutronclient(request).create_subnet(body=body).get('subnet')\n return Subnet(subnet)", "def checkAndCreate(self, key, payload,\n hostgroupConf,\n hostgroupParent,\n puppetClassesId):\n \"\"\" Function checkAndCreate\n check And Create procedure for an hostgroup\n - check the hostgroup is not existing\n - create the hostgroup\n - Add puppet classes from puppetClassesId\n - Add params from hostgroupConf\n\n @param key: The hostgroup name or ID\n @param payload: The description of the hostgroup\n @param hostgroupConf: The configuration of the host group from the\n foreman.conf\n @param hostgroupParent: The id of the parent hostgroup\n @param puppetClassesId: The dict of puppet classes ids in foreman\n @return RETURN: The ItemHostsGroup object of an host\n \"\"\"\n if key not in self:\n self[key] = payload\n oid = self[key]['id']\n if not oid:\n return False\n\n # Create Hostgroup classes\n if 'classes' in hostgroupConf.keys():\n classList = list()\n for c in 
hostgroupConf['classes']:\n classList.append(puppetClassesId[c])\n if not self[key].checkAndCreateClasses(classList):\n print(\"Failed in classes\")\n return False\n\n # Set params\n if 'params' in hostgroupConf.keys():\n if not self[key].checkAndCreateParams(hostgroupConf['params']):\n print(\"Failed in params\")\n return False\n\n return oid", "def create_subnet_group(name, description, subnet_ids, tags=None,\n region=None, key=None, keyid=None, profile=None):\n '''\n Create an RDS subnet group\n\n CLI example to create an RDS subnet group::\n\n salt myminion boto_rds.create_subnet_group my-subnet-group \\\n \"group description\" '[subnet-12345678, subnet-87654321]' \\\n region=us-east-1\n '''\n res = __salt__['boto_rds.subnet_group_exists'](name, tags, region, key,\n keyid, profile)\n if res.get('exists'):\n return {'exists': bool(res)}\n\n try:\n conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)\n if not conn:\n return {'results': bool(conn)}\n\n taglist = _tag_doc(tags)\n rds = conn.create_db_subnet_group(DBSubnetGroupName=name,\n DBSubnetGroupDescription=description,\n SubnetIds=subnet_ids, Tags=taglist)\n\n return {'created': bool(rds)}\n except ClientError as e:\n return {'error': __utils__['boto3.get_error'](e)}", "def create_subnet_group(name, description, subnet_ids=None, subnet_names=None, tags=None,\n region=None, key=None, keyid=None, profile=None):\n '''\n Create an ElastiCache subnet group\n\n CLI example to create an ElastiCache subnet group::\n\n salt myminion boto_elasticache.create_subnet_group my-subnet-group \\\n \"group description\" subnet_ids='[subnet-12345678, subnet-87654321]' \\\n region=us-east-1\n '''\n if not _exactly_one((subnet_ids, subnet_names)):\n raise SaltInvocationError(\"Exactly one of either 'subnet_ids' or \"\n \"'subnet_names' must be provided.\")\n conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)\n if not conn:\n return False\n if subnet_group_exists(name, tags, region, key, keyid, profile):\n return True\n if subnet_names:\n subnet_ids = []\n for n in subnet_names:\n r = __salt__['boto_vpc.get_resource_id']('subnet', n,\n region=region, key=key,\n keyid=keyid, profile=profile)\n if 'id' not in r:\n log.error('Couldn\\'t resolve subnet name %s to an ID.', subnet_name)\n return False\n subnet_ids += [r['id']]\n try:\n ec = conn.create_cache_subnet_group(name, description, subnet_ids)\n if not ec:\n msg = 'Failed to create ElastiCache subnet group {0}'.format(name)\n log.error(msg)\n return False\n log.info('Created ElastiCache subnet group %s', name)\n return True\n except boto.exception.BotoServerError as e:\n log.debug(e)\n msg = 'Failed to create ElastiCache subnet group {0}'.format(name)\n log.error(msg)\n return False", "def create_subnetwork(kwargs=None, call=None):\n '''\n ... versionadded:: 2017.7.0\n Create a GCE Subnetwork. Must specify name, cidr, network, and region.\n\n CLI Example:\n\n .. 
code-block:: bash\n\n salt-cloud -f create_subnetwork gce name=mysubnet network=mynet1 region=us-west1 cidr=10.0.0.0/24 description=optional\n '''\n if call != 'function':\n raise SaltCloudSystemExit(\n 'The create_subnetwork function must be called with -f or --function.'\n )\n\n if not kwargs or 'name' not in kwargs:\n log.error(\n 'Must specify name of subnet.'\n )\n return False\n\n if 'network' not in kwargs:\n log.errror(\n 'Must specify name of network to create subnet under.'\n )\n return False\n\n if 'cidr' not in kwargs:\n log.errror(\n 'A network CIDR range must be specified when creating a subnet.'\n )\n return False\n\n if 'region' not in kwargs:\n log.error(\n 'A region must be specified when creating a subnetwork.'\n )\n return False\n\n name = kwargs['name']\n cidr = kwargs['cidr']\n network = kwargs['network']\n region = kwargs['region']\n desc = kwargs.get('description', None)\n conn = get_conn()\n\n __utils__['cloud.fire_event'](\n 'event',\n 'create subnetwork',\n 'salt/cloud/subnet/creating',\n args={\n 'name': name,\n 'network': network,\n 'cidr': cidr,\n 'region': region,\n 'description': desc\n },\n sock_dir=__opts__['sock_dir'],\n transport=__opts__['transport']\n )\n\n subnet = conn.ex_create_subnetwork(name, cidr, network, region, desc)\n\n __utils__['cloud.fire_event'](\n 'event',\n 'created subnetwork',\n 'salt/cloud/subnet/created',\n args={\n 'name': name,\n 'network': network,\n 'cidr': cidr,\n 'region': region,\n 'description': desc\n },\n sock_dir=__opts__['sock_dir'],\n transport=__opts__['transport']\n )\n\n return _expand_item(subnet)", "def subnet_create(auth=None, **kwargs):\n '''\n Create a subnet\n\n network_name_or_id\n The unique name or ID of the attached network. If a non-unique name is\n supplied, an exception is raised.\n\n cidr\n The CIDR\n\n ip_version\n The IP version, which is 4 or 6.\n\n enable_dhcp : False\n Set to ``True`` if DHCP is enabled and ``False`` if disabled\n\n subnet_name\n The name of the subnet\n\n tenant_id\n The ID of the tenant who owns the network. Only administrative users\n can specify a tenant ID other than their own.\n\n allocation_pools\n A list of dictionaries of the start and end addresses for the\n allocation pools.\n\n gateway_ip\n The gateway IP address. When you specify both ``allocation_pools`` and\n ``gateway_ip``, you must ensure that the gateway IP does not overlap\n with the specified allocation pools.\n\n disable_gateway_ip : False\n Set to ``True`` if gateway IP address is disabled and ``False`` if\n enabled. It is not allowed with ``gateway_ip``.\n\n dns_nameservers\n A list of DNS name servers for the subnet\n\n host_routes\n A list of host route dictionaries for the subnet\n\n ipv6_ra_mode\n IPv6 Router Advertisement mode. Valid values are ``dhcpv6-stateful``,\n ``dhcpv6-stateless``, or ``slaac``.\n\n ipv6_address_mode\n IPv6 address mode. Valid values are ``dhcpv6-stateful``,\n ``dhcpv6-stateless``, or ``slaac``.\n\n use_default_subnetpool\n If ``True``, use the default subnetpool for ``ip_version`` to obtain a\n CIDR. It is required to pass ``None`` to the ``cidr`` argument when\n enabling this option.\n\n CLI Example:\n\n .. 
code-block:: bash\n\n salt '*' neutronng.subnet_create network_name_or_id=network1\n subnet_name=subnet1\n\n salt '*' neutronng.subnet_create subnet_name=subnet2\\\n network_name_or_id=network2 enable_dhcp=True \\\n allocation_pools='[{\"start\": \"192.168.199.2\",\\\n \"end\": \"192.168.199.254\"}]'\\\n gateway_ip='192.168.199.1' cidr=192.168.199.0/24\n\n salt '*' neutronng.subnet_create network_name_or_id=network1 \\\n subnet_name=subnet1 dns_nameservers='[\"8.8.8.8\", \"8.8.8.7\"]'\n\n '''\n cloud = get_operator_cloud(auth)\n kwargs = _clean_kwargs(**kwargs)\n return cloud.create_subnet(**kwargs)", "public ServiceFuture<SubnetInner> createOrUpdateAsync(String resourceGroupName, String virtualNetworkName, String subnetName, SubnetInner subnetParameters, final ServiceCallback<SubnetInner> serviceCallback) {\n return ServiceFuture.fromResponse(createOrUpdateWithServiceResponseAsync(resourceGroupName, virtualNetworkName, subnetName, subnetParameters), serviceCallback);\n }" ]
[ 0.7927488684654236, 0.7818474769592285, 0.7067288756370544, 0.6799399852752686, 0.6731507778167725, 0.6682384014129639, 0.6681324243545532, 0.663805365562439, 0.6622098088264465, 0.6585577726364136, 0.6567074656486511, 0.6497458219528198 ]
Function removeDomain
Delete a domain from a subnet

@param subnetId: The subnet Id
@param domainId: The domainId to be removed from the subnet
@return RETURN: boolean
def removeDomain(self, subnetId, domainId):
    """ Function removeDomain
    Delete a domain from a subnet

    @param subnetId: The subnet Id
    @param domainId: The domainId to be removed from the subnet
    @return RETURN: boolean
    """
    subnetDomainIds = []
    for domain in self[subnetId]['domains']:
        subnetDomainIds.append(domain['id'])
    subnetDomainIds.remove(domainId)
    self[subnetId]["domain_ids"] = subnetDomainIds
    return len(self[subnetId]["domains"]) == len(subnetDomainIds)
[ "public MockSubnet deleteSubnet(final String subnetId) {\n\n if (subnetId != null && allMockSubnets.containsKey(subnetId)) {\n return allMockSubnets.remove(subnetId);\n }\n\n return null;\n }", "def delete_subnet(self, subnet):\n '''\n Deletes the specified subnet\n '''\n subnet_id = self._find_subnet_id(subnet)\n ret = self.network_conn.delete_subnet(subnet=subnet_id)\n return ret if ret else True", "private DeleteSubnetResponseType deleteSubnet(final String subnetId) {\n DeleteSubnetResponseType ret = new DeleteSubnetResponseType();\n ret.setRequestId(UUID.randomUUID().toString());\n mockSubnetController.deleteSubnet(subnetId);\n return ret;\n }", "def delete_subnet(subnet_id=None, subnet_name=None, region=None, key=None,\n keyid=None, profile=None):\n '''\n Given a subnet ID or name, delete the subnet.\n\n Returns True if the subnet was deleted and returns False if the subnet was not deleted.\n\n .. versionchanged:: 2015.8.0\n Added subnet_name argument\n\n CLI Example:\n\n .. code-block:: bash\n\n salt myminion boto_vpc.delete_subnet 'subnet-6a1fe403'\n\n '''\n\n return _delete_resource(resource='subnet', name=subnet_name,\n resource_id=subnet_id, region=region, key=key,\n keyid=keyid, profile=profile)", "def remove_remote_subnet(self, context_id, subnet_id):\n \"\"\"Removes a remote subnet from a tunnel context.\n\n :param int context_id: The id-value representing the context instance.\n :param int subnet_id: The id-value representing the remote subnet.\n :return bool: True if remote subnet removal was successful.\n \"\"\"\n return self.context.removeCustomerSubnetFromNetworkTunnel(subnet_id,\n id=context_id)", "def remove_service_subnet(self, context_id, subnet_id):\n \"\"\"Removes a service subnet from a tunnel context.\n\n :param int context_id: The id-value representing the context instance.\n :param int subnet_id: The id-value representing the service subnet.\n :return bool: True if service subnet removal was successful.\n \"\"\"\n return self.context.removeServiceSubnetFromNetworkTunnel(subnet_id,\n id=context_id)", "def delete_subnet_group(name, region=None, key=None, keyid=None,\n profile=None):\n '''\n Delete an RDS subnet group.\n\n CLI example::\n\n salt myminion boto_rds.delete_subnet_group my-subnet-group \\\n region=us-east-1\n '''\n try:\n conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)\n if not conn:\n return {'results': bool(conn)}\n\n r = conn.delete_db_subnet_group(DBSubnetGroupName=name)\n return {'deleted': bool(r), 'message':\n 'Deleted RDS subnet group {0}.'.format(name)}\n except ClientError as e:\n return {'error': __utils__['boto3.get_error'](e)}", "private void removeDomain(Long domainId, boolean reverse) throws GloboDnsException {\n\t\tif (domainId == null) {\n\t\t\tthrow new GloboDnsException(\"Domain id cannot be null\");\n\t\t}\n\t\t\n\t\tGloboDnsRoot<Domain> globoDnsRoot = this.delete(\"/domains/\" + domainId + \".json\" + (reverse ? 
\"?reverse=true\" : \"\"), false);\n\t\tif (globoDnsRoot == null) {\n\t\t\tthrow new GloboDnsException(\"Invalid response\");\n\t\t}\n\t\treturn;\n\t}", "def checkAndCreate(self, key, payload, domainId):\n \"\"\" Function checkAndCreate\n Check if a subnet exists and create it if not\n\n @param key: The targeted subnet\n @param payload: The targeted subnet description\n @param domainId: The domainId to be attached wiuth the subnet\n @return RETURN: The id of the subnet\n \"\"\"\n if key not in self:\n self[key] = payload\n oid = self[key]['id']\n if not oid:\n return False\n #~ Ensure subnet contains the domain\n subnetDomainIds = []\n for domain in self[key]['domains']:\n subnetDomainIds.append(domain['id'])\n if domainId not in subnetDomainIds:\n subnetDomainIds.append(domainId)\n self[key][\"domain_ids\"] = subnetDomainIds\n if len(self[key][\"domains\"]) is not len(subnetDomainIds):\n return False\n return oid", "public void project_serviceName_network_private_networkId_subnet_subnetId_DELETE(String serviceName, String networkId, String subnetId) throws IOException {\n\t\tString qPath = \"/cloud/project/{serviceName}/network/private/{networkId}/subnet/{subnetId}\";\n\t\tStringBuilder sb = path(qPath, serviceName, networkId, subnetId);\n\t\texec(qPath, \"DELETE\", sb.toString(), null);\n\t}", "def remove_internal_subnet(self, context_id, subnet_id):\n \"\"\"Remove an internal subnet from a tunnel context.\n\n :param int context_id: The id-value representing the context instance.\n :param int subnet_id: The id-value representing the internal subnet.\n :return bool: True if internal subnet removal was successful.\n \"\"\"\n return self.context.removePrivateSubnetFromNetworkTunnel(subnet_id,\n id=context_id)", "def subnet_delete(name, virtual_network, resource_group, **kwargs):\n '''\n .. versionadded:: 2019.2.0\n\n Delete a subnet.\n\n :param name: The name of the subnet to delete.\n\n :param virtual_network: The virtual network name containing the\n subnet.\n\n :param resource_group: The resource group name assigned to the\n virtual network.\n\n CLI Example:\n\n .. code-block:: bash\n\n salt-call azurearm_network.subnet_delete testsubnet testnet testgroup\n\n '''\n result = False\n netconn = __utils__['azurearm.get_client']('network', **kwargs)\n try:\n subnet = netconn.subnets.delete(\n resource_group_name=resource_group,\n virtual_network_name=virtual_network,\n subnet_name=name\n )\n subnet.wait()\n result = True\n except CloudError as exc:\n __utils__['azurearm.log_cloud_error']('network', str(exc), **kwargs)\n\n return result" ]
[ 0.7380985617637634, 0.7272915244102478, 0.7221181392669678, 0.7110066413879395, 0.704708456993103, 0.6778912544250488, 0.6716000437736511, 0.6663321852684021, 0.6658353209495544, 0.6627442240715027, 0.6623795032501221, 0.6595145463943481 ]
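To make the row above easier to follow, here is a small self-contained sketch of the same deletion logic, using a plain dict as a hypothetical stand-in for the subnets collection; all names and sample values are illustrative and not part of the original API.

# Stand-in data: a subnets mapping keyed by subnet id, shaped as implied by the code above.
subnets = {
    42: {"domains": [{"id": 1}, {"id": 2}, {"id": 3}], "domain_ids": []},
}

def remove_domain(subnets, subnet_id, domain_id):
    # Collect the ids currently attached to the subnet, drop the requested one,
    # and write the reduced list back as "domain_ids".
    domain_ids = [domain["id"] for domain in subnets[subnet_id]["domains"]]
    domain_ids.remove(domain_id)
    subnets[subnet_id]["domain_ids"] = domain_ids
    # True only once the cached "domains" list reflects the removal as well.
    return len(subnets[subnet_id]["domains"]) == len(domain_ids)

print(remove_domain(subnets, 42, 2))  # False: "domains" still holds 3 entries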
Mark a callable as exclusive

:param via: factory for a Lock to guard the callable

Guards the callable against being entered again before completion.
Explicitly raises a :py:exc:`RuntimeError` on violation.

:note: If applied to a method, it is exclusive across all instances.
def exclusive(via=threading.Lock):
    """
    Mark a callable as exclusive

    :param via: factory for a Lock to guard the callable

    Guards the callable against being entered again before completion.
    Explicitly raises a :py:exc:`RuntimeError` on violation.

    :note: If applied to a method, it is exclusive across all instances.
    """
    def make_exclusive(fnc):
        fnc_guard = via()

        @functools.wraps(fnc)
        def exclusive_call(*args, **kwargs):
            if fnc_guard.acquire(blocking=False):
                try:
                    return fnc(*args, **kwargs)
                finally:
                    fnc_guard.release()
            else:
                raise RuntimeError('exclusive call to %s violated' % fnc)
        return exclusive_call
    return make_exclusive
[ "def exclusively(f):\n \"\"\"\n Decorate a function to make it thread-safe by serializing invocations\n using a per-instance lock.\n \"\"\"\n @wraps(f)\n def exclusively_f(self, *a, **kw):\n with self._lock:\n return f(self, *a, **kw)\n return exclusively_f", "def cross_validation_lock(obj):\n \"\"\"\n A contextmanager for holding Traited object's cross-validators.\n\n This should be used in circumstances where you want to call _validate, but\n don't want to fire cross-validators.\n \"\"\"\n # TODO: Replace this with usage of public API when\n # https://github.com/ipython/traitlets/pull/166 lands upstream.\n orig = getattr(obj, '_cross_validation_lock', False)\n try:\n obj._cross_validation_lock = True\n yield\n finally:\n obj._cross_validation_lock = orig", "def lock_excl(self, timeout='default'):\n \"\"\"Establish an exclusive lock to the resource.\n\n :param timeout: Absolute time period (in milliseconds) that a resource\n waits to get unlocked by the locking session before\n returning an error. (Defaults to self.timeout)\n\n \"\"\"\n timeout = self.timeout if timeout == 'default' else timeout\n timeout = self._cleanup_timeout(timeout)\n self.visalib.lock(self.session, constants.AccessModes.exclusive_lock, timeout, None)", "def _set_exclude_interface(self, v, load=False):\n \"\"\"\n Setter method for exclude_interface, mapped from YANG variable /mpls_config/router/mpls/mpls_cmds_holder/bypass_lsp/exclude_interface (list)\n If this variable is read-only (config: false) in the\n source YANG file, then _set_exclude_interface is considered as a private\n method. Backends looking to populate this variable should\n do so via calling thisObj._set_exclude_interface() directly.\n \"\"\"\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=YANGListType(\"bypass_exclude_interface_type bypass_exclude_interface_name\",exclude_interface.exclude_interface, yang_name=\"exclude-interface\", rest_name=\"exclude-interface\", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='bypass-exclude-interface-type bypass-exclude-interface-name', extensions={u'tailf-common': {u'info': u'choose the interface to avoid as well as to protect', u'cli-suppress-list-no': None, u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'callpoint': u'MplsBypassLspExcludeInterface'}}), is_container='list', yang_name=\"exclude-interface\", rest_name=\"exclude-interface\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'choose the interface to avoid as well as to protect', u'cli-suppress-list-no': None, u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'callpoint': u'MplsBypassLspExcludeInterface'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='list', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"exclude_interface must be of a type compatible with list\"\"\",\n 'defined-type': \"list\",\n 'generated-type': \"\"\"YANGDynClass(base=YANGListType(\"bypass_exclude_interface_type bypass_exclude_interface_name\",exclude_interface.exclude_interface, yang_name=\"exclude-interface\", rest_name=\"exclude-interface\", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='bypass-exclude-interface-type bypass-exclude-interface-name', extensions={u'tailf-common': {u'info': u'choose the interface to avoid as well as to protect', 
u'cli-suppress-list-no': None, u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'callpoint': u'MplsBypassLspExcludeInterface'}}), is_container='list', yang_name=\"exclude-interface\", rest_name=\"exclude-interface\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'choose the interface to avoid as well as to protect', u'cli-suppress-list-no': None, u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'callpoint': u'MplsBypassLspExcludeInterface'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='list', is_config=True)\"\"\",\n })\n\n self.__exclude_interface = t\n if hasattr(self, '_set'):\n self._set()", "def lock(self, session, lock_type, timeout, requested_key=None):\n \"\"\"Establishes an access mode to the specified resources.\n\n Corresponds to viLock function of the VISA library.\n\n :param session: Unique logical identifier to a session.\n :param lock_type: Specifies the type of lock requested, either Constants.EXCLUSIVE_LOCK or Constants.SHARED_LOCK.\n :param timeout: Absolute time period (in milliseconds) that a resource waits to get unlocked by the\n locking session before returning an error.\n :param requested_key: This parameter is not used and should be set to VI_NULL when lockType is VI_EXCLUSIVE_LOCK.\n :return: access_key that can then be passed to other sessions to share the lock, return value of the library call.\n :rtype: str, :class:`pyvisa.constants.StatusCode`\n \"\"\"\n try:\n sess = self.sessions[session]\n except KeyError:\n return StatusCode.error_invalid_object\n\n return sess.lock(lock_type, timeout, requested_key)", "def lock(library, session, lock_type, timeout, requested_key=None):\n \"\"\"Establishes an access mode to the specified resources.\n\n Corresponds to viLock function of the VISA library.\n\n :param library: the visa library wrapped by ctypes.\n :param session: Unique logical identifier to a session.\n :param lock_type: Specifies the type of lock requested, either constants.AccessModes.exclusive_lock\n or constants.AccessModes.shared_lock.\n :param timeout: Absolute time period (in milliseconds) that a resource waits to get unlocked by the\n locking session before returning an error.\n :param requested_key: This parameter is not used and should be set to VI_NULL when lockType is VI_EXCLUSIVE_LOCK.\n :return: access_key that can then be passed to other sessions to share the lock, return value of the library call.\n :rtype: str, :class:`pyvisa.constants.StatusCode`\n \"\"\"\n if lock_type == constants.AccessModes.exclusive_lock:\n requested_key = None\n access_key = None\n else:\n access_key = create_string_buffer(256)\n ret = library.viLock(session, lock_type, timeout, requested_key, access_key)\n if access_key is None:\n return None, ret\n else:\n return access_key.value, ret", "def _set_exclude(self, exclude):\n \"\"\"Exclude setter.\"\"\"\n if exclude and (\n (not isinstance(exclude, list))\n or (\n isinstance(exclude, list)\n and any([not isinstance(item, str) for item in exclude])\n )\n ):\n raise RuntimeError(\"Argument `exclude` is not valid\")\n self._exclude = exclude", "void doExclusive(Runnable runnable) {\n\n LettuceAssert.notNull(runnable, \"Runnable must not be null\");\n\n doExclusive(() -> {\n runnable.run();\n return null;\n });\n }", "def lock():\n '''\n Attempts an exclusive lock on the candidate configuration. This\n is a non-blocking call.\n\n .. 
note::\n When locking, it is important to remember to call\n :py:func:`junos.unlock <salt.modules.junos.unlock>` once finished. If\n locking during orchestration, remember to include a step in the\n orchestration job to unlock.\n\n CLI Example:\n\n .. code-block:: bash\n\n salt 'device_name' junos.lock\n '''\n conn = __proxy__['junos.conn']()\n ret = {}\n ret['out'] = True\n try:\n conn.cu.lock()\n ret['message'] = \"Successfully locked the configuration.\"\n except jnpr.junos.exception.LockError as exception:\n ret['message'] = 'Could not gain lock due to : \"{0}\"'.format(exception)\n ret['out'] = False\n\n return ret", "def write_locked(*args, **kwargs):\n \"\"\"Acquires & releases a write lock around call into decorated method.\n\n NOTE(harlowja): if no attribute name is provided then by default the\n attribute named '_lock' is looked for (this attribute is expected to be\n a :py:class:`.ReaderWriterLock` object) in the instance object this\n decorator is attached to.\n \"\"\"\n\n def decorator(f):\n attr_name = kwargs.get('lock', '_lock')\n\n @six.wraps(f)\n def wrapper(self, *args, **kwargs):\n rw_lock = getattr(self, attr_name)\n with rw_lock.write_lock():\n return f(self, *args, **kwargs)\n\n return wrapper\n\n # This is needed to handle when the decorator has args or the decorator\n # doesn't have args, python is rather weird here...\n if kwargs or not args:\n return decorator\n else:\n if len(args) == 1:\n return decorator(args[0])\n else:\n return decorator", "def lock(self, atime=30, ltime=5, identifier=None):\n '''Context manager to acquire the namespace global lock.\n\n This is typically used for multi-step registry operations,\n such as a read-modify-write sequence::\n\n with registry.lock() as session:\n d = session.get('dict', 'key')\n del d['traceback']\n session.set('dict', 'key', d)\n\n Callers may provide their own `identifier`; if they do, they\n must ensure that it is reasonably unique (e.g., a UUID).\n Using a stored worker ID that is traceable back to the lock\n holder is a good practice.\n\n :param int atime: maximum time (in seconds) to acquire lock\n :param int ltime: maximum time (in seconds) to own lock\n :param str identifier: worker-unique identifier for the lock\n\n '''\n if identifier is None:\n identifier = nice_identifier()\n if self._acquire_lock(identifier, atime, ltime) != identifier:\n raise LockError(\"could not acquire lock\")\n try:\n self._session_lock_identifier = identifier\n yield self\n finally:\n self._release_lock(identifier)\n self._session_lock_identifier = None", "def via(self, *args):\n \"\"\"\n Creates an empty error to record in the stack\n trace\n \"\"\"\n error = None\n if len(self.errors) > 0:\n error = self._err(\"via\", *args)\n return error" ]
[ 0.6868792772293091, 0.6690714359283447, 0.668178141117096, 0.6552027463912964, 0.6545938849449158, 0.6495324969291687, 0.6395823955535889, 0.6382086873054504, 0.6369533538818359, 0.635248064994812, 0.6320596933364868, 0.6301057934761047 ]
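A minimal usage sketch of the ``exclusive`` decorator from the positive example above, assuming that decorator is in scope (for example, importable from its defining module); the worker and event names are illustrative only. A second call made while the first is still running is rejected with ``RuntimeError``.

import threading
import time

# Assumes `exclusive` (the decorator defined in the positive example above) is in scope.

@exclusive()
def critical_task(started):
    started.set()    # the non-blocking lock is now held by this call
    time.sleep(0.2)  # simulate work while the guard is held

started = threading.Event()
worker = threading.Thread(target=critical_task, args=(started,))
worker.start()
started.wait()       # wait until the first call is inside the guarded section

try:
    critical_task(started)          # re-entry before the first call completes
except RuntimeError as exc:
    print("second call rejected:", exc)

worker.join()        # the first call still finishes normally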
r""" Mark a class as implementing a Service Each Service class must have a ``run`` method, which does not take any arguments. This method is :py:meth:`~.ServiceRunner.adopt`\ ed after the daemon starts, unless * the Service has been garbage collected, or * the ServiceUnit has been :py:meth:`~.ServiceUnit.cancel`\ ed. For each service instance, its :py:class:`~.ServiceUnit` is available at ``service_instance.__service_unit__``.
def service(flavour):
    r"""
    Mark a class as implementing a Service

    Each Service class must have a ``run`` method, which does not take any arguments.
    This method is :py:meth:`~.ServiceRunner.adopt`\ ed after the daemon starts, unless

    * the Service has been garbage collected, or
    * the ServiceUnit has been :py:meth:`~.ServiceUnit.cancel`\ ed.

    For each service instance, its :py:class:`~.ServiceUnit` is available at
    ``service_instance.__service_unit__``.
    """
    def service_unit_decorator(raw_cls):
        __new__ = raw_cls.__new__

        def __new_service__(cls, *args, **kwargs):
            if __new__ is object.__new__:
                self = __new__(cls)
            else:
                self = __new__(cls, *args, **kwargs)
            service_unit = ServiceUnit(self, flavour)
            self.__service_unit__ = service_unit
            return self

        raw_cls.__new__ = __new_service__
        if raw_cls.run.__doc__ is None:
            raw_cls.run.__doc__ = "Service entry point"
        return raw_cls

    return service_unit_decorator
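A brief usage sketch for the decorator above, assuming ``service`` and the ``ServiceUnit`` machinery it relies on are importable from the surrounding module; the ``threading`` flavour and the class name are illustrative assumptions, not taken from the source.

import threading

# Assumes `service` (and the ServiceUnit class it uses) is in scope.

@service(flavour=threading)      # flavour choice is illustrative
class Heartbeat:
    def run(self):
        """Service entry point: emit a single heartbeat."""
        print("heartbeat running")

beat = Heartbeat()
# __new_service__ attached a ServiceUnit describing this instance and its flavour.
print(beat.__service_unit__)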
[ "def run_services(config, *services, **kwargs):\n \"\"\" Serves a number of services for a contextual block.\n The caller can specify a number of service classes then serve them either\n stopping (default) or killing them on exiting the contextual block.\n\n\n Example::\n\n with run_services(config, Foobar, Spam) as runner:\n # interact with services and stop them on exiting the block\n\n # services stopped\n\n\n Additional configuration available to :class:``ServiceRunner`` instances\n can be specified through keyword arguments::\n\n with run_services(config, Foobar, Spam, kill_on_exit=True):\n # interact with services\n\n # services killed\n\n :Parameters:\n config : dict\n Configuration to instantiate the service containers with\n services : service definitions\n Services to be served for the contextual block\n kill_on_exit : bool (default=False)\n If ``True``, run ``kill()`` on the service containers when exiting\n the contextual block. Otherwise ``stop()`` will be called on the\n service containers on exiting the block.\n\n :Returns: The configured :class:`ServiceRunner` instance\n\n \"\"\"\n kill_on_exit = kwargs.pop('kill_on_exit', False)\n\n runner = ServiceRunner(config)\n for service in services:\n runner.add_service(service)\n\n runner.start()\n\n yield runner\n\n if kill_on_exit:\n runner.kill()\n else:\n runner.stop()", "def create_service(cluster=None, serviceName=None, taskDefinition=None, loadBalancers=None, desiredCount=None, clientToken=None, role=None, deploymentConfiguration=None, placementConstraints=None, placementStrategy=None):\n \"\"\"\n Runs and maintains a desired number of tasks from a specified task definition. If the number of tasks running in a service drops below desiredCount , Amazon ECS spawns another copy of the task in the specified cluster. To update an existing service, see UpdateService .\n In addition to maintaining the desired count of tasks in your service, you can optionally run your service behind a load balancer. The load balancer distributes traffic across the tasks that are associated with the service. For more information, see Service Load Balancing in the Amazon EC2 Container Service Developer Guide .\n You can optionally specify a deployment configuration for your service. During a deployment (which is triggered by changing the task definition or the desired count of a service with an UpdateService operation), the service scheduler uses the minimumHealthyPercent and maximumPercent parameters to determine the deployment strategy.\n The minimumHealthyPercent represents a lower limit on the number of your service's tasks that must remain in the RUNNING state during a deployment, as a percentage of the desiredCount (rounded up to the nearest integer). This parameter enables you to deploy without using additional cluster capacity. For example, if your service has a desiredCount of four tasks and a minimumHealthyPercent of 50%, the scheduler can stop two existing tasks to free up cluster capacity before starting two new tasks. Tasks for services that do not use a load balancer are considered healthy if they are in the RUNNING state. Tasks for services that do use a load balancer are considered healthy if they are in the RUNNING state and the container instance they are hosted on is reported as healthy by the load balancer. 
The default value for minimumHealthyPercent is 50% in the console and 100% for the AWS CLI, the AWS SDKs, and the APIs.\n The maximumPercent parameter represents an upper limit on the number of your service's tasks that are allowed in the RUNNING or PENDING state during a deployment, as a percentage of the desiredCount (rounded down to the nearest integer). This parameter enables you to define the deployment batch size. For example, if your service has a desiredCount of four tasks and a maximumPercent value of 200%, the scheduler can start four new tasks before stopping the four older tasks (provided that the cluster resources required to do this are available). The default value for maximumPercent is 200%.\n When the service scheduler launches new tasks, it determines task placement in your cluster using the following logic:\n See also: AWS API Documentation\n \n Examples\n This example creates a service in your default region called ecs-simple-service. The service uses the hello_world task definition and it maintains 10 copies of that task.\n Expected Output:\n This example creates a service in your default region called ecs-simple-service-elb. The service uses the ecs-demo task definition and it maintains 10 copies of that task. You must reference an existing load balancer in the same region by its name.\n Expected Output:\n \n :example: response = client.create_service(\n cluster='string',\n serviceName='string',\n taskDefinition='string',\n loadBalancers=[\n {\n 'targetGroupArn': 'string',\n 'loadBalancerName': 'string',\n 'containerName': 'string',\n 'containerPort': 123\n },\n ],\n desiredCount=123,\n clientToken='string',\n role='string',\n deploymentConfiguration={\n 'maximumPercent': 123,\n 'minimumHealthyPercent': 123\n },\n placementConstraints=[\n {\n 'type': 'distinctInstance'|'memberOf',\n 'expression': 'string'\n },\n ],\n placementStrategy=[\n {\n 'type': 'random'|'spread'|'binpack',\n 'field': 'string'\n },\n ]\n )\n \n \n :type cluster: string\n :param cluster: The short name or full Amazon Resource Name (ARN) of the cluster on which to run your service. If you do not specify a cluster, the default cluster is assumed.\n\n :type serviceName: string\n :param serviceName: [REQUIRED]\n The name of your service. Up to 255 letters (uppercase and lowercase), numbers, hyphens, and underscores are allowed. Service names must be unique within a cluster, but you can have similarly named services in multiple clusters within a region or across multiple regions.\n \n\n :type taskDefinition: string\n :param taskDefinition: [REQUIRED]\n The family and revision (family:revision ) or full Amazon Resource Name (ARN) of the task definition to run in your service. If a revision is not specified, the latest ACTIVE revision is used.\n \n\n :type loadBalancers: list\n :param loadBalancers: A load balancer object representing the load balancer to use with your service. Currently, you are limited to one load balancer or target group per service. After you create a service, the load balancer name or target group ARN, container name, and container port specified in the service definition are immutable.\n For Elastic Load Balancing Classic load balancers, this object must contain the load balancer name, the container name (as it appears in a container definition), and the container port to access from the load balancer. 
When a task from this service is placed on a container instance, the container instance is registered with the load balancer specified here.\n For Elastic Load Balancing Application load balancers, this object must contain the load balancer target group ARN, the container name (as it appears in a container definition), and the container port to access from the load balancer. When a task from this service is placed on a container instance, the container instance and port combination is registered as a target in the target group specified here.\n (dict) --Details on a load balancer that is used with a service.\n targetGroupArn (string) --The full Amazon Resource Name (ARN) of the Elastic Load Balancing target group associated with a service.\n loadBalancerName (string) --The name of a Classic load balancer.\n containerName (string) --The name of the container (as it appears in a container definition) to associate with the load balancer.\n containerPort (integer) --The port on the container to associate with the load balancer. This port must correspond to a containerPort in the service's task definition. Your container instances must allow ingress traffic on the hostPort of the port mapping.\n \n \n\n :type desiredCount: integer\n :param desiredCount: [REQUIRED]\n The number of instantiations of the specified task definition to place and keep running on your cluster.\n \n\n :type clientToken: string\n :param clientToken: Unique, case-sensitive identifier you provide to ensure the idempotency of the request. Up to 32 ASCII characters are allowed.\n\n :type role: string\n :param role: The name or full Amazon Resource Name (ARN) of the IAM role that allows Amazon ECS to make calls to your load balancer on your behalf. This parameter is required if you are using a load balancer with your service. If you specify the role parameter, you must also specify a load balancer object with the loadBalancers parameter.\n If your specified role has a path other than / , then you must either specify the full role ARN (this is recommended) or prefix the role name with the path. For example, if a role with the name bar has a path of /foo/ then you would specify /foo/bar as the role name. For more information, see Friendly Names and Paths in the IAM User Guide .\n \n\n :type deploymentConfiguration: dict\n :param deploymentConfiguration: Optional deployment parameters that control how many tasks run during the deployment and the ordering of stopping and starting tasks.\n maximumPercent (integer) --The upper limit (as a percentage of the service's desiredCount ) of the number of tasks that are allowed in the RUNNING or PENDING state in a service during a deployment. The maximum number of tasks during a deployment is the desiredCount multiplied by maximumPercent /100, rounded down to the nearest integer value.\n minimumHealthyPercent (integer) --The lower limit (as a percentage of the service's desiredCount ) of the number of running tasks that must remain in the RUNNING state in a service during a deployment. The minimum healthy tasks during a deployment is the desiredCount multiplied by minimumHealthyPercent /100, rounded up to the nearest integer value.\n \n\n :type placementConstraints: list\n :param placementConstraints: An array of placement constraint objects to use for tasks in your service. You can specify a maximum of 10 constraints per task (this limit includes constraints in the task definition and those specified at run time).\n (dict) --An object representing a constraint on task placement. 
For more information, see Task Placement Constraints in the Amazon EC2 Container Service Developer Guide .\n type (string) --The type of constraint. Use distinctInstance to ensure that each task in a particular group is running on a different container instance. Use memberOf to restrict selection to a group of valid candidates. Note that distinctInstance is not supported in task definitions.\n expression (string) --A cluster query language expression to apply to the constraint. Note you cannot specify an expression if the constraint type is distinctInstance . For more information, see Cluster Query Language in the Amazon EC2 Container Service Developer Guide .\n \n \n\n :type placementStrategy: list\n :param placementStrategy: The placement strategy objects to use for tasks in your service. You can specify a maximum of 5 strategy rules per service.\n (dict) --The task placement strategy for a task or service. For more information, see Task Placement Strategies in the Amazon EC2 Container Service Developer Guide .\n type (string) --The type of placement strategy. The random placement strategy randomly places tasks on available candidates. The spread placement strategy spreads placement across available candidates evenly based on the field parameter. The binpack strategy places tasks on available candidates that have the least available amount of the resource that is specified with the field parameter. For example, if you binpack on memory, a task is placed on the instance with the least amount of remaining memory (but still enough to run the task).\n field (string) --The field to apply the placement strategy against. For the spread placement strategy, valid values are instanceId (or host , which has the same effect), or any platform or custom attribute that is applied to a container instance, such as attribute:ecs.availability-zone . For the binpack placement strategy, valid values are cpu and memory . For the random placement strategy, this field is not used.\n \n \n\n :rtype: dict\n :return: {\n 'service': {\n 'serviceArn': 'string',\n 'serviceName': 'string',\n 'clusterArn': 'string',\n 'loadBalancers': [\n {\n 'targetGroupArn': 'string',\n 'loadBalancerName': 'string',\n 'containerName': 'string',\n 'containerPort': 123\n },\n ],\n 'status': 'string',\n 'desiredCount': 123,\n 'runningCount': 123,\n 'pendingCount': 123,\n 'taskDefinition': 'string',\n 'deploymentConfiguration': {\n 'maximumPercent': 123,\n 'minimumHealthyPercent': 123\n },\n 'deployments': [\n {\n 'id': 'string',\n 'status': 'string',\n 'taskDefinition': 'string',\n 'desiredCount': 123,\n 'pendingCount': 123,\n 'runningCount': 123,\n 'createdAt': datetime(2015, 1, 1),\n 'updatedAt': datetime(2015, 1, 1)\n },\n ],\n 'roleArn': 'string',\n 'events': [\n {\n 'id': 'string',\n 'createdAt': datetime(2015, 1, 1),\n 'message': 'string'\n },\n ],\n 'createdAt': datetime(2015, 1, 1),\n 'placementConstraints': [\n {\n 'type': 'distinctInstance'|'memberOf',\n 'expression': 'string'\n },\n ],\n 'placementStrategy': [\n {\n 'type': 'random'|'spread'|'binpack',\n 'field': 'string'\n },\n ]\n }\n }\n \n \n :returns: \n cluster (string) -- The short name or full Amazon Resource Name (ARN) of the cluster on which to run your service. If you do not specify a cluster, the default cluster is assumed.\n serviceName (string) -- [REQUIRED]\n The name of your service. Up to 255 letters (uppercase and lowercase), numbers, hyphens, and underscores are allowed. 
Service names must be unique within a cluster, but you can have similarly named services in multiple clusters within a region or across multiple regions.\n \n taskDefinition (string) -- [REQUIRED]\n The family and revision (family:revision ) or full Amazon Resource Name (ARN) of the task definition to run in your service. If a revision is not specified, the latest ACTIVE revision is used.\n \n loadBalancers (list) -- A load balancer object representing the load balancer to use with your service. Currently, you are limited to one load balancer or target group per service. After you create a service, the load balancer name or target group ARN, container name, and container port specified in the service definition are immutable.\n For Elastic Load Balancing Classic load balancers, this object must contain the load balancer name, the container name (as it appears in a container definition), and the container port to access from the load balancer. When a task from this service is placed on a container instance, the container instance is registered with the load balancer specified here.\n For Elastic Load Balancing Application load balancers, this object must contain the load balancer target group ARN, the container name (as it appears in a container definition), and the container port to access from the load balancer. When a task from this service is placed on a container instance, the container instance and port combination is registered as a target in the target group specified here.\n \n (dict) --Details on a load balancer that is used with a service.\n \n targetGroupArn (string) --The full Amazon Resource Name (ARN) of the Elastic Load Balancing target group associated with a service.\n \n loadBalancerName (string) --The name of a Classic load balancer.\n \n containerName (string) --The name of the container (as it appears in a container definition) to associate with the load balancer.\n \n containerPort (integer) --The port on the container to associate with the load balancer. This port must correspond to a containerPort in the service's task definition. Your container instances must allow ingress traffic on the hostPort of the port mapping.\n \n \n \n \n \n desiredCount (integer) -- [REQUIRED]\n The number of instantiations of the specified task definition to place and keep running on your cluster.\n \n clientToken (string) -- Unique, case-sensitive identifier you provide to ensure the idempotency of the request. Up to 32 ASCII characters are allowed.\n role (string) -- The name or full Amazon Resource Name (ARN) of the IAM role that allows Amazon ECS to make calls to your load balancer on your behalf. This parameter is required if you are using a load balancer with your service. If you specify the role parameter, you must also specify a load balancer object with the loadBalancers parameter.\n If your specified role has a path other than / , then you must either specify the full role ARN (this is recommended) or prefix the role name with the path. For example, if a role with the name bar has a path of /foo/ then you would specify /foo/bar as the role name. 
For more information, see Friendly Names and Paths in the IAM User Guide .\n \n deploymentConfiguration (dict) -- Optional deployment parameters that control how many tasks run during the deployment and the ordering of stopping and starting tasks.\n \n maximumPercent (integer) --The upper limit (as a percentage of the service's desiredCount ) of the number of tasks that are allowed in the RUNNING or PENDING state in a service during a deployment. The maximum number of tasks during a deployment is the desiredCount multiplied by maximumPercent /100, rounded down to the nearest integer value.\n \n minimumHealthyPercent (integer) --The lower limit (as a percentage of the service's desiredCount ) of the number of running tasks that must remain in the RUNNING state in a service during a deployment. The minimum healthy tasks during a deployment is the desiredCount multiplied by minimumHealthyPercent /100, rounded up to the nearest integer value.\n \n \n \n placementConstraints (list) -- An array of placement constraint objects to use for tasks in your service. You can specify a maximum of 10 constraints per task (this limit includes constraints in the task definition and those specified at run time).\n \n (dict) --An object representing a constraint on task placement. For more information, see Task Placement Constraints in the Amazon EC2 Container Service Developer Guide .\n \n type (string) --The type of constraint. Use distinctInstance to ensure that each task in a particular group is running on a different container instance. Use memberOf to restrict selection to a group of valid candidates. Note that distinctInstance is not supported in task definitions.\n \n expression (string) --A cluster query language expression to apply to the constraint. Note you cannot specify an expression if the constraint type is distinctInstance . For more information, see Cluster Query Language in the Amazon EC2 Container Service Developer Guide .\n \n \n \n \n \n placementStrategy (list) -- The placement strategy objects to use for tasks in your service. You can specify a maximum of 5 strategy rules per service.\n \n (dict) --The task placement strategy for a task or service. For more information, see Task Placement Strategies in the Amazon EC2 Container Service Developer Guide .\n \n type (string) --The type of placement strategy. The random placement strategy randomly places tasks on available candidates. The spread placement strategy spreads placement across available candidates evenly based on the field parameter. The binpack strategy places tasks on available candidates that have the least available amount of the resource that is specified with the field parameter. For example, if you binpack on memory, a task is placed on the instance with the least amount of remaining memory (but still enough to run the task).\n \n field (string) --The field to apply the placement strategy against. For the spread placement strategy, valid values are instanceId (or host , which has the same effect), or any platform or custom attribute that is applied to a container instance, such as attribute:ecs.availability-zone . For the binpack placement strategy, valid values are cpu and memory . 
For the random placement strategy, this field is not used.\n \n \n \n \n \n \n \"\"\"\n pass", "def generic_service_main(cls: Type[WindowsService], name: str) -> None:\n \"\"\"\n Call this from your command-line entry point to manage a service.\n\n - Via inherited functions, enables you to ``install``, ``update``,\n ``remove``, ``start``, ``stop``, and ``restart`` the service.\n - Via our additional code, allows you to run the service function directly\n from the command line in debug mode, using the ``debug`` command.\n - Run with an invalid command like ``help`` to see help (!).\n\n See\n https://mail.python.org/pipermail/python-win32/2008-April/007299.html\n\n Args:\n cls: class deriving from :class:`WindowsService`\n name: name of this service\n \"\"\"\n argc = len(sys.argv)\n if argc == 1:\n try:\n print(\"Trying to start service directly...\")\n evtsrc_dll = os.path.abspath(servicemanager.__file__)\n # noinspection PyUnresolvedReferences\n servicemanager.PrepareToHostSingle(cls) # <-- sets up the service\n # noinspection PyUnresolvedReferences\n servicemanager.Initialize(name, evtsrc_dll)\n # noinspection PyUnresolvedReferences\n servicemanager.StartServiceCtrlDispatcher()\n except win32service.error as details:\n print(\"Failed: {}\".format(details))\n # print(repr(details.__dict__))\n errnum = details.winerror\n if errnum == winerror.ERROR_FAILED_SERVICE_CONTROLLER_CONNECT:\n win32serviceutil.usage()\n elif argc == 2 and sys.argv[1] == 'debug':\n s = cls()\n s.run_debug()\n else:\n win32serviceutil.HandleCommandLine(cls)", "def service_execution(self, name=None, pk=None, scope=None, service=None, **kwargs):\n \"\"\"\n Retrieve single KE-chain ServiceExecution.\n\n Uses the same interface as the :func:`service_executions` method but returns only a single\n pykechain :class:`models.ServiceExecution` instance.\n\n If additional `keyword=value` arguments are provided, these are added to the request parameters. Please\n refer to the documentation of the KE-chain API for additional query parameters.\n\n :param name: (optional) name to limit the search for\n :type name: basestring or None\n :param pk: (optional) primary key or id (UUID) of the service to search for\n :type pk: basestring or None\n :param scope: (optional) id (UUID) of the scope to search in\n :type scope: basestring or None\n :param kwargs: (optional) additional search keyword arguments\n :type kwargs: dict or None\n :return: a single :class:`models.ServiceExecution` object\n :raises NotFoundError: When no `ServiceExecution` object is found\n :raises MultipleFoundError: When more than a single `ServiceExecution` object is found\n \"\"\"\n _service_executions = self.service_executions(name=name, pk=pk, scope=scope, service=service, **kwargs)\n\n if len(_service_executions) == 0:\n raise NotFoundError(\"No service execution fits criteria\")\n if len(_service_executions) != 1:\n raise MultipleFoundError(\"Multiple service executions fit criteria\")\n\n return _service_executions[0]", "def run(classes, args=None, msg_stream=sys.stdout,\n verbose=False, util=None, event_loop=None,\n post_init_callback=None, green_mode=None,\n raises=False):\n \"\"\"\n Provides a simple way to run a tango server. 
It handles exceptions\n by writting a message to the msg_stream.\n\n The `classes` parameter can be either a sequence of:\n\n * :class:`~tango.server.Device` or\n * a sequence of two elements\n :class:`~tango.DeviceClass`, :class:`~tango.DeviceImpl` or\n * a sequence of three elements\n :class:`~tango.DeviceClass`, :class:`~tango.DeviceImpl`,\n tango class name (str)\n\n or a dictionary where:\n\n * key is the tango class name\n * value is either:\n * a :class:`~tango.server.Device` class or\n * a sequence of two elements\n :class:`~tango.DeviceClass`, :class:`~tango.DeviceImpl`\n or\n * a sequence of three elements\n :class:`~tango.DeviceClass`, :class:`~tango.DeviceImpl`,\n tango class name (str)\n\n The optional `post_init_callback` can be a callable (without\n arguments) or a tuple where the first element is the callable,\n the second is a list of arguments (optional) and the third is a\n dictionary of keyword arguments (also optional).\n\n .. note::\n the order of registration of tango classes defines the order\n tango uses to initialize the corresponding devices.\n if using a dictionary as argument for classes be aware that the\n order of registration becomes arbitrary. If you need a\n predefined order use a sequence or an OrderedDict.\n\n Example 1: registering and running a PowerSupply inheriting from\n :class:`~tango.server.Device`::\n\n from tango.server import Device, DeviceMeta, run\n\n class PowerSupply(Device):\n pass\n\n run((PowerSupply,))\n\n Example 2: registering and running a MyServer defined by tango\n classes `MyServerClass` and `MyServer`::\n\n from tango import Device_4Impl, DeviceClass\n from tango.server import run\n\n class MyServer(Device_4Impl):\n pass\n\n class MyServerClass(DeviceClass):\n pass\n\n run({'MyServer': (MyServerClass, MyServer)})\n\n Example 3: registering and running a MyServer defined by tango\n classes `MyServerClass` and `MyServer`::\n\n from tango import Device_4Impl, DeviceClass\n from tango.server import Device, DeviceMeta, run\n\n class PowerSupply(Device):\n pass\n\n class MyServer(Device_4Impl):\n pass\n\n class MyServerClass(DeviceClass):\n pass\n\n run([PowerSupply, [MyServerClass, MyServer]])\n # or: run({'MyServer': (MyServerClass, MyServer)})\n\n :param classes:\n a sequence of :class:`~tango.server.Device` classes or\n a dictionary where keyword is the tango class name and value\n is a sequence of Tango Device Class python class, and Tango\n Device python class\n :type classes: sequence or dict\n\n :param args:\n list of command line arguments [default: None, meaning use\n sys.argv]\n :type args: list\n\n :param msg_stream:\n stream where to put messages [default: sys.stdout]\n\n :param util:\n PyTango Util object [default: None meaning create a Util\n instance]\n :type util: :class:`~tango.Util`\n\n :param event_loop: event_loop callable\n :type event_loop: callable\n\n :param post_init_callback:\n an optional callback that is executed between the calls\n Util.server_init and Util.server_run\n :type post_init_callback:\n callable or tuple (see description above)\n\n :param raises:\n Disable error handling and propagate exceptions from the server\n :type raises: bool\n\n :return: The Util singleton object\n :rtype: :class:`~tango.Util`\n\n .. versionadded:: 8.1.2\n\n .. versionchanged:: 8.1.4\n when classes argument is a sequence, the items can also be\n a sequence <TangoClass, TangoClassClass>[, tango class name]\n\n .. 
versionchanged:: 9.2.2\n `raises` argument has been added\n \"\"\"\n server_run = functools.partial(\n __server_run, classes,\n args=args, msg_stream=msg_stream,\n util=util, event_loop=event_loop,\n post_init_callback=post_init_callback,\n green_mode=green_mode)\n # Run the server without error handling\n if raises:\n return server_run()\n # Run the server with error handling\n write = msg_stream.write if msg_stream else lambda msg: None\n try:\n return server_run()\n except KeyboardInterrupt:\n write(\"Exiting: Keyboard interrupt\\n\")\n except DevFailed as df:\n write(\"Exiting: Server exited with tango.DevFailed:\\n\" +\n str(df) + \"\\n\")\n if verbose:\n write(traceback.format_exc())\n except Exception as e:\n write(\"Exiting: Server exited with unforseen exception:\\n\" +\n str(e) + \"\\n\")\n if verbose:\n write(traceback.format_exc())\n write(\"\\nExited\\n\")", "def process_services(self, device_ids=None, removed_devices_info=None):\n \"\"\"Process services managed by this config agent.\n\n This method is invoked by any of three scenarios.\n\n 1. Invoked by a periodic task running every `RPC_LOOP_INTERVAL`\n seconds. This is the most common scenario.\n In this mode, the method is called without any arguments.\n\n 2. Called by the `_process_backlogged_hosting_devices()` as part of\n the backlog processing task. In this mode, a list of device_ids\n are passed as arguments. These are the list of backlogged\n hosting devices that are now reachable and we want to sync services\n on them.\n\n 3. Called by the `hosting_devices_removed()` method. This is when\n the config agent has received a notification from the plugin that\n some hosting devices are going to be removed. The payload contains\n the details of the hosting devices and the associated neutron\n resources on them which should be processed and removed.\n\n To avoid race conditions with these scenarios, this function is\n protected by a lock.\n\n This method goes on to invoke `process_service()` on the\n different service helpers.\n\n :param device_ids: List of devices that are now available and needs to\n be processed\n :param removed_devices_info: Info about the hosting devices which are\n going to be removed and details of the resources hosted on them.\n Expected Format::\n\n {\n 'hosting_data': {'hd_id1': {'routers': [id1, id2, ...]},\n 'hd_id2': {'routers': [id3, id4, ...]}, ...},\n 'deconfigure': True/False\n }\n\n :returns: None\n\n \"\"\"\n LOG.debug(\"Processing services started\")\n # Now we process only routing service, additional services will be\n # added in future\n if self.routing_service_helper:\n self.routing_service_helper.process_service(device_ids,\n removed_devices_info)\n else:\n LOG.warning(\"No routing service helper loaded\")\n LOG.debug(\"Processing services completed\")", "def _load_service_containers(self, service, configs, use_cache):\n \"\"\"\n :param service:\n :return:\n \"\"\"\n if not isinstance(service, Service):\n raise TypeError(\"service must of an instance of Service\")\n\n if not service.containers:\n container_name = self._container_registration(service.alias)\n\n if service.dependencies:\n self._load_dependency_containers(service)\n\n if not service.cargo:\n self._load_service_cargo(service, configs, use_cache)\n\n self._update_container_host_config(service)\n service.containers[container_name] = Container(\n self._client_session,\n container_name,\n service.cargo.id,\n container_config=service.container_config.to_dict(),\n host_config=service.host_config.to_dict()\n )", "def 
service(self):\n \"\"\" Returns a Splunk service object for this command invocation or None.\n\n The service object is created from the Splunkd URI and authentication token passed to the command invocation in\n the search results info file. This data is not passed to a command invocation by default. You must request it by\n specifying this pair of configuration settings in commands.conf:\n\n .. code-block:: python\n enableheader = true\n requires_srinfo = true\n\n The :code:`enableheader` setting is :code:`true` by default. Hence, you need not set it. The\n :code:`requires_srinfo` setting is false by default. Hence, you must set it.\n\n :return: :class:`splunklib.client.Service`, if :code:`enableheader` and :code:`requires_srinfo` are both\n :code:`true`. Otherwise, if either :code:`enableheader` or :code:`requires_srinfo` are :code:`false`, a value\n of :code:`None` is returned.\n\n \"\"\"\n if self._service is not None:\n return self._service\n\n metadata = self._metadata\n\n if metadata is None:\n return None\n\n try:\n searchinfo = self._metadata.searchinfo\n except AttributeError:\n return None\n\n splunkd_uri = searchinfo.splunkd_uri\n\n if splunkd_uri is None:\n return None\n\n uri = urlsplit(splunkd_uri, allow_fragments=False)\n\n self._service = Service(\n scheme=uri.scheme, host=uri.hostname, port=uri.port, app=searchinfo.app, token=searchinfo.session_key)\n\n return self._service", "def service(self):\n \"\"\" Returns a Splunk service object for this command invocation or None.\n\n The service object is created from the Splunkd URI and authentication\n token passed to the command invocation in the search results info file.\n This data is not passed to a command invocation by default. You must\n request it by specifying this pair of configuration settings in\n commands.conf:\n\n .. code-block:: python\n enableheader=true\n requires_srinfo=true\n\n The :code:`enableheader` setting is :code:`true` by default. Hence, you\n need not set it. The :code:`requires_srinfo` setting is false by\n default. Hence, you must set it.\n\n :return: :class:`splunklib.client.Service`, if :code:`enableheader` and\n :code:`requires_srinfo` are both :code:`true`. Otherwise, if either\n :code:`enableheader` or :code:`requires_srinfo` are :code:`false`,\n a value of :code:`None` is returned.\n\n \"\"\"\n if self._service is not None:\n return self._service\n\n info = self.search_results_info\n\n if info is None:\n return None\n\n splunkd = urlsplit(info.splunkd_uri, info.splunkd_protocol, allow_fragments=False)\n\n self._service = Service(\n scheme=splunkd.scheme, host=splunkd.hostname, port=splunkd.port, token=info.auth_token, app=info.ppc_app)\n\n return self._service", "def of_service(config):\n \"\"\"Connect to an IBM Streams service instance running in IBM Cloud Private for Data.\n\n The instance is specified in `config`. The configuration may be code injected from the list of services\n in a Jupyter notebook running in ICPD or manually created. 
The code that selects a service instance by name is::\n\n # Two lines are code injected in a Jupyter notebook by selecting the service instance\n from icpd_core import ipcd_util\n cfg = icpd_util.get_service_details(name='instanceName')\n\n instance = Instance.of_service(cfg)\n\n SSL host verification is disabled by setting :py:const:`~streamsx.topology.context.ConfigParams.SSL_VERIFY`\n to ``False`` within `config` before calling this method::\n\n cfg[ConfigParams.SSL_VERIFY] = False\n instance = Instance.of_service(cfg)\n\n Args:\n config(dict): Configuration of IBM Streams service instance.\n\n Returns:\n Instance: Instance representing for IBM Streams service instance.\n\n .. note:: Only supported when running within the ICPD cluster,\n for example in a Jupyter notebook within a ICPD project.\n\n .. versionadded:: 1.12\n \"\"\"\n service = Instance._find_service_def(config)\n if not service:\n raise ValueError()\n endpoint = service['connection_info'].get('serviceRestEndpoint')\n resource_url, name = Instance._root_from_endpoint(endpoint)\n\n sc = streamsx.rest.StreamsConnection(resource_url=resource_url, auth=_ICPDAuthHandler(name, service['service_token']))\n if streamsx.topology.context.ConfigParams.SSL_VERIFY in config:\n sc.session.verify = config[streamsx.topology.context.ConfigParams.SSL_VERIFY]\n return sc.get_instance(name)", "def define_unit(\n symbol, value, tex_repr=None, offset=None, prefixable=False, registry=None\n):\n \"\"\"\n Define a new unit and add it to the specified unit registry.\n\n Parameters\n ----------\n symbol : string\n The symbol for the new unit.\n value : tuple or :class:`unyt.array.unyt_quantity`\n The definition of the new unit in terms of some other units. For\n example, one would define a new \"mph\" unit with ``(1.0, \"mile/hr\")``\n or with ``1.0*unyt.mile/unyt.hr``\n tex_repr : string, optional\n The LaTeX representation of the new unit. If one is not supplied, it\n will be generated automatically based on the symbol string.\n offset : float, optional\n The default offset for the unit. If not set, an offset of 0 is assumed.\n prefixable : boolean, optional\n Whether or not the new unit can use SI prefixes. Default: False\n registry : :class:`unyt.unit_registry.UnitRegistry` or None\n The unit registry to add the unit to. If None, then defaults to the\n global default unit registry. If registry is set to None then the\n unit object will be added as an attribute to the top-level :mod:`unyt`\n namespace to ease working with the newly defined unit. 
See the example\n below.\n\n Examples\n --------\n >>> from unyt import day\n >>> two_weeks = 14.0*day\n >>> one_day = 1.0*day\n >>> define_unit(\"two_weeks\", two_weeks)\n >>> from unyt import two_weeks\n >>> print((3*two_weeks)/one_day)\n 42.0 dimensionless\n \"\"\"\n from unyt.array import unyt_quantity, _iterable\n import unyt\n\n if registry is None:\n registry = default_unit_registry\n if symbol in registry:\n raise RuntimeError(\n \"Unit symbol '%s' already exists in the provided \" \"registry\" % symbol\n )\n if not isinstance(value, unyt_quantity):\n if _iterable(value) and len(value) == 2:\n value = unyt_quantity(value[0], value[1], registry=registry)\n else:\n raise RuntimeError(\n '\"value\" needs to be a quantity or ' \"(value, unit) tuple!\"\n )\n base_value = float(value.in_base(unit_system=\"mks\"))\n dimensions = value.units.dimensions\n registry.add(\n symbol,\n base_value,\n dimensions,\n prefixable=prefixable,\n tex_repr=tex_repr,\n offset=offset,\n )\n if registry is default_unit_registry:\n u = Unit(symbol, registry=registry)\n setattr(unyt, symbol, u)", "def run_instances(DryRun=None, ImageId=None, MinCount=None, MaxCount=None, KeyName=None, SecurityGroups=None, SecurityGroupIds=None, UserData=None, InstanceType=None, Placement=None, KernelId=None, RamdiskId=None, BlockDeviceMappings=None, Monitoring=None, SubnetId=None, DisableApiTermination=None, InstanceInitiatedShutdownBehavior=None, PrivateIpAddress=None, Ipv6Addresses=None, Ipv6AddressCount=None, ClientToken=None, AdditionalInfo=None, NetworkInterfaces=None, IamInstanceProfile=None, EbsOptimized=None, TagSpecifications=None):\n \"\"\"\n Launches the specified number of instances using an AMI for which you have permissions.\n You can specify a number of options, or leave the default options. The following rules apply:\n To ensure faster instance launches, break up large requests into smaller batches. For example, create 5 separate launch requests for 100 instances each instead of 1 launch request for 500 instances.\n An instance is ready for you to use when it's in the running state. You can check the state of your instance using DescribeInstances . You can tag instances and EBS volumes during launch, after launch, or both. For more information, see CreateTags and Tagging Your Amazon EC2 Resources .\n Linux instances have access to the public key of the key pair at boot. You can use this key to provide secure access to the instance. Amazon EC2 public images use this feature to provide secure access without passwords. 
For more information, see Key Pairs in the Amazon Elastic Compute Cloud User Guide .\n For troubleshooting, see What To Do If An Instance Immediately Terminates , and Troubleshooting Connecting to Your Instance in the Amazon Elastic Compute Cloud User Guide .\n See also: AWS API Documentation\n \n \n :example: response = client.run_instances(\n DryRun=True|False,\n ImageId='string',\n MinCount=123,\n MaxCount=123,\n KeyName='string',\n SecurityGroups=[\n 'string',\n ],\n SecurityGroupIds=[\n 'string',\n ],\n UserData='string',\n InstanceType='t1.micro'|'t2.nano'|'t2.micro'|'t2.small'|'t2.medium'|'t2.large'|'t2.xlarge'|'t2.2xlarge'|'m1.small'|'m1.medium'|'m1.large'|'m1.xlarge'|'m3.medium'|'m3.large'|'m3.xlarge'|'m3.2xlarge'|'m4.large'|'m4.xlarge'|'m4.2xlarge'|'m4.4xlarge'|'m4.10xlarge'|'m4.16xlarge'|'m2.xlarge'|'m2.2xlarge'|'m2.4xlarge'|'cr1.8xlarge'|'r3.large'|'r3.xlarge'|'r3.2xlarge'|'r3.4xlarge'|'r3.8xlarge'|'r4.large'|'r4.xlarge'|'r4.2xlarge'|'r4.4xlarge'|'r4.8xlarge'|'r4.16xlarge'|'x1.16xlarge'|'x1.32xlarge'|'i2.xlarge'|'i2.2xlarge'|'i2.4xlarge'|'i2.8xlarge'|'i3.large'|'i3.xlarge'|'i3.2xlarge'|'i3.4xlarge'|'i3.8xlarge'|'i3.16xlarge'|'hi1.4xlarge'|'hs1.8xlarge'|'c1.medium'|'c1.xlarge'|'c3.large'|'c3.xlarge'|'c3.2xlarge'|'c3.4xlarge'|'c3.8xlarge'|'c4.large'|'c4.xlarge'|'c4.2xlarge'|'c4.4xlarge'|'c4.8xlarge'|'cc1.4xlarge'|'cc2.8xlarge'|'g2.2xlarge'|'g2.8xlarge'|'cg1.4xlarge'|'p2.xlarge'|'p2.8xlarge'|'p2.16xlarge'|'d2.xlarge'|'d2.2xlarge'|'d2.4xlarge'|'d2.8xlarge'|'f1.2xlarge'|'f1.16xlarge',\n Placement={\n 'AvailabilityZone': 'string',\n 'GroupName': 'string',\n 'Tenancy': 'default'|'dedicated'|'host',\n 'HostId': 'string',\n 'Affinity': 'string'\n },\n KernelId='string',\n RamdiskId='string',\n BlockDeviceMappings=[\n {\n 'VirtualName': 'string',\n 'DeviceName': 'string',\n 'Ebs': {\n 'SnapshotId': 'string',\n 'VolumeSize': 123,\n 'DeleteOnTermination': True|False,\n 'VolumeType': 'standard'|'io1'|'gp2'|'sc1'|'st1',\n 'Iops': 123,\n 'Encrypted': True|False\n },\n 'NoDevice': 'string'\n },\n ],\n Monitoring={\n 'Enabled': True|False\n },\n SubnetId='string',\n DisableApiTermination=True|False,\n InstanceInitiatedShutdownBehavior='stop'|'terminate',\n PrivateIpAddress='string',\n Ipv6Addresses=[\n {\n 'Ipv6Address': 'string'\n },\n ],\n Ipv6AddressCount=123,\n ClientToken='string',\n AdditionalInfo='string',\n NetworkInterfaces=[\n {\n 'NetworkInterfaceId': 'string',\n 'DeviceIndex': 123,\n 'SubnetId': 'string',\n 'Description': 'string',\n 'PrivateIpAddress': 'string',\n 'Groups': [\n 'string',\n ],\n 'DeleteOnTermination': True|False,\n 'PrivateIpAddresses': [\n {\n 'PrivateIpAddress': 'string',\n 'Primary': True|False\n },\n ],\n 'SecondaryPrivateIpAddressCount': 123,\n 'AssociatePublicIpAddress': True|False,\n 'Ipv6Addresses': [\n {\n 'Ipv6Address': 'string'\n },\n ],\n 'Ipv6AddressCount': 123\n },\n ],\n IamInstanceProfile={\n 'Arn': 'string',\n 'Name': 'string'\n },\n EbsOptimized=True|False,\n TagSpecifications=[\n {\n 'ResourceType': 'customer-gateway'|'dhcp-options'|'image'|'instance'|'internet-gateway'|'network-acl'|'network-interface'|'reserved-instances'|'route-table'|'snapshot'|'spot-instances-request'|'subnet'|'security-group'|'volume'|'vpc'|'vpn-connection'|'vpn-gateway',\n 'Tags': [\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ]\n },\n ]\n )\n \n \n :type DryRun: boolean\n :param DryRun: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. 
If you have the required permissions, the error response is DryRunOperation . Otherwise, it is UnauthorizedOperation .\n\n :type ImageId: string\n :param ImageId: [REQUIRED]\n The ID of the AMI, which you can get by calling DescribeImages .\n \n\n :type MinCount: integer\n :param MinCount: [REQUIRED]\n The minimum number of instances to launch. If you specify a minimum that is more instances than Amazon EC2 can launch in the target Availability Zone, Amazon EC2 launches no instances.\n Constraints: Between 1 and the maximum number you're allowed for the specified instance type. For more information about the default limits, and how to request an increase, see How many instances can I run in Amazon EC2 in the Amazon EC2 General FAQ.\n \n\n :type MaxCount: integer\n :param MaxCount: [REQUIRED]\n The maximum number of instances to launch. If you specify more instances than Amazon EC2 can launch in the target Availability Zone, Amazon EC2 launches the largest possible number of instances above MinCount .\n Constraints: Between 1 and the maximum number you're allowed for the specified instance type. For more information about the default limits, and how to request an increase, see How many instances can I run in Amazon EC2 in the Amazon EC2 FAQ.\n \n\n :type KeyName: string\n :param KeyName: The name of the key pair. You can create a key pair using CreateKeyPair or ImportKeyPair .\n Warning\n If you do not specify a key pair, you can't connect to the instance unless you choose an AMI that is configured to allow users another way to log in.\n \n\n :type SecurityGroups: list\n :param SecurityGroups: [EC2-Classic, default VPC] One or more security group names. For a nondefault VPC, you must use security group IDs instead.\n Default: Amazon EC2 uses the default security group.\n (string) --\n \n\n :type SecurityGroupIds: list\n :param SecurityGroupIds: One or more security group IDs. You can create a security group using CreateSecurityGroup .\n Default: Amazon EC2 uses the default security group.\n (string) --\n \n\n :type UserData: string\n :param UserData: The user data to make available to the instance. For more information, see Running Commands on Your Linux Instance at Launch (Linux) and Adding User Data (Windows). If you are using an AWS SDK or command line tool, Base64-encoding is performed for you, and you can load the text from a file. Otherwise, you must provide Base64-encoded text.\n This value will be base64 encoded automatically. Do not base64 encode this value prior to performing the operation.\n \n\n :type InstanceType: string\n :param InstanceType: The instance type. For more information, see Instance Types in the Amazon Elastic Compute Cloud User Guide .\n Default: m1.small\n \n\n :type Placement: dict\n :param Placement: The placement for the instance.\n AvailabilityZone (string) --The Availability Zone of the instance.\n GroupName (string) --The name of the placement group the instance is in (for cluster compute instances).\n Tenancy (string) --The tenancy of the instance (if the instance is running in a VPC). An instance with a tenancy of dedicated runs on single-tenant hardware. The host tenancy is not supported for the ImportInstance command.\n HostId (string) --The ID of the Dedicated Host on which the instance resides. This parameter is not supported for the ImportInstance command.\n Affinity (string) --The affinity setting for the instance on the Dedicated Host. 
This parameter is not supported for the ImportInstance command.\n \n\n :type KernelId: string\n :param KernelId: The ID of the kernel.\n Warning\n We recommend that you use PV-GRUB instead of kernels and RAM disks. For more information, see PV-GRUB in the Amazon Elastic Compute Cloud User Guide .\n \n\n :type RamdiskId: string\n :param RamdiskId: The ID of the RAM disk.\n Warning\n We recommend that you use PV-GRUB instead of kernels and RAM disks. For more information, see PV-GRUB in the Amazon Elastic Compute Cloud User Guide .\n \n\n :type BlockDeviceMappings: list\n :param BlockDeviceMappings: The block device mapping.\n Warning\n Supplying both a snapshot ID and an encryption value as arguments for block-device mapping results in an error. This is because only blank volumes can be encrypted on start, and these are not created from a snapshot. If a snapshot is the basis for the volume, it contains data by definition and its encryption status cannot be changed using this action.\n (dict) --Describes a block device mapping.\n VirtualName (string) --The virtual device name (ephemeral N). Instance store volumes are numbered starting from 0. An instance type with 2 available instance store volumes can specify mappings for ephemeral0 and ephemeral1 .The number of available instance store volumes depends on the instance type. After you connect to the instance, you must mount the volume.\n Constraints: For M3 instances, you must specify instance store volumes in the block device mapping for the instance. When you launch an M3 instance, we ignore any instance store volumes specified in the block device mapping for the AMI.\n DeviceName (string) --The device name exposed to the instance (for example, /dev/sdh or xvdh ).\n Ebs (dict) --Parameters used to automatically set up EBS volumes when the instance is launched.\n SnapshotId (string) --The ID of the snapshot.\n VolumeSize (integer) --The size of the volume, in GiB.\n Constraints: 1-16384 for General Purpose SSD (gp2 ), 4-16384 for Provisioned IOPS SSD (io1 ), 500-16384 for Throughput Optimized HDD (st1 ), 500-16384 for Cold HDD (sc1 ), and 1-1024 for Magnetic (standard ) volumes. If you specify a snapshot, the volume size must be equal to or larger than the snapshot size.\n Default: If you're creating the volume from a snapshot and don't specify a volume size, the default is the snapshot size.\n DeleteOnTermination (boolean) --Indicates whether the EBS volume is deleted on instance termination.\n VolumeType (string) --The volume type: gp2 , io1 , st1 , sc1 , or standard .\n Default: standard\n Iops (integer) --The number of I/O operations per second (IOPS) that the volume supports. For io1 , this represents the number of IOPS that are provisioned for the volume. For gp2 , this represents the baseline performance of the volume and the rate at which the volume accumulates I/O credits for bursting. For more information about General Purpose SSD baseline performance, I/O credits, and bursting, see Amazon EBS Volume Types in the Amazon Elastic Compute Cloud User Guide .\n Constraint: Range is 100-20000 IOPS for io1 volumes and 100-10000 IOPS for gp2 volumes.\n Condition: This parameter is required for requests to create io1 volumes; it is not used in requests to create gp2 , st1 , sc1 , or standard volumes.\n Encrypted (boolean) --Indicates whether the EBS volume is encrypted. 
Encrypted Amazon EBS volumes may only be attached to instances that support Amazon EBS encryption.\n NoDevice (string) --Suppresses the specified device included in the block device mapping of the AMI.\n \n \n\n :type Monitoring: dict\n :param Monitoring: The monitoring for the instance.\n Enabled (boolean) -- [REQUIRED]Indicates whether detailed monitoring is enabled. Otherwise, basic monitoring is enabled.\n \n\n :type SubnetId: string\n :param SubnetId: [EC2-VPC] The ID of the subnet to launch the instance into.\n\n :type DisableApiTermination: boolean\n :param DisableApiTermination: If you set this parameter to true , you can't terminate the instance using the Amazon EC2 console, CLI, or API; otherwise, you can. To change this attribute to false after launch, use ModifyInstanceAttribute . Alternatively, if you set InstanceInitiatedShutdownBehavior to terminate , you can terminate the instance by running the shutdown command from the instance.\n Default: false\n \n\n :type InstanceInitiatedShutdownBehavior: string\n :param InstanceInitiatedShutdownBehavior: Indicates whether an instance stops or terminates when you initiate shutdown from the instance (using the operating system command for system shutdown).\n Default: stop\n \n\n :type PrivateIpAddress: string\n :param PrivateIpAddress: [EC2-VPC] The primary IPv4 address. You must specify a value from the IPv4 address range of the subnet.\n Only one private IP address can be designated as primary. You can't specify this option if you've specified the option to designate a private IP address as the primary IP address in a network interface specification. You cannot specify this option if you're launching more than one instance in the request.\n \n\n :type Ipv6Addresses: list\n :param Ipv6Addresses: [EC2-VPC] Specify one or more IPv6 addresses from the range of the subnet to associate with the primary network interface. You cannot specify this option and the option to assign a number of IPv6 addresses in the same request. You cannot specify this option if you've specified a minimum number of instances to launch.\n (dict) --Describes an IPv6 address.\n Ipv6Address (string) --The IPv6 address.\n \n \n\n :type Ipv6AddressCount: integer\n :param Ipv6AddressCount: [EC2-VPC] A number of IPv6 addresses to associate with the primary network interface. Amazon EC2 chooses the IPv6 addresses from the range of your subnet. You cannot specify this option and the option to assign specific IPv6 addresses in the same request. You can specify this option if you've specified a minimum number of instances to launch.\n\n :type ClientToken: string\n :param ClientToken: Unique, case-sensitive identifier you provide to ensure the idempotency of the request. For more information, see Ensuring Idempotency .\n Constraints: Maximum 64 ASCII characters\n \n\n :type AdditionalInfo: string\n :param AdditionalInfo: Reserved.\n\n :type NetworkInterfaces: list\n :param NetworkInterfaces: One or more network interfaces.\n (dict) --Describes a network interface.\n NetworkInterfaceId (string) --The ID of the network interface.\n DeviceIndex (integer) --The index of the device on the instance for the network interface attachment. If you are specifying a network interface in a RunInstances request, you must provide the device index.\n SubnetId (string) --The ID of the subnet associated with the network string. Applies only if creating a network interface when launching an instance.\n Description (string) --The description of the network interface. 
Applies only if creating a network interface when launching an instance.\n PrivateIpAddress (string) --The private IPv4 address of the network interface. Applies only if creating a network interface when launching an instance. You cannot specify this option if you're launching more than one instance in a RunInstances request.\n Groups (list) --The IDs of the security groups for the network interface. Applies only if creating a network interface when launching an instance.\n (string) --\n DeleteOnTermination (boolean) --If set to true , the interface is deleted when the instance is terminated. You can specify true only if creating a new network interface when launching an instance.\n PrivateIpAddresses (list) --One or more private IPv4 addresses to assign to the network interface. Only one private IPv4 address can be designated as primary. You cannot specify this option if you're launching more than one instance in a RunInstances request.\n (dict) --Describes a secondary private IPv4 address for a network interface.\n PrivateIpAddress (string) -- [REQUIRED]The private IPv4 addresses.\n Primary (boolean) --Indicates whether the private IPv4 address is the primary private IPv4 address. Only one IPv4 address can be designated as primary.\n \n SecondaryPrivateIpAddressCount (integer) --The number of secondary private IPv4 addresses. You can't specify this option and specify more than one private IP address using the private IP addresses option. You cannot specify this option if you're launching more than one instance in a RunInstances request.\n AssociatePublicIpAddress (boolean) --Indicates whether to assign a public IPv4 address to an instance you launch in a VPC. The public IP address can only be assigned to a network interface for eth0, and can only be assigned to a new network interface, not an existing one. You cannot specify more than one network interface in the request. If launching into a default subnet, the default value is true .\n Ipv6Addresses (list) --One or more IPv6 addresses to assign to the network interface. You cannot specify this option and the option to assign a number of IPv6 addresses in the same request. You cannot specify this option if you've specified a minimum number of instances to launch.\n (dict) --Describes an IPv6 address.\n Ipv6Address (string) --The IPv6 address.\n \n Ipv6AddressCount (integer) --A number of IPv6 addresses to assign to the network interface. Amazon EC2 chooses the IPv6 addresses from the range of the subnet. You cannot specify this option and the option to assign specific IPv6 addresses in the same request. You can specify this option if you've specified a minimum number of instances to launch.\n \n \n\n :type IamInstanceProfile: dict\n :param IamInstanceProfile: The IAM instance profile.\n Arn (string) --The Amazon Resource Name (ARN) of the instance profile.\n Name (string) --The name of the instance profile.\n \n\n :type EbsOptimized: boolean\n :param EbsOptimized: Indicates whether the instance is optimized for EBS I/O. This optimization provides dedicated throughput to Amazon EBS and an optimized configuration stack to provide optimal EBS I/O performance. This optimization isn't available with all instance types. Additional usage charges apply when using an EBS-optimized instance.\n Default: false\n \n\n :type TagSpecifications: list\n :param TagSpecifications: The tags to apply to the resources during launch. You can tag instances and volumes. 
The specified tags are applied to all instances or volumes that are created during launch.\n (dict) --The tags to apply to a resource when the resource is being created.\n ResourceType (string) --The type of resource to tag. Currently, the resource types that support tagging on creation are instance and volume .\n Tags (list) --The tags to apply to the resource.\n (dict) --Describes a tag.\n Key (string) --The key of the tag.\n Constraints: Tag keys are case-sensitive and accept a maximum of 127 Unicode characters. May not begin with aws:\n Value (string) --The value of the tag.\n Constraints: Tag values are case-sensitive and accept a maximum of 255 Unicode characters.\n \n \n \n\n :rtype: dict\n :return: {\n 'ReservationId': 'string',\n 'OwnerId': 'string',\n 'RequesterId': 'string',\n 'Groups': [\n {\n 'GroupName': 'string',\n 'GroupId': 'string'\n },\n ],\n 'Instances': [\n {\n 'InstanceId': 'string',\n 'ImageId': 'string',\n 'State': {\n 'Code': 123,\n 'Name': 'pending'|'running'|'shutting-down'|'terminated'|'stopping'|'stopped'\n },\n 'PrivateDnsName': 'string',\n 'PublicDnsName': 'string',\n 'StateTransitionReason': 'string',\n 'KeyName': 'string',\n 'AmiLaunchIndex': 123,\n 'ProductCodes': [\n {\n 'ProductCodeId': 'string',\n 'ProductCodeType': 'devpay'|'marketplace'\n },\n ],\n 'InstanceType': 't1.micro'|'t2.nano'|'t2.micro'|'t2.small'|'t2.medium'|'t2.large'|'t2.xlarge'|'t2.2xlarge'|'m1.small'|'m1.medium'|'m1.large'|'m1.xlarge'|'m3.medium'|'m3.large'|'m3.xlarge'|'m3.2xlarge'|'m4.large'|'m4.xlarge'|'m4.2xlarge'|'m4.4xlarge'|'m4.10xlarge'|'m4.16xlarge'|'m2.xlarge'|'m2.2xlarge'|'m2.4xlarge'|'cr1.8xlarge'|'r3.large'|'r3.xlarge'|'r3.2xlarge'|'r3.4xlarge'|'r3.8xlarge'|'r4.large'|'r4.xlarge'|'r4.2xlarge'|'r4.4xlarge'|'r4.8xlarge'|'r4.16xlarge'|'x1.16xlarge'|'x1.32xlarge'|'i2.xlarge'|'i2.2xlarge'|'i2.4xlarge'|'i2.8xlarge'|'i3.large'|'i3.xlarge'|'i3.2xlarge'|'i3.4xlarge'|'i3.8xlarge'|'i3.16xlarge'|'hi1.4xlarge'|'hs1.8xlarge'|'c1.medium'|'c1.xlarge'|'c3.large'|'c3.xlarge'|'c3.2xlarge'|'c3.4xlarge'|'c3.8xlarge'|'c4.large'|'c4.xlarge'|'c4.2xlarge'|'c4.4xlarge'|'c4.8xlarge'|'cc1.4xlarge'|'cc2.8xlarge'|'g2.2xlarge'|'g2.8xlarge'|'cg1.4xlarge'|'p2.xlarge'|'p2.8xlarge'|'p2.16xlarge'|'d2.xlarge'|'d2.2xlarge'|'d2.4xlarge'|'d2.8xlarge'|'f1.2xlarge'|'f1.16xlarge',\n 'LaunchTime': datetime(2015, 1, 1),\n 'Placement': {\n 'AvailabilityZone': 'string',\n 'GroupName': 'string',\n 'Tenancy': 'default'|'dedicated'|'host',\n 'HostId': 'string',\n 'Affinity': 'string'\n },\n 'KernelId': 'string',\n 'RamdiskId': 'string',\n 'Platform': 'Windows',\n 'Monitoring': {\n 'State': 'disabled'|'disabling'|'enabled'|'pending'\n },\n 'SubnetId': 'string',\n 'VpcId': 'string',\n 'PrivateIpAddress': 'string',\n 'PublicIpAddress': 'string',\n 'StateReason': {\n 'Code': 'string',\n 'Message': 'string'\n },\n 'Architecture': 'i386'|'x86_64',\n 'RootDeviceType': 'ebs'|'instance-store',\n 'RootDeviceName': 'string',\n 'BlockDeviceMappings': [\n {\n 'DeviceName': 'string',\n 'Ebs': {\n 'VolumeId': 'string',\n 'Status': 'attaching'|'attached'|'detaching'|'detached',\n 'AttachTime': datetime(2015, 1, 1),\n 'DeleteOnTermination': True|False\n }\n },\n ],\n 'VirtualizationType': 'hvm'|'paravirtual',\n 'InstanceLifecycle': 'spot'|'scheduled',\n 'SpotInstanceRequestId': 'string',\n 'ClientToken': 'string',\n 'Tags': [\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ],\n 'SecurityGroups': [\n {\n 'GroupName': 'string',\n 'GroupId': 'string'\n },\n ],\n 'SourceDestCheck': True|False,\n 'Hypervisor': 'ovm'|'xen',\n 
'NetworkInterfaces': [\n {\n 'NetworkInterfaceId': 'string',\n 'SubnetId': 'string',\n 'VpcId': 'string',\n 'Description': 'string',\n 'OwnerId': 'string',\n 'Status': 'available'|'attaching'|'in-use'|'detaching',\n 'MacAddress': 'string',\n 'PrivateIpAddress': 'string',\n 'PrivateDnsName': 'string',\n 'SourceDestCheck': True|False,\n 'Groups': [\n {\n 'GroupName': 'string',\n 'GroupId': 'string'\n },\n ],\n 'Attachment': {\n 'AttachmentId': 'string',\n 'DeviceIndex': 123,\n 'Status': 'attaching'|'attached'|'detaching'|'detached',\n 'AttachTime': datetime(2015, 1, 1),\n 'DeleteOnTermination': True|False\n },\n 'Association': {\n 'PublicIp': 'string',\n 'PublicDnsName': 'string',\n 'IpOwnerId': 'string'\n },\n 'PrivateIpAddresses': [\n {\n 'PrivateIpAddress': 'string',\n 'PrivateDnsName': 'string',\n 'Primary': True|False,\n 'Association': {\n 'PublicIp': 'string',\n 'PublicDnsName': 'string',\n 'IpOwnerId': 'string'\n }\n },\n ],\n 'Ipv6Addresses': [\n {\n 'Ipv6Address': 'string'\n },\n ]\n },\n ],\n 'IamInstanceProfile': {\n 'Arn': 'string',\n 'Id': 'string'\n },\n 'EbsOptimized': True|False,\n 'SriovNetSupport': 'string',\n 'EnaSupport': True|False\n },\n ]\n }\n \n \n :returns: \n DryRun (boolean) -- Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation . Otherwise, it is UnauthorizedOperation .\n ImageId (string) -- [REQUIRED]\n The ID of the AMI, which you can get by calling DescribeImages .\n \n MinCount (integer) -- [REQUIRED]\n The minimum number of instances to launch. If you specify a minimum that is more instances than Amazon EC2 can launch in the target Availability Zone, Amazon EC2 launches no instances.\n Constraints: Between 1 and the maximum number you're allowed for the specified instance type. For more information about the default limits, and how to request an increase, see How many instances can I run in Amazon EC2 in the Amazon EC2 General FAQ.\n \n MaxCount (integer) -- [REQUIRED]\n The maximum number of instances to launch. If you specify more instances than Amazon EC2 can launch in the target Availability Zone, Amazon EC2 launches the largest possible number of instances above MinCount .\n Constraints: Between 1 and the maximum number you're allowed for the specified instance type. For more information about the default limits, and how to request an increase, see How many instances can I run in Amazon EC2 in the Amazon EC2 FAQ.\n \n KeyName (string) -- The name of the key pair. You can create a key pair using CreateKeyPair or ImportKeyPair .\n \n Warning\n If you do not specify a key pair, you can't connect to the instance unless you choose an AMI that is configured to allow users another way to log in.\n \n \n SecurityGroups (list) -- [EC2-Classic, default VPC] One or more security group names. For a nondefault VPC, you must use security group IDs instead.\n Default: Amazon EC2 uses the default security group.\n \n (string) --\n \n \n SecurityGroupIds (list) -- One or more security group IDs. You can create a security group using CreateSecurityGroup .\n Default: Amazon EC2 uses the default security group.\n \n (string) --\n \n \n UserData (string) -- The user data to make available to the instance. For more information, see Running Commands on Your Linux Instance at Launch (Linux) and Adding User Data (Windows). 
If you are using an AWS SDK or command line tool, Base64-encoding is performed for you, and you can load the text from a file. Otherwise, you must provide Base64-encoded text.\n \n This value will be base64 encoded automatically. Do not base64 encode this value prior to performing the operation.\n \n InstanceType (string) -- The instance type. For more information, see Instance Types in the Amazon Elastic Compute Cloud User Guide .\n Default: m1.small\n \n Placement (dict) -- The placement for the instance.\n \n AvailabilityZone (string) --The Availability Zone of the instance.\n \n GroupName (string) --The name of the placement group the instance is in (for cluster compute instances).\n \n Tenancy (string) --The tenancy of the instance (if the instance is running in a VPC). An instance with a tenancy of dedicated runs on single-tenant hardware. The host tenancy is not supported for the ImportInstance command.\n \n HostId (string) --The ID of the Dedicated Host on which the instance resides. This parameter is not supported for the ImportInstance command.\n \n Affinity (string) --The affinity setting for the instance on the Dedicated Host. This parameter is not supported for the ImportInstance command.\n \n \n \n KernelId (string) -- The ID of the kernel.\n \n Warning\n We recommend that you use PV-GRUB instead of kernels and RAM disks. For more information, see PV-GRUB in the Amazon Elastic Compute Cloud User Guide .\n \n \n RamdiskId (string) -- The ID of the RAM disk.\n \n Warning\n We recommend that you use PV-GRUB instead of kernels and RAM disks. For more information, see PV-GRUB in the Amazon Elastic Compute Cloud User Guide .\n \n \n BlockDeviceMappings (list) -- The block device mapping.\n \n Warning\n Supplying both a snapshot ID and an encryption value as arguments for block-device mapping results in an error. This is because only blank volumes can be encrypted on start, and these are not created from a snapshot. If a snapshot is the basis for the volume, it contains data by definition and its encryption status cannot be changed using this action.\n \n \n (dict) --Describes a block device mapping.\n \n VirtualName (string) --The virtual device name (ephemeral N). Instance store volumes are numbered starting from 0. An instance type with 2 available instance store volumes can specify mappings for ephemeral0 and ephemeral1 .The number of available instance store volumes depends on the instance type. After you connect to the instance, you must mount the volume.\n Constraints: For M3 instances, you must specify instance store volumes in the block device mapping for the instance. When you launch an M3 instance, we ignore any instance store volumes specified in the block device mapping for the AMI.\n \n DeviceName (string) --The device name exposed to the instance (for example, /dev/sdh or xvdh ).\n \n Ebs (dict) --Parameters used to automatically set up EBS volumes when the instance is launched.\n \n SnapshotId (string) --The ID of the snapshot.\n \n VolumeSize (integer) --The size of the volume, in GiB.\n Constraints: 1-16384 for General Purpose SSD (gp2 ), 4-16384 for Provisioned IOPS SSD (io1 ), 500-16384 for Throughput Optimized HDD (st1 ), 500-16384 for Cold HDD (sc1 ), and 1-1024 for Magnetic (standard ) volumes. 
If you specify a snapshot, the volume size must be equal to or larger than the snapshot size.\n Default: If you're creating the volume from a snapshot and don't specify a volume size, the default is the snapshot size.\n \n DeleteOnTermination (boolean) --Indicates whether the EBS volume is deleted on instance termination.\n \n VolumeType (string) --The volume type: gp2 , io1 , st1 , sc1 , or standard .\n Default: standard\n \n Iops (integer) --The number of I/O operations per second (IOPS) that the volume supports. For io1 , this represents the number of IOPS that are provisioned for the volume. For gp2 , this represents the baseline performance of the volume and the rate at which the volume accumulates I/O credits for bursting. For more information about General Purpose SSD baseline performance, I/O credits, and bursting, see Amazon EBS Volume Types in the Amazon Elastic Compute Cloud User Guide .\n Constraint: Range is 100-20000 IOPS for io1 volumes and 100-10000 IOPS for gp2 volumes.\n Condition: This parameter is required for requests to create io1 volumes; it is not used in requests to create gp2 , st1 , sc1 , or standard volumes.\n \n Encrypted (boolean) --Indicates whether the EBS volume is encrypted. Encrypted Amazon EBS volumes may only be attached to instances that support Amazon EBS encryption.\n \n \n \n NoDevice (string) --Suppresses the specified device included in the block device mapping of the AMI.\n \n \n \n \n \n Monitoring (dict) -- The monitoring for the instance.\n \n Enabled (boolean) -- [REQUIRED]Indicates whether detailed monitoring is enabled. Otherwise, basic monitoring is enabled.\n \n \n \n SubnetId (string) -- [EC2-VPC] The ID of the subnet to launch the instance into.\n DisableApiTermination (boolean) -- If you set this parameter to true , you can't terminate the instance using the Amazon EC2 console, CLI, or API; otherwise, you can. To change this attribute to false after launch, use ModifyInstanceAttribute . Alternatively, if you set InstanceInitiatedShutdownBehavior to terminate , you can terminate the instance by running the shutdown command from the instance.\n Default: false\n \n InstanceInitiatedShutdownBehavior (string) -- Indicates whether an instance stops or terminates when you initiate shutdown from the instance (using the operating system command for system shutdown).\n Default: stop\n \n PrivateIpAddress (string) -- [EC2-VPC] The primary IPv4 address. You must specify a value from the IPv4 address range of the subnet.\n Only one private IP address can be designated as primary. You can't specify this option if you've specified the option to designate a private IP address as the primary IP address in a network interface specification. You cannot specify this option if you're launching more than one instance in the request.\n \n Ipv6Addresses (list) -- [EC2-VPC] Specify one or more IPv6 addresses from the range of the subnet to associate with the primary network interface. You cannot specify this option and the option to assign a number of IPv6 addresses in the same request. You cannot specify this option if you've specified a minimum number of instances to launch.\n \n (dict) --Describes an IPv6 address.\n \n Ipv6Address (string) --The IPv6 address.\n \n \n \n \n \n Ipv6AddressCount (integer) -- [EC2-VPC] A number of IPv6 addresses to associate with the primary network interface. Amazon EC2 chooses the IPv6 addresses from the range of your subnet. You cannot specify this option and the option to assign specific IPv6 addresses in the same request. 
You can specify this option if you've specified a minimum number of instances to launch.\n ClientToken (string) -- Unique, case-sensitive identifier you provide to ensure the idempotency of the request. For more information, see Ensuring Idempotency .\n Constraints: Maximum 64 ASCII characters\n \n AdditionalInfo (string) -- Reserved.\n NetworkInterfaces (list) -- One or more network interfaces.\n \n (dict) --Describes a network interface.\n \n NetworkInterfaceId (string) --The ID of the network interface.\n \n DeviceIndex (integer) --The index of the device on the instance for the network interface attachment. If you are specifying a network interface in a RunInstances request, you must provide the device index.\n \n SubnetId (string) --The ID of the subnet associated with the network string. Applies only if creating a network interface when launching an instance.\n \n Description (string) --The description of the network interface. Applies only if creating a network interface when launching an instance.\n \n PrivateIpAddress (string) --The private IPv4 address of the network interface. Applies only if creating a network interface when launching an instance. You cannot specify this option if you're launching more than one instance in a RunInstances request.\n \n Groups (list) --The IDs of the security groups for the network interface. Applies only if creating a network interface when launching an instance.\n \n (string) --\n \n \n DeleteOnTermination (boolean) --If set to true , the interface is deleted when the instance is terminated. You can specify true only if creating a new network interface when launching an instance.\n \n PrivateIpAddresses (list) --One or more private IPv4 addresses to assign to the network interface. Only one private IPv4 address can be designated as primary. You cannot specify this option if you're launching more than one instance in a RunInstances request.\n \n (dict) --Describes a secondary private IPv4 address for a network interface.\n \n PrivateIpAddress (string) -- [REQUIRED]The private IPv4 addresses.\n \n Primary (boolean) --Indicates whether the private IPv4 address is the primary private IPv4 address. Only one IPv4 address can be designated as primary.\n \n \n \n \n \n SecondaryPrivateIpAddressCount (integer) --The number of secondary private IPv4 addresses. You can't specify this option and specify more than one private IP address using the private IP addresses option. You cannot specify this option if you're launching more than one instance in a RunInstances request.\n \n AssociatePublicIpAddress (boolean) --Indicates whether to assign a public IPv4 address to an instance you launch in a VPC. The public IP address can only be assigned to a network interface for eth0, and can only be assigned to a new network interface, not an existing one. You cannot specify more than one network interface in the request. If launching into a default subnet, the default value is true .\n \n Ipv6Addresses (list) --One or more IPv6 addresses to assign to the network interface. You cannot specify this option and the option to assign a number of IPv6 addresses in the same request. You cannot specify this option if you've specified a minimum number of instances to launch.\n \n (dict) --Describes an IPv6 address.\n \n Ipv6Address (string) --The IPv6 address.\n \n \n \n \n \n Ipv6AddressCount (integer) --A number of IPv6 addresses to assign to the network interface. Amazon EC2 chooses the IPv6 addresses from the range of the subnet. 
You cannot specify this option and the option to assign specific IPv6 addresses in the same request. You can specify this option if you've specified a minimum number of instances to launch.\n \n \n \n \n \n IamInstanceProfile (dict) -- The IAM instance profile.\n \n Arn (string) --The Amazon Resource Name (ARN) of the instance profile.\n \n Name (string) --The name of the instance profile.\n \n \n \n EbsOptimized (boolean) -- Indicates whether the instance is optimized for EBS I/O. This optimization provides dedicated throughput to Amazon EBS and an optimized configuration stack to provide optimal EBS I/O performance. This optimization isn't available with all instance types. Additional usage charges apply when using an EBS-optimized instance.\n Default: false\n \n TagSpecifications (list) -- The tags to apply to the resources during launch. You can tag instances and volumes. The specified tags are applied to all instances or volumes that are created during launch.\n \n (dict) --The tags to apply to a resource when the resource is being created.\n \n ResourceType (string) --The type of resource to tag. Currently, the resource types that support tagging on creation are instance and volume .\n \n Tags (list) --The tags to apply to the resource.\n \n (dict) --Describes a tag.\n \n Key (string) --The key of the tag.\n Constraints: Tag keys are case-sensitive and accept a maximum of 127 Unicode characters. May not begin with aws:\n \n Value (string) --The value of the tag.\n Constraints: Tag values are case-sensitive and accept a maximum of 255 Unicode characters.\n \n \n \n \n \n \n \n \n \n \n \"\"\"\n pass" ]
[ 0.8049406409263611, 0.7954756617546082, 0.7793565392494202, 0.7734854221343994, 0.7600001692771912, 0.7543185949325562, 0.7501642107963562, 0.7460813522338867, 0.7432956695556641, 0.7425548434257507, 0.7424837350845337, 0.7364528775215149 ]
Synchronously run ``payload`` and provide its output. If ``*args*`` and/or ``**kwargs`` are provided, pass them to ``payload`` upon execution.
def execute(self, payload, *args, flavour: ModuleType, **kwargs):
    """
    Synchronously run ``payload`` and provide its output

    If ``*args*`` and/or ``**kwargs`` are provided, pass them to ``payload`` upon execution.
    """
    if args or kwargs:
        payload = functools.partial(payload, *args, **kwargs)
    return self._meta_runner.run_payload(payload, flavour=flavour)
[ "def run_payload(self, payload, *, flavour: ModuleType):\n \"\"\"Execute one payload after its runner is started and return its output\"\"\"\n return self.runners[flavour].run_payload(payload)", "def adopt(self, payload, *args, flavour: ModuleType, **kwargs):\n \"\"\"\n Concurrently run ``payload`` in the background\n\n If ``*args*`` and/or ``**kwargs`` are provided, pass them to ``payload`` upon execution.\n \"\"\"\n if args or kwargs:\n payload = functools.partial(payload, *args, **kwargs)\n self._meta_runner.register_payload(payload, flavour=flavour)", "def register_payload(self, *payloads, flavour: ModuleType):\n \"\"\"Queue one or more payload for execution after its runner is started\"\"\"\n for payload in payloads:\n self._logger.debug('registering payload %s (%s)', NameRepr(payload), NameRepr(flavour))\n self.runners[flavour].register_payload(payload)", "async def _run_payloads(self):\n \"\"\"Async component of _run\"\"\"\n delay = 0.0\n try:\n while self.running.is_set():\n await self._start_payloads()\n await self._reap_payloads()\n await asyncio.sleep(delay)\n delay = min(delay + 0.1, 1.0)\n except Exception:\n await self._cancel_payloads()\n raise", "def perform(self):\n \"\"\"This method converts payload into args and calls the ``perform``\n method on the payload class.\n\n Before calling ``perform``, a ``before_perform`` class method\n is called, if it exists. It takes a dictionary as an argument;\n currently the only things stored on the dictionary are the\n args passed into ``perform`` and a timestamp of when the job\n was enqueued.\n\n Similarly, an ``after_perform`` class method is called after\n ``perform`` is finished. The metadata dictionary contains the\n same data, plus a timestamp of when the job was performed, a\n ``failed`` boolean value, and if it did fail, a ``retried``\n boolean value. 
This method is called after retry, and is\n called regardless of whether an exception is ultimately thrown\n by the perform method.\n\n\n \"\"\"\n payload_class_str = self._payload[\"class\"]\n payload_class = self.safe_str_to_class(payload_class_str)\n payload_class.resq = self.resq\n args = self._payload.get(\"args\")\n\n metadata = dict(args=args)\n if self.enqueue_timestamp:\n metadata[\"enqueue_timestamp\"] = self.enqueue_timestamp\n\n before_perform = getattr(payload_class, \"before_perform\", None)\n\n metadata[\"failed\"] = False\n metadata[\"perform_timestamp\"] = time.time()\n check_after = True\n try:\n if before_perform:\n payload_class.before_perform(metadata)\n return payload_class.perform(*args)\n except Exception as e:\n metadata[\"failed\"] = True\n metadata[\"exception\"] = e\n if not self.retry(payload_class, args):\n metadata[\"retried\"] = False\n raise\n else:\n metadata[\"retried\"] = True\n logging.exception(\"Retry scheduled after error in %s\", self._payload)\n finally:\n after_perform = getattr(payload_class, \"after_perform\", None)\n\n if after_perform:\n payload_class.after_perform(metadata)\n\n delattr(payload_class,'resq')", "def run(self):\n \"\"\"\n Execute all current and future payloads\n\n Blocks and executes payloads until :py:meth:`stop` is called.\n It is an error for any orphaned payload to return or raise.\n \"\"\"\n self._logger.info('runner started: %s', self)\n try:\n with self._lock:\n assert not self.running.is_set() and self._stopped.is_set(), 'cannot re-run: %s' % self\n self.running.set()\n self._stopped.clear()\n self._run()\n except Exception:\n self._logger.exception('runner aborted: %s', self)\n raise\n else:\n self._logger.info('runner stopped: %s', self)\n finally:\n with self._lock:\n self.running.clear()\n self._stopped.set()", "def publish(self, payload, **kwargs):\n \"\"\" Publish a message.\n \"\"\"\n publish_kwargs = self.publish_kwargs.copy()\n\n # merge headers from when the publisher was instantiated\n # with any provided now; \"extra\" headers always win\n headers = publish_kwargs.pop('headers', {}).copy()\n headers.update(kwargs.pop('headers', {}))\n headers.update(kwargs.pop('extra_headers', {}))\n\n use_confirms = kwargs.pop('use_confirms', self.use_confirms)\n transport_options = kwargs.pop('transport_options',\n self.transport_options\n )\n transport_options['confirm_publish'] = use_confirms\n\n delivery_mode = kwargs.pop('delivery_mode', self.delivery_mode)\n mandatory = kwargs.pop('mandatory', self.mandatory)\n priority = kwargs.pop('priority', self.priority)\n expiration = kwargs.pop('expiration', self.expiration)\n serializer = kwargs.pop('serializer', self.serializer)\n compression = kwargs.pop('compression', self.compression)\n retry = kwargs.pop('retry', self.retry)\n retry_policy = kwargs.pop('retry_policy', self.retry_policy)\n\n declare = self.declare[:]\n declare.extend(kwargs.pop('declare', ()))\n\n publish_kwargs.update(kwargs) # remaining publish-time kwargs win\n\n with get_producer(self.amqp_uri,\n use_confirms,\n self.ssl,\n transport_options,\n ) as producer:\n try:\n producer.publish(\n payload,\n headers=headers,\n delivery_mode=delivery_mode,\n mandatory=mandatory,\n priority=priority,\n expiration=expiration,\n compression=compression,\n declare=declare,\n retry=retry,\n retry_policy=retry_policy,\n serializer=serializer,\n **publish_kwargs\n )\n except ChannelError as exc:\n if \"NO_ROUTE\" in str(exc):\n raise UndeliverableMessage()\n raise\n\n if mandatory:\n if not use_confirms:\n 
warnings.warn(\n \"Mandatory delivery was requested, but \"\n \"unroutable messages cannot be detected without \"\n \"publish confirms enabled.\"\n )", "def _handle_payload(self, payload):\n '''\n The _handle_payload method is the key method used to figure out what\n needs to be done with communication to the server\n\n Example cleartext payload generated for 'salt myminion test.ping':\n\n {'enc': 'clear',\n 'load': {'arg': [],\n 'cmd': 'publish',\n 'fun': 'test.ping',\n 'jid': '',\n 'key': 'alsdkjfa.,maljf-==adflkjadflkjalkjadfadflkajdflkj',\n 'kwargs': {'show_jid': False, 'show_timeout': False},\n 'ret': '',\n 'tgt': 'myminion',\n 'tgt_type': 'glob',\n 'user': 'root'}}\n\n :param dict payload: The payload route to the appropriate handler\n '''\n key = payload['enc']\n load = payload['load']\n ret = {'aes': self._handle_aes,\n 'clear': self._handle_clear}[key](load)\n raise tornado.gen.Return(ret)", "async def _await_all(self):\n \"\"\"Async component of _run\"\"\"\n delay = 0.0\n # we run a top-level nursery that automatically reaps/cancels for us\n async with trio.open_nursery() as nursery:\n while self.running.is_set():\n await self._start_payloads(nursery=nursery)\n await trio.sleep(delay)\n delay = min(delay + 0.1, 1.0)\n # cancel the scope to cancel all payloads\n nursery.cancel_scope.cancel()", "def handle_decoded_payload(self, data):\n '''\n Override this method if you wish to handle the decoded data\n differently.\n '''\n # Ensure payload is unicode. Disregard failure to decode binary blobs.\n if six.PY2:\n data = salt.utils.data.decode(data, keep=True)\n if 'user' in data:\n log.info(\n 'User %s Executing command %s with jid %s',\n data['user'], data['fun'], data['jid']\n )\n else:\n log.info(\n 'Executing command %s with jid %s',\n data['fun'], data['jid']\n )\n log.debug('Command details %s', data)\n\n # Don't duplicate jobs\n log.trace('Started JIDs: %s', self.jid_queue)\n if self.jid_queue is not None:\n if data['jid'] in self.jid_queue:\n return\n else:\n self.jid_queue.append(data['jid'])\n if len(self.jid_queue) > self.opts['minion_jid_queue_hwm']:\n self.jid_queue.pop(0)\n\n if isinstance(data['fun'], six.string_types):\n if data['fun'] == 'sys.reload_modules':\n self.functions, self.returners, self.function_errors, self.executors = self._load_modules()\n self.schedule.functions = self.functions\n self.schedule.returners = self.returners\n\n process_count_max = self.opts.get('process_count_max')\n process_count_max_sleep_secs = self.opts.get('process_count_max_sleep_secs')\n if process_count_max > 0:\n process_count = len(salt.utils.minion.running(self.opts))\n while process_count >= process_count_max:\n log.warning('Maximum number of processes (%s) reached while '\n 'executing jid %s, waiting %s seconds...',\n process_count_max,\n data['jid'],\n process_count_max_sleep_secs)\n yield tornado.gen.sleep(process_count_max_sleep_secs)\n process_count = len(salt.utils.minion.running(self.opts))\n\n # We stash an instance references to allow for the socket\n # communication in Windows. 
You can't pickle functions, and thus\n # python needs to be able to reconstruct the reference on the other\n # side.\n instance = self\n multiprocessing_enabled = self.opts.get('multiprocessing', True)\n if multiprocessing_enabled:\n if sys.platform.startswith('win'):\n # let python reconstruct the minion on the other side if we're\n # running on windows\n instance = None\n with default_signals(signal.SIGINT, signal.SIGTERM):\n process = SignalHandlingMultiprocessingProcess(\n target=self._target, args=(instance, self.opts, data, self.connected)\n )\n else:\n process = threading.Thread(\n target=self._target,\n args=(instance, self.opts, data, self.connected),\n name=data['jid']\n )\n\n if multiprocessing_enabled:\n with default_signals(signal.SIGINT, signal.SIGTERM):\n # Reset current signals before starting the process in\n # order not to inherit the current signal handlers\n process.start()\n else:\n process.start()\n\n # TODO: remove the windows specific check?\n if multiprocessing_enabled and not salt.utils.platform.is_windows():\n # we only want to join() immediately if we are daemonizing a process\n process.join()\n else:\n self.win_proc.append(process)", "def transmit(self, payload):\n '''\n Transmit single payload, and receive response, if expected.\n The actual implementation of the send/receive should be in\n ``_send_to_target`` and ``_receive_from_target``.\n\n :type payload: str\n :param payload: payload to send\n :rtype: str\n :return: the response (if received)\n '''\n response = None\n trans_report_name = 'transmission_0x%04x' % self.transmission_count\n trans_report = Report(trans_report_name)\n self.transmission_report = trans_report\n self.report.add(trans_report_name, trans_report)\n try:\n trans_report.add('request (hex)', hexlify(payload).decode())\n trans_report.add('request (raw)', '%s' % payload)\n trans_report.add('request length', len(payload))\n trans_report.add('request time', time.time())\n\n request = hexlify(payload).decode()\n request = request if len(request) < 100 else (request[:100] + ' ...')\n self.logger.info('request(%d): %s' % (len(payload), request))\n self._send_to_target(payload)\n trans_report.success()\n\n if self.expect_response:\n try:\n response = self._receive_from_target()\n trans_report.add('response time', time.time())\n trans_report.add('response (hex)', hexlify(response).decode())\n trans_report.add('response (raw)', '%s' % response)\n trans_report.add('response length', len(response))\n printed_response = hexlify(response).decode()\n printed_response = printed_response if len(printed_response) < 100 else (printed_response[:100] + ' ...')\n self.logger.info('response(%d): %s' % (len(response), printed_response))\n except Exception as ex2:\n trans_report.failed('failed to receive response: %s' % ex2)\n trans_report.add('traceback', traceback.format_exc())\n self.logger.error('target.transmit - failure in receive (exception: %s)' % ex2)\n self.logger.error(traceback.format_exc())\n self.receive_failure = True\n else:\n response = ''\n except Exception as ex1:\n trans_report.failed('failed to send payload: %s' % ex1)\n trans_report.add('traceback', traceback.format_exc())\n self.logger.error('target.transmit - failure in send (exception: %s)' % ex1)\n self.logger.error(traceback.format_exc())\n self.send_failure = True\n self.transmission_count += 1\n return response", "async def _start_payloads(self, nursery):\n \"\"\"Start all queued payloads\"\"\"\n with self._lock:\n for coroutine in self._payloads:\n nursery.start_soon(coroutine)\n 
self._payloads.clear()\n await trio.sleep(0)" ]
[ 0.8208741545677185, 0.7724427580833435, 0.742557168006897, 0.7417251467704773, 0.7405674457550049, 0.7298162579536438, 0.715425968170166, 0.709276020526886, 0.7042000889778137, 0.6956870555877686, 0.6918435096740723, 0.6878609657287598 ]
Concurrently run ``payload`` in the background. If ``*args*`` and/or ``**kwargs`` are provided, pass them to ``payload`` upon execution.
def adopt(self, payload, *args, flavour: ModuleType, **kwargs):
    """
    Concurrently run ``payload`` in the background

    If ``*args*`` and/or ``**kwargs`` are provided, pass them to ``payload`` upon execution.
    """
    if args or kwargs:
        payload = functools.partial(payload, *args, **kwargs)
    self._meta_runner.register_payload(payload, flavour=flavour)
[ "def execute(self, payload, *args, flavour: ModuleType, **kwargs):\n \"\"\"\n Synchronously run ``payload`` and provide its output\n\n If ``*args*`` and/or ``**kwargs`` are provided, pass them to ``payload`` upon execution.\n \"\"\"\n if args or kwargs:\n payload = functools.partial(payload, *args, **kwargs)\n return self._meta_runner.run_payload(payload, flavour=flavour)", "def run_payload(self, payload, *, flavour: ModuleType):\n \"\"\"Execute one payload after its runner is started and return its output\"\"\"\n return self.runners[flavour].run_payload(payload)", "def register_payload(self, *payloads, flavour: ModuleType):\n \"\"\"Queue one or more payload for execution after its runner is started\"\"\"\n for payload in payloads:\n self._logger.debug('registering payload %s (%s)', NameRepr(payload), NameRepr(flavour))\n self.runners[flavour].register_payload(payload)", "async def _run_payloads(self):\n \"\"\"Async component of _run\"\"\"\n delay = 0.0\n try:\n while self.running.is_set():\n await self._start_payloads()\n await self._reap_payloads()\n await asyncio.sleep(delay)\n delay = min(delay + 0.1, 1.0)\n except Exception:\n await self._cancel_payloads()\n raise", "def run(self):\n \"\"\"\n Execute all current and future payloads\n\n Blocks and executes payloads until :py:meth:`stop` is called.\n It is an error for any orphaned payload to return or raise.\n \"\"\"\n self._logger.info('runner started: %s', self)\n try:\n with self._lock:\n assert not self.running.is_set() and self._stopped.is_set(), 'cannot re-run: %s' % self\n self.running.set()\n self._stopped.clear()\n self._run()\n except Exception:\n self._logger.exception('runner aborted: %s', self)\n raise\n else:\n self._logger.info('runner stopped: %s', self)\n finally:\n with self._lock:\n self.running.clear()\n self._stopped.set()", "def perform(self):\n \"\"\"This method converts payload into args and calls the ``perform``\n method on the payload class.\n\n Before calling ``perform``, a ``before_perform`` class method\n is called, if it exists. It takes a dictionary as an argument;\n currently the only things stored on the dictionary are the\n args passed into ``perform`` and a timestamp of when the job\n was enqueued.\n\n Similarly, an ``after_perform`` class method is called after\n ``perform`` is finished. The metadata dictionary contains the\n same data, plus a timestamp of when the job was performed, a\n ``failed`` boolean value, and if it did fail, a ``retried``\n boolean value. 
This method is called after retry, and is\n called regardless of whether an exception is ultimately thrown\n by the perform method.\n\n\n \"\"\"\n payload_class_str = self._payload[\"class\"]\n payload_class = self.safe_str_to_class(payload_class_str)\n payload_class.resq = self.resq\n args = self._payload.get(\"args\")\n\n metadata = dict(args=args)\n if self.enqueue_timestamp:\n metadata[\"enqueue_timestamp\"] = self.enqueue_timestamp\n\n before_perform = getattr(payload_class, \"before_perform\", None)\n\n metadata[\"failed\"] = False\n metadata[\"perform_timestamp\"] = time.time()\n check_after = True\n try:\n if before_perform:\n payload_class.before_perform(metadata)\n return payload_class.perform(*args)\n except Exception as e:\n metadata[\"failed\"] = True\n metadata[\"exception\"] = e\n if not self.retry(payload_class, args):\n metadata[\"retried\"] = False\n raise\n else:\n metadata[\"retried\"] = True\n logging.exception(\"Retry scheduled after error in %s\", self._payload)\n finally:\n after_perform = getattr(payload_class, \"after_perform\", None)\n\n if after_perform:\n payload_class.after_perform(metadata)\n\n delattr(payload_class,'resq')", "async def _start_payloads(self):\n \"\"\"Start all queued payloads\"\"\"\n with self._lock:\n for coroutine in self._payloads:\n task = self.event_loop.create_task(coroutine())\n self._tasks.add(task)\n self._payloads.clear()\n await asyncio.sleep(0)", "async def _await_all(self):\n \"\"\"Async component of _run\"\"\"\n delay = 0.0\n # we run a top-level nursery that automatically reaps/cancels for us\n async with trio.open_nursery() as nursery:\n while self.running.is_set():\n await self._start_payloads(nursery=nursery)\n await trio.sleep(delay)\n delay = min(delay + 0.1, 1.0)\n # cancel the scope to cancel all payloads\n nursery.cancel_scope.cancel()", "def with_payload(payload)\n current_payload = self.payload\n Thread.current[:sapience_payload] = current_payload ? current_payload.merge(payload) : payload\n yield\n ensure\n Thread.current[:sapience_payload] = current_payload\n end", "async def _start_payloads(self, nursery):\n \"\"\"Start all queued payloads\"\"\"\n with self._lock:\n for coroutine in self._payloads:\n nursery.start_soon(coroutine)\n self._payloads.clear()\n await trio.sleep(0)", "def _start_payloads(self):\n \"\"\"Start all queued payloads\"\"\"\n with self._lock:\n payloads = self._payloads.copy()\n self._payloads.clear()\n for subroutine in payloads:\n thread = CapturingThread(target=subroutine)\n thread.start()\n self._threads.add(thread)\n self._logger.debug('booted thread %s', thread)\n time.sleep(0)", "def payload_register(ptype, klass, pid):\n \"\"\" is used while a hook is running to let Juju know that a\n payload has been started.\"\"\"\n cmd = ['payload-register']\n for x in [ptype, klass, pid]:\n cmd.append(x)\n subprocess.check_call(cmd)" ]
[ 0.8430559635162354, 0.808273434638977, 0.754439651966095, 0.7477638721466064, 0.7286441326141357, 0.7200483679771423, 0.7090065479278564, 0.7083503603935242, 0.6952285766601562, 0.695154070854187, 0.6930971145629883, 0.6929183006286621 ]
Start accepting synchronous, asynchronous and service payloads. Since services are globally defined, only one :py:class:`ServiceRunner` may :py:meth:`accept` payloads at any time.
def accept(self): """ Start accepting synchronous, asynchronous and service payloads Since services are globally defined, only one :py:class:`ServiceRunner` may :py:meth:`accept` payloads at any time. """ if self._meta_runner: raise RuntimeError('payloads scheduled for %s before being started' % self) self._must_shutdown = False self._logger.info('%s starting', self.__class__.__name__) # force collecting objects so that defunct, migrated and overwritten services are destroyed now gc.collect() self._adopt_services() self.adopt(self._accept_services, flavour=trio) self._meta_runner.run()
[ "def adopt(self, payload, *args, flavour: ModuleType, **kwargs):\n \"\"\"\n Concurrently run ``payload`` in the background\n\n If ``*args*`` and/or ``**kwargs`` are provided, pass them to ``payload`` upon execution.\n \"\"\"\n if args or kwargs:\n payload = functools.partial(payload, *args, **kwargs)\n self._meta_runner.register_payload(payload, flavour=flavour)", "async def _await_all(self):\n \"\"\"Async component of _run\"\"\"\n delay = 0.0\n # we run a top-level nursery that automatically reaps/cancels for us\n async with trio.open_nursery() as nursery:\n while self.running.is_set():\n await self._start_payloads(nursery=nursery)\n await trio.sleep(delay)\n delay = min(delay + 0.1, 1.0)\n # cancel the scope to cancel all payloads\n nursery.cancel_scope.cancel()", "def start_accepting_passive_svc_checks(self):\n \"\"\"Enable passive service check submission (globally)\n Format of the line that triggers function call::\n\n START_ACCEPTING_PASSIVE_SVC_CHECKS\n\n :return: None\n \"\"\"\n # todo: #783 create a dedicated brok for global parameters\n if not self.my_conf.accept_passive_service_checks:\n self.my_conf.modified_attributes |= DICT_MODATTR[\"MODATTR_PASSIVE_CHECKS_ENABLED\"].value\n self.my_conf.accept_passive_service_checks = True\n self.my_conf.explode_global_conf()\n self.daemon.update_program_status()", "def _start_payloads(self):\n \"\"\"Start all queued payloads\"\"\"\n with self._lock:\n payloads = self._payloads.copy()\n self._payloads.clear()\n for subroutine in payloads:\n thread = CapturingThread(target=subroutine)\n thread.start()\n self._threads.add(thread)\n self._logger.debug('booted thread %s', thread)\n time.sleep(0)", "async def _run_payloads(self):\n \"\"\"Async component of _run\"\"\"\n delay = 0.0\n try:\n while self.running.is_set():\n await self._start_payloads()\n await self._reap_payloads()\n await asyncio.sleep(delay)\n delay = min(delay + 0.1, 1.0)\n except Exception:\n await self._cancel_payloads()\n raise", "async def _start_payloads(self, nursery):\n \"\"\"Start all queued payloads\"\"\"\n with self._lock:\n for coroutine in self._payloads:\n nursery.start_soon(coroutine)\n self._payloads.clear()\n await trio.sleep(0)", "def register_payload(self, *payloads, flavour: ModuleType):\n \"\"\"Queue one or more payload for execution after its runner is started\"\"\"\n for payload in payloads:\n self._logger.debug('registering payload %s (%s)', NameRepr(payload), NameRepr(flavour))\n self.runners[flavour].register_payload(payload)", "def run_payload(self, payload, *, flavour: ModuleType):\n \"\"\"Execute one payload after its runner is started and return its output\"\"\"\n return self.runners[flavour].run_payload(payload)", "async def _start_payloads(self):\n \"\"\"Start all queued payloads\"\"\"\n with self._lock:\n for coroutine in self._payloads:\n task = self.event_loop.create_task(coroutine())\n self._tasks.add(task)\n self._payloads.clear()\n await asyncio.sleep(0)", "def run(self):\n \"\"\"\n Execute all current and future payloads\n\n Blocks and executes payloads until :py:meth:`stop` is called.\n It is an error for any orphaned payload to return or raise.\n \"\"\"\n self._logger.info('runner started: %s', self)\n try:\n with self._lock:\n assert not self.running.is_set() and self._stopped.is_set(), 'cannot re-run: %s' % self\n self.running.set()\n self._stopped.clear()\n self._run()\n except Exception:\n self._logger.exception('runner aborted: %s', self)\n raise\n else:\n self._logger.info('runner stopped: %s', self)\n finally:\n with self._lock:\n 
self.running.clear()\n self._stopped.set()", "def start(self, **kwargs):\n \"\"\"Start listening to command queue, process commands in main loop,\n set status, etc...\n This function is most likely called by the frontend in a separate\n process.\"\"\"\n\n # Keep a copy of keyword arguments for use in subclasses\n self.start_kwargs.update(kwargs)\n try:\n self.initialize_logging()\n\n self.__update_service_status(self.SERVICE_STATUS_STARTING)\n\n self.start_transport()\n\n self.initializing()\n self._register(\"command\", self.__process_command)\n\n if self.__pipe_commands is None:\n # can only listen to commands if command queue is defined\n self.__shutdown = True\n else:\n # start listening to command queue in separate thread\n self.__start_command_queue_listener()\n\n while not self.__shutdown: # main loop\n self.__update_service_status(self.SERVICE_STATUS_IDLE)\n\n if self._idle_time is None:\n task = self.__queue.get()\n else:\n try:\n task = self.__queue.get(True, self._idle_time)\n except queue.Empty:\n self.__update_service_status(self.SERVICE_STATUS_TIMER)\n if self._idle_callback:\n self._idle_callback()\n continue\n\n self.__update_service_status(self.SERVICE_STATUS_PROCESSING)\n\n if task[0] == Priority.COMMAND:\n message = task[2]\n if message and \"band\" in message:\n processor = self.__callback_register.get(message[\"band\"])\n if processor is None:\n self.log.warning(\n \"received message on unregistered band\\n%s\", message\n )\n else:\n processor(message.get(\"payload\"))\n else:\n self.log.warning(\n \"received message without band information\\n%s\", message\n )\n elif task[0] == Priority.TRANSPORT:\n callback, header, message = task[2]\n callback(header, message)\n else:\n self.log.warning(\"Unknown item on main service queue\\n%r\", task)\n\n except KeyboardInterrupt:\n self.log.warning(\"Ctrl+C detected. Shutting down.\")\n\n except Exception as e:\n self.process_uncaught_exception(e)\n self.__update_service_status(self.SERVICE_STATUS_ERROR)\n self.in_shutdown()\n return\n\n try:\n self.__update_service_status(self.SERVICE_STATUS_SHUTDOWN)\n self.in_shutdown()\n self.__update_service_status(self.SERVICE_STATUS_END)\n except Exception as e:\n self.process_uncaught_exception(e)\n self.__update_service_status(self.SERVICE_STATUS_ERROR)", "def execute(self, payload, *args, flavour: ModuleType, **kwargs):\n \"\"\"\n Synchronously run ``payload`` and provide its output\n\n If ``*args*`` and/or ``**kwargs`` are provided, pass them to ``payload`` upon execution.\n \"\"\"\n if args or kwargs:\n payload = functools.partial(payload, *args, **kwargs)\n return self._meta_runner.run_payload(payload, flavour=flavour)" ]
[ 0.7352553009986877, 0.7254830002784729, 0.7184668183326721, 0.7160866856575012, 0.7129335403442383, 0.7126063704490662, 0.7119871973991394, 0.7111474871635437, 0.6988613605499268, 0.6975022554397583, 0.6972094178199768, 0.6907680630683899 ]
Shutdown the accept loop and stop running payloads
def shutdown(self): """Shutdown the accept loop and stop running payloads""" self._must_shutdown = True self._is_shutdown.wait() self._meta_runner.stop()
[ "def stop(self):\n \"\"\"Stop execution of all current and future payloads\"\"\"\n if not self.running.wait(0.2):\n return\n self._logger.debug('runner disabled: %s', self)\n with self._lock:\n self.running.clear()\n self._stopped.wait()", "def shutdown(self, payload=None):\n \"\"\"\n Close the connection/shutdown the messaging loop.\n :param payload: None: not used. Here to allow using this method with add_command.\n \"\"\"\n logging.info(\"Work queue shutdown.\")\n self.connection.close()\n self.receiving_messages = False", "def stop(self):\n \"\"\"Stops the loop.\"\"\"\n self.running = False\n self.callbacks = {}\n self.epoll = epoll()\n\n self.event_queue = deque()\n self.event_callbacks = defaultdict(set)", "def shutdown(\n self):\n \"\"\"shutdown\"\"\"\n self.debug_log('shutdown - start')\n\n # Only initiate shutdown once\n if not self.shutdown_now:\n self.debug_log('shutdown - still shutting down')\n # Cancels the scheduled Timer, allows exit immediately\n if self.timer:\n self.timer.cancel()\n self.timer = None\n return\n else:\n self.debug_log('shutdown - start - setting instance shutdown')\n self.shutdown_now = True\n self.shutdown_event.set()\n # if/else already shutting down\n\n # Cancels the scheduled Timer, allows exit immediately\n self.timer.cancel()\n self.timer = None\n\n self.debug_log(\n 'shutdown - publishing remaining logs')\n\n if self.sleep_interval > 0:\n try:\n self.build_payload_from_queued_messages(\n use_queue=self.queue,\n shutdown_event=self.shutdown_event,\n triggered_by_shutdown=True)\n except Exception as e:\n self.write_log((\n 'shutdown - failed to build a payload for remaining '\n 'messages in queue Exception shutting down '\n 'with ex={}').format(\n e))\n\n self.debug_log(\n 'publishing remaining logs')\n\n # Send the remaining items in the queue\n self.publish_to_splunk()\n # end of try to publish remaining messages in the queue\n # during shutdown\n\n self.debug_log('shutdown - done')", "def shutdown(self):\n \"\"\" Request the system to shutdown the main loop and shutdown the system\n This is a one-way trip! 
Reconnecting requires a new connection\n to be made!\n \"\"\"\n self._request_shutdown = True\n for i in range(100):\n if self._state == STATE_DISCONNECTED:\n break\n time.sleep(0.1)", "def run(self):\n \"\"\"\n Execute all current and future payloads\n\n Blocks and executes payloads until :py:meth:`stop` is called.\n It is an error for any orphaned payload to return or raise.\n \"\"\"\n self._logger.info('runner started: %s', self)\n try:\n with self._lock:\n assert not self.running.is_set() and self._stopped.is_set(), 'cannot re-run: %s' % self\n self.running.set()\n self._stopped.clear()\n self._run()\n except Exception:\n self._logger.exception('runner aborted: %s', self)\n raise\n else:\n self._logger.info('runner stopped: %s', self)\n finally:\n with self._lock:\n self.running.clear()\n self._stopped.set()", "def stop(self, io_loop):\n \"\"\"\n Asynchronously stop the application.\n\n :param tornado.ioloop.IOLoop io_loop: loop to run until all\n callbacks, timeouts, and queued calls are complete\n\n Call this method to start the application shutdown process.\n The IOLoop will be stopped once the application is completely\n shut down.\n\n \"\"\"\n running_async = False\n shutdown = _ShutdownHandler(io_loop)\n for callback in self.on_shutdown_callbacks:\n try:\n maybe_future = callback(self.tornado_application)\n\n if asyncio.iscoroutine(maybe_future):\n maybe_future = asyncio.create_task(maybe_future)\n\n if concurrent.is_future(maybe_future):\n shutdown.add_future(maybe_future)\n running_async = True\n except Exception as error:\n self.logger.warning('exception raised from shutdown '\n 'callback %r, ignored: %s',\n callback, error, exc_info=1)\n\n if not running_async:\n shutdown.on_shutdown_ready()", "def stop\n return if @state != :started\n # stop receiving new incoming messages\n @service_queue.unsubscribe\n # only stop the service if all incoming and outgoing messages are complete\n decisecond_timeout = @options[:timeout]/100\n waited_deciseconds = 0 # guarantee that this loop will stop\n while (@transactions.length > 0 || @processing_messages > 0) && waited_deciseconds < decisecond_timeout\n sleep(0.1) # wait a decisecond to check the incoming and outgoing messages again\n waited_deciseconds += 1\n end\n\n @channel.close\n @state = :stopped\n end", "public final void stop()\n {\n if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled()) SibTr.entry(this, tc, \"stop\");\n\n synchronized (alarmLock)\n {\n if (enabled)\n {\n loaderStopTime = timeNow();\n enabled = false;\n shutdown = true;\n }\n if (loaderAlarm != null)\n {\n loaderAlarm.cancel();\n loaderAlarm = null;\n }\n }\n\n if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled()) SibTr.exit(this, tc, \"stop\");\n }", "def stop_daemon(self, payload=None):\n \"\"\"Kill current processes and initiate daemon shutdown.\n\n The daemon will shut down after a last check on all killed processes.\n \"\"\"\n kill_signal = signals['9']\n self.process_handler.kill_all(kill_signal, True)\n self.running = False\n\n return {'message': 'Pueue daemon shutting down',\n 'status': 'success'}", "def shutdown(self):\n \"\"\"Disconnect all connections and end the loop\n\n :returns: None\n :rtype: None\n :raises: None\n \"\"\"\n log.debug('Shutting down %s' % self)\n self.disconnect_all()\n self._looping.clear()", "async def _await_all(self):\n \"\"\"Async component of _run\"\"\"\n delay = 0.0\n # we run a top-level nursery that automatically reaps/cancels for us\n async with trio.open_nursery() as nursery:\n while 
self.running.is_set():\n await self._start_payloads(nursery=nursery)\n await trio.sleep(delay)\n delay = min(delay + 0.1, 1.0)\n # cancel the scope to cancel all payloads\n nursery.cancel_scope.cancel()" ]
[ 0.7566830515861511, 0.7525043487548828, 0.7137492895126343, 0.7121773362159729, 0.7077625393867493, 0.7046758532524109, 0.7045189738273621, 0.7034379243850708, 0.6988451480865479, 0.698276937007904, 0.6970272064208984, 0.6965173482894897 ]
View/edit/close milestones on github
def milestones(ctx, list, close): """View/edit/close milestones on github """ repos = get_repos(ctx.parent.agile.get('labels')) if list: _list_milestones(repos) elif close: click.echo('Closing milestones "%s"' % close) _close_milestone(repos, close) else: click.echo(ctx.get_help())
[ "def get_milestones(repo_name=None,\n profile='github',\n state='open',\n sort='due_on',\n direction='asc',\n output='min',\n per_page=None):\n '''\n Return information about milestones for a given repository.\n\n .. versionadded:: 2016.11.0\n\n repo_name\n The name of the repository for which to list issues. This argument is\n required, either passed via the CLI, or defined in the configured\n profile. A ``repo_name`` passed as a CLI argument will override the\n repo_name defined in the configured profile, if provided.\n\n profile\n The name of the profile configuration to use. Defaults to ``github``.\n\n state\n The state of the milestone. Either ``open``, ``closed``, or ``all``.\n Default is ``open``.\n\n sort\n What to sort results by. Either ``due_on`` or ``completeness``. Default\n is ``due_on``.\n\n direction\n The direction of the sort. Either ``asc`` or ``desc``. Default is ``asc``.\n\n output\n The amount of data returned by each issue. Defaults to ``min``. Change\n to ``full`` to see all issue output.\n\n per_page\n GitHub paginates data in their API calls. Use this value to increase or\n decrease the number of issues gathered from GitHub, per page. If not set,\n GitHub defaults are used.\n\n CLI Example:\n\n .. code-block:: bash\n\n salt myminion github.get_milestones\n\n '''\n org_name = _get_config_value(profile, 'org_name')\n if repo_name is None:\n repo_name = _get_config_value(profile, 'repo_name')\n\n action = '/'.join(['repos', org_name, repo_name])\n args = {}\n\n if per_page:\n args['per_page'] = per_page\n\n # Only pass the following API args if they're not the defaults listed.\n if state and state != 'open':\n args['state'] = state\n if sort and sort != 'due_on':\n args['sort'] = sort\n if direction and direction != 'asc':\n args['direction'] = direction\n\n ret = {}\n milestones = _query(profile, action=action, command='milestones', args=args)\n\n for milestone in milestones:\n milestone_id = milestone.get('id')\n if output == 'full':\n ret[milestone_id] = milestone\n else:\n milestone.pop('creator')\n milestone.pop('html_url')\n milestone.pop('labels_url')\n ret[milestone_id] = milestone\n\n return ret", "def close_milestone(id, **kwargs):\n \"\"\"\n Close a milestone. This triggers its release process.\n\n The user can optionally specify the release-date, otherwise today's date is\n used.\n\n If the wait parameter is specified and set to True, upon closing the milestone,\n we'll periodically check that the release being processed is done.\n\n Required:\n - id: int\n\n Optional:\n - wait key: bool\n \"\"\"\n data = close_milestone_raw(id, **kwargs)\n if data:\n return utils.format_json(data)", "def list_milestones(self, project_id, find=None):\n \"\"\"\n This lets you query the list of milestones for a project. 
You can\n either return all milestones, or only those that are late, completed,\n or upcoming.\n \"\"\"\n path = '/projects/%u/milestones/list' % project_id\n req = ET.Element('request')\n if find is not None:\n ET.SubElement(req, 'find').text = str(find)\n return self._request(path, req)", "def create_milestone(self, title, state=github.GithubObject.NotSet, description=github.GithubObject.NotSet, due_on=github.GithubObject.NotSet):\n \"\"\"\n :calls: `POST /repos/:owner/:repo/milestones <http://developer.github.com/v3/issues/milestones>`_\n :param title: string\n :param state: string\n :param description: string\n :param due_on: datetime\n :rtype: :class:`github.Milestone.Milestone`\n \"\"\"\n assert isinstance(title, (str, unicode)), title\n assert state is github.GithubObject.NotSet or isinstance(state, (str, unicode)), state\n assert description is github.GithubObject.NotSet or isinstance(description, (str, unicode)), description\n assert due_on is github.GithubObject.NotSet or isinstance(due_on, (datetime.datetime, datetime.date)), due_on\n post_parameters = {\n \"title\": title,\n }\n if state is not github.GithubObject.NotSet:\n post_parameters[\"state\"] = state\n if description is not github.GithubObject.NotSet:\n post_parameters[\"description\"] = description\n if due_on is not github.GithubObject.NotSet:\n if isinstance(due_on, datetime.date):\n post_parameters[\"due_on\"] = due_on.strftime(\"%Y-%m-%dT%H:%M:%SZ\")\n else:\n post_parameters[\"due_on\"] = due_on.isoformat()\n headers, data = self._requester.requestJsonAndCheck(\n \"POST\",\n self.url + \"/milestones\",\n input=post_parameters\n )\n return github.Milestone.Milestone(self._requester, headers, data, completed=True)", "def get_milestone(number=None,\n name=None,\n repo_name=None,\n profile='github',\n output='min'):\n '''\n Return information about a single milestone in a named repository.\n\n .. versionadded:: 2016.11.0\n\n number\n The number of the milestone to retrieve. If provided, this option\n will be favored over ``name``.\n\n name\n The name of the milestone to retrieve.\n\n repo_name\n The name of the repository for which to list issues. This argument is\n required, either passed via the CLI, or defined in the configured\n profile. A ``repo_name`` passed as a CLI argument will override the\n repo_name defined in the configured profile, if provided.\n\n profile\n The name of the profile configuration to use. Defaults to ``github``.\n\n output\n The amount of data returned by each issue. Defaults to ``min``. Change\n to ``full`` to see all issue output.\n\n CLI Example:\n\n .. 
code-block:: bash\n\n salt myminion github.get_milestone 72\n salt myminion github.get_milestone name=my_milestone\n\n '''\n ret = {}\n\n if not any([number, name]):\n raise CommandExecutionError(\n 'Either a milestone \\'name\\' or \\'number\\' must be provided.'\n )\n\n org_name = _get_config_value(profile, 'org_name')\n if repo_name is None:\n repo_name = _get_config_value(profile, 'repo_name')\n\n action = '/'.join(['repos', org_name, repo_name])\n if number:\n command = 'milestones/' + six.text_type(number)\n milestone_data = _query(profile, action=action, command=command)\n milestone_id = milestone_data.get('id')\n if output == 'full':\n ret[milestone_id] = milestone_data\n else:\n milestone_data.pop('creator')\n milestone_data.pop('html_url')\n milestone_data.pop('labels_url')\n ret[milestone_id] = milestone_data\n return ret\n\n else:\n milestones = get_milestones(repo_name=repo_name, profile=profile, output=output)\n for key, val in six.iteritems(milestones):\n if val.get('title') == name:\n ret[key] = val\n return ret\n\n return ret", "def get_milestones(self, state=github.GithubObject.NotSet, sort=github.GithubObject.NotSet, direction=github.GithubObject.NotSet):\n \"\"\"\n :calls: `GET /repos/:owner/:repo/milestones <http://developer.github.com/v3/issues/milestones>`_\n :param state: string\n :param sort: string\n :param direction: string\n :rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Milestone.Milestone`\n \"\"\"\n assert state is github.GithubObject.NotSet or isinstance(state, (str, unicode)), state\n assert sort is github.GithubObject.NotSet or isinstance(sort, (str, unicode)), sort\n assert direction is github.GithubObject.NotSet or isinstance(direction, (str, unicode)), direction\n url_parameters = dict()\n if state is not github.GithubObject.NotSet:\n url_parameters[\"state\"] = state\n if sort is not github.GithubObject.NotSet:\n url_parameters[\"sort\"] = sort\n if direction is not github.GithubObject.NotSet:\n url_parameters[\"direction\"] = direction\n return github.PaginatedList.PaginatedList(\n github.Milestone.Milestone,\n self._requester,\n self.url + \"/milestones\",\n url_parameters\n )", "def list(*args)\n arguments(args, required: [:user, :repo]) do\n permit VALID_MILESTONE_OPTIONS.keys\n assert_values VALID_MILESTONE_OPTIONS\n end\n\n response = get_request(\"/repos/#{arguments.user}/#{arguments.repo}/milestones\", arguments.params)\n return response unless block_given?\n response.each { |el| yield el }\n end", "def get_milestone(self, title):\n \"\"\"\n given the title as str, looks for an existing milestone or create a new one,\n and return the object\n \"\"\"\n if not title:\n return GithubObject.NotSet\n if not hasattr(self, '_milestones'):\n self._milestones = {m.title: m for m in self.repo.get_milestones()}\n\n milestone = self._milestones.get(title)\n if not milestone:\n milestone = self.repo.create_milestone(title=title)\n return milestone", "def index():\n \"\"\"Display list of the user's repositories.\"\"\"\n github = GitHubAPI(user_id=current_user.id)\n token = github.session_token\n ctx = dict(connected=False)\n\n if token:\n # The user is authenticated and the token we have is still valid.\n if github.account.extra_data.get('login') is None:\n github.init_account()\n db.session.commit()\n\n # Sync if needed\n if request.method == 'POST' or github.check_sync():\n # When we're in an XHR request, we want to synchronously sync hooks\n github.sync(async_hooks=(not request.is_xhr))\n db.session.commit()\n\n # Generate the 
repositories view object\n extra_data = github.account.extra_data\n repos = extra_data['repos']\n if repos:\n # 'Enhance' our repos dict, from our database model\n db_repos = Repository.query.filter(\n Repository.github_id.in_([int(k) for k in repos.keys()]),\n ).all()\n for repo in db_repos:\n repos[str(repo.github_id)]['instance'] = repo\n repos[str(repo.github_id)]['latest'] = GitHubRelease(\n repo.latest_release())\n\n last_sync = humanize.naturaltime(\n (utcnow() - parse_timestamp(extra_data['last_sync'])))\n\n ctx.update({\n 'connected': True,\n 'repos': sorted(repos.items(), key=lambda x: x[1]['full_name']),\n 'last_sync': last_sync,\n })\n\n return render_template(current_app.config['GITHUB_TEMPLATE_INDEX'], **ctx)", "def edit(self, title, state=github.GithubObject.NotSet, description=github.GithubObject.NotSet, due_on=github.GithubObject.NotSet):\n \"\"\"\n :calls: `PATCH /repos/:owner/:repo/milestones/:number <http://developer.github.com/v3/issues/milestones>`_\n :param title: string\n :param state: string\n :param description: string\n :param due_on: date\n :rtype: None\n \"\"\"\n assert isinstance(title, (str, unicode)), title\n assert state is github.GithubObject.NotSet or isinstance(state, (str, unicode)), state\n assert description is github.GithubObject.NotSet or isinstance(description, (str, unicode)), description\n assert due_on is github.GithubObject.NotSet or isinstance(due_on, datetime.date), due_on\n post_parameters = {\n \"title\": title,\n }\n if state is not github.GithubObject.NotSet:\n post_parameters[\"state\"] = state\n if description is not github.GithubObject.NotSet:\n post_parameters[\"description\"] = description\n if due_on is not github.GithubObject.NotSet:\n post_parameters[\"due_on\"] = due_on.strftime(\"%Y-%m-%d\")\n headers, data = self._requester.requestJsonAndCheck(\n \"PATCH\",\n self.url,\n input=post_parameters\n )\n self._useAttributes(data)", "def get_milestone(self, number):\n \"\"\"\n :calls: `GET /repos/:owner/:repo/milestones/:number <http://developer.github.com/v3/issues/milestones>`_\n :param number: integer\n :rtype: :class:`github.Milestone.Milestone`\n \"\"\"\n assert isinstance(number, (int, long)), number\n headers, data = self._requester.requestJsonAndCheck(\n \"GET\",\n self.url + \"/milestones/\" + str(number)\n )\n return github.Milestone.Milestone(self._requester, headers, data, completed=True)", "def issues(self, **kwargs):\n \"\"\"List issues related to this milestone.\n\n Args:\n all (bool): If True, return all the items, without pagination\n per_page (int): Number of items to retrieve per request\n page (int): ID of the page to return (starts with page 1)\n as_list (bool): If set to False and no pagination option is\n defined, return a generator instead of a list\n **kwargs: Extra options to send to the server (e.g. sudo)\n\n Raises:\n GitlabAuthenticationError: If authentication is not correct\n GitlabListError: If the list could not be retrieved\n\n Returns:\n RESTObjectList: The list of issues\n \"\"\"\n\n path = '%s/%s/issues' % (self.manager.path, self.get_id())\n data_list = self.manager.gitlab.http_list(path, as_list=False,\n **kwargs)\n manager = ProjectIssueManager(self.manager.gitlab,\n parent=self.manager._parent)\n # FIXME(gpocentek): the computed manager path is not correct\n return RESTObjectList(manager, ProjectIssue, data_list)" ]
[ 0.7411880493164062, 0.7096606492996216, 0.6985909342765808, 0.6985446214675903, 0.6976544260978699, 0.6975024342536926, 0.6945411562919617, 0.6902499794960022, 0.6846461892127991, 0.6774165034294128, 0.675011396408081, 0.6747035980224609 ]
Starts a console; modified from code.interact
def start_console(local_vars={}): '''Starts a console; modified from code.interact''' transforms.CONSOLE_ACTIVE = True transforms.remove_not_allowed_in_console() sys.ps1 = prompt console = ExperimentalInteractiveConsole(locals=local_vars) console.interact(banner=banner)
[ "def d(self, depth=1):\n \"\"\"Launches an interactive console at the point where it's called.\"\"\"\n info = self.inspect.getframeinfo(self.sys._getframe(1))\n s = self.Stanza(self.indent)\n s.add([info.function + ': '])\n s.add([self.MAGENTA, 'Interactive console opened', self.NORMAL])\n self.writer.write(s.chunks)\n\n frame = self.sys._getframe(depth)\n env = frame.f_globals.copy()\n env.update(frame.f_locals)\n self.indent += 2\n self.in_console = True\n self.code.interact(\n 'Python console opened by q.d() in ' + info.function, local=env)\n self.in_console = False\n self.indent -= 2\n\n s = self.Stanza(self.indent)\n s.add([info.function + ': '])\n s.add([self.MAGENTA, 'Interactive console closed', self.NORMAL])\n self.writer.write(s.chunks)", "def open_python(self, message, namespace):\n \"\"\"Open interactive python console\"\"\"\n\n # Importing readline will in some cases print weird escape\n # characters to stdout. To avoid this we only import readline\n # and related packages at this point when we are certain\n # they are needed.\n from code import InteractiveConsole\n import readline\n import rlcompleter\n\n readline.set_completer(rlcompleter.Completer(namespace).complete)\n readline.parse_and_bind('tab: complete')\n console = InteractiveConsole(namespace)\n console.interact(message)", "def interact(banner=None, readfunc=None, local=None):\n \"\"\"Closely emulate the interactive Python interpreter.\n\n This is a backwards compatible interface to the InteractiveConsole\n class. When readfunc is not specified, it attempts to import the\n readline module to enable GNU readline if it is available.\n\n Arguments (all optional, all default to None):\n\n banner -- passed to InteractiveConsole.interact()\n readfunc -- if not None, replaces InteractiveConsole.raw_input()\n local -- passed to InteractiveInterpreter.__init__()\n\n \"\"\"\n console = InteractiveConsole(local)\n if readfunc is not None:\n console.raw_input = readfunc\n else:\n try:\n import readline\n except ImportError:\n pass\n console.interact(banner)", "def interact_plain(header=UP_LINE, local_ns=None,\n module=None, dummy=None,\n stack_depth=1, global_ns=None):\n \"\"\"\n Create an interactive python console\n \"\"\"\n frame = sys._getframe(stack_depth)\n\n variables = {}\n\n if local_ns is not None:\n variables.update(local_ns)\n else:\n variables.update(frame.f_locals)\n\n if global_ns is not None:\n variables.update(local_ns)\n else:\n variables.update(frame.f_globals)\n\n shell = code.InteractiveConsole(variables)\n return shell.interact(banner=header)", "def python_console(namespace=None):\n \"\"\"Start a interactive python console with caller's stack\"\"\"\n\n if namespace is None:\n import inspect\n frame = inspect.currentframe()\n caller = frame.f_back\n if not caller:\n logging.error(\"can't find caller who start this console.\")\n caller = frame\n namespace = dict(caller.f_globals)\n namespace.update(caller.f_locals)\n\n return get_python_console(namespace=namespace).interact()", "def interact(self, banner=None):\n \"\"\"Closely emulate the interactive Python console.\n\n The optional banner argument specify the banner to print\n before the first interaction; by default it prints a banner\n similar to the one printed by the real Python interpreter,\n followed by the current class name in parentheses (so as not\n to confuse this with the real interpreter -- since it's so\n close!).\n\n \"\"\"\n try:\n sys.ps1 #@UndefinedVariable\n except AttributeError:\n sys.ps1 = \">>> \"\n try:\n sys.ps2 
#@UndefinedVariable\n except AttributeError:\n sys.ps2 = \"... \"\n cprt = 'Type \"help\", \"copyright\", \"credits\" or \"license\" for more information.'\n if banner is None:\n self.write(\"Python %s on %s\\n%s\\n(%s)\\n\" %\n (sys.version, sys.platform, cprt,\n self.__class__.__name__))\n else:\n self.write(\"%s\\n\" % str(banner))\n more = 0\n while 1:\n try:\n if more:\n prompt = sys.ps2 #@UndefinedVariable\n else:\n prompt = sys.ps1 #@UndefinedVariable\n try:\n line = self.raw_input(prompt)\n # Can be None if sys.stdin was redefined\n encoding = getattr(sys.stdin, \"encoding\", None)\n if encoding and not isinstance(line, unicode):\n line = line.decode(encoding)\n except EOFError:\n self.write(\"\\n\")\n break\n else:\n more = self.push(line)\n except KeyboardInterrupt:\n self.write(\"\\nKeyboardInterrupt\\n\")\n self.resetbuffer()\n more = 0", "def run_console(*, locals=None, banner=None, serve=None, prompt_control=None):\n \"\"\"Run the interactive event loop.\"\"\"\n loop = InteractiveEventLoop(\n locals=locals,\n banner=banner,\n serve=serve,\n prompt_control=prompt_control)\n asyncio.set_event_loop(loop)\n try:\n loop.run_forever()\n except KeyboardInterrupt:\n pass", "function Console(game, messageHistoryCount, elClassName) {\n this.el = document.createElement('div');\n this.el.className = elClassName || 'console';\n this.messageHistoryCount = messageHistoryCount || this.messageHistoryCount;\n this.game = game;\n }", "def interact(self, banner=None):\n \"\"\"Closely emulate the interactive Python console.\n\n This method overwrites its superclass' method to specify a different help\n text and to enable proper handling of the debugger status line.\n\n Args:\n banner: Text to be displayed on interpreter startup.\n \"\"\"\n sys.ps1 = getattr(sys, 'ps1', '>>> ')\n sys.ps2 = getattr(sys, 'ps2', '... ')\n if banner is None:\n print ('Pyringe (Python %s.%s.%s) on %s\\n%s' %\n (sys.version_info.major, sys.version_info.minor,\n sys.version_info.micro, sys.platform, _WELCOME_MSG))\n else:\n print banner\n more = False\n while True:\n try:\n if more:\n prompt = sys.ps2\n else:\n prompt = self.StatusLine() + '\\n' + sys.ps1\n try:\n line = self.raw_input(prompt)\n except EOFError:\n print ''\n break\n else:\n more = self.push(line)\n except KeyboardInterrupt:\n print '\\nKeyboardInterrupt'\n self.resetbuffer()\n more = False", "def console(session, database=None, user=None, password=None, server=None):\n \"\"\"An interactive Python API console for MyGeotab\n\n If IPython is installed, it will launch an interactive IPython console instead of the built-in Python console. The\n IPython console has numerous advantages over the stock Python console, including: colors, pretty printing,\n command auto-completion, and more.\n\n By default, all library objects are available as locals in the script, with 'myg' being the active API object.\n\n :param session: The current Session object.\n :param database: The database name to open a console to.\n :param user: The username used for MyGeotab servers. Usually an email address.\n :param password: The password associated with the username. Optional if `session_id` is provided.\n :param server: The server ie. my23.geotab.com. 
Optional as this usually gets resolved upon authentication.\n \"\"\"\n local_vars = _populate_locals(database, password, server, session, user)\n version = 'MyGeotab Console {0} [Python {1}]'.format(mygeotab.__version__,\n sys.version.replace('\\n', ''))\n auth_line = ('Logged in as: %s' % session.credentials) if session.credentials else 'Not logged in'\n banner = '\\n'.join([version, auth_line])\n try:\n from IPython import embed\n\n embed(banner1=banner, user_ns=local_vars)\n except ImportError:\n import code\n\n code.interact(banner, local=local_vars)", "def get_python_console(namespace=None):\n \"\"\"\n Return a interactive python console instance with caller's stack\n \"\"\"\n\n if namespace is None:\n import inspect\n frame = inspect.currentframe()\n caller = frame.f_back\n if not caller:\n logging.error(\"can't find caller who start this console.\")\n caller = frame\n namespace = dict(caller.f_globals)\n namespace.update(caller.f_locals)\n\n try:\n from IPython.terminal.interactiveshell import TerminalInteractiveShell\n shell = TerminalInteractiveShell(user_ns=namespace)\n except ImportError:\n try:\n import readline\n import rlcompleter\n readline.set_completer(rlcompleter.Completer(namespace).complete)\n readline.parse_and_bind(\"tab: complete\")\n except ImportError:\n pass\n import code\n shell = code.InteractiveConsole(namespace)\n shell._quit = False\n\n def exit():\n shell._quit = True\n\n def readfunc(prompt=\"\"):\n if shell._quit:\n raise EOFError\n return six.moves.input(prompt)\n\n # inject exit method\n shell.ask_exit = exit\n shell.raw_input = readfunc\n\n return shell", "def _run_interpreter(variables, banner):\n \"\"\"\n Runs a Python interpreter console and blocks until the user exits it.\n\n :param variables: Interpreters variables (locals)\n :param banner: Start-up banners\n \"\"\"\n # Script-only imports\n import code\n\n try:\n import readline\n import rlcompleter\n\n readline.set_completer(rlcompleter.Completer(variables).complete)\n readline.parse_and_bind(\"tab: complete\")\n except ImportError:\n # readline is not available: ignore\n pass\n\n # Start the console\n shell = code.InteractiveConsole(variables)\n shell.interact(banner)" ]
[ 0.7744324207305908, 0.7488301992416382, 0.7446141839027405, 0.740533709526062, 0.7396584153175354, 0.7339498996734619, 0.7265417575836182, 0.7221200466156006, 0.719450831413269, 0.7167628407478333, 0.7133338451385498, 0.7087196111679077 ]
Transform and push a line to the interpreter. The line should not have a trailing newline; it may have internal newlines. The line is appended to a buffer and the interpreter's runsource() method is called with the concatenated contents of the buffer as source. If this indicates that the command was executed or invalid, the buffer is reset; otherwise, the command is incomplete, and the buffer is left as it was after the line was appended. The return value is 1 if more input is required, 0 if the line was dealt with in some way (this is the same as runsource()).
def push(self, line): """Transform and push a line to the interpreter. The line should not have a trailing newline; it may have internal newlines. The line is appended to a buffer and the interpreter's runsource() method is called with the concatenated contents of the buffer as source. If this indicates that the command was executed or invalid, the buffer is reset; otherwise, the command is incomplete, and the buffer is left as it was after the line was appended. The return value is 1 if more input is required, 0 if the line was dealt with in some way (this is the same as runsource()). """ if transforms.FROM_EXPERIMENTAL.match(line): transforms.add_transformers(line) self.buffer.append("\n") else: self.buffer.append(line) add_pass = False if line.rstrip(' ').endswith(":"): add_pass = True source = "\n".join(self.buffer) if add_pass: source += "pass" source = transforms.transform(source) if add_pass: source = source.rstrip(' ') if source.endswith("pass"): source = source[:-4] # some transformations may strip an empty line meant to end a block if not self.buffer[-1]: source += "\n" try: more = self.runsource(source, self.filename) except SystemExit: os._exit(1) if not more: self.resetbuffer() return more
[ "def push(self, line):\n \"\"\"Push a line to the interpreter.\n\n The line should not have a trailing newline; it may have\n internal newlines. The line is appended to a buffer and the\n interpreter's runsource() method is called with the\n concatenated contents of the buffer as source. If this\n indicates that the command was executed or invalid, the buffer\n is reset; otherwise, the command is incomplete, and the buffer\n is left as it was after the line was appended. The return\n value is 1 if more input is required, 0 if the line was dealt\n with in some way (this is the same as runsource()).\n\n \"\"\"\n self.buffer.append(line)\n source = \"\\n\".join(self.buffer)\n more = self.runsource(source, self.filename)\n if not more:\n self.resetbuffer()\n return more", "def push_line(self, line):\n \"\"\"Push line back onto the line buffer.\n\n :param line: the line with no trailing newline\n \"\"\"\n self.lineno -= 1\n self._buffer.append(line + b'\\n')", "def push(self, line, frame, buffer_output=True):\n \"\"\"Change built-in stdout and stderr methods by the\n new custom StdMessage.\n execute the InteractiveConsole.push.\n Change the stdout and stderr back be the original built-ins\n\n :param buffer_output: if False won't redirect the output.\n\n Return boolean (True if more input is required else False),\n output_messages and input_messages\n \"\"\"\n self.__buffer_output = buffer_output\n more = False\n if buffer_output:\n original_stdout = sys.stdout\n original_stderr = sys.stderr\n try:\n try:\n self.frame = frame\n if buffer_output:\n out = sys.stdout = IOBuf()\n err = sys.stderr = IOBuf()\n more = self.add_exec(line)\n except Exception:\n exc = get_exception_traceback_str()\n if buffer_output:\n err.buflist.append(\"Internal Error: %s\" % (exc,))\n else:\n sys.stderr.write(\"Internal Error: %s\\n\" % (exc,))\n finally:\n #Remove frame references.\n self.frame = None\n frame = None\n if buffer_output:\n sys.stdout = original_stdout\n sys.stderr = original_stderr\n\n if buffer_output:\n return more, out.buflist, err.buflist\n else:\n return more, [], []", "def magic_run(self, line):\n \"\"\"\n Run the current program\n\n Usage:\n Call with a numbe rto run that many steps,\n or call with no arguments to run to the end of the current program\n\n `%run`\n or\n `%run 1`\n \"\"\"\n i = float('inf')\n if line.strip():\n i = int(line)\n\n try:\n with warnings.catch_warnings(record=True) as w:\n self.interpreter.run(i)\n for warning_message in w:\n # TODO should this be stdout or stderr\n stream_content = {'name': 'stdout', 'text': 'Warning: ' + str(warning_message.message) + '\\n'}\n self.send_response(self.iopub_socket, 'stream', stream_content)\n except iarm.exceptions.EndOfProgram as e:\n f_name = self.interpreter.program[self.interpreter.register['PC'] - 1].__name__\n f_name = f_name[:f_name.find('_')]\n message = \"Error in {}: \".format(f_name)\n stream_content = {'name': 'stdout', 'text': message + str(e) + '\\n'}\n self.send_response(self.iopub_socket, 'stream', stream_content)\n except Exception as e:\n for err in e.args:\n stream_content = {'name': 'stderr', 'text': str(err)}\n self.send_response(self.iopub_socket, 'stream', stream_content)\n return {'status': 'error',\n 'execution_count': self.execution_count,\n 'ename': type(e).__name__,\n 'evalue': str(e),\n 'traceback': '???'}", "def process_input_line(self, line, store_history=True):\n \"\"\"process the input, capturing stdout\"\"\"\n #print \"input='%s'\"%self.input\n stdout = sys.stdout\n splitter = 
self.IP.input_splitter\n try:\n sys.stdout = self.cout\n splitter.push(line)\n more = splitter.push_accepts_more()\n if not more:\n source_raw = splitter.source_raw_reset()[1]\n self.IP.run_cell(source_raw, store_history=store_history)\n finally:\n sys.stdout = stdout", "def push_source(self, newstream, newfile=None):\n \"Push an input source onto the lexer's input source stack.\"\n if isinstance(newstream, basestring):\n newstream = StringIO(newstream)\n self.filestack.appendleft((self.infile, self.instream, self.lineno))\n self.infile = newfile\n self.instream = newstream\n self.lineno = 1", "async def brpoplpush(self, src, dst, timeout=0):\n \"\"\"\n Pop a value off the tail of ``src``, push it on the head of ``dst``\n and then return it.\n\n This command blocks until a value is in ``src`` or until ``timeout``\n seconds elapse, whichever is first. A ``timeout`` value of 0 blocks\n forever.\n\n Cluster impl:\n Call brpop() then send the result into lpush()\n\n Operation is no longer atomic.\n \"\"\"\n try:\n value = await self.brpop(src, timeout=timeout)\n if value is None:\n return None\n except TimeoutError:\n # Timeout was reached\n return None\n\n await self.lpush(dst, value[1])\n return value[1]", "def push(self, len, buf):\n \"\"\"Push the content of the arry in the input buffer This\n routine handle the I18N transcoding to internal UTF-8 This\n is used when operating the parser in progressive (push)\n mode. \"\"\"\n ret = libxml2mod.xmlParserInputBufferPush(self._o, len, buf)\n return ret", "private int lineBeginningFor(int pos) {\n if (sourceChars == null) {\n return -1;\n }\n if (pos <= 0) {\n return 0;\n }\n char[] buf = sourceChars;\n if (pos >= buf.length) {\n pos = buf.length - 1;\n }\n while (--pos >= 0) {\n char c = buf[pos];\n if (ScriptRuntime.isJSLineTerminator(c)) {\n return pos + 1; // want position after the newline\n }\n }\n return 0;\n }", "def magic_postpone_execution(self, line):\n \"\"\"\n Postpone execution of instructions until explicitly run\n\n Usage:\n Call this magic with `true` or nothing to postpone execution,\n or call with `false` to execute each instruction when evaluated.\n This defaults to True.\n\n Note that each cell is executed only executed after all lines in\n the cell have been evaluated properly.\n\n `%postpone_execution`\n or\n `%postpone_execution true`\n or\n `%postpone_execution false`\n \"\"\"\n line = line.strip().lower()\n if not line or line == 'true':\n self.interpreter.postpone_execution = True\n elif line == 'false':\n self.interpreter.postpone_execution = False\n else:\n stream_content = {'name': 'stderr', 'text': \"unknwon value '{}'\".format(line)}\n self.send_response(self.iopub_socket, 'stream', stream_content)\n return {'status': 'error',\n 'execution_count': self.execution_count,\n 'ename': ValueError.__name__,\n 'evalue': \"unknwon value '{}'\".format(line),\n 'traceback': '???'}", "def addr2line(self, addrq):\n '''\n Get the line number for a given bytecode offset\n\n Analogous to PyCode_Addr2Line; translated from pseudocode in\n Objects/lnotab_notes.txt\n '''\n co_lnotab = self.pyop_field('co_lnotab').proxyval(set())\n\n # Initialize lineno to co_firstlineno as per PyCode_Addr2Line\n # not 0, as lnotab_notes.txt has it:\n lineno = int_from_int(self.field('co_firstlineno'))\n\n addr = 0\n for addr_incr, line_incr in zip(co_lnotab[::2], co_lnotab[1::2]):\n addr += ord(addr_incr)\n if addr > addrq:\n return lineno\n lineno += ord(line_incr)\n return lineno", "def insert_line lineno=@current_index\n prompt = \"Insert: \"\n 
maxlen = 80\n #config={}; \n #config[:default] = line\n #ret, str = rb_getstr(@form.window, $error_message_row, $error_message_col, prompt, maxlen, config)\n ret, str = input_string prompt\n #ret, str = rb_getstr(@form.window, @row+@height-1, @col+1, prompt, maxlen, config)\n $log.debug \" rb_getstr returned #{ret} , #{str} \"\n return if ret != 0\n\n # pad based expect @content not list\n # remove list after a while FIXME\n @list ||= @content\n @list.insert lineno, str\n ## added handler on 2010-05-23 11:46 - undo works - tested in testlistbox.rb\n fire_handler :CHANGE, InputDataEvent.new(0,str.length, self, :INSERT_LINE, lineno, str)\n fire_dimension_changed\n end" ]
[ 0.9045475125312805, 0.7175180912017822, 0.701181173324585, 0.6785591244697571, 0.6757619380950928, 0.6738131046295166, 0.6724637150764465, 0.6719925999641418, 0.6677915453910828, 0.6668353080749512, 0.6662765741348267, 0.665422260761261 ]
Write dict object into file :param obj: the object to be dumped into toml :param f: the file object :param preserve: optional flag to preserve the inline table in result
def dump(obj, f, preserve=False): """Write dict object into file :param obj: the object to be dumped into toml :param f: the file object :param preserve: optional flag to preserve the inline table in result """ if not f.write: raise TypeError('You can only dump an object into a file object') encoder = Encoder(f, preserve=preserve) return encoder.write_dict(obj)
[ "def dumps(obj, preserve=False):\n \"\"\"Stringifies a dict as toml\n\n :param obj: the object to be dumped into toml\n :param preserve: optional flag to preserve the inline table in result\n \"\"\"\n f = StringIO()\n dump(obj, f, preserve)\n return f.getvalue()", "def dump(o, f):\n \"\"\"Writes out dict as toml to a file\n\n Args:\n o: Object to dump into toml\n f: File descriptor where the toml should be stored\n\n Returns:\n String containing the toml corresponding to dictionary\n\n Raises:\n TypeError: When anything other than file descriptor is passed\n \"\"\"\n\n if not f.write:\n raise TypeError(\"You can only dump an object to a file descriptor\")\n d = dumps(o)\n f.write(d)\n return d", "def _dump(f, obj, flip_faces=False, ungroup=False, comments=None, split_normals=False, write_mtl=True): # pylint: disable=redefined-outer-name\n '''\n write_mtl: When True and mesh has a texture, includes a mtllib\n reference in the .obj and writes a .mtl alongside.\n\n '''\n import os\n import numpy as np\n from baiji import s3\n\n ff = -1 if flip_faces else 1\n def write_face_to_obj_file(obj, faces, face_index, obj_file):\n vertex_indices = faces[face_index][::ff] + 1\n\n write_normals = obj.fn is not None or (obj.vn is not None and obj.vn.shape == obj.v.shape)\n write_texture = obj.ft is not None and obj.vt is not None\n\n if write_normals and obj.fn is not None:\n normal_indices = obj.fn[face_index][::ff] + 1\n assert len(normal_indices) == len(vertex_indices)\n elif write_normals: # unspecified fn but per-vertex normals, assume ordering is same as for v\n normal_indices = faces[face_index][::ff] + 1\n\n if write_texture:\n texture_indices = obj.ft[face_index][::ff] + 1\n assert len(texture_indices) == len(vertex_indices)\n\n # Valid obj face lines are: v, v/vt, v//vn, v/vt/vn\n if write_normals and write_texture:\n pattern = '%d/%d/%d'\n value = tuple(np.array([vertex_indices, texture_indices, normal_indices]).T.flatten())\n elif write_normals:\n pattern = '%d//%d'\n value = tuple(np.array([vertex_indices, normal_indices]).T.flatten())\n elif write_texture:\n pattern = '%d/%d'\n value = tuple(np.array([vertex_indices, texture_indices]).T.flatten())\n else:\n pattern = '%d'\n value = tuple(vertex_indices)\n obj_file.write(('f ' + ' '.join([pattern]*len(vertex_indices)) + '\\n') % value)\n\n if comments != None:\n if isinstance(comments, basestring):\n comments = [comments]\n for comment in comments:\n for line in comment.split(\"\\n\"):\n f.write(\"# %s\\n\" % line)\n\n if write_mtl and hasattr(obj, 'texture_filepath') and obj.texture_filepath is not None:\n save_to = s3.path.dirname(f.name)\n mtl_name = os.path.splitext(s3.path.basename(f.name))[0]\n mtl_filename = mtl_name + '.mtl'\n f.write('mtllib %s\\n' % mtl_filename)\n f.write('usemtl %s\\n' % mtl_name)\n texture_filename = mtl_name + os.path.splitext(obj.texture_filepath)[1]\n if not s3.exists(s3.path.join(save_to, texture_filename)):\n s3.cp(obj.texture_filepath, s3.path.join(save_to, texture_filename))\n obj.write_mtl(s3.path.join(save_to, mtl_filename), mtl_name, texture_filename)\n\n if obj.vc is not None:\n for r, c in zip(obj.v, obj.vc):\n f.write('v %f %f %f %f %f %f\\n' % (r[0], r[1], r[2], c[0], c[1], c[2]))\n elif obj.v is not None:\n for r in obj.v:\n f.write('v %f %f %f\\n' % (r[0], r[1], r[2]))\n\n if obj.vn is not None:\n if split_normals:\n for vn_idx in obj.fn:\n r = obj.vn[vn_idx[0]]\n f.write('vn %f %f %f\\n' % (r[0], r[1], r[2]))\n r = obj.vn[vn_idx[1]]\n f.write('vn %f %f %f\\n' % (r[0], r[1], r[2]))\n r = 
obj.vn[vn_idx[2]]\n f.write('vn %f %f %f\\n' % (r[0], r[1], r[2]))\n else:\n for r in obj.vn:\n f.write('vn %f %f %f\\n' % (r[0], r[1], r[2]))\n\n if obj.ft is not None and obj.vt is not None:\n for r in obj.vt:\n if len(r) == 3:\n f.write('vt %f %f %f\\n' % (r[0], r[1], r[2]))\n else:\n f.write('vt %f %f\\n' % (r[0], r[1]))\n if obj.f4 is not None:\n faces = obj.f4\n elif obj.f is not None:\n faces = obj.f\n else:\n faces = None\n if obj.segm is not None and not ungroup:\n if faces is not None:\n # An array of strings.\n group_names = np.array(obj.segm.keys())\n\n # A 2d array of booleans indicating which face is in which group.\n group_mask = np.zeros((len(group_names), len(faces)), dtype=bool)\n for i, segm_faces in enumerate(obj.segm.itervalues()):\n group_mask[i][segm_faces] = True\n\n # In an OBJ file, \"g\" changes the current state. This is a slice of\n # group_mask that represents the current state.\n current_group_mask = np.zeros((len(group_names),), dtype=bool)\n\n for face_index in range(len(faces)):\n # If the group has changed from the previous face, write the\n # group entry.\n this_group_mask = group_mask[:, face_index]\n if any(current_group_mask != this_group_mask):\n current_group_mask = this_group_mask\n f.write('g %s\\n' % ' '.join(group_names[current_group_mask]))\n\n write_face_to_obj_file(obj, faces, face_index, f)\n else:\n if faces is not None:\n for face_index in range(len(faces)):\n write_face_to_obj_file(obj, faces, face_index, f)", "def save_as(self, fname, obj=None):\n \"\"\" Save DICOM file given a GDCM DICOM object.\n Examples of a GDCM DICOM object:\n * gdcm.Writer()\n * gdcm.Reader()\n * gdcm.Anonymizer()\n\n :param fname: DICOM file name to be saved\n :param obj: DICOM object to be saved, if None, Anonymizer() is used\n \"\"\"\n writer = gdcm.Writer()\n writer.SetFileName(fname)\n if obj is None and self._anon_obj:\n obj = self._anon_obj\n else:\n raise ValueError(\"Need DICOM object, e.g. obj=gdcm.Anonymizer()\")\n writer.SetFile(obj.GetFile())\n if not writer.Write():\n raise IOError(\"Could not save DICOM file\")\n return True", "def dump2file(self, obj, filepath, override=False, **kwargs):\n \"\"\"\n Dump a dictionary into a file. (Extensions: XYZ or PDB)\n\n Parameters\n ----------\n obj : :class:`dict`\n A dictionary containing molecular information.\n\n filepath : :class:`str`\n The filepath for the dumped file.\n\n override : :class:`bool`\n If True, any file in the filepath will be override. (default=False)\n \"\"\"\n # First we check if the file already exists. If yes and the override\n # keyword is False (default), we will raise an exception. Otherwise\n # the file will be overwritten.\n if override is False:\n if os.path.isfile(filepath):\n raise _FileAlreadyExists(\n \"The file {0} already exists. Use a different filepath, \"\n \"or set the 'override' kwarg to True.\".format(filepath))\n if str(filepath[-3:]) not in self._save_funcs.keys():\n raise _FileTypeError(\n \"The {0} file extension is \"\n \"not supported for dumping a MolecularSystem or a Molecule. 
\"\n \"Please use XYZ or PDB.\".format(str(filepath[-3:])))\n self._save_funcs[str(filepath[-3:])](obj, filepath, **kwargs)", "def _save( self, fn ):\n \"\"\"Persist the notebook to the given file.\n\n :param fn: the file name\"\"\"\n\n # create JSON object\n j = json.dumps({ 'description': self.description(),\n 'pending': self._pending,\n 'results': self._results },\n indent = 4,\n cls = MetadataEncoder)\n\n # write to file\n with open(fn, 'w') as f:\n f.write(j)", "def save_object(fname, obj):\n \"\"\"Pickle a Python object\"\"\"\n\n fd = gzip.open(fname, \"wb\")\n six.moves.cPickle.dump(obj, fd)\n fd.close()", "def save(filepath, obj, on_overwrite = 'ignore'):\n \"\"\"\n Serialize `object` to a file denoted by `filepath`.\n\n Parameters\n ----------\n filepath : str\n A filename. If the suffix is `.joblib` and joblib can be\n imported, `joblib.dump` is used in place of the regular\n pickling mechanisms; this results in much faster saves by\n saving arrays as separate .npy files on disk. If the file\n suffix is `.npy` than `numpy.save` is attempted on `obj`.\n Otherwise, (c)pickle is used.\n\n obj : object\n A Python object to be serialized.\n\n on_overwrite: A string specifying what to do if the file already\n exists.\n ignore: just overwrite it\n backup: make a copy of the file (<filepath>.bak) and\n delete it when done saving the new copy.\n this allows recovery of the old version of\n the file if saving the new one fails\n \"\"\"\n\n\n filepath = preprocess(filepath)\n\n if os.path.exists(filepath):\n if on_overwrite == 'backup':\n backup = filepath + '.bak'\n shutil.move(filepath, backup)\n save(filepath, obj)\n try:\n os.remove(backup)\n except Exception, e:\n warnings.warn(\"Got an error while traing to remove \"+backup+\":\"+str(e))\n return\n else:\n assert on_overwrite == 'ignore'\n\n\n try:\n _save(filepath, obj)\n except RuntimeError, e:\n \"\"\" Sometimes for large theano graphs, pickle/cPickle exceed the\n maximum recursion depth. This seems to me like a fundamental\n design flaw in pickle/cPickle. 
The workaround I employ here\n is the one recommended to someone who had a similar problem\n on stackexchange:\n\n http://stackoverflow.com/questions/2134706/hitting-maximum-recursion-depth-using-pythons-pickle-cpickle\n\n Obviously this does not scale and could cause a crash\n but I don't see another solution short of writing our\n own implementation of pickle.\n \"\"\"\n if str(e).find('recursion') != -1:\n warnings.warn('pylearn2.utils.save encountered the following '\n 'error: ' + str(e) +\n '\\nAttempting to resolve this error by calling ' +\n 'sys.setrecusionlimit and retrying')\n old_limit = sys.getrecursionlimit()\n try:\n sys.setrecursionlimit(50000)\n _save(filepath, obj)\n finally:\n sys.setrecursionlimit(old_limit)", "def dump(self, obj, key=None):\n \"\"\"Write a pickled representation of obj to the open TFile.\"\"\"\n if key is None:\n key = '_pickle'\n with preserve_current_directory():\n self.__file.cd()\n if sys.version_info[0] < 3:\n pickle.Pickler.dump(self, obj)\n else:\n super(Pickler, self).dump(obj)\n s = ROOT.TObjString(self.__io.getvalue())\n self.__io.reopen()\n s.Write(key)\n self.__file.GetFile().Flush()\n self.__pmap.clear()", "def save_object(self, obj):\n \"\"\"\n Save object to disk as JSON.\n\n Generally shouldn't be called directly.\n \"\"\"\n obj.pre_save(self.jurisdiction.jurisdiction_id)\n\n filename = '{0}_{1}.json'.format(obj._type, obj._id).replace('/', '-')\n\n self.info('save %s %s as %s', obj._type, obj, filename)\n self.debug(json.dumps(OrderedDict(sorted(obj.as_dict().items())),\n cls=utils.JSONEncoderPlus, indent=4, separators=(',', ': ')))\n\n self.output_names[obj._type].add(filename)\n\n with open(os.path.join(self.datadir, filename), 'w') as f:\n json.dump(obj.as_dict(), f, cls=utils.JSONEncoderPlus)\n\n # validate after writing, allows for inspection on failure\n try:\n obj.validate()\n except ValueError as ve:\n if self.strict_validation:\n raise ve\n else:\n self.warning(ve)\n\n # after saving and validating, save subordinate objects\n for obj in obj._related:\n self.save_object(obj)", "def to_file(self, f):\n \"\"\"Saves the history as a json file. In order to use this feature,\n the history must only contain JSON encodable Python data structures.\n Numpy and PyTorch types should not be in the history.\n\n Parameters\n ----------\n f : file-like object or str\n\n \"\"\"\n with open_file_like(f, 'w') as fp:\n json.dump(self.to_list(), fp)", "def write_tabular(obj, filepath):\n \"\"\"Write tabular object in HDF5 or pickle format\n\n Args:\n obj (array or DataFrame): tabular object to write\n filepath (path-like): path to write to; must end in '.h5' or '.pkl'\n \"\"\"\n _, fn, ext = splitext2(filepath)\n if ext == '.h5':\n _write_tabular_h5(obj, filepath)\n elif ext == '.pkl':\n _write_tabular_pickle(obj, filepath)\n else:\n raise NotImplementedError" ]
[ 0.8067118525505066, 0.7437865734100342, 0.7063112854957581, 0.7061246037483215, 0.7056741118431091, 0.7037335634231567, 0.7030279636383057, 0.6980135440826416, 0.6967124938964844, 0.6956966519355774, 0.6951221227645874, 0.6929183602333069 ]
Stringifies a dict as toml

:param obj: the object to be dumped into toml
:param preserve: optional flag to preserve the inline table in result
def dumps(obj, preserve=False):
    """Stringifies a dict as toml

    :param obj: the object to be dumped into toml
    :param preserve: optional flag to preserve the inline table in result
    """
    f = StringIO()
    dump(obj, f, preserve)
    return f.getvalue()
[ "def dump(obj, f, preserve=False):\n \"\"\"Write dict object into file\n\n :param obj: the object to be dumped into toml\n :param f: the file object\n :param preserve: optional flag to preserve the inline table in result\n \"\"\"\n if not f.write:\n raise TypeError('You can only dump an object into a file object')\n encoder = Encoder(f, preserve=preserve)\n return encoder.write_dict(obj)", "def dumps(o, preserve=False):\n \"\"\"Stringifies input dict as toml\n\n Args:\n o: Object to dump into toml\n\n preserve: Boolean parameter. If true, preserve inline tables.\n\n Returns:\n String containing the toml corresponding to dict\n \"\"\"\n\n retval = \"\"\n addtoretval, sections = _dump_sections(o, \"\")\n retval += addtoretval\n while sections != {}:\n newsections = {}\n for section in sections:\n addtoretval, addtosections = _dump_sections(sections[section],\n section, preserve)\n if addtoretval or (not addtoretval and not addtosections):\n if retval and retval[-2:] != \"\\n\\n\":\n retval += \"\\n\"\n retval += \"[\" + section + \"]\\n\"\n if addtoretval:\n retval += addtoretval\n for s in addtosections:\n newsections[section + \".\" + s] = addtosections[s]\n sections = newsections\n return retval", "def dumps(o, encoder=None):\n \"\"\"Stringifies input dict as toml\n\n Args:\n o: Object to dump into toml\n\n preserve: Boolean parameter. If true, preserve inline tables.\n\n Returns:\n String containing the toml corresponding to dict\n \"\"\"\n\n retval = \"\"\n if encoder is None:\n encoder = TomlEncoder(o.__class__)\n addtoretval, sections = encoder.dump_sections(o, \"\")\n retval += addtoretval\n while sections:\n newsections = encoder.get_empty_table()\n for section in sections:\n addtoretval, addtosections = encoder.dump_sections(\n sections[section], section)\n\n if addtoretval or (not addtoretval and not addtosections):\n if retval and retval[-2:] != \"\\n\\n\":\n retval += \"\\n\"\n retval += \"[\" + section + \"]\\n\"\n if addtoretval:\n retval += addtoretval\n for s in addtosections:\n newsections[section + \".\" + s] = addtosections[s]\n sections = newsections\n return retval", "def dumps(o, encoder=None):\n \"\"\"Stringifies input dict as toml\n\n Args:\n o: Object to dump into toml\n\n preserve: Boolean parameter. 
If true, preserve inline tables.\n\n Returns:\n String containing the toml corresponding to dict\n \"\"\"\n\n retval = \"\"\n if encoder is None:\n encoder = TomlEncoder(o.__class__)\n addtoretval, sections = encoder.dump_sections(o, \"\")\n retval += addtoretval\n outer_objs = [id(o)]\n while sections:\n section_ids = [id(section) for section in sections]\n for outer_obj in outer_objs:\n if outer_obj in section_ids:\n raise ValueError(\"Circular reference detected\")\n outer_objs += section_ids\n newsections = encoder.get_empty_table()\n for section in sections:\n addtoretval, addtosections = encoder.dump_sections(\n sections[section], section)\n\n if addtoretval or (not addtoretval and not addtosections):\n if retval and retval[-2:] != \"\\n\\n\":\n retval += \"\\n\"\n retval += \"[\" + section + \"]\\n\"\n if addtoretval:\n retval += addtoretval\n for s in addtosections:\n newsections[section + \".\" + s] = addtosections[s]\n sections = newsections\n return retval", "def save_object(self, obj):\n \"\"\"\n Save object to disk as JSON.\n\n Generally shouldn't be called directly.\n \"\"\"\n obj.pre_save(self.jurisdiction.jurisdiction_id)\n\n filename = '{0}_{1}.json'.format(obj._type, obj._id).replace('/', '-')\n\n self.info('save %s %s as %s', obj._type, obj, filename)\n self.debug(json.dumps(OrderedDict(sorted(obj.as_dict().items())),\n cls=utils.JSONEncoderPlus, indent=4, separators=(',', ': ')))\n\n self.output_names[obj._type].add(filename)\n\n with open(os.path.join(self.datadir, filename), 'w') as f:\n json.dump(obj.as_dict(), f, cls=utils.JSONEncoderPlus)\n\n # validate after writing, allows for inspection on failure\n try:\n obj.validate()\n except ValueError as ve:\n if self.strict_validation:\n raise ve\n else:\n self.warning(ve)\n\n # after saving and validating, save subordinate objects\n for obj in obj._related:\n self.save_object(obj)", "def serialize(obj, **options):\n '''\n Serialize Python data to a Python string representation (via pprint.format)\n\n :param obj: the data structure to serialize\n :param options: options given to pprint.format\n '''\n\n #round-trip this through JSON to avoid OrderedDict types\n # there's probably a more performant way to do this...\n # TODO remove json round-trip when all dataset will use\n # serializers\n return pprint.pformat(\n salt.utils.json.loads(\n salt.utils.json.dumps(obj, _json_module=_json),\n _json_module=_json\n ),\n **options\n )", "def serialize(obj, **options):\n '''\n Serialize Python data to TOML.\n\n :param obj: the data structure to serialize.\n :param options: options given to lower pytoml module.\n '''\n\n try:\n if 'file_out' in options:\n return toml.dump(obj, options['file_out'], **options)\n else:\n return toml.dumps(obj, **options)\n except Exception as error:\n raise SerializationError(error)", "def pp_obj(obj, out_path=None, indent=4, sort_keys=False, ensure_ascii=False, default=None):\n \"\"\"\n pretty prints a (list tuple, dict etc) object\n\n :useful:\n - when we want to avoid importing pprint if we have already imported json\n - when we want to pprint to file without breaking json compatibility\n \"\"\"\n rt = anyjson.dumps(obj, sort_keys=sort_keys, indent=indent, ensure_ascii=ensure_ascii,\n separators=(',', ': '), default=default, namedtuple_as_object=False)\n # unicode_available\n if out_path is None:\n return rt\n else:\n with open(out_path, 'w') as out:\n out.write(rt)", "def dumps(obj, pretty=False, escaped=True):\n \"\"\"\n Serialize ``obj`` to a VDF formatted ``str``.\n \"\"\"\n if not 
isinstance(obj, dict):\n raise TypeError(\"Expected data to be an instance of``dict``\")\n if not isinstance(pretty, bool):\n raise TypeError(\"Expected pretty to be of type bool\")\n if not isinstance(escaped, bool):\n raise TypeError(\"Expected escaped to be of type bool\")\n\n return ''.join(_dump_gen(obj, pretty, escaped))", "def dump(o, f):\n \"\"\"Writes out dict as toml to a file\n\n Args:\n o: Object to dump into toml\n f: File descriptor where the toml should be stored\n\n Returns:\n String containing the toml corresponding to dictionary\n\n Raises:\n TypeError: When anything other than file descriptor is passed\n \"\"\"\n\n if not f.write:\n raise TypeError(\"You can only dump an object to a file descriptor\")\n d = dumps(o)\n f.write(d)\n return d", "def dump(obj, file_path, prettify=False):\n \"\"\"\n Dumps a data structure to the filesystem as TOML.\n\n The given value must be either a dict of dict values, a dict, or a TOML file constructed by this module.\n \"\"\"\n with open(file_path, 'w') as fp:\n fp.write(dumps(obj))", "def _prep_obj(self, obj, parent, parents_ids=EMPTY_FROZENSET, is_namedtuple=False):\n \"\"\"Difference of 2 objects\"\"\"\n original_type = type(obj)\n try:\n if is_namedtuple:\n obj = obj._asdict()\n else:\n obj = obj.__dict__\n except AttributeError:\n try:\n obj = {i: getattr(obj, i) for i in obj.__slots__}\n except AttributeError:\n self[UNPROCESSED].append(obj)\n return unprocessed\n\n result = self._prep_dict(obj, parent=parent, parents_ids=parents_ids,\n print_as_attribute=True, original_type=original_type)\n result = \"nt{}\".format(result) if is_namedtuple else \"obj{}\".format(result)\n return result" ]
[ 0.8154090046882629, 0.7720668911933899, 0.7706790566444397, 0.7599770426750183, 0.6874340772628784, 0.6836484670639038, 0.6806732416152954, 0.6798952221870422, 0.6748935580253601, 0.6687008738517761, 0.6676678657531738, 0.6665995121002197 ]
Loads licenses from the given directory.
def license_loader(lic_dir=LIC_DIR):
    """Loads licenses from the given directory."""
    lics = []
    for ln in os.listdir(lic_dir):
        lp = os.path.join(lic_dir, ln)
        with open(lp) as lf:
            txt = lf.read()
        lic = License(txt)
        lics.append(lic)
    return lics
[ "private void loadCertificatesFromDirectory(File directory) {\n\n directorySanityChecks(directory);\n\n synchronized (listenerLock) {\n listener.notifyCertficateLookupEvent(directory.getAbsolutePath());\n }\n\n File[] certFiles = directory.listFiles(new FilenameFilter() {\n\n public boolean accept(File dir, String name) {\n\n return name.endsWith(CERTIFICATE_FILENAME_SUFFIX);\n }\n });\n\n for (File f : certFiles)\n loadCertificateFromFile(f);\n\n }", "public static TagLib[] loadFromDirectory(Resource dir, Identification id) throws TagLibException {\n\tif (!dir.isDirectory()) return new TagLib[0];\n\tArrayList<TagLib> arr = new ArrayList<TagLib>();\n\n\tResource[] files = dir.listResources(new ExtensionResourceFilter(new String[] { \"tld\", \"tldx\" }));\n\tfor (int i = 0; i < files.length; i++) {\n\t if (files[i].isFile()) arr.add(TagLibFactory.loadFromFile(files[i], id));\n\n\t}\n\treturn arr.toArray(new TagLib[arr.size()]);\n }", "public static FunctionLib[] loadFromDirectory(Resource dir, Identification id) throws FunctionLibException {\n\tif (!dir.isDirectory()) return new FunctionLib[0];\n\tArrayList<FunctionLib> arr = new ArrayList<FunctionLib>();\n\n\tResource[] files = dir.listResources(new ExtensionResourceFilter(new String[] { \"fld\", \"fldx\" }));\n\tfor (int i = 0; i < files.length; i++) {\n\t if (files[i].isFile()) arr.add(FunctionLibFactory.loadFromFile(files[i], id));\n\t}\n\n\treturn arr.toArray(new FunctionLib[arr.size()]);\n }", "def load_directory(self, directory, ext=None):\n \"\"\"Load RiveScript documents from a directory.\n\n :param str directory: The directory of RiveScript documents to load\n replies from.\n :param []str ext: List of file extensions to consider as RiveScript\n documents. The default is ``[\".rive\", \".rs\"]``.\n \"\"\"\n self._say(\"Loading from directory: \" + directory)\n\n if ext is None:\n # Use the default extensions - .rive is preferable.\n ext = ['.rive', '.rs']\n elif type(ext) == str:\n # Backwards compatibility for ext being a string value.\n ext = [ext]\n\n if not os.path.isdir(directory):\n self._warn(\"Error: \" + directory + \" is not a directory.\")\n return\n\n for root, subdirs, files in os.walk(directory):\n for file in files:\n for extension in ext:\n if file.lower().endswith(extension):\n # Load this file.\n self.load_file(os.path.join(root, file))\n break", "def licenses(family_directory):\n \"\"\"Get a list of paths for every license\n file found in a font project.\"\"\"\n found = []\n search_paths = [family_directory]\n gitroot = git_rootdir(family_directory)\n if gitroot and gitroot not in search_paths:\n search_paths.append(gitroot)\n\n for directory in search_paths:\n if directory:\n for license in ['OFL.txt', 'LICENSE.txt']:\n license_path = os.path.join(directory, license)\n if os.path.exists(license_path):\n found.append(license_path)\n return found", "def load_from_directory(list_name):\n \"\"\"\n To resolve the symbol in the LEVERAGED_ETF list,\n the date on which the symbol was in effect is needed.\n\n Furthermore, to maintain a point in time record of our own maintenance\n of the restricted list, we need a knowledge date. 
Thus, restricted lists\n are dictionaries of datetime->symbol lists.\n new symbols should be entered as a new knowledge date entry.\n\n This method assumes a directory structure of:\n SECURITY_LISTS_DIR/listname/knowledge_date/lookup_date/add.txt\n SECURITY_LISTS_DIR/listname/knowledge_date/lookup_date/delete.txt\n\n The return value is a dictionary with:\n knowledge_date -> lookup_date ->\n {add: [symbol list], 'delete': [symbol list]}\n \"\"\"\n data = {}\n dir_path = os.path.join(SECURITY_LISTS_DIR, list_name)\n for kd_name in listdir(dir_path):\n kd = datetime.strptime(kd_name, DATE_FORMAT).replace(\n tzinfo=pytz.utc)\n data[kd] = {}\n kd_path = os.path.join(dir_path, kd_name)\n for ld_name in listdir(kd_path):\n ld = datetime.strptime(ld_name, DATE_FORMAT).replace(\n tzinfo=pytz.utc)\n data[kd][ld] = {}\n ld_path = os.path.join(kd_path, ld_name)\n for fname in listdir(ld_path):\n fpath = os.path.join(ld_path, fname)\n with open(fpath) as f:\n symbols = f.read().splitlines()\n data[kd][ld][fname] = symbols\n\n return data", "def _get_licences():\n \"\"\" Lists all the licenses on command line \"\"\"\n licenses = _LICENSES\n\n for license in licenses:\n print(\"{license_name} [{license_code}]\".format(\n license_name=licenses[license], license_code=license))", "def license(dir = Dir.pwd,\n license = FalkorLib::Config::Bootstrap::DEFAULTS[:metadata][:license],\n authors = '',\n options = {\n :filename => 'LICENSE'\n })\n return if ((license.empty?) or (license == 'none') or (license =~ /^CC/))\n return unless FalkorLib::Config::Bootstrap::DEFAULTS[:licenses].keys.include?( license )\n info \"Generate the #{license} licence file\"\n path = normalized_path(dir)\n use_git = FalkorLib::Git.init?(path)\n rootdir = (use_git) ? FalkorLib::Git.rootdir(path) : path\n Dir.chdir( rootdir ) do\n run %( licgen #{license.downcase} #{authors} )\n run %( mv LICENSE #{options[:filename]} ) if( options[:filename] and options[:filename] != 'LICENSE')\n end\n end", "function checkLicenses(rootDir){\n\n var licenses = {},\n nodeModulesFolder = path.resolve(rootDir, \"./node_modules\"),\n moduleFolders = fs.readdirSync(nodeModulesFolder),\n res;\n\n _(moduleFolders).each(function(module) {\n licenses[module] = checkModuleLicense(nodeModulesFolder + \"/\" + module);\n });\n\n return licenses;\n}", "public Set<InstallLicense> getFeatureLicense(Collection<String> featureIds, File fromDir, String toExtension, boolean offlineOnly, Locale locale) throws InstallException {\n Set<InstallLicense> licenses = new HashSet<InstallLicense>();\n ArrayList<InstallAsset> installAssets = new ArrayList<InstallAsset>();\n ArrayList<String> unresolvedFeatures = new ArrayList<String>();\n Collection<ESAAsset> autoFeatures = getResolveDirector().getAutoFeature(fromDir, toExtension);\n getResolveDirector().resolve(featureIds, fromDir, toExtension, offlineOnly, installAssets, unresolvedFeatures);\n if (!offlineOnly && !unresolvedFeatures.isEmpty()) {\n log(Level.FINEST, \"getFeatureLicense() determined unresolved features: \" + unresolvedFeatures.toString() + \" from \" + fromDir.getAbsolutePath());\n licenses = getFeatureLicense(unresolvedFeatures, locale, null, null);\n }\n if (installAssets.isEmpty()) {\n return licenses;\n }\n getResolveDirector().resolveAutoFeatures(autoFeatures, installAssets);\n Map<String, InstallLicenseImpl> licenseIds = new HashMap<String, InstallLicenseImpl>();\n for (InstallAsset installAsset : installAssets) {\n if (installAsset.isFeature()) {\n ESAAsset esa = (ESAAsset) installAsset;\n if 
(esa.isPublic()) {\n ExeInstallAction.incrementNumOfLocalFeatures();\n }\n LicenseProvider lp = esa.getLicenseProvider(locale);\n String licenseId = esa.getLicenseId();\n if (licenseId != null && !licenseId.isEmpty()) {\n InstallLicenseImpl ili = licenseIds.get(licenseId);\n if (ili == null) {\n ili = new InstallLicenseImpl(licenseId, null, lp);\n licenseIds.put(licenseId, ili);\n }\n ili.addFeature(esa.getProvideFeature());\n }\n }\n }\n licenses.addAll(licenseIds.values());\n return licenses;\n }", "function (options, callback) {\n var licenseSrc = path.join(options.src, 'LICENSE')\n try {\n fs.accessSync(licenseSrc)\n } catch (err) {\n try {\n licenseSrc = path.join(options.src, 'LICENSE.txt')\n fs.accessSync(licenseSrc)\n } catch (err) {\n licenseSrc = path.join(options.src, 'LICENSE.md')\n fs.accessSync(licenseSrc)\n }\n }\n options.logger('Reading license file from ' + licenseSrc)\n\n fs.readFile(licenseSrc, callback)\n}", "def update(data_dir):\n \"\"\"Update NLPIR license.\"\"\"\n try:\n license_updated = update_license_file(data_dir)\n except URLError:\n click.secho('Error: unable to fetch newest license.', fg='red')\n exit(1)\n except (IOError, OSError):\n click.secho('Error: unable to move license to data directory.',\n fg='red')\n exit(1)\n\n if license_updated:\n click.echo('License updated.')\n else:\n click.echo('Your license is already up-to-date.')" ]
[ 0.7496620416641235, 0.7284637093544006, 0.7106066942214966, 0.7100866436958313, 0.7081019878387451, 0.700099766254425, 0.6998074054718018, 0.6977293491363525, 0.6956530213356018, 0.695216715335846, 0.693123996257782, 0.6906286478042603 ]
Return pseudo-choice vectors.
def get_vector(self, max_choice=3):
    """Return pseudo-choice vectors."""
    vec = {}
    for dim in ['forbidden', 'required', 'permitted']:
        if self.meta[dim] is None:
            continue
        dim_vec = map(lambda x: (x, max_choice), self.meta[dim])
        vec[dim] = dict(dim_vec)
    return vec
[ "def getUtilities(self, decision, orderVector):\n \"\"\"\n Returns a floats that contains the utilities of every candidate in the decision.\n\n :ivar list<int> decision: Contains a list of integer representations of candidates in the \n current decision.\n :ivar list<int> orderVector: A list of integer representations for each candidate ordered\n from most preferred to least.\n \"\"\"\n \n scoringVector = self.getScoringVector(orderVector)\n utilities = []\n for alt in decision:\n altPosition = orderVector.index(alt)\n utility = float(scoringVector[altPosition])\n if self.isLoss == True:\n utility = -1*utility\n utilities.append(utility)\n return utilities", "def choose(n, k):\n \"\"\"\n Computes \"n choose k\". This can handle higher values than\n scipy.special.binom().\n \"\"\"\n if isinstance(k, numbers.Number):\n return _choose(n, k)\n else:\n return np.array([_choose(n, k2) for k2 in k])", "def _get_vectors(self, tree, precomputed_info):\n \"\"\"\n Populate the vectors m and M.\n \"\"\"\n little_m = []\n big_m = []\n\n leaf_nodes = sorted(tree.leaf_nodes(), key=lambda x: x.taxon.label)\n # inner nodes, sorted order\n for leaf_a, leaf_b in combinations(leaf_nodes, 2):\n mrca = tree.mrca(taxa=[leaf_a.taxon, leaf_b.taxon])\n little_m.append(precomputed_info[mrca].edges_from_root)\n big_m.append(precomputed_info[mrca].dist_from_root)\n \n # leaf nodes, sorted order\n for leaf in leaf_nodes:\n little_m.append(1)\n big_m.append(leaf.edge_length)\n\n return np.array(little_m), np.array(big_m)", "def getOrderVector(self):\n \"\"\"\n Returns a list of lists. Each list represents tiers of candidates. candidates in earlier\n tiers are preferred to candidates appearing in later tiers. Candidates in the same tier\n are preferred equally. \n \"\"\"\n\n # We sort the candidates based on the number of incoming edges they have in the graph. 
If \n # two candidates have the same number, we assume that they are tied.\n incEdgesMap = self.getIncEdgesMap()\n sortedKeys = sorted(incEdgesMap.keys(), reverse = True)\n orderVector = []\n print(sortedKeys)\n # print(\"sortedKeys\",sortedKeys)\n # print(\"incEdgesMap\", incEdgesMap)\n for key in sortedKeys:\n tier = []\n cands = incEdgesMap[key]\n # print(\"qq\",cands)\n for cand in cands:\n tier.append(cand)\n # print(\"cand=\",cand)\n # print(\"tier\", tier)\n orderVector.append(tier[0]) # replace tier with tier[0]\n return orderVector", "def _v(self, token, previous=None, next=None):\n \"\"\" Returns a training vector for the given (word, tag)-tuple and its context.\n \"\"\"\n def f(v, s1, s2):\n if s2: \n v[s1 + \" \" + s2] = 1\n p, n = previous, next\n p = (\"\", \"\") if not p else (p[0] or \"\", p[1] or \"\")\n n = (\"\", \"\") if not n else (n[0] or \"\", n[1] or \"\")\n v = {}\n f(v, \"b\", \"b\") # Bias.\n f(v, \"h\", token[0]) # Capitalization.\n f(v, \"w\", token[-6:] if token not in self.known or token in self.unknown else \"\")\n f(v, \"x\", token[-3:]) # Word suffix.\n f(v, \"-x\", p[0][-3:]) # Word suffix left.\n f(v, \"+x\", n[0][-3:]) # Word suffix right.\n f(v, \"-t\", p[1]) # Tag left.\n f(v, \"-+\", p[1] + n[1]) # Tag left + right.\n f(v, \"+t\", n[1]) # Tag right.\n return v", "def ball_pick(n, d, rng=None):\n \"\"\"Return cartesian vectors uniformly picked on the unit ball in an\n arbitrary number of dimensions.\n\n The unit ball is the space enclosed by the unit sphere.\n\n The picking is done by rejection sampling in the unit cube.\n\n In 3-dimensional space, the fraction `\\pi / 6 \\sim 0.52` points are valid.\n\n Parameters\n ----------\n n: integer\n Number of points to return.\n d: integer\n Number of dimensions of the space in which the ball lives\n\n Returns\n -------\n r: array, shape (n, d)\n Sample cartesian vectors.\n \"\"\"\n def valid(r):\n return vector_mag_sq(r) < 1.0\n return rejection_pick(L=2.0, n=n, d=d, valid=valid, rng=rng)", "def _choicerank(n_items, data, params):\n \"\"\"Inner loop of ChoiceRank algorithm.\"\"\"\n weights = exp_transform(params)\n adj, adj_t, traffic_in, traffic_out = data\n # First phase of message passing.\n zs = adj.dot(weights)\n # Second phase of message passing.\n with np.errstate(invalid=\"ignore\"):\n denoms = adj_t.dot(traffic_out / zs)\n return traffic_in, denoms", "def choice(*es):\n \"\"\"\n Create a PEG function to match an ordered choice.\n \"\"\"\n msg = 'Expected one of: {}'.format(', '.join(map(repr, es)))\n def match_choice(s, grm=None, pos=0):\n errs = []\n for e in es:\n try:\n return e(s, grm, pos)\n except PegreError as ex:\n errs.append((ex.message, ex.position))\n if errs:\n raise PegreChoiceError(errs, pos)\n return match_choice", "def select(self, condition):\n \"\"\"\n Select only those pseudopotentials for which condition is True.\n Return new class:`PseudoTable` object.\n\n Args:\n condition:\n Function that accepts a :class:`Pseudo` object and returns True or False.\n \"\"\"\n return self.__class__([p for p in self if condition(p)])", "def _compute_projection_pick(artist, path, xy):\n \"\"\"\n Project *xy* on *path* to obtain a `Selection` for *artist*.\n\n *path* is first transformed to screen coordinates using the artist\n transform, and the target of the returned `Selection` is transformed\n back to data coordinates using the artist *axes* inverse transform. The\n `Selection` `index` is returned as a float. 
This function returns ``None``\n for degenerate inputs.\n\n The caller is responsible for converting the index to the proper class if\n needed.\n \"\"\"\n transform = artist.get_transform().frozen()\n tpath = (path.cleaned(transform) if transform.is_affine\n # `cleaned` only handles affine transforms.\n else transform.transform_path(path).cleaned())\n # `cleaned` should return a path where the first element is `MOVETO`, the\n # following are `LINETO` or `CLOSEPOLY`, and the last one is `STOP`, i.e.\n # codes = path.codes\n # assert (codes[0], codes[-1]) == (path.MOVETO, path.STOP)\n # assert np.in1d(codes[1:-1], [path.LINETO, path.CLOSEPOLY]).all()\n vertices = tpath.vertices[:-1]\n codes = tpath.codes[:-1]\n vertices[codes == tpath.CLOSEPOLY] = vertices[0]\n # Unit vectors for each segment.\n us = vertices[1:] - vertices[:-1]\n ls = np.hypot(*us.T)\n with np.errstate(invalid=\"ignore\"):\n # Results in 0/0 for repeated consecutive points.\n us /= ls[:, None]\n # Vectors from each vertex to the event (overwritten below).\n vs = xy - vertices[:-1]\n # Clipped dot products -- `einsum` cannot be done in place, `clip` can.\n # `clip` can trigger invalid comparisons if there are nan points.\n with np.errstate(invalid=\"ignore\"):\n dot = np.clip(np.einsum(\"ij,ij->i\", vs, us), 0, ls, out=vs[:, 0])\n # Projections.\n projs = vertices[:-1] + dot[:, None] * us\n ds = np.hypot(*(xy - projs).T, out=vs[:, 1])\n try:\n argmin = np.nanargmin(ds)\n dmin = ds[argmin]\n except (ValueError, IndexError): # See above re: exceptions caught.\n return\n else:\n target = AttrArray(\n artist.axes.transData.inverted().transform_point(projs[argmin]))\n target.index = (\n (argmin + dot[argmin] / ls[argmin])\n / (path._interpolation_steps / tpath._interpolation_steps))\n return Selection(artist, target, dmin, None, None)", "def getOrderVectors(self):\n \"\"\"\n Returns a list of lists, one for each preference, of candidates ordered from most preferred\n to least. Note that ties are not indicated in the returned lists. Also returns a list of\n the number of times each preference is given.\n \"\"\"\n\n orderVectors = []\n for preference in self.preferences:\n orderVectors.append(preference.getOrderVector())\n return orderVectors", "def getScoringVector(self, orderVector):\n \"\"\"\n Returns a scoring vector such that the first k candidates recieve 1 point and all others \n recive 0 This function is called by getUtilities() which is implemented in the parent\n class.\n\n :ivar list<int> orderVector: A list of integer representations for each candidate ordered\n from most preferred to least.\n \"\"\"\n\n scoringVector = []\n for i in range(0, self.k):\n scoringVector.append(1)\n for i in range(self.k, len(orderVector)):\n scoringVector.append(0)\n return scoringVector" ]
[ 0.679014265537262, 0.6754641532897949, 0.6727849841117859, 0.6662163734436035, 0.6640924215316772, 0.6624693870544434, 0.6603453755378723, 0.6599308252334595, 0.6597691774368286, 0.6582732796669006, 0.6564963459968567, 0.6564908623695374 ]
CLI for tonomi.com using contrib-python-qubell-client

To enable completion:

    eval "$(_NOMI_COMPLETE=source nomi)"
def entity(ctx, debug, uncolorize, **kwargs):
    """
    CLI for tonomi.com using contrib-python-qubell-client

    To enable completion:

        eval "$(_NOMI_COMPLETE=source nomi)"
    """
    global PROVIDER_CONFIG
    if debug:
        log.basicConfig(level=log.DEBUG)
        log.getLogger("requests.packages.urllib3.connectionpool").setLevel(log.DEBUG)
    for (k, v) in kwargs.iteritems():
        if v:
            QUBELL[k] = v
    PROVIDER_CONFIG = {
        'configuration.provider': PROVIDER['provider_type'],
        'configuration.legacy-regions': PROVIDER['provider_region'],
        'configuration.endpoint-url': '',
        'configuration.legacy-security-group': '',
        'configuration.identity': PROVIDER['provider_identity'],
        'configuration.credential': PROVIDER['provider_credential']
    }

    class UserContext(object):
        def __init__(self):
            self.platform = None
            self.unauthenticated_platform = None
            self.colorize = not (uncolorize)

        def get_platform(self):
            if not self.platform:
                assert QUBELL["tenant"], "No platform URL provided. Set QUBELL_TENANT or use --tenant option."
                if not QUBELL["token"]:
                    assert QUBELL["user"], "No username. Set QUBELL_USER or use --user option."
                    assert QUBELL["password"], "No password provided. Set QUBELL_PASSWORD or use --password option."
                self.platform = QubellPlatform.connect(
                    tenant=QUBELL["tenant"],
                    user=QUBELL["user"],
                    password=QUBELL["password"],
                    token=QUBELL["token"])
            return self.platform

        def get_unauthenticated_platform(self):
            if not self.unauthenticated_platform:
                assert QUBELL["tenant"], "No platform URL provided. Set QUBELL_TENANT or use --tenant option."
                self.unauthenticated_platform = QubellPlatform.connect(tenant=QUBELL["tenant"])
            return self.unauthenticated_platform

    ctx = click.get_current_context()
    ctx.obj = UserContext()
[ "def consult(string_in):\n \"\"\"\n provide file:consult/1 functionality with python types\n \"\"\"\n # pylint: disable=eval-used\n # pylint: disable=too-many-branches\n # pylint: disable=too-many-statements\n\n # manually parse textual erlang data to avoid external dependencies\n list_out = []\n tuple_binary = False # binaries become tuples of integers\n quoted_string = False # strings become python string\n atom_string = False # atoms become python string\n number = False\n whitespace = frozenset(('\\n', '\\t', ' '))\n i = 0\n while i < len(string_in):\n character = string_in[i]\n if character == ',':\n if atom_string:\n list_out.append('\"')\n atom_string = False\n list_out.append(',')\n number = string_in[i + 1].isdigit()\n elif character == '{':\n list_out.append('(')\n number = string_in[i + 1].isdigit()\n elif character == '}':\n if atom_string:\n list_out.append('\"')\n atom_string = False\n list_out.append(')')\n number = False\n elif character == '[':\n list_out.append('[')\n number = string_in[i + 1].isdigit()\n elif character == ']':\n if atom_string:\n list_out.append('\"')\n atom_string = False\n list_out.append(']')\n number = False\n elif character == '<' and string_in[i + 1] == '<':\n list_out.append('(')\n tuple_binary = True\n i += 1\n elif character == '>' and string_in[i + 1] == '>':\n list_out.append(')')\n tuple_binary = False\n i += 1\n elif not quoted_string and not atom_string and character in whitespace:\n number = string_in[i + 1].isdigit()\n elif tuple_binary or number:\n list_out.append(character)\n elif character == '\"':\n if quoted_string:\n quoted_string = False\n else:\n quoted_string = True\n list_out.append('\"')\n elif character == \"'\":\n if atom_string:\n atom_string = False\n else:\n atom_string = True\n list_out.append('\"')\n elif not quoted_string and not atom_string:\n atom_string = True\n list_out.append('\"')\n list_out.append(character)\n else:\n list_out.append(character)\n i += 1\n return eval(''.join(list_out))", "def tonicdns_client(uri, method, token='', data='', keyword='',\n content='', raw_flag=False):\n \"\"\"TonicDNS API client\n\n Arguments:\n\n uri: TonicDNS API URI\n method: TonicDNS API request method\n token: TonicDNS API authentication token\n data: Post data to TonicDNS API\n keyword: Processing keyword of response\n content: data exist flag\n raw_flag: True is return response data, False is pretty printing\n \"\"\"\n res = request(uri, method, data, token)\n if token:\n if keyword == 'serial':\n args = {\"token\": token, \"keyword\": keyword, \"content\": content}\n cur_soa, new_soa = response(uri, method, res, **args)\n return cur_soa, new_soa\n\n else:\n if content is None:\n args = {\"token\": token, \"keyword\": keyword,\n \"content\": content.get('domain')}\n response(uri, method, res, **args)\n else:\n # get sub command\n args = {\"token\": token, \"keyword\": keyword,\n \"raw_flag\": raw_flag}\n data = response(uri, method, res, **args)\n return data\n\n else:\n args = {\"token\": token, \"keyword\": keyword}\n token = response(uri, method, res, **args)\n return token", "def rl_complete(self, text, state):\n \"\"\"\n Alternate entry point for using the argcomplete completer in a readline-based REPL. See also\n `rlcompleter <https://docs.python.org/2/library/rlcompleter.html#completer-objects>`_.\n Usage:\n\n .. 
code-block:: python\n\n import argcomplete, argparse, readline\n parser = argparse.ArgumentParser()\n ...\n completer = argcomplete.CompletionFinder(parser)\n readline.set_completer_delims(\"\")\n readline.set_completer(completer.rl_complete)\n readline.parse_and_bind(\"tab: complete\")\n result = input(\"prompt> \")\n\n (Use ``raw_input`` instead of ``input`` on Python 2, or use `eight <https://github.com/kislyuk/eight>`_).\n \"\"\"\n if state == 0:\n cword_prequote, cword_prefix, cword_suffix, comp_words, first_colon_pos = split_line(text)\n comp_words.insert(0, sys.argv[0])\n matches = self._get_completions(comp_words, cword_prefix, cword_prequote, first_colon_pos)\n self._rl_matches = [text + match[len(cword_prefix):] for match in matches]\n\n if state < len(self._rl_matches):\n return self._rl_matches[state]\n else:\n return None", "def complete_func(self, findstart, base):\n \"\"\"Handle omni completion.\"\"\"\n self.log.debug('complete_func: in %s %s', findstart, base)\n\n def detect_row_column_start():\n row, col = self.editor.cursor()\n start = col\n line = self.editor.getline()\n while start > 0 and line[start - 1] not in \" .,([{\":\n start -= 1\n # Start should be 1 when startcol is zero\n return row, col, start if start else 1\n\n if str(findstart) == \"1\":\n row, col, startcol = detect_row_column_start()\n\n # Make request to get response ASAP\n self.complete(row, col)\n self.completion_started = True\n\n # We always allow autocompletion, even with empty seeds\n return startcol\n else:\n result = []\n # Only handle snd invocation if fst has already been done\n if self.completion_started:\n # Unqueing messages until we get suggestions\n self.unqueue(timeout=self.completion_timeout, should_wait=True)\n suggestions = self.suggestions or []\n self.log.debug('complete_func: suggestions in')\n for m in suggestions:\n result.append(m)\n self.suggestions = None\n self.completion_started = False\n return result", "def cmd_nc(host, port, family, ssl_enable, crlf, source_ip, source_port, protocol):\n \"\"\"Some kind of netcat/ncat replacement.\n\n The execution emulates the feeling of this popular tools.\n\n Example:\n\n \\b\n $ habu.nc --crlf www.portantier.com 80\n Connected to 45.77.113.133 80\n HEAD / HTTP/1.0\n\n \\b\n HTTP/1.0 301 Moved Permanently\n Date: Thu, 26 Jul 2018 21:10:51 GMT\n Server: OpenBSD httpd\n Connection: close\n Content-Type: text/html\n Content-Length: 443\n Location: https://www.portantier.com/\n \"\"\"\n\n resolved = socket.getaddrinfo(host, port)\n\n families = {\n '4' : [ socket.AF_INET ],\n '6' : [ socket.AF_INET6 ],\n '46': [ socket.AF_INET, socket.AF_INET6]\n }\n\n address = None\n for r in resolved:\n if r[0] in families[family]:\n address = r # (<AddressFamily.AF_INET6: 10>, <SocketType.SOCK_STREAM: 1>, 6, '', ('2606:2800:220:1:248:1893:25c8:1946', 80, 0, 0))\n\n if not address:\n print('Could not resolve {} to the ip address family selected ({})'.format(host, family), file=sys.stderr)\n sys.exit(1)\n\n to_send = b''\n\n if not source_ip:\n source_ip = which_source_for(address[4][0])\n\n if protocol == 'tcp':\n s = socket.socket(address[0], socket.SOCK_STREAM)\n else:\n s = socket.socket(address[0], socket.SOCK_DGRAM)\n\n s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n s.bind((source_ip, source_port))\n\n if ssl_enable:\n ssl_context = ssl.SSLContext()\n s = ssl_context.wrap_socket(s, server_side=False)\n\n try:\n s.connect((address[4][0], port))\n\n print('Connected to', address[4][0], port, file=sys.stderr)\n except Exception as e:\n 
print(e, file=sys.stderr)\n sys.exit(1)\n\n while True:\n\n iready, oready, eready = select.select([sys.stdin, s], [], [s])\n\n for i in iready:\n if i == sys.stdin:\n if crlf:\n to_send += i.readline().replace('\\n', '\\r\\n').encode()\n else:\n to_send += i.readline().encode()\n else:\n received = s.recv(4096)\n if not received:\n sys.exit(1)\n\n os.write(sys.stdout.fileno(), received)\n\n iready, oready, eready = select.select([], [s], [s])\n\n for o in oready:\n if to_send:\n o.send(to_send)\n to_send = b''\n\n s.close()", "def cli(ctx, config, debug, language, verbose):\n \"\"\"\n Cucco allows to apply normalizations to a given text or file.\n This normalizations include, among others, removal of accent\n marks, stop words an extra white spaces, replacement of\n punctuation symbols, emails, emojis, etc.\n\n For more info on how to use and configure Cucco, check the\n project website at https://cucco.io.\n \"\"\"\n ctx.obj = {}\n\n try:\n ctx.obj['config'] = Config(normalizations=config,\n language=language,\n debug=debug,\n verbose=verbose)\n except ConfigError as e:\n click.echo(e.message)\n sys.exit(-1)\n\n ctx.obj['cucco'] = Cucco(ctx.obj['config'])", "def eval(self, script, numkeys, *keys_and_args):\n \"\"\"\n Execute the Lua ``script``, specifying the ``numkeys`` the script\n will touch and the key names and argument values in ``keys_and_args``.\n Returns the result of the script.\n\n In practice, use the object returned by ``register_script``. This\n function exists purely for Redis API completion.\n \"\"\"\n return self.execute_command('EVAL', script, numkeys, *keys_and_args)", "def main(arguments=None):\n \"\"\"\n *The main function used when ``cl_utils.py`` is run as a single script from the cl, or when installed as a cl command*\n \"\"\"\n # setup the command-line util settings\n su = tools(\n arguments=arguments,\n docString=__doc__,\n logLevel=\"WARNING\",\n options_first=False,\n projectName=\"qubits\"\n )\n arguments, settings, log, dbConn = su.setup()\n\n # unpack remaining cl arguments using `exec` to setup the variable names\n # automatically\n for arg, val in arguments.iteritems():\n if arg[0] == \"-\":\n varname = arg.replace(\"-\", \"\") + \"Flag\"\n else:\n varname = arg.replace(\"<\", \"\").replace(\">\", \"\")\n if varname == \"import\":\n varname = \"iimport\"\n if isinstance(val, str) or isinstance(val, unicode):\n exec(varname + \" = '%s'\" % (val,))\n else:\n exec(varname + \" = %s\" % (val,))\n if arg == \"--dbConn\":\n dbConn = val\n log.debug('%s = %s' % (varname, val,))\n\n ## START LOGGING ##\n startTime = times.get_now_sql_datetime()\n log.info(\n '--- STARTING TO RUN THE cl_utils.py AT %s' %\n (startTime,))\n\n if init:\n from . 
import workspace\n ws = workspace(\n log=log,\n pathToWorkspace=pathToWorkspace\n )\n ws.setup()\n return\n\n # IMPORT THE SIMULATION SETTINGS\n (allSettings,\n programSettings,\n limitingMags,\n sampleNumber,\n peakMagnitudeDistributions,\n explosionDaysFromSettings,\n extendLightCurveTail,\n relativeSNRates,\n lowerRedshiftLimit,\n upperRedshiftLimit,\n redshiftResolution,\n restFrameFilter,\n kCorrectionTemporalResolution,\n kCorPolyOrder,\n kCorMinimumDataPoints,\n extinctionType,\n extinctionConstant,\n hostExtinctionDistributions,\n galacticExtinctionDistribution,\n surveyCadenceSettings,\n snLightCurves,\n surveyArea,\n CCSNRateFraction,\n transientToCCSNRateFraction,\n extraSurveyConstraints,\n lightCurvePolyOrder,\n logLevel) = cu.read_in_survey_parameters(\n log,\n pathToSettingsFile=pathToSettingsFile\n )\n\n logFilePath = pathToOutputDirectory + \"/qubits.log\"\n del log\n log = _set_up_command_line_tool(\n level=str(logLevel),\n logFilePath=logFilePath\n )\n\n # dbConn, log = cu.settings(\n # pathToSettingsFile=pathToSettingsFile,\n # dbConn=False,\n # log=True\n # )\n\n ## START LOGGING ##\n startTime = dcu.get_now_sql_datetime()\n log.info('--- STARTING TO RUN THE qubits AT %s' % (startTime,))\n\n resultsDict = {}\n\n pathToOutputPlotDirectory = pathToOutputDirectory + \"/plots/\"\n dcu.dryx_mkdir(\n log,\n directoryPath=pathToOutputPlotDirectory\n )\n\n pathToResultsFolder = pathToOutputDirectory + \"/results/\"\n dcu.dryx_mkdir(\n log,\n directoryPath=pathToResultsFolder\n )\n\n if not programSettings['Extract Lightcurves from Spectra'] and not programSettings['Generate KCorrection Database'] and not programSettings['Run the Simulation'] and not programSettings['Compile and Plot Results']:\n print \"All stages of the simulatation have been switched off. 
Please switch on at least one stage of the simulation under the 'Programming Settings' in the settings file `%(pathToSettingsFile)s`\" % locals()\n\n # GENERATE THE DATA FOR SIMULATIONS\n if programSettings['Extract Lightcurves from Spectra']:\n log.info('generating the Lightcurves')\n dg.generate_model_lightcurves(\n log=log,\n pathToSpectralDatabase=pathToSpectralDatabase,\n pathToOutputDirectory=pathToOutputDirectory,\n pathToOutputPlotDirectory=pathToOutputPlotDirectory,\n explosionDaysFromSettings=explosionDaysFromSettings,\n extendLightCurveTail=extendLightCurveTail,\n polyOrder=lightCurvePolyOrder\n )\n print \"The lightcurve file can be found here: %(pathToOutputDirectory)stransient_light_curves.yaml\" % locals()\n print \"The lightcurve plots can be found in %(pathToOutputPlotDirectory)s\" % locals()\n\n if programSettings['Generate KCorrection Database']:\n log.info('generating the kcorrection data')\n dg.generate_kcorrection_listing_database(\n log,\n pathToOutputDirectory=pathToOutputDirectory,\n pathToSpectralDatabase=pathToSpectralDatabase,\n restFrameFilter=restFrameFilter,\n temporalResolution=kCorrectionTemporalResolution,\n redshiftResolution=redshiftResolution,\n redshiftLower=lowerRedshiftLimit,\n redshiftUpper=upperRedshiftLimit + redshiftResolution)\n log.info('generating the kcorrection polynomials')\n dg.generate_kcorrection_polynomial_database(\n log,\n pathToOutputDirectory=pathToOutputDirectory,\n restFrameFilter=restFrameFilter,\n kCorPolyOrder=kCorPolyOrder, # ORDER OF THE POLYNOMIAL TO FIT\n kCorMinimumDataPoints=kCorMinimumDataPoints,\n redshiftResolution=redshiftResolution,\n redshiftLower=lowerRedshiftLimit,\n redshiftUpper=upperRedshiftLimit + redshiftResolution,\n plot=programSettings['Generate KCorrection Plots'])\n\n print \"The k-correction database has been generated here: %(pathToOutputDirectory)sk_corrections\" % locals()\n if programSettings['Generate KCorrection Plots']:\n print \"The k-correction polynomial plots can also be found in %(pathToOutputDirectory)sk_corrections\" % locals()\n\n if programSettings['Run the Simulation']:\n # CREATE THE OBSERVABLE UNIVERSE!\n log.info('generating the redshift array')\n redshiftArray = u.random_redshift_array(\n log,\n sampleNumber,\n lowerRedshiftLimit,\n upperRedshiftLimit,\n redshiftResolution=redshiftResolution,\n pathToOutputPlotDirectory=pathToOutputPlotDirectory,\n plot=programSettings['Plot Simulation Helper Plots'])\n resultsDict['Redshifts'] = redshiftArray.tolist()\n\n log.info('generating the SN type array')\n snTypesArray = u.random_sn_types_array(\n log,\n sampleNumber,\n relativeSNRates,\n pathToOutputPlotDirectory=pathToOutputPlotDirectory,\n plot=programSettings['Plot Simulation Helper Plots'])\n resultsDict['SN Types'] = snTypesArray.tolist()\n\n log.info('generating peak magnitudes for the SNe')\n peakMagnitudesArray = u.random_peak_magnitudes(\n log,\n peakMagnitudeDistributions,\n snTypesArray,\n plot=programSettings['Plot Simulation Helper Plots'])\n\n log.info('generating the SN host extictions array')\n hostExtinctionArray = u.random_host_extinction(\n log,\n sampleNumber,\n extinctionType,\n extinctionConstant,\n hostExtinctionDistributions,\n plot=programSettings['Plot Simulation Helper Plots'])\n\n log.info('generating the SN galactic extictions array')\n galacticExtinctionArray = u.random_galactic_extinction(\n log,\n sampleNumber,\n extinctionType,\n extinctionConstant,\n galacticExtinctionDistribution,\n plot=programSettings['Plot Simulation Helper Plots'])\n\n 
log.info('generating the raw lightcurves for the SNe')\n rawLightCurveDict = u.generate_numpy_polynomial_lightcurves(\n log,\n snLightCurves=snLightCurves,\n pathToOutputDirectory=pathToOutputDirectory,\n pathToOutputPlotDirectory=pathToOutputPlotDirectory,\n plot=programSettings['Plot Simulation Helper Plots'])\n\n log.info('generating the k-correction array for the SNe')\n kCorrectionArray = u.build_kcorrection_array(\n log,\n redshiftArray,\n snTypesArray,\n snLightCurves,\n pathToOutputDirectory=pathToOutputDirectory,\n plot=programSettings['Plot Simulation Helper Plots'])\n\n log.info('generating the observed lightcurves for the SNe')\n observedFrameLightCurveInfo, peakAppMagList = u.convert_lightcurves_to_observered_frame(\n log,\n snLightCurves=snLightCurves,\n rawLightCurveDict=rawLightCurveDict,\n redshiftArray=redshiftArray,\n snTypesArray=snTypesArray,\n peakMagnitudesArray=peakMagnitudesArray,\n kCorrectionArray=kCorrectionArray,\n hostExtinctionArray=hostExtinctionArray,\n galacticExtinctionArray=galacticExtinctionArray,\n restFrameFilter=restFrameFilter,\n pathToOutputDirectory=pathToOutputDirectory,\n pathToOutputPlotDirectory=pathToOutputPlotDirectory,\n polyOrder=lightCurvePolyOrder,\n plot=programSettings['Plot Simulation Helper Plots'])\n\n log.info('generating the survey observation cadence')\n cadenceDictionary = ss.survey_cadence_arrays(\n log,\n surveyCadenceSettings,\n pathToOutputDirectory=pathToOutputDirectory,\n pathToOutputPlotDirectory=pathToOutputPlotDirectory,\n plot=programSettings['Plot Simulation Helper Plots'])\n\n log.info('determining if the SNe are discoverable by the survey')\n discoverableList = ss.determine_if_sne_are_discoverable(\n log,\n redshiftArray=redshiftArray,\n limitingMags=limitingMags,\n observedFrameLightCurveInfo=observedFrameLightCurveInfo,\n pathToOutputDirectory=pathToOutputDirectory,\n pathToOutputPlotDirectory=pathToOutputPlotDirectory,\n plot=programSettings['Plot Simulation Helper Plots'])\n\n log.info(\n 'determining the day (if and) when each SN is first discoverable by the survey')\n ripeDayList = ss.determine_when_sne_are_ripe_for_discovery(\n log,\n redshiftArray=redshiftArray,\n limitingMags=limitingMags,\n discoverableList=discoverableList,\n observedFrameLightCurveInfo=observedFrameLightCurveInfo,\n plot=programSettings['Plot Simulation Helper Plots'])\n\n # log.info('determining the day when each SN is disappears fainter than the survey limiting mags')\n # disappearDayList = determine_when_discovered_sne_disappear(\n # log,\n # redshiftArray=redshiftArray,\n # limitingMags=limitingMags,\n # ripeDayList=ripeDayList,\n # observedFrameLightCurveInfo=observedFrameLightCurveInfo,\n # plot=programSettings['Plot Simulation Helper Plots'])\n\n log.info('determining if and when each SN is discovered by the survey')\n lightCurveDiscoveryDayList, surveyDiscoveryDayList, snCampaignLengthList = ss.determine_if_sne_are_discovered(\n log,\n limitingMags=limitingMags,\n ripeDayList=ripeDayList,\n cadenceDictionary=cadenceDictionary,\n observedFrameLightCurveInfo=observedFrameLightCurveInfo,\n extraSurveyConstraints=extraSurveyConstraints,\n plot=programSettings['Plot Simulation Helper Plots'])\n\n resultsDict[\n 'Discoveries Relative to Peak Magnitudes'] = lightCurveDiscoveryDayList\n resultsDict[\n 'Discoveries Relative to Survey Year'] = surveyDiscoveryDayList\n resultsDict['Campaign Length'] = snCampaignLengthList\n resultsDict['Cadence Dictionary'] = cadenceDictionary\n resultsDict['Peak Apparent Magnitudes'] = peakAppMagList\n\n 
now = datetime.now()\n now = now.strftime(\"%Y%m%dt%H%M%S\")\n fileName = pathToOutputDirectory + \\\n \"/simulation_results_%s.yaml\" % (now,)\n stream = file(fileName, 'w')\n yamlContent = dict(allSettings.items() + resultsDict.items())\n yaml.dump(yamlContent, stream, default_flow_style=False)\n stream.close()\n\n print \"The simulation output file can be found here: %(fileName)s. Remember to update your settings file 'Simulation Results File Used for Plots' parameter with this filename before compiling the results.\" % locals()\n if programSettings['Plot Simulation Helper Plots']:\n print \"The simulation helper-plots found in %(pathToOutputPlotDirectory)s\" % locals()\n\n # COMPILE AND PLOT THE RESULTS\n if programSettings['Compile and Plot Results']:\n pathToYamlFile = pathToOutputDirectory + \"/\" + \\\n programSettings['Simulation Results File Used for Plots']\n result_log = r.log_the_survey_settings(log, pathToYamlFile)\n snSurveyDiscoveryTimes, lightCurveDiscoveryTimes, snTypes, redshifts, cadenceDictionary, peakAppMagList, snCampaignLengthList = r.import_results(\n log, pathToYamlFile)\n snRatePlotLink, totalRate, tooFaintRate, shortCampaignRate = r.determine_sn_rate(\n log,\n lightCurveDiscoveryTimes,\n snSurveyDiscoveryTimes,\n redshifts,\n surveyCadenceSettings=surveyCadenceSettings,\n lowerRedshiftLimit=lowerRedshiftLimit,\n upperRedshiftLimit=upperRedshiftLimit,\n redshiftResolution=redshiftResolution,\n surveyArea=surveyArea,\n CCSNRateFraction=CCSNRateFraction,\n transientToCCSNRateFraction=transientToCCSNRateFraction,\n peakAppMagList=peakAppMagList,\n snCampaignLengthList=snCampaignLengthList,\n extraSurveyConstraints=extraSurveyConstraints,\n pathToOutputPlotFolder=pathToOutputPlotDirectory)\n result_log += \"\"\"\n## Results ##\n\nThis simulated survey discovered a total of **%s** transients per year. An extra **%s** transients were detected but deemed too faint to constrain a positive transient identification and a further **%s** transients where detected but an observational campaign of more than **%s** days could not be completed to ensure identification. See below for the various output plots.\n\n \"\"\" % (totalRate, tooFaintRate, shortCampaignRate, extraSurveyConstraints[\"Observable for at least ? 
number of days\"])\n cadenceWheelLink = r.plot_cadence_wheel(\n log,\n cadenceDictionary,\n pathToOutputPlotFolder=pathToOutputPlotDirectory)\n result_log += \"\"\"%s\"\"\" % (cadenceWheelLink,)\n discoveryMapLink = r.plot_sn_discovery_map(\n log,\n snSurveyDiscoveryTimes,\n peakAppMagList,\n snCampaignLengthList,\n redshifts,\n extraSurveyConstraints,\n pathToOutputPlotFolder=pathToOutputPlotDirectory)\n result_log += \"\"\"%s\"\"\" % (discoveryMapLink,)\n ratioMapLink = r.plot_sn_discovery_ratio_map(\n log,\n snSurveyDiscoveryTimes,\n redshifts,\n peakAppMagList,\n snCampaignLengthList,\n extraSurveyConstraints,\n pathToOutputPlotFolder=pathToOutputPlotDirectory)\n result_log += \"\"\"%s\"\"\" % (ratioMapLink,)\n result_log += \"\"\"%s\"\"\" % (snRatePlotLink,)\n\n now = datetime.now()\n now = now.strftime(\"%Y%m%dt%H%M%S\")\n mdLogPath = pathToResultsFolder + \\\n \"simulation_result_log_%s.md\" % (now,)\n mdLog = open(mdLogPath, 'w')\n mdLog.write(result_log)\n mdLog.close()\n\n dmd.convert_to_html(\n log=log,\n pathToMMDFile=mdLogPath,\n css=\"amblin\"\n )\n\n print \"Results can be found here: %(pathToResultsFolder)s\" % locals()\n html = mdLogPath.replace(\".md\", \".html\")\n print \"Open this file in your browser: %(html)s\" % locals()\n\n if \"dbConn\" in locals() and dbConn:\n dbConn.commit()\n dbConn.close()\n ## FINISH LOGGING ##\n endTime = times.get_now_sql_datetime()\n runningTime = times.calculate_time_difference(startTime, endTime)\n log.info('-- FINISHED ATTEMPT TO RUN THE cl_utils.py AT %s (RUNTIME: %s) --' %\n (endTime, runningTime, ))\n\n return", "def show_namespace(name, **kwargs):\n '''\n Return information for a given namespace defined by the specified name\n\n CLI Examples::\n\n salt '*' kubernetes.show_namespace kube-system\n '''\n cfg = _setup_conn(**kwargs)\n try:\n api_instance = kubernetes.client.CoreV1Api()\n api_response = api_instance.read_namespace(name)\n\n return api_response.to_dict()\n except (ApiException, HTTPError) as exc:\n if isinstance(exc, ApiException) and exc.status == 404:\n return None\n else:\n log.exception(\n 'Exception when calling '\n 'CoreV1Api->read_namespace'\n )\n raise CommandExecutionError(exc)\n finally:\n _cleanup(**cfg)", "def fun_en_complete_func(self, client, findstart_and_base, base=None):\n \"\"\"Invokable function from vim and neovim to perform completion.\"\"\"\n if isinstance(findstart_and_base, list):\n # Invoked by neovim\n findstart = findstart_and_base[0]\n base = findstart_and_base[1]\n else:\n # Invoked by vim\n findstart = findstart_and_base\n return client.complete_func(findstart, base)", "def main():\n from IPython import embed\n \"\"\" Python 3.6.6\n ibttl 2.605194091796875\n ttl 3.8316309452056885\n diff lt - ttl -1.2264368534088135\n librdfxml 31.267616748809814\n rdfxml 58.25124502182007\n diff lr - rl -26.983628273010254\n simple time 17.405116319656372\n \"\"\"\n\n \"\"\" Python 3.5.3 (pypy3)\n libttl 2.387338638305664\n ttl 1.3430471420288086\n diff lt - ttl 1.0442914962768555\n librdfxml 24.70371127128601\n rdfxml 17.85916304588318\n diff lr - rl 6.844548225402832\n simple time 18.32300615310669\n \"\"\"\n\n # well I guess that answers that question ...\n # librdf much faster for cpython, not for pypy3\n\n from time import time\n rdflib.plugin.register('librdfxml', rdflib.parser.Parser,\n 'librdflib', 'libRdfxmlParser')\n rdflib.plugin.register('libttl', rdflib.parser.Parser,\n 'librdflib', 'libTurtleParser')\n\n p1 = Path('~/git/NIF-Ontology/ttl/NIF-Molecule.ttl').expanduser()\n start = time()\n graph 
= rdflib.Graph().parse(p1.as_posix(), format='libttl')\n stop = time()\n lttime = stop - start\n print('libttl', lttime)\n #serialize(graph)\n\n start = time()\n graph = rdflib.Graph().parse(p1.as_posix(), format='turtle')\n stop = time()\n ttltime = stop - start\n print('ttl', ttltime)\n print('diff lt - ttl', lttime - ttltime)\n\n p2 = Path('~/git/NIF-Ontology/ttl/external/uberon.owl').expanduser()\n start = time()\n graph2 = rdflib.Graph().parse(p2.as_posix(), format='librdfxml')\n stop = time()\n lrtime = stop - start\n print('librdfxml', lrtime)\n if True:\n start = time()\n graph2 = rdflib.Graph().parse(p2.as_posix(), format='xml')\n stop = time()\n rltime = stop - start\n print('rdfxml', rltime)\n print('diff lr - rl', lrtime - rltime)\n\n if True:\n file_uri = p2.as_uri()\n parser = RDF.Parser(name='rdfxml')\n stream = parser.parse_as_stream(file_uri)\n start = time()\n # t = list(stream)\n t = tuple(statement_to_tuple(statement) for statement in stream)\n stop = time()\n stime = stop - start\n print('simple time', stime)\n\n embed()", "def complete(self, text, state):\n '''\n Alternate entry point for using the argcomplete completer in a readline-based REPL. See also\n `rlcompleter <https://docs.python.org/2/library/rlcompleter.html#completer-objects>`_.\n\n Usage:\n\n .. code-block:: python\n\n import argcomplete, argparse, readline\n parser = argparse.ArgumentParser()\n ...\n completer = argcomplete.CompletionFinder(parser)\n readline.set_completer(completer.complete)\n readline.parse_and_bind(\"tab: complete\")\n result = input(\"prompt> \")\n\n (Use ``raw_input`` instead of ``input`` on Python 2, or use `eight <https://github.com/kislyuk/eight>`_).\n '''\n if state == 0:\n print(\"Retrieving matches for\", text)\n cword_prequote, cword_prefix, cword_suffix, comp_words, first_colon_pos = split_line(text)\n print(\"Split line into prequote={}, prefix={}, suffix={}, words={}, fcp={}\".format(cword_prequote, cword_prefix, cword_suffix, comp_words, first_colon_pos))\n comp_words.insert(0, \"prog\")\n self.matches = self._get_completions(comp_words, cword_prefix, cword_prequote, first_colon_pos)\n print(\"Set matches to\", self.matches)\n if state < len(self.matches):\n print(\"Returning\", self.matches[state])\n return self.matches[state]\n else:\n return None" ]
[ 0.6493841409683228, 0.6391778588294983, 0.6360665559768677, 0.6351642608642578, 0.63483065366745, 0.633405864238739, 0.6264551877975464, 0.6261827349662781, 0.6250460743904114, 0.6250454187393188, 0.6231980919837952, 0.6224732398986816 ]
Upload application from file. By default, file name will be used as application name,
with "-vXX.YYY" suffix stripped.

Application is looked up by one of these classifiers, in order of priority: app-id, app-name, filename.
If app-id is provided, looks up existing application and updates its manifest.
If app-id is NOT specified, looks up by name, or creates new application.
def import_app(files, category, overwrite, id, name):
    """ Upload application from file. By default, file name will be used as application name,
    with "-vXX.YYY" suffix stripped.

    Application is looked up by one of these classifiers, in order of priority: app-id, app-name, filename.
    If app-id is provided, looks up existing application and updates its manifest.
    If app-id is NOT specified, looks up by name, or creates new application.
    """
    platform = _get_platform()
    org = platform.get_organization(QUBELL["organization"])
    if category:
        category = org.categories[category]
    regex = re.compile(r"^(.*?)(-v(\d+)|)\.[^.]+$")
    if (id or name) and len(files) > 1:
        raise Exception("--id and --name are supported only for single-file mode")
    for filename in files:
        click.echo("Importing " + filename, nl=False)
        if not name:
            match = regex.match(basename(filename))
            if not match:
                click.echo(_color("RED", "FAIL") + " unknown filename format")
                break
            name = regex.match(basename(filename)).group(1)
        click.echo(" => ", nl=False)
        app = None
        try:
            app = org.get_application(id=id, name=name)
            if app and not overwrite:
                click.echo("%s %s already exists %s" % (
                    app.id, _color("BLUE", app and app.name or name), _color("RED", "FAIL")))
                break
        except NotFoundError:
            if id:
                click.echo("%s %s not found %s" % (
                    id or "", _color("BLUE", app and app.name or name), _color("RED", "FAIL")))
                break
        click.echo(_color("BLUE", app and app.name or name) + " ", nl=False)
        try:
            with file(filename, "r") as f:
                if app:
                    app.update(name=app.name,
                               category=category and category.id or app.category,
                               manifest=Manifest(content=f.read()))
                else:
                    app = org.application(id=id, name=name, manifest=Manifest(content=f.read()))
                if category:
                    app.update(category=category.id)
            click.echo(app.id + _color("GREEN", " OK"))
        except IOError as e:
            click.echo(_color("RED", " FAIL") + " " + e.message)
            break
[ "def app_upload(path, name, manifest, package, docker_address, registry, manifest_only, **kwargs):\n \"\"\"\n Upload application with its environment (directory) into the storage.\n\n Application directory or its subdirectories must contain valid manifest file\n named `manifest.json` or `manifest` otherwise you must specify it explicitly by\n setting `--manifest` option.\n\n You can specify application name. By default, leaf directory name is treated as application\n name.\n\n If you have already prepared application archive (*.tar.gz), you can explicitly specify path to\n it by setting `--package` option.\n\n Additional output can be turned on by passing `-vvvv` option.\n \"\"\"\n lower_limit = 120.0\n\n ctx = Context(**kwargs)\n if ctx.timeout < lower_limit:\n ctx.timeout = lower_limit\n log.info('shifted timeout to the %.2fs', ctx.timeout)\n\n mutex_record = collections.namedtuple('mutex_record', 'value, name')\n mutex = [\n (mutex_record(path, 'PATH'), mutex_record(package, '--package')),\n (mutex_record(package, '--package'), mutex_record(docker_address, '--docker')),\n (mutex_record(package, '--package'), mutex_record(registry, '--registry')),\n ]\n for (f, s) in mutex:\n if f.value and s.value:\n click.echo('Wrong usage: option {} and {} are mutual exclusive, you can only use one'.\n format(f.name, s.name))\n exit(os.EX_USAGE)\n\n if manifest_only:\n ctx.execute_action('app:upload-manual', **{\n 'storage': ctx.repo.create_secure_service('storage'),\n 'name': name,\n 'manifest': manifest,\n 'package': None,\n 'manifest_only': manifest_only,\n })\n elif package:\n ctx.execute_action('app:upload-manual', **{\n 'storage': ctx.repo.create_secure_service('storage'),\n 'name': name,\n 'manifest': manifest,\n 'package': package\n })\n elif docker_address:\n ctx.execute_action('app:upload-docker', **{\n 'storage': ctx.repo.create_secure_service('storage'),\n 'path': path,\n 'name': name,\n 'manifest': manifest,\n 'address': docker_address,\n 'registry': registry\n })\n else:\n ctx.execute_action('app:upload', **{\n 'storage': ctx.repo.create_secure_service('storage'),\n 'path': path,\n 'name': name,\n 'manifest': manifest\n })", "def application(self, id=None, manifest=None, name=None):\n \"\"\" Smart method. Creates, picks or modifies application.\n If application found by name or id and manifest not changed: return app.\n If app found by id, but other parameters differs: change them.\n If no application found, create.\n \"\"\"\n\n modify = False\n found = False\n\n # Try to find application by name or id\n if name and id:\n found = self.get_application(id=id)\n if not found.name == name:\n modify = True\n elif id:\n found = self.get_application(id=id)\n name = found.name\n elif name:\n try:\n found = self.get_application(name=name)\n except exceptions.NotFoundError:\n pass\n\n # If found - compare parameters\n if found:\n if manifest and not manifest == found.manifest:\n modify = True\n\n # We need to update application\n if found and modify:\n found.update(name=name, manifest=manifest)\n if not found:\n created = self.create_application(name=name, manifest=manifest)\n\n return found or created", "def upload(path, check_resources = true)\n unless File.exist? 
path\n raise CFoundry::Error, \"Invalid application path '#{path}'\"\n end\n\n zipfile = \"#{Dir.tmpdir}/#{@guid}.zip\"\n tmpdir = \"#{Dir.tmpdir}/.cf_#{@guid}_files\"\n\n FileUtils.rm_f(zipfile)\n FileUtils.rm_rf(tmpdir)\n\n prepare_package(path, tmpdir)\n\n resources = determine_resources(tmpdir) if check_resources\n\n packed = CFoundry::Zip.pack(tmpdir, zipfile)\n\n @client.base.upload_app(@guid, packed && zipfile, resources || [])\n ensure\n FileUtils.rm_f(zipfile) if zipfile\n FileUtils.rm_rf(tmpdir) if tmpdir\n end", "def upload_scan(self, application_id, file_path):\n \"\"\"\n Uploads and processes a scan file.\n :param application_id: Application identifier.\n :param file_path: Path to the scan file to be uploaded.\n \"\"\"\n return self._request(\n 'POST', 'rest/applications/' + str(application_id) + '/upload',\n files={'file': open(file_path, 'rb')}\n )", "def upload_applications(self, metadata, category=None):\n \"\"\"\n Mimics get starter-kit and wizard functionality to create components\n Note: may create component duplicates, not idempotent\n :type metadata: str\n :type category: Category\n :param metadata: url to meta.yml\n :param category: category\n \"\"\"\n upload_json = self._router.get_upload(params=dict(metadataUrl=metadata)).json()\n manifests = [dict(name=app['name'], manifest=app['url']) for app in upload_json['applications']]\n if not category:\n category = self.categories['Application']\n data = {'categoryId': category.id, 'applications': manifests}\n self._router.post_application_kits(org_id=self.organizationId, data=json.dumps(data))", "def create_app(self, apps_path, name):\n \"\"\"\n Create Trionyx app in given path\n\n :param str path: path to create app in.\n :param str name: name of app\n :raises FileExistsError:\n \"\"\"\n app_path = os.path.join(apps_path, name.lower())\n\n shutil.copytree(self.app_path, app_path)\n\n self.update_file(app_path, '__init__.py', {\n 'name': name.lower()\n })\n\n self.update_file(app_path, 'apps.py', {\n 'name': name.lower(),\n 'verbose_name': name.capitalize()\n })", "def from_file(self, filename):\n \"\"\" Uploads a file from a filename on your system.\n\n :param filename: Path to file on your system.\n\n Example:\n >>> myimage.from_file('/path/to/dinner.png')\n \"\"\"\n\n mimetype = mimetypes.guess_type(filename)[0] or \"application/octal-stream\"\n headers = {\n \"Content-Type\": mimetype,\n \"Content-Length\": str(os.path.getsize(filename)),\n }\n\n # upload file\n file_data = self._pump.request(\n \"/api/user/{0}/uploads\".format(self._pump.client.nickname),\n method=\"POST\",\n data=open(filename, \"rb\").read(),\n headers=headers,\n )\n\n # now post it to the feed\n data = {\n \"verb\": \"post\",\n \"object\": file_data,\n }\n data.update(self.serialize())\n\n if not self.content and not self.display_name and not self.license:\n self._post_activity(data)\n else:\n self._post_activity(data, unserialize=False)\n\n # update post with display_name and content\n if self.content:\n file_data['content'] = self.content\n if self.display_name:\n file_data['displayName'] = self.display_name\n if self.license:\n file_data['license'] = self.license\n data = {\n \"verb\": \"update\",\n \"object\": file_data,\n }\n self._post_activity(data)\n\n return self", "def addApplication(self, name, version=None, path=None, disk_num=0, soft=-1):\n \"\"\"Add a new application in some disk.\"\"\"\n\n fapp = Features()\n fapp.features.append(Feature(\"name\", \"=\", name))\n if version:\n fapp.features.append(Feature(\"version\", \"=\", 
version))\n if path:\n fapp.features.append(Feature(\"path\", \"=\", path))\n self.features.append(Feature(\"disk.%d.applications\" % disk_num, \"contains\", fapp, soft > 0))", "public function appAction() {\n $name = ucfirst($this->getFirstParamOrAsk('Enter application\\'s name'));;\n FSService::makeWritable(DC::getEnvironment()->getApplicationRoot() . $name);\n $this->createCV(array(\n 'name' => 'Index',\n 'app' => $name,\n ));\n $appPath = DC::getEnvironment()->getApplicationRoot() . $name . '/';\n $this->safeCreateFromTemplate($appPath . 'Views/_layout.slot', '_view_layout', array('app'=>$name));\n $this->safeCreateFromTemplate($appPath . 'config.yml', '_app_config', array('app'=>$name));\n $config = DC::getProjectConfig();\n if (!$config->has('applications/'.strtolower($name))) {\n $config->set('applications/'.strtolower($name), strtolower($name));\n $this->notify('added information to project.yml', '+ config');\n $config->save();\n }\n }", "def _upload(self, project_id, updating, file_path, language_code=None,\n overwrite=False, sync_terms=False, tags=None, fuzzy_trigger=None):\n \"\"\"\n Internal: updates terms / translations\n\n File uploads are limited to one every 30 seconds\n \"\"\"\n options = [\n self.UPDATING_TERMS,\n self.UPDATING_TERMS_TRANSLATIONS,\n self.UPDATING_TRANSLATIONS\n ]\n if updating not in options:\n raise POEditorArgsException(\n 'Updating arg must be in {}'.format(options)\n )\n\n options = [\n self.UPDATING_TERMS_TRANSLATIONS,\n self.UPDATING_TRANSLATIONS\n ]\n if language_code is None and updating in options:\n raise POEditorArgsException(\n 'Language code is required only if updating is '\n 'terms_translations or translations)'\n )\n\n if updating == self.UPDATING_TRANSLATIONS:\n tags = None\n sync_terms = None\n\n # Special content type:\n tags = tags or ''\n language_code = language_code or ''\n sync_terms = '1' if sync_terms else '0'\n overwrite = '1' if overwrite else '0'\n fuzzy_trigger = '1' if fuzzy_trigger else '0'\n project_id = str(project_id)\n\n with open(file_path, 'r+b') as local_file:\n data = self._run(\n url_path=\"projects/upload\",\n id=project_id,\n language=language_code,\n file=local_file,\n updating=updating,\n tags=tags,\n sync_terms=sync_terms,\n overwrite=overwrite,\n fuzzy_trigger=fuzzy_trigger\n )\n return data['result']", "def _execute(self, app_, file_):\n \"\"\"Run app with file as input.\n\n :param app_: application to run.\n :param file_: file to run app with.\n :return: success True, else False\n :rtype: bool\n \"\"\"\n app_name = os.path.basename(app_)\n args = [app_]\n args.extend(self.args[app_])\n args.append(file_)\n process = subprocess.Popen(args)\n\n time.sleep(1)\n status = {True: Status.SUCCESS, False: Status.FAILED}\n crashed = process.poll()\n result = status[crashed is None]\n self.stats_.add(app_name, result)\n if result is Status.SUCCESS:\n # process did not crash, so just terminate it\n process.terminate()", "function upload(file_mngr_url, file, filename, success, error) {\n // Assemble data\n var fd = new FormData();\n fd.append(\"filename\", filename);\n fd.append(\"file\", file);\n var req = new XMLHttpRequest();\n req.open(\"POST\", file_mngr_url + \"/upload\");\n req.onload = function(e) {\n var url = JSON.parse(req.response).url;\n if (url === undefined)\n error();\n else\n success(url);\n };\n req.send(fd);\n}" ]
[ 0.7606568336486816, 0.722040593624115, 0.7040286064147949, 0.6995224952697754, 0.6908955574035645, 0.6881159543991089, 0.6877210140228271, 0.6844629049301147, 0.684209942817688, 0.68125319480896, 0.680411696434021, 0.6802532076835632 ]
Exports current account configuration in shell-friendly form. Takes into account explicit top-level flags like --organization.
def show_account(): """ Exports current account configuration in shell-friendly form. Takes into account explicit top-level flags like --organization. """ click.echo("# tonomi api") for (key, env) in REVERSE_MAPPING.items(): value = QUBELL.get(key, None) if value: click.echo("export %s='%s'" % (env, value)) if any(map(lambda x: PROVIDER.get(x), REVERSE_PROVIDER_MAPPING.keys())): click.echo("# cloud account") for (key, env) in REVERSE_PROVIDER_MAPPING.items(): value = PROVIDER.get(key, None) if value: click.echo("export %s='%s'" % (env, value))
[ "def main(role, ou, assume, profile, output, regions, active):\n \"\"\"Generate a c7n-org accounts config file using AWS Organizations\n\n With c7n-org you can then run policies or arbitrary scripts across\n accounts.\n \"\"\"\n\n session = get_session(assume, 'c7n-org', profile)\n client = session.client('organizations')\n accounts = []\n for path in ou:\n ou = get_ou_from_path(client, path)\n accounts.extend(get_accounts_for_ou(client, ou, active))\n\n results = []\n for a in accounts:\n tags = []\n path_parts = a['Path'].strip('/').split('/')\n for idx, _ in enumerate(path_parts):\n tags.append(\"path:/%s\" % \"/\".join(path_parts[:idx + 1]))\n\n ainfo = {\n 'account_id': a['Id'],\n 'email': a['Email'],\n 'name': a['Name'],\n 'tags': tags,\n 'role': role.format(**a)}\n if regions:\n ainfo['regions'] = regions\n results.append(ainfo)\n\n print(\n yaml.safe_dump(\n {'accounts': results},\n default_flow_style=False),\n file=output)", "def sync(config, group, accounts=(), dryrun=False, region=None):\n \"\"\"sync last recorded export to actual\n\n Use --dryrun to check status.\n \"\"\"\n config = validate.callback(config)\n destination = config.get('destination')\n client = boto3.Session().client('s3')\n\n for account in config.get('accounts', ()):\n if accounts and account['name'] not in accounts:\n continue\n\n session = get_session(account['role'], region)\n account_id = session.client('sts').get_caller_identity()['Account']\n prefix = destination.get('prefix', '').rstrip('/') + '/%s' % account_id\n prefix = \"%s/%s\" % (prefix, group)\n\n exports = get_exports(client, destination['bucket'], prefix + \"/\")\n\n role = account.pop('role')\n if isinstance(role, six.string_types):\n account['account_id'] = role.split(':')[4]\n else:\n account['account_id'] = role[-1].split(':')[4]\n account.pop('groups')\n\n if exports:\n last_export = exports.pop()\n account['export'] = last_export\n else:\n account['export'] = 'missing'\n last_export = None\n try:\n tag_set = client.get_object_tagging(\n Bucket=destination['bucket'], Key=prefix).get('TagSet', [])\n except ClientError:\n tag_set = []\n\n tags = {t['Key']: t['Value'] for t in tag_set}\n tagged_last_export = None\n\n if 'LastExport' in tags:\n le = parse(tags['LastExport'])\n tagged_last_export = (le.year, le.month, le.day)\n account['sync'] = tagged_last_export\n else:\n account['sync'] = account['export'] != 'missing' and 'sync' or 'missing'\n\n if last_export is None:\n continue\n\n if tagged_last_export == last_export or account['export'] == 'missing':\n continue\n\n if dryrun:\n continue\n\n client.put_object(\n Bucket=destination['bucket'],\n Key=prefix,\n Body=json.dumps({}),\n ACL=\"bucket-owner-full-control\",\n ServerSideEncryption=\"AES256\")\n\n export_time = datetime.now().replace(tzinfo=tzlocal()).astimezone(tzutc())\n export_time = export_time.replace(\n year=last_export[0], month=last_export[1], day=last_export[2],\n minute=0, second=0, microsecond=0, hour=0)\n client.put_object_tagging(\n Bucket=destination['bucket'], Key=prefix,\n Tagging={\n 'TagSet': [{\n 'Key': 'LastExport',\n 'Value': export_time.isoformat()}]})\n\n accounts_report = []\n for a in config.get('accounts'):\n if accounts and a['name'] not in accounts:\n continue\n if isinstance(a['sync'], tuple):\n a['sync'] = \"%s/%s/%s\" % (a['sync'])\n if isinstance(a['export'], tuple):\n a['export'] = \"%s/%s/%s\" % (a['export'])\n accounts_report.append(a)\n\n accounts_report.sort(key=operator.itemgetter('export'), reverse=True)\n print(tabulate(accounts_report, 
headers='keys'))", "def status(config, group, accounts=(), region=None):\n \"\"\"report current export state status\"\"\"\n config = validate.callback(config)\n destination = config.get('destination')\n client = boto3.Session().client('s3')\n\n for account in config.get('accounts', ()):\n if accounts and account['name'] not in accounts:\n continue\n\n session = get_session(account['role'], region)\n account_id = session.client('sts').get_caller_identity()['Account']\n prefix = destination.get('prefix', '').rstrip('/') + '/%s' % account_id\n prefix = \"%s/flow-log\" % prefix\n\n role = account.pop('role')\n if isinstance(role, six.string_types):\n account['account_id'] = role.split(':')[4]\n else:\n account['account_id'] = role[-1].split(':')[4]\n\n account.pop('groups')\n\n try:\n tag_set = client.get_object_tagging(\n Bucket=destination['bucket'], Key=prefix).get('TagSet', [])\n except ClientError:\n account['export'] = 'missing'\n continue\n tags = {t['Key']: t['Value'] for t in tag_set}\n\n if 'LastExport' not in tags:\n account['export'] = 'empty'\n else:\n last_export = parse(tags['LastExport'])\n account['export'] = last_export.strftime('%Y/%m/%d')\n\n accounts = [a for a in config.get('accounts') if a in accounts or not accounts]\n accounts.sort(key=operator.itemgetter('export'), reverse=True)\n print(tabulate(accounts, headers='keys'))", "def run(config, start, end, accounts, region, debug):\n \"\"\"run export across accounts and log groups specified in config.\"\"\"\n config = validate.callback(config)\n destination = config.get('destination')\n start = start and parse(start) or start\n end = end and parse(end) or datetime.now()\n executor = debug and MainThreadExecutor or ThreadPoolExecutor\n with executor(max_workers=32) as w:\n futures = {}\n for account in config.get('accounts', ()):\n if accounts and account['name'] not in accounts:\n continue\n futures[\n w.submit(process_account, account, start,\n end, destination, region)] = account\n for f in as_completed(futures):\n account = futures[f]\n if f.exception():\n log.error(\"Error on account %s err: %s\",\n account['name'], f.exception())\n log.info(\"Completed %s\", account['name'])", "def export(self):\n \"\"\"Export a set of organizations.\n\n Method to export organizations from the registry. 
Organizations schema\n will follow Sorting Hat JSON format.\n\n :returns: a JSON formatted str\n \"\"\"\n organizations = {}\n\n orgs = api.registry(self.db)\n\n for org in orgs:\n domains = [{'domain': dom.domain,\n 'is_top': dom.is_top_domain}\n for dom in org.domains]\n domains.sort(key=lambda x: x['domain'])\n\n organizations[org.name] = domains\n\n obj = {'time': str(datetime.datetime.now()),\n 'blacklist': [],\n 'organizations': organizations,\n 'uidentities': {}}\n\n return json.dumps(obj, default=self._json_encoder,\n indent=4, separators=(',', ': '),\n sort_keys=True)", "def export_account_state(self, account_state):\n \"\"\"\n Make an account state presentable to external consumers\n \"\"\"\n return {\n 'address': account_state['address'],\n 'type': account_state['type'],\n 'credit_value': '{}'.format(account_state['credit_value']),\n 'debit_value': '{}'.format(account_state['debit_value']),\n 'lock_transfer_block_id': account_state['lock_transfer_block_id'],\n 'block_id': account_state['block_id'],\n 'vtxindex': account_state['vtxindex'],\n 'txid': account_state['txid'],\n }", "def export(user, export_name, export_data)\n @user = user\n @organization = user.organization\n\n filename = export_data.filename(export_name)\n filename_without_extension = export_data.filename(export_name, extension: false)\n\n attachments[\"#{filename_without_extension}.zip\"] = FileZipper.new(filename, export_data.read).zip\n\n with_user(user) do\n mail(to: \"#{user.name} <#{user.email}>\", subject: I18n.t(\"decidim.export_mailer.subject\", name: filename))\n end\n end", "def output_account(gandi, account, output_keys, justify=17):\n \"\"\" Helper to output an account information.\"\"\"\n output_generic(gandi, account, output_keys, justify)\n\n if 'prepaid' in output_keys:\n prepaid = '%s %s' % (account['prepaid_info']['amount'],\n account['prepaid_info']['currency'])\n output_line(gandi, 'prepaid', prepaid, justify)\n\n if 'credit' in output_keys:\n output_line(gandi, 'credits', None, justify)\n available = account.get('credits')\n output_line(gandi, ' available', available, justify)\n # sometimes rating is returning nothing\n usage_str = left_str = 'not available'\n usage = account.get('credit_usage', 0)\n left = account.get('left')\n if usage:\n usage_str = '%d/h' % usage\n\n years, months, days, hours = left\n left_str = ('%d year(s) %d month(s) %d day(s) %d hour(s)' %\n (years, months, days, hours))\n\n output_line(gandi, ' usage', usage_str, justify)\n output_line(gandi, ' time left', left_str, justify)", "def run(self, *args):\n \"\"\"Export data from the registry.\n\n By default, it writes the data to the standard output. 
If a\n positional argument is given, it will write the data on that\n file.\n \"\"\"\n params = self.parser.parse_args(args)\n\n with params.outfile as outfile:\n if params.identities:\n code = self.export_identities(outfile, params.source)\n elif params.orgs:\n code = self.export_organizations(outfile)\n else:\n # The running proccess never should reach this section\n raise RuntimeError(\"Unexpected export option\")\n\n return code", "def access(config, region, accounts=()):\n \"\"\"Check iam permissions for log export access in each account\"\"\"\n config = validate.callback(config)\n accounts_report = []\n\n def check_access(account):\n accounts_report.append(account)\n session = get_session(account['role'], region)\n identity = session.client('sts').get_caller_identity()\n account['account_id'] = identity['Account']\n account.pop('groups')\n account.pop('role')\n client = session.client('iam')\n policy_arn = identity['Arn']\n if policy_arn.count('/') > 1:\n policy_arn = policy_arn.rsplit('/', 1)[0]\n if ':sts:' in policy_arn:\n policy_arn = policy_arn.replace(':sts', ':iam')\n if ':assumed-role' in policy_arn:\n policy_arn = policy_arn.replace(':assumed-role', ':role')\n evaluation = client.simulate_principal_policy(\n PolicySourceArn=policy_arn,\n ActionNames=['logs:CreateExportTask'])['EvaluationResults']\n account['access'] = evaluation[0]['EvalDecision']\n\n with ThreadPoolExecutor(max_workers=16) as w:\n futures = {}\n for account in config.get('accounts', ()):\n if accounts and account['name'] not in accounts:\n continue\n futures[w.submit(check_access, account)] = None\n for f in as_completed(futures):\n pass\n accounts_report.sort(key=operator.itemgetter('access'), reverse=True)\n print(tabulate(accounts_report, headers='keys'))", "def outpat(self, acc=None):\n \"\"\"\n Determine the full outfile pattern for the given account.\n\n Return None if not specified.\n \"\"\"\n outdir = self.outdir(acc)\n outpat = self.get('outpat', acc=acc)\n return os.path.join(outdir, outpat) if outdir and outpat else None", "def export(name, path, replace=False):\n '''\n Export a zones configuration\n\n name : string\n name of the zone\n path : string\n path of file to export too.\n replace : boolean\n replace the file if it exists\n\n '''\n ret = {'name': name,\n 'changes': {},\n 'result': None,\n 'comment': ''}\n\n zones = __salt__['zoneadm.list'](installed=True, configured=True)\n if name in zones:\n ## zone exists\n if __opts__['test']:\n ## pretend we did the correct thing\n ret['result'] = True\n ret['comment'] = 'Zone configartion for {0} exported to {1}'.format(\n name,\n path,\n )\n ret['changes'][name] = 'exported'\n if __salt__['file.file_exists'](path) and not replace:\n ret['result'] = False\n ret['changes'] = {}\n ret['comment'] = 'File {0} exists, zone configuration for {1} not exported.'.format(\n path,\n name,\n )\n else:\n ## export and update file\n cfg_tmp = salt.utils.files.mkstemp()\n __salt__['zonecfg.export'](name, cfg_tmp)\n if not __salt__['file.file_exists'](path):\n ## move cfg_tmp to path\n try:\n __salt__['file.move'](cfg_tmp, path)\n except CommandExecutionError:\n if __salt__['file.file_exists'](cfg_tmp):\n __salt__['file.remove'](cfg_tmp)\n ret['result'] = False\n ret['comment'] = 'Unable to export zone configuration for {0} to {1}!'.format(\n name,\n path,\n )\n else:\n ret['result'] = True\n ret['comment'] = 'Zone configuration for {0} was exported to {1}.'.format(\n name,\n path,\n )\n ret['changes'][name] = 'exported'\n else:\n cfg_diff = 
__salt__['file.get_diff'](path, cfg_tmp)\n if not cfg_diff:\n ret['result'] = True\n ret['comment'] = 'Zone configuration for {0} was already exported to {1}.'.format(\n name,\n path\n )\n if __salt__['file.file_exists'](cfg_tmp):\n __salt__['file.remove'](cfg_tmp)\n else:\n if replace:\n try:\n __salt__['file.move'](cfg_tmp, path)\n except CommandExecutionError:\n if __salt__['file.file_exists'](cfg_tmp):\n __salt__['file.remove'](cfg_tmp)\n ret['result'] = False\n ret['comment'] = 'Unable to be re-export zone configuration for {0} to {1}!'.format(\n name,\n path,\n )\n else:\n ret['result'] = True\n ret['comment'] = 'Zone configuration for {0} was re-exported to {1}.'.format(\n name,\n path,\n )\n ret['changes'][name] = 'exported'\n else:\n ret['result'] = False\n ret['comment'] = 'Zone configuration for {0} is different from the one exported to {1}!'.format(\n name,\n path\n )\n if __salt__['file.file_exists'](cfg_tmp):\n __salt__['file.remove'](cfg_tmp)\n else:\n ## zone does not exist\n ret['comment'] = []\n ret['comment'].append(\n 'The zone {0} does not exist.'.format(name)\n )\n for zone in zones:\n if zones[zone]['uuid'] == name:\n ret['comment'].append(\n 'The zone {0} has a uuid of {1}, please use the zone name instead!'.format(\n name,\n path,\n )\n )\n\n ret['result'] = False\n ret['comment'] = \"\\n\".join(ret['comment'])\n\n return ret" ]
[ 0.7122164964675903, 0.6900547742843628, 0.6882532835006714, 0.6875327825546265, 0.6827141642570496, 0.6803291440010071, 0.6782150864601135, 0.677182674407959, 0.6758404970169067, 0.6755895018577576, 0.6741968989372253, 0.6734408140182495 ]
Generates new session token from the given refresh token. :param refresh_token: refresh token to generate from :param verbose: whether expiration time should be added to output
def generate_session_token(refresh_token, verbose): """ Generates new session token from the given refresh token. :param refresh_token: refresh token to generate from :param verbose: whether expiration time should be added to output """ platform = _get_platform(authenticated=False) session_token, expires_in = platform.generate_session_token(refresh_token) if verbose: click.echo("%s\n\n%s" % (session_token, _color('YELLOW', "Expires in %d seconds" % expires_in))) else: click.echo(session_token)
[ "def use_refresh_token(self, refresh_token, scope=None):\n # type (str, Optional[List[str]]) -> Tuple[se_leg_op.access_token.AccessToken, Optional[str]]\n \"\"\"\n Creates a new access token, and refresh token, based on the supplied refresh token.\n :return: new access token and new refresh token if the old one had an expiration time\n \"\"\"\n\n if refresh_token not in self.refresh_tokens:\n raise InvalidRefreshToken('{} unknown'.format(refresh_token))\n\n refresh_token_info = self.refresh_tokens[refresh_token]\n if 'exp' in refresh_token_info and refresh_token_info['exp'] < int(time.time()):\n raise InvalidRefreshToken('{} has expired'.format(refresh_token))\n\n authz_info = self.access_tokens[refresh_token_info['access_token']]\n\n if scope:\n if not requested_scope_is_allowed(scope, authz_info['granted_scope']):\n logger.debug('trying to refresh token with superset scope, requested_scope=%s, granted_scope=%s',\n scope, authz_info['granted_scope'])\n raise InvalidScope('Requested scope includes non-granted value')\n scope = ' '.join(scope)\n logger.debug('refreshing token with new scope, old_scope=%s -> new_scope=%s', authz_info['scope'], scope)\n else:\n # OAuth 2.0: scope: \"[...] if omitted is treated as equal to the scope originally granted by the resource owner\"\n scope = authz_info['granted_scope']\n\n new_access_token = self._create_access_token(authz_info['sub'], authz_info[self.KEY_AUTHORIZATION_REQUEST],\n authz_info['granted_scope'], scope)\n\n new_refresh_token = None\n if self.refresh_token_threshold \\\n and 'exp' in refresh_token_info \\\n and refresh_token_info['exp'] - int(time.time()) < self.refresh_token_threshold:\n # refresh token is close to expiry, issue a new one\n new_refresh_token = self.create_refresh_token(new_access_token.value)\n else:\n self.refresh_tokens[refresh_token]['access_token'] = new_access_token.value\n\n logger.debug('refreshed tokens, new_access_token=%s new_refresh_token=%s old_refresh_token=%s',\n new_access_token, new_refresh_token, refresh_token)\n return new_access_token, new_refresh_token", "def generate_access_token_from_refresh_token(request, client):\n \"\"\" Generates a new AccessToken from a request containing a refresh token.\n\n Read the specification: http://tools.ietf.org/html/rfc6749#section-6.\n \"\"\"\n refresh_token_value = request.POST.get('refresh_token')\n if not refresh_token_value:\n raise InvalidRequest('no \"refresh_token\" provided')\n\n try:\n existing_access_token = AccessToken.objects.get(\n refresh_token=refresh_token_value,\n client=client)\n except AccessToken.DoesNotExist:\n raise InvalidGrant('\"{}\" is not a valid \"refresh_token\"'.format(\n refresh_token_value))\n\n if existing_access_token.invalidated:\n refresh_token_used_after_invalidation.send(\n sender='djoauth2',\n access_token=existing_access_token,\n request=request)\n raise InvalidGrant('\"{}\" is not a valid \"refresh_token\"'.format(\n refresh_token_value))\n\n if not existing_access_token.refreshable:\n raise InvalidGrant('access token is not refreshable')\n\n # The specification (http://tools.ietf.org/html/rfc6749#section-6) describes\n # the scope parameter as follows:\n #\n # scope\n # OPTIONAL. The scope of the access request as described by\n # Section 3.3. 
The requested scope MUST NOT include any\n # scope not originally granted by the resource owner, and if\n # omitted is treated as equal to the scope originally granted\n # by the resource owner.\n #\n # This opens the possibility that a Client might successfully request a\n # subset of the existing scopes, but later in the same section comes the\n # following:\n #\n # If a new refresh token is issued, the refresh token scope MUST be\n # identical to that of the refresh token included by the client in the\n # request.\n #\n # Confusingly, http://tools.ietf.org/html/rfc6749#section-1.5 includes the\n # following:\n #\n # Refresh tokens are credentials used to obtain access tokens. Refresh\n # tokens are issued to the client by the authorization server and are\n # used to obtain a new access token when the current access token becomes\n # invalid or expires, or to obtain additional access tokens with\n # identical or narrower scope (access tokens may have a shorter lifetime\n # and fewer permissions than authorized by the resource owner).\n #\n # This last section explicitly allows tokens with narrower scope than\n # originally granted, which is in direct contradiction with the directive\n # that the scope must be equivalent to that granted earlier.\n #\n # Because the specification seems to contradict itself, I tend towards\n # observing the stricter directive (not allowing a subset of scope,) even\n # though to me there seems to be no reason to disallow that feature. That\n # said, I'm not sure why a client would ever ask for less scope than\n # originally granted.\n scope_objects = existing_access_token.scopes.all()\n new_scope_names = request.POST.get('scope', '')\n if new_scope_names:\n new_scope_names = new_scope_names.split(' ')\n if not existing_access_token.has_scope(*new_scope_names):\n raise InvalidScope('requested scopes exceed initial grant')\n\n scope_objects = []\n for scope_name in new_scope_names:\n try:\n scope_objects.append(Scope.objects.get(name=scope_name))\n except Scope.DoesNotExist:\n raise InvalidScope('\"{}\" is not a valid scope'.format(scope_name))\n\n requested_scope_string = request.POST.get('scope', '')\n if requested_scope_string:\n requested_scope_names = set(requested_scope_string.split(' '))\n if not requested_scope_names == existing_access_token.get_scope_names_set():\n raise InvalidScope('requested scopes do not match initial grant')\n\n\n # The new AccessToken is only refreshable if at the time of refresh the\n # server is configured to create refreshable tokens by default. It DOES NOT\n # inherit the existing token's 'refreshability' automatically. 
No behavior is\n # described in the specification; I believe this to be a sane decision.\n new_access_token = AccessToken.objects.create(\n user=existing_access_token.user,\n client=existing_access_token.client)\n new_access_token.authorization_code = existing_access_token.authorization_code\n new_access_token.scopes = scope_objects\n new_access_token.save()\n\n existing_access_token.invalidate()\n\n return new_access_token", "def create_refresh_token(self, access_token_value):\n # type: (str) -> str\n \"\"\"\n Creates an refresh token bound to the specified access token.\n \"\"\"\n if access_token_value not in self.access_tokens:\n raise InvalidAccessToken('{} unknown'.format(access_token_value))\n\n if not self.refresh_token_lifetime:\n logger.debug('no refresh token issued for for access_token=%s', access_token_value)\n return None\n\n refresh_token = rand_str()\n authz_info = {'access_token': access_token_value, 'exp': int(time.time()) + self.refresh_token_lifetime}\n self.refresh_tokens[refresh_token] = authz_info\n\n logger.debug('issued refresh_token=%s expiring=%d for access_token=%s', refresh_token, authz_info['exp'],\n access_token_value)\n return refresh_token", "async def generate_refresh_token(self, request, user):\n \"\"\"\n Generate a refresh token for a given user.\n \"\"\"\n refresh_token = await utils.call(self.config.generate_refresh_token())\n user_id = await self._get_user_id(user)\n await utils.call(\n self.store_refresh_token,\n user_id=user_id,\n refresh_token=refresh_token,\n request=request,\n )\n return refresh_token", "def delete_refresh_token(self, refresh_token):\n \"\"\"\n Deletes a refresh token after use\n :param refresh_token: The refresh token to delete.\n \"\"\"\n access_token = self.fetch_by_refresh_token(refresh_token)\n self.mc.delete(self._generate_cache_key(access_token.token))\n self.mc.delete(self._generate_cache_key(refresh_token))", "def refresh_token(self, token, new_refresh=False):\n \"\"\"\n Issue a new access token using a valid refresh token\n\n :param token: Refresh token\n :param new_refresh: Whether a new refresh token should be minted or not\n :return: Dictionary with session info\n :raises: ExpiredToken for invalid refresh token\n WrongTokenType for wrong token type\n \"\"\"\n\n try:\n _tinfo = self.handler['refresh_token'].info(token)\n except KeyError:\n return False\n\n if is_expired(int(_tinfo['exp'])) or _tinfo['black_listed']:\n raise ExpiredToken()\n\n _sid = _tinfo['sid']\n session_info = self[_sid]\n\n session_info = self.replace_token(_sid, session_info, 'access_token')\n\n session_info[\"token_type\"] = self.handler['access_token'].token_type\n\n if new_refresh:\n session_info = self.replace_token(_sid, session_info,\n 'refresh_token')\n\n self[_sid] = session_info\n return session_info", "def refresh_access_token(self, refresh_token):\n ''' Refreshes the current access token.\n\n Gets a new access token, updates client auth and returns it.\n\n Args:\n\n refresh_token (str): Refresh token to use\n\n Returns:\n The new access token\n '''\n request = self._get_request()\n response = request.post(self.OAUTH_TOKEN_URL, {\n \"grant_type\": \"refresh_token\",\n \"refresh_token\": refresh_token\n })\n self.auth = HSAccessTokenAuth.from_response(response)\n return self.auth.access_token", "def refresh_token(self, refresh_token):\n \"\"\"\n Get a new token, using the provided refresh token. 
Returns the new\n access_token.\n \"\"\"\n\n response = requests.post('%saccess_token' % OAUTH_URL, {\n 'refresh_token': refresh_token,\n 'grant_type': 'refresh_token',\n 'client_id': self.client_id,\n 'client_secret': self.client_secret\n })\n resp = json.loads(response.content)\n\n if 'access_token' in resp:\n self.token = resp['access_token']\n\n return resp", "def create_token_with_refresh_token(self, data, token_valid_for=180,\r\n refresh_token_valid_for=86400):\r\n \"\"\" Create an encrypted JWT with a refresh_token \"\"\"\r\n refresh_token = None\r\n refresh_token = jwt.encode({\r\n 'exp':\r\n datetime.utcnow() +\r\n timedelta(seconds=refresh_token_valid_for)},\r\n self.app_secret).decode(\"utf-8\")\r\n jwt_token = jwt.encode({\r\n 'data': data,\r\n 'refresh_token': refresh_token,\r\n 'exp': datetime.utcnow() + timedelta(seconds=token_valid_for)},\r\n self.app_secret)\r\n return Security.encrypt(jwt_token)", "def refresh(self, refresh_token):\n \"\"\"\n Renew an oauth token given an appropriate refresh token.\n\n :param refresh_token: The Refresh Token\n :return: A tuple of (token, expiration time in unix time stamp)\n \"\"\"\n r = requests.post(self.apiurl + \"/token\", params={\"grant_type\": \"refresh_token\", \"client_id\": self.cid,\n \"client_secret\": self.csecret,\n \"refresh_token\": refresh_token})\n\n if r.status_code != 200:\n raise ServerError\n\n jsd = r.json()\n\n return jsd['access_token'], int(jsd['expires_in']) + int(jsd['created_at'])", "def refresh_jwt_token(self, token, override_access_lifespan=None):\n \"\"\"\n Creates a new token for a user if and only if the old token's access\n permission is expired but its refresh permission is not yet expired.\n The new token's refresh expiration moment is the same as the old\n token's, but the new token's access expiration is refreshed\n\n :param: token: The existing jwt token that needs to\n be replaced with a new, refreshed\n token\n :param: override_access_lifespan: Override's the instance's access\n lifespan to set a custom duration\n after which the new token's\n accessability will expire. 
May not\n exceed the refresh lifespan\n \"\"\"\n moment = pendulum.now('UTC')\n # Note: we disable exp verification because we do custom checks here\n with InvalidTokenHeader.handle_errors('failed to decode JWT token'):\n data = jwt.decode(\n token,\n self.encode_key,\n algorithms=self.allowed_algorithms,\n options={'verify_exp': False},\n )\n\n self._validate_jwt_data(data, access_type=AccessType.refresh)\n\n user = self.user_class.identify(data['id'])\n self._check_user(user)\n\n if override_access_lifespan is None:\n access_lifespan = self.access_lifespan\n else:\n access_lifespan = override_access_lifespan\n refresh_expiration = data['rf_exp']\n access_expiration = min(\n (moment + access_lifespan).int_timestamp,\n refresh_expiration,\n )\n\n custom_claims = {\n k: v for (k, v) in data.items() if k not in RESERVED_CLAIMS\n }\n payload_parts = dict(\n iat=moment.int_timestamp,\n exp=access_expiration,\n rf_exp=refresh_expiration,\n jti=data['jti'],\n id=data['id'],\n rls=','.join(user.rolenames),\n **custom_claims\n )\n return jwt.encode(\n payload_parts, self.encode_key, self.encode_algorithm,\n ).decode('utf-8')", "def create(cls, session_token):\n \"\"\" Parses session token and creates the vector session token\n\n :param str session_token:\n\n :return:\n A Vector session Token \n :rtype: VectorSessionToken\n \"\"\"\n\n version = None\n global_lsn = None\n local_lsn_by_region = {}\n\n if not session_token:\n return None\n\n segments = session_token.split(cls.segment_separator)\n \n if len(segments) < 2:\n return None\n\n try:\n version = int(segments[0])\n except ValueError as _:\n return None\n\n try:\n global_lsn = int(segments[1])\n except ValueError as _:\n return None\n\n for i in range(2, len(segments)):\n region_segment = segments[i]\n region_id_with_lsn = region_segment.split(cls.region_progress_separator)\n\n if len(region_id_with_lsn) != 2:\n return None\n\n try:\n region_id = int(region_id_with_lsn[0])\n local_lsn = int(region_id_with_lsn[1])\n except ValueError as _:\n return None\n local_lsn_by_region[region_id] = local_lsn\n\n return VectorSessionToken(version, global_lsn, local_lsn_by_region, session_token)" ]
[ 0.6952347159385681, 0.6901059150695801, 0.6878064274787903, 0.6642931699752808, 0.6559945940971375, 0.6553444862365723, 0.6552048921585083, 0.6551014184951782, 0.6549071073532104, 0.6531271934509277, 0.6507424116134644, 0.6490411162376404 ]
Example interface commands
def runcommand(cosmology='WMAP5'): """ Example interface commands """ # Return the WMAP5 cosmology concentration predicted for # z=0 range of masses Mi = [1e8, 1e9, 1e10] zi = 0 print("Concentrations for haloes of mass %s at z=%s" % (Mi, zi)) output = commah.run(cosmology=cosmology, zi=zi, Mi=Mi) print(output['c'].flatten()) # Return the WMAP5 cosmology concentration predicted for # z=0 range of masses AND cosmological parameters Mi = [1e8, 1e9, 1e10] zi = 0 print("Concentrations for haloes of mass %s at z=%s" % (Mi, zi)) output, cosmo = commah.run(cosmology=cosmology, zi=zi, Mi=Mi, retcosmo=True) print(output['c'].flatten()) print(cosmo) # Return the WMAP5 cosmology concentration predicted for MW # mass (2e12 Msol) across redshift Mi = 2e12 z = [0, 0.5, 1, 1.5, 2, 2.5] output = commah.run(cosmology=cosmology, zi=0, Mi=Mi, z=z) for zval in z: print("M(z=0)=%s has c(z=%s)=%s" % (Mi, zval, output[output['z'] == zval]['c'].flatten())) # Return the WMAP5 cosmology concentration predicted for MW # mass (2e12 Msol) across redshift Mi = 2e12 zi = [0, 0.5, 1, 1.5, 2, 2.5] output = commah.run(cosmology=cosmology, zi=zi, Mi=Mi) for zval in zi: print("M(z=%s)=%s has concentration %s" % (zval, Mi, output[(output['zi'] == zval) & (output['z'] == zval)]['c'].flatten())) # Return the WMAP5 cosmology concentration and # rarity of high-z cluster Mi = 2e14 zi = 6 output = commah.run(cosmology=cosmology, zi=zi, Mi=Mi) print("Concentrations for haloes of mass %s at z=%s" % (Mi, zi)) print(output['c'].flatten()) print("Mass variance sigma of haloes of mass %s at z=%s" % (Mi, zi)) print(output['sig'].flatten()) print("Fluctuation for haloes of mass %s at z=%s" % (Mi, zi)) print(output['nu'].flatten()) # Return the WMAP5 cosmology accretion rate prediction # for haloes at range of redshift and mass Mi = [1e8, 1e9, 1e10] zi = [0] z = [0, 0.5, 1, 1.5, 2, 2.5] output = commah.run(cosmology=cosmology, zi=zi, Mi=Mi, z=z) for Mval in Mi: print("dM/dt for halo of mass %s at z=%s across redshift %s is: " % (Mval, zi, z)) print(output[output['Mi'] == Mval]['dMdt'].flatten()) # Return the WMAP5 cosmology Halo Mass History for haloes with M(z=0) = 1e8 M = [1e8] z = [0, 0.5, 1, 1.5, 2, 2.5] print("Halo Mass History for z=0 mass of %s across z=%s" % (M, z)) output = commah.run(cosmology=cosmology, zi=0, Mi=M, z=z) print(output['Mz'].flatten()) # Return the WMAP5 cosmology formation redshifts for haloes at # range of redshift and mass M = [1e8, 1e9, 1e10] z = [0] print("Formation Redshifts for haloes of mass %s at z=%s" % (M, z)) output = commah.run(cosmology=cosmology, zi=0, Mi=M, z=z) for Mval in M: print(output[output['Mi'] == Mval]['zf'].flatten()) return("Done")
[ "def example_repl(self, text, example, start_index, continue_flag):\n \"\"\" REPL for interactive tutorials \"\"\"\n if start_index:\n start_index = start_index + 1\n cmd = ' '.join(text.split()[:start_index])\n example_cli = CommandLineInterface(\n application=self.create_application(\n full_layout=False),\n eventloop=create_eventloop())\n example_cli.buffers['example_line'].reset(\n initial_document=Document(u'{}\\n'.format(\n add_new_lines(example)))\n )\n while start_index < len(text.split()):\n if self.default_command:\n cmd = cmd.replace(self.default_command + ' ', '')\n example_cli.buffers[DEFAULT_BUFFER].reset(\n initial_document=Document(\n u'{}'.format(cmd),\n cursor_position=len(cmd)))\n example_cli.request_redraw()\n answer = example_cli.run()\n if not answer:\n return \"\", True\n answer = answer.text\n if answer.strip('\\n') == cmd.strip('\\n'):\n continue\n else:\n if len(answer.split()) > 1:\n start_index += 1\n cmd += \" \" + answer.split()[-1] + \" \" +\\\n u' '.join(text.split()[start_index:start_index + 1])\n example_cli.exit()\n del example_cli\n else:\n cmd = text\n\n return cmd, continue_flag", "def cli(ctx, name,all):\n \"\"\"Show example for doing some task in bubble(experimental)\"\"\"\n ctx.gbc.say('all_example_functions',stuff=all_examples_functions, verbosity=1000)\n\n for example in all_examples_functions:\n if all or (name and example['name'] == name):\n if all:\n ctx.gbc.say('example',stuff=example, verbosity=100)\n name = example['name']\n #click.echo_via_pager(example['fun']())\n click.echo(\"#\"*80)\n click.echo(\"### start of bubble example: \"+name)\n click.echo(\"#\"*80)\n click.echo(example['fun']())\n click.echo(\"#\"*80)\n click.echo(\"### end of bubble example: \"+name)\n click.echo(\"#\"*80)\n click.echo()\n\n else:\n click.echo(\"available example: \" + example['name'])", "def cmd_example(self, args):\n '''control behaviour of the module'''\n if len(args) == 0:\n print(self.usage())\n elif args[0] == \"status\":\n print(self.status())\n elif args[0] == \"set\":\n self.example_settings.command(args[1:])\n else:\n print(self.usage())", "def world_command_examples():\n \"\"\"A few examples to showcase commands for manipulating the worlds.\"\"\"\n env = holodeck.make(\"MazeWorld\")\n\n # This is the unaltered MazeWorld\n for _ in range(300):\n _ = env.tick()\n env.reset()\n\n # The set_day_time_command sets the hour between 0 and 23 (military time). This example sets it to 6 AM.\n env.set_day_time(6)\n for _ in range(300):\n _ = env.tick()\n env.reset() # reset() undoes all alterations to the world\n\n # The start_day_cycle command starts rotating the sun to emulate day cycles.\n # The parameter sets the day length in minutes.\n env.start_day_cycle(5)\n for _ in range(1500):\n _ = env.tick()\n env.reset()\n\n # The set_fog_density changes the density of the fog in the world. 1 is the maximum density.\n env.set_fog_density(.25)\n for _ in range(300):\n _ = env.tick()\n env.reset()\n\n # The set_weather_command changes the weather in the world. 
The two available options are \"rain\" and \"cloudy\".\n # The rainfall particle system is attached to the agent, so the rain particles will only be found around each agent.\n # Every world is clear by default.\n env.set_weather(\"rain\")\n for _ in range(500):\n _ = env.tick()\n env.reset()\n\n env.set_weather(\"cloudy\")\n for _ in range(500):\n _ = env.tick()\n env.reset()\n\n env.teleport_camera([1000, 1000, 1000], [0, 0, 0])\n for _ in range(500):\n _ = env.tick()\n env.reset()", "function commandExamples(examples) {\n if (!examples || !examples.length) {\n return ''\n }\n let exampleRender = `**Examples**${newLine}`\n exampleRender += '\\`\\`\\`bash\\n'\n examples.forEach((ex) => {\n console.log('ex', ex)\n exampleRender += `${ex}\\n`\n })\n exampleRender += `\\`\\`\\`${newLine}`\n return exampleRender\n}", "def do(self, arg):\n \".example - This is an example plugin for the command line debugger\"\n print \"This is an example command.\"\n print \"%s.do(%r, %r):\" % (__name__, self, arg)\n print \" last event\", self.lastEvent\n print \" prefix\", self.cmdprefix\n print \" arguments\", self.split_tokens(arg)", "def generate_command(self):\n \"\"\" Generate a sample command\n \"\"\"\n example = []\n example.append(f\"{sys.argv[0]}\")\n for key in sorted(list(self.spec.keys())):\n if self.spec[key]['type'] == list:\n value = \" \".join(self.spec[key].get('example', ''))\n elif self.spec[key]['type'] == dict:\n value = f\"\\'{json.dumps(self.spec[key].get('example', ''))}\\'\"\n else:\n value = self.spec[key].get('example', '')\n string = f\" --{key.lower()} {value}\"\n example.append(string)\n print(\" \\\\\\n\".join(example))", "def examples():\n \"\"\"Prints examples of using the script to the console using colored output.\n \"\"\"\n script = \"Continuous Integration Automation Server\"\n explain = (\"For complex codes with many collaborators, it is often difficult to maintian \"\n \"a pristine code that everyone can rely on. If every developer has power to \"\n \"commit to master, unintentional mistakes happen that can cripple those who \"\n \"rely on the code for day-to-day business. One way to overcome this is to isolate \"\n \"the master branch and require collaborators to work on separate forks/branches. \"\n \"When they are ready to commit their changes to master, they create a pull request \"\n \"that summarizes the changes and why they want to merge them into the master branch.\\n\\n\"\n \"A continuous integration server monitors repositories for new pull requests. When a new \"\n \"request is made, the proposed changes are downloaded to a local sandbox and tested \"\n \"against all the existing code. If the master branch has a rich suite of unit tests \"\n \"this will detect any bugs in the proposed merger. If all the tests pass, then the \"\n \"owner of the master branch can have confidence that the merger will be okay.\")\n contents = [((\"Configure this machine to be a CI server. 
Unfortunately, this step requires \"\n \"sudo authority because the API accesses the crontab for arbitrary users.\"), \n \"sudo ci.py -setup\", \n (\"Before this setup can proceed, you need to make sure the global configuration \"\n \"XML file has been created and the environment variable to its path has been set:\\n\"\n \"\\texport PYCI_XML='~/path/to/global.xml'.\\nSee also: -rollback\")),\n ((\"Remove the cron tab from the server, delete the list of installed repositories \"\n \"and undo anything else that the script did when -setup was used.\"),\n \"sudo ci.py -rollback\",\n (\"This action deletes the files specified in 'ARCHFILE' and 'DATAFILE' in 'global.xml'. \"\n \"Also, the crontab is removed, which is why sudo privileges are needed. See also -setup.\")),\n ((\"Install the repository described by myrepo.xml onto the CI server so that \"\n \"it's pull requests are monitored and unit ,tested.\"),\n \"ci.py -install myrepo.xml\",\n (\"After installation, you can query the repository immediately by running the \"\n \"script with -cron. You can install a list of repositories with a single command.\"\n \"See also -uninstall.\")),\n ((\"Run the routines that check for new pull requests, run the unit tests, and post \"\n \"the results to the media wiki.\"),\n \"ci.py -cron\", \"\")]\n required = (\"REQUIRED:\\n\\t-'repo.xml' file for *each* repository that gets installed on the server.\\n\"\n \"\\t-'global.xml' file with configuration settings for *all* repositories.\\n\"\n \"\\t- git user and API key with push access for *each* repository installed.\")\n output = (\"RETURNS: prints status information to stdout.\")\n details = (\"This script installs a continous integration server on the local machine by \"\n \"configuring a cron to call this script every couple of minutes. The script interacts \"\n \"with github using an API to monitor the pull requests. When new ones are found, the \"\n \"list of tests specified in the 'repo.xml' file is executed and the results are posted \"\n \"to a media wiki page associated with the specific pull request. For more details, see \"\n \"the online repo at https://github.com/rosenbrockc/ci.\")\n outputfmt = (\"\")\n\n from pyci.msg import example\n example(script, explain, contents, required, output, outputfmt, details)", "def editor_example():\n \"\"\"This editor example shows how to interact with holodeck worlds while they are being built\n in the Unreal Engine. Most people that use holodeck will not need this.\n \"\"\"\n sensors = [Sensors.PIXEL_CAMERA, Sensors.LOCATION_SENSOR, Sensors.VELOCITY_SENSOR]\n agent = AgentDefinition(\"uav0\", agents.UavAgent, sensors)\n env = HolodeckEnvironment(agent, start_world=False)\n env.agents[\"uav0\"].set_control_scheme(1)\n command = [0, 0, 10, 50]\n\n for i in range(10):\n env.reset()\n for _ in range(1000):\n state, reward, terminal, _ = env.step(command)", "def utils(\n ctx,\n filepath=None,\n jsonld=False,\n discover=False,\n):\n \"\"\"UTILS: miscellaneous bits and pieces.\n \"\"\"\n verbose = ctx.obj['VERBOSE']\n sTime = ctx.obj['STIME']\n print_opts = {\n 'labels': verbose,\n }\n DONE_ACTION = False\n\n if jsonld:\n if not filepath:\n click.secho(\n \"What do you want to test? 
Please specify a valid JSONLD source.\",\n fg='red')\n else:\n filepath = filepath[0]\n action_jsonld_playground(filepath, verbose)\n DONE_ACTION = True\n elif discover:\n DONE_ACTION = True\n action_webimport()\n else:\n click.secho(\"You haven't specified any utils command.\")\n click.echo(ctx.get_help())\n\n if DONE_ACTION:\n eTime = time.time()\n tTime = eTime - sTime\n printDebug(\"\\n-----------\\n\" + \"Time:\t %0.2fs\" % tTime, \"comment\")", "def show(commands,\n raw_text=True,\n **kwargs):\n '''\n Execute one or more show (non-configuration) commands.\n\n commands\n The commands to be executed.\n\n raw_text: ``True``\n Whether to return raw text or structured data.\n\n transport: ``https``\n Specifies the type of connection transport to use. Valid values for the\n connection are ``http``, and ``https``.\n\n host: ``localhost``\n The IP address or DNS host name of the connection device.\n\n username: ``admin``\n The username to pass to the device to authenticate the NX-API connection.\n\n password\n The password to pass to the device to authenticate the NX-API connection.\n\n port\n The TCP port of the endpoint for the NX-API connection. If this keyword is\n not specified, the default value is automatically determined by the\n transport type (``80`` for ``http``, or ``443`` for ``https``).\n\n timeout: ``60``\n Time in seconds to wait for the device to respond. Default: 60 seconds.\n\n verify: ``True``\n Either a boolean, in which case it controls whether we verify the NX-API\n TLS certificate, or a string, in which case it must be a path to a CA bundle\n to use. Defaults to ``True``.\n\n CLI Example:\n\n .. code-block:: bash\n\n salt-call --local nxos_api.show 'show version'\n salt '*' nxos_api.show 'show bgp sessions' 'show processes' raw_text=False\n salt 'regular-minion' nxos_api.show 'show interfaces' host=sw01.example.com username=test password=test\n '''\n ret = []\n if raw_text:\n method = 'cli_ascii'\n key = 'msg'\n else:\n method = 'cli'\n key = 'body'\n response_list = _cli_command(commands,\n method=method,\n **kwargs)\n ret = [response[key] for response in response_list if response]\n return ret", "function printUsage () {\n log ('This is an example of a basic command-line interface to all exchanges')\n log ('Usage: node', process.argv[1], 'id'.green, 'method'.yellow, '\"param1\" param2 \"param3\" param4 ...'.blue)\n log ('Examples:')\n log ('node', process.argv[1], 'okcoinusd fetchOHLCV BTC/USD 15m')\n log ('node', process.argv[1], 'bitfinex fetchBalance')\n log ('node', process.argv[1], 'kraken fetchOrderBook ETH/BTC')\n printSupportedExchanges ()\n log ('Supported options:')\n log ('--verbose Print verbose output')\n log ('--debug Print debugging output')\n log ('--cloudscrape Use https://github.com/codemanki/cloudscraper to bypass Cloudflare')\n log ('--cfscrape Use https://github.com/Anorov/cloudflare-scrape to bypass Cloudflare (requires python and cfscrape)')\n log ('--poll Repeat continuously in rate-limited mode')\n log (\"--no-send Print the request but don't actually send it to the exchange (sets verbose and load-markets)\")\n log ('--no-load-markets Do not pre-load markets (for debugging)')\n log ('--details Print detailed fetch responses')\n log ('--no-table Do not print the fetch response as a table')\n log ('--table Print the fetch response as a table')\n log ('--iso8601 Print timestamps as ISO8601 datetimes')\n log ('--cors use CORS proxy for debugging')\n}" ]
[ 0.7600148320198059, 0.7454254627227783, 0.7358937859535217, 0.7320149540901184, 0.7233145833015442, 0.7186895608901978, 0.7185418605804443, 0.7126096487045288, 0.7122504711151123, 0.7091414928436279, 0.7089841365814209, 0.7071307897567749 ]
Example ways to interrogate the dataset and plot the commah output
def plotcommand(cosmology='WMAP5', plotname=None): """ Example ways to interrogate the dataset and plot the commah output """ # Plot the c-M relation as a functon of redshift xarray = 10**(np.arange(1, 15, 0.2)) yval = 'c' # Specify the redshift range zarray = np.arange(0, 5, 0.5) xtitle = r"Halo Mass (M$_{sol}$)" ytitle = r"Concentration" linelabel = "z=" fig = plt.figure() ax = fig.add_subplot(111) ax.set_xlabel(xtitle) ax.set_ylabel(ytitle) plt.ylim([2, 30]) colors = cm.rainbow(np.linspace(0, 1, len(zarray))) for zind, zval in enumerate(zarray): output = commah.run(cosmology=cosmology, zi=zval, Mi=xarray) # Access the column yval from the data file yarray = output[yval].flatten() # Plot each line in turn with different colour ax.plot(xarray, yarray, label=linelabel+str(zval), color=colors[zind]) # Overplot the D08 predictions in black ax.plot(xarray, commah.commah.cduffy(zval, xarray), color="black") ax.set_xscale('log') ax.set_yscale('log') leg = ax.legend(loc=1) # Make box totally transparent leg.get_frame().set_alpha(0) leg.get_frame().set_edgecolor('white') for label in leg.get_texts(): label.set_fontsize('small') # the font size for label in leg.get_lines(): label.set_linewidth(4) # the legend line width if plotname: fig.tight_layout(pad=0.2) print("Plotting to '%s_CM_relation.png'" % (plotname)) fig.savefig(plotname+"_CM_relation.png", dpi=fig.dpi*5) else: plt.show() # Plot the c-z relation as a function of mass (so always Mz=M0) xarray = 10**(np.arange(0, 1, 0.05)) - 1 yval = 'c' # Specify the mass range zarray = 10**np.arange(6, 14, 2) xtitle = r"Redshift" ytitle = r"NFW Concentration" linelabel = r"log$_{10}$ M$_{z}$(M$_{sol}$)=" fig = plt.figure() ax = fig.add_subplot(111) ax.set_xlabel(xtitle) ax.set_ylabel(ytitle) colors = cm.rainbow(np.linspace(0, 1, len(zarray))) for zind, zval in enumerate(zarray): output = commah.run(cosmology=cosmology, zi=xarray, Mi=zval) # Access the column yval from the data file yarray = output[yval].flatten() # Plot each line in turn with different colours ax.plot(xarray, yarray, label=linelabel+"{0:.1f}".format(np.log10(zval)), color=colors[zind],) leg = ax.legend(loc=1) # Make box totally transparent leg.get_frame().set_alpha(0) leg.get_frame().set_edgecolor('white') for label in leg.get_texts(): label.set_fontsize('small') # the font size for label in leg.get_lines(): label.set_linewidth(4) # the legend line width if plotname: fig.tight_layout(pad=0.2) print("Plotting to '%s_Cz_relation.png'" % (plotname)) fig.savefig(plotname+"_Cz_relation.png", dpi=fig.dpi*5) else: plt.show() # Plot the zf-z relation for different masses (so always Mz=M0) xarray = 10**(np.arange(0, 1, 0.05)) - 1 yval = 'zf' # Specify the mass range zarray = 10**np.arange(6, 14, 2) xtitle = r"Redshift" ytitle = r"Formation Redshift" linelabel = r"log$_{10}$ M$_{z}$(M$_{sol}$)=" fig = plt.figure() ax = fig.add_subplot(111) ax.set_xlabel(xtitle) ax.set_ylabel(ytitle) colors = cm.rainbow(np.linspace(0, 1, len(zarray))) for zind, zval in enumerate(zarray): output = commah.run(cosmology=cosmology, zi=xarray, Mi=zval) yarray = output[yval].flatten() # Plot each line in turn with different colour ax.plot(xarray, yarray, label=linelabel+"{0:.1f}".format(np.log10(zval)), color=colors[zind],) leg = ax.legend(loc=2) # Make box totally transparent leg.get_frame().set_alpha(0) leg.get_frame().set_edgecolor('white') for label in leg.get_texts(): label.set_fontsize('small') # the font size for label in leg.get_lines(): label.set_linewidth(4) # the legend line width if plotname: 
fig.tight_layout(pad=0.2) print("Plotting to '%s_zfz_relation.png'" % (plotname)) fig.savefig(plotname+"_zfz_relation.png", dpi=fig.dpi*5) else: plt.show() # Plot the dM/dt-z relation for different masses (so always Mz=M0) xarray = 10**(np.arange(0, 1, 0.05)) - 1 yval = 'dMdt' # Specify the mass range zarray = 10**np.arange(10, 14, 0.5) xtitle = r"log$_{10}$ (1+z)" ytitle = r"log$_{10}$ Accretion Rate M$_{sol}$ yr$^{-1}$" linelabel = r"log$_{10}$ M$_z$(M$_{sol}$)=" fig = plt.figure() ax = fig.add_subplot(111) ax.set_xlabel(xtitle) ax.set_ylabel(ytitle) colors = cm.rainbow(np.linspace(0, 1, len(zarray))) cosmo = commah.getcosmo(cosmology) for zind, zval in enumerate(zarray): output = commah.run(cosmology=cosmology, zi=xarray, Mi=zval, com=False, mah=True) yarray = output[yval].flatten() # Plot each line in turn with different colour ax.plot(np.log10(xarray+1.), np.log10(yarray), label=linelabel+"{0:.1f}".format(np.log10(zval)), color=colors[zind],) # Plot the semi-analytic approximate formula from Correa et al 2015b semianalytic_approx = 71.6 * (zval / 1e12) * (cosmo['h'] / 0.7) *\ (-0.24 + 0.75 * (xarray + 1)) * np.sqrt( cosmo['omega_M_0'] * (xarray + 1)**3 + cosmo['omega_lambda_0']) ax.plot(np.log10(xarray + 1), np.log10(semianalytic_approx), color='black') leg = ax.legend(loc=2) # Make box totally transparent leg.get_frame().set_alpha(0) leg.get_frame().set_edgecolor('white') for label in leg.get_texts(): label.set_fontsize('small') # the font size for label in leg.get_lines(): label.set_linewidth(4) # the legend line width if plotname: fig.tight_layout(pad=0.2) print("Plotting to '%s_dMdtz_relation.png'" % (plotname)) fig.savefig(plotname+"_dMdtz_relation.png", dpi=fig.dpi*5) else: plt.show() # Plot the dMdt-M relation as a function of redshift xarray = 10**(np.arange(10, 14, 0.5)) yval = 'dMdt' # Specify the redshift range zarray = np.arange(0, 5, 0.5) xtitle = r"Halo Mass M$_{sol}$" ytitle = r"Accretion Rate M$_{sol}$ yr$^{-1}$" linelabel = "z=" fig = plt.figure() ax = fig.add_subplot(111) ax.set_xlabel(xtitle) ax.set_ylabel(ytitle) colors = cm.rainbow(np.linspace(0, 1, len(zarray))) for zind, zval in enumerate(zarray): output = commah.run(cosmology=cosmology, zi=zval, Mi=xarray, com=False, mah=True) yarray = output[yval].flatten() # Plot each line in turn with different colour ax.plot(xarray, yarray, label=linelabel+str(zval), color=colors[zind],) ax.set_xscale('log') ax.set_yscale('log') leg = ax.legend(loc=2) # Make box totally transparent leg.get_frame().set_alpha(0) leg.get_frame().set_edgecolor('white') for label in leg.get_texts(): label.set_fontsize('small') # the font size for label in leg.get_lines(): label.set_linewidth(4) # the legend line width if plotname: fig.tight_layout(pad=0.2) print("Plotting to '%s_MAH_M_relation.png'" % (plotname)) fig.savefig(plotname+"_MAH_M_relation.png", dpi=fig.dpi*5) else: plt.show() # Plot the (dM/M)dt-M relation as a function of redshift xarray = 10**(np.arange(10, 14, 0.5)) yval = 'dMdt' # Specify the redshift range zarray = np.arange(0, 5, 0.5) xtitle = r"Halo Mass M$_{sol}$" ytitle = r"Specific Accretion Rate yr$^{-1}$" linelabel = "z=" fig = plt.figure() ax = fig.add_subplot(111) ax.set_xlabel(xtitle) ax.set_ylabel(ytitle) colors = cm.rainbow(np.linspace(0, 1, len(zarray))) for zind, zval in enumerate(zarray): output = commah.run(cosmology=cosmology, zi=zval, Mi=xarray, mah=True, com=False) yarray = output[yval].flatten() # Plot each line in turn with different colour ax.plot(xarray, yarray/xarray, label=linelabel+str(zval), 
color=colors[zind],) ax.set_xscale('log') ax.set_yscale('log') leg = ax.legend(loc=1) # Make box totally transparent leg.get_frame().set_alpha(0) leg.get_frame().set_edgecolor('white') for label in leg.get_texts(): label.set_fontsize('small') # the font size for label in leg.get_lines(): label.set_linewidth(4) # the legend line width if plotname: fig.tight_layout(pad=0.2) print("Plotting to '%s_specificMAH_M_relation.png'" % (plotname)) fig.savefig(plotname+"_specificMAH_M_relation.png", dpi=fig.dpi*5) else: plt.show() # Plot the Mz-z relation as a function of mass # (so mass is decreasing to zero as z-> inf) xarray = 10**(np.arange(0, 1, 0.05)) - 1 yval = 'Mz' # Specify the mass range zarray = 10**np.arange(10, 14, 0.5) xtitle = r"Redshift" ytitle = r"M(z) (M$_{sol}$)" linelabel = r"log$_{10}$ M$_{0}$(M$_{sol}$)=" fig = plt.figure() ax = fig.add_subplot(111) ax.set_xlabel(xtitle) ax.set_ylabel(ytitle) colors = cm.rainbow(np.linspace(0, 1, len(zarray))) for zind, zval in enumerate(zarray): output = commah.run(cosmology=cosmology, zi=0, Mi=zval, z=xarray) yarray = output[yval].flatten() # Plot each line in turn with different colour ax.plot(xarray, yarray, label=linelabel+"{0:.1f}".format(np.log10(zval)), color=colors[zind],) ax.set_yscale('log') leg = ax.legend(loc=1) # Make box totally transparent leg.get_frame().set_alpha(0) leg.get_frame().set_edgecolor('white') for label in leg.get_texts(): label.set_fontsize('small') # the font size for label in leg.get_lines(): label.set_linewidth(4) # the legend line width if plotname: fig.tight_layout(pad=0.2) print("Plotting to '%s_Mzz_relation.png'" % (plotname)) fig.savefig(plotname+"_Mzz_relation.png", dpi=fig.dpi*5) else: plt.show() # Plot the Mz/M0-z relation as a function of mass xarray = 10**(np.arange(0, 1, 0.02)) - 1 yval = 'Mz' # Specify the mass range zarray = 10**np.arange(10, 14, 0.5) xtitle = r"Redshift" ytitle = r"log$_{10}$ M(z)/M$_{0}$" linelabel = r"log$_{10}$ M$_{0}$(M$_{sol}$)=" fig = plt.figure() ax = fig.add_subplot(111) ax.set_xlabel(xtitle) ax.set_ylabel(ytitle) colors = cm.rainbow(np.linspace(0, 1, len(zarray))) for zind, zval in enumerate(zarray): output = commah.run(cosmology=cosmology, zi=0, Mi=zval, z=xarray) yarray = output[yval].flatten() # Plot each line in turn with different colour ax.plot(xarray, np.log10(yarray/zval), label=linelabel+"{0:.1f}".format(np.log10(zval)), color=colors[zind],) leg = ax.legend(loc=3) # Make box totally transparent leg.get_frame().set_alpha(0) leg.get_frame().set_edgecolor('white') for label in leg.get_texts(): label.set_fontsize('small') # the font size for label in leg.get_lines(): label.set_linewidth(4) # the legend line width if plotname: fig.tight_layout(pad=0.2) print("Plotting to '%s_MzM0z_relation.png'" % (plotname)) fig.savefig(plotname+"_MzM0z_relation.png", dpi=fig.dpi*5) else: plt.show() return("Done")
[ "def howPlotAsk(goodFormat):\n '''plots using inquirer prompts\n\n Arguments:\n goodFormat {dict} -- module : [results for module]\n '''\n plotAnswer = askPlot()\n if \"Save\" in plotAnswer['plotQ']:\n exportPlotsPath = pathlib.Path(askSave())\n if \"Show\" in plotAnswer['plotQ']:\n plotter(exportPlotsPath, True, goodFormat)\n else:\n plotter(exportPlotsPath, False, goodFormat)\n elif \"Show\" in plotAnswer['plotQ']:\n plotter(None, True, goodFormat)", "def plot(self, key=None,\n cmap=None, ms=4, vmin=None, vmax=None,\n vmin_map=None, vmax_map=None, cmap_map=None, normt_map=False,\n ntMax=None, nchMax=None, nlbdMax=3,\n lls=None, lct=None, lcch=None, lclbd=None, cbck=None,\n inct=[1,10], incX=[1,5], inclbd=[1,10],\n fmt_t='06.3f', fmt_X='01.0f',\n invert=True, Lplot='In', dmarker=None,\n Bck=True, fs=None, dmargin=None, wintit=None, tit=None,\n fontsize=None, labelpad=None, draw=True, connect=True):\n \"\"\" Plot the data content in a generic interactive figure \"\"\"\n kh = _plot.Data_plot(self, key=key, indref=0,\n cmap=cmap, ms=ms, vmin=vmin, vmax=vmax,\n vmin_map=vmin_map, vmax_map=vmax_map,\n cmap_map=cmap_map, normt_map=normt_map,\n ntMax=ntMax, nchMax=nchMax, nlbdMax=nlbdMax,\n lls=lls, lct=lct, lcch=lcch, lclbd=lclbd, cbck=cbck,\n inct=inct, incX=incX, inclbd=inclbd,\n fmt_t=fmt_t, fmt_X=fmt_X, Lplot=Lplot,\n invert=invert, dmarker=dmarker, Bck=Bck,\n fs=fs, dmargin=dmargin, wintit=wintit, tit=tit,\n fontsize=fontsize, labelpad=labelpad,\n draw=draw, connect=connect)\n return kh", "def plot(args):\n \"\"\"\n %prog plot workdir sample chr1,chr2\n\n Plot some chromosomes for visual proof. Separate multiple chromosomes with\n comma. Must contain folder workdir/sample-cn/.\n \"\"\"\n from jcvi.graphics.base import savefig\n\n p = OptionParser(plot.__doc__)\n opts, args, iopts = p.set_image_options(args, figsize=\"8x7\", format=\"png\")\n\n if len(args) != 3:\n sys.exit(not p.print_help())\n\n workdir, sample_key, chrs = args\n chrs = chrs.split(\",\")\n hmm = CopyNumberHMM(workdir=workdir)\n hmm.plot(sample_key, chrs=chrs)\n\n image_name = sample_key + \"_cn.\" + iopts.format\n savefig(image_name, dpi=iopts.dpi, iopts=iopts)", "def main():\n \"\"\" We here demostrate the basic functionality of barrett. We use a global scan\n of scalar dark matter as an example. 
The details aren't really important.\n \"\"\"\n dataset = 'RD'\n\n observables = ['log(<\\sigma v>)', '\\Omega_{\\chi}h^2', 'log(\\sigma_p^{SI})']\n var = ['log(m_{\\chi})']\n var += ['log(C_1)', 'log(C_2)', 'log(C_3)', 'log(C_4)', 'log(C_5)', 'log(C_6)']\n var += observables\n\n plot_vs_mass(dataset, observables, 'mass_vs_observables.png')\n plot_oneD(dataset, var, 'oneD.png')\n pairplot(dataset, var, 'pairplot.png')", "def plot(self, key=None, invert=None, plotmethod='imshow',\n cmap=plt.cm.gray, ms=4, Max=None,\n fs=None, dmargin=None, wintit=None,\n draw=True, connect=True):\n \"\"\" Plot the data content in a predefined figure \"\"\"\n dax, KH = _plot.Data_plot(self, key=key, invert=invert, Max=Max,\n plotmethod=plotmethod, cmap=cmap, ms=ms,\n fs=fs, dmargin=dmargin, wintit=wintit,\n draw=draw, connect=connect)\n return dax, KH", "def plot(self, fig=None, iabscissa=1, iteridx=None,\n plot_mean=False, # was: plot_mean=True\n foffset=1e-19, x_opt=None, fontsize=9):\n \"\"\"plot data from a `CMADataLogger` (using the files written \n by the logger).\n\n Arguments\n ---------\n `fig`\n figure number, by default 325\n `iabscissa`\n ``0==plot`` versus iteration count,\n ``1==plot`` versus function evaluation number\n `iteridx`\n iteration indices to plot\n\n Return `CMADataLogger` itself.\n\n Examples\n --------\n ::\n\n import cma\n logger = cma.CMADataLogger() # with default name\n # try to plot the \"default logging\" data (e.g.\n # from previous fmin calls, which is essentially what\n # also cma.plot() does)\n logger.plot()\n cma.savefig('fig325.png') # save current figure\n logger.closefig()\n\n Dependencies: matlabplotlib/pyplot.\n\n \"\"\"\n try:\n # pyplot: prodedural interface for matplotlib\n from matplotlib.pyplot import figure, subplot, hold, gcf\n except ImportError:\n ImportError('could not find matplotlib.pyplot module, function plot() is not available')\n return\n\n if fig is None:\n fig = 325\n if iabscissa not in (0, 1):\n iabscissa = 1\n\n self.load() # better load only conditionally?\n dat = self\n dat.x = dat.xmean # this is the genotyp\n if not plot_mean:\n if len(dat.x) < 2:\n print('not enough data to plot recent x')\n else:\n dat.x = dat.xrecent\n\n # index out some data\n if iteridx is not None:\n self.select_data(iteridx)\n\n if len(dat.f) <= 1:\n print('nothing to plot')\n return\n\n # not in use anymore, see formatter above\n # xticklocs = np.arange(5) * np.round(minxend/4., -int(np.log10(minxend/4.)))\n\n # dfit(dfit<1e-98) = NaN;\n\n # TODO: if abscissa==0 plot in chunks, ie loop over subsets where\n # dat.f[:,0]==countiter is monotonous\n\n figure(fig)\n self._enter_plotting(fontsize)\n self.fighandle = gcf() # fighandle.number\n\n subplot(2, 2, 1)\n self.plot_divers(iabscissa, foffset)\n pyplot.xlabel('')\n\n # Scaling\n subplot(2, 2, 3)\n self.plot_axes_scaling(iabscissa)\n\n # spectrum of correlation matrix\n figure(fig)\n\n subplot(2, 2, 2)\n if plot_mean:\n self.plot_mean(iabscissa, x_opt)\n else:\n self.plot_xrecent(iabscissa, x_opt)\n pyplot.xlabel('')\n # pyplot.xticks(xticklocs)\n\n # standard deviations\n subplot(2, 2, 4)\n self.plot_stds(iabscissa)\n self._finalize_plotting()\n return self", "def plot(self):\n \"\"\"\n Return a matplotlib figure of the dose-response dataset.\n\n Examples\n --------\n >>> fig = dataset.plot()\n >>> fig.show()\n\n .. 
image:: ../tests/resources/test_cdataset_plot.png\n :align: center\n :alt: Example generated BMD plot\n\n Returns\n -------\n out : matplotlib.figure.Figure\n A matplotlib figure representation of the dataset.\n \"\"\"\n fig = plotting.create_empty_figure()\n ax = fig.gca()\n xlabel = self.kwargs.get(\"xlabel\", \"Dose\")\n ylabel = self.kwargs.get(\"ylabel\", \"Response\")\n ax.set_xlabel(xlabel)\n ax.set_ylabel(ylabel)\n ax.errorbar(\n self.doses,\n self.means,\n yerr=self.errorbars,\n label=\"Mean ± 95% CI\",\n **plotting.DATASET_POINT_FORMAT,\n )\n ax.margins(plotting.PLOT_MARGINS)\n ax.set_title(self._get_dataset_name())\n ax.legend(**settings.LEGEND_OPTS)\n return fig", "def plotFCM(data, channel_names, kind='histogram', ax=None,\n autolabel=True, xlabel_kwargs={}, ylabel_kwargs={},\n colorbar=False, grid=False,\n **kwargs):\n \"\"\"\n Plots the sample on the current axis.\n\n Follow with a call to matplotlibs show() in order to see the plot.\n\n Parameters\n ----------\n data : DataFrame\n {graph_plotFCM_pars}\n {common_plot_ax}\n\n Returns\n -------\n The output of the plot command used\n \"\"\"\n\n if ax == None: ax = pl.gca()\n\n xlabel_kwargs.setdefault('size', 16)\n ylabel_kwargs.setdefault('size', 16)\n\n channel_names = to_list(channel_names)\n\n if len(channel_names) == 1:\n # 1D so histogram plot\n kwargs.setdefault('color', 'gray')\n kwargs.setdefault('histtype', 'stepfilled')\n kwargs.setdefault('bins', 200) # Do not move above\n\n x = data[channel_names[0]].values\n if len(x) >= 1:\n if (len(x) == 1) and isinstance(kwargs['bins'], int):\n # Only needed for hist (not hist2d) due to hist function doing\n # excessive input checking\n warnings.warn(\"One of the data sets only has a single event. \"\n \"This event won't be plotted unless the bin locations\"\n \" are explicitly provided to the plotting function. \")\n return None\n plot_output = ax.hist(x, **kwargs)\n else:\n return None\n\n elif len(channel_names) == 2:\n x = data[channel_names[0]].values # value of first channel\n y = data[channel_names[1]].values # value of second channel\n\n if len(x) == 0:\n # Don't draw a plot if there's no data\n return None\n if kind == 'scatter':\n kwargs.setdefault('edgecolor', 'none')\n plot_output = ax.scatter(x, y, **kwargs)\n elif kind == 'histogram':\n kwargs.setdefault('bins', 200) # Do not move above\n kwargs.setdefault('cmin', 1)\n kwargs.setdefault('cmap', pl.cm.copper)\n kwargs.setdefault('norm', matplotlib.colors.LogNorm())\n plot_output = ax.hist2d(x, y, **kwargs)\n mappable = plot_output[-1]\n\n if colorbar:\n pl.colorbar(mappable, ax=ax)\n else:\n raise ValueError(\"Not a valid plot type. Must be 'scatter', 'histogram'\")\n else:\n raise ValueError('Received an unexpected number of channels: \"{}\"'.format(channel_names))\n\n pl.grid(grid)\n\n if autolabel:\n y_label_text = 'Counts' if len(channel_names) == 1 else channel_names[1]\n ax.set_xlabel(channel_names[0], **xlabel_kwargs)\n ax.set_ylabel(y_label_text, **ylabel_kwargs)\n\n return plot_output", "def get_plot(self, xlim=None, ylim=None, plot_negative=None,\n integrated=False, invert_axes=True):\n \"\"\"\n Get a matplotlib plot showing the COHP.\n\n Args:\n xlim: Specifies the x-axis limits. Defaults to None for\n automatic determination.\n\n ylim: Specifies the y-axis limits. Defaults to None for\n automatic determination.\n\n plot_negative: It is common to plot -COHP(E) so that the\n sign means the same for COOPs and COHPs. 
Defaults to None\n for automatic determination: If are_coops is True, this\n will be set to False, else it will be set to True.\n\n integrated: Switch to plot ICOHPs. Defaults to False.\n\n invert_axes: Put the energies onto the y-axis, which is\n common in chemistry.\n\n Returns:\n A matplotlib object.\n \"\"\"\n if self.are_coops:\n cohp_label = \"COOP\"\n else:\n cohp_label = \"COHP\"\n\n if plot_negative is None:\n plot_negative = True if not self.are_coops else False\n\n if integrated:\n cohp_label = \"I\" + cohp_label + \" (eV)\"\n\n if plot_negative:\n cohp_label = \"-\" + cohp_label\n\n if self.zero_at_efermi:\n energy_label = \"$E - E_f$ (eV)\"\n else:\n energy_label = \"$E$ (eV)\"\n\n ncolors = max(3, len(self._cohps))\n ncolors = min(9, ncolors)\n\n import palettable\n\n colors = palettable.colorbrewer.qualitative.Set1_9.mpl_colors\n\n plt = pretty_plot(12, 8)\n\n allpts = []\n keys = self._cohps.keys()\n for i, key in enumerate(keys):\n energies = self._cohps[key][\"energies\"]\n if not integrated:\n populations = self._cohps[key][\"COHP\"]\n else:\n populations = self._cohps[key][\"ICOHP\"]\n for spin in [Spin.up, Spin.down]:\n if spin in populations:\n if invert_axes:\n x = -populations[spin] if plot_negative \\\n else populations[spin]\n y = energies\n else:\n x = energies\n y = -populations[spin] if plot_negative \\\n else populations[spin]\n allpts.extend(list(zip(x, y)))\n if spin == Spin.up:\n plt.plot(x, y, color=colors[i % ncolors],\n linestyle='-', label=str(key), linewidth=3)\n else:\n plt.plot(x, y, color=colors[i % ncolors],\n linestyle='--', linewidth=3)\n\n if xlim:\n plt.xlim(xlim)\n if ylim:\n plt.ylim(ylim)\n else:\n xlim = plt.xlim()\n relevanty = [p[1] for p in allpts if xlim[0] < p[0] < xlim[1]]\n plt.ylim((min(relevanty), max(relevanty)))\n\n xlim = plt.xlim()\n ylim = plt.ylim()\n if not invert_axes:\n plt.plot(xlim, [0, 0], \"k-\", linewidth=2)\n if self.zero_at_efermi:\n plt.plot([0, 0], ylim, \"k--\", linewidth=2)\n else:\n plt.plot([self._cohps[key]['efermi'],\n self._cohps[key]['efermi']], ylim,\n color=colors[i % ncolors],\n linestyle='--', linewidth=2)\n else:\n plt.plot([0, 0], ylim, \"k-\", linewidth=2)\n if self.zero_at_efermi:\n plt.plot(xlim, [0, 0], \"k--\", linewidth=2)\n else:\n plt.plot(xlim, [self._cohps[key]['efermi'],\n self._cohps[key]['efermi']],\n color=colors[i % ncolors],\n linestyle='--', linewidth=2)\n\n if invert_axes:\n plt.xlabel(cohp_label)\n plt.ylabel(energy_label)\n else:\n plt.xlabel(energy_label)\n plt.ylabel(cohp_label)\n\n plt.legend()\n leg = plt.gca().get_legend()\n ltext = leg.get_texts()\n plt.setp(ltext, fontsize=30)\n plt.tight_layout()\n return plt", "def plot_data():\n '''Plot sample data up with the fancy colormaps.\n\n '''\n\n var = ['temp', 'oxygen', 'salinity', 'fluorescence-ECO', 'density', 'PAR', 'turbidity', 'fluorescence-CDOM']\n # colorbar limits for each property\n lims = np.array([[26, 33], [0, 10], [0, 36], [0, 6], [1005, 1025], [0, 0.6], [0, 2], [0, 9]]) # reasonable values\n # lims = np.array([[20,36], [26,33], [1.5,5.6], [0,4], [0,9], [0,1.5]]) # values to show colormaps\n\n for fname in fnames:\n fig, axes = plt.subplots(nrows=4, ncols=2)\n fig.set_size_inches(20, 10)\n fig.subplots_adjust(top=0.95, bottom=0.01, left=0.2, right=0.99, wspace=0.0, hspace=0.07)\n i = 0\n for ax, Var, cmap in zip(axes.flat, var, cmaps): # loop through data to plot up\n\n # get variable data\n lat, lon, z, data = test.read(Var, fname)\n\n map1 = ax.scatter(lat, -z, c=data, cmap=cmap, s=10, linewidths=0., vmin=lims[i, 
0], vmax=lims[i, 1])\n # no stupid offset\n y_formatter = mpl.ticker.ScalarFormatter(useOffset=False)\n ax.xaxis.set_major_formatter(y_formatter)\n if i == 6:\n ax.set_xlabel('Latitude [degrees]')\n ax.set_ylabel('Depth [m]')\n else:\n ax.set_xticklabels([])\n ax.set_yticklabels([])\n ax.set_ylim(-z.max(), 0)\n ax.set_xlim(lat.min(), lat.max())\n cb = plt.colorbar(map1, ax=ax, pad=0.02)\n cb.set_label(cmap.name + ' [' + '$' + cmap.units + '$]')\n i += 1\n\n fig.savefig('figures/' + fname.split('.')[0] + '.png', bbox_inches='tight')", "def plot(self):\n \"\"\"\n Return a matplotlib figure of the dose-response dataset.\n\n Examples\n --------\n >>> fig = dataset.plot()\n >>> fig.show()\n >>> fig.clear()\n\n .. image:: ../tests/resources/test_cidataset_plot.png\n :align: center\n :alt: Example generated BMD plot\n\n Returns\n -------\n out : matplotlib.figure.Figure\n A matplotlib figure representation of the dataset.\n \"\"\"\n fig = plotting.create_empty_figure()\n ax = fig.gca()\n xlabel = self.kwargs.get(\"xlabel\", \"Dose\")\n ylabel = self.kwargs.get(\"ylabel\", \"Response\")\n ax.set_xlabel(xlabel)\n ax.set_ylabel(ylabel)\n ax.scatter(\n self.individual_doses,\n self.responses,\n label=\"Data\",\n **plotting.DATASET_INDIVIDUAL_FORMAT,\n )\n ax.margins(plotting.PLOT_MARGINS)\n ax.set_title(self._get_dataset_name())\n ax.legend(**settings.LEGEND_OPTS)\n return fig", "def plot(what, calc_id=-1, other_id=None, webapi=False):\n \"\"\"\n Generic plotter for local and remote calculations.\n \"\"\"\n if '?' not in what:\n raise SystemExit('Missing ? in %r' % what)\n prefix, rest = what.split('?', 1)\n assert prefix in 'source_geom hcurves hmaps uhs', prefix\n if prefix in 'hcurves hmaps' and 'imt=' not in rest:\n raise SystemExit('Missing imt= in %r' % what)\n elif prefix == 'uhs' and 'imt=' in rest:\n raise SystemExit('Invalid IMT in %r' % what)\n elif prefix in 'hcurves uhs' and 'site_id=' not in rest:\n what += '&site_id=0'\n if webapi:\n xs = [WebExtractor(calc_id)]\n if other_id:\n xs.append(WebExtractor(other_id))\n else:\n xs = [Extractor(calc_id)]\n if other_id:\n xs.append(Extractor(other_id))\n make_figure = globals()['make_figure_' + prefix]\n plt = make_figure(xs, what)\n plt.show()" ]
[ 0.7343975305557251, 0.7127325534820557, 0.7039694786071777, 0.6969481110572815, 0.6965529322624207, 0.6965466141700745, 0.6953907012939453, 0.692695140838623, 0.6920647621154785, 0.6920480132102966, 0.691997766494751, 0.6880983710289001 ]
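For orientation, the long positive example above boils down to a small call pattern: build a grid of masses or redshifts, call commah.run, and read a named column back out. A minimal sketch of that pattern (the commah usage is taken from the snippet itself and otherwise assumed, not verified against the library):

import numpy as np
import commah  # assumed importable exactly as in the snippet above

# Concentrations over a grid of halo masses at one redshift, then a named
# column read back out -- the same access pattern as output[yval].flatten()
# in the plotting code.
output = commah.run(cosmology='WMAP5', zi=0.5, Mi=10**np.arange(10, 15, 0.2))
concentration = output['c'].flatten()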
Function enhance Enhance the object with new item or enhanced items
def enhance(self):
    """ Function enhance
    Enhance the object with new item or enhanced items
    """
    self.update({'puppetclasses':
                 SubDict(self.api, self.objName,
                         self.payloadObj, self.key,
                         SubItemPuppetClasses)})
    self.update({'parameters':
                 SubDict(self.api, self.objName,
                         self.payloadObj, self.key,
                         SubItemParameter)})
    self.update({'smart_class_parameters':
                 SubDict(self.api, self.objName,
                         self.payloadObj, self.key,
                         ItemSmartClassParameter)})
[ "def enhance(self):\n \"\"\" Function enhance\n Enhance the object with new item or enhanced items\n \"\"\"\n self.update({'os_default_templates':\n SubDict(self.api, self.objName,\n self.payloadObj, self.key,\n SubItemOsDefaultTemplate)})\n self.update({'config_templates':\n SubDict(self.api, self.objName,\n self.payloadObj, self.key,\n SubItemConfigTemplate)})\n self.update({'ptables':\n SubDict(self.api, self.objName,\n self.payloadObj, self.key,\n SubItemPTable)})\n self.update({'media':\n SubDict(self.api, self.objName,\n self.payloadObj, self.key,\n SubItemMedia)})\n self.update({'architectures':\n SubDict(self.api, self.objName,\n self.payloadObj, self.key,\n SubItemArchitecture)})", "def enhance(self):\n \"\"\" Function enhance\n Enhance the object with new item or enhanced items\n \"\"\"\n self.update({'parameters':\n SubDict(self.api, self.objName,\n self.payloadObj, self.key,\n SubItemParameter)})\n self.update({'interfaces':\n SubDict(self.api, self.objName,\n self.payloadObj, self.key,\n SubItemInterface)})\n self.update({'subnets':\n SubDict(self.api, self.objName,\n self.payloadObj, self.key,\n SubItemSubnet)})", "def enhance(self):\n \"\"\" Function enhance\n Enhance the object with new item or enhanced items\n \"\"\"\n self.update({'images':\n SubDict(self.api, self.objName,\n self.payloadObj, self.key,\n SubItemImages)})", "def enhance(self):\n \"\"\" Function enhance\n Enhance the object with new item or enhanced items\n \"\"\"\n self.update({'os_default_templates':\n SubDict(self.api, self.objName,\n self.payloadObj, self.key,\n SubItemOsDefaultTemplate)})\n self.update({'operatingsystems':\n SubDict(self.api, self.objName,\n self.payloadObj, self.key,\n SubItemOperatingSystem)})", "def enhance(self):\n \"\"\" Function enhance\n Enhance the object with new item or enhanced items\n \"\"\"\n self.update({'puppetclasses':\n SubDict(self.api, self.objName,\n self.payloadObj, self.key,\n SubItemPuppetClasses)})\n self.update({'parameters':\n SubDict(self.api, self.objName,\n self.payloadObj, self.key,\n SubItemParameter)})\n self.update({'interfaces':\n SubDict(self.api, self.objName,\n self.payloadObj, self.key,\n SubItemInterface)})\n self.update({'smart_class_parameters':\n SubDict(self.api, self.objName,\n self.payloadObj, self.key,\n SubItemSmartClassParameter)})", "def enhance(self):\n \"\"\" Function enhance\n Enhance the object with new item or enhanced items\n \"\"\"\n if self.objName in ['hosts', 'hostgroups',\n 'puppet_classes']:\n from foreman.itemSmartClassParameter\\\n import ItemSmartClassParameter\n self.update({'smart_class_parameters':\n SubDict(self.api, self.objName,\n self.payloadObj, self.key,\n ItemSmartClassParameter)})", "public T enhance(T t) {\n if (!needsEnhancement(t)) {\n return t;\n }\n\n try {\n return getEnhancedClass().getConstructor(baseClass).newInstance(t);\n } catch (Exception e) {\n throw new RuntimeException(String.format(\"Could not enhance object %s (%s)\", t, t.getClass()), e);\n }\n }", "private static void doEnhancement(CtClass cc, Version modelVersion) throws CannotCompileException,\n NotFoundException, ClassNotFoundException {\n CtClass inter = cp.get(OpenEngSBModel.class.getName());\n cc.addInterface(inter);\n addFields(cc);\n addGetOpenEngSBModelTail(cc);\n addSetOpenEngSBModelTail(cc);\n addRetrieveModelName(cc);\n addRetrieveModelVersion(cc, modelVersion);\n addOpenEngSBModelEntryMethod(cc);\n addRemoveOpenEngSBModelEntryMethod(cc);\n addRetrieveInternalModelId(cc);\n addRetrieveInternalModelTimestamp(cc);\n 
addRetrieveInternalModelVersion(cc);\n addToOpenEngSBModelValues(cc);\n addToOpenEngSBModelEntries(cc);\n cc.setModifiers(cc.getModifiers() & ~Modifier.ABSTRACT);\n }", "def enhance(self, inverse=False, gamma=1.0, stretch=\"no\",\n stretch_parameters=None, **kwargs):\n \"\"\"Image enhancement function. It applies **in this order** inversion,\n gamma correction, and stretching to the current image, with parameters\n *inverse* (see :meth:`Image.invert`), *gamma* (see\n :meth:`Image.gamma`), and *stretch* (see :meth:`Image.stretch`).\n \"\"\"\n self.invert(inverse)\n if stretch_parameters is None:\n stretch_parameters = {}\n\n stretch_parameters.update(kwargs)\n self.stretch(stretch, **stretch_parameters)\n self.gamma(gamma)", "def load(self, data):\n \"\"\" Function load\n Store the object data\n \"\"\"\n self.clear()\n self.update(data)\n self.enhance()", "def enhance2dataset(dset):\n \"\"\"Apply enhancements to dataset *dset* and return the resulting data\n array of the image.\"\"\"\n attrs = dset.attrs\n img = get_enhanced_image(dset)\n # Clip image data to interval [0.0, 1.0]\n data = img.data.clip(0.0, 1.0)\n data.attrs = attrs\n\n return data", "function enhancedEcommerceProductAction(track, action, data) {\n enhancedEcommerceTrackProduct(track);\n window.ga('ec:setAction', action, data || {});\n}" ]
[ 0.8752948045730591, 0.8729672431945801, 0.8696154356002808, 0.8695272207260132, 0.8679497838020325, 0.8407313823699951, 0.7447202205657959, 0.7079142928123474, 0.707258939743042, 0.6839107275009155, 0.6810131072998047, 0.6799886226654053 ]
Extract the transformers names from a line of code of the form from __experimental__ import transformer1 [,...] and adds them to the globally known dict
def add_transformers(line):
    '''Extract the transformers names from a line of code of the form
    from __experimental__ import transformer1 [,...]
    and adds them to the globally known dict
    '''
    assert FROM_EXPERIMENTAL.match(line)

    line = FROM_EXPERIMENTAL.sub(' ', line)
    # we now have: " transformer1 [,...]"
    line = line.split("#")[0]  # remove any end of line comments
    # and insert each transformer as an item in a list
    for trans in line.replace(' ', '').split(','):
        import_transformer(trans)
[ "def extract_transformers_from_source(source):\n '''Scan a source for lines of the form\n from __experimental__ import transformer1 [,...]\n identifying transformers to be used. Such line is passed to the\n add_transformer function, after which it is removed from the\n code to be executed.\n '''\n lines = source.split('\\n')\n linenumbers = []\n for number, line in enumerate(lines):\n if FROM_EXPERIMENTAL.match(line):\n add_transformers(line)\n linenumbers.insert(0, number)\n\n # drop the \"fake\" import from the source code\n for number in linenumbers:\n del lines[number]\n return '\\n'.join(lines)", "def import_transformer(name):\n '''If needed, import a transformer, and adds it to the globally known dict\n The code inside a module where a transformer is defined should be\n standard Python code, which does not need any transformation.\n So, we disable the import hook, and let the normal module import\n do its job - which is faster and likely more reliable than our\n custom method.\n '''\n if name in transformers:\n return transformers[name]\n\n # We are adding a transformer built from normal/standard Python code.\n # As we are not performing transformations, we temporarily disable\n # our import hook, both to avoid potential problems AND because we\n # found that this resulted in much faster code.\n hook = sys.meta_path[0]\n sys.meta_path = sys.meta_path[1:]\n try:\n transformers[name] = __import__(name)\n # Some transformers are not allowed in the console.\n # If an attempt is made to activate one of them in the console,\n # we replace it by a transformer that does nothing and print a\n # message specific to that transformer as written in its module.\n if CONSOLE_ACTIVE:\n if hasattr(transformers[name], \"NO_CONSOLE\"):\n print(transformers[name].NO_CONSOLE)\n transformers[name] = NullTransformer()\n except ImportError:\n sys.stderr.write(\"Warning: Import Error in add_transformers: %s not found\\n\" % name)\n transformers[name] = NullTransformer()\n except Exception as e:\n sys.stderr.write(\"Unexpected exception in transforms.import_transformer%s\\n \" %\n e.__class__.__name__)\n finally:\n sys.meta_path.insert(0, hook) # restore import hook\n\n return transformers[name]", "def transform(source):\n '''Used to convert the source code, making use of known transformers.\n\n \"transformers\" are modules which must contain a function\n\n transform_source(source)\n\n which returns a tranformed source.\n Some transformers (for example, those found in the standard library\n module lib2to3) cannot cope with non-standard syntax; as a result, they\n may fail during a first attempt. We keep track of all failing\n transformers and keep retrying them until either they all succeeded\n or a fixed set of them fails twice in a row.\n '''\n source = extract_transformers_from_source(source)\n\n # Some transformer fail when multiple non-Python constructs\n # are present. 
So, we loop multiple times keeping track of\n # which transformations have been unsuccessfully performed.\n not_done = transformers\n while True:\n failed = {}\n for name in not_done:\n tr_module = import_transformer(name)\n try:\n source = tr_module.transform_source(source)\n except Exception as e:\n failed[name] = tr_module\n # from traceback import print_exc\n # print(\"Unexpected exception in transforms.transform\",\n # e.__class__.__name__)\n # print_exc()\n\n if not failed:\n break\n # Insanity is doing the same Tting over and overaAgain and\n # expecting different results ...\n # If the exact same set of transformations are not performed\n # twice in a row, there is no point in trying out a third time.\n if failed == not_done:\n print(\"Warning: the following transforms could not be done:\")\n for key in failed:\n print(key)\n break\n not_done = failed # attempt another pass\n\n return source", "def load_transforms(transforms):\n \"\"\"\n Load transform modules and return instance of transform class.\n\n Parameters\n ----------\n transforms : [str] or [[str]]\n array of transform module name,\n or nested array of transform module name with argv to load\n\n Returns\n -------\n array of transform instance\n \"\"\"\n\n from . import Transform\n import inspect\n\n # normalize arguments to form as [(name, [option, ...]), ...]\n transforms_with_argv = map(lambda t: (t[0], t[1:]) if isinstance(t, list) else (t, []),\n transforms)\n\n def instantiate_transform(module_name, argv):\n tr_module = __import__(\n module_name if module_name.count('.') > 0 else TRANSFORM_MODULE_PREFIX + module_name,\n fromlist=['dummy'])\n tr_classes = inspect.getmembers(\n tr_module,\n lambda c: issubclass(c if inspect.isclass(c) else None.__class__,\n Transform))\n\n if len(tr_classes) != 1:\n raise TypeError('Transform module must have only one subclass of Transform')\n\n tr_class = tr_classes[0]\n return tr_class[1](argv)\n\n return [instantiate_transform(tr[0], tr[1])\n for tr in transforms_with_argv]", "def _iter(self):\n \"\"\"Generate (name, est, weight) tuples excluding None transformers\n \"\"\"\n get_weight = (self.transformer_weights or {}).get\n return ((name, trans, get_weight(name))\n for name, trans in self.transformer_list\n if trans is not None)", "def extract_code_globals(cls, co):\n \"\"\"\n Find all globals names read or written to by codeblock co\n \"\"\"\n out_names = cls._extract_code_globals_cache.get(co)\n if out_names is None:\n try:\n names = co.co_names\n except AttributeError:\n # PyPy \"builtin-code\" object\n out_names = set()\n else:\n out_names = set(names[oparg]\n for op, oparg in _walk_global_ops(co))\n\n # see if nested function have any global refs\n if co.co_consts:\n for const in co.co_consts:\n if type(const) is types.CodeType:\n out_names |= cls.extract_code_globals(const)\n\n cls._extract_code_globals_cache[co] = out_names\n\n return out_names", "def transform_feature_names(transformer, in_names=None):\n \"\"\"Get feature names for transformer output as a function of input names.\n\n Used by :func:`explain_weights` when applied to a scikit-learn Pipeline,\n this ``singledispatch`` should be registered with custom name\n transformations for each class of transformer.\n \n If there is no ``singledispatch`` handler registered for a transformer \n class, ``transformer.get_feature_names()`` method is called; if there is\n no such method then feature names are not supported and \n this function raises an exception.\n\n Parameters\n ----------\n transformer : 
scikit-learn-compatible transformer\n in_names : list of str, optional\n Names for features input to transformer.transform().\n If not provided, the implementation may generate default feature names\n if the number of input features is known.\n\n Returns\n -------\n feature_names : list of str\n \"\"\"\n if hasattr(transformer, 'get_feature_names'):\n return transformer.get_feature_names()\n raise NotImplementedError('transform_feature_names not available for '\n '{}'.format(transformer))", "def exec_module(self, module):\n '''import the source code, transforma it before executing it so that\n it is known to Python.'''\n global MAIN_MODULE_NAME\n if module.__name__ == MAIN_MODULE_NAME:\n module.__name__ = \"__main__\"\n MAIN_MODULE_NAME = None\n\n with open(self.filename) as f:\n source = f.read()\n\n if transforms.transformers:\n source = transforms.transform(source)\n else:\n for line in source.split('\\n'):\n if transforms.FROM_EXPERIMENTAL.match(line):\n ## transforms.transform will extract all such relevant\n ## lines and add them all relevant transformers\n source = transforms.transform(source)\n break\n exec(source, vars(module))", "def compose_transformers(transformers, types, preprocessors, postprocessors)\n if types.length < 2\n raise ArgumentError, \"too few transform types: #{types.inspect}\"\n end\n\n processors = types.each_cons(2).map { |src, dst|\n unless processor = transformers[src][dst]\n raise ArgumentError, \"missing transformer for type: #{src} to #{dst}\"\n end\n processor\n }\n\n compose_transformer_list processors, preprocessors, postprocessors\n end", "def _info_transformers(fields, transformers):\n \"\"\"Utility function to determine transformer functions for variants\n fields.\"\"\"\n if transformers is None:\n # no transformers specified by user\n transformers = dict()\n for f in fields:\n if f not in transformers:\n transformers[f] = config.DEFAULT_TRANSFORMER.get(f, None)\n return tuple(transformers[f] for f in fields)", "def _get_transformers(self):\n \"\"\"Load the contents of meta_file and extract information about the transformers.\n\n Returns:\n dict: tuple(str, str) -> Transformer.\n \"\"\"\n transformer_dict = {}\n\n for table in self.metadata['tables']:\n table_name = table['name']\n\n for field in table['fields']:\n transformer_type = field.get('type')\n if transformer_type:\n col_name = field['name']\n transformer_dict[(table_name, col_name)] = transformer_type\n\n return transformer_dict", "def remove_not_allowed_in_console():\n '''This function should be called from the console, when it starts.\n\n Some transformers are not allowed in the console and they could have\n been loaded prior to the console being activated. We effectively remove them\n and print an information message specific to that transformer\n as written in the transformer module.\n\n '''\n not_allowed_in_console = []\n if CONSOLE_ACTIVE:\n for name in transformers:\n tr_module = import_transformer(name)\n if hasattr(tr_module, \"NO_CONSOLE\"):\n not_allowed_in_console.append((name, tr_module))\n for name, tr_module in not_allowed_in_console:\n print(tr_module.NO_CONSOLE)\n # Note: we do not remove them, so as to avoid seeing the\n # information message displayed again if an attempt is\n # made to re-import them from a console instruction.\n transformers[name] = NullTransformer()" ]
[ 0.8757690787315369, 0.735373854637146, 0.6851067543029785, 0.6705670356750488, 0.6693321466445923, 0.6668049693107605, 0.6660348773002625, 0.6654304265975952, 0.6635963916778564, 0.6592937707901001, 0.6576693058013916, 0.6569241285324097 ]
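The rows in this part of the dump all exercise one small piece of machinery: a fake "from __experimental__ import ..." line that names source transformers to load. A self-contained toy version, in which the FROM_EXPERIMENTAL pattern and the NullTransformer fallback are reconstructions from these snippets rather than the project's actual definitions:

import re

# Assumed shape of the pattern the snippets call FROM_EXPERIMENTAL.
FROM_EXPERIMENTAL = re.compile(r"^\s*from\s+__experimental__\s+import\s+")

transformers = {}

class NullTransformer:
    """Stand-in used when a transformer cannot (or may not) be loaded."""
    def transform_source(self, source):
        return source

def add_transformers_demo(line):
    """Toy counterpart of add_transformers: register each named transformer."""
    assert FROM_EXPERIMENTAL.match(line)
    names = FROM_EXPERIMENTAL.sub(' ', line).split('#')[0]
    for name in names.replace(' ', '').split(','):
        transformers.setdefault(name, NullTransformer())

add_transformers_demo("from __experimental__ import nobreak, repeat  # demo")
print(sorted(transformers))  # -> ['nobreak', 'repeat']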
If needed, import a transformer, and adds it to the globally known dict The code inside a module where a transformer is defined should be standard Python code, which does not need any transformation. So, we disable the import hook, and let the normal module import do its job - which is faster and likely more reliable than our custom method.
def import_transformer(name):
    '''If needed, import a transformer, and adds it to the globally known dict
    The code inside a module where a transformer is defined should be
    standard Python code, which does not need any transformation.
    So, we disable the import hook, and let the normal module import
    do its job - which is faster and likely more reliable than our
    custom method.
    '''
    if name in transformers:
        return transformers[name]

    # We are adding a transformer built from normal/standard Python code.
    # As we are not performing transformations, we temporarily disable
    # our import hook, both to avoid potential problems AND because we
    # found that this resulted in much faster code.
    hook = sys.meta_path[0]
    sys.meta_path = sys.meta_path[1:]
    try:
        transformers[name] = __import__(name)
        # Some transformers are not allowed in the console.
        # If an attempt is made to activate one of them in the console,
        # we replace it by a transformer that does nothing and print a
        # message specific to that transformer as written in its module.
        if CONSOLE_ACTIVE:
            if hasattr(transformers[name], "NO_CONSOLE"):
                print(transformers[name].NO_CONSOLE)
                transformers[name] = NullTransformer()
    except ImportError:
        sys.stderr.write("Warning: Import Error in add_transformers: %s not found\n" % name)
        transformers[name] = NullTransformer()
    except Exception as e:
        sys.stderr.write("Unexpected exception in transforms.import_transformer%s\n " %
                         e.__class__.__name__)
    finally:
        sys.meta_path.insert(0, hook)  # restore import hook

    return transformers[name]
[ "def add_transformers(line):\n '''Extract the transformers names from a line of code of the form\n from __experimental__ import transformer1 [,...]\n and adds them to the globally known dict\n '''\n assert FROM_EXPERIMENTAL.match(line)\n\n line = FROM_EXPERIMENTAL.sub(' ', line)\n # we now have: \" transformer1 [,...]\"\n line = line.split(\"#\")[0] # remove any end of line comments\n # and insert each transformer as an item in a list\n for trans in line.replace(' ', '').split(','):\n import_transformer(trans)", "def visit_Module(self, node):\n \"\"\"\n When we normalize call, we need to add correct import for method\n to function transformation.\n\n a.max()\n\n for numpy array will become:\n\n numpy.max(a)\n\n so we have to import numpy.\n \"\"\"\n self.skip_functions = True\n self.generic_visit(node)\n self.skip_functions = False\n self.generic_visit(node)\n new_imports = self.to_import - self.globals\n imports = [ast.Import(names=[ast.alias(name=mod[17:], asname=mod)])\n for mod in new_imports]\n node.body = imports + node.body\n self.update |= bool(imports)\n return node", "def decode_import_json(self, json_doc, transformers=None):\n \"\"\" Decode a JSON string based on a list of transformers.\n\n Each transformer is a pair of ([conditional], transformer). If\n all conditionals are met on each non-list, non-dict object,\n the transformer tries to apply itself.\n\n conditional: Callable that returns a Bool.\n transformer: Callable transformer on non-dict, non-list objects.\n \"\"\"\n\n def custom_decoder(dct):\n\n def transform(o):\n\n if not transformers:\n return o\n\n for conditionals, transformer in transformers:\n\n conditions_met = True\n for conditional in conditionals:\n try:\n condition_met = conditional(o)\n except:\n condition_met = False\n if not condition_met:\n conditions_met = False\n break\n\n if not conditions_met:\n continue\n\n try:\n return transformer(o)\n except:\n pass\n\n return o\n\n for key in dct.iterkeys():\n if isinstance(key, dict):\n custom_decoder(dct[key])\n elif isinstance(key, list):\n [custom_decoder[elem] for elem in dct[key]]\n else:\n dct[key] = transform(dct[key])\n\n return dct\n\n return json.loads(json_doc, object_hook=custom_decoder)", "def install_import_hook():\n \"\"\"Installs __import__ hook.\"\"\"\n saved_import = builtins.__import__\n @functools.wraps(saved_import)\n def import_hook(name, *args, **kwargs):\n if name == 'end':\n process_import()\n end\n return saved_import(name, *args, **kwargs)\n end\n builtins.__import__ = import_hook", "def import_handle(self, original, loc, tokens):\n \"\"\"Universalizes imports.\"\"\"\n if len(tokens) == 1:\n imp_from, imports = None, tokens[0]\n elif len(tokens) == 2:\n imp_from, imports = tokens\n if imp_from == \"__future__\":\n self.strict_err_or_warn(\"unnecessary from __future__ import (Coconut does these automatically)\", original, loc)\n return \"\"\n else:\n raise CoconutInternalException(\"invalid import tokens\", tokens)\n if self.strict:\n self.unused_imports.update(imported_names(imports))\n return universal_import(imports, imp_from=imp_from, target=self.target)", "private static synchronized void addTransformer() {\n if (detailedTransformTrace && tc.isEntryEnabled())\n Tr.entry(tc, \"addTransformer\");\n\n if (registeredTransformer == null && instrumentation != null) {\n registeredTransformer = new LibertyRuntimeTransformer();\n instrumentation.addTransformer(registeredTransformer, true);\n }\n\n if (detailedTransformTrace && tc.isEntryEnabled())\n Tr.exit(tc, \"addTransformer\");\n 
}", "def enable_result_transforms(func):\n \"\"\"Decorator that tries to use the object provided using a kwarg called\n 'electrode_transformator' to transform the return values of an import\n function. It is intended to be used to transform electrode numbers and\n locations, i.e. for use in roll-along-measurement schemes.\n\n The transformator object must have a function .transform, which takes three\n parameters: data, electrode, topography and returns three correspondingly\n transformed objects.\n \"\"\"\n\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n func_transformator = kwargs.pop('electrode_transformator', None)\n data, electrodes, topography = func(*args, **kwargs)\n if func_transformator is not None:\n data_transformed, electrodes_transformed, \\\n topography_transformed = func_transformator.transform(\n data, electrodes, topography\n )\n return data_transformed, electrodes_transformed, \\\n topography_transformed\n else:\n return data, electrodes, topography\n return wrapper", "def _import(module, cls):\n \"\"\"\n A messy way to import library-specific classes.\n TODO: I should really make a factory class or something, but I'm lazy.\n Plus, factories remind me a lot of java...\n \"\"\"\n global Scanner\n\n try:\n cls = str(cls)\n mod = __import__(str(module), globals(), locals(), [cls], 1)\n Scanner = getattr(mod, cls)\n except ImportError:\n pass", "def _ImportHookBySuffix(\n name, globals=None, locals=None, fromlist=None, level=None):\n \"\"\"Callback when an import statement is executed by the Python interpreter.\n\n Argument names have to exactly match those of __import__. Otherwise calls\n to __import__ that use keyword syntax will fail: __import('a', fromlist=[]).\n \"\"\"\n _IncrementNestLevel()\n\n if level is None:\n # A level of 0 means absolute import, positive values means relative\n # imports, and -1 means to try both an absolute and relative import.\n # Since imports were disambiguated in Python 3, -1 is not a valid value.\n # The default values are 0 and -1 for Python 3 and 3 respectively.\n # https://docs.python.org/2/library/functions.html#__import__\n # https://docs.python.org/3/library/functions.html#__import__\n level = 0 if six.PY3 else -1\n\n try:\n # Really import modules.\n module = _real_import(name, globals, locals, fromlist, level)\n finally:\n # This _real_import call may raise an exception (e.g., ImportError).\n # However, there might be several modules already loaded before the\n # exception was raised. For instance:\n # a.py\n # import b # success\n # import c # ImportError exception.\n # In this case, an 'import a' statement would have the side effect of\n # importing module 'b'. This should trigger the import hooks for module\n # 'b'. To achieve this, we always search/invoke import callbacks (i.e.,\n # even when an exception is raised).\n #\n # Important Note: Do not use 'return' inside the finally block. 
It will\n # cause any pending exception to be discarded.\n _ProcessImportBySuffix(name, fromlist, globals)\n\n return module", "def add_transformer(self, transformer):\n \"\"\"Adds a transformer to _transformers if not already existing\n \n Args\n ----\n transformer : StationDing0\n Description #TODO\n \"\"\"\n if transformer not in self.transformers() and isinstance(transformer, TransformerDing0):\n self._transformers.append(transformer)", "def visit_import(self, node):\n '''triggered when an import statement is seen'''\n if self.process_module:\n # Store salt imported modules\n for module, import_as in node.names:\n if not module.startswith('salt'):\n continue\n if import_as and import_as not in self.imported_salt_modules:\n self.imported_salt_modules[import_as] = module\n continue\n if module not in self.imported_salt_modules:\n self.imported_salt_modules[module] = module", "def _InstallImportHookBySuffix():\n \"\"\"Lazily installs import hook.\"\"\"\n global _real_import\n\n if _real_import:\n return # Import hook already installed\n\n _real_import = getattr(builtins, '__import__')\n assert _real_import\n builtins.__import__ = _ImportHookBySuffix\n\n if six.PY3:\n # In Python 2, importlib.import_module calls __import__ internally so\n # overriding __import__ is enough. In Python 3, they are separate so it also\n # needs to be overwritten.\n global _real_import_module\n _real_import_module = importlib.import_module\n assert _real_import_module\n importlib.import_module = _ImportModuleHookBySuffix" ]
[ 0.740732729434967, 0.7014108300209045, 0.6842485070228577, 0.6801718473434448, 0.6798920035362244, 0.6796746253967285, 0.6788545250892639, 0.6783643364906311, 0.6754996180534363, 0.6742110848426819, 0.6730639934539795, 0.6721809506416321 ]
Scan a source for lines of the form from __experimental__ import transformer1 [,...] identifying transformers to be used. Such line is passed to the add_transformer function, after which it is removed from the code to be executed.
def extract_transformers_from_source(source):
    '''Scan a source for lines of the form
    from __experimental__ import transformer1 [,...]
    identifying transformers to be used. Such line is passed to the
    add_transformer function, after which it is removed from the
    code to be executed.
    '''
    lines = source.split('\n')
    linenumbers = []
    for number, line in enumerate(lines):
        if FROM_EXPERIMENTAL.match(line):
            add_transformers(line)
            linenumbers.insert(0, number)

    # drop the "fake" import from the source code
    for number in linenumbers:
        del lines[number]
    return '\n'.join(lines)
[ "def add_transformers(line):\n '''Extract the transformers names from a line of code of the form\n from __experimental__ import transformer1 [,...]\n and adds them to the globally known dict\n '''\n assert FROM_EXPERIMENTAL.match(line)\n\n line = FROM_EXPERIMENTAL.sub(' ', line)\n # we now have: \" transformer1 [,...]\"\n line = line.split(\"#\")[0] # remove any end of line comments\n # and insert each transformer as an item in a list\n for trans in line.replace(' ', '').split(','):\n import_transformer(trans)", "def import_transformer(name):\n '''If needed, import a transformer, and adds it to the globally known dict\n The code inside a module where a transformer is defined should be\n standard Python code, which does not need any transformation.\n So, we disable the import hook, and let the normal module import\n do its job - which is faster and likely more reliable than our\n custom method.\n '''\n if name in transformers:\n return transformers[name]\n\n # We are adding a transformer built from normal/standard Python code.\n # As we are not performing transformations, we temporarily disable\n # our import hook, both to avoid potential problems AND because we\n # found that this resulted in much faster code.\n hook = sys.meta_path[0]\n sys.meta_path = sys.meta_path[1:]\n try:\n transformers[name] = __import__(name)\n # Some transformers are not allowed in the console.\n # If an attempt is made to activate one of them in the console,\n # we replace it by a transformer that does nothing and print a\n # message specific to that transformer as written in its module.\n if CONSOLE_ACTIVE:\n if hasattr(transformers[name], \"NO_CONSOLE\"):\n print(transformers[name].NO_CONSOLE)\n transformers[name] = NullTransformer()\n except ImportError:\n sys.stderr.write(\"Warning: Import Error in add_transformers: %s not found\\n\" % name)\n transformers[name] = NullTransformer()\n except Exception as e:\n sys.stderr.write(\"Unexpected exception in transforms.import_transformer%s\\n \" %\n e.__class__.__name__)\n finally:\n sys.meta_path.insert(0, hook) # restore import hook\n\n return transformers[name]", "def transform(source):\n '''Used to convert the source code, making use of known transformers.\n\n \"transformers\" are modules which must contain a function\n\n transform_source(source)\n\n which returns a tranformed source.\n Some transformers (for example, those found in the standard library\n module lib2to3) cannot cope with non-standard syntax; as a result, they\n may fail during a first attempt. We keep track of all failing\n transformers and keep retrying them until either they all succeeded\n or a fixed set of them fails twice in a row.\n '''\n source = extract_transformers_from_source(source)\n\n # Some transformer fail when multiple non-Python constructs\n # are present. 
So, we loop multiple times keeping track of\n # which transformations have been unsuccessfully performed.\n not_done = transformers\n while True:\n failed = {}\n for name in not_done:\n tr_module = import_transformer(name)\n try:\n source = tr_module.transform_source(source)\n except Exception as e:\n failed[name] = tr_module\n # from traceback import print_exc\n # print(\"Unexpected exception in transforms.transform\",\n # e.__class__.__name__)\n # print_exc()\n\n if not failed:\n break\n # Insanity is doing the same Tting over and overaAgain and\n # expecting different results ...\n # If the exact same set of transformations are not performed\n # twice in a row, there is no point in trying out a third time.\n if failed == not_done:\n print(\"Warning: the following transforms could not be done:\")\n for key in failed:\n print(key)\n break\n not_done = failed # attempt another pass\n\n return source", "def exec_module(self, module):\n '''import the source code, transforma it before executing it so that\n it is known to Python.'''\n global MAIN_MODULE_NAME\n if module.__name__ == MAIN_MODULE_NAME:\n module.__name__ = \"__main__\"\n MAIN_MODULE_NAME = None\n\n with open(self.filename) as f:\n source = f.read()\n\n if transforms.transformers:\n source = transforms.transform(source)\n else:\n for line in source.split('\\n'):\n if transforms.FROM_EXPERIMENTAL.match(line):\n ## transforms.transform will extract all such relevant\n ## lines and add them all relevant transformers\n source = transforms.transform(source)\n break\n exec(source, vars(module))", "def find_source_lines(self):\n \"\"\"Mark all executable source lines in fn as executed 0 times.\"\"\"\n strs = trace.find_strings(self.filename)\n lines = trace.find_lines_from_code(self.fn.__code__, strs)\n self.firstcodelineno = sys.maxint\n for lineno in lines:\n self.firstcodelineno = min(self.firstcodelineno, lineno)\n self.sourcelines.setdefault(lineno, 0)\n if self.firstcodelineno == sys.maxint:\n self.firstcodelineno = self.firstlineno", "def FortranScan(path_variable=\"FORTRANPATH\"):\n \"\"\"Return a prototype Scanner instance for scanning source files\n for Fortran USE & INCLUDE statements\"\"\"\n\n# The USE statement regex matches the following:\n#\n# USE module_name\n# USE :: module_name\n# USE, INTRINSIC :: module_name\n# USE, NON_INTRINSIC :: module_name\n#\n# Limitations\n#\n# -- While the regex can handle multiple USE statements on one line,\n# it cannot properly handle them if they are commented out.\n# In either of the following cases:\n#\n# ! USE mod_a ; USE mod_b [entire line is commented out]\n# USE mod_a ! ; USE mod_b [in-line comment of second USE statement]\n#\n# the second module name (mod_b) will be picked up as a dependency\n# even though it should be ignored. The only way I can see\n# to rectify this would be to modify the scanner to eliminate\n# the call to re.findall, read in the contents of the file,\n# treating the comment character as an end-of-line character\n# in addition to the normal linefeed, loop over each line,\n# weeding out the comments, and looking for the USE statements.\n# One advantage to this is that the regex passed to the scanner\n# would no longer need to match a semicolon.\n#\n# -- I question whether or not we need to detect dependencies to\n# INTRINSIC modules because these are built-in to the compiler.\n# If we consider them a dependency, will SCons look for them, not\n# find them, and kill the build? 
Or will we there be standard\n# compiler-specific directories we will need to point to so the\n# compiler and SCons can locate the proper object and mod files?\n\n# Here is a breakdown of the regex:\n#\n# (?i) : regex is case insensitive\n# ^ : start of line\n# (?: : group a collection of regex symbols without saving the match as a \"group\"\n# ^|; : matches either the start of the line or a semicolon - semicolon\n# ) : end the unsaved grouping\n# \\s* : any amount of white space\n# USE : match the string USE, case insensitive\n# (?: : group a collection of regex symbols without saving the match as a \"group\"\n# \\s+| : match one or more whitespace OR .... (the next entire grouped set of regex symbols)\n# (?: : group a collection of regex symbols without saving the match as a \"group\"\n# (?: : establish another unsaved grouping of regex symbols\n# \\s* : any amount of white space\n# , : match a comma\n# \\s* : any amount of white space\n# (?:NON_)? : optionally match the prefix NON_, case insensitive\n# INTRINSIC : match the string INTRINSIC, case insensitive\n# )? : optionally match the \", INTRINSIC/NON_INTRINSIC\" grouped expression\n# \\s* : any amount of white space\n# :: : match a double colon that must appear after the INTRINSIC/NON_INTRINSIC attribute\n# ) : end the unsaved grouping\n# ) : end the unsaved grouping\n# \\s* : match any amount of white space\n# (\\w+) : match the module name that is being USE'd\n#\n#\n use_regex = \"(?i)(?:^|;)\\s*USE(?:\\s+|(?:(?:\\s*,\\s*(?:NON_)?INTRINSIC)?\\s*::))\\s*(\\w+)\"\n\n\n# The INCLUDE statement regex matches the following:\n#\n# INCLUDE 'some_Text'\n# INCLUDE \"some_Text\"\n# INCLUDE \"some_Text\" ; INCLUDE \"some_Text\"\n# INCLUDE kind_\"some_Text\"\n# INCLUDE kind_'some_Text\"\n#\n# where some_Text can include any alphanumeric and/or special character\n# as defined by the Fortran 2003 standard.\n#\n# Limitations:\n#\n# -- The Fortran standard dictates that a \" or ' in the INCLUDE'd\n# string must be represented as a \"\" or '', if the quotes that wrap\n# the entire string are either a ' or \", respectively. While the\n# regular expression below can detect the ' or \" characters just fine,\n# the scanning logic, presently is unable to detect them and reduce\n# them to a single instance. This probably isn't an issue since,\n# in practice, ' or \" are not generally used in filenames.\n#\n# -- This regex will not properly deal with multiple INCLUDE statements\n# when the entire line has been commented out, ala\n#\n# ! INCLUDE 'some_file' ; INCLUDE 'some_file'\n#\n# In such cases, it will properly ignore the first INCLUDE file,\n# but will actually still pick up the second. Interestingly enough,\n# the regex will properly deal with these cases:\n#\n# INCLUDE 'some_file'\n# INCLUDE 'some_file' !; INCLUDE 'some_file'\n#\n# To get around the above limitation, the FORTRAN programmer could\n# simply comment each INCLUDE statement separately, like this\n#\n# ! 
INCLUDE 'some_file' !; INCLUDE 'some_file'\n#\n# The way I see it, the only way to get around this limitation would\n# be to modify the scanning logic to replace the calls to re.findall\n# with a custom loop that processes each line separately, throwing\n# away fully commented out lines before attempting to match against\n# the INCLUDE syntax.\n#\n# Here is a breakdown of the regex:\n#\n# (?i) : regex is case insensitive\n# (?: : begin a non-saving group that matches the following:\n# ^ : either the start of the line\n# | : or\n# ['\">]\\s*; : a semicolon that follows a single quote,\n# double quote or greater than symbol (with any\n# amount of whitespace in between). This will\n# allow the regex to match multiple INCLUDE\n# statements per line (although it also requires\n# the positive lookahead assertion that is\n# used below). It will even properly deal with\n# (i.e. ignore) cases in which the additional\n# INCLUDES are part of an in-line comment, ala\n# \" INCLUDE 'someFile' ! ; INCLUDE 'someFile2' \"\n# ) : end of non-saving group\n# \\s* : any amount of white space\n# INCLUDE : match the string INCLUDE, case insensitive\n# \\s+ : match one or more white space characters\n# (?\\w+_)? : match the optional \"kind-param _\" prefix allowed by the standard\n# [<\"'] : match the include delimiter - an apostrophe, double quote, or less than symbol\n# (.+?) : match one or more characters that make up\n# the included path and file name and save it\n# in a group. The Fortran standard allows for\n# any non-control character to be used. The dot\n# operator will pick up any character, including\n# control codes, but I can't conceive of anyone\n# putting control codes in their file names.\n# The question mark indicates it is non-greedy so\n# that regex will match only up to the next quote,\n# double quote, or greater than symbol\n# (?=[\"'>]) : positive lookahead assertion to match the include\n# delimiter - an apostrophe, double quote, or\n# greater than symbol. 
This level of complexity\n# is required so that the include delimiter is\n# not consumed by the match, thus allowing the\n# sub-regex discussed above to uniquely match a\n# set of semicolon-separated INCLUDE statements\n# (as allowed by the F2003 standard)\n\n include_regex = \"\"\"(?i)(?:^|['\">]\\s*;)\\s*INCLUDE\\s+(?:\\w+_)?[<\"'](.+?)(?=[\"'>])\"\"\"\n\n# The MODULE statement regex finds module definitions by matching\n# the following:\n#\n# MODULE module_name\n#\n# but *not* the following:\n#\n# MODULE PROCEDURE procedure_name\n#\n# Here is a breakdown of the regex:\n#\n# (?i) : regex is case insensitive\n# ^\\s* : any amount of white space\n# MODULE : match the string MODULE, case insensitive\n# \\s+ : match one or more white space characters\n# (?!PROCEDURE) : but *don't* match if the next word matches\n# PROCEDURE (negative lookahead assertion),\n# case insensitive\n# (\\w+) : match one or more alphanumeric characters\n# that make up the defined module name and\n# save it in a group\n\n def_regex = \"\"\"(?i)^\\s*MODULE\\s+(?!PROCEDURE)(\\w+)\"\"\"\n\n scanner = F90Scanner(\"FortranScan\",\n \"$FORTRANSUFFIXES\",\n path_variable,\n use_regex,\n include_regex,\n def_regex)\n return scanner", "def _loadThreePartSource(self, sourceFName, sourceLines):\n \"\"\"is a helper for _loadOneSource.\n \"\"\"\n lineno = 1\n for ln in sourceLines:\n lineno += 1\n try:\n stem, pubType, source = ln.split(\"\\t\", 2)\n stem = stem.strip()[-9:]\n self._addPub(stem, source)\n if pubType==\"C\":\n self.confstems[stem] = 1\n except ValueError:\n sys.stderr.write(\"sourcematchers.py: %s (%d): skipping source line: %s\"%(sourceFName,lineno,ln))", "def imports_on_separate_lines(logical_line):\n r\"\"\"Place imports on separate lines.\n\n Okay: import os\\nimport sys\n E401: import sys, os\n\n Okay: from subprocess import Popen, PIPE\n Okay: from myclas import MyClass\n Okay: from foo.bar.yourclass import YourClass\n Okay: import myclass\n Okay: import foo.bar.yourclass\n \"\"\"\n line = logical_line\n if line.startswith('import '):\n found = line.find(',')\n if -1 < found and ';' not in line[:found]:\n yield found, \"E401 multiple imports on one line\"", "def pysourcefiles(self):\n \"\"\"All source files of the actual models Python classes and their\n respective base classes.\"\"\"\n sourcefiles = set()\n for (name, child) in vars(self).items():\n try:\n parents = inspect.getmro(child)\n except AttributeError:\n continue\n for parent in parents:\n try:\n sourcefile = inspect.getfile(parent)\n except TypeError:\n break\n sourcefiles.add(sourcefile)\n return Lines(*sourcefiles)", "function traceSources(node, original) {\n\t\t\tvar i,\n\t\t\t\tsource,\n\t\t\t\tsources;\n\n\t\t\tif (!(node instanceof EffectNode) && !(node instanceof TransformNode)) {\n\t\t\t\treturn false;\n\t\t\t}\n\n\t\t\tsources = node.sources;\n\n\t\t\tfor (i in sources) {\n\t\t\t\tif (sources.hasOwnProperty(i)) {\n\t\t\t\t\tsource = sources[i];\n\n\t\t\t\t\tif (source === original || traceSources(source, original)) {\n\t\t\t\t\t\treturn true;\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\treturn false;\n\t\t}", "def scan_module(self, pkgpath, modpath, node):\n \"\"\"Scans a module, collecting possible origins for all names, assuming\n names can only become bound to values in other modules by import.\"\"\"\n\n def scan_imports(node):\n if node_type(node) == 'Import':\n for binding in node.names:\n name, asname = binding.name, binding.asname\n if asname:\n self.add(modpath, asname, name)\n else:\n top_name = name.split('.')[0]\n 
self.add(modpath, top_name, top_name)\n self.add_package_origins(name)\n\n elif node_type(node) == 'ImportFrom':\n frompath = resolve_frompath(pkgpath, node.module, node.level)\n for binding in node.names:\n name, asname = binding.name, binding.asname\n if name == '*':\n for name in self.get_star_names(frompath):\n self.add(modpath, name, frompath + '.' + name)\n self.add_package_origins(frompath)\n else:\n self.add(modpath, asname or name, frompath + '.' + name)\n self.add_package_origins(frompath + '.' + name)\n\n else:\n for_each_child(node, scan_imports)\n\n for_each_child(node, scan_imports)", "def _loadTwoPartSource(self, sourceFName, sourceLines):\n \"\"\"is a helper for _loadOneSource.\n \"\"\"\n lineno = 1\n enterInConfstems = 0\n if sourceFName.find(\"conferences\")!=-1:\n enterInConfstems = 1\n for ln in sourceLines:\n lineno += 1\n try:\n stem, source = ln.split(\"\\t\", 1)\n stem = stem.strip()[-9:]\n if not source.strip():\n sys.stderr.write(\"sourcematchers.py: warning: skipping entry %s in file %s\\n\"%(ln.strip(),sourceFName))\n continue\n self._addPub(stem, source)\n if enterInConfstems:\n self.confstems[stem] = 1\n except ValueError:\n sys.stderr.write(\"sourcematchers.py: %s (%d): skipping source line: %s\"%(sourceFName,lineno,ln))" ]
[ 0.8276940584182739, 0.7044193744659424, 0.6995483040809631, 0.6984575390815735, 0.6789594888687134, 0.6653650999069214, 0.660295844078064, 0.6600147485733032, 0.659911572933197, 0.6583966612815857, 0.6572274565696716, 0.6546871662139893 ]
This function should be called from the console, when it starts. Some transformers are not allowed in the console and they could have been loaded prior to the console being activated. We effectively remove them and print an information message specific to that transformer as written in the transformer module.
def remove_not_allowed_in_console():
    '''This function should be called from the console, when it starts.
    Some transformers are not allowed in the console and they could have
    been loaded prior to the console being activated. We effectively remove
    them and print an information message specific to that transformer
    as written in the transformer module.
    '''
    not_allowed_in_console = []
    if CONSOLE_ACTIVE:
        for name in transformers:
            tr_module = import_transformer(name)
            if hasattr(tr_module, "NO_CONSOLE"):
                not_allowed_in_console.append((name, tr_module))

    for name, tr_module in not_allowed_in_console:
        print(tr_module.NO_CONSOLE)
        # Note: we do not remove them, so as to avoid seeing the
        # information message displayed again if an attempt is
        # made to re-import them from a console instruction.
        transformers[name] = NullTransformer()
[ "private static synchronized void addTransformer() {\n if (detailedTransformTrace && tc.isEntryEnabled())\n Tr.entry(tc, \"addTransformer\");\n\n if (registeredTransformer == null && instrumentation != null) {\n registeredTransformer = new LibertyRuntimeTransformer();\n instrumentation.addTransformer(registeredTransformer, true);\n }\n\n if (detailedTransformTrace && tc.isEntryEnabled())\n Tr.exit(tc, \"addTransformer\");\n }", "function (isJson, transformer) {\n printHelpOnExit = false;\n return function (value) {\n if (isJson) {\n console.log(JSON.stringify(value, null, 2));\n } else if (typeof transformer === 'function') {\n console.log(transformer(value));\n } else {\n console.log(value);\n }\n if (theUfo) theUfo.disconnect();\n };\n}", "private static synchronized void addTransformer() {\n if (detailedTransformTrace && tc.isEntryEnabled())\n Tr.entry(tc, \"addTransformer\");\n\n if (registeredTransformer == null && instrumentation != null) {\n registeredTransformer = new LibertyJava8WorkaroundRuntimeTransformer();\n instrumentation.addTransformer(registeredTransformer, false);\n }\n\n if (detailedTransformTrace && tc.isEntryEnabled())\n Tr.exit(tc, \"addTransformer\");\n }", "def import_transformer(name):\n '''If needed, import a transformer, and adds it to the globally known dict\n The code inside a module where a transformer is defined should be\n standard Python code, which does not need any transformation.\n So, we disable the import hook, and let the normal module import\n do its job - which is faster and likely more reliable than our\n custom method.\n '''\n if name in transformers:\n return transformers[name]\n\n # We are adding a transformer built from normal/standard Python code.\n # As we are not performing transformations, we temporarily disable\n # our import hook, both to avoid potential problems AND because we\n # found that this resulted in much faster code.\n hook = sys.meta_path[0]\n sys.meta_path = sys.meta_path[1:]\n try:\n transformers[name] = __import__(name)\n # Some transformers are not allowed in the console.\n # If an attempt is made to activate one of them in the console,\n # we replace it by a transformer that does nothing and print a\n # message specific to that transformer as written in its module.\n if CONSOLE_ACTIVE:\n if hasattr(transformers[name], \"NO_CONSOLE\"):\n print(transformers[name].NO_CONSOLE)\n transformers[name] = NullTransformer()\n except ImportError:\n sys.stderr.write(\"Warning: Import Error in add_transformers: %s not found\\n\" % name)\n transformers[name] = NullTransformer()\n except Exception as e:\n sys.stderr.write(\"Unexpected exception in transforms.import_transformer%s\\n \" %\n e.__class__.__name__)\n finally:\n sys.meta_path.insert(0, hook) # restore import hook\n\n return transformers[name]", "def cli(ctx, stage):\n \"\"\"Show transformer rules\"\"\"\n if not ctx.bubble:\n ctx.say_yellow('There is no bubble present, ' +\n 'will not show any transformer rules')\n raise click.Abort()\n\n path = ctx.home + '/'\n RULES = None\n ctx.say('Stage:'+stage, verbosity=10)\n if stage in STAGES:\n if stage in ctx.cfg.CFG:\n STAGE = ctx.cfg.CFG[stage]\n ctx.say('Stage found:', stuff=STAGE,verbosity=100)\n\n\n if 'TRANSFORM' in STAGE:\n TRANSFORM = STAGE.TRANSFORM\n ctx.say('Transform found:', stuff=TRANSFORM, verbosity=100)\n\n if 'RULES' in TRANSFORM:\n RULES = TRANSFORM.RULES\n ctx.say('Rules found:', stuff=RULES, verbosity=100)\n\n\n if not RULES:\n ctx.say_red('There is no TRANSFORM.RULES in stage:' + stage)\n ctx.say_yellow('please 
check configuration in ' +\n ctx.home + '/config/config.yaml')\n raise click.Abort()\n\n if type(RULES) == str and RULES.endswith('.bubble'):\n ctx.say('loading rules',verbosity=10)\n rules = get_bubble(ctx, path + RULES)\n rule_type = 'bubble'\n transformer = Transformer(rules=rules,\n rule_type=rule_type,\n bubble_path=path,\n verbose=ctx.get_verbose())\n rules = transformer._rules.get_rules()\n ctx.say('current number of rules:' + str(len(rules)),\n verbosity=1)\n for r in rules:\n ctx.say('rule: ' + str(r), verbosity=1)\n\n ctx.gbc.say('rules: ', stuff=rules, verbosity=100)\n else:\n ctx.say('no rules!')\n\n return True", "function() {\n\t\t\t\tvar outputDiv = document.getElementsByClassName(\"gcli-output\")[0]; //$NON-NLS-0$\n\t\t\t\twhile (outputDiv.hasChildNodes()) {\n\t\t\t\t\toutputDiv.removeChild(outputDiv.lastChild);\n\t\t\t\t}\t\t\t\t\n\t\t\t\tthis.output(i18nUtil.formatMessage(messages[\"AvailableCmdsType\"], \"<b>help</b>\")); //$NON-NLS-0$\n\t\t\t}", "def debug_transform(ctx, transform, params, value, fields):\n \"\"\"Runs Canari local transforms in a terminal-friendly fashion.\"\"\"\n from canari.commands.debug_transform import debug_transform\n debug_transform(transform, value, fields, params, ctx.project, ctx.config)", "def _info_transformers(fields, transformers):\n \"\"\"Utility function to determine transformer functions for variants\n fields.\"\"\"\n if transformers is None:\n # no transformers specified by user\n transformers = dict()\n for f in fields:\n if f not in transformers:\n transformers[f] = config.DEFAULT_TRANSFORMER.get(f, None)\n return tuple(transformers[f] for f in fields)", "protected void onRemoved() {\n List<TransformerChangeListener> listeners = getAllListeners();\n for (TransformerChangeListener listener : listeners) {\n listener.onOutputChanged(this, new LinkedList<MutableInputColumn<?>>());\n listener.onRemove(this);\n }\n }", "public void clear() {\n synchronized (extensions) { // we synchronize just to guard unnamedMerged\n transformerRegistry = TransformerRegistry.Factory.create();\n extensions.clear();\n reverseMap.clear();\n subsystemsInfo.clear();\n }\n }", "def extract_transformers_from_source(source):\n '''Scan a source for lines of the form\n from __experimental__ import transformer1 [,...]\n identifying transformers to be used. Such line is passed to the\n add_transformer function, after which it is removed from the\n code to be executed.\n '''\n lines = source.split('\\n')\n linenumbers = []\n for number, line in enumerate(lines):\n if FROM_EXPERIMENTAL.match(line):\n add_transformers(line)\n linenumbers.insert(0, number)\n\n # drop the \"fake\" import from the source code\n for number in linenumbers:\n del lines[number]\n return '\\n'.join(lines)", "def add_transformers(line):\n '''Extract the transformers names from a line of code of the form\n from __experimental__ import transformer1 [,...]\n and adds them to the globally known dict\n '''\n assert FROM_EXPERIMENTAL.match(line)\n\n line = FROM_EXPERIMENTAL.sub(' ', line)\n # we now have: \" transformer1 [,...]\"\n line = line.split(\"#\")[0] # remove any end of line comments\n # and insert each transformer as an item in a list\n for trans in line.replace(' ', '').split(','):\n import_transformer(trans)" ]
[ 0.7023383378982544, 0.6878604292869568, 0.6830065846443176, 0.6781601905822754, 0.6752806901931763, 0.6651531457901001, 0.6632915139198303, 0.6570521593093872, 0.6551002860069275, 0.6537941694259644, 0.6495481133460999, 0.6486765742301941 ]
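A minimal sketch of the transformer-module protocol that remove_not_allowed_in_console() in the row above relies on. The module name, the NO_CONSOLE wording, and the NullTransformer body are assumptions for illustration; the row itself only shows that a transformer module exposes transform_source() and may define NO_CONSOLE.

# Hedged sketch; the names below are assumptions, not quoted from the row above.
# no_loops.py -- a hypothetical transformer module that opts out of the console
NO_CONSOLE = "The no_loops transformer cannot be used in the console."

def transform_source(source):
    # a real transformer would rewrite its non-standard syntax here;
    # this placeholder returns the source unchanged
    return source


class NullTransformer:
    """Assumed stand-in installed for transformers disabled in the console."""
    def transform_source(self, source):
        return source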
Used to convert the source code, making use of known transformers. "transformers" are modules which must contain a function transform_source(source) which returns a transformed source. Some transformers (for example, those found in the standard library module lib2to3) cannot cope with non-standard syntax; as a result, they may fail during a first attempt. We keep track of all failing transformers and keep retrying them until either they all succeed or a fixed set of them fails twice in a row.
def transform(source):
    '''Used to convert the source code, making use of known transformers.

    "transformers" are modules which must contain a function
    transform_source(source) which returns a transformed source.
    Some transformers (for example, those found in the standard library
    module lib2to3) cannot cope with non-standard syntax; as a result, they
    may fail during a first attempt. We keep track of all failing
    transformers and keep retrying them until either they all succeed
    or a fixed set of them fails twice in a row.
    '''
    source = extract_transformers_from_source(source)

    # Some transformers fail when multiple non-Python constructs
    # are present. So, we loop multiple times keeping track of
    # which transformations have been unsuccessfully performed.
    not_done = transformers
    while True:
        failed = {}
        for name in not_done:
            tr_module = import_transformer(name)
            try:
                source = tr_module.transform_source(source)
            except Exception as e:
                failed[name] = tr_module
                # from traceback import print_exc
                # print("Unexpected exception in transforms.transform",
                #       e.__class__.__name__)
                # print_exc()
        if not failed:
            break
        # Insanity is doing the same thing over and over again and
        # expecting different results ...
        # If the exact same set of transformations are not performed
        # twice in a row, there is no point in trying out a third time.
        if failed == not_done:
            print("Warning: the following transforms could not be done:")
            for key in failed:
                print(key)
            break
        not_done = failed  # attempt another pass
    return source
[ "def extract_transformers_from_source(source):\n '''Scan a source for lines of the form\n from __experimental__ import transformer1 [,...]\n identifying transformers to be used. Such line is passed to the\n add_transformer function, after which it is removed from the\n code to be executed.\n '''\n lines = source.split('\\n')\n linenumbers = []\n for number, line in enumerate(lines):\n if FROM_EXPERIMENTAL.match(line):\n add_transformers(line)\n linenumbers.insert(0, number)\n\n # drop the \"fake\" import from the source code\n for number in linenumbers:\n del lines[number]\n return '\\n'.join(lines)", "def source_to_code(self, nodes, path, *, _optimize=-1):\n \"\"\"* Convert the current source to ast \n * Apply ast transformers.\n * Compile the code.\"\"\"\n if not isinstance(nodes, ast.Module):\n nodes = ast.parse(nodes, self.path)\n if self._markdown_docstring:\n nodes = update_docstring(nodes)\n return super().source_to_code(\n ast.fix_missing_locations(self.visit(nodes)), path, _optimize=_optimize\n )", "def gen_source(self, ast, name, customize, is_lambda=False, returnNone=False):\n \"\"\"convert SyntaxTree to Python source code\"\"\"\n\n rn = self.return_none\n self.return_none = returnNone\n old_name = self.name\n self.name = name\n # if code would be empty, append 'pass'\n if len(ast) == 0:\n self.println(self.indent, 'pass')\n else:\n self.customize(customize)\n if is_lambda:\n self.write(self.traverse(ast, is_lambda=is_lambda))\n else:\n self.text = self.traverse(ast, is_lambda=is_lambda)\n self.println(self.text)\n self.name = old_name\n self.return_none = rn", "def refactor(source, fixer_names, ignore=None, filename=''):\n \"\"\"Return refactored code using lib2to3.\n\n Skip if ignore string is produced in the refactored code.\n\n \"\"\"\n check_lib2to3()\n from lib2to3 import pgen2\n try:\n new_text = refactor_with_2to3(source,\n fixer_names=fixer_names,\n filename=filename)\n except (pgen2.parse.ParseError,\n SyntaxError,\n UnicodeDecodeError,\n UnicodeEncodeError):\n return source\n\n if ignore:\n if ignore in new_text and ignore not in source:\n return source\n\n return new_text", "def transform(self, code, *, name=None, filename=None):\n \"\"\"Transform a codetransformer.Code object applying the transforms.\n\n Parameters\n ----------\n code : Code\n The code object to transform.\n name : str, optional\n The new name for this code object.\n filename : str, optional\n The new filename for this code object.\n\n Returns\n -------\n new_code : Code\n The transformed code object.\n \"\"\"\n # reverse lookups from for constants and names.\n reversed_consts = {}\n reversed_names = {}\n reversed_varnames = {}\n for instr in code:\n if isinstance(instr, LOAD_CONST):\n reversed_consts[instr] = instr.arg\n if instr.uses_name:\n reversed_names[instr] = instr.arg\n if isinstance(instr, (STORE_FAST, LOAD_FAST)):\n reversed_varnames[instr] = instr.arg\n\n instrs, consts = tuple(zip(*reversed_consts.items())) or ((), ())\n for instr, const in zip(instrs, self.transform_consts(consts)):\n instr.arg = const\n\n instrs, names = tuple(zip(*reversed_names.items())) or ((), ())\n for instr, name_ in zip(instrs, self.transform_names(names)):\n instr.arg = name_\n\n instrs, varnames = tuple(zip(*reversed_varnames.items())) or ((), ())\n for instr, varname in zip(instrs, self.transform_varnames(varnames)):\n instr.arg = varname\n\n with self._new_context(code):\n post_transform = self.patterndispatcher(code)\n\n return Code(\n post_transform,\n code.argnames,\n 
cellvars=self.transform_cellvars(code.cellvars),\n freevars=self.transform_freevars(code.freevars),\n name=name if name is not None else code.name,\n filename=filename if filename is not None else code.filename,\n firstlineno=code.firstlineno,\n lnotab=_new_lnotab(post_transform, code.lnotab),\n flags=code.flags,\n )", "def refactor_with_2to3(source_text, fixer_names, filename=''):\n \"\"\"Use lib2to3 to refactor the source.\n\n Return the refactored source code.\n\n \"\"\"\n from lib2to3.refactor import RefactoringTool\n fixers = ['lib2to3.fixes.fix_' + name for name in fixer_names]\n tool = RefactoringTool(fixer_names=fixers, explicit=fixers)\n\n from lib2to3.pgen2 import tokenize as lib2to3_tokenize\n try:\n # The name parameter is necessary particularly for the \"import\" fixer.\n return unicode(tool.refactor_string(source_text, name=filename))\n except lib2to3_tokenize.TokenError:\n return source_text", "def to_source(node, indent_with=' ' * 4, add_line_information=False,\n pretty_string=pretty_string, pretty_source=pretty_source):\n \"\"\"This function can convert a node tree back into python sourcecode.\n This is useful for debugging purposes, especially if you're dealing with\n custom asts not generated by python itself.\n\n It could be that the sourcecode is evaluable when the AST itself is not\n compilable / evaluable. The reason for this is that the AST contains some\n more data than regular sourcecode does, which is dropped during\n conversion.\n\n Each level of indentation is replaced with `indent_with`. Per default this\n parameter is equal to four spaces as suggested by PEP 8, but it might be\n adjusted to match the application's styleguide.\n\n If `add_line_information` is set to `True` comments for the line numbers\n of the nodes are added to the output. 
This can be used to spot wrong line\n number information of statement nodes.\n\n \"\"\"\n generator = SourceGenerator(indent_with, add_line_information,\n pretty_string)\n generator.visit(node)\n generator.result.append('\\n')\n if set(generator.result[0]) == set('\\n'):\n generator.result[0] = ''\n return pretty_source(generator.result)", "def convert(srctree, dsttree=dsttree, readonly=False, dumpall=False,\n ignore_exceptions=False, fullcomp=False):\n \"\"\"Walk the srctree, and convert/copy all python files\n into the dsttree\n\n \"\"\"\n\n if fullcomp:\n allow_ast_comparison()\n\n parse_file = code_to_ast.parse_file\n find_py_files = code_to_ast.find_py_files\n srctree = os.path.normpath(srctree)\n\n if not readonly:\n dsttree = os.path.normpath(dsttree)\n logging.info('')\n logging.info('Trashing ' + dsttree)\n shutil.rmtree(dsttree, True)\n\n unknown_src_nodes = set()\n unknown_dst_nodes = set()\n badfiles = set()\n broken = []\n\n oldpath = None\n\n allfiles = find_py_files(srctree, None if readonly else dsttree)\n for srcpath, fname in allfiles:\n # Create destination directory\n if not readonly and srcpath != oldpath:\n oldpath = srcpath\n if srcpath >= srctree:\n dstpath = srcpath.replace(srctree, dsttree, 1)\n if not dstpath.startswith(dsttree):\n raise ValueError(\"%s not a subdirectory of %s\" %\n (dstpath, dsttree))\n else:\n assert srctree.startswith(srcpath)\n dstpath = dsttree\n os.makedirs(dstpath)\n\n srcfname = os.path.join(srcpath, fname)\n logging.info('Converting %s' % srcfname)\n try:\n srcast = parse_file(srcfname)\n except SyntaxError:\n badfiles.add(srcfname)\n continue\n\n try:\n dsttxt = to_source(srcast)\n except:\n if not ignore_exceptions:\n raise\n dsttxt = ''\n\n if not readonly:\n dstfname = os.path.join(dstpath, fname)\n try:\n with open(dstfname, 'wb') as f:\n f.write(out_prep(dsttxt))\n except UnicodeEncodeError:\n badfiles.add(dstfname)\n\n # As a sanity check, make sure that ASTs themselves\n # round-trip OK\n try:\n dstast = ast.parse(dsttxt) if readonly else parse_file(dstfname)\n except SyntaxError:\n dstast = []\n if fullcomp:\n unknown_src_nodes.update(strip_tree(srcast))\n unknown_dst_nodes.update(strip_tree(dstast))\n bad = srcast != dstast\n else:\n bad = not fast_compare(srcast, dstast)\n if dumpall or bad:\n srcdump = dump_tree(srcast)\n dstdump = dump_tree(dstast)\n logging.warning(' calculating dump -- %s' %\n ('bad' if bad else 'OK'))\n if bad:\n broken.append(srcfname)\n if dumpall or bad:\n if not readonly:\n try:\n with open(dstfname[:-3] + '.srcdmp', 'wb') as f:\n f.write(out_prep(srcdump))\n except UnicodeEncodeError:\n badfiles.add(dstfname[:-3] + '.srcdmp')\n try:\n with open(dstfname[:-3] + '.dstdmp', 'wb') as f:\n f.write(out_prep(dstdump))\n except UnicodeEncodeError:\n badfiles.add(dstfname[:-3] + '.dstdmp')\n elif dumpall:\n sys.stdout.write('\\n\\nAST:\\n\\n ')\n sys.stdout.write(srcdump.replace('\\n', '\\n '))\n sys.stdout.write('\\n\\nDecompile:\\n\\n ')\n sys.stdout.write(dsttxt.replace('\\n', '\\n '))\n sys.stdout.write('\\n\\nNew AST:\\n\\n ')\n sys.stdout.write('(same as old)' if dstdump == srcdump\n else dstdump.replace('\\n', '\\n '))\n sys.stdout.write('\\n')\n\n if badfiles:\n logging.warning('\\nFiles not processed due to syntax errors:')\n for fname in sorted(badfiles):\n logging.warning(' %s' % fname)\n if broken:\n logging.warning('\\nFiles failed to round-trip to AST:')\n for srcfname in broken:\n logging.warning(' %s' % srcfname)\n\n ok_to_strip = 'col_offset _precedence _use_parens lineno _p_op _pp'\n 
ok_to_strip = set(ok_to_strip.split())\n bad_nodes = (unknown_dst_nodes | unknown_src_nodes) - ok_to_strip\n if bad_nodes:\n logging.error('\\nERROR -- UNKNOWN NODES STRIPPED: %s' % bad_nodes)\n logging.info('\\n')\n return broken", "def main(conversion_type, input_pyc, output_pyc):\n \"\"\"Convert Python bytecode from one version to another.\n\n INPUT_PYC contains the input bytecode path name\n OUTPUT_PYC contians the output bytecode path name if supplied\n The --conversion type option specifies what conversion to do.\n\n Note: there are a very limited set of conversions currently supported.\n Help out and write more!\"\"\"\n\n shortname = osp.basename(input_pyc)\n if shortname.endswith('.pyc'):\n shortname = shortname[:-4]\n src_version = conversion_to_version(conversion_type, is_dest=False)\n dest_version = conversion_to_version(conversion_type, is_dest=True)\n if output_pyc is None:\n output_pyc = \"%s-%s.pyc\" % (shortname, dest_version)\n\n if conversion_type in UPWARD_COMPATABLE:\n copy_magic_into_pyc(input_pyc, output_pyc, src_version, dest_version)\n return\n temp_asm = NamedTemporaryFile('w', suffix='.pyasm', prefix=shortname, delete=False)\n (filename, co, version,\n timestamp, magic_int) = disassemble_file(input_pyc, temp_asm, asm_format=True)\n temp_asm.close()\n assert version == float(src_version), (\n \"Need Python %s bytecode; got bytecode for version %s\" %\n (src_version, version))\n asm = asm_file(temp_asm.name)\n new_asm = transform_asm(asm, conversion_type, src_version, dest_version)\n os.unlink(temp_asm.name)\n write_pycfile(output_pyc, new_asm)", "def fix_2to3(source,\n aggressive=True, select=None, ignore=None, filename='',\n where='global', verbose=False):\n \"\"\"Fix various deprecated code (via lib2to3).\"\"\"\n if not aggressive:\n return source\n\n select = select or []\n ignore = ignore or []\n\n return refactor(source,\n code_to_2to3(select=select,\n ignore=ignore,\n where=where,\n verbose=verbose),\n filename=filename)", "def convert(source, to, format=None, extra_args=(), encoding='utf-8'):\n \"\"\"Convert given `source` from `format` `to` another.\n\n `source` may be either a file path or a string to be converted.\n It's possible to pass `extra_args` if needed. In case `format` is not\n provided, it will try to invert the format based on given `source`.\n\n Raises OSError if pandoc is not found! Make sure it has been installed and\n is available at path.\n\n \"\"\"\n return _convert(\n _read_file, _process_file,\n source, to,\n format, extra_args,\n encoding=encoding)", "def transform_sources(self, sources, with_string=False):\n \"\"\"Get the defintions of needed strings and functions\n after replacement.\n \"\"\"\n modules = {}\n updater = partial(\n self.replace_source, modules=modules, prefix='string_')\n for filename in sources:\n updated = update_func_body(sources[filename], updater)\n sources[filename] = EXTERN_AND_SEG + updated\n logging.debug('modules: %s', modules)\n return sources, self.build_funcs(modules)" ]
[ 0.7585968971252441, 0.7482191920280457, 0.7213826179504395, 0.7133317589759827, 0.7072563767433167, 0.7071275115013123, 0.7033327221870422, 0.7027499079704285, 0.7023561596870422, 0.7019122242927551, 0.7003361582756042, 0.7002484202384949 ]
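A short, hedged usage sketch for transform() from the row above; the transformer name my_transformer is made up, and the from __experimental__ import line follows the convention documented in the extract_transformers_from_source and add_transformers negatives of the same row.

# Hedged usage sketch: "my_transformer" is a hypothetical module name.
source = "\n".join([
    "from __experimental__ import my_transformer",
    "print('hello')",
])
converted = transform(source)
# extract_transformers_from_source() strips the import line and registers the
# transformer; the remaining code is then passed repeatedly through all
# registered transformers until every pass succeeds or the same set fails
# twice in a row.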
Match all requests/responses that satisfy the following conditions:

* An Admin App; i.e. the path is something like /admin/some_app/
* The ``include_flag`` is not in the response's content
def _match(self, request, response):
    """Match all requests/responses that satisfy the following conditions:

    * An Admin App; i.e. the path is something like /admin/some_app/
    * The ``include_flag`` is not in the response's content

    """
    is_html = 'text/html' in response.get('Content-Type', '')
    if is_html and hasattr(response, 'rendered_content'):
        correct_path = PATH_MATCHER.match(request.path) is not None
        not_included = self.include_flag not in response.rendered_content
        return correct_path and not_included
    return False
[ "def _is_request_in_include_path(self, request):\n \"\"\"Check if the request path is in the `_include_paths` list.\n\n If no specific include paths are given then we assume that\n authentication is required for all paths.\n\n \"\"\"\n if self._include_paths:\n for path in self._include_paths:\n if request.path.startswith(path):\n return True\n return False\n else:\n return True", "def match_path_to_api_path(path_definitions, target_path, base_path='',\n context=None):\n \"\"\"\n Match a request or response path to one of the api paths.\n\n Anything other than exactly one match is an error condition.\n \"\"\"\n if context is None:\n context = {}\n assert isinstance(context, collections.Mapping)\n if target_path.startswith(base_path):\n # Convert all of the api paths into Path instances for easier regex\n # matching.\n normalized_target_path = re.sub(NORMALIZE_SLASH_REGEX, '/',\n target_path)\n matching_api_paths = list()\n matching_api_paths_regex = list()\n for p, v in path_definitions.items():\n # Doing this to help with case where we might have base_path\n # being just /, and then the path starts with / as well.\n full_path = re.sub(NORMALIZE_SLASH_REGEX, '/', base_path + p)\n r = path_to_regex(\n api_path=full_path,\n path_parameters=extract_path_parameters(v),\n operation_parameters=extract_operation_parameters(v),\n context=context,\n )\n if full_path == normalized_target_path:\n matching_api_paths.append(p)\n elif r.match(normalized_target_path):\n matching_api_paths_regex.\\\n append((p, r.match(normalized_target_path)))\n\n # Keep it consistent with the previous behavior\n target_path = target_path[len(base_path):]\n else:\n matching_api_paths = []\n matching_api_paths_regex = []\n\n if not matching_api_paths and not matching_api_paths_regex:\n fstr = MESSAGES['path']['no_matching_paths_found'].format(target_path)\n raise LookupError(fstr)\n elif len(matching_api_paths) == 1:\n return matching_api_paths[0]\n elif len(matching_api_paths) > 1:\n raise MultiplePathsFound(\n MESSAGES['path']['multiple_paths_found'].format(\n target_path, [v[0] for v in matching_api_paths],\n )\n )\n elif len(matching_api_paths_regex) == 1:\n return matching_api_paths_regex[0][0]\n elif len(matching_api_paths_regex) > 1:\n # TODO: This area needs improved logic.\n # We check to see if any of the matched paths is longers than\n # the others. If so, we *assume* it is the correct match. This is\n # going to be prone to false positives. in certain cases.\n matches_by_path_size = collections.defaultdict(list)\n for path, match in matching_api_paths_regex:\n matches_by_path_size[len(path)].append(path)\n longest_match = max(matches_by_path_size.keys())\n if len(matches_by_path_size[longest_match]) == 1:\n return matches_by_path_size[longest_match][0]\n raise MultiplePathsFound(\n MESSAGES['path']['multiple_paths_found'].format(\n target_path, [v[0] for v in matching_api_paths_regex],\n )\n )\n else:\n return matching_api_paths_regex[0][0]", "def match(self, environ):\n ''' Return a (target, url_agrs) tuple or raise HTTPError(400/404/405). 
'''\n verb = environ['REQUEST_METHOD'].upper()\n path = environ['PATH_INFO'] or '/'\n target = None\n if verb == 'HEAD':\n methods = ['PROXY', verb, 'GET', 'ANY']\n else:\n methods = ['PROXY', verb, 'ANY']\n\n for method in methods:\n if method in self.static and path in self.static[method]:\n target, getargs = self.static[method][path]\n return target, getargs(path) if getargs else {}\n elif method in self.dyna_regexes:\n for combined, rules in self.dyna_regexes[method]:\n match = combined(path)\n if match:\n target, getargs = rules[match.lastindex - 1]\n return target, getargs(path) if getargs else {}\n\n # No matching route found. Collect alternative methods for 405 response\n allowed = set([])\n nocheck = set(methods)\n for method in set(self.static) - nocheck:\n if path in self.static[method]:\n allowed.add(verb)\n for method in set(self.dyna_regexes) - allowed - nocheck:\n for combined, rules in self.dyna_regexes[method]:\n match = combined(path)\n if match:\n allowed.add(method)\n if allowed:\n allow_header = \",\".join(sorted(allowed))\n raise HTTPError(405, \"Method not allowed.\", Allow=allow_header)\n\n # No matching route and no alternative method found. We give up\n raise HTTPError(404, \"Not found: \" + repr(path))", "def match(path)\n if match = @regexp.match(path)\n params = {}\n @named.each_with_index { |name, i| params[name] = match[i + 1] } if @type == :handlers\n {\n path: path,\n title: self.flags.fetch(:title){nil},\n params: params,\n route: self\n }\n else\n false\n end\n end", "def process_response(self, request, response):\n \"\"\"\n Add the header to prevent sites under *.herokuapp.com from being indexed.\n \"\"\"\n http_host = request.get_host()\n\n if http_host and 'herokuapp' in http_host:\n response['X-Robots-Tag'] = 'noindex, nofollow'\n\n return response", "def _is_match(self, response, answer):\n \"\"\"Does the response match the answer \"\"\"\n\n def compare_conditions(droppable_id, spatial_units, response_conditions):\n \"\"\"Compare response coordinates with spatial units for droppable_id\"\"\"\n coordinate_match = True\n for coordinate in response_conditions['coordinate_conditions']['include'][droppable_id]:\n answer_match = False\n for spatial_unit in spatial_units:\n if (coordinate['containerId'] == spatial_unit['containerId'] and\n coordinate['coordinate'] in spatial_unit['spatialUnit']):\n answer_match = True\n break\n coordinate_match = coordinate_match and answer_match\n return coordinate_match\n\n # Did the consumer application already do the work for us?\n if response.has_zone_conditions():\n return bool(response.get_zone_conditions() == answer.get_zone_conditions())\n\n answer_conditions = self._get_conditions_map(answer)\n response_conditions = self._get_conditions_map(response)\n\n # Check to see if the lists of droppables used are the same:\n if set(answer_conditions['spatial_unit_conditions']['include']) != set(response_conditions['coordinate_conditions']['include']):\n return False\n\n # Compare included answer spatial unit areas to response coordinates\n for droppable_id, spatial_units in answer_conditions['spatial_unit_conditions']['include'].items():\n # Do the number of defined include conditions match:\n if len(spatial_units) != len(response_conditions['coordinate_conditions']['include'][droppable_id]):\n return False\n if not compare_conditions(droppable_id, spatial_units, response_conditions):\n return False\n\n # Compare excluded answer spatial unit areas to response coordinates\n for droppable_id, spatial_units in 
answer_conditions['spatial_unit_conditions']['exclude'].items():\n if compare_conditions(droppable_id, spatial_units, response_conditions):\n return False\n return True", "def is_matching_rule(self, request):\n\t\t\"\"\"Check according to the rules defined in the class docstring.\"\"\"\n\t\t# First, if in DEBUG mode and with django-debug-toolbar, we skip\n\t\t# this entire process.\n\t\tif settings.DEBUG and request.path.startswith(\"/__debug__\"):\n\t\t\treturn True\n\n\t\t# Second we check against matches\n\t\tmatch = resolve(request.path, getattr(request, \"urlconf\", settings.ROOT_URLCONF))\n\t\tif \"({0})\".format(match.app_name) in EXEMPT:\n\t\t\treturn True\n\n\t\tif \"[{0}]\".format(match.namespace) in EXEMPT:\n\t\t\treturn True\n\n\t\tif \"{0}:{1}\".format(match.namespace, match.url_name) in EXEMPT:\n\t\t\treturn True\n\n\t\tif match.url_name in EXEMPT:\n\t\t\treturn True\n\n\t\t# Third, we check wildcards:\n\t\tfor exempt in [x for x in EXEMPT if x.startswith(\"fn:\")]:\n\t\t\texempt = exempt.replace(\"fn:\", \"\")\n\t\t\tif fnmatch.fnmatch(request.path, exempt):\n\t\t\t\treturn True\n\n\t\treturn False", "def is_cloudflare_challenge(response):\n \"\"\"Test if the given response contains the cloudflare's anti-bot protection\"\"\"\n\n return (\n response.status == 503\n and response.headers.get('Server', '').startswith(b'cloudflare')\n and 'jschl_vc' in response.text\n and 'jschl_answer' in response.text\n )", "def glob_include?(enum, e)\n entry = Pathutil.new(site.in_source_dir).join(e)\n enum.any? do |exp|\n # Users who send a Regexp knows what they want to\n # exclude, so let them send a Regexp to exclude files,\n # we will not bother caring if it works or not, it's\n # on them at this point.\n\n if exp.is_a?(Regexp)\n entry =~ exp\n\n else\n item = Pathutil.new(site.in_source_dir).join(exp)\n\n # If it's a directory they want to exclude, AKA\n # ends with a \"/\" then we will go on to check and\n # see if the entry falls within that path and\n # exclude it if that's the case.\n\n if e.end_with?(\"/\")\n entry.in_path?(\n item\n )\n\n else\n File.fnmatch?(item, entry) ||\n entry.to_path.start_with?(\n item\n )\n end\n end\n end\n end", "def match(self, environ):\n ''' Return a (target, url_agrs) tuple or raise HTTPError(400/404/405). 
'''\n path, targets, urlargs = environ['PATH_INFO'] or '/', None, {}\n if path in self.static:\n targets = self.static[path]\n else:\n for combined, rules in self.dynamic:\n match = combined.match(path)\n if not match: continue\n getargs, targets = rules[match.lastindex - 1]\n urlargs = getargs(path) if getargs else {}\n break\n\n if not targets:\n raise HTTPError(404, \"Not found: \" + repr(environ['PATH_INFO']))\n method = environ['REQUEST_METHOD'].upper()\n if method in targets:\n return targets[method], urlargs\n if method == 'HEAD' and 'GET' in targets:\n return targets['GET'], urlargs\n if 'ANY' in targets:\n return targets['ANY'], urlargs\n allowed = [verb for verb in targets if verb != 'ANY']\n if 'GET' in allowed and 'HEAD' not in allowed:\n allowed.append('HEAD')\n raise HTTPError(405, \"Method not allowed.\",\n header=[('Allow',\",\".join(allowed))])", "def _wrapped_include(arg):\n \"\"\"Convert the old 3-tuple arg for include() into the new format.\n\n The argument \"arg\" should be a tuple with 3 elements:\n (pattern_list, app_namespace, instance_namespace)\n\n Prior to Django 2.0, django.urls.conf.include() accepts 3-tuple arg\n (urlconf, namespace, app_name), but it was droppped in Django 2.0.\n This function is used to convert the older 3-tuple used in horizon code\n into the new format where namespace needs to be passed as the second arg.\n\n For more details, see\n https://docs.djangoproject.com/en/2.0/releases/1.9/#passing-a-3-tuple-or-an-app-name-to-include\n \"\"\"\n pattern_list, app_namespace, instance_namespace = arg\n return include((pattern_list, app_namespace), namespace=instance_namespace)", "def match(self, environ):\n ''' Return a (target, url_agrs) tuple or raise HTTPError(404/405). '''\n targets, urlargs = self._match_path(environ)\n if not targets:\n raise HTTPError(404, \"Not found: \" + repr(environ['PATH_INFO']))\n method = environ['REQUEST_METHOD'].upper()\n if method in targets:\n return targets[method], urlargs\n if method == 'HEAD' and 'GET' in targets:\n return targets['GET'], urlargs\n if 'ANY' in targets:\n return targets['ANY'], urlargs\n allowed = [verb for verb in targets if verb != 'ANY']\n if 'GET' in allowed and 'HEAD' not in allowed:\n allowed.append('HEAD')\n raise HTTPError(405, \"Method not allowed.\",\n header=[('Allow',\",\".join(allowed))])" ]
[ 0.6703752875328064, 0.6700488924980164, 0.6664093136787415, 0.6612303853034973, 0.6594433188438416, 0.654962956905365, 0.6497297883033752, 0.649634599685669, 0.6488224267959595, 0.6488022208213806, 0.6486542224884033, 0.6471434235572815 ]
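A hedged sketch of how the _match() predicate above could be wired into a Django middleware. The class name, the include_flag value, and the process_response wiring are assumptions; only _match() (this row) and _embed() (the last row of this excerpt) come from the source.

# Hedged sketch: class name and include_flag value are assumptions.
class ChosenAdminMiddleware:
    include_flag = "chosen.jquery.min.js"  # assumed marker string

    # _match(), _embed(), _chosen_css() and _chosen_js() as shown in this
    # excerpt would be defined on this class.

    def process_response(self, request, response):
        if self._match(request, response):
            # an admin HTML page that does not yet include the flag;
            # _embed() rewrites its content and returns the response
            return self._embed(request, response)
        return response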
Read the minified CSS file including STATIC_URL in the references to the sprite images.
def _chosen_css(self):
    """Read the minified CSS file including STATIC_URL in the references
    to the sprite images."""
    css = render_to_string(self.css_template, {})
    for sprite in self.chosen_sprites:
        # rewrite path to sprites in the css
        css = css.replace(sprite, settings.STATIC_URL + "img/" + sprite)
    return css
[ "def static(request):\n \"Shorthand static URLs. In debug mode, the JavaScript is not minified.\"\n static_url = settings.STATIC_URL\n prefix = 'src' if settings.DEBUG else 'min'\n return {\n 'CSS_URL': os.path.join(static_url, 'stylesheets/css'),\n 'IMAGES_URL': os.path.join(static_url, 'images'),\n 'JAVASCRIPT_URL': os.path.join(static_url, 'js', prefix),\n }", "def _do_magic_import(self, rule, p_selectors, p_parents, p_children, scope, media, c_lineno, c_property, c_codestr, code, name):\n \"\"\"\n Implements @import for sprite-maps\n Imports magic sprite map directories\n \"\"\"\n if callable(STATIC_ROOT):\n files = sorted(STATIC_ROOT(name))\n else:\n glob_path = os.path.join(STATIC_ROOT, name)\n files = glob.glob(glob_path)\n files = sorted((file[len(STATIC_ROOT):], None) for file in files)\n\n if files:\n # Build magic context\n map_name = os.path.normpath(\n os.path.dirname(name)).replace('\\\\', '_').replace('/', '_')\n kwargs = {}\n\n def setdefault(var, val):\n _var = '$' + map_name + '-' + var\n if _var in rule[CONTEXT]:\n kwargs[var] = interpolate(rule[CONTEXT][_var], rule)\n else:\n rule[CONTEXT][_var] = val\n kwargs[var] = interpolate(val, rule)\n return rule[CONTEXT][_var]\n\n setdefault(\n 'sprite-base-class', StringValue('.' + map_name + '-sprite'))\n setdefault('sprite-dimensions', BooleanValue(False))\n position = setdefault('position', NumberValue(0, '%'))\n spacing = setdefault('spacing', NumberValue(0))\n repeat = setdefault('repeat', StringValue('no-repeat'))\n names = tuple(os.path.splitext(\n os.path.basename(file))[0] for file, storage in files)\n for n in names:\n setdefault(n + '-position', position)\n setdefault(n + '-spacing', spacing)\n setdefault(n + '-repeat', repeat)\n sprite_map = _sprite_map(name, **kwargs)\n rule[CONTEXT]['$' + map_name + '-' + 'sprites'] = sprite_map\n ret = '''\n @import \"compass/utilities/sprites/base\";\n\n // All sprites should extend this class\n // The %(map_name)s-sprite mixin will do so for you.\n #{$%(map_name)s-sprite-base-class} {\n background: $%(map_name)s-sprites;\n }\n\n // Use this to set the dimensions of an element\n // based on the size of the original image.\n @mixin %(map_name)s-sprite-dimensions($name) {\n @include sprite-dimensions($%(map_name)s-sprites, $name);\n }\n\n // Move the background position to display the sprite.\n @mixin %(map_name)s-sprite-position($name, $offset-x: 0, $offset-y: 0) {\n @include sprite-position($%(map_name)s-sprites, $name, $offset-x, $offset-y);\n }\n\n // Extends the sprite base class and set the background position for the desired sprite.\n // It will also apply the image dimensions if $dimensions is true.\n @mixin %(map_name)s-sprite($name, $dimensions: $%(map_name)s-sprite-dimensions, $offset-x: 0, $offset-y: 0) {\n @extend #{$%(map_name)s-sprite-base-class};\n @include sprite($%(map_name)s-sprites, $name, $dimensions, $offset-x, $offset-y);\n }\n\n @mixin %(map_name)s-sprites($sprite-names, $dimensions: $%(map_name)s-sprite-dimensions) {\n @include sprites($%(map_name)s-sprites, $sprite-names, $%(map_name)s-sprite-base-class, $dimensions);\n }\n\n // Generates a class for each sprited image.\n @mixin all-%(map_name)s-sprites($dimensions: $%(map_name)s-sprite-dimensions) {\n @include %(map_name)s-sprites(%(sprites)s, $dimensions);\n }\n ''' % {'map_name': map_name, 'sprites': ' '.join(names)}\n return ret", "def stylesheet_url(path, only_path=False, cache_buster=True):\n \"\"\"\n Generates a path to an asset found relative to the project's css directory.\n Passing a true 
value as the second argument will cause the only the path to\n be returned instead of a `url()` function\n \"\"\"\n filepath = String.unquoted(path).value\n if callable(config.STATIC_ROOT):\n try:\n _file, _storage = list(config.STATIC_ROOT(filepath))[0]\n except IndexError:\n filetime = None\n else:\n filetime = getmtime(_file, _storage)\n if filetime is None:\n filetime = 'NA'\n else:\n _path = os.path.join(config.STATIC_ROOT, filepath.strip('/'))\n filetime = getmtime(_path)\n if filetime is None:\n filetime = 'NA'\n BASE_URL = config.STATIC_URL\n\n url = '%s%s' % (BASE_URL, filepath)\n if cache_buster:\n url = add_cache_buster(url, filetime)\n if only_path:\n return String.unquoted(url)\n else:\n return Url.unquoted(url)", "private String calculateDynamicSkinUrlPathToUse(PortletRequest request, String lessfileBaseName)\n throws IOException {\n final DynamicSkinInstanceData data = new DefaultDynamicSkinInstanceDataImpl(request);\n if (!service.skinCssFileExists(data)) {\n // Trigger the LESS compilation\n service.generateSkinCssFile(data);\n }\n return service.getSkinCssPath(data);\n }", "def load_css(self):\n \"\"\"\n Creates a dict of all icons available in CSS file, and finds out\n what's their common prefix.\n\n :returns sorted icons dict, common icon prefix\n \"\"\"\n icons = dict()\n common_prefix = None\n parser = tinycss.make_parser('page3')\n stylesheet = parser.parse_stylesheet_file(self.css_file)\n\n is_icon = re.compile(\"\\.(.*):before,?\")\n\n for rule in stylesheet.rules:\n selector = rule.selector.as_css()\n\n # Skip CSS classes that are not icons\n if not is_icon.match(selector):\n continue\n\n # Find out what the common prefix is\n if common_prefix is None:\n common_prefix = selector[1:]\n else:\n common_prefix = os.path.commonprefix((common_prefix,\n selector[1:]))\n\n for match in is_icon.finditer(selector):\n name = match.groups()[0]\n for declaration in rule.declarations:\n if declaration.name == \"content\":\n val = declaration.value.as_css()\n # Strip quotation marks\n if re.match(\"^['\\\"].*['\\\"]$\", val):\n val = val[1:-1]\n icons[name] = unichr(int(val[1:], 16))\n\n common_prefix = common_prefix or ''\n\n # Remove common prefix\n if not self.keep_prefix and len(common_prefix) > 0:\n non_prefixed_icons = {}\n for name in icons.keys():\n non_prefixed_icons[name[len(common_prefix):]] = icons[name]\n icons = non_prefixed_icons\n\n sorted_icons = OrderedDict(sorted(icons.items(), key=lambda t: t[0]))\n\n return sorted_icons, common_prefix", "def _inline_image(image, mime_type=None):\n \"\"\"\n Embeds the contents of a file directly inside your stylesheet, eliminating\n the need for another HTTP request. 
For small files such images or fonts,\n this can be a performance benefit at the cost of a larger generated CSS\n file.\n \"\"\"\n file = StringValue(image).value\n mime_type = StringValue(mime_type).value or mimetypes.guess_type(file)[0]\n path = None\n if callable(STATIC_ROOT):\n try:\n _file, _storage = list(STATIC_ROOT(file))[0]\n path = _storage.open(_file)\n except:\n pass\n else:\n _path = os.path.join(STATIC_ROOT, file)\n if os.path.exists(_path):\n path = open(_path, 'rb')\n if path:\n url = 'data:' + mime_type + ';base64,' + base64.b64encode(path.read())\n url = url = '%s%s?_=%s' % (STATIC_URL, file, 'NA')\n inline = 'url(\"%s\")' % escape(url)\n return StringValue(inline)", "function(){\n this.config = utils.mixin(this.config, defaults);\n var file = this.file;\n\n var cssFile = file;\n var destFile = file;\n\n this._getConfig();\n\n if (!this.text) {\n if (this.config.global.writeFile){\n var sourceFile = file.replace('.css', '.source.css');\n if (exists(sourceFile)){\n cssFile = sourceFile;\n } else {\n cssFile = file;\n }\n } else {\n destFile = file.replace('.css', '.sprite.css');\n }\n\n this.cssReader = new cssReader({\n file: cssFile, copyFile: sourceFile\n });\n\n } else {\n this.cssReader = new cssReader({\n text: this.text\n });\n }\n\n var imgPath = this.config.global.imgPath;\n this.spriteDef = new SpriteDef({\n file: file,\n imgPath: this.config.global.imgPath,\n relative: this.config.global.relative,\n cssReader: this.cssReader,\n layout: this.config.global.layout,\n force8bit: this.config.global.force8bit,\n config: this.config\n });\n\n this.cssWrite = new CssWrite({\n destFile: this.dest ? this.dest : destFile,\n cssReader: this.cssReader\n });\n\n }", "def handle_import(self, name, compilation, rule):\n \"\"\"Implementation of Compass's \"magic\" imports, which generate\n spritesheets on the fly, given either a wildcard or the name of a\n directory.\n \"\"\"\n from .sprites import sprite_map\n\n # TODO check that the found file is actually under the root\n if callable(config.STATIC_ROOT):\n files = sorted(config.STATIC_ROOT(name))\n else:\n glob_path = os.path.join(config.STATIC_ROOT, name)\n files = glob.glob(glob_path)\n files = sorted((fn[len(config.STATIC_ROOT):], None) for fn in files)\n\n if not files:\n return\n\n # Build magic context\n calculator = compilation._make_calculator(rule.namespace)\n map_name = os.path.normpath(os.path.dirname(name)).replace('\\\\', '_').replace('/', '_')\n kwargs = {}\n\n # TODO this is all kinds of busted. rule.context hasn't existed for\n # ages.\n def setdefault(var, val):\n _var = '$' + map_name + '-' + var\n if _var in rule.context:\n kwargs[var] = calculator.interpolate(rule.context[_var], rule, self._library)\n else:\n rule.context[_var] = val\n kwargs[var] = calculator.interpolate(val, rule, self._library)\n return rule.context[_var]\n\n setdefault('sprite-base-class', String('.' 
+ map_name + '-sprite', quotes=None))\n setdefault('sprite-dimensions', Boolean(False))\n position = setdefault('position', Number(0, '%'))\n spacing = setdefault('spacing', Number(0))\n repeat = setdefault('repeat', String('no-repeat', quotes=None))\n names = tuple(os.path.splitext(os.path.basename(file))[0] for file, storage in files)\n for n in names:\n setdefault(n + '-position', position)\n setdefault(n + '-spacing', spacing)\n setdefault(n + '-repeat', repeat)\n rule.context['$' + map_name + '-' + 'sprites'] = sprite_map(name, **kwargs)\n generated_code = '''\n @import \"compass/utilities/sprites/base\";\n\n // All sprites should extend this class\n // The %(map_name)s-sprite mixin will do so for you.\n #{$%(map_name)s-sprite-base-class} {\n background: $%(map_name)s-sprites;\n }\n\n // Use this to set the dimensions of an element\n // based on the size of the original image.\n @mixin %(map_name)s-sprite-dimensions($name) {\n @include sprite-dimensions($%(map_name)s-sprites, $name);\n }\n\n // Move the background position to display the sprite.\n @mixin %(map_name)s-sprite-position($name, $offset-x: 0, $offset-y: 0) {\n @include sprite-position($%(map_name)s-sprites, $name, $offset-x, $offset-y);\n }\n\n // Extends the sprite base class and set the background position for the desired sprite.\n // It will also apply the image dimensions if $dimensions is true.\n @mixin %(map_name)s-sprite($name, $dimensions: $%(map_name)s-sprite-dimensions, $offset-x: 0, $offset-y: 0) {\n @extend #{$%(map_name)s-sprite-base-class};\n @include sprite($%(map_name)s-sprites, $name, $dimensions, $offset-x, $offset-y);\n }\n\n @mixin %(map_name)s-sprites($sprite-names, $dimensions: $%(map_name)s-sprite-dimensions) {\n @include sprites($%(map_name)s-sprites, $sprite-names, $%(map_name)s-sprite-base-class, $dimensions);\n }\n\n // Generates a class for each sprited image.\n @mixin all-%(map_name)s-sprites($dimensions: $%(map_name)s-sprite-dimensions) {\n @include %(map_name)s-sprites(%(sprites)s, $dimensions);\n }\n ''' % {'map_name': map_name, 'sprites': ' '.join(names)}\n\n return SourceFile.from_string(generated_code)", "def static_proxy(request):\n \"\"\"\n Serves TinyMCE plugins inside the inline popups and the uploadify\n SWF, as these are normally static files, and will break with\n cross-domain JavaScript errors if ``STATIC_URL`` is an external\n host. 
URL for the file is passed in via querystring in the inline\n popup plugin template, and we then attempt to pull out the relative\n path to the file, so that we can serve it locally via Django.\n \"\"\"\n normalize = lambda u: (\"//\" + u.split(\"://\")[-1]) if \"://\" in u else u\n url = normalize(request.GET[\"u\"])\n host = \"//\" + request.get_host()\n static_url = normalize(settings.STATIC_URL)\n for prefix in (host, static_url, \"/\"):\n if url.startswith(prefix):\n url = url.replace(prefix, \"\", 1)\n response = \"\"\n (content_type, encoding) = mimetypes.guess_type(url)\n if content_type is None:\n content_type = \"application/octet-stream\"\n path = finders.find(url)\n if path:\n if isinstance(path, (list, tuple)):\n path = path[0]\n if url.endswith(\".htm\"):\n # Inject <base href=\"{{ STATIC_URL }}\"> into TinyMCE\n # plugins, since the path static files in these won't be\n # on the same domain.\n static_url = settings.STATIC_URL + os.path.split(url)[0] + \"/\"\n if not urlparse(static_url).scheme:\n static_url = urljoin(host, static_url)\n base_tag = \"<base href='%s'>\" % static_url\n with open(path, \"r\") as f:\n response = f.read().replace(\"<head>\", \"<head>\" + base_tag)\n else:\n try:\n with open(path, \"rb\") as f:\n response = f.read()\n except IOError:\n return HttpResponseNotFound()\n return HttpResponse(response, content_type=content_type)", "def retrieve_styles(self, asset_url_path):\n \"\"\"\n Get style URLs from the source HTML page and specified cached\n asset base URL.\n \"\"\"\n if not asset_url_path.endswith('/'):\n asset_url_path += '/'\n self.style_urls.extend(self._get_style_urls(asset_url_path))", "public function initialize() : void\n {\n foreach (glob(\"{$this->cssDir}/*.css\") as $file) {\n $filename = basename($file);\n $url = \"{$this->cssUrl}/$filename\";\n $content = file_get_contents($file);\n $comment = strstr($content, \"*/\", true);\n $comment = preg_replace([\"#\\/\\*!#\", \"#\\*#\"], \"\", $comment);\n $comment = preg_replace(\"#@#\", \"<br>@\", $comment);\n $first = strpos($comment, \".\");\n $short = substr($comment, 0, $first + 1);\n $long = substr($comment, $first + 1);\n $this->styles[$url] = [\n \"shortDescription\" => $short,\n \"longDescription\" => $long,\n ];\n }\n\n foreach ($this->styles as $key => $value) {\n $isMinified = strstr($key, \".min.css\", true);\n if ($isMinified) {\n unset($this->styles[\"$isMinified.css\"]);\n }\n }\n }", "def staticfiles_url_fetcher(url):\n \"\"\"\n Returns the file matching url.\n\n This method will handle any URL resources that rendering HTML requires\n (eg: images pointed my ``img`` tags, stylesheets, etc).\n\n The default behaviour will fetch any http(s) files normally, and will\n also attempt to resolve staticfiles internally (this should mostly\n affect development scenarios, but also works if static files are served\n under a relative url).\n\n Returns a dictionary with two entries: ``string``, which is the\n resources data as a string and ``mime_type``, which is the identified\n mime type for the resource.\n \"\"\"\n if url.startswith('/'):\n base_url = staticfiles_storage.base_url\n filename = url.replace(base_url, '', 1)\n\n path = finders.find(filename)\n if path:\n # This should match most cases. Manifest static files with relative\n # URLs will only be picked up in DEBUG mode here.\n with open(path, 'rb') as f:\n data = f.read()\n else:\n # This should just match things like Manifest static files with\n # relative URLs. 
While this code path will expect `collectstatic`\n # to have run, it should only be reached on if DEBUG = False.\n\n # XXX: Only Django >= 2.0 supports using this as a context manager:\n f = staticfiles_storage.open(filename)\n data = f.read()\n f.close()\n\n return {\n 'string': data,\n 'mime_type': mimetypes.guess_type(url)[0],\n }\n else:\n return default_url_fetcher(url)" ]
[ 0.7316712737083435, 0.6827130913734436, 0.6762562394142151, 0.674936830997467, 0.6738751530647278, 0.6731905937194824, 0.6721377968788147, 0.6718498468399048, 0.6640922427177429, 0.6606653332710266, 0.659768283367157, 0.6592134833335876 ]
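A small worked example of the sprite-path rewrite performed inside _chosen_css() above; the CSS snippet, sprite file name, and STATIC_URL value are illustrative assumptions.

# Hedged illustration of the replace() call in _chosen_css(); the concrete
# values are assumptions.
css = ".chosen-single div b { background: url(chosen-sprite.png); }"
STATIC_URL = "/static/"
chosen_sprites = ["chosen-sprite.png"]
for sprite in chosen_sprites:
    css = css.replace(sprite, STATIC_URL + "img/" + sprite)
# css now references url(/static/img/chosen-sprite.png), so the browser loads
# the sprite from the project's static files.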
Embed Chosen.js directly in html of the response.
def _embed(self, request, response):
    """Embed Chosen.js directly in html of the response."""
    if self._match(request, response):
        # Render the <link> and the <script> tags to include Chosen.
        head = render_to_string(
            "chosenadmin/_head_css.html",
            {"chosen_css": self._chosen_css()}
        )
        body = render_to_string(
            "chosenadmin/_script.html",
            {"chosen_js": self._chosen_js()}
        )

        # Re-write the Response's content to include our new html
        content = response.rendered_content
        content = content.replace('</head>', head)
        content = content.replace('</body>', body)
        response.content = content
    return response
[ "function (data) {\n $.ajax({\n url: trumbowyg.o.plugins.noembed.proxy,\n type: 'GET',\n data: data,\n cache: false,\n dataType: 'json',\n\n success: trumbowyg.o.plugins.noembed.success || function (data) {\n if (data.html) {\n trumbowyg.execCmd('insertHTML', data.html);\n setTimeout(function () {\n trumbowyg.closeModal();\n }, 250);\n } else {\n trumbowyg.addErrorOnModalField(\n $('input[type=text]', $modal),\n data.error\n );\n }\n },\n error: trumbowyg.o.plugins.noembed.error || function () {\n trumbowyg.addErrorOnModalField(\n $('input[type=text]', $modal),\n trumbowyg.lang.noembedError\n );\n }\n });\n }", "function() {\n var template = this._data ? 'singleSelectedItem' : 'singleSelectPlaceholder';\n var options = this._data\n ? assign(\n {\n removable: this.options.allowClear && !this.options.readOnly\n },\n this._data\n )\n : { placeholder: this.options.placeholder };\n\n this.el.querySelector('input').value = this._value;\n this.$('.selectivity-single-result-container').innerHTML = this.template(template, options);\n }", "async function saveEmbedData(opts, pluginOptions) {\n const { regex } = pluginOptions\n let options = extend({}, opts)\n\n if (isAnchorTagApplied(options, { regex })) {\n await stringReplaceAsync(\n options.result,\n anchorRegex,\n async (match, url, index) => {\n if (!isMatchPresent(regex, match, true)) return match\n saveServiceName(options, pluginOptions, match)\n options = await pushEmbedContent(url, options, pluginOptions, index)\n return match\n }\n )\n } else {\n options = pushEmbedContent(options.result, options, pluginOptions)\n }\n\n return options\n}", "function Embedza(options) {\n debug('init');\n\n this.__options__ = _.merge({\n enabledProviders: true,\n cache: new Cache(),\n // Default options for `got` in `.request()` method\n request: {\n retries: 1, // Default (5) hangs too long\n timeout: 15 * 1000,\n headers: {\n 'user-agent': defaultAgent\n }\n }\n }, options);\n\n this.templates = _.clone(templates);\n\n // User will request `inline` or `block` content form renderer.\n // Create alias for names match\n this.aliases = {\n block: [ 'player', 'rich' ]\n };\n\n this.__fetchers__ = {};\n this.__mixins__ = {};\n this.__mixinsAfter__ = {};\n this.__domains__ = [];\n\n // Domains config cache\n this.__rulesCache__ = null;\n\n // Init plugins\n\n fetchers.forEach(fetcher => this.addFetcher(fetcher));\n mixins.forEach(mixin => this.addMixin(mixin));\n mixinsAfter.forEach(mixinAfter => this.addMixinAfter(mixinAfter));\n domains.forEach(domain => this.addDomain(domain));\n\n // Deactivate some providers if needed\n if (_.isArray(this.__options__.enabledProviders)) {\n\n // Disable all first\n this.forEach(domain => { domain.enabled = false; });\n\n // Enable required and add missing providers\n this.__options__.enabledProviders.forEach(domain => {\n if (!this.__domains__[domain]) {\n this.addDomain(domain);\n } else {\n this.__domains__[domain].enabled = true;\n }\n });\n }\n\n debug('init: done');\n}", "async function _oembed(value) {\n let result = cache.get(value);\n\n if (result) {\n return result;\n }\n\n try {\n const unfurled = await unfurl(value);\n result = unfurled.oembed.html;\n } catch (err) {\n result = `<a href=\"${value}\">${value}</a>`;\n }\n\n return cache.put(value, result);\n }", "function () {\n if (!this._active) {\n return\n }\n const editor = this.editor\n const $selectionELem = editor.selection.getSelectionContainerElem()\n if (!$selectionELem) {\n return\n }\n const selectionText = editor.selection.getSelectionText()\n 
editor.cmd.do('insertHTML', '<span>' + selectionText + '</span>')\n }", "function(response, fields) {\n var\n values = response[fields.values] || {},\n html = ''\n ;\n $.each(values, function(index, option) {\n var\n maybeText = (option[fields.text])\n ? 'data-text=\"' + option[fields.text] + '\"'\n : '',\n maybeDisabled = (option[fields.disabled])\n ? 'disabled '\n : ''\n ;\n html += '<div class=\"'+ maybeDisabled +'item\" data-value=\"' + option[fields.value] + '\"' + maybeText + '>';\n html += option[fields.name];\n html += '</div>';\n });\n return html;\n }", "function show (req, res, next) {\n var _embed = req.query._embed\n var _expand = req.query._expand\n var id = utils.toNative(req.params.id)\n var resource = db.get(name).getById(id).value()\n\n if (resource) {\n // Clone resource to avoid making changes to the underlying object\n resource = _.cloneDeep(resource)\n\n // Embed other resources based on resource id\n // /posts/1?_embed=comments\n embed(resource, _embed)\n\n // Expand inner resources based on id\n // /posts/1?_expand=user\n expand(resource, _expand)\n\n res.locals.data = resource\n }\n\n next()\n }", "def render_embed_js(self, js_embed: Iterable[bytes]) -> bytes:\n \"\"\"Default method used to render the final embedded js for the\n rendered webpage.\n\n Override this method in a sub-classed controller to change the output.\n \"\"\"\n return (\n b'<script type=\"text/javascript\">\\n//<![CDATA[\\n'\n + b\"\\n\".join(js_embed)\n + b\"\\n//]]>\\n</script>\"\n )", "private void encodeJS(FacesContext fc, ResponseWriter rw, DateTimePicker dtp, String datePickerId)\n\tthrows IOException {\n\t\tString clientId = dtp.getClientId();\n\t\tString fieldId = dtp.getFieldId();\n\t\tif (null == fieldId) {\n\t\t\tfieldId = clientId + \"_Input\";\n\t\t} else if (fieldId.equals(dtp.getId())) {\n\t\t\tthrow new FacesException(\"The field id must differ from the regular id.\");\n\t\t}\n\t\tString mode = dtp.getMode();\n\n\t\tObject v = dtp.getSubmittedValue();\n\t\tif (v == null) {\n\t\t\tv = dtp.getValue();\n\t\t}\n\n\t\t// show all buttons\n\t\tif(dtp.isShowButtonPanel()) {\n\t\t\tdtp.setShowClearButton(true);\n\t\t\tdtp.setShowCloseButton(true);\n\t\t\tdtp.setShowTodayButton(true);\n\t\t}\n\n\t\tLocale sloc = BsfUtils.selectLocale(fc.getViewRoot().getLocale(), dtp.getLocale(), dtp);\n\t\tString format = BsfUtils.selectMomentJSDateTimeFormat(sloc, dtp.getFormat(), dtp.isShowDate(), dtp.isShowTime());\n\t\tString displayFormat = \"'\" + format + \"'\";\n\t\tString inlineDisplayDate = \"'\" +\n\t\t\t\tgetValueAsString(v, fc, dtp) + \"'\";\n\n\t\tString fullSelector = \"#\" + BsfUtils.escapeJQuerySpecialCharsInSelector(datePickerId);\n\n\t\tString defaultDate = BsfUtils.isStringValued(dtp.getInitialDate()) ?\n\t\t\tdtp.getInitialDate().contains(\"moment\") ? dtp.getInitialDate() : \"'\" + dtp.getInitialDate() + \"'\" : \"\";\n\t\tString minDate = BsfUtils.isStringValued(dtp.getMinDate()) ?\n\t\t\t\t\t\t\tdtp.getMinDate().contains(\"moment\") ? dtp.getMinDate() : \"'\" + dtp.getMinDate() + \"'\" :\n\t\t\t\t\t\t\t\"\";\n\t\tString maxDate = BsfUtils.isStringValued(dtp.getMaxDate()) ?\n\t\t\t\t\t\t\tdtp.getMaxDate().contains(\"moment\") ? dtp.getMaxDate() : \"'\" + dtp.getMaxDate() + \"'\" :\n\t\t\t\t\t\t\t\"\";\n\n\t\trw.startElement(\"script\", dtp);\n\t\trw.writeText(\"$(function () { \" +\n\t\t\t\t\t \"$('\" + fullSelector + \"').datetimepicker({ \" +\n\t\t\t\t\t \"ignoreReadonly: false, \" +\n\t\t\t\t\t (dtp.isAllowInputToggle() ? 
\t\t\"allowInputToggle: true, \": \"\") +\n\t\t\t\t\t \t(dtp.isCollapse() ? \t\t\t\t\t\t\t\t\t\t\"collapse: \" + dtp.isCollapse() + \", \": \"\") +\n\t\t\t\t\t \t(BsfUtils.isStringValued(dtp.getDayViewHeaderFormat()) ? \t\"dayViewHeaderFormat: '\" + dtp.getDayViewHeaderFormat() + \"', \" : \"\") +\n\t\t\t\t\t \t(BsfUtils.isStringValued(dtp.getDisabledDates()) ?\t\t\t\"disabledDates: [\" + dtp.getDisabledDates() + \"], \" : \"\") +\n\t\t\t\t\t \t(BsfUtils.isStringValued(dtp.getDisableTimeInterval()) ?\t\"disabledTimeIntervals: [\" + dtp.getDisableTimeInterval() + \"], \" : \"\") +\n\t\t\t\t\t \t(BsfUtils.isStringValued(dtp.getEnabledDates()) ?\t\t\t\"enabledDates: [\" + dtp.getDisableTimeInterval() + \"], \" : \"\") +\n\t\t\t\t\t \t(dtp.isFocusOnShow() ? \t\t\t\t\t\t\t\t\t\t\"focusOnShow: \" + dtp.isFocusOnShow() + \", \": \"\") +\n\t\t\t\t\t \t(BsfUtils.isStringValued(dtp.getInitialDate()) ?\t\t\t\"defaultDate: \" + defaultDate + \", \" : \"\") +\n\t\t\t\t\t \t(dtp.isKeepInvalid() ? \t\t\t\t\t\t\t\t\t\t\"keepInvalid: \" + dtp.isKeepInvalid() + \", \": \"\") +\n\t\t\t\t\t \t(dtp.isKeepOpen() ? \t\t\t\t\t\t\t\t\t\t\"keepOpen: \" + dtp.isKeepOpen() + \", \": \"\") +\n\t\t\t\t\t \t(BsfUtils.isStringValued(minDate) ?\t\t\t\t\t\t\t\"minDate: \" + minDate + \", \" : \"\") +\n\t\t\t\t\t \t(BsfUtils.isStringValued(maxDate) ?\t\t\t\t\t\t\t\"maxDate: \" + maxDate + \", \" : \"\") +\n\t\t\t\t\t \t(dtp.isShowWeek() ? \t\t\t\t\t\t\t\t\t\t\"calendarWeeks: \" + dtp.isShowWeek() + \", \": \"\") +\n\t\t\t\t\t \t(dtp.isShowClearButton() ? \t\t\t\t\t\t\t\t\t\"showClear: \" + dtp.isShowClearButton() + \", \": \"\") +\n\t\t\t\t\t \t(dtp.isShowCloseButton() ? \t\t\t\t\t\t\t\t\t\"showClose: \" + dtp.isShowCloseButton() + \", \": \"\") +\n\t\t\t\t\t \t(dtp.isShowTodayButton() ? \t\t\t\t\t\t\t\t\t\"showTodayButton: \" + dtp.isShowTodayButton() + \", \": \"\") +\n\t\t\t\t\t \t(dtp.isSideBySide() || \"inline\".equals(mode) ? \t\t\t\t\"sideBySide: true, \": \"\") +\n\t\t\t\t\t \t(dtp.getTimeStepping() > 0 ?\t\t\t\t\t\t\t\t\"stepping: \" + dtp.getTimeStepping() + \", \": \"\") +\n\t\t\t\t\t \t(BsfUtils.isStringValued(dtp.getToolbarPlacement()) ?\t\t\"toolbarPlacement: '\" + dtp.getToolbarPlacement() + \"', \" : \"\") +\n\t\t\t\t\t \t(BsfUtils.isStringValued(dtp.getViewMode()) ?\t\t\t\t\"viewMode: '\" + dtp.getViewMode() + \"', \" : \"\") +\n\t\t\t\t\t \t(dtp.isUseCurrent() ? \t\t\t\t\t\t\t\t\t\t\"\": \"useCurrent:false,\") +\n\t\t\t\t\t \t(dtp.isUseStrict() ? \t\t\t\t\t\t\t\t\t\t\"useStrict: \" + dtp.isUseStrict() + \", \": \"\") +\n\t\t\t\t\t \t(BsfUtils.isStringValued(dtp.getWidgetParent()) ? \"widgetParent: '\" + BsfUtils.resolveSearchExpressions(dtp.getWidgetParent()) + \"', \" : \"\" ) +\n\t\t\t\t\t \t(\"inline\".equals(mode) ? \t\t\t\t\t\t\t\t\t\"inline: true,\" : \"\" ) +\n\t\t\t\t\t \t\"date: moment(\" + inlineDisplayDate + \", \" + displayFormat + \"), \" +\n\t\t\t\t\t \t\"locale: '\" + sloc.getLanguage() + \"', \" +\n\t\t\t\t\t \t\"format: \" + displayFormat +\n\t\t\t\t\t\t \"});\" +\n\t\t\t\t\t // (\"inline\".equals(type) ? 
\"$('\" + fullSelector + \"').date(\" + inlineDisplayDate + \")\" : \"\") +\n\t\t\t\t\t \"});\", null);\n\n\t\tif(\"inline\".equals(mode)) {\n\t\t\trw.writeText(\"$('\" + fullSelector + \"').on('dp.change', function(e) { \" +\n\t\t\t\t\t\t \" $('#\" + BsfUtils.escapeJQuerySpecialCharsInSelector(fieldId) + \"').val( e.date.format(\" + displayFormat + \") ); \" +\n\t\t\t\t\t\t \"});\", null);\n\t\t}\n\t\trw.endElement(\"script\");\n\t\tnew AJAXRenderer().generateBootsFacesAJAXAndJavaScriptForJQuery(fc, dtp, rw, fullSelector, null, true);\n\t}", "function () {\n\t\t\n\t\t\tvar choiceIndex;\n\t\t\t\n\t\t\tif (this.el.selectedIndex >= 0) {\n\t\t\t\t\n\t\t\t\tchoiceIndex = inputEx.indexOf(this.el.childNodes[this.el.selectedIndex], this.choicesList, function (node, choice) {\n\t\t\t\t\treturn node === choice.node;\n\t\t\t\t});\n\t\t\t\n\t\t\t\treturn this.choicesList[choiceIndex].value;\n\t\t\t\t\n\t\t\t} else {\n\t\t\t\t\n\t\t\t\treturn \"\";\n\t\t\t\t\n\t\t\t}\n\t\t}", "function generateEmbed(id, source, html) {\n id = id || Embedo.utils.uuid();\n var container = document.createElement('div');\n\n container.setAttribute('id', id);\n container.setAttribute('data-embedo-id', id);\n container.setAttribute('data-embedo-source', source);\n\n if (Embedo.utils.validateElement(html)) {\n container.appendChild(html);\n } else {\n container.innerHTML = html || '';\n }\n\n return container;\n }" ]
[ 0.6733248829841614, 0.6718196868896484, 0.6667999625205994, 0.6569007635116577, 0.6564831137657166, 0.6560764312744141, 0.6543261408805847, 0.6530284285545349, 0.6528116464614868, 0.6516057848930359, 0.6515606045722961, 0.6499364376068115 ]
Close the I2C bus
def clean_up(self):
    """
    Close the I2C bus
    """
    self.log.debug("Closing I2C bus for address: 0x%02X" % self.address)
    self.bus.close()
[ "def close(self):\n \"\"\"\n Close the i2c connection.\n \"\"\"\n if self.fd:\n os.close(self.fd)\n self.fd = None", "func (conn *I2CConnection) Close() error {\n\tglog.V(2).Info(\"hd44780: closing I2C bus\")\n\treturn conn.I2C.Close()\n}", "def close(self):\n \"\"\"Closes the gpib transport.\"\"\"\n if self._device is not None:\n ibsta = self._lib.ibonl(self._device, 0)\n self._check_status(ibsta)\n self._device = None", "def close(self):\n \"\"\"! @brief Close the USB interface.\"\"\"\n assert self.closed is False\n\n if self.is_swo_running:\n self.stop_swo()\n self.closed = True\n self.rx_stop_event.set()\n self.read_sem.release()\n self.thread.join()\n assert self.rcv_data[-1] is None\n self.rcv_data = []\n self.swo_data = []\n usb.util.release_interface(self.dev, self.intf_number)\n usb.util.dispose_resources(self.dev)\n self.ep_out = None\n self.ep_in = None\n self.ep_swo = None\n self.dev = None\n self.intf_number = None\n self.thread = None", "func (c *i2cConnection) Close() error {\n\tc.mutex.Lock()\n\tdefer c.mutex.Unlock()\n\n\treturn c.bus.Close()\n}", "def close(self):\n \"\"\"\n close the interface\n \"\"\"\n assert self.closed is False\n\n log.debug(\"closing interface\")\n self.closed = True\n self.read_sem.release()\n self.thread.join()\n assert self.rcv_data[-1] is None\n self.rcv_data = []\n usb.util.release_interface(self.dev, self.intf_number)\n if self.kernel_driver_was_attached:\n try:\n self.dev.attach_kernel_driver(self.intf_number)\n except Exception as exception:\n log.warning('Exception attaching kernel driver: %s',\n str(exception))\n usb.util.dispose_resources(self.dev)\n self.ep_out = None\n self.ep_in = None\n self.dev = None\n self.intf_number = None\n self.kernel_driver_was_attached = False\n self.thread = None", "def close(self):\n \"\"\"Turn the device off.\"\"\"\n close_command = StandardSend(self._address,\n COMMAND_LIGHT_OFF_0X13_0X00)\n self._send_method(close_command, self._closed_message_received)", "def close(self):\n \"\"\"\n close the interface\n \"\"\"\n logging.debug(\"closing interface\")\n self.closed = True\n self.read_sem.release()\n self.thread.join()\n usb.util.dispose_resources(self.dev)", "def close(self):\n '''\n Routines to handle any cleanup before the instance shuts down.\n Sockets and filehandles should be closed explicitly, to prevent\n leaks.\n '''\n if not self._closing:\n IPCClient.close(self)\n if self._closing:\n # This will prevent this message from showing up:\n # '[ERROR ] Future exception was never retrieved:\n # StreamClosedError'\n if self._read_sync_future is not None and self._read_sync_future.done():\n self._read_sync_future.exception()\n if self._read_stream_future is not None and self._read_stream_future.done():\n self._read_stream_future.exception()", "public void close()\r\n\t{\r\n\t\tsynchronized (this) {\r\n\t\t\tif (closed)\r\n\t\t\t\treturn;\r\n\t\t\tclosed = true;\r\n\t\t}\r\n\t\ttry {\r\n\t\t\tnormalMode();\r\n\t\t}\r\n\t\tcatch (final KNXException e) {\r\n\t\t\tlogger.error(\"could not switch BCU back to normal mode\", e);\r\n\t\t}\r\n\t\tconn.close();\r\n\t\tnotifier.quit();\r\n\t}", "void close() {\n final String methodName = \"close\";\n if (TraceComponent.isAnyTracingEnabled() && TRACE.isEntryEnabled()) {\n SibTr.entry(this, TRACE, methodName);\n }\n\n close(false);\n\n if (TraceComponent.isAnyTracingEnabled() && TRACE.isEntryEnabled()) {\n SibTr.exit(this, TRACE, methodName);\n }\n }", "def close(self):\n \"\"\"\n Close and release the current usb device.\n :return: None\n \"\"\"\n # This may not 
be absolutely necessary, but it is safe.\n # It's the closest thing to a close() method.\n if self._dev is not None:\n usb.util.dispose_resources(self._dev)\n self._dev = None" ]
[ 0.8122109770774841, 0.7482624053955078, 0.7416961789131165, 0.7368502020835876, 0.731259286403656, 0.728217601776123, 0.7252492904663086, 0.7216896414756775, 0.717282772064209, 0.7143097519874573, 0.7116347551345825, 0.711581826210022 ]
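Each record here has the same shape as the row above: a one-line query, a positive code snippet, twelve mined negative snippets, and twelve retrieval scores sorted from most to least similar. A minimal sketch of reading such rows and pairing each negative with its score, assuming JSON-lines storage and the key names query/pos/neg/scores (the file name and key names are assumptions, not taken from the rows themselves):

import json

# "rows.jsonl" is a placeholder path; the real storage format may differ.
with open("rows.jsonl") as fh:
    for line in fh:
        row = json.loads(line)
        print(row["query"])
        # "neg" and "scores" are parallel lists, best-scoring negative first
        for score, snippet in zip(row["scores"], row["neg"]):
            print("  %.4f  %s" % (score, snippet.splitlines()[0]))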
Send only the read / write bit
def write_quick(self):
    """
    Send only the read / write bit
    """
    self.bus.write_quick(self.address)
    self.log.debug("write_quick: Sent the read / write bit")
[ "def write(self, b):\n '''write some bytes'''\n from . import mavutil\n self.debug(\"sending '%s' (0x%02x) of len %u\\n\" % (b, ord(b[0]), len(b)), 2)\n while len(b) > 0:\n n = len(b)\n if n > 70:\n n = 70\n buf = [ord(x) for x in b[:n]]\n buf.extend([0]*(70-len(buf)))\n self.mav.mav.serial_control_send(self.port,\n mavutil.mavlink.SERIAL_CONTROL_FLAG_EXCLUSIVE |\n mavutil.mavlink.SERIAL_CONTROL_FLAG_RESPOND,\n 0,\n 0,\n n,\n buf)\n b = b[n:]", "def write_single_coil(self, bit_addr, bit_value):\n \"\"\"Modbus function WRITE_SINGLE_COIL (0x05)\n\n :param bit_addr: bit address (0 to 65535)\n :type bit_addr: int\n :param bit_value: bit value to write\n :type bit_value: bool\n :returns: True if write ok or None if fail\n :rtype: bool or None\n \"\"\"\n # check params\n if not (0 <= int(bit_addr) <= 65535):\n self.__debug_msg('write_single_coil(): bit_addr out of range')\n return None\n # build frame\n bit_value = 0xFF if bit_value else 0x00\n tx_buffer = self._mbus_frame(const.WRITE_SINGLE_COIL, struct.pack('>HBB', bit_addr, bit_value, 0))\n # send request\n s_send = self._send_mbus(tx_buffer)\n # check error\n if not s_send:\n return None\n # receive\n f_body = self._recv_mbus()\n # check error\n if not f_body:\n return None\n # check fix frame size\n if len(f_body) != 4:\n self.__last_error = const.MB_RECV_ERR\n self.__debug_msg('write_single_coil(): rx frame size error')\n self.close()\n return None\n # register extract\n (rx_bit_addr, rx_bit_value, rx_padding) = struct.unpack('>HBB', f_body[:4])\n # check bit write\n is_ok = (rx_bit_addr == bit_addr) and (rx_bit_value == bit_value)\n return True if is_ok else None", "private static void writeBits(StringBuilder sb, boolean r, boolean w, boolean x) {\n if (r) {\n sb.append('r');\n } else {\n sb.append('-');\n }\n if (w) {\n sb.append('w');\n } else {\n sb.append('-');\n }\n if (x) {\n sb.append('x');\n } else {\n sb.append('-');\n }\n }", "def write_bit(self, registeraddress, value, functioncode=5):\n \"\"\"Write one bit to the slave.\n\n Args:\n * registeraddress (int): The slave register address (use decimal numbers, not hex).\n * value (int): 0 or 1\n * functioncode (int): Modbus function code. Can be 5 or 15.\n\n Returns:\n None\n\n Raises:\n ValueError, TypeError, IOError\n\n \"\"\"\n _checkFunctioncode(functioncode, [5, 15])\n _checkInt(value, minvalue=0, maxvalue=1, description='input value')\n self._genericCommand(functioncode, registeraddress, value)", "function send_bits(value, length) {\n\t\t// If not enough room in bi_buf, use (valid) bits from bi_buf and\n\t\t// (16 - bi_valid) bits from value, leaving (width - (16-bi_valid))\n\t\t// unused bits in value.\n\t\tif (bi_valid > Buf_size - length) {\n\t\t\tbi_buf |= (value << bi_valid);\n\t\t\tput_short(bi_buf);\n\t\t\tbi_buf = (value >> (Buf_size - bi_valid));\n\t\t\tbi_valid += length - Buf_size;\n\t\t} else {\n\t\t\tbi_buf |= value << bi_valid;\n\t\t\tbi_valid += length;\n\t\t}\n\t}", "def transmit bits\n raise ::LegoNXT::BadOpCodeError unless bytestring(DirectOps::NO_RESPONSE, SystemOps::NO_RESPONSE).include? bits[0]\n transmit! 
bits\n end", "public void ser(DataOutput out) throws IOException {\n if (bits.length == 0) {\n out.writeByte(0);\n return;\n }\n // removing trailing empty bytes.\n int bytesToWrite;\n for (bytesToWrite = bits.length; bytesToWrite > 1 && bits[bytesToWrite - 1] == 0; bytesToWrite--) ;\n // Writing first bytes, with the rightmost bit set\n for (int i = 0; i < (bytesToWrite - 1); i++) {\n out.writeByte((bits[i] | 1));\n }\n // Writing the last byte, with the rightmost bit unset\n out.writeByte((bits[bytesToWrite - 1] & ~1));\n }", "def set_write_bit(fn):\n # type: (str) -> None\n \"\"\"\n Set read-write permissions for the current user on the target path. Fail silently\n if the path doesn't exist.\n\n :param str fn: The target filename or path\n :return: None\n \"\"\"\n\n fn = fs_encode(fn)\n if not os.path.exists(fn):\n return\n file_stat = os.stat(fn).st_mode\n os.chmod(fn, file_stat | stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)\n if os.name == \"nt\":\n from ._winconsole import get_current_user\n\n user_sid = get_current_user()\n icacls_exe = _find_icacls_exe() or \"icacls\"\n from .misc import run\n if user_sid:\n _, err = run([icacls_exe, \"/grant\", \"{0}:WD\".format(user_sid), \"''{0}''\".format(fn), \"/T\", \"/C\", \"/Q\"])\n if not err:\n return\n\n if not os.path.isdir(fn):\n for path in [fn, os.path.dirname(fn)]:\n try:\n os.chflags(path, 0)\n except AttributeError:\n pass\n return None\n for root, dirs, files in os.walk(fn, topdown=False):\n for dir_ in [os.path.join(root, d) for d in dirs]:\n set_write_bit(dir_)\n for file_ in [os.path.join(root, f) for f in files]:\n set_write_bit(file_)", "def write_bit(self, b):\n \"\"\"\n Write a boolean value.\n\n \"\"\"\n if b:\n b = 1\n else:\n b = 0\n shift = self.bitcount % 8\n if shift == 0:\n self.bits.append(0)\n self.bits[-1] |= (b << shift)\n self.bitcount += 1", "function SEND_CODE(c, tree) {\n\t\tsend_bits(tree[c].fc, tree[c].dl);\n\t}", "def send(self, line):\n '''send some bytes'''\n line = line.strip()\n if line == \".\":\n self.stop()\n return\n mav = self.master.mav\n if line != '+++':\n line += \"\\r\\n\"\n buf = [ord(x) for x in line]\n buf.extend([0]*(70-len(buf)))\n\n flags = mavutil.mavlink.SERIAL_CONTROL_FLAG_RESPOND\n flags |= mavutil.mavlink.SERIAL_CONTROL_FLAG_MULTI\n flags |= mavutil.mavlink.SERIAL_CONTROL_FLAG_EXCLUSIVE\n mav.serial_control_send(self.serial_settings.port,\n flags,\n 0, self.serial_settings.baudrate,\n len(line), buf)", "def set_write_bit(fn):\n # type: (str) -> None\n \"\"\"\n Set read-write permissions for the current user on the target path. Fail silently\n if the path doesn't exist.\n\n :param str fn: The target filename or path\n :return: None\n \"\"\"\n\n fn = fs_encode(fn)\n if not os.path.exists(fn):\n return\n file_stat = os.stat(fn).st_mode\n os.chmod(fn, file_stat | stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)\n if not os.path.isdir(fn):\n for path in [fn, os.path.dirname(fn)]:\n try:\n os.chflags(path, 0)\n except AttributeError:\n pass\n return None\n for root, dirs, files in os.walk(fn, topdown=False):\n for dir_ in [os.path.join(root, d) for d in dirs]:\n set_write_bit(dir_)\n for file_ in [os.path.join(root, f) for f in files]:\n set_write_bit(file_)" ]
[ 0.7094565033912659, 0.6987737417221069, 0.6941272020339966, 0.6860069632530212, 0.6829076409339905, 0.6804078221321106, 0.6793176531791687, 0.6791053414344788, 0.6791037917137146, 0.6784651875495911, 0.6781317591667175, 0.6769970655441284 ]
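The write_quick helper above sends only the address byte with the read/write bit, which is also how SMBus presence probing works. A minimal sketch of scanning a bus with it, assuming the smbus2 package and bus number 1 (both assumptions; note that quick-write probing can upset some devices, so real scanners mix probe modes):

from smbus2 import SMBus

def scan_bus(bus_number=1):
    # Probe each 7-bit address; devices that ACK the quick write are present.
    found = []
    with SMBus(bus_number) as bus:
        for address in range(0x03, 0x78):
            try:
                bus.write_quick(address)
                found.append(address)
            except OSError:
                pass  # no ACK at this address
    return found

print(["0x%02X" % a for a in scan_bus()])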
Writes an 8-bit byte to the specified command register
def write_byte(self, cmd, value):
    """
    Writes an 8-bit byte to the specified command register
    """
    self.bus.write_byte_data(self.address, cmd, value)
    self.log.debug(
        "write_byte: Wrote 0x%02X to command register 0x%02X" % (
            value, cmd
        )
    )
[ "def _write8(self, reg, value):\n \"\"\"Write a 8-bit value to a register.\"\"\"\n self.i2c.write8(TCS34725_COMMAND_BIT | reg, value)", "def _write8(self, reg, value):\n \"\"\"Write a 8-bit value to a register.\"\"\"\n self._device.write8(TCS34725_COMMAND_BIT | reg, value)", "def write_word(self, cmd, value):\n \"\"\"\n Writes a 16-bit word to the specified command register\n \"\"\"\n self.bus.write_word_data(self.address, cmd, value)\n self.log.debug(\n \"write_word: Wrote 0x%04X to command register 0x%02X\" % (\n value, cmd\n )\n )", "def write(self, reg, value):\n \"\"\"Write raw byte value to the specified register\n\n :param reg: the register number (0-69, 250-255)\n :param value: byte value\n \"\"\"\n # TODO: check reg: 0-69, 250-255\n self.__check_range('register_value', value)\n logger.debug(\"Write '%s' to register '%s'\" % (value, reg))\n self.__bus.write_byte_data(self.__address, reg, value)", "def _write_register(self, reg, value):\n \"\"\"Write 16 bit value to register.\"\"\"\n self.buf[0] = reg\n self.buf[1] = (value >> 8) & 0xFF\n self.buf[2] = value & 0xFF\n with self.i2c_device as i2c:\n i2c.write(self.buf)", "def write_register(self, registeraddress, value, numberOfDecimals=0, functioncode=16, signed=False):\n \"\"\"Write an integer to one 16-bit register in the slave, possibly scaling it.\n\n The slave register can hold integer values in the range 0 to 65535 (\"Unsigned INT16\").\n\n Args:\n * registeraddress (int): The slave register address (use decimal numbers, not hex).\n * value (int or float): The value to store in the slave register (might be scaled before sending).\n * numberOfDecimals (int): The number of decimals for content conversion.\n * functioncode (int): Modbus function code. Can be 6 or 16.\n * signed (bool): Whether the data should be interpreted as unsigned or signed.\n\n To store for example ``value=77.0``, use ``numberOfDecimals=1`` if the slave register will hold it as 770 internally.\n This will multiply ``value`` by 10 before sending it to the slave register.\n\n Similarly ``numberOfDecimals=2`` will multiply ``value`` by 100 before sending it to the slave register.\n\n For discussion on negative values, the range and on alternative names, see :meth:`.read_register`.\n\n Use the parameter ``signed=True`` if writing to a register that can hold\n negative values. Then negative input will be automatically converted into\n upper range data (two's complement).\n\n Returns:\n None\n\n Raises:\n ValueError, TypeError, IOError\n\n \"\"\"\n _checkFunctioncode(functioncode, [6, 16])\n _checkInt(numberOfDecimals, minvalue=0, maxvalue=10, description='number of decimals')\n _checkBool(signed, description='signed')\n _checkNumerical(value, description='input value')\n\n self._genericCommand(functioncode, registeraddress, value, numberOfDecimals, signed=signed)", "def write_reg(self, addr, value, mask=0xFFFFFFFF, delay_us=0):\n \"\"\" Write to memory address in target\n\n Note: mask option is not supported by stub loaders, use update_reg() function.\n \"\"\"\n return self.check_command(\"write target memory\", self.ESP_WRITE_REG,\n struct.pack('<IIII', addr, value, mask, delay_us))", "def write_byte_data(self, address, register, value):\n \"\"\"Write a byte value to a device's register. 
\"\"\"\n LOGGER.debug(\"Writing byte data %s to register %s on device %s\",\n bin(value), hex(register), hex(address))\n return self.driver.write_byte_data(address, register, value)", "def write_byte(self, address, value):\n \"\"\"Writes the byte to unaddressed register in a device. \"\"\"\n LOGGER.debug(\"Writing byte %s to device %s!\", bin(value), hex(address))\n return self.driver.write_byte(address, value)", "def write_block_data(self, cmd, block):\n \"\"\"\n Writes a block of bytes to the bus using I2C format to the specified\n command register\n \"\"\"\n self.bus.write_i2c_block_data(self.address, cmd, block)\n self.log.debug(\n \"write_block_data: Wrote [%s] to command register 0x%02X\" % (\n ', '.join(['0x%02X' % x for x in block]),\n cmd\n )\n )", "protected void writeRegister(int register, int value) throws IOException {\n\n // create packet in data buffer\n byte packet[] = new byte[3];\n packet[0] = (byte)(register); // register byte\n packet[1] = (byte)(value>>8); // value MSB\n packet[2] = (byte)(value & 0xFF); // value LSB\n\n // write data to I2C device\n device.write(packet, 0, 3);\n }", "def write_raw_byte(self, value):\n \"\"\"\n Writes an 8-bit byte directly to the bus\n \"\"\"\n self.bus.write_byte(self.address, value)\n self.log.debug(\"write_raw_byte: Wrote 0x%02X\" % value)" ]
[ 0.8645734190940857, 0.8588345646858215, 0.7863130569458008, 0.7781301736831665, 0.7779532074928284, 0.7688304781913757, 0.7631951570510864, 0.7490862011909485, 0.7449775338172913, 0.7423670291900635, 0.7418322563171387, 0.7391216158866882 ]
Writes a 16-bit word to the specified command register
def write_word(self, cmd, value):
    """
    Writes a 16-bit word to the specified command register
    """
    self.bus.write_word_data(self.address, cmd, value)
    self.log.debug(
        "write_word: Wrote 0x%04X to command register 0x%02X" % (
            value, cmd
        )
    )
[ "def write_byte(self, cmd, value):\n \"\"\"\n Writes an 8-bit byte to the specified command register\n \"\"\"\n self.bus.write_byte_data(self.address, cmd, value)\n self.log.debug(\n \"write_byte: Wrote 0x%02X to command register 0x%02X\" % (\n value, cmd\n )\n )", "def write_wdata(self, address, register, value):\n \"\"\"Write a word (two bytes) value to a device's register. \"\"\"\n warnings.warn(\"write_wdata() is deprecated and will be removed in future versions replace with write_word_data()\", DeprecationWarning)\n LOGGER.debug(\"Writing word data %s to register %s on device %s\",\n bin(value), hex(register), hex(address))\n return self.driver.write_word_data(address, register, value)", "def _write_register(self, reg, value):\n \"\"\"Write 16 bit value to register.\"\"\"\n self.buf[0] = reg\n self.buf[1] = (value >> 8) & 0xFF\n self.buf[2] = value & 0xFF\n with self.i2c_device as i2c:\n i2c.write(self.buf)", "def program_word(self, offset, word):\n\t\t\"\"\"\n\t\t.. _program_word:\n\n\t\tWrite the word ``word`` to the memory at offset ``offset``.\n\t\tUsed to write the boot code.\n\n\t\tMight raise AddressError_, if the offset exceeds the address space.\n\n\t\t\"\"\"\n\t\tif(offset >= self.size):\n\t\t\traise AddressError(\"Offset({}) not in address space({})\".format(offset, self.size))\n\t\tself.repr_[offset].setvalue(word)", "def write_register(self, registeraddress, value, numberOfDecimals=0, functioncode=16, signed=False):\n \"\"\"Write an integer to one 16-bit register in the slave, possibly scaling it.\n\n The slave register can hold integer values in the range 0 to 65535 (\"Unsigned INT16\").\n\n Args:\n * registeraddress (int): The slave register address (use decimal numbers, not hex).\n * value (int or float): The value to store in the slave register (might be scaled before sending).\n * numberOfDecimals (int): The number of decimals for content conversion.\n * functioncode (int): Modbus function code. Can be 6 or 16.\n * signed (bool): Whether the data should be interpreted as unsigned or signed.\n\n To store for example ``value=77.0``, use ``numberOfDecimals=1`` if the slave register will hold it as 770 internally.\n This will multiply ``value`` by 10 before sending it to the slave register.\n\n Similarly ``numberOfDecimals=2`` will multiply ``value`` by 100 before sending it to the slave register.\n\n For discussion on negative values, the range and on alternative names, see :meth:`.read_register`.\n\n Use the parameter ``signed=True`` if writing to a register that can hold\n negative values. 
Then negative input will be automatically converted into\n upper range data (two's complement).\n\n Returns:\n None\n\n Raises:\n ValueError, TypeError, IOError\n\n \"\"\"\n _checkFunctioncode(functioncode, [6, 16])\n _checkInt(numberOfDecimals, minvalue=0, maxvalue=10, description='number of decimals')\n _checkBool(signed, description='signed')\n _checkNumerical(value, description='input value')\n\n self._genericCommand(functioncode, registeraddress, value, numberOfDecimals, signed=signed)", "def _write8(self, reg, value):\n \"\"\"Write a 8-bit value to a register.\"\"\"\n self.i2c.write8(TCS34725_COMMAND_BIT | reg, value)", "def write_uint16(self, word):\n \"\"\"Write 2 bytes.\"\"\"\n self.write_byte(nyamuk_net.MOSQ_MSB(word))\n self.write_byte(nyamuk_net.MOSQ_LSB(word))", "def memory_write16(self, addr, data, zone=None):\n \"\"\"Writes half-words to memory of a target system.\n\n Args:\n self (JLink): the ``JLink`` instance\n addr (int): start address to write to\n data (list): list of half-words to write\n zone (str): optional memory zone to access\n\n Returns:\n Number of half-words written to target.\n\n Raises:\n JLinkException: on memory access error.\n \"\"\"\n return self.memory_write(addr, data, zone, 16)", "def write_word_data(self, address, register, value):\n \"\"\"\n SMBus Write Word: i2c_smbus_write_word_data()\n ==============================================\n\n This is the opposite of the Read Word operation. 16 bits\n of data is written to a device, to the designated register that is\n specified through the Comm byte.\n\n S Addr Wr [A] Comm [A] DataLow [A] DataHigh [A] P\n\n Functionality flag: I2C_FUNC_SMBUS_WRITE_WORD_DATA\n\n Note the convenience function i2c_smbus_write_word_swapped is\n available for writes where the two data bytes are the other way\n around (not SMBus compliant, but very popular.)\n \"\"\"\n return self.smbus.write_word_data(address, register, value)", "def write_reg(self, addr, value, mask=0xFFFFFFFF, delay_us=0):\n \"\"\" Write to memory address in target\n\n Note: mask option is not supported by stub loaders, use update_reg() function.\n \"\"\"\n return self.check_command(\"write target memory\", self.ESP_WRITE_REG,\n struct.pack('<IIII', addr, value, mask, delay_us))", "def write(self, name_or_index, word):\n\t\t\"\"\"\n\t\tWrite a word in the Register with the name ``name_or_index`` or with the index ``name_or_index``.\n\t\t``name_or_index`` hat to be either ``str`` or ``int``. If the type of ``name_or_index``\n\t\tis wrong an AttributeError will be raised.\n\n\t\tIf there is no Register with the specified name or index, a NameError will be raised.\n\t\t\"\"\"\n\t\tif(isinstance(name_or_index, str)):\n\t\t\tif(name_or_index in self.registers_by_name):\n\t\t\t\tself.registers_by_name[name_or_index].write(word)\n\t\t\telse:\n\t\t\t\traise NameError(\"No Register with name '{}'\".format(name_or_index))\n\t\telif( isinstance(name_or_index, int)):\n\t\t\tif(name_or_index < len(self.registers_by_index)):\n\t\t\t\tself.registers_by_index[name_or_index].write(word)\n\t\t\telse:\n\t\t\t\traise NameError(\"No Register with index '{}'\".format(name_or_index))\n\t\telse:\n\t\t\traise AttributeError(\"name_or_index has to be `str` or `int`, but is {}\".format(type(name_or_index)))", "def _write8(self, reg, value):\n \"\"\"Write a 8-bit value to a register.\"\"\"\n self._device.write8(TCS34725_COMMAND_BIT | reg, value)" ]
[ 0.7869971394538879, 0.777798593044281, 0.7590028047561646, 0.7558252215385437, 0.7445629835128784, 0.744454562664032, 0.7431163191795349, 0.7430921196937561, 0.7393445372581482, 0.7382387518882751, 0.7364676594734192, 0.7359804511070251 ]
Writes an 8-bit byte directly to the bus
def write_raw_byte(self, value):
    """
    Writes an 8-bit byte directly to the bus
    """
    self.bus.write_byte(self.address, value)
    self.log.debug("write_raw_byte: Wrote 0x%02X" % value)
[ "def write_byte(self, cmd, value):\n \"\"\"\n Writes an 8-bit byte to the specified command register\n \"\"\"\n self.bus.write_byte_data(self.address, cmd, value)\n self.log.debug(\n \"write_byte: Wrote 0x%02X to command register 0x%02X\" % (\n value, cmd\n )\n )", "def _write8(self, reg, value):\n \"\"\"Write a 8-bit value to a register.\"\"\"\n self._device.write8(TCS34725_COMMAND_BIT | reg, value)", "def write8(self, offset, value):\n \"\"\"Write 8-bits to the specified `offset` in bytes, relative to the\n base physical address of the MMIO region.\n\n Args:\n offset (int, long): offset from base physical address, in bytes.\n value (int, long): 8-bit value to write.\n\n Raises:\n TypeError: if `offset` or `value` type are invalid.\n ValueError: if `offset` or `value` are out of bounds.\n\n \"\"\"\n if not isinstance(offset, (int, long)):\n raise TypeError(\"Invalid offset type, should be integer.\")\n if not isinstance(value, (int, long)):\n raise TypeError(\"Invalid value type, should be integer.\")\n if value < 0 or value > 0xff:\n raise ValueError(\"Value out of bounds.\")\n\n offset = self._adjust_offset(offset)\n self._validate_offset(offset, 1)\n self.mapping[offset:offset + 1] = struct.pack(\"B\", value)", "def write8(self, value, char_mode=False):\n \"\"\"Write 8-bit value in character or data mode. Value should be an int\n value from 0-255, and char_mode is True if character data or False if\n non-character data (default).\n \"\"\"\n # One millisecond delay to prevent writing too quickly.\n self._delay_microseconds(1000)\n # Set character / data bit.\n self._gpio.output(self._rs, char_mode)\n # Write upper 4 bits.\n self._gpio.output_pins({ self._d4: ((value >> 4) & 1) > 0,\n self._d5: ((value >> 5) & 1) > 0,\n self._d6: ((value >> 6) & 1) > 0,\n self._d7: ((value >> 7) & 1) > 0 })\n self._pulse_enable()\n # Write lower 4 bits.\n self._gpio.output_pins({ self._d4: (value & 1) > 0,\n self._d5: ((value >> 1) & 1) > 0,\n self._d6: ((value >> 2) & 1) > 0,\n self._d7: ((value >> 3) & 1) > 0 })\n self._pulse_enable()", "def _write8(self, reg, value):\n \"\"\"Write a 8-bit value to a register.\"\"\"\n self.i2c.write8(TCS34725_COMMAND_BIT | reg, value)", "def write(self, reg, value):\n \"\"\"Write raw byte value to the specified register\n\n :param reg: the register number (0-69, 250-255)\n :param value: byte value\n \"\"\"\n # TODO: check reg: 0-69, 250-255\n self.__check_range('register_value', value)\n logger.debug(\"Write '%s' to register '%s'\" % (value, reg))\n self.__bus.write_byte_data(self.__address, reg, value)", "def write_byte(self, address, value):\n \"\"\"Writes the byte to unaddressed register in a device. \"\"\"\n LOGGER.debug(\"Writing byte %s to device %s!\", bin(value), hex(address))\n return self.driver.write_byte(address, value)", "def write8(self, value, char_mode=False):\n \"\"\"Write 8-bit value in character or data mode. 
Value should be an int\n value from 0-255, and char_mode is True if character data or False if\n non-character data (default).\n \"\"\"\n # One millisecond delay to prevent writing too quickly.\n self._delay_microseconds(1000)\n # Set character / data bit.\n self._rs.set(char_mode)\n # Write upper 4 bits.\n self._d4.set(((value >> 4) & 1) > 0)\n self._d5.set(((value >> 5) & 1) > 0)\n self._d6.set(((value >> 6) & 1) > 0)\n self._d7.set(((value >> 7) & 1) > 0)\n self._pulse_enable()\n # Write lower 4 bits.\n self._d4.set((value & 1) > 0)\n self._d5.set(((value >> 1) & 1) > 0)\n self._d6.set(((value >> 2) & 1) > 0)\n self._d7.set(((value >> 3) & 1) > 0)\n self._pulse_enable()", "def write_quick(self):\n \"\"\"\n Send only the read / write bit\n \"\"\"\n self.bus.write_quick(self.address)\n self.log.debug(\"write_quick: Sent the read / write bit\")", "def read_raw_byte(self):\n \"\"\"\n Read an 8-bit byte directly from the bus\n \"\"\"\n result = self.bus.read_byte(self.address)\n self.log.debug(\"read_raw_byte: Read 0x%02X from the bus\" % result)\n return result", "protected void writeDirectBytes(byte[] b, int off, int len)\n\t\t\tthrows IOException {\n\t\tostream.write(b, off, len);\n\t\tlen += len;\n\t}", "def write_bit(self, b):\n \"\"\"\n Write a boolean value.\n\n \"\"\"\n if b:\n b = 1\n else:\n b = 0\n shift = self.bitcount % 8\n if shift == 0:\n self.bits.append(0)\n self.bits[-1] |= (b << shift)\n self.bitcount += 1" ]
[ 0.824142575263977, 0.7746727466583252, 0.7695673108100891, 0.7655920386314392, 0.7634241580963135, 0.760619580745697, 0.7577587962150574, 0.7562206983566284, 0.7498108744621277, 0.7486347556114197, 0.7472392916679382, 0.7456278800964355 ]
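Taken together, the positive snippets in these rows thinly wrap a bus handle exposing write_byte_data, write_word_data, write_byte, write_quick and close. A minimal sketch of the same calls used directly, assuming smbus2, bus number 1, and a hypothetical device at address 0x48 with a command register 0x01 (all assumptions, not taken from the snippets):

import logging
from smbus2 import SMBus

logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger("i2c")

ADDRESS = 0x48        # hypothetical device address
CTRL_REGISTER = 0x01  # hypothetical command register

bus = SMBus(1)  # bus number is an assumption
try:
    # 8-bit write to a command register (what write_byte wraps)
    bus.write_byte_data(ADDRESS, CTRL_REGISTER, 0x80)
    log.debug("Wrote 0x%02X to command register 0x%02X", 0x80, CTRL_REGISTER)

    # 16-bit word write to a command register (what write_word wraps)
    bus.write_word_data(ADDRESS, CTRL_REGISTER, 0xBEEF)

    # raw byte straight onto the bus, no register (what write_raw_byte wraps)
    bus.write_byte(ADDRESS, 0xFF)
finally:
    # what clean_up wraps
    bus.close()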