limix/numpy-sugar :: numpy_sugar/linalg/_kron.py :: kron_dot [python, train]

def kron_dot(A, B, C, out=None):
    r"""Kronecker product followed by dot product.

    Let :math:`\mathrm A`, :math:`\mathrm B`, and :math:`\mathrm C` be matrices of
    dimensions :math:`p\times p`, :math:`n\times d`, and :math:`d\times p`.
    It computes

    .. math::

        \text{unvec}((\mathrm A\otimes\mathrm B)\text{vec}(\mathrm C)) \in n\times p,

    which is equivalent to :math:`\mathrm B\mathrm C\mathrm A^{\intercal}`.

    Parameters
    ----------
    A : array_like
        Matrix A.
    B : array_like
        Matrix B.
    C : array_like
        Matrix C.
    out : :class:`numpy.ndarray`, optional
        Copy result to. Defaults to ``None``.

    Returns
    -------
    :class:`numpy.ndarray`
        unvec((A ⊗ B) vec(C))
    """
    from numpy import dot, zeros, asarray

    A = asarray(A)
    B = asarray(B)
    C = asarray(C)
    if out is None:
        out = zeros((B.shape[0], A.shape[0]))
    dot(B, dot(C, A.T), out=out)
    return out

https://github.com/limix/numpy-sugar/blob/4bdfa26913135c76ef3cd542a332f4e5861e948b/numpy_sugar/linalg/_kron.py#L1-L41
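A quick numerical check of the docstring identity (a sketch; it assumes numpy is installed and that kron_dot is exported from numpy_sugar.linalg, as the file path suggests):

import numpy as np
from numpy_sugar.linalg import kron_dot  # assumed export path

rng = np.random.RandomState(0)
p, n, d = 3, 4, 2
A, B, C = rng.randn(p, p), rng.randn(n, d), rng.randn(d, p)

R = kron_dot(A, B, C)
# vec/unvec are column-major, so order="F" reproduces the docstring identity.
vec_C = C.reshape(-1, order="F")
assert np.allclose(R, (np.kron(A, B) @ vec_C).reshape(n, p, order="F"))
assert np.allclose(R, B @ C @ A.T)  # the cheap equivalent actually computed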

limix/numpy-sugar :: numpy_sugar/linalg/property.py :: check_semidefinite_positiveness [python, train]

# Module-level imports in property.py (not shown in this record): empty_like,
# diag_indices_from, sqrt, and finfo from numpy; cholesky and LinAlgError
# from numpy.linalg.
def check_semidefinite_positiveness(A):
    """Check if ``A`` is a positive semidefinite matrix.

    Args:
        A (array_like): Matrix.

    Returns:
        bool: ``True`` if ``A`` is positive semidefinite; ``False`` otherwise.
    """
    B = empty_like(A)
    B[:] = A
    # A tiny jitter on the diagonal lets a singular (semidefinite) matrix
    # still pass the Cholesky factorization below.
    B[diag_indices_from(B)] += sqrt(finfo(float).eps)
    try:
        cholesky(B)
    except LinAlgError:
        return False
    return True

https://github.com/limix/numpy-sugar/blob/4bdfa26913135c76ef3cd542a332f4e5861e948b/numpy_sugar/linalg/property.py#L21-L37
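A usage sketch (assuming the function is importable from numpy_sugar.linalg): a rank-deficient Gram matrix is positive semidefinite but singular, so plain Cholesky would fail on it; the sqrt(eps) jitter is what lets it pass.

import numpy as np
from numpy_sugar.linalg import check_semidefinite_positiveness  # assumed export

X = np.random.RandomState(0).randn(2, 5)
G = X.T @ X  # 5x5 but rank 2: semidefinite and singular
assert check_semidefinite_positiveness(G)
assert not check_semidefinite_positiveness(-G)  # has negative eigenvalues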

limix/numpy-sugar :: numpy_sugar/linalg/property.py :: check_symmetry [python, train]

# Module-level imports (not shown): asanyarray, sqrt, and finfo from numpy.
def check_symmetry(A):
    """Check if ``A`` is a symmetric matrix.

    Args:
        A (array_like): Matrix.

    Returns:
        bool: ``True`` if ``A`` is symmetric; ``False`` otherwise.
    """
    A = asanyarray(A)
    if A.ndim != 2:
        raise ValueError("Checks symmetry only for bi-dimensional arrays.")
    if A.shape[0] != A.shape[1]:
        return False
    return abs(A - A.T).max() < sqrt(finfo(float).eps)

https://github.com/limix/numpy-sugar/blob/4bdfa26913135c76ef3cd542a332f4e5861e948b/numpy_sugar/linalg/property.py#L40-L56
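A usage sketch (same assumed import path). Note the three behaviours: a tolerance-based True/False for square input, False for non-square matrices, and ValueError for non-2D input.

import numpy as np
from numpy_sugar.linalg import check_symmetry  # assumed export

assert check_symmetry(np.array([[1.0, 2.0], [2.0, 1.0]]))
assert not check_symmetry(np.array([[1.0, 2.0], [0.0, 1.0]]))
assert not check_symmetry(np.zeros((2, 3)))  # non-square: False, no exception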

limix/numpy-sugar :: numpy_sugar/linalg/cho.py :: cho_solve [python, train]

# Module-level imports (not shown): asarray and empty from numpy.
def cho_solve(L, b):
    r"""Solve for Cholesky decomposition.

    Solve the linear equations :math:`\mathrm A \mathbf x = \mathbf b`,
    given the Cholesky factorization of :math:`\mathrm A`.

    Args:
        L (array_like): Lower triangular matrix.
        b (array_like): Right-hand side.

    Returns:
        :class:`numpy.ndarray`: The solution to the system
        :math:`\mathrm A \mathbf x = \mathbf b`.

    See Also
    --------
    numpy.linalg.cholesky : Cholesky decomposition.
    scipy.linalg.cho_solve : Solve linear equations given Cholesky
        factorization.
    """
    from scipy.linalg import cho_solve as sp_cho_solve

    L = asarray(L, float)
    b = asarray(b, float)
    if L.size == 0:
        if b.size != 0:
            raise ValueError("Dimension mismatch between L and b.")
        return empty(b.shape)
    return sp_cho_solve((L, True), b, check_finite=False)

https://github.com/limix/numpy-sugar/blob/4bdfa26913135c76ef3cd542a332f4e5861e948b/numpy_sugar/linalg/cho.py#L4-L32
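A round-trip sketch (assumed import path): factor a symmetric positive definite A with numpy.linalg.cholesky, then solve with the lower factor.

import numpy as np
from numpy_sugar.linalg import cho_solve  # assumed export

A = np.array([[4.0, 1.0], [1.0, 3.0]])  # symmetric positive definite
b = np.array([1.0, 2.0])
L = np.linalg.cholesky(A)  # lower-triangular factor, as cho_solve expects
x = cho_solve(L, b)
assert np.allclose(A @ x, b)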

opentargets/validator :: opentargets_validator/helpers.py :: file_or_resource [python, train]

# Module-level names (not shown): os, opentargets_validator, and res, which
# is presumably the pkg_resources module.
def file_or_resource(fname=None):
    '''Get a usable path for ``fname``: a file that exists relative to the
    current working directory wins; otherwise the name is resolved inside
    the package's ``resources`` folder.
    '''
    if fname is not None:
        filename = os.path.expanduser(fname)

        resource_package = opentargets_validator.__name__
        resource_path = os.path.sep.join(('resources', filename))

        abs_filename = os.path.join(os.path.abspath(os.getcwd()), filename) \
            if not os.path.isabs(filename) else filename

        return abs_filename if os.path.isfile(abs_filename) \
            else res.resource_filename(resource_package, resource_path)

https://github.com/opentargets/validator/blob/0a80c42fc02237c72e27a32e022c1d5d9f4e25ff/opentargets_validator/helpers.py#L157-L171
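A sketch of the lookup order; the schema file name below is hypothetical, and the call assumes opentargets_validator is installed:

from opentargets_validator.helpers import file_or_resource

# A file of this name in the current working directory wins; otherwise the
# name is resolved inside the package's bundled 'resources' folder.
path = file_or_resource('some_schema.json')  # hypothetical file name
print(path)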

limix/numpy-sugar :: numpy_sugar/linalg/tri.py :: stl [python, train]

# Module-level import (not shown): asarray from numpy.
def stl(A, b):
    r"""Shortcut to ``solve_triangular(A, b, lower=True, check_finite=False)``.

    Solve linear systems :math:`\mathrm A \mathbf x = \mathbf b` when
    :math:`\mathrm A` is a lower-triangular matrix.

    Args:
        A (array_like): A lower-triangular matrix.
        b (array_like): Ordinate values.

    Returns:
        :class:`numpy.ndarray`: Solution ``x``.

    See Also
    --------
    scipy.linalg.solve_triangular: Solve triangular linear equations.
    """
    from scipy.linalg import solve_triangular

    A = asarray(A, float)
    b = asarray(b, float)
    return solve_triangular(A, b, lower=True, check_finite=False)

https://github.com/limix/numpy-sugar/blob/4bdfa26913135c76ef3cd542a332f4e5861e948b/numpy_sugar/linalg/tri.py#L4-L25
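A usage sketch (assumed import path): forward substitution on a lower-triangular system.

import numpy as np
from numpy_sugar.linalg import stl  # assumed export

A = np.array([[2.0, 0.0], [1.0, 3.0]])  # lower triangular
b = np.array([2.0, 7.0])
x = stl(A, b)
assert np.allclose(x, [1.0, 2.0])
assert np.allclose(A @ x, b)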

DiamondLightSource/python-procrunner :: procrunner/__init__.py :: _windows_resolve [python, train]

def _windows_resolve(command):
    """
    Try and find the full path and file extension of the executable to run.
    This is so that e.g. calls to 'somescript' will point at 'somescript.cmd'
    without the need to set shell=True in the subprocess.

    If the executable contains periods it is a special case. Here the win32api
    call will fail to resolve the extension automatically, and it has to be
    done explicitly.

    :param command: The command array to be run, with the first element being
                    the command with or w/o path, with or w/o extension.
    :return: Returns the command array with the executable resolved with the
             correct extension. If the executable cannot be resolved for any
             reason the original command array is returned.
    """
    try:
        import win32api
    except ImportError:
        if (2, 8) < sys.version_info < (3, 5):
            logger.info(
                "Resolving executable names only supported on Python 2.7 and 3.5+"
            )
        else:
            logger.warning(
                "Could not resolve executable name: package win32api missing"
            )
        return command
    if not command or not isinstance(command[0], six.string_types):
        return command
    try:
        _, found_executable = win32api.FindExecutable(command[0])
        logger.debug("Resolved %s as %s", command[0], found_executable)
        return (found_executable,) + tuple(command[1:])
    except Exception as e:
        if not hasattr(e, "winerror"):
            raise
        # Keep this error message for later in case we fail to resolve the name
        logwarning = getattr(e, "strerror", str(e))
    if "." in command[0]:
        # Special case. The win32api may not properly check file extensions, so
        # try to resolve the executable explicitly.
        for extension in os.getenv("PATHEXT").split(os.pathsep):
            try:
                _, found_executable = win32api.FindExecutable(command[0] + extension)
                logger.debug("Resolved %s as %s", command[0], found_executable)
                return (found_executable,) + tuple(command[1:])
            except Exception as e:
                if not hasattr(e, "winerror"):
                    raise
    logger.warning("Error trying to resolve the executable: %s", logwarning)
    return command

https://github.com/DiamondLightSource/python-procrunner/blob/e11c446f97f28abceb507d21403259757f08be0a/procrunner/__init__.py#L277-L331

DiamondLightSource/python-procrunner :: procrunner/__init__.py :: run [python, train]

def run(
    command,
    timeout=None,
    debug=False,
    stdin=None,
    print_stdout=True,
    print_stderr=True,
    callback_stdout=None,
    callback_stderr=None,
    environment=None,
    environment_override=None,
    win32resolve=True,
    working_directory=None,
):
    """
    Run an external process.

    File system path objects (PEP-519) are accepted in the command, environment,
    and working directory arguments.

    :param array command: Command line to be run, specified as array.
    :param timeout: Terminate program execution after this many seconds.
    :param boolean debug: Enable further debug messages.
    :param stdin: Optional string that is passed to command stdin.
    :param boolean print_stdout: Pass stdout through to sys.stdout.
    :param boolean print_stderr: Pass stderr through to sys.stderr.
    :param callback_stdout: Optional function which is called for each
                            stdout line.
    :param callback_stderr: Optional function which is called for each
                            stderr line.
    :param dict environment: The full execution environment for the command.
    :param dict environment_override: Change environment variables from the
                                      current values for command execution.
    :param boolean win32resolve: If on Windows, find the appropriate executable
                                 first. This allows running of .bat, .cmd, etc.
                                 files without explicitly specifying their
                                 extension.
    :param string working_directory: If specified, run the executable from
                                     within this working directory.
    :return: A ReturnObject() containing the executed command, stdout and stderr
             (both as bytestrings), and the exitcode. Further values such as
             process runtime can be accessed as dictionary values.
    """
    time_start = time.strftime("%Y-%m-%d %H:%M:%S GMT", time.gmtime())
    logger.debug("Starting external process: %s", command)

    if stdin is None:
        stdin_pipe = None
    else:
        assert sys.platform != "win32", "stdin argument not supported on Windows"
        stdin_pipe = subprocess.PIPE

    start_time = timeit.default_timer()
    if timeout is not None:
        max_time = start_time + timeout

    if environment is not None:
        env = {key: _path_resolve(environment[key]) for key in environment}
    else:
        env = os.environ
    if environment_override:
        env = copy.copy(env)
        env.update(
            {
                key: str(_path_resolve(environment_override[key]))
                for key in environment_override
            }
        )

    command = tuple(_path_resolve(part) for part in command)
    if win32resolve and sys.platform == "win32":
        command = _windows_resolve(command)

    p = subprocess.Popen(
        command,
        shell=False,
        cwd=_path_resolve(working_directory),
        env=env,
        stdin=stdin_pipe,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )

    thread_pipe_pool = []
    notifyee, notifier = Pipe(False)
    thread_pipe_pool.append(notifyee)
    stdout = _NonBlockingStreamReader(
        p.stdout,
        output=print_stdout,
        debug=debug,
        notify=notifier.close,
        callback=callback_stdout,
    )
    notifyee, notifier = Pipe(False)
    thread_pipe_pool.append(notifyee)
    stderr = _NonBlockingStreamReader(
        p.stderr,
        output=print_stderr,
        debug=debug,
        notify=notifier.close,
        callback=callback_stderr,
    )
    if stdin is not None:
        notifyee, notifier = Pipe(False)
        thread_pipe_pool.append(notifyee)
        stdin = _NonBlockingStreamWriter(
            p.stdin, data=stdin, debug=debug, notify=notifier.close
        )

    timeout_encountered = False

    while (p.returncode is None) and (
        (timeout is None) or (timeit.default_timer() < max_time)
    ):
        if debug and timeout is not None:
            logger.debug(
                "still running (T%.2fs)" % (timeit.default_timer() - max_time)
            )

        # wait for some time or until a stream is closed
        try:
            if thread_pipe_pool:
                # Wait for up to 0.5 seconds or for a signal on a remaining
                # stream, which could indicate that the process has terminated.
                try:
                    event = thread_pipe_pool[0].poll(0.5)
                except IOError as e:
                    # on Windows this raises "IOError: [Errno 109] The pipe has
                    # been ended", which is for all intents and purposes
                    # equivalent to a True return value.
                    if e.errno != 109:
                        raise
                    event = True
                if event:
                    # One-shot, so remove stream and watch remaining streams
                    thread_pipe_pool.pop(0)
                    if debug:
                        logger.debug("Event received from stream thread")
            else:
                time.sleep(0.5)
        except KeyboardInterrupt:
            p.kill()
            # if user pressed Ctrl+C we won't be able to produce a proper
            # report anyway, but at least make sure the child process dies
            # with us
            raise

        # check if process is still running
        p.poll()

    if p.returncode is None:
        # timeout condition
        timeout_encountered = True
        if debug:
            logger.debug("timeout (T%.2fs)" % (timeit.default_timer() - max_time))

        # send terminate signal and wait some time for buffers to be read
        p.terminate()
        if thread_pipe_pool:
            thread_pipe_pool[0].poll(0.5)
        if not stdout.has_finished() or not stderr.has_finished():
            time.sleep(2)
        p.poll()

    if p.returncode is None:
        # thread still alive
        # send kill signal and wait some more time for buffers to be read
        p.kill()
        if thread_pipe_pool:
            thread_pipe_pool[0].poll(0.5)
        if not stdout.has_finished() or not stderr.has_finished():
            time.sleep(5)
        p.poll()

    if p.returncode is None:
        raise RuntimeError("Process won't terminate")

    runtime = timeit.default_timer() - start_time
    if timeout is not None:
        logger.debug(
            "Process ended after %.1f seconds with exit code %d (T%.2fs)"
            % (runtime, p.returncode, timeit.default_timer() - max_time)
        )
    else:
        logger.debug(
            "Process ended after %.1f seconds with exit code %d"
            % (runtime, p.returncode)
        )

    stdout = stdout.get_output()
    stderr = stderr.get_output()
    time_end = time.strftime("%Y-%m-%d %H:%M:%S GMT", time.gmtime())

    result = ReturnObject(
        {
            "exitcode": p.returncode,
            "command": command,
            "stdout": stdout,
            "stderr": stderr,
            "timeout": timeout_encountered,
            "runtime": runtime,
            "time_start": time_start,
            "time_end": time_end,
        }
    )
    if stdin is not None:
        result.update(
            {
                "stdin_bytes_sent": stdin.bytes_sent(),
                "stdin_bytes_remain": stdin.bytes_remaining(),
            }
        )

    return result

https://github.com/DiamondLightSource/python-procrunner/blob/e11c446f97f28abceb507d21403259757f08be0a/procrunner/__init__.py#L381-L590
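A minimal call sketch, using only behaviour stated in the docstring (bytestring stdout/stderr, dictionary-style access on the ReturnObject); the command itself is an arbitrary example:

import procrunner

result = procrunner.run(["ls", "-l"], timeout=10, print_stdout=False)
print(result["exitcode"], result["runtime"])
print(result["stdout"].decode())  # stdout/stderr are bytestrings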

DiamondLightSource/python-procrunner :: procrunner/__init__.py :: run_process_dummy [python, train]

def run_process_dummy(command, **kwargs):
    """
    A stand-in function that returns a valid result dictionary indicating a
    successful execution. The external process is not run.
    """
    warnings.warn(
        "procrunner.run_process_dummy() is deprecated",
        DeprecationWarning,
        stacklevel=2,
    )

    time_start = time.strftime("%Y-%m-%d %H:%M:%S GMT", time.gmtime())
    logger.info("run_process is disabled. Requested command: %s", command)

    result = ReturnObject(
        {
            "exitcode": 0,
            "command": command,
            "stdout": "",
            "stderr": "",
            "timeout": False,
            "runtime": 0,
            "time_start": time_start,
            "time_end": time_start,
        }
    )
    if kwargs.get("stdin") is not None:
        result.update(
            {"stdin_bytes_sent": len(kwargs["stdin"]), "stdin_bytes_remain": 0}
        )
    return result

https://github.com/DiamondLightSource/python-procrunner/blob/e11c446f97f28abceb507d21403259757f08be0a/procrunner/__init__.py#L593-L621

DiamondLightSource/python-procrunner :: procrunner/__init__.py :: run_process [python, train]

def run_process(*args, **kwargs):
    """API used up to version 0.2.0."""
    warnings.warn(
        "procrunner.run_process() is deprecated and has been renamed to run()",
        DeprecationWarning,
        stacklevel=2,
    )
    return run(*args, **kwargs)

https://github.com/DiamondLightSource/python-procrunner/blob/e11c446f97f28abceb507d21403259757f08be0a/procrunner/__init__.py#L624-L631

DiamondLightSource/python-procrunner :: procrunner/__init__.py :: _LineAggregator.add [python, train]

def add(self, data):
    """
    Add a single character to buffer. If one or more full lines are found,
    print them (if desired) and pass to callback function.
    """
    data = self._decoder.decode(data)
    if not data:
        return
    self._buffer += data
    if "\n" in data:
        # Data arrives one character at a time and the buffer is flushed at
        # every newline, so the buffer holds exactly one "\n" here and rsplit
        # yields exactly two parts.
        to_print, remainder = self._buffer.rsplit("\n")
        if self._print:
            try:
                print(to_print)
            except UnicodeEncodeError:
                print(to_print.encode(sys.getdefaultencoding(), errors="replace"))
                if not hasattr(self, "_warned"):
                    logger.warning("output encoding error, characters replaced")
                    setattr(self, "_warned", True)
        if self._callback:
            self._callback(to_print)
        self._buffer = remainder

https://github.com/DiamondLightSource/python-procrunner/blob/e11c446f97f28abceb507d21403259757f08be0a/procrunner/__init__.py#L77-L98
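The decoding behaviour the method relies on can be seen with an incremental decoder of the kind self._decoder appears to be (a sketch, assuming a UTF-8 incremental decoder from the codecs module): feeding one byte at a time yields empty strings until a multi-byte character is complete, which is why add() can return early on empty output.

import codecs

decoder = codecs.getincrementaldecoder("utf-8")("replace")
for byte in "héllo\n".encode("utf-8"):
    chunk = decoder.decode(bytes([byte]))
    print(repr(chunk))  # '' for the first byte of 'é', then 'é' arrives whole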

DiamondLightSource/python-procrunner :: procrunner/__init__.py :: _LineAggregator.flush [python, train]

def flush(self):
    """Print/send any remaining data to callback function."""
    self._buffer += self._decoder.decode(b"", final=True)
    if self._buffer:
        if self._print:
            print(self._buffer)
        if self._callback:
            self._callback(self._buffer)
    self._buffer = ""

https://github.com/DiamondLightSource/python-procrunner/blob/e11c446f97f28abceb507d21403259757f08be0a/procrunner/__init__.py#L100-L108

DiamondLightSource/python-procrunner :: procrunner/__init__.py :: _NonBlockingStreamReader.get_output [python, train]

def get_output(self):
    """
    Retrieve the stored data in full.
    This call may block if the reading thread has not yet terminated.
    """
    self._closing = True
    if not self.has_finished():
        if self._debug:
            # Main thread overtook stream reading thread.
            underrun_debug_timer = timeit.default_timer()
            logger.warning("NBSR underrun")
        self._thread.join()
        if not self.has_finished():
            if self._debug:
                logger.debug(
                    "NBSR join after %f seconds, underrun not resolved"
                    % (timeit.default_timer() - underrun_debug_timer)
                )
            raise Exception("thread did not terminate")
        if self._debug:
            logger.debug(
                "NBSR underrun resolved after %f seconds"
                % (timeit.default_timer() - underrun_debug_timer)
            )
    if self._closed:
        raise Exception("streamreader double-closed")
    self._closed = True
    data = self._buffer.getvalue()
    self._buffer.close()
    return data

https://github.com/DiamondLightSource/python-procrunner/blob/e11c446f97f28abceb507d21403259757f08be0a/procrunner/__init__.py#L173-L202

limix/numpy-sugar :: numpy_sugar/linalg/diag.py :: trace2 [python, train]

# Module-level imports (not shown): asarray from numpy; _sum is presumably
# numpy's sum under an alias.
def trace2(A, B):
    r"""Trace of :math:`\mathrm A \mathrm B^\intercal`.

    Args:
        A (array_like): Left-hand side.
        B (array_like): Right-hand side.

    Returns:
        float: Trace of :math:`\mathrm A \mathrm B^\intercal`.
    """
    A = asarray(A, float)
    B = asarray(B, float)

    layout_error = "Wrong matrix layout."

    if not (len(A.shape) == 2 and len(B.shape) == 2):
        raise ValueError(layout_error)

    if not (A.shape[1] == B.shape[0] and A.shape[0] == B.shape[1]):
        raise ValueError(layout_error)

    return _sum(A.T * B)

https://github.com/limix/numpy-sugar/blob/4bdfa26913135c76ef3cd542a332f4e5861e948b/numpy_sugar/linalg/diag.py#L5-L26
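Note the shape checks require B to be shaped like Aᵀ, under which the returned sum of Aᵀ ∘ B equals trace(A @ B); the docstring's Bᵀ appears to assume B is passed already transposed. A small check (assumed import path):

import numpy as np
from numpy_sugar.linalg import trace2  # assumed export

A = np.array([[1.0, 2.0], [3.0, 4.0]])
B = np.array([[5.0, 6.0], [7.0, 8.0]])
assert np.isclose(trace2(A, B), np.trace(A @ B))  # 69.0, no product formed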

limix/numpy-sugar :: numpy_sugar/linalg/diag.py :: sum2diag [python, train]

# Module-level imports (not shown): asarray, copy, copyto, and einsum
# from numpy.
def sum2diag(A, D, out=None):
    r"""Add values ``D`` to the diagonal of matrix ``A``.

    Args:
        A (array_like): Left-hand side.
        D (array_like or float): Values to add.
        out (:class:`numpy.ndarray`, optional): copy result to.

    Returns:
        :class:`numpy.ndarray`: Resulting matrix.
    """
    A = asarray(A, float)
    D = asarray(D, float)
    if out is None:
        out = copy(A)
    else:
        copyto(out, A)
    einsum("ii->i", out)[:] += D
    return out

https://github.com/limix/numpy-sugar/blob/4bdfa26913135c76ef3cd542a332f4e5861e948b/numpy_sugar/linalg/diag.py#L29-L47
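A usage sketch (assumed import path). The einsum("ii->i", out) call returns a writable view of out's diagonal, so the addition happens in place without touching A:

import numpy as np
from numpy_sugar.linalg import sum2diag  # assumed export

A = np.zeros((3, 3))
R = sum2diag(A, [1.0, 2.0, 3.0])
assert np.allclose(np.diag(R), [1.0, 2.0, 3.0])
assert np.allclose(A, 0.0)  # A is copied, not modified, when out is None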

ikegami-yukino/jaconv :: jaconv/jaconv.py :: hira2kata [python, train]

# _convert, _exclude_ignorechar, and the *_TABLE mappings used below are
# module-internal helpers of jaconv.py.
def hira2kata(text, ignore=''):
    """Convert Hiragana to Full-width (Zenkaku) Katakana.

    Parameters
    ----------
    text : str
        Hiragana string.
    ignore : str
        Characters to be ignored in converting.

    Return
    ------
    str
        Katakana string.

    Examples
    --------
    >>> print(jaconv.hira2kata('ともえまみ'))
    トモエマミ
    >>> print(jaconv.hira2kata('まどまぎ', ignore='ど'))
    マどマギ
    """
    if ignore:
        h2k_map = _exclude_ignorechar(ignore, H2K_TABLE.copy())
        return _convert(text, h2k_map)
    return _convert(text, H2K_TABLE)

https://github.com/ikegami-yukino/jaconv/blob/5319e4c6b4676ab27b5e9ebec9a299d09a5a62d7/jaconv/jaconv.py#L21-L46

ikegami-yukino/jaconv :: jaconv/jaconv.py :: hira2hkata [python, train]

def hira2hkata(text, ignore=''):
    """Convert Hiragana to Half-width (Hankaku) Katakana

    Parameters
    ----------
    text : str
        Hiragana string.
    ignore : str
        Characters to be ignored in converting.

    Return
    ------
    str
        Half-width Katakana string.

    Examples
    --------
    >>> print(jaconv.hira2hkata('ともえまみ'))
    ﾄﾓｴﾏﾐ
    >>> print(jaconv.hira2hkata('ともえまみ', ignore='み'))
    ﾄﾓｴﾏみ
    """
    if ignore:
        h2hk_map = _exclude_ignorechar(ignore, H2HK_TABLE.copy())
        return _convert(text, h2hk_map)
    return _convert(text, H2HK_TABLE)

https://github.com/ikegami-yukino/jaconv/blob/5319e4c6b4676ab27b5e9ebec9a299d09a5a62d7/jaconv/jaconv.py#L49-L74

ikegami-yukino/jaconv :: jaconv/jaconv.py :: kata2hira [python, train]

def kata2hira(text, ignore=''):
    """Convert Full-width Katakana to Hiragana

    Parameters
    ----------
    text : str
        Full-width Katakana string.
    ignore : str
        Characters to be ignored in converting.

    Return
    ------
    str
        Hiragana string.

    Examples
    --------
    >>> print(jaconv.kata2hira('巴マミ'))
    巴まみ
    >>> print(jaconv.kata2hira('マミサン', ignore='ン'))
    まみさン
    """
    if ignore:
        k2h_map = _exclude_ignorechar(ignore, K2H_TABLE.copy())
        return _convert(text, k2h_map)
    return _convert(text, K2H_TABLE)

https://github.com/ikegami-yukino/jaconv/blob/5319e4c6b4676ab27b5e9ebec9a299d09a5a62d7/jaconv/jaconv.py#L77-L102

ikegami-yukino/jaconv :: jaconv/jaconv.py :: h2z [python, train]

def h2z(text, ignore='', kana=True, ascii=False, digit=False):
    """Convert Half-width (Hankaku) Katakana to Full-width (Zenkaku) Katakana

    Parameters
    ----------
    text : str
        Half-width Katakana string.
    ignore : str
        Characters to be ignored in converting.
    kana : bool
        Either converting Kana or not.
    ascii : bool
        Either converting ascii or not.
    digit : bool
        Either converting digit or not.

    Return
    ------
    str
        Full-width Katakana string.

    Examples
    --------
    >>> print(jaconv.h2z('ﾃｨﾛﾌｨﾅｰﾚ'))
    ティロフィナーレ
    >>> print(jaconv.h2z('ﾃｨﾛﾌｨﾅｰﾚ', ignore='ｨ'))
    テｨロフｨナーレ
    >>> print(jaconv.h2z('abcd', ascii=True))
    ａｂｃｄ
    >>> print(jaconv.h2z('1234', digit=True))
    １２３４
    """
    def _conv_dakuten(text):
        """Convert Hankaku Dakuten Kana to Zenkaku Dakuten Kana
        """
        text = text.replace("ｶﾞ", "ガ").replace("ｷﾞ", "ギ")
        text = text.replace("ｸﾞ", "グ").replace("ｹﾞ", "ゲ")
        text = text.replace("ｺﾞ", "ゴ").replace("ｻﾞ", "ザ")
        text = text.replace("ｼﾞ", "ジ").replace("ｽﾞ", "ズ")
        text = text.replace("ｾﾞ", "ゼ").replace("ｿﾞ", "ゾ")
        text = text.replace("ﾀﾞ", "ダ").replace("ﾁﾞ", "ヂ")
        text = text.replace("ﾂﾞ", "ヅ").replace("ﾃﾞ", "デ")
        text = text.replace("ﾄﾞ", "ド").replace("ﾊﾞ", "バ")
        text = text.replace("ﾋﾞ", "ビ").replace("ﾌﾞ", "ブ")
        text = text.replace("ﾍﾞ", "ベ").replace("ﾎﾞ", "ボ")
        text = text.replace("ﾊﾟ", "パ").replace("ﾋﾟ", "ピ")
        text = text.replace("ﾌﾟ", "プ").replace("ﾍﾟ", "ペ")
        return text.replace("ﾎﾟ", "ポ").replace("ｳﾞ", "ヴ")

    if ascii:
        if digit:
            if kana:
                h2z_map = H2Z_ALL
            else:
                h2z_map = H2Z_AD
        elif kana:
            h2z_map = H2Z_AK
        else:
            h2z_map = H2Z_A
    elif digit:
        if kana:
            h2z_map = H2Z_DK
        else:
            h2z_map = H2Z_D
    else:
        h2z_map = H2Z_K
    if kana:
        text = _conv_dakuten(text)
    if ignore:
        h2z_map = _exclude_ignorechar(ignore, h2z_map.copy())
    return _convert(text, h2z_map)

https://github.com/ikegami-yukino/jaconv/blob/5319e4c6b4676ab27b5e9ebec9a299d09a5a62d7/jaconv/jaconv.py#L105-L175

ikegami-yukino/jaconv :: jaconv/jaconv.py :: z2h [python, train]

def z2h(text, ignore='', kana=True, ascii=False, digit=False):
    """Convert Full-width (Zenkaku) Katakana to Half-width (Hankaku) Katakana

    Parameters
    ----------
    text : str
        Full-width Katakana string.
    ignore : str
        Characters to be ignored in converting.
    kana : bool
        Either converting Kana or not.
    ascii : bool
        Either converting ascii or not.
    digit : bool
        Either converting digit or not.

    Return
    ------
    str
        Half-width Katakana string.

    Examples
    --------
    >>> print(jaconv.z2h('ティロフィナーレ'))
    ﾃｨﾛﾌｨﾅｰﾚ
    >>> print(jaconv.z2h('ティロフィナーレ', ignore='ィ'))
    ﾃィﾛﾌィﾅｰﾚ
    >>> print(jaconv.z2h('ａｂｃｄ', ascii=True))
    abcd
    >>> print(jaconv.z2h('１２３４', digit=True))
    1234
    """
    if ascii:
        if digit:
            if kana:
                z2h_map = Z2H_ALL
            else:
                z2h_map = Z2H_AD
        elif kana:
            z2h_map = Z2H_AK
        else:
            z2h_map = Z2H_A
    elif digit:
        if kana:
            z2h_map = Z2H_DK
        else:
            z2h_map = Z2H_D
    else:
        z2h_map = Z2H_K
    if ignore:
        z2h_map = _exclude_ignorechar(ignore, z2h_map.copy())
    return _convert(text, z2h_map)

https://github.com/ikegami-yukino/jaconv/blob/5319e4c6b4676ab27b5e9ebec9a299d09a5a62d7/jaconv/jaconv.py#L178-L229

ikegami-yukino/jaconv :: jaconv/jaconv.py :: normalize [python, train]

def normalize(text, mode='NFKC', ignore=''):
    """Convert Half-width (Hankaku) Katakana to Full-width (Zenkaku) Katakana,
    Full-width (Zenkaku) ASCII and DIGIT to Half-width (Hankaku) ASCII
    and DIGIT.
    Additionally, Full-width wave dash (〜) etc. are normalized.

    Parameters
    ----------
    text : str
        Source string.
    mode : str
        Unicode normalization mode.
    ignore : str
        Characters to be ignored in converting.

    Return
    ------
    str
        Normalized string.

    Examples
    --------
    >>> print(jaconv.normalize('ティロ・フィナ〜レ', 'NFKC'))
    ティロ・フィナーレ
    """
    text = text.replace('〜', 'ー').replace('~', 'ー')
    text = text.replace("’", "'").replace('”', '"').replace('“', '``')
    text = text.replace('―', '-').replace('‐', '-').replace('˗', '-').replace('֊', '-')
    text = text.replace('‐', '-').replace('‑', '-').replace('‒', '-').replace('–', '-')
    text = text.replace('⁃', '-').replace('⁻', '-').replace('₋', '-').replace('−', '-')
    text = text.replace('﹣', 'ー').replace('－', 'ー').replace('—', 'ー').replace('―', 'ー')
    text = text.replace('━', 'ー').replace('─', 'ー')
    return unicodedata.normalize(mode, text)

https://github.com/ikegami-yukino/jaconv/blob/5319e4c6b4676ab27b5e9ebec9a299d09a5a62d7/jaconv/jaconv.py#L232-L264
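A short sketch (jaconv assumed installed). The replace chain above folds the wave dash into the long-vowel mark before the Unicode pass, and NFKC itself widens half-width katakana.

import jaconv

# '〜' becomes 'ー' via the replace chain; NFKC widens the half-width kana.
print(jaconv.normalize('ティロ・フィナ〜レ'))  # ティロ・フィナーレ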
ikegami-yukino/jaconv
jaconv/jaconv.py
kana2alphabet
def kana2alphabet(text): """Convert Hiragana to hepburn-style alphabets Parameters ---------- text : str Hiragana string. Return ------ str Hepburn-style alphabets string. Examples -------- >>> print(jaconv.kana2alphabet('まみさん')) mamisan """ text = text.replace('きゃ', 'kya').replace('きゅ', 'kyu').replace('きょ', 'kyo') text = text.replace('ぎゃ', 'gya').replace('ぎゅ', 'gyu').replace('ぎょ', 'gyo') text = text.replace('しゃ', 'sha').replace('しゅ', 'shu').replace('しょ', 'sho') text = text.replace('じゃ', 'ja').replace('じゅ', 'ju').replace('じょ', 'jo') text = text.replace('ちゃ', 'cha').replace('ちゅ', 'chu').replace('ちょ', 'cho') text = text.replace('にゃ', 'nya').replace('にゅ', 'nyu').replace('にょ', 'nyo') text = text.replace('ふぁ', 'fa').replace('ふぃ', 'fi').replace('ふぇ', 'fe') text = text.replace('ふぉ', 'fo') text = text.replace('ひゃ', 'hya').replace('ひゅ', 'hyu').replace('ひょ', 'hyo') text = text.replace('みゃ', 'mya').replace('みゅ', 'myu').replace('みょ', 'myo') text = text.replace('りゃ', 'rya').replace('りゅ', 'ryu').replace('りょ', 'ryo') text = text.replace('びゃ', 'bya').replace('びゅ', 'byu').replace('びょ', 'byo') text = text.replace('ぴゃ', 'pya').replace('ぴゅ', 'pyu').replace('ぴょ', 'pyo') text = text.replace('が', 'ga').replace('ぎ', 'gi').replace('ぐ', 'gu') text = text.replace('げ', 'ge').replace('ご', 'go').replace('ざ', 'za') text = text.replace('じ', 'ji').replace('ず', 'zu').replace('ぜ', 'ze') text = text.replace('ぞ', 'zo').replace('だ', 'da').replace('ぢ', 'ji') text = text.replace('づ', 'zu').replace('で', 'de').replace('ど', 'do') text = text.replace('ば', 'ba').replace('び', 'bi').replace('ぶ', 'bu') text = text.replace('べ', 'be').replace('ぼ', 'bo').replace('ぱ', 'pa') text = text.replace('ぴ', 'pi').replace('ぷ', 'pu').replace('ぺ', 'pe') text = text.replace('ぽ', 'po') text = text.replace('か', 'ka').replace('き', 'ki').replace('く', 'ku') text = text.replace('け', 'ke').replace('こ', 'ko').replace('さ', 'sa') text = text.replace('し', 'shi').replace('す', 'su').replace('せ', 'se') text = text.replace('そ', 'so').replace('た', 'ta').replace('ち', 'chi') text = text.replace('つ', 'tsu').replace('て', 'te').replace('と', 'to') text = text.replace('な', 'na').replace('に', 'ni').replace('ぬ', 'nu') text = text.replace('ね', 'ne').replace('の', 'no').replace('は', 'ha') text = text.replace('ひ', 'hi').replace('ふ', 'fu').replace('へ', 'he') text = text.replace('ほ', 'ho').replace('ま', 'ma').replace('み', 'mi') text = text.replace('む', 'mu').replace('め', 'me').replace('も', 'mo') text = text.replace('ら', 'ra').replace('り', 'ri').replace('る', 'ru') text = text.replace('れ', 're').replace('ろ', 'ro') text = text.replace('や', 'ya').replace('ゆ', 'yu').replace('よ', 'yo') text = text.replace('わ', 'wa').replace('ゐ', 'wi').replace('を', 'wo') text = text.replace('ゑ', 'we') text = _convert(text, KANA2HEP) while 'っ' in text: text = list(text) tsu_pos = text.index('っ') if len(text) <= tsu_pos + 1: return ''.join(text[:-1]) + 'xtsu' if tsu_pos == 0: text[tsu_pos] = 'xtsu' else: text[tsu_pos] = text[tsu_pos + 1] text = ''.join(text) return text
python
def kana2alphabet(text): """Convert Hiragana to hepburn-style alphabets Parameters ---------- text : str Hiragana string. Return ------ str Hepburn-style alphabets string. Examples -------- >>> print(jaconv.kana2alphabet('まみさん')) mamisan """ text = text.replace('きゃ', 'kya').replace('きゅ', 'kyu').replace('きょ', 'kyo') text = text.replace('ぎゃ', 'gya').replace('ぎゅ', 'gyu').replace('ぎょ', 'gyo') text = text.replace('しゃ', 'sha').replace('しゅ', 'shu').replace('しょ', 'sho') text = text.replace('じゃ', 'ja').replace('じゅ', 'ju').replace('じょ', 'jo') text = text.replace('ちゃ', 'cha').replace('ちゅ', 'chu').replace('ちょ', 'cho') text = text.replace('にゃ', 'nya').replace('にゅ', 'nyu').replace('にょ', 'nyo') text = text.replace('ふぁ', 'fa').replace('ふぃ', 'fi').replace('ふぇ', 'fe') text = text.replace('ふぉ', 'fo') text = text.replace('ひゃ', 'hya').replace('ひゅ', 'hyu').replace('ひょ', 'hyo') text = text.replace('みゃ', 'mya').replace('みゅ', 'myu').replace('みょ', 'myo') text = text.replace('りゃ', 'rya').replace('りゅ', 'ryu').replace('りょ', 'ryo') text = text.replace('びゃ', 'bya').replace('びゅ', 'byu').replace('びょ', 'byo') text = text.replace('ぴゃ', 'pya').replace('ぴゅ', 'pyu').replace('ぴょ', 'pyo') text = text.replace('が', 'ga').replace('ぎ', 'gi').replace('ぐ', 'gu') text = text.replace('げ', 'ge').replace('ご', 'go').replace('ざ', 'za') text = text.replace('じ', 'ji').replace('ず', 'zu').replace('ぜ', 'ze') text = text.replace('ぞ', 'zo').replace('だ', 'da').replace('ぢ', 'ji') text = text.replace('づ', 'zu').replace('で', 'de').replace('ど', 'do') text = text.replace('ば', 'ba').replace('び', 'bi').replace('ぶ', 'bu') text = text.replace('べ', 'be').replace('ぼ', 'bo').replace('ぱ', 'pa') text = text.replace('ぴ', 'pi').replace('ぷ', 'pu').replace('ぺ', 'pe') text = text.replace('ぽ', 'po') text = text.replace('か', 'ka').replace('き', 'ki').replace('く', 'ku') text = text.replace('け', 'ke').replace('こ', 'ko').replace('さ', 'sa') text = text.replace('し', 'shi').replace('す', 'su').replace('せ', 'se') text = text.replace('そ', 'so').replace('た', 'ta').replace('ち', 'chi') text = text.replace('つ', 'tsu').replace('て', 'te').replace('と', 'to') text = text.replace('な', 'na').replace('に', 'ni').replace('ぬ', 'nu') text = text.replace('ね', 'ne').replace('の', 'no').replace('は', 'ha') text = text.replace('ひ', 'hi').replace('ふ', 'fu').replace('へ', 'he') text = text.replace('ほ', 'ho').replace('ま', 'ma').replace('み', 'mi') text = text.replace('む', 'mu').replace('め', 'me').replace('も', 'mo') text = text.replace('ら', 'ra').replace('り', 'ri').replace('る', 'ru') text = text.replace('れ', 're').replace('ろ', 'ro') text = text.replace('や', 'ya').replace('ゆ', 'yu').replace('よ', 'yo') text = text.replace('わ', 'wa').replace('ゐ', 'wi').replace('を', 'wo') text = text.replace('ゑ', 'we') text = _convert(text, KANA2HEP) while 'っ' in text: text = list(text) tsu_pos = text.index('っ') if len(text) <= tsu_pos + 1: return ''.join(text[:-1]) + 'xtsu' if tsu_pos == 0: text[tsu_pos] = 'xtsu' else: text[tsu_pos] = text[tsu_pos + 1] text = ''.join(text) return text
[ "def", "kana2alphabet", "(", "text", ")", ":", "text", "=", "text", ".", "replace", "(", "'きゃ', 'k", "y", "').re", "p", "l", "ace('きゅ", "'", ", 'kyu')", ".", "eplac", "e", "(", "'きょ', '", "k", "yo')", "", "", "", "text", "=", "text", ".", "replace"...
Convert Hiragana to hepburn-style alphabets Parameters ---------- text : str Hiragana string. Return ------ str Hepburn-style alphabets string. Examples -------- >>> print(jaconv.kana2alphabet('まみさん')) mamisan
[ "Convert", "Hiragana", "to", "hepburn", "-", "style", "alphabets" ]
train
https://github.com/ikegami-yukino/jaconv/blob/5319e4c6b4676ab27b5e9ebec9a299d09a5a62d7/jaconv/jaconv.py#L267-L333
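A small example (jaconv assumed). The while-loop at the end rewrites each sokuon 'っ' as a copy of the consonant that follows it, which is what produces the doubled letter here.

import jaconv

print(jaconv.kana2alphabet('がっこう'))  # gakkou: 'っ' copies the following 'k'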
ikegami-yukino/jaconv
jaconv/jaconv.py
alphabet2kana
def alphabet2kana(text): """Convert alphabets to Hiragana Parameters ---------- text : str Alphabets string. Return ------ str Hiragana string. Examples -------- >>> print(jaconv.alphabet2kana('mamisan')) まみさん """ text = text.replace('kya', 'きゃ').replace('kyu', 'きゅ').replace('kyo', 'きょ') text = text.replace('gya', 'ぎゃ').replace('gyu', 'ぎゅ').replace('gyo', 'ぎょ') text = text.replace('sha', 'しゃ').replace('shu', 'しゅ').replace('sho', 'しょ') text = text.replace('zya', 'じゃ').replace('zyu', 'じゅ').replace('zyo', 'じょ') text = text.replace('zyi', 'じぃ').replace('zye', 'じぇ') text = text.replace('ja', 'じゃ').replace('ju', 'じゅ').replace('jo', 'じょ') text = text.replace('jya', 'じゃ').replace('jyu', 'じゅ').replace('jyo', 'じょ') text = text.replace('cha', 'ちゃ').replace('chu', 'ちゅ').replace('cho', 'ちょ') text = text.replace('tya', 'ちゃ').replace('tyu', 'ちゅ').replace('tyo', 'ちょ') text = text.replace('nya', 'にゃ').replace('nyu', 'にゅ').replace('nyo', 'にょ') text = text.replace('hya', 'ひゃ').replace('hyu', 'ひゅ').replace('hyo', 'ひょ') text = text.replace('mya', 'みゃ').replace('myu', 'みゅ').replace('myo', 'みょ') text = text.replace('rya', 'りゃ').replace('ryu', 'りゅ').replace('ryo', 'りょ') text = text.replace('bya', 'びゃ').replace('byu', 'びゅ').replace('byo', 'びょ') text = text.replace('pya', 'ぴゃ').replace('pyu', 'ぴゅ').replace('pyo', 'ぴょ') text = text.replace('oh', 'おお') text = text.replace('ga', 'が').replace('gi', 'ぎ').replace('gu', 'ぐ') text = text.replace('ge', 'げ').replace('go', 'ご').replace('za', 'ざ') text = text.replace('ji', 'じ').replace('zu', 'ず').replace('ze', 'ぜ') text = text.replace('zo', 'ぞ').replace('da', 'だ').replace('ji', 'ぢ').replace('di', 'ぢ') text = text.replace('va', 'ゔぁ').replace('vi', 'ゔぃ').replace('vu', 'ゔ') text = text.replace('ve', 'ゔぇ').replace('vo', 'ゔぉ').replace('vya', 'ゔゃ') text = text.replace('vyi', 'ゔぃ').replace('vyu', 'ゔゅ').replace('vye', 'ゔぇ') text = text.replace('vyo', 'ゔょ') text = text.replace('zu', 'づ').replace('de', 'で').replace('do', 'ど') text = text.replace('ba', 'ば').replace('bi', 'び').replace('bu', 'ぶ') text = text.replace('be', 'べ').replace('bo', 'ぼ').replace('pa', 'ぱ') text = text.replace('pi', 'ぴ').replace('pu', 'ぷ').replace('pe', 'ぺ') text = text.replace('po', 'ぽ').replace('dha', 'でゃ').replace('dhi', 'でぃ') text = text.replace('dhu', 'でゅ').replace('dhe', 'でぇ').replace('dho', 'でょ') text = text.replace('ka', 'か').replace('ki', 'き').replace('ku', 'く') text = text.replace('ke', 'け').replace('ko', 'こ').replace('sa', 'さ') text = text.replace('shi', 'し').replace('su', 'す').replace('se', 'せ') text = text.replace('so', 'そ').replace('ta', 'た').replace('chi', 'ち') text = text.replace('tsu', 'つ').replace('te', 'て').replace('to', 'と') text = text.replace('na', 'な').replace('ni', 'に').replace('nu', 'ぬ') text = text.replace('ne', 'ね').replace('no', 'の').replace('ha', 'は') text = text.replace('hi', 'ひ').replace('fu', 'ふ').replace('he', 'へ') text = text.replace('ho', 'ほ').replace('ma', 'ま').replace('mi', 'み') text = text.replace('mu', 'む').replace('me', 'め').replace('mo', 'も') text = text.replace('ra', 'ら').replace('ri', 'り').replace('ru', 'る') text = text.replace('re', 'れ').replace('ro', 'ろ') text = text.replace('ya', 'や').replace('yu', 'ゆ').replace('yo', 'よ') text = text.replace('wa', 'わ').replace('wi', 'ゐ').replace('we', 'ゑ') text = text.replace('wo', 'を') text = text.replace('nn', 'ん').replace('tu', 'つ').replace('hu', 'ふ') text = text.replace('fa', 'ふぁ').replace('fi', 'ふぃ').replace('fe', 'ふぇ') text = text.replace('fo', 'ふぉ').replace('-', 'ー') text = _convert(text, HEP2KANA) ret = [] for (i, char) in enumerate(text): if char in consonants: char = 'っ' ret.append(char) return ''.join(ret)
python
def alphabet2kana(text): """Convert alphabets to Hiragana Parameters ---------- text : str Alphabets string. Return ------ str Hiragana string. Examples -------- >>> print(jaconv.alphabet2kana('mamisan')) まみさん """ text = text.replace('kya', 'きゃ').replace('kyu', 'きゅ').replace('kyo', 'きょ') text = text.replace('gya', 'ぎゃ').replace('gyu', 'ぎゅ').replace('gyo', 'ぎょ') text = text.replace('sha', 'しゃ').replace('shu', 'しゅ').replace('sho', 'しょ') text = text.replace('zya', 'じゃ').replace('zyu', 'じゅ').replace('zyo', 'じょ') text = text.replace('zyi', 'じぃ').replace('zye', 'じぇ') text = text.replace('ja', 'じゃ').replace('ju', 'じゅ').replace('jo', 'じょ') text = text.replace('jya', 'じゃ').replace('jyu', 'じゅ').replace('jyo', 'じょ') text = text.replace('cha', 'ちゃ').replace('chu', 'ちゅ').replace('cho', 'ちょ') text = text.replace('tya', 'ちゃ').replace('tyu', 'ちゅ').replace('tyo', 'ちょ') text = text.replace('nya', 'にゃ').replace('nyu', 'にゅ').replace('nyo', 'にょ') text = text.replace('hya', 'ひゃ').replace('hyu', 'ひゅ').replace('hyo', 'ひょ') text = text.replace('mya', 'みゃ').replace('myu', 'みゅ').replace('myo', 'みょ') text = text.replace('rya', 'りゃ').replace('ryu', 'りゅ').replace('ryo', 'りょ') text = text.replace('bya', 'びゃ').replace('byu', 'びゅ').replace('byo', 'びょ') text = text.replace('pya', 'ぴゃ').replace('pyu', 'ぴゅ').replace('pyo', 'ぴょ') text = text.replace('oh', 'おお') text = text.replace('ga', 'が').replace('gi', 'ぎ').replace('gu', 'ぐ') text = text.replace('ge', 'げ').replace('go', 'ご').replace('za', 'ざ') text = text.replace('ji', 'じ').replace('zu', 'ず').replace('ze', 'ぜ') text = text.replace('zo', 'ぞ').replace('da', 'だ').replace('ji', 'ぢ').replace('di', 'ぢ') text = text.replace('va', 'ゔぁ').replace('vi', 'ゔぃ').replace('vu', 'ゔ') text = text.replace('ve', 'ゔぇ').replace('vo', 'ゔぉ').replace('vya', 'ゔゃ') text = text.replace('vyi', 'ゔぃ').replace('vyu', 'ゔゅ').replace('vye', 'ゔぇ') text = text.replace('vyo', 'ゔょ') text = text.replace('zu', 'づ').replace('de', 'で').replace('do', 'ど') text = text.replace('ba', 'ば').replace('bi', 'び').replace('bu', 'ぶ') text = text.replace('be', 'べ').replace('bo', 'ぼ').replace('pa', 'ぱ') text = text.replace('pi', 'ぴ').replace('pu', 'ぷ').replace('pe', 'ぺ') text = text.replace('po', 'ぽ').replace('dha', 'でゃ').replace('dhi', 'でぃ') text = text.replace('dhu', 'でゅ').replace('dhe', 'でぇ').replace('dho', 'でょ') text = text.replace('ka', 'か').replace('ki', 'き').replace('ku', 'く') text = text.replace('ke', 'け').replace('ko', 'こ').replace('sa', 'さ') text = text.replace('shi', 'し').replace('su', 'す').replace('se', 'せ') text = text.replace('so', 'そ').replace('ta', 'た').replace('chi', 'ち') text = text.replace('tsu', 'つ').replace('te', 'て').replace('to', 'と') text = text.replace('na', 'な').replace('ni', 'に').replace('nu', 'ぬ') text = text.replace('ne', 'ね').replace('no', 'の').replace('ha', 'は') text = text.replace('hi', 'ひ').replace('fu', 'ふ').replace('he', 'へ') text = text.replace('ho', 'ほ').replace('ma', 'ま').replace('mi', 'み') text = text.replace('mu', 'む').replace('me', 'め').replace('mo', 'も') text = text.replace('ra', 'ら').replace('ri', 'り').replace('ru', 'る') text = text.replace('re', 'れ').replace('ro', 'ろ') text = text.replace('ya', 'や').replace('yu', 'ゆ').replace('yo', 'よ') text = text.replace('wa', 'わ').replace('wi', 'ゐ').replace('we', 'ゑ') text = text.replace('wo', 'を') text = text.replace('nn', 'ん').replace('tu', 'つ').replace('hu', 'ふ') text = text.replace('fa', 'ふぁ').replace('fi', 'ふぃ').replace('fe', 'ふぇ') text = text.replace('fo', 'ふぉ').replace('-', 'ー') text = _convert(text, HEP2KANA) ret = [] for (i, char) in enumerate(text): if char in consonants: char = 'っ' ret.append(char) return ''.join(ret)
[ "def", "alphabet2kana", "(", "text", ")", ":", "text", "=", "text", ".", "replace", "(", "'kya'", ",", "'きゃ').re", "p", "l", "ace('ky", "u", "', 'き", "ゅ", ").replac", "e", "(", "'kyo', ", "'", "きょ')", "", "", "", "text", "=", "text", ".", "replace"...
Convert alphabets to Hiragana Parameters ---------- text : str Alphabets string. Return ------ str Hiragana string. Examples -------- >>> print(jaconv.alphabet2kana('mamisan')) まみさん
[ "Convert", "alphabets", "to", "Hiragana" ]
train
https://github.com/ikegami-yukino/jaconv/blob/5319e4c6b4676ab27b5e9ebec9a299d09a5a62d7/jaconv/jaconv.py#L336-L408
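The reverse trip of the previous example (jaconv assumed). After the replace chain and the HEP2KANA pass, any leftover lone consonant is turned into 'っ' by the final loop.

import jaconv

print(jaconv.alphabet2kana('gakkou'))  # がっこう: the stray 'k' becomes 'っ'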
ibab/matplotlib-hep
matplotlib_hep/__init__.py
histpoints
def histpoints(x, bins=None, xerr=None, yerr='gamma', normed=False, **kwargs): """ Plot a histogram as a series of data points. Compute and draw the histogram of *x* using individual (x,y) points for the bin contents. By default, vertical Poisson error bars are calculated using the gamma distribution. Horizontal error bars are omitted by default. These can be enabled using the *xerr* argument. Use ``xerr='binwidth'`` to draw horizontal error bars that indicate the width of each histogram bin. Parameters ---------- x : (n,) array or sequence of (n,) arrays Input values. This takes either a single array or a sequence of arrays, which are not required to be of the same length. """ import matplotlib.pyplot as plt if bins is None: bins = calc_nbins(x) h, bins = np.histogram(x, bins=bins) width = bins[1] - bins[0] center = (bins[:-1] + bins[1:]) / 2 area = sum(h * width) if isinstance(yerr, str): yerr = poisson_limits(h, yerr) if xerr == 'binwidth': xerr = width / 2 if normed: h = h / area yerr = yerr / area area = 1. if not 'color' in kwargs: kwargs['color'] = 'black' if not 'fmt' in kwargs: kwargs['fmt'] = 'o' plt.errorbar(center, h, xerr=xerr, yerr=yerr, **kwargs) return center, (yerr[0], h, yerr[1]), area
python
def histpoints(x, bins=None, xerr=None, yerr='gamma', normed=False, **kwargs): """ Plot a histogram as a series of data points. Compute and draw the histogram of *x* using individual (x,y) points for the bin contents. By default, vertical Poisson error bars are calculated using the gamma distribution. Horizontal error bars are omitted by default. These can be enabled using the *xerr* argument. Use ``xerr='binwidth'`` to draw horizontal error bars that indicate the width of each histogram bin. Parameters ---------- x : (n,) array or sequence of (n,) arrays Input values. This takes either a single array or a sequence of arrays, which are not required to be of the same length. """ import matplotlib.pyplot as plt if bins is None: bins = calc_nbins(x) h, bins = np.histogram(x, bins=bins) width = bins[1] - bins[0] center = (bins[:-1] + bins[1:]) / 2 area = sum(h * width) if isinstance(yerr, str): yerr = poisson_limits(h, yerr) if xerr == 'binwidth': xerr = width / 2 if normed: h = h / area yerr = yerr / area area = 1. if not 'color' in kwargs: kwargs['color'] = 'black' if not 'fmt' in kwargs: kwargs['fmt'] = 'o' plt.errorbar(center, h, xerr=xerr, yerr=yerr, **kwargs) return center, (yerr[0], h, yerr[1]), area
[ "def", "histpoints", "(", "x", ",", "bins", "=", "None", ",", "xerr", "=", "None", ",", "yerr", "=", "'gamma'", ",", "normed", "=", "False", ",", "*", "*", "kwargs", ")", ":", "import", "matplotlib", ".", "pyplot", "as", "plt", "if", "bins", "is", ...
Plot a histogram as a series of data points. Compute and draw the histogram of *x* using individual (x,y) points for the bin contents. By default, vertical Poisson error bars are calculated using the gamma distribution. Horizontal error bars are omitted by default. These can be enabled using the *xerr* argument. Use ``xerr='binwidth'`` to draw horizontal error bars that indicate the width of each histogram bin. Parameters ---------- x : (n,) array or sequence of (n,) arrays Input values. This takes either a single array or a sequence of arrays, which are not required to be of the same length.
[ "Plot", "a", "histogram", "as", "a", "series", "of", "data", "points", "." ]
train
https://github.com/ibab/matplotlib-hep/blob/7ff83ffbc059a0ca9326f1ecb39979b13e33b22d/matplotlib_hep/__init__.py#L32-L84
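A minimal usage sketch (assuming numpy, matplotlib and matplotlib-hep are installed). The three-part return value mirrors the function's last line.

import numpy as np
import matplotlib.pyplot as plt
from matplotlib_hep import histpoints

x = np.random.normal(size=1000)
# Points with gamma-based Poisson errors, bin-width horizontal bars,
# normalised to unit area.
center, (low, height, high), area = histpoints(x, xerr='binwidth', normed=True)
plt.show()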
kevinconway/daemons
daemons/message/eventlet.py
EventletMessageManager.pool
def pool(self): """Get an eventlet pool used to dispatch requests.""" self._pool = self._pool or eventlet.GreenPool(size=self.pool_size) return self._pool
python
def pool(self): """Get an eventlet pool used to dispatch requests.""" self._pool = self._pool or eventlet.GreenPool(size=self.pool_size) return self._pool
[ "def", "pool", "(", "self", ")", ":", "self", ".", "_pool", "=", "self", ".", "_pool", "or", "eventlet", ".", "GreenPool", "(", "size", "=", "self", ".", "pool_size", ")", "return", "self", ".", "_pool" ]
Get an eventlet pool used to dispatch requests.
[ "Get", "an", "eventlet", "pool", "used", "to", "dispatch", "requests", "." ]
train
https://github.com/kevinconway/daemons/blob/b0fe0db5821171a35aa9078596d19d630c570b38/daemons/message/eventlet.py#L18-L21
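A minimal sketch of the same lazy-initialisation pattern in isolation; the Dispatcher class and its default pool size are illustrative, not part of the daemons API.

import eventlet

class Dispatcher(object):
    def __init__(self, pool_size=100):
        self.pool_size = pool_size
        self._pool = None

    @property
    def pool(self):
        # Build the GreenPool on first access, then reuse the cached instance.
        self._pool = self._pool or eventlet.GreenPool(size=self.pool_size)
        return self._pool

d = Dispatcher()
d.pool.spawn_n(print, 'dispatched')  # both property accesses hit the same pool
d.pool.waitall()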
kevinconway/daemons
daemons/startstop/simple.py
SimpleStartStopManager.start
def start(self): """Start the process with daemonization. If the process is already started this call should exit with code ALREADY_RUNNING. Otherwise it must call the 'daemonize' method and then call 'run'. """ if self.pid is not None: LOG.error( "The process is already running with pid {0}.".format(self.pid) ) sys.exit(exit.ALREADY_RUNNING) self.daemonize() LOG.info("Beginning run loop for process.") try: self.run() except Exception: LOG.exception("Uncaught exception in the daemon run() method.") self.stop() sys.exit(exit.RUN_FAILURE)
python
def start(self): """Start the process with daemonization. If the process is already started this call should exit with code ALREADY_RUNNING. Otherwise it must call the 'daemonize' method and then call 'run'. """ if self.pid is not None: LOG.error( "The process is already running with pid {0}.".format(self.pid) ) sys.exit(exit.ALREADY_RUNNING) self.daemonize() LOG.info("Beginning run loop for process.") try: self.run() except Exception: LOG.exception("Uncaught exception in the daemon run() method.") self.stop() sys.exit(exit.RUN_FAILURE)
[ "def", "start", "(", "self", ")", ":", "if", "self", ".", "pid", "is", "not", "None", ":", "LOG", ".", "error", "(", "\"The process is already running with pid {0}.\"", ".", "format", "(", "self", ".", "pid", ")", ")", "sys", ".", "exit", "(", "exit", ...
Start the process with daemonization. If the process is already started this call should exit with code ALREADY_RUNNING. Otherwise it must call the 'daemonize' method and then call 'run'.
[ "Start", "the", "process", "with", "daemonization", "." ]
train
https://github.com/kevinconway/daemons/blob/b0fe0db5821171a35aa9078596d19d630c570b38/daemons/startstop/simple.py#L24-L49
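A hedged end-to-end sketch of how start() is typically reached. The prefab RunDaemon class and the pidfile path follow the pattern from the project README and are assumptions here, not taken from this record.

import sys
import time
from daemons.prefab import run

class SleepyDaemon(run.RunDaemon):
    def run(self):
        while True:
            time.sleep(1)

if __name__ == '__main__':
    d = SleepyDaemon(pidfile='/tmp/sleepy.pid')
    if sys.argv[1] == 'start':
        d.start()  # daemonize, then enter run(); exits ALREADY_RUNNING if a pid exists
    elif sys.argv[1] == 'stop':
        d.stop()   # the SIGTERM polling loop shown in the stop() record below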
kevinconway/daemons
daemons/startstop/simple.py
SimpleStartStopManager.stop
def stop(self): """Stop the daemonized process. If the process is already stopped this call should exit successfully. If the process cannot be stopped this call should exit with code STOP_FAILED. """ if self.pid is None: return None try: while True: self.send(signal.SIGTERM) time.sleep(0.1) except RuntimeError as err: if "No such process" in str(err): LOG.info("Successfully stopped the process.") return None LOG.exception("Failed to stop the process:") sys.exit(exit.STOP_FAILED) except TypeError as err: if "an integer is required" in str(err): LOG.info("Successfully stopped the process.") return None LOG.exception("Failed to stop the process:") sys.exit(exit.STOP_FAILED)
python
def stop(self): """Stop the daemonized process. If the process is already stopped this call should exit successfully. If the process cannot be stopped this call should exit with code STOP_FAILED. """ if self.pid is None: return None try: while True: self.send(signal.SIGTERM) time.sleep(0.1) except RuntimeError as err: if "No such process" in str(err): LOG.info("Successfully stopped the process.") return None LOG.exception("Failed to stop the process:") sys.exit(exit.STOP_FAILED) except TypeError as err: if "an integer is required" in str(err): LOG.info("Successfully stopped the process.") return None LOG.exception("Failed to stop the process:") sys.exit(exit.STOP_FAILED)
[ "def", "stop", "(", "self", ")", ":", "if", "self", ".", "pid", "is", "None", ":", "return", "None", "try", ":", "while", "True", ":", "self", ".", "send", "(", "signal", ".", "SIGTERM", ")", "time", ".", "sleep", "(", "0.1", ")", "except", "Runt...
Stop the daemonized process. If the process is already stopped this call should exit successfully. If the process cannot be stopped this call should exit with code STOP_FAILED.
[ "Stop", "the", "daemonized", "process", "." ]
train
https://github.com/kevinconway/daemons/blob/b0fe0db5821171a35aa9078596d19d630c570b38/daemons/startstop/simple.py#L51-L87
kevinconway/daemons
daemons/signal/simple.py
SimpleSignalManager.handle
def handle(self, signum, handler): """Set a function to run when the given signal is received. Multiple handlers may be assigned to a single signal. The order of handlers does not need to be preserved. 'signum' must be an integer representing a signal. 'handler' must be a callable. """ if not isinstance(signum, int): raise TypeError( "Signals must be given as integers. Got {0}.".format( type(signum), ), ) if not callable(handler): raise TypeError( "Signal handlers must be callable.", ) signal.signal(signum, self._handle_signals) self._handlers[signum].append(handler)
python
def handle(self, signum, handler): """Set a function to run when the given signal is received. Multiple handlers may be assigned to a single signal. The order of handlers does not need to be preserved. 'signum' must be an integer representing a signal. 'handler' must be a callable. """ if not isinstance(signum, int): raise TypeError( "Signals must be given as integers. Got {0}.".format( type(signum), ), ) if not callable(handler): raise TypeError( "Signal handlers must be callable.", ) signal.signal(signum, self._handle_signals) self._handlers[signum].append(handler)
[ "def", "handle", "(", "self", ",", "signum", ",", "handler", ")", ":", "if", "not", "isinstance", "(", "signum", ",", "int", ")", ":", "raise", "TypeError", "(", "\"Signals must be given as integers. Got {0}.\"", ".", "format", "(", "type", "(", "signum", ")...
Set a function to run when the given signal is received. Multiple handlers may be assigned to a single signal. The order of handlers does not need to be preserved. 'signum' must be an integer representing a signal. 'handler' must be a callable.
[ "Set", "a", "function", "to", "run", "when", "the", "given", "signal", "is", "recieved", "." ]
train
https://github.com/kevinconway/daemons/blob/b0fe0db5821171a35aa9078596d19d630c570b38/daemons/signal/simple.py#L43-L68
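A small sketch of registering a handler on the daemon object d from the sketch above, assuming the prefab daemon mixes in this signal manager as the package wires by default; the SIGHUP/reload pairing is illustrative.

import signal

def reload_config():
    print('reloading configuration')

# Several callables may be stacked on one signal number; order is not guaranteed.
d.handle(signal.SIGHUP, reload_config)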
kevinconway/daemons
daemons/signal/simple.py
SimpleSignalManager.send
def send(self, signum): """Send the given signal to the running process. If the process is not running a RuntimeError with a message of "No such process" should be emitted. """ if not isinstance(signum, int): raise TypeError( "Signals must be given as integers. Got {0}.".format( type(signum), ), ) try: os.kill(self.pid, signum) except OSError as err: if "No such process" in err.strerror: raise RuntimeError("No such process {0}.".format(self.pid)) raise err
python
def send(self, signum): """Send the given signal to the running process. If the process is not running a RuntimeError with a message of "No such process" should be emitted. """ if not isinstance(signum, int): raise TypeError( "Signals must be given as integers. Got {0}.".format( type(signum), ), ) try: os.kill(self.pid, signum) except OSError as err: if "No such process" in err.strerror: raise RuntimeError("No such process {0}.".format(self.pid)) raise err
[ "def", "send", "(", "self", ",", "signum", ")", ":", "if", "not", "isinstance", "(", "signum", ",", "int", ")", ":", "raise", "TypeError", "(", "\"Signals must be given as integers. Got {0}.\"", ".", "format", "(", "type", "(", "signum", ")", ",", ")", ","...
Send the given signal to the running process. If the process is not running a RuntimeError with a message of "No such process" should be emitted.
[ "Send", "the", "given", "signal", "to", "the", "running", "process", "." ]
train
https://github.com/kevinconway/daemons/blob/b0fe0db5821171a35aa9078596d19d630c570b38/daemons/signal/simple.py#L70-L94
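Continuing the same sketch, delivering the registered signal to the running process from a controlling script.

import signal

# os.kill under the hood; raises RuntimeError("No such process ...")
# when the recorded pid is stale.
d.send(signal.SIGHUP)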
kevinconway/daemons
daemons/signal/simple.py
SimpleSignalManager._handle_signals
def _handle_signals(self, signum, frame): """Handler for all signals. This method must be used to handle all signals for the process. It is responsible for running the appropriate signal handlers registered with the 'handle' method unless they are shutdown signals. Shutdown signals must trigger the 'shutdown' method. """ if signum in self.kill_signals: return self.shutdown(signum) for handler in self._handlers[signum]: handler()
python
def _handle_signals(self, signum, frame): """Handler for all signals. This method must be used to handle all signals for the process. It is responsible for running the appropriate signal handlers registered with the 'handle' method unless they are shutdown signals. Shutdown signals must trigger the 'shutdown' method. """ if signum in self.kill_signals: return self.shutdown(signum) for handler in self._handlers[signum]: handler()
[ "def", "_handle_signals", "(", "self", ",", "signum", ",", "frame", ")", ":", "if", "signum", "in", "self", ".", "kill_signals", ":", "return", "self", ".", "shutdown", "(", "signum", ")", "for", "handler", "in", "self", ".", "_handlers", "[", "signum", ...
Handler for all signals. This method must be used to handle all signals for the process. It is responsible for running the appropriate signal handlers registered with the 'handle' method unless they are shutdown signals. Shutdown signals must trigger the 'shutdown' method.
[ "Handler", "for", "all", "signals", "." ]
train
https://github.com/kevinconway/daemons/blob/b0fe0db5821171a35aa9078596d19d630c570b38/daemons/signal/simple.py#L96-L110
kevinconway/daemons
daemons/signal/simple.py
SimpleSignalManager.shutdown
def shutdown(self, signum): """Handle all signals which trigger a process stop. This method should run all appropriate signal handlers registered through the 'handle' method. At the end it should cause the process to exit with a status code. If any of the handlers raise an exception the exit code should be SHUTDOWN_FAILED otherwise SUCCESS. """ dirty = False for handler in self._handlers[signum]: try: handler() except: LOG.exception("A shutdown handler failed to execute:") dirty = True del self.pid if dirty: sys.exit(exit.SHUTDOWN_FAILED) return None sys.exit(exit.SUCCESS) return None
python
def shutdown(self, signum): """Handle all signals which trigger a process stop. This method should run all appropriate signal handlers registered through the 'handle' method. At the end it should cause the process to exit with a status code. If any of the handlers raise an exception the exit code should be SHUTDOWN_FAILED otherwise SUCCESS. """ dirty = False for handler in self._handlers[signum]: try: handler() except: LOG.exception("A shutdown handler failed to execute:") dirty = True del self.pid if dirty: sys.exit(exit.SHUTDOWN_FAILED) return None sys.exit(exit.SUCCESS) return None
[ "def", "shutdown", "(", "self", ",", "signum", ")", ":", "dirty", "=", "False", "for", "handler", "in", "self", ".", "_handlers", "[", "signum", "]", ":", "try", ":", "handler", "(", ")", "except", ":", "LOG", ".", "exception", "(", "\"A shutdown handl...
Handle all signals which trigger a process stop. This method should run all appropriate signal handlers registered through the 'handle' method. At the end it should cause the process to exit with a status code. If any of the handlers raise an exception the exit code should be SHUTDOWN_FAILED otherwise SUCCESS.
[ "Handle", "all", "signals", "which", "trigger", "a", "process", "stop", "." ]
train
https://github.com/kevinconway/daemons/blob/b0fe0db5821171a35aa9078596d19d630c570b38/daemons/signal/simple.py#L112-L140
DEIB-GECO/PyGMQL
gmql/dataset/loaders/Loader.py
preprocess_path
def preprocess_path(path): """ Given a dataset path, the following structure is to be expected: - path/ - files/ - S_00000.gdm - S_00000.gdm.meta - S_00001.gdm - S_00001.gdm.meta - ... - schema.xml - [profile.xml] - [web_profile.xml] - [info.txt] - [query.txt] - [vocabulary.txt] :param path: path of the dataset :return: the path where the gdm data are """ if path.startswith("gs://"): fs = gcsfs.GCSFileSystem(token=get_gcloud_token()) for sub_f in fs.ls(path): if sub_f.endswith("/") and sub_f.split("/")[-2] == FILES_FOLDER: return "gs://" + sub_f return path if path.startswith("hdfs://"): return path for sub_f in os.listdir(path): sub_f_tot = os.path.join(path, sub_f) if os.path.isdir(sub_f_tot) and sub_f == FILES_FOLDER: if check_for_dataset(sub_f_tot): return sub_f_tot else: raise ValueError("Dataset in {} was not in GMQL format".format(sub_f_tot)) # if we are here it means that there is no files folder...so we need to check the root if check_for_dataset(path): return path else: raise ValueError("Dataset in {} was not in GMQL format".format(path))
python
def preprocess_path(path): """ Given a dataset path, the following structure is to be expected: - path/ - files/ - S_00000.gdm - S_00000.gdm.meta - S_00001.gdm - S_00001.gdm.meta - ... - schema.xml - [profile.xml] - [web_profile.xml] - [info.txt] - [query.txt] - [vocabulary.txt] :param path: path of the dataset :return: the path where the gdm data are """ if path.startswith("gs://"): fs = gcsfs.GCSFileSystem(token=get_gcloud_token()) for sub_f in fs.ls(path): if sub_f.endswith("/") and sub_f.split("/")[-2] == FILES_FOLDER: return "gs://" + sub_f return path if path.startswith("hdfs://"): return path for sub_f in os.listdir(path): sub_f_tot = os.path.join(path, sub_f) if os.path.isdir(sub_f_tot) and sub_f == FILES_FOLDER: if check_for_dataset(sub_f_tot): return sub_f_tot else: raise ValueError("Dataset in {} was not in GMQL format".format(sub_f_tot)) # if we are here it means that there is no files folder...so we need to check the root if check_for_dataset(path): return path else: raise ValueError("Dataset in {} was not in GMQL format".format(path))
[ "def", "preprocess_path", "(", "path", ")", ":", "if", "path", ".", "startswith", "(", "\"gs://\"", ")", ":", "fs", "=", "gcsfs", ".", "GCSFileSystem", "(", "token", "=", "get_gcloud_token", "(", ")", ")", "for", "sub_f", "in", "fs", ".", "ls", "(", ...
Given a dataset path, the following structure is to be expected: - path/ - files/ - S_00000.gdm - S_00000.gdm.meta - S_00001.gdm - S_00001.gdm.meta - ... - schema.xml - [profile.xml] - [web_profile.xml] - [info.txt] - [query.txt] - [vocabulary.txt] :param path: path of the dataset :return: the path where the gdm data are
[ "Given", "a", "dataset", "path", "the", "following", "structure", "is", "to", "be", "expected", ":", "-", "path", "/", "-", "files", "/", "-", "S_00000", ".", "gdm", "-", "S_00000", ".", "gdm", ".", "meta", "-", "S_00001", ".", "gdm", "-", "S_00001",...
train
https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/dataset/loaders/Loader.py#L25-L63
DEIB-GECO/PyGMQL
gmql/dataset/loaders/Loader.py
check_for_dataset
def check_for_dataset(files): """ A GDM dataset has the form: - S_00000.gdm - S_00000.gdm.meta - S_00001.gdm - S_00001.gdm.meta - ... - schema.xml - [profile.xml] - [web_profile.xml] :param files: path of the dataset :return: True if the path contains a gdm dataset """ all_files = os.listdir(files) meta_files = set(map(lambda y: y[: -9], filter(lambda x: x.endswith(".gdm.meta"), all_files))) regs_files = set(map(lambda y: y[: -4], filter(lambda x: x.endswith(".gdm"), all_files))) return meta_files == regs_files
python
def check_for_dataset(files): """ A GDM dataset has the form: - S_00000.gdm - S_00000.gdm.meta - S_00001.gdm - S_00001.gdm.meta - ... - schema.xml - [profile.xml] - [web_profile.xml] :param files: path of the dataset :return: True if the path contains a gdm dataset """ all_files = os.listdir(files) meta_files = set(map(lambda y: y[: -9], filter(lambda x: x.endswith(".gdm.meta"), all_files))) regs_files = set(map(lambda y: y[: -4], filter(lambda x: x.endswith(".gdm"), all_files))) return meta_files == regs_files
[ "def", "check_for_dataset", "(", "files", ")", ":", "all_files", "=", "os", ".", "listdir", "(", "files", ")", "meta_files", "=", "set", "(", "map", "(", "lambda", "y", ":", "y", "[", ":", "-", "9", "]", ",", "filter", "(", "lambda", "x", ":", "x...
A GDM dataset has the form: - S_00000.gdm - S_00000.gdm.meta - S_00001.gdm - S_00001.gdm.meta - ... - schema.xml - [profile.xml] - [web_profile.xml] :param files: path of the dataset :return: True if the path contains a gdm dataset
[ "A", "GDM", "dataset", "has", "the", "form", ":", "-", "S_00000", ".", "gdm", "-", "S_00000", ".", "gdm", ".", "meta", "-", "S_00001", ".", "gdm", "-", "S_00001", ".", "gdm", ".", "meta", "-", "...", "-", "schema", ".", "xml", "-", "[", "profile"...
train
https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/dataset/loaders/Loader.py#L66-L82
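A tiny illustration of the stem-matching rule; the temporary layout is hypothetical, and the import path simply points at the module this record comes from.

import os
import tempfile
from gmql.dataset.loaders.Loader import check_for_dataset

d = tempfile.mkdtemp()
for name in ('S_00000.gdm', 'S_00000.gdm.meta', 'schema.xml'):
    open(os.path.join(d, name), 'w').close()

# Meta stems {'S_00000'} equal region stems {'S_00000'}, so this prints True.
print(check_for_dataset(d))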
DEIB-GECO/PyGMQL
gmql/dataset/loaders/Loader.py
load_from_path
def load_from_path(local_path=None, parser=None, all_load=False): """ Loads the data from a local path into a GMQLDataset. The loading of the files is "lazy", which means that the files are loaded only when the user does a materialization (see :func:`~gmql.dataset.GMQLDataset.GMQLDataset.materialize` ). The user can force the materialization of the data (maybe for an initial data exploration on only the metadata) by setting the :attr:`~.reg_load` (load in memory the region data), :attr:`~.meta_load` (load in memory the metadata) or :attr:`~.all_load` (load both region and meta data in memory). If the user specifies this final parameter as True, a :class:`~gmql.dataset.GDataframe.GDataframe` is returned, otherwise a :class:`~gmql.dataset.GMQLDataset.GMQLDataset` is returned :param local_path: local path of the dataset :param parser: the parser to be used for reading the data :param all_load: if set to True, both region and meta data are loaded in memory and an instance of GDataframe is returned :return: A new GMQLDataset or a GDataframe """ from .. import GDataframe from .. import GMQLDataset pmg = get_python_manager() local_path = preprocess_path(local_path) if all_load: # load directly the metadata for exploration meta = MetaLoaderFile.load_meta_from_path(local_path) if isinstance(parser, RegionParser): # region data regs = RegLoaderFile.load_reg_from_path(local_path, parser) else: regs = RegLoaderFile.load_reg_from_path(local_path) return GDataframe.GDataframe(regs=regs, meta=meta) else: from ...settings import is_metaprofiling_enabled if is_metaprofiling_enabled(): meta_profile = create_metadata_profile(local_path) else: meta_profile = None if parser is None: # find the parser parser = RegLoaderFile.get_parser(local_path) elif not isinstance(parser, RegionParser): raise ValueError("parser must be RegionParser. {} was provided".format(type(parser))) source_table = get_source_table() id = source_table.search_source(local=local_path) if id is None: id = source_table.add_source(local=local_path, parser=parser) local_sources = [id] index = pmg.read_dataset(str(id), parser.get_gmql_parser()) return GMQLDataset.GMQLDataset(index=index, parser=parser, location="local", path_or_name=local_path, local_sources=local_sources, meta_profile=meta_profile)
python
def load_from_path(local_path=None, parser=None, all_load=False): """ Loads the data from a local path into a GMQLDataset. The loading of the files is "lazy", which means that the files are loaded only when the user does a materialization (see :func:`~gmql.dataset.GMQLDataset.GMQLDataset.materialize` ). The user can force the materialization of the data (maybe for an initial data exploration on only the metadata) by setting the :attr:`~.reg_load` (load in memory the region data), :attr:`~.meta_load` (load in memory the metadata) or :attr:`~.all_load` (load both region and meta data in memory). If the user specifies this final parameter as True, a :class:`~gmql.dataset.GDataframe.GDataframe` is returned, otherwise a :class:`~gmql.dataset.GMQLDataset.GMQLDataset` is returned :param local_path: local path of the dataset :param parser: the parser to be used for reading the data :param all_load: if set to True, both region and meta data are loaded in memory and an instance of GDataframe is returned :return: A new GMQLDataset or a GDataframe """ from .. import GDataframe from .. import GMQLDataset pmg = get_python_manager() local_path = preprocess_path(local_path) if all_load: # load directly the metadata for exploration meta = MetaLoaderFile.load_meta_from_path(local_path) if isinstance(parser, RegionParser): # region data regs = RegLoaderFile.load_reg_from_path(local_path, parser) else: regs = RegLoaderFile.load_reg_from_path(local_path) return GDataframe.GDataframe(regs=regs, meta=meta) else: from ...settings import is_metaprofiling_enabled if is_metaprofiling_enabled(): meta_profile = create_metadata_profile(local_path) else: meta_profile = None if parser is None: # find the parser parser = RegLoaderFile.get_parser(local_path) elif not isinstance(parser, RegionParser): raise ValueError("parser must be RegionParser. {} was provided".format(type(parser))) source_table = get_source_table() id = source_table.search_source(local=local_path) if id is None: id = source_table.add_source(local=local_path, parser=parser) local_sources = [id] index = pmg.read_dataset(str(id), parser.get_gmql_parser()) return GMQLDataset.GMQLDataset(index=index, parser=parser, location="local", path_or_name=local_path, local_sources=local_sources, meta_profile=meta_profile)
[ "def", "load_from_path", "(", "local_path", "=", "None", ",", "parser", "=", "None", ",", "all_load", "=", "False", ")", ":", "from", ".", ".", "import", "GDataframe", "from", ".", ".", "import", "GMQLDataset", "pmg", "=", "get_python_manager", "(", ")", ...
Loads the data from a local path into a GMQLDataset. The loading of the files is "lazy", which means that the files are loaded only when the user does a materialization (see :func:`~gmql.dataset.GMQLDataset.GMQLDataset.materialize` ). The user can force the materialization of the data (maybe for an initial data exploration on only the metadata) by setting the :attr:`~.reg_load` (load in memory the region data), :attr:`~.meta_load` (load in memory the metadata) or :attr:`~.all_load` (load both region and meta data in memory). If the user specifies this final parameter as True, a :class:`~gmql.dataset.GDataframe.GDataframe` is returned, otherwise a :class:`~gmql.dataset.GMQLDataset.GMQLDataset` is returned :param local_path: local path of the dataset :param parser: the parser to be used for reading the data :param all_load: if set to True, both region and meta data are loaded in memory and an instance of GDataframe is returned :return: A new GMQLDataset or a GDataframe
[ "Loads", "the", "data", "from", "a", "local", "path", "into", "a", "GMQLDataset", ".", "The", "loading", "of", "the", "files", "is", "lazy", "which", "means", "that", "the", "files", "are", "loaded", "only", "when", "the", "user", "does", "a", "materiali...
train
https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/dataset/loaders/Loader.py#L85-L142
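A hedged usage sketch; PyGMQL conventionally re-exports the loader at package level (aliased as gl below), and the paths are placeholders.

import gmql as gl

# Lazy: returns a GMQLDataset; files are only read at materialize time.
dataset = gl.load_from_path(local_path='/path/to/dataset')

# Eager: returns a GDataframe with regions and metadata already in memory.
gdf = gl.load_from_path(local_path='/path/to/dataset', all_load=True)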
DEIB-GECO/PyGMQL
gmql/dataset/loaders/Loader.py
load_from_remote
def load_from_remote(remote_name, owner=None): """ Loads the data from a remote repository. :param remote_name: The name of the dataset in the remote repository :param owner: (optional) The owner of the dataset. If nothing is provided, the current user is used. For public datasets use 'public'. :return: A new GMQLDataset or a GDataframe """ from .. import GMQLDataset pmg = get_python_manager() remote_manager = get_remote_manager() parser = remote_manager.get_dataset_schema(remote_name, owner) source_table = get_source_table() id = source_table.search_source(remote=remote_name) if id is None: id = source_table.add_source(remote=remote_name, parser=parser) index = pmg.read_dataset(str(id), parser.get_gmql_parser()) remote_sources = [id] return GMQLDataset.GMQLDataset(index=index, location="remote", path_or_name=remote_name, remote_sources=remote_sources)
python
def load_from_remote(remote_name, owner=None): """ Loads the data from a remote repository. :param remote_name: The name of the dataset in the remote repository :param owner: (optional) The owner of the dataset. If nothing is provided, the current user is used. For public datasets use 'public'. :return: A new GMQLDataset or a GDataframe """ from .. import GMQLDataset pmg = get_python_manager() remote_manager = get_remote_manager() parser = remote_manager.get_dataset_schema(remote_name, owner) source_table = get_source_table() id = source_table.search_source(remote=remote_name) if id is None: id = source_table.add_source(remote=remote_name, parser=parser) index = pmg.read_dataset(str(id), parser.get_gmql_parser()) remote_sources = [id] return GMQLDataset.GMQLDataset(index=index, location="remote", path_or_name=remote_name, remote_sources=remote_sources)
[ "def", "load_from_remote", "(", "remote_name", ",", "owner", "=", "None", ")", ":", "from", ".", ".", "import", "GMQLDataset", "pmg", "=", "get_python_manager", "(", ")", "remote_manager", "=", "get_remote_manager", "(", ")", "parser", "=", "remote_manager", "...
Loads the data from a remote repository. :param remote_name: The name of the dataset in the remote repository :param owner: (optional) The owner of the dataset. If nothing is provided, the current user is used. For public datasets use 'public'. :return: A new GMQLDataset or a GDataframe
[ "Loads", "the", "data", "from", "a", "remote", "repository", "." ]
train
https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/dataset/loaders/Loader.py#L145-L166
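A hedged sketch of the remote path; the dataset name is a placeholder, and login() is the top-level wrapper around the RemoteManager method documented further down.

import gmql as gl

gl.login()  # a guest session is enough for public datasets
dataset = gl.load_from_remote('Example_Dataset', owner='public')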
nathan-hoad/python-iwlib
iwlib/iwlist.py
scan
def scan(interface): """Perform a scan for access points in the area. Arguments: interface - device to use for scanning (e.g. eth1, wlan0). """ interface = _get_bytes(interface) head = ffi.new('wireless_scan_head *') with iwlib_socket() as sock: range = _get_range_info(interface, sock=sock) if iwlib.iw_scan(sock, interface, range.we_version_compiled, head) != 0: errno = ffi.errno strerror = "Error while scanning: %s" % os.strerror(errno) raise OSError(errno, strerror) results = [] scan = head.result buf = ffi.new('char []', 1024) while scan != ffi.NULL: parsed_scan = {} if scan.b.has_mode: parsed_scan['Mode'] = ffi.string(iwlib.iw_operation_mode[scan.b.mode]) if scan.b.essid_on: parsed_scan['ESSID'] = ffi.string(scan.b.essid) else: parsed_scan['ESSID'] = b'Auto' if scan.has_ap_addr: iwlib.iw_ether_ntop( ffi.cast('struct ether_addr *', scan.ap_addr.sa_data), buf) if scan.b.has_mode and scan.b.mode == iwlib.IW_MODE_ADHOC: parsed_scan['Cell'] = ffi.string(buf) else: parsed_scan['Access Point'] = ffi.string(buf) if scan.has_maxbitrate: iwlib.iw_print_bitrate(buf, len(buf), scan.maxbitrate.value) parsed_scan['BitRate'] = ffi.string(buf) if scan.has_stats: parsed_scan['stats'] = _parse_stats(scan.stats) results.append(parsed_scan) scan = scan.next return results
python
def scan(interface): """Perform a scan for access points in the area. Arguments: interface - device to use for scanning (e.g. eth1, wlan0). """ interface = _get_bytes(interface) head = ffi.new('wireless_scan_head *') with iwlib_socket() as sock: range = _get_range_info(interface, sock=sock) if iwlib.iw_scan(sock, interface, range.we_version_compiled, head) != 0: errno = ffi.errno strerror = "Error while scanning: %s" % os.strerror(errno) raise OSError(errno, strerror) results = [] scan = head.result buf = ffi.new('char []', 1024) while scan != ffi.NULL: parsed_scan = {} if scan.b.has_mode: parsed_scan['Mode'] = ffi.string(iwlib.iw_operation_mode[scan.b.mode]) if scan.b.essid_on: parsed_scan['ESSID'] = ffi.string(scan.b.essid) else: parsed_scan['ESSID'] = b'Auto' if scan.has_ap_addr: iwlib.iw_ether_ntop( ffi.cast('struct ether_addr *', scan.ap_addr.sa_data), buf) if scan.b.has_mode and scan.b.mode == iwlib.IW_MODE_ADHOC: parsed_scan['Cell'] = ffi.string(buf) else: parsed_scan['Access Point'] = ffi.string(buf) if scan.has_maxbitrate: iwlib.iw_print_bitrate(buf, len(buf), scan.maxbitrate.value) parsed_scan['BitRate'] = ffi.string(buf) if scan.has_stats: parsed_scan['stats'] = _parse_stats(scan.stats) results.append(parsed_scan) scan = scan.next return results
[ "def", "scan", "(", "interface", ")", ":", "interface", "=", "_get_bytes", "(", "interface", ")", "head", "=", "ffi", ".", "new", "(", "'wireless_scan_head *'", ")", "with", "iwlib_socket", "(", ")", "as", "sock", ":", "range", "=", "_get_range_info", "(",...
Perform a scan for access points in the area. Arguments: interface - device to use for scanning (e.g. eth1, wlan0).
[ "Perform", "a", "scan", "for", "access", "points", "in", "the", "area", "." ]
train
https://github.com/nathan-hoad/python-iwlib/blob/f7604de0a27709fca139c4bada58263bdce4f08e/iwlib/iwlist.py#L21-L74
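A short usage sketch; the interface name is machine-specific, and a full scan usually needs root privileges.

from iwlib import iwlist

for cell in iwlist.scan('wlan0'):
    # Values come back as bytes objects, matching the ffi.string calls above.
    print(cell['ESSID'], cell.get('Access Point'))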
DEIB-GECO/PyGMQL
gmql/RemoteConnection/RemoteManager.py
RemoteManager.login
def login(self, username=None, password=None): """ Before doing any remote operation, the user has to login to the GMQL service. This can be done in the two following ways: * Guest mode: the user has no credentials and uses the system only as a temporary guest * Authenticated mode: the user has credentials and a stable remote account If neither username nor password is specified, the user enters the system as a guest. If both are specified and they correspond to an existing user, the user enters as an authenticated user :param username: (optional) :param password: (optional) :return: None """ if (username is None) and (password is None): auth_token = self.__login_guest() elif (username is not None) and (password is not None): auth_token, fullName = self.__login_credentials(username, password) self.logger.info("You are logged as {}".format(fullName)) else: raise ValueError("you have to specify both username and password or nothing") if auth_token is not None: self.auth_token = auth_token else: raise ConnectionError("Impossible to retrieve the authentication token")
python
def login(self, username=None, password=None): """ Before doing any remote operation, the user has to login to the GMQL service. This can be done in the two following ways: * Guest mode: the user has no credentials and uses the system only as a temporary guest * Authenticated mode: the user has credentials and a stable remote account If neither username nor password is specified, the user enters the system as a guest. If both are specified and they correspond to an existing user, the user enters as an authenticated user :param username: (optional) :param password: (optional) :return: None """ if (username is None) and (password is None): auth_token = self.__login_guest() elif (username is not None) and (password is not None): auth_token, fullName = self.__login_credentials(username, password) self.logger.info("You are logged as {}".format(fullName)) else: raise ValueError("you have to specify both username and password or nothing") if auth_token is not None: self.auth_token = auth_token else: raise ConnectionError("Impossible to retrieve the authentication token")
[ "def", "login", "(", "self", ",", "username", "=", "None", ",", "password", "=", "None", ")", ":", "if", "(", "username", "is", "None", ")", "and", "(", "password", "is", "None", ")", ":", "auth_token", "=", "self", ".", "__login_guest", "(", ")", ...
Before doing any remote operation, the user has to login to the GMQL service. This can be done in the two following ways: * Guest mode: the user has no credentials and uses the system only as a temporary guest * Authenticated mode: the user has credentials and a stable remote account If neither username nor password is specified, the user enters the system as a guest. If both are specified and they correspond to an existing user, the user enters as an authenticated user :param username: (optional) :param password: (optional) :return: None
[ "Before", "doing", "any", "remote", "operation", "the", "user", "has", "to", "login", "to", "the", "GMQL", "serivice", ".", "This", "can", "be", "done", "in", "the", "two", "following", "ways", ":" ]
train
https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/RemoteConnection/RemoteManager.py#L93-L119
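A sketch of the two login modes; how the RemoteManager instance rm is obtained is not shown in this record and is assumed here.

rm.login()                       # guest mode: temporary account
# rm.login('myuser', 'mypass')   # authenticated mode; logs the user's full name
# rm.login('myuser')             # ValueError: give both credentials or neither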
DEIB-GECO/PyGMQL
gmql/RemoteConnection/RemoteManager.py
RemoteManager.logout
def logout(self): """ Logout from the remote account :return: None """ url = self.address + "/logout" header = self.__check_authentication() response = requests.get(url, headers=header) if response.status_code != 200: raise ValueError("Code {}. {}".format(response.status_code, response.json().get("error")))
python
def logout(self): """ Logout from the remote account :return: None """ url = self.address + "/logout" header = self.__check_authentication() response = requests.get(url, headers=header) if response.status_code != 200: raise ValueError("Code {}. {}".format(response.status_code, response.json().get("error")))
[ "def", "logout", "(", "self", ")", ":", "url", "=", "self", ".", "address", "+", "\"/logout\"", "header", "=", "self", ".", "__check_authentication", "(", ")", "response", "=", "requests", ".", "get", "(", "url", ",", "headers", "=", "header", ")", "if...
Logout from the remote account :return: None
[ "Logout", "from", "the", "remote", "account" ]
train
https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/RemoteConnection/RemoteManager.py#L168-L177
DEIB-GECO/PyGMQL
gmql/RemoteConnection/RemoteManager.py
RemoteManager.get_dataset_list
def get_dataset_list(self): """ Returns the list of available datasets for the current user. :return: a pandas Dataframe """ url = self.address + "/datasets" header = self.__check_authentication() response = requests.get(url, headers=header) response = response.json() datasets = response.get("datasets") res = pd.DataFrame.from_dict(datasets) return self.process_info_list(res, "info")
python
def get_dataset_list(self): """ Returns the list of available datasets for the current user. :return: a pandas Dataframe """ url = self.address + "/datasets" header = self.__check_authentication() response = requests.get(url, headers=header) response = response.json() datasets = response.get("datasets") res = pd.DataFrame.from_dict(datasets) return self.process_info_list(res, "info")
[ "def", "get_dataset_list", "(", "self", ")", ":", "url", "=", "self", ".", "address", "+", "\"/datasets\"", "header", "=", "self", ".", "__check_authentication", "(", ")", "response", "=", "requests", ".", "get", "(", "url", ",", "headers", "=", "header", ...
Returns the list of available datasets for the current user. :return: a pandas Dataframe
[ "Returns", "the", "list", "of", "available", "datasets", "for", "the", "current", "user", "." ]
train
https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/RemoteConnection/RemoteManager.py#L196-L207
DEIB-GECO/PyGMQL
gmql/RemoteConnection/RemoteManager.py
RemoteManager.get_dataset_samples
def get_dataset_samples(self, dataset_name, owner=None): """ Get the list of samples of a specific remote dataset. :param dataset_name: the dataset name :param owner: (optional) who owns the dataset. If it is not specified, the current user is used. For public datasets use 'public'. :return: a pandas Dataframe """ if isinstance(owner, str): owner = owner.lower() dataset_name = owner + "." + dataset_name header = self.__check_authentication() url = self.address + "/datasets/" + dataset_name response = requests.get(url, headers=header) if response.status_code != 200: raise ValueError("Code {}: {}".format(response.status_code, response.json().get("error"))) response = response.json() samples = response.get("samples") if len(samples) == 0: return None res = pd.DataFrame.from_dict(samples) return self.process_info_list(res, "info")
python
def get_dataset_samples(self, dataset_name, owner=None): """ Get the list of samples of a specific remote dataset. :param dataset_name: the dataset name :param owner: (optional) who owns the dataset. If it is not specified, the current user is used. For public datasets use 'public'. :return: a pandas Dataframe """ if isinstance(owner, str): owner = owner.lower() dataset_name = owner + "." + dataset_name header = self.__check_authentication() url = self.address + "/datasets/" + dataset_name response = requests.get(url, headers=header) if response.status_code != 200: raise ValueError("Code {}: {}".format(response.status_code, response.json().get("error"))) response = response.json() samples = response.get("samples") if len(samples) == 0: return None res = pd.DataFrame.from_dict(samples) return self.process_info_list(res, "info")
[ "def", "get_dataset_samples", "(", "self", ",", "dataset_name", ",", "owner", "=", "None", ")", ":", "if", "isinstance", "(", "owner", ",", "str", ")", ":", "owner", "=", "owner", ".", "lower", "(", ")", "dataset_name", "=", "owner", "+", "\".\"", "+",...
Get the list of samples of a specific remote dataset. :param dataset_name: the dataset name :param owner: (optional) who owns the dataset. If it is not specified, the current user is used. For public datasets use 'public'. :return: a pandas Dataframe
[ "Get", "the", "list", "of", "samples", "of", "a", "specific", "remote", "dataset", "." ]
train
https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/RemoteConnection/RemoteManager.py#L209-L232
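A short sketch tying the two listing calls together on the manager rm from the previous sketch; the dataset name is a placeholder.

df = rm.get_dataset_list()        # one row per dataset; the "info" column is expanded
print(df.head())

samples = rm.get_dataset_samples('Example_Dataset', owner='public')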
DEIB-GECO/PyGMQL
gmql/RemoteConnection/RemoteManager.py
RemoteManager.get_dataset_schema
def get_dataset_schema(self, dataset_name, owner=None): """ Given a dataset name, it returns a RegionParser consistent with its schema :param dataset_name: a dataset name on the repository :param owner: (optional) who owns the dataset. If it is not specified, the current user is used. For public dataset use 'public'. :return: a RegionParser """ if isinstance(owner, str): owner = owner.lower() dataset_name = owner + "." + dataset_name url = self.address + "/datasets/" + dataset_name+"/schema" header = self.__check_authentication() response = requests.get(url, headers=header) if response.status_code != 200: raise ValueError("Code {}: {}".format(response.status_code, response.json().get("error"))) response = response.json() name = response.get("name") schemaType = response.get("type") coordinates_system = response.get("coordinate_system") fields = response.get("fields") i = 0 chrPos, startPos, stopPos, strandPos = None, None, None, None otherPos = [] if schemaType == GTF: chrPos = 0 # seqname startPos = 3 # start stopPos = 4 # end strandPos = 6 # strand otherPos = [(1, 'source', 'string'), (2, 'feature', 'string'), (5, 'score', 'float'), (7, 'frame', 'string')] for field in fields: fieldName = field.get("name") fieldType = field.get("type").lower() if fieldName.lower() not in {'seqname', 'start', 'end', 'strand', 'source', 'feature', 'score', 'frame'}: otherPos.append((i, fieldName, fieldType)) i += 1 else: for field in fields: fieldName = field.get("name") fieldType = field.get("type").lower() if fieldName.lower() in chr_aliases and chrPos is None: chrPos = i elif fieldName.lower() in start_aliases and startPos is None: startPos = i elif fieldName.lower() in stop_aliases and stopPos is None: stopPos = i elif fieldName.lower() in strand_aliases and strandPos is None: strandPos = i else: # other positions otherPos.append((i, fieldName, fieldType)) i += 1 if len(otherPos) == 0: otherPos = None return RegionParser(chrPos=chrPos, startPos=startPos, stopPos=stopPos, strandPos=strandPos, otherPos=otherPos, schema_format=schemaType, coordinate_system=coordinates_system, delimiter="\t", parser_name=name)
python
def get_dataset_schema(self, dataset_name, owner=None): """ Given a dataset name, it returns a RegionParser consistent with its schema :param dataset_name: a dataset name on the repository :param owner: (optional) who owns the dataset. If it is not specified, the current user is used. For public dataset use 'public'. :return: a RegionParser """ if isinstance(owner, str): owner = owner.lower() dataset_name = owner + "." + dataset_name url = self.address + "/datasets/" + dataset_name+"/schema" header = self.__check_authentication() response = requests.get(url, headers=header) if response.status_code != 200: raise ValueError("Code {}: {}".format(response.status_code, response.json().get("error"))) response = response.json() name = response.get("name") schemaType = response.get("type") coordinates_system = response.get("coordinate_system") fields = response.get("fields") i = 0 chrPos, startPos, stopPos, strandPos = None, None, None, None otherPos = [] if schemaType == GTF: chrPos = 0 # seqname startPos = 3 # start stopPos = 4 # end strandPos = 6 # strand otherPos = [(1, 'source', 'string'), (2, 'feature', 'string'), (5, 'score', 'float'), (7, 'frame', 'string')] for field in fields: fieldName = field.get("name") fieldType = field.get("type").lower() if fieldName.lower() not in {'seqname', 'start', 'end', 'strand', 'source', 'feature', 'score', 'frame'}: otherPos.append((i, fieldName, fieldType)) i += 1 else: for field in fields: fieldName = field.get("name") fieldType = field.get("type").lower() if fieldName.lower() in chr_aliases and chrPos is None: chrPos = i elif fieldName.lower() in start_aliases and startPos is None: startPos = i elif fieldName.lower() in stop_aliases and stopPos is None: stopPos = i elif fieldName.lower() in strand_aliases and strandPos is None: strandPos = i else: # other positions otherPos.append((i, fieldName, fieldType)) i += 1 if len(otherPos) == 0: otherPos = None return RegionParser(chrPos=chrPos, startPos=startPos, stopPos=stopPos, strandPos=strandPos, otherPos=otherPos, schema_format=schemaType, coordinate_system=coordinates_system, delimiter="\t", parser_name=name)
[ "def", "get_dataset_schema", "(", "self", ",", "dataset_name", ",", "owner", "=", "None", ")", ":", "if", "isinstance", "(", "owner", ",", "str", ")", ":", "owner", "=", "owner", ".", "lower", "(", ")", "dataset_name", "=", "owner", "+", "\".\"", "+", ...
Given a dataset name, it returns a RegionParser consistent with its schema :param dataset_name: a dataset name on the repository :param owner: (optional) who owns the dataset. If it is not specified, the current user is used. For public dataset use 'public'. :return: a RegionParser
[ "Given", "a", "dataset", "name", "it", "returns", "a", "RegionParser", "consistent", "with", "its", "schema" ]
train
https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/RemoteConnection/RemoteManager.py#L234-L304
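A sketch of obtaining a parser that matches a remote schema (rm and the dataset name are hypothetical):

parser = rm.get_dataset_schema("Example_Dataset", owner="public")
print(parser.get_ordered_attributes())  # column order as stored on disk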
DEIB-GECO/PyGMQL
gmql/RemoteConnection/RemoteManager.py
RemoteManager.upload_dataset
def upload_dataset(self, dataset, dataset_name, schema_path=None): """ Upload to the repository an entire dataset from a local path :param dataset: the local path of the dataset :param dataset_name: the name you want to assign to the dataset remotely :return: None """ url = self.address + "/datasets/" + dataset_name + "/uploadSample" header = self.__check_authentication() fields = dict() remove = False if isinstance(dataset, GDataframe): tmp_path = TempFileManager.get_new_dataset_tmp_folder() dataset.to_dataset_files(local_path=tmp_path) dataset = tmp_path remove = True # a path is provided if not isinstance(dataset, str): raise TypeError("Dataset can be a path or a GDataframe. {} was passed".format(type(dataset))) file_paths, schema_path_found = Loader.get_file_paths(dataset) if schema_path is None: schema_path = schema_path_found fields['schema'] = (os.path.basename(schema_path), open(schema_path, "rb"), 'application/octet-stream') for i, file in enumerate(file_paths): fields["file"+str(i + 1)] = (os.path.basename(file), open(file, "rb"), 'application/octet-stream') encoder = MultipartEncoder(fields) callback = create_callback(encoder, len(fields)) m_encoder = MultipartEncoderMonitor(encoder, callback) header['Content-Type'] = m_encoder.content_type self.logger.debug("Uploading dataset at {} with name {}".format(dataset, dataset_name)) response = requests.post(url, data=m_encoder, headers=header) # closing files for fn in fields.keys(): _, f, _ = fields[fn] f.close() if response.status_code != 200: raise ValueError("Code {}: {}".format(response.status_code, response.content)) if remove: TempFileManager.delete_tmp_dataset(dataset)
python
def upload_dataset(self, dataset, dataset_name, schema_path=None): """ Upload to the repository an entire dataset from a local path :param dataset: the local path of the dataset :param dataset_name: the name you want to assign to the dataset remotely :return: None """ url = self.address + "/datasets/" + dataset_name + "/uploadSample" header = self.__check_authentication() fields = dict() remove = False if isinstance(dataset, GDataframe): tmp_path = TempFileManager.get_new_dataset_tmp_folder() dataset.to_dataset_files(local_path=tmp_path) dataset = tmp_path remove = True # a path is provided if not isinstance(dataset, str): raise TypeError("Dataset can be a path or a GDataframe. {} was passed".format(type(dataset))) file_paths, schema_path_found = Loader.get_file_paths(dataset) if schema_path is None: schema_path = schema_path_found fields['schema'] = (os.path.basename(schema_path), open(schema_path, "rb"), 'application/octet-stream') for i, file in enumerate(file_paths): fields["file"+str(i + 1)] = (os.path.basename(file), open(file, "rb"), 'application/octet-stream') encoder = MultipartEncoder(fields) callback = create_callback(encoder, len(fields)) m_encoder = MultipartEncoderMonitor(encoder, callback) header['Content-Type'] = m_encoder.content_type self.logger.debug("Uploading dataset at {} with name {}".format(dataset, dataset_name)) response = requests.post(url, data=m_encoder, headers=header) # closing files for fn in fields.keys(): _, f, _ = fields[fn] f.close() if response.status_code != 200: raise ValueError("Code {}: {}".format(response.status_code, response.content)) if remove: TempFileManager.delete_tmp_dataset(dataset)
[ "def", "upload_dataset", "(", "self", ",", "dataset", ",", "dataset_name", ",", "schema_path", "=", "None", ")", ":", "url", "=", "self", ".", "address", "+", "\"/datasets/\"", "+", "dataset_name", "+", "\"/uploadSample\"", "header", "=", "self", ".", "__che...
Upload to the repository an entire dataset from a local path :param dataset: the local path of the dataset :param dataset_name: the name you want to assign to the dataset remotely :return: None
[ "Upload", "to", "the", "repository", "an", "entire", "dataset", "from", "a", "local", "path" ]
train
https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/RemoteConnection/RemoteManager.py#L306-L356
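An upload sketch (rm, paths and names are hypothetical):

# upload a local dataset folder; the schema file is found inside the folder
rm.upload_dataset("/data/my_dataset", dataset_name="my_dataset")
# a GDataframe can be passed instead of a path: it is serialized to a
# temporary folder first, which is deleted once the upload completes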
DEIB-GECO/PyGMQL
gmql/RemoteConnection/RemoteManager.py
RemoteManager.delete_dataset
def delete_dataset(self, dataset_name): """ Deletes the dataset having the specified name :param dataset_name: the name that the dataset has on the repository :return: None """ url = self.address + "/datasets/" + dataset_name header = self.__check_authentication() response = requests.delete(url, headers=header) if response.status_code != 200: raise ValueError("Code {}: {}".format(response.status_code, response.json().get("error"))) self.logger.debug("Dataset {} was deleted from the repository".format(dataset_name))
python
def delete_dataset(self, dataset_name): """ Deletes the dataset having the specified name :param dataset_name: the name that the dataset has on the repository :return: None """ url = self.address + "/datasets/" + dataset_name header = self.__check_authentication() response = requests.delete(url, headers=header) if response.status_code != 200: raise ValueError("Code {}: {}".format(response.status_code, response.json().get("error"))) self.logger.debug("Dataset {} was deleted from the repository".format(dataset_name))
[ "def", "delete_dataset", "(", "self", ",", "dataset_name", ")", ":", "url", "=", "self", ".", "address", "+", "\"/datasets/\"", "+", "dataset_name", "header", "=", "self", ".", "__check_authentication", "(", ")", "response", "=", "requests", ".", "delete", "...
Deletes the dataset having the specified name :param dataset_name: the name that the dataset has on the repository :return: None
[ "Deletes", "the", "dataset", "having", "the", "specified", "name" ]
train
https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/RemoteConnection/RemoteManager.py#L358-L369
DEIB-GECO/PyGMQL
gmql/RemoteConnection/RemoteManager.py
RemoteManager.download_dataset
def download_dataset(self, dataset_name, local_path, how="stream"): """ It downloads from the repository the specified dataset and puts it in the specified local folder :param dataset_name: the name the dataset has in the repository :param local_path: where you want to save the dataset :param how: 'zip' downloads the whole dataset as a zip file and decompress it; 'stream' downloads the dataset sample by sample :return: None """ if not os.path.isdir(local_path): os.makedirs(local_path) else: raise ValueError("Path {} already exists!".format(local_path)) local_path = os.path.join(local_path, FILES_FOLDER) os.makedirs(local_path) if how == 'zip': return self.download_as_zip(dataset_name, local_path) elif how == 'stream': return self.download_as_stream(dataset_name, local_path) else: raise ValueError("how must be {'zip', 'stream'}")
python
def download_dataset(self, dataset_name, local_path, how="stream"): """ It downloads from the repository the specified dataset and puts it in the specified local folder :param dataset_name: the name the dataset has in the repository :param local_path: where you want to save the dataset :param how: 'zip' downloads the whole dataset as a zip file and decompress it; 'stream' downloads the dataset sample by sample :return: None """ if not os.path.isdir(local_path): os.makedirs(local_path) else: raise ValueError("Path {} already exists!".format(local_path)) local_path = os.path.join(local_path, FILES_FOLDER) os.makedirs(local_path) if how == 'zip': return self.download_as_zip(dataset_name, local_path) elif how == 'stream': return self.download_as_stream(dataset_name, local_path) else: raise ValueError("how must be {'zip', 'stream'}")
[ "def", "download_dataset", "(", "self", ",", "dataset_name", ",", "local_path", ",", "how", "=", "\"stream\"", ")", ":", "if", "not", "os", ".", "path", ".", "isdir", "(", "local_path", ")", ":", "os", ".", "makedirs", "(", "local_path", ")", "else", "...
It downloads from the repository the specified dataset and puts it in the specified local folder :param dataset_name: the name the dataset has in the repository :param local_path: where you want to save the dataset :param how: 'zip' downloads the whole dataset as a zip file and decompress it; 'stream' downloads the dataset sample by sample :return: None
[ "It", "downloads", "from", "the", "repository", "the", "specified", "dataset", "and", "puts", "it", "in", "the", "specified", "local", "folder" ]
train
https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/RemoteConnection/RemoteManager.py#L375-L398
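A download sketch (rm, names and paths hypothetical); note that the function raises if the target folder already exists:

rm.download_dataset("my_dataset", "/data/downloaded", how="stream")  # sample by sample
# how="zip" fetches a single archive and decompresses it instead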
DEIB-GECO/PyGMQL
gmql/RemoteConnection/RemoteManager.py
RemoteManager.query
def query(self, query, output_path=None, file_name="query", output="tab"): """ Execute a GMQL textual query on the remote server. :param query: the string containing the query :param output_path (optional): where to store the results locally. If specified the results are downloaded locally :param file_name (optional): the name of the query :param output (optional): how to save the results. It can be "tab" or "gtf" :return: a pandas dataframe with the dictionary ids of the results """ header = self.__check_authentication() header['Content-Type'] = "text/plain" output = output.lower() if output not in ['tab', 'gtf']: raise ValueError("output must be 'tab' or 'gtf'") url = self.address + "/queries/run/" + file_name + '/' + output response = requests.post(url, data=query, headers=header) if response.status_code != 200: raise ValueError("Code {}. {}".format(response.status_code, response.json().get("error"))) response = response.json() jobid = response.get("id") self.logger.debug("JobId: {}. Waiting for the result".format(jobid)) status_resp = self._wait_for_result(jobid) datasets = status_resp.get("datasets") return self.__process_result_datasets(datasets, output_path)
python
def query(self, query, output_path=None, file_name="query", output="tab"): """ Execute a GMQL textual query on the remote server. :param query: the string containing the query :param output_path (optional): where to store the results locally. If specified the results are downloaded locally :param file_name (optional): the name of the query :param output (optional): how to save the results. It can be "tab" or "gtf" :return: a pandas dataframe with the dictionary ids of the results """ header = self.__check_authentication() header['Content-Type'] = "text/plain" output = output.lower() if output not in ['tab', 'gtf']: raise ValueError("output must be 'tab' or 'gtf'") url = self.address + "/queries/run/" + file_name + '/' + output response = requests.post(url, data=query, headers=header) if response.status_code != 200: raise ValueError("Code {}. {}".format(response.status_code, response.json().get("error"))) response = response.json() jobid = response.get("id") self.logger.debug("JobId: {}. Waiting for the result".format(jobid)) status_resp = self._wait_for_result(jobid) datasets = status_resp.get("datasets") return self.__process_result_datasets(datasets, output_path)
[ "def", "query", "(", "self", ",", "query", ",", "output_path", "=", "None", ",", "file_name", "=", "\"query\"", ",", "output", "=", "\"tab\"", ")", ":", "header", "=", "self", ".", "__check_authentication", "(", ")", "header", "[", "'Content-Type'", "]", ...
Execute a GMQL textual query on the remote server. :param query: the string containing the query :param output_path (optional): where to store the results locally. If specified the results are downloaded locally :param file_name (optional): the name of the query :param output (optional): how to save the results. It can be "tab" or "gtf" :return: a pandas dataframe with the dictionary ids of the results
[ "Execute", "a", "GMQL", "textual", "query", "on", "the", "remote", "server", "." ]
train
https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/RemoteConnection/RemoteManager.py#L490-L516
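A sketch of running a textual query (rm is hypothetical and the GMQL body is illustrative only):

gmql_query = "D = SELECT() Example_Dataset; MATERIALIZE D INTO result;"
results = rm.query(gmql_query, output_path="/data/results", output="gtf")
print(results)  # ids of the materialized result datasets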
DEIB-GECO/PyGMQL
gmql/RemoteConnection/RemoteManager.py
RemoteManager.trace_job
def trace_job(self, jobId): """ Get information about the specified remote job :param jobId: the job identifier :return: a dictionary with the information """ header = self.__check_authentication() status_url = self.address + "/jobs/" + jobId + "/trace" status_resp = requests.get(status_url, headers=header) if status_resp.status_code != 200: raise ValueError("Code {}. {}".format(status_resp.status_code, status_resp.json().get("error"))) return status_resp.json()
python
def trace_job(self, jobId): """ Get information about the specified remote job :param jobId: the job identifier :return: a dictionary with the information """ header = self.__check_authentication() status_url = self.address + "/jobs/" + jobId + "/trace" status_resp = requests.get(status_url, headers=header) if status_resp.status_code != 200: raise ValueError("Code {}. {}".format(status_resp.status_code, status_resp.json().get("error"))) return status_resp.json()
[ "def", "trace_job", "(", "self", ",", "jobId", ")", ":", "header", "=", "self", ".", "__check_authentication", "(", ")", "status_url", "=", "self", ".", "address", "+", "\"/jobs/\"", "+", "jobId", "+", "\"/trace\"", "status_resp", "=", "requests", ".", "ge...
Get information about the specified remote job :param jobId: the job identifier :return: a dictionary with the information
[ "Get", "information", "about", "the", "specified", "remote", "job" ]
train
https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/RemoteConnection/RemoteManager.py#L611-L622
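A polling sketch (the job id is hypothetical, and the keys of the returned dictionary depend on the server):

info = rm.trace_job("job_example_id")
print(info)  # e.g. inspect the job status reported by the server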
DEIB-GECO/PyGMQL
gmql/settings.py
set_mode
def set_mode(how): """ Sets the behavior of the API :param how: if 'remote' all the execution is performed on the remote server; if 'local' everything is executed locally. Default = 'local' :return: None """ global __mode if how == "local": __mode = how elif how == "remote": __mode = how else: raise ValueError("how must be 'local' or 'remote'")
python
def set_mode(how): """ Sets the behavior of the API :param how: if 'remote' all the execution is performed on the remote server; if 'local' everything is executed locally. Default = 'local' :return: None """ global __mode if how == "local": __mode = how elif how == "remote": __mode = how else: raise ValueError("how must be 'local' or 'remote'")
[ "def", "set_mode", "(", "how", ")", ":", "global", "__mode", "if", "how", "==", "\"local\"", ":", "__mode", "=", "how", "elif", "how", "==", "\"remote\"", ":", "__mode", "=", "how", "else", ":", "raise", "ValueError", "(", "\"how must be 'local' or 'remote'\...
Sets the behavior of the API :param how: if 'remote' all the execution is performed on the remote server; if 'local' everything is executed locally. Default = 'local' :return: None
[ "Sets", "the", "behavior", "of", "the", "API" ]
train
https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/settings.py#L79-L92
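A sketch of switching the execution mode, in the style of the Example:: blocks used elsewhere in this module:

import gmql as gl
gl.set_mode("remote")  # subsequent materializations run on the server
# ...build and materialize queries...
gl.set_mode("local")   # switch back to local execution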
DEIB-GECO/PyGMQL
gmql/settings.py
set_progress
def set_progress(how): """ Enables or disables the progress bars for the loading, writing and downloading of datasets :param how: True if you want the progress bar, False otherwise :return: None Example:: import gmql as gl gl.set_progress(True) # enables progress bars # ....do something... gl.set_progress(False) # removes progress bars # ....do something... """ global __progress_bar if isinstance(how, bool): __progress_bar = how else: raise ValueError( "how must be a boolean. {} was found".format(type(how)))
python
def set_progress(how): """ Enables or disables the progress bars for the loading, writing and downloading of datasets :param how: True if you want the progress bar, False otherwise :return: None Example:: import gmql as gl gl.set_progress(True) # enables progress bars # ....do something... gl.set_progress(False) # removes progress bars # ....do something... """ global __progress_bar if isinstance(how, bool): __progress_bar = how else: raise ValueError( "how must be a boolean. {} was found".format(type(how)))
[ "def", "set_progress", "(", "how", ")", ":", "global", "__progress_bar", "if", "isinstance", "(", "how", ",", "bool", ")", ":", "__progress_bar", "=", "how", "else", ":", "raise", "ValueError", "(", "\"how must be a boolean. {} was found\"", ".", "format", "(", ...
Enables or disables the progress bars for the loading, writing and downloading of datasets :param how: True if you want the progress bar, False otherwise :return: None Example:: import gmql as gl gl.set_progress(True) # enables progress bars # ....do something... gl.set_progress(False) # removes progress bars # ....do something...
[ "Enables", "or", "disables", "the", "progress", "bars", "for", "the", "loading", "writing", "and", "downloading", "of", "datasets" ]
train
https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/settings.py#L107-L128
DEIB-GECO/PyGMQL
gmql/settings.py
set_meta_profiling
def set_meta_profiling(how): """ Enables or disables the profiling of metadata at the loading of a GMQLDataset :param how: True if you want to analyze the metadata when a GMQLDataset is created by a load_from_*. False otherwise. (Default=True) :return: None """ global __metadata_profiling if isinstance(how, bool): __metadata_profiling = how else: raise TypeError("how must be boolean. {} was provided".format(type(how)))
python
def set_meta_profiling(how): """ Enables or disables the profiling of metadata at the loading of a GMQLDataset :param how: True if you want to analyze the metadata when a GMQLDataset is created by a load_from_*. False otherwise. (Default=True) :return: None """ global __metadata_profiling if isinstance(how, bool): __metadata_profiling = how else: raise TypeError("how must be boolean. {} was provided".format(type(how)))
[ "def", "set_meta_profiling", "(", "how", ")", ":", "global", "__metadata_profiling", "if", "isinstance", "(", "how", ",", "bool", ")", ":", "__metadata_profiling", "=", "how", "else", ":", "raise", "TypeError", "(", "\"how must be boolean. {} was provided\"", ".", ...
Enables or disables the profiling of metadata at the loading of a GMQLDataset :param how: True if you want to analyze the metadata when a GMQLDataset is created by a load_from_*. False otherwise. (Default=True) :return: None
[ "Enables", "or", "disables", "the", "profiling", "of", "metadata", "at", "the", "loading", "of", "a", "GMQLDataset" ]
train
https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/settings.py#L136-L147
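A sketch of disabling metadata profiling before loading (the loader call in the comment is indicative only):

import gmql as gl
gl.set_meta_profiling(False)        # skip metadata analysis on load
# dataset = gl.load_from_path(...)  # loaders then omit the profiling step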
DEIB-GECO/PyGMQL
gmql/dataset/parsers/RegionParser.py
RegionParser.parse_regions
def parse_regions(self, path): """ Given a file path, it loads it into memory as a Pandas dataframe :param path: file path :return: a Pandas Dataframe """ if self.schema_format.lower() == GTF.lower(): res = self._parse_gtf_regions(path) else: res = self._parse_tab_regions(path) return res
python
def parse_regions(self, path): """ Given a file path, it loads it into memory as a Pandas dataframe :param path: file path :return: a Pandas Dataframe """ if self.schema_format.lower() == GTF.lower(): res = self._parse_gtf_regions(path) else: res = self._parse_tab_regions(path) return res
[ "def", "parse_regions", "(", "self", ",", "path", ")", ":", "if", "self", ".", "schema_format", ".", "lower", "(", ")", "==", "GTF", ".", "lower", "(", ")", ":", "res", "=", "self", ".", "_parse_gtf_regions", "(", "path", ")", "else", ":", "res", "...
Given a file path, it loads it into memory as a Pandas dataframe :param path: file path :return: a Pandas Dataframe
[ "Given", "a", "file", "path", "it", "loads", "it", "into", "memory", "as", "a", "Pandas", "dataframe" ]
train
https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/dataset/parsers/RegionParser.py#L101-L111
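A parsing sketch (the parser would come from a schema, e.g. via get_dataset_schema above; the file path is hypothetical):

regions = parser.parse_regions("/data/my_dataset/files/S_00000.gdm")
print(regions.dtypes)  # column types as declared by the schema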
DEIB-GECO/PyGMQL
gmql/dataset/parsers/RegionParser.py
RegionParser.get_attributes
def get_attributes(self): """ Returns the unordered list of attributes :return: list of strings """ attr = ['chr', 'start', 'stop'] if self.strandPos is not None: attr.append('strand') if self.otherPos: for i, o in enumerate(self.otherPos): attr.append(o[1]) return attr
python
def get_attributes(self): """ Returns the unordered list of attributes :return: list of strings """ attr = ['chr', 'start', 'stop'] if self.strandPos is not None: attr.append('strand') if self.otherPos: for i, o in enumerate(self.otherPos): attr.append(o[1]) return attr
[ "def", "get_attributes", "(", "self", ")", ":", "attr", "=", "[", "'chr'", ",", "'start'", ",", "'stop'", "]", "if", "self", ".", "strandPos", "is", "not", "None", ":", "attr", ".", "append", "(", "'strand'", ")", "if", "self", ".", "otherPos", ":", ...
Returns the unordered list of attributes :return: list of strings
[ "Returns", "the", "unordered", "list", "of", "attributes" ]
train
https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/dataset/parsers/RegionParser.py#L153-L165
DEIB-GECO/PyGMQL
gmql/dataset/parsers/RegionParser.py
RegionParser.get_ordered_attributes
def get_ordered_attributes(self): """ Returns the ordered list of attributes :return: list of strings """ attrs = self.get_attributes() attr_arr = np.array(attrs) poss = [self.chrPos, self.startPos, self.stopPos] if self.strandPos is not None: poss.append(self.strandPos) if self.otherPos: for o in self.otherPos: poss.append(o[0]) idx_sort = np.array(poss).argsort() return attr_arr[idx_sort].tolist()
python
def get_ordered_attributes(self): """ Returns the ordered list of attributes :return: list of strings """ attrs = self.get_attributes() attr_arr = np.array(attrs) poss = [self.chrPos, self.startPos, self.stopPos] if self.strandPos is not None: poss.append(self.strandPos) if self.otherPos: for o in self.otherPos: poss.append(o[0]) idx_sort = np.array(poss).argsort() return attr_arr[idx_sort].tolist()
[ "def", "get_ordered_attributes", "(", "self", ")", ":", "attrs", "=", "self", ".", "get_attributes", "(", ")", "attr_arr", "=", "np", ".", "array", "(", "attrs", ")", "poss", "=", "[", "self", ".", "chrPos", ",", "self", ".", "startPos", ",", "self", ...
Returns the ordered list of attributes :return: list of strings
[ "Returns", "the", "ordered", "list", "of", "attributes" ]
train
https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/dataset/parsers/RegionParser.py#L167-L182
DEIB-GECO/PyGMQL
gmql/dataset/parsers/RegionParser.py
RegionParser.get_types
def get_types(self): """ Returns the unordered list of data types :return: list of data types """ types = [str, int, int] if self.strandPos is not None: types.append(str) if self.otherPos: for o in self.otherPos: types.append(o[2]) return types
python
def get_types(self): """ Returns the unordered list of data types :return: list of data types """ types = [str, int, int] if self.strandPos is not None: types.append(str) if self.otherPos: for o in self.otherPos: types.append(o[2]) return types
[ "def", "get_types", "(", "self", ")", ":", "types", "=", "[", "str", ",", "int", ",", "int", "]", "if", "self", ".", "strandPos", "is", "not", "None", ":", "types", ".", "append", "(", "str", ")", "if", "self", ".", "otherPos", ":", "for", "o", ...
Returns the unordered list of data types :return: list of data types
[ "Returns", "the", "unordered", "list", "of", "data", "types" ]
train
https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/dataset/parsers/RegionParser.py#L184-L196
DEIB-GECO/PyGMQL
gmql/dataset/parsers/RegionParser.py
RegionParser.get_name_type_dict
def get_name_type_dict(self): """ Returns a dictionary of the type {'column_name': data_type, ...} :return: dict """ attrs = self.get_attributes() types = self.get_types() d = dict() for i,a in enumerate(attrs): d[a] = types[i] return d
python
def get_name_type_dict(self): """ Returns a dictionary of the type {'column_name': data_type, ...} :return: dict """ attrs = self.get_attributes() types = self.get_types() d = dict() for i,a in enumerate(attrs): d[a] = types[i] return d
[ "def", "get_name_type_dict", "(", "self", ")", ":", "attrs", "=", "self", ".", "get_attributes", "(", ")", "types", "=", "self", ".", "get_types", "(", ")", "d", "=", "dict", "(", ")", "for", "i", ",", "a", "in", "enumerate", "(", "attrs", ")", ":"...
Returns a dictionary of the type {'column_name': data_type, ...} :return: dict
[ "Returns", "a", "dictionary", "of", "the", "type", "{", "column_name", ":", "data_type", "...", "}" ]
train
https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/dataset/parsers/RegionParser.py#L198-L210
DEIB-GECO/PyGMQL
gmql/dataset/parsers/RegionParser.py
RegionParser.get_ordered_types
def get_ordered_types(self): """ Returns the ordered list of data types :return: list of data types """ types = self.get_types() types_arr = np.array(types) poss = [self.chrPos, self.startPos, self.stopPos] if self.strandPos is not None: poss.append(self.strandPos) if self.otherPos: for o in self.otherPos: poss.append(o[0]) idx_sort = np.array(poss).argsort() return types_arr[idx_sort].tolist()
python
def get_ordered_types(self): """ Returns the ordered list of data types :return: list of data types """ types = self.get_types() types_arr = np.array(types) poss = [self.chrPos, self.startPos, self.stopPos] if self.strandPos is not None: poss.append(self.strandPos) if self.otherPos: for o in self.otherPos: poss.append(o[0]) idx_sort = np.array(poss).argsort() return types_arr[idx_sort].tolist()
[ "def", "get_ordered_types", "(", "self", ")", ":", "types", "=", "self", ".", "get_types", "(", ")", "types_arr", "=", "np", ".", "array", "(", "types", ")", "poss", "=", "[", "self", ".", "chrPos", ",", "self", ".", "startPos", ",", "self", ".", "...
Returns the ordered list of data types :return: list of data types
[ "Returns", "the", "ordered", "list", "of", "data", "types" ]
train
https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/dataset/parsers/RegionParser.py#L212-L226
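The five accessors above differ only in ordering and output shape. A small sketch with a hypothetical BED-like parser (assuming the constructor arguments not shown keep their defaults):

from gmql.dataset.parsers.RegionParser import RegionParser

p = RegionParser(chrPos=0, startPos=1, stopPos=2, strandPos=3,
                 otherPos=[(4, "score", "float")], delimiter="\t",
                 parser_name="example_bed")
p.get_attributes()          # ['chr', 'start', 'stop', 'strand', 'score']
p.get_ordered_attributes()  # identical here: the positions are already sorted
p.get_name_type_dict()      # {'chr': str, 'start': int, 'stop': int, ...}
p.get_ordered_types()       # the same types, sorted by column position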
DEIB-GECO/PyGMQL
gmql/ml/algorithms/clustering.py
Clustering.xmeans
def xmeans(cls, initial_centers=None, kmax=20, tolerance=0.025, criterion=splitting_type.BAYESIAN_INFORMATION_CRITERION, ccore=False): """ Constructor of the x-means clustering algorithm :param initial_centers: Initial coordinates of the cluster centers, represented as a list: [center1, center2, ...] Note: The dimensions of the initial centers should be the same as those of the dataset. :param kmax: Maximum number of clusters that can be allocated. :param tolerance: Stop condition for each iteration: if the maximum change of the cluster centers is less than the tolerance, the algorithm stops processing :param criterion: Type of splitting creation. :param ccore: Defines whether CCORE (the C++ pyclustering library) should be used instead of the Python implementation. :return: returns the clustering object """ model = xmeans(None, initial_centers, kmax, tolerance, criterion, ccore) return cls(model)
python
def xmeans(cls, initial_centers=None, kmax=20, tolerance=0.025, criterion=splitting_type.BAYESIAN_INFORMATION_CRITERION, ccore=False): """ Constructor of the x-means clustering algorithm :param initial_centers: Initial coordinates of the cluster centers, represented as a list: [center1, center2, ...] Note: The dimensions of the initial centers should be the same as those of the dataset. :param kmax: Maximum number of clusters that can be allocated. :param tolerance: Stop condition for each iteration: if the maximum change of the cluster centers is less than the tolerance, the algorithm stops processing :param criterion: Type of splitting creation. :param ccore: Defines whether CCORE (the C++ pyclustering library) should be used instead of the Python implementation. :return: returns the clustering object """ model = xmeans(None, initial_centers, kmax, tolerance, criterion, ccore) return cls(model)
[ "def", "xmeans", "(", "cls", ",", "initial_centers", "=", "None", ",", "kmax", "=", "20", ",", "tolerance", "=", "0.025", ",", "criterion", "=", "splitting_type", ".", "BAYESIAN_INFORMATION_CRITERION", ",", "ccore", "=", "False", ")", ":", "model", "=", "x...
Constructor of the x-means clustering algorithm :param initial_centers: Initial coordinates of the cluster centers, represented as a list: [center1, center2, ...] Note: The dimensions of the initial centers should be the same as those of the dataset. :param kmax: Maximum number of clusters that can be allocated. :param tolerance: Stop condition for each iteration: if the maximum change of the cluster centers is less than the tolerance, the algorithm stops processing :param criterion: Type of splitting creation. :param ccore: Defines whether CCORE (the C++ pyclustering library) should be used instead of the Python implementation. :return: returns the clustering object
[ "Constructor", "of", "the", "x", "-", "means", "clustering", "algorithm" ]
train
https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/ml/algorithms/clustering.py#L26-L39
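A clustering sketch (X is a hypothetical (n_samples, n_features) matrix; the import path follows this file's location):

from gmql.ml.algorithms.clustering import Clustering

clus = Clustering.xmeans(kmax=10).fit(X)  # the data is injected at fit time
labels = Clustering.get_labels(clus)      # one cluster id per data point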
DEIB-GECO/PyGMQL
gmql/ml/algorithms/clustering.py
Clustering.clarans
def clarans(cls, number_clusters, num_local, max_neighbour): """ Constructor of the CLARANS clustering algorithm :param number_clusters: the number of clusters to be allocated :param num_local: the number of local minima obtained (amount of iterations for solving the problem). :param max_neighbour: the maximum number of neighbours examined during the search. :return: the resulting clustering object """ model = clarans(None, number_clusters, num_local, max_neighbour) return cls(model)
python
def clarans(cls, number_clusters, num_local, max_neighbour): """ Constructor of the CLARANS clustering algorithm :param number_clusters: the number of clusters to be allocated :param num_local: the number of local minima obtained (amount of iterations for solving the problem). :param max_neighbour: the maximum number of neighbours examined during the search. :return: the resulting clustering object """ model = clarans(None, number_clusters, num_local, max_neighbour) return cls(model)
[ "def", "clarans", "(", "cls", ",", "number_clusters", ",", "num_local", ",", "max_neighbour", ")", ":", "model", "=", "clarans", "(", "None", ",", "number_clusters", ",", "num_local", ",", "max_neighbour", ")", "return", "cls", "(", "model", ")" ]
Constructor of the CLARANS clustering algorithm :param number_clusters: the number of clusters to be allocated :param num_local: the number of local minima obtained (amount of iterations for solving the problem). :param max_neighbour: the maximum number of neighbours examined during the search. :return: the resulting clustering object
[ "Constructor", "of", "the", "CLARANS", "clustering", "algorithm" ]
train
https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/ml/algorithms/clustering.py#L42-L52
DEIB-GECO/PyGMQL
gmql/ml/algorithms/clustering.py
Clustering.rock
def rock(cls, data, eps, number_clusters, threshold=0.5, ccore=False): """ Constructor of the ROCK cluster analysis algorithm :param data: Input data presented as a list of points :param eps: Connectivity radius (similarity threshold); points are neighbors if the distance between them is less than the connectivity radius :param number_clusters: Defines the number of clusters that should be allocated from the input data set :param threshold: Value that defines the degree of normalization that influences the choice of clusters for merging during processing :param ccore: Defines whether CCORE (the C++ pyclustering library) should be used instead of the Python implementation. :return: The resulting clustering object """ data = cls.input_preprocess(data) model = rock(data, eps, number_clusters, threshold, ccore) return cls(model)
python
def rock(cls, data, eps, number_clusters, threshold=0.5, ccore=False): """ Constructor of the ROCK cluster analysis algorithm :param data: Input data presented as a list of points :param eps: Connectivity radius (similarity threshold); points are neighbors if the distance between them is less than the connectivity radius :param number_clusters: Defines the number of clusters that should be allocated from the input data set :param threshold: Value that defines the degree of normalization that influences the choice of clusters for merging during processing :param ccore: Defines whether CCORE (the C++ pyclustering library) should be used instead of the Python implementation. :return: The resulting clustering object """ data = cls.input_preprocess(data) model = rock(data, eps, number_clusters, threshold, ccore) return cls(model)
[ "def", "rock", "(", "cls", ",", "data", ",", "eps", ",", "number_clusters", ",", "threshold", "=", "0.5", ",", "ccore", "=", "False", ")", ":", "data", "=", "cls", ".", "input_preprocess", "(", "data", ")", "model", "=", "rock", "(", "data", ",", "...
Constructor of the ROCK cluster analysis algorithm :param data: Input data presented as a list of points :param eps: Connectivity radius (similarity threshold); points are neighbors if the distance between them is less than the connectivity radius :param number_clusters: Defines the number of clusters that should be allocated from the input data set :param threshold: Value that defines the degree of normalization that influences the choice of clusters for merging during processing :param ccore: Defines whether CCORE (the C++ pyclustering library) should be used instead of the Python implementation. :return: The resulting clustering object
[ "Constructor", "of", "the", "ROCK", "cluster", "analysis", "algorithm" ]
train
https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/ml/algorithms/clustering.py#L55-L67
DEIB-GECO/PyGMQL
gmql/ml/algorithms/clustering.py
Clustering.optics
def optics(cls, data, eps, minpts, ccore=False): """ Constructor of the OPTICS clustering algorithm :param data: Input data that is presented as a list of points (objects), where each point is represented by a list or tuple :param eps: Connectivity radius between points; points may be connected if the distance between them is less than the radius :param minpts: Minimum number of shared neighbors that is required for establishing links between points :param ccore: if True then the CCORE DLL (C++ solution) will be used for solving the problem :return: the resulting clustering object """ data = cls.input_preprocess(data) model = optics(data, eps, minpts) return cls(model)
python
def optics(cls, data, eps, minpts, ccore=False): """ Constructor of the OPTICS clustering algorithm :param data: Input data that is presented as a list of points (objects), where each point is represented by a list or tuple :param eps: Connectivity radius between points; points may be connected if the distance between them is less than the radius :param minpts: Minimum number of shared neighbors that is required for establishing links between points :param ccore: if True then the CCORE DLL (C++ solution) will be used for solving the problem :return: the resulting clustering object """ data = cls.input_preprocess(data) model = optics(data, eps, minpts) return cls(model)
[ "def", "optics", "(", "cls", ",", "data", ",", "eps", ",", "minpts", ",", "ccore", "=", "False", ")", ":", "data", "=", "cls", ".", "input_preprocess", "(", "data", ")", "model", "=", "optics", "(", "data", ",", "eps", ",", "minpts", ")", "return",...
Constructor of the OPTICS clustering algorithm :param data: Input data that is presented as a list of points (objects), where each point is represented by a list or tuple :param eps: Connectivity radius between points; points may be connected if the distance between them is less than the radius :param minpts: Minimum number of shared neighbors that is required for establishing links between points :param ccore: if True then the CCORE DLL (C++ solution) will be used for solving the problem :return: the resulting clustering object
[ "Constructor", "of", "the", "OPTICS", "clustering", "algorithm" ]
train
https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/ml/algorithms/clustering.py#L78-L93
DEIB-GECO/PyGMQL
gmql/ml/algorithms/clustering.py
Clustering.is_pyclustering_instance
def is_pyclustering_instance(model): """ Checks if the clustering algorithm belongs to pyclustering :param model: the clustering algorithm model :return: the truth value (Boolean) """ return any(isinstance(model, i) for i in [xmeans, clarans, rock, optics])
python
def is_pyclustering_instance(model): """ Checks if the clustering algorithm belongs to pyclustering :param model: the clustering algorithm model :return: the truth value (Boolean) """ return any(isinstance(model, i) for i in [xmeans, clarans, rock, optics])
[ "def", "is_pyclustering_instance", "(", "model", ")", ":", "return", "any", "(", "isinstance", "(", "model", ",", "i", ")", "for", "i", "in", "[", "xmeans", ",", "clarans", ",", "rock", ",", "optics", "]", ")" ]
Checks if the clustering algorithm belongs to pyclustering :param model: the clustering algorithm model :return: the truth value (Boolean)
[ "Checks", "if", "the", "clustering", "algorithm", "belongs", "to", "pyclustering" ]
train
https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/ml/algorithms/clustering.py#L205-L212
DEIB-GECO/PyGMQL
gmql/ml/algorithms/clustering.py
Clustering.fit
def fit(self, data=None): """ Performs clustering :param data: Data to be fit :return: the clustering object """ if self.is_pyclustering_instance(self.model): if isinstance(self.model, xmeans): data = self.input_preprocess(data) self.model._xmeans__pointer_data = data elif isinstance(self.model, clarans): data = self.input_preprocess(data) self.model._clarans__pointer_data = data self.model.process() else: self.model.fit(data) return self
python
def fit(self, data=None): """ Performs clustering :param data: Data to be fit :return: the clustering object """ if self.is_pyclustering_instance(self.model): if isinstance(self.model, xmeans): data = self.input_preprocess(data) self.model._xmeans__pointer_data = data elif isinstance(self.model, clarans): data = self.input_preprocess(data) self.model._clarans__pointer_data = data self.model.process() else: self.model.fit(data) return self
[ "def", "fit", "(", "self", ",", "data", "=", "None", ")", ":", "if", "self", ".", "is_pyclustering_instance", "(", "self", ".", "model", ")", ":", "if", "isinstance", "(", "self", ".", "model", ",", "xmeans", ")", ":", "data", "=", "self", ".", "in...
Performs clustering :param data: Data to be fit :return: the clustering object
[ "Performs", "clustering" ]
train
https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/ml/algorithms/clustering.py#L214-L232
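fit dispatches on the wrapped model: pyclustering algorithms receive the data through their internal pointer and process(), while sklearn-style models use fit() directly. A sketch (X hypothetical):

km = Clustering.kmeans(3).fit(X)       # sklearn path
xm = Clustering.xmeans(kmax=5).fit(X)  # pyclustering path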
DEIB-GECO/PyGMQL
gmql/ml/algorithms/clustering.py
Clustering._labels_from_pyclusters
def _labels_from_pyclusters(self): """ Computes and returns the list of labels indicating the data points and the corresponding cluster ids. :return: The list of labels """ clusters = self.model.get_clusters() labels = [] for i in range(0, len(clusters)): for j in clusters[i]: labels.insert(int(j), i) return labels
python
def _labels_from_pyclusters(self): """ Computes and returns the list of labels indicating the data points and the corresponding cluster ids. :return: The list of labels """ clusters = self.model.get_clusters() labels = [] for i in range(0, len(clusters)): for j in clusters[i]: labels.insert(int(j), i) return labels
[ "def", "_labels_from_pyclusters", "(", "self", ")", ":", "clusters", "=", "self", ".", "model", ".", "get_clusters", "(", ")", "labels", "=", "[", "]", "for", "i", "in", "range", "(", "0", ",", "len", "(", "clusters", ")", ")", ":", "for", "j", "in...
Computes and returns the list of labels indicating the data points and the corresponding cluster ids. :return: The list of labels
[ "Computes", "and", "returns", "the", "list", "of", "labels", "indicating", "the", "data", "points", "and", "the", "corresponding", "cluster", "ids", "." ]
train
https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/ml/algorithms/clustering.py#L235-L246
DEIB-GECO/PyGMQL
gmql/ml/algorithms/clustering.py
Clustering.retrieve_cluster
def retrieve_cluster(self, df, cluster_no): """ Extracts the cluster at the given index from the input dataframe :param df: the dataframe that contains the clusters :param cluster_no: the cluster number :return: returns the extracted cluster """ if self.is_pyclustering_instance(self.model): clusters = self.model.get_clusters() mask = [] for i in range(0, df.shape[0]): mask.append(i in clusters[cluster_no]) else: mask = self.model.labels_ == cluster_no # a boolean mask return df[mask]
python
def retrieve_cluster(self, df, cluster_no): """ Extracts the cluster at the given index from the input dataframe :param df: the dataframe that contains the clusters :param cluster_no: the cluster number :return: returns the extracted cluster """ if self.is_pyclustering_instance(self.model): clusters = self.model.get_clusters() mask = [] for i in range(0, df.shape[0]): mask.append(i in clusters[cluster_no]) else: mask = self.model.labels_ == cluster_no # a boolean mask return df[mask]
[ "def", "retrieve_cluster", "(", "self", ",", "df", ",", "cluster_no", ")", ":", "if", "self", ".", "is_pyclustering_instance", "(", "self", ".", "model", ")", ":", "clusters", "=", "self", ".", "model", ".", "get_clusters", "(", ")", "mask", "=", "[", ...
Extracts the cluster at the given index from the input dataframe :param df: the dataframe that contains the clusters :param cluster_no: the cluster number :return: returns the extracted cluster
[ "Extracts", "the", "cluster", "at", "the", "given", "index", "from", "the", "input", "dataframe" ]
train
https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/ml/algorithms/clustering.py#L248-L263
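A sketch of extracting one cluster (df is the DataFrame that was clustered and clus a fitted Clustering object, both hypothetical):

cluster0 = clus.retrieve_cluster(df, 0)  # rows assigned to cluster 0
print(len(cluster0))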
DEIB-GECO/PyGMQL
gmql/ml/algorithms/clustering.py
Clustering.get_labels
def get_labels(obj): """ Retrieve the labels of a clustering object :param obj: the clustering object :return: the resulting labels """ if Clustering.is_pyclustering_instance(obj.model): return obj._labels_from_pyclusters() else: return obj.model.labels_
python
def get_labels(obj): """ Retrieve the labels of a clustering object :param obj: the clustering object :return: the resulting labels """ if Clustering.is_pyclustering_instance(obj.model): return obj._labels_from_pyclusters() else: return obj.model.labels_
[ "def", "get_labels", "(", "obj", ")", ":", "if", "Clustering", ".", "is_pyclustering_instance", "(", "obj", ".", "model", ")", ":", "return", "obj", ".", "_labels_from_pyclusters", "(", ")", "else", ":", "return", "obj", ".", "model", ".", "labels_" ]
Retrieve the labels of a clustering object :param obj: the clustering object :return: the resulting labels
[ "Retrieve", "the", "labels", "of", "a", "clustering", "object" ]
train
https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/ml/algorithms/clustering.py#L266-L276
DEIB-GECO/PyGMQL
gmql/ml/algorithms/clustering.py
Clustering.silhouette_n_clusters
def silhouette_n_clusters(data, k_min, k_max, distance='euclidean'): """ Computes and plots the silhouette score vs number of clusters graph to help select the number of clusters visually :param data: The data object :param k_min: lower bound of the cluster range :param k_max: upper bound of the cluster range :param distance: the distance metric, 'euclidean' by default :return: """ k_range = range(k_min, k_max) k_means_var = [Clustering.kmeans(k).fit(data) for k in k_range] silhouette_scores = [obj.silhouette_score(data=data, metric=distance) for obj in k_means_var] fig = plt.figure() ax = fig.add_subplot(111) ax.plot(k_range, silhouette_scores, 'b*-') ax.set_ylim((-1, 1)) plt.grid(True) plt.xlabel('n_clusters') plt.ylabel('The silhouette score') plt.title('Silhouette score vs. k') plt.show()
python
def silhouette_n_clusters(data, k_min, k_max, distance='euclidean'): """ Computes and plots the silhouette score vs number of clusters graph to help select the number of clusters visually :param data: The data object :param k_min: lower bound of the cluster range :param k_max: upper bound of the cluster range :param distance: the distance metric, 'euclidean' by default :return: """ k_range = range(k_min, k_max) k_means_var = [Clustering.kmeans(k).fit(data) for k in k_range] silhouette_scores = [obj.silhouette_score(data=data, metric=distance) for obj in k_means_var] fig = plt.figure() ax = fig.add_subplot(111) ax.plot(k_range, silhouette_scores, 'b*-') ax.set_ylim((-1, 1)) plt.grid(True) plt.xlabel('n_clusters') plt.ylabel('The silhouette score') plt.title('Silhouette score vs. k') plt.show()
[ "def", "silhouette_n_clusters", "(", "data", ",", "k_min", ",", "k_max", ",", "distance", "=", "'euclidean'", ")", ":", "k_range", "=", "range", "(", "k_min", ",", "k_max", ")", "k_means_var", "=", "[", "Clustering", ".", "kmeans", "(", "k", ")", ".", ...
Computes and plots the silhouette score vs number of clusters graph to help select the number of clusters visually :param data: The data object :param k_min: lower bound of the cluster range :param k_max: upper bound of the cluster range :param distance: the distance metric, 'euclidean' by default :return:
[ "Computes", "and", "plots", "the", "silhouette", "score", "vs", "number", "of", "clusters", "graph", "to", "help", "select", "the", "number", "of", "clusters", "visually" ]
train
https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/ml/algorithms/clustering.py#L279-L303
DEIB-GECO/PyGMQL
gmql/ml/algorithms/clustering.py
Clustering.elbow_method
def elbow_method(data, k_min, k_max, distance='euclidean'): """ Calculates and plots the percentage of variance explained versus the number of clusters Implementation reference: https://github.com/sarguido/k-means-clustering :param data: The dataset :param k_min: lower bound of the cluster range :param k_max: upper bound of the cluster range :param distance: the distance metric, 'euclidean' by default :return: """ # Determine your k range k_range = range(k_min, k_max) # Fit the kmeans model for each n_clusters = k k_means_var = [Clustering.kmeans(k).fit(data) for k in k_range] # Pull out the cluster centers for each model centroids = [X.model.cluster_centers_ for X in k_means_var] # Calculate the Euclidean distance from # each point to each cluster center k_euclid = [cdist(data, cent, distance) for cent in centroids] dist = [np.min(ke, axis=1) for ke in k_euclid] # Total within-cluster sum of squares wcss = [sum(d ** 2) for d in dist] # The total sum of squares tss = sum(pdist(data) ** 2) / data.shape[0] # The between-cluster sum of squares bss = tss - wcss # elbow curve fig = plt.figure() ax = fig.add_subplot(111) ax.plot(k_range, bss / tss * 100, 'b*-') ax.set_ylim((0, 100)) plt.grid(True) plt.xlabel('n_clusters') plt.ylabel('Percentage of variance explained') plt.title('Variance Explained vs. k') plt.show()
python
def elbow_method(data, k_min, k_max, distance='euclidean'): """ Calculates and plots the percentage of variance explained versus the number of clusters Implementation reference: https://github.com/sarguido/k-means-clustering :param data: The dataset :param k_min: lower bound of the cluster range :param k_max: upper bound of the cluster range :param distance: the distance metric, 'euclidean' by default :return: """ # Determine your k range k_range = range(k_min, k_max) # Fit the kmeans model for each n_clusters = k k_means_var = [Clustering.kmeans(k).fit(data) for k in k_range] # Pull out the cluster centers for each model centroids = [X.model.cluster_centers_ for X in k_means_var] # Calculate the Euclidean distance from # each point to each cluster center k_euclid = [cdist(data, cent, distance) for cent in centroids] dist = [np.min(ke, axis=1) for ke in k_euclid] # Total within-cluster sum of squares wcss = [sum(d ** 2) for d in dist] # The total sum of squares tss = sum(pdist(data) ** 2) / data.shape[0] # The between-cluster sum of squares bss = tss - wcss # elbow curve fig = plt.figure() ax = fig.add_subplot(111) ax.plot(k_range, bss / tss * 100, 'b*-') ax.set_ylim((0, 100)) plt.grid(True) plt.xlabel('n_clusters') plt.ylabel('Percentage of variance explained') plt.title('Variance Explained vs. k') plt.show()
[ "def", "elbow_method", "(", "data", ",", "k_min", ",", "k_max", ",", "distance", "=", "'euclidean'", ")", ":", "# Determine your k range", "k_range", "=", "range", "(", "k_min", ",", "k_max", ")", "# Fit the kmeans model for each n_clusters = k", "k_means_var", "=",...
Calculates and plots the percentage of variance explained versus the number of clusters Implementation reference: https://github.com/sarguido/k-means-clustering :param data: The dataset :param k_min: lower bound of the cluster range :param k_max: upper bound of the cluster range :param distance: the distance metric, 'euclidean' by default :return:
[ "Calculates", "and", "plots", "the", "percentage", "of", "variance", "explained", "versus", "the", "number", "of", "clusters", "Implementation", "reference", ":", "https", ":", "//", "github", ".", "com", "/", "sarguido", "/", "k", "-", "means", "-", "clustering...
train
https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/ml/algorithms/clustering.py#L307-L350
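A sketch of the two model-selection helpers defined above (X hypothetical; both display a matplotlib figure):

Clustering.silhouette_n_clusters(X, 2, 10)  # silhouette score vs. k
Clustering.elbow_method(X, 2, 10)           # explained variance vs. k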
DEIB-GECO/PyGMQL
gmql/ml/algorithms/clustering.py
Clustering.adjusted_mutual_info
def adjusted_mutual_info(self, reference_clusters): """ Calculates the adjusted mutual information score w.r.t. the reference clusters (explicit evaluation) :param reference_clusters: Clusters that are to be used as reference :return: returns the value of the adjusted mutual information score """ return adjusted_mutual_info_score(self.get_labels(self), self.get_labels(reference_clusters))
python
def adjusted_mutual_info(self, reference_clusters): """ Calculates the adjusted mutual information score w.r.t. the reference clusters (explicit evaluation) :param reference_clusters: Clusters that are to be used as reference :return: returns the value of the adjusted mutual information score """ return adjusted_mutual_info_score(self.get_labels(self), self.get_labels(reference_clusters))
[ "def", "adjusted_mutual_info", "(", "self", ",", "reference_clusters", ")", ":", "return", "adjusted_mutual_info_score", "(", "self", ".", "get_labels", "(", "self", ")", ",", "self", ".", "get_labels", "(", "reference_clusters", ")", ")" ]
Calculates the adjusted mutual information score w.r.t. the reference clusters (explicit evaluation) :param reference_clusters: Clusters that are to be used as reference :return: returns the value of the adjusted mutual information score
[ "Calculates", "the", "adjusted", "mutual", "information", "score", "w", ".", "r", ".", "t", ".", "the", "reference", "clusters", "(", "explicit", "evaluation", ")" ]
train
https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/ml/algorithms/clustering.py#L352-L359
DEIB-GECO/PyGMQL
gmql/ml/algorithms/clustering.py
Clustering.adjusted_rand_score
def adjusted_rand_score(self, reference_clusters): """ Calculates the adjusted rand score w.r.t. the reference clusters (explicit evaluation) :param reference_clusters: Clusters that are to be used as reference :return: returns the value of the adjusted rand score """ return adjusted_rand_score(self.get_labels(self), self.get_labels(reference_clusters))
python
def adjusted_rand_score(self, reference_clusters): """ Calculates the adjusted rand score w.r.t. the reference clusters (explicit evaluation) :param reference_clusters: Clusters that are to be used as reference :return: returns the value of the adjusted rand score """ return adjusted_rand_score(self.get_labels(self), self.get_labels(reference_clusters))
[ "def", "adjusted_rand_score", "(", "self", ",", "reference_clusters", ")", ":", "return", "adjusted_rand_score", "(", "self", ".", "get_labels", "(", "self", ")", ",", "self", ".", "get_labels", "(", "reference_clusters", ")", ")" ]
Calculates the adjusted rand score w.r.t. the reference clusters (explicit evaluation) :param reference_clusters: Clusters that are to be used as reference :return: returns the value of the adjusted rand score
[ "Calculates", "the", "adjusted", "rand", "score", "w", ".", "r", ".", "t", ".", "the", "reference", "clusters", "(", "explicit", "evaluation", ")" ]
train
https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/ml/algorithms/clustering.py#L361-L368
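This wrapper and adjusted_mutual_info above both delegate to scikit-learn on two label vectors; a small self-contained illustration with hypothetical labels:

# What the adjusted scores compute, shown directly on two label vectors.
from sklearn.metrics import adjusted_rand_score, adjusted_mutual_info_score

labels = [0, 0, 1, 1, 2, 2]      # hypothetical clustering result
reference = [0, 0, 1, 2, 2, 2]   # hypothetical reference labels
print(adjusted_rand_score(labels, reference))         # in [-1, 1], 1 = identical partitions
print(adjusted_mutual_info_score(labels, reference))  # chance-adjusted, 1 = identical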
DEIB-GECO/PyGMQL
gmql/ml/algorithms/clustering.py
Clustering.completeness_score
def completeness_score(self, reference_clusters): """ Calculates the completeness score w.r.t. the reference clusters (explicit evaluation) :param reference_clusters: Clusters that are to be used as reference :return: the resulting completeness score """ return completeness_score(self.get_labels(self), self.get_labels(reference_clusters))
python
def completeness_score(self, reference_clusters): """ Calculates the completeness score w.r.t. the reference clusters (explicit evaluation) :param reference_clusters: Clusters that are to be used as reference :return: the resulting completeness score """ return completeness_score(self.get_labels(self), self.get_labels(reference_clusters))
[ "def", "completeness_score", "(", "self", ",", "reference_clusters", ")", ":", "return", "completeness_score", "(", "self", ".", "get_labels", "(", "self", ")", ",", "self", ".", "get_labels", "(", "reference_clusters", ")", ")" ]
Calculates the completeness score w.r.t. the reference clusters (explicit evaluation) :param reference_clusters: Clusters that are to be used as reference :return: the resulting completeness score
[ "Calculates", "the", "completeness", "score", "w", ".", "r", ".", "t", ".", "the", "reference", "clusters", "(", "explicit", "evaluation", ")" ]
train
https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/ml/algorithms/clustering.py#L379-L386
DEIB-GECO/PyGMQL
gmql/ml/algorithms/clustering.py
Clustering.fowlkes_mallows
def fowlkes_mallows(self, reference_clusters): """ Calculates the Fowlkes-Mallows index (FMI) w.r.t. the reference clusters (explicit evaluation) :param reference_clusters: Clusters that are to be used as reference :return: The resulting Fowlkes-Mallows score. """ return fowlkes_mallows_score(self.get_labels(self), self.get_labels(reference_clusters))
python
def fowlkes_mallows(self, reference_clusters): """ Calculates the Fowlkes-Mallows index (FMI) w.r.t. the reference clusters (explicit evaluation) :param reference_clusters: Clusters that are to be used as reference :return: The resulting Fowlkes-Mallows score. """ return fowlkes_mallows_score(self.get_labels(self), self.get_labels(reference_clusters))
[ "def", "fowlkes_mallows", "(", "self", ",", "reference_clusters", ")", ":", "return", "fowlkes_mallows_score", "(", "self", ".", "get_labels", "(", "self", ")", ",", "self", ".", "get_labels", "(", "reference_clusters", ")", ")" ]
Calculates the Fowlkes-Mallows index (FMI) w.r.t. the reference clusters (explicit evaluation) :param reference_clusters: Clusters that are to be used as reference :return: The resulting Fowlkes-Mallows score.
[ "Calculates", "the", "Fowlkes", "-", "Mallows", "index", "(", "FMI", ")", "w", ".", "r", ".", "t", ".", "the", "reference", "clusters", "(", "explicit", "evaluation", ")" ]
train
https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/ml/algorithms/clustering.py#L388-L395
DEIB-GECO/PyGMQL
gmql/ml/algorithms/clustering.py
Clustering.homogeneity_score
def homogeneity_score(self, reference_clusters): """ Calculates the homogeneity score w.r.t. the reference clusters (explicit evaluation) :param reference_clusters: Clusters that are to be used as reference :return: The resulting homogeneity score. """ return homogeneity_score(self.get_labels(self), self.get_labels(reference_clusters))
python
def homogeneity_score(self, reference_clusters): """ Calculates the homogeneity score w.r.t. the reference clusters (explicit evaluation) :param reference_clusters: Clusters that are to be used as reference :return: The resulting homogeneity score. """ return homogeneity_score(self.get_labels(self), self.get_labels(reference_clusters))
[ "def", "homogeneity_score", "(", "self", ",", "reference_clusters", ")", ":", "return", "homogeneity_score", "(", "self", ".", "get_labels", "(", "self", ")", ",", "self", ".", "get_labels", "(", "reference_clusters", ")", ")" ]
Calculates the homogeneity score w.r.t. the reference clusters (explicit evaluation) :param reference_clusters: Clusters that are to be used as reference :return: The resulting homogeneity score.
[ "Calculates", "the", "homogeneity", "score", "w", ".", "r", ".", "t", ".", "the", "reference", "clusters", "(", "explicit", "evaluation", ")" ]
train
https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/ml/algorithms/clustering.py#L397-L404
DEIB-GECO/PyGMQL
gmql/ml/algorithms/clustering.py
Clustering.mutual_info_score
def mutual_info_score(self, reference_clusters): """ Calculates the MI (mutual information) w.r.t. the reference clusters (explicit evaluation) :param reference_clusters: Clusters that are to be used as reference :return: The resulting MI score. """ return mutual_info_score(self.get_labels(self), self.get_labels(reference_clusters))
python
def mutual_info_score(self, reference_clusters): """ Calculates the MI (mutual information) w.r.t. the reference clusters (explicit evaluation) :param reference_clusters: Clusters that are to be used as reference :return: The resulting MI score. """ return mutual_info_score(self.get_labels(self), self.get_labels(reference_clusters))
[ "def", "mutual_info_score", "(", "self", ",", "reference_clusters", ")", ":", "return", "mutual_info_score", "(", "self", ".", "get_labels", "(", "self", ")", ",", "self", ".", "get_labels", "(", "reference_clusters", ")", ")" ]
Calculates the MI (mutual information) w.r.t. the reference clusters (explicit evaluation) :param reference_clusters: Clusters that are to be used as reference :return: The resulting MI score.
[ "Calculates", "the", "MI", "(", "mutual", "information", ")", "w", ".", "r", ".", "t", ".", "the", "reference", "clusters", "(", "explicit", "evaluation", ")" ]
train
https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/ml/algorithms/clustering.py#L406-L413
DEIB-GECO/PyGMQL
gmql/ml/algorithms/clustering.py
Clustering.normalized_mutual_info_score
def normalized_mutual_info_score(self, reference_clusters): """ Calculates the normalized mutual information w.r.t. the reference clusters (explicit evaluation) :param reference_clusters: Clusters that are to be used as reference :return: The resulting normalized mutual information score. """ return normalized_mutual_info_score(self.get_labels(self), self.get_labels(reference_clusters))
python
def normalized_mutual_info_score(self, reference_clusters): """ Calculates the normalized mutual information w.r.t. the reference clusters (explicit evaluation) :param reference_clusters: Clusters that are to be used as reference :return: The resulting normalized mutual information score. """ return normalized_mutual_info_score(self.get_labels(self), self.get_labels(reference_clusters))
[ "def", "normalized_mutual_info_score", "(", "self", ",", "reference_clusters", ")", ":", "return", "normalized_mutual_info_score", "(", "self", ".", "get_labels", "(", "self", ")", ",", "self", ".", "get_labels", "(", "reference_clusters", ")", ")" ]
Calculates the normalized mutual information w.r.t. the reference clusters (explicit evaluation) :param reference_clusters: Clusters that are to be used as reference :return: The resulting normalized mutual information score.
[ "Calculates", "the", "normalized", "mutual", "information", "w", ".", "r", ".", "t", ".", "the", "reference", "clusters", "(", "explicit", "evaluation", ")" ]
train
https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/ml/algorithms/clustering.py#L415-L422
DEIB-GECO/PyGMQL
gmql/ml/algorithms/clustering.py
Clustering.silhouette_score
def silhouette_score(self, data, metric='euclidean', sample_size=None, random_state=None, **kwds):
    """
    Computes the mean Silhouette Coefficient of all samples (implicit evaluation)

    :param data: The data that the clusters are generated from
    :param metric: the pairwise distance metric
    :param sample_size: the size of the sample to use computing the Silhouette Coefficient
    :param random_state: If an integer is given, it is used as the random seed; otherwise the seed is random.
    :param kwds: any further parameters that are passed to the distance function
    :return: the mean Silhouette Coefficient of all samples
    """
    return silhouette_score(data, self.get_labels(self), metric=metric,
                            sample_size=sample_size, random_state=random_state, **kwds)
python
def silhouette_score(self, data, metric='euclidean', sample_size=None, random_state=None, **kwds):
    """
    Computes the mean Silhouette Coefficient of all samples (implicit evaluation)

    :param data: The data that the clusters are generated from
    :param metric: the pairwise distance metric
    :param sample_size: the size of the sample to use computing the Silhouette Coefficient
    :param random_state: If an integer is given, it is used as the random seed; otherwise the seed is random.
    :param kwds: any further parameters that are passed to the distance function
    :return: the mean Silhouette Coefficient of all samples
    """
    return silhouette_score(data, self.get_labels(self), metric=metric,
                            sample_size=sample_size, random_state=random_state, **kwds)
[ "def", "silhouette_score", "(", "self", ",", "data", ",", "metric", "=", "'euclidean'", ",", "sample_size", "=", "None", ",", "random_state", "=", "None", ",", "*", "*", "kwds", ")", ":", "return", "silhouette_score", "(", "data", ",", "self", ".", "get_...
Computes the mean Silhouette Coefficient of all samples (implicit evaluation)

:param data: The data that the clusters are generated from
:param metric: the pairwise distance metric
:param sample_size: the size of the sample to use computing the Silhouette Coefficient
:param random_state: If an integer is given, it is used as the random seed; otherwise the seed is random.
:param kwds: any further parameters that are passed to the distance function
:return: the mean Silhouette Coefficient of all samples
[ "Computes", "the", "mean", "Silhouette", "Coefficient", "of", "all", "samples", "(", "implicit", "evaluation", ")" ]
train
https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/ml/algorithms/clustering.py#L424-L435
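A minimal sketch of the underlying scikit-learn call on hypothetical data, which is all this method wraps:

# Implicit evaluation: only the data and the predicted labels are needed.
import numpy as np
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score

X = np.random.rand(60, 2)                               # hypothetical data
labels = KMeans(n_clusters=3, n_init=10).fit_predict(X)
print(silhouette_score(X, labels, metric='euclidean'))  # in [-1, 1]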
DEIB-GECO/PyGMQL
gmql/dataset/loaders/Materializations.py
materialize
def materialize(datasets):
    """
    Multiple materializations. Enables the user to specify a set of GMQLDataset to be materialized.
    The engine will perform all the materializations at the same time if an output path is provided,
    while it will perform each operation separately if the output_path is not specified.

    :param datasets: it can be a list of GMQLDataset or a dictionary {'output_path' : GMQLDataset}
    :return: a list of GDataframe or a dictionary {'output_path' : GDataframe}
    """
    from .. import GMQLDataset
    if isinstance(datasets, dict):
        result = dict()
        for output_path in datasets.keys():
            dataset = datasets[output_path]
            if not isinstance(dataset, GMQLDataset.GMQLDataset):
                raise TypeError("The values of the dictionary must be GMQLDataset."
                                " {} was given".format(type(dataset)))
            gframe = dataset.materialize(output_path)
            result[output_path] = gframe
    elif isinstance(datasets, list):
        result = []
        for dataset in datasets:
            if not isinstance(dataset, GMQLDataset.GMQLDataset):
                raise TypeError("The values of the list must be GMQLDataset."
                                " {} was given".format(type(dataset)))
            gframe = dataset.materialize()
            result.append(gframe)
    else:
        raise TypeError("The input must be a dictionary or a list. "
                        "{} was given".format(type(datasets)))
    return result
python
def materialize(datasets):
    """
    Multiple materializations. Enables the user to specify a set of GMQLDataset to be materialized.
    The engine will perform all the materializations at the same time if an output path is provided,
    while it will perform each operation separately if the output_path is not specified.

    :param datasets: it can be a list of GMQLDataset or a dictionary {'output_path' : GMQLDataset}
    :return: a list of GDataframe or a dictionary {'output_path' : GDataframe}
    """
    from .. import GMQLDataset
    if isinstance(datasets, dict):
        result = dict()
        for output_path in datasets.keys():
            dataset = datasets[output_path]
            if not isinstance(dataset, GMQLDataset.GMQLDataset):
                raise TypeError("The values of the dictionary must be GMQLDataset."
                                " {} was given".format(type(dataset)))
            gframe = dataset.materialize(output_path)
            result[output_path] = gframe
    elif isinstance(datasets, list):
        result = []
        for dataset in datasets:
            if not isinstance(dataset, GMQLDataset.GMQLDataset):
                raise TypeError("The values of the list must be GMQLDataset."
                                " {} was given".format(type(dataset)))
            gframe = dataset.materialize()
            result.append(gframe)
    else:
        raise TypeError("The input must be a dictionary or a list. "
                        "{} was given".format(type(datasets)))
    return result
[ "def", "materialize", "(", "datasets", ")", ":", "from", ".", ".", "import", "GMQLDataset", "if", "isinstance", "(", "datasets", ",", "dict", ")", ":", "result", "=", "dict", "(", ")", "for", "output_path", "in", "datasets", ".", "keys", "(", ")", ":",...
Multiple materializations. Enables the user to specify a set of GMQLDataset to be materialized.
The engine will perform all the materializations at the same time if an output path is provided,
while it will perform each operation separately if the output_path is not specified.

:param datasets: it can be a list of GMQLDataset or a dictionary {'output_path' : GMQLDataset}
:return: a list of GDataframe or a dictionary {'output_path' : GDataframe}
[ "Multiple", "materializations", ".", "Enables", "the", "user", "to", "specify", "a", "set", "of", "GMQLDataset", "to", "be", "materialized", ".", "The", "engine", "will", "perform", "all", "the", "materializations", "at", "the", "same", "time", "if", "an", "...
train
https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/dataset/loaders/Materializations.py#L9-L38
DEIB-GECO/PyGMQL
gmql/ml/algorithms/biclustering.py
Biclustering.retrieve_bicluster
def retrieve_bicluster(self, df, row_no, column_no): """ Extracts the bicluster at the given row bicluster number and the column bicluster number from the input dataframe. :param df: the input dataframe whose values were biclustered :param row_no: the number of the row bicluster :param column_no: the number of the column bicluster :return: the extracted bicluster from the dataframe """ res = df[self.model.biclusters_[0][row_no]] bicluster = res[res.columns[self.model.biclusters_[1][column_no]]] return bicluster
python
def retrieve_bicluster(self, df, row_no, column_no): """ Extracts the bicluster at the given row bicluster number and the column bicluster number from the input dataframe. :param df: the input dataframe whose values were biclustered :param row_no: the number of the row bicluster :param column_no: the number of the column bicluster :return: the extracted bicluster from the dataframe """ res = df[self.model.biclusters_[0][row_no]] bicluster = res[res.columns[self.model.biclusters_[1][column_no]]] return bicluster
[ "def", "retrieve_bicluster", "(", "self", ",", "df", ",", "row_no", ",", "column_no", ")", ":", "res", "=", "df", "[", "self", ".", "model", ".", "biclusters_", "[", "0", "]", "[", "row_no", "]", "]", "bicluster", "=", "res", "[", "res", ".", "colu...
Extracts the bicluster at the given row bicluster number and the column bicluster number from the input dataframe. :param df: the input dataframe whose values were biclustered :param row_no: the number of the row bicluster :param column_no: the number of the column bicluster :return: the extracted bicluster from the dataframe
[ "Extracts", "the", "bicluster", "at", "the", "given", "row", "bicluster", "number", "and", "the", "column", "bicluster", "number", "from", "the", "input", "dataframe", "." ]
train
https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/ml/algorithms/biclustering.py#L50-L61
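The row/column extraction above is plain boolean-mask indexing on the biclusters_ attribute; a small sketch with a hypothetical fitted SpectralCoclustering model and random data:

# Boolean-mask extraction of one bicluster, mirroring retrieve_bicluster.
import numpy as np
import pandas as pd
from sklearn.cluster import SpectralCoclustering

df = pd.DataFrame(np.random.rand(6, 4))                   # hypothetical matrix
model = SpectralCoclustering(n_clusters=2, random_state=0).fit(df.values)
rows = model.biclusters_[0][0]                            # row mask of bicluster 0
cols = model.biclusters_[1][0]                            # column mask of bicluster 0
bicluster = df[rows][df.columns[cols]]                    # same indexing as above
print(bicluster.shape)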
DEIB-GECO/PyGMQL
gmql/ml/algorithms/biclustering.py
Biclustering.bicluster_similarity
def bicluster_similarity(self, reference_model):
    """
    Calculates the similarity between the current model of biclusters and the reference model of biclusters

    :param reference_model: The reference model of biclusters
    :return: Returns the consensus score (Hochreiter et al., 2010), i.e. the similarity of two sets of biclusters.
    """
    similarity_score = consensus_score(self.model.biclusters_, reference_model.biclusters_)
    return similarity_score
python
def bicluster_similarity(self, reference_model):
    """
    Calculates the similarity between the current model of biclusters and the reference model of biclusters

    :param reference_model: The reference model of biclusters
    :return: Returns the consensus score (Hochreiter et al., 2010), i.e. the similarity of two sets of biclusters.
    """
    similarity_score = consensus_score(self.model.biclusters_, reference_model.biclusters_)
    return similarity_score
[ "def", "bicluster_similarity", "(", "self", ",", "reference_model", ")", ":", "similarity_score", "=", "consensus_score", "(", "self", ".", "model", ".", "biclusters_", ",", "reference_model", ".", "biclusters_", ")", "return", "similarity_score" ]
Calculates the similarity between the current model of biclusters and the reference model of biclusters

:param reference_model: The reference model of biclusters
:return: Returns the consensus score (Hochreiter et al., 2010), i.e. the similarity of two sets of biclusters.
[ "Calculates", "the", "similarity", "between", "the", "current", "model", "of", "biclusters", "and", "the", "reference", "model", "of", "biclusters" ]
train
https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/ml/algorithms/biclustering.py#L63-L71
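The comparison above is scikit-learn's consensus score between two (rows, columns) bicluster tuples; a tiny self-contained check with hand-built masks:

# Consensus score between two sets of biclusters (1.0 = identical sets).
import numpy as np
from sklearn.metrics import consensus_score

rows = np.array([[True, True, False], [False, False, True]])  # 2 biclusters over 3 rows
cols = np.array([[True, False], [False, True]])               # 2 biclusters over 2 columns
print(consensus_score((rows, cols), (rows, cols)))            # 1.0 for identical sets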
DEIB-GECO/PyGMQL
gmql/ml/multi_ref_model.py
MultiRefModel.load
def load(self, path, genes_uuid, regs=['chr', 'left', 'right', 'strand'], meta=[], values=[], full_load=False):
    """
    Loads the multi referenced mapped data from the file system

    :param path: The path to the files
    :param genes_uuid: The unique identifier metadata column name to separate the data by the number of references
    :param regs: The region data that are to be analyzed
    :param meta: The metadata that are to be analyzed
    :param values: The values to fill the matrix
    :param full_load: Specifies the method of parsing the data. If False, the parser omits the parsing of
        zero (0) values in order to speed up and save memory. However, while creating the matrix, those zero
        values are going to be put into the matrix (unless a row contains all-zero columns). This parsing is
        strongly recommended for sparse datasets. If the full_load parameter is True then all the zero (0)
        data are going to be read.
    """
    if not full_load:
        warnings.warn("\n\n You are using the optimized loading technique. "
                      "All-zero rows are not going to be loaded into memory. "
                      "To load all the data please set the full_load parameter equal to True.")
    p = Parser(path)
    all_meta_data = p.parse_meta(meta)
    all_data = p.parse_data(regs, values, full_load)
    all_data = pd.pivot_table(all_data,
                              values=values, columns=regs, index=['sample'],
                              fill_value=0)

    group1 = all_meta_data.groupby([genes_uuid]).count()
    for g in group1.index.values:
        series = all_meta_data[genes_uuid] == g
        m = (all_meta_data[series])
        d = (all_data.loc[series]).dropna(axis=1, how='all')  # not to show the NaN data
        self.data_model.append(GenometricSpace.from_memory(d, m))
    self.all_meta_data = all_meta_data
python
def load(self, path, genes_uuid, regs=['chr', 'left', 'right', 'strand'], meta=[], values=[], full_load=False):
    """
    Loads the multi referenced mapped data from the file system

    :param path: The path to the files
    :param genes_uuid: The unique identifier metadata column name to separate the data by the number of references
    :param regs: The region data that are to be analyzed
    :param meta: The metadata that are to be analyzed
    :param values: The values to fill the matrix
    :param full_load: Specifies the method of parsing the data. If False, the parser omits the parsing of
        zero (0) values in order to speed up and save memory. However, while creating the matrix, those zero
        values are going to be put into the matrix (unless a row contains all-zero columns). This parsing is
        strongly recommended for sparse datasets. If the full_load parameter is True then all the zero (0)
        data are going to be read.
    """
    if not full_load:
        warnings.warn("\n\n You are using the optimized loading technique. "
                      "All-zero rows are not going to be loaded into memory. "
                      "To load all the data please set the full_load parameter equal to True.")
    p = Parser(path)
    all_meta_data = p.parse_meta(meta)
    all_data = p.parse_data(regs, values, full_load)
    all_data = pd.pivot_table(all_data,
                              values=values, columns=regs, index=['sample'],
                              fill_value=0)

    group1 = all_meta_data.groupby([genes_uuid]).count()
    for g in group1.index.values:
        series = all_meta_data[genes_uuid] == g
        m = (all_meta_data[series])
        d = (all_data.loc[series]).dropna(axis=1, how='all')  # not to show the NaN data
        self.data_model.append(GenometricSpace.from_memory(d, m))
    self.all_meta_data = all_meta_data
[ "def", "load", "(", "self", ",", "path", ",", "genes_uuid", ",", "regs", "=", "[", "'chr'", ",", "'left'", ",", "'right'", ",", "'strand'", "]", ",", "meta", "=", "[", "]", ",", "values", "=", "[", "]", ",", "full_load", "=", "False", ")", ":", ...
Loads the multi referenced mapped data from the file system

:param path: The path to the files
:param genes_uuid: The unique identifier metadata column name to separate the data by the number of references
:param regs: The region data that are to be analyzed
:param meta: The metadata that are to be analyzed
:param values: The values to fill the matrix
:param full_load: Specifies the method of parsing the data. If False, the parser omits the parsing of
    zero (0) values in order to speed up and save memory. However, while creating the matrix, those zero
    values are going to be put into the matrix (unless a row contains all-zero columns). This parsing is
    strongly recommended for sparse datasets. If the full_load parameter is True then all the zero (0)
    data are going to be read.
[ "Loads", "the", "multi", "referenced", "mapped", "data", "from", "the", "file", "system", ":", "param", "path", ":", "The", "path", "to", "the", "files", ":", "param", "genes_uuid", ":", "The", "unique", "identifier", "metadata", "column", "name", "to", "s...
train
https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/ml/multi_ref_model.py#L21-L55
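The matrix construction inside load is a pandas pivot_table over the parsed region tuples; a minimal sketch of that step on hypothetical parsed rows (the column names mirror the defaults above, the values are invented):

# The pivot step used by load: samples as rows, region tuples as columns.
import pandas as pd

rows = pd.DataFrame({'sample': ['s1', 's1', 's2'],
                     'chr': ['chr1', 'chr2', 'chr1'],
                     'left': [0, 10, 0], 'right': [5, 20, 5],
                     'strand': ['+', '+', '+'],
                     'count': [3.0, 1.0, 2.0]})  # hypothetical parsed data
matrix = pd.pivot_table(rows, values='count',
                        columns=['chr', 'left', 'right', 'strand'],
                        index=['sample'], fill_value=0)
print(matrix)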
DEIB-GECO/PyGMQL
gmql/ml/multi_ref_model.py
MultiRefModel.merge
def merge(self, samples_uuid):
    """
    The method to merge the datamodels belonging to different references

    :param samples_uuid: The unique identifier metadata column name to identify the identical samples
        having different references
    :return: Returns the merged dataframe
    """
    all_meta_data = pd.DataFrame()
    for dm in self.data_model:
        all_meta_data = pd.concat([all_meta_data, dm.meta], axis=0)
    group = all_meta_data.groupby([samples_uuid])['sample']
    sample_sets = group.apply(list).values
    merged_df = pd.DataFrame()

    multi_index = list(map(list, zip(*sample_sets)))
    multi_index_names = list(range(0, len(sample_sets[0])))

    for pair in sample_sets:
        numbers = list(range(0, len(pair)))
        df_temp = pd.DataFrame()
        for n in numbers:
            try:
                # data.loc[pair[n]] may not be found due to the fast loading (full_load = False)
                df_temp = pd.concat([df_temp, self.data_model[n].data.loc[pair[n]]], axis=1)
            except KeyError:
                pass
        merged_df = pd.concat([merged_df, df_temp.T.bfill().iloc[[0]]], axis=0)
    multi_index = np.asarray(multi_index)
    multi_index = pd.MultiIndex.from_arrays(multi_index, names=multi_index_names)
    merged_df.index = multi_index
    return merged_df
python
def merge(self, samples_uuid):
    """
    The method to merge the datamodels belonging to different references

    :param samples_uuid: The unique identifier metadata column name to identify the identical samples
        having different references
    :return: Returns the merged dataframe
    """
    all_meta_data = pd.DataFrame()
    for dm in self.data_model:
        all_meta_data = pd.concat([all_meta_data, dm.meta], axis=0)
    group = all_meta_data.groupby([samples_uuid])['sample']
    sample_sets = group.apply(list).values
    merged_df = pd.DataFrame()

    multi_index = list(map(list, zip(*sample_sets)))
    multi_index_names = list(range(0, len(sample_sets[0])))

    for pair in sample_sets:
        numbers = list(range(0, len(pair)))
        df_temp = pd.DataFrame()
        for n in numbers:
            try:
                # data.loc[pair[n]] may not be found due to the fast loading (full_load = False)
                df_temp = pd.concat([df_temp, self.data_model[n].data.loc[pair[n]]], axis=1)
            except KeyError:
                pass
        merged_df = pd.concat([merged_df, df_temp.T.bfill().iloc[[0]]], axis=0)
    multi_index = np.asarray(multi_index)
    multi_index = pd.MultiIndex.from_arrays(multi_index, names=multi_index_names)
    merged_df.index = multi_index
    return merged_df
[ "def", "merge", "(", "self", ",", "samples_uuid", ")", ":", "all_meta_data", "=", "pd", ".", "DataFrame", "(", ")", "for", "dm", "in", "self", ".", "data_model", ":", "all_meta_data", "=", "pd", ".", "concat", "(", "[", "all_meta_data", ",", "dm", ".",...
The method to merge the datamodels belonging to different references :param samples_uuid: The unique identifier metadata column name to identify the identical samples having different references :return: Returns the merged dataframe
[ "The", "method", "to", "merge", "the", "datamodels", "belonging", "to", "different", "references" ]
train
https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/ml/multi_ref_model.py#L57-L91
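The pairing step at the heart of merge is a groupby followed by apply(list); a minimal pandas sketch with hypothetical metadata:

# The sample-pairing pattern used by merge: group samples that share a uuid.
import pandas as pd

meta = pd.DataFrame({'sample': ['s1', 's2', 's3', 's4'],
                     'uuid':   ['a',  'a',  'b',  'b']})  # hypothetical metadata
sample_sets = meta.groupby('uuid')['sample'].apply(list).values
print(sample_sets)  # [['s1', 's2'], ['s3', 's4']] -> one merged row per set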
DEIB-GECO/PyGMQL
gmql/ml/multi_ref_model.py
MultiRefModel.compact_view
def compact_view(self, merged_data, selected_meta, reference_no):
    """
    Creates and returns the compact view where the index of the dataframe is a multi index of the
    selected metadata.
    Side effect: Alters the merged_data parameter

    :param merged_data: The merged data that is to be used to create the compact view
    :param selected_meta: The selected metadata to create the multi index
    :param reference_no: The reference number from which the metadata are to be taken
    :return: Returns the multi-indexed dataframe w.r.t. the selected metadata
    """
    meta_names = list(selected_meta)
    meta_index = []
    for x in meta_names:
        meta_index.append(self.all_meta_data.loc[merged_data.index.get_level_values(reference_no)][x].values)

    meta_index = np.asarray(meta_index)
    multi_meta_index = pd.MultiIndex.from_arrays(meta_index, names=meta_names)
    merged_data.index = multi_meta_index
    return merged_data
python
def compact_view(self, merged_data, selected_meta, reference_no):
    """
    Creates and returns the compact view where the index of the dataframe is a multi index of the
    selected metadata.
    Side effect: Alters the merged_data parameter

    :param merged_data: The merged data that is to be used to create the compact view
    :param selected_meta: The selected metadata to create the multi index
    :param reference_no: The reference number from which the metadata are to be taken
    :return: Returns the multi-indexed dataframe w.r.t. the selected metadata
    """
    meta_names = list(selected_meta)
    meta_index = []
    for x in meta_names:
        meta_index.append(self.all_meta_data.loc[merged_data.index.get_level_values(reference_no)][x].values)

    meta_index = np.asarray(meta_index)
    multi_meta_index = pd.MultiIndex.from_arrays(meta_index, names=meta_names)
    merged_data.index = multi_meta_index
    return merged_data
[ "def", "compact_view", "(", "self", ",", "merged_data", ",", "selected_meta", ",", "reference_no", ")", ":", "meta_names", "=", "list", "(", "selected_meta", ")", "meta_index", "=", "[", "]", "for", "x", "in", "meta_names", ":", "meta_index", ".", "append", ...
Creates and returns the compact view where the index of the dataframe is a multi index of the
selected metadata.
Side effect: Alters the merged_data parameter

:param merged_data: The merged data that is to be used to create the compact view
:param selected_meta: The selected metadata to create the multi index
:param reference_no: The reference number from which the metadata are to be taken
:return: Returns the multi-indexed dataframe w.r.t. the selected metadata
[ "Creates", "and", "returns", "the", "compact", "view", "where", "the", "index", "of", "the", "dataframe", "is", "a", "multi", "index", "of", "the", "selected", "metadata", ".", "Side", "effect", ":", "Alters", "the", "merged_data", "parameter" ]
train
https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/ml/multi_ref_model.py#L93-L112
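The re-indexing trick compact_view relies on is pandas' MultiIndex.from_arrays; a small sketch with hypothetical metadata columns:

# Re-indexing a dataframe by selected metadata levels, as compact_view does.
import numpy as np
import pandas as pd

data = pd.DataFrame(np.random.rand(3, 2), columns=['g1', 'g2'])
tissue = ['blood', 'liver', 'liver']      # hypothetical metadata values
disease = ['yes', 'no', 'yes']
data.index = pd.MultiIndex.from_arrays([tissue, disease],
                                       names=['tissue', 'disease'])
print(data.loc['liver'])  # select rows by a metadata level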
DEIB-GECO/PyGMQL
gmql/ml/algorithms/preprocessing.py
Preprocessing.prune_by_missing_percent
def prune_by_missing_percent(df, percentage=0.4):
    """
    The method to remove the attributes (genes) whose fraction of missing values is greater than
    or equal to the given threshold

    :param df: the dataframe containing the attributes to be pruned
    :param percentage: the missing-value fraction threshold (0.4 by default)
    :return: the pruned dataframe
    """
    mask = (df.isnull().sum() / df.shape[0]).map(lambda x: x < percentage)
    pruned_df = df[df.columns[mask.values]]
    return pruned_df
python
def prune_by_missing_percent(df, percentage=0.4):
    """
    The method to remove the attributes (genes) whose fraction of missing values is greater than
    or equal to the given threshold

    :param df: the dataframe containing the attributes to be pruned
    :param percentage: the missing-value fraction threshold (0.4 by default)
    :return: the pruned dataframe
    """
    mask = (df.isnull().sum() / df.shape[0]).map(lambda x: x < percentage)
    pruned_df = df[df.columns[mask.values]]
    return pruned_df
[ "def", "prune_by_missing_percent", "(", "df", ",", "percentage", "=", "0.4", ")", ":", "mask", "=", "(", "df", ".", "isnull", "(", ")", ".", "sum", "(", ")", "/", "df", ".", "shape", "[", "0", "]", ")", ".", "map", "(", "lambda", "x", ":", "Tru...
The method to remove the attributes (genes) whose fraction of missing values is greater than
or equal to the given threshold

:param df: the dataframe containing the attributes to be pruned
:param percentage: the missing-value fraction threshold (0.4 by default)
:return: the pruned dataframe
[ "The", "method", "to", "remove", "the", "attributes", "(", "genes", ")", "with", "more", "than", "a", "percentage", "of", "missing", "values" ]
train
https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/ml/algorithms/preprocessing.py#L36-L46
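The pruning is a one-line column mask on the missing-value fraction; an equivalent sketch on a hypothetical frame:

# Drop columns whose fraction of missing values reaches the threshold.
import numpy as np
import pandas as pd

df = pd.DataFrame({'a': [1, 2, 3, 4], 'b': [1, np.nan, np.nan, np.nan]})
mask = df.isnull().sum() / df.shape[0] < 0.4          # keep columns under 40% NaN
print(df[df.columns[mask.values]].columns.tolist())   # ['a']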
DEIB-GECO/PyGMQL
gmql/ml/algorithms/preprocessing.py
Preprocessing.impute_using_statistics
def impute_using_statistics(df, method='min'): """ Imputes the missing values by the selected statistical property of each column :param df: The input dataframe that contains missing values :param method: The imputation method (min by default) "zero": fill missing entries with zeros "mean": fill with column means "median" : fill with column medians "min": fill with min value per column "random": fill with gaussian noise according to mean/std of column :return: the imputed dataframe """ sf = SimpleFill(method) imputed_matrix = sf.complete(df.values) imputed_df = pd.DataFrame(imputed_matrix, df.index, df.columns) return imputed_df
python
def impute_using_statistics(df, method='min'): """ Imputes the missing values by the selected statistical property of each column :param df: The input dataframe that contains missing values :param method: The imputation method (min by default) "zero": fill missing entries with zeros "mean": fill with column means "median" : fill with column medians "min": fill with min value per column "random": fill with gaussian noise according to mean/std of column :return: the imputed dataframe """ sf = SimpleFill(method) imputed_matrix = sf.complete(df.values) imputed_df = pd.DataFrame(imputed_matrix, df.index, df.columns) return imputed_df
[ "def", "impute_using_statistics", "(", "df", ",", "method", "=", "'min'", ")", ":", "sf", "=", "SimpleFill", "(", "method", ")", "imputed_matrix", "=", "sf", ".", "complete", "(", "df", ".", "values", ")", "imputed_df", "=", "pd", ".", "DataFrame", "(", ...
Imputes the missing values by the selected statistical property of each column :param df: The input dataframe that contains missing values :param method: The imputation method (min by default) "zero": fill missing entries with zeros "mean": fill with column means "median" : fill with column medians "min": fill with min value per column "random": fill with gaussian noise according to mean/std of column :return: the imputed dataframe
[ "Imputes", "the", "missing", "values", "by", "the", "selected", "statistical", "property", "of", "each", "column" ]
train
https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/ml/algorithms/preprocessing.py#L49-L65
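SimpleFill comes from the fancyimpute package (its complete method was renamed fit_transform in later releases); the default 'min' strategy used above is equivalent to this plain-pandas fill:

# Plain-pandas equivalent of the 'min' strategy: fill NaNs with column minima.
import numpy as np
import pandas as pd

df = pd.DataFrame({'a': [1.0, np.nan, 3.0], 'b': [np.nan, 5.0, 2.0]})
print(df.fillna(df.min()))  # each NaN replaced by its column's minimum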
DEIB-GECO/PyGMQL
gmql/ml/algorithms/preprocessing.py
Preprocessing.impute_knn
def impute_knn(df, k=3):
    """
    Nearest neighbour imputation which weights samples using the mean squared difference on
    features for which two rows both have observed data.

    :param df: The input dataframe that contains missing values
    :param k: The number of neighbours
    :return: the imputed dataframe
    """
    imputed_matrix = KNN(k=k).complete(df.values)
    imputed_df = pd.DataFrame(imputed_matrix, df.index, df.columns)
    return imputed_df
python
def impute_knn(df, k=3):
    """
    Nearest neighbour imputation which weights samples using the mean squared difference on
    features for which two rows both have observed data.

    :param df: The input dataframe that contains missing values
    :param k: The number of neighbours
    :return: the imputed dataframe
    """
    imputed_matrix = KNN(k=k).complete(df.values)
    imputed_df = pd.DataFrame(imputed_matrix, df.index, df.columns)
    return imputed_df
[ "def", "impute_knn", "(", "df", ",", "k", "=", "3", ")", ":", "imputed_matrix", "=", "KNN", "(", "k", "=", "k", ")", ".", "complete", "(", "df", ".", "values", ")", "imputed_df", "=", "pd", ".", "DataFrame", "(", "imputed_matrix", ",", "df", ".", ...
Nearest neighbour imputations which weights samples using the mean squared difference on features for which two rows both have observed data. :param df: The input dataframe that contains missing values :param k: The number of neighbours :return: the imputed dataframe
[ "Nearest", "neighbour", "imputations", "which", "weights", "samples", "using", "the", "mean", "squared", "difference", "on", "features", "for", "which", "two", "rows", "both", "have", "observed", "data", "." ]
train
https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/ml/algorithms/preprocessing.py#L68-L78
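fancyimpute's KNN.complete was likewise renamed fit_transform in later releases; scikit-learn now ships a comparable imputer, sketched here on hypothetical data:

# Comparable k-nearest-neighbour imputation with scikit-learn's KNNImputer.
import numpy as np
from sklearn.impute import KNNImputer

X = np.array([[1.0, 2.0], [np.nan, 3.0], [7.0, 6.0]])
print(KNNImputer(n_neighbors=2).fit_transform(X))  # NaNs filled from neighbours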
DEIB-GECO/PyGMQL
gmql/ml/algorithms/preprocessing.py
Preprocessing.impute_svd
def impute_svd(df, rank=10, convergence_threshold=0.00001, max_iters=200):
    """
    Imputes the missing values by using SVD decomposition
    Based on the following publication:
    'Missing value estimation methods for DNA microarrays' by Troyanskaya et al.

    :param df: The input dataframe that contains missing values
    :param rank: Rank value of the truncated SVD
    :param convergence_threshold: The threshold to stop the iterations
    :param max_iters: Max number of iterations
    :return: the imputed dataframe
    """
    imputed_matrix = IterativeSVD(rank, convergence_threshold, max_iters).complete(df.values)
    imputed_df = pd.DataFrame(imputed_matrix, df.index, df.columns)
    return imputed_df
python
def impute_svd(df, rank=10, convergence_threshold=0.00001, max_iters=200):
    """
    Imputes the missing values by using SVD decomposition
    Based on the following publication:
    'Missing value estimation methods for DNA microarrays' by Troyanskaya et al.

    :param df: The input dataframe that contains missing values
    :param rank: Rank value of the truncated SVD
    :param convergence_threshold: The threshold to stop the iterations
    :param max_iters: Max number of iterations
    :return: the imputed dataframe
    """
    imputed_matrix = IterativeSVD(rank, convergence_threshold, max_iters).complete(df.values)
    imputed_df = pd.DataFrame(imputed_matrix, df.index, df.columns)
    return imputed_df
[ "def", "impute_svd", "(", "df", ",", "rank", "=", "10", ",", "convergence_threshold", "=", "0.00001", ",", "max_iters", "=", "200", ")", ":", "imputed_matrix", "=", "IterativeSVD", "(", "rank", ",", "convergence_threshold", ",", "max_iters", ")", ".", "compl...
Imputes the missing values by using SVD decomposition
Based on the following publication:
'Missing value estimation methods for DNA microarrays' by Troyanskaya et al.

:param df: The input dataframe that contains missing values
:param rank: Rank value of the truncated SVD
:param convergence_threshold: The threshold to stop the iterations
:param max_iters: Max number of iterations
:return: the imputed dataframe
[ "Imputes", "the", "missing", "values", "by", "using", "SVD", "decomposition", "Based", "on", "the", "following", "publication", ":", "Missing", "value", "estimation", "methods", "for", "DNA", "microarrays", "by", "Troyanskaya", "et", ".", "al", "." ]
train
https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/ml/algorithms/preprocessing.py#L81-L94
DEIB-GECO/PyGMQL
gmql/ml/algorithms/preprocessing.py
Preprocessing.feature_selection
def feature_selection(df, labels, n_features, method='chi2'):
    """
    Reduces the number of features in the input dataframe.
    Ex: labels = gs.meta['biospecimen_sample__sample_type_id'].apply(int).apply(lambda x: 0 if x < 10 else 1)
        chi2_fs(gs.data, labels, 50)

    :param df: The input dataframe
    :param labels: Labels for each row in the df. Type: Pandas.Series
    :param n_features: The desired number of features
    :param method: The feature selection method to be employed. It is set to 'chi2' by default.
        To select the features using mutual information, the method value should be set to 'mi'.
        To select the features using ANOVA, the method value should be set to 'ANOVA'.
    :return: Returns the dataframe with the selected features
    """
    fs_obj = None
    if method == 'chi2':
        fs_obj = chi2
    elif method == 'ANOVA':
        fs_obj = f_classif
    elif method == 'mi':
        fs_obj = mutual_info_classif
    else:
        raise ValueError('The method is not recognized')

    fs = SelectKBest(fs_obj, k=n_features)
    fs.fit_transform(df, labels)
    df_reduced = df.loc[:, fs.get_support()]
    return df_reduced
python
def feature_selection(df, labels, n_features, method='chi2'):
    """
    Reduces the number of features in the input dataframe.
    Ex: labels = gs.meta['biospecimen_sample__sample_type_id'].apply(int).apply(lambda x: 0 if x < 10 else 1)
        chi2_fs(gs.data, labels, 50)

    :param df: The input dataframe
    :param labels: Labels for each row in the df. Type: Pandas.Series
    :param n_features: The desired number of features
    :param method: The feature selection method to be employed. It is set to 'chi2' by default.
        To select the features using mutual information, the method value should be set to 'mi'.
        To select the features using ANOVA, the method value should be set to 'ANOVA'.
    :return: Returns the dataframe with the selected features
    """
    fs_obj = None
    if method == 'chi2':
        fs_obj = chi2
    elif method == 'ANOVA':
        fs_obj = f_classif
    elif method == 'mi':
        fs_obj = mutual_info_classif
    else:
        raise ValueError('The method is not recognized')

    fs = SelectKBest(fs_obj, k=n_features)
    fs.fit_transform(df, labels)
    df_reduced = df.loc[:, fs.get_support()]
    return df_reduced
[ "def", "feature_selection", "(", "df", ",", "labels", ",", "n_features", ",", "method", "=", "'chi2'", ")", ":", "fs_obj", "=", "None", "if", "method", "==", "'chi2'", ":", "fs_obj", "=", "chi2", "elif", "method", "==", "'ANOVA'", ":", "fs_obj", "=", "...
Reduces the number of features in the input dataframe.
Ex: labels = gs.meta['biospecimen_sample__sample_type_id'].apply(int).apply(lambda x: 0 if x < 10 else 1)
    chi2_fs(gs.data, labels, 50)

:param df: The input dataframe
:param labels: Labels for each row in the df. Type: Pandas.Series
:param n_features: The desired number of features
:param method: The feature selection method to be employed. It is set to 'chi2' by default.
    To select the features using mutual information, the method value should be set to 'mi'.
    To select the features using ANOVA, the method value should be set to 'ANOVA'.
:return: Returns the dataframe with the selected features
[ "Reduces", "the", "number", "of", "features", "in", "the", "imput", "dataframe", ".", "Ex", ":", "labels", "=", "gs", ".", "meta", "[", "biospecimen_sample__sample_type_id", "]", ".", "apply", "(", "int", ")", ".", "apply", "(", "lambda", "x", ":", "0", ...
train
https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/ml/algorithms/preprocessing.py#L97-L125
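The selection itself is scikit-learn's SelectKBest; a compact sketch with the chi2 score function on hypothetical non-negative data and binary labels:

# Keep the k columns with the highest chi2 scores against the labels.
import numpy as np
import pandas as pd
from sklearn.feature_selection import SelectKBest, chi2

df = pd.DataFrame(np.random.rand(20, 6))         # hypothetical, non-negative features
labels = pd.Series(np.random.randint(0, 2, 20))  # hypothetical binary labels
fs = SelectKBest(chi2, k=3).fit(df, labels)
print(df.loc[:, fs.get_support()].shape)         # (20, 3)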
DEIB-GECO/PyGMQL
gmql/configuration.py
Configuration.set_spark_conf
def set_spark_conf(self, key=None, value=None, d=None):
    """
    Sets a spark property as a ('key', 'value') pair or using a dictionary {'key': 'value', ...}

    :param key: string
    :param value: string
    :param d: dictionary
    :return: None
    """
    if isinstance(d, dict):
        self._properties.update(d)
    elif isinstance(key, str) and isinstance(value, str):
        self._properties[key] = value
    else:
        raise TypeError("key, value must be strings")
python
def set_spark_conf(self, key=None, value=None, d=None):
    """
    Sets a spark property as a ('key', 'value') pair or using a dictionary {'key': 'value', ...}

    :param key: string
    :param value: string
    :param d: dictionary
    :return: None
    """
    if isinstance(d, dict):
        self._properties.update(d)
    elif isinstance(key, str) and isinstance(value, str):
        self._properties[key] = value
    else:
        raise TypeError("key, value must be strings")
[ "def", "set_spark_conf", "(", "self", ",", "key", "=", "None", ",", "value", "=", "None", ",", "d", "=", "None", ")", ":", "if", "isinstance", "(", "d", ",", "dict", ")", ":", "self", ".", "_properties", ".", "update", "(", "d", ")", "elif", "isi...
Sets a spark property as a ('key', 'value') pair of using a dictionary {'key': 'value', ...} :param key: string :param value: string :param d: dictionary :return: None
[ "Sets", "a", "spark", "property", "as", "a", "(", "key", "value", ")", "pair", "of", "using", "a", "dictionary", "{", "key", ":", "value", "...", "}" ]
train
https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/configuration.py#L39-L53
DEIB-GECO/PyGMQL
gmql/configuration.py
Configuration.set_system_conf
def set_system_conf(self, key=None, value=None, d=None):
    """
    Sets a java system property as a ('key', 'value') pair or using a dictionary {'key': 'value', ...}

    :param key: string
    :param value: string
    :param d: dictionary
    :return: None
    """
    if isinstance(d, dict):
        self._system.update(d)
    elif isinstance(key, str) and isinstance(value, str):
        self._system[key] = value
    else:
        raise TypeError("key, value must be strings")
python
def set_system_conf(self, key=None, value=None, d=None):
    """
    Sets a java system property as a ('key', 'value') pair or using a dictionary {'key': 'value', ...}

    :param key: string
    :param value: string
    :param d: dictionary
    :return: None
    """
    if isinstance(d, dict):
        self._system.update(d)
    elif isinstance(key, str) and isinstance(value, str):
        self._system[key] = value
    else:
        raise TypeError("key, value must be strings")
[ "def", "set_system_conf", "(", "self", ",", "key", "=", "None", ",", "value", "=", "None", ",", "d", "=", "None", ")", ":", "if", "isinstance", "(", "d", ",", "dict", ")", ":", "self", ".", "_system", ".", "update", "(", "d", ")", "elif", "isinst...
Sets a java system property as a ('key', 'value') pair of using a dictionary {'key': 'value', ...} :param key: string :param value: string :param d: dictionary :return: None
[ "Sets", "a", "java", "system", "property", "as", "a", "(", "key", "value", ")", "pair", "of", "using", "a", "dictionary", "{", "key", ":", "value", "...", "}" ]
train
https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/configuration.py#L55-L69
DEIB-GECO/PyGMQL
gmql/dataset/DataStructures/MetaField.py
MetaField.isin
def isin(self, values):
    """
    Selects the samples having the metadata attribute equal to one of the values provided as input

    :param values: a list of elements
    :return: a new complex condition
    """
    if not isinstance(values, list):
        raise TypeError("Input should be a list. {} was provided".format(type(values)))
    if not (self.name.startswith("(") and self.name.endswith(")")):
        first = True
        new_condition = None
        for v in values:
            if first:
                first = False
                new_condition = self.__eq__(v)
            else:
                new_condition = new_condition.__or__(self.__eq__(v))
        return new_condition
    else:
        raise SyntaxError("You cannot use 'isin' with a complex condition")
python
def isin(self, values):
    """
    Selects the samples having the metadata attribute equal to one of the values provided as input

    :param values: a list of elements
    :return: a new complex condition
    """
    if not isinstance(values, list):
        raise TypeError("Input should be a list. {} was provided".format(type(values)))
    if not (self.name.startswith("(") and self.name.endswith(")")):
        first = True
        new_condition = None
        for v in values:
            if first:
                first = False
                new_condition = self.__eq__(v)
            else:
                new_condition = new_condition.__or__(self.__eq__(v))
        return new_condition
    else:
        raise SyntaxError("You cannot use 'isin' with a complex condition")
[ "def", "isin", "(", "self", ",", "values", ")", ":", "if", "not", "isinstance", "(", "values", ",", "list", ")", ":", "raise", "TypeError", "(", "\"Input should be a string. {} was provided\"", ".", "format", "(", "type", "(", "values", ")", ")", ")", "if"...
Selects the samples having the metadata attribute equal to one of the values provided as input

:param values: a list of elements
:return: a new complex condition
[ "Selects", "the", "samples", "having", "the", "metadata", "attribute", "between", "the", "values", "provided", "as", "input" ]
train
https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/dataset/DataStructures/MetaField.py#L75-L95
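The loop above just expands isin into a chain of OR-ed equality conditions; a generic sketch of the same expansion with functools.reduce (field is assumed to support == and | the way MetaField does):

# Generic form of the expansion performed by isin: OR together one
# equality condition per value.
from functools import reduce
import operator

def isin_expand(field, values):
    # field is a hypothetical condition-building object, like MetaField above
    return reduce(operator.or_, (field == v for v in values))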
PragmaticMates/django-flatpages-i18n
flatpages_i18n/templatetags/flatpages_i18n.py
get_flatpages_i18n
def get_flatpages_i18n(parser, token):
    """
    Retrieves all flatpage objects available for the current site and
    visible to the specific user (or visible to all users if no user is
    specified). Populates the template context with them in a variable
    whose name is defined by the ``as`` clause.

    An optional ``for`` clause can be used to control the user whose
    permissions are to be used in determining which flatpages are visible.

    An optional argument, ``starts_with``, can be applied to limit the
    returned flatpages to those beginning with a particular base URL.
    This argument can be passed as a variable or a string, as it resolves
    from the template context.

    Syntax::

        {% get_flatpages_i18n ['url_starts_with'] [for user] as context_name %}

    Example usage::

        {% get_flatpages_i18n as flatpages %}
        {% get_flatpages_i18n for someuser as flatpages %}
        {% get_flatpages_i18n '/about/' as about_pages %}
        {% get_flatpages_i18n prefix as about_pages %}
        {% get_flatpages_i18n '/about/' for someuser as about_pages %}
        {% get_flatpages_i18n containing '/en/' as my_pages %}
        {% get_flatpages_i18n excluding '/en/' as my_pages %}
    """
    bits = token.split_contents()
    syntax_message = ("%(tag_name)s expects a syntax of %(tag_name)s "
                      "['url_starts_with'] [for user] as context_name" %
                      dict(tag_name=bits[0]))

    # Must have 3-8 bits in the tag
    if 3 <= len(bits) <= 8:
        containing = None
        excluding = None

        # If there's an even number of bits, there's no prefix
        if len(bits) % 2 == 0:
            prefix = bits[1]
        else:
            prefix = None

        if bits[1] == "containing":
            containing = bits[2]
        elif bits[1] == "excluding":
            excluding = bits[2]

        # The very last bit must be the context name
        if bits[-2] != 'as':
            raise template.TemplateSyntaxError(syntax_message)
        context_name = bits[-1]

        # If there are at least 5 bits, a user is specified
        if len(bits) >= 5:
            if bits[-4] != 'for':
                raise template.TemplateSyntaxError(syntax_message)
            user = bits[-3]
        else:
            user = None

        return FlatpageNode(
            context_name,
            starts_with=prefix,
            contains=containing,
            excludes=excluding,
            user=user)

    else:
        raise template.TemplateSyntaxError(syntax_message)
python
def get_flatpages_i18n(parser, token):
    """
    Retrieves all flatpage objects available for the current site and
    visible to the specific user (or visible to all users if no user is
    specified). Populates the template context with them in a variable
    whose name is defined by the ``as`` clause.

    An optional ``for`` clause can be used to control the user whose
    permissions are to be used in determining which flatpages are visible.

    An optional argument, ``starts_with``, can be applied to limit the
    returned flatpages to those beginning with a particular base URL.
    This argument can be passed as a variable or a string, as it resolves
    from the template context.

    Syntax::

        {% get_flatpages_i18n ['url_starts_with'] [for user] as context_name %}

    Example usage::

        {% get_flatpages_i18n as flatpages %}
        {% get_flatpages_i18n for someuser as flatpages %}
        {% get_flatpages_i18n '/about/' as about_pages %}
        {% get_flatpages_i18n prefix as about_pages %}
        {% get_flatpages_i18n '/about/' for someuser as about_pages %}
        {% get_flatpages_i18n containing '/en/' as my_pages %}
        {% get_flatpages_i18n excluding '/en/' as my_pages %}
    """
    bits = token.split_contents()
    syntax_message = ("%(tag_name)s expects a syntax of %(tag_name)s "
                      "['url_starts_with'] [for user] as context_name" %
                      dict(tag_name=bits[0]))

    # Must have 3-8 bits in the tag
    if 3 <= len(bits) <= 8:
        containing = None
        excluding = None

        # If there's an even number of bits, there's no prefix
        if len(bits) % 2 == 0:
            prefix = bits[1]
        else:
            prefix = None

        if bits[1] == "containing":
            containing = bits[2]
        elif bits[1] == "excluding":
            excluding = bits[2]

        # The very last bit must be the context name
        if bits[-2] != 'as':
            raise template.TemplateSyntaxError(syntax_message)
        context_name = bits[-1]

        # If there are at least 5 bits, a user is specified
        if len(bits) >= 5:
            if bits[-4] != 'for':
                raise template.TemplateSyntaxError(syntax_message)
            user = bits[-3]
        else:
            user = None

        return FlatpageNode(
            context_name,
            starts_with=prefix,
            contains=containing,
            excludes=excluding,
            user=user)

    else:
        raise template.TemplateSyntaxError(syntax_message)
[ "def", "get_flatpages_i18n", "(", "parser", ",", "token", ")", ":", "bits", "=", "token", ".", "split_contents", "(", ")", "syntax_message", "=", "(", "\"%(tag_name)s expects a syntax of %(tag_name)s \"", "\"['url_starts_with'] [for user] as context_name\"", "%", "dict", ...
Retrieves all flatpage objects available for the current site and visible to the specific user (or visible to all users if no user is specified). Populates the template context with them in a variable whose name is defined by the ``as`` clause. An optional ``for`` clause can be used to control the user whose permissions are to be used in determining which flatpages are visible. An optional argument, ``starts_with``, can be applied to limit the returned flatpages to those beginning with a particular base URL. This argument can be passed as a variable or a string, as it resolves from the template context. Syntax:: {% get_flatpages_i18n ['url_starts_with'] [for user] as context_name %} Example usage:: {% get_flatpages_i18n as flatpages %} {% get_flatpages_i18n for someuser as flatpages %} {% get_flatpages_i18n '/about/' as about_pages %} {% get_flatpages_i18n prefix as about_pages %} {% get_flatpages_i18n '/about/' for someuser as about_pages %} {% get_flatpages_i18n containing '/en/' as my_pages %} {% get_flatpages_i18n excluding '/en/' as my_pages %}
[ "Retrieves", "all", "flatpage", "objects", "available", "for", "the", "current", "site", "and", "visible", "to", "the", "specific", "user", "(", "or", "visible", "to", "all", "users", "if", "no", "user", "is", "specified", ")", ".", "Populates", "the", "te...
train
https://github.com/PragmaticMates/django-flatpages-i18n/blob/2d3ed45c14fb0c7fd6ff5263c84f501c6a0c3e9a/flatpages_i18n/templatetags/flatpages_i18n.py#L72-L139
kevinconway/daemons
daemons/daemonize/simple.py
SimpleDaemonizeManager.daemonize
def daemonize(self):
    """Double fork and set the pid."""
    self._double_fork()

    # Write pidfile.
    self.pid = os.getpid()

    LOG.info(
        "Successfully daemonized process {0}.".format(self.pid)
    )
python
def daemonize(self):
    """Double fork and set the pid."""
    self._double_fork()

    # Write pidfile.
    self.pid = os.getpid()

    LOG.info(
        "Successfully daemonized process {0}.".format(self.pid)
    )
[ "def", "daemonize", "(", "self", ")", ":", "self", ".", "_double_fork", "(", ")", "# Write pidfile.", "self", ".", "pid", "=", "os", ".", "getpid", "(", ")", "LOG", ".", "info", "(", "\"Succesfully daemonized process {0}.\"", ".", "format", "(", "self", "....
Double fork and set the pid.
[ "Double", "fork", "and", "set", "the", "pid", "." ]
train
https://github.com/kevinconway/daemons/blob/b0fe0db5821171a35aa9078596d19d630c570b38/daemons/daemonize/simple.py#L22-L31
kevinconway/daemons
daemons/daemonize/simple.py
SimpleDaemonizeManager._double_fork
def _double_fork(self): """Do the UNIX double-fork magic. See Stevens' "Advanced Programming in the UNIX Environment" for details (ISBN 0201563177) http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16 """ try: pid = os.fork() if pid > 0: # Exit first parent. sys.exit(0) return None except OSError as err: LOG.exception( "Fork #1 failed: {0} ({1})".format( err.errno, err.strerror, ), ) sys.exit(exit.DAEMONIZE_FAILED) return None # Decouple from parent environment. os.chdir("/") os.setsid() os.umask(0) # Do second fork. try: pid = os.fork() if pid > 0: # Exit from second parent. sys.exit(0) except OSError as err: LOG.exception( "Fork #2 failed: {0} ({1})".format( err.errno, err.strerror, ), ) sys.exit(exit.DAEMONIZE_FAILED) return None
python
def _double_fork(self): """Do the UNIX double-fork magic. See Stevens' "Advanced Programming in the UNIX Environment" for details (ISBN 0201563177) http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16 """ try: pid = os.fork() if pid > 0: # Exit first parent. sys.exit(0) return None except OSError as err: LOG.exception( "Fork #1 failed: {0} ({1})".format( err.errno, err.strerror, ), ) sys.exit(exit.DAEMONIZE_FAILED) return None # Decouple from parent environment. os.chdir("/") os.setsid() os.umask(0) # Do second fork. try: pid = os.fork() if pid > 0: # Exit from second parent. sys.exit(0) except OSError as err: LOG.exception( "Fork #2 failed: {0} ({1})".format( err.errno, err.strerror, ), ) sys.exit(exit.DAEMONIZE_FAILED) return None
[ "def", "_double_fork", "(", "self", ")", ":", "try", ":", "pid", "=", "os", ".", "fork", "(", ")", "if", "pid", ">", "0", ":", "# Exit first parent.", "sys", ".", "exit", "(", "0", ")", "return", "None", "except", "OSError", "as", "err", ":", "LOG"...
Do the UNIX double-fork magic. See Stevens' "Advanced Programming in the UNIX Environment" for details (ISBN 0201563177) http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16
[ "Do", "the", "UNIX", "double", "-", "fork", "magic", "." ]
train
https://github.com/kevinconway/daemons/blob/b0fe0db5821171a35aa9078596d19d630c570b38/daemons/daemonize/simple.py#L33-L83
DEIB-GECO/PyGMQL
gmql/ml/genometric_space.py
GenometricSpace.from_memory
def from_memory(cls, data, meta): """ Overloaded constructor to create the GenometricSpace object from memory data and meta variables. The indexes of the data and meta dataframes should be the same. :param data: The data model :param meta: The metadata :return: A GenometricSpace object """ obj = cls() obj.data = data obj.meta = meta return obj
python
def from_memory(cls, data, meta): """ Overloaded constructor to create the GenometricSpace object from memory data and meta variables. The indexes of the data and meta dataframes should be the same. :param data: The data model :param meta: The metadata :return: A GenometricSpace object """ obj = cls() obj.data = data obj.meta = meta return obj
[ "def", "from_memory", "(", "cls", ",", "data", ",", "meta", ")", ":", "obj", "=", "cls", "(", ")", "obj", ".", "data", "=", "data", "obj", ".", "meta", "=", "meta", "return", "obj" ]
Overloaded constructor to create the GenometricSpace object from memory data and meta variables. The indexes of the data and meta dataframes should be the same. :param data: The data model :param meta: The metadata :return: A GenometricSpace object
[ "Overloaded", "constructor", "to", "create", "the", "GenometricSpace", "object", "from", "memory", "data", "and", "meta", "variables", ".", "The", "indexes", "of", "the", "data", "and", "meta", "dataframes", "should", "be", "the", "same", "." ]
train
https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/ml/genometric_space.py#L27-L41
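A minimal usage sketch for the record above. The import path mirrors func_path_in_repository, and from_memory is presumably exposed as a classmethod (the decorator is not shown in the record); the sample frames are illustrative, not from the dataset.

import pandas as pd
from gmql.ml.genometric_space import GenometricSpace  # assumed import path

# Data and metadata must share the same sample index.
data = pd.DataFrame({'value': [1.0, 2.0]}, index=['s1', 's2'])
meta = pd.DataFrame({'tissue': ['liver', 'brain']}, index=['s1', 's2'])

gs = GenometricSpace.from_memory(data, meta)  # presumably a @classmethod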
DEIB-GECO/PyGMQL
gmql/ml/genometric_space.py
GenometricSpace.load
def load(self, _path, regs=['chr', 'left', 'right', 'strand'], meta=[], values=[], full_load=False, file_extension="gdm"): """Parses and loads the data into instance attributes. The indexes of the data and meta dataframes should be the same. :param _path: The path to the dataset on the filesystem :param regs: the regions that are to be analyzed :param meta: the meta-data that are to be analyzed :param values: the values that are to be selected :param full_load: Specifies the method of parsing the data. If False then the parser omits the parsing of zero (0) values in order to speed up loading and save memory. However, while creating the matrix, those zero values are going to be put back into the matrix (unless a row contains all-zero columns). This parsing is strongly recommended for sparse datasets. If the full_load parameter is True then all the zero (0) data are going to be read. :param file_extension: the extension of the dataset files ("gdm" by default) """ if not full_load: warnings.warn("\n\nYou are using the optimized loading technique. " "All-zero rows are not going to be loaded into memory. " "To load all the data please set the full_load parameter equal to True.") p = Parser(_path) self.meta = p.parse_meta(meta) self.data = p.parse_data(regs, values, full_load=full_load, extension=file_extension) self._path = _path
python
def load(self, _path, regs=['chr', 'left', 'right', 'strand'], meta=[], values=[], full_load=False, file_extension="gdm"): """Parses and loads the data into instance attributes. The indexes of the data and meta dataframes should be the same. :param _path: The path to the dataset on the filesystem :param regs: the regions that are to be analyzed :param meta: the meta-data that are to be analyzed :param values: the values that are to be selected :param full_load: Specifies the method of parsing the data. If False then the parser omits the parsing of zero (0) values in order to speed up loading and save memory. However, while creating the matrix, those zero values are going to be put back into the matrix (unless a row contains all-zero columns). This parsing is strongly recommended for sparse datasets. If the full_load parameter is True then all the zero (0) data are going to be read. :param file_extension: the extension of the dataset files ("gdm" by default) """ if not full_load: warnings.warn("\n\nYou are using the optimized loading technique. " "All-zero rows are not going to be loaded into memory. " "To load all the data please set the full_load parameter equal to True.") p = Parser(_path) self.meta = p.parse_meta(meta) self.data = p.parse_data(regs, values, full_load=full_load, extension=file_extension) self._path = _path
[ "def", "load", "(", "self", ",", "_path", ",", "regs", "=", "[", "'chr'", ",", "'left'", ",", "'right'", ",", "'strand'", "]", ",", "meta", "=", "[", "]", ",", "values", "=", "[", "]", ",", "full_load", "=", "False", ",", "file_extension", "=", "...
Parses and loads the data into instance attributes. The indexes of the data and meta dataframes should be the same. :param _path: The path to the dataset on the filesystem :param regs: the regions that are to be analyzed :param meta: the meta-data that are to be analyzed :param values: the values that are to be selected :param full_load: Specifies the method of parsing the data. If False then the parser omits the parsing of zero (0) values in order to speed up loading and save memory. However, while creating the matrix, those zero values are going to be put back into the matrix (unless a row contains all-zero columns). This parsing is strongly recommended for sparse datasets. If the full_load parameter is True then all the zero (0) data are going to be read. :param file_extension: the extension of the dataset files ("gdm" by default)
[ "Parses", "and", "loads", "the", "data", "into", "instance", "attributes", ".", "The", "indexes", "of", "the", "data", "and", "meta", "dataframes", "should", "be", "the", "same", "." ]
train
https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/ml/genometric_space.py#L43-L64
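A hedged usage sketch for the load record above. The dataset path and the 'count' value column are placeholders; only the default region columns and the gdm extension come from the signature itself.

from gmql.ml.genometric_space import GenometricSpace  # assumed import path

gs = GenometricSpace()
gs.load('/path/to/gdm_dataset',            # placeholder path
        regs=['chr', 'left', 'right', 'strand'],
        values=['count'],                  # placeholder value column
        full_load=False,                   # skip all-zero rows (sparse data)
        file_extension='gdm')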
DEIB-GECO/PyGMQL
gmql/ml/genometric_space.py
GenometricSpace.set_meta
def set_meta(self, selected_meta): """Sets one axis of the 2D multi-indexed dataframe index to the selected meta data. :param selected_meta: The list of the metadata users want to index with. """ meta_names = list(selected_meta) meta_names.append('sample') meta_index = [] # To set the index for existing samples in the region dataframe. # The index size of the region dataframe does not necessarily be equal to that of metadata df. warnings.warn("\n\nThis method assumes that the last level of the index is the sample_id.\n" "In case of single index, the index itself should be the sample_id") for x in meta_names: meta_index.append(self.meta.ix[self.data.index.get_level_values(-1)][x].values) meta_index = np.asarray(meta_index) multi_meta_index = pd.MultiIndex.from_arrays(meta_index, names=meta_names) self.data.index = multi_meta_index
python
def set_meta(self, selected_meta): """Sets one axis of the 2D multi-indexed dataframe index to the selected meta data. :param selected_meta: The list of the metadata users want to index with. """ meta_names = list(selected_meta) meta_names.append('sample') meta_index = [] # To set the index for existing samples in the region dataframe. # The index size of the region dataframe does not necessarily be equal to that of metadata df. warnings.warn("\n\nThis method assumes that the last level of the index is the sample_id.\n" "In case of single index, the index itself should be the sample_id") for x in meta_names: meta_index.append(self.meta.ix[self.data.index.get_level_values(-1)][x].values) meta_index = np.asarray(meta_index) multi_meta_index = pd.MultiIndex.from_arrays(meta_index, names=meta_names) self.data.index = multi_meta_index
[ "def", "set_meta", "(", "self", ",", "selected_meta", ")", ":", "meta_names", "=", "list", "(", "selected_meta", ")", "meta_names", ".", "append", "(", "'sample'", ")", "meta_index", "=", "[", "]", "# To set the index for existing samples in the region dataframe.", ...
Sets one axis of the 2D multi-indexed dataframe index to the selected metadata. :param selected_meta: The list of the metadata attributes to index with.
[ "Sets", "one", "axis", "of", "the", "2D", "multi", "-", "indexed", "dataframe", "index", "to", "the", "selected", "meta", "data", "." ]
train
https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/ml/genometric_space.py#L66-L84
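A usage sketch for set_meta, continuing from a loaded GenometricSpace whose data index ends in the sample id; the 'tissue' attribute is illustrative. One caveat worth noting: the implementation relies on DataFrame.ix, which was removed in pandas 1.0, so this call requires an older pandas release.

# 'gs' as constructed in the previous sketches; 'tissue' is assumed
# to be a column of gs.meta.
gs.set_meta(['tissue'])
# gs.data is now indexed by the MultiIndex ('tissue', 'sample').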
DEIB-GECO/PyGMQL
gmql/ml/genometric_space.py
GenometricSpace.to_matrix
def to_matrix(self, values, selected_regions, default_value=0): """Creates a 2D multi-indexed matrix representation of the data. This representation allows the data to be sent to the machine learning algorithms. Args: :param values: The value or values that are going to fill the matrix. :param selected_regions: The index to one axis of the matrix. :param default_value: The default fill value of the matrix """ if isinstance(values, list): for v in values: try: self.data[v] = self.data[v].map(float) except: print(self.data[v]) else: self.data[values] = self.data[values].map(float) print("started pivoting") self.data = pd.pivot_table(self.data, values=values, columns=selected_regions, index=['sample'], fill_value=default_value) print("end of pivoting")
python
def to_matrix(self, values, selected_regions, default_value=0): """Creates a 2D multi-indexed matrix representation of the data. This representation allows the data to be sent to the machine learning algorithms. Args: :param values: The value or values that are going to fill the matrix. :param selected_regions: The index to one axis of the matrix. :param default_value: The default fill value of the matrix """ if isinstance(values, list): for v in values: try: self.data[v] = self.data[v].map(float) except: print(self.data[v]) else: self.data[values] = self.data[values].map(float) print("started pivoting") self.data = pd.pivot_table(self.data, values=values, columns=selected_regions, index=['sample'], fill_value=default_value) print("end of pivoting")
[ "def", "to_matrix", "(", "self", ",", "values", ",", "selected_regions", ",", "default_value", "=", "0", ")", ":", "if", "isinstance", "(", "values", ",", "list", ")", ":", "for", "v", "in", "values", ":", "try", ":", "self", ".", "data", "[", "v", ...
Creates a 2D multi-indexed matrix representation of the data. This representation allows the data to be sent to the machine learning algorithms. Args: :param values: The value or values that are going to fill the matrix. :param selected_regions: The index to one axis of the matrix. :param default_value: The default fill value of the matrix
[ "Creates", "a", "2D", "multi", "-", "indexed", "matrix", "representation", "of", "the", "data", ".", "This", "representation", "allows", "the", "data", "to", "be", "sent", "to", "the", "machine", "learning", "algorithms", "." ]
train
https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/ml/genometric_space.py#L86-L108
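A usage sketch for the pivoting step above; all column names and values are illustrative, not from the record.

import pandas as pd
from gmql.ml.genometric_space import GenometricSpace  # assumed import path

# Long-format region data with one row per (sample, region) pair.
data = pd.DataFrame({
    'sample': ['s1', 's1', 's2'],
    'chr':    ['chr1', 'chr2', 'chr1'],
    'left':   [0, 10, 0],
    'right':  [5, 20, 5],
    'strand': ['+', '+', '+'],
    'count':  ['1', '2', '3'],   # stored as strings, mapped to float inside
})
meta = pd.DataFrame({'tissue': ['liver', 'brain']}, index=['s1', 's2'])
gs = GenometricSpace.from_memory(data, meta)

gs.to_matrix(values=['count'],
             selected_regions=['chr', 'left', 'right', 'strand'],
             default_value=0)
# gs.data is now a sample-by-region matrix, filled with 0 wherever a
# sample has no value for a region.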
DEIB-GECO/PyGMQL
gmql/ml/genometric_space.py
GenometricSpace.get_values
def get_values(self, set, selected_meta): """ Retrieves the selected metadata values of the given set :param set: cluster that contains the data :param selected_meta: the values of the selected_meta :return: the values of the selected meta of the cluster """ warnings.warn("\n\nThis method assumes that the last level of the index is the sample_id.\n" "In case of single index, the index itself should be the sample_id") sample_ids = set.index.get_level_values(-1) corresponding_meta = self.meta.loc[sample_ids] values = corresponding_meta[selected_meta] try: values = values.astype(float) except ValueError: print("the values should be numeric") return values
python
def get_values(self, set, selected_meta): """ Retrieves the selected metadata values of the given set :param set: cluster that contains the data :param selected_meta: the values of the selected_meta :return: the values of the selected meta of the cluster """ warnings.warn("\n\nThis method assumes that the last level of the index is the sample_id.\n" "In case of single index, the index itself should be the sample_id") sample_ids = set.index.get_level_values(-1) corresponding_meta = self.meta.loc[sample_ids] values = corresponding_meta[selected_meta] try: values = values.astype(float) except ValueError: print("the values should be numeric") return values
[ "def", "get_values", "(", "self", ",", "set", ",", "selected_meta", ")", ":", "warnings", ".", "warn", "(", "\"\\n\\nThis method assumes that the last level of the index is the sample_id.\\n\"", "\"In case of single index, the index itself should be the sample_id\"", ")", "sample_i...
Retrieves the selected metadata values of the given set. :param set: the cluster that contains the data :param selected_meta: the metadata attribute whose values are retrieved :return: the values of the selected metadata for the cluster
[ "Retrieves", "the", "selected", "metadata", "values", "of", "the", "given", "set" ]
train
https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/ml/genometric_space.py#L110-L128
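A usage sketch built on from_memory; 'age' is an illustrative numeric metadata attribute. As the warning inside the method says, get_values treats the last index level of the given set as the sample id.

import pandas as pd
from gmql.ml.genometric_space import GenometricSpace  # assumed import path

data = pd.DataFrame({'value': [1.0, 2.0]},
                    index=pd.Index(['s1', 's2'], name='sample'))
meta = pd.DataFrame({'age': [30.0, 45.0]}, index=['s1', 's2'])
gs = GenometricSpace.from_memory(data, meta)

# Treat the whole data frame as a single cluster and pull its ages.
ages = gs.get_values(gs.data, 'age')   # Series: [30.0, 45.0]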
DEIB-GECO/PyGMQL
gmql/ml/genometric_space.py
GenometricSpace.group_statistics
def group_statistics(self, group, selected_meta, stat_code='mean'): """ Provides statistics of a group based on the meta data selected. :param group: The result of a classification, clustering or biclustering algorithm :param selected_meta: The metadata that we are interested in :param stat_code: 'mean' for mean or 'variance' for variance or 'std' for standard deviation :return: returns the statistical properties of the selected metadata """ values = self.get_values(group, selected_meta) if stat_code == 'mean': res = statistics.mean(values) elif stat_code == 'variance': res = statistics.variance(values) elif stat_code == 'std': res = statistics.stdev(values) return res
python
def group_statistics(self, group, selected_meta, stat_code='mean'): """ Provides statistics of a group based on the meta data selected. :param group: The result of a classification, clustering or biclustering algorithm :param selected_meta: The metadata that we are interested in :param stat_code: 'mean' for mean or 'variance' for variance or 'std' for standard deviation :return: returns the statistical properties of the selected metadata """ values = self.get_values(group, selected_meta) if stat_code == 'mean': res = statistics.mean(values) elif stat_code == 'variance': res = statistics.variance(values) elif stat_code == 'std': res = statistics.stdev(values) return res
[ "def", "group_statistics", "(", "self", ",", "group", ",", "selected_meta", ",", "stat_code", "=", "'mean'", ")", ":", "values", "=", "self", ".", "get_values", "(", "group", ",", "selected_meta", ")", "if", "stat_code", "==", "'mean'", ":", "res", "=", ...
Provides statistics of a group based on the meta data selected. :param group: The result of a classification, clustering or biclustering algorithm :param selected_meta: The metadata that we are interested in :param stat_code: 'mean' for mean or 'variance' for variance or 'std' for standard deviation :return: returns the statistical properties of the selected metadata
[ "Provides", "statistics", "of", "a", "group", "based", "on", "the", "meta", "data", "selected", "." ]
train
https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/ml/genometric_space.py#L130-L146
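Continuing the get_values sketch above (same gs). Note that an unrecognized stat_code leaves res unassigned, so the call raises UnboundLocalError; only the three documented codes are safe.

mean_age = gs.group_statistics(gs.data, 'age', stat_code='mean')  # 37.5
std_age  = gs.group_statistics(gs.data, 'age', stat_code='std')   # ~10.6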
DEIB-GECO/PyGMQL
gmql/ml/genometric_space.py
GenometricSpace.to_bag_of_genomes
def to_bag_of_genomes(self, clustering_object): """ Creates a bag of genomes representation for data mining purposes. Each document (genome) in the representation is a set of metadata key and value pairs belonging to the same cluster. The bags of genomes are saved under the ./bag_of_genomes/ directory :param clustering_object: The clustering object """ meta_files = Parser._get_files('meta', self._path) meta_dict = {} for f in meta_files: meta_dict[Parser.get_sample_id(f)] = f clusters = [] if isinstance(clustering_object, Clustering): if Clustering.is_pyclustering_instance(clustering_object.model): no_clusters = len(clustering_object.model.get_clusters()) else: no_clusters = clustering_object.model.n_clusters for c in range(0, no_clusters): clusters.append(clustering_object.retrieve_cluster(self.data, c).index.get_level_values(-1).values) elif isinstance(clustering_object, Biclustering): no_clusters = clustering_object.model.n_clusters[0] # 0 for the rows no_col_clusters = clustering_object.model.n_clusters[1] # 1 for the columns for c in range(0, no_clusters): clusters.append(clustering_object.retrieve_bicluster(self.data, c*no_col_clusters, 0).index.get_level_values(-1).values) # sample names # to create the bag of genomes files print("creating the bag_of_genomes...") for c in tqdm(range(0, no_clusters)): document = open('./bag_of_genomes/document' + str(c) + '.bag_of_genome', 'w') for sample in clusters[c]: f = open(meta_dict[sample], 'r') for line in f: line = line.replace(' ', '_') splitted = line.split('\t') document.write(splitted[0] + '=' + splitted[1]) f.close() document.close()
python
def to_bag_of_genomes(self, clustering_object): """ Creates a bag of genomes representation for data mining purposes. Each document (genome) in the representation is a set of metadata key and value pairs belonging to the same cluster. The bags of genomes are saved under the ./bag_of_genomes/ directory :param clustering_object: The clustering object """ meta_files = Parser._get_files('meta', self._path) meta_dict = {} for f in meta_files: meta_dict[Parser.get_sample_id(f)] = f clusters = [] if isinstance(clustering_object, Clustering): if Clustering.is_pyclustering_instance(clustering_object.model): no_clusters = len(clustering_object.model.get_clusters()) else: no_clusters = clustering_object.model.n_clusters for c in range(0, no_clusters): clusters.append(clustering_object.retrieve_cluster(self.data, c).index.get_level_values(-1).values) elif isinstance(clustering_object, Biclustering): no_clusters = clustering_object.model.n_clusters[0] # 0 for the rows no_col_clusters = clustering_object.model.n_clusters[1] # 1 for the columns for c in range(0, no_clusters): clusters.append(clustering_object.retrieve_bicluster(self.data, c*no_col_clusters, 0).index.get_level_values(-1).values) # sample names # to create the bag of genomes files print("creating the bag_of_genomes...") for c in tqdm(range(0, no_clusters)): document = open('./bag_of_genomes/document' + str(c) + '.bag_of_genome', 'w') for sample in clusters[c]: f = open(meta_dict[sample], 'r') for line in f: line = line.replace(' ', '_') splitted = line.split('\t') document.write(splitted[0] + '=' + splitted[1]) f.close() document.close()
[ "def", "to_bag_of_genomes", "(", "self", ",", "clustering_object", ")", ":", "meta_files", "=", "Parser", ".", "_get_files", "(", "'meta'", ",", "self", ".", "_path", ")", "meta_dict", "=", "{", "}", "for", "f", "in", "meta_files", ":", "meta_dict", "[", ...
Creates a bag of genomes representation for data mining purposes. Each document (genome) in the representation is a set of metadata key and value pairs belonging to the same cluster. The bags of genomes are saved under the ./bag_of_genomes/ directory :param clustering_object: The clustering object
[ "Creates", "a", "bag", "of", "genomes", "representation", "for", "data", "mining", "purposes", "Each", "document", "(", "genome", ")", "in", "the", "representation", "is", "a", "set", "of", "metadata", "key", "and", "value", "pairs", "belonging", "to", "the"...
train
https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/ml/genometric_space.py#L148-L188
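A hedged usage sketch; 'gs' and the fitted Clustering wrapper 'km' are placeholders. One practical caveat visible in the code above: the output directory is opened for writing but never created, so it must exist beforehand.

import os

# 'gs' must have been built with load() (the method needs self._path
# to locate the *.meta files); 'km' is a fitted Clustering wrapper.
os.makedirs('./bag_of_genomes', exist_ok=True)
gs.to_bag_of_genomes(km)
# Writes one documentN.bag_of_genome file per cluster, each holding
# key=value metadata lines for the samples in that cluster.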
DEIB-GECO/PyGMQL
gmql/ml/genometric_space.py
GenometricSpace.to_term_document_matrix
def to_term_document_matrix(path_to_bag_of_genomes, max_df=0.99, min_df=1, use_idf=False): """ Creates a term-document matrix which is a mathematical matrix that describes the frequency of terms that occur in a collection of documents (in our case a collection of genomes). :param path_to_bag_of_genomes: Path to the documents (genomes) :param max_df: To prune the terms that are existing in the given portion of documents (if set to 1 then it does not prune) :return: returns the term-document dataframe """ token_dict = {} def BoG_tokenizer(_text): return _text.split('\n') print("creating the term-document matrix...") for file in tqdm(Parser._get_files('.bag_of_genome', path_to_bag_of_genomes)): f = open(file, 'r') text = f.read() token_dict[file] = text tfidf = TfidfVectorizer(tokenizer=BoG_tokenizer, use_idf=use_idf, smooth_idf=False, max_df=max_df, min_df=min_df) # max df is less than 1.0 to ignore the tokens existing in all of the documents tfs = tfidf.fit_transform(token_dict.values()) data = tfs.toarray() columns = tfidf.get_feature_names() df = pd.DataFrame(data, columns=columns) term_document_df = df.T return term_document_df
python
def to_term_document_matrix(path_to_bag_of_genomes, max_df=0.99, min_df=1, use_idf=False): """ Creates a term-document matrix which is a mathematical matrix that describes the frequency of terms that occur in a collection of documents (in our case a collection of genomes). :param path_to_bag_of_genomes: Path to the documents (genomes) :param max_df: To prune the terms that are existing in the given portion of documents (if set to 1 then it does not prune) :return: returns the term-document dataframe """ token_dict = {} def BoG_tokenizer(_text): return _text.split('\n') print("creating the term-document matrix...") for file in tqdm(Parser._get_files('.bag_of_genome', path_to_bag_of_genomes)): f = open(file, 'r') text = f.read() token_dict[file] = text tfidf = TfidfVectorizer(tokenizer=BoG_tokenizer, use_idf=use_idf, smooth_idf=False, max_df=max_df, min_df=min_df) # max df is less than 1.0 to ignore the tokens existing in all of the documents tfs = tfidf.fit_transform(token_dict.values()) data = tfs.toarray() columns = tfidf.get_feature_names() df = pd.DataFrame(data, columns=columns) term_document_df = df.T return term_document_df
[ "def", "to_term_document_matrix", "(", "path_to_bag_of_genomes", ",", "max_df", "=", "0.99", ",", "min_df", "=", "1", ",", "use_idf", "=", "False", ")", ":", "token_dict", "=", "{", "}", "def", "BoG_tokenizer", "(", "_text", ")", ":", "return", "_text", "....
Creates a term-document matrix: a matrix that describes the frequency of terms that occur in a collection of documents (in our case, a collection of genomes). :param path_to_bag_of_genomes: Path to the documents (genomes) :param max_df: Prunes the terms that occur in more than the given proportion of documents (if set to 1 then it does not prune) :param min_df: Prunes the terms that occur in fewer than the given number of documents :param use_idf: Whether to apply inverse-document-frequency weighting :return: returns the term-document dataframe
[ "Creates", "a", "term", "-", "document", "matrix", "which", "is", "a", "mathematical", "matrix", "that", "describes", "the", "frequency", "of", "terms", "that", "occur", "in", "a", "collection", "of", "documents", "(", "in", "our", "case", "a", "collection",...
train
https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/ml/genometric_space.py#L191-L219
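A usage sketch for the record above; the function takes no self, so it is presumably exposed as a staticmethod.

from gmql.ml.genometric_space import GenometricSpace  # assumed import path

td = GenometricSpace.to_term_document_matrix(
    './bag_of_genomes',   # directory written by to_bag_of_genomes
    max_df=0.99,          # drop terms present in >99% of documents
    min_df=1,
    use_idf=False)        # raw term frequencies, no idf weighting
# Rows of td are metadata terms, columns are documents (clusters).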
DEIB-GECO/PyGMQL
gmql/ml/genometric_space.py
GenometricSpace.tf
def tf(cluster): """ Computes the term frequency and stores it as a dictionary :param cluster: the cluster that contains the metadata :return: tf dictionary """ counts = dict() words = cluster.split(' ') for word in words: counts[word] = counts.get(word, 0) + 1 return counts
python
def tf(cluster): """ Computes the term frequency and stores it as a dictionary :param cluster: the cluster that contains the metadata :return: tf dictionary """ counts = dict() words = cluster.split(' ') for word in words: counts[word] = counts.get(word, 0) + 1 return counts
[ "def", "tf", "(", "cluster", ")", ":", "counts", "=", "dict", "(", ")", "words", "=", "cluster", ".", "split", "(", "' '", ")", "for", "word", "in", "words", ":", "counts", "[", "word", "]", "=", "counts", ".", "get", "(", "word", ",", "0", ")"...
Computes the term frequency and stores it as a dictionary. :param cluster: the cluster metadata as a whitespace-separated token string :return: the term-frequency dictionary
[ "Computes", "the", "term", "frequency", "and", "stores", "it", "as", "a", "dictionary" ]
train
https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/ml/genometric_space.py#L222-L233
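A worked example for tf; like the previous function it takes no self, so it is presumably a staticmethod. The token string is illustrative.

from gmql.ml.genometric_space import GenometricSpace  # assumed import path

counts = GenometricSpace.tf('tissue=liver sex=male tissue=liver')
# counts == {'tissue=liver': 2, 'sex=male': 1}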